/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_extfree_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_buf_item.h"
#include "xfs_trace.h"
#include "xfs_symlink.h"
#include "xfs_attr_leaf.h"
#include "xfs_filestream.h"
#include "xfs_rmap.h"
#include "xfs_ag_resv.h"


kmem_zone_t		*xfs_bmap_free_item_zone;
/*
 * Miscellaneous helper functions
 */
/*
 * Compute and fill in the value of the maximum depth of a bmap btree
 * in this filesystem.  Done once, during mount.
 */
void
xfs_bmap_compute_maxlevels(
	xfs_mount_t	*mp,		/* file system mount structure */
	int		whichfork)	/* data or attr fork */
{
	int		level;		/* btree level */
	uint		maxblocks;	/* max blocks at this level */
	uint		maxleafents;	/* max leaf entries possible */
	int		maxrootrecs;	/* max records in root block */
	int		minleafrecs;	/* min records in leaf block */
	int		minnoderecs;	/* min records in node block */
	int		sz;		/* root block size */

	/*
	 * The maximum number of extents in a file, hence the maximum
	 * number of leaf entries, is controlled by the type of di_nextents
	 * (a signed 32-bit number, xfs_extnum_t), or by di_anextents
	 * (a signed 16-bit number, xfs_aextnum_t).
	 *
	 * Note that we can no longer assume that if we are in ATTR1 the
	 * fork offset of all the inodes will be
	 * (xfs_default_attroffset(ip) >> 3) because we could have mounted
	 * with ATTR2 and then mounted back with ATTR1, keeping the
	 * di_forkoff's fixed but probably at various positions.  Therefore,
	 * for both ATTR1 and ATTR2 we have to assume the worst case scenario
	 * of a minimum size available.
	 */
	if (whichfork == XFS_DATA_FORK) {
		maxleafents = MAXEXTNUM;
		sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
	} else {
		maxleafents = MAXAEXTNUM;
		sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);
	}
	maxrootrecs = xfs_bmdr_maxrecs(sz, 0);
	minleafrecs = mp->m_bmap_dmnr[0];
	minnoderecs = mp->m_bmap_dmnr[1];
	maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
	for (level = 1; maxblocks > 1; level++) {
		if (maxblocks <= maxrootrecs)
			maxblocks = 1;
		else
			maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
	}
	mp->m_bm_maxlevels[whichfork] = level;
}
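/*
 * Worked example (illustrative numbers only, not taken from any real
 * superblock): suppose the data fork allows MAXEXTNUM (2^31 - 1) leaf
 * entries and the mount computed m_bmap_dmnr[] as 125 minimum leaf
 * records and 250 minimum node records, with 9 records fitting in the
 * inode root.  Then maxblocks in the loop above walks
 * 17,179,870 -> 68,720 -> 275 -> 2 -> 1 and the loop exits with
 * level == 5, so a bmap btree in that filesystem can never be more
 * than five levels deep.
 */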
/*
 * Lookup the record equal to [off, bno, len] in the btree given by cur.
 */
STATIC int				/* error */
xfs_bmbt_lookup_eq(
	struct xfs_btree_cur	*cur,
	xfs_fileoff_t		off,
	xfs_fsblock_t		bno,
	xfs_filblks_t		len,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b.br_startoff = off;
	cur->bc_rec.b.br_startblock = bno;
	cur->bc_rec.b.br_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}

/*
 * Lookup the first record greater than or equal to [off, bno, len]
 * in the btree given by cur.
 */
STATIC int				/* error */
xfs_bmbt_lookup_ge(
	struct xfs_btree_cur	*cur,
	xfs_fileoff_t		off,
	xfs_fsblock_t		bno,
	xfs_filblks_t		len,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b.br_startoff = off;
	cur->bc_rec.b.br_startblock = bno;
	cur->bc_rec.b.br_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}
/*
 * Check if the inode needs to be converted to btree format.
 */
static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
{
	return XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
		XFS_IFORK_NEXTENTS(ip, whichfork) >
			XFS_IFORK_MAXEXT(ip, whichfork);
}

/*
 * Check if the inode should be converted to extent format.
 */
static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
{
	return XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
		XFS_IFORK_NEXTENTS(ip, whichfork) <=
			XFS_IFORK_MAXEXT(ip, whichfork);
}
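/*
 * Together these two predicates implement the fork format hysteresis:
 * once the extent count grows past what the inline fork can index
 * (XFS_IFORK_MAXEXT), the fork is promoted to btree format, and once it
 * shrinks back to fit it is demoted to extent format.  Note the
 * asymmetric comparisons (strictly-greater vs. less-or-equal) so a fork
 * sitting exactly at the limit stays in extent format.
 */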
/*
 * Update the record referred to by cur to the value given
 * by [off, bno, len, state].
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int
xfs_bmbt_update(
	struct xfs_btree_cur	*cur,
	xfs_fileoff_t		off,
	xfs_fsblock_t		bno,
	xfs_filblks_t		len,
	xfs_exntst_t		state)
{
	union xfs_btree_rec	rec;

	xfs_bmbt_disk_set_allf(&rec.bmbt, off, bno, len, state);
	return xfs_btree_update(cur, &rec);
}
/*
 * Compute the worst-case number of indirect blocks that will be used
 * for ip's delayed extent of length "len".
 */
STATIC xfs_filblks_t
xfs_bmap_worst_indlen(
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_filblks_t	len)		/* delayed extent length */
{
	int		level;		/* btree level number */
	int		maxrecs;	/* maximum record count at this level */
	xfs_mount_t	*mp;		/* mount structure */
	xfs_filblks_t	rval;		/* return value */

	mp = ip->i_mount;
	maxrecs = mp->m_bmap_dmxr[0];
	for (level = 0, rval = 0;
	     level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
	     level++) {
		len += maxrecs - 1;
		do_div(len, maxrecs);
		rval += len;
		if (len == 1)
			return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
				level - 1;
		if (level == 0)
			maxrecs = mp->m_bmap_dmxr[1];
	}
	return rval;
}
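/*
 * Illustrative calculation (made-up geometry): with m_bmap_dmxr[0] ==
 * 125, m_bmap_dmxr[1] == 250 and five bmap btree levels, a delayed
 * extent of 1000 blocks costs ceil(1000/125) = 8 leaf blocks on the
 * first pass, then ceil(8/250) = 1 node block; once len reaches 1 the
 * early return charges one block for each of the three remaining
 * levels, giving a worst-case reservation of 8 + 1 + 3 = 12 indirect
 * blocks.
 */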
/*
 * Calculate the default attribute fork offset for newly created inodes.
 */
uint
xfs_default_attroffset(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint			offset;

	if (mp->m_sb.sb_inodesize == 256) {
		offset = XFS_LITINO(mp, ip->i_d.di_version) -
				XFS_BMDR_SPACE_CALC(MINABTPTRS);
	} else {
		offset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
	}

	ASSERT(offset < XFS_LITINO(mp, ip->i_d.di_version));
	return offset;
}
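/*
 * Note that this returns a byte offset while di_forkoff is stored in
 * units of 8 bytes, which is why callers such as
 * xfs_bmap_forkoff_reset() and xfs_bmap_add_attrfork() shift the value
 * returned here right by 3 before storing it.
 */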
/*
 * Helper routine to reset inode di_forkoff field when switching
 * attribute fork from local to extent format - we reset it where
 * possible to make space available for inline data fork extents.
 */
STATIC void
xfs_bmap_forkoff_reset(
	xfs_inode_t	*ip,
	int		whichfork)
{
	if (whichfork == XFS_ATTR_FORK &&
	    ip->i_d.di_format != XFS_DINODE_FMT_DEV &&
	    ip->i_d.di_format != XFS_DINODE_FMT_UUID &&
	    ip->i_d.di_format != XFS_DINODE_FMT_BTREE) {
		uint	dfl_forkoff = xfs_default_attroffset(ip) >> 3;

		if (dfl_forkoff > ip->i_d.di_forkoff)
			ip->i_d.di_forkoff = dfl_forkoff;
	}
}
#ifdef DEBUG
STATIC struct xfs_buf *
xfs_bmap_get_bp(
	struct xfs_btree_cur	*cur,
	xfs_fsblock_t		bno)
{
	struct xfs_log_item_desc *lidp;
	int			i;

	if (!cur)
		return NULL;

	for (i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
		if (!cur->bc_bufs[i])
			break;
		if (XFS_BUF_ADDR(cur->bc_bufs[i]) == bno)
			return cur->bc_bufs[i];
	}

	/* Chase down all the log items to see if the bp is there */
	list_for_each_entry(lidp, &cur->bc_tp->t_items, lid_trans) {
		struct xfs_buf_log_item	*bip;
		bip = (struct xfs_buf_log_item *)lidp->lid_item;
		if (bip->bli_item.li_type == XFS_LI_BUF &&
		    XFS_BUF_ADDR(bip->bli_buf) == bno)
			return bip->bli_buf;
	}

	return NULL;
}
STATIC void
xfs_check_block(
	struct xfs_btree_block	*block,
	xfs_mount_t		*mp,
	int			root,
	short			sz)
{
	int			i, j, dmxr;
	__be64			*pp, *thispa;	/* pointer to block address */
	xfs_bmbt_key_t		*prevp, *keyp;

	ASSERT(be16_to_cpu(block->bb_level) > 0);

	prevp = NULL;
	for( i = 1; i <= xfs_btree_get_numrecs(block); i++) {
		dmxr = mp->m_bmap_dmxr[0];
		keyp = XFS_BMBT_KEY_ADDR(mp, block, i);

		if (prevp) {
			ASSERT(be64_to_cpu(prevp->br_startoff) <
			       be64_to_cpu(keyp->br_startoff));
		}
		prevp = keyp;

		/*
		 * Compare the block numbers to see if there are dups.
		 */
		if (root)
			pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz);
		else
			pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr);

		for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
			if (root)
				thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz);
			else
				thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
			if (*thispa == *pp) {
				xfs_warn(mp, "%s: thispa(%d) == pp(%d) %Ld",
					__func__, j, i,
					(unsigned long long)be64_to_cpu(*thispa));
				panic("%s: ptrs are equal in node\n",
					__func__);
			}
		}
	}
}
/*
 * Check that the extents for the inode ip are in the right order in all
 * btree leaves. This becomes prohibitively expensive for large extent count
 * files, so don't bother with inodes that have more than 10,000 extents in
 * them. The btree record ordering checks will still be done, so for such large
 * bmapbt constructs that is going to catch most corruptions.
 */
STATIC void
xfs_bmap_check_leaf_extents(
	xfs_btree_cur_t		*cur,	/* btree cursor or null */
	xfs_inode_t		*ip,	/* incore inode pointer */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	xfs_buf_t		*bp;	/* buffer for "block" */
	int			error;	/* error return value */
	xfs_extnum_t		i=0, j;	/* index into the extents list */
	xfs_ifork_t		*ifp;	/* fork structure */
	int			level;	/* btree level, for checking */
	xfs_mount_t		*mp;	/* file system mount structure */
	__be64			*pp;	/* pointer to block address */
	xfs_bmbt_rec_t		*ep;	/* pointer to current extent */
	xfs_bmbt_rec_t		last = {0, 0}; /* last extent in prev block */
	xfs_bmbt_rec_t		*nextp;	/* pointer to next extent */
	int			bp_release = 0;

	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
		return;

	/* skip large extent count inodes */
	if (ip->i_d.di_nextents > 10000)
		return;

	bno = NULLFSBLOCK;
	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	block = ifp->if_broot;
	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);

	ASSERT(bno != NULLFSBLOCK);
	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

	/*
	 * Go down the tree until leaf level is reached, following the first
	 * pointer (leftmost) at each level.
	 */
	while (level-- > 0) {
		/* See if buf is in cur first */
		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
		if (level == 0)
			break;

		/*
		 * Check this block for basic sanity (increasing keys and
		 * no duplicate blocks).
		 */
		xfs_check_block(block, mp, 0, 0);
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		XFS_WANT_CORRUPTED_GOTO(mp,
					XFS_FSB_SANITY_CHECK(mp, bno), error0);
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
	}

	/*
	 * Here with bp and block set to the leftmost leaf node in the tree.
	 */
	i = 0;

	/*
	 * Loop over all leaf nodes checking that all extents are in the right order.
	 */
	for (;;) {
		xfs_fsblock_t	nextbno;
		xfs_extnum_t	num_recs;

		num_recs = xfs_btree_get_numrecs(block);

		/*
		 * Read-ahead the next leaf block, if any.
		 */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);

		/*
		 * Check all the extents to make sure they are OK.
		 * If we had a previous block, the last entry should
		 * conform with the first entry in this one.
		 */
		ep = XFS_BMBT_REC_ADDR(mp, block, 1);
		if (i) {
			ASSERT(xfs_bmbt_disk_get_startoff(&last) +
			       xfs_bmbt_disk_get_blockcount(&last) <=
			       xfs_bmbt_disk_get_startoff(ep));
		}
		for (j = 1; j < num_recs; j++) {
			nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1);
			ASSERT(xfs_bmbt_disk_get_startoff(ep) +
			       xfs_bmbt_disk_get_blockcount(ep) <=
			       xfs_bmbt_disk_get_startoff(nextp));
			ep = nextp;
		}

		last = *ep;
		i += num_recs;
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
		bno = nextbno;
		/*
		 * If we've reached the end, stop.
		 */
		if (bno == NULLFSBLOCK)
			break;

		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
	}

	return;

error0:
	xfs_warn(mp, "%s: at error0", __func__);
	if (bp_release)
		xfs_trans_brelse(NULL, bp);
error_norelse:
	xfs_warn(mp, "%s: BAD after btree leaves for %d extents",
		__func__, i);
	panic("%s: CORRUPTED BTREE OR SOMETHING", __func__);
	return;
}
/*
 * Add bmap trace insert entries for all the contents of the extent records.
 */
void
xfs_bmap_trace_exlist(
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_extnum_t	cnt,		/* count of entries in the list */
	int		whichfork,	/* data or attr fork */
	unsigned long	caller_ip)
{
	xfs_extnum_t	idx;		/* extent record index */
	xfs_ifork_t	*ifp;		/* inode fork pointer */
	int		state = 0;

	if (whichfork == XFS_ATTR_FORK)
		state |= BMAP_ATTRFORK;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(cnt == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)));
	for (idx = 0; idx < cnt; idx++)
		trace_xfs_extlist(ip, idx, whichfork, caller_ip);
}
/*
 * Validate that the bmbt_irecs being returned from bmapi are valid
 * given the caller's original parameters.  Specifically check the
 * ranges of the returned irecs to ensure that they only extend beyond
 * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
 */
STATIC void
xfs_bmap_validate_ret(
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	int			flags,
	xfs_bmbt_irec_t		*mval,
	int			nmap,
	int			ret_nmap)
{
	int			i;		/* index to map values */

	ASSERT(ret_nmap <= nmap);

	for (i = 0; i < ret_nmap; i++) {
		ASSERT(mval[i].br_blockcount > 0);
		if (!(flags & XFS_BMAPI_ENTIRE)) {
			ASSERT(mval[i].br_startoff >= bno);
			ASSERT(mval[i].br_blockcount <= len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
			       bno + len);
		} else {
			ASSERT(mval[i].br_startoff < bno + len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
			       bno);
		}
		ASSERT(i == 0 ||
		       mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
		       mval[i].br_startoff);
		ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
		       mval[i].br_startblock != HOLESTARTBLOCK);
		ASSERT(mval[i].br_state == XFS_EXT_NORM ||
		       mval[i].br_state == XFS_EXT_UNWRITTEN);
	}
}

#else
#define xfs_bmap_check_leaf_extents(cur, ip, whichfork)		do { } while (0)
#define	xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
#endif /* DEBUG */
/*
 * bmap free list manipulation functions
 */
/*
 * Add the extent to the list of extents to be free at transaction end.
 * The list is maintained sorted (by block number).
 */
void
xfs_bmap_add_free(
	struct xfs_mount		*mp,
	struct xfs_defer_ops		*dfops,
	xfs_fsblock_t			bno,
	xfs_filblks_t			len,
	struct xfs_owner_info		*oinfo)
{
	struct xfs_extent_free_item	*new;		/* new element */
#ifdef DEBUG
	xfs_agnumber_t		agno;
	xfs_agblock_t		agbno;

	ASSERT(bno != NULLFSBLOCK);
	ASSERT(len > 0);
	ASSERT(len <= MAXEXTLEN);
	ASSERT(!isnullstartblock(bno));
	agno = XFS_FSB_TO_AGNO(mp, bno);
	agbno = XFS_FSB_TO_AGBNO(mp, bno);
	ASSERT(agno < mp->m_sb.sb_agcount);
	ASSERT(agbno < mp->m_sb.sb_agblocks);
	ASSERT(len < mp->m_sb.sb_agblocks);
	ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
#endif
	ASSERT(xfs_bmap_free_item_zone != NULL);

	new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
	new->xefi_startblock = bno;
	new->xefi_blockcount = (xfs_extlen_t)len;
	if (oinfo)
		new->xefi_oinfo = *oinfo;
	else
		xfs_rmap_skip_owner_update(&new->xefi_oinfo);
	trace_xfs_bmap_free_defer(mp, XFS_FSB_TO_AGNO(mp, bno), 0,
			XFS_FSB_TO_AGBNO(mp, bno), len);
	xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_FREE, &new->xefi_list);
}
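/*
 * Note that nothing is freed here; the extent is only queued on the
 * deferred-ops list.  The actual free happens when the caller finishes
 * the deferred work, e.g. the xfs_defer_finish() call at the end of
 * xfs_bmap_add_attrfork() below.  xfs_bmap_btree_to_extents() is a
 * typical caller in this file, queueing the discarded btree leaf block.
 */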
/*
 * Inode fork format manipulation functions
 */
/*
 * Transform a btree format file with only one leaf node, where the
 * extents list will fit in the inode, into an extents format file.
 * Since the file extents are already in-core, all we have to do is
 * give up the space for the btree root and pitch the leaf block.
 */
STATIC int				/* error */
xfs_bmap_btree_to_extents(
	xfs_trans_t		*tp,	/* transaction pointer */
	xfs_inode_t		*ip,	/* incore inode pointer */
	xfs_btree_cur_t		*cur,	/* btree cursor */
	int			*logflagsp, /* inode logging flags */
	int			whichfork)  /* data or attr fork */
{
	/* REFERENCED */
	struct xfs_btree_block	*cblock;/* child btree block */
	xfs_fsblock_t		cbno;	/* child block number */
	xfs_buf_t		*cbp;	/* child block's buffer */
	int			error;	/* error return value */
	xfs_ifork_t		*ifp;	/* inode fork data */
	xfs_mount_t		*mp;	/* mount point structure */
	__be64			*pp;	/* ptr to block address */
	struct xfs_btree_block	*rblock;/* root btree block */
	struct xfs_owner_info	oinfo;

	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
	rblock = ifp->if_broot;
	ASSERT(be16_to_cpu(rblock->bb_level) == 1);
	ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
	ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
	cbno = be64_to_cpu(*pp);
	*logflagsp = 0;
#ifdef DEBUG
	if ((error = xfs_btree_check_lptr(cur, cbno, 1)))
		return error;
#endif
	error = xfs_btree_read_bufl(mp, tp, cbno, 0, &cbp, XFS_BMAP_BTREE_REF,
				&xfs_bmbt_buf_ops);
	if (error)
		return error;
	cblock = XFS_BUF_TO_BLOCK(cbp);
	if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
		return error;
	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
	xfs_bmap_add_free(mp, cur->bc_private.b.dfops, cbno, 1, &oinfo);
	ip->i_d.di_nblocks--;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
	xfs_trans_binval(tp, cbp);
	if (cur->bc_bufs[0] == cbp)
		cur->bc_bufs[0] = NULL;
	xfs_iroot_realloc(ip, -1, whichfork);
	ASSERT(ifp->if_broot == NULL);
	ASSERT((ifp->if_flags & XFS_IFBROOT) == 0);
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
	*logflagsp = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
	return 0;
}
/*
 * Convert an extents-format file into a btree-format file.
 * The new file will have a root block (in the inode) and a single child block.
 */
STATIC int					/* error */
xfs_bmap_extents_to_btree(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	xfs_fsblock_t		*firstblock,	/* first-block-allocated */
	struct xfs_defer_ops	*dfops,		/* blocks freed in xaction */
	xfs_btree_cur_t		**curp,		/* cursor returned to caller */
	int			wasdel,		/* converting a delayed alloc */
	int			*logflagsp,	/* inode logging flags */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_btree_block	*ablock;	/* allocated (child) bt block */
	xfs_buf_t		*abp;		/* buffer for ablock */
	xfs_alloc_arg_t		args;		/* allocation arguments */
	xfs_bmbt_rec_t		*arp;		/* child record pointer */
	struct xfs_btree_block	*block;		/* btree root block */
	xfs_btree_cur_t		*cur;		/* bmap btree cursor */
	xfs_bmbt_rec_host_t	*ep;		/* extent record pointer */
	int			error;		/* error return value */
	xfs_extnum_t		i, cnt;		/* extent record index */
	xfs_ifork_t		*ifp;		/* inode fork pointer */
	xfs_bmbt_key_t		*kp;		/* root block key pointer */
	xfs_mount_t		*mp;		/* mount structure */
	xfs_extnum_t		nextents;	/* number of file extents */
	xfs_bmbt_ptr_t		*pp;		/* root block address pointer */

	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS);

	/*
	 * Make space in the inode incore.
	 */
	xfs_iroot_realloc(ip, 1, whichfork);
	ifp->if_flags |= XFS_IFBROOT;

	/*
	 * Fill in the root.
	 */
	block = ifp->if_broot;
	if (xfs_sb_version_hascrc(&mp->m_sb))
		xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
				 XFS_BMAP_CRC_MAGIC, 1, 1, ip->i_ino,
				 XFS_BTREE_LONG_PTRS | XFS_BTREE_CRC_BLOCKS);
	else
		xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
				 XFS_BMAP_MAGIC, 1, 1, ip->i_ino,
				 XFS_BTREE_LONG_PTRS);

	/*
	 * Need a cursor.  Can't allocate until bb_level is filled in.
	 */
	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
	cur->bc_private.b.firstblock = *firstblock;
	cur->bc_private.b.dfops = dfops;
	cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
	/*
	 * Convert to a btree with two levels, one record in root.
	 */
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE);
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = mp;
	xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, whichfork);
	args.firstblock = *firstblock;
	if (*firstblock == NULLFSBLOCK) {
		args.type = XFS_ALLOCTYPE_START_BNO;
		args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
	} else if (dfops->dop_low) {
		args.type = XFS_ALLOCTYPE_START_BNO;
		args.fsbno = *firstblock;
	} else {
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
		args.fsbno = *firstblock;
	}
	args.minlen = args.maxlen = args.prod = 1;
	args.wasdel = wasdel;
	*logflagsp = 0;
	if ((error = xfs_alloc_vextent(&args))) {
		xfs_iroot_realloc(ip, -1, whichfork);
		xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
		return error;
	}
	/*
	 * Allocation can't fail, the space was reserved.
	 */
	ASSERT(args.fsbno != NULLFSBLOCK);
	ASSERT(*firstblock == NULLFSBLOCK ||
	       args.agno == XFS_FSB_TO_AGNO(mp, *firstblock) ||
	       (dfops->dop_low &&
		args.agno > XFS_FSB_TO_AGNO(mp, *firstblock)));
	*firstblock = cur->bc_private.b.firstblock = args.fsbno;
	cur->bc_private.b.allocated++;
	ip->i_d.di_nblocks++;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
	abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0);
	/*
	 * Fill in the child block.
	 */
	abp->b_ops = &xfs_bmbt_buf_ops;
	ablock = XFS_BUF_TO_BLOCK(abp);
	if (xfs_sb_version_hascrc(&mp->m_sb))
		xfs_btree_init_block_int(mp, ablock, abp->b_bn,
				XFS_BMAP_CRC_MAGIC, 0, 0, ip->i_ino,
				XFS_BTREE_LONG_PTRS | XFS_BTREE_CRC_BLOCKS);
	else
		xfs_btree_init_block_int(mp, ablock, abp->b_bn,
				XFS_BMAP_MAGIC, 0, 0, ip->i_ino,
				XFS_BTREE_LONG_PTRS);

	arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	for (cnt = i = 0; i < nextents; i++) {
		ep = xfs_iext_get_ext(ifp, i);
		if (!isnullstartblock(xfs_bmbt_get_startblock(ep))) {
			arp->l0 = cpu_to_be64(ep->l0);
			arp->l1 = cpu_to_be64(ep->l1);
			arp++; cnt++;
		}
	}
	ASSERT(cnt == XFS_IFORK_NEXTENTS(ip, whichfork));
	xfs_btree_set_numrecs(ablock, cnt);

	/*
	 * Fill in the root key and pointer.
	 */
	kp = XFS_BMBT_KEY_ADDR(mp, block, 1);
	arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
	kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
	pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
						be16_to_cpu(block->bb_level)));
	*pp = cpu_to_be64(args.fsbno);

	/*
	 * Do all this logging at the end so that
	 * the root is at the right level.
	 */
	xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
	xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
	ASSERT(*curp == NULL);
	*curp = cur;
	*logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
	return 0;
}
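/*
 * A sketch of the resulting layout: the inode fork now holds a
 * one-record root (key = startoff of the first extent, pointer =
 * args.fsbno) and the single child block holds every non-delalloc
 * extent record that used to live in the inline extent list, i.e. the
 * inverse of the transformation done by xfs_bmap_btree_to_extents()
 * above.
 */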
/*
 * Convert a local file to an extents file.
 * This code is out of bounds for data forks of regular files,
 * since the file data needs to get logged so things will stay consistent.
 * (The bmap-level manipulations are ok, though).
 */
void
xfs_bmap_local_to_extents_empty(
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);

	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
	ASSERT(ifp->if_bytes == 0);
	ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0);

	xfs_bmap_forkoff_reset(ip, whichfork);
	ifp->if_flags &= ~XFS_IFINLINE;
	ifp->if_flags |= XFS_IFEXTENTS;
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
}
STATIC int				/* error */
xfs_bmap_local_to_extents(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_fsblock_t	*firstblock,	/* first block allocated in xaction */
	xfs_extlen_t	total,		/* total blocks needed by transaction */
	int		*logflagsp,	/* inode logging flags */
	int		whichfork,
	void		(*init_fn)(struct xfs_trans *tp,
				   struct xfs_buf *bp,
				   struct xfs_inode *ip,
				   struct xfs_ifork *ifp))
{
	int		error = 0;
	int		flags;		/* logging flags returned */
	xfs_ifork_t	*ifp;		/* inode fork pointer */
	xfs_alloc_arg_t	args;		/* allocation arguments */
	xfs_buf_t	*bp;		/* buffer for extent block */
	xfs_bmbt_rec_host_t *ep;	/* extent record pointer */

	/*
	 * We don't want to deal with the case of keeping inode data inline yet.
	 * So sending the data fork of a regular inode is invalid.
	 */
	ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK));
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);

	if (!ifp->if_bytes) {
		xfs_bmap_local_to_extents_empty(ip, whichfork);
		flags = XFS_ILOG_CORE;
		goto done;
	}

	flags = 0;
	error = 0;
	ASSERT((ifp->if_flags & (XFS_IFINLINE|XFS_IFEXTENTS|XFS_IFEXTIREC)) ==
								XFS_IFINLINE);
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = ip->i_mount;
	xfs_rmap_ino_owner(&args.oinfo, ip->i_ino, whichfork, 0);
	args.firstblock = *firstblock;
	/*
	 * Allocate a block.  We know we need only one, since the
	 * file currently fits in an inode.
	 */
	if (*firstblock == NULLFSBLOCK) {
		args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino);
		args.type = XFS_ALLOCTYPE_START_BNO;
	} else {
		args.fsbno = *firstblock;
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
	}
	args.total = total;
	args.minlen = args.maxlen = args.prod = 1;
	error = xfs_alloc_vextent(&args);
	if (error)
		goto done;

	/* Can't fail, the space was reserved. */
	ASSERT(args.fsbno != NULLFSBLOCK);
	ASSERT(args.len == 1);
	*firstblock = args.fsbno;
	bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);

	/*
	 * Initialize the block, copy the data and log the remote buffer.
	 *
	 * The callout is responsible for logging because the remote format
	 * might differ from the local format and thus we don't know how much
	 * to log here.  Note that init_fn must also set the buffer log item
	 * type correctly.
	 */
	init_fn(tp, bp, ip, ifp);

	/* account for the change in fork size */
	xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
	xfs_bmap_local_to_extents_empty(ip, whichfork);
	flags |= XFS_ILOG_CORE;

	xfs_iext_add(ifp, 0, 1);
	ep = xfs_iext_get_ext(ifp, 0);
	xfs_bmbt_set_allf(ep, 0, args.fsbno, 1, XFS_EXT_NORM);
	trace_xfs_bmap_post_update(ip, 0,
			whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0,
			_THIS_IP_);
	XFS_IFORK_NEXT_SET(ip, whichfork, 1);
	ip->i_d.di_nblocks = 1;
	xfs_trans_mod_dquot_byino(tp, ip,
		XFS_TRANS_DQ_BCOUNT, 1L);
	flags |= xfs_ilog_fext(whichfork);

done:
	*logflagsp = flags;
	return error;
}
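/*
 * The only init_fn callout used in this file is
 * xfs_symlink_local_to_remote(), passed in by
 * xfs_bmap_add_attrfork_local() below when pushing a shortform symlink
 * target out to its own block; directories take a different path
 * entirely (xfs_dir2_sf_to_block()).
 */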
/*
 * Called from xfs_bmap_add_attrfork to handle btree format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_btree(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	xfs_fsblock_t		*firstblock,	/* first block allocated */
	struct xfs_defer_ops	*dfops,		/* blocks to free at commit */
	int			*flags)		/* inode logging flags */
{
	xfs_btree_cur_t		*cur;		/* btree cursor */
	int			error;		/* error return value */
	xfs_mount_t		*mp;		/* file system mount struct */
	int			stat;		/* newroot status */

	mp = ip->i_mount;
	if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip))
		*flags |= XFS_ILOG_DBROOT;
	else {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
		cur->bc_private.b.dfops = dfops;
		cur->bc_private.b.firstblock = *firstblock;
		if ((error = xfs_bmbt_lookup_ge(cur, 0, 0, 0, &stat)))
			goto error0;
		/* must be at least one entry */
		XFS_WANT_CORRUPTED_GOTO(mp, stat == 1, error0);
		if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
			goto error0;
		if (stat == 0) {
			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
			return -ENOSPC;
		}
		*firstblock = cur->bc_private.b.firstblock;
		cur->bc_private.b.allocated = 0;
		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	}
	return 0;
error0:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}
/*
 * Called from xfs_bmap_add_attrfork to handle extents format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_extents(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	xfs_fsblock_t		*firstblock,	/* first block allocated */
	struct xfs_defer_ops	*dfops,		/* blocks to free at commit */
	int			*flags)		/* inode logging flags */
{
	xfs_btree_cur_t		*cur;		/* bmap btree cursor */
	int			error;		/* error return value */

	if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip))
		return 0;
	cur = NULL;
	error = xfs_bmap_extents_to_btree(tp, ip, firstblock, dfops, &cur, 0,
		flags, XFS_DATA_FORK);
	if (cur) {
		cur->bc_private.b.allocated = 0;
		xfs_btree_del_cursor(cur,
			error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
	}
	return error;
}
/*
 * Called from xfs_bmap_add_attrfork to handle local format files. Each
 * different data fork content type needs a different callout to do the
 * conversion. Some are basic and only require special block initialisation
 * callouts for the data formatting, others (directories) are so specialised
 * they handle everything themselves.
 *
 * XXX (dgc): investigate whether directory conversion can use the generic
 * formatting callout. It should be possible - it's just a very complex
 * formatter.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_local(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	xfs_fsblock_t		*firstblock,	/* first block allocated */
	struct xfs_defer_ops	*dfops,		/* blocks to free at commit */
	int			*flags)		/* inode logging flags */
{
	xfs_da_args_t		dargs;		/* args for dir/attr code */

	if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
		return 0;

	if (S_ISDIR(VFS_I(ip)->i_mode)) {
		memset(&dargs, 0, sizeof(dargs));
		dargs.geo = ip->i_mount->m_dir_geo;
		dargs.dp = ip;
		dargs.firstblock = firstblock;
		dargs.dfops = dfops;
		dargs.total = dargs.geo->fsbcount;
		dargs.whichfork = XFS_DATA_FORK;
		dargs.trans = tp;
		return xfs_dir2_sf_to_block(&dargs);
	}

	if (S_ISLNK(VFS_I(ip)->i_mode))
		return xfs_bmap_local_to_extents(tp, ip, firstblock, 1,
						 flags, XFS_DATA_FORK,
						 xfs_symlink_local_to_remote);

	/* should only be called for types that support local format data */
	ASSERT(0);
	return -EFSCORRUPTED;
}
/*
 * Convert inode from non-attributed to attributed.
 * Must not be in a transaction, ip must not be locked.
 */
int						/* error code */
xfs_bmap_add_attrfork(
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			size,		/* space new attribute needs */
	int			rsvd)		/* xact may use reserved blks */
{
	xfs_fsblock_t		firstblock;	/* 1st block/ag allocated */
	struct xfs_defer_ops	dfops;		/* freed extent records */
	xfs_mount_t		*mp;		/* mount structure */
	xfs_trans_t		*tp;		/* transaction pointer */
	int			blks;		/* space reservation */
	int			version = 1;	/* superblock attr version */
	int			logflags;	/* logging flags */
	int			error;		/* error return value */

	ASSERT(XFS_IFORK_Q(ip) == 0);

	mp = ip->i_mount;
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

	blks = XFS_ADDAFORK_SPACE_RES(mp);

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_addafork, blks, 0,
			rsvd ? XFS_TRANS_RESERVE : 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ?
			XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
			XFS_QMOPT_RES_REGBLKS);
	if (error)
		goto trans_cancel;
	if (XFS_IFORK_Q(ip))
		goto trans_cancel;
	if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) {
		/*
		 * For inodes coming from pre-6.2 filesystems.
		 */
		ASSERT(ip->i_d.di_aformat == 0);
		ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
	}
	ASSERT(ip->i_d.di_anextents == 0);

	xfs_trans_ijoin(tp, ip, 0);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_DEV:
		ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
		break;
	case XFS_DINODE_FMT_UUID:
		ip->i_d.di_forkoff = roundup(sizeof(uuid_t), 8) >> 3;
		break;
	case XFS_DINODE_FMT_LOCAL:
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
		if (!ip->i_d.di_forkoff)
			ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
		else if (mp->m_flags & XFS_MOUNT_ATTR2)
			version = 2;
		break;
	default:
		ASSERT(0);
		error = -EINVAL;
		goto trans_cancel;
	}

	ASSERT(ip->i_afp == NULL);
	ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
	ip->i_afp->if_flags = XFS_IFEXTENTS;
	logflags = 0;
	xfs_defer_init(&dfops, &firstblock);
	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_LOCAL:
		error = xfs_bmap_add_attrfork_local(tp, ip, &firstblock, &dfops,
			&logflags);
		break;
	case XFS_DINODE_FMT_EXTENTS:
		error = xfs_bmap_add_attrfork_extents(tp, ip, &firstblock,
			&dfops, &logflags);
		break;
	case XFS_DINODE_FMT_BTREE:
		error = xfs_bmap_add_attrfork_btree(tp, ip, &firstblock, &dfops,
			&logflags);
		break;
	default:
		error = 0;
		break;
	}
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	if (error)
		goto bmap_cancel;
	if (!xfs_sb_version_hasattr(&mp->m_sb) ||
	   (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) {
		bool log_sb = false;

		spin_lock(&mp->m_sb_lock);
		if (!xfs_sb_version_hasattr(&mp->m_sb)) {
			xfs_sb_version_addattr(&mp->m_sb);
			log_sb = true;
		}
		if (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2) {
			xfs_sb_version_addattr2(&mp->m_sb);
			log_sb = true;
		}
		spin_unlock(&mp->m_sb_lock);
		if (log_sb)
			xfs_log_sb(tp);
	}

	error = xfs_defer_finish(&tp, &dfops, NULL);
	if (error)
		goto bmap_cancel;
	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

bmap_cancel:
	xfs_defer_cancel(&dfops);
trans_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
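/*
 * Illustrative call site (a sketch, not any caller's verbatim code; the
 * real callers live in the attribute code, e.g. the attr set path):
 *
 *	if (XFS_IFORK_Q(ip) == 0) {
 *		error = xfs_bmap_add_attrfork(ip, attr_space_needed, rsvd);
 *		if (error)
 *			return error;
 *	}
 *
 * On return di_forkoff is set and ip->i_afp points at an empty
 * extents-format fork ready for attribute insertion.
 */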
/*
 * Internal and external extent tree search functions.
 */
/*
 * Read in the extents to if_extents.
 * All inode fields are set up by caller, we just traverse the btree
 * and copy the records in. If the file system cannot contain unwritten
 * extents, the records are checked for no "state" flags.
 */
int					/* error */
xfs_bmap_read_extents(
	xfs_trans_t		*tp,	/* transaction pointer */
	xfs_inode_t		*ip,	/* incore inode */
	int			whichfork) /* data or attr fork */
{
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	xfs_buf_t		*bp;	/* buffer for "block" */
	int			error;	/* error return value */
	xfs_exntfmt_t		exntf;	/* XFS_EXTFMT_NOSTATE, if checking */
	xfs_extnum_t		i, j;	/* index into the extents list */
	xfs_ifork_t		*ifp;	/* fork structure */
	int			level;	/* btree level, for checking */
	xfs_mount_t		*mp;	/* file system mount structure */
	__be64			*pp;	/* pointer to block address */
	/* REFERENCED */
	xfs_extnum_t		room;	/* number of entries there's room for */

	bno = NULLFSBLOCK;
	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	exntf = (whichfork != XFS_DATA_FORK) ? XFS_EXTFMT_NOSTATE :
					XFS_EXTFMT_INODE(ip);
	block = ifp->if_broot;
	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);
	ASSERT(bno != NULLFSBLOCK);
	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
	/*
	 * Go down the tree until leaf level is reached, following the first
	 * pointer (leftmost) at each level.
	 */
	while (level-- > 0) {
		error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
				XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
		if (error)
			return error;
		block = XFS_BUF_TO_BLOCK(bp);
		if (level == 0)
			break;
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		XFS_WANT_CORRUPTED_GOTO(mp,
			XFS_FSB_SANITY_CHECK(mp, bno), error0);
		xfs_trans_brelse(tp, bp);
	}
	/*
	 * Here with bp and block set to the leftmost leaf node in the tree.
	 */
	room = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	i = 0;
	/*
	 * Loop over all leaf nodes.  Copy information to the extent records.
	 */
	for (;;) {
		xfs_bmbt_rec_t	*frp;
		xfs_fsblock_t	nextbno;
		xfs_extnum_t	num_recs;
		xfs_extnum_t	start;

		num_recs = xfs_btree_get_numrecs(block);
		if (unlikely(i + num_recs > room)) {
			ASSERT(i + num_recs <= room);
			xfs_warn(ip->i_mount,
				"corrupt dinode %Lu, (btree extents).",
				(unsigned long long) ip->i_ino);
			XFS_CORRUPTION_ERROR("xfs_bmap_read_extents(1)",
				XFS_ERRLEVEL_LOW, ip->i_mount, block);
			goto error0;
		}
		/*
		 * Read-ahead the next leaf block, if any.
		 */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
		if (nextbno != NULLFSBLOCK)
			xfs_btree_reada_bufl(mp, nextbno, 1,
					     &xfs_bmbt_buf_ops);
		/*
		 * Copy records into the extent records.
		 */
		frp = XFS_BMBT_REC_ADDR(mp, block, 1);
		start = i;
		for (j = 0; j < num_recs; j++, i++, frp++) {
			xfs_bmbt_rec_host_t *trp = xfs_iext_get_ext(ifp, i);
			trp->l0 = be64_to_cpu(frp->l0);
			trp->l1 = be64_to_cpu(frp->l1);
		}
		if (exntf == XFS_EXTFMT_NOSTATE) {
			/*
			 * Check all attribute bmap btree records and
			 * any "older" data bmap btree records for a
			 * set bit in the "extent flag" position.
			 */
			if (unlikely(xfs_check_nostate_extents(ifp,
					start, num_recs))) {
				XFS_ERROR_REPORT("xfs_bmap_read_extents(2)",
						 XFS_ERRLEVEL_LOW,
						 ip->i_mount);
				goto error0;
			}
		}
		xfs_trans_brelse(tp, bp);
		bno = nextbno;
		/*
		 * If we've reached the end, stop.
		 */
		if (bno == NULLFSBLOCK)
			break;
		error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
				XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
		if (error)
			return error;
		block = XFS_BUF_TO_BLOCK(bp);
	}
	ASSERT(i == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)));
	ASSERT(i == XFS_IFORK_NEXTENTS(ip, whichfork));
	XFS_BMAP_TRACE_EXLIST(ip, i, whichfork);
	return 0;
error0:
	xfs_trans_brelse(tp, bp);
	return -EFSCORRUPTED;
}
/*
 * Search the extent records for the entry containing block bno.
 * If bno lies in a hole, point to the next entry.  If bno lies
 * past eof, *eofp will be set, and *prevp will contain the last
 * entry (null if none).  Else, *lastxp will be set to the index
 * of the found entry; *gotp will contain the entry.
 */
STATIC xfs_bmbt_rec_host_t *		/* pointer to found extent entry */
xfs_bmap_search_multi_extents(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_fileoff_t	bno,		/* block number searched for */
	int		*eofp,		/* out: end of file found */
	xfs_extnum_t	*lastxp,	/* out: last extent index */
	xfs_bmbt_irec_t	*gotp,		/* out: extent entry found */
	xfs_bmbt_irec_t	*prevp)		/* out: previous extent entry found */
{
	xfs_bmbt_rec_host_t *ep;	/* extent record pointer */
	xfs_extnum_t	lastx;		/* last extent index */

	/*
	 * Initialize the extent entry structure to catch access to
	 * uninitialized br_startblock field.
	 */
	gotp->br_startoff = 0xffa5a5a5a5a5a5a5LL;
	gotp->br_blockcount = 0xa55a5a5a5a5a5a5aLL;
	gotp->br_state = XFS_EXT_INVALID;
	gotp->br_startblock = 0xffffa5a5a5a5a5a5LL;
	prevp->br_startoff = NULLFILEOFF;

	ep = xfs_iext_bno_to_ext(ifp, bno, &lastx);
	if (lastx > 0) {
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, lastx - 1), prevp);
	}
	if (lastx < (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))) {
		xfs_bmbt_get_all(ep, gotp);
		*eofp = 0;
	} else {
		if (lastx > 0) {
			*gotp = *prevp;
		}
		*eofp = 1;
		ep = NULL;
	}
	*lastxp = lastx;
	return ep;
}
/*
 * Search the extents list for the inode, for the extent containing bno.
 * If bno lies in a hole, point to the next entry.  If bno lies past eof,
 * *eofp will be set, and *prevp will contain the last entry (null if none).
 * Else, *lastxp will be set to the index of the found
 * entry; *gotp will contain the entry.
 */
xfs_bmbt_rec_host_t *			/* pointer to found extent entry */
xfs_bmap_search_extents(
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_fileoff_t	bno,		/* block number searched for */
	int		fork,		/* data or attr fork */
	int		*eofp,		/* out: end of file found */
	xfs_extnum_t	*lastxp,	/* out: last extent index */
	xfs_bmbt_irec_t	*gotp,		/* out: extent entry found */
	xfs_bmbt_irec_t	*prevp)		/* out: previous extent entry found */
{
	xfs_ifork_t	*ifp;		/* inode fork pointer */
	xfs_bmbt_rec_host_t *ep;	/* extent record pointer */

	XFS_STATS_INC(ip->i_mount, xs_look_exlist);
	ifp = XFS_IFORK_PTR(ip, fork);

	ep = xfs_bmap_search_multi_extents(ifp, bno, eofp, lastxp, gotp, prevp);

	if (unlikely(!(gotp->br_startblock) && (*lastxp != NULLEXTNUM) &&
		     !(XFS_IS_REALTIME_INODE(ip) && fork == XFS_DATA_FORK))) {
		xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
				"Access to block zero in inode %llu "
				"start_block: %llx start_off: %llx "
				"blkcnt: %llx extent-state: %x lastx: %x",
			(unsigned long long)ip->i_ino,
			(unsigned long long)gotp->br_startblock,
			(unsigned long long)gotp->br_startoff,
			(unsigned long long)gotp->br_blockcount,
			gotp->br_state, *lastxp);
		*lastxp = NULLEXTNUM;
		*eofp = 1;
		return NULL;
	}
	return ep;
}
/*
 * Returns the file-relative block number of the first unused block(s)
 * in the file with at least "len" logically contiguous blocks free.
 * This is the lowest-address hole if the file has holes, else the first block
 * past the end of file.
 * Return 0 if the file is currently local (in-inode).
 */
int						/* error */
xfs_bmap_first_unused(
	xfs_trans_t	*tp,			/* transaction pointer */
	xfs_inode_t	*ip,			/* incore inode */
	xfs_extlen_t	len,			/* size of hole to find */
	xfs_fileoff_t	*first_unused,		/* unused block */
	int		whichfork)		/* data or attr fork */
{
	int		error;			/* error return value */
	int		idx;			/* extent record index */
	xfs_ifork_t	*ifp;			/* inode fork pointer */
	xfs_fileoff_t	lastaddr;		/* last block number seen */
	xfs_fileoff_t	lowest;			/* lowest useful block */
	xfs_fileoff_t	max;			/* starting useful block */
	xfs_fileoff_t	off;			/* offset for this block */
	xfs_extnum_t	nextents;		/* number of extent entries */

	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE ||
	       XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ||
	       XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
		*first_unused = 0;
		return 0;
	}
	ifp = XFS_IFORK_PTR(ip, whichfork);
	if (!(ifp->if_flags & XFS_IFEXTENTS) &&
	    (error = xfs_iread_extents(tp, ip, whichfork)))
		return error;
	lowest = *first_unused;
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	for (idx = 0, lastaddr = 0, max = lowest; idx < nextents; idx++) {
		xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, idx);
		off = xfs_bmbt_get_startoff(ep);
		/*
		 * See if the hole before this extent will work.
		 */
		if (off >= lowest + len && off - max >= len) {
			*first_unused = max;
			return 0;
		}
		lastaddr = off + xfs_bmbt_get_blockcount(ep);
		max = XFS_FILEOFF_MAX(lastaddr, lowest);
	}
	*first_unused = max;
	return 0;
}
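/*
 * Worked example: with extents covering file offsets [0, 10) and
 * [15, 40), a search for a len == 4 hole starting from *first_unused
 * == 0 rejects the offset-0 candidate on the first pass, records
 * max == 10 after the first extent, then accepts the [10, 15) hole on
 * the next iteration because off - max == 5 >= len, returning 10.
 */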
/*
 * Returns the file-relative block number of the last block - 1 before
 * last_block (input value) in the file.
 * This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int						/* error */
xfs_bmap_last_before(
	xfs_trans_t	*tp,			/* transaction pointer */
	xfs_inode_t	*ip,			/* incore inode */
	xfs_fileoff_t	*last_block,		/* last block */
	int		whichfork)		/* data or attr fork */
{
	xfs_fileoff_t	bno;			/* input file offset */
	int		eof;			/* hit end of file */
	xfs_bmbt_rec_host_t *ep;		/* pointer to last extent */
	int		error;			/* error return value */
	xfs_bmbt_irec_t	got;			/* current extent value */
	xfs_ifork_t	*ifp;			/* inode fork pointer */
	xfs_extnum_t	lastx;			/* last extent used */
	xfs_bmbt_irec_t	prev;			/* previous extent value */

	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL)
		return -EIO;
	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
		*last_block = 0;
		return 0;
	}
	ifp = XFS_IFORK_PTR(ip, whichfork);
	if (!(ifp->if_flags & XFS_IFEXTENTS) &&
	    (error = xfs_iread_extents(tp, ip, whichfork)))
		return error;
	bno = *last_block - 1;
	ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
		&prev);
	if (eof || xfs_bmbt_get_startoff(ep) > bno) {
		if (prev.br_startoff == NULLFILEOFF)
			*last_block = 0;
		else
			*last_block = prev.br_startoff + prev.br_blockcount;
	}
	/*
	 * Otherwise *last_block is already the right answer.
	 */
	return 0;
}
STATIC int
xfs_bmap_last_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*rec,
	int			*is_empty)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	int			error;
	int			nextents;

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	nextents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t);
	if (nextents == 0) {
		*is_empty = 1;
		return 0;
	}

	xfs_bmbt_get_all(xfs_iext_get_ext(ifp, nextents - 1), rec);
	*is_empty = 0;
	return 0;
}
/*
 * Check the last inode extent to determine whether this allocation will result
 * in blocks being allocated at the end of the file. When we allocate new data
 * blocks at the end of the file which do not start at the previous data block,
 * we will try to align the new blocks at stripe unit boundaries.
 *
 * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be
 * at, or past the EOF.
 */
STATIC int
xfs_bmap_isaeof(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	bma->aeof = 0;
	error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
				     &is_empty);
	if (error)
		return error;

	if (is_empty) {
		bma->aeof = 1;
		return 0;
	}

	/*
	 * Check if we are allocating at or past the last extent, or at least
	 * into the last delayed allocated extent.
	 */
	bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount ||
		(bma->offset >= rec.br_startoff &&
		 isnullstartblock(rec.br_startblock));
	return 0;
}
/*
 * Returns the file-relative block number of the first block past eof in
 * the file.  This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int
xfs_bmap_last_offset(
	struct xfs_inode	*ip,
	xfs_fileoff_t		*last_block,
	int			whichfork)
{
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	*last_block = 0;

	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL)
		return 0;

	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
		return -EIO;

	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
	if (error || is_empty)
		return error;

	*last_block = rec.br_startoff + rec.br_blockcount;
	return 0;
}
/*
 * Returns whether the selected fork of the inode has exactly one
 * block or not.  For the data fork we check this matches di_size,
 * implying the file's range is 0..bsize-1.
 */
int					/* 1=>1 block, 0=>otherwise */
xfs_bmap_one_block(
	xfs_inode_t	*ip,		/* incore inode */
	int		whichfork)	/* data or attr fork */
{
	xfs_bmbt_rec_host_t *ep;	/* ptr to fork's extent */
	xfs_ifork_t	*ifp;		/* inode fork pointer */
	int		rval;		/* return value */
	xfs_bmbt_irec_t	s;		/* internal version of extent */

#ifndef DEBUG
	if (whichfork == XFS_DATA_FORK)
		return XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize;
#endif	/* !DEBUG */
	if (XFS_IFORK_NEXTENTS(ip, whichfork) != 1)
		return 0;
	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
		return 0;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
	ep = xfs_iext_get_ext(ifp, 0);
	xfs_bmbt_get_all(ep, &s);
	rval = s.br_startoff == 0 && s.br_blockcount == 1;
	if (rval && whichfork == XFS_DATA_FORK)
		ASSERT(XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize);
	return rval;
}
/*
 * Extent tree manipulation functions used during allocation.
 */
/*
 * Convert a delayed allocation to a real allocation.
 */
STATIC int				/* error */
xfs_bmap_add_extent_delay_real(
	struct xfs_bmalloca	*bma)
{
	struct xfs_bmbt_irec	*new = &bma->got;
	int			diff;	/* temp value */
	xfs_bmbt_rec_host_t	*ep;	/* extent entry for idx */
	int			error;	/* error return value */
	int			i;	/* temp state */
	xfs_ifork_t		*ifp;	/* inode fork pointer */
	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
					/* left is 0, right is 1, prev is 2 */
	int			rval=0;	/* return value (logging flags) */
	int			state = 0;/* state bits, accessed thru macros */
	xfs_filblks_t		da_new; /* new count del alloc blocks used */
	xfs_filblks_t		da_old; /* old count del alloc blocks used */
	xfs_filblks_t		temp=0;	/* value for da_new calculations */
	xfs_filblks_t		temp2=0;/* value for da_new calculations */
	int			tmp_rval;	/* partial logging flags */
	int			whichfork = XFS_DATA_FORK;
	struct xfs_mount	*mp;

	mp = bma->ip->i_mount;
	ifp = XFS_IFORK_PTR(bma->ip, whichfork);

	ASSERT(bma->idx >= 0);
	ASSERT(bma->idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
	ASSERT(!isnullstartblock(new->br_startblock));
	ASSERT(!bma->cur ||
	       (bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));

	XFS_STATS_INC(mp, xs_add_exlist);

#define	LEFT		r[0]
#define	RIGHT		r[1]
#define	PREV		r[2]
	/*
	 * Set up a bunch of variables to make the tests simpler.
	 */
	ep = xfs_iext_get_ext(ifp, bma->idx);
	xfs_bmbt_get_all(ep, &PREV);
	new_endoff = new->br_startoff + new->br_blockcount;
	ASSERT(PREV.br_startoff <= new->br_startoff);
	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);

	da_old = startblockval(PREV.br_startblock);
	da_new = 0;

	/*
	 * Set flags determining what part of the previous delayed allocation
	 * extent is being replaced by a real allocation.
	 */
	if (PREV.br_startoff == new->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
		state |= BMAP_RIGHT_FILLING;

	/*
	 * Check and set flags if this segment has a left neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 */
	if (bma->idx > 0) {
		state |= BMAP_LEFT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1), &LEFT);

		if (isnullstartblock(LEFT.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
	    LEFT.br_state == new->br_state &&
	    LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
		state |= BMAP_LEFT_CONTIG;

	/*
	 * Check and set flags if this segment has a right neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 * Also check for all-three-contiguous being too large.
	 */
	if (bma->idx < ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) {
		state |= BMAP_RIGHT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx + 1), &RIGHT);

		if (isnullstartblock(RIGHT.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new_endoff == RIGHT.br_startoff &&
	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
	    new->br_state == RIGHT.br_state &&
	    new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING)) !=
		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING) ||
	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
			<= MAXEXTLEN))
		state |= BMAP_RIGHT_CONTIG;

	error = 0;
	/*
	 * Switch out based on the FILLING and CONTIG state bits.
	 */
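	/*
	 * At this point the four state bits - BMAP_LEFT_FILLING,
	 * BMAP_RIGHT_FILLING, BMAP_LEFT_CONTIG and BMAP_RIGHT_CONTIG -
	 * fully describe how the real allocation overlays the delayed
	 * extent, so the switch only has to enumerate the combinations
	 * that can actually occur; a CONTIG bit without the matching
	 * FILLING bit cannot happen for a delalloc conversion.
	 */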
	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left and right neighbors are both contiguous with new.
		 */
		bma->idx--;
		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
			LEFT.br_blockcount + PREV.br_blockcount +
			RIGHT.br_blockcount);
		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);

		xfs_iext_remove(bma->ip, bma->idx + 1, 2, state);
		bma->ip->i_d.di_nextents--;
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
					RIGHT.br_startblock,
					RIGHT.br_blockcount, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_btree_delete(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_btree_decrement(bma->cur, 0, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
					LEFT.br_startblock,
					LEFT.br_blockcount +
					PREV.br_blockcount +
					RIGHT.br_blockcount, LEFT.br_state);
			if (error)
				goto done;
		}
		break;
	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left neighbor is contiguous, the right is not.
		 */
		bma->idx--;

		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
			LEFT.br_blockcount + PREV.br_blockcount);
		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);

		xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff,
					LEFT.br_startblock, LEFT.br_blockcount,
					&i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
					LEFT.br_startblock,
					LEFT.br_blockcount +
					PREV.br_blockcount, LEFT.br_state);
			if (error)
				goto done;
		}
		break;
	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The right neighbor is contiguous, the left is not.
		 */
		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
		xfs_bmbt_set_startblock(ep, new->br_startblock);
		xfs_bmbt_set_blockcount(ep,
			PREV.br_blockcount + RIGHT.br_blockcount);
		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);

		xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
					RIGHT.br_startblock,
					RIGHT.br_blockcount, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, PREV.br_startoff,
					new->br_startblock,
					PREV.br_blockcount +
					RIGHT.br_blockcount, PREV.br_state);
			if (error)
				goto done;
		}
		break;
	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * Neither the left nor right neighbors are contiguous with
		 * the new one.
		 */
		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
		xfs_bmbt_set_startblock(ep, new->br_startblock);
		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);

		bma->ip->i_d.di_nextents++;
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
					new->br_startblock, new->br_blockcount,
					&i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}
		break;
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is contiguous.
		 */
		trace_xfs_bmap_pre_update(bma->ip, bma->idx - 1, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx - 1),
			LEFT.br_blockcount + new->br_blockcount);
		xfs_bmbt_set_startoff(ep,
			PREV.br_startoff + new->br_blockcount);
		trace_xfs_bmap_post_update(bma->ip, bma->idx - 1, state, _THIS_IP_);

		temp = PREV.br_blockcount - new->br_blockcount;
		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(ep, temp);
		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff,
					LEFT.br_startblock, LEFT.br_blockcount,
					&i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
					LEFT.br_startblock,
					LEFT.br_blockcount +
					new->br_blockcount,
					LEFT.br_state);
			if (error)
				goto done;
		}

		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock));
		xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);

		bma->idx--;
		break;
	case BMAP_LEFT_FILLING:
		/*
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is not contiguous.
		 */
		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
		xfs_bmbt_set_startoff(ep, new_endoff);
		temp = PREV.br_blockcount - new->br_blockcount;
		xfs_bmbt_set_blockcount(ep, temp);
		xfs_iext_insert(bma->ip, bma->idx, 1, new, state);
		bma->ip->i_d.di_nextents++;
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
					new->br_startblock, new->br_blockcount,
					&i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
					bma->firstblock, bma->dfops,
					&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_private.b.allocated : 0));
		ep = xfs_iext_get_ext(ifp, bma->idx + 1);
		xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
		trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
		break;
	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is contiguous with the new allocation.
		 */
		temp = PREV.br_blockcount - new->br_blockcount;
		trace_xfs_bmap_pre_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(ep, temp);
		xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, bma->idx + 1),
			new->br_startoff, new->br_startblock,
			new->br_blockcount + RIGHT.br_blockcount,
			RIGHT.br_state);
		trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
					RIGHT.br_startblock,
					RIGHT.br_blockcount, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, new->br_startoff,
					new->br_startblock,
					new->br_blockcount +
					RIGHT.br_blockcount,
					RIGHT.br_state);
			if (error)
				goto done;
		}

		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock));
		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
		xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);

		bma->idx++;
		break;
2042 case BMAP_RIGHT_FILLING:
2044 * Filling in the last part of a previous delayed allocation.
2045 * The right neighbor is not contiguous.
2047 temp = PREV.br_blockcount - new->br_blockcount;
2048 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
2049 xfs_bmbt_set_blockcount(ep, temp);
2050 xfs_iext_insert(bma->ip, bma->idx + 1, 1, new, state);
2051 bma->ip->i_d.di_nextents++;
2052 if (bma->cur == NULL)
2053 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2054 else {
2055 rval = XFS_ILOG_CORE;
2056 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
2057 new->br_startblock, new->br_blockcount,
2058 &i);
2059 if (error)
2060 goto done;
2061 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2062 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
2063 error = xfs_btree_insert(bma->cur, &i);
2064 if (error)
2065 goto done;
2066 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2067 }
2068
2069 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
2070 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
2071 bma->firstblock, bma->dfops, &bma->cur, 1,
2072 &tmp_rval, whichfork);
2073 rval |= tmp_rval;
2074 if (error)
2075 goto done;
2076 }
2077 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
2078 startblockval(PREV.br_startblock) -
2079 (bma->cur ? bma->cur->bc_private.b.allocated : 0));
2080 ep = xfs_iext_get_ext(ifp, bma->idx);
2081 xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
2082 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
2083
2084 bma->idx++;
2085 break;
2086
2087 case 0:
2088 /*
2089 * Filling in the middle part of a previous delayed allocation.
2090 * Contiguity is impossible here.
2091 * This case is avoided almost all the time.
2092 *
2093 * We start with a delayed allocation:
2094 *
2095 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+
2096 *  PREV @ idx
2097 *
2098 * and we are allocating:
2099 *                     +rrrrrrrrrrrrrrrrr+
2100 *                            new
2101 *
2102 * and we set it up for insertion as:
2103 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+
2104 *                            new
2105 *  PREV @ idx          LEFT              RIGHT
2106 *                      inserted at idx + 1
2107 */
2108 temp = new->br_startoff - PREV.br_startoff;
2109 temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff;
2110 trace_xfs_bmap_pre_update(bma->ip, bma->idx, 0, _THIS_IP_);
2111 xfs_bmbt_set_blockcount(ep, temp); /* truncate PREV */
2112 LEFT = *new;
2113 RIGHT.br_state = PREV.br_state;
2114 RIGHT.br_startblock = nullstartblock(
2115 (int)xfs_bmap_worst_indlen(bma->ip, temp2));
2116 RIGHT.br_startoff = new_endoff;
2117 RIGHT.br_blockcount = temp2;
2118 /* insert LEFT (r[0]) and RIGHT (r[1]) at the same time */
2119 xfs_iext_insert(bma->ip, bma->idx + 1, 2, &LEFT, state);
2120 bma->ip->i_d.di_nextents++;
2121 if (bma->cur == NULL)
2122 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2123 else {
2124 rval = XFS_ILOG_CORE;
2125 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
2126 new->br_startblock, new->br_blockcount,
2127 &i);
2128 if (error)
2129 goto done;
2130 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2131 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
2132 error = xfs_btree_insert(bma->cur, &i);
2133 if (error)
2134 goto done;
2135 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2136 }
2137
2138 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
2139 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
2140 bma->firstblock, bma->dfops, &bma->cur,
2141 1, &tmp_rval, whichfork);
2142 rval |= tmp_rval;
2143 if (error)
2144 goto done;
2145 }
2146 temp = xfs_bmap_worst_indlen(bma->ip, temp);
2147 temp2 = xfs_bmap_worst_indlen(bma->ip, temp2);
2148 diff = (int)(temp + temp2 - startblockval(PREV.br_startblock) -
2149 (bma->cur ? bma->cur->bc_private.b.allocated : 0));
2150 if (diff > 0) {
2151 error = xfs_mod_fdblocks(bma->ip->i_mount,
2152 -((int64_t)diff), false);
2153 ASSERT(!error);
2154 if (error)
2155 goto done;
2156 }
2157
2158 ep = xfs_iext_get_ext(ifp, bma->idx);
2159 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
2160 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
2161 trace_xfs_bmap_pre_update(bma->ip, bma->idx + 2, state, _THIS_IP_);
2162 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, bma->idx + 2),
2163 nullstartblock((int)temp2));
2164 trace_xfs_bmap_post_update(bma->ip, bma->idx + 2, state, _THIS_IP_);
2165
2166 bma->idx++;
2167 da_new = temp + temp2;
2168 break;
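/*
 * Worked example for the middle-split case above (illustrative numbers,
 * not from the original source): if PREV is a 100-block delalloc extent
 * at offset 0 and the new real extent covers blocks 40-59, PREV is
 * truncated to blocks 0-39 (temp = 40), a new delalloc record covers
 * blocks 60-99 (temp2 = 40), and worst-case indirect reservations for
 * the two remaining pieces are recomputed with xfs_bmap_worst_indlen().
 * Any shortfall relative to the old reservation is taken from free
 * space via xfs_mod_fdblocks() above.
 */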
2170 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2171 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2172 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
2173 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2174 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2175 case BMAP_LEFT_CONTIG:
2176 case BMAP_RIGHT_CONTIG:
2177 /*
2178 * These cases are all impossible.
2179 */
2180 ASSERT(0);
2181 }
2182
2183 /* add reverse mapping */
2184 error = xfs_rmap_map_extent(mp, bma->dfops, bma->ip, whichfork, new);
2185 if (error)
2186 goto done;
2187
2188 /* convert to a btree if necessary */
2189 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
2190 int tmp_logflags; /* partial log flag return val */
2192 ASSERT(bma->cur == NULL);
2193 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
2194 bma->firstblock, bma->dfops, &bma->cur,
2195 da_old > 0, &tmp_logflags, whichfork);
2196 bma->logflags |= tmp_logflags;
2197 if (error)
2198 goto done;
2199 }
2200
2201 /* adjust for changes in reserved delayed indirect blocks */
2202 if (da_old || da_new) {
2203 temp = da_new;
2204 if (bma->cur)
2205 temp += bma->cur->bc_private.b.allocated;
2206 ASSERT(temp <= da_old);
2207 if (temp < da_old)
2208 xfs_mod_fdblocks(bma->ip->i_mount,
2209 (int64_t)(da_old - temp), false);
2210 }
2211
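/*
 * Example of the adjustment above (illustrative): if the delalloc
 * reservation covered 8 indirect blocks (da_old) and the remaining
 * delalloc pieces plus the btree cursor now only need 5 (temp), the
 * 3 surplus blocks are returned to the free-space counter. temp must
 * never exceed da_old; growing the reservation here would be a bug,
 * hence the ASSERT.
 */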
2212 /* clear out the allocated field, done with it now in any case. */
2213 if (bma->cur)
2214 bma->cur->bc_private.b.allocated = 0;
2215
2216 xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
2217 done:
2218 bma->logflags |= rval;
2219 return error;
2220 }
2225 /*
2226 * Convert an unwritten allocation to a real allocation or vice versa.
2227 */
2228 STATIC int /* error */
2229 xfs_bmap_add_extent_unwritten_real(
2230 struct xfs_trans *tp,
2231 xfs_inode_t *ip, /* incore inode pointer */
2232 xfs_extnum_t *idx, /* extent number to update/insert */
2233 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
2234 xfs_bmbt_irec_t *new, /* new data to add to file extents */
2235 xfs_fsblock_t *first, /* pointer to firstblock variable */
2236 struct xfs_defer_ops *dfops, /* list of extents to be freed */
2237 int *logflagsp) /* inode logging flags */
2239 xfs_btree_cur_t *cur; /* btree cursor */
2240 xfs_bmbt_rec_host_t *ep; /* extent entry for idx */
2241 int error; /* error return value */
2242 int i; /* temp state */
2243 xfs_ifork_t *ifp; /* inode fork pointer */
2244 xfs_fileoff_t new_endoff; /* end offset of new entry */
2245 xfs_exntst_t newext; /* new extent state */
2246 xfs_exntst_t oldext; /* old extent state */
2247 xfs_bmbt_irec_t r[3]; /* neighbor extent entries */
2248 /* left is 0, right is 1, prev is 2 */
2249 int rval=0; /* return value (logging flags) */
2250 int state = 0;/* state bits, accessed thru macros */
2251 struct xfs_mount *mp = tp->t_mountp;
2256 ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
2259 ASSERT(*idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
2260 ASSERT(!isnullstartblock(new->br_startblock));
2262 XFS_STATS_INC(mp, xs_add_exlist);
2269 * Set up a bunch of variables to make the tests simpler.
2272 ep = xfs_iext_get_ext(ifp, *idx);
2273 xfs_bmbt_get_all(ep, &PREV);
2274 newext = new->br_state;
2275 oldext = (newext == XFS_EXT_UNWRITTEN) ?
2276 XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
2277 ASSERT(PREV.br_state == oldext);
2278 new_endoff = new->br_startoff + new->br_blockcount;
2279 ASSERT(PREV.br_startoff <= new->br_startoff);
2280 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
2283 * Set flags determining what part of the previous oldext allocation
2284 * extent is being replaced by a newext allocation.
2286 if (PREV.br_startoff == new->br_startoff)
2287 state |= BMAP_LEFT_FILLING;
2288 if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
2289 state |= BMAP_RIGHT_FILLING;
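/*
 * For example (illustrative), if PREV spans blocks 10-29 and the new
 * extent also spans 10-29, both BMAP_LEFT_FILLING and
 * BMAP_RIGHT_FILLING are set and the whole extent changes state; if
 * new spans only 10-19, just BMAP_LEFT_FILLING is set and PREV must
 * be split at block 20.
 */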
2291 /*
2292 * Check and set flags if this segment has a left neighbor.
2293 * Don't set contiguous if the combined extent would be too large.
2294 */
2295 if (*idx > 0) {
2296 state |= BMAP_LEFT_VALID;
2297 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &LEFT);
2299 if (isnullstartblock(LEFT.br_startblock))
2300 state |= BMAP_LEFT_DELAY;
2301 }
2302
2303 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
2304 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
2305 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
2306 LEFT.br_state == newext &&
2307 LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2308 state |= BMAP_LEFT_CONTIG;
2311 * Check and set flags if this segment has a right neighbor.
2312 * Don't set contiguous if the combined extent would be too large.
2313 * Also check for all-three-contiguous being too large.
2315 if (*idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) {
2316 state |= BMAP_RIGHT_VALID;
2317 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx + 1), &RIGHT);
2318 if (isnullstartblock(RIGHT.br_startblock))
2319 state |= BMAP_RIGHT_DELAY;
2320 }
2321
2322 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
2323 new_endoff == RIGHT.br_startoff &&
2324 new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
2325 newext == RIGHT.br_state &&
2326 new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
2327 ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
2328 BMAP_RIGHT_FILLING)) !=
2329 (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
2330 BMAP_RIGHT_FILLING) ||
2331 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
2332 <= MAXEXTLEN))
2333 state |= BMAP_RIGHT_CONTIG;
2336 * Switch out based on the FILLING and CONTIG state bits.
2338 switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
2339 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
2340 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
2341 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2342 /*
2343 * Setting all of a previous oldext extent to newext.
2344 * The left and right neighbors are both contiguous with new.
2345 */
2346 --*idx;
2347
2348 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2349 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
2350 LEFT.br_blockcount + PREV.br_blockcount +
2351 RIGHT.br_blockcount);
2352 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2354 xfs_iext_remove(ip, *idx + 1, 2, state);
2355 ip->i_d.di_nextents -= 2;
2356 if (cur == NULL)
2357 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2358 else {
2359 rval = XFS_ILOG_CORE;
2360 if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
2361 RIGHT.br_startblock,
2362 RIGHT.br_blockcount, &i)))
2363 goto done;
2364 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2365 if ((error = xfs_btree_delete(cur, &i)))
2366 goto done;
2367 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2368 if ((error = xfs_btree_decrement(cur, 0, &i)))
2369 goto done;
2370 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2371 if ((error = xfs_btree_delete(cur, &i)))
2372 goto done;
2373 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2374 if ((error = xfs_btree_decrement(cur, 0, &i)))
2375 goto done;
2376 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2377 if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
2378 LEFT.br_startblock,
2379 LEFT.br_blockcount + PREV.br_blockcount +
2380 RIGHT.br_blockcount, LEFT.br_state)))
2381 goto done;
2382 }
2383 break;
2384
2385 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2386 /*
2387 * Setting all of a previous oldext extent to newext.
2388 * The left neighbor is contiguous, the right is not.
2389 */
2390 --*idx;
2391
2392 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2393 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
2394 LEFT.br_blockcount + PREV.br_blockcount);
2395 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2397 xfs_iext_remove(ip, *idx + 1, 1, state);
2398 ip->i_d.di_nextents--;
2399 if (cur == NULL)
2400 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2401 else {
2402 rval = XFS_ILOG_CORE;
2403 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2404 PREV.br_startblock, PREV.br_blockcount,
2405 &i)))
2406 goto done;
2407 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2408 if ((error = xfs_btree_delete(cur, &i)))
2409 goto done;
2410 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2411 if ((error = xfs_btree_decrement(cur, 0, &i)))
2412 goto done;
2413 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2414 if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
2415 LEFT.br_startblock,
2416 LEFT.br_blockcount + PREV.br_blockcount,
2417 LEFT.br_state)))
2418 goto done;
2419 }
2420 break;
2421
2422 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2424 * Setting all of a previous oldext extent to newext.
2425 * The right neighbor is contiguous, the left is not.
2427 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2428 xfs_bmbt_set_blockcount(ep,
2429 PREV.br_blockcount + RIGHT.br_blockcount);
2430 xfs_bmbt_set_state(ep, newext);
2431 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2432 xfs_iext_remove(ip, *idx + 1, 1, state);
2433 ip->i_d.di_nextents--;
2434 if (cur == NULL)
2435 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2436 else {
2437 rval = XFS_ILOG_CORE;
2438 if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
2439 RIGHT.br_startblock,
2440 RIGHT.br_blockcount, &i)))
2441 goto done;
2442 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2443 if ((error = xfs_btree_delete(cur, &i)))
2444 goto done;
2445 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2446 if ((error = xfs_btree_decrement(cur, 0, &i)))
2447 goto done;
2448 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2449 if ((error = xfs_bmbt_update(cur, new->br_startoff,
2450 new->br_startblock,
2451 new->br_blockcount + RIGHT.br_blockcount,
2452 newext)))
2453 goto done;
2454 }
2455 break;
2456
2457 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
2459 * Setting all of a previous oldext extent to newext.
2460 * Neither the left nor right neighbors are contiguous with
2463 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2464 xfs_bmbt_set_state(ep, newext);
2465 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2466
2467 if (cur == NULL)
2468 rval = XFS_ILOG_DEXT;
2469 else {
2470 rval = 0;
2471 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
2472 new->br_startblock, new->br_blockcount,
2473 &i)))
2474 goto done;
2475 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2476 if ((error = xfs_bmbt_update(cur, new->br_startoff,
2477 new->br_startblock, new->br_blockcount,
2478 newext)))
2479 goto done;
2480 }
2481 break;
2482
2483 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
2485 * Setting the first part of a previous oldext extent to newext.
2486 * The left neighbor is contiguous.
2488 trace_xfs_bmap_pre_update(ip, *idx - 1, state, _THIS_IP_);
2489 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx - 1),
2490 LEFT.br_blockcount + new->br_blockcount);
2491 xfs_bmbt_set_startoff(ep,
2492 PREV.br_startoff + new->br_blockcount);
2493 trace_xfs_bmap_post_update(ip, *idx - 1, state, _THIS_IP_);
2495 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2496 xfs_bmbt_set_startblock(ep,
2497 new->br_startblock + new->br_blockcount);
2498 xfs_bmbt_set_blockcount(ep,
2499 PREV.br_blockcount - new->br_blockcount);
2500 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2501
2502 --*idx;
2503
2504 if (cur == NULL)
2505 rval = XFS_ILOG_DEXT;
2506 else {
2507 rval = 0;
2508 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2509 PREV.br_startblock, PREV.br_blockcount,
2510 &i)))
2511 goto done;
2512 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2513 if ((error = xfs_bmbt_update(cur,
2514 PREV.br_startoff + new->br_blockcount,
2515 PREV.br_startblock + new->br_blockcount,
2516 PREV.br_blockcount - new->br_blockcount,
2517 oldext)))
2518 goto done;
2519 if ((error = xfs_btree_decrement(cur, 0, &i)))
2520 goto done;
2521 error = xfs_bmbt_update(cur, LEFT.br_startoff,
2522 LEFT.br_startblock,
2523 LEFT.br_blockcount + new->br_blockcount,
2524 LEFT.br_state);
2525 if (error)
2526 goto done;
2527 }
2528 break;
2529
2530 case BMAP_LEFT_FILLING:
2532 * Setting the first part of a previous oldext extent to newext.
2533 * The left neighbor is not contiguous.
2535 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2536 ASSERT(ep && xfs_bmbt_get_state(ep) == oldext);
2537 xfs_bmbt_set_startoff(ep, new_endoff);
2538 xfs_bmbt_set_blockcount(ep,
2539 PREV.br_blockcount - new->br_blockcount);
2540 xfs_bmbt_set_startblock(ep,
2541 new->br_startblock + new->br_blockcount);
2542 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2544 xfs_iext_insert(ip, *idx, 1, new, state);
2545 ip->i_d.di_nextents++;
2546 if (cur == NULL)
2547 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2548 else {
2549 rval = XFS_ILOG_CORE;
2550 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2551 PREV.br_startblock, PREV.br_blockcount,
2552 &i)))
2553 goto done;
2554 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2555 if ((error = xfs_bmbt_update(cur,
2556 PREV.br_startoff + new->br_blockcount,
2557 PREV.br_startblock + new->br_blockcount,
2558 PREV.br_blockcount - new->br_blockcount,
2559 oldext)))
2560 goto done;
2561 cur->bc_rec.b = *new;
2562 if ((error = xfs_btree_insert(cur, &i)))
2563 goto done;
2564 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2565 }
2566 break;
2567
2568 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2570 * Setting the last part of a previous oldext extent to newext.
2571 * The right neighbor is contiguous with the new allocation.
2573 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2574 xfs_bmbt_set_blockcount(ep,
2575 PREV.br_blockcount - new->br_blockcount);
2576 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2577
2578 ++*idx;
2579
2580 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2581 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
2582 new->br_startoff, new->br_startblock,
2583 new->br_blockcount + RIGHT.br_blockcount, newext);
2584 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2585
2586 if (cur == NULL)
2587 rval = XFS_ILOG_DEXT;
2588 else {
2589 rval = 0;
2590 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2591 PREV.br_startblock,
2592 PREV.br_blockcount, &i)))
2593 goto done;
2594 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2595 if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
2596 PREV.br_startblock,
2597 PREV.br_blockcount - new->br_blockcount,
2598 oldext)))
2599 goto done;
2600 if ((error = xfs_btree_increment(cur, 0, &i)))
2601 goto done;
2602 if ((error = xfs_bmbt_update(cur, new->br_startoff,
2603 new->br_startblock,
2604 new->br_blockcount + RIGHT.br_blockcount,
2605 newext)))
2606 goto done;
2607 }
2608 break;
2609
2610 case BMAP_RIGHT_FILLING:
2612 * Setting the last part of a previous oldext extent to newext.
2613 * The right neighbor is not contiguous.
2615 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2616 xfs_bmbt_set_blockcount(ep,
2617 PREV.br_blockcount - new->br_blockcount);
2618 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2619
2620 ++*idx;
2621 xfs_iext_insert(ip, *idx, 1, new, state);
2622
2623 ip->i_d.di_nextents++;
2624 if (cur == NULL)
2625 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2626 else {
2627 rval = XFS_ILOG_CORE;
2628 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2629 PREV.br_startblock, PREV.br_blockcount,
2630 &i)))
2631 goto done;
2632 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2633 if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
2634 PREV.br_startblock,
2635 PREV.br_blockcount - new->br_blockcount,
2636 oldext)))
2637 goto done;
2638 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
2639 new->br_startblock, new->br_blockcount,
2640 &i)))
2641 goto done;
2642 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2643 cur->bc_rec.b.br_state = XFS_EXT_NORM;
2644 if ((error = xfs_btree_insert(cur, &i)))
2645 goto done;
2646 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2647 }
2648 break;
2649
2650 case 0:
2651 /*
2652 * Setting the middle part of a previous oldext extent to
2653 * newext. Contiguity is impossible here.
2654 * One extent becomes three extents.
2655 */
2656 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2657 xfs_bmbt_set_blockcount(ep,
2658 new->br_startoff - PREV.br_startoff);
2659 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2660
2661 r[0] = *new;
2662 r[1].br_startoff = new_endoff;
2663 r[1].br_blockcount =
2664 PREV.br_startoff + PREV.br_blockcount - new_endoff;
2665 r[1].br_startblock = new->br_startblock + new->br_blockcount;
2666 r[1].br_state = oldext;
2667
2668 ++*idx;
2669 xfs_iext_insert(ip, *idx, 2, &r[0], state);
2670
2671 ip->i_d.di_nextents += 2;
2672 if (cur == NULL)
2673 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2674 else {
2675 rval = XFS_ILOG_CORE;
2676 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2677 PREV.br_startblock, PREV.br_blockcount,
2678 &i)))
2679 goto done;
2680 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2681 /* new right extent - oldext */
2682 if ((error = xfs_bmbt_update(cur, r[1].br_startoff,
2683 r[1].br_startblock, r[1].br_blockcount,
2684 r[1].br_state)))
2685 goto done;
2686 /* new left extent - oldext */
2687 cur->bc_rec.b = PREV;
2688 cur->bc_rec.b.br_blockcount =
2689 new->br_startoff - PREV.br_startoff;
2690 if ((error = xfs_btree_insert(cur, &i)))
2691 goto done;
2692 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2693 /*
2694 * Reset the cursor to the position of the new extent
2695 * we are about to insert as we can't trust it after
2696 * the previous insert.
2697 */
2698 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
2699 new->br_startblock, new->br_blockcount,
2700 &i)))
2701 goto done;
2702 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2703 /* new middle extent - newext */
2704 cur->bc_rec.b.br_state = new->br_state;
2705 if ((error = xfs_btree_insert(cur, &i)))
2706 goto done;
2707 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2708 }
2709 break;
2710
2711 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2712 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2713 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
2714 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2715 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2716 case BMAP_LEFT_CONTIG:
2717 case BMAP_RIGHT_CONTIG:
2718 /*
2719 * These cases are all impossible.
2720 */
2721 ASSERT(0);
2722 }
2723
2724 /* update reverse mappings */
2725 error = xfs_rmap_convert_extent(mp, dfops, ip, XFS_DATA_FORK, new);
2726 if (error)
2727 goto done;
2728
2729 /* convert to a btree if necessary */
2730 if (xfs_bmap_needs_btree(ip, XFS_DATA_FORK)) {
2731 int tmp_logflags; /* partial log flag return val */
2733 ASSERT(cur == NULL);
2734 error = xfs_bmap_extents_to_btree(tp, ip, first, dfops, &cur,
2735 0, &tmp_logflags, XFS_DATA_FORK);
2736 *logflagsp |= tmp_logflags;
2737 if (error)
2738 goto done;
2739 }
2740
2741 /* clear out the allocated field, done with it now in any case. */
2742 if (cur) {
2743 cur->bc_private.b.allocated = 0;
2744 *curp = cur;
2745 }
2746
2747 xfs_bmap_check_leaf_extents(*curp, ip, XFS_DATA_FORK);
2748 done:
2749 *logflagsp |= rval;
2750 return error;
2751 }
2756 /*
2757 * Convert a hole to a delayed allocation.
2758 */
2759 STATIC void
2760 xfs_bmap_add_extent_hole_delay(
2761 xfs_inode_t *ip, /* incore inode pointer */
2762 xfs_extnum_t *idx, /* extent number to update/insert */
2763 xfs_bmbt_irec_t *new) /* new data to add to file extents */
2764 {
2765 xfs_ifork_t *ifp; /* inode fork pointer */
2766 xfs_bmbt_irec_t left; /* left neighbor extent entry */
2767 xfs_filblks_t newlen=0; /* new indirect size */
2768 xfs_filblks_t oldlen=0; /* old indirect size */
2769 xfs_bmbt_irec_t right; /* right neighbor extent entry */
2770 int state; /* state bits, accessed thru macros */
2771 xfs_filblks_t temp=0; /* temp for indirect calculations */
2773 ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
2774 state = 0;
2775 ASSERT(isnullstartblock(new->br_startblock));
2777 /*
2778 * Check and set flags if this segment has a left neighbor
2779 */
2780 if (*idx > 0) {
2781 state |= BMAP_LEFT_VALID;
2782 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &left);
2783
2784 if (isnullstartblock(left.br_startblock))
2785 state |= BMAP_LEFT_DELAY;
2786 }
2787
2789 * Check and set flags if the current (right) segment exists.
2790 * If it doesn't exist, we're converting the hole at end-of-file.
2792 if (*idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) {
2793 state |= BMAP_RIGHT_VALID;
2794 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &right);
2796 if (isnullstartblock(right.br_startblock))
2797 state |= BMAP_RIGHT_DELAY;
2798 }
2799
2801 * Set contiguity flags on the left and right neighbors.
2802 * Don't let extents get too large, even if the pieces are contiguous.
2804 if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) &&
2805 left.br_startoff + left.br_blockcount == new->br_startoff &&
2806 left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2807 state |= BMAP_LEFT_CONTIG;
2809 if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) &&
2810 new->br_startoff + new->br_blockcount == right.br_startoff &&
2811 new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
2812 (!(state & BMAP_LEFT_CONTIG) ||
2813 (left.br_blockcount + new->br_blockcount +
2814 right.br_blockcount <= MAXEXTLEN)))
2815 state |= BMAP_RIGHT_CONTIG;
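/*
 * Example of the MAXEXTLEN guard above (illustrative): with left, new
 * and right delalloc pieces of 1048576, 1048576 and 2097152 blocks, a
 * three-way merge would exceed MAXEXTLEN (2^21 - 1), so only one of
 * the contiguity flags may remain set and at most two pieces are
 * merged below.
 */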
2818 * Switch out based on the contiguity flags.
2820 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2821 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2822 /*
2823 * New allocation is contiguous with delayed allocations
2824 * on the left and on the right.
2825 * Merge all three into a single extent record.
2826 */
2827 --*idx;
2828 temp = left.br_blockcount + new->br_blockcount +
2829 right.br_blockcount;
2831 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2832 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
2833 oldlen = startblockval(left.br_startblock) +
2834 startblockval(new->br_startblock) +
2835 startblockval(right.br_startblock);
2836 newlen = xfs_bmap_worst_indlen(ip, temp);
2837 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
2838 nullstartblock((int)newlen));
2839 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2841 xfs_iext_remove(ip, *idx + 1, 1, state);
2842 break;
2843
2844 case BMAP_LEFT_CONTIG:
2845 /*
2846 * New allocation is contiguous with a delayed allocation
2847 * on the left.
2848 * Merge the new allocation with the left neighbor.
2849 */
2850 --*idx;
2851 temp = left.br_blockcount + new->br_blockcount;
2853 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2854 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
2855 oldlen = startblockval(left.br_startblock) +
2856 startblockval(new->br_startblock);
2857 newlen = xfs_bmap_worst_indlen(ip, temp);
2858 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
2859 nullstartblock((int)newlen));
2860 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2861 break;
2862
2863 case BMAP_RIGHT_CONTIG:
2864 /*
2865 * New allocation is contiguous with a delayed allocation
2866 * on the right.
2867 * Merge the new allocation with the right neighbor.
2868 */
2869 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2870 temp = new->br_blockcount + right.br_blockcount;
2871 oldlen = startblockval(new->br_startblock) +
2872 startblockval(right.br_startblock);
2873 newlen = xfs_bmap_worst_indlen(ip, temp);
2874 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
2875 new->br_startoff,
2876 nullstartblock((int)newlen), temp, right.br_state);
2877 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2878 break;
2879
2880 default:
2881 /*
2882 * New allocation is not contiguous with another
2883 * delayed allocation.
2884 * Insert a new entry.
2885 */
2886 oldlen = newlen = 0;
2887 xfs_iext_insert(ip, *idx, 1, new, state);
2888 break;
2889 }
2890 if (oldlen != newlen) {
2891 ASSERT(oldlen > newlen);
2892 xfs_mod_fdblocks(ip->i_mount, (int64_t)(oldlen - newlen),
2893 false);
2894 /*
2895 * Nothing to do for disk quota accounting here.
2896 */
2897 }
2898 }
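/*
 * A worked example for the fix-up above (illustrative numbers): if the
 * left, new and right pieces each carried a 2-block worst-case indirect
 * reservation (oldlen = 6) but the merged extent only needs 4 blocks
 * (newlen), the 2 surplus blocks are returned via xfs_mod_fdblocks().
 * Merging can only shrink the worst-case need, hence the
 * ASSERT(oldlen > newlen) when the values differ.
 */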
2900 /*
2901 * Convert a hole to a real allocation.
2902 */
2903 STATIC int /* error */
2904 xfs_bmap_add_extent_hole_real(
2905 struct xfs_bmalloca *bma,
2906 int whichfork)
2907 {
2908 struct xfs_bmbt_irec *new = &bma->got;
2909 int error; /* error return value */
2910 int i; /* temp state */
2911 xfs_ifork_t *ifp; /* inode fork pointer */
2912 xfs_bmbt_irec_t left; /* left neighbor extent entry */
2913 xfs_bmbt_irec_t right; /* right neighbor extent entry */
2914 int rval=0; /* return value (logging flags) */
2915 int state; /* state bits, accessed thru macros */
2916 struct xfs_mount *mp;
2918 mp = bma->ip->i_mount;
2919 ifp = XFS_IFORK_PTR(bma->ip, whichfork);
2921 ASSERT(bma->idx >= 0);
2922 ASSERT(bma->idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
2923 ASSERT(!isnullstartblock(new->br_startblock));
2924 ASSERT(!bma->cur ||
2925 !(bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
2927 XFS_STATS_INC(mp, xs_add_exlist);
2928
2929 state = 0;
2930 if (whichfork == XFS_ATTR_FORK)
2931 state |= BMAP_ATTRFORK;
2933 /*
2934 * Check and set flags if this segment has a left neighbor.
2935 */
2936 if (bma->idx > 0) {
2937 state |= BMAP_LEFT_VALID;
2938 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1), &left);
2939 if (isnullstartblock(left.br_startblock))
2940 state |= BMAP_LEFT_DELAY;
2941 }
2942
2944 * Check and set flags if this segment has a current value.
2945 * Not true if we're inserting into the "hole" at eof.
2947 if (bma->idx < ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) {
2948 state |= BMAP_RIGHT_VALID;
2949 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &right);
2950 if (isnullstartblock(right.br_startblock))
2951 state |= BMAP_RIGHT_DELAY;
2952 }
2953
2955 * We're inserting a real allocation between "left" and "right".
2956 * Set the contiguity flags. Don't let extents get too large.
2958 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
2959 left.br_startoff + left.br_blockcount == new->br_startoff &&
2960 left.br_startblock + left.br_blockcount == new->br_startblock &&
2961 left.br_state == new->br_state &&
2962 left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2963 state |= BMAP_LEFT_CONTIG;
2965 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
2966 new->br_startoff + new->br_blockcount == right.br_startoff &&
2967 new->br_startblock + new->br_blockcount == right.br_startblock &&
2968 new->br_state == right.br_state &&
2969 new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
2970 (!(state & BMAP_LEFT_CONTIG) ||
2971 left.br_blockcount + new->br_blockcount +
2972 right.br_blockcount <= MAXEXTLEN))
2973 state |= BMAP_RIGHT_CONTIG;
2975 error = 0;
2976 /*
2977 * Select which case we're in here, and implement it.
2978 */
2979 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2980 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2981 /*
2982 * New allocation is contiguous with real allocations on the
2983 * left and on the right.
2984 * Merge all three into a single extent record.
2985 */
2986 --bma->idx;
2987 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
2988 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
2989 left.br_blockcount + new->br_blockcount +
2990 right.br_blockcount);
2991 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
2993 xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
2995 XFS_IFORK_NEXT_SET(bma->ip, whichfork,
2996 XFS_IFORK_NEXTENTS(bma->ip, whichfork) - 1);
2997 if (bma->cur == NULL) {
2998 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
2999 } else {
3000 rval = XFS_ILOG_CORE;
3001 error = xfs_bmbt_lookup_eq(bma->cur, right.br_startoff,
3002 right.br_startblock, right.br_blockcount,
3003 &i);
3004 if (error)
3005 goto done;
3006 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
3007 error = xfs_btree_delete(bma->cur, &i);
3008 if (error)
3009 goto done;
3010 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
3011 error = xfs_btree_decrement(bma->cur, 0, &i);
3012 if (error)
3013 goto done;
3014 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
3015 error = xfs_bmbt_update(bma->cur, left.br_startoff,
3016 left.br_startblock,
3017 left.br_blockcount +
3018 new->br_blockcount +
3019 right.br_blockcount,
3020 left.br_state);
3021 if (error)
3022 goto done;
3023 }
3024 break;
3025
3026 case BMAP_LEFT_CONTIG:
3027 /*
3028 * New allocation is contiguous with a real allocation
3029 * on the left.
3030 * Merge the new allocation with the left neighbor.
3031 */
3032 --bma->idx;
3033 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
3034 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
3035 left.br_blockcount + new->br_blockcount);
3036 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
3038 if (bma->cur == NULL) {
3039 rval = xfs_ilog_fext(whichfork);
3040 } else {
3041 rval = 0;
3042 error = xfs_bmbt_lookup_eq(bma->cur, left.br_startoff,
3043 left.br_startblock, left.br_blockcount,
3044 &i);
3045 if (error)
3046 goto done;
3047 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
3048 error = xfs_bmbt_update(bma->cur, left.br_startoff,
3049 left.br_startblock,
3050 left.br_blockcount +
3051 new->br_blockcount,
3052 left.br_state);
3053 if (error)
3054 goto done;
3055 }
3056 break;
3057
3058 case BMAP_RIGHT_CONTIG:
3059 /*
3060 * New allocation is contiguous with a real allocation
3061 * on the right.
3062 * Merge the new allocation with the right neighbor.
3063 */
3064 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
3065 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, bma->idx),
3066 new->br_startoff, new->br_startblock,
3067 new->br_blockcount + right.br_blockcount,
3068 right.br_state);
3069 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
3071 if (bma->cur == NULL) {
3072 rval = xfs_ilog_fext(whichfork);
3073 } else {
3074 rval = 0;
3075 error = xfs_bmbt_lookup_eq(bma->cur,
3076 right.br_startoff,
3077 right.br_startblock,
3078 right.br_blockcount, &i);
3079 if (error)
3080 goto done;
3081 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
3082 error = xfs_bmbt_update(bma->cur, new->br_startoff,
3083 new->br_startblock,
3084 new->br_blockcount +
3085 right.br_blockcount,
3086 right.br_state);
3087 if (error)
3088 goto done;
3089 }
3090 break;
3091
3092 default:
3093 /*
3094 * New allocation is not contiguous with another
3095 * real allocation.
3096 * Insert a new entry.
3097 */
3098 xfs_iext_insert(bma->ip, bma->idx, 1, new, state);
3099 XFS_IFORK_NEXT_SET(bma->ip, whichfork,
3100 XFS_IFORK_NEXTENTS(bma->ip, whichfork) + 1);
3101 if (bma->cur == NULL) {
3102 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
3103 } else {
3104 rval = XFS_ILOG_CORE;
3105 error = xfs_bmbt_lookup_eq(bma->cur,
3106 new->br_startoff,
3107 new->br_startblock,
3108 new->br_blockcount, &i);
3109 if (error)
3110 goto done;
3111 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
3112 bma->cur->bc_rec.b.br_state = new->br_state;
3113 error = xfs_btree_insert(bma->cur, &i);
3114 if (error)
3115 goto done;
3116 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
3117 }
3118 break;
3119 }
3120
3121 /* add reverse mapping */
3122 error = xfs_rmap_map_extent(mp, bma->dfops, bma->ip, whichfork, new);
3123 if (error)
3124 goto done;
3125
3126 /* convert to a btree if necessary */
3127 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
3128 int tmp_logflags; /* partial log flag return val */
3130 ASSERT(bma->cur == NULL);
3131 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
3132 bma->firstblock, bma->dfops, &bma->cur,
3133 0, &tmp_logflags, whichfork);
3134 bma->logflags |= tmp_logflags;
3135 if (error)
3136 goto done;
3137 }
3138
3139 /* clear out the allocated field, done with it now in any case. */
3140 if (bma->cur)
3141 bma->cur->bc_private.b.allocated = 0;
3142
3143 xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
3144 done:
3145 bma->logflags |= rval;
3146 return error;
3147 }
3149 /*
3150 * Functions used in the extent read, allocate and remove paths
3151 */
3152
3153 /*
3154 * Adjust the size of the new extent based on di_extsize and rt extsize.
3155 */
3156 int
3157 xfs_bmap_extsize_align(
3158 xfs_mount_t *mp, /* file system mount structure */
3159 xfs_bmbt_irec_t *gotp, /* next extent pointer */
3160 xfs_bmbt_irec_t *prevp, /* previous extent pointer */
3161 xfs_extlen_t extsz, /* align to this extent size */
3162 int rt, /* is this a realtime inode? */
3163 int eof, /* is extent at end-of-file? */
3164 int delay, /* creating delalloc extent? */
3165 int convert, /* overwriting unwritten extent? */
3166 xfs_fileoff_t *offp, /* in/out: aligned offset */
3167 xfs_extlen_t *lenp) /* in/out: aligned length */
3168 {
3169 xfs_fileoff_t orig_off; /* original offset */
3170 xfs_extlen_t orig_alen; /* original length */
3171 xfs_fileoff_t orig_end; /* original off+len */
3172 xfs_fileoff_t nexto; /* next file offset */
3173 xfs_fileoff_t prevo; /* previous file offset */
3174 xfs_fileoff_t align_off; /* temp for offset */
3175 xfs_extlen_t align_alen; /* temp for length */
3176 xfs_extlen_t temp; /* temp for calculations */
3177
3178 if (convert)
3179 return 0;
3180
3181 orig_off = align_off = *offp;
3182 orig_alen = align_alen = *lenp;
3183 orig_end = orig_off + orig_alen;
3186 * If this request overlaps an existing extent, then don't
3187 * attempt to perform any additional alignment.
3189 if (!delay && !eof &&
3190 (orig_off >= gotp->br_startoff) &&
3191 (orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
3192 return 0;
3193 }
3194
3195 /*
3196 * If the file offset is unaligned vs. the extent size
3197 * we need to align it. This will be possible unless
3198 * the file was previously written with a kernel that didn't
3199 * perform this alignment, or if a truncate shot us in the
3200 * foot.
3201 */
3202 temp = do_mod(orig_off, extsz);
3203 if (temp) {
3204 align_alen += temp;
3205 align_off -= temp;
3206 }
3207
3208 /* Same adjustment for the end of the requested area. */
3209 temp = (align_alen % extsz);
3210 if (temp)
3211 align_alen += extsz - temp;
3212
3213 /*
3214 * For large extent hint sizes, the aligned extent might be larger than
3215 * MAXEXTLEN. In that case, reduce the size by an extsz so that it pulls
3216 * the length back under MAXEXTLEN. The outer allocation loops handle
3217 * short allocation just fine, so it is safe to do this. We only want to
3218 * do it when we are forced to, though, because it means more allocation
3219 * operations are required.
3220 */
3221 while (align_alen > MAXEXTLEN)
3222 align_alen -= extsz;
3223 ASSERT(align_alen <= MAXEXTLEN);
3226 * If the previous block overlaps with this proposed allocation
3227 * then move the start forward without adjusting the length.
3229 if (prevp->br_startoff != NULLFILEOFF) {
3230 if (prevp->br_startblock == HOLESTARTBLOCK)
3231 prevo = prevp->br_startoff;
3232 else
3233 prevo = prevp->br_startoff + prevp->br_blockcount;
3234 } else
3235 prevo = 0;
3236 if (align_off != orig_off && align_off < prevo)
3237 align_off = prevo;
3238 /*
3239 * If the next block overlaps with this proposed allocation
3240 * then move the start back without adjusting the length,
3241 * but not before offset 0.
3242 * This may of course make the start overlap previous block,
3243 * and if we hit the offset 0 limit then the next block
3244 * can still overlap too.
3245 */
3246 if (!eof && gotp->br_startoff != NULLFILEOFF) {
3247 if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
3248 (!delay && gotp->br_startblock == DELAYSTARTBLOCK))
3249 nexto = gotp->br_startoff + gotp->br_blockcount;
3250 else
3251 nexto = gotp->br_startoff;
3252 } else
3253 nexto = NULLFILEOFF;
3254 if (!eof &&
3255 align_off + align_alen != orig_end &&
3256 align_off + align_alen > nexto)
3257 align_off = nexto > align_alen ? nexto - align_alen : 0;
3259 * If we're now overlapping the next or previous extent that
3260 * means we can't fit an extsz piece in this hole. Just move
3261 * the start forward to the first valid spot and set
3262 * the length so we hit the end.
3264 if (align_off != orig_off && align_off < prevo)
3265 align_off = prevo;
3266 if (align_off + align_alen != orig_end &&
3267 align_off + align_alen > nexto &&
3268 nexto != NULLFILEOFF) {
3269 ASSERT(nexto > prevo);
3270 align_alen = nexto - align_off;
3271 }
3272
3273 /*
3274 * If realtime, and the result isn't a multiple of the realtime
3275 * extent size we need to remove blocks until it is.
3276 */
3277 if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) {
3279 * We're not covering the original request, or
3280 * we won't be able to once we fix the length.
3282 if (orig_off < align_off ||
3283 orig_end > align_off + align_alen ||
3284 align_alen - temp < orig_alen)
3285 return -EINVAL;
3286 /*
3287 * Try to fix it by moving the start up.
3288 */
3289 if (align_off + temp <= orig_off) {
3290 align_alen += temp;
3291 align_off -= temp;
3292 }
3293 /*
3294 * Try to fix it by moving the end in.
3295 */
3296 else if (align_off + align_alen - temp >= orig_end)
3297 align_alen -= temp;
3298 /*
3299 * Set the start to the minimum then trim the length.
3300 */
3301 else {
3302 align_alen -= orig_off - align_off;
3303 align_off = orig_off;
3304 align_alen -= align_alen % mp->m_sb.sb_rextsize;
3305 }
3306 /*
3307 * Result doesn't cover the request, fail it.
3308 */
3309 if (orig_off < align_off || orig_end > align_off + align_alen)
3310 return -EINVAL;
3311 } else {
3312 ASSERT(orig_off >= align_off);
3313 /* see MAXEXTLEN handling above */
3314 ASSERT(orig_end <= align_off + align_alen ||
3315 align_alen + extsz > MAXEXTLEN);
3316 }
3317
3318 #ifdef DEBUG
3319 if (!eof && gotp->br_startoff != NULLFILEOFF)
3320 ASSERT(align_off + align_alen <= gotp->br_startoff);
3321 if (prevp->br_startoff != NULLFILEOFF)
3322 ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
3323 #endif
3324
3325 *offp = align_off;
3326 *lenp = align_alen;
3327 return 0;
3328 }
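/*
 * Worked example for the alignment logic above (illustrative): with an
 * extent size hint of 16 blocks, a request for 10 blocks at file offset
 * 21 is moved back to offset 16 (temp = 5 at the front) and rounded up
 * to 16 blocks, so the aligned allocation covers offsets 16-31 and both
 * ends sit on extent-size boundaries. The neighbour checks may then
 * pull the start forward or push it back, and on realtime inodes the
 * result is further trimmed to a multiple of sb_rextsize or the call
 * fails with -EINVAL.
 */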
3330 #define XFS_ALLOC_GAP_UNITS 4
3332 STATIC void
3333 xfs_bmap_adjacent(
3334 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
3335 {
3336 xfs_fsblock_t adjust; /* adjustment to block numbers */
3337 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */
3338 xfs_mount_t *mp; /* mount point structure */
3339 int nullfb; /* true if ap->firstblock isn't set */
3340 int rt; /* true if inode is realtime */
3342 #define ISVALID(x,y) \
3343 (rt ? \
3344 (x) < mp->m_sb.sb_rblocks : \
3345 XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \
3346 XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \
3347 XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks)
3349 mp = ap->ip->i_mount;
3350 nullfb = *ap->firstblock == NULLFSBLOCK;
3351 rt = XFS_IS_REALTIME_INODE(ap->ip) &&
3352 xfs_alloc_is_userdata(ap->datatype);
3353 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
3355 * If allocating at eof, and there's a previous real block,
3356 * try to use its last block as our starting point.
3358 if (ap->eof && ap->prev.br_startoff != NULLFILEOFF &&
3359 !isnullstartblock(ap->prev.br_startblock) &&
3360 ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount,
3361 ap->prev.br_startblock)) {
3362 ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount;
3363 /*
3364 * Adjust for the gap between prevp and us.
3365 */
3366 adjust = ap->offset -
3367 (ap->prev.br_startoff + ap->prev.br_blockcount);
3368 if (adjust &&
3369 ISVALID(ap->blkno + adjust, ap->prev.br_startblock))
3370 ap->blkno += adjust;
3371 }
3373 * If not at eof, then compare the two neighbor blocks.
3374 * Figure out whether either one gives us a good starting point,
3375 * and pick the better one.
3377 else if (!ap->eof) {
3378 xfs_fsblock_t gotbno; /* right side block number */
3379 xfs_fsblock_t gotdiff=0; /* right side difference */
3380 xfs_fsblock_t prevbno; /* left side block number */
3381 xfs_fsblock_t prevdiff=0; /* left side difference */
3384 * If there's a previous (left) block, select a requested
3385 * start block based on it.
3387 if (ap->prev.br_startoff != NULLFILEOFF &&
3388 !isnullstartblock(ap->prev.br_startblock) &&
3389 (prevbno = ap->prev.br_startblock +
3390 ap->prev.br_blockcount) &&
3391 ISVALID(prevbno, ap->prev.br_startblock)) {
3393 * Calculate gap to end of previous block.
3395 adjust = prevdiff = ap->offset -
3396 (ap->prev.br_startoff +
3397 ap->prev.br_blockcount);
3399 * Figure the startblock based on the previous block's
3400 * end and the gap size.
3402 * If the gap is large relative to the piece we're
3403 * allocating, or using it gives us an invalid block
3404 * number, then just use the end of the previous block.
3406 if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3407 ISVALID(prevbno + prevdiff,
3408 ap->prev.br_startblock))
3409 prevbno += adjust;
3410 else
3411 prevdiff += adjust;
3412 /*
3413 * If the firstblock forbids it, can't use it,
3414 * must use default.
3415 */
3416 if (!rt && !nullfb &&
3417 XFS_FSB_TO_AGNO(mp, prevbno) != fb_agno)
3418 prevbno = NULLFSBLOCK;
3419 }
3420 /*
3421 * No previous block or can't follow it, just default.
3422 */
3423 else
3424 prevbno = NULLFSBLOCK;
3426 * If there's a following (right) block, select a requested
3427 * start block based on it.
3429 if (!isnullstartblock(ap->got.br_startblock)) {
3431 * Calculate gap to start of next block.
3433 adjust = gotdiff = ap->got.br_startoff - ap->offset;
3435 * Figure the startblock based on the next block's
3436 * start and the gap size.
3438 gotbno = ap->got.br_startblock;
3441 * If the gap is large relative to the piece we're
3442 * allocating, or using it gives us an invalid block
3443 * number, then just use the start of the next block
3444 * offset by our length.
3446 if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3447 ISVALID(gotbno - gotdiff, gotbno))
3448 gotbno -= adjust;
3449 else if (ISVALID(gotbno - ap->length, gotbno)) {
3450 gotbno -= ap->length;
3451 gotdiff += adjust - ap->length;
3452 } else
3453 gotdiff += adjust;
3455 * If the firstblock forbids it, can't use it,
3458 if (!rt && !nullfb &&
3459 XFS_FSB_TO_AGNO(mp, gotbno) != fb_agno)
3460 gotbno = NULLFSBLOCK;
3461 }
3462 /*
3463 * No next block, just default.
3464 */
3465 else
3466 gotbno = NULLFSBLOCK;
3468 * If both valid, pick the better one, else the only good
3469 * one, else ap->blkno is already set (to 0 or the inode block).
3471 if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK)
3472 ap->blkno = prevdiff <= gotdiff ? prevbno : gotbno;
3473 else if (prevbno != NULLFSBLOCK)
3474 ap->blkno = prevbno;
3475 else if (gotbno != NULLFSBLOCK)
3476 ap->blkno = gotbno;
3477 }
3478 #undef ISVALID
3479 }
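/*
 * Example of the XFS_ALLOC_GAP_UNITS heuristic above (illustrative):
 * when allocating 100 blocks into a hole whose previous extent ends at
 * disk block 5000 with a 150-block file-offset gap, 150 <= 4 * 100, so
 * the requested startblock becomes 5000 + 150, keeping file offset and
 * disk offset in step; a 1000-block gap would be considered too large
 * and the request would simply target the end of the previous extent.
 */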
3481 static int
3482 xfs_bmap_longest_free_extent(
3483 struct xfs_trans *tp,
3484 xfs_agnumber_t ag,
3485 xfs_extlen_t *blen,
3486 int *notinit)
3487 {
3488 struct xfs_mount *mp = tp->t_mountp;
3489 struct xfs_perag *pag;
3490 xfs_extlen_t longest;
3491 int error = 0;
3492
3493 pag = xfs_perag_get(mp, ag);
3494 if (!pag->pagf_init) {
3495 error = xfs_alloc_pagf_init(mp, tp, ag, XFS_ALLOC_FLAG_TRYLOCK);
3496 if (error)
3497 goto out;
3498
3499 if (!pag->pagf_init) {
3500 *notinit = 1;
3501 goto out;
3502 }
3503 }
3504
3505 longest = xfs_alloc_longest_free_extent(mp, pag,
3506 xfs_alloc_min_freelist(mp, pag),
3507 xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE));
3508 if (*blen < longest)
3509 *blen = longest;
3510
3511 out:
3512 xfs_perag_put(pag);
3513 return error;
3514 }
3515
3516 static void
3517 xfs_bmap_select_minlen(
3518 struct xfs_bmalloca *ap,
3519 struct xfs_alloc_arg *args,
3520 xfs_extlen_t *blen,
3521 int notinit)
3522 {
3523 if (notinit || *blen < ap->minlen) {
3525 * Since we did a BUF_TRYLOCK above, it is possible that
3526 * there is space for this request.
3528 args->minlen = ap->minlen;
3529 } else if (*blen < args->maxlen) {
3531 * If the best seen length is less than the request length,
3532 * use the best as the minimum.
3534 args->minlen = *blen;
3535 } else {
3536 /*
3537 * Otherwise we've seen an extent as big as maxlen, use that
3538 * as the minimum.
3539 */
3540 args->minlen = args->maxlen;
3541 }
3542 }
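/*
 * Example of the minlen selection above (illustrative): for a 64-block
 * request (args->maxlen == 64) where the AG scan saw a best free extent
 * of 48 blocks, args->minlen becomes 48; had the scan seen 64 or more,
 * minlen would equal maxlen; and if no AG was initialised (notinit) or
 * the best was below ap->minlen, the caller's hard minimum is used so
 * the allocation can still succeed with a short extent.
 */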
3545 xfs_bmap_btalloc_nullfb(
3546 struct xfs_bmalloca *ap,
3547 struct xfs_alloc_arg *args,
3548 xfs_extlen_t *blen)
3549 {
3550 struct xfs_mount *mp = ap->ip->i_mount;
3551 xfs_agnumber_t ag, startag;
3552 int notinit = 0;
3553 int error;
3554
3555 args->type = XFS_ALLOCTYPE_START_BNO;
3556 args->total = ap->total;
3558 startag = ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
3559 if (startag == NULLAGNUMBER)
3560 startag = ag = 0;
3561
3562 while (*blen < args->maxlen) {
3563 error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
3564 &notinit);
3565 if (error)
3566 return error;
3567
3568 if (++ag == mp->m_sb.sb_agcount)
3569 ag = 0;
3570 if (ag == startag)
3571 break;
3572 }
3573
3574 xfs_bmap_select_minlen(ap, args, blen, notinit);
3575 return 0;
3576 }
3578 STATIC int
3579 xfs_bmap_btalloc_filestreams(
3580 struct xfs_bmalloca *ap,
3581 struct xfs_alloc_arg *args,
3582 xfs_extlen_t *blen)
3583 {
3584 struct xfs_mount *mp = ap->ip->i_mount;
3585 xfs_agnumber_t ag;
3586 int notinit = 0;
3587 int error;
3588
3589 args->type = XFS_ALLOCTYPE_NEAR_BNO;
3590 args->total = ap->total;
3592 ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
3593 if (ag == NULLAGNUMBER)
3594 ag = 0;
3595
3596 error = xfs_bmap_longest_free_extent(args->tp, ag, blen, &notinit);
3597 if (error)
3598 return error;
3599
3600 if (*blen < args->maxlen) {
3601 error = xfs_filestream_new_ag(ap, &ag);
3602 if (error)
3603 return error;
3604
3605 error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
3606 &notinit);
3607 if (error)
3608 return error;
3609
3610 }
3611
3612 xfs_bmap_select_minlen(ap, args, blen, notinit);
3613
3614 /*
3615 * Set the failure fallback case to look in the selected AG as stream
3616 * may have moved.
3617 */
3618 ap->blkno = args->fsbno = XFS_AGB_TO_FSB(mp, ag, 0);
3619 return 0;
3620 }
3622 STATIC int
3623 xfs_bmap_btalloc(
3624 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
3625 {
3626 xfs_mount_t *mp; /* mount point structure */
3627 xfs_alloctype_t atype = 0; /* type for allocation routines */
3628 xfs_extlen_t align = 0; /* minimum allocation alignment */
3629 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */
3630 xfs_agnumber_t ag;
3631 xfs_alloc_arg_t args;
3632 xfs_extlen_t blen;
3633 xfs_extlen_t nextminlen = 0;
3634 int nullfb; /* true if ap->firstblock isn't set */
3635 int isaligned;
3636 int tryagain;
3637 int error;
3638 int stripe_align;
3639
3640 ASSERT(ap->length);
3641
3642 mp = ap->ip->i_mount;
3644 /* stripe alignment for allocation is determined by mount parameters */
3645 stripe_align = 0;
3646 if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
3647 stripe_align = mp->m_swidth;
3648 else if (mp->m_dalign)
3649 stripe_align = mp->m_dalign;
3651 if (xfs_alloc_is_userdata(ap->datatype))
3652 align = xfs_get_extsz_hint(ap->ip);
3653 if (unlikely(align)) {
3654 error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
3655 align, 0, ap->eof, 0, ap->conv,
3656 &ap->offset, &ap->length);
3657 ASSERT(!error);
3658 ASSERT(ap->length);
3659 }
3660
3662 nullfb = *ap->firstblock == NULLFSBLOCK;
3663 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
3664 if (nullfb) {
3665 if (xfs_alloc_is_userdata(ap->datatype) &&
3666 xfs_inode_is_filestream(ap->ip)) {
3667 ag = xfs_filestream_lookup_ag(ap->ip);
3668 ag = (ag != NULLAGNUMBER) ? ag : 0;
3669 ap->blkno = XFS_AGB_TO_FSB(mp, ag, 0);
3670 } else {
3671 ap->blkno = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
3672 }
3673 } else
3674 ap->blkno = *ap->firstblock;
3675
3676 xfs_bmap_adjacent(ap);
3677
3678 /*
3679 * If allowed, use ap->blkno; otherwise must use firstblock since
3680 * it's in the right allocation group.
3681 */
3682 if (nullfb || XFS_FSB_TO_AGNO(mp, ap->blkno) == fb_agno)
3683 ;
3684 else
3685 ap->blkno = *ap->firstblock;
3686 /*
3687 * Normal allocation, done through xfs_alloc_vextent.
3688 */
3689 tryagain = isaligned = 0;
3690 memset(&args, 0, sizeof(args));
3691 args.tp = ap->tp;
3692 args.mp = mp;
3693 args.fsbno = ap->blkno;
3694 xfs_rmap_skip_owner_update(&args.oinfo);
3696 /* Trim the allocation back to the maximum an AG can fit. */
3697 args.maxlen = MIN(ap->length, mp->m_ag_max_usable);
3698 args.firstblock = *ap->firstblock;
3699 blen = 0;
3700 if (nullfb) {
3701 /*
3702 * Search for an allocation group with a single extent large
3703 * enough for the request. If one isn't found, then adjust
3704 * the minimum allocation size to the largest space found.
3705 */
3706 if (xfs_alloc_is_userdata(ap->datatype) &&
3707 xfs_inode_is_filestream(ap->ip))
3708 error = xfs_bmap_btalloc_filestreams(ap, &args, &blen);
3710 error = xfs_bmap_btalloc_nullfb(ap, &args, &blen);
3711 if (error)
3712 return error;
3713 } else if (ap->dfops->dop_low) {
3714 if (xfs_inode_is_filestream(ap->ip))
3715 args.type = XFS_ALLOCTYPE_FIRST_AG;
3717 args.type = XFS_ALLOCTYPE_START_BNO;
3718 args.total = args.minlen = ap->minlen;
3719 } else {
3720 args.type = XFS_ALLOCTYPE_NEAR_BNO;
3721 args.total = ap->total;
3722 args.minlen = ap->minlen;
3723 }
3724 /* apply extent size hints if obtained earlier */
3725 if (unlikely(align)) {
3726 args.prod = align;
3727 if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod)))
3728 args.mod = (xfs_extlen_t)(args.prod - args.mod);
3729 } else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) {
3730 args.prod = 1;
3731 args.mod = 0;
3732 } else {
3733 args.prod = PAGE_SIZE >> mp->m_sb.sb_blocklog;
3734 if ((args.mod = (xfs_extlen_t)(do_mod(ap->offset, args.prod))))
3735 args.mod = (xfs_extlen_t)(args.prod - args.mod);
3736 }
3737 /*
3738 * If we are not low on available data blocks, and the
3739 * underlying logical volume manager is a stripe, and
3740 * the file offset is zero then try to allocate data
3741 * blocks on stripe unit boundary.
3742 * NOTE: ap->aeof is only set if the allocation length
3743 * is >= the stripe unit and the allocation offset is
3744 * at the end of file.
3745 */
3746 if (!ap->dfops->dop_low && ap->aeof) {
3747 if (!ap->offset) {
3748 args.alignment = stripe_align;
3749 atype = args.type;
3750 isaligned = 1;
3751 /*
3752 * Adjust for alignment
3753 */
3754 if (blen > args.alignment && blen <= args.maxlen)
3755 args.minlen = blen - args.alignment;
3756 args.minalignslop = 0;
3757 } else {
3758 /*
3759 * First try an exact bno allocation.
3760 * If it fails then do a near or start bno
3761 * allocation with alignment turned on.
3762 */
3763 atype = args.type;
3764 tryagain = 1;
3765 args.type = XFS_ALLOCTYPE_THIS_BNO;
3766 args.alignment = 1;
3767 /*
3768 * Compute the minlen+alignment for the
3769 * next case. Set slop so that the value
3770 * of minlen+alignment+slop doesn't go up
3771 * between the calls.
3772 */
3773 if (blen > stripe_align && blen <= args.maxlen)
3774 nextminlen = blen - stripe_align;
3776 nextminlen = args.minlen;
3777 if (nextminlen + stripe_align > args.minlen + 1)
3778 args.minalignslop =
3779 nextminlen + stripe_align -
3780 args.minlen - 1;
3781 else
3782 args.minalignslop = 0;
3783 }
3784 } else {
3785 args.alignment = 1;
3786 args.minalignslop = 0;
3787 }
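/*
 * Sketch of the invariant kept above (illustrative numbers): with
 * args.minlen of 64 for the exact-bno attempt, nextminlen of 96 and a
 * stripe_align of 32, minalignslop becomes 96 + 32 - 64 - 1 = 63, so
 * minlen + alignment + slop is 128 for the first call and 96 + 32 + 0
 * = 128 for the aligned retry; the space reserved never grows between
 * the two xfs_alloc_vextent() calls.
 */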
3788 args.minleft = ap->minleft;
3789 args.wasdel = ap->wasdel;
3790 args.resv = XFS_AG_RESV_NONE;
3791 args.datatype = ap->datatype;
3792 if (ap->datatype & XFS_ALLOC_USERDATA_ZERO)
3793 args.ip = ap->ip;
3794
3795 error = xfs_alloc_vextent(&args);
3796 if (error)
3797 return error;
3798
3799 if (tryagain && args.fsbno == NULLFSBLOCK) {
3800 /*
3801 * Exact allocation failed. Now try with alignment
3802 * turned on.
3803 */
3804 args.type = atype;
3805 args.fsbno = ap->blkno;
3806 args.alignment = stripe_align;
3807 args.minlen = nextminlen;
3808 args.minalignslop = 0;
3809 isaligned = 1;
3810 if ((error = xfs_alloc_vextent(&args)))
3811 return error;
3812 }
3813 if (isaligned && args.fsbno == NULLFSBLOCK) {
3814 /*
3815 * allocation failed, so turn off alignment and
3816 * try again.
3817 */
3818 args.type = atype;
3819 args.fsbno = ap->blkno;
3820 args.alignment = 0;
3821 if ((error = xfs_alloc_vextent(&args)))
3822 return error;
3823 }
3824 if (args.fsbno == NULLFSBLOCK && nullfb &&
3825 args.minlen > ap->minlen) {
3826 args.minlen = ap->minlen;
3827 args.type = XFS_ALLOCTYPE_START_BNO;
3828 args.fsbno = ap->blkno;
3829 if ((error = xfs_alloc_vextent(&args)))
3830 return error;
3831 }
3832 if (args.fsbno == NULLFSBLOCK && nullfb) {
3833 args.fsbno = 0;
3834 args.type = XFS_ALLOCTYPE_FIRST_AG;
3835 args.total = ap->minlen;
3836 args.minleft = 0;
3837 if ((error = xfs_alloc_vextent(&args)))
3838 return error;
3839 ap->dfops->dop_low = true;
3840 }
3841 if (args.fsbno != NULLFSBLOCK) {
3843 * check the allocation happened at the same or higher AG than
3844 * the first block that was allocated.
3846 ASSERT(*ap->firstblock == NULLFSBLOCK ||
3847 XFS_FSB_TO_AGNO(mp, *ap->firstblock) ==
3848 XFS_FSB_TO_AGNO(mp, args.fsbno) ||
3849 (ap->dfops->dop_low &&
3850 XFS_FSB_TO_AGNO(mp, *ap->firstblock) <
3851 XFS_FSB_TO_AGNO(mp, args.fsbno)));
3853 ap->blkno = args.fsbno;
3854 if (*ap->firstblock == NULLFSBLOCK)
3855 *ap->firstblock = args.fsbno;
3856 ASSERT(nullfb || fb_agno == args.agno ||
3857 (ap->dfops->dop_low && fb_agno < args.agno));
3858 ap->length = args.len;
3859 ap->ip->i_d.di_nblocks += args.len;
3860 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
3861 if (ap->wasdel)
3862 ap->ip->i_delayed_blks -= args.len;
3863 /*
3864 * Adjust the disk quota also. This was reserved
3865 * earlier.
3866 */
3867 xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
3868 ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT :
3869 XFS_TRANS_DQ_BCOUNT,
3870 (long) args.len);
3871 } else {
3872 ap->blkno = NULLFSBLOCK;
3873 ap->length = 0;
3874 }
3875 return 0;
3876 }
3878 /*
3879 * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
3880 * It figures out where to ask the underlying allocator to put the new extent.
3881 */
3882 STATIC int
3883 xfs_bmap_alloc(
3884 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
3885 {
3886 if (XFS_IS_REALTIME_INODE(ap->ip) &&
3887 xfs_alloc_is_userdata(ap->datatype))
3888 return xfs_bmap_rtalloc(ap);
3889 return xfs_bmap_btalloc(ap);
3890 }
3891
3892 /*
3893 * Trim the returned map to the required bounds
3894 */
3895 STATIC void
3896 xfs_bmapi_trim_map(
3897 struct xfs_bmbt_irec *mval,
3898 struct xfs_bmbt_irec *got,
3899 xfs_fileoff_t *bno,
3900 xfs_filblks_t len,
3901 xfs_fileoff_t obno,
3902 xfs_fileoff_t end,
3903 int n,
3904 int flags)
3905 {
3906 if ((flags & XFS_BMAPI_ENTIRE) ||
3907 got->br_startoff + got->br_blockcount <= obno) {
3908 *mval = *got;
3909 if (isnullstartblock(got->br_startblock))
3910 mval->br_startblock = DELAYSTARTBLOCK;
3911 return;
3912 }
3913
3916 ASSERT((*bno >= obno) || (n == 0));
3917 ASSERT(*bno < end);
3918 mval->br_startoff = *bno;
3919 if (isnullstartblock(got->br_startblock))
3920 mval->br_startblock = DELAYSTARTBLOCK;
3921 else
3922 mval->br_startblock = got->br_startblock +
3923 (*bno - got->br_startoff);
3925 * Return the minimum of what we got and what we asked for for
3926 * the length. We can use the len variable here because it is
3927 * modified below and we could have been there before coming
3928 * here if the first part of the allocation didn't overlap what
3929 * was asked for.
3930 */
3931 mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno,
3932 got->br_blockcount - (*bno - got->br_startoff));
3933 mval->br_state = got->br_state;
3934 ASSERT(mval->br_blockcount <= len);
3935 return;
3936 }
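/*
 * Example of the trimming above (illustrative): if got covers file
 * blocks 100-199 and the caller asked for blocks 150 onwards, the
 * returned mval starts at 150, its startblock is offset by 50 from
 * got->br_startblock, and its length is capped at 50 blocks (the part
 * of got past 150); delalloc extents are reported with DELAYSTARTBLOCK
 * instead of a real disk block number.
 */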
3938 /*
3939 * Update and validate the extent map to return
3940 */
3941 STATIC void
3942 xfs_bmapi_update_map(
3943 struct xfs_bmbt_irec **map,
3944 xfs_fileoff_t *bno,
3945 xfs_filblks_t *len,
3946 xfs_fileoff_t obno,
3947 xfs_fileoff_t end,
3948 int *n,
3949 int flags)
3950 {
3951 xfs_bmbt_irec_t *mval = *map;
3953 ASSERT((flags & XFS_BMAPI_ENTIRE) ||
3954 ((mval->br_startoff + mval->br_blockcount) <= end));
3955 ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) ||
3956 (mval->br_startoff < obno));
3958 *bno = mval->br_startoff + mval->br_blockcount;
3959 *len = end - *bno;
3960 if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) {
3961 /* update previous map with new information */
3962 ASSERT(mval->br_startblock == mval[-1].br_startblock);
3963 ASSERT(mval->br_blockcount > mval[-1].br_blockcount);
3964 ASSERT(mval->br_state == mval[-1].br_state);
3965 mval[-1].br_blockcount = mval->br_blockcount;
3966 mval[-1].br_state = mval->br_state;
3967 } else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK &&
3968 mval[-1].br_startblock != DELAYSTARTBLOCK &&
3969 mval[-1].br_startblock != HOLESTARTBLOCK &&
3970 mval->br_startblock == mval[-1].br_startblock +
3971 mval[-1].br_blockcount &&
3972 ((flags & XFS_BMAPI_IGSTATE) ||
3973 mval[-1].br_state == mval->br_state)) {
3974 ASSERT(mval->br_startoff ==
3975 mval[-1].br_startoff + mval[-1].br_blockcount);
3976 mval[-1].br_blockcount += mval->br_blockcount;
3977 } else if (*n > 0 &&
3978 mval->br_startblock == DELAYSTARTBLOCK &&
3979 mval[-1].br_startblock == DELAYSTARTBLOCK &&
3980 mval->br_startoff ==
3981 mval[-1].br_startoff + mval[-1].br_blockcount) {
3982 mval[-1].br_blockcount += mval->br_blockcount;
3983 mval[-1].br_state = mval->br_state;
3984 } else if (!((*n == 0) &&
3985 ((mval->br_startoff + mval->br_blockcount) <=
3986 obno))) {
3987 mval++;
3988 (*n)++;
3989 }
3990 *map = mval;
3991 }
3992
3993 /*
3994 * Map file blocks to filesystem blocks without allocation.
3995 */
3996 int
3997 xfs_bmapi_read(
3998 struct xfs_inode *ip,
3999 xfs_fileoff_t bno,
4000 xfs_filblks_t len,
4001 struct xfs_bmbt_irec *mval,
4002 int *nmap,
4003 int flags)
4004 {
4005 struct xfs_mount *mp = ip->i_mount;
4006 struct xfs_ifork *ifp;
4007 struct xfs_bmbt_irec got;
4008 struct xfs_bmbt_irec prev;
4009 xfs_fileoff_t obno;
4010 xfs_fileoff_t end;
4011 xfs_extnum_t lastx;
4012 int error;
4013 int eof;
4014 int n = 0;
4015 int whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
4016 XFS_ATTR_FORK : XFS_DATA_FORK;
4017
4018 ASSERT(*nmap >= 1);
4019 ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK|XFS_BMAPI_ENTIRE|
4020 XFS_BMAPI_IGSTATE)));
4021 ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL));
4023 if (unlikely(XFS_TEST_ERROR(
4024 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
4025 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
4026 mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
4027 XFS_ERROR_REPORT("xfs_bmapi_read", XFS_ERRLEVEL_LOW, mp);
4028 return -EFSCORRUPTED;
4029 }
4030
4031 if (XFS_FORCED_SHUTDOWN(mp))
4032 return -EIO;
4033
4034 XFS_STATS_INC(mp, xs_blk_mapr);
4036 ifp = XFS_IFORK_PTR(ip, whichfork);
4038 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
4039 error = xfs_iread_extents(NULL, ip, whichfork);
4040 if (error)
4041 return error;
4042 }
4043
4044 xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got, &prev);
4045 end = bno + len;
4046 obno = bno;
4048 while (bno < end && n < *nmap) {
4049 /* Reading past eof, act as though there's a hole up to end. */
4050 if (eof)
4051 got.br_startoff = end;
4052 if (got.br_startoff > bno) {
4053 /* Reading in a hole. */
4054 mval->br_startoff = bno;
4055 mval->br_startblock = HOLESTARTBLOCK;
4056 mval->br_blockcount =
4057 XFS_FILBLKS_MIN(len, got.br_startoff - bno);
4058 mval->br_state = XFS_EXT_NORM;
4059 bno += mval->br_blockcount;
4060 len -= mval->br_blockcount;
4061 mval++;
4062 n++;
4063 continue;
4064 }
4065
4066 /* set up the extent map to return. */
4067 xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
4068 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
4070 /* If we're done, stop now. */
4071 if (bno >= end || n >= *nmap)
4072 break;
4074 /* Else go on to the next record. */
4075 if (++lastx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t))
4076 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, lastx), &got);
4077 else
4078 eof = 1;
4079 }
4080 *nmap = n;
4081 return 0;
4082 }
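/*
 * Typical usage of xfs_bmapi_read() (a sketch; error handling trimmed,
 * the mapping count and the offset_fsb/len_fsb names chosen for
 * illustration):
 *
 *	struct xfs_bmbt_irec	map[4];
 *	int			nmap = 4;
 *	int			error;
 *
 *	xfs_ilock(ip, XFS_ILOCK_SHARED);
 *	error = xfs_bmapi_read(ip, offset_fsb, len_fsb, map, &nmap, 0);
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 *
 * On return nmap holds the number of mappings filled in; holes come
 * back with br_startblock == HOLESTARTBLOCK and delalloc ranges with
 * DELAYSTARTBLOCK, so callers must check br_startblock before using it
 * as a disk address.
 */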
4084 STATIC int
4085 xfs_bmapi_reserve_delalloc(
4086 struct xfs_inode *ip,
4087 xfs_fileoff_t aoff,
4088 xfs_filblks_t len,
4089 struct xfs_bmbt_irec *got,
4090 struct xfs_bmbt_irec *prev,
4091 xfs_extnum_t *lastx,
4092 int eof)
4093 {
4094 struct xfs_mount *mp = ip->i_mount;
4095 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
4096 xfs_extlen_t alen;
4097 xfs_extlen_t indlen;
4098 char rt = XFS_IS_REALTIME_INODE(ip);
4099 xfs_extlen_t extsz;
4100 int error;
4101
4102 alen = XFS_FILBLKS_MIN(len, MAXEXTLEN);
4103 if (!eof)
4104 alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff);
4105
4106 /* Figure out the extent size, adjust alen */
4107 extsz = xfs_get_extsz_hint(ip);
4108 if (extsz) {
4109 error = xfs_bmap_extsize_align(mp, got, prev, extsz, rt, eof,
4110 1, 0, &aoff, &alen);
4111 ASSERT(!error);
4112 }
4113
4114 if (rt)
4115 extsz = alen / mp->m_sb.sb_rextsize;
4116
4118 * Make a transaction-less quota reservation for delayed allocation
4119 * blocks. This number gets adjusted later. We return if we haven't
4120 * allocated blocks already inside this loop.
4122 error = xfs_trans_reserve_quota_nblks(NULL, ip, (long)alen, 0,
4123 rt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
4124 if (error)
4125 return error;
4126
4127 /*
4128 * Split changing sb for alen and indlen since they could be coming
4129 * from different places.
4130 */
4131 indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen);
4132 ASSERT(indlen > 0);
4133
4134 if (rt) {
4135 error = xfs_mod_frextents(mp, -((int64_t)extsz));
4136 } else {
4137 error = xfs_mod_fdblocks(mp, -((int64_t)alen), false);
4138 }
4139
4140 if (error)
4141 goto out_unreserve_quota;
4142
4143 error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false);
4144 if (error)
4145 goto out_unreserve_blocks;
4148 ip->i_delayed_blks += alen;
4150 got->br_startoff = aoff;
4151 got->br_startblock = nullstartblock(indlen);
4152 got->br_blockcount = alen;
4153 got->br_state = XFS_EXT_NORM;
4154 xfs_bmap_add_extent_hole_delay(ip, lastx, got);
4157 * Update our extent pointer, given that xfs_bmap_add_extent_hole_delay
4158 * might have merged it into one of the neighbouring ones.
4160 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *lastx), got);
4162 ASSERT(got->br_startoff <= aoff);
4163 ASSERT(got->br_startoff + got->br_blockcount >= aoff + alen);
4164 ASSERT(isnullstartblock(got->br_startblock));
4165 ASSERT(got->br_state == XFS_EXT_NORM);
4166 return 0;
4167
4168 out_unreserve_blocks:
4169 if (rt)
4170 xfs_mod_frextents(mp, extsz);
4171 else
4172 xfs_mod_fdblocks(mp, alen, false);
4173 out_unreserve_quota:
4174 if (XFS_IS_QUOTA_ON(mp))
4175 xfs_trans_unreserve_quota_nblks(NULL, ip, (long)alen, 0, rt ?
4176 XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
4177 return error;
4178 }
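/*
 * Example of the reservation split above (illustrative): a 1024-block
 * delalloc reservation charges the 1024 data blocks (or the equivalent
 * rt extents) and, separately, xfs_bmap_worst_indlen() blocks for the
 * bmap btree that may be needed at writeback time; the indirect part is
 * stashed in the in-core extent via nullstartblock(indlen) so it can be
 * found again when the extent is converted or freed.
 */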
4180 static int
4181 xfs_bmapi_allocate(
4182 struct xfs_bmalloca *bma)
4183 {
4184 struct xfs_mount *mp = bma->ip->i_mount;
4185 int whichfork = (bma->flags & XFS_BMAPI_ATTRFORK) ?
4186 XFS_ATTR_FORK : XFS_DATA_FORK;
4187 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
4188 int tmp_logflags = 0;
4189 int error;
4190
4191 ASSERT(bma->length > 0);
4192
4193 /*
4194 * For the wasdelay case, we could also just allocate the stuff asked
4195 * for in this bmap call but that wouldn't be as good.
4196 */
4197 if (bma->wasdel) {
4198 bma->length = (xfs_extlen_t)bma->got.br_blockcount;
4199 bma->offset = bma->got.br_startoff;
4200 if (bma->idx != NULLEXTNUM && bma->idx) {
4201 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1),
4202 &bma->prev);
4203 }
4204 } else {
4205 bma->length = XFS_FILBLKS_MIN(bma->length, MAXEXTLEN);
4206 if (!bma->eof)
4207 bma->length = XFS_FILBLKS_MIN(bma->length,
4208 bma->got.br_startoff - bma->offset);
4209 }
4210
4211 /*
4212 * Set the data type being allocated. For the data fork, the first data
4213 * in the file is treated differently to all other allocations. For the
4214 * attribute fork, we only need to ensure the allocated range is not on
4215 * the busy list.
4216 */
4217 if (!(bma->flags & XFS_BMAPI_METADATA)) {
4218 bma->datatype = XFS_ALLOC_NOBUSY;
4219 if (whichfork == XFS_DATA_FORK) {
4220 if (bma->offset == 0)
4221 bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA;
4223 bma->datatype |= XFS_ALLOC_USERDATA;
4225 if (bma->flags & XFS_BMAPI_ZERO)
4226 bma->datatype |= XFS_ALLOC_USERDATA_ZERO;
4229 bma->minlen = (bma->flags & XFS_BMAPI_CONTIG) ? bma->length : 1;
4232 * Only want to do the alignment at the eof if it is userdata and
4233 * allocation length is larger than a stripe unit.
4235 if (mp->m_dalign && bma->length >= mp->m_dalign &&
4236 !(bma->flags & XFS_BMAPI_METADATA) && whichfork == XFS_DATA_FORK) {
4237 error = xfs_bmap_isaeof(bma, whichfork);
4242 error = xfs_bmap_alloc(bma);
4246 if (bma->dfops->dop_low)
4249 bma->cur->bc_private.b.firstblock = *bma->firstblock;
4250 if (bma->blkno == NULLFSBLOCK)
4252 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
4253 bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork);
4254 bma->cur->bc_private.b.firstblock = *bma->firstblock;
4255 bma->cur->bc_private.b.dfops = bma->dfops;
4258 * Bump the number of extents we've allocated
4264 bma->cur->bc_private.b.flags =
4265 bma->wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
4267 bma->got.br_startoff = bma->offset;
4268 bma->got.br_startblock = bma->blkno;
4269 bma->got.br_blockcount = bma->length;
4270 bma->got.br_state = XFS_EXT_NORM;
4273 * A wasdelay extent has been initialized, so shouldn't be flagged
4276 if (!bma->wasdel && (bma->flags & XFS_BMAPI_PREALLOC) &&
4277 xfs_sb_version_hasextflgbit(&mp->m_sb))
4278 bma->got.br_state = XFS_EXT_UNWRITTEN;
4281 error = xfs_bmap_add_extent_delay_real(bma);
4283 error = xfs_bmap_add_extent_hole_real(bma, whichfork);
4285 bma->logflags |= tmp_logflags;
4290 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real
4291 * or xfs_bmap_add_extent_hole_real might have merged it into one of
4292 * the neighbouring ones.
4294 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &bma->got);
4296 ASSERT(bma->got.br_startoff <= bma->offset);
4297 ASSERT(bma->got.br_startoff + bma->got.br_blockcount >=
4298 bma->offset + bma->length);
4299 ASSERT(bma->got.br_state == XFS_EXT_NORM ||
4300 bma->got.br_state == XFS_EXT_UNWRITTEN);
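
/*
 * Example: the datatype flags a brand-new user data allocation ends up
 * with.  For a write into a hole at offset zero of a regular file:
 *
 *	bma->datatype == (XFS_ALLOC_NOBUSY | XFS_ALLOC_INITIAL_USER_DATA)
 *
 * and if XFS_BMAPI_ZERO was passed, XFS_ALLOC_USERDATA_ZERO is OR'd in
 * as well so the allocator zeroes the blocks before they become visible.
 */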
STATIC int
xfs_bmapi_convert_unwritten(
	struct xfs_bmalloca	*bma,
	struct xfs_bmbt_irec	*mval,
	xfs_filblks_t		len,
	int			flags)
{
	int			whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
						XFS_ATTR_FORK : XFS_DATA_FORK;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(bma->ip, whichfork);
	int			tmp_logflags = 0;
	int			error;

	/* check if we need to do unwritten->real conversion */
	if (mval->br_state == XFS_EXT_UNWRITTEN &&
	    (flags & XFS_BMAPI_PREALLOC))
		return 0;

	/* check if we need to do real->unwritten conversion */
	if (mval->br_state == XFS_EXT_NORM &&
	    (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) !=
			(XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT))
		return 0;

	/*
	 * Modify (by adding) the state flag, if writing.
	 */
	ASSERT(mval->br_blockcount <= len);
	if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
		bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp,
					bma->ip, whichfork);
		bma->cur->bc_private.b.firstblock = *bma->firstblock;
		bma->cur->bc_private.b.dfops = bma->dfops;
	}
	mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
				? XFS_EXT_NORM : XFS_EXT_UNWRITTEN;

	/*
	 * Before insertion into the bmbt, zero the range being converted
	 * if required.
	 */
	if (flags & XFS_BMAPI_ZERO) {
		error = xfs_zero_extent(bma->ip, mval->br_startblock,
					mval->br_blockcount);
		if (error)
			return error;
	}

	error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, &bma->idx,
			&bma->cur, mval, bma->firstblock, bma->dfops,
			&tmp_logflags);
	/*
	 * Log the inode core unconditionally in the unwritten extent conversion
	 * path because the conversion might not have done so (e.g., if the
	 * extent count hasn't changed). We need to make sure the inode is dirty
	 * in the transaction for the sake of fsync(), even if nothing has
	 * changed, because fsync() will not force the log for this transaction
	 * unless it sees the inode pinned.
	 */
	bma->logflags |= tmp_logflags | XFS_ILOG_CORE;
	if (error)
		return error;

	/*
	 * Update our extent pointer, given that
	 * xfs_bmap_add_extent_unwritten_real might have merged it into one
	 * of the neighbouring ones.
	 */
	xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &bma->got);

	/*
	 * We may have combined previously unwritten space with written space,
	 * so generate another request.
	 */
	if (mval->br_blockcount < len)
		return -EAGAIN;
	return 0;
}
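
/*
 * Example: how the -EAGAIN contract above plays out in the caller.  In
 * the xfs_bmapi_write() loop below, a short conversion simply retries
 * the same file range against the now-merged extent:
 *
 *	error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags);
 *	if (error == -EAGAIN)
 *		continue;
 *	if (error)
 *		goto error0;
 */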
/*
 * Map file blocks to filesystem blocks, and allocate blocks or convert the
 * extent state if necessary.  Detailed behaviour is controlled by the flags
 * parameter.  Only allocates blocks from a single allocation group, to avoid
 * locking problems.
 *
 * The returned value in "firstblock" from the first call in a transaction
 * must be remembered and presented to subsequent calls in "firstblock".
 * An upper bound for the number of blocks to be allocated is supplied to
 * the first call in "total"; if no allocation group has that many free
 * blocks then the call will fail (return NULLFSBLOCK in "firstblock").
 */
int
xfs_bmapi_write(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_fileoff_t		bno,		/* starting file offs. mapped */
	xfs_filblks_t		len,		/* length to map in file */
	int			flags,		/* XFS_BMAPI_... */
	xfs_fsblock_t		*firstblock,	/* first allocated block
						   controls a.g. for allocs */
	xfs_extlen_t		total,		/* total blocks needed */
	struct xfs_bmbt_irec	*mval,		/* output: map values */
	int			*nmap,		/* i/o: mval size/count */
	struct xfs_defer_ops	*dfops)		/* i/o: list extents to free */
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp;
	struct xfs_bmalloca	bma = { NULL };	/* args for xfs_bmap_alloc */
	xfs_fileoff_t		end;		/* end of mapped file region */
	int			eof;		/* after the end of extents */
	int			error;		/* error return */
	int			n;		/* current extent index */
	xfs_fileoff_t		obno;		/* old block number (offset) */
	int			whichfork;	/* data or attr fork */
	char			inhole;		/* current location is hole in file */
	char			wasdelay;	/* old extent was delayed */

#ifdef DEBUG
	xfs_fileoff_t		orig_bno;	/* original block number value */
	int			orig_flags;	/* original flags arg value */
	xfs_filblks_t		orig_len;	/* original value of len arg */
	struct xfs_bmbt_irec	*orig_mval;	/* original value of mval */
	int			orig_nmap;	/* original value of *nmap */

	orig_bno = bno;
	orig_len = len;
	orig_flags = flags;
	orig_mval = mval;
	orig_nmap = *nmap;
#endif
	whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
		XFS_ATTR_FORK : XFS_DATA_FORK;

	ASSERT(*nmap >= 1);
	ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
	ASSERT(!(flags & XFS_BMAPI_IGSTATE));
	ASSERT(tp != NULL);
	ASSERT(len > 0);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL);
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	/* zeroing is currently only supported for data extents, not metadata */
	ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) !=
			(XFS_BMAPI_METADATA | XFS_BMAPI_ZERO));
	/*
	 * we can allocate unwritten extents or pre-zero allocated blocks,
	 * but it makes no sense to do both at once. This would result in
	 * zeroing the unwritten extent twice, but it still being an
	 * unwritten extent....
	 */
	ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) !=
			(XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO));

	if (unlikely(XFS_TEST_ERROR(
	    (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
	     XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
	     mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
		XFS_ERROR_REPORT("xfs_bmapi_write", XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	ifp = XFS_IFORK_PTR(ip, whichfork);

	XFS_STATS_INC(mp, xs_blk_mapw);

	if (*firstblock == NULLFSBLOCK) {
		if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE)
			bma.minleft = be16_to_cpu(ifp->if_broot->bb_level) + 1;
		else
			bma.minleft = 1;
	} else {
		bma.minleft = 0;
	}

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			goto error0;
	}

	xfs_bmap_search_extents(ip, bno, whichfork, &eof, &bma.idx, &bma.got,
				&bma.prev);
	n = 0;
	end = bno + len;
	obno = bno;

	bma.tp = tp;
	bma.ip = ip;
	bma.total = total;
	bma.datatype = 0;
	bma.dfops = dfops;
	bma.firstblock = firstblock;

	while (bno < end && n < *nmap) {
		inhole = eof || bma.got.br_startoff > bno;
		wasdelay = !inhole && isnullstartblock(bma.got.br_startblock);

		/*
		 * First, deal with the hole before the allocated space
		 * that we found, if any.
		 */
		if (inhole || wasdelay) {
			bma.eof = eof;
			bma.conv = !!(flags & XFS_BMAPI_CONVERT);
			bma.wasdel = wasdelay;
			bma.offset = bno;
			bma.flags = flags;

			/*
			 * There's a 32/64 bit type mismatch between the
			 * allocation length request (which can be 64 bits in
			 * length) and the bma length request, which is
			 * xfs_extlen_t and therefore 32 bits. Hence we have to
			 * check for 32-bit overflows and handle them here.
			 */
			if (len > (xfs_filblks_t)MAXEXTLEN)
				bma.length = MAXEXTLEN;
			else
				bma.length = len;

			ASSERT(len > 0);
			ASSERT(bma.length > 0);
			error = xfs_bmapi_allocate(&bma);
			if (error)
				goto error0;
			if (bma.blkno == NULLFSBLOCK)
				break;
		}

		/* Deal with the allocated space we found. */
		xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno,
							end, n, flags);

		/* Execute unwritten extent conversion if necessary */
		error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags);
		if (error == -EAGAIN)
			continue;
		if (error)
			goto error0;

		/* update the extent map to return */
		xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);

		/*
		 * If we're done, stop now.  Stop when we've allocated
		 * XFS_BMAP_MAX_NMAP extents no matter what.  Otherwise
		 * the transaction may get too big.
		 */
		if (bno >= end || n >= *nmap || bma.nallocs >= *nmap)
			break;

		/* Else go on to the next record. */
		bma.prev = bma.got;
		if (++bma.idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t)) {
			xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma.idx),
					 &bma.got);
		} else
			eof = 1;
	}
	*nmap = n;

	/*
	 * Transform from btree to extents, give it cur.
	 */
	if (xfs_bmap_wants_extents(ip, whichfork)) {
		int		tmp_logflags = 0;

		ASSERT(bma.cur);
		error = xfs_bmap_btree_to_extents(tp, ip, bma.cur,
			&tmp_logflags, whichfork);
		bma.logflags |= tmp_logflags;
		if (error)
			goto error0;
	}

	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE ||
	       XFS_IFORK_NEXTENTS(ip, whichfork) >
		XFS_IFORK_MAXEXT(ip, whichfork));
	error = 0;
error0:
	/*
	 * Log everything.  Do this after conversion, there's no point in
	 * logging the extent records if we've converted to btree format.
	 */
	if ((bma.logflags & xfs_ilog_fext(whichfork)) &&
	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
		bma.logflags &= ~xfs_ilog_fext(whichfork);
	else if ((bma.logflags & xfs_ilog_fbroot(whichfork)) &&
		 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
		bma.logflags &= ~xfs_ilog_fbroot(whichfork);
	/*
	 * Log whatever the flags say, even if error.  Otherwise we might miss
	 * detecting a case where the data is changed, there's an error,
	 * and it's not logged so we don't shutdown when we should.
	 */
	if (bma.logflags)
		xfs_trans_log_inode(tp, ip, bma.logflags);

	if (bma.cur) {
		if (!error) {
			ASSERT(*firstblock == NULLFSBLOCK ||
			       XFS_FSB_TO_AGNO(mp, *firstblock) ==
			       XFS_FSB_TO_AGNO(mp,
				       bma.cur->bc_private.b.firstblock) ||
			       (dfops->dop_low &&
				XFS_FSB_TO_AGNO(mp, *firstblock) <
				XFS_FSB_TO_AGNO(mp,
					bma.cur->bc_private.b.firstblock)));
			*firstblock = bma.cur->bc_private.b.firstblock;
		}
		xfs_btree_del_cursor(bma.cur,
			error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
	}
	if (!error)
		xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
			orig_nmap, *nmap);
	return error;
}
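
/*
 * Example: a single allocating call within a dirty transaction, sketched
 * after the pattern used by callers elsewhere in XFS (identifiers are
 * illustrative and error handling is trimmed):
 *
 *	xfs_fsblock_t		firstblock = NULLFSBLOCK;
 *	struct xfs_defer_ops	dfops;
 *	struct xfs_bmbt_irec	map;
 *	int			nmap = 1;
 *
 *	xfs_defer_init(&dfops, &firstblock);
 *	error = xfs_bmapi_write(tp, ip, bno, len, 0, &firstblock, len,
 *				&map, &nmap, &dfops);
 *	if (!error)
 *		error = xfs_defer_finish(&tp, &dfops, ip);
 *
 * The same firstblock/dfops pair must be threaded through every
 * xfs_bmapi_write() call made in the same transaction.
 */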
/*
 * When a delalloc extent is split (e.g., due to a hole punch), the original
 * indlen reservation must be shared across the two new extents that are left
 * behind.
 *
 * Given the original reservation and the worst case indlen for the two new
 * extents (as calculated by xfs_bmap_worst_indlen()), split the original
 * reservation fairly across the two new extents. If necessary, steal available
 * blocks from a deleted extent to make up a reservation deficiency (e.g., if
 * ores == 1). The number of stolen blocks is returned. The availability and
 * subsequent accounting of stolen blocks is the responsibility of the caller.
 */
static xfs_filblks_t
xfs_bmap_split_indlen(
	xfs_filblks_t			ores,		/* original res. */
	xfs_filblks_t			*indlen1,	/* ext1 worst indlen */
	xfs_filblks_t			*indlen2,	/* ext2 worst indlen */
	xfs_filblks_t			avail)		/* stealable blocks */
{
	xfs_filblks_t			len1 = *indlen1;
	xfs_filblks_t			len2 = *indlen2;
	xfs_filblks_t			nres = len1 + len2; /* new total res. */
	xfs_filblks_t			stolen = 0;

	/*
	 * Steal as many blocks as we can to try and satisfy the worst case
	 * indlen for both new extents.
	 */
	while (nres > ores && avail) {
		nres--;
		avail--;
		stolen++;
	}

	/*
	 * The only blocks available are those reserved for the original
	 * extent and what we can steal from the extent being removed.
	 * If this still isn't enough to satisfy the combined
	 * requirements for the two new extents, skim blocks off of each
	 * of the new reservations until they match what is available.
	 */
	while (nres > ores) {
		if (len1) {
			len1--;
			nres--;
		}
		if (nres == ores)
			break;
		if (len2) {
			len2--;
			nres--;
		}
	}

	*indlen1 = len1;
	*indlen2 = len2;

	return stolen;
}
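
/*
 * Example: ores = 5, *indlen1 = 4, *indlen2 = 3, avail = 2.  The combined
 * worst case (7) exceeds the original reservation (5), so both available
 * blocks are stolen and nothing needs to be skimmed:
 *
 *	xfs_filblks_t	i1 = 4, i2 = 3;
 *	xfs_filblks_t	stolen = xfs_bmap_split_indlen(5, &i1, &i2, 2);
 *	(stolen == 2, i1 == 4, i2 == 3)
 *
 * With avail = 0 instead, the skim loop trims the reservations
 * alternately to i1 == 3 and i2 == 2, so i1 + i2 == ores and stolen == 0.
 */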
/*
 * Called by xfs_bmapi to update file extent records and the btree
 * after removing space (or undoing a delayed allocation).
 */
STATIC int				/* error */
xfs_bmap_del_extent(
	xfs_inode_t		*ip,	/* incore inode pointer */
	xfs_trans_t		*tp,	/* current transaction pointer */
	xfs_extnum_t		*idx,	/* extent number to update/delete */
	struct xfs_defer_ops	*dfops,	/* list of extents to be freed */
	xfs_btree_cur_t		*cur,	/* if null, not a btree */
	xfs_bmbt_irec_t		*del,	/* data to remove from extents */
	int			*logflagsp, /* inode logging flags */
	int			whichfork) /* data or attr fork */
{
	xfs_filblks_t		da_new;	/* new delay-alloc indirect blocks */
	xfs_filblks_t		da_old;	/* old delay-alloc indirect blocks */
	xfs_fsblock_t		del_endblock=0;	/* first block past del */
	xfs_fileoff_t		del_endoff;	/* first offset past del */
	int			delay;	/* current block is delayed allocated */
	int			do_fx;	/* free extent at end of routine */
	xfs_bmbt_rec_host_t	*ep;	/* current extent entry pointer */
	int			error;	/* error return value */
	int			flags;	/* inode logging flags */
	xfs_bmbt_irec_t		got;	/* current extent entry */
	xfs_fileoff_t		got_endoff;	/* first offset past got */
	int			i;	/* temp state */
	xfs_ifork_t		*ifp;	/* inode fork pointer */
	xfs_mount_t		*mp;	/* mount structure */
	xfs_filblks_t		nblks;	/* quota/sb block count */
	xfs_bmbt_irec_t		new;	/* new record to be inserted */
	/* REFERENCED */
	uint			qfield;	/* quota field to update */
	xfs_filblks_t		temp;	/* for indirect length calculations */
	xfs_filblks_t		temp2;	/* for indirect length calculations */
	int			state = 0;

	mp = ip->i_mount;
	XFS_STATS_INC(mp, xs_del_exlist);

	if (whichfork == XFS_ATTR_FORK)
		state |= BMAP_ATTRFORK;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT((*idx >= 0) && (*idx < ifp->if_bytes /
		(uint)sizeof(xfs_bmbt_rec_t)));
	ASSERT(del->br_blockcount > 0);
	ep = xfs_iext_get_ext(ifp, *idx);
	xfs_bmbt_get_all(ep, &got);
	ASSERT(got.br_startoff <= del->br_startoff);
	del_endoff = del->br_startoff + del->br_blockcount;
	got_endoff = got.br_startoff + got.br_blockcount;
	ASSERT(got_endoff >= del_endoff);
	delay = isnullstartblock(got.br_startblock);
	ASSERT(isnullstartblock(del->br_startblock) == delay);
	flags = 0;
	qfield = 0;
	error = 0;
	/*
	 * If deleting a real allocation, must free up the disk space.
	 */
	if (!delay) {
		flags = XFS_ILOG_CORE;
		/*
		 * Realtime allocation.  Free it and record di_nblocks update.
		 */
		if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) {
			xfs_fsblock_t	bno;
			xfs_filblks_t	len;

			ASSERT(do_mod(del->br_blockcount,
				      mp->m_sb.sb_rextsize) == 0);
			ASSERT(do_mod(del->br_startblock,
				      mp->m_sb.sb_rextsize) == 0);
			bno = del->br_startblock;
			len = del->br_blockcount;
			do_div(bno, mp->m_sb.sb_rextsize);
			do_div(len, mp->m_sb.sb_rextsize);
			error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len);
			if (error)
				goto done;
			do_fx = 0;
			nblks = len * mp->m_sb.sb_rextsize;
			qfield = XFS_TRANS_DQ_RTBCOUNT;
		}
		/*
		 * Ordinary allocation.
		 */
		else {
			do_fx = 1;
			nblks = del->br_blockcount;
			qfield = XFS_TRANS_DQ_BCOUNT;
		}
		/*
		 * Set up del_endblock and cur for later.
		 */
		del_endblock = del->br_startblock + del->br_blockcount;
		if (cur) {
			if ((error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
					got.br_startblock, got.br_blockcount,
					&i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}
		da_old = da_new = 0;
	} else {
		da_old = startblockval(got.br_startblock);
		da_new = 0;
		nblks = 0;
		do_fx = 0;
	}
	/*
	 * Set flag value to use in switch statement.
	 * Left-contig is 2, right-contig is 1.
	 */
	switch (((got.br_startoff == del->br_startoff) << 1) |
		(got_endoff == del_endoff)) {
	case 3:
		/*
		 * Matches the whole extent.  Delete the entry.
		 */
		xfs_iext_remove(ip, *idx, 1,
				whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0);
		--*idx;
		if (delay)
			break;

		XFS_IFORK_NEXT_SET(ip, whichfork,
			XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
		flags |= XFS_ILOG_CORE;
		if (!cur) {
			flags |= xfs_ilog_fext(whichfork);
			break;
		}
		if ((error = xfs_btree_delete(cur, &i)))
			goto done;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		break;

	case 2:
		/*
		 * Deleting the first part of the extent.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_startoff(ep, del_endoff);
		temp = got.br_blockcount - del->br_blockcount;
		xfs_bmbt_set_blockcount(ep, temp);
		if (delay) {
			temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
				da_old);
			xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
			trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
			da_new = temp;
			break;
		}
		xfs_bmbt_set_startblock(ep, del_endblock);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
		if (!cur) {
			flags |= xfs_ilog_fext(whichfork);
			break;
		}
		if ((error = xfs_bmbt_update(cur, del_endoff, del_endblock,
				got.br_blockcount - del->br_blockcount,
				got.br_state)))
			goto done;
		break;

	case 1:
		/*
		 * Deleting the last part of the extent.
		 */
		temp = got.br_blockcount - del->br_blockcount;
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(ep, temp);
		if (delay) {
			temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
				da_old);
			xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
			trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
			da_new = temp;
			break;
		}
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
		if (!cur) {
			flags |= xfs_ilog_fext(whichfork);
			break;
		}
		if ((error = xfs_bmbt_update(cur, got.br_startoff,
				got.br_startblock,
				got.br_blockcount - del->br_blockcount,
				got.br_state)))
			goto done;
		break;

	case 0:
		/*
		 * Deleting the middle of the extent.
		 */
		temp = del->br_startoff - got.br_startoff;
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(ep, temp);
		new.br_startoff = del_endoff;
		temp2 = got_endoff - del_endoff;
		new.br_blockcount = temp2;
		new.br_state = got.br_state;
		if (!delay) {
			new.br_startblock = del_endblock;
			flags |= XFS_ILOG_CORE;
			if (cur) {
				if ((error = xfs_bmbt_update(cur,
						got.br_startoff,
						got.br_startblock, temp,
						got.br_state)))
					goto done;
				if ((error = xfs_btree_increment(cur, 0, &i)))
					goto done;
				cur->bc_rec.b = new;
				error = xfs_btree_insert(cur, &i);
				if (error && error != -ENOSPC)
					goto done;
				/*
				 * If we get no-space back from the btree
				 * insert, it tried a split, and we have a
				 * zero block reservation.
				 * Fix up our state and return the error.
				 */
				if (error == -ENOSPC) {
					/*
					 * Reset the cursor, don't trust
					 * it after any insert operation.
					 */
					if ((error = xfs_bmbt_lookup_eq(cur,
							got.br_startoff,
							got.br_startblock,
							temp, &i)))
						goto done;
					XFS_WANT_CORRUPTED_GOTO(mp,
								i == 1, done);
					/*
					 * Update the btree record back
					 * to the original value.
					 */
					if ((error = xfs_bmbt_update(cur,
							got.br_startoff,
							got.br_startblock,
							got.br_blockcount,
							got.br_state)))
						goto done;
					/*
					 * Reset the extent record back
					 * to the original value.
					 */
					xfs_bmbt_set_blockcount(ep,
						got.br_blockcount);
					flags = 0;
					error = -ENOSPC;
					goto done;
				}
				XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			} else
				flags |= xfs_ilog_fext(whichfork);
			XFS_IFORK_NEXT_SET(ip, whichfork,
				XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
		} else {
			xfs_filblks_t	stolen;
			ASSERT(whichfork == XFS_DATA_FORK);

			/*
			 * Distribute the original indlen reservation across the
			 * two new extents. Steal blocks from the deleted extent
			 * if necessary. Stealing blocks simply fudges the
			 * fdblocks accounting in xfs_bunmapi().
			 */
			temp = xfs_bmap_worst_indlen(ip, got.br_blockcount);
			temp2 = xfs_bmap_worst_indlen(ip, new.br_blockcount);
			stolen = xfs_bmap_split_indlen(da_old, &temp, &temp2,
						       del->br_blockcount);
			da_new = temp + temp2 - stolen;
			del->br_blockcount -= stolen;

			/*
			 * Set the reservation for each extent. Warn if either
			 * is zero as this can lead to delalloc problems.
			 */
			WARN_ON_ONCE(!temp || !temp2);
			xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
			new.br_startblock = nullstartblock((int)temp2);
		}
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
		xfs_iext_insert(ip, *idx + 1, 1, &new, state);
		++*idx;
		break;
	}

	/* remove reverse mapping */
	if (!delay) {
		error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, del);
		if (error)
			goto done;
	}

	/*
	 * If we need to, add to list of extents to delete.
	 */
	if (do_fx)
		xfs_bmap_add_free(mp, dfops, del->br_startblock,
				del->br_blockcount, NULL);
	/*
	 * Adjust inode # blocks in the file.
	 */
	if (nblks)
		ip->i_d.di_nblocks -= nblks;
	/*
	 * Adjust quota data.
	 */
	if (qfield)
		xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);

	/*
	 * Account for change in delayed indirect blocks.
	 * Nothing to do for disk quota accounting here.
	 */
	ASSERT(da_old >= da_new);
	if (da_old > da_new)
		xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new), false);
done:
	*logflagsp = flags;
	return error;
}
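
/*
 * Example: the contiguity encoding used by the switch above.  For
 * got = [10, +8) and del = [10, +3) only the start offsets match, so
 *
 *	((got.br_startoff == del->br_startoff) << 1) |
 *	 (got_endoff == del_endoff)			== 2
 *
 * and case 2 trims the front of the extent.  Deleting [15, +3) from the
 * same extent yields 1 (trim the back), [10, +8) yields 3 (remove the
 * whole record), and [12, +3) yields 0 (split the middle).
 */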
/*
 * Unmap (remove) blocks from a file.
 * If nexts is nonzero then the number of extents to remove is limited to
 * that value.  If not all extents in the block range can be removed then
 * *done is left unset and the caller must loop.
 */
int						/* error */
xfs_bunmapi(
	xfs_trans_t		*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_fileoff_t		bno,		/* starting offset to unmap */
	xfs_filblks_t		len,		/* length to unmap in file */
	int			flags,		/* misc flags */
	xfs_extnum_t		nexts,		/* number of extents max */
	xfs_fsblock_t		*firstblock,	/* first allocated block
						   controls a.g. for allocs */
	struct xfs_defer_ops	*dfops,		/* i/o: list extents to free */
	int			*done)		/* set when unmapping is complete */
{
	xfs_btree_cur_t		*cur;		/* bmap btree cursor */
	xfs_bmbt_irec_t		del;		/* extent being deleted */
	int			eof;		/* is deleting at eof */
	xfs_bmbt_rec_host_t	*ep;		/* extent record pointer */
	int			error;		/* error return value */
	xfs_extnum_t		extno;		/* extent number in list */
	xfs_bmbt_irec_t		got;		/* current extent record */
	xfs_ifork_t		*ifp;		/* inode fork pointer */
	int			isrt;		/* freeing in rt area */
	xfs_extnum_t		lastx;		/* last extent index used */
	int			logflags;	/* transaction logging flags */
	xfs_extlen_t		mod;		/* rt extent offset */
	xfs_mount_t		*mp;		/* mount structure */
	xfs_extnum_t		nextents;	/* number of file extents */
	xfs_bmbt_irec_t		prev;		/* previous extent record */
	xfs_fileoff_t		start;		/* first file offset deleted */
	int			tmp_logflags;	/* partial logging flags */
	int			wasdel;		/* was a delayed alloc extent */
	int			whichfork;	/* data or attribute fork */
	xfs_fsblock_t		sum;

	trace_xfs_bunmap(ip, bno, len, flags, _RET_IP_);

	whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
		XFS_ATTR_FORK : XFS_DATA_FORK;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	if (unlikely(
	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
		XFS_ERROR_REPORT("xfs_bunmapi", XFS_ERRLEVEL_LOW,
				 ip->i_mount);
		return -EFSCORRUPTED;
	}
	mp = ip->i_mount;
	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(len > 0);
	ASSERT(nexts >= 0);

	if (!(ifp->if_flags & XFS_IFEXTENTS) &&
	    (error = xfs_iread_extents(tp, ip, whichfork)))
		return error;
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	if (nextents == 0) {
		*done = 1;
		return 0;
	}
	XFS_STATS_INC(mp, xs_blk_unmap);
	isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
	start = bno;
	bno = start + len - 1;
	ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
		&prev);

	/*
	 * Check to see if the given block number is past the end of the
	 * file, back up to the last block if so...
	 */
	if (eof) {
		ep = xfs_iext_get_ext(ifp, --lastx);
		xfs_bmbt_get_all(ep, &got);
		bno = got.br_startoff + got.br_blockcount - 1;
	}
	logflags = 0;
	if (ifp->if_flags & XFS_IFBROOT) {
		ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		cur->bc_private.b.firstblock = *firstblock;
		cur->bc_private.b.dfops = dfops;
		cur->bc_private.b.flags = 0;
	} else
		cur = NULL;

	if (isrt) {
		/*
		 * Synchronize by locking the bitmap inode.
		 */
		xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
		xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL);
		xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
		xfs_trans_ijoin(tp, mp->m_rsumip, XFS_ILOCK_EXCL);
	}

	extno = 0;
	while (bno != (xfs_fileoff_t)-1 && bno >= start && lastx >= 0 &&
	       (nexts == 0 || extno < nexts)) {
		/*
		 * Is the found extent after a hole in which bno lives?
		 * Just back up to the previous extent, if so.
		 */
		if (got.br_startoff > bno) {
			if (--lastx < 0)
				break;
			ep = xfs_iext_get_ext(ifp, lastx);
			xfs_bmbt_get_all(ep, &got);
		}
		/*
		 * Is the last block of this extent before the range
		 * we're supposed to delete?  If so, we're done.
		 */
		bno = XFS_FILEOFF_MIN(bno,
			got.br_startoff + got.br_blockcount - 1);
		if (bno < start)
			break;
		/*
		 * Then deal with the (possibly delayed) allocated space
		 * we found.
		 */
		ASSERT(ep != NULL);
		del = got;
		wasdel = isnullstartblock(del.br_startblock);
		if (got.br_startoff < start) {
			del.br_startoff = start;
			del.br_blockcount -= start - got.br_startoff;
			if (!wasdel)
				del.br_startblock += start - got.br_startoff;
		}
		if (del.br_startoff + del.br_blockcount > bno + 1)
			del.br_blockcount = bno + 1 - del.br_startoff;
		sum = del.br_startblock + del.br_blockcount;
		if (isrt &&
		    (mod = do_mod(sum, mp->m_sb.sb_rextsize))) {
			/*
			 * Realtime extent not lined up at the end.
			 * The extent could have been split into written
			 * and unwritten pieces, or we could just be
			 * unmapping part of it.  But we can't really
			 * get rid of part of a realtime extent.
			 */
			if (del.br_state == XFS_EXT_UNWRITTEN ||
			    !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
				/*
				 * This piece is unwritten, or we're not
				 * using unwritten extents.  Skip over it.
				 */
				ASSERT(bno >= mod);
				bno -= mod > del.br_blockcount ?
					del.br_blockcount : mod;
				if (bno < got.br_startoff) {
					if (--lastx >= 0)
						xfs_bmbt_get_all(xfs_iext_get_ext(
							ifp, lastx), &got);
				}
				continue;
			}
			/*
			 * It's written, turn it unwritten.
			 * This is better than zeroing it.
			 */
			ASSERT(del.br_state == XFS_EXT_NORM);
			ASSERT(tp->t_blk_res > 0);
			/*
			 * If this spans a realtime extent boundary,
			 * chop it back to the start of the one we end at.
			 */
			if (del.br_blockcount > mod) {
				del.br_startoff += del.br_blockcount - mod;
				del.br_startblock += del.br_blockcount - mod;
				del.br_blockcount = mod;
			}
			del.br_state = XFS_EXT_UNWRITTEN;
			error = xfs_bmap_add_extent_unwritten_real(tp, ip,
					&lastx, &cur, &del, firstblock, dfops,
					&logflags);
			if (error)
				goto error0;
			goto nodelete;
		}
		if (isrt && (mod = do_mod(del.br_startblock, mp->m_sb.sb_rextsize))) {
			/*
			 * Realtime extent is lined up at the end but not
			 * at the front.  We'll get rid of full extents if
			 * we can.
			 */
			mod = mp->m_sb.sb_rextsize - mod;
			if (del.br_blockcount > mod) {
				del.br_blockcount -= mod;
				del.br_startoff += mod;
				del.br_startblock += mod;
			} else if ((del.br_startoff == start &&
				    (del.br_state == XFS_EXT_UNWRITTEN ||
				     tp->t_blk_res == 0)) ||
				   !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
				/*
				 * Can't make it unwritten.  There isn't
				 * a full extent here so just skip it.
				 */
				ASSERT(bno >= del.br_blockcount);
				bno -= del.br_blockcount;
				if (got.br_startoff > bno) {
					if (--lastx >= 0) {
						ep = xfs_iext_get_ext(ifp,
								      lastx);
						xfs_bmbt_get_all(ep, &got);
					}
				}
				continue;
			} else if (del.br_state == XFS_EXT_UNWRITTEN) {
				/*
				 * This one is already unwritten.
				 * It must have a written left neighbor.
				 * Unwrite the killed part of that one and
				 * try again.
				 */
				ASSERT(lastx > 0);
				xfs_bmbt_get_all(xfs_iext_get_ext(ifp,
						lastx - 1), &prev);
				ASSERT(prev.br_state == XFS_EXT_NORM);
				ASSERT(!isnullstartblock(prev.br_startblock));
				ASSERT(del.br_startblock ==
				       prev.br_startblock + prev.br_blockcount);
				if (prev.br_startoff < start) {
					mod = start - prev.br_startoff;
					prev.br_blockcount -= mod;
					prev.br_startblock += mod;
					prev.br_startoff = start;
				}
				prev.br_state = XFS_EXT_UNWRITTEN;
				lastx--;
				error = xfs_bmap_add_extent_unwritten_real(tp,
						ip, &lastx, &cur, &prev,
						firstblock, dfops, &logflags);
				if (error)
					goto error0;
				goto nodelete;
			} else {
				ASSERT(del.br_state == XFS_EXT_NORM);
				del.br_state = XFS_EXT_UNWRITTEN;
				error = xfs_bmap_add_extent_unwritten_real(tp,
						ip, &lastx, &cur, &del,
						firstblock, dfops, &logflags);
				if (error)
					goto error0;
				goto nodelete;
			}
		}

		/*
		 * If it's the case where the directory code is running
		 * with no block reservation, and the deleted block is in
		 * the middle of its extent, and the resulting insert
		 * of an extent would cause transformation to btree format,
		 * then reject it.  The calling code will then swap
		 * blocks around instead.
		 * We have to do this now, rather than waiting for the
		 * conversion to btree format, since the transaction
		 * will be dirty.
		 */
		if (!wasdel && tp->t_blk_res == 0 &&
		    XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
		    XFS_IFORK_NEXTENTS(ip, whichfork) >= /* Note the >= */
			XFS_IFORK_MAXEXT(ip, whichfork) &&
		    del.br_startoff > got.br_startoff &&
		    del.br_startoff + del.br_blockcount <
		    got.br_startoff + got.br_blockcount) {
			error = -ENOSPC;
			goto error0;
		}

		/*
		 * Unreserve quota and update realtime free space, if
		 * appropriate. If delayed allocation, update the inode delalloc
		 * counter now and wait to update the sb counters as
		 * xfs_bmap_del_extent() might need to borrow some blocks.
		 */
		if (wasdel) {
			ASSERT(startblockval(del.br_startblock) > 0);
			if (isrt) {
				xfs_filblks_t rtexts;

				rtexts = XFS_FSB_TO_B(mp, del.br_blockcount);
				do_div(rtexts, mp->m_sb.sb_rextsize);
				xfs_mod_frextents(mp, (int64_t)rtexts);
				(void)xfs_trans_reserve_quota_nblks(NULL,
					ip, -((long)del.br_blockcount), 0,
					XFS_QMOPT_RES_RTBLKS);
			} else {
				(void)xfs_trans_reserve_quota_nblks(NULL,
					ip, -((long)del.br_blockcount), 0,
					XFS_QMOPT_RES_REGBLKS);
			}
			ip->i_delayed_blks -= del.br_blockcount;
			if (cur)
				cur->bc_private.b.flags |=
					XFS_BTCUR_BPRV_WASDEL;
		} else if (cur)
			cur->bc_private.b.flags &= ~XFS_BTCUR_BPRV_WASDEL;

		error = xfs_bmap_del_extent(ip, tp, &lastx, dfops, cur, &del,
				&tmp_logflags, whichfork);
		logflags |= tmp_logflags;
		if (error)
			goto error0;

		if (!isrt && wasdel)
			xfs_mod_fdblocks(mp, (int64_t)del.br_blockcount, false);

		bno = del.br_startoff - 1;
nodelete:
		/*
		 * If not done go on to the next (previous) record.
		 */
		if (bno != (xfs_fileoff_t)-1 && bno >= start) {
			if (lastx >= 0) {
				ep = xfs_iext_get_ext(ifp, lastx);
				if (xfs_bmbt_get_startoff(ep) > bno) {
					if (--lastx >= 0)
						ep = xfs_iext_get_ext(ifp,
								      lastx);
				}
				xfs_bmbt_get_all(ep, &got);
			}
			extno++;
		}
	}
	*done = bno == (xfs_fileoff_t)-1 || bno < start || lastx < 0;

	/*
	 * Convert to a btree if necessary.
	 */
	if (xfs_bmap_needs_btree(ip, whichfork)) {
		ASSERT(cur == NULL);
		error = xfs_bmap_extents_to_btree(tp, ip, firstblock, dfops,
			&cur, 0, &tmp_logflags, whichfork);
		logflags |= tmp_logflags;
		if (error)
			goto error0;
	}
	/*
	 * transform from btree to extents, give it cur
	 */
	else if (xfs_bmap_wants_extents(ip, whichfork)) {
		ASSERT(cur != NULL);
		error = xfs_bmap_btree_to_extents(tp, ip, cur, &tmp_logflags,
			whichfork);
		logflags |= tmp_logflags;
		if (error)
			goto error0;
	}
	/*
	 * transform from extents to local?
	 */
	error = 0;
error0:
	/*
	 * Log everything.  Do this after conversion, there's no point in
	 * logging the extent records if we've converted to btree format.
	 */
	if ((logflags & xfs_ilog_fext(whichfork)) &&
	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
		logflags &= ~xfs_ilog_fext(whichfork);
	else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
		 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
		logflags &= ~xfs_ilog_fbroot(whichfork);
	/*
	 * Log inode even in the error case, if the transaction
	 * is dirty we'll need to shut down the filesystem.
	 */
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	if (cur) {
		if (!error) {
			*firstblock = cur->bc_private.b.firstblock;
			cur->bc_private.b.allocated = 0;
		}
		xfs_btree_del_cursor(cur,
			error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
	}
	return error;
}
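
/*
 * Example: unmapping a whole range a few extents at a time, sketched
 * from a hypothetical caller (transaction setup and rolling are elided;
 * identifiers are illustrative):
 *
 *	int done = 0;
 *
 *	while (!done) {
 *		xfs_defer_init(&dfops, &firstblock);
 *		error = xfs_bunmapi(tp, ip, start_fsb, len_fsb, 0, 2,
 *				    &firstblock, &dfops, &done);
 *		if (error)
 *			break;
 *		error = xfs_defer_finish(&tp, &dfops, ip);
 *		if (error)
 *			break;
 *	}
 */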
/*
 * Determine whether an extent shift can be accomplished by a merge with the
 * extent that precedes the target hole of the shift.
 */
STATIC bool
xfs_bmse_can_merge(
	struct xfs_bmbt_irec	*left,	/* preceding extent */
	struct xfs_bmbt_irec	*got,	/* current extent to shift */
	xfs_fileoff_t		shift)	/* shift fsb */
{
	xfs_fileoff_t		startoff;

	startoff = got->br_startoff - shift;

	/*
	 * The extent, once shifted, must be adjacent in-file and on-disk with
	 * the preceding extent.
	 */
	if ((left->br_startoff + left->br_blockcount != startoff) ||
	    (left->br_startblock + left->br_blockcount != got->br_startblock) ||
	    (left->br_state != got->br_state) ||
	    (left->br_blockcount + got->br_blockcount > MAXEXTLEN))
		return false;

	return true;
}
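
/*
 * Example: left = [0, +5) at block 100 and got = [7, +4) at block 105.
 * Shifting got left by 2 gives startoff 5, which is adjacent both
 * in-file (0 + 5 == 5) and on-disk (100 + 5 == 105), so the two records
 * can collapse into a single [0, +9) extent at block 100.
 */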
/*
 * A bmap extent shift adjusts the file offset of an extent to fill a preceding
 * hole in the file. If an extent shift would result in the extent being fully
 * adjacent to the extent that currently precedes the hole, we can merge with
 * the preceding extent rather than do the shift.
 *
 * This function assumes the caller has verified a shift-by-merge is possible
 * with the provided extents via xfs_bmse_can_merge().
 */
STATIC int
xfs_bmse_merge(
	struct xfs_inode		*ip,
	int				whichfork,
	xfs_fileoff_t			shift,		/* shift fsb */
	int				current_ext,	/* idx of gotp */
	struct xfs_bmbt_rec_host	*gotp,		/* extent to shift */
	struct xfs_bmbt_rec_host	*leftp,		/* preceding extent */
	struct xfs_btree_cur		*cur,
	int				*logflags)	/* output */
{
	struct xfs_bmbt_irec		got;
	struct xfs_bmbt_irec		left;
	xfs_filblks_t			blockcount;
	int				error, i;
	struct xfs_mount		*mp = ip->i_mount;

	xfs_bmbt_get_all(gotp, &got);
	xfs_bmbt_get_all(leftp, &left);
	blockcount = left.br_blockcount + got.br_blockcount;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(xfs_bmse_can_merge(&left, &got, shift));

	/*
	 * Merge the in-core extents. Note that the host record pointers and
	 * current_ext index are invalid once the extent has been removed via
	 * xfs_iext_remove().
	 */
	xfs_bmbt_set_blockcount(leftp, blockcount);
	xfs_iext_remove(ip, current_ext, 1, 0);

	/*
	 * Update the on-disk extent count, the btree if necessary and log the
	 * inode.
	 */
	XFS_IFORK_NEXT_SET(ip, whichfork,
			   XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
	*logflags |= XFS_ILOG_CORE;
	if (!cur) {
		*logflags |= XFS_ILOG_DEXT;
		return 0;
	}

	/* lookup and remove the extent to merge */
	error = xfs_bmbt_lookup_eq(cur, got.br_startoff, got.br_startblock,
				   got.br_blockcount, &i);
	if (error)
		return error;
	XFS_WANT_CORRUPTED_RETURN(mp, i == 1);

	error = xfs_btree_delete(cur, &i);
	if (error)
		return error;
	XFS_WANT_CORRUPTED_RETURN(mp, i == 1);

	/* lookup and update size of the previous extent */
	error = xfs_bmbt_lookup_eq(cur, left.br_startoff, left.br_startblock,
				   left.br_blockcount, &i);
	if (error)
		return error;
	XFS_WANT_CORRUPTED_RETURN(mp, i == 1);

	left.br_blockcount = blockcount;

	return xfs_bmbt_update(cur, left.br_startoff, left.br_startblock,
			       left.br_blockcount, left.br_state);
}
/*
 * Shift a single extent.
 */
STATIC int
xfs_bmse_shift_one(
	struct xfs_inode		*ip,
	int				whichfork,
	xfs_fileoff_t			offset_shift_fsb,
	int				*current_ext,
	struct xfs_bmbt_rec_host	*gotp,
	struct xfs_btree_cur		*cur,
	int				*logflags,
	enum shift_direction		direction,
	struct xfs_defer_ops		*dfops)
{
	struct xfs_ifork		*ifp;
	struct xfs_mount		*mp;
	xfs_fileoff_t			startoff;
	struct xfs_bmbt_rec_host	*adj_irecp;
	struct xfs_bmbt_irec		got;
	struct xfs_bmbt_irec		adj_irec;
	int				error;
	int				i;
	int				total_extents;

	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	total_extents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t);

	xfs_bmbt_get_all(gotp, &got);

	/* delalloc extents should be prevented by caller */
	XFS_WANT_CORRUPTED_RETURN(mp, !isnullstartblock(got.br_startblock));

	if (direction == SHIFT_LEFT) {
		startoff = got.br_startoff - offset_shift_fsb;

		/*
		 * Check for merge if we've got an extent to the left,
		 * otherwise make sure there's enough room at the start
		 * of the file for the shift.
		 */
		if (!*current_ext) {
			if (got.br_startoff < offset_shift_fsb)
				return -EINVAL;
			goto update_current_ext;
		}
		/*
		 * grab the left extent and check for a large
		 * enough hole.
		 */
		adj_irecp = xfs_iext_get_ext(ifp, *current_ext - 1);
		xfs_bmbt_get_all(adj_irecp, &adj_irec);

		if (startoff <
		    adj_irec.br_startoff + adj_irec.br_blockcount)
			return -EINVAL;

		/* check whether to merge the extent or shift it down */
		if (xfs_bmse_can_merge(&adj_irec, &got,
				       offset_shift_fsb)) {
			error = xfs_bmse_merge(ip, whichfork, offset_shift_fsb,
					       *current_ext, gotp, adj_irecp,
					       cur, logflags);
			if (error)
				return error;
			adj_irec = got;
			goto update_rmap;
		}
	} else {
		startoff = got.br_startoff + offset_shift_fsb;
		/* nothing to move if this is the last extent */
		if (*current_ext >= (total_extents - 1))
			goto update_current_ext;
		/*
		 * If this is not the last extent in the file, make sure there
		 * is enough room between current extent and next extent for
		 * accommodating the shift.
		 */
		adj_irecp = xfs_iext_get_ext(ifp, *current_ext + 1);
		xfs_bmbt_get_all(adj_irecp, &adj_irec);
		if (startoff + got.br_blockcount > adj_irec.br_startoff)
			return -EINVAL;
		/*
		 * Unlike a left shift (which involves a hole punch),
		 * a right shift does not modify extent neighbors
		 * in any way. We should never find mergeable extents
		 * in this scenario. Check anyway and warn if we
		 * encounter two extents that could be one.
		 */
		if (xfs_bmse_can_merge(&got, &adj_irec, offset_shift_fsb))
			WARN_ON_ONCE(1);
	}
	/*
	 * Increment the extent index for the next iteration, update the start
	 * offset of the in-core extent and update the btree if applicable.
	 */
update_current_ext:
	if (direction == SHIFT_LEFT)
		(*current_ext)++;
	else
		(*current_ext)--;
	xfs_bmbt_set_startoff(gotp, startoff);
	*logflags |= XFS_ILOG_CORE;
	adj_irec = got;
	if (!cur) {
		*logflags |= XFS_ILOG_DEXT;
		goto update_rmap;
	}

	error = xfs_bmbt_lookup_eq(cur, got.br_startoff, got.br_startblock,
				   got.br_blockcount, &i);
	if (error)
		return error;
	XFS_WANT_CORRUPTED_RETURN(mp, i == 1);

	got.br_startoff = startoff;
	error = xfs_bmbt_update(cur, got.br_startoff, got.br_startblock,
			got.br_blockcount, got.br_state);
	if (error)
		return error;

update_rmap:
	/* update reverse mapping */
	error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, &adj_irec);
	if (error)
		return error;
	adj_irec.br_startoff = startoff;
	return xfs_rmap_map_extent(mp, dfops, ip, whichfork, &adj_irec);
}
/*
 * Shift extent records to the left/right to cover/create a hole.
 *
 * The maximum number of extents to be shifted in a single operation is
 * @num_exts. @stop_fsb specifies the file offset at which to stop shift and the
 * file offset where we've left off is returned in @next_fsb. @offset_shift_fsb
 * is the length by which each extent is shifted. If there is no hole to shift
 * the extents into, the operation is considered invalid and we abort
 * immediately.
 */
int
xfs_bmap_shift_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		*next_fsb,
	xfs_fileoff_t		offset_shift_fsb,
	int			*done,
	xfs_fileoff_t		stop_fsb,
	xfs_fsblock_t		*firstblock,
	struct xfs_defer_ops	*dfops,
	enum shift_direction	direction,
	int			num_exts)
{
	struct xfs_btree_cur		*cur = NULL;
	struct xfs_bmbt_rec_host	*gotp;
	struct xfs_bmbt_irec		got;
	struct xfs_mount		*mp = ip->i_mount;
	struct xfs_ifork		*ifp;
	xfs_extnum_t			nexts = 0;
	xfs_extnum_t			current_ext;
	xfs_extnum_t			total_extents;
	xfs_extnum_t			stop_extent;
	int				error = 0;
	int				whichfork = XFS_DATA_FORK;
	int				logflags = 0;

	if (unlikely(XFS_TEST_ERROR(
	    (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
	     XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
	     mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
		XFS_ERROR_REPORT("xfs_bmap_shift_extents",
				 XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(direction == SHIFT_LEFT || direction == SHIFT_RIGHT);
	ASSERT(*next_fsb != NULLFSBLOCK || direction == SHIFT_RIGHT);

	ifp = XFS_IFORK_PTR(ip, whichfork);
	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		/* Read in all the extents */
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	if (ifp->if_flags & XFS_IFBROOT) {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		cur->bc_private.b.firstblock = *firstblock;
		cur->bc_private.b.dfops = dfops;
		cur->bc_private.b.flags = 0;
	}

	/*
	 * There may be delalloc extents in the data fork before the range we
	 * are collapsing out, so we cannot use the count of real extents here.
	 * Instead we have to calculate it from the incore fork.
	 */
	total_extents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t);
	if (total_extents == 0) {
		*done = 1;
		goto del_cursor;
	}

	/*
	 * In case of first right shift, we need to initialize next_fsb
	 */
	if (*next_fsb == NULLFSBLOCK) {
		gotp = xfs_iext_get_ext(ifp, total_extents - 1);
		xfs_bmbt_get_all(gotp, &got);
		*next_fsb = got.br_startoff;
		if (stop_fsb > *next_fsb) {
			*done = 1;
			goto del_cursor;
		}
	}

	/* Lookup the extent index at which we have to stop */
	if (direction == SHIFT_RIGHT) {
		gotp = xfs_iext_bno_to_ext(ifp, stop_fsb, &stop_extent);
		/* Make stop_extent exclusive of shift range */
		stop_extent--;
	} else
		stop_extent = total_extents;

	/*
	 * Look up the extent index for the fsb where we start shifting. We can
	 * henceforth iterate with current_ext as extent list changes are locked
	 * out via ilock.
	 *
	 * gotp can be null in 2 cases: 1) if there are no extents or 2)
	 * *next_fsb lies in a hole beyond which there are no extents. Either
	 * way, we are done.
	 */
	gotp = xfs_iext_bno_to_ext(ifp, *next_fsb, &current_ext);
	if (!gotp) {
		*done = 1;
		goto del_cursor;
	}

	/* some sanity checking before we finally start shifting extents */
	if ((direction == SHIFT_LEFT && current_ext >= stop_extent) ||
	    (direction == SHIFT_RIGHT && current_ext <= stop_extent)) {
		error = -EIO;
		goto del_cursor;
	}

	while (nexts++ < num_exts) {
		error = xfs_bmse_shift_one(ip, whichfork, offset_shift_fsb,
					   &current_ext, gotp, cur, &logflags,
					   direction, dfops);
		if (error)
			goto del_cursor;
		/*
		 * If there was an extent merge during the shift, the extent
		 * count can change. Update the total and grab the next record.
		 */
		if (direction == SHIFT_LEFT) {
			total_extents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t);
			stop_extent = total_extents;
		}

		if (current_ext == stop_extent) {
			*done = 1;
			*next_fsb = NULLFSBLOCK;
			break;
		}
		gotp = xfs_iext_get_ext(ifp, current_ext);
	}

	if (!*done) {
		xfs_bmbt_get_all(gotp, &got);
		*next_fsb = got.br_startoff;
	}

del_cursor:
	if (cur)
		xfs_btree_del_cursor(cur,
			error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);

	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);

	return error;
}
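
/*
 * Example: a collapse-range style left shift, looping until the shift
 * code signals completion (per-iteration transaction setup is elided;
 * identifiers are illustrative):
 *
 *	xfs_fileoff_t	next_fsb = start_fsb + shift_fsb;
 *	int		done = 0;
 *
 *	while (!done) {
 *		xfs_defer_init(&dfops, &firstblock);
 *		error = xfs_bmap_shift_extents(tp, ip, &next_fsb, shift_fsb,
 *				&done, stop_fsb, &firstblock, &dfops,
 *				SHIFT_LEFT, num_exts);
 *		if (error)
 *			break;
 *		error = xfs_defer_finish(&tp, &dfops, NULL);
 *		if (error)
 *			break;
 *	}
 */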
/*
 * Split an extent into two extents at split_fsb, such that split_fsb
 * becomes the first block of the new (second) extent.  @current_ext is
 * the extent to be split.  @split_fsb is the block at which the extent
 * is split.  If split_fsb lies in a hole or on the first block of an
 * extent, just return 0.
 */
STATIC int
xfs_bmap_split_extent_at(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	xfs_fileoff_t		split_fsb,
	xfs_fsblock_t		*firstfsb,
	struct xfs_defer_ops	*dfops)
{
	int				whichfork = XFS_DATA_FORK;
	struct xfs_btree_cur		*cur = NULL;
	struct xfs_bmbt_rec_host	*gotp;
	struct xfs_bmbt_irec		got;
	struct xfs_bmbt_irec		new; /* split extent */
	struct xfs_mount		*mp = ip->i_mount;
	struct xfs_ifork		*ifp;
	xfs_fsblock_t			gotblkcnt; /* new block count for got */
	xfs_extnum_t			current_ext;
	int				error = 0;
	int				logflags = 0;
	int				i = 0;

	if (unlikely(XFS_TEST_ERROR(
	    (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
	     XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
	     mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
		XFS_ERROR_REPORT("xfs_bmap_split_extent_at",
				 XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		/* Read in all the extents */
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	/*
	 * gotp can be null in 2 cases: 1) if there are no extents
	 * or 2) split_fsb lies in a hole beyond which there are
	 * no extents. Either way, we are done.
	 */
	gotp = xfs_iext_bno_to_ext(ifp, split_fsb, &current_ext);
	if (!gotp)
		return 0;

	xfs_bmbt_get_all(gotp, &got);

	/*
	 * Check split_fsb lies in a hole or the start boundary offset
	 * of the extent.
	 */
	if (got.br_startoff >= split_fsb)
		return 0;

	gotblkcnt = split_fsb - got.br_startoff;
	new.br_startoff = split_fsb;
	new.br_startblock = got.br_startblock + gotblkcnt;
	new.br_blockcount = got.br_blockcount - gotblkcnt;
	new.br_state = got.br_state;

	if (ifp->if_flags & XFS_IFBROOT) {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
		cur->bc_private.b.firstblock = *firstfsb;
		cur->bc_private.b.dfops = dfops;
		cur->bc_private.b.flags = 0;
		error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
				got.br_startblock,
				got.br_blockcount,
				&i);
		if (error)
			goto del_cursor;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
	}

	xfs_bmbt_set_blockcount(gotp, gotblkcnt);
	got.br_blockcount = gotblkcnt;

	logflags = XFS_ILOG_CORE;
	if (cur) {
		error = xfs_bmbt_update(cur, got.br_startoff,
				got.br_startblock,
				got.br_blockcount,
				got.br_state);
		if (error)
			goto del_cursor;
	} else
		logflags |= XFS_ILOG_DEXT;

	/* Add new extent */
	current_ext++;
	xfs_iext_insert(ip, current_ext, 1, &new, 0);
	XFS_IFORK_NEXT_SET(ip, whichfork,
			   XFS_IFORK_NEXTENTS(ip, whichfork) + 1);

	if (cur) {
		error = xfs_bmbt_lookup_eq(cur, new.br_startoff,
				new.br_startblock, new.br_blockcount,
				&i);
		if (error)
			goto del_cursor;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 0, del_cursor);
		cur->bc_rec.b.br_state = new.br_state;

		error = xfs_btree_insert(cur, &i);
		if (error)
			goto del_cursor;
		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
	}

	/*
	 * Convert to a btree if necessary.
	 */
	if (xfs_bmap_needs_btree(ip, whichfork)) {
		int tmp_logflags; /* partial log flag return val */

		ASSERT(cur == NULL);
		error = xfs_bmap_extents_to_btree(tp, ip, firstfsb, dfops,
				&cur, 0, &tmp_logflags, whichfork);
		logflags |= tmp_logflags;
	}

del_cursor:
	if (cur) {
		cur->bc_private.b.allocated = 0;
		xfs_btree_del_cursor(cur,
				error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
	}

	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	return error;
}
int
xfs_bmap_split_extent(
	struct xfs_inode	*ip,
	xfs_fileoff_t		split_fsb)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	struct xfs_defer_ops	dfops;
	xfs_fsblock_t		firstfsb;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
			XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	xfs_defer_init(&dfops, &firstfsb);

	error = xfs_bmap_split_extent_at(tp, ip, split_fsb,
			&firstfsb, &dfops);
	if (error)
		goto out;

	error = xfs_defer_finish(&tp, &dfops, NULL);
	if (error)
		goto out;

	return xfs_trans_commit(tp);

out:
	xfs_defer_cancel(&dfops);
	xfs_trans_cancel(tp);
	return error;
}
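
/*
 * Example: an insert-range style caller first splits the extent that
 * straddles the insertion offset so the subsequent right shift operates
 * on a clean extent boundary (sketch; stop_fsb is illustrative):
 *
 *	error = xfs_bmap_split_extent(ip, stop_fsb);
 *	if (error)
 *		return error;
 *
 * Each call allocates, commits and, on failure, cancels its own
 * transaction, so no caller-supplied transaction is needed.
 */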