/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_extfree_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_buf_item.h"
#include "xfs_trace.h"
#include "xfs_symlink.h"
#include "xfs_attr_leaf.h"
#include "xfs_filestream.h"
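
/*
 * Zone used to allocate the xfs_bmap_free_item entries that
 * xfs_bmap_add_free() queues for freeing at transaction end.
 */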
kmem_zone_t		*xfs_bmap_free_item_zone;

/*
 * Miscellaneous helper functions
 */

/*
 * Compute and fill in the value of the maximum depth of a bmap btree
 * in this filesystem.  Done once, during mount.
 */
void
xfs_bmap_compute_maxlevels(
	xfs_mount_t	*mp,		/* file system mount structure */
	int		whichfork)	/* data or attr fork */
{
	int		level;		/* btree level */
	uint		maxblocks;	/* max blocks at this level */
	uint		maxleafents;	/* max leaf entries possible */
	int		maxrootrecs;	/* max records in root block */
	int		minleafrecs;	/* min records in leaf block */
	int		minnoderecs;	/* min records in node block */
	int		sz;		/* root block size */
	/*
	 * The maximum number of extents in a file, hence the maximum
	 * number of leaf entries, is controlled by the type of di_nextents
	 * (a signed 32-bit number, xfs_extnum_t), or by di_anextents
	 * (a signed 16-bit number, xfs_aextnum_t).
	 *
	 * Note that we can no longer assume that if we are in ATTR1 that
	 * the fork offset of all the inodes will be
	 * (xfs_default_attroffset(ip) >> 3) because we could have mounted
	 * with ATTR2 and then mounted back with ATTR1, keeping the
	 * di_forkoff's fixed but probably at various positions. Therefore,
	 * for both ATTR1 and ATTR2 we have to assume the worst case scenario
	 * of a minimum size available.
	 */
	if (whichfork == XFS_DATA_FORK) {
		maxleafents = MAXEXTNUM;
		sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
	} else {
		maxleafents = MAXAEXTNUM;
		sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);
	}
	maxrootrecs = xfs_bmdr_maxrecs(sz, 0);
	minleafrecs = mp->m_bmap_dmnr[0];
	minnoderecs = mp->m_bmap_dmnr[1];
	maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
	for (level = 1; maxblocks > 1; level++) {
		if (maxblocks <= maxrootrecs)
			maxblocks = 1;
		else
			maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
	}
	mp->m_bm_maxlevels[whichfork] = level;
}
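
/*
 * Look up the record equal to [off, bno, len] in the bmap btree given by cur.
 */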
STATIC int				/* error */
xfs_bmbt_lookup_eq(
	struct xfs_btree_cur	*cur,
	xfs_fileoff_t		off,
	xfs_fsblock_t		bno,
	xfs_filblks_t		len,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b.br_startoff = off;
	cur->bc_rec.b.br_startblock = bno;
	cur->bc_rec.b.br_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}
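
/*
 * Look up the first record greater than or equal to [off, bno, len]
 * in the bmap btree given by cur.
 */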
STATIC int				/* error */
xfs_bmbt_lookup_ge(
	struct xfs_btree_cur	*cur,
	xfs_fileoff_t		off,
	xfs_fsblock_t		bno,
	xfs_filblks_t		len,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b.br_startoff = off;
	cur->bc_rec.b.br_startblock = bno;
	cur->bc_rec.b.br_blockcount = len;
	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}
/*
 * Check if the inode needs to be converted to btree format.
 */
static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
{
	return XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
		XFS_IFORK_NEXTENTS(ip, whichfork) >
			XFS_IFORK_MAXEXT(ip, whichfork);
}
/*
 * Check if the inode should be converted to extent format.
 */
static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
{
	return XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
		XFS_IFORK_NEXTENTS(ip, whichfork) <=
			XFS_IFORK_MAXEXT(ip, whichfork);
}
/*
 * Update the record referred to by cur to the value given
 * by [off, bno, len, state].
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int
xfs_bmbt_update(
	struct xfs_btree_cur	*cur,
	xfs_fileoff_t		off,
	xfs_fsblock_t		bno,
	xfs_filblks_t		len,
	xfs_exntst_t		state)
{
	union xfs_btree_rec	rec;

	xfs_bmbt_disk_set_allf(&rec.bmbt, off, bno, len, state);
	return xfs_btree_update(cur, &rec);
}
/*
 * Compute the worst-case number of indirect blocks that will be used
 * for ip's delayed extent of length "len".
 */
STATIC xfs_filblks_t
xfs_bmap_worst_indlen(
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_filblks_t	len)		/* delayed extent length */
{
	int		level;		/* btree level number */
	int		maxrecs;	/* maximum record count at this level */
	xfs_mount_t	*mp;		/* mount structure */
	xfs_filblks_t	rval;		/* return value */
190 maxrecs = mp->m_bmap_dmxr[0];
191 for (level = 0, rval = 0;
192 level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
195 do_div(len, maxrecs);
198 return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
201 maxrecs = mp->m_bmap_dmxr[1];
/*
 * Calculate the default attribute fork offset for newly created inodes.
 */
STATIC uint
xfs_default_attroffset(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint			offset;

	if (mp->m_sb.sb_inodesize == 256) {
		offset = XFS_LITINO(mp, ip->i_d.di_version) -
				XFS_BMDR_SPACE_CALC(MINABTPTRS);
	} else {
		offset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
	}

	ASSERT(offset < XFS_LITINO(mp, ip->i_d.di_version));
	return offset;
}
/*
 * Helper routine to reset inode di_forkoff field when switching
 * attribute fork from local to extent format - we reset it where
 * possible to make space available for inline data fork extents.
 */
STATIC void
xfs_bmap_forkoff_reset(
	xfs_inode_t	*ip,
	int		whichfork)
{
237 if (whichfork == XFS_ATTR_FORK &&
238 ip->i_d.di_format != XFS_DINODE_FMT_DEV &&
239 ip->i_d.di_format != XFS_DINODE_FMT_UUID &&
240 ip->i_d.di_format != XFS_DINODE_FMT_BTREE) {
241 uint dfl_forkoff = xfs_default_attroffset(ip) >> 3;
243 if (dfl_forkoff > ip->i_d.di_forkoff)
244 ip->i_d.di_forkoff = dfl_forkoff;
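
/*
 * Return the buffer for disk address bno if the btree cursor holds it or if
 * it is attached to the cursor's transaction as a logged buffer; otherwise
 * return NULL.  (Debug-only helper used by the leaf extent checker below.)
 */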
#ifdef DEBUG
STATIC struct xfs_buf *
xfs_bmap_get_bp(
	struct xfs_btree_cur	*cur,
	xfs_fsblock_t		bno)
{
	struct xfs_log_item_desc *lidp;
	int			i;
260 for (i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
261 if (!cur->bc_bufs[i])
263 if (XFS_BUF_ADDR(cur->bc_bufs[i]) == bno)
264 return cur->bc_bufs[i];
267 /* Chase down all the log items to see if the bp is there */
268 list_for_each_entry(lidp, &cur->bc_tp->t_items, lid_trans) {
269 struct xfs_buf_log_item *bip;
270 bip = (struct xfs_buf_log_item *)lidp->lid_item;
271 if (bip->bli_item.li_type == XFS_LI_BUF &&
272 XFS_BUF_ADDR(bip->bli_buf) == bno)
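
/*
 * xfs_check_block(): sanity check a single bmap btree block - keys must be in
 * ascending order and no two child pointers in the block may reference the
 * same block.
 */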
281 struct xfs_btree_block *block,
287 __be64 *pp, *thispa; /* pointer to block address */
288 xfs_bmbt_key_t *prevp, *keyp;
290 ASSERT(be16_to_cpu(block->bb_level) > 0);
293 for( i = 1; i <= xfs_btree_get_numrecs(block); i++) {
294 dmxr = mp->m_bmap_dmxr[0];
295 keyp = XFS_BMBT_KEY_ADDR(mp, block, i);
298 ASSERT(be64_to_cpu(prevp->br_startoff) <
299 be64_to_cpu(keyp->br_startoff));
304 * Compare the block numbers to see if there are dups.
307 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz);
309 pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr);
311 for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
313 thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz);
315 thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
316 if (*thispa == *pp) {
317 xfs_warn(mp, "%s: thispa(%d) == pp(%d) %Ld",
319 (unsigned long long)be64_to_cpu(*thispa));
320 panic("%s: ptrs are equal in node\n",
/*
 * Check that the extents for the inode ip are in the right order in all
 * btree leaves. This becomes prohibitively expensive for large extent count
 * files, so don't bother with inodes that have more than 10,000 extents in
 * them. The btree record ordering checks will still be done, so for such large
 * bmapbt constructs that is going to catch most corruptions.
 */
STATIC void
xfs_bmap_check_leaf_extents(
	xfs_btree_cur_t		*cur,	/* btree cursor or null */
	xfs_inode_t		*ip,	/* incore inode pointer */
	int			whichfork)	/* data or attr fork */
{
340 struct xfs_btree_block *block; /* current btree block */
341 xfs_fsblock_t bno; /* block # of "block" */
342 xfs_buf_t *bp; /* buffer for "block" */
343 int error; /* error return value */
344 xfs_extnum_t i=0, j; /* index into the extents list */
345 xfs_ifork_t *ifp; /* fork structure */
346 int level; /* btree level, for checking */
347 xfs_mount_t *mp; /* file system mount structure */
348 __be64 *pp; /* pointer to block address */
349 xfs_bmbt_rec_t *ep; /* pointer to current extent */
350 xfs_bmbt_rec_t last = {0, 0}; /* last extent in prev block */
351 xfs_bmbt_rec_t *nextp; /* pointer to next extent */
354 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) {
358 /* skip large extent count inodes */
359 if (ip->i_d.di_nextents > 10000)
364 ifp = XFS_IFORK_PTR(ip, whichfork);
365 block = ifp->if_broot;
367 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
369 level = be16_to_cpu(block->bb_level);
371 xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
372 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
373 bno = be64_to_cpu(*pp);
375 ASSERT(bno != NULLFSBLOCK);
376 ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
377 ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
380 * Go down the tree until leaf level is reached, following the first
381 * pointer (leftmost) at each level.
383 while (level-- > 0) {
384 /* See if buf is in cur first */
386 bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
389 error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
395 block = XFS_BUF_TO_BLOCK(bp);
400 * Check this block for basic sanity (increasing keys and
401 * no duplicate blocks).
404 xfs_check_block(block, mp, 0, 0);
405 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
406 bno = be64_to_cpu(*pp);
407 XFS_WANT_CORRUPTED_GOTO(mp,
408 XFS_FSB_SANITY_CHECK(mp, bno), error0);
411 xfs_trans_brelse(NULL, bp);
416 * Here with bp and block set to the leftmost leaf node in the tree.
421 * Loop over all leaf nodes checking that all extents are in the right order.
424 xfs_fsblock_t nextbno;
425 xfs_extnum_t num_recs;
428 num_recs = xfs_btree_get_numrecs(block);
431 * Read-ahead the next leaf block, if any.
434 nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
437 * Check all the extents to make sure they are OK.
438 * If we had a previous block, the last entry should
439 * conform with the first entry in this one.
442 ep = XFS_BMBT_REC_ADDR(mp, block, 1);
444 ASSERT(xfs_bmbt_disk_get_startoff(&last) +
445 xfs_bmbt_disk_get_blockcount(&last) <=
446 xfs_bmbt_disk_get_startoff(ep));
448 for (j = 1; j < num_recs; j++) {
449 nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1);
450 ASSERT(xfs_bmbt_disk_get_startoff(ep) +
451 xfs_bmbt_disk_get_blockcount(ep) <=
452 xfs_bmbt_disk_get_startoff(nextp));
460 xfs_trans_brelse(NULL, bp);
464 * If we've reached the end, stop.
466 if (bno == NULLFSBLOCK)
470 bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
473 error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
479 block = XFS_BUF_TO_BLOCK(bp);
485 xfs_warn(mp, "%s: at error0", __func__);
487 xfs_trans_brelse(NULL, bp);
489 xfs_warn(mp, "%s: BAD after btree leaves for %d extents",
491 panic("%s: CORRUPTED BTREE OR SOMETHING", __func__);
496 * Add bmap trace insert entries for all the contents of the extent records.
499 xfs_bmap_trace_exlist(
500 xfs_inode_t *ip, /* incore inode pointer */
501 xfs_extnum_t cnt, /* count of entries in the list */
502 int whichfork, /* data or attr fork */
503 unsigned long caller_ip)
505 xfs_extnum_t idx; /* extent record index */
506 xfs_ifork_t *ifp; /* inode fork pointer */
509 if (whichfork == XFS_ATTR_FORK)
510 state |= BMAP_ATTRFORK;
512 ifp = XFS_IFORK_PTR(ip, whichfork);
513 ASSERT(cnt == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)));
514 for (idx = 0; idx < cnt; idx++)
515 trace_xfs_extlist(ip, idx, whichfork, caller_ip);
519 * Validate that the bmbt_irecs being returned from bmapi are valid
520 * given the caller's original parameters. Specifically check the
521 * ranges of the returned irecs to ensure that they only extend beyond
522 * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
525 xfs_bmap_validate_ret(
529 xfs_bmbt_irec_t *mval,
533 int i; /* index to map values */
535 ASSERT(ret_nmap <= nmap);
537 for (i = 0; i < ret_nmap; i++) {
538 ASSERT(mval[i].br_blockcount > 0);
539 if (!(flags & XFS_BMAPI_ENTIRE)) {
540 ASSERT(mval[i].br_startoff >= bno);
541 ASSERT(mval[i].br_blockcount <= len);
542 ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
545 ASSERT(mval[i].br_startoff < bno + len);
546 ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
550 mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
551 mval[i].br_startoff);
552 ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
553 mval[i].br_startblock != HOLESTARTBLOCK);
554 ASSERT(mval[i].br_state == XFS_EXT_NORM ||
555 mval[i].br_state == XFS_EXT_UNWRITTEN);
#else
#define xfs_bmap_check_leaf_extents(cur, ip, whichfork)		do { } while (0)
#define	xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
#endif /* DEBUG */
/*
 * bmap free list manipulation functions
 */

/*
 * Add the extent to the list of extents to be free at transaction end.
 * The list is maintained sorted (by block number).
 */
void
xfs_bmap_add_free(
	struct xfs_mount		*mp,	/* mount point structure */
	struct xfs_bmap_free		*flist,	/* list of extents */
	xfs_fsblock_t			bno,	/* fs block number of extent */
	xfs_filblks_t			len)	/* length of extent */
{
	struct xfs_bmap_free_item	*new;	/* new element */
584 ASSERT(bno != NULLFSBLOCK);
586 ASSERT(len <= MAXEXTLEN);
587 ASSERT(!isnullstartblock(bno));
588 agno = XFS_FSB_TO_AGNO(mp, bno);
589 agbno = XFS_FSB_TO_AGBNO(mp, bno);
590 ASSERT(agno < mp->m_sb.sb_agcount);
591 ASSERT(agbno < mp->m_sb.sb_agblocks);
592 ASSERT(len < mp->m_sb.sb_agblocks);
593 ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
595 ASSERT(xfs_bmap_free_item_zone != NULL);
596 new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
597 new->xbfi_startblock = bno;
598 new->xbfi_blockcount = (xfs_extlen_t)len;
599 xfs_defer_add(flist, XFS_DEFER_OPS_TYPE_FREE, &new->xbfi_list);
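	/*
	 * The extent is only queued here; the blocks are actually freed when
	 * the deferred free list is processed at transaction finish time (see
	 * the xfs_bmap_finish() call in xfs_bmap_add_attrfork() below).
	 */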
/*
 * Inode fork format manipulation functions
 */

/*
 * Transform a btree format file with only one leaf node, where the
 * extents list will fit in the inode, into an extents format file.
 * Since the file extents are already in-core, all we have to do is
 * give up the space for the btree root and pitch the leaf block.
 */
612 STATIC int /* error */
613 xfs_bmap_btree_to_extents(
614 xfs_trans_t *tp, /* transaction pointer */
615 xfs_inode_t *ip, /* incore inode pointer */
616 xfs_btree_cur_t *cur, /* btree cursor */
617 int *logflagsp, /* inode logging flags */
618 int whichfork) /* data or attr fork */
621 struct xfs_btree_block *cblock;/* child btree block */
622 xfs_fsblock_t cbno; /* child block number */
623 xfs_buf_t *cbp; /* child block's buffer */
624 int error; /* error return value */
625 xfs_ifork_t *ifp; /* inode fork data */
626 xfs_mount_t *mp; /* mount point structure */
627 __be64 *pp; /* ptr to block address */
628 struct xfs_btree_block *rblock;/* root btree block */
631 ifp = XFS_IFORK_PTR(ip, whichfork);
632 ASSERT(ifp->if_flags & XFS_IFEXTENTS);
633 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
634 rblock = ifp->if_broot;
635 ASSERT(be16_to_cpu(rblock->bb_level) == 1);
636 ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
637 ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);
638 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
639 cbno = be64_to_cpu(*pp);
642 if ((error = xfs_btree_check_lptr(cur, cbno, 1)))
645 error = xfs_btree_read_bufl(mp, tp, cbno, 0, &cbp, XFS_BMAP_BTREE_REF,
649 cblock = XFS_BUF_TO_BLOCK(cbp);
650 if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
652 xfs_bmap_add_free(mp, cur->bc_private.b.flist, cbno, 1);
653 ip->i_d.di_nblocks--;
654 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
655 xfs_trans_binval(tp, cbp);
656 if (cur->bc_bufs[0] == cbp)
657 cur->bc_bufs[0] = NULL;
658 xfs_iroot_realloc(ip, -1, whichfork);
659 ASSERT(ifp->if_broot == NULL);
660 ASSERT((ifp->if_flags & XFS_IFBROOT) == 0);
661 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
662 *logflagsp = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
/*
 * Convert an extents-format file into a btree-format file.
 * The new file will have a root block (in the inode) and a single child block.
 */
670 STATIC int /* error */
671 xfs_bmap_extents_to_btree(
672 xfs_trans_t *tp, /* transaction pointer */
673 xfs_inode_t *ip, /* incore inode pointer */
674 xfs_fsblock_t *firstblock, /* first-block-allocated */
675 xfs_bmap_free_t *flist, /* blocks freed in xaction */
676 xfs_btree_cur_t **curp, /* cursor returned to caller */
677 int wasdel, /* converting a delayed alloc */
678 int *logflagsp, /* inode logging flags */
679 int whichfork) /* data or attr fork */
681 struct xfs_btree_block *ablock; /* allocated (child) bt block */
682 xfs_buf_t *abp; /* buffer for ablock */
683 xfs_alloc_arg_t args; /* allocation arguments */
684 xfs_bmbt_rec_t *arp; /* child record pointer */
685 struct xfs_btree_block *block; /* btree root block */
686 xfs_btree_cur_t *cur; /* bmap btree cursor */
687 xfs_bmbt_rec_host_t *ep; /* extent record pointer */
688 int error; /* error return value */
689 xfs_extnum_t i, cnt; /* extent record index */
690 xfs_ifork_t *ifp; /* inode fork pointer */
691 xfs_bmbt_key_t *kp; /* root block key pointer */
692 xfs_mount_t *mp; /* mount structure */
693 xfs_extnum_t nextents; /* number of file extents */
694 xfs_bmbt_ptr_t *pp; /* root block address pointer */
697 ifp = XFS_IFORK_PTR(ip, whichfork);
698 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS);
701 * Make space in the inode incore.
703 xfs_iroot_realloc(ip, 1, whichfork);
704 ifp->if_flags |= XFS_IFBROOT;
709 block = ifp->if_broot;
710 if (xfs_sb_version_hascrc(&mp->m_sb))
711 xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
712 XFS_BMAP_CRC_MAGIC, 1, 1, ip->i_ino,
713 XFS_BTREE_LONG_PTRS | XFS_BTREE_CRC_BLOCKS);
715 xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
716 XFS_BMAP_MAGIC, 1, 1, ip->i_ino,
717 XFS_BTREE_LONG_PTRS);
720 * Need a cursor. Can't allocate until bb_level is filled in.
722 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
723 cur->bc_private.b.firstblock = *firstblock;
724 cur->bc_private.b.flist = flist;
725 cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
727 * Convert to a btree with two levels, one record in root.
729 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE);
730 memset(&args, 0, sizeof(args));
733 args.firstblock = *firstblock;
734 if (*firstblock == NULLFSBLOCK) {
735 args.type = XFS_ALLOCTYPE_START_BNO;
736 args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
737 } else if (flist->dop_low) {
738 args.type = XFS_ALLOCTYPE_START_BNO;
739 args.fsbno = *firstblock;
741 args.type = XFS_ALLOCTYPE_NEAR_BNO;
742 args.fsbno = *firstblock;
744 args.minlen = args.maxlen = args.prod = 1;
745 args.wasdel = wasdel;
747 if ((error = xfs_alloc_vextent(&args))) {
748 xfs_iroot_realloc(ip, -1, whichfork);
749 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
753 * Allocation can't fail, the space was reserved.
755 ASSERT(args.fsbno != NULLFSBLOCK);
756 ASSERT(*firstblock == NULLFSBLOCK ||
757 args.agno == XFS_FSB_TO_AGNO(mp, *firstblock) ||
759 args.agno > XFS_FSB_TO_AGNO(mp, *firstblock)));
760 *firstblock = cur->bc_private.b.firstblock = args.fsbno;
761 cur->bc_private.b.allocated++;
762 ip->i_d.di_nblocks++;
763 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
764 abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0);
766 * Fill in the child block.
768 abp->b_ops = &xfs_bmbt_buf_ops;
769 ablock = XFS_BUF_TO_BLOCK(abp);
770 if (xfs_sb_version_hascrc(&mp->m_sb))
771 xfs_btree_init_block_int(mp, ablock, abp->b_bn,
772 XFS_BMAP_CRC_MAGIC, 0, 0, ip->i_ino,
773 XFS_BTREE_LONG_PTRS | XFS_BTREE_CRC_BLOCKS);
775 xfs_btree_init_block_int(mp, ablock, abp->b_bn,
776 XFS_BMAP_MAGIC, 0, 0, ip->i_ino,
777 XFS_BTREE_LONG_PTRS);
779 arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
780 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
781 for (cnt = i = 0; i < nextents; i++) {
782 ep = xfs_iext_get_ext(ifp, i);
783 if (!isnullstartblock(xfs_bmbt_get_startblock(ep))) {
784 arp->l0 = cpu_to_be64(ep->l0);
785 arp->l1 = cpu_to_be64(ep->l1);
789 ASSERT(cnt == XFS_IFORK_NEXTENTS(ip, whichfork));
790 xfs_btree_set_numrecs(ablock, cnt);
793 * Fill in the root key and pointer.
795 kp = XFS_BMBT_KEY_ADDR(mp, block, 1);
796 arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
797 kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
798 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
799 be16_to_cpu(block->bb_level)));
800 *pp = cpu_to_be64(args.fsbno);
803 * Do all this logging at the end so that
804 * the root is at the right level.
806 xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
807 xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
808 ASSERT(*curp == NULL);
810 *logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
/*
 * Convert a local file to an extents file.
 * This code is out of bounds for data forks of regular files,
 * since the file data needs to get logged so things will stay consistent.
 * (The bmap-level manipulations are ok, though).
 */
STATIC void
xfs_bmap_local_to_extents_empty(
	struct xfs_inode	*ip,
	int			whichfork)
{
825 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
827 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
828 ASSERT(ifp->if_bytes == 0);
829 ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0);
831 xfs_bmap_forkoff_reset(ip, whichfork);
832 ifp->if_flags &= ~XFS_IFINLINE;
833 ifp->if_flags |= XFS_IFEXTENTS;
834 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
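
/*
 * Convert the local-format fork data into a single allocated extent: allocate
 * one block, let the init_fn callout copy and log the data, then switch the
 * fork to extents format.
 */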
838 STATIC int /* error */
839 xfs_bmap_local_to_extents(
840 xfs_trans_t *tp, /* transaction pointer */
841 xfs_inode_t *ip, /* incore inode pointer */
842 xfs_fsblock_t *firstblock, /* first block allocated in xaction */
843 xfs_extlen_t total, /* total blocks needed by transaction */
844 int *logflagsp, /* inode logging flags */
846 void (*init_fn)(struct xfs_trans *tp,
848 struct xfs_inode *ip,
849 struct xfs_ifork *ifp))
852 int flags; /* logging flags returned */
853 xfs_ifork_t *ifp; /* inode fork pointer */
854 xfs_alloc_arg_t args; /* allocation arguments */
855 xfs_buf_t *bp; /* buffer for extent block */
856 xfs_bmbt_rec_host_t *ep; /* extent record pointer */
	/*
	 * We don't want to deal with the case of keeping inode data inline yet.
	 * So sending the data fork of a regular inode is invalid.
	 */
	ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK));
863 ifp = XFS_IFORK_PTR(ip, whichfork);
864 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
866 if (!ifp->if_bytes) {
867 xfs_bmap_local_to_extents_empty(ip, whichfork);
868 flags = XFS_ILOG_CORE;
874 ASSERT((ifp->if_flags & (XFS_IFINLINE|XFS_IFEXTENTS|XFS_IFEXTIREC)) ==
876 memset(&args, 0, sizeof(args));
878 args.mp = ip->i_mount;
879 args.firstblock = *firstblock;
881 * Allocate a block. We know we need only one, since the
882 * file currently fits in an inode.
884 if (*firstblock == NULLFSBLOCK) {
885 args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino);
886 args.type = XFS_ALLOCTYPE_START_BNO;
888 args.fsbno = *firstblock;
889 args.type = XFS_ALLOCTYPE_NEAR_BNO;
892 args.minlen = args.maxlen = args.prod = 1;
893 error = xfs_alloc_vextent(&args);
897 /* Can't fail, the space was reserved. */
898 ASSERT(args.fsbno != NULLFSBLOCK);
899 ASSERT(args.len == 1);
900 *firstblock = args.fsbno;
901 bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);
	/*
	 * Initialize the block, copy the data and log the remote buffer.
	 *
	 * The callout is responsible for logging because the remote format
	 * might differ from the local format and thus we don't know how much
	 * to log here. Note that init_fn must also set the buffer log item
	 * type correctly.
	 */
911 init_fn(tp, bp, ip, ifp);
913 /* account for the change in fork size */
914 xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
915 xfs_bmap_local_to_extents_empty(ip, whichfork);
916 flags |= XFS_ILOG_CORE;
918 xfs_iext_add(ifp, 0, 1);
919 ep = xfs_iext_get_ext(ifp, 0);
920 xfs_bmbt_set_allf(ep, 0, args.fsbno, 1, XFS_EXT_NORM);
921 trace_xfs_bmap_post_update(ip, 0,
922 whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0,
924 XFS_IFORK_NEXT_SET(ip, whichfork, 1);
925 ip->i_d.di_nblocks = 1;
926 xfs_trans_mod_dquot_byino(tp, ip,
927 XFS_TRANS_DQ_BCOUNT, 1L);
928 flags |= xfs_ilog_fext(whichfork);
/*
 * Called from xfs_bmap_add_attrfork to handle btree format files.
 */
STATIC int					/* error */
939 xfs_bmap_add_attrfork_btree(
940 xfs_trans_t *tp, /* transaction pointer */
941 xfs_inode_t *ip, /* incore inode pointer */
942 xfs_fsblock_t *firstblock, /* first block allocated */
943 xfs_bmap_free_t *flist, /* blocks to free at commit */
944 int *flags) /* inode logging flags */
946 xfs_btree_cur_t *cur; /* btree cursor */
947 int error; /* error return value */
948 xfs_mount_t *mp; /* file system mount struct */
949 int stat; /* newroot status */
952 if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip))
953 *flags |= XFS_ILOG_DBROOT;
955 cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
956 cur->bc_private.b.flist = flist;
957 cur->bc_private.b.firstblock = *firstblock;
958 if ((error = xfs_bmbt_lookup_ge(cur, 0, 0, 0, &stat)))
960 /* must be at least one entry */
961 XFS_WANT_CORRUPTED_GOTO(mp, stat == 1, error0);
962 if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
965 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
968 *firstblock = cur->bc_private.b.firstblock;
969 cur->bc_private.b.allocated = 0;
970 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
974 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
/*
 * Called from xfs_bmap_add_attrfork to handle extents format files.
 */
STATIC int					/* error */
982 xfs_bmap_add_attrfork_extents(
983 xfs_trans_t *tp, /* transaction pointer */
984 xfs_inode_t *ip, /* incore inode pointer */
985 xfs_fsblock_t *firstblock, /* first block allocated */
986 xfs_bmap_free_t *flist, /* blocks to free at commit */
987 int *flags) /* inode logging flags */
989 xfs_btree_cur_t *cur; /* bmap btree cursor */
990 int error; /* error return value */
992 if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip))
995 error = xfs_bmap_extents_to_btree(tp, ip, firstblock, flist, &cur, 0,
996 flags, XFS_DATA_FORK);
998 cur->bc_private.b.allocated = 0;
999 xfs_btree_del_cursor(cur,
1000 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
/*
 * Called from xfs_bmap_add_attrfork to handle local format files. Each
 * different data fork content type needs a different callout to do the
 * conversion. Some are basic and only require special block initialisation
 * callouts for the data formatting, others (directories) are so specialised
 * they handle everything themselves.
 *
 * XXX (dgc): investigate whether directory conversion can use the generic
 * formatting callout. It should be possible - it's just a very complex
 * bunch of code - we need to walk the dirent data segment by segment and
 * add the entries one by one into the format structures.
 */
1016 STATIC int /* error */
1017 xfs_bmap_add_attrfork_local(
1018 xfs_trans_t *tp, /* transaction pointer */
1019 xfs_inode_t *ip, /* incore inode pointer */
1020 xfs_fsblock_t *firstblock, /* first block allocated */
1021 xfs_bmap_free_t *flist, /* blocks to free at commit */
1022 int *flags) /* inode logging flags */
1024 xfs_da_args_t dargs; /* args for dir/attr code */
1026 if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
1029 if (S_ISDIR(VFS_I(ip)->i_mode)) {
1030 memset(&dargs, 0, sizeof(dargs));
1031 dargs.geo = ip->i_mount->m_dir_geo;
1033 dargs.firstblock = firstblock;
1034 dargs.flist = flist;
1035 dargs.total = dargs.geo->fsbcount;
1036 dargs.whichfork = XFS_DATA_FORK;
1038 return xfs_dir2_sf_to_block(&dargs);
1041 if (S_ISLNK(VFS_I(ip)->i_mode))
1042 return xfs_bmap_local_to_extents(tp, ip, firstblock, 1,
1043 flags, XFS_DATA_FORK,
1044 xfs_symlink_local_to_remote);
1046 /* should only be called for types that support local format data */
1048 return -EFSCORRUPTED;
/*
 * Convert inode from non-attributed to attributed.
 * Must not be in a transaction, ip must not be locked.
 */
int						/* error code */
1056 xfs_bmap_add_attrfork(
1057 xfs_inode_t *ip, /* incore inode pointer */
1058 int size, /* space new attribute needs */
1059 int rsvd) /* xact may use reserved blks */
1061 xfs_fsblock_t firstblock; /* 1st block/ag allocated */
1062 xfs_bmap_free_t flist; /* freed extent records */
1063 xfs_mount_t *mp; /* mount structure */
1064 xfs_trans_t *tp; /* transaction pointer */
1065 int blks; /* space reservation */
1066 int version = 1; /* superblock attr version */
1067 int logflags; /* logging flags */
1068 int error; /* error return value */
1070 ASSERT(XFS_IFORK_Q(ip) == 0);
1073 ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
1075 blks = XFS_ADDAFORK_SPACE_RES(mp);
1077 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_addafork, blks, 0,
1078 rsvd ? XFS_TRANS_RESERVE : 0, &tp);
1082 xfs_ilock(ip, XFS_ILOCK_EXCL);
1083 error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ?
1084 XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
1085 XFS_QMOPT_RES_REGBLKS);
1088 if (XFS_IFORK_Q(ip))
1090 if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) {
1092 * For inodes coming from pre-6.2 filesystems.
1094 ASSERT(ip->i_d.di_aformat == 0);
1095 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
1097 ASSERT(ip->i_d.di_anextents == 0);
1099 xfs_trans_ijoin(tp, ip, 0);
1100 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1102 switch (ip->i_d.di_format) {
1103 case XFS_DINODE_FMT_DEV:
1104 ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
1106 case XFS_DINODE_FMT_UUID:
1107 ip->i_d.di_forkoff = roundup(sizeof(uuid_t), 8) >> 3;
1109 case XFS_DINODE_FMT_LOCAL:
1110 case XFS_DINODE_FMT_EXTENTS:
1111 case XFS_DINODE_FMT_BTREE:
1112 ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
1113 if (!ip->i_d.di_forkoff)
1114 ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
1115 else if (mp->m_flags & XFS_MOUNT_ATTR2)
1124 ASSERT(ip->i_afp == NULL);
1125 ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
1126 ip->i_afp->if_flags = XFS_IFEXTENTS;
1128 xfs_bmap_init(&flist, &firstblock);
1129 switch (ip->i_d.di_format) {
1130 case XFS_DINODE_FMT_LOCAL:
1131 error = xfs_bmap_add_attrfork_local(tp, ip, &firstblock, &flist,
1134 case XFS_DINODE_FMT_EXTENTS:
1135 error = xfs_bmap_add_attrfork_extents(tp, ip, &firstblock,
1138 case XFS_DINODE_FMT_BTREE:
1139 error = xfs_bmap_add_attrfork_btree(tp, ip, &firstblock, &flist,
1147 xfs_trans_log_inode(tp, ip, logflags);
1150 if (!xfs_sb_version_hasattr(&mp->m_sb) ||
1151 (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) {
1152 bool log_sb = false;
1154 spin_lock(&mp->m_sb_lock);
1155 if (!xfs_sb_version_hasattr(&mp->m_sb)) {
1156 xfs_sb_version_addattr(&mp->m_sb);
1159 if (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2) {
1160 xfs_sb_version_addattr2(&mp->m_sb);
1163 spin_unlock(&mp->m_sb_lock);
1168 error = xfs_bmap_finish(&tp, &flist, NULL);
1171 error = xfs_trans_commit(tp);
1172 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1176 xfs_bmap_cancel(&flist);
1178 xfs_trans_cancel(tp);
1179 xfs_iunlock(ip, XFS_ILOCK_EXCL);
/*
 * Internal and external extent tree search functions.
 */

/*
 * Read in the extents to if_extents.
 * All inode fields are set up by caller, we just traverse the btree
 * and copy the records in. If the file system cannot contain unwritten
 * extents, the records are checked for no "state" flags.
 */
int					/* error */
xfs_bmap_read_extents(
1195 xfs_trans_t *tp, /* transaction pointer */
1196 xfs_inode_t *ip, /* incore inode */
1197 int whichfork) /* data or attr fork */
1199 struct xfs_btree_block *block; /* current btree block */
1200 xfs_fsblock_t bno; /* block # of "block" */
1201 xfs_buf_t *bp; /* buffer for "block" */
1202 int error; /* error return value */
1203 xfs_exntfmt_t exntf; /* XFS_EXTFMT_NOSTATE, if checking */
1204 xfs_extnum_t i, j; /* index into the extents list */
1205 xfs_ifork_t *ifp; /* fork structure */
1206 int level; /* btree level, for checking */
1207 xfs_mount_t *mp; /* file system mount structure */
1208 __be64 *pp; /* pointer to block address */
1210 xfs_extnum_t room; /* number of entries there's room for */
1214 ifp = XFS_IFORK_PTR(ip, whichfork);
1215 exntf = (whichfork != XFS_DATA_FORK) ? XFS_EXTFMT_NOSTATE :
1216 XFS_EXTFMT_INODE(ip);
1217 block = ifp->if_broot;
1219 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
1221 level = be16_to_cpu(block->bb_level);
1223 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
1224 bno = be64_to_cpu(*pp);
1225 ASSERT(bno != NULLFSBLOCK);
1226 ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
1227 ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
1229 * Go down the tree until leaf level is reached, following the first
1230 * pointer (leftmost) at each level.
1232 while (level-- > 0) {
1233 error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
1234 XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
1237 block = XFS_BUF_TO_BLOCK(bp);
1240 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
1241 bno = be64_to_cpu(*pp);
1242 XFS_WANT_CORRUPTED_GOTO(mp,
1243 XFS_FSB_SANITY_CHECK(mp, bno), error0);
1244 xfs_trans_brelse(tp, bp);
1247 * Here with bp and block set to the leftmost leaf node in the tree.
1249 room = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
1252 * Loop over all leaf nodes. Copy information to the extent records.
1255 xfs_bmbt_rec_t *frp;
1256 xfs_fsblock_t nextbno;
1257 xfs_extnum_t num_recs;
1260 num_recs = xfs_btree_get_numrecs(block);
1261 if (unlikely(i + num_recs > room)) {
1262 ASSERT(i + num_recs <= room);
1263 xfs_warn(ip->i_mount,
1264 "corrupt dinode %Lu, (btree extents).",
1265 (unsigned long long) ip->i_ino);
1266 XFS_CORRUPTION_ERROR("xfs_bmap_read_extents(1)",
1267 XFS_ERRLEVEL_LOW, ip->i_mount, block);
1271 * Read-ahead the next leaf block, if any.
1273 nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
1274 if (nextbno != NULLFSBLOCK)
1275 xfs_btree_reada_bufl(mp, nextbno, 1,
1278 * Copy records into the extent records.
1280 frp = XFS_BMBT_REC_ADDR(mp, block, 1);
1282 for (j = 0; j < num_recs; j++, i++, frp++) {
1283 xfs_bmbt_rec_host_t *trp = xfs_iext_get_ext(ifp, i);
1284 trp->l0 = be64_to_cpu(frp->l0);
1285 trp->l1 = be64_to_cpu(frp->l1);
1287 if (exntf == XFS_EXTFMT_NOSTATE) {
1289 * Check all attribute bmap btree records and
1290 * any "older" data bmap btree records for a
1291 * set bit in the "extent flag" position.
1293 if (unlikely(xfs_check_nostate_extents(ifp,
1294 start, num_recs))) {
1295 XFS_ERROR_REPORT("xfs_bmap_read_extents(2)",
1301 xfs_trans_brelse(tp, bp);
1304 * If we've reached the end, stop.
1306 if (bno == NULLFSBLOCK)
1308 error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
1309 XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
1312 block = XFS_BUF_TO_BLOCK(bp);
1314 ASSERT(i == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)));
1315 ASSERT(i == XFS_IFORK_NEXTENTS(ip, whichfork));
1316 XFS_BMAP_TRACE_EXLIST(ip, i, whichfork);
1319 xfs_trans_brelse(tp, bp);
1320 return -EFSCORRUPTED;
/*
 * Search the extent records for the entry containing block bno.
 * If bno lies in a hole, point to the next entry.  If bno lies
 * past eof, *eofp will be set, and *prevp will contain the last
 * entry (null if none).  Else, *lastxp will be set to the index
 * of the found entry; *gotp will contain the entry.
 */
1331 STATIC xfs_bmbt_rec_host_t * /* pointer to found extent entry */
1332 xfs_bmap_search_multi_extents(
1333 xfs_ifork_t *ifp, /* inode fork pointer */
1334 xfs_fileoff_t bno, /* block number searched for */
1335 int *eofp, /* out: end of file found */
1336 xfs_extnum_t *lastxp, /* out: last extent index */
1337 xfs_bmbt_irec_t *gotp, /* out: extent entry found */
1338 xfs_bmbt_irec_t *prevp) /* out: previous extent entry found */
1340 xfs_bmbt_rec_host_t *ep; /* extent record pointer */
1341 xfs_extnum_t lastx; /* last extent index */
1344 * Initialize the extent entry structure to catch access to
1345 * uninitialized br_startblock field.
1347 gotp->br_startoff = 0xffa5a5a5a5a5a5a5LL;
1348 gotp->br_blockcount = 0xa55a5a5a5a5a5a5aLL;
1349 gotp->br_state = XFS_EXT_INVALID;
1350 gotp->br_startblock = 0xffffa5a5a5a5a5a5LL;
1351 prevp->br_startoff = NULLFILEOFF;
1353 ep = xfs_iext_bno_to_ext(ifp, bno, &lastx);
1355 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, lastx - 1), prevp);
1357 if (lastx < (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))) {
1358 xfs_bmbt_get_all(ep, gotp);
/*
 * Search the extents list for the inode, for the extent containing bno.
 * If bno lies in a hole, point to the next entry.  If bno lies past eof,
 * *eofp will be set, and *prevp will contain the last entry (null if none).
 * Else, *lastxp will be set to the index of the found
 * entry; *gotp will contain the entry.
 */
1378 STATIC xfs_bmbt_rec_host_t * /* pointer to found extent entry */
1379 xfs_bmap_search_extents(
1380 xfs_inode_t *ip, /* incore inode pointer */
1381 xfs_fileoff_t bno, /* block number searched for */
1382 int fork, /* data or attr fork */
1383 int *eofp, /* out: end of file found */
1384 xfs_extnum_t *lastxp, /* out: last extent index */
1385 xfs_bmbt_irec_t *gotp, /* out: extent entry found */
1386 xfs_bmbt_irec_t *prevp) /* out: previous extent entry found */
1388 xfs_ifork_t *ifp; /* inode fork pointer */
1389 xfs_bmbt_rec_host_t *ep; /* extent record pointer */
1391 XFS_STATS_INC(ip->i_mount, xs_look_exlist);
1392 ifp = XFS_IFORK_PTR(ip, fork);
1394 ep = xfs_bmap_search_multi_extents(ifp, bno, eofp, lastxp, gotp, prevp);
1396 if (unlikely(!(gotp->br_startblock) && (*lastxp != NULLEXTNUM) &&
1397 !(XFS_IS_REALTIME_INODE(ip) && fork == XFS_DATA_FORK))) {
1398 xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
1399 "Access to block zero in inode %llu "
1400 "start_block: %llx start_off: %llx "
1401 "blkcnt: %llx extent-state: %x lastx: %x",
1402 (unsigned long long)ip->i_ino,
1403 (unsigned long long)gotp->br_startblock,
1404 (unsigned long long)gotp->br_startoff,
1405 (unsigned long long)gotp->br_blockcount,
1406 gotp->br_state, *lastxp);
1407 *lastxp = NULLEXTNUM;
/*
 * Returns the file-relative block number of the first unused block(s)
 * in the file with at least "len" logically contiguous blocks free.
 * This is the lowest-address hole if the file has holes, else the first block
 * past the end of file.
 * Return 0 if the file is currently local (in-inode).
 */
1422 xfs_bmap_first_unused(
1423 xfs_trans_t *tp, /* transaction pointer */
1424 xfs_inode_t *ip, /* incore inode */
1425 xfs_extlen_t len, /* size of hole to find */
1426 xfs_fileoff_t *first_unused, /* unused block */
1427 int whichfork) /* data or attr fork */
1429 int error; /* error return value */
1430 int idx; /* extent record index */
1431 xfs_ifork_t *ifp; /* inode fork pointer */
1432 xfs_fileoff_t lastaddr; /* last block number seen */
1433 xfs_fileoff_t lowest; /* lowest useful block */
1434 xfs_fileoff_t max; /* starting useful block */
1435 xfs_fileoff_t off; /* offset for this block */
1436 xfs_extnum_t nextents; /* number of extent entries */
1438 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE ||
1439 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ||
1440 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
1441 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
1445 ifp = XFS_IFORK_PTR(ip, whichfork);
1446 if (!(ifp->if_flags & XFS_IFEXTENTS) &&
1447 (error = xfs_iread_extents(tp, ip, whichfork)))
1449 lowest = *first_unused;
1450 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
1451 for (idx = 0, lastaddr = 0, max = lowest; idx < nextents; idx++) {
1452 xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, idx);
1453 off = xfs_bmbt_get_startoff(ep);
1455 * See if the hole before this extent will work.
1457 if (off >= lowest + len && off - max >= len) {
1458 *first_unused = max;
1461 lastaddr = off + xfs_bmbt_get_blockcount(ep);
1462 max = XFS_FILEOFF_MAX(lastaddr, lowest);
1464 *first_unused = max;
/*
 * Returns the file-relative block number of the last block - 1 before
 * last_block (input value) in the file.
 * This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
1475 xfs_bmap_last_before(
1476 xfs_trans_t *tp, /* transaction pointer */
1477 xfs_inode_t *ip, /* incore inode */
1478 xfs_fileoff_t *last_block, /* last block */
1479 int whichfork) /* data or attr fork */
1481 xfs_fileoff_t bno; /* input file offset */
1482 int eof; /* hit end of file */
1483 xfs_bmbt_rec_host_t *ep; /* pointer to last extent */
1484 int error; /* error return value */
1485 xfs_bmbt_irec_t got; /* current extent value */
1486 xfs_ifork_t *ifp; /* inode fork pointer */
1487 xfs_extnum_t lastx; /* last extent used */
1488 xfs_bmbt_irec_t prev; /* previous extent value */
1490 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
1491 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
1492 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL)
1494 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
1498 ifp = XFS_IFORK_PTR(ip, whichfork);
1499 if (!(ifp->if_flags & XFS_IFEXTENTS) &&
1500 (error = xfs_iread_extents(tp, ip, whichfork)))
1502 bno = *last_block - 1;
1503 ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
1505 if (eof || xfs_bmbt_get_startoff(ep) > bno) {
1506 if (prev.br_startoff == NULLFILEOFF)
1509 *last_block = prev.br_startoff + prev.br_blockcount;
1512 * Otherwise *last_block is already the right answer.
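
/*
 * Look up the last extent record of the given fork, returning it in *rec.
 * *is_empty is set if the fork currently contains no extents.
 */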
1518 xfs_bmap_last_extent(
1519 struct xfs_trans *tp,
1520 struct xfs_inode *ip,
1522 struct xfs_bmbt_irec *rec,
1525 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
1529 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
1530 error = xfs_iread_extents(tp, ip, whichfork);
1535 nextents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t);
1536 if (nextents == 0) {
1541 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, nextents - 1), rec);
/*
 * Check the last inode extent to determine whether this allocation will result
 * in blocks being allocated at the end of the file. When we allocate new data
 * blocks at the end of the file which do not start at the previous data block,
 * we will try to align the new blocks at stripe unit boundaries.
 *
 * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be
 * at, or past the EOF.
 */
1557 struct xfs_bmalloca *bma,
1560 struct xfs_bmbt_irec rec;
1565 error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
	/*
	 * Check if we are allocating or past the last extent, or at least into
	 * the last delayed allocated extent.
	 */
1579 bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount ||
1580 (bma->offset >= rec.br_startoff &&
1581 isnullstartblock(rec.br_startblock));
/*
 * Returns the file-relative block number of the first block past eof in
 * the file.  This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
1591 xfs_bmap_last_offset(
1592 struct xfs_inode *ip,
1593 xfs_fileoff_t *last_block,
1596 struct xfs_bmbt_irec rec;
1602 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL)
1605 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
1606 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
1609 error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
1610 if (error || is_empty)
1613 *last_block = rec.br_startoff + rec.br_blockcount;
/*
 * Returns whether the selected fork of the inode has exactly one
 * block or not.  For the data fork we check this matches di_size,
 * implying the file's range is 0..bsize-1.
 */
1622 int /* 1=>1 block, 0=>otherwise */
1624 xfs_inode_t *ip, /* incore inode */
1625 int whichfork) /* data or attr fork */
1627 xfs_bmbt_rec_host_t *ep; /* ptr to fork's extent */
1628 xfs_ifork_t *ifp; /* inode fork pointer */
1629 int rval; /* return value */
1630 xfs_bmbt_irec_t s; /* internal version of extent */
1633 if (whichfork == XFS_DATA_FORK)
1634 return XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize;
1636 if (XFS_IFORK_NEXTENTS(ip, whichfork) != 1)
1638 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
1640 ifp = XFS_IFORK_PTR(ip, whichfork);
1641 ASSERT(ifp->if_flags & XFS_IFEXTENTS);
1642 ep = xfs_iext_get_ext(ifp, 0);
1643 xfs_bmbt_get_all(ep, &s);
1644 rval = s.br_startoff == 0 && s.br_blockcount == 1;
1645 if (rval && whichfork == XFS_DATA_FORK)
1646 ASSERT(XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize);
/*
 * Extent tree manipulation functions used during allocation.
 */

/*
 * Convert a delayed allocation to a real allocation.
 */
STATIC int				/* error */
1658 xfs_bmap_add_extent_delay_real(
1659 struct xfs_bmalloca *bma)
1661 struct xfs_bmbt_irec *new = &bma->got;
1662 int diff; /* temp value */
1663 xfs_bmbt_rec_host_t *ep; /* extent entry for idx */
1664 int error; /* error return value */
1665 int i; /* temp state */
1666 xfs_ifork_t *ifp; /* inode fork pointer */
1667 xfs_fileoff_t new_endoff; /* end offset of new entry */
1668 xfs_bmbt_irec_t r[3]; /* neighbor extent entries */
1669 /* left is 0, right is 1, prev is 2 */
1670 int rval=0; /* return value (logging flags) */
1671 int state = 0;/* state bits, accessed thru macros */
1672 xfs_filblks_t da_new; /* new count del alloc blocks used */
1673 xfs_filblks_t da_old; /* old count del alloc blocks used */
1674 xfs_filblks_t temp=0; /* value for da_new calculations */
1675 xfs_filblks_t temp2=0;/* value for da_new calculations */
1676 int tmp_rval; /* partial logging flags */
1677 int whichfork = XFS_DATA_FORK;
1678 struct xfs_mount *mp;
1680 mp = bma->ip->i_mount;
1681 ifp = XFS_IFORK_PTR(bma->ip, whichfork);
1683 ASSERT(bma->idx >= 0);
1684 ASSERT(bma->idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
1685 ASSERT(!isnullstartblock(new->br_startblock));
1687 (bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
1689 XFS_STATS_INC(mp, xs_add_exlist);
1696 * Set up a bunch of variables to make the tests simpler.
1698 ep = xfs_iext_get_ext(ifp, bma->idx);
1699 xfs_bmbt_get_all(ep, &PREV);
1700 new_endoff = new->br_startoff + new->br_blockcount;
1701 ASSERT(PREV.br_startoff <= new->br_startoff);
1702 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
1704 da_old = startblockval(PREV.br_startblock);
1708 * Set flags determining what part of the previous delayed allocation
1709 * extent is being replaced by a real allocation.
1711 if (PREV.br_startoff == new->br_startoff)
1712 state |= BMAP_LEFT_FILLING;
1713 if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
1714 state |= BMAP_RIGHT_FILLING;
1717 * Check and set flags if this segment has a left neighbor.
1718 * Don't set contiguous if the combined extent would be too large.
1721 state |= BMAP_LEFT_VALID;
1722 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1), &LEFT);
1724 if (isnullstartblock(LEFT.br_startblock))
1725 state |= BMAP_LEFT_DELAY;
1728 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
1729 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
1730 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
1731 LEFT.br_state == new->br_state &&
1732 LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
1733 state |= BMAP_LEFT_CONTIG;
1736 * Check and set flags if this segment has a right neighbor.
1737 * Don't set contiguous if the combined extent would be too large.
1738 * Also check for all-three-contiguous being too large.
1740 if (bma->idx < ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) {
1741 state |= BMAP_RIGHT_VALID;
1742 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx + 1), &RIGHT);
1744 if (isnullstartblock(RIGHT.br_startblock))
1745 state |= BMAP_RIGHT_DELAY;
1748 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
1749 new_endoff == RIGHT.br_startoff &&
1750 new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
1751 new->br_state == RIGHT.br_state &&
1752 new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
1753 ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
1754 BMAP_RIGHT_FILLING)) !=
1755 (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
1756 BMAP_RIGHT_FILLING) ||
1757 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
1759 state |= BMAP_RIGHT_CONTIG;
1763 * Switch out based on the FILLING and CONTIG state bits.
1765 switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
1766 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
1767 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
1768 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1770 * Filling in all of a previously delayed allocation extent.
1771 * The left and right neighbors are both contiguous with new.
1774 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
1775 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
1776 LEFT.br_blockcount + PREV.br_blockcount +
1777 RIGHT.br_blockcount);
1778 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
1780 xfs_iext_remove(bma->ip, bma->idx + 1, 2, state);
1781 bma->ip->i_d.di_nextents--;
1782 if (bma->cur == NULL)
1783 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1785 rval = XFS_ILOG_CORE;
1786 error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
1787 RIGHT.br_startblock,
1788 RIGHT.br_blockcount, &i);
1791 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1792 error = xfs_btree_delete(bma->cur, &i);
1795 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1796 error = xfs_btree_decrement(bma->cur, 0, &i);
1799 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1800 error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
1802 LEFT.br_blockcount +
1803 PREV.br_blockcount +
1804 RIGHT.br_blockcount, LEFT.br_state);
1810 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
1812 * Filling in all of a previously delayed allocation extent.
1813 * The left neighbor is contiguous, the right is not.
1817 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
1818 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
1819 LEFT.br_blockcount + PREV.br_blockcount);
1820 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
1822 xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
1823 if (bma->cur == NULL)
1824 rval = XFS_ILOG_DEXT;
1827 error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff,
1828 LEFT.br_startblock, LEFT.br_blockcount,
1832 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1833 error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
1835 LEFT.br_blockcount +
1836 PREV.br_blockcount, LEFT.br_state);
1842 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1844 * Filling in all of a previously delayed allocation extent.
1845 * The right neighbor is contiguous, the left is not.
1847 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
1848 xfs_bmbt_set_startblock(ep, new->br_startblock);
1849 xfs_bmbt_set_blockcount(ep,
1850 PREV.br_blockcount + RIGHT.br_blockcount);
1851 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
1853 xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
1854 if (bma->cur == NULL)
1855 rval = XFS_ILOG_DEXT;
1858 error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
1859 RIGHT.br_startblock,
1860 RIGHT.br_blockcount, &i);
1863 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1864 error = xfs_bmbt_update(bma->cur, PREV.br_startoff,
1866 PREV.br_blockcount +
1867 RIGHT.br_blockcount, PREV.br_state);
1873 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
1875 * Filling in all of a previously delayed allocation extent.
1876 * Neither the left nor right neighbors are contiguous with
1879 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
1880 xfs_bmbt_set_startblock(ep, new->br_startblock);
1881 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
1883 bma->ip->i_d.di_nextents++;
1884 if (bma->cur == NULL)
1885 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1887 rval = XFS_ILOG_CORE;
1888 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
1889 new->br_startblock, new->br_blockcount,
1893 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
1894 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
1895 error = xfs_btree_insert(bma->cur, &i);
1898 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1902 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
1904 * Filling in the first part of a previous delayed allocation.
1905 * The left neighbor is contiguous.
1907 trace_xfs_bmap_pre_update(bma->ip, bma->idx - 1, state, _THIS_IP_);
1908 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx - 1),
1909 LEFT.br_blockcount + new->br_blockcount);
1910 xfs_bmbt_set_startoff(ep,
1911 PREV.br_startoff + new->br_blockcount);
1912 trace_xfs_bmap_post_update(bma->ip, bma->idx - 1, state, _THIS_IP_);
1914 temp = PREV.br_blockcount - new->br_blockcount;
1915 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
1916 xfs_bmbt_set_blockcount(ep, temp);
1917 if (bma->cur == NULL)
1918 rval = XFS_ILOG_DEXT;
1921 error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff,
1922 LEFT.br_startblock, LEFT.br_blockcount,
1926 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1927 error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
1929 LEFT.br_blockcount +
1935 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
1936 startblockval(PREV.br_startblock));
1937 xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
1938 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
1943 case BMAP_LEFT_FILLING:
1945 * Filling in the first part of a previous delayed allocation.
1946 * The left neighbor is not contiguous.
1948 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
1949 xfs_bmbt_set_startoff(ep, new_endoff);
1950 temp = PREV.br_blockcount - new->br_blockcount;
1951 xfs_bmbt_set_blockcount(ep, temp);
1952 xfs_iext_insert(bma->ip, bma->idx, 1, new, state);
1953 bma->ip->i_d.di_nextents++;
1954 if (bma->cur == NULL)
1955 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1957 rval = XFS_ILOG_CORE;
1958 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
1959 new->br_startblock, new->br_blockcount,
1963 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
1964 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
1965 error = xfs_btree_insert(bma->cur, &i);
1968 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1971 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
1972 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1973 bma->firstblock, bma->flist,
1974 &bma->cur, 1, &tmp_rval, whichfork);
1979 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
1980 startblockval(PREV.br_startblock) -
1981 (bma->cur ? bma->cur->bc_private.b.allocated : 0));
1982 ep = xfs_iext_get_ext(ifp, bma->idx + 1);
1983 xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
1984 trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
1987 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1989 * Filling in the last part of a previous delayed allocation.
1990 * The right neighbor is contiguous with the new allocation.
1992 temp = PREV.br_blockcount - new->br_blockcount;
1993 trace_xfs_bmap_pre_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
1994 xfs_bmbt_set_blockcount(ep, temp);
1995 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, bma->idx + 1),
1996 new->br_startoff, new->br_startblock,
1997 new->br_blockcount + RIGHT.br_blockcount,
1999 trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
2000 if (bma->cur == NULL)
2001 rval = XFS_ILOG_DEXT;
2004 error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
2005 RIGHT.br_startblock,
2006 RIGHT.br_blockcount, &i);
2009 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2010 error = xfs_bmbt_update(bma->cur, new->br_startoff,
2012 new->br_blockcount +
2013 RIGHT.br_blockcount,
2019 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
2020 startblockval(PREV.br_startblock));
2021 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
2022 xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
2023 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
2028 case BMAP_RIGHT_FILLING:
2030 * Filling in the last part of a previous delayed allocation.
2031 * The right neighbor is not contiguous.
2033 temp = PREV.br_blockcount - new->br_blockcount;
2034 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
2035 xfs_bmbt_set_blockcount(ep, temp);
2036 xfs_iext_insert(bma->ip, bma->idx + 1, 1, new, state);
2037 bma->ip->i_d.di_nextents++;
2038 if (bma->cur == NULL)
2039 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2041 rval = XFS_ILOG_CORE;
2042 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
2043 new->br_startblock, new->br_blockcount,
2047 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2048 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
2049 error = xfs_btree_insert(bma->cur, &i);
2052 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2055 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
2056 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
2057 bma->firstblock, bma->flist, &bma->cur, 1,
2058 &tmp_rval, whichfork);
2063 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
2064 startblockval(PREV.br_startblock) -
2065 (bma->cur ? bma->cur->bc_private.b.allocated : 0));
2066 ep = xfs_iext_get_ext(ifp, bma->idx);
2067 xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
2068 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
2075 * Filling in the middle part of a previous delayed allocation.
2076 * Contiguity is impossible here.
2077 * This case is avoided almost all the time.
2079 * We start with a delayed allocation:
2081 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+
2084 * and we are allocating:
2085 * +rrrrrrrrrrrrrrrrr+
2088 * and we set it up for insertion as:
2089 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+
2091 * PREV @ idx LEFT RIGHT
2092 * inserted at idx + 1
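 *
 * A note on the pieces built below (matching the original code around this
 * excerpt): PREV keeps the left-hand delayed remainder (length temp), LEFT
 * takes the new real allocation, and RIGHT becomes a fresh delayed extent
 * covering the right-hand remainder (length temp2).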
2094 temp = new->br_startoff - PREV.br_startoff;
2095 temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff;
2096 trace_xfs_bmap_pre_update(bma->ip, bma->idx, 0, _THIS_IP_);
2097 xfs_bmbt_set_blockcount(ep, temp); /* truncate PREV */
2099 RIGHT.br_state = PREV.br_state;
2100 RIGHT.br_startblock = nullstartblock(
2101 (int)xfs_bmap_worst_indlen(bma->ip, temp2));
2102 RIGHT.br_startoff = new_endoff;
2103 RIGHT.br_blockcount = temp2;
2104 /* insert LEFT (r[0]) and RIGHT (r[1]) at the same time */
2105 xfs_iext_insert(bma->ip, bma->idx + 1, 2, &LEFT, state);
2106 bma->ip->i_d.di_nextents++;
2107 if (bma->cur == NULL)
2108 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2110 rval = XFS_ILOG_CORE;
2111 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
2112 new->br_startblock, new->br_blockcount,
2116 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2117 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
2118 error = xfs_btree_insert(bma->cur, &i);
2121 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2124 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
2125 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
2126 bma->firstblock, bma->flist, &bma->cur,
2127 1, &tmp_rval, whichfork);
2132 temp = xfs_bmap_worst_indlen(bma->ip, temp);
2133 temp2 = xfs_bmap_worst_indlen(bma->ip, temp2);
2134 diff = (int)(temp + temp2 - startblockval(PREV.br_startblock) -
2135 (bma->cur ? bma->cur->bc_private.b.allocated : 0));
2137 error = xfs_mod_fdblocks(bma->ip->i_mount,
2138 -((int64_t)diff), false);
2144 ep = xfs_iext_get_ext(ifp, bma->idx);
2145 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
2146 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
2147 trace_xfs_bmap_pre_update(bma->ip, bma->idx + 2, state, _THIS_IP_);
2148 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, bma->idx + 2),
2149 nullstartblock((int)temp2));
2150 trace_xfs_bmap_post_update(bma->ip, bma->idx + 2, state, _THIS_IP_);
2153 da_new = temp + temp2;
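		/*
		 * da_new carries the combined worst-case indirect block
		 * reservation for the two remaining delayed extents down to
		 * the common free-block-count fix-up at the end of the
		 * function.
		 */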
2156 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2157 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2158 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
2159 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2160 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2161 case BMAP_LEFT_CONTIG:
2162 case BMAP_RIGHT_CONTIG:
2164 * These cases are all impossible.
2169 /* convert to a btree if necessary */
2170 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
2171 int tmp_logflags; /* partial log flag return val */
2173 ASSERT(bma->cur == NULL);
2174 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
2175 bma->firstblock, bma->flist, &bma->cur,
2176 da_old > 0, &tmp_logflags, whichfork);
2177 bma->logflags |= tmp_logflags;
2182 /* adjust for changes in reserved delayed indirect blocks */
2183 if (da_old || da_new) {
2186 temp += bma->cur->bc_private.b.allocated;
2187 ASSERT(temp <= da_old);
2189 xfs_mod_fdblocks(bma->ip->i_mount,
2190 (int64_t)(da_old - temp), false);
2193 /* clear out the allocated field, done with it now in any case. */
2195 bma->cur->bc_private.b.allocated = 0;
2197 xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
2199 bma->logflags |= rval;
2207 * Convert an unwritten allocation to a real allocation or vice versa.
2209 STATIC int /* error */
2210 xfs_bmap_add_extent_unwritten_real(
2211 struct xfs_trans *tp,
2212 xfs_inode_t *ip, /* incore inode pointer */
2213 xfs_extnum_t *idx, /* extent number to update/insert */
2214 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
2215 xfs_bmbt_irec_t *new, /* new data to add to file extents */
2216 xfs_fsblock_t *first, /* pointer to firstblock variable */
2217 xfs_bmap_free_t *flist, /* list of extents to be freed */
2218 int *logflagsp) /* inode logging flags */
2220 xfs_btree_cur_t *cur; /* btree cursor */
2221 xfs_bmbt_rec_host_t *ep; /* extent entry for idx */
2222 int error; /* error return value */
2223 int i; /* temp state */
2224 xfs_ifork_t *ifp; /* inode fork pointer */
2225 xfs_fileoff_t new_endoff; /* end offset of new entry */
2226 xfs_exntst_t newext; /* new extent state */
2227 xfs_exntst_t oldext; /* old extent state */
2228 xfs_bmbt_irec_t r[3]; /* neighbor extent entries */
2229 /* left is 0, right is 1, prev is 2 */
2230 int rval=0; /* return value (logging flags) */
2231 int state = 0;/* state bits, accessed thru macros */
2232 struct xfs_mount *mp = tp->t_mountp;
2237 ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
2240 ASSERT(*idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
2241 ASSERT(!isnullstartblock(new->br_startblock));
2243 XFS_STATS_INC(mp, xs_add_exlist);
2250 * Set up a bunch of variables to make the tests simpler.
2253 ep = xfs_iext_get_ext(ifp, *idx);
2254 xfs_bmbt_get_all(ep, &PREV);
2255 newext = new->br_state;
2256 oldext = (newext == XFS_EXT_UNWRITTEN) ?
2257 XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
2258 ASSERT(PREV.br_state == oldext);
2259 new_endoff = new->br_startoff + new->br_blockcount;
2260 ASSERT(PREV.br_startoff <= new->br_startoff);
2261 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
2264 * Set flags determining what part of the previous oldext allocation
2265 * extent is being replaced by a newext allocation.
2267 if (PREV.br_startoff == new->br_startoff)
2268 state |= BMAP_LEFT_FILLING;
2269 if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
2270 state |= BMAP_RIGHT_FILLING;
2273 * Check and set flags if this segment has a left neighbor.
2274 * Don't set contiguous if the combined extent would be too large.
2277 state |= BMAP_LEFT_VALID;
2278 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &LEFT);
2280 if (isnullstartblock(LEFT.br_startblock))
2281 state |= BMAP_LEFT_DELAY;
2284 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
2285 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
2286 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
2287 LEFT.br_state == newext &&
2288 LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2289 state |= BMAP_LEFT_CONTIG;
2292 * Check and set flags if this segment has a right neighbor.
2293 * Don't set contiguous if the combined extent would be too large.
2294 * Also check for all-three-contiguous being too large.
2296 if (*idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) {
2297 state |= BMAP_RIGHT_VALID;
2298 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx + 1), &RIGHT);
2299 if (isnullstartblock(RIGHT.br_startblock))
2300 state |= BMAP_RIGHT_DELAY;
2303 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
2304 new_endoff == RIGHT.br_startoff &&
2305 new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
2306 newext == RIGHT.br_state &&
2307 new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
2308 ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
2309 BMAP_RIGHT_FILLING)) !=
2310 (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
2311 BMAP_RIGHT_FILLING) ||
2312 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
2314 state |= BMAP_RIGHT_CONTIG;
2317 * Switch out based on the FILLING and CONTIG state bits.
2319 switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
2320 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
2321 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
2322 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2324 * Setting all of a previous oldext extent to newext.
2325 * The left and right neighbors are both contiguous with new.
2329 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2330 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
2331 LEFT.br_blockcount + PREV.br_blockcount +
2332 RIGHT.br_blockcount);
2333 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2335 xfs_iext_remove(ip, *idx + 1, 2, state);
2336 ip->i_d.di_nextents -= 2;
2338 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2340 rval = XFS_ILOG_CORE;
2341 if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
2342 RIGHT.br_startblock,
2343 RIGHT.br_blockcount, &i)))
2345 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2346 if ((error = xfs_btree_delete(cur, &i)))
2348 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2349 if ((error = xfs_btree_decrement(cur, 0, &i)))
2351 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2352 if ((error = xfs_btree_delete(cur, &i)))
2354 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2355 if ((error = xfs_btree_decrement(cur, 0, &i)))
2357 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2358 if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
2360 LEFT.br_blockcount + PREV.br_blockcount +
2361 RIGHT.br_blockcount, LEFT.br_state)))
2366 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2368 * Setting all of a previous oldext extent to newext.
2369 * The left neighbor is contiguous, the right is not.
2373 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2374 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
2375 LEFT.br_blockcount + PREV.br_blockcount);
2376 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2378 xfs_iext_remove(ip, *idx + 1, 1, state);
2379 ip->i_d.di_nextents--;
2381 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2383 rval = XFS_ILOG_CORE;
2384 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2385 PREV.br_startblock, PREV.br_blockcount,
2388 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2389 if ((error = xfs_btree_delete(cur, &i)))
2391 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2392 if ((error = xfs_btree_decrement(cur, 0, &i)))
2394 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2395 if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
2397 LEFT.br_blockcount + PREV.br_blockcount,
2403 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2405 * Setting all of a previous oldext extent to newext.
2406 * The right neighbor is contiguous, the left is not.
2408 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2409 xfs_bmbt_set_blockcount(ep,
2410 PREV.br_blockcount + RIGHT.br_blockcount);
2411 xfs_bmbt_set_state(ep, newext);
2412 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2413 xfs_iext_remove(ip, *idx + 1, 1, state);
2414 ip->i_d.di_nextents--;
2416 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2418 rval = XFS_ILOG_CORE;
2419 if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
2420 RIGHT.br_startblock,
2421 RIGHT.br_blockcount, &i)))
2423 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2424 if ((error = xfs_btree_delete(cur, &i)))
2426 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2427 if ((error = xfs_btree_decrement(cur, 0, &i)))
2429 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2430 if ((error = xfs_bmbt_update(cur, new->br_startoff,
2432 new->br_blockcount + RIGHT.br_blockcount,
2438 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
2440 * Setting all of a previous oldext extent to newext.
2441 * Neither the left nor right neighbors are contiguous with the new one.
2444 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2445 xfs_bmbt_set_state(ep, newext);
2446 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2449 rval = XFS_ILOG_DEXT;
2452 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
2453 new->br_startblock, new->br_blockcount,
2456 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2457 if ((error = xfs_bmbt_update(cur, new->br_startoff,
2458 new->br_startblock, new->br_blockcount,
2464 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
2466 * Setting the first part of a previous oldext extent to newext.
2467 * The left neighbor is contiguous.
2469 trace_xfs_bmap_pre_update(ip, *idx - 1, state, _THIS_IP_);
2470 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx - 1),
2471 LEFT.br_blockcount + new->br_blockcount);
2472 xfs_bmbt_set_startoff(ep,
2473 PREV.br_startoff + new->br_blockcount);
2474 trace_xfs_bmap_post_update(ip, *idx - 1, state, _THIS_IP_);
2476 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2477 xfs_bmbt_set_startblock(ep,
2478 new->br_startblock + new->br_blockcount);
2479 xfs_bmbt_set_blockcount(ep,
2480 PREV.br_blockcount - new->br_blockcount);
2481 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2486 rval = XFS_ILOG_DEXT;
2489 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2490 PREV.br_startblock, PREV.br_blockcount,
2493 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2494 if ((error = xfs_bmbt_update(cur,
2495 PREV.br_startoff + new->br_blockcount,
2496 PREV.br_startblock + new->br_blockcount,
2497 PREV.br_blockcount - new->br_blockcount,
2500 if ((error = xfs_btree_decrement(cur, 0, &i)))
2502 error = xfs_bmbt_update(cur, LEFT.br_startoff,
2504 LEFT.br_blockcount + new->br_blockcount,
2511 case BMAP_LEFT_FILLING:
2513 * Setting the first part of a previous oldext extent to newext.
2514 * The left neighbor is not contiguous.
2516 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2517 ASSERT(ep && xfs_bmbt_get_state(ep) == oldext);
2518 xfs_bmbt_set_startoff(ep, new_endoff);
2519 xfs_bmbt_set_blockcount(ep,
2520 PREV.br_blockcount - new->br_blockcount);
2521 xfs_bmbt_set_startblock(ep,
2522 new->br_startblock + new->br_blockcount);
2523 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2525 xfs_iext_insert(ip, *idx, 1, new, state);
2526 ip->i_d.di_nextents++;
2528 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2530 rval = XFS_ILOG_CORE;
2531 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2532 PREV.br_startblock, PREV.br_blockcount,
2535 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2536 if ((error = xfs_bmbt_update(cur,
2537 PREV.br_startoff + new->br_blockcount,
2538 PREV.br_startblock + new->br_blockcount,
2539 PREV.br_blockcount - new->br_blockcount,
2542 cur->bc_rec.b = *new;
2543 if ((error = xfs_btree_insert(cur, &i)))
2545 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2549 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2551 * Setting the last part of a previous oldext extent to newext.
2552 * The right neighbor is contiguous with the new allocation.
2554 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2555 xfs_bmbt_set_blockcount(ep,
2556 PREV.br_blockcount - new->br_blockcount);
2557 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2561 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2562 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
2563 new->br_startoff, new->br_startblock,
2564 new->br_blockcount + RIGHT.br_blockcount, newext);
2565 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2568 rval = XFS_ILOG_DEXT;
2571 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2573 PREV.br_blockcount, &i)))
2575 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2576 if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
2578 PREV.br_blockcount - new->br_blockcount,
2581 if ((error = xfs_btree_increment(cur, 0, &i)))
2583 if ((error = xfs_bmbt_update(cur, new->br_startoff,
2585 new->br_blockcount + RIGHT.br_blockcount,
2591 case BMAP_RIGHT_FILLING:
2593 * Setting the last part of a previous oldext extent to newext.
2594 * The right neighbor is not contiguous.
2596 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2597 xfs_bmbt_set_blockcount(ep,
2598 PREV.br_blockcount - new->br_blockcount);
2599 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2602 xfs_iext_insert(ip, *idx, 1, new, state);
2604 ip->i_d.di_nextents++;
2606 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2608 rval = XFS_ILOG_CORE;
2609 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2610 PREV.br_startblock, PREV.br_blockcount,
2613 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2614 if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
2616 PREV.br_blockcount - new->br_blockcount,
2619 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
2620 new->br_startblock, new->br_blockcount,
2623 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2624 cur->bc_rec.b.br_state = XFS_EXT_NORM;
2625 if ((error = xfs_btree_insert(cur, &i)))
2627 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2633 * Setting the middle part of a previous oldext extent to
2634 * newext. Contiguity is impossible here.
2635 * One extent becomes three extents.
2637 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2638 xfs_bmbt_set_blockcount(ep,
2639 new->br_startoff - PREV.br_startoff);
2640 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2643 r[1].br_startoff = new_endoff;
2644 r[1].br_blockcount =
2645 PREV.br_startoff + PREV.br_blockcount - new_endoff;
2646 r[1].br_startblock = new->br_startblock + new->br_blockcount;
2647 r[1].br_state = oldext;
2650 xfs_iext_insert(ip, *idx, 2, &r[0], state);
2652 ip->i_d.di_nextents += 2;
2654 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2656 rval = XFS_ILOG_CORE;
2657 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2658 PREV.br_startblock, PREV.br_blockcount,
2661 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2662 /* new right extent - oldext */
2663 if ((error = xfs_bmbt_update(cur, r[1].br_startoff,
2664 r[1].br_startblock, r[1].br_blockcount,
2667 /* new left extent - oldext */
2668 cur->bc_rec.b = PREV;
2669 cur->bc_rec.b.br_blockcount =
2670 new->br_startoff - PREV.br_startoff;
2671 if ((error = xfs_btree_insert(cur, &i)))
2673 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2675 * Reset the cursor to the position of the new extent
2676 * we are about to insert as we can't trust it after
2677 * the previous insert.
2679 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
2680 new->br_startblock, new->br_blockcount,
2683 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2684 /* new middle extent - newext */
2685 cur->bc_rec.b.br_state = new->br_state;
2686 if ((error = xfs_btree_insert(cur, &i)))
2688 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2692 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2693 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2694 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
2695 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2696 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2697 case BMAP_LEFT_CONTIG:
2698 case BMAP_RIGHT_CONTIG:
2700 * These cases are all impossible.
2705 /* convert to a btree if necessary */
2706 if (xfs_bmap_needs_btree(ip, XFS_DATA_FORK)) {
2707 int tmp_logflags; /* partial log flag return val */
2709 ASSERT(cur == NULL);
2710 error = xfs_bmap_extents_to_btree(tp, ip, first, flist, &cur,
2711 0, &tmp_logflags, XFS_DATA_FORK);
2712 *logflagsp |= tmp_logflags;
2717 /* clear out the allocated field, done with it now in any case. */
2719 cur->bc_private.b.allocated = 0;
2723 xfs_bmap_check_leaf_extents(*curp, ip, XFS_DATA_FORK);
2733 * Convert a hole to a delayed allocation.
2736 xfs_bmap_add_extent_hole_delay(
2737 xfs_inode_t *ip, /* incore inode pointer */
2738 xfs_extnum_t *idx, /* extent number to update/insert */
2739 xfs_bmbt_irec_t *new) /* new data to add to file extents */
2741 xfs_ifork_t *ifp; /* inode fork pointer */
2742 xfs_bmbt_irec_t left; /* left neighbor extent entry */
2743 xfs_filblks_t newlen=0; /* new indirect size */
2744 xfs_filblks_t oldlen=0; /* old indirect size */
2745 xfs_bmbt_irec_t right; /* right neighbor extent entry */
2746 int state; /* state bits, accessed thru macros */
2747 xfs_filblks_t temp=0; /* temp for indirect calculations */
2749 ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
2751 ASSERT(isnullstartblock(new->br_startblock));
2754 * Check and set flags if this segment has a left neighbor
2757 state |= BMAP_LEFT_VALID;
2758 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &left);
2760 if (isnullstartblock(left.br_startblock))
2761 state |= BMAP_LEFT_DELAY;
2765 * Check and set flags if the current (right) segment exists.
2766 * If it doesn't exist, we're converting the hole at end-of-file.
2768 if (*idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) {
2769 state |= BMAP_RIGHT_VALID;
2770 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &right);
2772 if (isnullstartblock(right.br_startblock))
2773 state |= BMAP_RIGHT_DELAY;
2777 * Set contiguity flags on the left and right neighbors.
2778 * Don't let extents get too large, even if the pieces are contiguous.
2780 if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) &&
2781 left.br_startoff + left.br_blockcount == new->br_startoff &&
2782 left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2783 state |= BMAP_LEFT_CONTIG;
2785 if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) &&
2786 new->br_startoff + new->br_blockcount == right.br_startoff &&
2787 new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
2788 (!(state & BMAP_LEFT_CONTIG) ||
2789 (left.br_blockcount + new->br_blockcount +
2790 right.br_blockcount <= MAXEXTLEN)))
2791 state |= BMAP_RIGHT_CONTIG;
2794 * Switch out based on the contiguity flags.
2796 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2797 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2799 * New allocation is contiguous with delayed allocations
2800 * on the left and on the right.
2801 * Merge all three into a single extent record.
2804 temp = left.br_blockcount + new->br_blockcount +
2805 right.br_blockcount;
2807 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2808 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
2809 oldlen = startblockval(left.br_startblock) +
2810 startblockval(new->br_startblock) +
2811 startblockval(right.br_startblock);
2812 newlen = xfs_bmap_worst_indlen(ip, temp);
2813 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
2814 nullstartblock((int)newlen));
2815 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2817 xfs_iext_remove(ip, *idx + 1, 1, state);
2820 case BMAP_LEFT_CONTIG:
2822 * New allocation is contiguous with a delayed allocation on the left.
2824 * Merge the new allocation with the left neighbor.
2827 temp = left.br_blockcount + new->br_blockcount;
2829 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2830 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
2831 oldlen = startblockval(left.br_startblock) +
2832 startblockval(new->br_startblock);
2833 newlen = xfs_bmap_worst_indlen(ip, temp);
2834 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
2835 nullstartblock((int)newlen));
2836 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2839 case BMAP_RIGHT_CONTIG:
2841 * New allocation is contiguous with a delayed allocation on the right.
2843 * Merge the new allocation with the right neighbor.
2845 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2846 temp = new->br_blockcount + right.br_blockcount;
2847 oldlen = startblockval(new->br_startblock) +
2848 startblockval(right.br_startblock);
2849 newlen = xfs_bmap_worst_indlen(ip, temp);
2850 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
2852 nullstartblock((int)newlen), temp, right.br_state);
2853 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2858 * New allocation is not contiguous with another
2859 * delayed allocation.
2860 * Insert a new entry.
2862 oldlen = newlen = 0;
2863 xfs_iext_insert(ip, *idx, 1, new, state);
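	/*
	 * In the merge cases above, oldlen is the sum of the worst-case
	 * indirect reservations the old extents held and newlen is the single
	 * reservation the merged extent needs; merging never increases the
	 * requirement, so any surplus is handed back to the free block count
	 * below.
	 */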
2866 if (oldlen != newlen) {
2867 ASSERT(oldlen > newlen);
2868 xfs_mod_fdblocks(ip->i_mount, (int64_t)(oldlen - newlen),
2871 * Nothing to do for disk quota accounting here.
2877 * Convert a hole to a real allocation.
2879 STATIC int /* error */
2880 xfs_bmap_add_extent_hole_real(
2881 struct xfs_bmalloca *bma,
2884 struct xfs_bmbt_irec *new = &bma->got;
2885 int error; /* error return value */
2886 int i; /* temp state */
2887 xfs_ifork_t *ifp; /* inode fork pointer */
2888 xfs_bmbt_irec_t left; /* left neighbor extent entry */
2889 xfs_bmbt_irec_t right; /* right neighbor extent entry */
2890 int rval=0; /* return value (logging flags) */
2891 int state; /* state bits, accessed thru macros */
2892 struct xfs_mount *mp;
2894 mp = bma->ip->i_mount;
2895 ifp = XFS_IFORK_PTR(bma->ip, whichfork);
2897 ASSERT(bma->idx >= 0);
2898 ASSERT(bma->idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
2899 ASSERT(!isnullstartblock(new->br_startblock));
2901 !(bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
2903 XFS_STATS_INC(mp, xs_add_exlist);
2906 if (whichfork == XFS_ATTR_FORK)
2907 state |= BMAP_ATTRFORK;
2910 * Check and set flags if this segment has a left neighbor.
2913 state |= BMAP_LEFT_VALID;
2914 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1), &left);
2915 if (isnullstartblock(left.br_startblock))
2916 state |= BMAP_LEFT_DELAY;
2920 * Check and set flags if this segment has a current value.
2921 * Not true if we're inserting into the "hole" at eof.
2923 if (bma->idx < ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) {
2924 state |= BMAP_RIGHT_VALID;
2925 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &right);
2926 if (isnullstartblock(right.br_startblock))
2927 state |= BMAP_RIGHT_DELAY;
2931 * We're inserting a real allocation between "left" and "right".
2932 * Set the contiguity flags. Don't let extents get too large.
2934 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
2935 left.br_startoff + left.br_blockcount == new->br_startoff &&
2936 left.br_startblock + left.br_blockcount == new->br_startblock &&
2937 left.br_state == new->br_state &&
2938 left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2939 state |= BMAP_LEFT_CONTIG;
2941 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
2942 new->br_startoff + new->br_blockcount == right.br_startoff &&
2943 new->br_startblock + new->br_blockcount == right.br_startblock &&
2944 new->br_state == right.br_state &&
2945 new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
2946 (!(state & BMAP_LEFT_CONTIG) ||
2947 left.br_blockcount + new->br_blockcount +
2948 right.br_blockcount <= MAXEXTLEN))
2949 state |= BMAP_RIGHT_CONTIG;
2953 * Select which case we're in here, and implement it.
2955 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2956 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2958 * New allocation is contiguous with real allocations on the
2959 * left and on the right.
2960 * Merge all three into a single extent record.
2963 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
2964 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
2965 left.br_blockcount + new->br_blockcount +
2966 right.br_blockcount);
2967 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
2969 xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
2971 XFS_IFORK_NEXT_SET(bma->ip, whichfork,
2972 XFS_IFORK_NEXTENTS(bma->ip, whichfork) - 1);
2973 if (bma->cur == NULL) {
2974 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
2976 rval = XFS_ILOG_CORE;
2977 error = xfs_bmbt_lookup_eq(bma->cur, right.br_startoff,
2978 right.br_startblock, right.br_blockcount,
2982 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2983 error = xfs_btree_delete(bma->cur, &i);
2986 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2987 error = xfs_btree_decrement(bma->cur, 0, &i);
2990 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2991 error = xfs_bmbt_update(bma->cur, left.br_startoff,
2993 left.br_blockcount +
2994 new->br_blockcount +
2995 right.br_blockcount,
3002 case BMAP_LEFT_CONTIG:
3004 * New allocation is contiguous with a real allocation on the left.
3006 * Merge the new allocation with the left neighbor.
3009 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
3010 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
3011 left.br_blockcount + new->br_blockcount);
3012 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
3014 if (bma->cur == NULL) {
3015 rval = xfs_ilog_fext(whichfork);
3018 error = xfs_bmbt_lookup_eq(bma->cur, left.br_startoff,
3019 left.br_startblock, left.br_blockcount,
3023 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
3024 error = xfs_bmbt_update(bma->cur, left.br_startoff,
3026 left.br_blockcount +
3034 case BMAP_RIGHT_CONTIG:
3036 * New allocation is contiguous with a real allocation on the right.
3038 * Merge the new allocation with the right neighbor.
3040 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
3041 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, bma->idx),
3042 new->br_startoff, new->br_startblock,
3043 new->br_blockcount + right.br_blockcount,
3045 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
3047 if (bma->cur == NULL) {
3048 rval = xfs_ilog_fext(whichfork);
3051 error = xfs_bmbt_lookup_eq(bma->cur,
3053 right.br_startblock,
3054 right.br_blockcount, &i);
3057 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
3058 error = xfs_bmbt_update(bma->cur, new->br_startoff,
3060 new->br_blockcount +
3061 right.br_blockcount,
3070 * New allocation is not contiguous with another real allocation.
3072 * Insert a new entry.
3074 xfs_iext_insert(bma->ip, bma->idx, 1, new, state);
3075 XFS_IFORK_NEXT_SET(bma->ip, whichfork,
3076 XFS_IFORK_NEXTENTS(bma->ip, whichfork) + 1);
3077 if (bma->cur == NULL) {
3078 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
3080 rval = XFS_ILOG_CORE;
3081 error = xfs_bmbt_lookup_eq(bma->cur,
3084 new->br_blockcount, &i);
3087 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
3088 bma->cur->bc_rec.b.br_state = new->br_state;
3089 error = xfs_btree_insert(bma->cur, &i);
3092 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
3097 /* convert to a btree if necessary */
3098 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
3099 int tmp_logflags; /* partial log flag return val */
3101 ASSERT(bma->cur == NULL);
3102 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
3103 bma->firstblock, bma->flist, &bma->cur,
3104 0, &tmp_logflags, whichfork);
3105 bma->logflags |= tmp_logflags;
3110 /* clear out the allocated field, done with it now in any case. */
3112 bma->cur->bc_private.b.allocated = 0;
3114 xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
3116 bma->logflags |= rval;
3121 * Functions used in the extent read, allocate and remove paths
3125 * Adjust the size of the new extent based on di_extsize and rt extsize.
3128 xfs_bmap_extsize_align(
3130 xfs_bmbt_irec_t *gotp, /* next extent pointer */
3131 xfs_bmbt_irec_t *prevp, /* previous extent pointer */
3132 xfs_extlen_t extsz, /* align to this extent size */
3133 int rt, /* is this a realtime inode? */
3134 int eof, /* is extent at end-of-file? */
3135 int delay, /* creating delalloc extent? */
3136 int convert, /* overwriting unwritten extent? */
3137 xfs_fileoff_t *offp, /* in/out: aligned offset */
3138 xfs_extlen_t *lenp) /* in/out: aligned length */
3140 xfs_fileoff_t orig_off; /* original offset */
3141 xfs_extlen_t orig_alen; /* original length */
3142 xfs_fileoff_t orig_end; /* original off+len */
3143 xfs_fileoff_t nexto; /* next file offset */
3144 xfs_fileoff_t prevo; /* previous file offset */
3145 xfs_fileoff_t align_off; /* temp for offset */
3146 xfs_extlen_t align_alen; /* temp for length */
3147 xfs_extlen_t temp; /* temp for calculations */
3152 orig_off = align_off = *offp;
3153 orig_alen = align_alen = *lenp;
3154 orig_end = orig_off + orig_alen;
3157 * If this request overlaps an existing extent, then don't
3158 * attempt to perform any additional alignment.
3160 if (!delay && !eof &&
3161 (orig_off >= gotp->br_startoff) &&
3162 (orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
3167 * If the file offset is unaligned vs. the extent size
3168 * we need to align it. This will be possible unless
3169 * the file was previously written with a kernel that didn't
3170 * perform this alignment, or if a truncate shot us in the foot.
3173 temp = do_mod(orig_off, extsz);
3179 /* Same adjustment for the end of the requested area. */
3180 temp = (align_alen % extsz);
3182 align_alen += extsz - temp;
3185 * For large extent hint sizes, the aligned extent might be larger than
3186 * MAXEXTLEN. In that case, reduce the size by an extsz so that it pulls
3187 * the length back under MAXEXTLEN. The outer allocation loops handle
3188 * short allocation just fine, so it is safe to do this. We only want to
3189 * do it when we are forced to, though, because it means more allocation
3190 * operations are required.
3192 while (align_alen > MAXEXTLEN)
3193 align_alen -= extsz;
3194 ASSERT(align_alen <= MAXEXTLEN);
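	/*
	 * Illustrative numbers (assuming 4k blocks and a 1GiB extent size
	 * hint): MAXEXTLEN is 2097151 blocks, just under 8GiB at 4k; a 1GiB
	 * hint is 262144 blocks, so an aligned length of, say, 2200000 blocks
	 * is trimmed by one extsz step to 1937856 blocks, back under the
	 * limit.
	 */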
3197 * If the previous block overlaps with this proposed allocation
3198 * then move the start forward without adjusting the length.
3200 if (prevp->br_startoff != NULLFILEOFF) {
3201 if (prevp->br_startblock == HOLESTARTBLOCK)
3202 prevo = prevp->br_startoff;
3204 prevo = prevp->br_startoff + prevp->br_blockcount;
3207 if (align_off != orig_off && align_off < prevo)
3210 * If the next block overlaps with this proposed allocation
3211 * then move the start back without adjusting the length,
3212 * but not before offset 0.
3213 * This may of course make the start overlap previous block,
3214 * and if we hit the offset 0 limit then the next block
3215 * can still overlap too.
3217 if (!eof && gotp->br_startoff != NULLFILEOFF) {
3218 if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
3219 (!delay && gotp->br_startblock == DELAYSTARTBLOCK))
3220 nexto = gotp->br_startoff + gotp->br_blockcount;
3222 nexto = gotp->br_startoff;
3224 nexto = NULLFILEOFF;
3226 align_off + align_alen != orig_end &&
3227 align_off + align_alen > nexto)
3228 align_off = nexto > align_alen ? nexto - align_alen : 0;
3230 * If we're now overlapping the next or previous extent that
3231 * means we can't fit an extsz piece in this hole. Just move
3232 * the start forward to the first valid spot and set
3233 * the length so we hit the end.
3235 if (align_off != orig_off && align_off < prevo)
3237 if (align_off + align_alen != orig_end &&
3238 align_off + align_alen > nexto &&
3239 nexto != NULLFILEOFF) {
3240 ASSERT(nexto > prevo);
3241 align_alen = nexto - align_off;
3245 * If realtime, and the result isn't a multiple of the realtime
3246 * extent size we need to remove blocks until it is.
3248 if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) {
3250 * We're not covering the original request, or
3251 * we won't be able to once we fix the length.
3253 if (orig_off < align_off ||
3254 orig_end > align_off + align_alen ||
3255 align_alen - temp < orig_alen)
3258 * Try to fix it by moving the start up.
3260 if (align_off + temp <= orig_off) {
3265 * Try to fix it by moving the end in.
3267 else if (align_off + align_alen - temp >= orig_end)
3270 * Set the start to the minimum then trim the length.
3273 align_alen -= orig_off - align_off;
3274 align_off = orig_off;
3275 align_alen -= align_alen % mp->m_sb.sb_rextsize;
3278 * Result doesn't cover the request, fail it.
3280 if (orig_off < align_off || orig_end > align_off + align_alen)
3283 ASSERT(orig_off >= align_off);
3284 /* see MAXEXTLEN handling above */
3285 ASSERT(orig_end <= align_off + align_alen ||
3286 align_alen + extsz > MAXEXTLEN);
3290 if (!eof && gotp->br_startoff != NULLFILEOFF)
3291 ASSERT(align_off + align_alen <= gotp->br_startoff);
3292 if (prevp->br_startoff != NULLFILEOFF)
3293 ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
3301 #define XFS_ALLOC_GAP_UNITS 4
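/*
 * When choosing a target block next to a neighbouring extent, the file-offset
 * gap between that neighbour and the new allocation is only folded into the
 * target if it is no larger than XFS_ALLOC_GAP_UNITS times the length being
 * allocated; bigger gaps just use the block adjacent to the neighbour.
 */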
3305 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
3307 xfs_fsblock_t adjust; /* adjustment to block numbers */
3308 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */
3309 xfs_mount_t *mp; /* mount point structure */
3310 int nullfb; /* true if ap->firstblock isn't set */
3311 int rt; /* true if inode is realtime */
3313 #define ISVALID(x,y) \
3315 (x) < mp->m_sb.sb_rblocks : \
3316 XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \
3317 XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \
3318 XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks)
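/*
 * ISVALID(x, y): for realtime files x must lie within the realtime device;
 * otherwise x must sit in the same AG as y and within that AG's bounds.
 */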
3320 mp = ap->ip->i_mount;
3321 nullfb = *ap->firstblock == NULLFSBLOCK;
3322 rt = XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata;
3323 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
3325 * If allocating at eof, and there's a previous real block,
3326 * try to use its last block as our starting point.
3328 if (ap->eof && ap->prev.br_startoff != NULLFILEOFF &&
3329 !isnullstartblock(ap->prev.br_startblock) &&
3330 ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount,
3331 ap->prev.br_startblock)) {
3332 ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount;
3334 * Adjust for the gap between prevp and us.
3336 adjust = ap->offset -
3337 (ap->prev.br_startoff + ap->prev.br_blockcount);
3339 ISVALID(ap->blkno + adjust, ap->prev.br_startblock))
3340 ap->blkno += adjust;
3343 * If not at eof, then compare the two neighbor blocks.
3344 * Figure out whether either one gives us a good starting point,
3345 * and pick the better one.
3347 else if (!ap->eof) {
3348 xfs_fsblock_t gotbno; /* right side block number */
3349 xfs_fsblock_t gotdiff=0; /* right side difference */
3350 xfs_fsblock_t prevbno; /* left side block number */
3351 xfs_fsblock_t prevdiff=0; /* left side difference */
3354 * If there's a previous (left) block, select a requested
3355 * start block based on it.
3357 if (ap->prev.br_startoff != NULLFILEOFF &&
3358 !isnullstartblock(ap->prev.br_startblock) &&
3359 (prevbno = ap->prev.br_startblock +
3360 ap->prev.br_blockcount) &&
3361 ISVALID(prevbno, ap->prev.br_startblock)) {
3363 * Calculate gap to end of previous block.
3365 adjust = prevdiff = ap->offset -
3366 (ap->prev.br_startoff +
3367 ap->prev.br_blockcount);
3369 * Figure the startblock based on the previous block's
3370 * end and the gap size.
3372 * If the gap is large relative to the piece we're
3373 * allocating, or using it gives us an invalid block
3374 * number, then just use the end of the previous block.
3376 if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3377 ISVALID(prevbno + prevdiff,
3378 ap->prev.br_startblock))
3383 * If the firstblock forbids it, can't use it, otherwise must use it.
3386 if (!rt && !nullfb &&
3387 XFS_FSB_TO_AGNO(mp, prevbno) != fb_agno)
3388 prevbno = NULLFSBLOCK;
3391 * No previous block or can't follow it, just default.
3394 prevbno = NULLFSBLOCK;
3396 * If there's a following (right) block, select a requested
3397 * start block based on it.
3399 if (!isnullstartblock(ap->got.br_startblock)) {
3401 * Calculate gap to start of next block.
3403 adjust = gotdiff = ap->got.br_startoff - ap->offset;
3405 * Figure the startblock based on the next block's
3406 * start and the gap size.
3408 gotbno = ap->got.br_startblock;
3411 * If the gap is large relative to the piece we're
3412 * allocating, or using it gives us an invalid block
3413 * number, then just use the start of the next block
3414 * offset by our length.
3416 if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3417 ISVALID(gotbno - gotdiff, gotbno))
3419 else if (ISVALID(gotbno - ap->length, gotbno)) {
3420 gotbno -= ap->length;
3421 gotdiff += adjust - ap->length;
3425 * If the firstblock forbids it, can't use it, otherwise must use it.
3428 if (!rt && !nullfb &&
3429 XFS_FSB_TO_AGNO(mp, gotbno) != fb_agno)
3430 gotbno = NULLFSBLOCK;
3433 * No next block, just default.
3436 gotbno = NULLFSBLOCK;
3438 * If both valid, pick the better one, else the only good
3439 * one, else ap->blkno is already set (to 0 or the inode block).
3441 if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK)
3442 ap->blkno = prevdiff <= gotdiff ? prevbno : gotbno;
3443 else if (prevbno != NULLFSBLOCK)
3444 ap->blkno = prevbno;
3445 else if (gotbno != NULLFSBLOCK)
3452 xfs_bmap_longest_free_extent(
3453 struct xfs_trans *tp,
3458 struct xfs_mount *mp = tp->t_mountp;
3459 struct xfs_perag *pag;
3460 xfs_extlen_t longest;
3463 pag = xfs_perag_get(mp, ag);
3464 if (!pag->pagf_init) {
3465 error = xfs_alloc_pagf_init(mp, tp, ag, XFS_ALLOC_FLAG_TRYLOCK);
3469 if (!pag->pagf_init) {
3475 longest = xfs_alloc_longest_free_extent(mp, pag,
3476 xfs_alloc_min_freelist(mp, pag));
3477 if (*blen < longest)
3486 xfs_bmap_select_minlen(
3487 struct xfs_bmalloca *ap,
3488 struct xfs_alloc_arg *args,
3492 if (notinit || *blen < ap->minlen) {
3494 * Since we did a BUF_TRYLOCK above, it is possible that
3495 * there is space for this request.
3497 args->minlen = ap->minlen;
3498 } else if (*blen < args->maxlen) {
3500 * If the best seen length is less than the request length,
3501 * use the best as the minimum.
3503 args->minlen = *blen;
3506 * Otherwise we've seen an extent as big as maxlen, use that as the minimum.
3509 args->minlen = args->maxlen;
3514 xfs_bmap_btalloc_nullfb(
3515 struct xfs_bmalloca *ap,
3516 struct xfs_alloc_arg *args,
3519 struct xfs_mount *mp = ap->ip->i_mount;
3520 xfs_agnumber_t ag, startag;
3524 args->type = XFS_ALLOCTYPE_START_BNO;
3525 args->total = ap->total;
3527 startag = ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
3528 if (startag == NULLAGNUMBER)
3531 while (*blen < args->maxlen) {
3532 error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
3537 if (++ag == mp->m_sb.sb_agcount)
3543 xfs_bmap_select_minlen(ap, args, blen, notinit);
3548 xfs_bmap_btalloc_filestreams(
3549 struct xfs_bmalloca *ap,
3550 struct xfs_alloc_arg *args,
3553 struct xfs_mount *mp = ap->ip->i_mount;
3558 args->type = XFS_ALLOCTYPE_NEAR_BNO;
3559 args->total = ap->total;
3561 ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
3562 if (ag == NULLAGNUMBER)
3565 error = xfs_bmap_longest_free_extent(args->tp, ag, blen, &notinit);
3569 if (*blen < args->maxlen) {
3570 error = xfs_filestream_new_ag(ap, &ag);
3574 error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
3581 xfs_bmap_select_minlen(ap, args, blen, notinit);
3584 * Set the failure fallback case to look in the selected AG as the stream may have moved.
3587 ap->blkno = args->fsbno = XFS_AGB_TO_FSB(mp, ag, 0);
3593 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
3595 xfs_mount_t *mp; /* mount point structure */
3596 xfs_alloctype_t atype = 0; /* type for allocation routines */
3597 xfs_extlen_t align; /* minimum allocation alignment */
3598 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */
3600 xfs_alloc_arg_t args;
3602 xfs_extlen_t nextminlen = 0;
3603 int nullfb; /* true if ap->firstblock isn't set */
3611 mp = ap->ip->i_mount;
3613 /* stripe alignment for allocation is determined by mount parameters */
3615 if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
3616 stripe_align = mp->m_swidth;
3617 else if (mp->m_dalign)
3618 stripe_align = mp->m_dalign;
3620 align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0;
3621 if (unlikely(align)) {
3622 error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
3623 align, 0, ap->eof, 0, ap->conv,
3624 &ap->offset, &ap->length);
3630 nullfb = *ap->firstblock == NULLFSBLOCK;
3631 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
3633 if (ap->userdata && xfs_inode_is_filestream(ap->ip)) {
3634 ag = xfs_filestream_lookup_ag(ap->ip);
3635 ag = (ag != NULLAGNUMBER) ? ag : 0;
3636 ap->blkno = XFS_AGB_TO_FSB(mp, ag, 0);
3638 ap->blkno = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
3641 ap->blkno = *ap->firstblock;
3643 xfs_bmap_adjacent(ap);
3646 * If allowed, use ap->blkno; otherwise must use firstblock since
3647 * it's in the right allocation group.
3649 if (nullfb || XFS_FSB_TO_AGNO(mp, ap->blkno) == fb_agno)
3652 ap->blkno = *ap->firstblock;
3654 * Normal allocation, done through xfs_alloc_vextent.
3656 tryagain = isaligned = 0;
3657 memset(&args, 0, sizeof(args));
3660 args.fsbno = ap->blkno;
3662 /* Trim the allocation back to the maximum an AG can fit. */
3663 args.maxlen = MIN(ap->length, XFS_ALLOC_AG_MAX_USABLE(mp));
3664 args.firstblock = *ap->firstblock;
3668 * Search for an allocation group with a single extent large
3669 * enough for the request. If one isn't found, then adjust
3670 * the minimum allocation size to the largest space found.
3672 if (ap->userdata && xfs_inode_is_filestream(ap->ip))
3673 error = xfs_bmap_btalloc_filestreams(ap, &args, &blen);
3675 error = xfs_bmap_btalloc_nullfb(ap, &args, &blen);
3678 } else if (ap->flist->dop_low) {
3679 if (xfs_inode_is_filestream(ap->ip))
3680 args.type = XFS_ALLOCTYPE_FIRST_AG;
3682 args.type = XFS_ALLOCTYPE_START_BNO;
3683 args.total = args.minlen = ap->minlen;
3685 args.type = XFS_ALLOCTYPE_NEAR_BNO;
3686 args.total = ap->total;
3687 args.minlen = ap->minlen;
3689 /* apply extent size hints if obtained earlier */
3690 if (unlikely(align)) {
3692 if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod)))
3693 args.mod = (xfs_extlen_t)(args.prod - args.mod);
3694 } else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) {
3698 args.prod = PAGE_SIZE >> mp->m_sb.sb_blocklog;
3699 if ((args.mod = (xfs_extlen_t)(do_mod(ap->offset, args.prod))))
3700 args.mod = (xfs_extlen_t)(args.prod - args.mod);
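	/*
	 * args.prod and args.mod ask the allocator to trim the returned
	 * length so that len % prod == mod, which makes the mapping end on a
	 * prod-aligned file offset.
	 */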
3703 * If we are not low on available data blocks, and the
3704 * underlying logical volume manager is a stripe, and
3705 * the file offset is zero then try to allocate data
3706 * blocks on stripe unit boundary.
3707 * NOTE: ap->aeof is only set if the allocation length
3708 * is >= the stripe unit and the allocation offset is
3709 * at the end of file.
3711 if (!ap->flist->dop_low && ap->aeof) {
3713 args.alignment = stripe_align;
3717 * Adjust for alignment
3719 if (blen > args.alignment && blen <= args.maxlen)
3720 args.minlen = blen - args.alignment;
3721 args.minalignslop = 0;
3724 * First try an exact bno allocation.
3725 * If it fails then do a near or start bno
3726 * allocation with alignment turned on.
3730 args.type = XFS_ALLOCTYPE_THIS_BNO;
3733 * Compute the minlen+alignment for the
3734 * next case. Set slop so that the value
3735 * of minlen+alignment+slop doesn't go up
3736 * between the calls.
3738 if (blen > stripe_align && blen <= args.maxlen)
3739 nextminlen = blen - stripe_align;
3741 nextminlen = args.minlen;
3742 if (nextminlen + stripe_align > args.minlen + 1)
3744 nextminlen + stripe_align -
3747 args.minalignslop = 0;
3751 args.minalignslop = 0;
3753 args.minleft = ap->minleft;
3754 args.wasdel = ap->wasdel;
3756 args.userdata = ap->userdata;
3757 if (ap->userdata & XFS_ALLOC_USERDATA_ZERO)
3760 error = xfs_alloc_vextent(&args);
3764 if (tryagain && args.fsbno == NULLFSBLOCK) {
3766 * Exact allocation failed. Now try with alignment turned on.
3770 args.fsbno = ap->blkno;
3771 args.alignment = stripe_align;
3772 args.minlen = nextminlen;
3773 args.minalignslop = 0;
3775 if ((error = xfs_alloc_vextent(&args)))
3778 if (isaligned && args.fsbno == NULLFSBLOCK) {
3780 * allocation failed, so turn off alignment and try again.
3784 args.fsbno = ap->blkno;
3786 if ((error = xfs_alloc_vextent(&args)))
3789 if (args.fsbno == NULLFSBLOCK && nullfb &&
3790 args.minlen > ap->minlen) {
3791 args.minlen = ap->minlen;
3792 args.type = XFS_ALLOCTYPE_START_BNO;
3793 args.fsbno = ap->blkno;
3794 if ((error = xfs_alloc_vextent(&args)))
3797 if (args.fsbno == NULLFSBLOCK && nullfb) {
3799 args.type = XFS_ALLOCTYPE_FIRST_AG;
3800 args.total = ap->minlen;
3802 if ((error = xfs_alloc_vextent(&args)))
3804 ap->flist->dop_low = true;
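	/*
	 * The retries above form a ladder: exact-bno, aligned near-bno,
	 * unaligned, start-bno with the minimum length, and finally first-AG
	 * with the total cut down to minlen.  Once that last resort is used,
	 * dop_low is set so later allocations in this chain stay at the same
	 * or a higher AG, which is what the assert below checks.
	 */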
3806 if (args.fsbno != NULLFSBLOCK) {
3808 * check the allocation happened at the same or higher AG than
3809 * the first block that was allocated.
3811 ASSERT(*ap->firstblock == NULLFSBLOCK ||
3812 XFS_FSB_TO_AGNO(mp, *ap->firstblock) ==
3813 XFS_FSB_TO_AGNO(mp, args.fsbno) ||
3814 (ap->flist->dop_low &&
3815 XFS_FSB_TO_AGNO(mp, *ap->firstblock) <
3816 XFS_FSB_TO_AGNO(mp, args.fsbno)));
3818 ap->blkno = args.fsbno;
3819 if (*ap->firstblock == NULLFSBLOCK)
3820 *ap->firstblock = args.fsbno;
3821 ASSERT(nullfb || fb_agno == args.agno ||
3822 (ap->flist->dop_low && fb_agno < args.agno));
3823 ap->length = args.len;
3824 ap->ip->i_d.di_nblocks += args.len;
3825 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
3827 ap->ip->i_delayed_blks -= args.len;
3829 * Adjust the disk quota also. This was reserved earlier.
3832 xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
3833 ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT :
3834 XFS_TRANS_DQ_BCOUNT,
3837 ap->blkno = NULLFSBLOCK;
3844 * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
3845 * It figures out where to ask the underlying allocator to put the new extent.
3849 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
3851 if (XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata)
3852 return xfs_bmap_rtalloc(ap);
3853 return xfs_bmap_btalloc(ap);
3857 * Trim the returned map to the required bounds
3861 struct xfs_bmbt_irec *mval,
3862 struct xfs_bmbt_irec *got,
3870 if ((flags & XFS_BMAPI_ENTIRE) ||
3871 got->br_startoff + got->br_blockcount <= obno) {
3873 if (isnullstartblock(got->br_startblock))
3874 mval->br_startblock = DELAYSTARTBLOCK;
3880 ASSERT((*bno >= obno) || (n == 0));
3882 mval->br_startoff = *bno;
3883 if (isnullstartblock(got->br_startblock))
3884 mval->br_startblock = DELAYSTARTBLOCK;
3886 mval->br_startblock = got->br_startblock +
3887 (*bno - got->br_startoff);
3889 * Return the minimum of what we got and what we asked for, for
3890 * the length. We can use the len variable here because it is
3891 * modified below and we could have been there before coming
3892 * here if the first part of the allocation didn't overlap what was asked for.
3895 mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno,
3896 got->br_blockcount - (*bno - got->br_startoff));
3897 mval->br_state = got->br_state;
3898 ASSERT(mval->br_blockcount <= len);
3903 * Update and validate the extent map to return
3906 xfs_bmapi_update_map(
3907 struct xfs_bmbt_irec **map,
3915 xfs_bmbt_irec_t *mval = *map;
3917 ASSERT((flags & XFS_BMAPI_ENTIRE) ||
3918 ((mval->br_startoff + mval->br_blockcount) <= end));
3919 ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) ||
3920 (mval->br_startoff < obno));
3922 *bno = mval->br_startoff + mval->br_blockcount;
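	/*
	 * Try to fold this mapping into the previous one: either it is the
	 * same extent grown in place, or the two are physically contiguous
	 * real extents in the same state, or both are adjacent delayed
	 * allocations.  Anything else starts a new map entry.
	 */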
3924 if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) {
3925 /* update previous map with new information */
3926 ASSERT(mval->br_startblock == mval[-1].br_startblock);
3927 ASSERT(mval->br_blockcount > mval[-1].br_blockcount);
3928 ASSERT(mval->br_state == mval[-1].br_state);
3929 mval[-1].br_blockcount = mval->br_blockcount;
3930 mval[-1].br_state = mval->br_state;
3931 } else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK &&
3932 mval[-1].br_startblock != DELAYSTARTBLOCK &&
3933 mval[-1].br_startblock != HOLESTARTBLOCK &&
3934 mval->br_startblock == mval[-1].br_startblock +
3935 mval[-1].br_blockcount &&
3936 ((flags & XFS_BMAPI_IGSTATE) ||
3937 mval[-1].br_state == mval->br_state)) {
3938 ASSERT(mval->br_startoff ==
3939 mval[-1].br_startoff + mval[-1].br_blockcount);
3940 mval[-1].br_blockcount += mval->br_blockcount;
3941 } else if (*n > 0 &&
3942 mval->br_startblock == DELAYSTARTBLOCK &&
3943 mval[-1].br_startblock == DELAYSTARTBLOCK &&
3944 mval->br_startoff ==
3945 mval[-1].br_startoff + mval[-1].br_blockcount) {
3946 mval[-1].br_blockcount += mval->br_blockcount;
3947 mval[-1].br_state = mval->br_state;
3948 } else if (!((*n == 0) &&
3949 ((mval->br_startoff + mval->br_blockcount) <=
3958 * Map file blocks to filesystem blocks without allocation.
3962 struct xfs_inode *ip,
3965 struct xfs_bmbt_irec *mval,
3969 struct xfs_mount *mp = ip->i_mount;
3970 struct xfs_ifork *ifp;
3971 struct xfs_bmbt_irec got;
3972 struct xfs_bmbt_irec prev;
3979 int whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
3980 XFS_ATTR_FORK : XFS_DATA_FORK;
3983 ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK|XFS_BMAPI_ENTIRE|
3984 XFS_BMAPI_IGSTATE)));
3985 ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL));
3987 if (unlikely(XFS_TEST_ERROR(
3988 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
3989 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
3990 mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
3991 XFS_ERROR_REPORT("xfs_bmapi_read", XFS_ERRLEVEL_LOW, mp);
3992 return -EFSCORRUPTED;
3995 if (XFS_FORCED_SHUTDOWN(mp))
3998 XFS_STATS_INC(mp, xs_blk_mapr);
4000 ifp = XFS_IFORK_PTR(ip, whichfork);
4002 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
4003 error = xfs_iread_extents(NULL, ip, whichfork);
4008 xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got, &prev);
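	/*
	 * "got" is the extent at or after bno and lastx is its index; eof is
	 * set when bno lies beyond the last extent in this fork.
	 */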
4012 while (bno < end && n < *nmap) {
4013 /* Reading past eof, act as though there's a hole up to end. */
4015 got.br_startoff = end;
4016 if (got.br_startoff > bno) {
4017 /* Reading in a hole. */
4018 mval->br_startoff = bno;
4019 mval->br_startblock = HOLESTARTBLOCK;
4020 mval->br_blockcount =
4021 XFS_FILBLKS_MIN(len, got.br_startoff - bno);
4022 mval->br_state = XFS_EXT_NORM;
4023 bno += mval->br_blockcount;
4024 len -= mval->br_blockcount;
4030 /* set up the extent map to return. */
4031 xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
4032 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
4034 /* If we're done, stop now. */
4035 if (bno >= end || n >= *nmap)
4038 /* Else go on to the next record. */
4039 if (++lastx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t))
4040 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, lastx), &got);
4049 xfs_bmapi_reserve_delalloc(
4050 struct xfs_inode *ip,
4053 struct xfs_bmbt_irec *got,
4054 struct xfs_bmbt_irec *prev,
4055 xfs_extnum_t *lastx,
4058 struct xfs_mount *mp = ip->i_mount;
4059 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
4061 xfs_extlen_t indlen;
4062 char rt = XFS_IS_REALTIME_INODE(ip);
4066 alen = XFS_FILBLKS_MIN(len, MAXEXTLEN);
4068 alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff);
4070 /* Figure out the extent size, adjust alen */
4071 extsz = xfs_get_extsz_hint(ip);
4073 error = xfs_bmap_extsize_align(mp, got, prev, extsz, rt, eof,
4074 1, 0, &aoff, &alen);
4079 extsz = alen / mp->m_sb.sb_rextsize;
4082 * Make a transaction-less quota reservation for delayed allocation
4083 * blocks. This number gets adjusted later. We return if we haven't
4084 * allocated blocks already inside this loop.
4086 error = xfs_trans_reserve_quota_nblks(NULL, ip, (long)alen, 0,
4087 rt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
4092 * Split changing sb for alen and indlen since they could be coming
4093 * from different places.
4095 indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen);
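	/*
	 * indlen is the worst-case number of bmap btree blocks that could be
	 * needed to map alen blocks later on; it is reserved against the free
	 * block count now and the excess is given back when the delayed
	 * extent is converted to a real allocation.
	 */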
4099 error = xfs_mod_frextents(mp, -((int64_t)extsz));
4101 error = xfs_mod_fdblocks(mp, -((int64_t)alen), false);
4105 goto out_unreserve_quota;
4107 error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false);
4109 goto out_unreserve_blocks;
4112 ip->i_delayed_blks += alen;
4114 got->br_startoff = aoff;
4115 got->br_startblock = nullstartblock(indlen);
4116 got->br_blockcount = alen;
4117 got->br_state = XFS_EXT_NORM;
4118 xfs_bmap_add_extent_hole_delay(ip, lastx, got);
4121 * Update our extent pointer, given that xfs_bmap_add_extent_hole_delay
4122 * might have merged it into one of the neighbouring ones.
4124 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *lastx), got);
4126 ASSERT(got->br_startoff <= aoff);
4127 ASSERT(got->br_startoff + got->br_blockcount >= aoff + alen);
4128 ASSERT(isnullstartblock(got->br_startblock));
4129 ASSERT(got->br_state == XFS_EXT_NORM);
4132 out_unreserve_blocks:
4134 xfs_mod_frextents(mp, extsz);
4136 xfs_mod_fdblocks(mp, alen, false);
4137 out_unreserve_quota:
4138 if (XFS_IS_QUOTA_ON(mp))
4139 xfs_trans_unreserve_quota_nblks(NULL, ip, (long)alen, 0, rt ?
4140 XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
4145 * Map file blocks to filesystem blocks, adding delayed allocations as needed.
4149 struct xfs_inode *ip, /* incore inode */
4150 xfs_fileoff_t bno, /* starting file offs. mapped */
4151 xfs_filblks_t len, /* length to map in file */
4152 struct xfs_bmbt_irec *mval, /* output: map values */
4153 int *nmap, /* i/o: mval size/count */
4154 int flags) /* XFS_BMAPI_... */
4156 struct xfs_mount *mp = ip->i_mount;
4157 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
4158 struct xfs_bmbt_irec got; /* current file extent record */
4159 struct xfs_bmbt_irec prev; /* previous file extent record */
4160 xfs_fileoff_t obno; /* old block number (offset) */
4161 xfs_fileoff_t end; /* end of mapped file region */
4162 xfs_extnum_t lastx; /* last useful extent number */
4163 int eof; /* we've hit the end of extents */
4164 int n = 0; /* current extent index */
4168 ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
4169 ASSERT(!(flags & ~XFS_BMAPI_ENTIRE));
4170 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
4172 if (unlikely(XFS_TEST_ERROR(
4173 (XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_EXTENTS &&
4174 XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_BTREE),
4175 mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
4176 XFS_ERROR_REPORT("xfs_bmapi_delay", XFS_ERRLEVEL_LOW, mp);
4177 return -EFSCORRUPTED;
4180 if (XFS_FORCED_SHUTDOWN(mp))
4183 XFS_STATS_INC(mp, xs_blk_mapw);
4185 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
4186 error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
4191 xfs_bmap_search_extents(ip, bno, XFS_DATA_FORK, &eof, &lastx, &got, &prev);
4195 while (bno < end && n < *nmap) {
4196 if (eof || got.br_startoff > bno) {
4197 error = xfs_bmapi_reserve_delalloc(ip, bno, len, &got,
4198 &prev, &lastx, eof);
4208 /* set up the extent map to return. */
4209 xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
4210 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
4212 /* If we're done, stop now. */
4213 if (bno >= end || n >= *nmap)
4216 /* Else go on to the next record. */
4218 if (++lastx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t))
4219 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, lastx), &got);
4231 struct xfs_bmalloca *bma)
4233 struct xfs_mount *mp = bma->ip->i_mount;
4234 int whichfork = (bma->flags & XFS_BMAPI_ATTRFORK) ?
4235 XFS_ATTR_FORK : XFS_DATA_FORK;
4236 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
4237 int tmp_logflags = 0;
4240 ASSERT(bma->length > 0);
4243 * For the wasdelay case, we could also just allocate the blocks asked
4244 * for in this bmap call, but that wouldn't be as good.
4247 bma->length = (xfs_extlen_t)bma->got.br_blockcount;
4248 bma->offset = bma->got.br_startoff;
4249 if (bma->idx != NULLEXTNUM && bma->idx) {
4250 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1),
4254 bma->length = XFS_FILBLKS_MIN(bma->length, MAXEXTLEN);
4256 bma->length = XFS_FILBLKS_MIN(bma->length,
4257 bma->got.br_startoff - bma->offset);
4261 * Indicate if this is the first user data in the file, or just any
4262 * user data. And if it is userdata, indicate whether it needs to
4263 * be initialised to zero during allocation.
4265 if (!(bma->flags & XFS_BMAPI_METADATA)) {
4266 bma->userdata = (bma->offset == 0) ?
4267 XFS_ALLOC_INITIAL_USER_DATA : XFS_ALLOC_USERDATA;
4268 if (bma->flags & XFS_BMAPI_ZERO)
4269 bma->userdata |= XFS_ALLOC_USERDATA_ZERO;
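/*
* An XFS_BMAPI_CONTIG request must be satisfied by a single extent, so the
* minimum acceptable allocation length is the full request; otherwise any
* length down to a single block will do.
*/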
4272 bma->minlen = (bma->flags & XFS_BMAPI_CONTIG) ? bma->length : 1;
4275 * Only want to do the alignment at the eof if it is userdata and
4276 * allocation length is larger than a stripe unit.
4278 if (mp->m_dalign && bma->length >= mp->m_dalign &&
4279 !(bma->flags & XFS_BMAPI_METADATA) && whichfork == XFS_DATA_FORK) {
4280 error = xfs_bmap_isaeof(bma, whichfork);
4285 error = xfs_bmap_alloc(bma);
4289 if (bma->flist->dop_low)
4292 bma->cur->bc_private.b.firstblock = *bma->firstblock;
4293 if (bma->blkno == NULLFSBLOCK)
4295 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
4296 bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork);
4297 bma->cur->bc_private.b.firstblock = *bma->firstblock;
4298 bma->cur->bc_private.b.flist = bma->flist;
4301 * Bump the number of extents we've allocated in this call.
4307 bma->cur->bc_private.b.flags =
4308 bma->wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
4310 bma->got.br_startoff = bma->offset;
4311 bma->got.br_startblock = bma->blkno;
4312 bma->got.br_blockcount = bma->length;
4313 bma->got.br_state = XFS_EXT_NORM;
4316 * A wasdelay extent has been initialized, so shouldn't be flagged as unwritten.
4319 if (!bma->wasdel && (bma->flags & XFS_BMAPI_PREALLOC) &&
4320 xfs_sb_version_hasextflgbit(&mp->m_sb))
4321 bma->got.br_state = XFS_EXT_UNWRITTEN;
4324 error = xfs_bmap_add_extent_delay_real(bma);
4326 error = xfs_bmap_add_extent_hole_real(bma, whichfork);
4328 bma->logflags |= tmp_logflags;
4333 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real
4334 * or xfs_bmap_add_extent_hole_real might have merged it into one of
4335 * the neighbouring ones.
4337 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &bma->got);
4339 ASSERT(bma->got.br_startoff <= bma->offset);
4340 ASSERT(bma->got.br_startoff + bma->got.br_blockcount >=
4341 bma->offset + bma->length);
4342 ASSERT(bma->got.br_state == XFS_EXT_NORM ||
4343 bma->got.br_state == XFS_EXT_UNWRITTEN);
4348 xfs_bmapi_convert_unwritten(
4349 struct xfs_bmalloca *bma,
4350 struct xfs_bmbt_irec *mval,
4354 int whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
4355 XFS_ATTR_FORK : XFS_DATA_FORK;
4356 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
4357 int tmp_logflags = 0;
4360 /* check if we need to do unwritten->real conversion */
4361 if (mval->br_state == XFS_EXT_UNWRITTEN &&
4362 (flags & XFS_BMAPI_PREALLOC))
4365 /* check if we need to do real->unwritten conversion */
4366 if (mval->br_state == XFS_EXT_NORM &&
4367 (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) !=
4368 (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT))
4372 * Modify (by adding) the state flag, if writing.
4374 ASSERT(mval->br_blockcount <= len);
4375 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
4376 bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp,
4377 bma->ip, whichfork);
4378 bma->cur->bc_private.b.firstblock = *bma->firstblock;
4379 bma->cur->bc_private.b.flist = bma->flist;
4381 mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
4382 ? XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
4385 * Before insertion into the bmbt, zero the range being converted, if required.
4388 if (flags & XFS_BMAPI_ZERO) {
4389 error = xfs_zero_extent(bma->ip, mval->br_startblock,
4390 mval->br_blockcount);
4395 error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, &bma->idx,
4396 &bma->cur, mval, bma->firstblock, bma->flist,
4399 * Log the inode core unconditionally in the unwritten extent conversion
4400 * path because the conversion might not have done so (e.g., if the
4401 * extent count hasn't changed). We need to make sure the inode is dirty
4402 * in the transaction for the sake of fsync(), even if nothing has
4403 * changed, because fsync() will not force the log for this transaction
4404 * unless it sees the inode pinned.
4406 bma->logflags |= tmp_logflags | XFS_ILOG_CORE;
4411 * Update our extent pointer, given that
4412 * xfs_bmap_add_extent_unwritten_real might have merged it into one
4413 * of the neighbouring ones.
4415 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &bma->got);
4418 * We may have combined previously unwritten space with written space,
4419 * so generate another request.
4421 if (mval->br_blockcount < len)
4427 * Map file blocks to filesystem blocks, and allocate blocks or convert the
4428 * extent state if necessary. Detailed behaviour is controlled by the flags
4429 * parameter. Only allocates blocks from a single allocation group, to avoid locking problems.
4432 * The returned value in "firstblock" from the first call in a transaction
4433 * must be remembered and presented to subsequent calls in "firstblock".
4434 * An upper bound for the number of blocks to be allocated is supplied to
4435 * the first call in "total"; if no allocation group has that many free
4436 * blocks then the call will fail (return NULLFSBLOCK in "firstblock").
4440 struct xfs_trans *tp, /* transaction pointer */
4441 struct xfs_inode *ip, /* incore inode */
4442 xfs_fileoff_t bno, /* starting file offs. mapped */
4443 xfs_filblks_t len, /* length to map in file */
4444 int flags, /* XFS_BMAPI_... */
4445 xfs_fsblock_t *firstblock, /* first allocated block
4446 controls a.g. for allocs */
4447 xfs_extlen_t total, /* total blocks needed */
4448 struct xfs_bmbt_irec *mval, /* output: map values */
4449 int *nmap, /* i/o: mval size/count */
4450 struct xfs_bmap_free *flist) /* i/o: list extents to free */
4452 struct xfs_mount *mp = ip->i_mount;
4453 struct xfs_ifork *ifp;
4454 struct xfs_bmalloca bma = { NULL }; /* args for xfs_bmap_alloc */
4455 xfs_fileoff_t end; /* end of mapped file region */
4456 int eof; /* after the end of extents */
4457 int error; /* error return */
4458 int n; /* current extent index */
4459 xfs_fileoff_t obno; /* old block number (offset) */
4460 int whichfork; /* data or attr fork */
4461 char inhole; /* current location is hole in file */
4462 char wasdelay; /* old extent was delayed */
4465 xfs_fileoff_t orig_bno; /* original block number value */
4466 int orig_flags; /* original flags arg value */
4467 xfs_filblks_t orig_len; /* original value of len arg */
4468 struct xfs_bmbt_irec *orig_mval; /* original value of mval */
4469 int orig_nmap; /* original value of *nmap */
4477 whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
4478 XFS_ATTR_FORK : XFS_DATA_FORK;
4481 ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
4482 ASSERT(!(flags & XFS_BMAPI_IGSTATE));
4485 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL);
4486 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
4488 /* zeroing is currently only for data extents, not metadata */
4489 ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) !=
4490 (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO));
4492 * We can either allocate unwritten extents or pre-zero allocated blocks,
4493 * but it makes no sense to do both at once. That would result in
4494 * zeroing the unwritten extent twice, while still leaving it an
4495 * unwritten extent.
4497 ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) !=
4498 (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO));
4500 if (unlikely(XFS_TEST_ERROR(
4501 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
4502 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
4503 mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
4504 XFS_ERROR_REPORT("xfs_bmapi_write", XFS_ERRLEVEL_LOW, mp);
4505 return -EFSCORRUPTED;
4508 if (XFS_FORCED_SHUTDOWN(mp))
4511 ifp = XFS_IFORK_PTR(ip, whichfork);
4513 XFS_STATS_INC(mp, xs_blk_mapw);
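/*
* On the first call in a transaction *firstblock is still NULLFSBLOCK. If
* the fork is already in btree format, make the allocation leave enough
* free blocks (minleft) behind for the bmap btree blocks a split may need.
*/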
4515 if (*firstblock == NULLFSBLOCK) {
4516 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE)
4517 bma.minleft = be16_to_cpu(ifp->if_broot->bb_level) + 1;
4524 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
4525 error = xfs_iread_extents(tp, ip, whichfork);
4530 xfs_bmap_search_extents(ip, bno, whichfork, &eof, &bma.idx, &bma.got,
4541 bma.firstblock = firstblock;
4543 while (bno < end && n < *nmap) {
4544 inhole = eof || bma.got.br_startoff > bno;
4545 wasdelay = !inhole && isnullstartblock(bma.got.br_startblock);
4548 * First, deal with the hole before the allocated space
4549 * that we found, if any.
4551 if (inhole || wasdelay) {
4553 bma.conv = !!(flags & XFS_BMAPI_CONVERT);
4554 bma.wasdel = wasdelay;
4559 * There's a 32/64 bit type mismatch between the
4560 * allocation length request (which can be 64 bits in
4561 * length) and the bma length request, which is
4562 * xfs_extlen_t and therefore 32 bits. Hence we have to
4563 * check for 32-bit overflows and handle them here.
4565 if (len > (xfs_filblks_t)MAXEXTLEN)
4566 bma.length = MAXEXTLEN;
4571 ASSERT(bma.length > 0);
4572 error = xfs_bmapi_allocate(&bma);
4575 if (bma.blkno == NULLFSBLOCK)
4579 /* Deal with the allocated space we found. */
4580 xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno,
4583 /* Execute unwritten extent conversion if necessary */
4584 error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags);
4585 if (error == -EAGAIN)
4590 /* update the extent map to return */
4591 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
4594 * If we're done, stop now. Stop when we've allocated
4595 * XFS_BMAP_MAX_NMAP extents no matter what. Otherwise
4596 * the transaction may get too big.
4598 if (bno >= end || n >= *nmap || bma.nallocs >= *nmap)
4601 /* Else go on to the next record. */
4603 if (++bma.idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t)) {
4604 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma.idx),
4612 * Transform from btree to extents, give it cur.
4614 if (xfs_bmap_wants_extents(ip, whichfork)) {
4615 int tmp_logflags = 0;
4618 error = xfs_bmap_btree_to_extents(tp, ip, bma.cur,
4619 &tmp_logflags, whichfork);
4620 bma.logflags |= tmp_logflags;
4625 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE ||
4626 XFS_IFORK_NEXTENTS(ip, whichfork) >
4627 XFS_IFORK_MAXEXT(ip, whichfork));
4631 * Log everything. Do this after conversion, there's no point in
4632 * logging the extent records if we've converted to btree format.
4634 if ((bma.logflags & xfs_ilog_fext(whichfork)) &&
4635 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
4636 bma.logflags &= ~xfs_ilog_fext(whichfork);
4637 else if ((bma.logflags & xfs_ilog_fbroot(whichfork)) &&
4638 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
4639 bma.logflags &= ~xfs_ilog_fbroot(whichfork);
4641 * Log whatever the flags say, even on error. Otherwise we might miss
4642 * detecting a case where the data is changed, there's an error,
4643 * and it's not logged so we don't shut down when we should.
4646 xfs_trans_log_inode(tp, ip, bma.logflags);
4650 ASSERT(*firstblock == NULLFSBLOCK ||
4651 XFS_FSB_TO_AGNO(mp, *firstblock) ==
4653 bma.cur->bc_private.b.firstblock) ||
4655 XFS_FSB_TO_AGNO(mp, *firstblock) <
4657 bma.cur->bc_private.b.firstblock)));
4658 *firstblock = bma.cur->bc_private.b.firstblock;
4660 xfs_btree_del_cursor(bma.cur,
4661 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
4664 xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
4670 * When a delalloc extent is split (e.g., due to a hole punch), the original
4671 * indlen reservation must be shared across the two new extents that are left behind.
4674 * Given the original reservation and the worst case indlen for the two new
4675 * extents (as calculated by xfs_bmap_worst_indlen()), split the original
4676 * reservation fairly across the two new extents. If necessary, steal available
4677 * blocks from a deleted extent to make up a reservation deficiency (e.g., if
4678 * ores == 1). The number of stolen blocks is returned. The availability and
4679 * subsequent accounting of stolen blocks is the responsibility of the caller.
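*
* For example (illustrative): with ores = 5, *indlen1 = *indlen2 = 4 and
* avail = 2, two blocks are stolen and the remaining one-block deficit is
* skimmed off one of the new reservations, leaving reservations of 4 and 3
* for a total of ores + stolen = 7 blocks.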
4681 static xfs_filblks_t
4682 xfs_bmap_split_indlen(
4683 xfs_filblks_t ores, /* original res. */
4684 xfs_filblks_t *indlen1, /* ext1 worst indlen */
4685 xfs_filblks_t *indlen2, /* ext2 worst indlen */
4686 xfs_filblks_t avail) /* stealable blocks */
4688 xfs_filblks_t len1 = *indlen1;
4689 xfs_filblks_t len2 = *indlen2;
4690 xfs_filblks_t nres = len1 + len2; /* new total res. */
4691 xfs_filblks_t stolen = 0;
4694 * Steal as many blocks as we can to try and satisfy the worst case
4695 * indlen for both new extents.
4697 while (nres > ores && avail) {
4704 * The only blocks available are those reserved for the original
4705 * extent and what we can steal from the extent being removed.
4706 * If this still isn't enough to satisfy the combined
4707 * requirements for the two new extents, skim blocks off of each
4708 * of the new reservations until they match what is available.
4710 while (nres > ores) {
4730 * Called by xfs_bmapi to update file extent records and the btree
4731 * after removing space (or undoing a delayed allocation).
4733 STATIC int /* error */
4734 xfs_bmap_del_extent(
4735 xfs_inode_t *ip, /* incore inode pointer */
4736 xfs_trans_t *tp, /* current transaction pointer */
4737 xfs_extnum_t *idx, /* extent number to update/delete */
4738 xfs_bmap_free_t *flist, /* list of extents to be freed */
4739 xfs_btree_cur_t *cur, /* if null, not a btree */
4740 xfs_bmbt_irec_t *del, /* data to remove from extents */
4741 int *logflagsp, /* inode logging flags */
4742 int whichfork) /* data or attr fork */
4744 xfs_filblks_t da_new; /* new delay-alloc indirect blocks */
4745 xfs_filblks_t da_old; /* old delay-alloc indirect blocks */
4746 xfs_fsblock_t del_endblock=0; /* first block past del */
4747 xfs_fileoff_t del_endoff; /* first offset past del */
4748 int delay; /* current block is delayed allocated */
4749 int do_fx; /* free extent at end of routine */
4750 xfs_bmbt_rec_host_t *ep; /* current extent entry pointer */
4751 int error; /* error return value */
4752 int flags; /* inode logging flags */
4753 xfs_bmbt_irec_t got; /* current extent entry */
4754 xfs_fileoff_t got_endoff; /* first offset past got */
4755 int i; /* temp state */
4756 xfs_ifork_t *ifp; /* inode fork pointer */
4757 xfs_mount_t *mp; /* mount structure */
4758 xfs_filblks_t nblks; /* quota/sb block count */
4759 xfs_bmbt_irec_t new; /* new record to be inserted */
4761 uint qfield; /* quota field to update */
4762 xfs_filblks_t temp; /* for indirect length calculations */
4763 xfs_filblks_t temp2; /* for indirect length calculations */
4767 XFS_STATS_INC(mp, xs_del_exlist);
4769 if (whichfork == XFS_ATTR_FORK)
4770 state |= BMAP_ATTRFORK;
4772 ifp = XFS_IFORK_PTR(ip, whichfork);
4773 ASSERT((*idx >= 0) && (*idx < ifp->if_bytes /
4774 (uint)sizeof(xfs_bmbt_rec_t)));
4775 ASSERT(del->br_blockcount > 0);
4776 ep = xfs_iext_get_ext(ifp, *idx);
4777 xfs_bmbt_get_all(ep, &got);
4778 ASSERT(got.br_startoff <= del->br_startoff);
4779 del_endoff = del->br_startoff + del->br_blockcount;
4780 got_endoff = got.br_startoff + got.br_blockcount;
4781 ASSERT(got_endoff >= del_endoff);
4782 delay = isnullstartblock(got.br_startblock);
4783 ASSERT(isnullstartblock(del->br_startblock) == delay);
4788 * If deleting a real allocation, must free up the disk space.
4791 flags = XFS_ILOG_CORE;
4793 * Realtime allocation. Free it and record di_nblocks update.
4795 if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) {
4799 ASSERT(do_mod(del->br_blockcount,
4800 mp->m_sb.sb_rextsize) == 0);
4801 ASSERT(do_mod(del->br_startblock,
4802 mp->m_sb.sb_rextsize) == 0);
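/*
* Convert the start block and block count from filesystem blocks into
* realtime extents for xfs_rtfree_extent(); the di_nblocks and quota
* updates below are still done in filesystem blocks.
*/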
4803 bno = del->br_startblock;
4804 len = del->br_blockcount;
4805 do_div(bno, mp->m_sb.sb_rextsize);
4806 do_div(len, mp->m_sb.sb_rextsize);
4807 error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len);
4811 nblks = len * mp->m_sb.sb_rextsize;
4812 qfield = XFS_TRANS_DQ_RTBCOUNT;
4815 * Ordinary allocation.
4819 nblks = del->br_blockcount;
4820 qfield = XFS_TRANS_DQ_BCOUNT;
4823 * Set up del_endblock and cur for later.
4825 del_endblock = del->br_startblock + del->br_blockcount;
4827 if ((error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
4828 got.br_startblock, got.br_blockcount,
4831 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
4833 da_old = da_new = 0;
4835 da_old = startblockval(got.br_startblock);
4841 * Set flag value to use in switch statement.
4842 * Left-contig is 2, right-contig is 1.
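* Case 3 deletes the whole extent, case 2 the initial part, case 1 the
* final part, and case 0 punches out the middle, which requires inserting
* the remaining right-hand piece as a new record.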
4844 switch (((got.br_startoff == del->br_startoff) << 1) |
4845 (got_endoff == del_endoff)) {
4848 * Matches the whole extent. Delete the entry.
4850 xfs_iext_remove(ip, *idx, 1,
4851 whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0);
4856 XFS_IFORK_NEXT_SET(ip, whichfork,
4857 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
4858 flags |= XFS_ILOG_CORE;
4860 flags |= xfs_ilog_fext(whichfork);
4863 if ((error = xfs_btree_delete(cur, &i)))
4865 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
4870 * Deleting the first part of the extent.
4872 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
4873 xfs_bmbt_set_startoff(ep, del_endoff);
4874 temp = got.br_blockcount - del->br_blockcount;
4875 xfs_bmbt_set_blockcount(ep, temp);
4877 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
4879 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
4880 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
4884 xfs_bmbt_set_startblock(ep, del_endblock);
4885 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
4887 flags |= xfs_ilog_fext(whichfork);
4890 if ((error = xfs_bmbt_update(cur, del_endoff, del_endblock,
4891 got.br_blockcount - del->br_blockcount,
4898 * Deleting the last part of the extent.
4900 temp = got.br_blockcount - del->br_blockcount;
4901 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
4902 xfs_bmbt_set_blockcount(ep, temp);
4904 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
4906 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
4907 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
4911 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
4913 flags |= xfs_ilog_fext(whichfork);
4916 if ((error = xfs_bmbt_update(cur, got.br_startoff,
4918 got.br_blockcount - del->br_blockcount,
4925 * Deleting the middle of the extent.
4927 temp = del->br_startoff - got.br_startoff;
4928 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
4929 xfs_bmbt_set_blockcount(ep, temp);
4930 new.br_startoff = del_endoff;
4931 temp2 = got_endoff - del_endoff;
4932 new.br_blockcount = temp2;
4933 new.br_state = got.br_state;
4935 new.br_startblock = del_endblock;
4936 flags |= XFS_ILOG_CORE;
4938 if ((error = xfs_bmbt_update(cur,
4940 got.br_startblock, temp,
4943 if ((error = xfs_btree_increment(cur, 0, &i)))
4945 cur->bc_rec.b = new;
4946 error = xfs_btree_insert(cur, &i);
4947 if (error && error != -ENOSPC)
4950 * If we get ENOSPC back from the btree insert,
4951 * it tried a split, and we have a zero
4952 * block reservation.
4953 * Fix up our state and return the error.
4955 if (error == -ENOSPC) {
4957 * Reset the cursor, don't trust
4958 * it after any insert operation.
4960 if ((error = xfs_bmbt_lookup_eq(cur,
4965 XFS_WANT_CORRUPTED_GOTO(mp,
4968 * Update the btree record back
4969 * to the original value.
4971 if ((error = xfs_bmbt_update(cur,
4978 * Reset the extent record back
4979 * to the original value.
4981 xfs_bmbt_set_blockcount(ep,
4987 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
4989 flags |= xfs_ilog_fext(whichfork);
4990 XFS_IFORK_NEXT_SET(ip, whichfork,
4991 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
4993 xfs_filblks_t stolen;
4994 ASSERT(whichfork == XFS_DATA_FORK);
4997 * Distribute the original indlen reservation across the
4998 * two new extents. Steal blocks from the deleted extent
4999 * if necessary. Stealing blocks simply fudges the
5000 * fdblocks accounting in xfs_bunmapi().
5002 temp = xfs_bmap_worst_indlen(ip, got.br_blockcount);
5003 temp2 = xfs_bmap_worst_indlen(ip, new.br_blockcount);
5004 stolen = xfs_bmap_split_indlen(da_old, &temp, &temp2,
5005 del->br_blockcount);
5006 da_new = temp + temp2 - stolen;
5007 del->br_blockcount -= stolen;
5010 * Set the reservation for each extent. Warn if either
5011 * is zero as this can lead to delalloc problems.
5013 WARN_ON_ONCE(!temp || !temp2);
5014 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
5015 new.br_startblock = nullstartblock((int)temp2);
5017 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5018 xfs_iext_insert(ip, *idx + 1, 1, &new, state);
5023 * If we need to, add the extent to the list of extents to be freed.
5026 xfs_bmap_add_free(mp, flist, del->br_startblock,
5027 del->br_blockcount);
5029 * Adjust inode # blocks in the file.
5032 ip->i_d.di_nblocks -= nblks;
5034 * Adjust quota data.
5037 xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);
5040 * Account for change in delayed indirect blocks.
5041 * Nothing to do for disk quota accounting here.
5043 ASSERT(da_old >= da_new);
5044 if (da_old > da_new)
5045 xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new), false);
5052 * Unmap (remove) blocks from a file.
5053 * If nexts is nonzero then the number of extents to remove is limited to
5054 * that value. If not all extents in the block range can be removed then *done is not set.
5059 xfs_trans_t *tp, /* transaction pointer */
5060 struct xfs_inode *ip, /* incore inode */
5061 xfs_fileoff_t bno, /* starting offset to unmap */
5062 xfs_filblks_t len, /* length to unmap in file */
5063 int flags, /* misc flags */
5064 xfs_extnum_t nexts, /* number of extents max */
5065 xfs_fsblock_t *firstblock, /* first allocated block
5066 controls a.g. for allocs */
5067 xfs_bmap_free_t *flist, /* i/o: list extents to free */
5068 int *done) /* set when the unmap has completed */
5070 xfs_btree_cur_t *cur; /* bmap btree cursor */
5071 xfs_bmbt_irec_t del; /* extent being deleted */
5072 int eof; /* is deleting at eof */
5073 xfs_bmbt_rec_host_t *ep; /* extent record pointer */
5074 int error; /* error return value */
5075 xfs_extnum_t extno; /* extent number in list */
5076 xfs_bmbt_irec_t got; /* current extent record */
5077 xfs_ifork_t *ifp; /* inode fork pointer */
5078 int isrt; /* freeing in rt area */
5079 xfs_extnum_t lastx; /* last extent index used */
5080 int logflags; /* transaction logging flags */
5081 xfs_extlen_t mod; /* rt extent offset */
5082 xfs_mount_t *mp; /* mount structure */
5083 xfs_extnum_t nextents; /* number of file extents */
5084 xfs_bmbt_irec_t prev; /* previous extent record */
5085 xfs_fileoff_t start; /* first file offset deleted */
5086 int tmp_logflags; /* partial logging flags */
5087 int wasdel; /* was a delayed alloc extent */
5088 int whichfork; /* data or attribute fork */
5091 trace_xfs_bunmap(ip, bno, len, flags, _RET_IP_);
5093 whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
5094 XFS_ATTR_FORK : XFS_DATA_FORK;
5095 ifp = XFS_IFORK_PTR(ip, whichfork);
5097 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
5098 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
5099 XFS_ERROR_REPORT("xfs_bunmapi", XFS_ERRLEVEL_LOW,
5101 return -EFSCORRUPTED;
5104 if (XFS_FORCED_SHUTDOWN(mp))
5107 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
5111 if (!(ifp->if_flags & XFS_IFEXTENTS) &&
5112 (error = xfs_iread_extents(tp, ip, whichfork)))
5114 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
5115 if (nextents == 0) {
5119 XFS_STATS_INC(mp, xs_blk_unmap);
5120 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
5122 bno = start + len - 1;
5123 ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
5127 * Check to see if the given block number is past the end of the
5128 * file; if so, back up to the last block.
5131 ep = xfs_iext_get_ext(ifp, --lastx);
5132 xfs_bmbt_get_all(ep, &got);
5133 bno = got.br_startoff + got.br_blockcount - 1;
5136 if (ifp->if_flags & XFS_IFBROOT) {
5137 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
5138 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5139 cur->bc_private.b.firstblock = *firstblock;
5140 cur->bc_private.b.flist = flist;
5141 cur->bc_private.b.flags = 0;
5147 * Synchronize by locking the bitmap inode.
5149 xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
5150 xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL);
5151 xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
5152 xfs_trans_ijoin(tp, mp->m_rsumip, XFS_ILOCK_EXCL);
5156 while (bno != (xfs_fileoff_t)-1 && bno >= start && lastx >= 0 &&
5157 (nexts == 0 || extno < nexts)) {
5159 * Is the found extent after a hole in which bno lives?
5160 * Just back up to the previous extent, if so.
5162 if (got.br_startoff > bno) {
5165 ep = xfs_iext_get_ext(ifp, lastx);
5166 xfs_bmbt_get_all(ep, &got);
5169 * Is the last block of this extent before the range
5170 * we're supposed to delete? If so, we're done.
5172 bno = XFS_FILEOFF_MIN(bno,
5173 got.br_startoff + got.br_blockcount - 1);
5177 * Then deal with the (possibly delayed) allocated space we found.
5182 wasdel = isnullstartblock(del.br_startblock);
5183 if (got.br_startoff < start) {
5184 del.br_startoff = start;
5185 del.br_blockcount -= start - got.br_startoff;
5187 del.br_startblock += start - got.br_startoff;
5189 if (del.br_startoff + del.br_blockcount > bno + 1)
5190 del.br_blockcount = bno + 1 - del.br_startoff;
5191 sum = del.br_startblock + del.br_blockcount;
5193 (mod = do_mod(sum, mp->m_sb.sb_rextsize))) {
5195 * Realtime extent not lined up at the end.
5196 * The extent could have been split into written
5197 * and unwritten pieces, or we could just be
5198 * unmapping part of it. But we can't really
5199 * get rid of part of a realtime extent.
5201 if (del.br_state == XFS_EXT_UNWRITTEN ||
5202 !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
5204 * This piece is unwritten, or we're not
5205 * using unwritten extents. Skip over it.
5208 bno -= mod > del.br_blockcount ?
5209 del.br_blockcount : mod;
5210 if (bno < got.br_startoff) {
5212 xfs_bmbt_get_all(xfs_iext_get_ext(
5218 * It's written, turn it unwritten.
5219 * This is better than zeroing it.
5221 ASSERT(del.br_state == XFS_EXT_NORM);
5222 ASSERT(tp->t_blk_res > 0);
5224 * If this spans a realtime extent boundary,
5225 * chop it back to the start of the one we end at.
5227 if (del.br_blockcount > mod) {
5228 del.br_startoff += del.br_blockcount - mod;
5229 del.br_startblock += del.br_blockcount - mod;
5230 del.br_blockcount = mod;
5232 del.br_state = XFS_EXT_UNWRITTEN;
5233 error = xfs_bmap_add_extent_unwritten_real(tp, ip,
5234 &lastx, &cur, &del, firstblock, flist,
5240 if (isrt && (mod = do_mod(del.br_startblock, mp->m_sb.sb_rextsize))) {
5242 * Realtime extent is lined up at the end but not
5243 * at the front. We'll get rid of full extents if we can.
5246 mod = mp->m_sb.sb_rextsize - mod;
5247 if (del.br_blockcount > mod) {
5248 del.br_blockcount -= mod;
5249 del.br_startoff += mod;
5250 del.br_startblock += mod;
5251 } else if ((del.br_startoff == start &&
5252 (del.br_state == XFS_EXT_UNWRITTEN ||
5253 tp->t_blk_res == 0)) ||
5254 !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
5256 * Can't make it unwritten. There isn't
5257 * a full extent here so just skip it.
5259 ASSERT(bno >= del.br_blockcount);
5260 bno -= del.br_blockcount;
5261 if (got.br_startoff > bno) {
5263 ep = xfs_iext_get_ext(ifp,
5265 xfs_bmbt_get_all(ep, &got);
5269 } else if (del.br_state == XFS_EXT_UNWRITTEN) {
5271 * This one is already unwritten.
5272 * It must have a written left neighbor.
5273 * Unwrite the killed part of that one and try again.
5277 xfs_bmbt_get_all(xfs_iext_get_ext(ifp,
5279 ASSERT(prev.br_state == XFS_EXT_NORM);
5280 ASSERT(!isnullstartblock(prev.br_startblock));
5281 ASSERT(del.br_startblock ==
5282 prev.br_startblock + prev.br_blockcount);
5283 if (prev.br_startoff < start) {
5284 mod = start - prev.br_startoff;
5285 prev.br_blockcount -= mod;
5286 prev.br_startblock += mod;
5287 prev.br_startoff = start;
5289 prev.br_state = XFS_EXT_UNWRITTEN;
5291 error = xfs_bmap_add_extent_unwritten_real(tp,
5292 ip, &lastx, &cur, &prev,
5293 firstblock, flist, &logflags);
5298 ASSERT(del.br_state == XFS_EXT_NORM);
5299 del.br_state = XFS_EXT_UNWRITTEN;
5300 error = xfs_bmap_add_extent_unwritten_real(tp,
5301 ip, &lastx, &cur, &del,
5302 firstblock, flist, &logflags);
5310 * If it's the case where the directory code is running
5311 * with no block reservation, and the deleted block is in
5312 * the middle of its extent, and the resulting insert
5313 * of an extent would cause transformation to btree format,
5314 * then reject it. The calling code will then swap
5315 * blocks around instead.
5316 * We have to do this now, rather than waiting for the
5317 * conversion to btree format, since the transaction will be dirty.
5320 if (!wasdel && tp->t_blk_res == 0 &&
5321 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
5322 XFS_IFORK_NEXTENTS(ip, whichfork) >= /* Note the >= */
5323 XFS_IFORK_MAXEXT(ip, whichfork) &&
5324 del.br_startoff > got.br_startoff &&
5325 del.br_startoff + del.br_blockcount <
5326 got.br_startoff + got.br_blockcount) {
5332 * Unreserve quota and update realtime free space, if
5333 * appropriate. If delayed allocation, update the inode delalloc
5334 * counter now and wait to update the sb counters as
5335 * xfs_bmap_del_extent() might need to borrow some blocks.
5338 ASSERT(startblockval(del.br_startblock) > 0);
5340 xfs_filblks_t rtexts;
5342 rtexts = XFS_FSB_TO_B(mp, del.br_blockcount);
5343 do_div(rtexts, mp->m_sb.sb_rextsize);
5344 xfs_mod_frextents(mp, (int64_t)rtexts);
5345 (void)xfs_trans_reserve_quota_nblks(NULL,
5346 ip, -((long)del.br_blockcount), 0,
5347 XFS_QMOPT_RES_RTBLKS);
5349 (void)xfs_trans_reserve_quota_nblks(NULL,
5350 ip, -((long)del.br_blockcount), 0,
5351 XFS_QMOPT_RES_REGBLKS);
5353 ip->i_delayed_blks -= del.br_blockcount;
5355 cur->bc_private.b.flags |=
5356 XFS_BTCUR_BPRV_WASDEL;
5358 cur->bc_private.b.flags &= ~XFS_BTCUR_BPRV_WASDEL;
5360 error = xfs_bmap_del_extent(ip, tp, &lastx, flist, cur, &del,
5361 &tmp_logflags, whichfork);
5362 logflags |= tmp_logflags;
5366 if (!isrt && wasdel)
5367 xfs_mod_fdblocks(mp, (int64_t)del.br_blockcount, false);
5369 bno = del.br_startoff - 1;
5372 * If not done go on to the next (previous) record.
5374 if (bno != (xfs_fileoff_t)-1 && bno >= start) {
5376 ep = xfs_iext_get_ext(ifp, lastx);
5377 if (xfs_bmbt_get_startoff(ep) > bno) {
5379 ep = xfs_iext_get_ext(ifp,
5382 xfs_bmbt_get_all(ep, &got);
5387 *done = bno == (xfs_fileoff_t)-1 || bno < start || lastx < 0;
5390 * Convert to a btree if necessary.
5392 if (xfs_bmap_needs_btree(ip, whichfork)) {
5393 ASSERT(cur == NULL);
5394 error = xfs_bmap_extents_to_btree(tp, ip, firstblock, flist,
5395 &cur, 0, &tmp_logflags, whichfork);
5396 logflags |= tmp_logflags;
5401 * transform from btree to extents, give it cur
5403 else if (xfs_bmap_wants_extents(ip, whichfork)) {
5404 ASSERT(cur != NULL);
5405 error = xfs_bmap_btree_to_extents(tp, ip, cur, &tmp_logflags,
5407 logflags |= tmp_logflags;
5412 * transform from extents to local?
5417 * Log everything. Do this after conversion, there's no point in
5418 * logging the extent records if we've converted to btree format.
5420 if ((logflags & xfs_ilog_fext(whichfork)) &&
5421 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
5422 logflags &= ~xfs_ilog_fext(whichfork);
5423 else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
5424 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
5425 logflags &= ~xfs_ilog_fbroot(whichfork);
5427 * Log the inode even in the error case; if the transaction
5428 * is dirty we'll need to shut down the filesystem.
5431 xfs_trans_log_inode(tp, ip, logflags);
5434 *firstblock = cur->bc_private.b.firstblock;
5435 cur->bc_private.b.allocated = 0;
5437 xfs_btree_del_cursor(cur,
5438 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
5444 * Determine whether an extent shift can be accomplished by a merge with the
5445 * extent that precedes the target hole of the shift.
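*
* For example, an extent starting at file offset 100 that is shifted left by
* 10 blocks can be merged when the preceding extent ends at offset 90, its
* disk blocks run right up to the shifted extent's start block, the extent
* states match and the combined length does not exceed MAXEXTLEN.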
5449 struct xfs_bmbt_irec *left, /* preceding extent */
5450 struct xfs_bmbt_irec *got, /* current extent to shift */
5451 xfs_fileoff_t shift) /* shift fsb */
5453 xfs_fileoff_t startoff;
5455 startoff = got->br_startoff - shift;
5458 * The extent, once shifted, must be adjacent in-file and on-disk with
5459 * the preceding extent.
5461 if ((left->br_startoff + left->br_blockcount != startoff) ||
5462 (left->br_startblock + left->br_blockcount != got->br_startblock) ||
5463 (left->br_state != got->br_state) ||
5464 (left->br_blockcount + got->br_blockcount > MAXEXTLEN))
5471 * A bmap extent shift adjusts the file offset of an extent to fill a preceding
5472 * hole in the file. If an extent shift would result in the extent being fully
5473 * adjacent to the extent that currently precedes the hole, we can merge with
5474 * the preceding extent rather than do the shift.
5476 * This function assumes the caller has verified a shift-by-merge is possible
5477 * with the provided extents via xfs_bmse_can_merge().
5481 struct xfs_inode *ip,
5483 xfs_fileoff_t shift, /* shift fsb */
5484 int current_ext, /* idx of gotp */
5485 struct xfs_bmbt_rec_host *gotp, /* extent to shift */
5486 struct xfs_bmbt_rec_host *leftp, /* preceding extent */
5487 struct xfs_btree_cur *cur,
5488 int *logflags) /* output */
5490 struct xfs_bmbt_irec got;
5491 struct xfs_bmbt_irec left;
5492 xfs_filblks_t blockcount;
5494 struct xfs_mount *mp = ip->i_mount;
5496 xfs_bmbt_get_all(gotp, &got);
5497 xfs_bmbt_get_all(leftp, &left);
5498 blockcount = left.br_blockcount + got.br_blockcount;
5500 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
5501 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
5502 ASSERT(xfs_bmse_can_merge(&left, &got, shift));
5505 * Merge the in-core extents. Note that the host record pointers and
5506 * current_ext index are invalid once the extent has been removed via
5507 * xfs_iext_remove().
5509 xfs_bmbt_set_blockcount(leftp, blockcount);
5510 xfs_iext_remove(ip, current_ext, 1, 0);
5513 * Update the on-disk extent count, the btree if necessary, and log the inode.
5516 XFS_IFORK_NEXT_SET(ip, whichfork,
5517 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
5518 *logflags |= XFS_ILOG_CORE;
5520 *logflags |= XFS_ILOG_DEXT;
5524 /* lookup and remove the extent to merge */
5525 error = xfs_bmbt_lookup_eq(cur, got.br_startoff, got.br_startblock,
5526 got.br_blockcount, &i);
5529 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
5531 error = xfs_btree_delete(cur, &i);
5534 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
5536 /* lookup and update size of the previous extent */
5537 error = xfs_bmbt_lookup_eq(cur, left.br_startoff, left.br_startblock,
5538 left.br_blockcount, &i);
5541 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
5543 left.br_blockcount = blockcount;
5545 return xfs_bmbt_update(cur, left.br_startoff, left.br_startblock,
5546 left.br_blockcount, left.br_state);
5550 * Shift a single extent.
5554 struct xfs_inode *ip,
5556 xfs_fileoff_t offset_shift_fsb,
5558 struct xfs_bmbt_rec_host *gotp,
5559 struct xfs_btree_cur *cur,
5561 enum shift_direction direction)
5563 struct xfs_ifork *ifp;
5564 struct xfs_mount *mp;
5565 xfs_fileoff_t startoff;
5566 struct xfs_bmbt_rec_host *adj_irecp;
5567 struct xfs_bmbt_irec got;
5568 struct xfs_bmbt_irec adj_irec;
5574 ifp = XFS_IFORK_PTR(ip, whichfork);
5575 total_extents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t);
5577 xfs_bmbt_get_all(gotp, &got);
5579 /* delalloc extents should be prevented by caller */
5580 XFS_WANT_CORRUPTED_RETURN(mp, !isnullstartblock(got.br_startblock));
5582 if (direction == SHIFT_LEFT) {
5583 startoff = got.br_startoff - offset_shift_fsb;
5586 * Check for merge if we've got an extent to the left,
5587 * otherwise make sure there's enough room at the start
5588 * of the file for the shift.
5590 if (!*current_ext) {
5591 if (got.br_startoff < offset_shift_fsb)
5593 goto update_current_ext;
5596 * grab the left extent and check for a large enough hole.
5599 adj_irecp = xfs_iext_get_ext(ifp, *current_ext - 1);
5600 xfs_bmbt_get_all(adj_irecp, &adj_irec);
5603 adj_irec.br_startoff + adj_irec.br_blockcount)
5606 /* check whether to merge the extent or shift it down */
5607 if (xfs_bmse_can_merge(&adj_irec, &got,
5608 offset_shift_fsb)) {
5609 return xfs_bmse_merge(ip, whichfork, offset_shift_fsb,
5610 *current_ext, gotp, adj_irecp,
5614 startoff = got.br_startoff + offset_shift_fsb;
5615 /* nothing to move if this is the last extent */
5616 if (*current_ext >= (total_extents - 1))
5617 goto update_current_ext;
5619 * If this is not the last extent in the file, make sure there
5620 * is enough room between current extent and next extent for
5621 * accommodating the shift.
5623 adj_irecp = xfs_iext_get_ext(ifp, *current_ext + 1);
5624 xfs_bmbt_get_all(adj_irecp, &adj_irec);
5625 if (startoff + got.br_blockcount > adj_irec.br_startoff)
5628 * Unlike a left shift (which involves a hole punch),
5629 * a right shift does not modify extent neighbors
5630 * in any way. We should never find mergeable extents
5631 * in this scenario. Check anyway and warn if we
5632 * encounter two extents that could be one.
5634 if (xfs_bmse_can_merge(&got, &adj_irec, offset_shift_fsb))
5638 * Increment the extent index for the next iteration, update the start
5639 * offset of the in-core extent and update the btree if applicable.
5642 if (direction == SHIFT_LEFT)
5646 xfs_bmbt_set_startoff(gotp, startoff);
5647 *logflags |= XFS_ILOG_CORE;
5649 *logflags |= XFS_ILOG_DEXT;
5653 error = xfs_bmbt_lookup_eq(cur, got.br_startoff, got.br_startblock,
5654 got.br_blockcount, &i);
5657 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
5659 got.br_startoff = startoff;
5660 return xfs_bmbt_update(cur, got.br_startoff, got.br_startblock,
5661 got.br_blockcount, got.br_state);
5665 * Shift extent records to the left/right to cover/create a hole.
5667 * The maximum number of extents to be shifted in a single operation is
5668 * @num_exts. @stop_fsb specifies the file offset at which to stop the shift and
5669 * the file offset where we've left off is returned in @next_fsb. @offset_shift_fsb
5670 * is the length by which each extent is shifted. If there is no hole to shift
5671 * the extents into, this is considered an invalid operation and we abort immediately.
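* (A left shift covers an existing hole in the file; a right shift creates
* a new one.)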
5675 xfs_bmap_shift_extents(
5676 struct xfs_trans *tp,
5677 struct xfs_inode *ip,
5678 xfs_fileoff_t *next_fsb,
5679 xfs_fileoff_t offset_shift_fsb,
5681 xfs_fileoff_t stop_fsb,
5682 xfs_fsblock_t *firstblock,
5683 struct xfs_bmap_free *flist,
5684 enum shift_direction direction,
5687 struct xfs_btree_cur *cur = NULL;
5688 struct xfs_bmbt_rec_host *gotp;
5689 struct xfs_bmbt_irec got;
5690 struct xfs_mount *mp = ip->i_mount;
5691 struct xfs_ifork *ifp;
5692 xfs_extnum_t nexts = 0;
5693 xfs_extnum_t current_ext;
5694 xfs_extnum_t total_extents;
5695 xfs_extnum_t stop_extent;
5697 int whichfork = XFS_DATA_FORK;
5700 if (unlikely(XFS_TEST_ERROR(
5701 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
5702 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
5703 mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
5704 XFS_ERROR_REPORT("xfs_bmap_shift_extents",
5705 XFS_ERRLEVEL_LOW, mp);
5706 return -EFSCORRUPTED;
5709 if (XFS_FORCED_SHUTDOWN(mp))
5712 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
5713 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
5714 ASSERT(direction == SHIFT_LEFT || direction == SHIFT_RIGHT);
5715 ASSERT(*next_fsb != NULLFSBLOCK || direction == SHIFT_RIGHT);
5717 ifp = XFS_IFORK_PTR(ip, whichfork);
5718 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
5719 /* Read in all the extents */
5720 error = xfs_iread_extents(tp, ip, whichfork);
5725 if (ifp->if_flags & XFS_IFBROOT) {
5726 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5727 cur->bc_private.b.firstblock = *firstblock;
5728 cur->bc_private.b.flist = flist;
5729 cur->bc_private.b.flags = 0;
5733 * There may be delalloc extents in the data fork before the range we
5734 * are collapsing out, so we cannot use the count of real extents here.
5735 * Instead we have to calculate it from the incore fork.
5737 total_extents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t);
5738 if (total_extents == 0) {
5744 * For the first right shift, we need to initialize next_fsb.
5746 if (*next_fsb == NULLFSBLOCK) {
5747 gotp = xfs_iext_get_ext(ifp, total_extents - 1);
5748 xfs_bmbt_get_all(gotp, &got);
5749 *next_fsb = got.br_startoff;
5750 if (stop_fsb > *next_fsb) {
5756 /* Lookup the extent index at which we have to stop */
5757 if (direction == SHIFT_RIGHT) {
5758 gotp = xfs_iext_bno_to_ext(ifp, stop_fsb, &stop_extent);
5759 /* Make stop_extent exclusive of shift range */
5762 stop_extent = total_extents;
5765 * Look up the extent index for the fsb where we start shifting. We can
5766 * henceforth iterate with current_ext as extent list changes are locked out via the ilock.
5769 * gotp can be null in 2 cases: 1) if there are no extents or 2)
5770 * *next_fsb lies in a hole beyond which there are no extents. Either way, we are done.
5773 gotp = xfs_iext_bno_to_ext(ifp, *next_fsb, &current_ext);
5779 /* some sanity checking before we finally start shifting extents */
5780 if ((direction == SHIFT_LEFT && current_ext >= stop_extent) ||
5781 (direction == SHIFT_RIGHT && current_ext <= stop_extent)) {
5786 while (nexts++ < num_exts) {
5787 error = xfs_bmse_shift_one(ip, whichfork, offset_shift_fsb,
5788 &current_ext, gotp, cur, &logflags,
5793 * If there was an extent merge during the shift, the extent
5794 * count can change. Update the total and grab the next record.
5796 if (direction == SHIFT_LEFT) {
5797 total_extents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t);
5798 stop_extent = total_extents;
5801 if (current_ext == stop_extent) {
5803 *next_fsb = NULLFSBLOCK;
5806 gotp = xfs_iext_get_ext(ifp, current_ext);
5810 xfs_bmbt_get_all(gotp, &got);
5811 *next_fsb = got.br_startoff;
5816 xfs_btree_del_cursor(cur,
5817 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
5820 xfs_trans_log_inode(tp, ip, logflags);
5826 * Splits an extent into two extents at the split_fsb block such that split_fsb
5827 * becomes the first block of the second, new extent. @current_ext is the target
5828 * extent to be split. @split_fsb is the block where the extent is split.
5829 * If split_fsb lies in a hole or at the first block of an extent, just return 0.
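*
* For example, splitting an extent [startoff 100, 50 blocks] at split_fsb 120
* trims the original extent to 20 blocks and inserts a new 30-block extent at
* offset 120 that maps the remaining disk blocks.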
5832 xfs_bmap_split_extent_at(
5833 struct xfs_trans *tp,
5834 struct xfs_inode *ip,
5835 xfs_fileoff_t split_fsb,
5836 xfs_fsblock_t *firstfsb,
5837 struct xfs_bmap_free *free_list)
5839 int whichfork = XFS_DATA_FORK;
5840 struct xfs_btree_cur *cur = NULL;
5841 struct xfs_bmbt_rec_host *gotp;
5842 struct xfs_bmbt_irec got;
5843 struct xfs_bmbt_irec new; /* split extent */
5844 struct xfs_mount *mp = ip->i_mount;
5845 struct xfs_ifork *ifp;
5846 xfs_fsblock_t gotblkcnt; /* new block count for got */
5847 xfs_extnum_t current_ext;
5852 if (unlikely(XFS_TEST_ERROR(
5853 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
5854 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
5855 mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
5856 XFS_ERROR_REPORT("xfs_bmap_split_extent_at",
5857 XFS_ERRLEVEL_LOW, mp);
5858 return -EFSCORRUPTED;
5861 if (XFS_FORCED_SHUTDOWN(mp))
5864 ifp = XFS_IFORK_PTR(ip, whichfork);
5865 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
5866 /* Read in all the extents */
5867 error = xfs_iread_extents(tp, ip, whichfork);
5873 * gotp can be null in 2 cases: 1) if there are no extents
5874 * or 2) split_fsb lies in a hole beyond which there are
5875 * no extents. Either way, we are done.
5877 gotp = xfs_iext_bno_to_ext(ifp, split_fsb, &current_ext);
5881 xfs_bmbt_get_all(gotp, &got);
5884 * Check if split_fsb lies in a hole or at the start boundary offset of the extent.
5887 if (got.br_startoff >= split_fsb)
5890 gotblkcnt = split_fsb - got.br_startoff;
5891 new.br_startoff = split_fsb;
5892 new.br_startblock = got.br_startblock + gotblkcnt;
5893 new.br_blockcount = got.br_blockcount - gotblkcnt;
5894 new.br_state = got.br_state;
5896 if (ifp->if_flags & XFS_IFBROOT) {
5897 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5898 cur->bc_private.b.firstblock = *firstfsb;
5899 cur->bc_private.b.flist = free_list;
5900 cur->bc_private.b.flags = 0;
5901 error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
5907 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
5910 xfs_bmbt_set_blockcount(gotp, gotblkcnt);
5911 got.br_blockcount = gotblkcnt;
5913 logflags = XFS_ILOG_CORE;
5915 error = xfs_bmbt_update(cur, got.br_startoff,
5922 logflags |= XFS_ILOG_DEXT;
5924 /* Add new extent */
5926 xfs_iext_insert(ip, current_ext, 1, &new, 0);
5927 XFS_IFORK_NEXT_SET(ip, whichfork,
5928 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
5931 error = xfs_bmbt_lookup_eq(cur, new.br_startoff,
5932 new.br_startblock, new.br_blockcount,
5936 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, del_cursor);
5937 cur->bc_rec.b.br_state = new.br_state;
5939 error = xfs_btree_insert(cur, &i);
5942 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
5946 * Convert to a btree if necessary.
5948 if (xfs_bmap_needs_btree(ip, whichfork)) {
5949 int tmp_logflags; /* partial log flag return val */
5951 ASSERT(cur == NULL);
5952 error = xfs_bmap_extents_to_btree(tp, ip, firstfsb, free_list,
5953 &cur, 0, &tmp_logflags, whichfork);
5954 logflags |= tmp_logflags;
5959 cur->bc_private.b.allocated = 0;
5960 xfs_btree_del_cursor(cur,
5961 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
5965 xfs_trans_log_inode(tp, ip, logflags);
5970 xfs_bmap_split_extent(
5971 struct xfs_inode *ip,
5972 xfs_fileoff_t split_fsb)
5974 struct xfs_mount *mp = ip->i_mount;
5975 struct xfs_trans *tp;
5976 struct xfs_bmap_free free_list;
5977 xfs_fsblock_t firstfsb;
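/*
* Allocate a write transaction, do the split under the ILOCK, then finish
* the deferred frees and commit; on error the free list and the transaction
* are cancelled.
*/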
5980 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
5981 XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
5985 xfs_ilock(ip, XFS_ILOCK_EXCL);
5986 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
5988 xfs_bmap_init(&free_list, &firstfsb);
5990 error = xfs_bmap_split_extent_at(tp, ip, split_fsb,
5991 &firstfsb, &free_list);
5995 error = xfs_bmap_finish(&tp, &free_list, NULL);
5999 return xfs_trans_commit(tp);
6002 xfs_bmap_cancel(&free_list);
6003 xfs_trans_cancel(tp);