[linux-2.6] fs/xfs/xfs_bmap.c
1 /*
2  * Copyright (c) 2000-2005 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_types.h"
21 #include "xfs_bit.h"
22 #include "xfs_log.h"
23 #include "xfs_inum.h"
24 #include "xfs_trans.h"
25 #include "xfs_sb.h"
26 #include "xfs_ag.h"
27 #include "xfs_dir.h"
28 #include "xfs_dir2.h"
29 #include "xfs_da_btree.h"
30 #include "xfs_bmap_btree.h"
31 #include "xfs_alloc_btree.h"
32 #include "xfs_ialloc_btree.h"
33 #include "xfs_dir_sf.h"
34 #include "xfs_dir2_sf.h"
35 #include "xfs_attr_sf.h"
36 #include "xfs_dinode.h"
37 #include "xfs_inode.h"
38 #include "xfs_btree.h"
39 #include "xfs_dmapi.h"
40 #include "xfs_mount.h"
41 #include "xfs_ialloc.h"
42 #include "xfs_itable.h"
43 #include "xfs_inode_item.h"
44 #include "xfs_extfree_item.h"
45 #include "xfs_alloc.h"
46 #include "xfs_bmap.h"
47 #include "xfs_rtalloc.h"
48 #include "xfs_error.h"
49 #include "xfs_dir_leaf.h"
50 #include "xfs_attr_leaf.h"
51 #include "xfs_rw.h"
52 #include "xfs_quota.h"
53 #include "xfs_trans_space.h"
54 #include "xfs_buf_item.h"
55
56
57 #ifdef DEBUG
58 STATIC void
59 xfs_bmap_check_leaf_extents(xfs_btree_cur_t *cur, xfs_inode_t *ip, int whichfork);
60 #endif
61
62 kmem_zone_t             *xfs_bmap_free_item_zone;
63
64 /*
65  * Prototypes for internal bmap routines.
66  */
67
68
69 /*
70  * Called from xfs_bmap_add_attrfork to handle extents format files.
71  */
72 STATIC int                                      /* error */
73 xfs_bmap_add_attrfork_extents(
74         xfs_trans_t             *tp,            /* transaction pointer */
75         xfs_inode_t             *ip,            /* incore inode pointer */
76         xfs_fsblock_t           *firstblock,    /* first block allocated */
77         xfs_bmap_free_t         *flist,         /* blocks to free at commit */
78         int                     *flags);        /* inode logging flags */
79
80 /*
81  * Called from xfs_bmap_add_attrfork to handle local format files.
82  */
83 STATIC int                                      /* error */
84 xfs_bmap_add_attrfork_local(
85         xfs_trans_t             *tp,            /* transaction pointer */
86         xfs_inode_t             *ip,            /* incore inode pointer */
87         xfs_fsblock_t           *firstblock,    /* first block allocated */
88         xfs_bmap_free_t         *flist,         /* blocks to free at commit */
89         int                     *flags);        /* inode logging flags */
90
91 /*
92  * Called by xfs_bmapi to update extent list structure and the btree
93  * after allocating space (or doing a delayed allocation).
94  */
95 STATIC int                              /* error */
96 xfs_bmap_add_extent(
97         xfs_inode_t             *ip,    /* incore inode pointer */
98         xfs_extnum_t            idx,    /* extent number to update/insert */
99         xfs_btree_cur_t         **curp, /* if *curp is null, not a btree */
100         xfs_bmbt_irec_t         *new,   /* new data to put in extent list */
101         xfs_fsblock_t           *first, /* pointer to firstblock variable */
102         xfs_bmap_free_t         *flist, /* list of extents to be freed */
103         int                     *logflagsp, /* inode logging flags */
104         int                     whichfork, /* data or attr fork */
105         int                     rsvd);  /* OK to allocate reserved blocks */
106
107 /*
108  * Called by xfs_bmap_add_extent to handle cases converting a delayed
109  * allocation to a real allocation.
110  */
111 STATIC int                              /* error */
112 xfs_bmap_add_extent_delay_real(
113         xfs_inode_t             *ip,    /* incore inode pointer */
114         xfs_extnum_t            idx,    /* extent number to update/insert */
115         xfs_btree_cur_t         **curp, /* if *curp is null, not a btree */
116         xfs_bmbt_irec_t         *new,   /* new data to put in extent list */
117         xfs_filblks_t           *dnew,  /* new delayed-alloc indirect blocks */
118         xfs_fsblock_t           *first, /* pointer to firstblock variable */
119         xfs_bmap_free_t         *flist, /* list of extents to be freed */
120         int                     *logflagsp, /* inode logging flags */
121         int                     rsvd);  /* OK to allocate reserved blocks */
122
123 /*
124  * Called by xfs_bmap_add_extent to handle cases converting a hole
125  * to a delayed allocation.
126  */
127 STATIC int                              /* error */
128 xfs_bmap_add_extent_hole_delay(
129         xfs_inode_t             *ip,    /* incore inode pointer */
130         xfs_extnum_t            idx,    /* extent number to update/insert */
131         xfs_btree_cur_t         *cur,   /* if null, not a btree */
132         xfs_bmbt_irec_t         *new,   /* new data to put in extent list */
133         int                     *logflagsp,/* inode logging flags */
134         int                     rsvd);  /* OK to allocate reserved blocks */
135
136 /*
137  * Called by xfs_bmap_add_extent to handle cases converting a hole
138  * to a real allocation.
139  */
140 STATIC int                              /* error */
141 xfs_bmap_add_extent_hole_real(
142         xfs_inode_t             *ip,    /* incore inode pointer */
143         xfs_extnum_t            idx,    /* extent number to update/insert */
144         xfs_btree_cur_t         *cur,   /* if null, not a btree */
145         xfs_bmbt_irec_t         *new,   /* new data to put in extent list */
146         int                     *logflagsp, /* inode logging flags */
147         int                     whichfork); /* data or attr fork */
148
149 /*
150  * Called by xfs_bmap_add_extent to handle cases converting an unwritten
151  * allocation to a real allocation or vice versa.
152  */
153 STATIC int                              /* error */
154 xfs_bmap_add_extent_unwritten_real(
155         xfs_inode_t             *ip,    /* incore inode pointer */
156         xfs_extnum_t            idx,    /* extent number to update/insert */
157         xfs_btree_cur_t         **curp, /* if *curp is null, not a btree */
158         xfs_bmbt_irec_t         *new,   /* new data to put in extent list */
159         int                     *logflagsp); /* inode logging flags */
160
161 /*
162  * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
163  * It figures out where to ask the underlying allocator to put the new extent.
164  */
165 STATIC int                              /* error */
166 xfs_bmap_alloc(
167         xfs_bmalloca_t          *ap);   /* bmap alloc argument struct */
168
169 /*
170  * Transform a btree format file with only one leaf node, where the
171  * extents list will fit in the inode, into an extents format file.
172  * Since the extent list is already in-core, all we have to do is
173  * give up the space for the btree root and pitch the leaf block.
174  */
175 STATIC int                              /* error */
176 xfs_bmap_btree_to_extents(
177         xfs_trans_t             *tp,    /* transaction pointer */
178         xfs_inode_t             *ip,    /* incore inode pointer */
179         xfs_btree_cur_t         *cur,   /* btree cursor */
180         int                     *logflagsp, /* inode logging flags */
181         int                     whichfork); /* data or attr fork */
182
183 #ifdef DEBUG
184 /*
185  * Check that the extents list for the inode ip is in the right order.
186  */
187 STATIC void
188 xfs_bmap_check_extents(
189         xfs_inode_t             *ip,            /* incore inode pointer */
190         int                     whichfork);     /* data or attr fork */
191 #endif
192
193 /*
194  * Called by xfs_bmapi to update extent list structure and the btree
195  * after removing space (or undoing a delayed allocation).
196  */
197 STATIC int                              /* error */
198 xfs_bmap_del_extent(
199         xfs_inode_t             *ip,    /* incore inode pointer */
200         xfs_trans_t             *tp,    /* current trans pointer */
201         xfs_extnum_t            idx,    /* extent number to update/insert */
202         xfs_bmap_free_t         *flist, /* list of extents to be freed */
203         xfs_btree_cur_t         *cur,   /* if null, not a btree */
204         xfs_bmbt_irec_t         *new,   /* new data to put in extent list */
205         int                     *logflagsp,/* inode logging flags */
206         int                     whichfork, /* data or attr fork */
207         int                     rsvd);   /* OK to allocate reserved blocks */
208
209 /*
210  * Remove the entry "free" from the free item list.  Prev points to the
211  * previous entry, unless "free" is the head of the list.
212  */
213 STATIC void
214 xfs_bmap_del_free(
215         xfs_bmap_free_t         *flist, /* free item list header */
216         xfs_bmap_free_item_t    *prev,  /* previous item on list, if any */
217         xfs_bmap_free_item_t    *free); /* list item to be freed */
218
219 /*
220  * Remove count entries from the extents array for inode "ip", starting
221  * at index "idx".  Copies the remaining items down over the deleted ones,
222  * and gives back the excess memory.
223  */
224 STATIC void
225 xfs_bmap_delete_exlist(
226         xfs_inode_t     *ip,            /* incore inode pointer */
227         xfs_extnum_t    idx,            /* starting delete index */
228         xfs_extnum_t    count,          /* count of items to delete */
229         int             whichfork);     /* data or attr fork */
230
231 /*
232  * Convert an extents-format file into a btree-format file.
233  * The new file will have a root block (in the inode) and a single child block.
234  */
235 STATIC int                                      /* error */
236 xfs_bmap_extents_to_btree(
237         xfs_trans_t             *tp,            /* transaction pointer */
238         xfs_inode_t             *ip,            /* incore inode pointer */
239         xfs_fsblock_t           *firstblock,    /* first-block-allocated */
240         xfs_bmap_free_t         *flist,         /* blocks freed in xaction */
241         xfs_btree_cur_t         **curp,         /* cursor returned to caller */
242         int                     wasdel,         /* converting a delayed alloc */
243         int                     *logflagsp,     /* inode logging flags */
244         int                     whichfork);     /* data or attr fork */
245
246 /*
247  * Insert new item(s) in the extent list for inode "ip".
248  * Count new items are inserted at offset idx.
249  */
250 STATIC void
251 xfs_bmap_insert_exlist(
252         xfs_inode_t     *ip,            /* incore inode pointer */
253         xfs_extnum_t    idx,            /* starting index of new items */
254         xfs_extnum_t    count,          /* number of inserted items */
255         xfs_bmbt_irec_t *new,           /* items to insert */
256         int             whichfork);     /* data or attr fork */
257
258 /*
259  * Convert a local file to an extents file.
260  * This code is sort of bogus, since the file data needs to get
261  * logged so it won't be lost.  The bmap-level manipulations are ok, though.
262  */
263 STATIC int                              /* error */
264 xfs_bmap_local_to_extents(
265         xfs_trans_t     *tp,            /* transaction pointer */
266         xfs_inode_t     *ip,            /* incore inode pointer */
267         xfs_fsblock_t   *firstblock,    /* first block allocated in xaction */
268         xfs_extlen_t    total,          /* total blocks needed by transaction */
269         int             *logflagsp,     /* inode logging flags */
270         int             whichfork);     /* data or attr fork */
271
272 /*
273  * Search the extents list for the inode, for the extent containing bno.
274  * If bno lies in a hole, point to the next entry.  If bno lies past eof,
275  * *eofp will be set, and *prevp will contain the last entry (null if none).
276  * Else, *lastxp will be set to the index of the found
277  * entry; *gotp will contain the entry.
278  */
279 STATIC xfs_bmbt_rec_t *                 /* pointer to found extent entry */
280 xfs_bmap_search_extents(
281         xfs_inode_t     *ip,            /* incore inode pointer */
282         xfs_fileoff_t   bno,            /* block number searched for */
283         int             whichfork,      /* data or attr fork */
284         int             *eofp,          /* out: end of file found */
285         xfs_extnum_t    *lastxp,        /* out: last extent index */
286         xfs_bmbt_irec_t *gotp,          /* out: extent entry found */
287         xfs_bmbt_irec_t *prevp);        /* out: previous extent entry found */
288
289 /*
290  * Check the last inode extent to determine whether this allocation will result
291  * in blocks being allocated at the end of the file. When we allocate new data
292  * blocks at the end of the file which do not start at the previous data block,
293  * we will try to align the new blocks at stripe unit boundaries.
294  */
295 STATIC int                              /* error */
296 xfs_bmap_isaeof(
297         xfs_inode_t     *ip,            /* incore inode pointer */
298         xfs_fileoff_t   off,            /* file offset in fsblocks */
299         int             whichfork,      /* data or attribute fork */
300         char            *aeof);         /* return value */
301
302 #ifdef XFS_BMAP_TRACE
303 /*
304  * Add a bmap trace buffer entry.  Base routine for the others.
305  */
306 STATIC void
307 xfs_bmap_trace_addentry(
308         int             opcode,         /* operation */
309         char            *fname,         /* function name */
310         char            *desc,          /* operation description */
311         xfs_inode_t     *ip,            /* incore inode pointer */
312         xfs_extnum_t    idx,            /* index of entry(ies) */
313         xfs_extnum_t    cnt,            /* count of entries, 1 or 2 */
314         xfs_bmbt_rec_t  *r1,            /* first record */
315         xfs_bmbt_rec_t  *r2,            /* second record or null */
316         int             whichfork);     /* data or attr fork */
317
318 /*
319  * Add bmap trace entry prior to a call to xfs_bmap_delete_exlist.
320  */
321 STATIC void
322 xfs_bmap_trace_delete(
323         char            *fname,         /* function name */
324         char            *desc,          /* operation description */
325         xfs_inode_t     *ip,            /* incore inode pointer */
326         xfs_extnum_t    idx,            /* index of entry(entries) deleted */
327         xfs_extnum_t    cnt,            /* count of entries deleted, 1 or 2 */
328         int             whichfork);     /* data or attr fork */
329
330 /*
331  * Add bmap trace entry prior to a call to xfs_bmap_insert_exlist, or
332  * reading in the extents list from the disk (in the btree).
333  */
334 STATIC void
335 xfs_bmap_trace_insert(
336         char            *fname,         /* function name */
337         char            *desc,          /* operation description */
338         xfs_inode_t     *ip,            /* incore inode pointer */
339         xfs_extnum_t    idx,            /* index of entry(entries) inserted */
340         xfs_extnum_t    cnt,            /* count of entries inserted, 1 or 2 */
341         xfs_bmbt_irec_t *r1,            /* inserted record 1 */
342         xfs_bmbt_irec_t *r2,            /* inserted record 2 or null */
343         int             whichfork);     /* data or attr fork */
344
345 /*
346  * Add bmap trace entry after updating an extent list entry in place.
347  */
348 STATIC void
349 xfs_bmap_trace_post_update(
350         char            *fname,         /* function name */
351         char            *desc,          /* operation description */
352         xfs_inode_t     *ip,            /* incore inode pointer */
353         xfs_extnum_t    idx,            /* index of entry updated */
354         int             whichfork);     /* data or attr fork */
355
356 /*
357  * Add bmap trace entry prior to updating an extent list entry in place.
358  */
359 STATIC void
360 xfs_bmap_trace_pre_update(
361         char            *fname,         /* function name */
362         char            *desc,          /* operation description */
363         xfs_inode_t     *ip,            /* incore inode pointer */
364         xfs_extnum_t    idx,            /* index of entry to be updated */
365         int             whichfork);     /* data or attr fork */
366
367 #else
368 #define xfs_bmap_trace_delete(f,d,ip,i,c,w)
369 #define xfs_bmap_trace_insert(f,d,ip,i,c,r1,r2,w)
370 #define xfs_bmap_trace_post_update(f,d,ip,i,w)
371 #define xfs_bmap_trace_pre_update(f,d,ip,i,w)
372 #endif  /* XFS_BMAP_TRACE */
373
374 /*
375  * Compute the worst-case number of indirect blocks that will be used
376  * for ip's delayed extent of length "len".
377  */
378 STATIC xfs_filblks_t
379 xfs_bmap_worst_indlen(
380         xfs_inode_t             *ip,    /* incore inode pointer */
381         xfs_filblks_t           len);   /* delayed extent length */
382
383 #ifdef DEBUG
384 /*
385  * Perform various validation checks on the values being returned
386  * from xfs_bmapi().
387  */
388 STATIC void
389 xfs_bmap_validate_ret(
390         xfs_fileoff_t           bno,
391         xfs_filblks_t           len,
392         int                     flags,
393         xfs_bmbt_irec_t         *mval,
394         int                     nmap,
395         int                     ret_nmap);
396 #else
397 #define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
398 #endif /* DEBUG */
399
400 #if defined(XFS_RW_TRACE)
401 STATIC void
402 xfs_bunmap_trace(
403         xfs_inode_t             *ip,
404         xfs_fileoff_t           bno,
405         xfs_filblks_t           len,
406         int                     flags,
407         inst_t                  *ra);
408 #else
409 #define xfs_bunmap_trace(ip, bno, len, flags, ra)
410 #endif  /* XFS_RW_TRACE */
411
412 STATIC int
413 xfs_bmap_count_tree(
414         xfs_mount_t     *mp,
415         xfs_trans_t     *tp,
416         xfs_fsblock_t   blockno,
417         int             levelin,
418         int             *count);
419
420 STATIC int
421 xfs_bmap_count_leaves(
422         xfs_bmbt_rec_t          *frp,
423         int                     numrecs,
424         int                     *count);
425
426 STATIC int
427 xfs_bmap_disk_count_leaves(
428         xfs_bmbt_rec_t          *frp,
429         int                     numrecs,
430         int                     *count);
431
432 /*
433  * Bmap internal routines.
434  */
435
436 /*
437  * Called from xfs_bmap_add_attrfork to handle btree format files.
438  */
439 STATIC int                                      /* error */
440 xfs_bmap_add_attrfork_btree(
441         xfs_trans_t             *tp,            /* transaction pointer */
442         xfs_inode_t             *ip,            /* incore inode pointer */
443         xfs_fsblock_t           *firstblock,    /* first block allocated */
444         xfs_bmap_free_t         *flist,         /* blocks to free at commit */
445         int                     *flags)         /* inode logging flags */
446 {
447         xfs_btree_cur_t         *cur;           /* btree cursor */
448         int                     error;          /* error return value */
449         xfs_mount_t             *mp;            /* file system mount struct */
450         int                     stat;           /* newroot status */
451
452         mp = ip->i_mount;
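        /*
         * If the existing bmap btree root still fits in the data fork
         * after making room for the attribute fork, relogging the root
         * is enough; otherwise push the root contents out into a newly
         * allocated block with a newroot call.
         */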
453         if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip))
454                 *flags |= XFS_ILOG_DBROOT;
455         else {
456                 cur = xfs_btree_init_cursor(mp, tp, NULL, 0, XFS_BTNUM_BMAP, ip,
457                         XFS_DATA_FORK);
458                 cur->bc_private.b.flist = flist;
459                 cur->bc_private.b.firstblock = *firstblock;
460                 if ((error = xfs_bmbt_lookup_ge(cur, 0, 0, 0, &stat)))
461                         goto error0;
462                 ASSERT(stat == 1);      /* must be at least one entry */
463                 if ((error = xfs_bmbt_newroot(cur, flags, &stat)))
464                         goto error0;
465                 if (stat == 0) {
466                         xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
467                         return XFS_ERROR(ENOSPC);
468                 }
469                 *firstblock = cur->bc_private.b.firstblock;
470                 cur->bc_private.b.allocated = 0;
471                 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
472         }
473         return 0;
474 error0:
475         xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
476         return error;
477 }
478
479 /*
480  * Called from xfs_bmap_add_attrfork to handle extents format files.
481  */
482 STATIC int                                      /* error */
483 xfs_bmap_add_attrfork_extents(
484         xfs_trans_t             *tp,            /* transaction pointer */
485         xfs_inode_t             *ip,            /* incore inode pointer */
486         xfs_fsblock_t           *firstblock,    /* first block allocated */
487         xfs_bmap_free_t         *flist,         /* blocks to free at commit */
488         int                     *flags)         /* inode logging flags */
489 {
490         xfs_btree_cur_t         *cur;           /* bmap btree cursor */
491         int                     error;          /* error return value */
492
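        /*
         * If all the extent records still fit inline in the (now smaller)
         * data fork there is nothing to move; otherwise convert the data
         * fork to btree format to make room for the attribute fork.
         */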
493         if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip))
494                 return 0;
495         cur = NULL;
496         error = xfs_bmap_extents_to_btree(tp, ip, firstblock, flist, &cur, 0,
497                 flags, XFS_DATA_FORK);
498         if (cur) {
499                 cur->bc_private.b.allocated = 0;
500                 xfs_btree_del_cursor(cur,
501                         error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
502         }
503         return error;
504 }
505
506 /*
507  * Called from xfs_bmap_add_attrfork to handle local format files.
508  */
509 STATIC int                                      /* error */
510 xfs_bmap_add_attrfork_local(
511         xfs_trans_t             *tp,            /* transaction pointer */
512         xfs_inode_t             *ip,            /* incore inode pointer */
513         xfs_fsblock_t           *firstblock,    /* first block allocated */
514         xfs_bmap_free_t         *flist,         /* blocks to free at commit */
515         int                     *flags)         /* inode logging flags */
516 {
517         xfs_da_args_t           dargs;          /* args for dir/attr code */
518         int                     error;          /* error return value */
519         xfs_mount_t             *mp;            /* mount structure pointer */
520
521         if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
522                 return 0;
523         if ((ip->i_d.di_mode & S_IFMT) == S_IFDIR) {
524                 mp = ip->i_mount;
525                 memset(&dargs, 0, sizeof(dargs));
526                 dargs.dp = ip;
527                 dargs.firstblock = firstblock;
528                 dargs.flist = flist;
529                 dargs.total = mp->m_dirblkfsbs;
530                 dargs.whichfork = XFS_DATA_FORK;
531                 dargs.trans = tp;
532                 error = XFS_DIR_SHORTFORM_TO_SINGLE(mp, &dargs);
533         } else
534                 error = xfs_bmap_local_to_extents(tp, ip, firstblock, 1, flags,
535                         XFS_DATA_FORK);
536         return error;
537 }
538
539 /*
540  * Called by xfs_bmapi to update extent list structure and the btree
541  * after allocating space (or doing a delayed allocation).
542  */
543 STATIC int                              /* error */
544 xfs_bmap_add_extent(
545         xfs_inode_t             *ip,    /* incore inode pointer */
546         xfs_extnum_t            idx,    /* extent number to update/insert */
547         xfs_btree_cur_t         **curp, /* if *curp is null, not a btree */
548         xfs_bmbt_irec_t         *new,   /* new data to put in extent list */
549         xfs_fsblock_t           *first, /* pointer to firstblock variable */
550         xfs_bmap_free_t         *flist, /* list of extents to be freed */
551         int                     *logflagsp, /* inode logging flags */
552         int                     whichfork, /* data or attr fork */
553         int                     rsvd)   /* OK to use reserved data blocks */
554 {
555         xfs_btree_cur_t         *cur;   /* btree cursor or null */
556         xfs_filblks_t           da_new; /* new count del alloc blocks used */
557         xfs_filblks_t           da_old; /* old count del alloc blocks used */
558         int                     error;  /* error return value */
559 #ifdef XFS_BMAP_TRACE
560         static char             fname[] = "xfs_bmap_add_extent";
561 #endif
562         xfs_ifork_t             *ifp;   /* inode fork ptr */
563         int                     logflags; /* returned value */
564         xfs_extnum_t            nextents; /* number of extents in file now */
565
566         XFS_STATS_INC(xs_add_exlist);
567         cur = *curp;
568         ifp = XFS_IFORK_PTR(ip, whichfork);
569         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
570         ASSERT(idx <= nextents);
571         da_old = da_new = 0;
572         error = 0;
573         /*
574          * This is the first extent added to a new/empty file.
575          * Special case this one, so other routines get to assume there are
576          * already extents in the list.
577          */
578         if (nextents == 0) {
579                 xfs_bmap_trace_insert(fname, "insert empty", ip, 0, 1, new,
580                         NULL, whichfork);
581                 xfs_bmap_insert_exlist(ip, 0, 1, new, whichfork);
582                 ASSERT(cur == NULL);
583                 ifp->if_lastex = 0;
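                /*
                 * A delayed allocation lives only in the incore extent
                 * list; it is not counted in di_nextents and needs no
                 * logging.  Only a real extent bumps the count here.
                 */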
584                 if (!ISNULLSTARTBLOCK(new->br_startblock)) {
585                         XFS_IFORK_NEXT_SET(ip, whichfork, 1);
586                         logflags = XFS_ILOG_CORE | XFS_ILOG_FEXT(whichfork);
587                 } else
588                         logflags = 0;
589         }
590         /*
591          * Any kind of new delayed allocation goes here.
592          */
593         else if (ISNULLSTARTBLOCK(new->br_startblock)) {
594                 if (cur)
595                         ASSERT((cur->bc_private.b.flags &
596                                 XFS_BTCUR_BPRV_WASDEL) == 0);
597                 if ((error = xfs_bmap_add_extent_hole_delay(ip, idx, cur, new,
598                                 &logflags, rsvd)))
599                         goto done;
600         }
601         /*
602          * Real allocation off the end of the file.
603          */
604         else if (idx == nextents) {
605                 if (cur)
606                         ASSERT((cur->bc_private.b.flags &
607                                 XFS_BTCUR_BPRV_WASDEL) == 0);
608                 if ((error = xfs_bmap_add_extent_hole_real(ip, idx, cur, new,
609                                 &logflags, whichfork)))
610                         goto done;
611         } else {
612                 xfs_bmbt_irec_t prev;   /* old extent at offset idx */
613
614                 /*
615                  * Get the record referred to by idx.
616                  */
617                 xfs_bmbt_get_all(&ifp->if_u1.if_extents[idx], &prev);
618                 /*
619                  * If it's a real allocation record, and the new allocation ends
620                  * after the start of the referred to record, then we're filling
621                  * in a delayed or unwritten allocation with a real one, or
622                  * converting real back to unwritten.
623                  */
624                 if (!ISNULLSTARTBLOCK(new->br_startblock) &&
625                     new->br_startoff + new->br_blockcount > prev.br_startoff) {
626                         if (prev.br_state != XFS_EXT_UNWRITTEN &&
627                             ISNULLSTARTBLOCK(prev.br_startblock)) {
628                                 da_old = STARTBLOCKVAL(prev.br_startblock);
629                                 if (cur)
630                                         ASSERT(cur->bc_private.b.flags &
631                                                 XFS_BTCUR_BPRV_WASDEL);
632                                 if ((error = xfs_bmap_add_extent_delay_real(ip,
633                                         idx, &cur, new, &da_new, first, flist,
634                                         &logflags, rsvd)))
635                                         goto done;
636                         } else if (new->br_state == XFS_EXT_NORM) {
637                                 ASSERT(new->br_state == XFS_EXT_NORM);
638                                 if ((error = xfs_bmap_add_extent_unwritten_real(
639                                         ip, idx, &cur, new, &logflags)))
640                                         goto done;
641                         } else {
642                                 ASSERT(new->br_state == XFS_EXT_UNWRITTEN);
643                                 if ((error = xfs_bmap_add_extent_unwritten_real(
644                                         ip, idx, &cur, new, &logflags)))
645                                         goto done;
646                         }
647                         ASSERT(*curp == cur || *curp == NULL);
648                 }
649                 /*
650                  * Otherwise we're filling in a hole with an allocation.
651                  */
652                 else {
653                         if (cur)
654                                 ASSERT((cur->bc_private.b.flags &
655                                         XFS_BTCUR_BPRV_WASDEL) == 0);
656                         if ((error = xfs_bmap_add_extent_hole_real(ip, idx, cur,
657                                         new, &logflags, whichfork)))
658                                 goto done;
659                 }
660         }
661
662         ASSERT(*curp == cur || *curp == NULL);
663         /*
664          * Convert to a btree if necessary.
665          */
666         if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
667             XFS_IFORK_NEXTENTS(ip, whichfork) > ifp->if_ext_max) {
668                 int     tmp_logflags;   /* partial log flag return val */
669
670                 ASSERT(cur == NULL);
671                 error = xfs_bmap_extents_to_btree(ip->i_transp, ip, first,
672                         flist, &cur, da_old > 0, &tmp_logflags, whichfork);
673                 logflags |= tmp_logflags;
674                 if (error)
675                         goto done;
676         }
677         /*
678          * Adjust for changes in reserved delayed indirect blocks.
679          * Nothing to do for disk quotas here.
680          */
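        /*
         * E.g. when a delayed extent has been fully converted to a real
         * allocation, da_new is zero, so whatever was reserved (da_old)
         * beyond the blocks the btree cursor itself allocated is returned
         * to the free block counter below.
         */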
681         if (da_old || da_new) {
682                 xfs_filblks_t   nblks;
683
684                 nblks = da_new;
685                 if (cur)
686                         nblks += cur->bc_private.b.allocated;
687                 ASSERT(nblks <= da_old);
688                 if (nblks < da_old)
689                         xfs_mod_incore_sb(ip->i_mount, XFS_SBS_FDBLOCKS,
690                                 (int)(da_old - nblks), rsvd);
691         }
692         /*
693          * Clear out the allocated field, done with it now in any case.
694          */
695         if (cur) {
696                 cur->bc_private.b.allocated = 0;
697                 *curp = cur;
698         }
699 done:
700 #ifdef DEBUG
701         if (!error)
702                 xfs_bmap_check_leaf_extents(*curp, ip, whichfork);
703 #endif
704         *logflagsp = logflags;
705         return error;
706 }
707
708 /*
709  * Called by xfs_bmap_add_extent to handle cases converting a delayed
710  * allocation to a real allocation.
711  */
712 STATIC int                              /* error */
713 xfs_bmap_add_extent_delay_real(
714         xfs_inode_t             *ip,    /* incore inode pointer */
715         xfs_extnum_t            idx,    /* extent number to update/insert */
716         xfs_btree_cur_t         **curp, /* if *curp is null, not a btree */
717         xfs_bmbt_irec_t         *new,   /* new data to put in extent list */
718         xfs_filblks_t           *dnew,  /* new delayed-alloc indirect blocks */
719         xfs_fsblock_t           *first, /* pointer to firstblock variable */
720         xfs_bmap_free_t         *flist, /* list of extents to be freed */
721         int                     *logflagsp, /* inode logging flags */
722         int                     rsvd)   /* OK to use reserved data block allocation */
723 {
724         xfs_bmbt_rec_t          *base;  /* base of extent entry list */
725         xfs_btree_cur_t         *cur;   /* btree cursor */
726         int                     diff;   /* temp value */
727         xfs_bmbt_rec_t          *ep;    /* extent entry for idx */
728         int                     error;  /* error return value */
729 #ifdef XFS_BMAP_TRACE
730         static char             fname[] = "xfs_bmap_add_extent_delay_real";
731 #endif
732         int                     i;      /* temp state */
733         xfs_fileoff_t           new_endoff;     /* end offset of new entry */
734         xfs_bmbt_irec_t         r[3];   /* neighbor extent entries */
735                                         /* left is 0, right is 1, prev is 2 */
736         int                     rval=0; /* return value (logging flags) */
737         int                     state = 0;/* state bits, accessed thru macros */
738         xfs_filblks_t           temp;   /* value for dnew calculations */
739         xfs_filblks_t           temp2;  /* value for dnew calculations */
740         int                     tmp_rval;       /* partial logging flags */
741         enum {                          /* bit number definitions for state */
742                 LEFT_CONTIG,    RIGHT_CONTIG,
743                 LEFT_FILLING,   RIGHT_FILLING,
744                 LEFT_DELAY,     RIGHT_DELAY,
745                 LEFT_VALID,     RIGHT_VALID
746         };
747
748 #define LEFT            r[0]
749 #define RIGHT           r[1]
750 #define PREV            r[2]
751 #define MASK(b)         (1 << (b))
752 #define MASK2(a,b)      (MASK(a) | MASK(b))
753 #define MASK3(a,b,c)    (MASK2(a,b) | MASK(c))
754 #define MASK4(a,b,c,d)  (MASK3(a,b,c) | MASK(d))
755 #define STATE_SET(b,v)  ((v) ? (state |= MASK(b)) : (state &= ~MASK(b)))
756 #define STATE_TEST(b)   (state & MASK(b))
757 #define STATE_SET_TEST(b,v)     ((v) ? ((state |= MASK(b)), 1) : \
758                                        ((state &= ~MASK(b)), 0))
759 #define SWITCH_STATE            \
760         (state & MASK4(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG))
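/*
 * Worked example: if the new real extent replaces all of the delayed
 * extent PREV and is contiguous with the left neighbor but not the
 * right, then LEFT_FILLING, RIGHT_FILLING and LEFT_CONTIG are all set,
 * SWITCH_STATE equals MASK3(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG),
 * and the corresponding case below merges PREV into the left neighbor
 * and deletes the delayed entry at idx.
 */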
761
762         /*
763          * Set up a bunch of variables to make the tests simpler.
764          */
765         cur = *curp;
766         base = ip->i_df.if_u1.if_extents;
767         ep = &base[idx];
768         xfs_bmbt_get_all(ep, &PREV);
769         new_endoff = new->br_startoff + new->br_blockcount;
770         ASSERT(PREV.br_startoff <= new->br_startoff);
771         ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
772         /*
773          * Set flags determining what part of the previous delayed allocation
774          * extent is being replaced by a real allocation.
775          */
776         STATE_SET(LEFT_FILLING, PREV.br_startoff == new->br_startoff);
777         STATE_SET(RIGHT_FILLING,
778                 PREV.br_startoff + PREV.br_blockcount == new_endoff);
779         /*
780          * Check and set flags if this segment has a left neighbor.
781          * Don't set contiguous if the combined extent would be too large.
782          */
783         if (STATE_SET_TEST(LEFT_VALID, idx > 0)) {
784                 xfs_bmbt_get_all(ep - 1, &LEFT);
785                 STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(LEFT.br_startblock));
786         }
787         STATE_SET(LEFT_CONTIG,
788                 STATE_TEST(LEFT_VALID) && !STATE_TEST(LEFT_DELAY) &&
789                 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
790                 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
791                 LEFT.br_state == new->br_state &&
792                 LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN);
793         /*
794          * Check and set flags if this segment has a right neighbor.
795          * Don't set contiguous if the combined extent would be too large.
796          * Also check for all-three-contiguous being too large.
797          */
798         if (STATE_SET_TEST(RIGHT_VALID,
799                         idx <
800                         ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1)) {
801                 xfs_bmbt_get_all(ep + 1, &RIGHT);
802                 STATE_SET(RIGHT_DELAY, ISNULLSTARTBLOCK(RIGHT.br_startblock));
803         }
804         STATE_SET(RIGHT_CONTIG,
805                 STATE_TEST(RIGHT_VALID) && !STATE_TEST(RIGHT_DELAY) &&
806                 new_endoff == RIGHT.br_startoff &&
807                 new->br_startblock + new->br_blockcount ==
808                     RIGHT.br_startblock &&
809                 new->br_state == RIGHT.br_state &&
810                 new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
811                 ((state & MASK3(LEFT_CONTIG, LEFT_FILLING, RIGHT_FILLING)) !=
812                   MASK3(LEFT_CONTIG, LEFT_FILLING, RIGHT_FILLING) ||
813                  LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
814                      <= MAXEXTLEN));
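        /*
         * Note that when LEFT_FILLING and RIGHT_FILLING are both set the
         * new extent replaces all of PREV, so a three-way merge would
         * yield LEFT + new + RIGHT blocks; RIGHT_CONTIG is refused above
         * if that combined length would exceed MAXEXTLEN.
         */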
815         error = 0;
816         /*
817          * Switch out based on the FILLING and CONTIG state bits.
818          */
819         switch (SWITCH_STATE) {
820
821         case MASK4(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG):
822                 /*
823                  * Filling in all of a previously delayed allocation extent.
824                  * The left and right neighbors are both contiguous with new.
825                  */
826                 xfs_bmap_trace_pre_update(fname, "LF|RF|LC|RC", ip, idx - 1,
827                         XFS_DATA_FORK);
828                 xfs_bmbt_set_blockcount(ep - 1,
829                         LEFT.br_blockcount + PREV.br_blockcount +
830                         RIGHT.br_blockcount);
831                 xfs_bmap_trace_post_update(fname, "LF|RF|LC|RC", ip, idx - 1,
832                         XFS_DATA_FORK);
833                 xfs_bmap_trace_delete(fname, "LF|RF|LC|RC", ip, idx, 2,
834                         XFS_DATA_FORK);
835                 xfs_bmap_delete_exlist(ip, idx, 2, XFS_DATA_FORK);
836                 ip->i_df.if_lastex = idx - 1;
837                 ip->i_d.di_nextents--;
838                 if (cur == NULL)
839                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
840                 else {
841                         rval = XFS_ILOG_CORE;
842                         if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
843                                         RIGHT.br_startblock,
844                                         RIGHT.br_blockcount, &i)))
845                                 goto done;
846                         ASSERT(i == 1);
847                         if ((error = xfs_bmbt_delete(cur, &i)))
848                                 goto done;
849                         ASSERT(i == 1);
850                         if ((error = xfs_bmbt_decrement(cur, 0, &i)))
851                                 goto done;
852                         ASSERT(i == 1);
853                         if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
854                                         LEFT.br_startblock,
855                                         LEFT.br_blockcount +
856                                         PREV.br_blockcount +
857                                         RIGHT.br_blockcount, LEFT.br_state)))
858                                 goto done;
859                 }
860                 *dnew = 0;
861                 break;
862
863         case MASK3(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG):
864                 /*
865                  * Filling in all of a previously delayed allocation extent.
866                  * The left neighbor is contiguous, the right is not.
867                  */
868                 xfs_bmap_trace_pre_update(fname, "LF|RF|LC", ip, idx - 1,
869                         XFS_DATA_FORK);
870                 xfs_bmbt_set_blockcount(ep - 1,
871                         LEFT.br_blockcount + PREV.br_blockcount);
872                 xfs_bmap_trace_post_update(fname, "LF|RF|LC", ip, idx - 1,
873                         XFS_DATA_FORK);
874                 ip->i_df.if_lastex = idx - 1;
875                 xfs_bmap_trace_delete(fname, "LF|RF|LC", ip, idx, 1,
876                         XFS_DATA_FORK);
877                 xfs_bmap_delete_exlist(ip, idx, 1, XFS_DATA_FORK);
878                 if (cur == NULL)
879                         rval = XFS_ILOG_DEXT;
880                 else {
881                         rval = 0;
882                         if ((error = xfs_bmbt_lookup_eq(cur, LEFT.br_startoff,
883                                         LEFT.br_startblock, LEFT.br_blockcount,
884                                         &i)))
885                                 goto done;
886                         ASSERT(i == 1);
887                         if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
888                                         LEFT.br_startblock,
889                                         LEFT.br_blockcount +
890                                         PREV.br_blockcount, LEFT.br_state)))
891                                 goto done;
892                 }
893                 *dnew = 0;
894                 break;
895
896         case MASK3(LEFT_FILLING, RIGHT_FILLING, RIGHT_CONTIG):
897                 /*
898                  * Filling in all of a previously delayed allocation extent.
899                  * The right neighbor is contiguous, the left is not.
900                  */
901                 xfs_bmap_trace_pre_update(fname, "LF|RF|RC", ip, idx,
902                         XFS_DATA_FORK);
903                 xfs_bmbt_set_startblock(ep, new->br_startblock);
904                 xfs_bmbt_set_blockcount(ep,
905                         PREV.br_blockcount + RIGHT.br_blockcount);
906                 xfs_bmap_trace_post_update(fname, "LF|RF|RC", ip, idx,
907                         XFS_DATA_FORK);
908                 ip->i_df.if_lastex = idx;
909                 xfs_bmap_trace_delete(fname, "LF|RF|RC", ip, idx + 1, 1,
910                         XFS_DATA_FORK);
911                 xfs_bmap_delete_exlist(ip, idx + 1, 1, XFS_DATA_FORK);
912                 if (cur == NULL)
913                         rval = XFS_ILOG_DEXT;
914                 else {
915                         rval = 0;
916                         if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
917                                         RIGHT.br_startblock,
918                                         RIGHT.br_blockcount, &i)))
919                                 goto done;
920                         ASSERT(i == 1);
921                         if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
922                                         new->br_startblock,
923                                         PREV.br_blockcount +
924                                         RIGHT.br_blockcount, PREV.br_state)))
925                                 goto done;
926                 }
927                 *dnew = 0;
928                 break;
929
930         case MASK2(LEFT_FILLING, RIGHT_FILLING):
931                 /*
932                  * Filling in all of a previously delayed allocation extent.
933                  * Neither the left nor right neighbors are contiguous with
934                  * the new one.
935                  */
936                 xfs_bmap_trace_pre_update(fname, "LF|RF", ip, idx,
937                         XFS_DATA_FORK);
938                 xfs_bmbt_set_startblock(ep, new->br_startblock);
939                 xfs_bmap_trace_post_update(fname, "LF|RF", ip, idx,
940                         XFS_DATA_FORK);
941                 ip->i_df.if_lastex = idx;
942                 ip->i_d.di_nextents++;
943                 if (cur == NULL)
944                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
945                 else {
946                         rval = XFS_ILOG_CORE;
947                         if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
948                                         new->br_startblock, new->br_blockcount,
949                                         &i)))
950                                 goto done;
951                         ASSERT(i == 0);
952                         cur->bc_rec.b.br_state = XFS_EXT_NORM;
953                         if ((error = xfs_bmbt_insert(cur, &i)))
954                                 goto done;
955                         ASSERT(i == 1);
956                 }
957                 *dnew = 0;
958                 break;
959
960         case MASK2(LEFT_FILLING, LEFT_CONTIG):
961                 /*
962                  * Filling in the first part of a previous delayed allocation.
963                  * The left neighbor is contiguous.
964                  */
965                 xfs_bmap_trace_pre_update(fname, "LF|LC", ip, idx - 1,
966                         XFS_DATA_FORK);
967                 xfs_bmbt_set_blockcount(ep - 1,
968                         LEFT.br_blockcount + new->br_blockcount);
969                 xfs_bmbt_set_startoff(ep,
970                         PREV.br_startoff + new->br_blockcount);
971                 xfs_bmap_trace_post_update(fname, "LF|LC", ip, idx - 1,
972                         XFS_DATA_FORK);
973                 temp = PREV.br_blockcount - new->br_blockcount;
974                 xfs_bmap_trace_pre_update(fname, "LF|LC", ip, idx,
975                         XFS_DATA_FORK);
976                 xfs_bmbt_set_blockcount(ep, temp);
977                 ip->i_df.if_lastex = idx - 1;
978                 if (cur == NULL)
979                         rval = XFS_ILOG_DEXT;
980                 else {
981                         rval = 0;
982                         if ((error = xfs_bmbt_lookup_eq(cur, LEFT.br_startoff,
983                                         LEFT.br_startblock, LEFT.br_blockcount,
984                                         &i)))
985                                 goto done;
986                         ASSERT(i == 1);
987                         if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
988                                         LEFT.br_startblock,
989                                         LEFT.br_blockcount +
990                                         new->br_blockcount,
991                                         LEFT.br_state)))
992                                 goto done;
993                 }
994                 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
995                         STARTBLOCKVAL(PREV.br_startblock));
996                 xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
997                 xfs_bmap_trace_post_update(fname, "LF|LC", ip, idx,
998                         XFS_DATA_FORK);
999                 *dnew = temp;
1000                 break;
1001
1002         case MASK(LEFT_FILLING):
1003                 /*
1004                  * Filling in the first part of a previous delayed allocation.
1005                  * The left neighbor is not contiguous.
1006                  */
1007                 xfs_bmap_trace_pre_update(fname, "LF", ip, idx, XFS_DATA_FORK);
1008                 xfs_bmbt_set_startoff(ep, new_endoff);
1009                 temp = PREV.br_blockcount - new->br_blockcount;
1010                 xfs_bmbt_set_blockcount(ep, temp);
1011                 xfs_bmap_trace_insert(fname, "LF", ip, idx, 1, new, NULL,
1012                         XFS_DATA_FORK);
1013                 xfs_bmap_insert_exlist(ip, idx, 1, new, XFS_DATA_FORK);
1014                 ip->i_df.if_lastex = idx;
1015                 ip->i_d.di_nextents++;
1016                 if (cur == NULL)
1017                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1018                 else {
1019                         rval = XFS_ILOG_CORE;
1020                         if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
1021                                         new->br_startblock, new->br_blockcount,
1022                                         &i)))
1023                                 goto done;
1024                         ASSERT(i == 0);
1025                         cur->bc_rec.b.br_state = XFS_EXT_NORM;
1026                         if ((error = xfs_bmbt_insert(cur, &i)))
1027                                 goto done;
1028                         ASSERT(i == 1);
1029                 }
1030                 if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1031                     ip->i_d.di_nextents > ip->i_df.if_ext_max) {
1032                         error = xfs_bmap_extents_to_btree(ip->i_transp, ip,
1033                                         first, flist, &cur, 1, &tmp_rval,
1034                                         XFS_DATA_FORK);
1035                         rval |= tmp_rval;
1036                         if (error)
1037                                 goto done;
1038                 }
1039                 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
1040                         STARTBLOCKVAL(PREV.br_startblock) -
1041                         (cur ? cur->bc_private.b.allocated : 0));
1042                 base = ip->i_df.if_u1.if_extents;
1043                 ep = &base[idx + 1];
1044                 xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
1045                 xfs_bmap_trace_post_update(fname, "LF", ip, idx + 1,
1046                         XFS_DATA_FORK);
1047                 *dnew = temp;
1048                 break;
1049
1050         case MASK2(RIGHT_FILLING, RIGHT_CONTIG):
1051                 /*
1052                  * Filling in the last part of a previous delayed allocation.
1053                  * The right neighbor is contiguous with the new allocation.
1054                  */
1055                 temp = PREV.br_blockcount - new->br_blockcount;
1056                 xfs_bmap_trace_pre_update(fname, "RF|RC", ip, idx,
1057                         XFS_DATA_FORK);
1058                 xfs_bmap_trace_pre_update(fname, "RF|RC", ip, idx + 1,
1059                         XFS_DATA_FORK);
1060                 xfs_bmbt_set_blockcount(ep, temp);
1061                 xfs_bmbt_set_allf(ep + 1, new->br_startoff, new->br_startblock,
1062                         new->br_blockcount + RIGHT.br_blockcount,
1063                         RIGHT.br_state);
1064                 xfs_bmap_trace_post_update(fname, "RF|RC", ip, idx + 1,
1065                         XFS_DATA_FORK);
1066                 ip->i_df.if_lastex = idx + 1;
1067                 if (cur == NULL)
1068                         rval = XFS_ILOG_DEXT;
1069                 else {
1070                         rval = 0;
1071                         if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
1072                                         RIGHT.br_startblock,
1073                                         RIGHT.br_blockcount, &i)))
1074                                 goto done;
1075                         ASSERT(i == 1);
1076                         if ((error = xfs_bmbt_update(cur, new->br_startoff,
1077                                         new->br_startblock,
1078                                         new->br_blockcount +
1079                                         RIGHT.br_blockcount,
1080                                         RIGHT.br_state)))
1081                                 goto done;
1082                 }
1083                 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
1084                         STARTBLOCKVAL(PREV.br_startblock));
1085                 xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
1086                 xfs_bmap_trace_post_update(fname, "RF|RC", ip, idx,
1087                         XFS_DATA_FORK);
1088                 *dnew = temp;
1089                 break;
1090
1091         case MASK(RIGHT_FILLING):
1092                 /*
1093                  * Filling in the last part of a previous delayed allocation.
1094                  * The right neighbor is not contiguous.
1095                  */
1096                 temp = PREV.br_blockcount - new->br_blockcount;
1097                 xfs_bmap_trace_pre_update(fname, "RF", ip, idx, XFS_DATA_FORK);
1098                 xfs_bmbt_set_blockcount(ep, temp);
1099                 xfs_bmap_trace_insert(fname, "RF", ip, idx + 1, 1,
1100                         new, NULL, XFS_DATA_FORK);
1101                 xfs_bmap_insert_exlist(ip, idx + 1, 1, new, XFS_DATA_FORK);
1102                 ip->i_df.if_lastex = idx + 1;
1103                 ip->i_d.di_nextents++;
1104                 if (cur == NULL)
1105                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1106                 else {
1107                         rval = XFS_ILOG_CORE;
1108                         if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
1109                                         new->br_startblock, new->br_blockcount,
1110                                         &i)))
1111                                 goto done;
1112                         ASSERT(i == 0);
1113                         cur->bc_rec.b.br_state = XFS_EXT_NORM;
1114                         if ((error = xfs_bmbt_insert(cur, &i)))
1115                                 goto done;
1116                         ASSERT(i == 1);
1117                 }
1118                 if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1119                     ip->i_d.di_nextents > ip->i_df.if_ext_max) {
1120                         error = xfs_bmap_extents_to_btree(ip->i_transp, ip,
1121                                 first, flist, &cur, 1, &tmp_rval,
1122                                 XFS_DATA_FORK);
1123                         rval |= tmp_rval;
1124                         if (error)
1125                                 goto done;
1126                 }
1127                 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
1128                         STARTBLOCKVAL(PREV.br_startblock) -
1129                         (cur ? cur->bc_private.b.allocated : 0));
1130                 base = ip->i_df.if_u1.if_extents;
1131                 ep = &base[idx];
1132                 xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
1133                 xfs_bmap_trace_post_update(fname, "RF", ip, idx, XFS_DATA_FORK);
1134                 *dnew = temp;
1135                 break;
1136
1137         case 0:
1138                 /*
1139                  * Filling in the middle part of a previous delayed allocation.
1140                  * Contiguity is impossible here.
1141                  * This case is avoided almost all the time.
1142                  */
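                /*
                 * PREV is split into three pieces: a leading delayed piece
                 * of "temp" blocks left in place at idx, the new real
                 * extent, and a trailing delayed piece of "temp2" blocks.
                 */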
1143                 temp = new->br_startoff - PREV.br_startoff;
1144                 xfs_bmap_trace_pre_update(fname, "0", ip, idx, XFS_DATA_FORK);
1145                 xfs_bmbt_set_blockcount(ep, temp);
1146                 r[0] = *new;
1147                 r[1].br_startoff = new_endoff;
1148                 temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff;
1149                 r[1].br_blockcount = temp2;
1150                 xfs_bmap_trace_insert(fname, "0", ip, idx + 1, 2, &r[0], &r[1],
1151                         XFS_DATA_FORK);
1152                 xfs_bmap_insert_exlist(ip, idx + 1, 2, &r[0], XFS_DATA_FORK);
1153                 ip->i_df.if_lastex = idx + 1;
1154                 ip->i_d.di_nextents++;
1155                 if (cur == NULL)
1156                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1157                 else {
1158                         rval = XFS_ILOG_CORE;
1159                         if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
1160                                         new->br_startblock, new->br_blockcount,
1161                                         &i)))
1162                                 goto done;
1163                         ASSERT(i == 0);
1164                         cur->bc_rec.b.br_state = XFS_EXT_NORM;
1165                         if ((error = xfs_bmbt_insert(cur, &i)))
1166                                 goto done;
1167                         ASSERT(i == 1);
1168                 }
1169                 if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1170                     ip->i_d.di_nextents > ip->i_df.if_ext_max) {
1171                         error = xfs_bmap_extents_to_btree(ip->i_transp, ip,
1172                                         first, flist, &cur, 1, &tmp_rval,
1173                                         XFS_DATA_FORK);
1174                         rval |= tmp_rval;
1175                         if (error)
1176                                 goto done;
1177                 }
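                     /*
                      * Reserve worst-case indirect blocks for the two delayed
                      * pieces left on either side of the new allocation.
                      * diff is the amount by which those two reservations
                      * exceed what remains of PREV's single reservation: e.g.
                      * an old reservation of 4 against worst-case estimates
                      * of 3 each gives diff == 2, which must come out of the
                      * free-block counter.  If that fails, the loop below
                      * trims the estimates until the shortfall fits.
                      */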
1178                 temp = xfs_bmap_worst_indlen(ip, temp);
1179                 temp2 = xfs_bmap_worst_indlen(ip, temp2);
1180                 diff = (int)(temp + temp2 - STARTBLOCKVAL(PREV.br_startblock) -
1181                         (cur ? cur->bc_private.b.allocated : 0));
1182                 if (diff > 0 &&
1183                     xfs_mod_incore_sb(ip->i_mount, XFS_SBS_FDBLOCKS, -diff, rsvd)) {
1184                         /*
1185                          * Ick gross gag me with a spoon.
1186                          */
1187                         ASSERT(0);      /* want to see if this ever happens! */
1188                         while (diff > 0) {
1189                                 if (temp) {
1190                                         temp--;
1191                                         diff--;
1192                                         if (!diff ||
1193                                             !xfs_mod_incore_sb(ip->i_mount,
1194                                                     XFS_SBS_FDBLOCKS, -diff, rsvd))
1195                                                 break;
1196                                 }
1197                                 if (temp2) {
1198                                         temp2--;
1199                                         diff--;
1200                                         if (!diff ||
1201                                             !xfs_mod_incore_sb(ip->i_mount,
1202                                                     XFS_SBS_FDBLOCKS, -diff, rsvd))
1203                                                 break;
1204                                 }
1205                         }
1206                 }
1207                 base = ip->i_df.if_u1.if_extents;
1208                 ep = &base[idx];
1209                 xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
1210                 xfs_bmap_trace_post_update(fname, "0", ip, idx, XFS_DATA_FORK);
1211                 xfs_bmap_trace_pre_update(fname, "0", ip, idx + 2,
1212                         XFS_DATA_FORK);
1213                 xfs_bmbt_set_startblock(ep + 2, NULLSTARTBLOCK((int)temp2));
1214                 xfs_bmap_trace_post_update(fname, "0", ip, idx + 2,
1215                         XFS_DATA_FORK);
1216                 *dnew = temp + temp2;
1217                 break;
1218
1219         case MASK3(LEFT_FILLING, LEFT_CONTIG, RIGHT_CONTIG):
1220         case MASK3(RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG):
1221         case MASK2(LEFT_FILLING, RIGHT_CONTIG):
1222         case MASK2(RIGHT_FILLING, LEFT_CONTIG):
1223         case MASK2(LEFT_CONTIG, RIGHT_CONTIG):
1224         case MASK(LEFT_CONTIG):
1225         case MASK(RIGHT_CONTIG):
1226                 /*
1227                  * These cases are all impossible.
1228                  */
1229                 ASSERT(0);
1230         }
1231         *curp = cur;
1232 done:
1233         *logflagsp = rval;
1234         return error;
1235 #undef  LEFT
1236 #undef  RIGHT
1237 #undef  PREV
1238 #undef  MASK
1239 #undef  MASK2
1240 #undef  MASK3
1241 #undef  MASK4
1242 #undef  STATE_SET
1243 #undef  STATE_TEST
1244 #undef  STATE_SET_TEST
1245 #undef  SWITCH_STATE
1246 }
1247
1248 /*
1249  * Called by xfs_bmap_add_extent to handle cases converting an unwritten
1250  * allocation to a real allocation or vice versa.
1251  */
1252 STATIC int                              /* error */
1253 xfs_bmap_add_extent_unwritten_real(
1254         xfs_inode_t             *ip,    /* incore inode pointer */
1255         xfs_extnum_t            idx,    /* extent number to update/insert */
1256         xfs_btree_cur_t         **curp, /* if *curp is null, not a btree */
1257         xfs_bmbt_irec_t         *new,   /* new data to put in extent list */
1258         int                     *logflagsp) /* inode logging flags */
1259 {
1260         xfs_bmbt_rec_t          *base;  /* base of extent entry list */
1261         xfs_btree_cur_t         *cur;   /* btree cursor */
1262         xfs_bmbt_rec_t          *ep;    /* extent entry for idx */
1263         int                     error;  /* error return value */
1264 #ifdef XFS_BMAP_TRACE
1265         static char             fname[] = "xfs_bmap_add_extent_unwritten_real";
1266 #endif
1267         int                     i;      /* temp state */
1268         xfs_fileoff_t           new_endoff;     /* end offset of new entry */
1269         xfs_exntst_t            newext; /* new extent state */
1270         xfs_exntst_t            oldext; /* old extent state */
1271         xfs_bmbt_irec_t         r[3];   /* neighbor extent entries */
1272                                         /* left is 0, right is 1, prev is 2 */
1273         int                     rval=0; /* return value (logging flags) */
1274         int                     state = 0;/* state bits, accessed thru macros */
1275         enum {                          /* bit number definitions for state */
1276                 LEFT_CONTIG,    RIGHT_CONTIG,
1277                 LEFT_FILLING,   RIGHT_FILLING,
1278                 LEFT_DELAY,     RIGHT_DELAY,
1279                 LEFT_VALID,     RIGHT_VALID
1280         };
1281
1282 #define LEFT            r[0]
1283 #define RIGHT           r[1]
1284 #define PREV            r[2]
1285 #define MASK(b)         (1 << (b))
1286 #define MASK2(a,b)      (MASK(a) | MASK(b))
1287 #define MASK3(a,b,c)    (MASK2(a,b) | MASK(c))
1288 #define MASK4(a,b,c,d)  (MASK3(a,b,c) | MASK(d))
1289 #define STATE_SET(b,v)  ((v) ? (state |= MASK(b)) : (state &= ~MASK(b)))
1290 #define STATE_TEST(b)   (state & MASK(b))
1291 #define STATE_SET_TEST(b,v)     ((v) ? ((state |= MASK(b)), 1) : \
1292                                        ((state &= ~MASK(b)), 0))
1293 #define SWITCH_STATE            \
1294         (state & MASK4(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG))
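             /*
              * Example: converting all of PREV when only the left neighbor
              * already carries the new state leaves LEFT_FILLING,
              * RIGHT_FILLING and LEFT_CONTIG set (plus whichever VALID bits
              * apply), so
              *
              *      SWITCH_STATE == MASK3(LEFT_FILLING, RIGHT_FILLING,
              *                            LEFT_CONTIG)
              *
              * and the switch below takes the left-merge case.
              */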
1295
1296         /*
1297          * Set up a bunch of variables to make the tests simpler.
1298          */
1299         error = 0;
1300         cur = *curp;
1301         base = ip->i_df.if_u1.if_extents;
1302         ep = &base[idx];
1303         xfs_bmbt_get_all(ep, &PREV);
1304         newext = new->br_state;
1305         oldext = (newext == XFS_EXT_UNWRITTEN) ?
1306                 XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
1307         ASSERT(PREV.br_state == oldext);
1308         new_endoff = new->br_startoff + new->br_blockcount;
1309         ASSERT(PREV.br_startoff <= new->br_startoff);
1310         ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
1311         /*
1312          * Set flags determining what part of the previous oldext allocation
1313          * extent is being replaced by a newext allocation.
1314          */
1315         STATE_SET(LEFT_FILLING, PREV.br_startoff == new->br_startoff);
1316         STATE_SET(RIGHT_FILLING,
1317                 PREV.br_startoff + PREV.br_blockcount == new_endoff);
1318         /*
1319          * Check and set flags if this segment has a left neighbor.
1320          * Don't set contiguous if the combined extent would be too large.
1321          */
1322         if (STATE_SET_TEST(LEFT_VALID, idx > 0)) {
1323                 xfs_bmbt_get_all(ep - 1, &LEFT);
1324                 STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(LEFT.br_startblock));
1325         }
1326         STATE_SET(LEFT_CONTIG,
1327                 STATE_TEST(LEFT_VALID) && !STATE_TEST(LEFT_DELAY) &&
1328                 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
1329                 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
1330                 LEFT.br_state == newext &&
1331                 LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN);
1332         /*
1333          * Check and set flags if this segment has a right neighbor.
1334          * Don't set contiguous if the combined extent would be too large.
1335          * Also check for all-three-contiguous being too large.
1336          */
1337         if (STATE_SET_TEST(RIGHT_VALID,
1338                         idx <
1339                         ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1)) {
1340                 xfs_bmbt_get_all(ep + 1, &RIGHT);
1341                 STATE_SET(RIGHT_DELAY, ISNULLSTARTBLOCK(RIGHT.br_startblock));
1342         }
1343         STATE_SET(RIGHT_CONTIG,
1344                 STATE_TEST(RIGHT_VALID) && !STATE_TEST(RIGHT_DELAY) &&
1345                 new_endoff == RIGHT.br_startoff &&
1346                 new->br_startblock + new->br_blockcount ==
1347                     RIGHT.br_startblock &&
1348                 newext == RIGHT.br_state &&
1349                 new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
1350                 ((state & MASK3(LEFT_CONTIG, LEFT_FILLING, RIGHT_FILLING)) !=
1351                   MASK3(LEFT_CONTIG, LEFT_FILLING, RIGHT_FILLING) ||
1352                  LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
1353                      <= MAXEXTLEN));
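             /*
              * The final clause above keeps RIGHT_CONTIG clear when
              * LEFT_FILLING, RIGHT_FILLING and LEFT_CONTIG are all set but
              * merging LEFT, PREV and RIGHT would exceed MAXEXTLEN; in that
              * case only the left-side merge is done.
              */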
1354         /*
1355          * Switch out based on the FILLING and CONTIG state bits.
1356          */
1357         switch (SWITCH_STATE) {
1358
1359         case MASK4(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG):
1360                 /*
1361                  * Setting all of a previous oldext extent to newext.
1362                  * The left and right neighbors are both contiguous with new.
1363                  */
1364                 xfs_bmap_trace_pre_update(fname, "LF|RF|LC|RC", ip, idx - 1,
1365                         XFS_DATA_FORK);
1366                 xfs_bmbt_set_blockcount(ep - 1,
1367                         LEFT.br_blockcount + PREV.br_blockcount +
1368                         RIGHT.br_blockcount);
1369                 xfs_bmap_trace_post_update(fname, "LF|RF|LC|RC", ip, idx - 1,
1370                         XFS_DATA_FORK);
1371                 xfs_bmap_trace_delete(fname, "LF|RF|LC|RC", ip, idx, 2,
1372                         XFS_DATA_FORK);
1373                 xfs_bmap_delete_exlist(ip, idx, 2, XFS_DATA_FORK);
1374                 ip->i_df.if_lastex = idx - 1;
1375                 ip->i_d.di_nextents -= 2;
1376                 if (cur == NULL)
1377                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1378                 else {
1379                         rval = XFS_ILOG_CORE;
1380                         if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
1381                                         RIGHT.br_startblock,
1382                                         RIGHT.br_blockcount, &i)))
1383                                 goto done;
1384                         ASSERT(i == 1);
1385                         if ((error = xfs_bmbt_delete(cur, &i)))
1386                                 goto done;
1387                         ASSERT(i == 1);
1388                         if ((error = xfs_bmbt_decrement(cur, 0, &i)))
1389                                 goto done;
1390                         ASSERT(i == 1);
1391                         if ((error = xfs_bmbt_delete(cur, &i)))
1392                                 goto done;
1393                         ASSERT(i == 1);
1394                         if ((error = xfs_bmbt_decrement(cur, 0, &i)))
1395                                 goto done;
1396                         ASSERT(i == 1);
1397                         if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
1398                                 LEFT.br_startblock,
1399                                 LEFT.br_blockcount + PREV.br_blockcount +
1400                                 RIGHT.br_blockcount, LEFT.br_state)))
1401                                 goto done;
1402                 }
1403                 break;
1404
1405         case MASK3(LEFT_FILLING, RIGHT_FILLING, LEFT_CONTIG):
1406                 /*
1407                  * Setting all of a previous oldext extent to newext.
1408                  * The left neighbor is contiguous, the right is not.
1409                  */
1410                 xfs_bmap_trace_pre_update(fname, "LF|RF|LC", ip, idx - 1,
1411                         XFS_DATA_FORK);
1412                 xfs_bmbt_set_blockcount(ep - 1,
1413                         LEFT.br_blockcount + PREV.br_blockcount);
1414                 xfs_bmap_trace_post_update(fname, "LF|RF|LC", ip, idx - 1,
1415                         XFS_DATA_FORK);
1416                 ip->i_df.if_lastex = idx - 1;
1417                 xfs_bmap_trace_delete(fname, "LF|RF|LC", ip, idx, 1,
1418                         XFS_DATA_FORK);
1419                 xfs_bmap_delete_exlist(ip, idx, 1, XFS_DATA_FORK);
1420                 ip->i_d.di_nextents--;
1421                 if (cur == NULL)
1422                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1423                 else {
1424                         rval = XFS_ILOG_CORE;
1425                         if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1426                                         PREV.br_startblock, PREV.br_blockcount,
1427                                         &i)))
1428                                 goto done;
1429                         ASSERT(i == 1);
1430                         if ((error = xfs_bmbt_delete(cur, &i)))
1431                                 goto done;
1432                         ASSERT(i == 1);
1433                         if ((error = xfs_bmbt_decrement(cur, 0, &i)))
1434                                 goto done;
1435                         ASSERT(i == 1);
1436                         if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
1437                                 LEFT.br_startblock,
1438                                 LEFT.br_blockcount + PREV.br_blockcount,
1439                                 LEFT.br_state)))
1440                                 goto done;
1441                 }
1442                 break;
1443
1444         case MASK3(LEFT_FILLING, RIGHT_FILLING, RIGHT_CONTIG):
1445                 /*
1446                  * Setting all of a previous oldext extent to newext.
1447                  * The right neighbor is contiguous, the left is not.
1448                  */
1449                 xfs_bmap_trace_pre_update(fname, "LF|RF|RC", ip, idx,
1450                         XFS_DATA_FORK);
1451                 xfs_bmbt_set_blockcount(ep,
1452                         PREV.br_blockcount + RIGHT.br_blockcount);
1453                 xfs_bmbt_set_state(ep, newext);
1454                 xfs_bmap_trace_post_update(fname, "LF|RF|RC", ip, idx,
1455                         XFS_DATA_FORK);
1456                 ip->i_df.if_lastex = idx;
1457                 xfs_bmap_trace_delete(fname, "LF|RF|RC", ip, idx + 1, 1,
1458                         XFS_DATA_FORK);
1459                 xfs_bmap_delete_exlist(ip, idx + 1, 1, XFS_DATA_FORK);
1460                 ip->i_d.di_nextents--;
1461                 if (cur == NULL)
1462                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1463                 else {
1464                         rval = XFS_ILOG_CORE;
1465                         if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
1466                                         RIGHT.br_startblock,
1467                                         RIGHT.br_blockcount, &i)))
1468                                 goto done;
1469                         ASSERT(i == 1);
1470                         if ((error = xfs_bmbt_delete(cur, &i)))
1471                                 goto done;
1472                         ASSERT(i == 1);
1473                         if ((error = xfs_bmbt_decrement(cur, 0, &i)))
1474                                 goto done;
1475                         ASSERT(i == 1);
1476                         if ((error = xfs_bmbt_update(cur, new->br_startoff,
1477                                 new->br_startblock,
1478                                 new->br_blockcount + RIGHT.br_blockcount,
1479                                 newext)))
1480                                 goto done;
1481                 }
1482                 break;
1483
1484         case MASK2(LEFT_FILLING, RIGHT_FILLING):
1485                 /*
1486                  * Setting all of a previous oldext extent to newext.
1487                  * Neither the left nor right neighbors are contiguous with
1488                  * the new one.
1489                  */
1490                 xfs_bmap_trace_pre_update(fname, "LF|RF", ip, idx,
1491                         XFS_DATA_FORK);
1492                 xfs_bmbt_set_state(ep, newext);
1493                 xfs_bmap_trace_post_update(fname, "LF|RF", ip, idx,
1494                         XFS_DATA_FORK);
1495                 ip->i_df.if_lastex = idx;
1496                 if (cur == NULL)
1497                         rval = XFS_ILOG_DEXT;
1498                 else {
1499                         rval = 0;
1500                         if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
1501                                         new->br_startblock, new->br_blockcount,
1502                                         &i)))
1503                                 goto done;
1504                         ASSERT(i == 1);
1505                         if ((error = xfs_bmbt_update(cur, new->br_startoff,
1506                                 new->br_startblock, new->br_blockcount,
1507                                 newext)))
1508                                 goto done;
1509                 }
1510                 break;
1511
1512         case MASK2(LEFT_FILLING, LEFT_CONTIG):
1513                 /*
1514                  * Setting the first part of a previous oldext extent to newext.
1515                  * The left neighbor is contiguous.
1516                  */
1517                 xfs_bmap_trace_pre_update(fname, "LF|LC", ip, idx - 1,
1518                         XFS_DATA_FORK);
1519                 xfs_bmbt_set_blockcount(ep - 1,
1520                         LEFT.br_blockcount + new->br_blockcount);
1521                 xfs_bmbt_set_startoff(ep,
1522                         PREV.br_startoff + new->br_blockcount);
1523                 xfs_bmap_trace_post_update(fname, "LF|LC", ip, idx - 1,
1524                         XFS_DATA_FORK);
1525                 xfs_bmap_trace_pre_update(fname, "LF|LC", ip, idx,
1526                         XFS_DATA_FORK);
1527                 xfs_bmbt_set_startblock(ep,
1528                         new->br_startblock + new->br_blockcount);
1529                 xfs_bmbt_set_blockcount(ep,
1530                         PREV.br_blockcount - new->br_blockcount);
1531                 xfs_bmap_trace_post_update(fname, "LF|LC", ip, idx,
1532                         XFS_DATA_FORK);
1533                 ip->i_df.if_lastex = idx - 1;
1534                 if (cur == NULL)
1535                         rval = XFS_ILOG_DEXT;
1536                 else {
1537                         rval = 0;
1538                         if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1539                                         PREV.br_startblock, PREV.br_blockcount,
1540                                         &i)))
1541                                 goto done;
1542                         ASSERT(i == 1);
1543                         if ((error = xfs_bmbt_update(cur,
1544                                 PREV.br_startoff + new->br_blockcount,
1545                                 PREV.br_startblock + new->br_blockcount,
1546                                 PREV.br_blockcount - new->br_blockcount,
1547                                 oldext)))
1548                                 goto done;
1549                         if ((error = xfs_bmbt_decrement(cur, 0, &i)))
1550                                 goto done;
1551                         if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
1552                                 LEFT.br_startblock,
1553                                 LEFT.br_blockcount + new->br_blockcount,
1554                                 LEFT.br_state)))
1555                                 goto done;
1556                 }
1557                 break;
1558
1559         case MASK(LEFT_FILLING):
1560                 /*
1561                  * Setting the first part of a previous oldext extent to newext.
1562                  * The left neighbor is not contiguous.
1563                  */
1564                 xfs_bmap_trace_pre_update(fname, "LF", ip, idx, XFS_DATA_FORK);
1565                 ASSERT(ep && xfs_bmbt_get_state(ep) == oldext);
1566                 xfs_bmbt_set_startoff(ep, new_endoff);
1567                 xfs_bmbt_set_blockcount(ep,
1568                         PREV.br_blockcount - new->br_blockcount);
1569                 xfs_bmbt_set_startblock(ep,
1570                         new->br_startblock + new->br_blockcount);
1571                 xfs_bmap_trace_post_update(fname, "LF", ip, idx, XFS_DATA_FORK);
1572                 xfs_bmap_trace_insert(fname, "LF", ip, idx, 1, new, NULL,
1573                         XFS_DATA_FORK);
1574                 xfs_bmap_insert_exlist(ip, idx, 1, new, XFS_DATA_FORK);
1575                 ip->i_df.if_lastex = idx;
1576                 ip->i_d.di_nextents++;
1577                 if (cur == NULL)
1578                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1579                 else {
1580                         rval = XFS_ILOG_CORE;
1581                         if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1582                                         PREV.br_startblock, PREV.br_blockcount,
1583                                         &i)))
1584                                 goto done;
1585                         ASSERT(i == 1);
1586                         if ((error = xfs_bmbt_update(cur,
1587                                 PREV.br_startoff + new->br_blockcount,
1588                                 PREV.br_startblock + new->br_blockcount,
1589                                 PREV.br_blockcount - new->br_blockcount,
1590                                 oldext)))
1591                                 goto done;
1592                         cur->bc_rec.b = *new;
1593                         if ((error = xfs_bmbt_insert(cur, &i)))
1594                                 goto done;
1595                         ASSERT(i == 1);
1596                 }
1597                 break;
1598
1599         case MASK2(RIGHT_FILLING, RIGHT_CONTIG):
1600                 /*
1601                  * Setting the last part of a previous oldext extent to newext.
1602                  * The right neighbor is contiguous with the new allocation.
1603                  */
1604                 xfs_bmap_trace_pre_update(fname, "RF|RC", ip, idx,
1605                         XFS_DATA_FORK);
1606                 xfs_bmap_trace_pre_update(fname, "RF|RC", ip, idx + 1,
1607                         XFS_DATA_FORK);
1608                 xfs_bmbt_set_blockcount(ep,
1609                         PREV.br_blockcount - new->br_blockcount);
1610                 xfs_bmap_trace_post_update(fname, "RF|RC", ip, idx,
1611                         XFS_DATA_FORK);
1612                 xfs_bmbt_set_allf(ep + 1, new->br_startoff, new->br_startblock,
1613                         new->br_blockcount + RIGHT.br_blockcount, newext);
1614                 xfs_bmap_trace_post_update(fname, "RF|RC", ip, idx + 1,
1615                         XFS_DATA_FORK);
1616                 ip->i_df.if_lastex = idx + 1;
1617                 if (cur == NULL)
1618                         rval = XFS_ILOG_DEXT;
1619                 else {
1620                         rval = 0;
1621                         if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1622                                         PREV.br_startblock,
1623                                         PREV.br_blockcount, &i)))
1624                                 goto done;
1625                         ASSERT(i == 1);
1626                         if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
1627                                 PREV.br_startblock,
1628                                 PREV.br_blockcount - new->br_blockcount,
1629                                 oldext)))
1630                                 goto done;
1631                         if ((error = xfs_bmbt_increment(cur, 0, &i)))
1632                                 goto done;
1633                         if ((error = xfs_bmbt_update(cur, new->br_startoff,
1634                                 new->br_startblock,
1635                                 new->br_blockcount + RIGHT.br_blockcount,
1636                                 newext)))
1637                                 goto done;
1638                 }
1639                 break;
1640
1641         case MASK(RIGHT_FILLING):
1642                 /*
1643                  * Setting the last part of a previous oldext extent to newext.
1644                  * The right neighbor is not contiguous.
1645                  */
1646                 xfs_bmap_trace_pre_update(fname, "RF", ip, idx, XFS_DATA_FORK);
1647                 xfs_bmbt_set_blockcount(ep,
1648                         PREV.br_blockcount - new->br_blockcount);
1649                 xfs_bmap_trace_post_update(fname, "RF", ip, idx, XFS_DATA_FORK);
1650                 xfs_bmap_trace_insert(fname, "RF", ip, idx + 1, 1,
1651                         new, NULL, XFS_DATA_FORK);
1652                 xfs_bmap_insert_exlist(ip, idx + 1, 1, new, XFS_DATA_FORK);
1653                 ip->i_df.if_lastex = idx + 1;
1654                 ip->i_d.di_nextents++;
1655                 if (cur == NULL)
1656                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1657                 else {
1658                         rval = XFS_ILOG_CORE;
1659                         if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1660                                         PREV.br_startblock, PREV.br_blockcount,
1661                                         &i)))
1662                                 goto done;
1663                         ASSERT(i == 1);
1664                         if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
1665                                 PREV.br_startblock,
1666                                 PREV.br_blockcount - new->br_blockcount,
1667                                 oldext)))
1668                                 goto done;
1669                         if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
1670                                         new->br_startblock, new->br_blockcount,
1671                                         &i)))
1672                                 goto done;
1673                         ASSERT(i == 0);
1674                         cur->bc_rec.b.br_state = XFS_EXT_NORM;
1675                         if ((error = xfs_bmbt_insert(cur, &i)))
1676                                 goto done;
1677                         ASSERT(i == 1);
1678                 }
1679                 break;
1680
1681         case 0:
1682                 /*
1683                  * Setting the middle part of a previous oldext extent to
1684                  * newext.  Contiguity is impossible here.
1685                  * One extent becomes three extents.
1686                  */
1687                 xfs_bmap_trace_pre_update(fname, "0", ip, idx, XFS_DATA_FORK);
1688                 xfs_bmbt_set_blockcount(ep,
1689                         new->br_startoff - PREV.br_startoff);
1690                 xfs_bmap_trace_post_update(fname, "0", ip, idx, XFS_DATA_FORK);
1691                 r[0] = *new;
1692                 r[1].br_startoff = new_endoff;
1693                 r[1].br_blockcount =
1694                         PREV.br_startoff + PREV.br_blockcount - new_endoff;
1695                 r[1].br_startblock = new->br_startblock + new->br_blockcount;
1696                 r[1].br_state = oldext;
1697                 xfs_bmap_trace_insert(fname, "0", ip, idx + 1, 2, &r[0], &r[1],
1698                         XFS_DATA_FORK);
1699                 xfs_bmap_insert_exlist(ip, idx + 1, 2, &r[0], XFS_DATA_FORK);
1700                 ip->i_df.if_lastex = idx + 1;
1701                 ip->i_d.di_nextents += 2;
1702                 if (cur == NULL)
1703                         rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1704                 else {
1705                         rval = XFS_ILOG_CORE;
1706                         if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1707                                         PREV.br_startblock, PREV.br_blockcount,
1708                                         &i)))
1709                                 goto done;
1710                         ASSERT(i == 1);
1711                         /* new right extent - oldext */
1712                         if ((error = xfs_bmbt_update(cur, r[1].br_startoff,
1713                                 r[1].br_startblock, r[1].br_blockcount,
1714                                 r[1].br_state)))
1715                                 goto done;
1716                         /* new left extent - oldext */
1717                         PREV.br_blockcount =
1718                                 new->br_startoff - PREV.br_startoff;
1719                         cur->bc_rec.b = PREV;
1720                         if ((error = xfs_bmbt_insert(cur, &i)))
1721                                 goto done;
1722                         ASSERT(i == 1);
1723                         if ((error = xfs_bmbt_increment(cur, 0, &i)))
1724                                 goto done;
1725                         ASSERT(i == 1);
1726                         /* new middle extent - newext */
1727                         cur->bc_rec.b = *new;
1728                         if ((error = xfs_bmbt_insert(cur, &i)))
1729                                 goto done;
1730                         ASSERT(i == 1);
1731                 }
1732                 break;
1733
1734         case MASK3(LEFT_FILLING, LEFT_CONTIG, RIGHT_CONTIG):
1735         case MASK3(RIGHT_FILLING, LEFT_CONTIG, RIGHT_CONTIG):
1736         case MASK2(LEFT_FILLING, RIGHT_CONTIG):
1737         case MASK2(RIGHT_FILLING, LEFT_CONTIG):
1738         case MASK2(LEFT_CONTIG, RIGHT_CONTIG):
1739         case MASK(LEFT_CONTIG):
1740         case MASK(RIGHT_CONTIG):
1741                 /*
1742                  * These cases are all impossible.
1743                  */
1744                 ASSERT(0);
1745         }
1746         *curp = cur;
1747 done:
1748         *logflagsp = rval;
1749         return error;
1750 #undef  LEFT
1751 #undef  RIGHT
1752 #undef  PREV
1753 #undef  MASK
1754 #undef  MASK2
1755 #undef  MASK3
1756 #undef  MASK4
1757 #undef  STATE_SET
1758 #undef  STATE_TEST
1759 #undef  STATE_SET_TEST
1760 #undef  SWITCH_STATE
1761 }
1762
1763 /*
1764  * Called by xfs_bmap_add_extent to handle cases converting a hole
1765  * to a delayed allocation.
1766  */
1767 /*ARGSUSED*/
1768 STATIC int                              /* error */
1769 xfs_bmap_add_extent_hole_delay(
1770         xfs_inode_t             *ip,    /* incore inode pointer */
1771         xfs_extnum_t            idx,    /* extent number to update/insert */
1772         xfs_btree_cur_t         *cur,   /* if null, not a btree */
1773         xfs_bmbt_irec_t         *new,   /* new data to put in extent list */
1774         int                     *logflagsp, /* inode logging flags */
1775         int                     rsvd)           /* OK to allocate reserved blocks */
1776 {
1777         xfs_bmbt_rec_t          *base;  /* base of extent entry list */
1778         xfs_bmbt_rec_t          *ep;    /* extent list entry for idx */
1779 #ifdef XFS_BMAP_TRACE
1780         static char             fname[] = "xfs_bmap_add_extent_hole_delay";
1781 #endif
1782         xfs_bmbt_irec_t         left;   /* left neighbor extent entry */
1783         xfs_filblks_t           newlen=0;       /* new indirect size */
1784         xfs_filblks_t           oldlen=0;       /* old indirect size */
1785         xfs_bmbt_irec_t         right;  /* right neighbor extent entry */
1786         int                     state;  /* state bits, accessed thru macros */
1787         xfs_filblks_t           temp;   /* temp for indirect calculations */
1788         enum {                          /* bit number definitions for state */
1789                 LEFT_CONTIG,    RIGHT_CONTIG,
1790                 LEFT_DELAY,     RIGHT_DELAY,
1791                 LEFT_VALID,     RIGHT_VALID
1792         };
1793
1794 #define MASK(b)                 (1 << (b))
1795 #define MASK2(a,b)              (MASK(a) | MASK(b))
1796 #define STATE_SET(b,v)          ((v) ? (state |= MASK(b)) : (state &= ~MASK(b)))
1797 #define STATE_TEST(b)           (state & MASK(b))
1798 #define STATE_SET_TEST(b,v)     ((v) ? ((state |= MASK(b)), 1) : \
1799                                        ((state &= ~MASK(b)), 0))
1800 #define SWITCH_STATE            (state & MASK2(LEFT_CONTIG, RIGHT_CONTIG))
1801
1802         base = ip->i_df.if_u1.if_extents;
1803         ep = &base[idx];
1804         state = 0;
1805         ASSERT(ISNULLSTARTBLOCK(new->br_startblock));
1806         /*
1807          * Check and set flags if this segment has a left neighbor
1808          */
1809         if (STATE_SET_TEST(LEFT_VALID, idx > 0)) {
1810                 xfs_bmbt_get_all(ep - 1, &left);
1811                 STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(left.br_startblock));
1812         }
1813         /*
1814          * Check and set flags if the current (right) segment exists.
1815          * If it doesn't exist, we're converting the hole at end-of-file.
1816          */
1817         if (STATE_SET_TEST(RIGHT_VALID,
1818                            idx <
1819                            ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t))) {
1820                 xfs_bmbt_get_all(ep, &right);
1821                 STATE_SET(RIGHT_DELAY, ISNULLSTARTBLOCK(right.br_startblock));
1822         }
1823         /*
1824          * Set contiguity flags on the left and right neighbors.
1825          * Don't let extents get too large, even if the pieces are contiguous.
1826          */
1827         STATE_SET(LEFT_CONTIG,
1828                 STATE_TEST(LEFT_VALID) && STATE_TEST(LEFT_DELAY) &&
1829                 left.br_startoff + left.br_blockcount == new->br_startoff &&
1830                 left.br_blockcount + new->br_blockcount <= MAXEXTLEN);
1831         STATE_SET(RIGHT_CONTIG,
1832                 STATE_TEST(RIGHT_VALID) && STATE_TEST(RIGHT_DELAY) &&
1833                 new->br_startoff + new->br_blockcount == right.br_startoff &&
1834                 new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
1835                 (!STATE_TEST(LEFT_CONTIG) ||
1836                  (left.br_blockcount + new->br_blockcount +
1837                      right.br_blockcount <= MAXEXTLEN)));
1838         /*
1839          * Switch out based on the contiguity flags.
1840          */
1841         switch (SWITCH_STATE) {
1842
1843         case MASK2(LEFT_CONTIG, RIGHT_CONTIG):
1844                 /*
1845                  * New allocation is contiguous with delayed allocations
1846                  * on the left and on the right.
1847                  * Merge all three into a single extent list entry.
1848                  */
1849                 temp = left.br_blockcount + new->br_blockcount +
1850                         right.br_blockcount;
1851                 xfs_bmap_trace_pre_update(fname, "LC|RC", ip, idx - 1,
1852                         XFS_DATA_FORK);
1853                 xfs_bmbt_set_blockcount(ep - 1, temp);
1854                 oldlen = STARTBLOCKVAL(left.br_startblock) +
1855                         STARTBLOCKVAL(new->br_startblock) +
1856                         STARTBLOCKVAL(right.br_startblock);
1857                 newlen = xfs_bmap_worst_indlen(ip, temp);
1858                 xfs_bmbt_set_startblock(ep - 1, NULLSTARTBLOCK((int)newlen));
1859                 xfs_bmap_trace_post_update(fname, "LC|RC", ip, idx - 1,
1860                         XFS_DATA_FORK);
1861                 xfs_bmap_trace_delete(fname, "LC|RC", ip, idx, 1,
1862                         XFS_DATA_FORK);
1863                 xfs_bmap_delete_exlist(ip, idx, 1, XFS_DATA_FORK);
1864                 ip->i_df.if_lastex = idx - 1;
1865                 break;
1866
1867         case MASK(LEFT_CONTIG):
1868                 /*
1869                  * New allocation is contiguous with a delayed allocation
1870                  * on the left.
1871                  * Merge the new allocation with the left neighbor.
1872                  */
1873                 temp = left.br_blockcount + new->br_blockcount;
1874                 xfs_bmap_trace_pre_update(fname, "LC", ip, idx - 1,
1875                         XFS_DATA_FORK);
1876                 xfs_bmbt_set_blockcount(ep - 1, temp);
1877                 oldlen = STARTBLOCKVAL(left.br_startblock) +
1878                         STARTBLOCKVAL(new->br_startblock);
1879                 newlen = xfs_bmap_worst_indlen(ip, temp);
1880                 xfs_bmbt_set_startblock(ep - 1, NULLSTARTBLOCK((int)newlen));
1881                 xfs_bmap_trace_post_update(fname, "LC", ip, idx - 1,
1882                         XFS_DATA_FORK);
1883                 ip->i_df.if_lastex = idx - 1;
1884                 break;
1885
1886         case MASK(RIGHT_CONTIG):
1887                 /*
1888                  * New allocation is contiguous with a delayed allocation
1889                  * on the right.
1890                  * Merge the new allocation with the right neighbor.
1891                  */
1892                 xfs_bmap_trace_pre_update(fname, "RC", ip, idx, XFS_DATA_FORK);
1893                 temp = new->br_blockcount + right.br_blockcount;
1894                 oldlen = STARTBLOCKVAL(new->br_startblock) +
1895                         STARTBLOCKVAL(right.br_startblock);
1896                 newlen = xfs_bmap_worst_indlen(ip, temp);
1897                 xfs_bmbt_set_allf(ep, new->br_startoff,
1898                         NULLSTARTBLOCK((int)newlen), temp, right.br_state);
1899                 xfs_bmap_trace_post_update(fname, "RC", ip, idx, XFS_DATA_FORK);
1900                 ip->i_df.if_lastex = idx;
1901                 break;
1902
1903         case 0:
1904                 /*
1905                  * New allocation is not contiguous with another
1906                  * delayed allocation.
1907                  * Insert a new entry.
1908                  */
1909                 oldlen = newlen = 0;
1910                 xfs_bmap_trace_insert(fname, "0", ip, idx, 1, new, NULL,
1911                         XFS_DATA_FORK);
1912                 xfs_bmap_insert_exlist(ip, idx, 1, new, XFS_DATA_FORK);
1913                 ip->i_df.if_lastex = idx;
1914                 break;
1915         }
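             /*
              * Merging delayed extents can only shrink the total
              * indirect-block reservation: e.g. two reservations of 2 blocks
              * each merged into one extent whose worst-case estimate is 3
              * lets 1 block go back to the free-space counter below.
              */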
1916         if (oldlen != newlen) {
1917                 ASSERT(oldlen > newlen);
1918                 xfs_mod_incore_sb(ip->i_mount, XFS_SBS_FDBLOCKS,
1919                         (int)(oldlen - newlen), rsvd);
1920                 /*
1921                  * Nothing to do for disk quota accounting here.
1922                  */
1923         }
1924         *logflagsp = 0;
1925         return 0;
1926 #undef  MASK
1927 #undef  MASK2
1928 #undef  STATE_SET
1929 #undef  STATE_TEST
1930 #undef  STATE_SET_TEST
1931 #undef  SWITCH_STATE
1932 }
1933
1934 /*
1935  * Called by xfs_bmap_add_extent to handle cases converting a hole
1936  * to a real allocation.
1937  */
1938 STATIC int                              /* error */
1939 xfs_bmap_add_extent_hole_real(
1940         xfs_inode_t             *ip,    /* incore inode pointer */
1941         xfs_extnum_t            idx,    /* extent number to update/insert */
1942         xfs_btree_cur_t         *cur,   /* if null, not a btree */
1943         xfs_bmbt_irec_t         *new,   /* new data to put in extent list */
1944         int                     *logflagsp, /* inode logging flags */
1945         int                     whichfork) /* data or attr fork */
1946 {
1947         xfs_bmbt_rec_t          *ep;    /* pointer to extent entry ins. point */
1948         int                     error;  /* error return value */
1949 #ifdef XFS_BMAP_TRACE
1950         static char             fname[] = "xfs_bmap_add_extent_hole_real";
1951 #endif
1952         int                     i;      /* temp state */
1953         xfs_ifork_t             *ifp;   /* inode fork pointer */
1954         xfs_bmbt_irec_t         left;   /* left neighbor extent entry */
1955         xfs_bmbt_irec_t         right;  /* right neighbor extent entry */
1956         int                     state;  /* state bits, accessed thru macros */
1957         enum {                          /* bit number definitions for state */
1958                 LEFT_CONTIG,    RIGHT_CONTIG,
1959                 LEFT_DELAY,     RIGHT_DELAY,
1960                 LEFT_VALID,     RIGHT_VALID
1961         };
1962
1963 #define MASK(b)                 (1 << (b))
1964 #define MASK2(a,b)              (MASK(a) | MASK(b))
1965 #define STATE_SET(b,v)          ((v) ? (state |= MASK(b)) : (state &= ~MASK(b)))
1966 #define STATE_TEST(b)           (state & MASK(b))
1967 #define STATE_SET_TEST(b,v)     ((v) ? ((state |= MASK(b)), 1) : \
1968                                        ((state &= ~MASK(b)), 0))
1969 #define SWITCH_STATE            (state & MASK2(LEFT_CONTIG, RIGHT_CONTIG))
1970
1971         ifp = XFS_IFORK_PTR(ip, whichfork);
1972         ASSERT(idx <= ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t));
1973         ep = &ifp->if_u1.if_extents[idx];
1974         state = 0;
1975         /*
1976          * Check and set flags if this segment has a left neighbor.
1977          */
1978         if (STATE_SET_TEST(LEFT_VALID, idx > 0)) {
1979                 xfs_bmbt_get_all(ep - 1, &left);
1980                 STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(left.br_startblock));
1981         }
1982         /*
1983          * Check and set flags if this segment has a current value.
1984          * Not true if we're inserting into the "hole" at eof.
1985          */
1986         if (STATE_SET_TEST(RIGHT_VALID,
1987                            idx <
1988                            ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))) {
1989                 xfs_bmbt_get_all(ep, &right);
1990                 STATE_SET(RIGHT_DELAY, ISNULLSTARTBLOCK(right.br_startblock));
1991         }
1992         /*
1993          * We're inserting a real allocation between "left" and "right".
1994          * Set the contiguity flags.  Don't let extents get too large.
1995          */
1996         STATE_SET(LEFT_CONTIG,
1997                 STATE_TEST(LEFT_VALID) && !STATE_TEST(LEFT_DELAY) &&
1998                 left.br_startoff + left.br_blockcount == new->br_startoff &&
1999                 left.br_startblock + left.br_blockcount == new->br_startblock &&
2000                 left.br_state == new->br_state &&
2001                 left.br_blockcount + new->br_blockcount <= MAXEXTLEN);
2002         STATE_SET(RIGHT_CONTIG,
2003                 STATE_TEST(RIGHT_VALID) && !STATE_TEST(RIGHT_DELAY) &&
2004                 new->br_startoff + new->br_blockcount == right.br_startoff &&
2005                 new->br_startblock + new->br_blockcount ==
2006                     right.br_startblock &&
2007                 new->br_state == right.br_state &&
2008                 new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
2009                 (!STATE_TEST(LEFT_CONTIG) ||
2010                  left.br_blockcount + new->br_blockcount +
2011                      right.br_blockcount <= MAXEXTLEN));
2012
2013         /*
2014          * Select which case we're in here, and implement it.
2015          */
2016         switch (SWITCH_STATE) {
2017
2018         case MASK2(LEFT_CONTIG, RIGHT_CONTIG):
2019                 /*
2020                  * New allocation is contiguous with real allocations on the
2021                  * left and on the right.
2022                  * Merge all three into a single extent list entry.
2023                  */
2024                 xfs_bmap_trace_pre_update(fname, "LC|RC", ip, idx - 1,
2025                         whichfork);
2026                 xfs_bmbt_set_blockcount(ep - 1,
2027                         left.br_blockcount + new->br_blockcount +
2028                         right.br_blockcount);
2029                 xfs_bmap_trace_post_update(fname, "LC|RC", ip, idx - 1,
2030                         whichfork);
2031                 xfs_bmap_trace_delete(fname, "LC|RC", ip,
2032                         idx, 1, whichfork);
2033                 xfs_bmap_delete_exlist(ip, idx, 1, whichfork);
2034                 ifp->if_lastex = idx - 1;
2035                 XFS_IFORK_NEXT_SET(ip, whichfork,
2036                         XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
2037                 if (cur == NULL) {
2038                         *logflagsp = XFS_ILOG_CORE | XFS_ILOG_FEXT(whichfork);
2039                         return 0;
2040                 }
2041                 *logflagsp = XFS_ILOG_CORE;
2042                 if ((error = xfs_bmbt_lookup_eq(cur, right.br_startoff,
2043                                 right.br_startblock, right.br_blockcount, &i)))
2044                         return error;
2045                 ASSERT(i == 1);
2046                 if ((error = xfs_bmbt_delete(cur, &i)))
2047                         return error;
2048                 ASSERT(i == 1);
2049                 if ((error = xfs_bmbt_decrement(cur, 0, &i)))
2050                         return error;
2051                 ASSERT(i == 1);
2052                 error = xfs_bmbt_update(cur, left.br_startoff,
2053                                 left.br_startblock,
2054                                 left.br_blockcount + new->br_blockcount +
2055                                 right.br_blockcount, left.br_state);
2056                 return error;
2057
2058         case MASK(LEFT_CONTIG):
2059                 /*
2060                  * New allocation is contiguous with a real allocation
2061                  * on the left.
2062                  * Merge the new allocation with the left neighbor.
2063                  */
2064                 xfs_bmap_trace_pre_update(fname, "LC", ip, idx - 1, whichfork);
2065                 xfs_bmbt_set_blockcount(ep - 1,
2066                         left.br_blockcount + new->br_blockcount);
2067                 xfs_bmap_trace_post_update(fname, "LC", ip, idx - 1, whichfork);
2068                 ifp->if_lastex = idx - 1;
2069                 if (cur == NULL) {
2070                         *logflagsp = XFS_ILOG_FEXT(whichfork);
2071                         return 0;
2072                 }
2073                 *logflagsp = 0;
2074                 if ((error = xfs_bmbt_lookup_eq(cur, left.br_startoff,
2075                                 left.br_startblock, left.br_blockcount, &i)))
2076                         return error;
2077                 ASSERT(i == 1);
2078                 error = xfs_bmbt_update(cur, left.br_startoff,
2079                                 left.br_startblock,
2080                                 left.br_blockcount + new->br_blockcount,
2081                                 left.br_state);
2082                 return error;
2083
2084         case MASK(RIGHT_CONTIG):
2085                 /*
2086                  * New allocation is contiguous with a real allocation
2087                  * on the right.
2088                  * Merge the new allocation with the right neighbor.
2089                  */
2090                 xfs_bmap_trace_pre_update(fname, "RC", ip, idx, whichfork);
2091                 xfs_bmbt_set_allf(ep, new->br_startoff, new->br_startblock,
2092                         new->br_blockcount + right.br_blockcount,
2093                         right.br_state);
2094                 xfs_bmap_trace_post_update(fname, "RC", ip, idx, whichfork);
2095                 ifp->if_lastex = idx;
2096                 if (cur == NULL) {
2097                         *logflagsp = XFS_ILOG_FEXT(whichfork);
2098                         return 0;
2099                 }
2100                 *logflagsp = 0;
2101                 if ((error = xfs_bmbt_lookup_eq(cur, right.br_startoff,
2102                                 right.br_startblock, right.br_blockcount, &i)))
2103                         return error;
2104                 ASSERT(i == 1);
2105                 error = xfs_bmbt_update(cur, new->br_startoff,
2106                                 new->br_startblock,
2107                                 new->br_blockcount + right.br_blockcount,
2108                                 right.br_state);
2109                 return error;
2110
2111         case 0:
2112                 /*
2113                  * New allocation is not contiguous with another
2114                  * real allocation.
2115                  * Insert a new entry.
2116                  */
2117                 xfs_bmap_trace_insert(fname, "0", ip, idx, 1, new, NULL,
2118                         whichfork);
2119                 xfs_bmap_insert_exlist(ip, idx, 1, new, whichfork);
2120                 ifp->if_lastex = idx;
2121                 XFS_IFORK_NEXT_SET(ip, whichfork,
2122                         XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
2123                 if (cur == NULL) {
2124                         *logflagsp = XFS_ILOG_CORE | XFS_ILOG_FEXT(whichfork);
2125                         return 0;
2126                 }
2127                 *logflagsp = XFS_ILOG_CORE;
2128                 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
2129                                 new->br_startblock, new->br_blockcount, &i)))
2130                         return error;
2131                 ASSERT(i == 0);
2132                 cur->bc_rec.b.br_state = new->br_state;
2133                 if ((error = xfs_bmbt_insert(cur, &i)))
2134                         return error;
2135                 ASSERT(i == 1);
2136                 return 0;
2137         }
2138 #undef  MASK
2139 #undef  MASK2
2140 #undef  STATE_SET
2141 #undef  STATE_TEST
2142 #undef  STATE_SET_TEST
2143 #undef  SWITCH_STATE
2144         /* NOTREACHED */
2145         ASSERT(0);
2146                 return 0; /* keep gcc quiet */
2147 }
2148
2149 /*
2150  * Adjust the size of the new extent based on di_extsize and rt extsize.
2151  */
2152 STATIC int
2153 xfs_bmap_extsize_align(
2154         xfs_mount_t     *mp,
2155         xfs_bmbt_irec_t *gotp,          /* next extent pointer */
2156         xfs_bmbt_irec_t *prevp,         /* previous extent pointer */
2157         xfs_extlen_t    extsz,          /* align to this extent size */
2158         int             rt,             /* is this a realtime inode? */
2159         int             eof,            /* is extent at end-of-file? */
2160         int             delay,          /* creating delalloc extent? */
2161         int             convert,        /* overwriting unwritten extent? */
2162         xfs_fileoff_t   *offp,          /* in/out: aligned offset */
2163         xfs_extlen_t    *lenp)          /* in/out: aligned length */
2164 {
2165         xfs_fileoff_t   orig_off;       /* original offset */
2166         xfs_extlen_t    orig_alen;      /* original length */
2167         xfs_fileoff_t   orig_end;       /* original off+len */
2168         xfs_fileoff_t   nexto;          /* next file offset */
2169         xfs_fileoff_t   prevo;          /* previous file offset */
2170         xfs_fileoff_t   align_off;      /* temp for offset */
2171         xfs_extlen_t    align_alen;     /* temp for length */
2172         xfs_extlen_t    temp;           /* temp for calculations */
2173
2174         if (convert)
2175                 return 0;
2176
2177         orig_off = align_off = *offp;
2178         orig_alen = align_alen = *lenp;
2179         orig_end = orig_off + orig_alen;
2180
2181         /*
2182          * If this request overlaps an existing extent, then don't
2183          * attempt to perform any additional alignment.
2184          */
2185         if (!delay && !eof &&
2186             (orig_off >= gotp->br_startoff) &&
2187             (orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
2188                 return 0;
2189         }
2190
2191         /*
2192          * If the file offset is unaligned vs. the extent size
2193          * we need to align it.  This will be possible unless
2194          * the file was previously written with a kernel that didn't
2195          * perform this alignment, or if a truncate shot us in the
2196          * foot.
2197          */
2198         temp = do_mod(orig_off, extsz);
2199         if (temp) {
2200                 align_alen += temp;
2201                 align_off -= temp;
2202         }
2203         /*
2204          * Same adjustment for the end of the requested area.
2205          */
2206         if ((temp = (align_alen % extsz))) {
2207                 align_alen += extsz - temp;
2208         }
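             /*
              * For example, with extsz == 16 a request at offset 5 for 20
              * blocks becomes:
              *
              *      align_off  = 5 - (5 % 16)            = 0
              *      align_alen = 20 + 5 + (16 - 25 % 16) = 32
              *
              * i.e. an extsz-aligned range [0, 32) covering the original
              * [5, 25) request.
              */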
2209         /*
2210          * If the previous block overlaps with this proposed allocation
2211          * then move the start forward without adjusting the length.
2212          */
2213         if (prevp->br_startoff != NULLFILEOFF) {
2214                 if (prevp->br_startblock == HOLESTARTBLOCK)
2215                         prevo = prevp->br_startoff;
2216                 else
2217                         prevo = prevp->br_startoff + prevp->br_blockcount;
2218         } else
2219                 prevo = 0;
2220         if (align_off != orig_off && align_off < prevo)
2221                 align_off = prevo;
2222         /*
2223          * If the next block overlaps with this proposed allocation
2224          * then move the start back without adjusting the length,
2225          * but not before offset 0.
2226          * This may of course make the start overlap the previous block,
2227          * and if we hit the offset 0 limit then the next block
2228          * can still overlap too.
2229          */
2230         if (!eof && gotp->br_startoff != NULLFILEOFF) {
2231                 if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
2232                     (!delay && gotp->br_startblock == DELAYSTARTBLOCK))
2233                         nexto = gotp->br_startoff + gotp->br_blockcount;
2234                 else
2235                         nexto = gotp->br_startoff;
2236         } else
2237                 nexto = NULLFILEOFF;
2238         if (!eof &&
2239             align_off + align_alen != orig_end &&
2240             align_off + align_alen > nexto)
2241                 align_off = nexto > align_alen ? nexto - align_alen : 0;
2242         /*
2243          * If we're now overlapping the next or previous extent that
2244          * means we can't fit an extsz piece in this hole.  Just move
2245          * the start forward to the first valid spot and set
2246          * the length so we hit the end.
2247          */
2248         if (align_off != orig_off && align_off < prevo)
2249                 align_off = prevo;
2250         if (align_off + align_alen != orig_end &&
2251             align_off + align_alen > nexto &&
2252             nexto != NULLFILEOFF) {
2253                 ASSERT(nexto > prevo);
2254                 align_alen = nexto - align_off;
2255         }
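             /*
              * For example, with extsz == 16 a request for [12, 16) sitting
              * in a hole bounded by prevo == 10 and nexto == 20 ends up as
              * [10, 20): no longer extsz-aligned, but it fills the hole and
              * still covers the original request.
              */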
2256
2257         /*
2258          * If realtime, and the result isn't a multiple of the realtime
2259          * extent size we need to remove blocks until it is.
2260          */
2261         if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) {
2262                 /*
2263                  * We're not covering the original request, or
2264                  * we won't be able to once we fix the length.
2265                  */
2266                 if (orig_off < align_off ||
2267                     orig_end > align_off + align_alen ||
2268                     align_alen - temp < orig_alen)
2269                         return XFS_ERROR(EINVAL);
2270                 /*
2271                  * Try to fix it by moving the start up.
2272                  */
2273                 if (align_off + temp <= orig_off) {
2274                         align_alen -= temp;
2275                         align_off += temp;
2276                 }
2277                 /*
2278                  * Try to fix it by moving the end in.
2279                  */
2280                 else if (align_off + align_alen - temp >= orig_end)
2281                         align_alen -= temp;
2282                 /*
2283                  * Set the start to the minimum then trim the length.
2284                  */
2285                 else {
2286                         align_alen -= orig_off - align_off;
2287                         align_off = orig_off;
2288                         align_alen -= align_alen % mp->m_sb.sb_rextsize;
2289                 }
2290                 /*
2291                  * Result doesn't cover the request, fail it.
2292                  */
2293                 if (orig_off < align_off || orig_end > align_off + align_alen)
2294                         return XFS_ERROR(EINVAL);
2295         } else {
2296                 ASSERT(orig_off >= align_off);
2297                 ASSERT(orig_end <= align_off + align_alen);
2298         }
2299
2300 #ifdef DEBUG
2301         if (!eof && gotp->br_startoff != NULLFILEOFF)
2302                 ASSERT(align_off + align_alen <= gotp->br_startoff);
2303         if (prevp->br_startoff != NULLFILEOFF)
2304                 ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
2305 #endif
2306
2307         *lenp = align_alen;
2308         *offp = align_off;
2309         return 0;
2310 }
2311
2312 #define XFS_ALLOC_GAP_UNITS     4
2313
2314 /*
2315  * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
2316  * It figures out where to ask the underlying allocator to put the new extent.
2317  */
2318 STATIC int
2319 xfs_bmap_alloc(
2320         xfs_bmalloca_t  *ap)            /* bmap alloc argument struct */
2321 {
2322         xfs_fsblock_t   adjust;         /* adjustment to block numbers */
2323         xfs_alloctype_t atype=0;        /* type for allocation routines */
2324         int             error;          /* error return value */
2325         xfs_agnumber_t  fb_agno;        /* ag number of ap->firstblock */
2326         xfs_mount_t     *mp;            /* mount point structure */
2327         int             nullfb;         /* true if ap->firstblock isn't set */
2328         int             rt;             /* true if inode is realtime */
2329         xfs_extlen_t    prod = 0;       /* product factor for allocators */
2330         xfs_extlen_t    ralen = 0;      /* realtime allocation length */
2331         xfs_extlen_t    align;          /* minimum allocation alignment */
2332         xfs_rtblock_t   rtx;
2333
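     /*
      * ISVALID(x, y) checks that candidate block x is usable: for a
      * realtime file it must lie within the realtime device; otherwise it
      * must be in the same allocation group as y and within that AG's
      * block range.
      */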
2334 #define ISVALID(x,y)    \
2335         (rt ? \
2336                 (x) < mp->m_sb.sb_rblocks : \
2337                 XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \
2338                 XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \
2339                 XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks)
2340
2341         /*
2342          * Set up variables.
2343          */
2344         mp = ap->ip->i_mount;
2345         nullfb = ap->firstblock == NULLFSBLOCK;
2346         rt = XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata;
2347         fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, ap->firstblock);
2348         if (rt) {
2349                 align = ap->ip->i_d.di_extsize ?
2350                         ap->ip->i_d.di_extsize : mp->m_sb.sb_rextsize;
2351                 /* Set prod to match the extent size */
2352                 prod = align / mp->m_sb.sb_rextsize;
2353
2354                 error = xfs_bmap_extsize_align(mp, ap->gotp, ap->prevp,
2355                                                 align, rt, ap->eof, 0,
2356                                                 ap->conv, &ap->off, &ap->alen);
2357                 if (error)
2358                         return error;
2359                 ASSERT(ap->alen);
2360                 ASSERT(ap->alen % mp->m_sb.sb_rextsize == 0);
2361
2362                 /*
2363                  * If the offset & length are not perfectly aligned
2364                  * then kill prod, it will just get us in trouble.
2365                  */
2366                 if (do_mod(ap->off, align) || ap->alen % align)
2367                         prod = 1;
2368                 /*
2369                  * Set ralen to be the actual requested length in rtextents.
2370                  */
2371                 ralen = ap->alen / mp->m_sb.sb_rextsize;
2372                 /*
2373                  * If the old value was close enough to MAXEXTLEN that
2374                  * we rounded up to it, cut it back so it's valid again.
2375                  * Note that if it's a really large request (bigger than
2376                  * MAXEXTLEN), we don't hear about that number, and can't
2377                  * adjust the starting point to match it.
2378                  */
2379                 if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
2380                         ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;
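                     /*
                      * For example, assuming MAXEXTLEN is 0x1fffff blocks and
                      * sb_rextsize is 16, a request rounded up to 131072
                      * rtextents (2097152 blocks) would be cut back here to
                      * 131071 rtextents.
                      */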
2381                 /*
2382                  * If it's an allocation to an empty file at offset 0,
2383                  * pick an extent that will space things out in the rt area.
2384                  */
2385                 if (ap->eof && ap->off == 0) {
2386                         error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
2387                         if (error)
2388                                 return error;
2389                         ap->rval = rtx * mp->m_sb.sb_rextsize;
2390                 } else
2391                         ap->rval = 0;
2392         } else {
2393                 align = (ap->userdata && ap->ip->i_d.di_extsize &&
2394                         (ap->ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE)) ?
2395                         ap->ip->i_d.di_extsize : 0;
2396                 if (unlikely(align)) {
2397                         error = xfs_bmap_extsize_align(mp, ap->gotp, ap->prevp,
2398                                                         align, rt,
2399                                                         ap->eof, 0, ap->conv,
2400                                                         &ap->off, &ap->alen);
2401                         ASSERT(!error);
2402                         ASSERT(ap->alen);
2403                 }
2404                 if (nullfb)
2405                         ap->rval = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
2406                 else
2407                         ap->rval = ap->firstblock;
2408         }
2409
2410         /*
2411          * If allocating at eof, and there's a previous real block,
2412          * try to use its last block as our starting point.
2413          */
2414         if (ap->eof && ap->prevp->br_startoff != NULLFILEOFF &&
2415             !ISNULLSTARTBLOCK(ap->prevp->br_startblock) &&
2416             ISVALID(ap->prevp->br_startblock + ap->prevp->br_blockcount,
2417                     ap->prevp->br_startblock)) {
2418                 ap->rval = ap->prevp->br_startblock + ap->prevp->br_blockcount;
2419                 /*
2420                  * Adjust for the gap between prevp and us.
2421                  */
2422                 adjust = ap->off -
2423                         (ap->prevp->br_startoff + ap->prevp->br_blockcount);
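                     /*
                      * For example (illustrative values): if the previous
                      * extent maps file blocks [10, 20) to fsblocks
                      * [1000, 1010) and this request starts at file block 25,
                      * adjust is 5 and we aim for fsblock 1015, preserving
                      * the file-offset/disk-block gap.
                      */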
2424                 if (adjust &&
2425                     ISVALID(ap->rval + adjust, ap->prevp->br_startblock))
2426                         ap->rval += adjust;
2427         }
2428         /*
2429          * If not at eof, then compare the two neighbor blocks.
2430          * Figure out whether either one gives us a good starting point,
2431          * and pick the better one.
2432          */
2433         else if (!ap->eof) {
2434                 xfs_fsblock_t   gotbno;         /* right side block number */
2435                 xfs_fsblock_t   gotdiff=0;      /* right side difference */
2436                 xfs_fsblock_t   prevbno;        /* left side block number */
2437                 xfs_fsblock_t   prevdiff=0;     /* left side difference */
2438
2439                 /*
2440                  * If there's a previous (left) block, select a requested
2441                  * start block based on it.
2442                  */
2443                 if (ap->prevp->br_startoff != NULLFILEOFF &&
2444                     !ISNULLSTARTBLOCK(ap->prevp->br_startblock) &&
2445                     (prevbno = ap->prevp->br_startblock +
2446                                ap->prevp->br_blockcount) &&
2447                     ISVALID(prevbno, ap->prevp->br_startblock)) {
2448                         /*
2449                          * Calculate gap to end of previous block.
2450                          */
2451                         adjust = prevdiff = ap->off -
2452                                 (ap->prevp->br_startoff +
2453                                  ap->prevp->br_blockcount);
2454                         /*
2455                          * Figure the startblock based on the previous block's
2456                          * end and the gap size.
2457                          * Heuristic!
2458                          * If the gap is large relative to the piece we're
2459                          * allocating, or using it gives us an invalid block
2460                          * number, then just use the end of the previous block.
2461                          */
2462                         if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->alen &&
2463                             ISVALID(prevbno + prevdiff,
2464                                     ap->prevp->br_startblock))
2465                                 prevbno += adjust;
2466                         else
2467                                 prevdiff += adjust;
2468                         /*
2469                          * If the firstblock forbids it, can't use it,
2470                          * must use default.
2471                          */
2472                         if (!rt && !nullfb &&
2473                             XFS_FSB_TO_AGNO(mp, prevbno) != fb_agno)
2474                                 prevbno = NULLFSBLOCK;
2475                 }
2476                 /*
2477                  * No previous block or can't follow it, just default.
2478                  */
2479                 else
2480                         prevbno = NULLFSBLOCK;
2481                 /*
2482                  * If there's a following (right) block, select a requested
2483                  * start block based on it.
2484                  */
2485                 if (!ISNULLSTARTBLOCK(ap->gotp->br_startblock)) {
2486                         /*
2487                          * Calculate gap to start of next block.
2488                          */
2489                         adjust = gotdiff = ap->gotp->br_startoff - ap->off;
2490                         /*
2491                          * Figure the startblock based on the next block's
2492                          * start and the gap size.
2493                          */
2494                         gotbno = ap->gotp->br_startblock;
2495                         /*
2496                          * Heuristic!
2497                          * If the gap is large relative to the piece we're
2498                          * allocating, or using it gives us an invalid block
2499                          * number, then just use the start of the next block
2500                          * offset by our length.
2501                          */
2502                         if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->alen &&
2503                             ISVALID(gotbno - gotdiff, gotbno))
2504                                 gotbno -= adjust;
2505                         else if (ISVALID(gotbno - ap->alen, gotbno)) {
2506                                 gotbno -= ap->alen;
2507                                 gotdiff += adjust - ap->alen;
2508                         } else
2509                                 gotdiff += adjust;
2510                         /*
2511                          * If the firstblock forbids it, can't use it,
2512                          * must use default.
2513                          */
2514                         if (!rt && !nullfb &&
2515                             XFS_FSB_TO_AGNO(mp, gotbno) != fb_agno)
2516                                 gotbno = NULLFSBLOCK;
2517                 }
2518                 /*
2519                  * No next block, just default.
2520                  */
2521                 else
2522                         gotbno = NULLFSBLOCK;
2523                 /*
2524                  * If both valid, pick the better one, else the only good
2525                  * one, else ap->rval is already set (to 0 or the inode block).
2526                  */
2527                 if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK)
2528                         ap->rval = prevdiff <= gotdiff ? prevbno : gotbno;
2529                 else if (prevbno != NULLFSBLOCK)
2530                         ap->rval = prevbno;
2531                 else if (gotbno != NULLFSBLOCK)
2532                         ap->rval = gotbno;
2533         }
2534         /*
2535          * If allowed, use ap->rval; otherwise must use firstblock since
2536          * it's in the right allocation group.
2537          */
2538         if (nullfb || rt || XFS_FSB_TO_AGNO(mp, ap->rval) == fb_agno)
2539                 ;
2540         else
2541                 ap->rval = ap->firstblock;
2542         /*
2543          * Realtime allocation, done through xfs_rtallocate_extent.
2544          */
2545         if (rt) {
2546 #ifndef __KERNEL__
2547                 ASSERT(0);
2548 #else
2549                 xfs_rtblock_t   rtb;
2550
2551                 atype = ap->rval == 0 ?
2552                         XFS_ALLOCTYPE_ANY_AG : XFS_ALLOCTYPE_NEAR_BNO;
2553                 do_div(ap->rval, mp->m_sb.sb_rextsize);
2554                 rtb = ap->rval;
2555                 ap->alen = ralen;
2556                 if ((error = xfs_rtallocate_extent(ap->tp, ap->rval, 1, ap->alen,
2557                                 &ralen, atype, ap->wasdel, prod, &rtb)))
2558                         return error;
2559                 if (rtb == NULLFSBLOCK && prod > 1 &&
2560                     (error = xfs_rtallocate_extent(ap->tp, ap->rval, 1,
2561                                                    ap->alen, &ralen, atype,
2562                                                    ap->wasdel, 1, &rtb)))
2563                         return error;
2564                 ap->rval = rtb;
2565                 if (ap->rval != NULLFSBLOCK) {
2566                         ap->rval *= mp->m_sb.sb_rextsize;
2567                         ralen *= mp->m_sb.sb_rextsize;
2568                         ap->alen = ralen;
2569                         ap->ip->i_d.di_nblocks += ralen;
2570                         xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
2571                         if (ap->wasdel)
2572                                 ap->ip->i_delayed_blks -= ralen;
2573                         /*
2574                          * Adjust the disk quota also. This was reserved
2575                          * earlier.
2576                          */
2577                         XFS_TRANS_MOD_DQUOT_BYINO(mp, ap->tp, ap->ip,
2578                                 ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
2579                                                 XFS_TRANS_DQ_RTBCOUNT,
2580                                 (long) ralen);
2581                 } else
2582                         ap->alen = 0;
2583 #endif  /* __KERNEL__ */
2584         }
2585         /*
2586          * Normal allocation, done through xfs_alloc_vextent.
2587          */
2588         else {
2589                 xfs_agnumber_t  ag;
2590                 xfs_alloc_arg_t args;
2591                 xfs_extlen_t    blen;
2592                 xfs_extlen_t    delta;
2593                 int             isaligned;
2594                 xfs_extlen_t    longest;
2595                 xfs_extlen_t    need;
2596                 xfs_extlen_t    nextminlen=0;
2597                 int             notinit;
2598                 xfs_perag_t     *pag;
2599                 xfs_agnumber_t  startag;
2600                 int             tryagain;
2601
2602                 tryagain = isaligned = 0;
2603                 args.tp = ap->tp;
2604                 args.mp = mp;
2605                 args.fsbno = ap->rval;
2606                 args.maxlen = MIN(ap->alen, mp->m_sb.sb_agblocks);
2607                 blen = 0;
2608                 if (nullfb) {
2609                         args.type = XFS_ALLOCTYPE_START_BNO;
2610                         args.total = ap->total;
2611                         /*
2612                          * Find the longest available space.
2613                          * We're going to try for the whole allocation at once.
2614                          */
2615                         startag = ag = XFS_FSB_TO_AGNO(mp, args.fsbno);
2616                         notinit = 0;
2617                         down_read(&mp->m_peraglock);
2618                         while (blen < ap->alen) {
2619                                 pag = &mp->m_perag[ag];
2620                                 if (!pag->pagf_init &&
2621                                     (error = xfs_alloc_pagf_init(mp, args.tp,
2622                                             ag, XFS_ALLOC_FLAG_TRYLOCK))) {
2623                                         up_read(&mp->m_peraglock);
2624                                         return error;
2625                                 }
2626                                 /*
2627                                  * See xfs_alloc_fix_freelist...
2628                                  */
2629                                 if (pag->pagf_init) {
2630                                         need = XFS_MIN_FREELIST_PAG(pag, mp);
2631                                         delta = need > pag->pagf_flcount ?
2632                                                 need - pag->pagf_flcount : 0;
2633                                         longest = (pag->pagf_longest > delta) ?
2634                                                 (pag->pagf_longest - delta) :
2635                                                 (pag->pagf_flcount > 0 ||
2636                                                  pag->pagf_longest > 0);
2637                                         if (blen < longest)
2638                                                 blen = longest;
2639                                 } else
2640                                         notinit = 1;
2641                                 if (++ag == mp->m_sb.sb_agcount)
2642                                         ag = 0;
2643                                 if (ag == startag)
2644                                         break;
2645                         }
2646                         up_read(&mp->m_peraglock);
2647                         /*
2648                          * Since the loop above used a trylock, some AGs may
2649                          * have been skipped; there may still be space for this request.
2650                          */
2651                         if (notinit || blen < ap->minlen)
2652                                 args.minlen = ap->minlen;
2653                         /*
2654                          * If the best seen length is less than the request
2655                          * length, use the best as the minimum.
2656                          */
2657                         else if (blen < ap->alen)
2658                                 args.minlen = blen;
2659                         /*
2660                          * Otherwise we've seen an extent as big as alen,
2661                          * use that as the minimum.
2662                          */
2663                         else
2664                                 args.minlen = ap->alen;
2665                 } else if (ap->low) {
2666                         args.type = XFS_ALLOCTYPE_FIRST_AG;
2667                         args.total = args.minlen = ap->minlen;
2668                 } else {
2669                         args.type = XFS_ALLOCTYPE_NEAR_BNO;
2670                         args.total = ap->total;
2671                         args.minlen = ap->minlen;
2672                 }
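                     /*
                      * The prod/mod pair set up below asks the allocator to
                      * trim the length so that it is congruent to mod modulo
                      * prod; since mod is prod minus the offset's remainder,
                      * the end of the allocation should land on a prod-sized
                      * (extent size hint or page size) boundary when possible.
                      */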
2673                 if (unlikely(ap->userdata && ap->ip->i_d.di_extsize &&
2674                             (ap->ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE))) {
2675                         args.prod = ap->ip->i_d.di_extsize;
2676                         if ((args.mod = (xfs_extlen_t)do_mod(ap->off, args.prod)))
2677                                 args.mod = (xfs_extlen_t)(args.prod - args.mod);
2678                 } else if (unlikely(mp->m_sb.sb_blocksize >= NBPP)) {
2679                         args.prod = 1;
2680                         args.mod = 0;
2681                 } else {
2682                         args.prod = NBPP >> mp->m_sb.sb_blocklog;
2683                         if ((args.mod = (xfs_extlen_t)(do_mod(ap->off, args.prod))))
2684                                 args.mod = (xfs_extlen_t)(args.prod - args.mod);
2685                 }
2686                 /*
2687                  * If we are not low on available data blocks, and the
2688                  * underlying logical volume manager is a stripe, and
2689                  * the file offset is zero then try to allocate data
2690                  * blocks on stripe unit boundary.
2691                  * NOTE: ap->aeof is only set if the allocation length
2692                  * is >= the stripe unit and the allocation offset is
2693                  * at the end of file.
2694                  */
2695                 if (!ap->low && ap->aeof) {
2696                         if (!ap->off) {
2697                                 args.alignment = mp->m_dalign;
2698                                 atype = args.type;
2699                                 isaligned = 1;
2700                                 /*
2701                                  * Adjust for alignment
2702                                  */
2703                                 if (blen > args.alignment && blen <= ap->alen)
2704                                         args.minlen = blen - args.alignment;
2705                                 args.minalignslop = 0;
2706                         } else {
2707                                 /*
2708                                  * First try an exact bno allocation.
2709                                  * If it fails then do a near or start bno
2710                                  * allocation with alignment turned on.
2711                                  */
2712                                 atype = args.type;
2713                                 tryagain = 1;
2714                                 args.type = XFS_ALLOCTYPE_THIS_BNO;
2715                                 args.alignment = 1;
2716                                 /*
2717                                  * Compute the minlen+alignment for the
2718                                  * next case.  Set slop so that the value
2719                                  * of minlen+alignment+slop doesn't go up
2720                                  * between the calls.
2721                                  */
2722                                 if (blen > mp->m_dalign && blen <= ap->alen)
2723                                         nextminlen = blen - mp->m_dalign;
2724                                 else
2725                                         nextminlen = args.minlen;
2726                                 if (nextminlen + mp->m_dalign > args.minlen + 1)
2727                                         args.minalignslop =
2728                                                 nextminlen + mp->m_dalign -
2729                                                 args.minlen - 1;
2730                                 else
2731                                         args.minalignslop = 0;
2732                         }
2733                 } else {
2734                         args.alignment = 1;
2735                         args.minalignslop = 0;
2736                 }
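                     /*
                      * Illustrative numbers for the minalignslop logic above
                      * (assumed values): with m_dalign = 32, args.minlen = 8
                      * and blen = 0, nextminlen is 8 and minalignslop becomes
                      * 8 + 32 - 8 - 1 = 31, so the exact-bno try reserves
                      * 8 + 1 + 31 and the aligned retry needs 8 + 32 + 0,
                      * both 40 blocks.
                      */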
2737                 args.minleft = ap->minleft;
2738                 args.wasdel = ap->wasdel;
2739                 args.isfl = 0;
2740                 args.userdata = ap->userdata;
2741                 if ((error = xfs_alloc_vextent(&args)))
2742                         return error;
2743                 if (tryagain && args.fsbno == NULLFSBLOCK) {
2744                         /*
2745                          * Exact allocation failed. Now try with alignment
2746                          * turned on.
2747                          */
2748                         args.type = atype;
2749                         args.fsbno = ap->rval;
2750                         args.alignment = mp->m_dalign;
2751                         args.minlen = nextminlen;
2752                         args.minalignslop = 0;
2753                         isaligned = 1;
2754                         if ((error = xfs_alloc_vextent(&args)))
2755                                 return error;
2756                 }
2757                 if (isaligned && args.fsbno == NULLFSBLOCK) {
2758                         /*
2759                          * allocation failed, so turn off alignment and
2760                          * try again.
2761                          */
2762                         args.type = atype;
2763                         args.fsbno = ap->rval;
2764                         args.alignment = 0;
2765                         if ((error = xfs_alloc_vextent(&args)))
2766                                 return error;
2767                 }
2768                 if (args.fsbno == NULLFSBLOCK && nullfb &&
2769                     args.minlen > ap->minlen) {
2770                         args.minlen = ap->minlen;
2771                         args.type = XFS_ALLOCTYPE_START_BNO;
2772                         args.fsbno = ap->rval;
2773                         if ((error = xfs_alloc_vextent(&args)))
2774                                 return error;
2775                 }
2776                 if (args.fsbno == NULLFSBLOCK && nullfb) {
2777                         args.fsbno = 0;
2778                         args.type = XFS_ALLOCTYPE_FIRST_AG;
2779                         args.total = ap->minlen;
2780                         args.minleft = 0;
2781                         if ((error = xfs_alloc_vextent(&args)))
2782                                 return error;
2783                         ap->low = 1;
2784                 }
2785                 if (args.fsbno != NULLFSBLOCK) {
2786                         ap->firstblock = ap->rval = args.fsbno;
2787                         ASSERT(nullfb || fb_agno == args.agno ||
2788                                (ap->low && fb_agno < args.agno));
2789                         ap->alen = args.len;
2790                         ap->ip->i_d.di_nblocks += args.len;
2791                         xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
2792                         if (ap->wasdel)
2793                                 ap->ip->i_delayed_blks -= args.len;
2794                         /*
2795                          * Adjust the disk quota also. This was reserved
2796                          * earlier.
2797                          */
2798                         XFS_TRANS_MOD_DQUOT_BYINO(mp, ap->tp, ap->ip,
2799                                 ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT :
2800                                                 XFS_TRANS_DQ_BCOUNT,
2801                                 (long) args.len);
2802                 } else {
2803                         ap->rval = NULLFSBLOCK;
2804                         ap->alen = 0;
2805                 }
2806         }
2807         return 0;
2808 #undef  ISVALID
2809 }
2810
2811 /*
2812  * Transform a btree format file with only one leaf node, where the
2813  * extents list will fit in the inode, into an extents format file.
2814  * Since the extent list is already in-core, all we have to do is
2815  * give up the space for the btree root and pitch the leaf block.
2816  */
2817 STATIC int                              /* error */
2818 xfs_bmap_btree_to_extents(
2819         xfs_trans_t             *tp,    /* transaction pointer */
2820         xfs_inode_t             *ip,    /* incore inode pointer */
2821         xfs_btree_cur_t         *cur,   /* btree cursor */
2822         int                     *logflagsp, /* inode logging flags */
2823         int                     whichfork)  /* data or attr fork */
2824 {
2825         /* REFERENCED */
2826         xfs_bmbt_block_t        *cblock;/* child btree block */
2827         xfs_fsblock_t           cbno;   /* child block number */
2828         xfs_buf_t               *cbp;   /* child block's buffer */
2829         int                     error;  /* error return value */
2830         xfs_ifork_t             *ifp;   /* inode fork data */
2831         xfs_mount_t             *mp;    /* mount point structure */
2832         xfs_bmbt_ptr_t          *pp;    /* ptr to block address */
2833         xfs_bmbt_block_t        *rblock;/* root btree block */
2834
2835         ifp = XFS_IFORK_PTR(ip, whichfork);
2836         ASSERT(ifp->if_flags & XFS_IFEXTENTS);
2837         ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
2838         rblock = ifp->if_broot;
2839         ASSERT(be16_to_cpu(rblock->bb_level) == 1);
2840         ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
2841         ASSERT(XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes) == 1);
2842         mp = ip->i_mount;
2843         pp = XFS_BMAP_BROOT_PTR_ADDR(rblock, 1, ifp->if_broot_bytes);
2844         *logflagsp = 0;
2845 #ifdef DEBUG
2846         if ((error = xfs_btree_check_lptr(cur, INT_GET(*pp, ARCH_CONVERT), 1)))
2847                 return error;
2848 #endif
2849         cbno = INT_GET(*pp, ARCH_CONVERT);
2850         if ((error = xfs_btree_read_bufl(mp, tp, cbno, 0, &cbp,
2851                         XFS_BMAP_BTREE_REF)))
2852                 return error;
2853         cblock = XFS_BUF_TO_BMBT_BLOCK(cbp);
2854         if ((error = xfs_btree_check_lblock(cur, cblock, 0, cbp)))
2855                 return error;
2856         xfs_bmap_add_free(cbno, 1, cur->bc_private.b.flist, mp);
2857         ip->i_d.di_nblocks--;
2858         XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
2859         xfs_trans_binval(tp, cbp);
2860         if (cur->bc_bufs[0] == cbp)
2861                 cur->bc_bufs[0] = NULL;
2862         xfs_iroot_realloc(ip, -1, whichfork);
2863         ASSERT(ifp->if_broot == NULL);
2864         ASSERT((ifp->if_flags & XFS_IFBROOT) == 0);
2865         XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
2866         *logflagsp = XFS_ILOG_CORE | XFS_ILOG_FEXT(whichfork);
2867         return 0;
2868 }
2869
2870 /*
2871  * Called by xfs_bmapi to update extent list structure and the btree
2872  * after removing space (or undoing a delayed allocation).
2873  */
2874 STATIC int                              /* error */
2875 xfs_bmap_del_extent(
2876         xfs_inode_t             *ip,    /* incore inode pointer */
2877         xfs_trans_t             *tp,    /* current transaction pointer */
2878         xfs_extnum_t            idx,    /* extent number to update/delete */
2879         xfs_bmap_free_t         *flist, /* list of extents to be freed */
2880         xfs_btree_cur_t         *cur,   /* if null, not a btree */
2881         xfs_bmbt_irec_t         *del,   /* data to remove from extent list */
2882         int                     *logflagsp, /* inode logging flags */
2883         int                     whichfork, /* data or attr fork */
2884         int                     rsvd)   /* OK to allocate reserved blocks */
2885 {
2886         xfs_filblks_t           da_new; /* new delay-alloc indirect blocks */
2887         xfs_filblks_t           da_old; /* old delay-alloc indirect blocks */
2888         xfs_fsblock_t           del_endblock=0; /* first block past del */
2889         xfs_fileoff_t           del_endoff;     /* first offset past del */
2890         int                     delay;  /* current block is delayed allocated */
2891         int                     do_fx;  /* free extent at end of routine */
2892         xfs_bmbt_rec_t          *ep;    /* current extent entry pointer */
2893         int                     error;  /* error return value */
2894         int                     flags;  /* inode logging flags */
2895 #ifdef XFS_BMAP_TRACE
2896         static char             fname[] = "xfs_bmap_del_extent";
2897 #endif
2898         xfs_bmbt_irec_t         got;    /* current extent entry */
2899         xfs_fileoff_t           got_endoff;     /* first offset past got */
2900         int                     i;      /* temp state */
2901         xfs_ifork_t             *ifp;   /* inode fork pointer */
2902         xfs_mount_t             *mp;    /* mount structure */
2903         xfs_filblks_t           nblks;  /* quota/sb block count */
2904         xfs_bmbt_irec_t         new;    /* new record to be inserted */
2905         /* REFERENCED */
2906         xfs_extnum_t            nextents;       /* number of extents in list */
2907         uint                    qfield; /* quota field to update */
2908         xfs_filblks_t           temp;   /* for indirect length calculations */
2909         xfs_filblks_t           temp2;  /* for indirect length calculations */
2910
2911         XFS_STATS_INC(xs_del_exlist);
2912         mp = ip->i_mount;
2913         ifp = XFS_IFORK_PTR(ip, whichfork);
2914         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
2915         ASSERT(idx >= 0 && idx < nextents);
2916         ASSERT(del->br_blockcount > 0);
2917         ep = &ifp->if_u1.if_extents[idx];
2918         xfs_bmbt_get_all(ep, &got);
2919         ASSERT(got.br_startoff <= del->br_startoff);
2920         del_endoff = del->br_startoff + del->br_blockcount;
2921         got_endoff = got.br_startoff + got.br_blockcount;
2922         ASSERT(got_endoff >= del_endoff);
2923         delay = ISNULLSTARTBLOCK(got.br_startblock);
2924         ASSERT(ISNULLSTARTBLOCK(del->br_startblock) == delay);
2925         flags = 0;
2926         qfield = 0;
2927         error = 0;
2928         /*
2929          * If deleting a real allocation, must free up the disk space.
2930          */
2931         if (!delay) {
2932                 flags = XFS_ILOG_CORE;
2933                 /*
2934                  * Realtime allocation.  Free it and record di_nblocks update.
2935                  */
2936                 if (whichfork == XFS_DATA_FORK &&
2937                     (ip->i_d.di_flags & XFS_DIFLAG_REALTIME)) {
2938                         xfs_fsblock_t   bno;
2939                         xfs_filblks_t   len;
2940
2941                         ASSERT(do_mod(del->br_blockcount,
2942                                       mp->m_sb.sb_rextsize) == 0);
2943                         ASSERT(do_mod(del->br_startblock,
2944                                       mp->m_sb.sb_rextsize) == 0);
2945                         bno = del->br_startblock;
2946                         len = del->br_blockcount;
2947                         do_div(bno, mp->m_sb.sb_rextsize);
2948                         do_div(len, mp->m_sb.sb_rextsize);
2949                         if ((error = xfs_rtfree_extent(ip->i_transp, bno,
2950                                         (xfs_extlen_t)len)))
2951                                 goto done;
2952                         do_fx = 0;
2953                         nblks = len * mp->m_sb.sb_rextsize;
2954                         qfield = XFS_TRANS_DQ_RTBCOUNT;
2955                 }
2956                 /*
2957                  * Ordinary allocation.
2958                  */
2959                 else {
2960                         do_fx = 1;
2961                         nblks = del->br_blockcount;
2962                         qfield = XFS_TRANS_DQ_BCOUNT;
2963                 }
2964                 /*
2965                  * Set up del_endblock and cur for later.
2966                  */
2967                 del_endblock = del->br_startblock + del->br_blockcount;
2968                 if (cur) {
2969                         if ((error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
2970                                         got.br_startblock, got.br_blockcount,
2971                                         &i)))
2972                                 goto done;
2973                         ASSERT(i == 1);
2974                 }
2975                 da_old = da_new = 0;
2976         } else {
2977                 da_old = STARTBLOCKVAL(got.br_startblock);
2978                 da_new = 0;
2979                 nblks = 0;
2980                 do_fx = 0;
2981         }
2982         /*
2983          * Set flag value to use in the switch statement: bit 1 is set
2984          * when del starts at got's start, bit 0 when it ends at got's end.
2985          */
2986         switch (((got.br_startoff == del->br_startoff) << 1) |
2987                 (got_endoff == del_endoff)) {
2988         case 3:
2989                 /*
2990                  * Matches the whole extent.  Delete the entry.
2991                  */
2992                 xfs_bmap_trace_delete(fname, "3", ip, idx, 1, whichfork);
2993                 xfs_bmap_delete_exlist(ip, idx, 1, whichfork);
2994                 ifp->if_lastex = idx;
2995                 if (delay)
2996                         break;
2997                 XFS_IFORK_NEXT_SET(ip, whichfork,
2998                         XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
2999                 flags |= XFS_ILOG_CORE;
3000                 if (!cur) {
3001                         flags |= XFS_ILOG_FEXT(whichfork);
3002                         break;
3003                 }
3004                 if ((error = xfs_bmbt_delete(cur, &i)))
3005                         goto done;
3006                 ASSERT(i == 1);
3007                 break;
3008
3009         case 2:
3010                 /*
3011                  * Deleting the first part of the extent.
3012                  */
3013                 xfs_bmap_trace_pre_update(fname, "2", ip, idx, whichfork);
3014                 xfs_bmbt_set_startoff(ep, del_endoff);
3015                 temp = got.br_blockcount - del->br_blockcount;
3016                 xfs_bmbt_set_blockcount(ep, temp);
3017                 ifp->if_lastex = idx;
3018                 if (delay) {
3019                         temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
3020                                 da_old);
3021                         xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
3022                         xfs_bmap_trace_post_update(fname, "2", ip, idx,
3023                                 whichfork);
3024                         da_new = temp;
3025                         break;
3026                 }
3027                 xfs_bmbt_set_startblock(ep, del_endblock);
3028                 xfs_bmap_trace_post_update(fname, "2", ip, idx, whichfork);
3029                 if (!cur) {
3030                         flags |= XFS_ILOG_FEXT(whichfork);
3031                         break;
3032                 }
3033                 if ((error = xfs_bmbt_update(cur, del_endoff, del_endblock,
3034                                 got.br_blockcount - del->br_blockcount,
3035                                 got.br_state)))
3036                         goto done;
3037                 break;
3038
3039         case 1:
3040                 /*
3041                  * Deleting the last part of the extent.
3042                  */
3043                 temp = got.br_blockcount - del->br_blockcount;
3044                 xfs_bmap_trace_pre_update(fname, "1", ip, idx, whichfork);
3045                 xfs_bmbt_set_blockcount(ep, temp);
3046                 ifp->if_lastex = idx;
3047                 if (delay) {
3048                         temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
3049                                 da_old);
3050                         xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
3051                         xfs_bmap_trace_post_update(fname, "1", ip, idx,
3052                                 whichfork);
3053                         da_new = temp;
3054                         break;
3055                 }
3056                 xfs_bmap_trace_post_update(fname, "1", ip, idx, whichfork);
3057                 if (!cur) {
3058                         flags |= XFS_ILOG_FEXT(whichfork);
3059                         break;
3060                 }
3061                 if ((error = xfs_bmbt_update(cur, got.br_startoff,
3062                                 got.br_startblock,
3063                                 got.br_blockcount - del->br_blockcount,
3064                                 got.br_state)))
3065                         goto done;
3066                 break;
3067
3068         case 0:
3069                 /*
3070                  * Deleting the middle of the extent.
3071                  */
3072                 temp = del->br_startoff - got.br_startoff;
3073                 xfs_bmap_trace_pre_update(fname, "0", ip, idx, whichfork);
3074                 xfs_bmbt_set_blockcount(ep, temp);
3075                 new.br_startoff = del_endoff;
3076                 temp2 = got_endoff - del_endoff;
3077                 new.br_blockcount = temp2;
3078                 new.br_state = got.br_state;
3079                 if (!delay) {
3080                         new.br_startblock = del_endblock;
3081                         flags |= XFS_ILOG_CORE;
3082                         if (cur) {
3083                                 if ((error = xfs_bmbt_update(cur,
3084                                                 got.br_startoff,
3085                                                 got.br_startblock, temp,
3086                                                 got.br_state)))
3087                                         goto done;
3088                                 if ((error = xfs_bmbt_increment(cur, 0, &i)))
3089                                         goto done;
3090                                 cur->bc_rec.b = new;
3091                                 error = xfs_bmbt_insert(cur, &i);
3092                                 if (error && error != ENOSPC)
3093                                         goto done;
3094                                 /*
3095                                  * If we get ENOSPC back from the btree insert,
3096                                  * it tried a split, and we have a zero
3097                                  * block reservation.
3098                                  * Fix up our state and return the error.
3099                                  */
3100                                 if (error == ENOSPC) {
3101                                         /*
3102                                          * Reset the cursor, don't trust
3103                                          * it after any insert operation.
3104                                          */
3105                                         if ((error = xfs_bmbt_lookup_eq(cur,
3106                                                         got.br_startoff,
3107                                                         got.br_startblock,
3108                                                         temp, &i)))
3109                                                 goto done;
3110                                         ASSERT(i == 1);
3111                                         /*
3112                                          * Update the btree record back
3113                                          * to the original value.
3114                                          */
3115                                         if ((error = xfs_bmbt_update(cur,
3116                                                         got.br_startoff,
3117                                                         got.br_startblock,
3118                                                         got.br_blockcount,
3119                                                         got.br_state)))
3120                                                 goto done;
3121                                         /*
3122                                          * Reset the extent record back
3123                                          * to the original value.
3124                                          */
3125                                         xfs_bmbt_set_blockcount(ep,
3126                                                 got.br_blockcount);
3127                                         flags = 0;
3128                                         error = XFS_ERROR(ENOSPC);
3129                                         goto done;
3130                                 }
3131                                 ASSERT(i == 1);
3132                         } else
3133                                 flags |= XFS_ILOG_FEXT(whichfork);
3134                         XFS_IFORK_NEXT_SET(ip, whichfork,
3135                                 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
3136                 } else {
3137                         ASSERT(whichfork == XFS_DATA_FORK);
3138                         temp = xfs_bmap_worst_indlen(ip, temp);
3139                         xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
3140                         temp2 = xfs_bmap_worst_indlen(ip, temp2);
3141                         new.br_startblock = NULLSTARTBLOCK((int)temp2);
3142                         da_new = temp + temp2;
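                             /*
                              * The two worst-case indirect reservations can
                              * add up to more than the single reservation held
                              * for the original extent; the loop below trims
                              * the halves, alternating between them, until the
                              * total fits within da_old again.
                              */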
3143                         while (da_new > da_old) {
3144                                 if (temp) {
3145                                         temp--;
3146                                         da_new--;
3147                                         xfs_bmbt_set_startblock(ep,
3148                                                 NULLSTARTBLOCK((int)temp));
3149                                 }
3150                                 if (da_new == da_old)
3151                                         break;
3152                                 if (temp2) {
3153                                         temp2--;
3154                                         da_new--;
3155                                         new.br_startblock =
3156                                                 NULLSTARTBLOCK((int)temp2);
3157                                 }
3158                         }
3159                 }
3160                 xfs_bmap_trace_post_update(fname, "0", ip, idx, whichfork);
3161                 xfs_bmap_trace_insert(fname, "0", ip, idx + 1, 1, &new, NULL,
3162                         whichfork);
3163                 xfs_bmap_insert_exlist(ip, idx + 1, 1, &new, whichfork);
3164                 ifp->if_lastex = idx + 1;
3165                 break;
3166         }
3167         /*
3168          * If we need to, add to list of extents to delete.
3169          */
3170         if (do_fx)
3171                 xfs_bmap_add_free(del->br_startblock, del->br_blockcount, flist,
3172                         mp);
3173         /*
3174          * Adjust inode # blocks in the file.
3175          */
3176         if (nblks)
3177                 ip->i_d.di_nblocks -= nblks;
3178         /*
3179          * Adjust quota data.
3180          */
3181         if (qfield)
3182                 XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, qfield, (long)-nblks);
3183
3184         /*
3185          * Account for change in delayed indirect blocks.
3186          * Nothing to do for disk quota accounting here.
3187          */
3188         ASSERT(da_old >= da_new);
3189         if (da_old > da_new)
3190                 xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS, (int)(da_old - da_new),
3191                         rsvd);
3192 done:
3193         *logflagsp = flags;
3194         return error;
3195 }
3196
3197 /*
3198  * Remove the entry "free" from the free item list.  Prev points to the
3199  * previous entry, unless "free" is the head of the list.
3200  */
3201 STATIC void
3202 xfs_bmap_del_free(
3203         xfs_bmap_free_t         *flist, /* free item list header */
3204         xfs_bmap_free_item_t    *prev,  /* previous item on list, if any */
3205         xfs_bmap_free_item_t    *free)  /* list item to be freed */
3206 {
3207         if (prev)
3208                 prev->xbfi_next = free->xbfi_next;
3209         else
3210                 flist->xbf_first = free->xbfi_next;
3211         flist->xbf_count--;
3212         kmem_zone_free(xfs_bmap_free_item_zone, free);
3213 }
3214
3215 /*
3216  * Remove count entries from the extents array for inode "ip", starting
3217  * at index "idx".  Copies the remaining items down over the deleted ones,
3218  * and gives back the excess memory.
3219  */
3220 STATIC void
3221 xfs_bmap_delete_exlist(
3222         xfs_inode_t     *ip,            /* incore inode pointer */
3223         xfs_extnum_t    idx,            /* starting delete index */
3224         xfs_extnum_t    count,          /* count of items to delete */
3225         int             whichfork)      /* data or attr fork */
3226 {
3227         xfs_bmbt_rec_t  *base;          /* base of extent list */
3228         xfs_ifork_t     *ifp;           /* inode fork pointer */
3229         xfs_extnum_t    nextents;       /* number of extents in list after */
3230
3231         ifp = XFS_IFORK_PTR(ip, whichfork);
3232         ASSERT(ifp->if_flags & XFS_IFEXTENTS);
3233         base = ifp->if_u1.if_extents;
3234         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - count;
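             /*
              * nextents is already the post-deletion count, so the move below
              * shifts the (nextents - idx) surviving entries that follow the
              * deleted range down over it.
              */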
3235         memmove(&base[idx], &base[idx + count],
3236                 (nextents - idx) * sizeof(*base));
3237         xfs_iext_realloc(ip, -count, whichfork);
3238 }
3239
3240 /*
3241  * Convert an extents-format file into a btree-format file.
3242  * The new file will have a root block (in the inode) and a single child block.
3243  */
3244 STATIC int                                      /* error */
3245 xfs_bmap_extents_to_btree(
3246         xfs_trans_t             *tp,            /* transaction pointer */
3247         xfs_inode_t             *ip,            /* incore inode pointer */
3248         xfs_fsblock_t           *firstblock,    /* first block allocated in xaction */
3249         xfs_bmap_free_t         *flist,         /* blocks freed in xaction */
3250         xfs_btree_cur_t         **curp,         /* cursor returned to caller */
3251         int                     wasdel,         /* converting a delayed alloc */
3252         int                     *logflagsp,     /* inode logging flags */
3253         int                     whichfork)      /* data or attr fork */
3254 {
3255         xfs_bmbt_block_t        *ablock;        /* allocated (child) bt block */
3256         xfs_buf_t               *abp;           /* buffer for ablock */
3257         xfs_alloc_arg_t         args;           /* allocation arguments */
3258         xfs_bmbt_rec_t          *arp;           /* child record pointer */
3259         xfs_bmbt_block_t        *block;         /* btree root block */
3260         xfs_btree_cur_t         *cur;           /* bmap btree cursor */
3261         xfs_bmbt_rec_t          *ep;            /* extent list pointer */
3262         int                     error;          /* error return value */
3263         xfs_extnum_t            i, cnt;         /* extent list index */
3264         xfs_ifork_t             *ifp;           /* inode fork pointer */
3265         xfs_bmbt_key_t          *kp;            /* root block key pointer */
3266         xfs_mount_t             *mp;            /* mount structure */
3267         xfs_extnum_t            nextents;       /* extent list size */
3268         xfs_bmbt_ptr_t          *pp;            /* root block address pointer */
3269
3270         ifp = XFS_IFORK_PTR(ip, whichfork);
3271         ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS);
3272         ASSERT(ifp->if_ext_max ==
3273                XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
3274         /*
3275          * Make space in the inode incore.
3276          */
3277         xfs_iroot_realloc(ip, 1, whichfork);
3278         ifp->if_flags |= XFS_IFBROOT;
3279         /*
3280          * Fill in the root.
3281          */
3282         block = ifp->if_broot;
3283         block->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC);
3284         block->bb_level = cpu_to_be16(1);
3285         block->bb_numrecs = cpu_to_be16(1);
3286         block->bb_leftsib = cpu_to_be64(NULLDFSBNO);
3287         block->bb_rightsib = cpu_to_be64(NULLDFSBNO);
3288         /*
3289          * Need a cursor.  Can't allocate until bb_level is filled in.
3290          */
3291         mp = ip->i_mount;
3292         cur = xfs_btree_init_cursor(mp, tp, NULL, 0, XFS_BTNUM_BMAP, ip,
3293                 whichfork);
3294         cur->bc_private.b.firstblock = *firstblock;
3295         cur->bc_private.b.flist = flist;
3296         cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
3297         /*
3298          * Convert to a btree with two levels, one record in root.
3299          */
3300         XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE);
3301         args.tp = tp;
3302         args.mp = mp;
3303         if (*firstblock == NULLFSBLOCK) {
3304                 args.type = XFS_ALLOCTYPE_START_BNO;
3305                 args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
3306         } else if (flist->xbf_low) {
3307                 args.type = XFS_ALLOCTYPE_START_BNO;
3308                 args.fsbno = *firstblock;
3309         } else {
3310                 args.type = XFS_ALLOCTYPE_NEAR_BNO;
3311                 args.fsbno = *firstblock;
3312         }
3313         args.minlen = args.maxlen = args.prod = 1;
3314         args.total = args.minleft = args.alignment = args.mod = args.isfl =
3315                 args.minalignslop = 0;
3316         args.wasdel = wasdel;
3317         *logflagsp = 0;
3318         if ((error = xfs_alloc_vextent(&args))) {
3319                 xfs_iroot_realloc(ip, -1, whichfork);
3320                 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
3321                 return error;
3322         }
3323         /*
3324          * Allocation can't fail, the space was reserved.
3325          */
3326         ASSERT(args.fsbno != NULLFSBLOCK);
3327         ASSERT(*firstblock == NULLFSBLOCK ||
3328                args.agno == XFS_FSB_TO_AGNO(mp, *firstblock) ||
3329                (flist->xbf_low &&
3330                 args.agno > XFS_FSB_TO_AGNO(mp, *firstblock)));
3331         *firstblock = cur->bc_private.b.firstblock = args.fsbno;
3332         cur->bc_private.b.allocated++;
3333         ip->i_d.di_nblocks++;
3334         XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
3335         abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0);
3336         /*
3337          * Fill in the child block.
3338          */
3339         ablock = XFS_BUF_TO_BMBT_BLOCK(abp);
3340         ablock->bb_magic = cpu_to_be32(XFS_BMAP_MAGIC);
3341         ablock->bb_level = 0;
3342         ablock->bb_leftsib = cpu_to_be64(NULLDFSBNO);
3343         ablock->bb_rightsib = cpu_to_be64(NULLDFSBNO);
3344         arp = XFS_BMAP_REC_IADDR(ablock, 1, cur);
3345         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
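             /*
              * Only real extents are copied into the btree leaf; delayed
              * allocation records (null startblock) stay incore only, so cnt
              * must end up equal to the on-disk extent count asserted below.
              */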
3346         for (ep = ifp->if_u1.if_extents, cnt = i = 0; i < nextents; i++, ep++) {
3347                 if (!ISNULLSTARTBLOCK(xfs_bmbt_get_startblock(ep))) {
3348                         arp->l0 = INT_GET(ep->l0, ARCH_CONVERT);
3349                         arp->l1 = INT_GET(ep->l1, ARCH_CONVERT);
3350                         arp++; cnt++;
3351                 }
3352         }
3353         ASSERT(cnt == XFS_IFORK_NEXTENTS(ip, whichfork));
3354         ablock->bb_numrecs = cpu_to_be16(cnt);
3355         /*
3356          * Fill in the root key and pointer.
3357          */
3358         kp = XFS_BMAP_KEY_IADDR(block, 1, cur);
3359         arp = XFS_BMAP_REC_IADDR(ablock, 1, cur);
3360         INT_SET(kp->br_startoff, ARCH_CONVERT, xfs_bmbt_disk_get_startoff(arp));
3361         pp = XFS_BMAP_PTR_IADDR(block, 1, cur);
3362         INT_SET(*pp, ARCH_CONVERT, args.fsbno);
3363         /*
3364          * Do all this logging at the end so that
3365          * the root is at the right level.
3366          */
3367         xfs_bmbt_log_block(cur, abp, XFS_BB_ALL_BITS);
3368         xfs_bmbt_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
3369         ASSERT(*curp == NULL);
3370         *curp = cur;
3371         *logflagsp = XFS_ILOG_CORE | XFS_ILOG_FBROOT(whichfork);
3372         return 0;
3373 }
3374
3375 /*
3376  * Insert new item(s) in the extent list for inode "ip".
3377  * The "count" new items are inserted starting at offset "idx".
3378  */
3379 STATIC void
3380 xfs_bmap_insert_exlist(
3381         xfs_inode_t     *ip,            /* incore inode pointer */
3382         xfs_extnum_t    idx,            /* starting index of new items */
3383         xfs_extnum_t    count,          /* number of inserted items */
3384         xfs_bmbt_irec_t *new,           /* items to insert */
3385         int             whichfork)      /* data or attr fork */
3386 {
3387         xfs_bmbt_rec_t  *base;          /* extent list base */
3388         xfs_ifork_t     *ifp;           /* inode fork pointer */
3389         xfs_extnum_t    nextents;       /* extent list size */
3390         xfs_extnum_t    to;             /* extent list index */
3391
3392         ifp = XFS_IFORK_PTR(ip, whichfork);
3393         ASSERT(ifp->if_flags & XFS_IFEXTENTS);
3394         xfs_iext_realloc(ip, count, whichfork);
3395         base = ifp->if_u1.if_extents;
3396         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3397         memmove(&base[idx + count], &base[idx],
3398                 (nextents - (idx + count)) * sizeof(*base));
3399         for (to = idx; to < idx + count; to++, new++)
3400                 xfs_bmbt_set_all(&base[to], new);
3401 }
3402
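/*
 * A minimal standalone sketch (not part of the original source) of the
 * insertion pattern used by xfs_bmap_insert_exlist() above: once the
 * array has been grown by "count" slots, memmove() shifts the tail up
 * and the new records are written into the gap.  The "rec_t" type and
 * the "insert_recs" helper are hypothetical simplifications, not XFS
 * types.
 */
#include <string.h>

typedef struct { unsigned long long l0, l1; } rec_t;

static void
insert_recs(
        rec_t           *base,          /* base of the already-grown array */
        int             nextents,       /* array size including new slots */
        int             idx,            /* starting index of new records */
        int             count,          /* number of new records */
        const rec_t     *src)           /* records to insert */
{
        int             to;

        /* shift the existing records at and above idx out of the way */
        memmove(&base[idx + count], &base[idx],
                (nextents - (idx + count)) * sizeof(*base));
        /* copy the new records into the hole just created */
        for (to = idx; to < idx + count; to++, src++)
                base[to] = *src;
}
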
3403 /*
3404  * Helper routine to reset inode di_forkoff field when switching
3405  * attribute fork from local to extent format - we reset it where
3406  * possible to make space available for inline data fork extents.
3407  */
3408 STATIC void
3409 xfs_bmap_forkoff_reset(
3410         xfs_mount_t     *mp,
3411         xfs_inode_t     *ip,
3412         int             whichfork)
3413 {
3414         if (whichfork == XFS_ATTR_FORK &&
3415             (ip->i_d.di_format != XFS_DINODE_FMT_DEV) &&
3416             (ip->i_d.di_format != XFS_DINODE_FMT_UUID) &&
3417             ((mp->m_attroffset >> 3) > ip->i_d.di_forkoff)) {
3418                 ip->i_d.di_forkoff = mp->m_attroffset >> 3;
3419                 ip->i_df.if_ext_max = XFS_IFORK_DSIZE(ip) /
3420                                         (uint)sizeof(xfs_bmbt_rec_t);
3421                 ip->i_afp->if_ext_max = XFS_IFORK_ASIZE(ip) /
3422                                         (uint)sizeof(xfs_bmbt_rec_t);
3423         }
3424 }
3425
3426 /*
3427  * Convert a local file to an extents file.
3428  * This code cannot be used for the data fork of a regular file,
3429  * since the file data needs to get logged so things will stay consistent.
3430  * (The bmap-level manipulations are ok, though).
3431  */
3432 STATIC int                              /* error */
3433 xfs_bmap_local_to_extents(
3434         xfs_trans_t     *tp,            /* transaction pointer */
3435         xfs_inode_t     *ip,            /* incore inode pointer */
3436         xfs_fsblock_t   *firstblock,    /* first block allocated in xaction */
3437         xfs_extlen_t    total,          /* total blocks needed by transaction */
3438         int             *logflagsp,     /* inode logging flags */
3439         int             whichfork)      /* data or attr fork */
3440 {
3441         int             error;          /* error return value */
3442         int             flags;          /* logging flags returned */
3443 #ifdef XFS_BMAP_TRACE
3444         static char     fname[] = "xfs_bmap_local_to_extents";
3445 #endif
3446         xfs_ifork_t     *ifp;           /* inode fork pointer */
3447
3448         /*
3449          * We don't want to deal with the case of keeping inode data inline yet.
3450          * So passing in the data fork of a regular inode is invalid.
3451          */
3452         ASSERT(!((ip->i_d.di_mode & S_IFMT) == S_IFREG &&
3453                  whichfork == XFS_DATA_FORK));
3454         ifp = XFS_IFORK_PTR(ip, whichfork);
3455         ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
3456         flags = 0;
3457         error = 0;
3458         if (ifp->if_bytes) {
3459                 xfs_alloc_arg_t args;   /* allocation arguments */
3460                 xfs_buf_t       *bp;    /* buffer for extent list block */
3461                 xfs_bmbt_rec_t  *ep;    /* extent list pointer */
3462
3463                 args.tp = tp;
3464                 args.mp = ip->i_mount;
3465                 ASSERT(ifp->if_flags & XFS_IFINLINE);
3466                 /*
3467                  * Allocate a block.  We know we need only one, since the
3468                  * file currently fits in an inode.
3469                  */
3470                 if (*firstblock == NULLFSBLOCK) {
3471                         args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino);
3472                         args.type = XFS_ALLOCTYPE_START_BNO;
3473                 } else {
3474                         args.fsbno = *firstblock;
3475                         args.type = XFS_ALLOCTYPE_NEAR_BNO;
3476                 }
3477                 args.total = total;
3478                 args.mod = args.minleft = args.alignment = args.wasdel =
3479                         args.isfl = args.minalignslop = 0;
3480                 args.minlen = args.maxlen = args.prod = 1;
3481                 if ((error = xfs_alloc_vextent(&args)))
3482                         goto done;
3483                 /*
3484                  * Can't fail, the space was reserved.
3485                  */
3486                 ASSERT(args.fsbno != NULLFSBLOCK);
3487                 ASSERT(args.len == 1);
3488                 *firstblock = args.fsbno;
3489                 bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);
3490                 memcpy((char *)XFS_BUF_PTR(bp), ifp->if_u1.if_data,
3491                         ifp->if_bytes);
3492                 xfs_trans_log_buf(tp, bp, 0, ifp->if_bytes - 1);
3493                 xfs_bmap_forkoff_reset(args.mp, ip, whichfork);
3494                 xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
3495                 xfs_iext_realloc(ip, 1, whichfork);
3496                 ep = ifp->if_u1.if_extents;
3497                 xfs_bmbt_set_allf(ep, 0, args.fsbno, 1, XFS_EXT_NORM);
3498                 xfs_bmap_trace_post_update(fname, "new", ip, 0, whichfork);
3499                 XFS_IFORK_NEXT_SET(ip, whichfork, 1);
3500                 ip->i_d.di_nblocks = 1;
3501                 XFS_TRANS_MOD_DQUOT_BYINO(args.mp, tp, ip,
3502                         XFS_TRANS_DQ_BCOUNT, 1L);
3503                 flags |= XFS_ILOG_FEXT(whichfork);
3504         } else {
3505                 ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0);
3506                 xfs_bmap_forkoff_reset(ip->i_mount, ip, whichfork);
3507         }
3508         ifp->if_flags &= ~XFS_IFINLINE;
3509         ifp->if_flags |= XFS_IFEXTENTS;
3510         XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
3511         flags |= XFS_ILOG_CORE;
3512 done:
3513         *logflagsp = flags;
3514         return error;
3515 }
3516
3517 xfs_bmbt_rec_t *                        /* pointer to found extent entry */
3518 xfs_bmap_do_search_extents(
3519         xfs_bmbt_rec_t  *base,          /* base of extent list */
3520         xfs_extnum_t    lastx,          /* last extent index used */
3521         xfs_extnum_t    nextents,       /* extent list size */
3522         xfs_fileoff_t   bno,            /* block number searched for */
3523         int             *eofp,          /* out: end of file found */
3524         xfs_extnum_t    *lastxp,        /* out: last extent index */
3525         xfs_bmbt_irec_t *gotp,          /* out: extent entry found */
3526         xfs_bmbt_irec_t *prevp)         /* out: previous extent entry found */
3527 {
3528         xfs_bmbt_rec_t  *ep;            /* extent list entry pointer */
3529         xfs_bmbt_irec_t got;            /* extent list entry, decoded */
3530         int             high;           /* high index of binary search */
3531         int             low;            /* low index of binary search */
3532
3533         /*
3534          * Initialize the extent entry structure to catch access to
3535          * uninitialized br_startblock field.
3536          */
3537         got.br_startoff = 0xffa5a5a5a5a5a5a5LL;
3538         got.br_blockcount = 0xa55a5a5a5a5a5a5aLL;
3539         got.br_state = XFS_EXT_INVALID;
3540
3541 #if XFS_BIG_BLKNOS
3542         got.br_startblock = 0xffffa5a5a5a5a5a5LL;
3543 #else
3544         got.br_startblock = 0xffffa5a5;
3545 #endif
3546
3547         if (lastx != NULLEXTNUM && lastx < nextents)
3548                 ep = base + lastx;
3549         else
3550                 ep = NULL;
3551         prevp->br_startoff = NULLFILEOFF;
3552         if (ep && bno >= (got.br_startoff = xfs_bmbt_get_startoff(ep)) &&
3553             bno < got.br_startoff +
3554                   (got.br_blockcount = xfs_bmbt_get_blockcount(ep)))
3555                 *eofp = 0;
3556         else if (ep && lastx < nextents - 1 &&
3557                  bno >= (got.br_startoff = xfs_bmbt_get_startoff(ep + 1)) &&
3558                  bno < got.br_startoff +
3559                        (got.br_blockcount = xfs_bmbt_get_blockcount(ep + 1))) {
3560                 lastx++;
3561                 ep++;
3562                 *eofp = 0;
3563         } else if (nextents == 0)
3564                 *eofp = 1;
3565         else if (bno == 0 &&
3566                  (got.br_startoff = xfs_bmbt_get_startoff(base)) == 0) {
3567                 ep = base;
3568                 lastx = 0;
3569                 got.br_blockcount = xfs_bmbt_get_blockcount(ep);
3570                 *eofp = 0;
3571         } else {
3572                 /* binary search the extents array */
3573                 low = 0;
3574                 high = nextents - 1;
3575                 while (low <= high) {
3576                         XFS_STATS_INC(xs_cmp_exlist);
3577                         lastx = (low + high) >> 1;
3578                         ep = base + lastx;
3579                         got.br_startoff = xfs_bmbt_get_startoff(ep);
3580                         got.br_blockcount = xfs_bmbt_get_blockcount(ep);
3581                         if (bno < got.br_startoff)
3582                                 high = lastx - 1;
3583                         else if (bno >= got.br_startoff + got.br_blockcount)
3584                                 low = lastx + 1;
3585                         else {
3586                                 got.br_startblock = xfs_bmbt_get_startblock(ep);
3587                                 got.br_state = xfs_bmbt_get_state(ep);
3588                                 *eofp = 0;
3589                                 *lastxp = lastx;
3590                                 *gotp = got;
3591                                 return ep;
3592                         }
3593                 }
3594                 if (bno >= got.br_startoff + got.br_blockcount) {
3595                         lastx++;
3596                         if (lastx == nextents) {
3597                                 *eofp = 1;
3598                                 got.br_startblock = xfs_bmbt_get_startblock(ep);
3599                                 got.br_state = xfs_bmbt_get_state(ep);
3600                                 *prevp = got;
3601                                 ep = NULL;
3602                         } else {
3603                                 *eofp = 0;
3604                                 xfs_bmbt_get_all(ep, prevp);
3605                                 ep++;
3606                                 got.br_startoff = xfs_bmbt_get_startoff(ep);
3607                                 got.br_blockcount = xfs_bmbt_get_blockcount(ep);
3608                         }
3609                 } else {
3610                         *eofp = 0;
3611                         if (ep > base)
3612                                 xfs_bmbt_get_all(ep - 1, prevp);
3613                 }
3614         }
3615         if (ep) {
3616                 got.br_startblock = xfs_bmbt_get_startblock(ep);
3617                 got.br_state = xfs_bmbt_get_state(ep);
3618         }
3619         *lastxp = lastx;
3620         *gotp = got;
3621         return ep;
3622 }
3623
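/*
 * Standalone sketch (not part of the original source) of the core binary
 * search done by xfs_bmap_do_search_extents() above: locate the extent
 * whose half-open range [startoff, startoff + blockcount) contains "bno",
 * or the index of the first extent past "bno" when it falls in a hole.
 * The "ext_t" type and "search_extents_example" helper are hypothetical.
 */
typedef struct {
        unsigned long long      startoff;       /* first file block covered */
        unsigned long long      blockcount;     /* length in blocks */
} ext_t;

static int                              /* index of extent containing bno, */
search_extents_example(                 /* or of the first extent past it */
        const ext_t             *base,  /* extent array, sorted by startoff */
        int                     nextents, /* number of entries */
        unsigned long long      bno)    /* file block searched for */
{
        int                     low = 0;
        int                     high = nextents - 1;
        int                     mid;

        while (low <= high) {
                mid = (low + high) >> 1;
                if (bno < base[mid].startoff)
                        high = mid - 1;
                else if (bno >= base[mid].startoff + base[mid].blockcount)
                        low = mid + 1;
                else
                        return mid;     /* bno lies inside this extent */
        }
        /* bno is in a hole (or past eof): low indexes the next extent */
        return low;
}
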
3624 /*
3625  * Search the extents list for the inode, for the extent containing bno.
3626  * If bno lies in a hole, point to the next entry.  If bno lies past eof,
3627  * *eofp will be set, and *prevp will contain the last entry (null if none).
3628  * Else, *lastxp will be set to the index of the found
3629  * entry; *gotp will contain the entry.
3630  */
3631 STATIC xfs_bmbt_rec_t *                 /* pointer to found extent entry */
3632 xfs_bmap_search_extents(
3633         xfs_inode_t     *ip,            /* incore inode pointer */
3634         xfs_fileoff_t   bno,            /* block number searched for */
3635         int             whichfork,      /* data or attr fork */
3636         int             *eofp,          /* out: end of file found */
3637         xfs_extnum_t    *lastxp,        /* out: last extent index */
3638         xfs_bmbt_irec_t *gotp,          /* out: extent entry found */
3639         xfs_bmbt_irec_t *prevp)         /* out: previous extent entry found */
3640 {
3641         xfs_ifork_t     *ifp;           /* inode fork pointer */
3642         xfs_bmbt_rec_t  *base;          /* base of extent list */
3643         xfs_extnum_t    lastx;          /* last extent index used */
3644         xfs_extnum_t    nextents;       /* extent list size */
3645         xfs_bmbt_rec_t  *ep;            /* extent list entry pointer */
3646         int             rt;             /* realtime flag    */
3647
3648         XFS_STATS_INC(xs_look_exlist);
3649         ifp = XFS_IFORK_PTR(ip, whichfork);
3650         lastx = ifp->if_lastex;
3651         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3652         base = &ifp->if_u1.if_extents[0];
3653
3654         ep = xfs_bmap_do_search_extents(base, lastx, nextents, bno, eofp,
3655                                           lastxp, gotp, prevp);
3656         rt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
3657         if (unlikely(!rt && !gotp->br_startblock && (*lastxp != NULLEXTNUM))) {
3658                 cmn_err(CE_PANIC,"Access to block zero: fs: <%s> inode: %lld "
3659                         "start_block : %llx start_off : %llx blkcnt : %llx "
3660                         "extent-state : %x \n",
3661                         (ip->i_mount)->m_fsname, (long long)ip->i_ino,
3662                         (unsigned long long)gotp->br_startblock,
3663                         (unsigned long long)gotp->br_startoff,
3664                         (unsigned long long)gotp->br_blockcount,
3665                         gotp->br_state);
3666         }
3667         return ep;
3668 }
3669
3670
3671 #ifdef XFS_BMAP_TRACE
3672 ktrace_t        *xfs_bmap_trace_buf;
3673
3674 /*
3675  * Add a bmap trace buffer entry.  Base routine for the others.
3676  */
3677 STATIC void
3678 xfs_bmap_trace_addentry(
3679         int             opcode,         /* operation */
3680         char            *fname,         /* function name */
3681         char            *desc,          /* operation description */
3682         xfs_inode_t     *ip,            /* incore inode pointer */
3683         xfs_extnum_t    idx,            /* index of entry(ies) */
3684         xfs_extnum_t    cnt,            /* count of entries, 1 or 2 */
3685         xfs_bmbt_rec_t  *r1,            /* first record */
3686         xfs_bmbt_rec_t  *r2,            /* second record or null */
3687         int             whichfork)      /* data or attr fork */
3688 {
3689         xfs_bmbt_rec_t  tr2;
3690
3691         ASSERT(cnt == 1 || cnt == 2);
3692         ASSERT(r1 != NULL);
3693         if (cnt == 1) {
3694                 ASSERT(r2 == NULL);
3695                 r2 = &tr2;
3696                 memset(&tr2, 0, sizeof(tr2));
3697         } else
3698                 ASSERT(r2 != NULL);
3699         ktrace_enter(xfs_bmap_trace_buf,
3700                 (void *)(__psint_t)(opcode | (whichfork << 16)),
3701                 (void *)fname, (void *)desc, (void *)ip,
3702                 (void *)(__psint_t)idx,
3703                 (void *)(__psint_t)cnt,
3704                 (void *)(__psunsigned_t)(ip->i_ino >> 32),
3705                 (void *)(__psunsigned_t)(unsigned)ip->i_ino,
3706                 (void *)(__psunsigned_t)(r1->l0 >> 32),
3707                 (void *)(__psunsigned_t)(unsigned)(r1->l0),
3708                 (void *)(__psunsigned_t)(r1->l1 >> 32),
3709                 (void *)(__psunsigned_t)(unsigned)(r1->l1),
3710                 (void *)(__psunsigned_t)(r2->l0 >> 32),
3711                 (void *)(__psunsigned_t)(unsigned)(r2->l0),
3712                 (void *)(__psunsigned_t)(r2->l1 >> 32),
3713                 (void *)(__psunsigned_t)(unsigned)(r2->l1)
3714                 );
3715         ASSERT(ip->i_xtrace);
3716         ktrace_enter(ip->i_xtrace,
3717                 (void *)(__psint_t)(opcode | (whichfork << 16)),
3718                 (void *)fname, (void *)desc, (void *)ip,
3719                 (void *)(__psint_t)idx,
3720                 (void *)(__psint_t)cnt,
3721                 (void *)(__psunsigned_t)(ip->i_ino >> 32),
3722                 (void *)(__psunsigned_t)(unsigned)ip->i_ino,
3723                 (void *)(__psunsigned_t)(r1->l0 >> 32),
3724                 (void *)(__psunsigned_t)(unsigned)(r1->l0),
3725                 (void *)(__psunsigned_t)(r1->l1 >> 32),
3726                 (void *)(__psunsigned_t)(unsigned)(r1->l1),
3727                 (void *)(__psunsigned_t)(r2->l0 >> 32),
3728                 (void *)(__psunsigned_t)(unsigned)(r2->l0),
3729                 (void *)(__psunsigned_t)(r2->l1 >> 32),
3730                 (void *)(__psunsigned_t)(unsigned)(r2->l1)
3731                 );
3732 }
3733
3734 /*
3735  * Add bmap trace entry prior to a call to xfs_bmap_delete_exlist.
3736  */
3737 STATIC void
3738 xfs_bmap_trace_delete(
3739         char            *fname,         /* function name */
3740         char            *desc,          /* operation description */
3741         xfs_inode_t     *ip,            /* incore inode pointer */
3742         xfs_extnum_t    idx,            /* index of entry(entries) deleted */
3743         xfs_extnum_t    cnt,            /* count of entries deleted, 1 or 2 */
3744         int             whichfork)      /* data or attr fork */
3745 {
3746         xfs_ifork_t     *ifp;           /* inode fork pointer */
3747
3748         ifp = XFS_IFORK_PTR(ip, whichfork);
3749         xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_DELETE, fname, desc, ip, idx,
3750                 cnt, &ifp->if_u1.if_extents[idx],
3751                 cnt == 2 ? &ifp->if_u1.if_extents[idx + 1] : NULL,
3752                 whichfork);
3753 }
3754
3755 /*
3756  * Add bmap trace entry prior to a call to xfs_bmap_insert_exlist, or
3757  * prior to reading in the extent list from the on-disk btree.
3758  */
3759 STATIC void
3760 xfs_bmap_trace_insert(
3761         char            *fname,         /* function name */
3762         char            *desc,          /* operation description */
3763         xfs_inode_t     *ip,            /* incore inode pointer */
3764         xfs_extnum_t    idx,            /* index of entry(entries) inserted */
3765         xfs_extnum_t    cnt,            /* count of entries inserted, 1 or 2 */
3766         xfs_bmbt_irec_t *r1,            /* inserted record 1 */
3767         xfs_bmbt_irec_t *r2,            /* inserted record 2 or null */
3768         int             whichfork)      /* data or attr fork */
3769 {
3770         xfs_bmbt_rec_t  tr1;            /* compressed record 1 */
3771         xfs_bmbt_rec_t  tr2;            /* compressed record 2 if needed */
3772
3773         xfs_bmbt_set_all(&tr1, r1);
3774         if (cnt == 2) {
3775                 ASSERT(r2 != NULL);
3776                 xfs_bmbt_set_all(&tr2, r2);
3777         } else {
3778                 ASSERT(cnt == 1);
3779                 ASSERT(r2 == NULL);
3780         }
3781         xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_INSERT, fname, desc, ip, idx,
3782                 cnt, &tr1, cnt == 2 ? &tr2 : NULL, whichfork);
3783 }
3784
3785 /*
3786  * Add bmap trace entry after updating an extent list entry in place.
3787  */
3788 STATIC void
3789 xfs_bmap_trace_post_update(
3790         char            *fname,         /* function name */
3791         char            *desc,          /* operation description */
3792         xfs_inode_t     *ip,            /* incore inode pointer */
3793         xfs_extnum_t    idx,            /* index of entry updated */
3794         int             whichfork)      /* data or attr fork */
3795 {
3796         xfs_ifork_t     *ifp;           /* inode fork pointer */
3797
3798         ifp = XFS_IFORK_PTR(ip, whichfork);
3799         xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_POST_UP, fname, desc, ip, idx,
3800                 1, &ifp->if_u1.if_extents[idx], NULL, whichfork);
3801 }
3802
3803 /*
3804  * Add bmap trace entry prior to updating an extent list entry in place.
3805  */
3806 STATIC void
3807 xfs_bmap_trace_pre_update(
3808         char            *fname,         /* function name */
3809         char            *desc,          /* operation description */
3810         xfs_inode_t     *ip,            /* incore inode pointer */
3811         xfs_extnum_t    idx,            /* index of entry to be updated */
3812         int             whichfork)      /* data or attr fork */
3813 {
3814         xfs_ifork_t     *ifp;           /* inode fork pointer */
3815
3816         ifp = XFS_IFORK_PTR(ip, whichfork);
3817         xfs_bmap_trace_addentry(XFS_BMAP_KTRACE_PRE_UP, fname, desc, ip, idx, 1,
3818                 &ifp->if_u1.if_extents[idx], NULL, whichfork);
3819 }
3820 #endif  /* XFS_BMAP_TRACE */
3821
3822 /*
3823  * Compute the worst-case number of indirect blocks that will be used
3824  * for ip's delayed extent of length "len".
3825  */
3826 STATIC xfs_filblks_t
3827 xfs_bmap_worst_indlen(
3828         xfs_inode_t     *ip,            /* incore inode pointer */
3829         xfs_filblks_t   len)            /* delayed extent length */
3830 {
3831         int             level;          /* btree level number */
3832         int             maxrecs;        /* maximum record count at this level */
3833         xfs_mount_t     *mp;            /* mount structure */
3834         xfs_filblks_t   rval;           /* return value */
3835
3836         mp = ip->i_mount;
3837         maxrecs = mp->m_bmap_dmxr[0];
3838         for (level = 0, rval = 0;
3839              level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
3840              level++) {
3841                 len += maxrecs - 1;
3842                 do_div(len, maxrecs);
3843                 rval += len;
3844                 if (len == 1)
3845                         return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
3846                                 level - 1;
3847                 if (level == 0)
3848                         maxrecs = mp->m_bmap_dmxr[1];
3849         }
3850         return rval;
3851 }
3852
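/*
 * Standalone sketch (not part of the original source) of the arithmetic in
 * xfs_bmap_worst_indlen() above: each btree level needs at worst
 * ceil(len / maxrecs) blocks, where "len" is the block count of the level
 * below, and once a level fits in a single block every remaining level
 * costs exactly one more block.  The capacities and the 5-level limit are
 * made-up example numbers, not real filesystem geometry.
 */
static unsigned long long
worst_indlen_example(
        unsigned long long      len)            /* delayed extent length */
{
        const int               maxlevels = 5;  /* assumed height limit */
        int                     maxrecs = 125;  /* assumed leaf capacity */
        unsigned long long      rval = 0;
        int                     level;

        for (level = 0; level < maxlevels; level++) {
                len = (len + maxrecs - 1) / maxrecs;    /* ceiling divide */
                rval += len;
                if (len == 1)           /* one block per remaining level */
                        return rval + maxlevels - level - 1;
                if (level == 0)
                        maxrecs = 252;  /* assumed node capacity */
        }
        return rval;
}
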
3853 #if defined(XFS_RW_TRACE)
3854 STATIC void
3855 xfs_bunmap_trace(
3856         xfs_inode_t             *ip,
3857         xfs_fileoff_t           bno,
3858         xfs_filblks_t           len,
3859         int                     flags,
3860         inst_t                  *ra)
3861 {
3862         if (ip->i_rwtrace == NULL)
3863                 return;
3864         ktrace_enter(ip->i_rwtrace,
3865                 (void *)(__psint_t)XFS_BUNMAPI,
3866                 (void *)ip,
3867                 (void *)(__psint_t)((ip->i_d.di_size >> 32) & 0xffffffff),
3868                 (void *)(__psint_t)(ip->i_d.di_size & 0xffffffff),
3869                 (void *)(__psint_t)(((xfs_dfiloff_t)bno >> 32) & 0xffffffff),
3870                 (void *)(__psint_t)((xfs_dfiloff_t)bno & 0xffffffff),
3871                 (void *)(__psint_t)len,
3872                 (void *)(__psint_t)flags,
3873                 (void *)(unsigned long)current_cpu(),
3874                 (void *)ra,
3875                 (void *)0,
3876                 (void *)0,
3877                 (void *)0,
3878                 (void *)0,
3879                 (void *)0,
3880                 (void *)0);
3881 }
3882 #endif
3883
3884 /*
3885  * Convert inode from non-attributed to attributed.
3886  * Must not be in a transaction, ip must not be locked.
3887  */
3888 int                                             /* error code */
3889 xfs_bmap_add_attrfork(
3890         xfs_inode_t             *ip,            /* incore inode pointer */
3891         int                     size,           /* space new attribute needs */
3892         int                     rsvd)           /* xact may use reserved blks */
3893 {
3894         xfs_fsblock_t           firstblock;     /* 1st block/ag allocated */
3895         xfs_bmap_free_t         flist;          /* freed extent list */
3896         xfs_mount_t             *mp;            /* mount structure */
3897         xfs_trans_t             *tp;            /* transaction pointer */
3898         unsigned long           s;              /* spinlock spl value */
3899         int                     blks;           /* space reservation */
3900         int                     version = 1;    /* superblock attr version */
3901         int                     committed;      /* xaction was committed */
3902         int                     logflags;       /* logging flags */
3903         int                     error;          /* error return value */
3904
3905         ASSERT(XFS_IFORK_Q(ip) == 0);
3906         ASSERT(ip->i_df.if_ext_max ==
3907                XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t));
3908
3909         mp = ip->i_mount;
3910         ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
3911         tp = xfs_trans_alloc(mp, XFS_TRANS_ADDAFORK);
3912         blks = XFS_ADDAFORK_SPACE_RES(mp);
3913         if (rsvd)
3914                 tp->t_flags |= XFS_TRANS_RESERVE;
3915         if ((error = xfs_trans_reserve(tp, blks, XFS_ADDAFORK_LOG_RES(mp), 0,
3916                         XFS_TRANS_PERM_LOG_RES, XFS_ADDAFORK_LOG_COUNT)))
3917                 goto error0;
3918         xfs_ilock(ip, XFS_ILOCK_EXCL);
3919         error = XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip, blks, 0, rsvd ?
3920                         XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
3921                         XFS_QMOPT_RES_REGBLKS);
3922         if (error) {
3923                 xfs_iunlock(ip, XFS_ILOCK_EXCL);
3924                 xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES);
3925                 return error;
3926         }
3927         if (XFS_IFORK_Q(ip))
3928                 goto error1;
3929         if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) {
3930                 /*
3931                  * For inodes coming from pre-6.2 filesystems.
3932                  */
3933                 ASSERT(ip->i_d.di_aformat == 0);
3934                 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
3935         }
3936         ASSERT(ip->i_d.di_anextents == 0);
3937         VN_HOLD(XFS_ITOV(ip));
3938         xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
3939         xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
3940         switch (ip->i_d.di_format) {
3941         case XFS_DINODE_FMT_DEV:
3942                 ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
3943                 break;
3944         case XFS_DINODE_FMT_UUID:
3945                 ip->i_d.di_forkoff = roundup(sizeof(uuid_t), 8) >> 3;
3946                 break;
3947         case XFS_DINODE_FMT_LOCAL:
3948         case XFS_DINODE_FMT_EXTENTS:
3949         case XFS_DINODE_FMT_BTREE:
3950                 ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
3951                 if (!ip->i_d.di_forkoff)
3952                         ip->i_d.di_forkoff = mp->m_attroffset >> 3;
3953                 else if (mp->m_flags & XFS_MOUNT_ATTR2)
3954                         version = 2;
3955                 break;
3956         default:
3957                 ASSERT(0);
3958                 error = XFS_ERROR(EINVAL);
3959                 goto error1;
3960         }
3961         ip->i_df.if_ext_max =
3962                 XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
3963         ASSERT(ip->i_afp == NULL);
3964         ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
3965         ip->i_afp->if_ext_max =
3966                 XFS_IFORK_ASIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
3967         ip->i_afp->if_flags = XFS_IFEXTENTS;
3968         logflags = 0;
3969         XFS_BMAP_INIT(&flist, &firstblock);
3970         switch (ip->i_d.di_format) {
3971         case XFS_DINODE_FMT_LOCAL:
3972                 error = xfs_bmap_add_attrfork_local(tp, ip, &firstblock, &flist,
3973                         &logflags);
3974                 break;
3975         case XFS_DINODE_FMT_EXTENTS:
3976                 error = xfs_bmap_add_attrfork_extents(tp, ip, &firstblock,
3977                         &flist, &logflags);
3978                 break;
3979         case XFS_DINODE_FMT_BTREE:
3980                 error = xfs_bmap_add_attrfork_btree(tp, ip, &firstblock, &flist,
3981                         &logflags);
3982                 break;
3983         default:
3984                 error = 0;
3985                 break;
3986         }
3987         if (logflags)
3988                 xfs_trans_log_inode(tp, ip, logflags);
3989         if (error)
3990                 goto error2;
3991         if (!XFS_SB_VERSION_HASATTR(&mp->m_sb) ||
3992            (!XFS_SB_VERSION_HASATTR2(&mp->m_sb) && version == 2)) {
3993                 __int64_t sbfields = 0;
3994
3995                 s = XFS_SB_LOCK(mp);
3996                 if (!XFS_SB_VERSION_HASATTR(&mp->m_sb)) {
3997                         XFS_SB_VERSION_ADDATTR(&mp->m_sb);
3998                         sbfields |= XFS_SB_VERSIONNUM;
3999                 }
4000                 if (!XFS_SB_VERSION_HASATTR2(&mp->m_sb) && version == 2) {
4001                         XFS_SB_VERSION_ADDATTR2(&mp->m_sb);
4002                         sbfields |= (XFS_SB_VERSIONNUM | XFS_SB_FEATURES2);
4003                 }
4004                 if (sbfields) {
4005                         XFS_SB_UNLOCK(mp, s);
4006                         xfs_mod_sb(tp, sbfields);
4007                 } else
4008                         XFS_SB_UNLOCK(mp, s);
4009         }
4010         if ((error = xfs_bmap_finish(&tp, &flist, firstblock, &committed)))
4011                 goto error2;
4012         error = xfs_trans_commit(tp, XFS_TRANS_PERM_LOG_RES, NULL);
4013         ASSERT(ip->i_df.if_ext_max ==
4014                XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t));
4015         return error;
4016 error2:
4017         xfs_bmap_cancel(&flist);
4018 error1:
4019         ASSERT(ismrlocked(&ip->i_lock,MR_UPDATE));
4020         xfs_iunlock(ip, XFS_ILOCK_EXCL);
4021 error0:
4022         xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
4023         ASSERT(ip->i_df.if_ext_max ==
4024                XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t));
4025         return error;
4026 }
4027
4028 /*
4029  * Add the extent to the list of extents to be freed at transaction end.
4030  * The list is maintained sorted (by block number).
4031  */
4032 /* ARGSUSED */
4033 void
4034 xfs_bmap_add_free(
4035         xfs_fsblock_t           bno,            /* fs block number of extent */
4036         xfs_filblks_t           len,            /* length of extent */
4037         xfs_bmap_free_t         *flist,         /* list of extents */
4038         xfs_mount_t             *mp)            /* mount point structure */
4039 {
4040         xfs_bmap_free_item_t    *cur;           /* current (next) element */
4041         xfs_bmap_free_item_t    *new;           /* new element */
4042         xfs_bmap_free_item_t    *prev;          /* previous element */
4043 #ifdef DEBUG
4044         xfs_agnumber_t          agno;
4045         xfs_agblock_t           agbno;
4046
4047         ASSERT(bno != NULLFSBLOCK);
4048         ASSERT(len > 0);
4049         ASSERT(len <= MAXEXTLEN);
4050         ASSERT(!ISNULLSTARTBLOCK(bno));
4051         agno = XFS_FSB_TO_AGNO(mp, bno);
4052         agbno = XFS_FSB_TO_AGBNO(mp, bno);
4053         ASSERT(agno < mp->m_sb.sb_agcount);
4054         ASSERT(agbno < mp->m_sb.sb_agblocks);
4055         ASSERT(len < mp->m_sb.sb_agblocks);
4056         ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
4057 #endif
4058         ASSERT(xfs_bmap_free_item_zone != NULL);
4059         new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
4060         new->xbfi_startblock = bno;
4061         new->xbfi_blockcount = (xfs_extlen_t)len;
4062         for (prev = NULL, cur = flist->xbf_first;
4063              cur != NULL;
4064              prev = cur, cur = cur->xbfi_next) {
4065                 if (cur->xbfi_startblock >= bno)
4066                         break;
4067         }
4068         if (prev)
4069                 prev->xbfi_next = new;
4070         else
4071                 flist->xbf_first = new;
4072         new->xbfi_next = cur;
4073         flist->xbf_count++;
4074 }
4075
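/*
 * Standalone sketch (not part of the original source) of the sorted
 * singly-linked-list insertion used by xfs_bmap_add_free() above: walk to
 * the first element whose key is >= the new key, then splice the new
 * element in ahead of it.  The "item_t" node and "list_add_sorted" helper
 * are hypothetical stand-ins for xfs_bmap_free_item_t.
 */
typedef struct item {
        struct item             *next;          /* next element, or NULL */
        unsigned long long      key;            /* sort key (startblock) */
} item_t;

static void
list_add_sorted(
        item_t          **headp,                /* pointer to list head */
        item_t          *new)                   /* element to insert */
{
        item_t          *prev = NULL;
        item_t          *cur;

        /* find the first element at or above the new key */
        for (cur = *headp; cur != NULL; prev = cur, cur = cur->next) {
                if (cur->key >= new->key)
                        break;
        }
        /* splice the new element in between prev and cur */
        if (prev)
                prev->next = new;
        else
                *headp = new;
        new->next = cur;
}
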
4076 /*
4077  * Compute and fill in the value of the maximum depth of a bmap btree
4078  * in this filesystem.  Done once, during mount.
4079  */
4080 void
4081 xfs_bmap_compute_maxlevels(
4082         xfs_mount_t     *mp,            /* file system mount structure */
4083         int             whichfork)      /* data or attr fork */
4084 {
4085         int             level;          /* btree level */
4086         uint            maxblocks;      /* max blocks at this level */
4087         uint            maxleafents;    /* max leaf entries possible */
4088         int             maxrootrecs;    /* max records in root block */
4089         int             minleafrecs;    /* min records in leaf block */
4090         int             minnoderecs;    /* min records in node block */
4091         int             sz;             /* root block size */
4092
4093         /*
4094          * The maximum number of extents in a file, hence the maximum
4095          * number of leaf entries, is controlled by the type of di_nextents
4096          * (a signed 32-bit number, xfs_extnum_t), or by di_anextents
4097          * (a signed 16-bit number, xfs_aextnum_t).
4098          */
4099         if (whichfork == XFS_DATA_FORK) {
4100                 maxleafents = MAXEXTNUM;
4101                 sz = (mp->m_flags & XFS_MOUNT_ATTR2) ?
4102                         XFS_BMDR_SPACE_CALC(MINDBTPTRS) : mp->m_attroffset;
4103         } else {
4104                 maxleafents = MAXAEXTNUM;
4105                 sz = (mp->m_flags & XFS_MOUNT_ATTR2) ?
4106                         XFS_BMDR_SPACE_CALC(MINABTPTRS) :
4107                         mp->m_sb.sb_inodesize - mp->m_attroffset;
4108         }
4109         maxrootrecs = (int)XFS_BTREE_BLOCK_MAXRECS(sz, xfs_bmdr, 0);
4110         minleafrecs = mp->m_bmap_dmnr[0];
4111         minnoderecs = mp->m_bmap_dmnr[1];
4112         maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
4113         for (level = 1; maxblocks > 1; level++) {
4114                 if (maxblocks <= maxrootrecs)
4115                         maxblocks = 1;
4116                 else
4117                         maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
4118         }
4119         mp->m_bm_maxlevels[whichfork] = level;
4120 }
4121
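/*
 * Standalone sketch (not part of the original source) of the loop in
 * xfs_bmap_compute_maxlevels() above: start from the worst-case number of
 * leaf blocks, then keep adding interior levels until the surviving block
 * count fits in the inode root.  All four capacity parameters here are
 * caller-supplied example values, not derived from a real superblock.
 */
static int                              /* maximum btree height */
compute_maxlevels_example(
        unsigned int    maxleafents,    /* most leaf records possible */
        unsigned int    minleafrecs,    /* fewest records per leaf block */
        unsigned int    minnoderecs,    /* fewest records per node block */
        unsigned int    maxrootrecs)    /* most records in the inode root */
{
        unsigned int    maxblocks;      /* worst-case blocks at this level */
        int             level;

        maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
        for (level = 1; maxblocks > 1; level++) {
                if (maxblocks <= maxrootrecs)
                        maxblocks = 1;  /* the root block holds the rest */
                else
                        maxblocks = (maxblocks + minnoderecs - 1) /
                                    minnoderecs;
        }
        return level;
}
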
4122 /*
4123  * Routine to be called at transaction's end by the xfs_bmapi/xfs_bunmapi
4124  * caller.  Frees all the extents that need freeing, which must be done
4125  * last due to locking considerations.  We never free any extents in
4126  * the first transaction.  This is to allow the caller to make the first
4127  * transaction a synchronous one so that the pointers to the data being
4128  * broken in this transaction will be permanent before the data is actually
4129  * freed.  This is necessary to prevent blocks from being reallocated
4130  * and written to before the free and reallocation are actually permanent.
4131  * We do not just make the first transaction synchronous here, because
4132  * there are more efficient ways to gain the same protection in some cases
4133  * (see the file truncation code).
4134  *
4135  * The committed parameter is set to 1 if the given transaction was
4136  * committed and a new one started, and to 0 otherwise.
4137  */
4138 /*ARGSUSED*/
4139 int                                             /* error */
4140 xfs_bmap_finish(
4141         xfs_trans_t             **tp,           /* transaction pointer addr */
4142         xfs_bmap_free_t         *flist,         /* i/o: list extents to free */
4143         xfs_fsblock_t           firstblock,     /* controlled ag for allocs */
4144         int                     *committed)     /* xact committed or not */
4145 {
4146         xfs_efd_log_item_t      *efd;           /* extent free data */
4147         xfs_efi_log_item_t      *efi;           /* extent free intention */
4148         int                     error;          /* error return value */
4149         xfs_bmap_free_item_t    *free;          /* free extent list item */
4150         unsigned int            logres;         /* new log reservation */
4151         unsigned int            logcount;       /* new log count */
4152         xfs_mount_t             *mp;            /* filesystem mount structure */
4153         xfs_bmap_free_item_t    *next;          /* next item on free list */
4154         xfs_trans_t             *ntp;           /* new transaction pointer */
4155
4156         ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
4157         if (flist->xbf_count == 0) {
4158                 *committed = 0;
4159                 return 0;
4160         }
4161         ntp = *tp;
4162         efi = xfs_trans_get_efi(ntp, flist->xbf_count);
4163         for (free = flist->xbf_first; free; free = free->xbfi_next)
4164                 xfs_trans_log_efi_extent(ntp, efi, free->xbfi_startblock,
4165                         free->xbfi_blockcount);
4166         logres = ntp->t_log_res;
4167         logcount = ntp->t_log_count;
4168         ntp = xfs_trans_dup(*tp);
4169         error = xfs_trans_commit(*tp, 0, NULL);
4170         *tp = ntp;
4171         *committed = 1;
4172         /*
4173          * We have a new transaction, so we should return committed=1,
4174          * even though we're returning an error.
4175          */
4176         if (error) {
4177                 return error;
4178         }
4179         if ((error = xfs_trans_reserve(ntp, 0, logres, 0, XFS_TRANS_PERM_LOG_RES,
4180                         logcount)))
4181                 return error;
4182         efd = xfs_trans_get_efd(ntp, efi, flist->xbf_count);
4183         for (free = flist->xbf_first; free != NULL; free = next) {
4184                 next = free->xbfi_next;
4185                 if ((error = xfs_free_extent(ntp, free->xbfi_startblock,
4186                                 free->xbfi_blockcount))) {
4187                         /*
4188                          * The bmap free list will be cleaned up at a
4189                          * higher level.  The EFI will be canceled when
4190                          * this transaction is aborted.
4191                          * Need to force shutdown here to make sure it
4192                          * happens, since this transaction may not be
4193                          * dirty yet.
4194                          */
4195                         mp = ntp->t_mountp;
4196                         if (!XFS_FORCED_SHUTDOWN(mp))
4197                                 xfs_force_shutdown(mp,
4198                                                    (error == EFSCORRUPTED) ?
4199                                                    XFS_CORRUPT_INCORE :
4200                                                    XFS_METADATA_IO_ERROR);
4201                         return error;
4202                 }
4203                 xfs_trans_log_efd_extent(ntp, efd, free->xbfi_startblock,
4204                         free->xbfi_blockcount);
4205                 xfs_bmap_del_free(flist, NULL, free);
4206         }
4207         return 0;
4208 }
4209
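/*
 * Hedged sketch (not part of the original source) of the caller-side
 * pattern for xfs_bmap_finish(), modeled on xfs_bmap_add_attrfork() in
 * this file.  "example_finish_or_cancel" is a hypothetical helper and
 * assumes the caller has already built up "flist" and "firstblock"
 * inside a transaction with a permanent log reservation; cancelling the
 * transaction itself on error is left to that caller.
 */
STATIC int                                      /* error */
example_finish_or_cancel(
        xfs_trans_t     **tp,           /* current transaction, may be rolled */
        xfs_bmap_free_t *flist,         /* extents queued for freeing */
        xfs_fsblock_t   firstblock)     /* first block allocated under *tp */
{
        int             committed;      /* set if xfs_bmap_finish rolled *tp */
        int             error;          /* error return value */

        error = xfs_bmap_finish(tp, flist, firstblock, &committed);
        if (error) {
                /*
                 * The queued extents are still on the list; drop them
                 * before the caller cancels the transaction.
                 */
                xfs_bmap_cancel(flist);
                return error;
        }
        return xfs_trans_commit(*tp, XFS_TRANS_PERM_LOG_RES, NULL);
}
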
4210 /*
4211  * Free up any items left in the list.
4212  */
4213 void
4214 xfs_bmap_cancel(
4215         xfs_bmap_free_t         *flist) /* list of bmap_free_items */
4216 {
4217         xfs_bmap_free_item_t    *free;  /* free list item */
4218         xfs_bmap_free_item_t    *next;
4219
4220         if (flist->xbf_count == 0)
4221                 return;
4222         ASSERT(flist->xbf_first != NULL);
4223         for (free = flist->xbf_first; free; free = next) {
4224                 next = free->xbfi_next;
4225                 xfs_bmap_del_free(flist, NULL, free);
4226         }
4227         ASSERT(flist->xbf_count == 0);
4228 }
4229
4230 /*
4231  * Returns the file-relative block number of the first unused block(s)
4232  * in the file with at least "len" logically contiguous blocks free.
4233  * This is the lowest-address hole if the file has holes, else the first block
4234  * past the end of file.
4235  * Return 0 if the file is currently local (in-inode).
4236  * *first_unused is set to 0 if the file is currently local (in-inode).
4237 int                                             /* error */
4238 xfs_bmap_first_unused(
4239         xfs_trans_t     *tp,                    /* transaction pointer */
4240         xfs_inode_t     *ip,                    /* incore inode */
4241         xfs_extlen_t    len,                    /* size of hole to find */
4242         xfs_fileoff_t   *first_unused,          /* unused block */
4243         int             whichfork)              /* data or attr fork */
4244 {
4245         xfs_bmbt_rec_t  *base;                  /* base of extent array */
4246         xfs_bmbt_rec_t  *ep;                    /* pointer to an extent entry */
4247         int             error;                  /* error return value */
4248         xfs_ifork_t     *ifp;                   /* inode fork pointer */
4249         xfs_fileoff_t   lastaddr;               /* last block number seen */
4250         xfs_fileoff_t   lowest;                 /* lowest useful block */
4251         xfs_fileoff_t   max;                    /* starting useful block */
4252         xfs_fileoff_t   off;                    /* offset for this block */
4253         xfs_extnum_t    nextents;               /* number of extent entries */
4254
4255         ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE ||
4256                XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ||
4257                XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
4258         if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
4259                 *first_unused = 0;
4260                 return 0;
4261         }
4262         ifp = XFS_IFORK_PTR(ip, whichfork);
4263         if (!(ifp->if_flags & XFS_IFEXTENTS) &&
4264             (error = xfs_iread_extents(tp, ip, whichfork)))
4265                 return error;
4266         lowest = *first_unused;
4267         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4268         base = &ifp->if_u1.if_extents[0];
4269         for (lastaddr = 0, max = lowest, ep = base;
4270              ep < &base[nextents];
4271              ep++) {
4272                 off = xfs_bmbt_get_startoff(ep);
4273                 /*
4274                  * See if the hole before this extent will work.
4275                  */
4276                 if (off >= lowest + len && off - max >= len) {
4277                         *first_unused = max;
4278                         return 0;
4279                 }
4280                 lastaddr = off + xfs_bmbt_get_blockcount(ep);
4281                 max = XFS_FILEOFF_MAX(lastaddr, lowest);
4282         }
4283         *first_unused = max;
4284         return 0;
4285 }
4286
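/*
 * Standalone sketch (not part of the original source) of the hole scan in
 * xfs_bmap_first_unused() above: walk the sorted extents and return the
 * start of the first gap of at least "len" blocks at or beyond "lowest",
 * falling back to the first block past the last extent.  This reuses the
 * hypothetical "ext_t" type defined in the sketch that follows
 * xfs_bmap_do_search_extents().
 */
static unsigned long long               /* start of first suitable hole */
first_unused_example(
        const ext_t             *base,  /* extent array, sorted by startoff */
        int                     nextents, /* number of entries */
        unsigned long long      lowest, /* lowest useful file block */
        unsigned long long      len)    /* size of hole to find */
{
        unsigned long long      max = lowest;   /* candidate hole start */
        unsigned long long      off;            /* start of this extent */
        int                     i;

        for (i = 0; i < nextents; i++) {
                off = base[i].startoff;
                /* is the gap below this extent large and high enough? */
                if (off >= lowest + len && off - max >= len)
                        return max;
                /* no: the candidate hole starts after this extent */
                if (off + base[i].blockcount > max)
                        max = off + base[i].blockcount;
        }
        return max;             /* the hole is past the last extent */
}
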
4287 /*
4288  * Returns the file-relative block number of the last block + 1 before
4289  * last_block (input value) in the file.
4290  * This is not based on i_size, it is based on the extent list.
4291  * *last_block is set to 0 for local files, as they do not have an extent list.
4292  */
4293 int                                             /* error */
4294 xfs_bmap_last_before(
4295         xfs_trans_t     *tp,                    /* transaction pointer */
4296         xfs_inode_t     *ip,                    /* incore inode */
4297         xfs_fileoff_t   *last_block,            /* last block */
4298         int             whichfork)              /* data or attr fork */
4299 {
4300         xfs_fileoff_t   bno;                    /* input file offset */
4301         int             eof;                    /* hit end of file */
4302         xfs_bmbt_rec_t  *ep;                    /* pointer to last extent */
4303         int             error;                  /* error return value */
4304         xfs_bmbt_irec_t got;                    /* current extent value */
4305         xfs_ifork_t     *ifp;                   /* inode fork pointer */
4306         xfs_extnum_t    lastx;                  /* last extent used */
4307         xfs_bmbt_irec_t prev;                   /* previous extent value */
4308
4309         if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
4310             XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
4311             XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL)
4312                return XFS_ERROR(EIO);
4313         if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
4314                 *last_block = 0;
4315                 return 0;
4316         }
4317         ifp = XFS_IFORK_PTR(ip, whichfork);
4318         if (!(ifp->if_flags & XFS_IFEXTENTS) &&
4319             (error = xfs_iread_extents(tp, ip, whichfork)))
4320                 return error;
4321         bno = *last_block - 1;
4322         ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
4323                 &prev);
4324         if (eof || xfs_bmbt_get_startoff(ep) > bno) {
4325                 if (prev.br_startoff == NULLFILEOFF)
4326                         *last_block = 0;
4327                 else
4328                         *last_block = prev.br_startoff + prev.br_blockcount;
4329         }
4330         /*
4331          * Otherwise *last_block is already the right answer.
4332          */
4333         return 0;
4334 }
4335
4336 /*
4337  * Returns the file-relative block number of the first block past eof in
4338  * the file.  This is not based on i_size, it is based on the extent list.
4339  * Returns 0 for local files, as they do not have an extent list.
4340  * *last_block is set to 0 for local files, as they do not have an extent list.
4341 int                                             /* error */
4342 xfs_bmap_last_offset(
4343         xfs_trans_t     *tp,                    /* transaction pointer */
4344         xfs_inode_t     *ip,                    /* incore inode */
4345         xfs_fileoff_t   *last_block,            /* last block */
4346         int             whichfork)              /* data or attr fork */
4347 {
4348         xfs_bmbt_rec_t  *base;                  /* base of extent array */
4349         xfs_bmbt_rec_t  *ep;                    /* pointer to last extent */
4350         int             error;                  /* error return value */
4351         xfs_ifork_t     *ifp;                   /* inode fork pointer */
4352         xfs_extnum_t    nextents;               /* number of extent entries */
4353
4354         if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
4355             XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
4356             XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL)
4357                return XFS_ERROR(EIO);
4358         if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
4359                 *last_block = 0;
4360                 return 0;
4361         }
4362         ifp = XFS_IFORK_PTR(ip, whichfork);
4363         if (!(ifp->if_flags & XFS_IFEXTENTS) &&
4364             (error = xfs_iread_extents(tp, ip, whichfork)))
4365                 return error;
4366         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4367         if (!nextents) {
4368                 *last_block = 0;
4369                 return 0;
4370         }
4371         base = &ifp->if_u1.if_extents[0];
4372         ASSERT(base != NULL);
4373         ep = &base[nextents - 1];
4374         *last_block = xfs_bmbt_get_startoff(ep) + xfs_bmbt_get_blockcount(ep);
4375         return 0;
4376 }
4377
4378 /*
4379  * Returns whether the selected fork of the inode has exactly one
4380  * block or not.  For the data fork we check this matches di_size,
4381  * implying the file's range is 0..bsize-1.
4382  */
4383 int                                     /* 1=>1 block, 0=>otherwise */
4384 xfs_bmap_one_block(
4385         xfs_inode_t     *ip,            /* incore inode */
4386         int             whichfork)      /* data or attr fork */
4387 {
4388         xfs_bmbt_rec_t  *ep;            /* ptr to fork's extent */
4389         xfs_ifork_t     *ifp;           /* inode fork pointer */
4390         int             rval;           /* return value */
4391         xfs_bmbt_irec_t s;              /* internal version of extent */
4392
4393 #ifndef DEBUG
4394         if (whichfork == XFS_DATA_FORK)
4395                 return ip->i_d.di_size == ip->i_mount->m_sb.sb_blocksize;
4396 #endif  /* !DEBUG */
4397         if (XFS_IFORK_NEXTENTS(ip, whichfork) != 1)
4398                 return 0;
4399         if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
4400                 return 0;
4401         ifp = XFS_IFORK_PTR(ip, whichfork);
4402         ASSERT(ifp->if_flags & XFS_IFEXTENTS);
4403         ep = ifp->if_u1.if_extents;
4404         xfs_bmbt_get_all(ep, &s);
4405         rval = s.br_startoff == 0 && s.br_blockcount == 1;
4406         if (rval && whichfork == XFS_DATA_FORK)
4407                 ASSERT(ip->i_d.di_size == ip->i_mount->m_sb.sb_blocksize);
4408         return rval;
4409 }
4410
4411 /*
4412  * Read in the extents to if_extents.
4413  * All inode fields are set up by caller, we just traverse the btree
4414  * and copy the records in. If the file system cannot contain unwritten
4415  * extents, the records are checked to ensure no "state" flags are set.
4416  */
4417 int                                     /* error */
4418 xfs_bmap_read_extents(
4419         xfs_trans_t             *tp,    /* transaction pointer */
4420         xfs_inode_t             *ip,    /* incore inode */
4421         int                     whichfork) /* data or attr fork */
4422 {
4423         xfs_bmbt_block_t        *block; /* current btree block */
4424         xfs_fsblock_t           bno;    /* block # of "block" */
4425         xfs_buf_t               *bp;    /* buffer for "block" */
4426         int                     error;  /* error return value */
4427         xfs_exntfmt_t           exntf;  /* XFS_EXTFMT_NOSTATE, if checking */
4428 #ifdef XFS_BMAP_TRACE
4429         static char             fname[] = "xfs_bmap_read_extents";
4430 #endif
4431         xfs_extnum_t            i, j;   /* index into the extents list */
4432         xfs_ifork_t             *ifp;   /* fork structure */
4433         int                     level;  /* btree level, for checking */
4434         xfs_mount_t             *mp;    /* file system mount structure */
4435         xfs_bmbt_ptr_t          *pp;    /* pointer to block address */
4436         /* REFERENCED */
4437         xfs_extnum_t            room;   /* number of entries there's room for */
4438         xfs_bmbt_rec_t          *trp;   /* target record pointer */
4439
4440         bno = NULLFSBLOCK;
4441         mp = ip->i_mount;
4442         ifp = XFS_IFORK_PTR(ip, whichfork);
4443         exntf = (whichfork != XFS_DATA_FORK) ? XFS_EXTFMT_NOSTATE :
4444                                         XFS_EXTFMT_INODE(ip);
4445         block = ifp->if_broot;
4446         /*
4447          * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
4448          * Root level must use XFS_BMAP_BROOT_PTR_ADDR macro to get ptr out.
4449         level = be16_to_cpu(block->bb_level);
4450         ASSERT(level > 0);
4451         pp = XFS_BMAP_BROOT_PTR_ADDR(block, 1, ifp->if_broot_bytes);
4452         ASSERT(INT_GET(*pp, ARCH_CONVERT) != NULLDFSBNO);
4453         ASSERT(XFS_FSB_TO_AGNO(mp, INT_GET(*pp, ARCH_CONVERT)) < mp->m_sb.sb_agcount);
4454         ASSERT(XFS_FSB_TO_AGBNO(mp, INT_GET(*pp, ARCH_CONVERT)) < mp->m_sb.sb_agblocks);
4455         bno = INT_GET(*pp, ARCH_CONVERT);
4456         /*
4457          * Go down the tree until leaf level is reached, following the first
4458          * pointer (leftmost) at each level.
4459          */
4460         while (level-- > 0) {
4461                 if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
4462                                 XFS_BMAP_BTREE_REF)))
4463                         return error;
4464                 block = XFS_BUF_TO_BMBT_BLOCK(bp);
4465                 XFS_WANT_CORRUPTED_GOTO(
4466                         XFS_BMAP_SANITY_CHECK(mp, block, level),
4467                         error0);
4468                 if (level == 0)
4469                         break;
4470                 pp = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt, block,
4471                         1, mp->m_bmap_dmxr[1]);
4472                 XFS_WANT_CORRUPTED_GOTO(
4473                         XFS_FSB_SANITY_CHECK(mp, INT_GET(*pp, ARCH_CONVERT)),
4474                         error0);
4475                 bno = INT_GET(*pp, ARCH_CONVERT);
4476                 xfs_trans_brelse(tp, bp);
4477         }
4478         /*
4479          * Here with bp and block set to the leftmost leaf node in the tree.
4480          */
4481         room = ifp->if_bytes / (uint)sizeof(*trp);
4482         trp = ifp->if_u1.if_extents;
4483         i = 0;
4484         /*
4485          * Loop over all leaf nodes.  Copy information to the extent list.
4486          */
4487         for (;;) {
4488                 xfs_bmbt_rec_t  *frp, *temp;
4489                 xfs_fsblock_t   nextbno;
4490                 xfs_extnum_t    num_recs;
4491
4492
4493                 num_recs = be16_to_cpu(block->bb_numrecs);
4494                 if (unlikely(i + num_recs > room)) {
4495                         ASSERT(i + num_recs <= room);
4496                         xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
4497                                 "corrupt dinode %Lu, (btree extents).",
4498                                 (unsigned long long) ip->i_ino);
4499                         XFS_ERROR_REPORT("xfs_bmap_read_extents(1)",
4500                                          XFS_ERRLEVEL_LOW,
4501                                         ip->i_mount);
4502                         goto error0;
4503                 }
4504                 XFS_WANT_CORRUPTED_GOTO(
4505                         XFS_BMAP_SANITY_CHECK(mp, block, 0),
4506                         error0);
4507                 /*
4508                  * Read-ahead the next leaf block, if any.
4509                  */
4510                 nextbno = be64_to_cpu(block->bb_rightsib);
4511                 if (nextbno != NULLFSBLOCK)
4512                         xfs_btree_reada_bufl(mp, nextbno, 1);
4513                 /*
4514                  * Copy records into the extent list.
4515                  */
4516                 frp = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt,
4517                         block, 1, mp->m_bmap_dmxr[0]);
4518                 temp = trp;
4519                 for (j = 0; j < num_recs; j++, frp++, trp++) {
4520                         trp->l0 = INT_GET(frp->l0, ARCH_CONVERT);
4521                         trp->l1 = INT_GET(frp->l1, ARCH_CONVERT);
4522                 }
4523                 if (exntf == XFS_EXTFMT_NOSTATE) {
4524                         /*
4525                          * Check all attribute bmap btree records and
4526                          * any "older" data bmap btree records for a
4527                          * set bit in the "extent flag" position.
4528                          */
4529                         if (unlikely(xfs_check_nostate_extents(temp, num_recs))) {
4530                                 XFS_ERROR_REPORT("xfs_bmap_read_extents(2)",
4531                                                  XFS_ERRLEVEL_LOW,
4532                                                  ip->i_mount);
4533                                 goto error0;
4534                         }
4535                 }
4536                 i += num_recs;
4537                 xfs_trans_brelse(tp, bp);
4538                 bno = nextbno;
4539                 /*
4540                  * If we've reached the end, stop.
4541                  */
4542                 if (bno == NULLFSBLOCK)
4543                         break;
4544                 if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
4545                                 XFS_BMAP_BTREE_REF)))
4546                         return error;
4547                 block = XFS_BUF_TO_BMBT_BLOCK(bp);
4548         }
4549         ASSERT(i == ifp->if_bytes / (uint)sizeof(*trp));
4550         ASSERT(i == XFS_IFORK_NEXTENTS(ip, whichfork));
4551         xfs_bmap_trace_exlist(fname, ip, i, whichfork);
4552         return 0;
4553 error0:
4554         xfs_trans_brelse(tp, bp);
4555         return XFS_ERROR(EFSCORRUPTED);
4556 }
4557
4558 #ifdef XFS_BMAP_TRACE
4559 /*
4560  * Add bmap trace insert entries for all the contents of the extent list.
4561  */
4562 void
4563 xfs_bmap_trace_exlist(
4564         char            *fname,         /* function name */
4565         xfs_inode_t     *ip,            /* incore inode pointer */
4566         xfs_extnum_t    cnt,            /* count of entries in the list */
4567         int             whichfork)      /* data or attr fork */
4568 {
4569         xfs_bmbt_rec_t  *base;          /* base of extent list */
4570         xfs_bmbt_rec_t  *ep;            /* current entry in extent list */
4571         xfs_extnum_t    idx;            /* extent list entry number */
4572         xfs_ifork_t     *ifp;           /* inode fork pointer */
4573         xfs_bmbt_irec_t s;              /* extent list record */
4574
4575         ifp = XFS_IFORK_PTR(ip, whichfork);
4576         ASSERT(cnt == ifp->if_bytes / (uint)sizeof(*base));
4577         base = ifp->if_u1.if_extents;
4578         for (idx = 0, ep = base; idx < cnt; idx++, ep++) {
4579                 xfs_bmbt_get_all(ep, &s);
4580                 xfs_bmap_trace_insert(fname, "exlist", ip, idx, 1, &s, NULL,
4581                         whichfork);
4582         }
4583 }
4584 #endif
4585
4586 #ifdef DEBUG
4587 /*
4588  * Validate that the bmbt_irecs being returned from bmapi are valid
4589  * given the caller's original parameters.  Specifically check the
4590  * ranges of the returned irecs to ensure that they only extend beyond
4591  * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
4592  */
4593 STATIC void
4594 xfs_bmap_validate_ret(
4595         xfs_fileoff_t           bno,
4596         xfs_filblks_t           len,
4597         int                     flags,
4598         xfs_bmbt_irec_t         *mval,
4599         int                     nmap,
4600         int                     ret_nmap)
4601 {
4602         int                     i;              /* index to map values */
4603
4604         ASSERT(ret_nmap <= nmap);
4605
4606         for (i = 0; i < ret_nmap; i++) {
4607                 ASSERT(mval[i].br_blockcount > 0);
4608                 if (!(flags & XFS_BMAPI_ENTIRE)) {
4609                         ASSERT(mval[i].br_startoff >= bno);
4610                         ASSERT(mval[i].br_blockcount <= len);
4611                         ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
4612                                bno + len);
4613                 } else {
4614                         ASSERT(mval[i].br_startoff < bno + len);
4615                         ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
4616                                bno);
4617                 }
4618                 ASSERT(i == 0 ||
4619                        mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
4620                        mval[i].br_startoff);
4621                 if ((flags & XFS_BMAPI_WRITE) && !(flags & XFS_BMAPI_DELAY))
4622                         ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
4623                                mval[i].br_startblock != HOLESTARTBLOCK);
4624                 ASSERT(mval[i].br_state == XFS_EXT_NORM ||
4625                        mval[i].br_state == XFS_EXT_UNWRITTEN);
4626         }
4627 }
4628 #endif /* DEBUG */
4629
4630
4631 /*
4632  * Map file blocks to filesystem blocks.
4633  * File range is given by the bno/len pair.
4634  * Adds blocks to file if a write ("flags & XFS_BMAPI_WRITE" set)
4635  * into a hole or past eof.
4636  * Only allocates blocks from a single allocation group,
4637  * to avoid locking problems.
4638  * The returned value in "firstblock" from the first call in a transaction
4639  * must be remembered and presented to subsequent calls in "firstblock".
4640  * An upper bound for the number of blocks to be allocated is supplied to
4641  * the first call in "total"; if no allocation group has that many free
4642  * blocks then the call will fail (return NULLFSBLOCK in "firstblock").
4643  */
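/*
 * Illustrative usage sketch (for documentation only; not a call made in
 * this file).  A plain read-only mapping of a file range might look
 * roughly like the following, where "offset_fsb", "count_fsb", "map"
 * and "nmap" are hypothetical caller locals:
 *
 *	xfs_bmbt_irec_t	map[2];
 *	int		nmap = 2;
 *	int		error;
 *
 *	error = xfs_bmapi(NULL, ip, offset_fsb, count_fsb, 0,
 *			  NULL, 0, map, &nmap, NULL);
 *
 * With XFS_BMAPI_WRITE clear, the transaction, firstblock and flist
 * arguments may be left NULL/0; on return *nmap holds the number of
 * mappings actually filled in.
 */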
4644 int                                     /* error */
4645 xfs_bmapi(
4646         xfs_trans_t     *tp,            /* transaction pointer */
4647         xfs_inode_t     *ip,            /* incore inode */
4648         xfs_fileoff_t   bno,            /* starting file offs. mapped */
4649         xfs_filblks_t   len,            /* length to map in file */
4650         int             flags,          /* XFS_BMAPI_... */
4651         xfs_fsblock_t   *firstblock,    /* first allocated block
4652                                            controls a.g. for allocs */
4653         xfs_extlen_t    total,          /* total blocks needed */
4654         xfs_bmbt_irec_t *mval,          /* output: map values */
4655         int             *nmap,          /* i/o: mval size/count */
4656         xfs_bmap_free_t *flist)         /* i/o: list extents to free */
4657 {
4658         xfs_fsblock_t   abno;           /* allocated block number */
4659         xfs_extlen_t    alen;           /* allocated extent length */
4660         xfs_fileoff_t   aoff;           /* allocated file offset */
4661         xfs_bmalloca_t  bma;            /* args for xfs_bmap_alloc */
4662         xfs_btree_cur_t *cur;           /* bmap btree cursor */
4663         xfs_fileoff_t   end;            /* end of mapped file region */
4664         int             eof;            /* we've hit the end of extent list */
4665         char            contig;         /* allocation must be one extent */
4666         char            delay;          /* this request is for delayed alloc */
4667         char            exact;          /* don't do all of wasdelayed extent */
4668         char            convert;        /* unwritten extent I/O completion */
4669         xfs_bmbt_rec_t  *ep;            /* extent list entry pointer */
4670         int             error;          /* error return */
4671         xfs_bmbt_irec_t got;            /* current extent list record */
4672         xfs_ifork_t     *ifp;           /* inode fork pointer */
4673         xfs_extlen_t    indlen;         /* indirect blocks length */
4674         xfs_extnum_t    lastx;          /* last useful extent number */
4675         int             logflags;       /* flags for transaction logging */
4676         xfs_extlen_t    minleft;        /* min blocks left after allocation */
4677         xfs_extlen_t    minlen;         /* min allocation size */
4678         xfs_mount_t     *mp;            /* xfs mount structure */
4679         int             n;              /* current extent index */
4680         int             nallocs;        /* number of extents alloc'd */
4681         xfs_extnum_t    nextents;       /* number of extents in file */
4682         xfs_fileoff_t   obno;           /* old block number (offset) */
4683         xfs_bmbt_irec_t prev;           /* previous extent list record */
4684         int             tmp_logflags;   /* temp flags holder */
4685         int             whichfork;      /* data or attr fork */
4686         char            inhole;         /* current location is hole in file */
4687         char            stateless;      /* ignore state flag set */
4688         char            trim;           /* output trimmed to match range */
4689         char            userdata;       /* allocating non-metadata */
4690         char            wasdelay;       /* old extent was delayed */
4691         char            wr;             /* this is a write request */
4692         char            rt;             /* this is a realtime file */
4693         char            rsvd;           /* OK to allocate reserved blocks */
4694 #ifdef DEBUG
4695         xfs_fileoff_t   orig_bno;       /* original block number value */
4696         int             orig_flags;     /* original flags arg value */
4697         xfs_filblks_t   orig_len;       /* original value of len arg */
4698         xfs_bmbt_irec_t *orig_mval;     /* original value of mval */
4699         int             orig_nmap;      /* original value of *nmap */
4700
4701         orig_bno = bno;
4702         orig_len = len;
4703         orig_flags = flags;
4704         orig_mval = mval;
4705         orig_nmap = *nmap;
4706 #endif
4707         ASSERT(*nmap >= 1);
4708         ASSERT(*nmap <= XFS_BMAP_MAX_NMAP || !(flags & XFS_BMAPI_WRITE));
4709         whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
4710                 XFS_ATTR_FORK : XFS_DATA_FORK;
4711         mp = ip->i_mount;
4712         if (unlikely(XFS_TEST_ERROR(
4713             (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
4714              XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
4715              XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL),
4716              mp, XFS_ERRTAG_BMAPIFORMAT, XFS_RANDOM_BMAPIFORMAT))) {
4717                 XFS_ERROR_REPORT("xfs_bmapi", XFS_ERRLEVEL_LOW, mp);
4718                 return XFS_ERROR(EFSCORRUPTED);
4719         }
4720         if (XFS_FORCED_SHUTDOWN(mp))
4721                 return XFS_ERROR(EIO);
4722         rt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
4723         ifp = XFS_IFORK_PTR(ip, whichfork);
4724         ASSERT(ifp->if_ext_max ==
4725                XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
4726         if ((wr = (flags & XFS_BMAPI_WRITE)) != 0)
4727                 XFS_STATS_INC(xs_blk_mapw);
4728         else
4729                 XFS_STATS_INC(xs_blk_mapr);
4730         delay = (flags & XFS_BMAPI_DELAY) != 0;
4731         trim = (flags & XFS_BMAPI_ENTIRE) == 0;
4732         userdata = (flags & XFS_BMAPI_METADATA) == 0;
4733         convert = (flags & XFS_BMAPI_CONVERT) != 0;
4734         exact = (flags & XFS_BMAPI_EXACT) != 0;
4735         rsvd = (flags & XFS_BMAPI_RSVBLOCKS) != 0;
4736         contig = (flags & XFS_BMAPI_CONTIG) != 0;
4737         /*
4738          * stateless is used to combine extents which
4739          * differ only due to the state of the extents.
4740          * This technique is used from xfs_getbmap()
4741          * when the caller does not wish to see the
4742          * separation (which is the default).
4743          *
4744          * This technique is also used when writing a
4745          * buffer which has been partially written
4746          * (usually by being flushed during a chunkread),
4747          * to ensure one write takes place. This also
4748          * prevents a change in the xfs inode extents at
4749          * this time, intentionally. This change occurs
4750          * on completion of the write operation, in
4751          * xfs_strat_comp(), where the xfs_bmapi() call
4752          * is transactioned, and the extents combined.
4753          */
4754         stateless = (flags & XFS_BMAPI_IGSTATE) != 0;
4755         if (stateless && wr)    /* if writing unwritten space, no */
4756                 wr = 0;         /* allocations are allowed */
4757         ASSERT(wr || !delay);
4758         logflags = 0;
4759         nallocs = 0;
4760         cur = NULL;
4761         if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
4762                 ASSERT(wr && tp);
4763                 if ((error = xfs_bmap_local_to_extents(tp, ip,
4764                                 firstblock, total, &logflags, whichfork)))
4765                         goto error0;
4766         }
4767         if (wr && *firstblock == NULLFSBLOCK) {
4768                 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE)
4769                         minleft = be16_to_cpu(ifp->if_broot->bb_level) + 1;
4770                 else
4771                         minleft = 1;
4772         } else
4773                 minleft = 0;
4774         if (!(ifp->if_flags & XFS_IFEXTENTS) &&
4775             (error = xfs_iread_extents(tp, ip, whichfork)))
4776                 goto error0;
4777         ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
4778                 &prev);
4779         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4780         n = 0;
4781         end = bno + len;
4782         obno = bno;
4783         bma.ip = NULL;
4784         while (bno < end && n < *nmap) {
4785                 /*
4786                  * Reading past eof, act as though there's a hole
4787                  * up to end.
4788                  */
4789                 if (eof && !wr)
4790                         got.br_startoff = end;
4791                 inhole = eof || got.br_startoff > bno;
4792                 wasdelay = wr && !inhole && !delay &&
4793                         ISNULLSTARTBLOCK(got.br_startblock);
4794                 /*
4795                  * First, deal with the hole before the allocated space
4796                  * that we found, if any.
4797                  */
4798                 if (wr && (inhole || wasdelay)) {
4799                         /*
4800                          * For the wasdelay case, we could also just
4801                          * allocate the stuff asked for in this bmap call
4802                          * but that wouldn't be as good.
4803                          */
4804                         if (wasdelay && !exact) {
4805                                 alen = (xfs_extlen_t)got.br_blockcount;
4806                                 aoff = got.br_startoff;
4807                                 if (lastx != NULLEXTNUM && lastx) {
4808                                         ep = &ifp->if_u1.if_extents[lastx - 1];
4809                                         xfs_bmbt_get_all(ep, &prev);
4810                                 }
4811                         } else if (wasdelay) {
4812                                 alen = (xfs_extlen_t)
4813                                         XFS_FILBLKS_MIN(len,
4814                                                 (got.br_startoff +
4815                                                  got.br_blockcount) - bno);
4816                                 aoff = bno;
4817                         } else {
4818                                 alen = (xfs_extlen_t)
4819                                         XFS_FILBLKS_MIN(len, MAXEXTLEN);
4820                                 if (!eof)
4821                                         alen = (xfs_extlen_t)
4822                                                 XFS_FILBLKS_MIN(alen,
4823                                                         got.br_startoff - bno);
4824                                 aoff = bno;
4825                         }
4826                         minlen = contig ? alen : 1;
4827                         if (delay) {
4828                                 xfs_extlen_t    extsz;
4829
4830                                 /* Figure out the extent size, adjust alen */
4831                                 if (rt) {
4832                                         if (!(extsz = ip->i_d.di_extsize))
4833                                                 extsz = mp->m_sb.sb_rextsize;
4834                                 } else {
4835                                         extsz = ip->i_d.di_extsize;
4836                                 }
4837                                 if (extsz) {
4838                                         error = xfs_bmap_extsize_align(mp,
4839                                                         &got, &prev, extsz,
4840                                                         rt, eof, delay, convert,
4841                                                         &aoff, &alen);
4842                                         ASSERT(!error);
4843                                 }
4844
4845                                 if (rt)
4846                                         extsz = alen / mp->m_sb.sb_rextsize;
4847
4848                                 /*
4849                                  * Make a transaction-less quota reservation for
4850                                  * delayed allocation blocks. This number gets
4851                                  * adjusted later.
4852                                  * We return EDQUOT if we haven't allocated
4853                                  * any blocks already inside this loop.
4854                                  */
4855                                 if (XFS_TRANS_RESERVE_QUOTA_NBLKS(
4856                                                 mp, NULL, ip, (long)alen, 0,
4857                                                 rt ? XFS_QMOPT_RES_RTBLKS :
4858                                                      XFS_QMOPT_RES_REGBLKS)) {
4859                                         if (n == 0) {
4860                                                 *nmap = 0;
4861                                                 ASSERT(cur == NULL);
4862                                                 return XFS_ERROR(EDQUOT);
4863                                         }
4864                                         break;
4865                                 }
4866
4867                                 /*
4868                                  * Split changing sb for alen and indlen since
4869                                  * they could be coming from different places.
4870                                  */
4871                                 indlen = (xfs_extlen_t)
4872                                         xfs_bmap_worst_indlen(ip, alen);
4873                                 ASSERT(indlen > 0);
4874
4875                                 if (rt) {
4876                                         error = xfs_mod_incore_sb(mp,
4877                                                         XFS_SBS_FREXTENTS,
4878                                                         -(extsz), rsvd);
4879                                 } else {
4880                                         error = xfs_mod_incore_sb(mp,
4881                                                         XFS_SBS_FDBLOCKS,
4882                                                         -(alen), rsvd);
4883                                 }
4884                                 if (!error) {
4885                                         error = xfs_mod_incore_sb(mp,
4886                                                         XFS_SBS_FDBLOCKS,
4887                                                         -(indlen), rsvd);
4888                                         if (error && rt)
4889                                                 xfs_mod_incore_sb(mp,
4890                                                         XFS_SBS_FREXTENTS,
4891                                                         extsz, rsvd);
4892                                         else if (error)
4893                                                 xfs_mod_incore_sb(mp,
4894                                                         XFS_SBS_FDBLOCKS,
4895                                                         alen, rsvd);
4896                                 }
4897
4898                                 if (error) {
4899                                         if (XFS_IS_QUOTA_ON(mp))
4900                                                 /* unreserve the blocks now */
4901                                                 (void)
4902                                                 XFS_TRANS_UNRESERVE_QUOTA_NBLKS(
4903                                                         mp, NULL, ip,
4904                                                         (long)alen, 0, rt ?
4905                                                         XFS_QMOPT_RES_RTBLKS :
4906                                                         XFS_QMOPT_RES_REGBLKS);
4907                                         break;
4908                                 }
4909
4910                                 ip->i_delayed_blks += alen;
4911                                 abno = NULLSTARTBLOCK(indlen);
4912                         } else {
4913                                 /*
4914                                  * If first time, allocate and fill in
4915                                  * once-only bma fields.
4916                                  */
4917                                 if (bma.ip == NULL) {
4918                                         bma.tp = tp;
4919                                         bma.ip = ip;
4920                                         bma.prevp = &prev;
4921                                         bma.gotp = &got;
4922                                         bma.total = total;
4923                                         bma.userdata = 0;
4924                                 }
4925                                 /* Indicate if this is the first user data
4926                                  * in the file, or just any user data.
4927                                  */
4928                                 if (userdata) {
4929                                         bma.userdata = (aoff == 0) ?
4930                                                 XFS_ALLOC_INITIAL_USER_DATA :
4931                                                 XFS_ALLOC_USERDATA;
4932                                 }
4933                                 /*
4934                                  * Fill in changeable bma fields.
4935                                  */
4936                                 bma.eof = eof;
4937                                 bma.firstblock = *firstblock;
4938                                 bma.alen = alen;
4939                                 bma.off = aoff;
4940                                 bma.conv = convert;
4941                                 bma.wasdel = wasdelay;
4942                                 bma.minlen = minlen;
4943                                 bma.low = flist->xbf_low;
4944                                 bma.minleft = minleft;
4945                                 /*
4946                                  * Only want to do the alignment at the
4947                                  * eof if it is userdata and allocation length
4948                                  * is larger than a stripe unit.
4949                                  */
4950                                 if (mp->m_dalign && alen >= mp->m_dalign &&
4951                                     userdata && whichfork == XFS_DATA_FORK) {
4952                                         if ((error = xfs_bmap_isaeof(ip, aoff,
4953                                                         whichfork, &bma.aeof)))
4954                                                 goto error0;
4955                                 } else
4956                                         bma.aeof = 0;
4957                                 /*
4958                                  * Call allocator.
4959                                  */
4960                                 if ((error = xfs_bmap_alloc(&bma)))
4961                                         goto error0;
4962                                 /*
4963                                  * Copy out result fields.
4964                                  */
4965                                 abno = bma.rval;
4966                                 if ((flist->xbf_low = bma.low))
4967                                         minleft = 0;
4968                                 alen = bma.alen;
4969                                 aoff = bma.off;
4970                                 ASSERT(*firstblock == NULLFSBLOCK ||
4971                                        XFS_FSB_TO_AGNO(mp, *firstblock) ==
4972                                        XFS_FSB_TO_AGNO(mp, bma.firstblock) ||
4973                                        (flist->xbf_low &&
4974                                         XFS_FSB_TO_AGNO(mp, *firstblock) <
4975                                         XFS_FSB_TO_AGNO(mp, bma.firstblock)));
4976                                 *firstblock = bma.firstblock;
4977                                 if (cur)
4978                                         cur->bc_private.b.firstblock =
4979                                                 *firstblock;
4980                                 if (abno == NULLFSBLOCK)
4981                                         break;
4982                                 if ((ifp->if_flags & XFS_IFBROOT) && !cur) {
4983                                         cur = xfs_btree_init_cursor(mp,
4984                                                 tp, NULL, 0, XFS_BTNUM_BMAP,
4985                                                 ip, whichfork);
4986                                         cur->bc_private.b.firstblock =
4987                                                 *firstblock;
4988                                         cur->bc_private.b.flist = flist;
4989                                 }
4990                                 /*
4991                                  * Bump the number of extents we've allocated
4992                                  * in this call.
4993                                  */
4994                                 nallocs++;
4995                         }
4996                         if (cur)
4997                                 cur->bc_private.b.flags =
4998                                         wasdelay ? XFS_BTCUR_BPRV_WASDEL : 0;
4999                         got.br_startoff = aoff;
5000                         got.br_startblock = abno;
5001                         got.br_blockcount = alen;
5002                         got.br_state = XFS_EXT_NORM;    /* assume normal */
5003                         /*
5004                          * Determine the extent's state, and whether the
5005                          * filesystem supports unwritten extents.  A wasdelay
5006                          * extent has been initialized, so don't flag it unwritten.
5007                          */
5008                         if (wr && XFS_SB_VERSION_HASEXTFLGBIT(&mp->m_sb)) {
5009                                 if (!wasdelay && (flags & XFS_BMAPI_PREALLOC))
5010                                         got.br_state = XFS_EXT_UNWRITTEN;
5011                         }
5012                         error = xfs_bmap_add_extent(ip, lastx, &cur, &got,
5013                                 firstblock, flist, &tmp_logflags, whichfork,
5014                                 rsvd);
5015                         logflags |= tmp_logflags;
5016                         if (error)
5017                                 goto error0;
5018                         lastx = ifp->if_lastex;
5019                         ep = &ifp->if_u1.if_extents[lastx];
5020                         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
5021                         xfs_bmbt_get_all(ep, &got);
5022                         ASSERT(got.br_startoff <= aoff);
5023                         ASSERT(got.br_startoff + got.br_blockcount >=
5024                                 aoff + alen);
5025 #ifdef DEBUG
5026                         if (delay) {
5027                                 ASSERT(ISNULLSTARTBLOCK(got.br_startblock));
5028                                 ASSERT(STARTBLOCKVAL(got.br_startblock) > 0);
5029                         }
5030                         ASSERT(got.br_state == XFS_EXT_NORM ||
5031                                got.br_state == XFS_EXT_UNWRITTEN);
5032 #endif
5033                         /*
5034                          * Fall down into the found allocated space case.
5035                          */
5036                 } else if (inhole) {
5037                         /*
5038                          * Reading in a hole.
5039                          */
5040                         mval->br_startoff = bno;
5041                         mval->br_startblock = HOLESTARTBLOCK;
5042                         mval->br_blockcount =
5043                                 XFS_FILBLKS_MIN(len, got.br_startoff - bno);
5044                         mval->br_state = XFS_EXT_NORM;
5045                         bno += mval->br_blockcount;
5046                         len -= mval->br_blockcount;
5047                         mval++;
5048                         n++;
5049                         continue;
5050                 }
5051                 /*
5052                  * Then deal with the allocated space we found.
5053                  */
5054                 ASSERT(ep != NULL);
5055                 if (trim && (got.br_startoff + got.br_blockcount > obno)) {
5056                         if (obno > bno)
5057                                 bno = obno;
5058                         ASSERT((bno >= obno) || (n == 0));
5059                         ASSERT(bno < end);
5060                         mval->br_startoff = bno;
5061                         if (ISNULLSTARTBLOCK(got.br_startblock)) {
5062                                 ASSERT(!wr || delay);
5063                                 mval->br_startblock = DELAYSTARTBLOCK;
5064                         } else
5065                                 mval->br_startblock =
5066                                         got.br_startblock +
5067                                         (bno - got.br_startoff);
5068                         /*
5069                          * Return the minimum of what we got and what we
5070                          * asked for, for the length.  We can use the len
5071                          * variable here because it is modified below,
5072                          * and we may have been through this code before
5073                          * if the first part of the allocation didn't
5074                          * overlap what was asked for.
5075                          */
5076                         mval->br_blockcount =
5077                                 XFS_FILBLKS_MIN(end - bno, got.br_blockcount -
5078                                         (bno - got.br_startoff));
5079                         mval->br_state = got.br_state;
5080                         ASSERT(mval->br_blockcount <= len);
5081                 } else {
5082                         *mval = got;
5083                         if (ISNULLSTARTBLOCK(mval->br_startblock)) {
5084                                 ASSERT(!wr || delay);
5085                                 mval->br_startblock = DELAYSTARTBLOCK;
5086                         }
5087                 }
5088
5089                 /*
5090                  * Check if writing previously allocated but
5091                  * unwritten extents.
5092                  */
5093                 if (wr && mval->br_state == XFS_EXT_UNWRITTEN &&
5094                     ((flags & (XFS_BMAPI_PREALLOC|XFS_BMAPI_DELAY)) == 0)) {
5095                         /*
5096                          * Modify (by adding) the state flag, if writing.
5097                          */
5098                         ASSERT(mval->br_blockcount <= len);
5099                         if ((ifp->if_flags & XFS_IFBROOT) && !cur) {
5100                                 cur = xfs_btree_init_cursor(mp,
5101                                         tp, NULL, 0, XFS_BTNUM_BMAP,
5102                                         ip, whichfork);
5103                                 cur->bc_private.b.firstblock =
5104                                         *firstblock;
5105                                 cur->bc_private.b.flist = flist;
5106                         }
5107                         mval->br_state = XFS_EXT_NORM;
5108                         error = xfs_bmap_add_extent(ip, lastx, &cur, mval,
5109                                 firstblock, flist, &tmp_logflags, whichfork,
5110                                 rsvd);
5111                         logflags |= tmp_logflags;
5112                         if (error)
5113                                 goto error0;
5114                         lastx = ifp->if_lastex;
5115                         ep = &ifp->if_u1.if_extents[lastx];
5116                         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
5117                         xfs_bmbt_get_all(ep, &got);
5118                         /*
5119                          * We may have combined previously unwritten
5120                          * space with written space, so generate
5121                          * another request.
5122                          */
5123                         if (mval->br_blockcount < len)
5124                                 continue;
5125                 }
5126
5127                 ASSERT(!trim ||
5128                        ((mval->br_startoff + mval->br_blockcount) <= end));
5129                 ASSERT(!trim || (mval->br_blockcount <= len) ||
5130                        (mval->br_startoff < obno));
5131                 bno = mval->br_startoff + mval->br_blockcount;
5132                 len = end - bno;
5133                 if (n > 0 && mval->br_startoff == mval[-1].br_startoff) {
5134                         ASSERT(mval->br_startblock == mval[-1].br_startblock);
5135                         ASSERT(mval->br_blockcount > mval[-1].br_blockcount);
5136                         ASSERT(mval->br_state == mval[-1].br_state);
5137                         mval[-1].br_blockcount = mval->br_blockcount;
5138                         mval[-1].br_state = mval->br_state;
5139                 } else if (n > 0 && mval->br_startblock != DELAYSTARTBLOCK &&
5140                            mval[-1].br_startblock != DELAYSTARTBLOCK &&
5141                            mval[-1].br_startblock != HOLESTARTBLOCK &&
5142                            mval->br_startblock ==
5143                            mval[-1].br_startblock + mval[-1].br_blockcount &&
5144                            (stateless || mval[-1].br_state == mval->br_state)) {
5145                         ASSERT(mval->br_startoff ==
5146                                mval[-1].br_startoff + mval[-1].br_blockcount);
5147                         mval[-1].br_blockcount += mval->br_blockcount;
5148                 } else if (n > 0 &&
5149                            mval->br_startblock == DELAYSTARTBLOCK &&
5150                            mval[-1].br_startblock == DELAYSTARTBLOCK &&
5151                            mval->br_startoff ==
5152                            mval[-1].br_startoff + mval[-1].br_blockcount) {
5153                         mval[-1].br_blockcount += mval->br_blockcount;
5154                         mval[-1].br_state = mval->br_state;
5155                 } else if (!((n == 0) &&
5156                              ((mval->br_startoff + mval->br_blockcount) <=
5157                               obno))) {
5158                         mval++;
5159                         n++;
5160                 }
5161                 /*
5162                  * If we're done, stop now.  Stop when we've allocated
5163                  * XFS_BMAP_MAX_NMAP extents no matter what.  Otherwise
5164                  * the transaction may get too big.
5165                  */
5166                 if (bno >= end || n >= *nmap || nallocs >= *nmap)
5167                         break;
5168                 /*
5169                  * Else go on to the next record.
5170                  */
5171                 ep++;
5172                 lastx++;
5173                 if (lastx >= nextents) {
5174                         eof = 1;
5175                         prev = got;
5176                 } else
5177                         xfs_bmbt_get_all(ep, &got);
5178         }
5179         ifp->if_lastex = lastx;
5180         *nmap = n;
5181         /*
5182          * Transform from btree to extents, give it cur.
5183          */
5184         if (tp && XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
5185             XFS_IFORK_NEXTENTS(ip, whichfork) <= ifp->if_ext_max) {
5186                 ASSERT(wr && cur);
5187                 error = xfs_bmap_btree_to_extents(tp, ip, cur,
5188                         &tmp_logflags, whichfork);
5189                 logflags |= tmp_logflags;
5190                 if (error)
5191                         goto error0;
5192         }
5193         ASSERT(ifp->if_ext_max ==
5194                XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
5195         ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE ||
5196                XFS_IFORK_NEXTENTS(ip, whichfork) > ifp->if_ext_max);
5197         error = 0;
5198
5199 error0:
5200         /*
5201          * Log everything.  Do this after conversion, there's no point in
5202          * logging the extent list if we've converted to btree format.
5203          */
5204         if ((logflags & XFS_ILOG_FEXT(whichfork)) &&
5205             XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
5206                 logflags &= ~XFS_ILOG_FEXT(whichfork);
5207         else if ((logflags & XFS_ILOG_FBROOT(whichfork)) &&
5208                  XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
5209                 logflags &= ~XFS_ILOG_FBROOT(whichfork);
5210         /*
5211          * Log whatever the flags say, even if error.  Otherwise we might miss
5212          * detecting a case where the data is changed, there's an error,
5213          * and it's not logged so we don't shutdown when we should.
5214          */
5215         if (logflags) {
5216                 ASSERT(tp && wr);
5217                 xfs_trans_log_inode(tp, ip, logflags);
5218         }
5219         if (cur) {
5220                 if (!error) {
5221                         ASSERT(*firstblock == NULLFSBLOCK ||
5222                                XFS_FSB_TO_AGNO(mp, *firstblock) ==
5223                                XFS_FSB_TO_AGNO(mp,
5224                                        cur->bc_private.b.firstblock) ||
5225                                (flist->xbf_low &&
5226                                 XFS_FSB_TO_AGNO(mp, *firstblock) <
5227                                 XFS_FSB_TO_AGNO(mp,
5228                                         cur->bc_private.b.firstblock)));
5229                         *firstblock = cur->bc_private.b.firstblock;
5230                 }
5231                 xfs_btree_del_cursor(cur,
5232                         error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
5233         }
5234         if (!error)
5235                 xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
5236                         orig_nmap, *nmap);
5237         return error;
5238 }
5239
5240 /*
5241  * Map file blocks to filesystem blocks, simple version.
5242  * One block (extent) only, read-only.
5243  * The fork is selected by whichfork rather than by XFS_BMAPI_ATTRFORK;
5244  * otherwise the effect is as if XFS_BMAPI_METADATA was set and all
5245  * the other flags were clear.
5246  */
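/*
 * Illustrative usage sketch (for documentation only; "file_fsb" is a
 * hypothetical caller-supplied file offset in filesystem blocks):
 *
 *	xfs_fsblock_t	fsb;
 *	int		error;
 *
 *	error = xfs_bmapi_single(tp, ip, XFS_DATA_FORK, &fsb, file_fsb);
 *	if (!error && fsb == NULLFSBLOCK)
 *		... offset falls in a hole or beyond EOF ...
 *
 * On success *fsb holds the mapped filesystem block, or NULLFSBLOCK if
 * the offset lies in a hole or past end of file.
 */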
5247 int                                             /* error */
5248 xfs_bmapi_single(
5249         xfs_trans_t     *tp,            /* transaction pointer */
5250         xfs_inode_t     *ip,            /* incore inode */
5251         int             whichfork,      /* data or attr fork */
5252         xfs_fsblock_t   *fsb,           /* output: mapped block */
5253         xfs_fileoff_t   bno)            /* starting file offs. mapped */
5254 {
5255         int             eof;            /* we've hit the end of extent list */
5256         int             error;          /* error return */
5257         xfs_bmbt_irec_t got;            /* current extent list record */
5258         xfs_ifork_t     *ifp;           /* inode fork pointer */
5259         xfs_extnum_t    lastx;          /* last useful extent number */
5260         xfs_bmbt_irec_t prev;           /* previous extent list record */
5261
5262         ifp = XFS_IFORK_PTR(ip, whichfork);
5263         if (unlikely(
5264             XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
5265             XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)) {
5266                XFS_ERROR_REPORT("xfs_bmapi_single", XFS_ERRLEVEL_LOW,
5267                                 ip->i_mount);
5268                return XFS_ERROR(EFSCORRUPTED);
5269         }
5270         if (XFS_FORCED_SHUTDOWN(ip->i_mount))
5271                 return XFS_ERROR(EIO);
5272         XFS_STATS_INC(xs_blk_mapr);
5273         if (!(ifp->if_flags & XFS_IFEXTENTS) &&
5274             (error = xfs_iread_extents(tp, ip, whichfork)))
5275                 return error;
5276         (void)xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
5277                 &prev);
5278         /*
5279          * Reading past eof, act as though there's a hole
5280          * up to end.
5281          */
5282         if (eof || got.br_startoff > bno) {
5283                 *fsb = NULLFSBLOCK;
5284                 return 0;
5285         }
5286         ASSERT(!ISNULLSTARTBLOCK(got.br_startblock));
5287         ASSERT(bno < got.br_startoff + got.br_blockcount);
5288         *fsb = got.br_startblock + (bno - got.br_startoff);
5289         ifp->if_lastex = lastx;
5290         return 0;
5291 }
5292
5293 /*
5294  * Unmap (remove) blocks from a file.
5295  * If nexts is nonzero then the number of extents to remove is limited to
5296  * that value.  *done is set when the entire range has been unmapped; if
5297  * the extent limit stops the call early, *done is left clear for a retry.
5298  */
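/*
 * Illustrative usage sketch (for documentation only; the helpers and
 * limits shown are hypothetical, but the loop shape follows the *done
 * protocol implemented below):
 *
 *	int	done = 0;
 *
 *	while (!done) {
 *		... allocate and reserve a transaction, join the inode,
 *		    initialize firstblock and the free list ...
 *		error = xfs_bunmapi(tp, ip, start_fsb, length_fsb, 0,
 *				    extents_per_call, &firstblock, &flist,
 *				    &done);
 *		... on success finish the free list and commit,
 *		    otherwise cancel and bail out ...
 *	}
 *
 * Limiting "nexts" (extents_per_call here) bounds how much work, and
 * therefore how much log space, any single transaction consumes.
 */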
5299 int                                             /* error */
5300 xfs_bunmapi(
5301         xfs_trans_t             *tp,            /* transaction pointer */
5302         struct xfs_inode        *ip,            /* incore inode */
5303         xfs_fileoff_t           bno,            /* starting offset to unmap */
5304         xfs_filblks_t           len,            /* length to unmap in file */
5305         int                     flags,          /* misc flags */
5306         xfs_extnum_t            nexts,          /* number of extents max */
5307         xfs_fsblock_t           *firstblock,    /* first allocated block
5308                                                    controls a.g. for allocs */
5309         xfs_bmap_free_t         *flist,         /* i/o: list extents to free */
5310         int                     *done)          /* set when the unmap is complete */
5311 {
5312         xfs_btree_cur_t         *cur;           /* bmap btree cursor */
5313         xfs_bmbt_irec_t         del;            /* extent being deleted */
5314         int                     eof;            /* is deleting at eof */
5315         xfs_bmbt_rec_t          *ep;            /* extent list entry pointer */
5316         int                     error;          /* error return value */
5317         xfs_extnum_t            extno;          /* extent number in list */
5318         xfs_bmbt_irec_t         got;            /* current extent list entry */
5319         xfs_ifork_t             *ifp;           /* inode fork pointer */
5320         int                     isrt;           /* freeing in rt area */
5321         xfs_extnum_t            lastx;          /* last extent index used */
5322         int                     logflags;       /* transaction logging flags */
5323         xfs_extlen_t            mod;            /* rt extent offset */
5324         xfs_mount_t             *mp;            /* mount structure */
5325         xfs_extnum_t            nextents;       /* size of extent list */
5326         xfs_bmbt_irec_t         prev;           /* previous extent list entry */
5327         xfs_fileoff_t           start;          /* first file offset deleted */
5328         int                     tmp_logflags;   /* partial logging flags */
5329         int                     wasdel;         /* was a delayed alloc extent */
5330         int                     whichfork;      /* data or attribute fork */
5331         int                     rsvd;           /* OK to allocate reserved blocks */
5332         xfs_fsblock_t           sum;
5333
5334         xfs_bunmap_trace(ip, bno, len, flags, (inst_t *)__return_address);
5335         whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
5336                 XFS_ATTR_FORK : XFS_DATA_FORK;
5337         ifp = XFS_IFORK_PTR(ip, whichfork);
5338         if (unlikely(
5339             XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
5340             XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
5341                 XFS_ERROR_REPORT("xfs_bunmapi", XFS_ERRLEVEL_LOW,
5342                                  ip->i_mount);
5343                 return XFS_ERROR(EFSCORRUPTED);
5344         }
5345         mp = ip->i_mount;
5346         if (XFS_FORCED_SHUTDOWN(mp))
5347                 return XFS_ERROR(EIO);
5348         rsvd = (flags & XFS_BMAPI_RSVBLOCKS) != 0;
5349         ASSERT(len > 0);
5350         ASSERT(nexts >= 0);
5351         ASSERT(ifp->if_ext_max ==
5352                XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
5353         if (!(ifp->if_flags & XFS_IFEXTENTS) &&
5354             (error = xfs_iread_extents(tp, ip, whichfork)))
5355                 return error;
5356         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
5357         if (nextents == 0) {
5358                 *done = 1;
5359                 return 0;
5360         }
5361         XFS_STATS_INC(xs_blk_unmap);
5362         isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
5363         start = bno;
5364         bno = start + len - 1;
5365         ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
5366                 &prev);
5367         /*
5368          * Check to see if the given block number is past the end of the
5369          * file; back up to the last block if so...
5370          */
5371         if (eof) {
5372                 ep = &ifp->if_u1.if_extents[--lastx];
5373                 xfs_bmbt_get_all(ep, &got);
5374                 bno = got.br_startoff + got.br_blockcount - 1;
5375         }
5376         logflags = 0;
5377         if (ifp->if_flags & XFS_IFBROOT) {
5378                 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
5379                 cur = xfs_btree_init_cursor(mp, tp, NULL, 0, XFS_BTNUM_BMAP, ip,
5380                         whichfork);
5381                 cur->bc_private.b.firstblock = *firstblock;
5382                 cur->bc_private.b.flist = flist;
5383                 cur->bc_private.b.flags = 0;
5384         } else
5385                 cur = NULL;
5386         extno = 0;
5387         while (bno != (xfs_fileoff_t)-1 && bno >= start && lastx >= 0 &&
5388                (nexts == 0 || extno < nexts)) {
5389                 /*
5390                  * Is the found extent after a hole in which bno lives?
5391                  * Just back up to the previous extent, if so.
5392                  */
5393                 if (got.br_startoff > bno) {
5394                         if (--lastx < 0)
5395                                 break;
5396                         ep--;
5397                         xfs_bmbt_get_all(ep, &got);
5398                 }
5399                 /*
5400                  * Is the last block of this extent before the range
5401                  * we're supposed to delete?  If so, we're done.
5402                  */
5403                 bno = XFS_FILEOFF_MIN(bno,
5404                         got.br_startoff + got.br_blockcount - 1);
5405                 if (bno < start)
5406                         break;
5407                 /*
5408                  * Then deal with the (possibly delayed) allocated space
5409                  * we found.
5410                  */
5411                 ASSERT(ep != NULL);
5412                 del = got;
5413                 wasdel = ISNULLSTARTBLOCK(del.br_startblock);
5414                 if (got.br_startoff < start) {
5415                         del.br_startoff = start;
5416                         del.br_blockcount -= start - got.br_startoff;
5417                         if (!wasdel)
5418                                 del.br_startblock += start - got.br_startoff;
5419                 }
5420                 if (del.br_startoff + del.br_blockcount > bno + 1)
5421                         del.br_blockcount = bno + 1 - del.br_startoff;
5422                 sum = del.br_startblock + del.br_blockcount;
5423                 if (isrt &&
5424                     (mod = do_mod(sum, mp->m_sb.sb_rextsize))) {
5425                         /*
5426                          * Realtime extent not lined up at the end.
5427                          * The extent could have been split into written
5428                          * and unwritten pieces, or we could just be
5429                          * unmapping part of it.  But we can't really
5430                          * get rid of part of a realtime extent.
5431                          */
5432                         if (del.br_state == XFS_EXT_UNWRITTEN ||
5433                             !XFS_SB_VERSION_HASEXTFLGBIT(&mp->m_sb)) {
5434                                 /*
5435                                  * This piece is unwritten, or we're not
5436                                  * using unwritten extents.  Skip over it.
5437                                  */
5438                                 ASSERT(bno >= mod);
5439                                 bno -= mod > del.br_blockcount ?
5440                                         del.br_blockcount : mod;
5441                                 if (bno < got.br_startoff) {
5442                                         if (--lastx >= 0)
5443                                                 xfs_bmbt_get_all(--ep, &got);
5444                                 }
5445                                 continue;
5446                         }
5447                         /*
5448                          * It's written, turn it unwritten.
5449                          * This is better than zeroing it.
5450                          */
5451                         ASSERT(del.br_state == XFS_EXT_NORM);
5452                         ASSERT(xfs_trans_get_block_res(tp) > 0);
5453                         /*
5454                          * If this spans a realtime extent boundary,
5455                          * chop it back to the start of the one we end at.
5456                          */
5457                         if (del.br_blockcount > mod) {
5458                                 del.br_startoff += del.br_blockcount - mod;
5459                                 del.br_startblock += del.br_blockcount - mod;
5460                                 del.br_blockcount = mod;
5461                         }
5462                         del.br_state = XFS_EXT_UNWRITTEN;
5463                         error = xfs_bmap_add_extent(ip, lastx, &cur, &del,
5464                                 firstblock, flist, &logflags, XFS_DATA_FORK, 0);
5465                         if (error)
5466                                 goto error0;
5467                         goto nodelete;
5468                 }
5469                 if (isrt && (mod = do_mod(del.br_startblock, mp->m_sb.sb_rextsize))) {
5470                         /*
5471                          * Realtime extent is lined up at the end but not
5472                          * at the front.  We'll get rid of full extents if
5473                          * we can.
5474                          */
5475                         mod = mp->m_sb.sb_rextsize - mod;
5476                         if (del.br_blockcount > mod) {
5477                                 del.br_blockcount -= mod;
5478                                 del.br_startoff += mod;
5479                                 del.br_startblock += mod;
5480                         } else if ((del.br_startoff == start &&
5481                                     (del.br_state == XFS_EXT_UNWRITTEN ||
5482                                      xfs_trans_get_block_res(tp) == 0)) ||
5483                                    !XFS_SB_VERSION_HASEXTFLGBIT(&mp->m_sb)) {
5484                                 /*
5485                                  * Can't make it unwritten.  There isn't
5486                                  * a full extent here so just skip it.
5487                                  */
5488                                 ASSERT(bno >= del.br_blockcount);
5489                                 bno -= del.br_blockcount;
5490                                 if (bno < got.br_startoff) {
5491                                         if (--lastx >= 0)
5492                                                 xfs_bmbt_get_all(--ep, &got);
5493                                 }
5494                                 continue;
5495                         } else if (del.br_state == XFS_EXT_UNWRITTEN) {
5496                                 /*
5497                                  * This one is already unwritten.
5498                                  * It must have a written left neighbor.
5499                                  * Unwrite the killed part of that one and
5500                                  * try again.
5501                                  */
5502                                 ASSERT(lastx > 0);
5503                                 xfs_bmbt_get_all(ep - 1, &prev);
5504                                 ASSERT(prev.br_state == XFS_EXT_NORM);
5505                                 ASSERT(!ISNULLSTARTBLOCK(prev.br_startblock));
5506                                 ASSERT(del.br_startblock ==
5507                                        prev.br_startblock + prev.br_blockcount);
5508                                 if (prev.br_startoff < start) {
5509                                         mod = start - prev.br_startoff;
5510                                         prev.br_blockcount -= mod;
5511                                         prev.br_startblock += mod;
5512                                         prev.br_startoff = start;
5513                                 }
5514                                 prev.br_state = XFS_EXT_UNWRITTEN;
5515                                 error = xfs_bmap_add_extent(ip, lastx - 1, &cur,
5516                                         &prev, firstblock, flist, &logflags,
5517                                         XFS_DATA_FORK, 0);
5518                                 if (error)
5519                                         goto error0;
5520                                 goto nodelete;
5521                         } else {
5522                                 ASSERT(del.br_state == XFS_EXT_NORM);
5523                                 del.br_state = XFS_EXT_UNWRITTEN;
5524                                 error = xfs_bmap_add_extent(ip, lastx, &cur,
5525                                         &del, firstblock, flist, &logflags,
5526                                         XFS_DATA_FORK, 0);
5527                                 if (error)
5528                                         goto error0;
5529                                 goto nodelete;
5530                         }
5531                 }
5532                 if (wasdel) {
5533                         ASSERT(STARTBLOCKVAL(del.br_startblock) > 0);
5534                         /* Update realtime/data freespace, unreserve quota */
5535                         if (isrt) {
5536                                 xfs_filblks_t rtexts;
5537
5538                                 rtexts = XFS_FSB_TO_B(mp, del.br_blockcount);
5539                                 do_div(rtexts, mp->m_sb.sb_rextsize);
5540                                 xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS,
5541                                                 (int)rtexts, rsvd);
5542                                 (void)XFS_TRANS_RESERVE_QUOTA_NBLKS(mp,
5543                                         NULL, ip, -((long)del.br_blockcount), 0,
5544                                         XFS_QMOPT_RES_RTBLKS);
5545                         } else {
5546                                 xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS,
5547                                                 (int)del.br_blockcount, rsvd);
5548                                 (void)XFS_TRANS_RESERVE_QUOTA_NBLKS(mp,
5549                                         NULL, ip, -((long)del.br_blockcount), 0,
5550                                         XFS_QMOPT_RES_REGBLKS);
5551                         }
5552                         ip->i_delayed_blks -= del.br_blockcount;
5553                         if (cur)
5554                                 cur->bc_private.b.flags |=
5555                                         XFS_BTCUR_BPRV_WASDEL;
5556                 } else if (cur)
5557                         cur->bc_private.b.flags &= ~XFS_BTCUR_BPRV_WASDEL;
5558                 /*
5559                  * If the directory code is running with no block
5560                  * reservation, the deleted range is in the middle of its
5561                  * extent, and the resulting insert of an extent would
5562                  * force a conversion to btree format, then reject the
5563                  * delete.  The calling code will then swap blocks around
5564                  * instead.
5565                  * We have to do this now, rather than waiting for the
5566                  * conversion to btree format, since the transaction
5567                  * will be dirty by then.
5568                  */
5569                 if (!wasdel && xfs_trans_get_block_res(tp) == 0 &&
5570                     XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
5571                     XFS_IFORK_NEXTENTS(ip, whichfork) >= ifp->if_ext_max &&
5572                     del.br_startoff > got.br_startoff &&
5573                     del.br_startoff + del.br_blockcount <
5574                     got.br_startoff + got.br_blockcount) {
5575                         error = XFS_ERROR(ENOSPC);
5576                         goto error0;
5577                 }
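                /*
                 * Delete the (possibly trimmed) extent "del" from this fork.
                 */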
5578                 error = xfs_bmap_del_extent(ip, tp, lastx, flist, cur, &del,
5579                         &tmp_logflags, whichfork, rsvd);
5580                 logflags |= tmp_logflags;
5581                 if (error)
5582                         goto error0;
5583                 bno = del.br_startoff - 1;
5584 nodelete:
5585                 lastx = ifp->if_lastex;
5586                 /*
5587                  * If not done, go on to the next (previous) record.
5588                  * Reset ep in case the extents array was reallocated.
5589                  */
5590                 ep = &ifp->if_u1.if_extents[lastx];
5591                 if (bno != (xfs_fileoff_t)-1 && bno >= start) {
5592                         if (lastx >= XFS_IFORK_NEXTENTS(ip, whichfork) ||
5593                             xfs_bmbt_get_startoff(ep) > bno) {
5594                                 lastx--;
5595                                 ep--;
5596                         }
5597                         if (lastx >= 0)
5598                                 xfs_bmbt_get_all(ep, &got);
5599                         extno++;
5600                 }
5601         }
5602         ifp->if_lastex = lastx;
5603         *done = bno == (xfs_fileoff_t)-1 || bno < start || lastx < 0;
5604         ASSERT(ifp->if_ext_max ==
5605                XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
5606         /*
5607          * Convert to a btree if necessary.
5608          */
5609         if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
5610             XFS_IFORK_NEXTENTS(ip, whichfork) > ifp->if_ext_max) {
5611                 ASSERT(cur == NULL);
5612                 error = xfs_bmap_extents_to_btree(tp, ip, firstblock, flist,
5613                         &cur, 0, &tmp_logflags, whichfork);
5614                 logflags |= tmp_logflags;
5615                 if (error)
5616                         goto error0;
5617         }
5618         /*
5619          * Convert from btree back to extents format, passing in the existing cursor.
5620          */
5621         else if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
5622                  XFS_IFORK_NEXTENTS(ip, whichfork) <= ifp->if_ext_max) {
5623                 ASSERT(cur != NULL);
5624                 error = xfs_bmap_btree_to_extents(tp, ip, cur, &tmp_logflags,
5625                         whichfork);
5626                 logflags |= tmp_logflags;
5627                 if (error)
5628                         goto error0;
5629         }
5630         /*
5631          * A conversion from extents back to local format is not done here.
5632          */
5633         ASSERT(ifp->if_ext_max ==
5634                XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
5635         error = 0;
5636 error0:
5637         /*
5638          * Log everything.  Do this after the conversion, since there's no
5639          * point in logging the extent list if we've converted to btree format.
5640          */
5641         if ((logflags & XFS_ILOG_FEXT(whichfork)) &&
5642             XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
5643                 logflags &= ~XFS_ILOG_FEXT(whichfork);
5644         else if ((logflags & XFS_ILOG_FBROOT(whichfork)) &&
5645                  XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
5646                 logflags &= ~XFS_ILOG_FBROOT(whichfork);
5647         /*
5648          * Log the inode even in the error case; if the transaction
5649          * is dirty we'll need to shut down the filesystem.
5650          */
5651         if (logflags)
5652                 xfs_trans_log_inode(tp, ip, logflags);
5653         if (cur) {
5654                 if (!error) {
5655                         *firstblock = cur->bc_private.b.firstblock;
5656                         cur->bc_private.b.allocated = 0;
5657                 }
5658                 xfs_btree_del_cursor(cur,
5659                         error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
5660         }
5661         return error;
5662 }
5663
5664 /*
5665  * Fcntl interface to xfs_bmapi.
5666  */
5667 int                                             /* error code */
5668 xfs_getbmap(
5669         bhv_desc_t              *bdp,           /* XFS behavior descriptor */
5670         struct getbmap          *bmv,           /* user bmap structure */
5671         void                    __user *ap,     /* pointer to user's array */
5672         int                     interface)      /* interface flags */
5673 {
5674         __int64_t               bmvend;         /* last block requested */
5675         int                     error;          /* return value */
5676         __int64_t               fixlen;         /* length for -1 case */
5677         int                     i;              /* extent number */
5678         xfs_inode_t             *ip;            /* xfs incore inode pointer */
5679         vnode_t                 *vp;            /* corresponding vnode */
5680         int                     lock;           /* lock state */
5681         xfs_bmbt_irec_t         *map;           /* buffer for user's data */
5682         xfs_mount_t             *mp;            /* file system mount point */
5683         int                     nex;            /* # of user extents we can do */
5684         int                     nexleft;        /* # of user extents left */
5685         int                     subnex;         /* # of bmapi's we can do at once */
5686         int                     nmap;           /* number of map entries */
5687         struct getbmap          out;            /* output structure */
5688         int                     whichfork;      /* data or attr fork */
5689         int                     prealloced;     /* this is a file with
5690                                                  * preallocated data space */
5691         int                     sh_unwritten;   /* true, if unwritten */
5692                                                 /* extents listed separately */
5693         int                     bmapi_flags;    /* flags for xfs_bmapi */
5694         __int32_t               oflags;         /* getbmapx bmv_oflags field */
5695
5696         vp = BHV_TO_VNODE(bdp);
5697         ip = XFS_BHVTOI(bdp);
5698         mp = ip->i_mount;
5699
5700         whichfork = interface & BMV_IF_ATTRFORK ? XFS_ATTR_FORK : XFS_DATA_FORK;
5701         sh_unwritten = (interface & BMV_IF_PREALLOC) != 0;
5702
5703         /*      If the BMV_IF_NO_DMAPI_READ interface bit is specified, do not
5704          *      generate a DMAPI read event.  Otherwise, if the DM_EVENT_READ
5705          *      bit is set for the file, generate a read event in order
5706          *      that the DMAPI application may do its thing before we return
5707          *      the extents.  Usually this means restoring user file data to
5708          *      regions of the file that look like holes.
5709          *
5710          *      The "old behavior" (from XFS_IOC_GETBMAP) is to not specify
5711          *      BMV_IF_NO_DMAPI_READ so that read events are generated.
5712          *      If this were not true, callers of ioctl( XFS_IOC_GETBMAP )
5713          *      could misinterpret holes in a DMAPI file as true holes,
5714          *      when in fact they may represent offline user data.
5715          */
5716         if (   (interface & BMV_IF_NO_DMAPI_READ) == 0
5717             && DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ)
5718             && whichfork == XFS_DATA_FORK) {
5719
5720                 error = XFS_SEND_DATA(mp, DM_EVENT_READ, vp, 0, 0, 0, NULL);
5721                 if (error)
5722                         return XFS_ERROR(error);
5723         }
5724
5725         if (whichfork == XFS_ATTR_FORK) {
5726                 if (XFS_IFORK_Q(ip)) {
5727                         if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS &&
5728                             ip->i_d.di_aformat != XFS_DINODE_FMT_BTREE &&
5729                             ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)
5730                                 return XFS_ERROR(EINVAL);
5731                 } else if (unlikely(
5732                            ip->i_d.di_aformat != 0 &&
5733                            ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS)) {
5734                         XFS_ERROR_REPORT("xfs_getbmap", XFS_ERRLEVEL_LOW,
5735                                          ip->i_mount);
5736                         return XFS_ERROR(EFSCORRUPTED);
5737                 }
5738         } else if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
5739                    ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
5740                    ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
5741                 return XFS_ERROR(EINVAL);
5742         if (whichfork == XFS_DATA_FORK) {
5743                 if ((ip->i_d.di_extsize && (ip->i_d.di_flags &
5744                                 (XFS_DIFLAG_REALTIME|XFS_DIFLAG_EXTSIZE))) ||
5745                     ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){
5746                         prealloced = 1;
5747                         fixlen = XFS_MAXIOFFSET(mp);
5748                 } else {
5749                         prealloced = 0;
5750                         fixlen = ip->i_d.di_size;
5751                 }
5752         } else {
5753                 prealloced = 0;
5754                 fixlen = 1LL << 32;
5755         }
5756
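        /*
         * A bmv_length of -1 means "map to the end of the file"; clamp it
         * using the fixlen value computed above.
         */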
5757         if (bmv->bmv_length == -1) {
5758                 fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, fixlen));
5759                 bmv->bmv_length = MAX( (__int64_t)(fixlen - bmv->bmv_offset),
5760                                         (__int64_t)0);
5761         } else if (bmv->bmv_length < 0)
5762                 return XFS_ERROR(EINVAL);
5763         if (bmv->bmv_length == 0) {
5764                 bmv->bmv_entries = 0;
5765                 return 0;
5766         }
5767         nex = bmv->bmv_count - 1;
5768         if (nex <= 0)
5769                 return XFS_ERROR(EINVAL);
5770         bmvend = bmv->bmv_offset + bmv->bmv_length;
5771
5772         xfs_ilock(ip, XFS_IOLOCK_SHARED);
5773
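        /*
         * If the data fork has delayed allocation blocks, flush dirty pages
         * first so the extent map we report reflects them.
         */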
5774         if (whichfork == XFS_DATA_FORK && ip->i_delayed_blks) {
5775                 /* xfs_fsize_t last_byte = xfs_file_last_byte(ip); */
5776                 VOP_FLUSH_PAGES(vp, (xfs_off_t)0, -1, 0, FI_REMAPF, error);
5777         }
5778
5779         ASSERT(whichfork == XFS_ATTR_FORK || ip->i_delayed_blks == 0);
5780
5781         lock = xfs_ilock_map_shared(ip);
5782
5783         /*
5784          * Don't let nex be bigger than the number of extents
5785          * we can have assuming alternating holes and real extents.
5786          */
5787         if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1)
5788                 nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1;
5789
5790         bmapi_flags = XFS_BMAPI_AFLAG(whichfork) |
5791                         ((sh_unwritten) ? 0 : XFS_BMAPI_IGSTATE);
5792
5793         /*
5794          * Allocate enough space to handle "subnex" maps at a time.
5795          */
5796         subnex = 16;
5797         map = kmem_alloc(subnex * sizeof(*map), KM_SLEEP);
5798
5799         bmv->bmv_entries = 0;
5800
5801         if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0) {
5802                 error = 0;
5803                 goto unlock_and_return;
5804         }
5805
5806         nexleft = nex;
5807
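        /*
         * Walk the mappings "subnex" at a time, converting each one to
         * getbmap format and copying it out to the user's buffer.
         */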
5808         do {
5809                 nmap = (nexleft > subnex) ? subnex : nexleft;
5810                 error = xfs_bmapi(NULL, ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
5811                                   XFS_BB_TO_FSB(mp, bmv->bmv_length),
5812                                   bmapi_flags, NULL, 0, map, &nmap, NULL);
5813                 if (error)
5814                         goto unlock_and_return;
5815                 ASSERT(nmap <= subnex);
5816
5817                 for (i = 0; i < nmap && nexleft && bmv->bmv_length; i++) {
5818                         nexleft--;
5819                         oflags = (map[i].br_state == XFS_EXT_UNWRITTEN) ?
5820                                         BMV_OF_PREALLOC : 0;
5821                         out.bmv_offset = XFS_FSB_TO_BB(mp, map[i].br_startoff);
5822                         out.bmv_length = XFS_FSB_TO_BB(mp, map[i].br_blockcount);
5823                         ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
5824                         if (map[i].br_startblock == HOLESTARTBLOCK &&
5825                            ((prealloced && out.bmv_offset + out.bmv_length == bmvend) ||
5826                              whichfork == XFS_ATTR_FORK )) {
5827                                 /*
5828                                  * Came to a hole at the end of the file or
5829                                  * the end of the attribute fork.
5830                                  */
5831                                 goto unlock_and_return;
5832                         } else {
5833                                 out.bmv_block =
5834                                     (map[i].br_startblock == HOLESTARTBLOCK) ?
5835                                         -1 :
5836                                         XFS_FSB_TO_DB(ip, map[i].br_startblock);
5837
5838                                 /* Copy out either a getbmap or getbmapx structure. */
5839                                 if (interface & BMV_IF_EXTENDED) {
5840                                         struct  getbmapx        outx;
5841
5842                                         GETBMAP_CONVERT(out,outx);
5843                                         outx.bmv_oflags = oflags;
5844                                         outx.bmv_unused1 = outx.bmv_unused2 = 0;
5845                                         if (copy_to_user(ap, &outx,
5846                                                         sizeof(outx))) {
5847                                                 error = XFS_ERROR(EFAULT);
5848                                                 goto unlock_and_return;
5849                                         }
5850                                 } else {
5851                                         if (copy_to_user(ap, &out,
5852                                                         sizeof(out))) {
5853                                                 error = XFS_ERROR(EFAULT);
5854                                                 goto unlock_and_return;
5855                                         }
5856                                 }
5857                                 bmv->bmv_offset =
5858                                         out.bmv_offset + out.bmv_length;
5859                                 bmv->bmv_length = MAX((__int64_t)0,
5860                                         (__int64_t)(bmvend - bmv->bmv_offset));
5861                                 bmv->bmv_entries++;
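                                /*
                                 * Advance the user buffer pointer by one
                                 * entry of the appropriate structure size.
                                 */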
5862                                 ap = (interface & BMV_IF_EXTENDED) ?
5863                                                 (void __user *)
5864                                         ((struct getbmapx __user *)ap + 1) :
5865                                                 (void __user *)
5866                                         ((struct getbmap __user *)ap + 1);
5867                         }
5868                 }
5869         } while (nmap && nexleft && bmv->bmv_length);
5870
5871 unlock_and_return:
5872         xfs_iunlock_map_shared(ip, lock);
5873         xfs_iunlock(ip, XFS_IOLOCK_SHARED);
5874
5875         kmem_free(map, subnex * sizeof(*map));
5876
5877         return error;
5878 }
5879
5880 /*
5881  * Check the last inode extent to determine whether this allocation will result
5882  * in blocks being allocated at the end of the file. When we allocate new data
5883  * blocks at the end of the file which do not start at the previous data block,
5884  * we will try to align the new blocks at stripe unit boundaries.
5885  */
5886 STATIC int                              /* error */
5887 xfs_bmap_isaeof(
5888         xfs_inode_t     *ip,            /* incore inode pointer */
5889         xfs_fileoff_t   off,            /* file offset in fsblocks */
5890         int             whichfork,      /* data or attribute fork */
5891         char            *aeof)          /* return value */
5892 {
5893         int             error;          /* error return value */
5894         xfs_ifork_t     *ifp;           /* inode fork pointer */
5895         xfs_bmbt_rec_t  *lastrec;       /* extent list entry pointer */
5896         xfs_extnum_t    nextents;       /* size of extent list */
5897         xfs_bmbt_irec_t s;              /* expanded extent list entry */
5898
5899         ASSERT(whichfork == XFS_DATA_FORK);
5900         ifp = XFS_IFORK_PTR(ip, whichfork);
5901         if (!(ifp->if_flags & XFS_IFEXTENTS) &&
5902             (error = xfs_iread_extents(NULL, ip, whichfork)))
5903                 return error;
5904         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
5905         if (nextents == 0) {
5906                 *aeof = 1;
5907                 return 0;
5908         }
5909         /*
5910          * Go to the last extent
5911          */
5912         lastrec = &ifp->if_u1.if_extents[nextents - 1];
5913         xfs_bmbt_get_all(lastrec, &s);
5914         /*
5915          * Check we are allocating in the last extent (for delayed allocations)
5916          * or past the last extent for non-delayed allocations.
5917          */
5918         *aeof = (off >= s.br_startoff &&
5919                  off < s.br_startoff + s.br_blockcount &&
5920                  ISNULLSTARTBLOCK(s.br_startblock)) ||
5921                 off >= s.br_startoff + s.br_blockcount;
5922         return 0;
5923 }
5924
5925 /*
5926  * Check if the endoff is outside the last extent. If so, the caller will grow
5927  * the allocation to a stripe unit boundary.
5928  */
5929 int                                     /* error */
5930 xfs_bmap_eof(
5931         xfs_inode_t     *ip,            /* incore inode pointer */
5932         xfs_fileoff_t   endoff,         /* file offset in fsblocks */
5933         int             whichfork,      /* data or attribute fork */
5934         int             *eof)           /* result value */
5935 {
5936         xfs_fsblock_t   blockcount;     /* extent block count */
5937         int             error;          /* error return value */
5938         xfs_ifork_t     *ifp;           /* inode fork pointer */
5939         xfs_bmbt_rec_t  *lastrec;       /* extent list entry pointer */
5940         xfs_extnum_t    nextents;       /* size of extent list */
5941         xfs_fileoff_t   startoff;       /* extent starting file offset */
5942
5943         ASSERT(whichfork == XFS_DATA_FORK);
5944         ifp = XFS_IFORK_PTR(ip, whichfork);
5945         if (!(ifp->if_flags & XFS_IFEXTENTS) &&
5946             (error = xfs_iread_extents(NULL, ip, whichfork)))
5947                 return error;
5948         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
5949         if (nextents == 0) {
5950                 *eof = 1;
5951                 return 0;
5952         }
5953         /*
5954          * Go to the last extent
5955          */
5956         lastrec = &ifp->if_u1.if_extents[nextents - 1];
5957         startoff = xfs_bmbt_get_startoff(lastrec);
5958         blockcount = xfs_bmbt_get_blockcount(lastrec);
5959         *eof = endoff >= startoff + blockcount;
5960         return 0;
5961 }
5962
5963 #ifdef DEBUG
5964 /*
5965  * Check that the extents list for the inode ip is in the right order.
5966  */
5967 STATIC void
5968 xfs_bmap_check_extents(
5969         xfs_inode_t             *ip,            /* incore inode pointer */
5970         int                     whichfork)      /* data or attr fork */
5971 {
5972         xfs_bmbt_rec_t          *base;          /* base of extents list */
5973         xfs_bmbt_rec_t          *ep;            /* current extent entry */
5974         xfs_ifork_t             *ifp;           /* inode fork pointer */
5975         xfs_extnum_t            nextents;       /* number of extents in list */
5976
5977         ifp = XFS_IFORK_PTR(ip, whichfork);
5978         ASSERT(ifp->if_flags & XFS_IFEXTENTS);
5979         base = ifp->if_u1.if_extents;
5980         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
5981         for (ep = base; ep < &base[nextents - 1]; ep++) {
5982                 xfs_btree_check_rec(XFS_BTNUM_BMAP, (void *)ep,
5983                         (void *)(ep + 1));
5984         }
5985 }
5986
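/*
 * Find the buffer for the given block address "bno": look first among the
 * buffers held by the btree cursor, then among the buf log items attached
 * to the cursor's transaction.  Returns NULL if the buffer is not found.
 */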
5987 STATIC
5988 xfs_buf_t *
5989 xfs_bmap_get_bp(
5990         xfs_btree_cur_t         *cur,
5991         xfs_fsblock_t           bno)
5992 {
5993         int i;
5994         xfs_buf_t *bp;
5995
5996         if (!cur)
5997                 return(NULL);
5998
5999         bp = NULL;
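        /* Look first through the buffers currently held by the cursor. */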
6000         for(i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
6001                 bp = cur->bc_bufs[i];
6002                 if (!bp) break;
6003                 if (XFS_BUF_ADDR(bp) == bno)
6004                         break;  /* Found it */
6005         }
6006         if (i == XFS_BTREE_MAXLEVELS)
6007                 bp = NULL;
6008
6009         if (!bp) { /* Chase down all the log items to see if the bp is there */
6010                 xfs_log_item_chunk_t    *licp;
6011                 xfs_trans_t             *tp;
6012
6013                 tp = cur->bc_tp;
6014                 licp = &tp->t_items;
6015                 while (!bp && licp != NULL) {
6016                         if (XFS_LIC_ARE_ALL_FREE(licp)) {
6017                                 licp = licp->lic_next;
6018                                 continue;
6019                         }
6020                         for (i = 0; i < licp->lic_unused; i++) {
6021                                 xfs_log_item_desc_t     *lidp;
6022                                 xfs_log_item_t          *lip;
6023                                 xfs_buf_log_item_t      *bip;
6024                                 xfs_buf_t               *lbp;
6025
6026                                 if (XFS_LIC_ISFREE(licp, i)) {
6027                                         continue;
6028                                 }
6029
6030                                 lidp = XFS_LIC_SLOT(licp, i);
6031                                 lip = lidp->lid_item;
6032                                 if (lip->li_type != XFS_LI_BUF)
6033                                         continue;
6034
6035                                 bip = (xfs_buf_log_item_t *)lip;
6036                                 lbp = bip->bli_buf;
6037
6038                                 if (XFS_BUF_ADDR(lbp) == bno) {
6039                                         bp = lbp;
6040                                         break; /* Found it */
6041                                 }
6042                         }
6043                         licp = licp->lic_next;
6044                 }
6045         }
6046         return(bp);
6047 }
6048
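/*
 * Sanity check a single bmap btree block: verify that the keys are in
 * increasing order and that no two block pointers within the block are
 * duplicates.  "root" is nonzero when checking the incore root block.
 */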
6049 void
6050 xfs_check_block(
6051         xfs_bmbt_block_t        *block,
6052         xfs_mount_t             *mp,
6053         int                     root,
6054         short                   sz)
6055 {
6056         int                     i, j, dmxr;
6057         xfs_bmbt_ptr_t          *pp, *thispa;   /* pointer to block address */
6058         xfs_bmbt_key_t          *prevp, *keyp;
6059
6060         ASSERT(be16_to_cpu(block->bb_level) > 0);
6061
6062         prevp = NULL;
6063         for( i = 1; i <= be16_to_cpu(block->bb_numrecs); i++) {
6064                 dmxr = mp->m_bmap_dmxr[0];
6065
6066                 if (root) {
6067                         keyp = XFS_BMAP_BROOT_KEY_ADDR(block, i, sz);
6068                 } else {
6069                         keyp = XFS_BTREE_KEY_ADDR(mp->m_sb.sb_blocksize,
6070                                 xfs_bmbt, block, i, dmxr);
6071                 }
6072
6073                 if (prevp) {
6074                         xfs_btree_check_key(XFS_BTNUM_BMAP, prevp, keyp);
6075                 }
6076                 prevp = keyp;
6077
6078                 /*
6079                  * Compare the block numbers to see if there are dups.
6080                  */
6081
6082                 if (root) {
6083                         pp = XFS_BMAP_BROOT_PTR_ADDR(block, i, sz);
6084                 } else {
6085                         pp = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize,
6086                                 xfs_bmbt, block, i, dmxr);
6087                 }
6088                 for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
6089                         if (root) {
6090                                 thispa = XFS_BMAP_BROOT_PTR_ADDR(block, j, sz);
6091                         } else {
6092                                 thispa = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize,
6093                                         xfs_bmbt, block, j, dmxr);
6094                         }
6095                         if (INT_GET(*thispa, ARCH_CONVERT) ==
6096                             INT_GET(*pp, ARCH_CONVERT)) {
6097                                 cmn_err(CE_WARN, "%s: thispa(%d) == pp(%d) %Ld",
6098                                         __FUNCTION__, j, i,
6099                                         INT_GET(*thispa, ARCH_CONVERT));
6100                                 panic("%s: ptrs are equal in node\n",
6101                                         __FUNCTION__);
6102                         }
6103                 }
6104         }
6105 }
6106
6107 /*
6108  * Check that the extents for the inode ip are in the right order in all
6109  * btree leaves.
6110  */
6111
6112 STATIC void
6113 xfs_bmap_check_leaf_extents(
6114         xfs_btree_cur_t         *cur,   /* btree cursor or null */
6115         xfs_inode_t             *ip,            /* incore inode pointer */
6116         int                     whichfork)      /* data or attr fork */
6117 {
6118         xfs_bmbt_block_t        *block; /* current btree block */
6119         xfs_fsblock_t           bno;    /* block # of "block" */
6120         xfs_buf_t               *bp;    /* buffer for "block" */
6121         int                     error;  /* error return value */
6122         xfs_extnum_t            i=0;    /* index into the extents list */
6123         xfs_ifork_t             *ifp;   /* fork structure */
6124         int                     level;  /* btree level, for checking */
6125         xfs_mount_t             *mp;    /* file system mount structure */
6126         xfs_bmbt_ptr_t          *pp;    /* pointer to block address */
6127         xfs_bmbt_rec_t          *ep, *lastp;    /* extent pointers in block entry */
6128         int                     bp_release = 0;
6129
6130         if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) {
6131                 return;
6132         }
6133
6134         bno = NULLFSBLOCK;
6135         mp = ip->i_mount;
6136         ifp = XFS_IFORK_PTR(ip, whichfork);
6137         block = ifp->if_broot;
6138         /*
6139          * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
6140          */
6141         level = be16_to_cpu(block->bb_level);
6142         ASSERT(level > 0);
6143         xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
6144         pp = XFS_BMAP_BROOT_PTR_ADDR(block, 1, ifp->if_broot_bytes);
6145         ASSERT(INT_GET(*pp, ARCH_CONVERT) != NULLDFSBNO);
6146         ASSERT(XFS_FSB_TO_AGNO(mp, INT_GET(*pp, ARCH_CONVERT)) < mp->m_sb.sb_agcount);
6147         ASSERT(XFS_FSB_TO_AGBNO(mp, INT_GET(*pp, ARCH_CONVERT)) < mp->m_sb.sb_agblocks);
6148         bno = INT_GET(*pp, ARCH_CONVERT);
6149         /*
6150          * Go down the tree until leaf level is reached, following the first
6151          * pointer (leftmost) at each level.
6152          */
6153         while (level-- > 0) {
6154                 /* See if buf is in cur first */
6155                 bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
6156                 if (bp) {
6157                         bp_release = 0;
6158                 } else {
6159                         bp_release = 1;
6160                 }
6161                 if (!bp && (error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
6162                                 XFS_BMAP_BTREE_REF)))
6163                         goto error_norelse;
6164                 block = XFS_BUF_TO_BMBT_BLOCK(bp);
6165                 XFS_WANT_CORRUPTED_GOTO(
6166                         XFS_BMAP_SANITY_CHECK(mp, block, level),
6167                         error0);
6168                 if (level == 0)
6169                         break;
6170
6171                 /*
6172                  * Check this block for basic sanity (increasing keys and
6173                  * no duplicate blocks).
6174                  */
6175
6176                 xfs_check_block(block, mp, 0, 0);
6177                 pp = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt, block,
6178                         1, mp->m_bmap_dmxr[1]);
6179                 XFS_WANT_CORRUPTED_GOTO(XFS_FSB_SANITY_CHECK(mp, INT_GET(*pp, ARCH_CONVERT)), error0);
6180                 bno = INT_GET(*pp, ARCH_CONVERT);
6181                 if (bp_release) {
6182                         bp_release = 0;
6183                         xfs_trans_brelse(NULL, bp);
6184                 }
6185         }
6186
6187         /*
6188          * Here with bp and block set to the leftmost leaf node in the tree.
6189          */
6190         i = 0;
6191
6192         /*
6193          * Loop over all leaf nodes checking that all extents are in the right order.
6194          */
6195         lastp = NULL;
6196         for (;;) {
6197                 xfs_bmbt_rec_t  *frp;
6198                 xfs_fsblock_t   nextbno;
6199                 xfs_extnum_t    num_recs;
6200
6201
6202                 num_recs = be16_to_cpu(block->bb_numrecs);
6203
6204                 /*
6205                  * Get the block number of the next leaf block, if any.
6206                  */
6207
6208                 nextbno = be64_to_cpu(block->bb_rightsib);
6209
6210                 /*
6211                  * Check all the extents to make sure they are OK.
6212                  * If we had a previous block, the last entry should
6213                  * conform with the first entry in this one.
6214                  */
6215
6216                 frp = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize, xfs_bmbt,
6217                         block, 1, mp->m_bmap_dmxr[0]);
6218
6219                 for (ep = frp;ep < frp + (num_recs - 1); ep++) {
6220                         if (lastp) {
6221                                 xfs_btree_check_rec(XFS_BTNUM_BMAP,
6222                                         (void *)lastp, (void *)ep);
6223                         }
6224                         xfs_btree_check_rec(XFS_BTNUM_BMAP, (void *)ep,
6225                                 (void *)(ep + 1));
6226                 }
6227                 lastp = frp + num_recs - 1; /* For the next iteration */
6228
6229                 i += num_recs;
6230                 if (bp_release) {
6231                         bp_release = 0;
6232                         xfs_trans_brelse(NULL, bp);
6233                 }
6234                 bno = nextbno;
6235                 /*
6236                  * If we've reached the end, stop.
6237                  */
6238                 if (bno == NULLFSBLOCK)
6239                         break;
6240
6241                 bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
6242                 if (bp) {
6243                         bp_release = 0;
6244                 } else {
6245                         bp_release = 1;
6246                 }
6247                 if (!bp && (error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
6248                                 XFS_BMAP_BTREE_REF)))
6249                         goto error_norelse;
6250                 block = XFS_BUF_TO_BMBT_BLOCK(bp);
6251         }
6252         if (bp_release) {
6253                 bp_release = 0;
6254                 xfs_trans_brelse(NULL, bp);
6255         }
6256         return;
6257
6258 error0:
6259         cmn_err(CE_WARN, "%s: at error0", __FUNCTION__);
6260         if (bp_release)
6261                 xfs_trans_brelse(NULL, bp);
6262 error_norelse:
6263         cmn_err(CE_WARN, "%s: BAD after btree leaves for %d extents",
6264                 __FUNCTION__, i);
6265         panic("%s: CORRUPTED BTREE OR SOMETHING", __FUNCTION__);
6266         return;
6267 }
6268 #endif
6269
6270 /*
6271  * Count fsblocks of the given fork.
6272  */
6273 int                                             /* error */
6274 xfs_bmap_count_blocks(
6275         xfs_trans_t             *tp,            /* transaction pointer */
6276         xfs_inode_t             *ip,            /* incore inode */
6277         int                     whichfork,      /* data or attr fork */
6278         int                     *count)         /* out: count of blocks */
6279 {
6280         xfs_bmbt_block_t        *block; /* current btree block */
6281         xfs_fsblock_t           bno;    /* block # of "block" */
6282         xfs_ifork_t             *ifp;   /* fork structure */
6283         int                     level;  /* btree level, for checking */
6284         xfs_mount_t             *mp;    /* file system mount structure */
6285         xfs_bmbt_ptr_t          *pp;    /* pointer to block address */
6286
6287         bno = NULLFSBLOCK;
6288         mp = ip->i_mount;
6289         ifp = XFS_IFORK_PTR(ip, whichfork);
6290         if ( XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ) {
6291                 if (unlikely(xfs_bmap_count_leaves(ifp->if_u1.if_extents,
6292                         ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t),
6293                         count) < 0)) {
6294                         XFS_ERROR_REPORT("xfs_bmap_count_blocks(1)",
6295                                          XFS_ERRLEVEL_LOW, mp);
6296                         return XFS_ERROR(EFSCORRUPTED);
6297                 }
6298                 return 0;
6299         }
6300
6301         /*
6302          * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
6303          */
6304         block = ifp->if_broot;
6305         level = be16_to_cpu(block->bb_level);
6306         ASSERT(level > 0);
6307         pp = XFS_BMAP_BROOT_PTR_ADDR(block, 1, ifp->if_broot_bytes);
6308         ASSERT(INT_GET(*pp, ARCH_CONVERT) != NULLDFSBNO);
6309         ASSERT(XFS_FSB_TO_AGNO(mp, INT_GET(*pp, ARCH_CONVERT)) < mp->m_sb.sb_agcount);
6310         ASSERT(XFS_FSB_TO_AGBNO(mp, INT_GET(*pp, ARCH_CONVERT)) < mp->m_sb.sb_agblocks);
6311         bno = INT_GET(*pp, ARCH_CONVERT);
6312
6313         if (unlikely(xfs_bmap_count_tree(mp, tp, bno, level, count) < 0)) {
6314                 XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)", XFS_ERRLEVEL_LOW,
6315                                  mp);
6316                 return XFS_ERROR(EFSCORRUPTED);
6317         }
6318
6319         return 0;
6320 }
6321
6322 /*
6323  * Recursively walks each level of a btree
6324  * to count the total fsblocks in use.
6325  */
6326 int                                     /* error */
6327 xfs_bmap_count_tree(
6328         xfs_mount_t     *mp,            /* file system mount point */
6329         xfs_trans_t     *tp,            /* transaction pointer */
6330         xfs_fsblock_t   blockno,        /* file system block number */
6331         int             levelin,        /* level in btree */
6332         int             *count)         /* Count of blocks */
6333 {
6334         int                     error;
6335         xfs_buf_t               *bp, *nbp;
6336         int                     level = levelin;
6337         xfs_bmbt_ptr_t          *pp;
6338         xfs_fsblock_t           bno = blockno;
6339         xfs_fsblock_t           nextbno;
6340         xfs_bmbt_block_t        *block, *nextblock;
6341         int                     numrecs;
6342         xfs_bmbt_rec_t          *frp;
6343
6344         if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF)))
6345                 return error;
6346         *count += 1;
6347         block = XFS_BUF_TO_BMBT_BLOCK(bp);
6348
6349         if (--level) {
6350                 /* Not yet at the level above the leaves; count this level of nodes */
6351                 nextbno = be64_to_cpu(block->bb_rightsib);
6352                 while (nextbno != NULLFSBLOCK) {
6353                         if ((error = xfs_btree_read_bufl(mp, tp, nextbno,
6354                                 0, &nbp, XFS_BMAP_BTREE_REF)))
6355                                 return error;
6356                         *count += 1;
6357                         nextblock = XFS_BUF_TO_BMBT_BLOCK(nbp);
6358                         nextbno = be64_to_cpu(nextblock->bb_rightsib);
6359                         xfs_trans_brelse(tp, nbp);
6360                 }
6361
6362                 /* Dive to the next level */
6363                 pp = XFS_BTREE_PTR_ADDR(mp->m_sb.sb_blocksize,
6364                         xfs_bmbt, block, 1, mp->m_bmap_dmxr[1]);
6365                 bno = INT_GET(*pp, ARCH_CONVERT);
6366                 if (unlikely((error =
6367                      xfs_bmap_count_tree(mp, tp, bno, level, count)) < 0)) {
6368                         xfs_trans_brelse(tp, bp);
6369                         XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
6370                                          XFS_ERRLEVEL_LOW, mp);
6371                         return XFS_ERROR(EFSCORRUPTED);
6372                 }
6373                 xfs_trans_brelse(tp, bp);
6374         } else {
6375                 /* count all level 1 nodes and their leaves */
6376                 for (;;) {
6377                         nextbno = be64_to_cpu(block->bb_rightsib);
6378                         numrecs = be16_to_cpu(block->bb_numrecs);
6379                         frp = XFS_BTREE_REC_ADDR(mp->m_sb.sb_blocksize,
6380                                 xfs_bmbt, block, 1, mp->m_bmap_dmxr[0]);
6381                         if (unlikely(xfs_bmap_disk_count_leaves(frp, numrecs, count) < 0)) {
6382                                 xfs_trans_brelse(tp, bp);
6383                                 XFS_ERROR_REPORT("xfs_bmap_count_tree(2)",
6384                                                  XFS_ERRLEVEL_LOW, mp);
6385                                 return XFS_ERROR(EFSCORRUPTED);
6386                         }
6387                         xfs_trans_brelse(tp, bp);
6388                         if (nextbno == NULLFSBLOCK)
6389                                 break;
6390                         bno = nextbno;
6391                         if ((error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
6392                                 XFS_BMAP_BTREE_REF)))
6393                                 return error;
6394                         *count += 1;
6395                         block = XFS_BUF_TO_BMBT_BLOCK(bp);
6396                 }
6397         }
6398         return 0;
6399 }
6400
6401 /*
6402  * Count leaf blocks given a pointer to an extent list.
6403  */
6404 int
6405 xfs_bmap_count_leaves(
6406         xfs_bmbt_rec_t          *frp,
6407         int                     numrecs,
6408         int                     *count)
6409 {
6410         int             b;
6411
6412         for ( b = 1; b <= numrecs; b++, frp++)
6413                 *count += xfs_bmbt_get_blockcount(frp);
6414         return 0;
6415 }
6416
6417 /*
6418  * Count leaf blocks given a pointer to an extent list originally in btree format.
6419  */
6420 int
6421 xfs_bmap_disk_count_leaves(
6422         xfs_bmbt_rec_t          *frp,
6423         int                     numrecs,
6424         int                     *count)
6425 {
6426         int             b;
6427
6428         for ( b = 1; b <= numrecs; b++, frp++)
6429                 *count += xfs_bmbt_disk_get_blockcount(frp);
6430         return 0;
6431 }