5 * Inode handling routines for the OSTA-UDF(tm) filesystem.
8 * This file is distributed under the terms of the GNU General Public
9 * License (GPL). Copies of the GPL can be obtained from:
10 * ftp://prep.ai.mit.edu/pub/gnu/GPL
11 * Each contributing author retains all rights to their own work.
13 * (C) 1998 Dave Boynton
14 * (C) 1998-2004 Ben Fennema
15 * (C) 1999-2000 Stelias Computing Inc
19 * 10/04/98 dgb Added rudimentary directory functions
20 * 10/07/98 Fully working udf_block_map! It works!
21 * 11/25/98 bmap altered to better support extents
22 * 12/06/98 blf partition support in udf_iget, udf_block_map and udf_read_inode
23 * 12/12/98 rewrote udf_block_map to handle next extents and descs across
24 * block boundaries (which is not actually allowed)
25 * 12/20/98 added support for strategy 4096
26 * 03/07/99 rewrote udf_block_map (again)
27 * New funcs, inode_bmap, udf_next_aext
28 * 04/19/99 Support for writing device EA's for major/minor #
33 #include <linux/smp_lock.h>
34 #include <linux/module.h>
35 #include <linux/pagemap.h>
36 #include <linux/buffer_head.h>
37 #include <linux/writeback.h>
38 #include <linux/slab.h>
43 MODULE_AUTHOR("Ben Fennema");
44 MODULE_DESCRIPTION("Universal Disk Format Filesystem");
45 MODULE_LICENSE("GPL");
47 #define EXTENT_MERGE_SIZE 5
49 static mode_t udf_convert_permissions(struct fileEntry *);
50 static int udf_update_inode(struct inode *, int);
51 static void udf_fill_inode(struct inode *, struct buffer_head *);
52 static struct buffer_head *inode_getblk(struct inode *, long, int *,
54 static int8_t udf_insert_aext(struct inode *, kernel_lb_addr, int,
55 kernel_lb_addr, uint32_t, struct buffer_head *);
56 static void udf_split_extents(struct inode *, int *, int, int,
57 kernel_long_ad [EXTENT_MERGE_SIZE], int *);
58 static void udf_prealloc_extents(struct inode *, int, int,
59 kernel_long_ad [EXTENT_MERGE_SIZE], int *);
60 static void udf_merge_extents(struct inode *,
61 kernel_long_ad [EXTENT_MERGE_SIZE], int *);
62 static void udf_update_extents(struct inode *,
63 kernel_long_ad [EXTENT_MERGE_SIZE], int, int,
64 kernel_lb_addr, uint32_t, struct buffer_head **);
65 static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int);
71 * Clean-up before the specified inode is destroyed.
74 * This routine is called when the kernel destroys an inode structure
75 * ie. when iput() finds i_count == 0.
78 * July 1, 1997 - Andrew E. Mileski
79 * Written, tested, and released.
81 * Called at the last iput() if i_nlink is zero.
/*
 * udf_delete_inode
 *
 * Final teardown of an inode whose i_nlink reached zero: drop all cached
 * pages, write the on-disc file entry one last time, then release the
 * inode's on-disc allocation.
 * NOTE(review): this listing is elided — intermediate statements (bad-inode
 * early-exit path, i_size reset) are missing from this view.
 */
83 void udf_delete_inode(struct inode * inode)
85 truncate_inode_pages(&inode->i_data, 0);
87 if (is_bad_inode(inode))
94 udf_update_inode(inode, IS_SYNC(inode));
95 udf_free_inode(inode);
/*
 * udf_clear_inode
 *
 * Called when the VFS evicts the in-core inode.  On a writable mount any
 * preallocated (not yet recorded) extents are discarded; the in-core copy
 * of the allocation descriptors / extended attributes is then freed and
 * NULLed to guard against use-after-free.
 */
103 void udf_clear_inode(struct inode *inode)
105 if (!(inode->i_sb->s_flags & MS_RDONLY)) {
107 udf_discard_prealloc(inode);
111 kfree(UDF_I_DATA(inode));
112 UDF_I_DATA(inode) = NULL;
/* Write one page of file data via the generic buffer-head writeback path,
 * using udf_get_block for the logical-to-physical block mapping. */
115 static int udf_writepage(struct page *page, struct writeback_control *wbc)
117 return block_write_full_page(page, udf_get_block, wbc);
/* Read one page of file data via the generic buffer-head read path. */
120 static int udf_readpage(struct file *file, struct page *page)
122 return block_read_full_page(page, udf_get_block);
/* Prepare the [from, to) range of a page for a write: map/allocate the
 * underlying blocks and read in any partially-overwritten ones. */
125 static int udf_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
127 return block_prepare_write(page, from, to, udf_get_block);
/* FIBMAP support: translate a file-relative block to a device block. */
130 static sector_t udf_bmap(struct address_space *mapping, sector_t block)
132 return generic_block_bmap(mapping,block,udf_get_block);
/*
 * Address-space operations for regular UDF files whose data lives in
 * normal data blocks (as opposed to in-ICB files, which use
 * udf_adinicb_aops elsewhere in this file).
 */
135 const struct address_space_operations udf_aops = {
136 .readpage = udf_readpage,
137 .writepage = udf_writepage,
138 .sync_page = block_sync_page,
139 .prepare_write = udf_prepare_write,
140 .commit_write = generic_commit_write,
/*
 * udf_expand_file_adinicb
 *
 * Convert a file whose data is stored inside the ICB (allocation type
 * AD_IN_ICB) into a file with real data blocks: copy the in-ICB data out
 * to page 0, switch the allocation type to short/long ADs (per the mount
 * option), and write the page back through the normal aops.
 * NOTE(review): listing is elided; error paths and some braces are not
 * visible here.
 */
144 void udf_expand_file_adinicb(struct inode * inode, int newsize, int * err)
148 struct writeback_control udf_wbc = {
149 .sync_mode = WB_SYNC_NONE,
153 /* from now on we have normal address_space methods */
154 inode->i_data.a_ops = &udf_aops;
/* Nothing allocated in the ICB: just flip the allocation type and return. */
156 if (!UDF_I_LENALLOC(inode))
158 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
159 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
161 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
162 mark_inode_dirty(inode);
/* Copy the in-ICB payload into page 0 and zero-fill the remainder. */
166 page = grab_cache_page(inode->i_mapping, 0);
167 BUG_ON(!PageLocked(page));
169 if (!PageUptodate(page))
172 memset(kaddr + UDF_I_LENALLOC(inode), 0x00,
173 PAGE_CACHE_SIZE - UDF_I_LENALLOC(inode));
174 memcpy(kaddr, UDF_I_DATA(inode) + UDF_I_LENEATTR(inode),
175 UDF_I_LENALLOC(inode));
176 flush_dcache_page(page);
177 SetPageUptodate(page);
/* Clear the in-ICB copy and record the new allocation type. */
180 memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0x00,
181 UDF_I_LENALLOC(inode));
182 UDF_I_LENALLOC(inode) = 0;
183 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
184 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
186 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
/* Push the page to disc so the data survives the conversion. */
188 inode->i_data.a_ops->writepage(page, &udf_wbc);
189 page_cache_release(page);
191 mark_inode_dirty(inode);
/*
 * udf_expand_dir_adinicb
 *
 * Convert a directory stored inside the ICB into one backed by a real
 * data block: allocate a new block, re-write every file identifier into
 * it, then record the block as the directory's single extent.
 *
 * Returns the buffer_head of the new directory block (dbh); *block gets
 * the new logical block number, *err the failure code.
 * NOTE(review): elided listing — allocation-failure and read-failure
 * returns are among the missing lines.
 */
194 struct buffer_head * udf_expand_dir_adinicb(struct inode *inode, int *block, int *err)
197 struct buffer_head *sbh = NULL, *dbh = NULL;
198 kernel_lb_addr bloc, eloc;
199 uint32_t elen, extoffset;
202 struct udf_fileident_bh sfibh, dfibh;
/* Directory positions are counted in 4-byte units, hence the >> 2. */
203 loff_t f_pos = udf_ext0_offset(inode) >> 2;
204 int size = (udf_ext0_offset(inode) + inode->i_size) >> 2;
205 struct fileIdentDesc cfi, *sfi, *dfi;
207 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
208 alloctype = ICBTAG_FLAG_AD_SHORT;
210 alloctype = ICBTAG_FLAG_AD_LONG;
214 UDF_I_ALLOCTYPE(inode) = alloctype;
215 mark_inode_dirty(inode);
219 /* alloc block, and copy data to it */
220 *block = udf_new_block(inode->i_sb, inode,
221 UDF_I_LOCATION(inode).partitionReferenceNum,
222 UDF_I_LOCATION(inode).logicalBlockNum, err);
226 newblock = udf_get_pblock(inode->i_sb, *block,
227 UDF_I_LOCATION(inode).partitionReferenceNum, 0);
230 dbh = udf_tgetblk(inode->i_sb, newblock);
/* Fresh block: zero it and mark it up to date without reading it. */
234 memset(dbh->b_data, 0x00, inode->i_sb->s_blocksize);
235 set_buffer_uptodate(dbh);
237 mark_buffer_dirty_inode(dbh, inode);
/* Walk the in-ICB directory entries (source) and copy each into the
 * new block (destination). */
239 sfibh.soffset = sfibh.eoffset = (f_pos & ((inode->i_sb->s_blocksize - 1) >> 2)) << 2;
240 sbh = sfibh.sbh = sfibh.ebh = NULL;
241 dfibh.soffset = dfibh.eoffset = 0;
242 dfibh.sbh = dfibh.ebh = dbh;
243 while ( (f_pos < size) )
/* Temporarily flip the alloc type so udf_fileident_read parses the
 * entries from inside the ICB. */
245 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
246 sfi = udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL, NULL, NULL, NULL, NULL, NULL);
249 udf_release_data(dbh);
252 UDF_I_ALLOCTYPE(inode) = alloctype;
253 sfi->descTag.tagLocation = cpu_to_le32(*block);
254 dfibh.soffset = dfibh.eoffset;
255 dfibh.eoffset += (sfibh.eoffset - sfibh.soffset);
256 dfi = (struct fileIdentDesc *)(dbh->b_data + dfibh.soffset);
257 if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse,
258 sfi->fileIdent + le16_to_cpu(sfi->lengthOfImpUse)))
/* Write failed: restore in-ICB state and bail out. */
260 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
261 udf_release_data(dbh);
265 mark_buffer_dirty_inode(dbh, inode);
/* Wipe the old in-ICB directory data and record the new single extent. */
267 memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0, UDF_I_LENALLOC(inode));
268 UDF_I_LENALLOC(inode) = 0;
269 bloc = UDF_I_LOCATION(inode);
270 eloc.logicalBlockNum = *block;
271 eloc.partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
272 elen = inode->i_size;
273 UDF_I_LENEXTENTS(inode) = elen;
274 extoffset = udf_file_entry_alloc_offset(inode);
275 udf_add_aext(inode, &bloc, &extoffset, eloc, elen, &sbh, 0);
278 udf_release_data(sbh);
279 mark_inode_dirty(inode);
/*
 * udf_get_block
 *
 * get_block callback for the generic buffer-head paths: map file block
 * `block` to a physical block in bh_result.  Without `create` it only
 * consults the extent map (udf_block_map); with `create` it calls
 * inode_getblk which may allocate.  Tracks sequential allocation via
 * NEXT_ALLOC_BLOCK/GOAL to give the allocator a goal block.
 * NOTE(review): elided listing — locking, the create==0 early return and
 * error returns are among the missing lines.
 */
283 static int udf_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create)
286 struct buffer_head *bh;
291 phys = udf_block_map(inode, block);
293 map_bh(bh_result, inode->i_sb, phys);
/* Sequential write detected: advance the next-allocation hint. */
306 if (block == UDF_I_NEXT_ALLOC_BLOCK(inode) + 1)
308 UDF_I_NEXT_ALLOC_BLOCK(inode) ++;
309 UDF_I_NEXT_ALLOC_GOAL(inode) ++;
314 bh = inode_getblk(inode, block, &err, &phys, &new);
321 set_buffer_new(bh_result);
322 map_bh(bh_result, inode->i_sb, phys);
328 udf_warning(inode->i_sb, "udf_get_block", "block < 0");
/*
 * udf_getblk
 *
 * Resolve a file block to a buffer_head.  Uses a dummy on-stack
 * buffer_head so udf_get_block can report the mapping, then fetches the
 * real buffer with sb_getblk.  Newly allocated blocks are zero-filled and
 * marked up to date so stale disc contents are never exposed.
 */
332 static struct buffer_head *
333 udf_getblk(struct inode *inode, long block, int create, int *err)
335 struct buffer_head dummy;
/* Sentinel so a missed mapping is detectable. */
338 dummy.b_blocknr = -1000;
339 *err = udf_get_block(inode, block, &dummy, create);
340 if (!*err && buffer_mapped(&dummy))
342 struct buffer_head *bh;
343 bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
344 if (buffer_new(&dummy))
347 memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
348 set_buffer_uptodate(bh);
350 mark_buffer_dirty_inode(bh, inode);
/*
 * inode_getblk
 *
 * Core block-allocation routine.  Walks the inode's extent list to find
 * the extent covering file block `block`; if the block is already
 * recorded+allocated its physical address is returned, otherwise a new
 * block is allocated, the covering extent is split/preallocated/merged in
 * the laarr[] scratch array, and the updated extents are written back.
 *
 * Outputs: *phys = physical block, *new = 1 if freshly allocated,
 * *err = failure code.  p*/c*/n* triples track the previous, current and
 * next extent descriptor positions during the walk.
 * NOTE(review): heavily elided listing — loop headers, brace structure
 * and several error-return paths are not visible here.
 */
357 static struct buffer_head * inode_getblk(struct inode * inode, long block,
358 int *err, long *phys, int *new)
360 struct buffer_head *pbh = NULL, *cbh = NULL, *nbh = NULL, *result = NULL;
361 kernel_long_ad laarr[EXTENT_MERGE_SIZE];
362 uint32_t pextoffset = 0, cextoffset = 0, nextoffset = 0;
363 int count = 0, startnum = 0, endnum = 0;
365 kernel_lb_addr eloc, pbloc, cbloc, nbloc;
367 uint64_t lbcount = 0, b_off = 0;
368 uint32_t newblocknum, newblock, offset = 0;
370 int goal = 0, pgoal = UDF_I_LOCATION(inode).logicalBlockNum;
373 pextoffset = cextoffset = nextoffset = udf_file_entry_alloc_offset(inode);
374 b_off = (uint64_t)block << inode->i_sb->s_blocksize_bits;
375 pbloc = cbloc = nbloc = UDF_I_LOCATION(inode);
377 /* find the extent which contains the block we are looking for.
378 alternate between laarr[0] and laarr[1] for locations of the
379 current extent, and the previous extent */
/* Shift the prev/cur/next buffer_head references one step forward,
 * taking an extra reference where two slots share a buffer. */
384 udf_release_data(pbh);
385 atomic_inc(&cbh->b_count);
390 udf_release_data(cbh);
391 atomic_inc(&nbh->b_count);
400 pextoffset = cextoffset;
401 cextoffset = nextoffset;
403 if ((etype = udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1)) == -1)
408 laarr[c].extLength = (etype << 30) | elen;
409 laarr[c].extLocation = eloc;
/* Remember the end of the last allocated extent as a goal for a
 * possible subsequent allocation. */
411 if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
412 pgoal = eloc.logicalBlockNum +
413 ((elen + inode->i_sb->s_blocksize - 1) >>
414 inode->i_sb->s_blocksize_bits);
417 } while (lbcount + elen <= b_off);
420 offset = b_off >> inode->i_sb->s_blocksize_bits;
422 /* if the extent is allocated and recorded, return the block
423 if the extent is not a multiple of the blocksize, round up */
425 if (etype == (EXT_RECORDED_ALLOCATED >> 30))
427 if (elen & (inode->i_sb->s_blocksize - 1))
429 elen = EXT_RECORDED_ALLOCATED |
430 ((elen + inode->i_sb->s_blocksize - 1) &
431 ~(inode->i_sb->s_blocksize - 1));
432 etype = udf_write_aext(inode, nbloc, &cextoffset, eloc, elen, nbh, 1);
434 udf_release_data(pbh);
435 udf_release_data(cbh);
436 udf_release_data(nbh);
437 newblock = udf_get_lb_pblock(inode->i_sb, eloc, offset);
/* Block lies past EOF: extend the file with a not-recorded extent
 * covering blocks up to and including the requested one. */
444 endnum = startnum = ((count > 1) ? 1 : count);
445 if (laarr[c].extLength & (inode->i_sb->s_blocksize - 1))
448 (laarr[c].extLength & UDF_EXTENT_FLAG_MASK) |
449 (((laarr[c].extLength & UDF_EXTENT_LENGTH_MASK) +
450 inode->i_sb->s_blocksize - 1) &
451 ~(inode->i_sb->s_blocksize - 1));
452 UDF_I_LENEXTENTS(inode) =
453 (UDF_I_LENEXTENTS(inode) + inode->i_sb->s_blocksize - 1) &
454 ~(inode->i_sb->s_blocksize - 1);
457 laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
458 ((offset + 1) << inode->i_sb->s_blocksize_bits);
459 memset(&laarr[c].extLocation, 0x00, sizeof(kernel_lb_addr));
465 endnum = startnum = ((count > 2) ? 2 : count);
467 /* if the current extent is in position 0, swap it with the previous */
468 if (!c && count != 1)
476 /* if the current block is located in a extent, read the next extent */
479 if ((etype = udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 0)) != -1)
481 laarr[c+1].extLength = (etype << 30) | elen;
482 laarr[c+1].extLocation = eloc;
490 udf_release_data(cbh);
491 udf_release_data(nbh);
493 /* if the current extent is not recorded but allocated, get the
494 block in the extent corresponding to the requested block */
495 if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
496 newblocknum = laarr[c].extLocation.logicalBlockNum + offset;
497 else /* otherwise, allocate a new block */
/* Prefer the running sequential-allocation goal, else allocate near
 * the inode's own location. */
499 if (UDF_I_NEXT_ALLOC_BLOCK(inode) == block)
500 goal = UDF_I_NEXT_ALLOC_GOAL(inode);
505 goal = UDF_I_LOCATION(inode).logicalBlockNum + 1;
508 if (!(newblocknum = udf_new_block(inode->i_sb, inode,
509 UDF_I_LOCATION(inode).partitionReferenceNum, goal, err)))
511 udf_release_data(pbh);
515 UDF_I_LENEXTENTS(inode) += inode->i_sb->s_blocksize;
518 /* if the extent the requsted block is located in contains multiple blocks,
519 split the extent into at most three extents. blocks prior to requested
520 block, requested block, and blocks after requested block */
521 udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum);
523 #ifdef UDF_PREALLOCATE
524 /* preallocate blocks */
525 udf_prealloc_extents(inode, c, lastblock, laarr, &endnum);
528 /* merge any continuous blocks in laarr */
529 udf_merge_extents(inode, laarr, &endnum);
531 /* write back the new extents, inserting new extents if the new number
532 of extents is greater than the old number, and deleting extents if
533 the new number of extents is less than the old number */
534 udf_update_extents(inode, laarr, startnum, endnum, pbloc, pextoffset, &pbh);
536 udf_release_data(pbh);
538 if (!(newblock = udf_get_pblock(inode->i_sb, newblocknum,
539 UDF_I_LOCATION(inode).partitionReferenceNum, 0)))
/* Remember this allocation so the next sequential write reuses the
 * goal, and stamp ctime for the metadata change. */
546 UDF_I_NEXT_ALLOC_BLOCK(inode) = block;
547 UDF_I_NEXT_ALLOC_GOAL(inode) = newblocknum;
548 inode->i_ctime = current_fs_time(inode->i_sb);
551 udf_sync_inode(inode);
553 mark_inode_dirty(inode);
/*
 * udf_split_extents
 *
 * Split the extent laarr[*c] (which covers the requested block at
 * `offset` blocks into it) into up to three pieces: blocks before the
 * target, the target block itself (now recorded+allocated at
 * newblocknum), and blocks after it.  *c and *endnum are adjusted to the
 * new positions/count.
 * NOTE(review): elided listing — the curr/*c bookkeeping between the
 * visible lines is missing from this view.
 */
557 static void udf_split_extents(struct inode *inode, int *c, int offset, int newblocknum,
558 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
/* Only unrecorded extents (allocated or not) need splitting. */
560 if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) ||
561 (laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
564 int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) +
565 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
566 int8_t etype = (laarr[curr].extLength >> 30);
/* Target at the start or end of the extent: two pieces suffice;
 * otherwise shift entries to make room for three. */
570 else if (!offset || blen == offset + 1)
572 laarr[curr+2] = laarr[curr+1];
573 laarr[curr+1] = laarr[curr];
577 laarr[curr+3] = laarr[curr+1];
578 laarr[curr+2] = laarr[curr+1] = laarr[curr];
/* Leading piece: blocks before the target.  If they were allocated
 * but never recorded, give the blocks back to the free space map. */
583 if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
585 udf_free_blocks(inode->i_sb, inode, laarr[curr].extLocation, 0, offset);
586 laarr[curr].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
587 (offset << inode->i_sb->s_blocksize_bits);
588 laarr[curr].extLocation.logicalBlockNum = 0;
589 laarr[curr].extLocation.partitionReferenceNum = 0;
592 laarr[curr].extLength = (etype << 30) |
593 (offset << inode->i_sb->s_blocksize_bits);
/* Middle piece: the requested block, now recorded at newblocknum. */
599 laarr[curr].extLocation.logicalBlockNum = newblocknum;
600 if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
601 laarr[curr].extLocation.partitionReferenceNum =
602 UDF_I_LOCATION(inode).partitionReferenceNum;
603 laarr[curr].extLength = EXT_RECORDED_ALLOCATED |
604 inode->i_sb->s_blocksize;
/* Trailing piece: blocks after the target, if any remain. */
607 if (blen != offset + 1)
609 if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
610 laarr[curr].extLocation.logicalBlockNum += (offset + 1);
611 laarr[curr].extLength = (etype << 30) |
612 ((blen - (offset + 1)) << inode->i_sb->s_blocksize_bits);
/*
 * udf_prealloc_extents
 *
 * After a block allocation, try to preallocate up to
 * UDF_DEFAULT_PREALLOC_BLOCKS contiguous blocks following extent c so
 * future sequential writes avoid fragmentation.  Newly preallocated
 * space is recorded as a NOT_RECORDED_ALLOCATED extent, consuming
 * equivalent length from following not-allocated extents.
 * NOTE(review): elided listing — the `start` selection and several brace
 * levels are missing from this view.
 */
619 static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
620 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
622 int start, length = 0, currlength = 0, i;
624 if (*endnum >= (c+1))
/* Existing preallocation after c: extend it rather than start anew. */
633 if ((laarr[c+1].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
636 length = currlength = (((laarr[c+1].extLength & UDF_EXTENT_LENGTH_MASK) +
637 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
/* Count how much room the following unrecorded extents provide. */
643 for (i=start+1; i<=*endnum; i++)
648 length += UDF_DEFAULT_PREALLOC_BLOCKS;
650 else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
651 length += (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
652 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
/* Ask the allocator for contiguous blocks right after `start`. */
659 int next = laarr[start].extLocation.logicalBlockNum +
660 (((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) +
661 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
662 int numalloc = udf_prealloc_blocks(inode->i_sb, inode,
663 laarr[start].extLocation.partitionReferenceNum,
664 next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ? length :
665 UDF_DEFAULT_PREALLOC_BLOCKS) - currlength);
/* Either grow the existing preallocated extent or insert a new one. */
670 laarr[start].extLength +=
671 (numalloc << inode->i_sb->s_blocksize_bits);
674 memmove(&laarr[c+2], &laarr[c+1],
675 sizeof(long_ad) * (*endnum - (c+1)));
677 laarr[c+1].extLocation.logicalBlockNum = next;
678 laarr[c+1].extLocation.partitionReferenceNum =
679 laarr[c].extLocation.partitionReferenceNum;
680 laarr[c+1].extLength = EXT_NOT_RECORDED_ALLOCATED |
681 (numalloc << inode->i_sb->s_blocksize_bits);
/* Shrink or delete the following not-allocated extents by the
 * amount we just preallocated. */
685 for (i=start+1; numalloc && i<*endnum; i++)
687 int elen = ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
688 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
692 laarr[i].extLength -=
693 (numalloc << inode->i_sb->s_blocksize_bits);
700 memmove(&laarr[i], &laarr[i+1],
701 sizeof(long_ad) * (*endnum - (i+1)));
706 UDF_I_LENEXTENTS(inode) += numalloc << inode->i_sb->s_blocksize_bits;
/*
 * udf_merge_extents
 *
 * Coalesce adjacent laarr[] entries: merge same-type extents that are
 * physically contiguous (splitting at the UDF maximum extent length
 * where needed), convert an allocated-but-unrecorded extent followed by
 * a not-allocated one back to free space, and normalise any leftover
 * NOT_RECORDED_ALLOCATED extents.  *endnum shrinks as entries merge.
 * NOTE(review): elided listing — brace structure between branches is
 * missing from this view.
 */
711 static void udf_merge_extents(struct inode *inode,
712 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
716 for (i=0; i<(*endnum-1); i++)
718 if ((laarr[i].extLength >> 30) == (laarr[i+1].extLength >> 30))
/* Mergeable when both are not-recorded/not-allocated, or when the
 * second extent starts exactly where the first one ends. */
720 if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) ||
721 ((laarr[i+1].extLocation.logicalBlockNum - laarr[i].extLocation.logicalBlockNum) ==
722 (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
723 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits)))
/* Combined length would exceed the on-disc maximum: saturate
 * extent i and push the remainder into extent i+1. */
725 if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
726 (laarr[i+1].extLength & UDF_EXTENT_LENGTH_MASK) +
727 inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK)
729 laarr[i+1].extLength = (laarr[i+1].extLength -
730 (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
731 UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize-1);
732 laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_FLAG_MASK) +
733 (UDF_EXTENT_LENGTH_MASK + 1) - inode->i_sb->s_blocksize;
734 laarr[i+1].extLocation.logicalBlockNum =
735 laarr[i].extLocation.logicalBlockNum +
736 ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) >>
737 inode->i_sb->s_blocksize_bits);
/* Fits in one extent: absorb i+1 into i and close the gap. */
741 laarr[i].extLength = laarr[i+1].extLength +
742 (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
743 inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize-1));
745 memmove(&laarr[i+1], &laarr[i+2],
746 sizeof(long_ad) * (*endnum - (i+2)));
/* Allocated-but-unrecorded followed by not-allocated: free the
 * allocated blocks and merge as not-allocated space. */
752 else if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) &&
753 ((laarr[i+1].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)))
755 udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0,
756 ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
757 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
758 laarr[i].extLocation.logicalBlockNum = 0;
759 laarr[i].extLocation.partitionReferenceNum = 0;
761 if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
762 (laarr[i+1].extLength & UDF_EXTENT_LENGTH_MASK) +
763 inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK)
765 laarr[i+1].extLength = (laarr[i+1].extLength -
766 (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
767 UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize-1);
768 laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_FLAG_MASK) +
769 (UDF_EXTENT_LENGTH_MASK + 1) - inode->i_sb->s_blocksize;
773 laarr[i].extLength = laarr[i+1].extLength +
774 (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
775 inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize-1));
777 memmove(&laarr[i+1], &laarr[i+2],
778 sizeof(long_ad) * (*endnum - (i+2)));
/* Lone allocated-but-unrecorded extent: release its blocks and
 * demote it to not-recorded/not-allocated. */
783 else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
785 udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0,
786 ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
787 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
788 laarr[i].extLocation.logicalBlockNum = 0;
789 laarr[i].extLocation.partitionReferenceNum = 0;
790 laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) |
791 EXT_NOT_RECORDED_NOT_ALLOCATED;
/*
 * udf_update_extents
 *
 * Write the laarr[] scratch extents back to the inode's allocation
 * descriptors.  If the extent count shrank (startnum > endnum) the
 * surplus descriptors are deleted; if it grew, new descriptors are
 * inserted first.  The remaining entries are then overwritten in place.
 * NOTE(review): elided listing — the computation of `start` and some
 * brace structure are missing from this view.
 */
796 static void udf_update_extents(struct inode *inode,
797 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int startnum, int endnum,
798 kernel_lb_addr pbloc, uint32_t pextoffset, struct buffer_head **pbh)
801 kernel_lb_addr tmploc;
804 if (startnum > endnum)
806 for (i=0; i<(startnum-endnum); i++)
808 udf_delete_aext(inode, pbloc, pextoffset, laarr[i].extLocation,
809 laarr[i].extLength, *pbh);
812 else if (startnum < endnum)
814 for (i=0; i<(endnum-startnum); i++)
816 udf_insert_aext(inode, pbloc, pextoffset, laarr[i].extLocation,
817 laarr[i].extLength, *pbh);
/* Advance past the descriptor just inserted. */
818 udf_next_aext(inode, &pbloc, &pextoffset, &laarr[i].extLocation,
819 &laarr[i].extLength, pbh, 1);
824 for (i=start; i<endnum; i++)
826 udf_next_aext(inode, &pbloc, &pextoffset, &tmploc, &tmplen, pbh, 0);
827 udf_write_aext(inode, pbloc, &pextoffset, laarr[i].extLocation,
828 laarr[i].extLength, *pbh, 1);
/*
 * udf_bread
 *
 * Like sb_bread for a file-relative block: map (optionally allocating
 * with `create`) via udf_getblk, then read the buffer from disc if it is
 * not already up to date.  Returns NULL on failure with *err set.
 * NOTE(review): elided listing — the NULL return paths are missing here.
 */
832 struct buffer_head * udf_bread(struct inode * inode, int block,
833 int create, int * err)
835 struct buffer_head * bh = NULL;
837 bh = udf_getblk(inode, block, create, err);
841 if (buffer_uptodate(bh))
/* Not cached: issue a synchronous read and re-check. */
843 ll_rw_block(READ, 1, &bh);
845 if (buffer_uptodate(bh))
/*
 * udf_truncate
 *
 * VFS truncate: shrink/extend the inode to i_size.  In-ICB files either
 * get their tail zeroed in place, or are first expanded to real blocks
 * when the new size no longer fits in the ICB.  Regular files go through
 * block_truncate_page + udf_truncate_extents.
 * NOTE(review): elided listing — brace structure and the lock/unlock
 * calls are missing from this view.
 */
852 void udf_truncate(struct inode * inode)
/* Only regular files, directories and symlinks can be truncated. */
857 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
858 S_ISLNK(inode->i_mode)))
860 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
864 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
/* New size no longer fits in the ICB: convert to real blocks first. */
866 if (inode->i_sb->s_blocksize < (udf_file_entry_alloc_offset(inode) +
869 udf_expand_file_adinicb(inode, inode->i_size, &err);
870 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
872 inode->i_size = UDF_I_LENALLOC(inode);
877 udf_truncate_extents(inode);
/* Still in-ICB: zero the data past the new size in place. */
881 offset = inode->i_size & (inode->i_sb->s_blocksize - 1);
882 memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode) + offset, 0x00, inode->i_sb->s_blocksize - offset - udf_file_entry_alloc_offset(inode));
883 UDF_I_LENALLOC(inode) = inode->i_size;
888 block_truncate_page(inode->i_mapping, inode->i_size, udf_get_block);
889 udf_truncate_extents(inode);
892 inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb);
894 udf_sync_inode (inode);
896 mark_inode_dirty(inode);
/*
 * __udf_read_inode
 *
 * Read the on-disc (extended) file entry for the inode and hand it to
 * udf_fill_inode.  Handles strategy 4096 by following the indirect ICB
 * chain: when an indirect entry points at a newer FE/EFE, the inode's
 * location is updated and the routine recurses on the new descriptor.
 * Any read or identity failure marks the inode bad.
 * NOTE(review): elided listing — returns after the error printks and
 * several closing braces are missing from this view.
 */
901 __udf_read_inode(struct inode *inode)
903 struct buffer_head *bh = NULL;
904 struct fileEntry *fe;
908 * Set defaults, but the inode is still incomplete!
909 * Note: get_new_inode() sets the following on a new inode:
912 * i_flags = sb->s_flags
914 * clean_inode(): zero fills and sets
919 inode->i_blksize = PAGE_SIZE;
921 bh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 0, &ident);
925 printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed !bh\n",
927 make_bad_inode(inode);
/* Only file entries, extended file entries and unallocated-space
 * entries are acceptable here. */
931 if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
932 ident != TAG_IDENT_USE)
934 printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed ident=%d\n",
935 inode->i_ino, ident);
936 udf_release_data(bh);
937 make_bad_inode(inode);
941 fe = (struct fileEntry *)bh->b_data;
943 if (le16_to_cpu(fe->icbTag.strategyType) == 4096)
945 struct buffer_head *ibh = NULL, *nbh = NULL;
946 struct indirectEntry *ie;
948 ibh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 1, &ident);
949 if (ident == TAG_IDENT_IE)
954 ie = (struct indirectEntry *)ibh->b_data;
956 loc = lelb_to_cpu(ie->indirectICB.extLocation);
958 if (ie->indirectICB.extLength &&
959 (nbh = udf_read_ptagged(inode->i_sb, loc, 0, &ident)))
961 if (ident == TAG_IDENT_FE ||
962 ident == TAG_IDENT_EFE)
/* Follow the indirection: adopt the new location and re-read. */
964 memcpy(&UDF_I_LOCATION(inode), &loc, sizeof(kernel_lb_addr));
965 udf_release_data(bh);
966 udf_release_data(ibh);
967 udf_release_data(nbh);
968 __udf_read_inode(inode);
973 udf_release_data(nbh);
974 udf_release_data(ibh);
978 udf_release_data(ibh);
982 udf_release_data(ibh);
984 else if (le16_to_cpu(fe->icbTag.strategyType) != 4)
986 printk(KERN_ERR "udf: unsupported strategy type: %d\n",
987 le16_to_cpu(fe->icbTag.strategyType));
988 udf_release_data(bh);
989 make_bad_inode(inode);
992 udf_fill_inode(inode, bh);
993 udf_release_data(bh);
/*
 * udf_fill_inode
 *
 * Populate the in-core inode from an on-disc file entry (FE), extended
 * file entry (EFE) or unallocated-space entry (USE) held in bh: copy the
 * allocation descriptors into UDF_I_DATA, convert ownership, link count,
 * size, permissions and timestamps, then wire up the i_op/i_fop/a_ops
 * appropriate to the ICB file type.  Unknown file types mark the inode
 * bad.
 * NOTE(review): elided listing — kmalloc failure handling and several
 * braces/returns are missing from this view.
 */
996 static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
998 struct fileEntry *fe;
999 struct extendedFileEntry *efe;
1004 fe = (struct fileEntry *)bh->b_data;
1005 efe = (struct extendedFileEntry *)bh->b_data;
1007 if (le16_to_cpu(fe->icbTag.strategyType) == 4)
1008 UDF_I_STRAT4096(inode) = 0;
1009 else /* if (le16_to_cpu(fe->icbTag.strategyType) == 4096) */
1010 UDF_I_STRAT4096(inode) = 1;
1012 UDF_I_ALLOCTYPE(inode) = le16_to_cpu(fe->icbTag.flags) & ICBTAG_FLAG_AD_MASK;
1013 UDF_I_UNIQUE(inode) = 0;
1014 UDF_I_LENEATTR(inode) = 0;
1015 UDF_I_LENEXTENTS(inode) = 0;
1016 UDF_I_LENALLOC(inode) = 0;
1017 UDF_I_NEXT_ALLOC_BLOCK(inode) = 0;
1018 UDF_I_NEXT_ALLOC_GOAL(inode) = 0;
/* Copy everything after the fixed descriptor header (EAs + allocation
 * descriptors) into the in-core buffer; the size of the header differs
 * per descriptor type. */
1019 if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_EFE)
1021 UDF_I_EFE(inode) = 1;
1022 UDF_I_USE(inode) = 0;
1023 UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry), GFP_KERNEL);
1024 memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct extendedFileEntry), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
1026 else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_FE)
1028 UDF_I_EFE(inode) = 0;
1029 UDF_I_USE(inode) = 0;
1030 UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct fileEntry), GFP_KERNEL);
1031 memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct fileEntry), inode->i_sb->s_blocksize - sizeof(struct fileEntry));
1033 else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
1035 UDF_I_EFE(inode) = 0;
1036 UDF_I_USE(inode) = 1;
1037 UDF_I_LENALLOC(inode) =
1039 ((struct unallocSpaceEntry *)bh->b_data)->lengthAllocDescs);
1040 UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry), GFP_KERNEL);
1041 memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct unallocSpaceEntry), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
/* Ownership: -1 on disc (or a mount override) falls back to the
 * uid/gid given at mount time. */
1045 inode->i_uid = le32_to_cpu(fe->uid);
1046 if (inode->i_uid == -1 || UDF_QUERY_FLAG(inode->i_sb,
1047 UDF_FLAG_UID_IGNORE))
1048 inode->i_uid = UDF_SB(inode->i_sb)->s_uid;
1050 inode->i_gid = le32_to_cpu(fe->gid);
1051 if (inode->i_gid == -1 || UDF_QUERY_FLAG(inode->i_sb,
1052 UDF_FLAG_GID_IGNORE))
1053 inode->i_gid = UDF_SB(inode->i_sb)->s_gid;
1055 inode->i_nlink = le16_to_cpu(fe->fileLinkCount);
1056 if (!inode->i_nlink)
1059 inode->i_size = le64_to_cpu(fe->informationLength);
1060 UDF_I_LENEXTENTS(inode) = inode->i_size;
1062 inode->i_mode = udf_convert_permissions(fe);
1063 inode->i_mode &= ~UDF_SB(inode->i_sb)->s_umask;
/* FE and EFE lay out timestamps and lengths at different offsets, so
 * the two branches below mirror each other. */
1065 if (UDF_I_EFE(inode) == 0)
1067 inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
1068 (inode->i_sb->s_blocksize_bits - 9);
1070 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1071 lets_to_cpu(fe->accessTime)) )
1073 inode->i_atime.tv_sec = convtime;
1074 inode->i_atime.tv_nsec = convtime_usec * 1000;
/* Unparseable stamp: fall back to the volume's recording time. */
1078 inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
1081 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1082 lets_to_cpu(fe->modificationTime)) )
1084 inode->i_mtime.tv_sec = convtime;
1085 inode->i_mtime.tv_nsec = convtime_usec * 1000;
1089 inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
1092 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1093 lets_to_cpu(fe->attrTime)) )
1095 inode->i_ctime.tv_sec = convtime;
1096 inode->i_ctime.tv_nsec = convtime_usec * 1000;
1100 inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
1103 UDF_I_UNIQUE(inode) = le64_to_cpu(fe->uniqueID);
1104 UDF_I_LENEATTR(inode) = le32_to_cpu(fe->lengthExtendedAttr);
1105 UDF_I_LENALLOC(inode) = le32_to_cpu(fe->lengthAllocDescs);
1106 offset = sizeof(struct fileEntry) + UDF_I_LENEATTR(inode);
1110 inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
1111 (inode->i_sb->s_blocksize_bits - 9);
1113 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1114 lets_to_cpu(efe->accessTime)) )
1116 inode->i_atime.tv_sec = convtime;
1117 inode->i_atime.tv_nsec = convtime_usec * 1000;
1121 inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
1124 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1125 lets_to_cpu(efe->modificationTime)) )
1127 inode->i_mtime.tv_sec = convtime;
1128 inode->i_mtime.tv_nsec = convtime_usec * 1000;
1132 inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
1135 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1136 lets_to_cpu(efe->createTime)) )
1138 UDF_I_CRTIME(inode).tv_sec = convtime;
1139 UDF_I_CRTIME(inode).tv_nsec = convtime_usec * 1000;
1143 UDF_I_CRTIME(inode) = UDF_SB_RECORDTIME(inode->i_sb);
1146 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1147 lets_to_cpu(efe->attrTime)) )
1149 inode->i_ctime.tv_sec = convtime;
1150 inode->i_ctime.tv_nsec = convtime_usec * 1000;
1154 inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
1157 UDF_I_UNIQUE(inode) = le64_to_cpu(efe->uniqueID);
1158 UDF_I_LENEATTR(inode) = le32_to_cpu(efe->lengthExtendedAttr);
1159 UDF_I_LENALLOC(inode) = le32_to_cpu(efe->lengthAllocDescs);
1160 offset = sizeof(struct extendedFileEntry) + UDF_I_LENEATTR(inode);
/* Wire up operations based on the ICB file type. */
1163 switch (fe->icbTag.fileType)
1165 case ICBTAG_FILE_TYPE_DIRECTORY:
1167 inode->i_op = &udf_dir_inode_operations;
1168 inode->i_fop = &udf_dir_operations;
1169 inode->i_mode |= S_IFDIR;
1173 case ICBTAG_FILE_TYPE_REALTIME:
1174 case ICBTAG_FILE_TYPE_REGULAR:
1175 case ICBTAG_FILE_TYPE_UNDEF:
/* In-ICB files use the special aops that read/write the ICB data. */
1177 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
1178 inode->i_data.a_ops = &udf_adinicb_aops;
1180 inode->i_data.a_ops = &udf_aops;
1181 inode->i_op = &udf_file_inode_operations;
1182 inode->i_fop = &udf_file_operations;
1183 inode->i_mode |= S_IFREG;
1186 case ICBTAG_FILE_TYPE_BLOCK:
1188 inode->i_mode |= S_IFBLK;
1191 case ICBTAG_FILE_TYPE_CHAR:
1193 inode->i_mode |= S_IFCHR;
1196 case ICBTAG_FILE_TYPE_FIFO:
1198 init_special_inode(inode, inode->i_mode | S_IFIFO, 0);
1201 case ICBTAG_FILE_TYPE_SOCKET:
1203 init_special_inode(inode, inode->i_mode | S_IFSOCK, 0);
1206 case ICBTAG_FILE_TYPE_SYMLINK:
1208 inode->i_data.a_ops = &udf_symlink_aops;
1209 inode->i_op = &page_symlink_inode_operations;
1210 inode->i_mode = S_IFLNK|S_IRWXUGO;
1215 printk(KERN_ERR "udf: udf_fill_inode(ino %ld) failed unknown file type=%d\n",
1216 inode->i_ino, fe->icbTag.fileType);
1217 make_bad_inode(inode);
/* Device nodes carry their major/minor in a Device Spec extended
 * attribute (EA type 12); missing EA makes the inode bad. */
1221 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1223 struct deviceSpec *dsea =
1224 (struct deviceSpec *)
1225 udf_get_extendedattr(inode, 12, 1);
1229 init_special_inode(inode, inode->i_mode, MKDEV(
1230 le32_to_cpu(dsea->majorDeviceIdent),
1231 le32_to_cpu(dsea->minorDeviceIdent)));
1232 /* Developer ID ??? */
1236 make_bad_inode(inode);
/*
 * udf_convert_permissions
 *
 * Translate on-disc UDF permission bits and ICB flags into a POSIX mode:
 * UDF packs other/group/owner rwx at 0/5/10-bit offsets, hence the >> 2
 * and >> 4 shifts; setuid/setgid/sticky come from the ICB tag flags.
 * NOTE(review): the `return mode;` tail is elided from this view.
 */
1242 udf_convert_permissions(struct fileEntry *fe)
1245 uint32_t permissions;
1248 permissions = le32_to_cpu(fe->permissions);
1249 flags = le16_to_cpu(fe->icbTag.flags);
1251 mode = (( permissions ) & S_IRWXO) |
1252 (( permissions >> 2 ) & S_IRWXG) |
1253 (( permissions >> 4 ) & S_IRWXU) |
1254 (( flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) |
1255 (( flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) |
1256 (( flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0);
1265 * Write out the specified inode.
1268 * This routine is called whenever an inode is synced.
1269 * Currently this routine is just a placeholder.
1272 * July 1, 1997 - Andrew E. Mileski
1273 * Written, tested, and released.
/* VFS write_inode entry point: delegate to udf_update_inode (locking and
 * the return of `ret` are elided from this view). */
1276 int udf_write_inode(struct inode * inode, int sync)
1280 ret = udf_update_inode(inode, sync);
/* Synchronously flush one inode's on-disc file entry. */
1285 int udf_sync_inode(struct inode * inode)
1287 return udf_update_inode(inode, 1);
/*
 * udf_update_inode
 *
 * Copy the in-core inode into its on-disk descriptor — a fileEntry,
 * extendedFileEntry, or (special-cased early) an unallocSpaceEntry —
 * recompute the descriptor tag CRC and checksum, and mark the buffer
 * dirty; with do_sync set, also wait for the write and report I/O
 * errors.
 *
 * NOTE(review): this chunk is an extract with some original lines
 * elided; comments describe only what the visible code shows.
 */
1291 udf_update_inode(struct inode *inode, int do_sync)
1293	struct buffer_head *bh = NULL;
1294	struct fileEntry *fe;
1295	struct extendedFileEntry *efe;
1300	kernel_timestamp cpu_time;
	/* Read the block holding this inode's on-disk descriptor. */
1303	bh = udf_tread(inode->i_sb,
1304		udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0));
1308		udf_debug("bread failure\n");
	/* Rebuild the descriptor from scratch in the buffer. */
1312	memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
	/* fe/efe alias the same buffer; which layout applies is decided below. */
1314	fe = (struct fileEntry *)bh->b_data;
1315	efe = (struct extendedFileEntry *)bh->b_data;
	/*
	 * Unallocated Space Entry: carries only allocation descriptors,
	 * so write it out here and return without the file-entry fields.
	 */
1317	if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
1319		struct unallocSpaceEntry *use =
1320			(struct unallocSpaceEntry *)bh->b_data;
1322		use->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
1323		memcpy(bh->b_data + sizeof(struct unallocSpaceEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
1324		crclen = sizeof(struct unallocSpaceEntry) + UDF_I_LENALLOC(inode) -
1326		use->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
1327		use->descTag.descCRCLength = cpu_to_le16(crclen);
		/* CRC covers everything after the 16-byte tag itself. */
1328		use->descTag.descCRC = cpu_to_le16(udf_crc((char *)use + sizeof(tag), crclen, 0));
		/* Tag checksum: byte sum of the tag (checksum byte zeroed first). */
1330		use->descTag.tagChecksum = 0;
1331		for (i=0; i<16; i++)
1333			use->descTag.tagChecksum += ((uint8_t *)&(use->descTag))[i];
1335		mark_buffer_dirty(bh);
1336		udf_release_data(bh);
	/* uid/gid, possibly forced to -1 by the uid_forget/gid_forget mount flags. */
1340	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET))
1341		fe->uid = cpu_to_le32(-1);
1342	else fe->uid = cpu_to_le32(inode->i_uid);
1344	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_FORGET))
1345		fe->gid = cpu_to_le32(-1);
1346	else fe->gid = cpu_to_le32(inode->i_gid);
	/*
	 * POSIX rwx bits back into the UDF permission layout (inverse of
	 * udf_convert_permissions); UDF-only delete/chattr bits already on
	 * disk are preserved.
	 */
1348	udfperms = ((inode->i_mode & S_IRWXO) ) |
1349		((inode->i_mode & S_IRWXG) << 2) |
1350		((inode->i_mode & S_IRWXU) << 4);
1352	udfperms |= (le32_to_cpu(fe->permissions) &
1353		(FE_PERM_O_DELETE | FE_PERM_O_CHATTR |
1354		FE_PERM_G_DELETE | FE_PERM_G_CHATTR |
1355		FE_PERM_U_DELETE | FE_PERM_U_CHATTR));
1356	fe->permissions = cpu_to_le32(udfperms);
	/* Directories store their link count without the "." self-link. */
1358	if (S_ISDIR(inode->i_mode))
1359		fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
1361		fe->fileLinkCount = cpu_to_le16(inode->i_nlink);
1363	fe->informationLength = cpu_to_le64(inode->i_size);
	/*
	 * Char/block devices record major/minor in a Device Specification
	 * extended attribute (attribute type 12), created here if absent.
	 */
1365	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1368		struct deviceSpec *dsea =
1369			(struct deviceSpec *)
1370			udf_get_extendedattr(inode, 12, 1);
1374			dsea = (struct deviceSpec *)
1375				udf_add_extendedattr(inode,
1376					sizeof(struct deviceSpec) +
1377					sizeof(regid), 12, 0x3);
1378		dsea->attrType = cpu_to_le32(12);
1379		dsea->attrSubtype = 1;
1380		dsea->attrLength = cpu_to_le32(sizeof(struct deviceSpec) +
1382		dsea->impUseLength = cpu_to_le32(sizeof(regid));
		/* Stamp the implementation-use area with our developer ID. */
1384		eid = (regid *)dsea->impUse;
1385		memset(eid, 0, sizeof(regid));
1386		strcpy(eid->ident, UDF_ID_DEVELOPER);
1387		eid->identSuffix[0] = UDF_OS_CLASS_UNIX;
1388		eid->identSuffix[1] = UDF_OS_ID_LINUX;
1389		dsea->majorDeviceIdent = cpu_to_le32(imajor(inode));
1390		dsea->minorDeviceIdent = cpu_to_le32(iminor(inode));
	/* Plain fileEntry layout (UDF_I_EFE == 0) vs extendedFileEntry below. */
1397	if (UDF_I_EFE(inode) == 0)
1395		memcpy(bh->b_data + sizeof(struct fileEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct fileEntry));
		/* i_blocks is in 512-byte sectors; convert (rounding up) to fs blocks. */
1396		fe->logicalBlocksRecorded = cpu_to_le64(
1397			(inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
1398			(inode->i_sb->s_blocksize_bits - 9));
1400		if (udf_time_to_stamp(&cpu_time, inode->i_atime))
1401			fe->accessTime = cpu_to_lets(cpu_time);
1402		if (udf_time_to_stamp(&cpu_time, inode->i_mtime))
1403			fe->modificationTime = cpu_to_lets(cpu_time);
1404		if (udf_time_to_stamp(&cpu_time, inode->i_ctime))
1405			fe->attrTime = cpu_to_lets(cpu_time);
1406		memset(&(fe->impIdent), 0, sizeof(regid));
1407		strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER);
1408		fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1409		fe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1410		fe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode));
1411		fe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode));
1412		fe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
1413		fe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_FE);
1414		crclen = sizeof(struct fileEntry);
	/* extendedFileEntry layout. */
1418		memcpy(bh->b_data + sizeof(struct extendedFileEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
1419		efe->objectSize = cpu_to_le64(inode->i_size);
1420		efe->logicalBlocksRecorded = cpu_to_le64(
1421			(inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
1422			(inode->i_sb->s_blocksize_bits - 9));
		/* Creation time can never be later than access/mod/attr times. */
1424		if (UDF_I_CRTIME(inode).tv_sec > inode->i_atime.tv_sec ||
1425			(UDF_I_CRTIME(inode).tv_sec == inode->i_atime.tv_sec &&
1426			 UDF_I_CRTIME(inode).tv_nsec > inode->i_atime.tv_nsec))
1428			UDF_I_CRTIME(inode) = inode->i_atime;
1430		if (UDF_I_CRTIME(inode).tv_sec > inode->i_mtime.tv_sec ||
1431			(UDF_I_CRTIME(inode).tv_sec == inode->i_mtime.tv_sec &&
1432			 UDF_I_CRTIME(inode).tv_nsec > inode->i_mtime.tv_nsec))
1434			UDF_I_CRTIME(inode) = inode->i_mtime;
1436		if (UDF_I_CRTIME(inode).tv_sec > inode->i_ctime.tv_sec ||
1437			(UDF_I_CRTIME(inode).tv_sec == inode->i_ctime.tv_sec &&
1438			 UDF_I_CRTIME(inode).tv_nsec > inode->i_ctime.tv_nsec))
1440			UDF_I_CRTIME(inode) = inode->i_ctime;
1443		if (udf_time_to_stamp(&cpu_time, inode->i_atime))
1444			efe->accessTime = cpu_to_lets(cpu_time);
1445		if (udf_time_to_stamp(&cpu_time, inode->i_mtime))
1446			efe->modificationTime = cpu_to_lets(cpu_time);
1447		if (udf_time_to_stamp(&cpu_time, UDF_I_CRTIME(inode)))
1448			efe->createTime = cpu_to_lets(cpu_time);
1449		if (udf_time_to_stamp(&cpu_time, inode->i_ctime))
1450			efe->attrTime = cpu_to_lets(cpu_time);
1452		memset(&(efe->impIdent), 0, sizeof(regid));
1453		strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER);
1454		efe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1455		efe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1456		efe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode));
1457		efe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode));
1458		efe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
1459		efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE);
1460		crclen = sizeof(struct extendedFileEntry);
	/* ICB strategy: 4096 (with one continuation entry) or the common 4. */
1462	if (UDF_I_STRAT4096(inode))
1464		fe->icbTag.strategyType = cpu_to_le16(4096);
1465		fe->icbTag.strategyParameter = cpu_to_le16(1);
1466		fe->icbTag.numEntries = cpu_to_le16(2);
1470		fe->icbTag.strategyType = cpu_to_le16(4);
1471		fe->icbTag.numEntries = cpu_to_le16(1);
	/* Map the POSIX file type onto the ICB tag fileType. */
1474	if (S_ISDIR(inode->i_mode))
1475		fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY;
1476	else if (S_ISREG(inode->i_mode))
1477		fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR;
1478	else if (S_ISLNK(inode->i_mode))
1479		fe->icbTag.fileType = ICBTAG_FILE_TYPE_SYMLINK;
1480	else if (S_ISBLK(inode->i_mode))
1481		fe->icbTag.fileType = ICBTAG_FILE_TYPE_BLOCK;
1482	else if (S_ISCHR(inode->i_mode))
1483		fe->icbTag.fileType = ICBTAG_FILE_TYPE_CHAR;
1484	else if (S_ISFIFO(inode->i_mode))
1485		fe->icbTag.fileType = ICBTAG_FILE_TYPE_FIFO;
1486	else if (S_ISSOCK(inode->i_mode))
1487		fe->icbTag.fileType = ICBTAG_FILE_TYPE_SOCKET;
	/*
	 * Rebuild the ICB flags: allocation type plus suid/sgid/sticky from
	 * i_mode, preserving any other on-disk flag bits.
	 */
1489	icbflags =	UDF_I_ALLOCTYPE(inode) |
1490			((inode->i_mode & S_ISUID) ? ICBTAG_FLAG_SETUID : 0) |
1491			((inode->i_mode & S_ISGID) ? ICBTAG_FLAG_SETGID : 0) |
1492			((inode->i_mode & S_ISVTX) ? ICBTAG_FLAG_STICKY : 0) |
1493			(le16_to_cpu(fe->icbTag.flags) &
1494				~(ICBTAG_FLAG_AD_MASK | ICBTAG_FLAG_SETUID |
1495				ICBTAG_FLAG_SETGID | ICBTAG_FLAG_STICKY));
1497	fe->icbTag.flags = cpu_to_le16(icbflags);
	/* Descriptor version 3 for UDF >= 2.00 media, else 2. */
1498	if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
1499		fe->descTag.descVersion = cpu_to_le16(3);
1501		fe->descTag.descVersion = cpu_to_le16(2);
1502	fe->descTag.tagSerialNum = cpu_to_le16(UDF_SB_SERIALNUM(inode->i_sb));
1503	fe->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
	/* CRC covers the entry, EAs and alloc descs, minus the tag itself. */
1504	crclen += UDF_I_LENEATTR(inode) + UDF_I_LENALLOC(inode) - sizeof(tag);
1505	fe->descTag.descCRCLength = cpu_to_le16(crclen);
1506	fe->descTag.descCRC = cpu_to_le16(udf_crc((char *)fe + sizeof(tag), crclen, 0));
	/* Tag checksum: byte sum of the tag (checksum byte zeroed first). */
1508	fe->descTag.tagChecksum = 0;
1509	for (i=0; i<16; i++)
1511		fe->descTag.tagChecksum += ((uint8_t *)&(fe->descTag))[i];
1513	/* write the data blocks */
1514	mark_buffer_dirty(bh);
	/* Synchronous mode: wait for the write and surface I/O errors. */
1517		sync_dirty_buffer(bh);
1518		if (buffer_req(bh) && !buffer_uptodate(bh))
1520			printk("IO error syncing udf inode [%s:%08lx]\n",
1521				inode->i_sb->s_id, inode->i_ino);
1525	udf_release_data(bh);
/*
 * udf_iget
 *
 * Look up (or read in) the inode identified by the lb_addr @ino.  Keys
 * the inode cache by the physical block of the descriptor; new inodes
 * are filled by __udf_read_inode().  Inodes whose logical block lies
 * beyond the partition length are marked bad.
 */
1530 udf_iget(struct super_block *sb, kernel_lb_addr ino)
1532	unsigned long block = udf_get_lb_pblock(sb, ino, 0);
1533	struct inode *inode = iget_locked(sb, block);
	/* Freshly allocated inode: record its location and read it from disk. */
1538	if (inode->i_state & I_NEW) {
1539		memcpy(&UDF_I_LOCATION(inode), &ino, sizeof(kernel_lb_addr));
1540		__udf_read_inode(inode);
1541		unlock_new_inode(inode);
1544	if (is_bad_inode(inode))
	/* Sanity: reject descriptors that claim to live past the partition end. */
1547	if (ino.logicalBlockNum >= UDF_SB_PARTLEN(sb, ino.partitionReferenceNum)) {
1548		udf_debug("block=%d, partition=%d out of range\n",
1549			ino.logicalBlockNum, ino.partitionReferenceNum);
1550		make_bad_inode(inode);
/*
 * udf_add_aext
 *
 * Append one allocation extent (@eloc/@elen) to the inode's allocation
 * descriptors.  *bloc/*extoffset/*bh track the current write position
 * (in-ICB when *bh is NULL — hedged: inferred from the UDF_I_DATA path
 * below) and are updated in place.  When the current descriptor area is
 * too full to hold this extent plus a continuation pointer, a new
 * Allocation Extent Descriptor (AED) block is allocated and chained.
 * Returns the extent type from udf_write_aext(), or -1 on failure.
 */
1561 int8_t udf_add_aext(struct inode *inode, kernel_lb_addr *bloc, int *extoffset,
1562	kernel_lb_addr eloc, uint32_t elen, struct buffer_head **bh, int inc)
1565	short_ad *sad = NULL;
1566	long_ad *lad = NULL;
1567	struct allocExtDesc *aed;
	/* Descriptors live inside the file entry itself when no AED buffer is set. */
1572		ptr = UDF_I_DATA(inode) + *extoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
1574		ptr = (*bh)->b_data + *extoffset;
	/* Descriptor size depends on the inode's allocation type. */
1576	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
1577		adsize = sizeof(short_ad);
1578	else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
1579		adsize = sizeof(long_ad);
	/*
	 * Not enough room for this extent plus a continuation descriptor:
	 * allocate and chain a new AED block.
	 */
1583	if (*extoffset + (2 * adsize) > inode->i_sb->s_blocksize)
1586		struct buffer_head *nbh;
1588		kernel_lb_addr obloc = *bloc;
		/* Allocate the new AED block near the old one, same partition. */
1590		if (!(bloc->logicalBlockNum = udf_new_block(inode->i_sb, NULL,
1591			obloc.partitionReferenceNum, obloc.logicalBlockNum, &err)))
1595		if (!(nbh = udf_tgetblk(inode->i_sb, udf_get_lb_pblock(inode->i_sb,
1601		memset(nbh->b_data, 0x00, inode->i_sb->s_blocksize);
1602		set_buffer_uptodate(nbh);
1604		mark_buffer_dirty_inode(nbh, inode);
1606		aed = (struct allocExtDesc *)(nbh->b_data);
		/* Non-strict mode records a back-pointer to the previous AED. */
1607		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT))
1608			aed->previousAllocExtLocation = cpu_to_le32(obloc.logicalBlockNum);
		/*
		 * If the last descriptor would not fit alongside a continuation
		 * pointer, move it into the new block first.
		 */
1609		if (*extoffset + adsize > inode->i_sb->s_blocksize)
1611			loffset = *extoffset;
1612			aed->lengthAllocDescs = cpu_to_le32(adsize);
1613			sptr = ptr - adsize;
1614			dptr = nbh->b_data + sizeof(struct allocExtDesc);
1615			memcpy(dptr, sptr, adsize);
1616			*extoffset = sizeof(struct allocExtDesc) + adsize;
		/* Otherwise the new block starts empty. */
1620			loffset = *extoffset + adsize;
1621			aed->lengthAllocDescs = cpu_to_le32(0);
1623			*extoffset = sizeof(struct allocExtDesc);
			/* Account the continuation descriptor in the old AED. */
1627				aed = (struct allocExtDesc *)(*bh)->b_data;
1628				aed->lengthAllocDescs =
1629					cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
			/* ...or in the in-ICB allocation area. */
1633				UDF_I_LENALLOC(inode) += adsize;
1634				mark_inode_dirty(inode);
		/* Tag version 3 for UDF >= 2.00, else 2. */
1637		if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
1638			udf_new_tag(nbh->b_data, TAG_IDENT_AED, 3, 1,
1639				bloc->logicalBlockNum, sizeof(tag));
1641			udf_new_tag(nbh->b_data, TAG_IDENT_AED, 2, 1,
1642				bloc->logicalBlockNum, sizeof(tag));
		/*
		 * Write a "next extent" continuation descriptor (at sptr, the
		 * old position) pointing at the freshly chained AED block.
		 */
1643		switch (UDF_I_ALLOCTYPE(inode))
1645			case ICBTAG_FLAG_AD_SHORT:
1647				sad = (short_ad *)sptr;
1648				sad->extLength = cpu_to_le32(
1649					EXT_NEXT_EXTENT_ALLOCDECS |
1650					inode->i_sb->s_blocksize);
1651				sad->extPosition = cpu_to_le32(bloc->logicalBlockNum);
1654			case ICBTAG_FLAG_AD_LONG:
1656				lad = (long_ad *)sptr;
1657				lad->extLength = cpu_to_le32(
1658					EXT_NEXT_EXTENT_ALLOCDECS |
1659					inode->i_sb->s_blocksize);
1660				lad->extLocation = cpu_to_lelb(*bloc);
1661				memset(lad->impUse, 0x00, sizeof(lad->impUse));
		/* Refresh the old AED's tag (CRC) and release it; the new block takes over. */
1667			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1668				udf_update_tag((*bh)->b_data, loffset);
1670				udf_update_tag((*bh)->b_data, sizeof(struct allocExtDesc));
1671			mark_buffer_dirty_inode(*bh, inode);
1672			udf_release_data(*bh);
1675			mark_inode_dirty(inode);
	/* Finally write the requested extent at the (possibly new) position. */
1679	etype = udf_write_aext(inode, *bloc, extoffset, eloc, elen, *bh, inc);
	/* Account the added descriptor: in-ICB... */
1683		UDF_I_LENALLOC(inode) += adsize;
1684		mark_inode_dirty(inode);
	/* ...or in the AED block, updating its length and tag CRC. */
1688		aed = (struct allocExtDesc *)(*bh)->b_data;
1689		aed->lengthAllocDescs =
1690			cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
1691		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1692			udf_update_tag((*bh)->b_data, *extoffset + (inc ? 0 : adsize));
1694			udf_update_tag((*bh)->b_data, sizeof(struct allocExtDesc));
1695		mark_buffer_dirty_inode(*bh, inode);
/*
 * udf_write_aext
 *
 * Overwrite the allocation descriptor (short_ad or long_ad, per the
 * inode's allocation type) at *extoffset with @eloc/@elen, then advance
 * *extoffset by the descriptor size when @inc is set.  Works either
 * in-ICB (bh == NULL) or inside an AED buffer.
 * Returns the extent type, i.e. the top two bits of @elen.
 */
1701 int8_t udf_write_aext(struct inode *inode, kernel_lb_addr bloc, int *extoffset,
1702	kernel_lb_addr eloc, uint32_t elen, struct buffer_head *bh, int inc)
	/* In-ICB: descriptors live in the inode's cached data area. */
1708		ptr = UDF_I_DATA(inode) + *extoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
1711		ptr = bh->b_data + *extoffset;
		/* Extra reference: udf_release_data() below drops one. */
1712		atomic_inc(&bh->b_count);
1715	switch (UDF_I_ALLOCTYPE(inode))
1717		case ICBTAG_FLAG_AD_SHORT:
1719			short_ad *sad = (short_ad *)ptr;
1720			sad->extLength = cpu_to_le32(elen);
1721			sad->extPosition = cpu_to_le32(eloc.logicalBlockNum);
1722			adsize = sizeof(short_ad);
1725		case ICBTAG_FLAG_AD_LONG:
1727			long_ad *lad = (long_ad *)ptr;
1728			lad->extLength = cpu_to_le32(elen);
1729			lad->extLocation = cpu_to_lelb(eloc);
1730			memset(lad->impUse, 0x00, sizeof(lad->impUse));
1731			adsize = sizeof(long_ad);
	/* AED case: refresh the descriptor tag CRC where required, then dirty. */
1740		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1742			struct allocExtDesc *aed = (struct allocExtDesc *)(bh)->b_data;
1743			udf_update_tag((bh)->b_data,
1744				le32_to_cpu(aed->lengthAllocDescs) + sizeof(struct allocExtDesc));
1746		mark_buffer_dirty_inode(bh, inode);
1747		udf_release_data(bh);
	/* In-ICB case: the inode itself is dirty. */
1750		mark_inode_dirty(inode);
1753		*extoffset += adsize;
1754	return (elen >> 30);
/*
 * udf_next_aext
 *
 * Fetch the next allocation extent, transparently following any
 * "next extent" (AED continuation) descriptors: each one releases the
 * current buffer and reads the chained AED block, restarting the scan
 * at its first descriptor.  Returns the extent type or -1 at the end
 * (or on read failure).
 */
1757 int8_t udf_next_aext(struct inode *inode, kernel_lb_addr *bloc, int *extoffset,
1758	kernel_lb_addr *eloc, uint32_t *elen, struct buffer_head **bh, int inc)
1762	while ((etype = udf_current_aext(inode, bloc, extoffset, eloc, elen, bh, inc)) ==
1763		(EXT_NEXT_EXTENT_ALLOCDECS >> 30))
		/* Continuation: jump into the chained AED block. */
1766		*extoffset = sizeof(struct allocExtDesc);
1767		udf_release_data(*bh);
1768		if (!(*bh = udf_tread(inode->i_sb, udf_get_lb_pblock(inode->i_sb, *bloc, 0))))
1770			udf_debug("reading block %d failed!\n",
1771				udf_get_lb_pblock(inode->i_sb, *bloc, 0));
/*
 * udf_current_aext
 *
 * Decode the allocation descriptor at the current position, filling
 * *eloc/*elen and returning the extent type (top 2 bits of the length
 * word), or -1 when the descriptor area is exhausted or the allocation
 * type is unsupported.  A zero *extoffset is (re)initialized to the
 * start of the descriptor area.
 */
1779 int8_t udf_current_aext(struct inode *inode, kernel_lb_addr *bloc, int *extoffset,
1780	kernel_lb_addr *eloc, uint32_t *elen, struct buffer_head **bh, int inc)
	/* In-ICB: descriptors follow the EAs inside the inode's data area. */
1789			*extoffset = udf_file_entry_alloc_offset(inode);
1790		ptr = UDF_I_DATA(inode) + *extoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
1791		alen = udf_file_entry_alloc_offset(inode) + UDF_I_LENALLOC(inode);
	/* AED block: descriptors follow the allocExtDesc header. */
1796			*extoffset = sizeof(struct allocExtDesc);
1797		ptr = (*bh)->b_data + *extoffset;
1798		alen = sizeof(struct allocExtDesc) + le32_to_cpu(((struct allocExtDesc *)(*bh)->b_data)->lengthAllocDescs);
1801	switch (UDF_I_ALLOCTYPE(inode))
1803		case ICBTAG_FLAG_AD_SHORT:
1807			if (!(sad = udf_get_fileshortad(ptr, alen, extoffset, inc)))
1810			etype = le32_to_cpu(sad->extLength) >> 30;
1811			eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
			/* short_ads have no partition field; inherit the inode's. */
1812			eloc->partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
1813			*elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
1816		case ICBTAG_FLAG_AD_LONG:
1820			if (!(lad = udf_get_filelongad(ptr, alen, extoffset, inc)))
1823			etype = le32_to_cpu(lad->extLength) >> 30;
1824			*eloc = lelb_to_cpu(lad->extLocation);
1825			*elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
1830			udf_debug("alloc_type = %d unsupported\n", UDF_I_ALLOCTYPE(inode));
/*
 * udf_insert_aext
 *
 * Insert extent @neloc/@nelen at the current position by rippling every
 * existing descriptor forward one slot: each iteration writes the
 * pending extent and makes the displaced one the new pending extent,
 * which is finally appended with udf_add_aext().
 * Returns the type of the last appended extent.
 */
1839 udf_insert_aext(struct inode *inode, kernel_lb_addr bloc, int extoffset,
1840	kernel_lb_addr neloc, uint32_t nelen, struct buffer_head *bh)
1842	kernel_lb_addr oeloc;
	/* Extra reference: balanced by udf_release_data() below. */
1847		atomic_inc(&bh->b_count);
1849	while ((etype = udf_next_aext(inode, &bloc, &extoffset, &oeloc, &oelen, &bh, 0)) != -1)
1851		udf_write_aext(inode, bloc, &extoffset, neloc, nelen, bh, 1);
		/* The extent just displaced becomes the next one to write. */
1854		nelen = (etype << 30) | oelen;
1856	udf_add_aext(inode, &bloc, &extoffset, neloc, nelen, &bh, 1);
1857	udf_release_data(bh);
1858	return (nelen >> 30);
/*
 * udf_delete_aext
 *
 * Delete the extent at the current position by walking two cursors:
 * the "next" cursor (nbloc/nextoffset/nbh) reads ahead while the "old"
 * cursor (obloc/oextoffset/obh) writes each extent back one slot.  The
 * vacated slot(s) are zero-filled, lengthAllocDescs is shrunk, and an
 * AED block emptied by the deletion is freed.
 * Returns the type of the last extent read (elen >> 30).
 */
1861 int8_t udf_delete_aext(struct inode *inode, kernel_lb_addr nbloc, int nextoffset,
1862	kernel_lb_addr eloc, uint32_t elen, struct buffer_head *nbh)
1864	struct buffer_head *obh;
1865	kernel_lb_addr obloc;
1866	int oextoffset, adsize;
1868	struct allocExtDesc *aed;
	/* Two extra references: one for each cursor aliasing nbh. */
1872		atomic_inc(&nbh->b_count);
1873		atomic_inc(&nbh->b_count);
1876	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
1877		adsize = sizeof(short_ad);
1878	else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
1879		adsize = sizeof(long_ad);
1885	oextoffset = nextoffset;
	/* Skip past the extent being deleted. */
1887	if (udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1) == -1)
	/* Copy every following descriptor back one slot. */
1890	while ((etype = udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1)) != -1)
1892		udf_write_aext(inode, obloc, &oextoffset, eloc, (etype << 30) | elen, obh, 1);
		/* The read cursor crossed into a new AED block: resync the write cursor. */
1896			udf_release_data(obh);
1897			atomic_inc(&nbh->b_count);
1899			oextoffset = nextoffset - adsize;
	/* Zero extent used to blank the now-vacant trailing slot(s). */
1902	memset(&eloc, 0x00, sizeof(kernel_lb_addr));
	/*
	 * The deletion emptied an AED block: free it and blank two slots in
	 * the previous area (the moved descriptor plus the old continuation
	 * pointer) — the doubled udf_write_aext() is intentional.
	 */
1907		udf_free_blocks(inode->i_sb, inode, nbloc, 0, 1);
1908		udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
1909		udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
		/* Shrink the in-ICB allocation area by both removed descriptors... */
1912			UDF_I_LENALLOC(inode) -= (adsize * 2);
1913			mark_inode_dirty(inode);
		/* ...or the AED's descriptor length, refreshing its tag CRC. */
1917			aed = (struct allocExtDesc *)(obh)->b_data;
1918			aed->lengthAllocDescs =
1919				cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - (2*adsize));
1920			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1921				udf_update_tag((obh)->b_data, oextoffset - (2*adsize));
1923				udf_update_tag((obh)->b_data, sizeof(struct allocExtDesc));
1924			mark_buffer_dirty_inode(obh, inode);
	/* Simple case: blank one slot and shrink by one descriptor. */
1929		udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
1932			UDF_I_LENALLOC(inode) -= adsize;
1933			mark_inode_dirty(inode);
1937			aed = (struct allocExtDesc *)(obh)->b_data;
1938			aed->lengthAllocDescs =
1939				cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - adsize);
1940			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1941				udf_update_tag((obh)->b_data, oextoffset - adsize);
1943				udf_update_tag((obh)->b_data, sizeof(struct allocExtDesc));
1944			mark_buffer_dirty_inode(obh, inode);
1948	udf_release_data(nbh);
1949	udf_release_data(obh);
1950	return (elen >> 30);
/*
 * inode_bmap
 *
 * Walk the inode's extent list to locate file block @block.  On success
 * returns the extent type with *eloc/*elen describing the containing
 * extent and *offset the byte offset of @block within it; past EOF
 * returns -1 with *offset set to the distance beyond the last extent
 * (and records the total extent length in UDF_I_LENEXTENTS).
 */
1953 int8_t inode_bmap(struct inode *inode, int block, kernel_lb_addr *bloc, uint32_t *extoffset,
1954	kernel_lb_addr *eloc, uint32_t *elen, uint32_t *offset, struct buffer_head **bh)
	/* Byte position of the requested block within the file. */
1956	uint64_t lbcount = 0, bcount = (uint64_t)block << inode->i_sb->s_blocksize_bits;
1961		printk(KERN_ERR "udf: inode_bmap: block < 0\n");
	/* Start the walk at the inode's own descriptor. */
1967	*bloc = UDF_I_LOCATION(inode);
	/* Accumulate extent lengths until we pass the target byte count. */
1971		if ((etype = udf_next_aext(inode, bloc, extoffset, eloc, elen, bh, 1)) == -1)
			/* Ran off the end: report how far past the last extent we are. */
1973			*offset = bcount - lbcount;
1974			UDF_I_LENEXTENTS(inode) = lbcount;
1978	} while (lbcount <= bcount);
	/* Offset of the block inside the found extent. */
1980	*offset = bcount + *elen - lbcount;
1985 long udf_block_map(struct inode *inode, long block)
1987 kernel_lb_addr eloc, bloc;
1988 uint32_t offset, extoffset, elen;
1989 struct buffer_head *bh = NULL;
1994 if (inode_bmap(inode, block, &bloc, &extoffset, &eloc, &elen, &offset, &bh) == (EXT_RECORDED_ALLOCATED >> 30))
1995 ret = udf_get_lb_pblock(inode->i_sb, eloc, offset >> inode->i_sb->s_blocksize_bits);
2000 udf_release_data(bh);
2002 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_VARCONV))
2003 return udf_fixed_to_variable(ret);