5 * Inode handling routines for the OSTA-UDF(tm) filesystem.
8 * This file is distributed under the terms of the GNU General Public
9 * License (GPL). Copies of the GPL can be obtained from:
10 * ftp://prep.ai.mit.edu/pub/gnu/GPL
11 * Each contributing author retains all rights to their own work.
13 * (C) 1998 Dave Boynton
14 * (C) 1998-2004 Ben Fennema
15 * (C) 1999-2000 Stelias Computing Inc
19 * 10/04/98 dgb Added rudimentary directory functions
20 * 10/07/98 Fully working udf_block_map! It works!
21 * 11/25/98 bmap altered to better support extents
22 * 12/06/98 blf partition support in udf_iget, udf_block_map and udf_read_inode
23 * 12/12/98 rewrote udf_block_map to handle next extents and descs across
24 * block boundaries (which is not actually allowed)
25 * 12/20/98 added support for strategy 4096
26 * 03/07/99 rewrote udf_block_map (again)
27 * New funcs, inode_bmap, udf_next_aext
28 * 04/19/99 Support for writing device EAs for major/minor #
33 #include <linux/smp_lock.h>
34 #include <linux/module.h>
35 #include <linux/pagemap.h>
36 #include <linux/buffer_head.h>
37 #include <linux/writeback.h>
38 #include <linux/slab.h>
43 MODULE_AUTHOR("Ben Fennema");
44 MODULE_DESCRIPTION("Universal Disk Format Filesystem");
45 MODULE_LICENSE("GPL");
47 #define EXTENT_MERGE_SIZE 5
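/*
 * Rough sizing note: inode_getblk() collects the extents around the block
 * being mapped in a small work array; splitting one extent around the
 * target block yields at most three pieces, and preallocation/merging can
 * add one or two more, so five descriptors are enough for one pass.
 */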
49 static mode_t udf_convert_permissions(struct fileEntry *);
50 static int udf_update_inode(struct inode *, int);
51 static void udf_fill_inode(struct inode *, struct buffer_head *);
52 static struct buffer_head *inode_getblk(struct inode *, sector_t, int *, long *, int *);
54 static int8_t udf_insert_aext(struct inode *, struct extent_position,
55 kernel_lb_addr, uint32_t);
56 static void udf_split_extents(struct inode *, int *, int, int,
57 kernel_long_ad [EXTENT_MERGE_SIZE], int *);
58 static void udf_prealloc_extents(struct inode *, int, int,
59 kernel_long_ad [EXTENT_MERGE_SIZE], int *);
60 static void udf_merge_extents(struct inode *,
61 kernel_long_ad [EXTENT_MERGE_SIZE], int *);
62 static void udf_update_extents(struct inode *,
63 kernel_long_ad [EXTENT_MERGE_SIZE], int, int,
64 struct extent_position *);
65 static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int);
71 * Clean-up before the specified inode is destroyed.
74 * This routine is called when the kernel destroys an inode structure
75 * i.e., when iput() finds i_count == 0.
78 * July 1, 1997 - Andrew E. Mileski
79 * Written, tested, and released.
81 * Called at the last iput() if i_nlink is zero.
83 void udf_delete_inode(struct inode * inode)
85 truncate_inode_pages(&inode->i_data, 0);
87 if (is_bad_inode(inode))
94 udf_update_inode(inode, IS_SYNC(inode));
95 udf_free_inode(inode);
103 void udf_clear_inode(struct inode *inode)
105 if (!(inode->i_sb->s_flags & MS_RDONLY)) {
107 udf_discard_prealloc(inode);
111 kfree(UDF_I_DATA(inode));
112 UDF_I_DATA(inode) = NULL;
115 static int udf_writepage(struct page *page, struct writeback_control *wbc)
117 return block_write_full_page(page, udf_get_block, wbc);
120 static int udf_readpage(struct file *file, struct page *page)
122 return block_read_full_page(page, udf_get_block);
125 static int udf_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
127 return block_prepare_write(page, from, to, udf_get_block);
130 static sector_t udf_bmap(struct address_space *mapping, sector_t block)
132 return generic_block_bmap(mapping,block,udf_get_block);
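/*
 * All of the address_space operations below delegate block mapping to
 * udf_get_block(), so ordinary page reads, writes and bmap() share the
 * same extent lookup path.
 */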
135 const struct address_space_operations udf_aops = {
136 .readpage = udf_readpage,
137 .writepage = udf_writepage,
138 .sync_page = block_sync_page,
139 .prepare_write = udf_prepare_write,
140 .commit_write = generic_commit_write,
141 .bmap = udf_bmap,
142 };
144 void udf_expand_file_adinicb(struct inode * inode, int newsize, int * err)
148 struct writeback_control udf_wbc = {
149 .sync_mode = WB_SYNC_NONE,
153 /* from now on we have normal address_space methods */
154 inode->i_data.a_ops = &udf_aops;
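/*
 * The data currently stored inline in the ICB is copied into page 0 of
 * the page cache below, the rest of the page is zeroed, and the page is
 * then written back through the regular address_space methods installed
 * above.
 */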
156 if (!UDF_I_LENALLOC(inode))
158 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
159 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
161 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
162 mark_inode_dirty(inode);
166 page = grab_cache_page(inode->i_mapping, 0);
167 BUG_ON(!PageLocked(page));
169 if (!PageUptodate(page))
172 memset(kaddr + UDF_I_LENALLOC(inode), 0x00,
173 PAGE_CACHE_SIZE - UDF_I_LENALLOC(inode));
174 memcpy(kaddr, UDF_I_DATA(inode) + UDF_I_LENEATTR(inode),
175 UDF_I_LENALLOC(inode));
176 flush_dcache_page(page);
177 SetPageUptodate(page);
180 memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0x00,
181 UDF_I_LENALLOC(inode));
182 UDF_I_LENALLOC(inode) = 0;
183 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
184 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
186 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
188 inode->i_data.a_ops->writepage(page, &udf_wbc);
189 page_cache_release(page);
191 mark_inode_dirty(inode);
194 struct buffer_head * udf_expand_dir_adinicb(struct inode *inode, int *block, int *err)
197 struct buffer_head *dbh = NULL;
201 struct extent_position epos;
203 struct udf_fileident_bh sfibh, dfibh;
204 loff_t f_pos = udf_ext0_offset(inode) >> 2;
205 int size = (udf_ext0_offset(inode) + inode->i_size) >> 2;
206 struct fileIdentDesc cfi, *sfi, *dfi;
208 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
209 alloctype = ICBTAG_FLAG_AD_SHORT;
211 alloctype = ICBTAG_FLAG_AD_LONG;
215 UDF_I_ALLOCTYPE(inode) = alloctype;
216 mark_inode_dirty(inode);
220 /* alloc block, and copy data to it */
221 *block = udf_new_block(inode->i_sb, inode,
222 UDF_I_LOCATION(inode).partitionReferenceNum,
223 UDF_I_LOCATION(inode).logicalBlockNum, err);
227 newblock = udf_get_pblock(inode->i_sb, *block,
228 UDF_I_LOCATION(inode).partitionReferenceNum, 0);
231 dbh = udf_tgetblk(inode->i_sb, newblock);
235 memset(dbh->b_data, 0x00, inode->i_sb->s_blocksize);
236 set_buffer_uptodate(dbh);
238 mark_buffer_dirty_inode(dbh, inode);
240 sfibh.soffset = sfibh.eoffset = (f_pos & ((inode->i_sb->s_blocksize - 1) >> 2)) << 2;
241 sfibh.sbh = sfibh.ebh = NULL;
242 dfibh.soffset = dfibh.eoffset = 0;
243 dfibh.sbh = dfibh.ebh = dbh;
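/*
 * Copy loop: sfibh iterates over the directory entries still held inline
 * in the ICB, dfibh writes them into the freshly allocated block.  The
 * allocation type is flipped to ICBTAG_FLAG_AD_IN_ICB around
 * udf_fileident_read() so it keeps reading from the inline area.
 */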
244 while (f_pos < size)
246 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
247 sfi = udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL, NULL, NULL, NULL);
253 UDF_I_ALLOCTYPE(inode) = alloctype;
254 sfi->descTag.tagLocation = cpu_to_le32(*block);
255 dfibh.soffset = dfibh.eoffset;
256 dfibh.eoffset += (sfibh.eoffset - sfibh.soffset);
257 dfi = (struct fileIdentDesc *)(dbh->b_data + dfibh.soffset);
258 if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse,
259 sfi->fileIdent + le16_to_cpu(sfi->lengthOfImpUse)))
261 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
266 mark_buffer_dirty_inode(dbh, inode);
268 memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0, UDF_I_LENALLOC(inode));
269 UDF_I_LENALLOC(inode) = 0;
270 eloc.logicalBlockNum = *block;
271 eloc.partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
272 elen = inode->i_size;
273 UDF_I_LENEXTENTS(inode) = elen;
275 epos.block = UDF_I_LOCATION(inode);
276 epos.offset = udf_file_entry_alloc_offset(inode);
277 udf_add_aext(inode, &epos, eloc, elen, 0);
281 mark_inode_dirty(inode);
285 static int udf_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create)
288 struct buffer_head *bh;
293 phys = udf_block_map(inode, block);
295 map_bh(bh_result, inode->i_sb, phys);
308 if (block == UDF_I_NEXT_ALLOC_BLOCK(inode) + 1)
310 UDF_I_NEXT_ALLOC_BLOCK(inode) ++;
311 UDF_I_NEXT_ALLOC_GOAL(inode) ++;
316 bh = inode_getblk(inode, block, &err, &phys, &new);
323 set_buffer_new(bh_result);
324 map_bh(bh_result, inode->i_sb, phys);
330 udf_warning(inode->i_sb, "udf_get_block", "block < 0");
334 static struct buffer_head *
335 udf_getblk(struct inode *inode, long block, int create, int *err)
337 struct buffer_head dummy;
340 dummy.b_blocknr = -1000;
341 *err = udf_get_block(inode, block, &dummy, create);
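/*
 * udf_get_block() only fills in the throwaway buffer_head; on success the
 * real buffer is fetched with sb_getblk(), and a newly allocated block is
 * zeroed and marked uptodate before use.
 */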
342 if (!*err && buffer_mapped(&dummy))
344 struct buffer_head *bh;
345 bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
346 if (buffer_new(&dummy))
349 memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
350 set_buffer_uptodate(bh);
352 mark_buffer_dirty_inode(bh, inode);
359 /* Extend the file by 'blocks' blocks, return the number of extents added */
360 int udf_extend_file(struct inode *inode, struct extent_position *last_pos,
361 kernel_long_ad *last_ext, sector_t blocks)
364 int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
365 struct super_block *sb = inode->i_sb;
366 kernel_lb_addr prealloc_loc = {0, 0};
367 int prealloc_len = 0;
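/*
 * A "fake" last extent is one whose length field is zero - a placeholder
 * used when the file does not yet have a real extent to extend.
 */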
369 /* The previous extent is fake and we should not extend by anything
370 * - there's nothing to do... */
373 /* Round the last extent up to a multiple of block size */
374 if (last_ext->extLength & (sb->s_blocksize - 1)) {
375 last_ext->extLength =
376 (last_ext->extLength & UDF_EXTENT_FLAG_MASK) |
377 (((last_ext->extLength & UDF_EXTENT_LENGTH_MASK) +
378 sb->s_blocksize - 1) & ~(sb->s_blocksize - 1));
379 UDF_I_LENEXTENTS(inode) =
380 (UDF_I_LENEXTENTS(inode) + sb->s_blocksize - 1) &
381 ~(sb->s_blocksize - 1);
383 /* Is the last extent just preallocated blocks? */
384 if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) == EXT_NOT_RECORDED_ALLOCATED) {
385 /* Save the extent so that we can reattach it to the end */
386 prealloc_loc = last_ext->extLocation;
387 prealloc_len = last_ext->extLength;
388 /* Mark the extent as a hole */
389 last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
390 (last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
391 last_ext->extLocation.logicalBlockNum = 0;
392 last_ext->extLocation.partitionReferenceNum = 0;
394 /* Can we merge with the previous extent? */
395 if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) == EXT_NOT_RECORDED_NOT_ALLOCATED) {
396 add = ((1<<30) - sb->s_blocksize - (last_ext->extLength &
397 UDF_EXTENT_LENGTH_MASK)) >> sb->s_blocksize_bits;
401 last_ext->extLength += add << sb->s_blocksize_bits;
405 udf_add_aext(inode, last_pos, last_ext->extLocation,
406 last_ext->extLength, 1);
410 udf_write_aext(inode, last_pos, last_ext->extLocation, last_ext->extLength, 1);
411 /* Managed to do everything necessary? */
415 /* All further extents will be NOT_RECORDED_NOT_ALLOCATED */
416 last_ext->extLocation.logicalBlockNum = 0;
417 last_ext->extLocation.partitionReferenceNum = 0;
418 add = (1 << (30-sb->s_blocksize_bits)) - 1;
419 last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | (add << sb->s_blocksize_bits);
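/*
 * The extent length field is only 30 bits wide, so a single descriptor
 * can cover at most (1 << 30) - blocksize bytes; the loop below emits as
 * many maximal hole extents as are needed to span the gap.
 */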
420 /* Create enough extents to cover the whole hole */
421 while (blocks > add) {
423 if (udf_add_aext(inode, last_pos, last_ext->extLocation,
424 last_ext->extLength, 1) == -1)
429 last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
430 (blocks << sb->s_blocksize_bits);
431 if (udf_add_aext(inode, last_pos, last_ext->extLocation,
432 last_ext->extLength, 1) == -1)
437 /* Do we have some preallocated blocks saved? */
439 if (udf_add_aext(inode, last_pos, prealloc_loc, prealloc_len, 1) == -1)
441 last_ext->extLocation = prealloc_loc;
442 last_ext->extLength = prealloc_len;
445 /* last_pos should point to the last written extent... */
446 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
447 last_pos->offset -= sizeof(short_ad);
448 else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
449 last_pos->offset -= sizeof(long_ad);
455 static struct buffer_head * inode_getblk(struct inode * inode, sector_t block,
456 int *err, long *phys, int *new)
458 static sector_t last_block;
459 struct buffer_head *result = NULL;
460 kernel_long_ad laarr[EXTENT_MERGE_SIZE];
461 struct extent_position prev_epos, cur_epos, next_epos;
462 int count = 0, startnum = 0, endnum = 0;
466 loff_t lbcount = 0, b_off = 0;
467 uint32_t newblocknum, newblock;
470 int goal = 0, pgoal = UDF_I_LOCATION(inode).logicalBlockNum;
473 prev_epos.offset = udf_file_entry_alloc_offset(inode);
474 prev_epos.block = UDF_I_LOCATION(inode);
476 cur_epos = next_epos = prev_epos;
477 b_off = (loff_t)block << inode->i_sb->s_blocksize_bits;
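/*
 * The walk below keeps three cursors: next_epos reads ahead through the
 * allocation descriptors, cur_epos describes the extent just read, and
 * prev_epos trails one descriptor behind so that it ends up pointing at
 * the descriptor that may have to be rewritten.
 */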
479 /* find the extent which contains the block we are looking for.
480 alternate between laarr[0] and laarr[1] for locations of the
481 current extent, and the previous extent */
484 if (prev_epos.bh != cur_epos.bh)
486 brelse(prev_epos.bh);
488 prev_epos.bh = cur_epos.bh;
490 if (cur_epos.bh != next_epos.bh)
493 get_bh(next_epos.bh);
494 cur_epos.bh = next_epos.bh;
499 prev_epos.block = cur_epos.block;
500 cur_epos.block = next_epos.block;
502 prev_epos.offset = cur_epos.offset;
503 cur_epos.offset = next_epos.offset;
505 if ((etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 1)) == -1)
510 laarr[c].extLength = (etype << 30) | elen;
511 laarr[c].extLocation = eloc;
513 if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
514 pgoal = eloc.logicalBlockNum +
515 ((elen + inode->i_sb->s_blocksize - 1) >>
516 inode->i_sb->s_blocksize_bits);
519 } while (lbcount + elen <= b_off);
521 b_off -= lbcount;
522 offset = b_off >> inode->i_sb->s_blocksize_bits;
523 /* Move into indirect extent if we are at a pointer to it */
524 udf_next_aext(inode, &prev_epos, &eloc, &elen, 0);
526 /* if the extent is allocated and recorded, return the block;
527 if the extent length is not a multiple of the blocksize, round it up */
529 if (etype == (EXT_RECORDED_ALLOCATED >> 30))
531 if (elen & (inode->i_sb->s_blocksize - 1))
533 elen = EXT_RECORDED_ALLOCATED |
534 ((elen + inode->i_sb->s_blocksize - 1) &
535 ~(inode->i_sb->s_blocksize - 1));
536 etype = udf_write_aext(inode, &cur_epos, eloc, elen, 1);
538 brelse(prev_epos.bh);
540 brelse(next_epos.bh);
541 newblock = udf_get_lb_pblock(inode->i_sb, eloc, offset);
547 /* Are we beyond EOF? */
558 /* Create a fake extent when there isn't one */
559 memset(&laarr[0].extLocation, 0x00, sizeof(kernel_lb_addr));
560 laarr[0].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED;
561 /* Will udf_extend_file() create a real extent from the fake one? */
562 startnum = (offset > 0);
564 /* Create extents for the hole between EOF and offset */
565 ret = udf_extend_file(inode, &prev_epos, laarr, offset);
567 brelse(prev_epos.bh);
569 brelse(next_epos.bh);
570 /* We don't really know the error here, so we just make something up */
578 /* We are not covered by a preallocated extent? */
579 if ((laarr[0].extLength & UDF_EXTENT_FLAG_MASK) != EXT_NOT_RECORDED_ALLOCATED) {
580 /* Is there any real extent? - otherwise we overwrite the fake one... */
584 laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
585 inode->i_sb->s_blocksize;
586 memset(&laarr[c].extLocation, 0x00, sizeof(kernel_lb_addr));
594 endnum = startnum = ((count > 2) ? 2 : count);
596 /* if the current extent is in position 0, swap it with the previous */
597 if (!c && count != 1)
605 /* if the current block is located in an extent, read the next extent */
606 if ((etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 0)) != -1)
608 laarr[c+1].extLength = (etype << 30) | elen;
609 laarr[c+1].extLocation = eloc;
619 /* if the current extent is not recorded but allocated, get the
620 block in the extent corresponding to the requested block */
621 if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
622 newblocknum = laarr[c].extLocation.logicalBlockNum + offset;
623 else /* otherwise, allocate a new block */
625 if (UDF_I_NEXT_ALLOC_BLOCK(inode) == block)
626 goal = UDF_I_NEXT_ALLOC_GOAL(inode);
631 goal = UDF_I_LOCATION(inode).logicalBlockNum + 1;
634 if (!(newblocknum = udf_new_block(inode->i_sb, inode,
635 UDF_I_LOCATION(inode).partitionReferenceNum, goal, err)))
637 brelse(prev_epos.bh);
641 UDF_I_LENEXTENTS(inode) += inode->i_sb->s_blocksize;
644 /* if the extent the requested block is located in contains multiple blocks,
645 split the extent into at most three extents: blocks prior to the requested
646 block, the requested block, and blocks after the requested block */
647 udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum);
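/*
 * For instance, a hole (not recorded, not allocated) extent spanning
 * blocks 0..7 with the requested block at offset 3 is split into blocks
 * 0..2 (still a hole), block 3 (recorded at newblocknum) and blocks 4..7
 * (still a hole).
 */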
649 #ifdef UDF_PREALLOCATE
650 /* preallocate blocks */
651 udf_prealloc_extents(inode, c, lastblock, laarr, &endnum);
654 /* merge any contiguous extents in laarr */
655 udf_merge_extents(inode, laarr, &endnum);
657 /* write back the new extents, inserting new extents if the new number
658 of extents is greater than the old number, and deleting extents if
659 the new number of extents is less than the old number */
660 udf_update_extents(inode, laarr, startnum, endnum, &prev_epos);
662 brelse(prev_epos.bh);
664 if (!(newblock = udf_get_pblock(inode->i_sb, newblocknum,
665 UDF_I_LOCATION(inode).partitionReferenceNum, 0)))
672 UDF_I_NEXT_ALLOC_BLOCK(inode) = block;
673 UDF_I_NEXT_ALLOC_GOAL(inode) = newblocknum;
674 inode->i_ctime = current_fs_time(inode->i_sb);
677 udf_sync_inode(inode);
679 mark_inode_dirty(inode);
683 static void udf_split_extents(struct inode *inode, int *c, int offset, int newblocknum,
684 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
686 if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) ||
687 (laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
690 int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) +
691 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
692 int8_t etype = (laarr[curr].extLength >> 30);
696 else if (!offset || blen == offset + 1)
698 laarr[curr+2] = laarr[curr+1];
699 laarr[curr+1] = laarr[curr];
703 laarr[curr+3] = laarr[curr+1];
704 laarr[curr+2] = laarr[curr+1] = laarr[curr];
709 if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
711 udf_free_blocks(inode->i_sb, inode, laarr[curr].extLocation, 0, offset);
712 laarr[curr].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
713 (offset << inode->i_sb->s_blocksize_bits);
714 laarr[curr].extLocation.logicalBlockNum = 0;
715 laarr[curr].extLocation.partitionReferenceNum = 0;
718 laarr[curr].extLength = (etype << 30) |
719 (offset << inode->i_sb->s_blocksize_bits);
725 laarr[curr].extLocation.logicalBlockNum = newblocknum;
726 if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
727 laarr[curr].extLocation.partitionReferenceNum =
728 UDF_I_LOCATION(inode).partitionReferenceNum;
729 laarr[curr].extLength = EXT_RECORDED_ALLOCATED |
730 inode->i_sb->s_blocksize;
733 if (blen != offset + 1)
735 if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
736 laarr[curr].extLocation.logicalBlockNum += (offset + 1);
737 laarr[curr].extLength = (etype << 30) |
738 ((blen - (offset + 1)) << inode->i_sb->s_blocksize_bits);
745 static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
746 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
748 int start, length = 0, currlength = 0, i;
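/*
 * Preallocation strategy (roughly): try to reserve up to
 * UDF_DEFAULT_PREALLOC_BLOCKS blocks directly after the block just
 * written, either by growing a not-recorded-allocated extent that already
 * follows it or by inserting a new one, and shrink the following hole
 * extents by however many blocks were actually obtained.
 */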
750 if (*endnum >= (c+1))
759 if ((laarr[c+1].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
762 length = currlength = (((laarr[c+1].extLength & UDF_EXTENT_LENGTH_MASK) +
763 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
769 for (i=start+1; i<=*endnum; i++)
774 length += UDF_DEFAULT_PREALLOC_BLOCKS;
776 else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
777 length += (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
778 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
785 int next = laarr[start].extLocation.logicalBlockNum +
786 (((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) +
787 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
788 int numalloc = udf_prealloc_blocks(inode->i_sb, inode,
789 laarr[start].extLocation.partitionReferenceNum,
790 next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ? length :
791 UDF_DEFAULT_PREALLOC_BLOCKS) - currlength);
796 laarr[start].extLength +=
797 (numalloc << inode->i_sb->s_blocksize_bits);
800 memmove(&laarr[c+2], &laarr[c+1],
801 sizeof(long_ad) * (*endnum - (c+1)));
803 laarr[c+1].extLocation.logicalBlockNum = next;
804 laarr[c+1].extLocation.partitionReferenceNum =
805 laarr[c].extLocation.partitionReferenceNum;
806 laarr[c+1].extLength = EXT_NOT_RECORDED_ALLOCATED |
807 (numalloc << inode->i_sb->s_blocksize_bits);
811 for (i=start+1; numalloc && i<*endnum; i++)
813 int elen = ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
814 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
818 laarr[i].extLength -=
819 (numalloc << inode->i_sb->s_blocksize_bits);
826 memmove(&laarr[i], &laarr[i+1],
827 sizeof(long_ad) * (*endnum - (i+1)));
832 UDF_I_LENEXTENTS(inode) += numalloc << inode->i_sb->s_blocksize_bits;
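/*
 * udf_merge_extents() coalesces neighbouring extents of the same type
 * whose block ranges are contiguous (re-splitting if the combined length
 * would overflow the 30-bit length field) and frees the blocks behind
 * not-recorded-allocated extents that are turned into holes.
 */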
837 static void udf_merge_extents(struct inode *inode,
838 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
842 for (i=0; i<(*endnum-1); i++)
844 if ((laarr[i].extLength >> 30) == (laarr[i+1].extLength >> 30))
846 if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) ||
847 ((laarr[i+1].extLocation.logicalBlockNum - laarr[i].extLocation.logicalBlockNum) ==
848 (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
849 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits)))
851 if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
852 (laarr[i+1].extLength & UDF_EXTENT_LENGTH_MASK) +
853 inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK)
855 laarr[i+1].extLength = (laarr[i+1].extLength -
856 (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
857 UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize-1);
858 laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_FLAG_MASK) +
859 (UDF_EXTENT_LENGTH_MASK + 1) - inode->i_sb->s_blocksize;
860 laarr[i+1].extLocation.logicalBlockNum =
861 laarr[i].extLocation.logicalBlockNum +
862 ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) >>
863 inode->i_sb->s_blocksize_bits);
867 laarr[i].extLength = laarr[i+1].extLength +
868 (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
869 inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize-1));
871 memmove(&laarr[i+1], &laarr[i+2],
872 sizeof(long_ad) * (*endnum - (i+2)));
878 else if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) &&
879 ((laarr[i+1].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)))
881 udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0,
882 ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
883 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
884 laarr[i].extLocation.logicalBlockNum = 0;
885 laarr[i].extLocation.partitionReferenceNum = 0;
887 if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
888 (laarr[i+1].extLength & UDF_EXTENT_LENGTH_MASK) +
889 inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK)
891 laarr[i+1].extLength = (laarr[i+1].extLength -
892 (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
893 UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize-1);
894 laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_FLAG_MASK) +
895 (UDF_EXTENT_LENGTH_MASK + 1) - inode->i_sb->s_blocksize;
899 laarr[i].extLength = laarr[i+1].extLength +
900 (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
901 inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize-1));
903 memmove(&laarr[i+1], &laarr[i+2],
904 sizeof(long_ad) * (*endnum - (i+2)));
909 else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
911 udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0,
912 ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
913 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
914 laarr[i].extLocation.logicalBlockNum = 0;
915 laarr[i].extLocation.partitionReferenceNum = 0;
916 laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) |
917 EXT_NOT_RECORDED_NOT_ALLOCATED;
922 static void udf_update_extents(struct inode *inode,
923 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int startnum, int endnum,
924 struct extent_position *epos)
927 kernel_lb_addr tmploc;
930 if (startnum > endnum)
932 for (i=0; i<(startnum-endnum); i++)
933 udf_delete_aext(inode, *epos, laarr[i].extLocation,
936 else if (startnum < endnum)
938 for (i=0; i<(endnum-startnum); i++)
940 udf_insert_aext(inode, *epos, laarr[i].extLocation,
942 udf_next_aext(inode, epos, &laarr[i].extLocation,
943 &laarr[i].extLength, 1);
948 for (i=start; i<endnum; i++)
950 udf_next_aext(inode, epos, &tmploc, &tmplen, 0);
951 udf_write_aext(inode, epos, laarr[i].extLocation,
952 laarr[i].extLength, 1);
956 struct buffer_head * udf_bread(struct inode * inode, int block,
957 int create, int * err)
959 struct buffer_head * bh = NULL;
961 bh = udf_getblk(inode, block, create, err);
965 if (buffer_uptodate(bh))
967 ll_rw_block(READ, 1, &bh);
969 if (buffer_uptodate(bh))
976 void udf_truncate(struct inode * inode)
981 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
982 S_ISLNK(inode->i_mode)))
984 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
988 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
990 if (inode->i_sb->s_blocksize < (udf_file_entry_alloc_offset(inode) +
993 udf_expand_file_adinicb(inode, inode->i_size, &err);
994 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
996 inode->i_size = UDF_I_LENALLOC(inode);
1001 udf_truncate_extents(inode);
1005 offset = inode->i_size & (inode->i_sb->s_blocksize - 1);
1006 memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode) + offset, 0x00, inode->i_sb->s_blocksize - offset - udf_file_entry_alloc_offset(inode));
1007 UDF_I_LENALLOC(inode) = inode->i_size;
1012 block_truncate_page(inode->i_mapping, inode->i_size, udf_get_block);
1013 udf_truncate_extents(inode);
1016 inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb);
1018 udf_sync_inode (inode);
1020 mark_inode_dirty(inode);
1025 __udf_read_inode(struct inode *inode)
1027 struct buffer_head *bh = NULL;
1028 struct fileEntry *fe;
1032 * Set defaults, but the inode is still incomplete!
1033 * Note: get_new_inode() sets the following on a new inode:
1036 * i_flags = sb->s_flags
1038 * clean_inode(): zero fills and sets
1043 bh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 0, &ident);
1047 printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed !bh\n",
1049 make_bad_inode(inode);
1053 if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
1054 ident != TAG_IDENT_USE)
1056 printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed ident=%d\n",
1057 inode->i_ino, ident);
1059 make_bad_inode(inode);
1063 fe = (struct fileEntry *)bh->b_data;
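/*
 * Strategy type 4096 means this ICB is reached via an indirect entry:
 * the real file entry location is taken from indirectICB and the inode
 * is re-read from there.  Strategy type 4 is the plain single-ICB case.
 */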
1065 if (le16_to_cpu(fe->icbTag.strategyType) == 4096)
1067 struct buffer_head *ibh = NULL, *nbh = NULL;
1068 struct indirectEntry *ie;
1070 ibh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 1, &ident);
1071 if (ident == TAG_IDENT_IE)
1076 ie = (struct indirectEntry *)ibh->b_data;
1078 loc = lelb_to_cpu(ie->indirectICB.extLocation);
1080 if (ie->indirectICB.extLength &&
1081 (nbh = udf_read_ptagged(inode->i_sb, loc, 0, &ident)))
1083 if (ident == TAG_IDENT_FE ||
1084 ident == TAG_IDENT_EFE)
1086 memcpy(&UDF_I_LOCATION(inode), &loc, sizeof(kernel_lb_addr));
1090 __udf_read_inode(inode);
1106 else if (le16_to_cpu(fe->icbTag.strategyType) != 4)
1108 printk(KERN_ERR "udf: unsupported strategy type: %d\n",
1109 le16_to_cpu(fe->icbTag.strategyType));
1111 make_bad_inode(inode);
1114 udf_fill_inode(inode, bh);
1119 static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
1121 struct fileEntry *fe;
1122 struct extendedFileEntry *efe;
1127 fe = (struct fileEntry *)bh->b_data;
1128 efe = (struct extendedFileEntry *)bh->b_data;
1130 if (le16_to_cpu(fe->icbTag.strategyType) == 4)
1131 UDF_I_STRAT4096(inode) = 0;
1132 else /* if (le16_to_cpu(fe->icbTag.strategyType) == 4096) */
1133 UDF_I_STRAT4096(inode) = 1;
1135 UDF_I_ALLOCTYPE(inode) = le16_to_cpu(fe->icbTag.flags) & ICBTAG_FLAG_AD_MASK;
1136 UDF_I_UNIQUE(inode) = 0;
1137 UDF_I_LENEATTR(inode) = 0;
1138 UDF_I_LENEXTENTS(inode) = 0;
1139 UDF_I_LENALLOC(inode) = 0;
1140 UDF_I_NEXT_ALLOC_BLOCK(inode) = 0;
1141 UDF_I_NEXT_ALLOC_GOAL(inode) = 0;
1142 if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_EFE)
1144 UDF_I_EFE(inode) = 1;
1145 UDF_I_USE(inode) = 0;
1146 UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry), GFP_KERNEL);
1147 memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct extendedFileEntry), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
1149 else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_FE)
1151 UDF_I_EFE(inode) = 0;
1152 UDF_I_USE(inode) = 0;
1153 UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct fileEntry), GFP_KERNEL);
1154 memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct fileEntry), inode->i_sb->s_blocksize - sizeof(struct fileEntry));
1156 else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
1158 UDF_I_EFE(inode) = 0;
1159 UDF_I_USE(inode) = 1;
1160 UDF_I_LENALLOC(inode) =
1162 ((struct unallocSpaceEntry *)bh->b_data)->lengthAllocDescs);
1163 UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry), GFP_KERNEL);
1164 memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct unallocSpaceEntry), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
1168 inode->i_uid = le32_to_cpu(fe->uid);
1169 if (inode->i_uid == -1 || UDF_QUERY_FLAG(inode->i_sb,
1170 UDF_FLAG_UID_IGNORE))
1171 inode->i_uid = UDF_SB(inode->i_sb)->s_uid;
1173 inode->i_gid = le32_to_cpu(fe->gid);
1174 if (inode->i_gid == -1 || UDF_QUERY_FLAG(inode->i_sb,
1175 UDF_FLAG_GID_IGNORE))
1176 inode->i_gid = UDF_SB(inode->i_sb)->s_gid;
1178 inode->i_nlink = le16_to_cpu(fe->fileLinkCount);
1179 if (!inode->i_nlink)
1182 inode->i_size = le64_to_cpu(fe->informationLength);
1183 UDF_I_LENEXTENTS(inode) = inode->i_size;
1185 inode->i_mode = udf_convert_permissions(fe);
1186 inode->i_mode &= ~UDF_SB(inode->i_sb)->s_umask;
1188 if (UDF_I_EFE(inode) == 0)
1190 inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
1191 (inode->i_sb->s_blocksize_bits - 9);
1193 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1194 lets_to_cpu(fe->accessTime)) )
1196 inode->i_atime.tv_sec = convtime;
1197 inode->i_atime.tv_nsec = convtime_usec * 1000;
1201 inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
1204 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1205 lets_to_cpu(fe->modificationTime)) )
1207 inode->i_mtime.tv_sec = convtime;
1208 inode->i_mtime.tv_nsec = convtime_usec * 1000;
1212 inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
1215 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1216 lets_to_cpu(fe->attrTime)) )
1218 inode->i_ctime.tv_sec = convtime;
1219 inode->i_ctime.tv_nsec = convtime_usec * 1000;
1223 inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
1226 UDF_I_UNIQUE(inode) = le64_to_cpu(fe->uniqueID);
1227 UDF_I_LENEATTR(inode) = le32_to_cpu(fe->lengthExtendedAttr);
1228 UDF_I_LENALLOC(inode) = le32_to_cpu(fe->lengthAllocDescs);
1229 offset = sizeof(struct fileEntry) + UDF_I_LENEATTR(inode);
1233 inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
1234 (inode->i_sb->s_blocksize_bits - 9);
1236 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1237 lets_to_cpu(efe->accessTime)) )
1239 inode->i_atime.tv_sec = convtime;
1240 inode->i_atime.tv_nsec = convtime_usec * 1000;
1244 inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
1247 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1248 lets_to_cpu(efe->modificationTime)) )
1250 inode->i_mtime.tv_sec = convtime;
1251 inode->i_mtime.tv_nsec = convtime_usec * 1000;
1255 inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
1258 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1259 lets_to_cpu(efe->createTime)) )
1261 UDF_I_CRTIME(inode).tv_sec = convtime;
1262 UDF_I_CRTIME(inode).tv_nsec = convtime_usec * 1000;
1266 UDF_I_CRTIME(inode) = UDF_SB_RECORDTIME(inode->i_sb);
1269 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1270 lets_to_cpu(efe->attrTime)) )
1272 inode->i_ctime.tv_sec = convtime;
1273 inode->i_ctime.tv_nsec = convtime_usec * 1000;
1277 inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
1280 UDF_I_UNIQUE(inode) = le64_to_cpu(efe->uniqueID);
1281 UDF_I_LENEATTR(inode) = le32_to_cpu(efe->lengthExtendedAttr);
1282 UDF_I_LENALLOC(inode) = le32_to_cpu(efe->lengthAllocDescs);
1283 offset = sizeof(struct extendedFileEntry) + UDF_I_LENEATTR(inode);
1286 switch (fe->icbTag.fileType)
1288 case ICBTAG_FILE_TYPE_DIRECTORY:
1290 inode->i_op = &udf_dir_inode_operations;
1291 inode->i_fop = &udf_dir_operations;
1292 inode->i_mode |= S_IFDIR;
1296 case ICBTAG_FILE_TYPE_REALTIME:
1297 case ICBTAG_FILE_TYPE_REGULAR:
1298 case ICBTAG_FILE_TYPE_UNDEF:
1300 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
1301 inode->i_data.a_ops = &udf_adinicb_aops;
1303 inode->i_data.a_ops = &udf_aops;
1304 inode->i_op = &udf_file_inode_operations;
1305 inode->i_fop = &udf_file_operations;
1306 inode->i_mode |= S_IFREG;
1309 case ICBTAG_FILE_TYPE_BLOCK:
1311 inode->i_mode |= S_IFBLK;
1314 case ICBTAG_FILE_TYPE_CHAR:
1316 inode->i_mode |= S_IFCHR;
1319 case ICBTAG_FILE_TYPE_FIFO:
1321 init_special_inode(inode, inode->i_mode | S_IFIFO, 0);
1324 case ICBTAG_FILE_TYPE_SOCKET:
1326 init_special_inode(inode, inode->i_mode | S_IFSOCK, 0);
1329 case ICBTAG_FILE_TYPE_SYMLINK:
1331 inode->i_data.a_ops = &udf_symlink_aops;
1332 inode->i_op = &page_symlink_inode_operations;
1333 inode->i_mode = S_IFLNK|S_IRWXUGO;
1338 printk(KERN_ERR "udf: udf_fill_inode(ino %ld) failed unknown file type=%d\n",
1339 inode->i_ino, fe->icbTag.fileType);
1340 make_bad_inode(inode);
1344 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1346 struct deviceSpec *dsea =
1347 (struct deviceSpec *)
1348 udf_get_extendedattr(inode, 12, 1);
1352 init_special_inode(inode, inode->i_mode, MKDEV(
1353 le32_to_cpu(dsea->majorDeviceIdent),
1354 le32_to_cpu(dsea->minorDeviceIdent)));
1355 /* Developer ID ??? */
1359 make_bad_inode(inode);
1365 udf_convert_permissions(struct fileEntry *fe)
1368 uint32_t permissions;
1371 permissions = le32_to_cpu(fe->permissions);
1372 flags = le16_to_cpu(fe->icbTag.flags);
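/*
 * On-disc permissions use five bits per class (execute, write, read,
 * change-attribute, delete) for other, group and owner in turn; the
 * shifts below line the rwx bits up with S_IRWXO/S_IRWXG/S_IRWXU, while
 * setuid/setgid/sticky come from the ICB tag flags.
 */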
1374 mode = (( permissions ) & S_IRWXO) |
1375 (( permissions >> 2 ) & S_IRWXG) |
1376 (( permissions >> 4 ) & S_IRWXU) |
1377 (( flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) |
1378 (( flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) |
1379 (( flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0);
1388 * Write out the specified inode.
1391 * This routine is called whenever an inode is synced.
1392 * Currently it simply writes the inode back via udf_update_inode().
1395 * July 1, 1997 - Andrew E. Mileski
1396 * Written, tested, and released.
1399 int udf_write_inode(struct inode * inode, int sync)
1403 ret = udf_update_inode(inode, sync);
1408 int udf_sync_inode(struct inode * inode)
1410 return udf_update_inode(inode, 1);
1414 udf_update_inode(struct inode *inode, int do_sync)
1416 struct buffer_head *bh = NULL;
1417 struct fileEntry *fe;
1418 struct extendedFileEntry *efe;
1423 kernel_timestamp cpu_time;
1426 bh = udf_tread(inode->i_sb,
1427 udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0));
1431 udf_debug("bread failure\n");
1435 memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
1437 fe = (struct fileEntry *)bh->b_data;
1438 efe = (struct extendedFileEntry *)bh->b_data;
1440 if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
1442 struct unallocSpaceEntry *use =
1443 (struct unallocSpaceEntry *)bh->b_data;
1445 use->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
1446 memcpy(bh->b_data + sizeof(struct unallocSpaceEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
1447 crclen = sizeof(struct unallocSpaceEntry) + UDF_I_LENALLOC(inode) -
1449 use->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
1450 use->descTag.descCRCLength = cpu_to_le16(crclen);
1451 use->descTag.descCRC = cpu_to_le16(udf_crc((char *)use + sizeof(tag), crclen, 0));
1453 use->descTag.tagChecksum = 0;
1454 for (i=0; i<16; i++)
1455 if (i != 4)
1456 use->descTag.tagChecksum += ((uint8_t *)&(use->descTag))[i];
1458 mark_buffer_dirty(bh);
1463 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET))
1464 fe->uid = cpu_to_le32(-1);
1465 else fe->uid = cpu_to_le32(inode->i_uid);
1467 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_FORGET))
1468 fe->gid = cpu_to_le32(-1);
1469 else fe->gid = cpu_to_le32(inode->i_gid);
1471 udfperms = ((inode->i_mode & S_IRWXO) ) |
1472 ((inode->i_mode & S_IRWXG) << 2) |
1473 ((inode->i_mode & S_IRWXU) << 4);
1475 udfperms |= (le32_to_cpu(fe->permissions) &
1476 (FE_PERM_O_DELETE | FE_PERM_O_CHATTR |
1477 FE_PERM_G_DELETE | FE_PERM_G_CHATTR |
1478 FE_PERM_U_DELETE | FE_PERM_U_CHATTR));
1479 fe->permissions = cpu_to_le32(udfperms);
1481 if (S_ISDIR(inode->i_mode))
1482 fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
1484 fe->fileLinkCount = cpu_to_le16(inode->i_nlink);
1486 fe->informationLength = cpu_to_le64(inode->i_size);
1488 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1491 struct deviceSpec *dsea =
1492 (struct deviceSpec *)
1493 udf_get_extendedattr(inode, 12, 1);
1497 dsea = (struct deviceSpec *)
1498 udf_add_extendedattr(inode,
1499 sizeof(struct deviceSpec) +
1500 sizeof(regid), 12, 0x3);
1501 dsea->attrType = cpu_to_le32(12);
1502 dsea->attrSubtype = 1;
1503 dsea->attrLength = cpu_to_le32(sizeof(struct deviceSpec) +
1505 dsea->impUseLength = cpu_to_le32(sizeof(regid));
1507 eid = (regid *)dsea->impUse;
1508 memset(eid, 0, sizeof(regid));
1509 strcpy(eid->ident, UDF_ID_DEVELOPER);
1510 eid->identSuffix[0] = UDF_OS_CLASS_UNIX;
1511 eid->identSuffix[1] = UDF_OS_ID_LINUX;
1512 dsea->majorDeviceIdent = cpu_to_le32(imajor(inode));
1513 dsea->minorDeviceIdent = cpu_to_le32(iminor(inode));
1516 if (UDF_I_EFE(inode) == 0)
1518 memcpy(bh->b_data + sizeof(struct fileEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct fileEntry));
1519 fe->logicalBlocksRecorded = cpu_to_le64(
1520 (inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
1521 (inode->i_sb->s_blocksize_bits - 9));
1523 if (udf_time_to_stamp(&cpu_time, inode->i_atime))
1524 fe->accessTime = cpu_to_lets(cpu_time);
1525 if (udf_time_to_stamp(&cpu_time, inode->i_mtime))
1526 fe->modificationTime = cpu_to_lets(cpu_time);
1527 if (udf_time_to_stamp(&cpu_time, inode->i_ctime))
1528 fe->attrTime = cpu_to_lets(cpu_time);
1529 memset(&(fe->impIdent), 0, sizeof(regid));
1530 strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER);
1531 fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1532 fe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1533 fe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode));
1534 fe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode));
1535 fe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
1536 fe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_FE);
1537 crclen = sizeof(struct fileEntry);
1541 memcpy(bh->b_data + sizeof(struct extendedFileEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
1542 efe->objectSize = cpu_to_le64(inode->i_size);
1543 efe->logicalBlocksRecorded = cpu_to_le64(
1544 (inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
1545 (inode->i_sb->s_blocksize_bits - 9));
1547 if (UDF_I_CRTIME(inode).tv_sec > inode->i_atime.tv_sec ||
1548 (UDF_I_CRTIME(inode).tv_sec == inode->i_atime.tv_sec &&
1549 UDF_I_CRTIME(inode).tv_nsec > inode->i_atime.tv_nsec))
1551 UDF_I_CRTIME(inode) = inode->i_atime;
1553 if (UDF_I_CRTIME(inode).tv_sec > inode->i_mtime.tv_sec ||
1554 (UDF_I_CRTIME(inode).tv_sec == inode->i_mtime.tv_sec &&
1555 UDF_I_CRTIME(inode).tv_nsec > inode->i_mtime.tv_nsec))
1557 UDF_I_CRTIME(inode) = inode->i_mtime;
1559 if (UDF_I_CRTIME(inode).tv_sec > inode->i_ctime.tv_sec ||
1560 (UDF_I_CRTIME(inode).tv_sec == inode->i_ctime.tv_sec &&
1561 UDF_I_CRTIME(inode).tv_nsec > inode->i_ctime.tv_nsec))
1563 UDF_I_CRTIME(inode) = inode->i_ctime;
1566 if (udf_time_to_stamp(&cpu_time, inode->i_atime))
1567 efe->accessTime = cpu_to_lets(cpu_time);
1568 if (udf_time_to_stamp(&cpu_time, inode->i_mtime))
1569 efe->modificationTime = cpu_to_lets(cpu_time);
1570 if (udf_time_to_stamp(&cpu_time, UDF_I_CRTIME(inode)))
1571 efe->createTime = cpu_to_lets(cpu_time);
1572 if (udf_time_to_stamp(&cpu_time, inode->i_ctime))
1573 efe->attrTime = cpu_to_lets(cpu_time);
1575 memset(&(efe->impIdent), 0, sizeof(regid));
1576 strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER);
1577 efe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1578 efe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1579 efe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode));
1580 efe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode));
1581 efe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
1582 efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE);
1583 crclen = sizeof(struct extendedFileEntry);
1585 if (UDF_I_STRAT4096(inode))
1587 fe->icbTag.strategyType = cpu_to_le16(4096);
1588 fe->icbTag.strategyParameter = cpu_to_le16(1);
1589 fe->icbTag.numEntries = cpu_to_le16(2);
1593 fe->icbTag.strategyType = cpu_to_le16(4);
1594 fe->icbTag.numEntries = cpu_to_le16(1);
1597 if (S_ISDIR(inode->i_mode))
1598 fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY;
1599 else if (S_ISREG(inode->i_mode))
1600 fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR;
1601 else if (S_ISLNK(inode->i_mode))
1602 fe->icbTag.fileType = ICBTAG_FILE_TYPE_SYMLINK;
1603 else if (S_ISBLK(inode->i_mode))
1604 fe->icbTag.fileType = ICBTAG_FILE_TYPE_BLOCK;
1605 else if (S_ISCHR(inode->i_mode))
1606 fe->icbTag.fileType = ICBTAG_FILE_TYPE_CHAR;
1607 else if (S_ISFIFO(inode->i_mode))
1608 fe->icbTag.fileType = ICBTAG_FILE_TYPE_FIFO;
1609 else if (S_ISSOCK(inode->i_mode))
1610 fe->icbTag.fileType = ICBTAG_FILE_TYPE_SOCKET;
1612 icbflags = UDF_I_ALLOCTYPE(inode) |
1613 ((inode->i_mode & S_ISUID) ? ICBTAG_FLAG_SETUID : 0) |
1614 ((inode->i_mode & S_ISGID) ? ICBTAG_FLAG_SETGID : 0) |
1615 ((inode->i_mode & S_ISVTX) ? ICBTAG_FLAG_STICKY : 0) |
1616 (le16_to_cpu(fe->icbTag.flags) &
1617 ~(ICBTAG_FLAG_AD_MASK | ICBTAG_FLAG_SETUID |
1618 ICBTAG_FLAG_SETGID | ICBTAG_FLAG_STICKY));
1620 fe->icbTag.flags = cpu_to_le16(icbflags);
1621 if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
1622 fe->descTag.descVersion = cpu_to_le16(3);
1624 fe->descTag.descVersion = cpu_to_le16(2);
1625 fe->descTag.tagSerialNum = cpu_to_le16(UDF_SB_SERIALNUM(inode->i_sb));
1626 fe->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
1627 crclen += UDF_I_LENEATTR(inode) + UDF_I_LENALLOC(inode) - sizeof(tag);
1628 fe->descTag.descCRCLength = cpu_to_le16(crclen);
1629 fe->descTag.descCRC = cpu_to_le16(udf_crc((char *)fe + sizeof(tag), crclen, 0));
1631 fe->descTag.tagChecksum = 0;
1632 for (i=0; i<16; i++)
1633 if (i != 4)
1634 fe->descTag.tagChecksum += ((uint8_t *)&(fe->descTag))[i];
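/*
 * The descriptor tag checksum is the modulo-256 sum of the other tag
 * bytes; byte 4 (the checksum field itself) is skipped.
 */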
1636 /* write the inode buffer back to disc */
1637 mark_buffer_dirty(bh);
1640 sync_dirty_buffer(bh);
1641 if (buffer_req(bh) && !buffer_uptodate(bh))
1643 printk("IO error syncing udf inode [%s:%08lx]\n",
1644 inode->i_sb->s_id, inode->i_ino);
1653 udf_iget(struct super_block *sb, kernel_lb_addr ino)
1655 unsigned long block = udf_get_lb_pblock(sb, ino, 0);
1656 struct inode *inode = iget_locked(sb, block);
1661 if (inode->i_state & I_NEW) {
1662 memcpy(&UDF_I_LOCATION(inode), &ino, sizeof(kernel_lb_addr));
1663 __udf_read_inode(inode);
1664 unlock_new_inode(inode);
1667 if (is_bad_inode(inode))
1670 if (ino.logicalBlockNum >= UDF_SB_PARTLEN(sb, ino.partitionReferenceNum)) {
1671 udf_debug("block=%d, partition=%d out of range\n",
1672 ino.logicalBlockNum, ino.partitionReferenceNum);
1673 make_bad_inode(inode);
1684 int8_t udf_add_aext(struct inode *inode, struct extent_position *epos,
1685 kernel_lb_addr eloc, uint32_t elen, int inc)
1688 short_ad *sad = NULL;
1689 long_ad *lad = NULL;
1690 struct allocExtDesc *aed;
1695 ptr = UDF_I_DATA(inode) + epos->offset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
1697 ptr = epos->bh->b_data + epos->offset;
1699 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
1700 adsize = sizeof(short_ad);
1701 else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
1702 adsize = sizeof(long_ad);
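/*
 * If the current descriptor area cannot hold two more descriptors (the
 * new extent plus a chaining pointer), a fresh allocation extent block
 * is allocated below and linked in with an EXT_NEXT_EXTENT_ALLOCDECS
 * descriptor.
 */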
1706 if (epos->offset + (2 * adsize) > inode->i_sb->s_blocksize)
1709 struct buffer_head *nbh;
1711 kernel_lb_addr obloc = epos->block;
1713 if (!(epos->block.logicalBlockNum = udf_new_block(inode->i_sb, NULL,
1714 obloc.partitionReferenceNum, obloc.logicalBlockNum, &err)))
1718 if (!(nbh = udf_tgetblk(inode->i_sb, udf_get_lb_pblock(inode->i_sb,
1724 memset(nbh->b_data, 0x00, inode->i_sb->s_blocksize);
1725 set_buffer_uptodate(nbh);
1727 mark_buffer_dirty_inode(nbh, inode);
1729 aed = (struct allocExtDesc *)(nbh->b_data);
1730 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT))
1731 aed->previousAllocExtLocation = cpu_to_le32(obloc.logicalBlockNum);
1732 if (epos->offset + adsize > inode->i_sb->s_blocksize)
1734 loffset = epos->offset;
1735 aed->lengthAllocDescs = cpu_to_le32(adsize);
1736 sptr = ptr - adsize;
1737 dptr = nbh->b_data + sizeof(struct allocExtDesc);
1738 memcpy(dptr, sptr, adsize);
1739 epos->offset = sizeof(struct allocExtDesc) + adsize;
1743 loffset = epos->offset + adsize;
1744 aed->lengthAllocDescs = cpu_to_le32(0);
1746 epos->offset = sizeof(struct allocExtDesc);
1750 aed = (struct allocExtDesc *)epos->bh->b_data;
1751 aed->lengthAllocDescs =
1752 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
1756 UDF_I_LENALLOC(inode) += adsize;
1757 mark_inode_dirty(inode);
1760 if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
1761 udf_new_tag(nbh->b_data, TAG_IDENT_AED, 3, 1,
1762 epos->block.logicalBlockNum, sizeof(tag));
1764 udf_new_tag(nbh->b_data, TAG_IDENT_AED, 2, 1,
1765 epos->block.logicalBlockNum, sizeof(tag));
1766 switch (UDF_I_ALLOCTYPE(inode))
1768 case ICBTAG_FLAG_AD_SHORT:
1770 sad = (short_ad *)sptr;
1771 sad->extLength = cpu_to_le32(
1772 EXT_NEXT_EXTENT_ALLOCDECS |
1773 inode->i_sb->s_blocksize);
1774 sad->extPosition = cpu_to_le32(epos->block.logicalBlockNum);
1777 case ICBTAG_FLAG_AD_LONG:
1779 lad = (long_ad *)sptr;
1780 lad->extLength = cpu_to_le32(
1781 EXT_NEXT_EXTENT_ALLOCDECS |
1782 inode->i_sb->s_blocksize);
1783 lad->extLocation = cpu_to_lelb(epos->block);
1784 memset(lad->impUse, 0x00, sizeof(lad->impUse));
1790 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1791 udf_update_tag(epos->bh->b_data, loffset);
1793 udf_update_tag(epos->bh->b_data, sizeof(struct allocExtDesc));
1794 mark_buffer_dirty_inode(epos->bh, inode);
1798 mark_inode_dirty(inode);
1802 etype = udf_write_aext(inode, epos, eloc, elen, inc);
1806 UDF_I_LENALLOC(inode) += adsize;
1807 mark_inode_dirty(inode);
1811 aed = (struct allocExtDesc *)epos->bh->b_data;
1812 aed->lengthAllocDescs =
1813 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
1814 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1815 udf_update_tag(epos->bh->b_data, epos->offset + (inc ? 0 : adsize));
1817 udf_update_tag(epos->bh->b_data, sizeof(struct allocExtDesc));
1818 mark_buffer_dirty_inode(epos->bh, inode);
1824 int8_t udf_write_aext(struct inode *inode, struct extent_position *epos,
1825 kernel_lb_addr eloc, uint32_t elen, int inc)
1831 ptr = UDF_I_DATA(inode) + epos->offset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
1833 ptr = epos->bh->b_data + epos->offset;
1835 switch (UDF_I_ALLOCTYPE(inode))
1837 case ICBTAG_FLAG_AD_SHORT:
1839 short_ad *sad = (short_ad *)ptr;
1840 sad->extLength = cpu_to_le32(elen);
1841 sad->extPosition = cpu_to_le32(eloc.logicalBlockNum);
1842 adsize = sizeof(short_ad);
1845 case ICBTAG_FLAG_AD_LONG:
1847 long_ad *lad = (long_ad *)ptr;
1848 lad->extLength = cpu_to_le32(elen);
1849 lad->extLocation = cpu_to_lelb(eloc);
1850 memset(lad->impUse, 0x00, sizeof(lad->impUse));
1851 adsize = sizeof(long_ad);
1860 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1862 struct allocExtDesc *aed = (struct allocExtDesc *)epos->bh->b_data;
1863 udf_update_tag(epos->bh->b_data,
1864 le32_to_cpu(aed->lengthAllocDescs) + sizeof(struct allocExtDesc));
1866 mark_buffer_dirty_inode(epos->bh, inode);
1869 mark_inode_dirty(inode);
1872 epos->offset += adsize;
1873 return (elen >> 30);
1876 int8_t udf_next_aext(struct inode *inode, struct extent_position *epos,
1877 kernel_lb_addr *eloc, uint32_t *elen, int inc)
1881 while ((etype = udf_current_aext(inode, epos, eloc, elen, inc)) ==
1882 (EXT_NEXT_EXTENT_ALLOCDECS >> 30))
1884 epos->block = *eloc;
1885 epos->offset = sizeof(struct allocExtDesc);
1887 if (!(epos->bh = udf_tread(inode->i_sb, udf_get_lb_pblock(inode->i_sb, epos->block, 0))))
1889 udf_debug("reading block %d failed!\n",
1890 udf_get_lb_pblock(inode->i_sb, epos->block, 0));
1898 int8_t udf_current_aext(struct inode *inode, struct extent_position *epos,
1899 kernel_lb_addr *eloc, uint32_t *elen, int inc)
1908 epos->offset = udf_file_entry_alloc_offset(inode);
1909 ptr = UDF_I_DATA(inode) + epos->offset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
1910 alen = udf_file_entry_alloc_offset(inode) + UDF_I_LENALLOC(inode);
1915 epos->offset = sizeof(struct allocExtDesc);
1916 ptr = epos->bh->b_data + epos->offset;
1917 alen = sizeof(struct allocExtDesc) + le32_to_cpu(((struct allocExtDesc *)epos->bh->b_data)->lengthAllocDescs);
1920 switch (UDF_I_ALLOCTYPE(inode))
1922 case ICBTAG_FLAG_AD_SHORT:
1926 if (!(sad = udf_get_fileshortad(ptr, alen, &epos->offset, inc)))
1929 etype = le32_to_cpu(sad->extLength) >> 30;
1930 eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
1931 eloc->partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
1932 *elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
1935 case ICBTAG_FLAG_AD_LONG:
1939 if (!(lad = udf_get_filelongad(ptr, alen, &epos->offset, inc)))
1942 etype = le32_to_cpu(lad->extLength) >> 30;
1943 *eloc = lelb_to_cpu(lad->extLocation);
1944 *elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
1949 udf_debug("alloc_type = %d unsupported\n", UDF_I_ALLOCTYPE(inode));
1958 udf_insert_aext(struct inode *inode, struct extent_position epos,
1959 kernel_lb_addr neloc, uint32_t nelen)
1961 kernel_lb_addr oeloc;
1968 while ((etype = udf_next_aext(inode, &epos, &oeloc, &oelen, 0)) != -1)
1970 udf_write_aext(inode, &epos, neloc, nelen, 1);
1973 nelen = (etype << 30) | oelen;
1975 udf_add_aext(inode, &epos, neloc, nelen, 1);
1977 return (nelen >> 30);
1980 int8_t udf_delete_aext(struct inode *inode, struct extent_position epos,
1981 kernel_lb_addr eloc, uint32_t elen)
1983 struct extent_position oepos;
1986 struct allocExtDesc *aed;
1994 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
1995 adsize = sizeof(short_ad);
1996 else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
1997 adsize = sizeof(long_ad);
2002 if (udf_next_aext(inode, &epos, &eloc, &elen, 1) == -1)
2005 while ((etype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1)
2007 udf_write_aext(inode, &oepos, eloc, (etype << 30) | elen, 1);
2008 if (oepos.bh != epos.bh)
2010 oepos.block = epos.block;
2014 oepos.offset = epos.offset - adsize;
2017 memset(&eloc, 0x00, sizeof(kernel_lb_addr));
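/*
 * Every extent after the one being deleted has been copied one slot back
 * by the loop above; the now-stale last slot is overwritten with an empty
 * extent and lengthAllocDescs is reduced accordingly.
 */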
2020 if (epos.bh != oepos.bh)
2022 udf_free_blocks(inode->i_sb, inode, epos.block, 0, 1);
2023 udf_write_aext(inode, &oepos, eloc, elen, 1);
2024 udf_write_aext(inode, &oepos, eloc, elen, 1);
2027 UDF_I_LENALLOC(inode) -= (adsize * 2);
2028 mark_inode_dirty(inode);
2032 aed = (struct allocExtDesc *)oepos.bh->b_data;
2033 aed->lengthAllocDescs =
2034 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - (2*adsize));
2035 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
2036 udf_update_tag(oepos.bh->b_data, oepos.offset - (2*adsize));
2038 udf_update_tag(oepos.bh->b_data, sizeof(struct allocExtDesc));
2039 mark_buffer_dirty_inode(oepos.bh, inode);
2044 udf_write_aext(inode, &oepos, eloc, elen, 1);
2047 UDF_I_LENALLOC(inode) -= adsize;
2048 mark_inode_dirty(inode);
2052 aed = (struct allocExtDesc *)oepos.bh->b_data;
2053 aed->lengthAllocDescs =
2054 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - adsize);
2055 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
2056 udf_update_tag(oepos.bh->b_data, epos.offset - adsize);
2058 udf_update_tag(oepos.bh->b_data, sizeof(struct allocExtDesc));
2059 mark_buffer_dirty_inode(oepos.bh, inode);
2065 return (elen >> 30);
2068 int8_t inode_bmap(struct inode *inode, sector_t block, struct extent_position *pos,
2069 kernel_lb_addr *eloc, uint32_t *elen, sector_t *offset)
2071 loff_t lbcount = 0, bcount = (loff_t)block << inode->i_sb->s_blocksize_bits;
2076 printk(KERN_ERR "udf: inode_bmap: block < 0\n");
2081 pos->block = UDF_I_LOCATION(inode);
2087 if ((etype = udf_next_aext(inode, pos, eloc, elen, 1)) == -1)
2089 *offset = (bcount - lbcount) >> inode->i_sb->s_blocksize_bits;
2090 UDF_I_LENEXTENTS(inode) = lbcount;
2094 } while (lbcount <= bcount);
2096 *offset = (bcount + *elen - lbcount) >> inode->i_sb->s_blocksize_bits;
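/*
 * On success the extent containing the requested block is returned via
 * *eloc/*elen, *offset is the block index inside that extent, and the
 * return value is the extent type.
 */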
2101 long udf_block_map(struct inode *inode, sector_t block)
2103 kernel_lb_addr eloc;
2106 struct extent_position epos = { NULL, 0, { 0, 0}};
2111 if (inode_bmap(inode, block, &epos, &eloc, &elen, &offset) == (EXT_RECORDED_ALLOCATED >> 30))
2112 ret = udf_get_lb_pblock(inode->i_sb, eloc, offset);
2119 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_VARCONV))
2120 return udf_fixed_to_variable(ret);