5 * Inode handling routines for the OSTA-UDF(tm) filesystem.
8 * This file is distributed under the terms of the GNU General Public
9 * License (GPL). Copies of the GPL can be obtained from:
10 * ftp://prep.ai.mit.edu/pub/gnu/GPL
11 * Each contributing author retains all rights to their own work.
13 * (C) 1998 Dave Boynton
14 * (C) 1998-2004 Ben Fennema
15 * (C) 1999-2000 Stelias Computing Inc
19 * 10/04/98 dgb Added rudimentary directory functions
20 * 10/07/98 Fully working udf_block_map! It works!
21 * 11/25/98 bmap altered to better support extents
22 * 12/06/98 blf partition support in udf_iget, udf_block_map and udf_read_inode
23 * 12/12/98 rewrote udf_block_map to handle next extents and descs across
24 * block boundaries (which is not actually allowed)
25 * 12/20/98 added support for strategy 4096
26 * 03/07/99 rewrote udf_block_map (again)
27 * New funcs, inode_bmap, udf_next_aext
28 * 04/19/99 Support for writing device EA's for major/minor #
33 #include <linux/smp_lock.h>
34 #include <linux/module.h>
35 #include <linux/pagemap.h>
36 #include <linux/buffer_head.h>
37 #include <linux/writeback.h>
38 #include <linux/slab.h>
/* Module identification and licensing metadata. */
43 MODULE_AUTHOR("Ben Fennema");
44 MODULE_DESCRIPTION("Universal Disk Format Filesystem");
45 MODULE_LICENSE("GPL");
/*
 * Size of the laarr[] scratch arrays used while splitting, preallocating
 * and merging extents: at most 5 extents are manipulated at once.
 */
47 #define EXTENT_MERGE_SIZE 5
/* Forward declarations for the static helpers defined later in this file. */
49 static mode_t udf_convert_permissions(struct fileEntry *);
50 static int udf_update_inode(struct inode *, int);
51 static void udf_fill_inode(struct inode *, struct buffer_head *);
52 static struct buffer_head *inode_getblk(struct inode *, long, int *,
54 static int8_t udf_insert_aext(struct inode *, kernel_lb_addr, int,
55 kernel_lb_addr, uint32_t, struct buffer_head *);
56 static void udf_split_extents(struct inode *, int *, int, int,
57 kernel_long_ad [EXTENT_MERGE_SIZE], int *);
58 static void udf_prealloc_extents(struct inode *, int, int,
59 kernel_long_ad [EXTENT_MERGE_SIZE], int *);
60 static void udf_merge_extents(struct inode *,
61 kernel_long_ad [EXTENT_MERGE_SIZE], int *);
62 static void udf_update_extents(struct inode *,
63 kernel_long_ad [EXTENT_MERGE_SIZE], int, int,
64 kernel_lb_addr, uint32_t, struct buffer_head **);
65 static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int);
71 * Clean-up before the specified inode is destroyed.
74 * This routine is called when the kernel destroys an inode structure
75 * ie. when iput() finds i_count == 0.
78 * July 1, 1997 - Andrew E. Mileski
79 * Written, tested, and released.
81 * Called at the last iput() if i_nlink is zero.
/*
 * Final disposal of an on-disk inode, invoked at the last iput() when
 * i_nlink is zero: drop all cached pages, push the final inode state to
 * disk, then release the inode's on-disk allocation.
 */
83 void udf_delete_inode(struct inode * inode)
85 	truncate_inode_pages(&inode->i_data, 0);
/* A bad inode has nothing valid to write back or free on disk. */
87 	if (is_bad_inode(inode))
/* Honor S_SYNC: write the inode synchronously on sync mounts/inodes. */
94 	udf_update_inode(inode, IS_SYNC(inode));
95 	udf_free_inode(inode);
/*
 * Called when the in-core inode is being evicted: return any preallocated
 * blocks (only meaningful on read-write mounts) and free the in-core copy
 * of the inode's extended attributes / allocation descriptors.
 */
103 void udf_clear_inode(struct inode *inode)
105 	if (!(inode->i_sb->s_flags & MS_RDONLY)) {
107 		udf_discard_prealloc(inode);
/* UDF_I_DATA was kmalloc'ed in udf_fill_inode; NULL it against reuse. */
111 	kfree(UDF_I_DATA(inode));
112 	UDF_I_DATA(inode) = NULL;
/* Write one page of a UDF file using the generic buffer-head path. */
115 static int udf_writepage(struct page *page, struct writeback_control *wbc)
117 	return block_write_full_page(page, udf_get_block, wbc);
/* Read one page of a UDF file using the generic buffer-head path. */
120 static int udf_readpage(struct file *file, struct page *page)
122 	return block_read_full_page(page, udf_get_block);
/* Map/allocate blocks backing [from, to) of a page before a write. */
125 static int udf_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
127 	return block_prepare_write(page, from, to, udf_get_block);
/* FIBMAP support: translate a file block number to a device block number. */
130 static sector_t udf_bmap(struct address_space *mapping, sector_t block)
132 	return generic_block_bmap(mapping,block,udf_get_block);
/*
 * Address-space operations for UDF files whose data lives in normal
 * extents (as opposed to data embedded in the ICB, which uses
 * udf_adinicb_aops — selected in udf_fill_inode).
 */
135 const struct address_space_operations udf_aops = {
136 	.readpage		= udf_readpage,
137 	.writepage		= udf_writepage,
138 	.sync_page		= block_sync_page,
139 	.prepare_write		= udf_prepare_write,
140 	.commit_write		= generic_commit_write,
/*
 * Convert a file whose data is stored inside the ICB (ICBTAG_FLAG_AD_IN_ICB)
 * into a normal extent-backed file.  The embedded data is copied into page 0
 * of the page cache, the in-ICB area is cleared, the allocation type is
 * switched to short or long allocation descriptors (per the mount option),
 * and the page is written out through the regular address-space methods.
 *
 * NOTE(review): newsize appears unused in the visible lines — presumably the
 * caller's target size; confirm against the full source.
 */
144 void udf_expand_file_adinicb(struct inode * inode, int newsize, int * err)
148 	struct writeback_control udf_wbc = {
149 		.sync_mode = WB_SYNC_NONE,
153 	/* from now on we have normal address_space methods */
154 	inode->i_data.a_ops = &udf_aops;
/* Empty in-ICB area: just flip the allocation type, nothing to copy. */
156 	if (!UDF_I_LENALLOC(inode))
158 		if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
159 			UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
161 			UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
162 		mark_inode_dirty(inode);
/* Copy the embedded data into page 0 and mark it up to date. */
166 	page = grab_cache_page(inode->i_mapping, 0);
167 	BUG_ON(!PageLocked(page));
169 	if (!PageUptodate(page))
/* Zero the tail of the page beyond the embedded data length. */
172 		memset(kaddr + UDF_I_LENALLOC(inode), 0x00,
173 			PAGE_CACHE_SIZE - UDF_I_LENALLOC(inode));
/* Embedded data starts after the extended attributes in UDF_I_DATA. */
174 		memcpy(kaddr, UDF_I_DATA(inode) + UDF_I_LENEATTR(inode),
175 			UDF_I_LENALLOC(inode));
176 		flush_dcache_page(page);
177 		SetPageUptodate(page);
/* Clear the old in-ICB data area and switch to extent-based allocation. */
180 	memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0x00,
181 		UDF_I_LENALLOC(inode));
182 	UDF_I_LENALLOC(inode) = 0;
183 	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
184 		UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
186 		UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
/* Write the page through the newly-installed aops to allocate the extent. */
188 	inode->i_data.a_ops->writepage(page, &udf_wbc);
189 	page_cache_release(page);
191 	mark_inode_dirty(inode);
/*
 * Convert a directory stored inside the ICB into a directory backed by a
 * real block: allocate a fresh block, copy each file-identifier descriptor
 * from the in-ICB area into it (rewriting tag locations), then record the
 * new block as the directory's single extent.
 *
 * Returns the buffer_head of the new directory block (caller releases it);
 * *block receives the new logical block number, *err the error code on
 * failure paths (not all visible here).
 */
194 struct buffer_head * udf_expand_dir_adinicb(struct inode *inode, int *block, int *err)
197 	struct buffer_head *sbh = NULL, *dbh = NULL;
198 	kernel_lb_addr bloc, eloc;
199 	uint32_t elen, extoffset;
202 	struct udf_fileident_bh sfibh, dfibh;
/* >> 2: directory offsets are tracked in 4-byte units here. */
203 	loff_t f_pos = udf_ext0_offset(inode) >> 2;
204 	int size = (udf_ext0_offset(inode) + inode->i_size) >> 2;
205 	struct fileIdentDesc cfi, *sfi, *dfi;
207 	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
208 		alloctype = ICBTAG_FLAG_AD_SHORT;
210 		alloctype = ICBTAG_FLAG_AD_LONG;
214 		UDF_I_ALLOCTYPE(inode) = alloctype;
215 		mark_inode_dirty(inode);
219 	/* alloc block, and copy data to it */
220 	*block = udf_new_block(inode->i_sb, inode,
221 		UDF_I_LOCATION(inode).partitionReferenceNum,
222 		UDF_I_LOCATION(inode).logicalBlockNum, err);
226 	newblock = udf_get_pblock(inode->i_sb, *block,
227 		UDF_I_LOCATION(inode).partitionReferenceNum, 0);
230 	dbh = udf_tgetblk(inode->i_sb, newblock);
234 	memset(dbh->b_data, 0x00, inode->i_sb->s_blocksize);
235 	set_buffer_uptodate(dbh);
237 	mark_buffer_dirty_inode(dbh, inode);
/* Set up source (in-ICB) and destination (new block) iteration state. */
239 	sfibh.soffset = sfibh.eoffset = (f_pos & ((inode->i_sb->s_blocksize - 1) >> 2)) << 2;
240 	sbh = sfibh.sbh = sfibh.ebh = NULL;
241 	dfibh.soffset = dfibh.eoffset = 0;
242 	dfibh.sbh = dfibh.ebh = dbh;
/* Walk every file identifier in the old in-ICB directory data. */
243 	while ( (f_pos < size) )
/* Temporarily claim IN_ICB so udf_fileident_read reads from UDF_I_DATA. */
245 		UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
246 		sfi = udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL, NULL, NULL, NULL, NULL, NULL);
249 			udf_release_data(dbh);
252 		UDF_I_ALLOCTYPE(inode) = alloctype;
/* The descriptor now lives in the new block: retag its location. */
253 		sfi->descTag.tagLocation = cpu_to_le32(*block);
254 		dfibh.soffset = dfibh.eoffset;
255 		dfibh.eoffset += (sfibh.eoffset - sfibh.soffset);
256 		dfi = (struct fileIdentDesc *)(dbh->b_data + dfibh.soffset);
257 		if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse,
258 			sfi->fileIdent + le16_to_cpu(sfi->lengthOfImpUse)))
/* Write failed: restore in-ICB state before bailing out. */
260 			UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
261 			udf_release_data(dbh);
265 	mark_buffer_dirty_inode(dbh, inode);
/* Wipe the old in-ICB directory data and record the new single extent. */
267 	memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0, UDF_I_LENALLOC(inode));
268 	UDF_I_LENALLOC(inode) = 0;
269 	bloc = UDF_I_LOCATION(inode);
270 	eloc.logicalBlockNum = *block;
271 	eloc.partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
272 	elen = inode->i_size;
273 	UDF_I_LENEXTENTS(inode) = elen;
274 	extoffset = udf_file_entry_alloc_offset(inode);
275 	udf_add_aext(inode, &bloc, &extoffset, eloc, elen, &sbh, 0);
278 	udf_release_data(sbh);
279 	mark_inode_dirty(inode);
/*
 * get_block callback for the generic buffer-head I/O paths: map logical
 * file block 'block' to a physical block in bh_result.  For reads an
 * existing mapping is looked up with udf_block_map(); for writes (create)
 * inode_getblk() allocates a new block and the sequential-allocation goal
 * hints are advanced when the access pattern is sequential.
 */
283 static int udf_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create)
286 	struct buffer_head *bh;
/* Non-create path: look up an existing mapping only. */
291 		phys = udf_block_map(inode, block);
293 			map_bh(bh_result, inode->i_sb, phys);
/* Sequential write detected: advance the next-allocation hints. */
306 	if (block == UDF_I_NEXT_ALLOC_BLOCK(inode) + 1)
308 		UDF_I_NEXT_ALLOC_BLOCK(inode) ++;
309 		UDF_I_NEXT_ALLOC_GOAL(inode) ++;
314 	bh = inode_getblk(inode, block, &err, &phys, &new);
321 		set_buffer_new(bh_result);
322 	map_bh(bh_result, inode->i_sb, phys);
328 	udf_warning(inode->i_sb, "udf_get_block", "block < 0");
/*
 * Map (and optionally allocate) a file block via udf_get_block() using a
 * dummy on-stack buffer_head, then return the real buffer_head for the
 * resulting device block.  Freshly allocated blocks are zero-filled and
 * marked up to date/dirty.  Returns NULL on failure; *err holds the error.
 */
332 static struct buffer_head *
333 udf_getblk(struct inode *inode, long block, int create, int *err)
335 	struct buffer_head dummy;
/* Sentinel so a successful mapping is detectable via buffer_mapped(). */
338 	dummy.b_blocknr = -1000;
339 	*err = udf_get_block(inode, block, &dummy, create);
340 	if (!*err && buffer_mapped(&dummy))
342 		struct buffer_head *bh;
343 		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
/* A newly allocated block must be zeroed before it is exposed. */
344 		if (buffer_new(&dummy))
347 			memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
348 			set_buffer_uptodate(bh);
350 			mark_buffer_dirty_inode(bh, inode);
/*
 * Core block-allocation routine.  Walks the inode's extent list to find
 * the extent containing file block 'block', then either returns the
 * existing physical block (recorded+allocated extents), takes a block
 * from a not-recorded-but-allocated extent, or allocates a fresh block.
 * On allocation the surrounding extent is split (udf_split_extents),
 * blocks may be preallocated (udf_prealloc_extents), adjacent extents
 * are merged (udf_merge_extents) and the on-disk extent list rewritten
 * (udf_update_extents).  *phys receives the physical block, *new is set
 * for fresh allocations, *err the error code.
 *
 * Throughout the walk, p*/c*/n* triples track the previous, current and
 * next extent position (block location, offset, buffer_head).
 */
357 static struct buffer_head * inode_getblk(struct inode * inode, long block,
358 	int *err, long *phys, int *new)
360 	struct buffer_head *pbh = NULL, *cbh = NULL, *nbh = NULL, *result = NULL;
361 	kernel_long_ad laarr[EXTENT_MERGE_SIZE];
362 	uint32_t pextoffset = 0, cextoffset = 0, nextoffset = 0;
363 	int count = 0, startnum = 0, endnum = 0;
365 	kernel_lb_addr eloc, pbloc, cbloc, nbloc;
367 	uint64_t lbcount = 0, b_off = 0;
368 	uint32_t newblocknum, newblock, offset = 0;
/* Default allocation goal: near the inode's own ICB. */
370 	int goal = 0, pgoal = UDF_I_LOCATION(inode).logicalBlockNum;
373 	pextoffset = cextoffset = nextoffset = udf_file_entry_alloc_offset(inode);
/* Byte offset of the requested block within the file. */
374 	b_off = (uint64_t)block << inode->i_sb->s_blocksize_bits;
375 	pbloc = cbloc = nbloc = UDF_I_LOCATION(inode);
377 	/* find the extent which contains the block we are looking for.
378 	   alternate between laarr[0] and laarr[1] for locations of the
379 	   current extent, and the previous extent */
/* Shift prev <- cur <- next buffer_head references as we advance. */
384 			udf_release_data(pbh);
385 			atomic_inc(&cbh->b_count);
390 			udf_release_data(cbh);
391 			atomic_inc(&nbh->b_count);
400 		pextoffset = cextoffset;
401 		cextoffset = nextoffset;
403 		if ((etype = udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1)) == -1)
/* Record the extent in the scratch array; etype lives in the top 2 bits. */
408 		laarr[c].extLength = (etype << 30) | elen;
409 		laarr[c].extLocation = eloc;
/* Track the block just past the last allocated extent as a goal hint. */
411 		if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
412 			pgoal = eloc.logicalBlockNum +
413 				((elen + inode->i_sb->s_blocksize - 1) >>
414 				inode->i_sb->s_blocksize_bits);
417 	} while (lbcount + elen <= b_off);
/* Block index of the requested block within the found extent. */
420 	offset = b_off >> inode->i_sb->s_blocksize_bits;
422 	/* if the extent is allocated and recorded, return the block
423 	   if the extent is not a multiple of the blocksize, round up */
425 	if (etype == (EXT_RECORDED_ALLOCATED >> 30))
427 		if (elen & (inode->i_sb->s_blocksize - 1))
429 			elen = EXT_RECORDED_ALLOCATED |
430 				((elen + inode->i_sb->s_blocksize - 1) &
431 				~(inode->i_sb->s_blocksize - 1));
432 			etype = udf_write_aext(inode, nbloc, &cextoffset, eloc, elen, nbh, 1);
434 		udf_release_data(pbh);
435 		udf_release_data(cbh);
436 		udf_release_data(nbh);
437 		newblock = udf_get_lb_pblock(inode->i_sb, eloc, offset);
/* Past EOF: extend with a rounded-up last extent plus a new unrecorded one. */
444 		endnum = startnum = ((count > 1) ? 1 : count);
445 		if (laarr[c].extLength & (inode->i_sb->s_blocksize - 1))
448 				(laarr[c].extLength & UDF_EXTENT_FLAG_MASK) |
449 				(((laarr[c].extLength & UDF_EXTENT_LENGTH_MASK) +
450 				inode->i_sb->s_blocksize - 1) &
451 				~(inode->i_sb->s_blocksize - 1));
452 			UDF_I_LENEXTENTS(inode) =
453 				(UDF_I_LENEXTENTS(inode) + inode->i_sb->s_blocksize - 1) &
454 				~(inode->i_sb->s_blocksize - 1);
457 		laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
458 			((offset + 1) << inode->i_sb->s_blocksize_bits);
459 		memset(&laarr[c].extLocation, 0x00, sizeof(kernel_lb_addr));
465 		endnum = startnum = ((count > 2) ? 2 : count);
467 	/* if the current extent is in position 0, swap it with the previous */
468 	if (!c && count != 1)
476 	/* if the current block is located in an extent, read the next extent */
479 		if ((etype = udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 0)) != -1)
481 			laarr[c+1].extLength = (etype << 30) | elen;
482 			laarr[c+1].extLocation = eloc;
490 	udf_release_data(cbh);
491 	udf_release_data(nbh);
493 	/* if the current extent is not recorded but allocated, get the
494 	   block in the extent corresponding to the requested block */
495 	if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
496 		newblocknum = laarr[c].extLocation.logicalBlockNum + offset;
497 	else /* otherwise, allocate a new block */
/* Prefer the last goal for sequential writes, else just past the ICB. */
499 		if (UDF_I_NEXT_ALLOC_BLOCK(inode) == block)
500 			goal = UDF_I_NEXT_ALLOC_GOAL(inode);
505 				goal = UDF_I_LOCATION(inode).logicalBlockNum + 1;
508 		if (!(newblocknum = udf_new_block(inode->i_sb, inode,
509 			UDF_I_LOCATION(inode).partitionReferenceNum, goal, err)))
511 			udf_release_data(pbh);
515 		UDF_I_LENEXTENTS(inode) += inode->i_sb->s_blocksize;
518 	/* if the extent the requested block is located in contains multiple blocks,
519 	   split the extent into at most three extents. blocks prior to requested
520 	   block, requested block, and blocks after requested block */
521 	udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum);
523 #ifdef UDF_PREALLOCATE
524 	/* preallocate blocks */
525 	udf_prealloc_extents(inode, c, lastblock, laarr, &endnum);
528 	/* merge any continuous blocks in laarr */
529 	udf_merge_extents(inode, laarr, &endnum);
531 	/* write back the new extents, inserting new extents if the new number
532 	   of extents is greater than the old number, and deleting extents if
533 	   the new number of extents is less than the old number */
534 	udf_update_extents(inode, laarr, startnum, endnum, pbloc, pextoffset, &pbh);
536 	udf_release_data(pbh);
538 	if (!(newblock = udf_get_pblock(inode->i_sb, newblocknum,
539 		UDF_I_LOCATION(inode).partitionReferenceNum, 0)))
/* Remember this allocation so the next sequential write reuses the hint. */
546 	UDF_I_NEXT_ALLOC_BLOCK(inode) = block;
547 	UDF_I_NEXT_ALLOC_GOAL(inode) = newblocknum;
548 	inode->i_ctime = current_fs_time(inode->i_sb);
551 		udf_sync_inode(inode);
553 		mark_inode_dirty(inode);
/*
 * Split the unrecorded extent laarr[*c] around block index 'offset' into
 * up to three pieces: blocks before the target, the target block itself
 * (which becomes a recorded+allocated one-block extent at newblocknum),
 * and blocks after the target.  *c and *endnum are updated to reflect the
 * new layout of the scratch array.
 */
557 static void udf_split_extents(struct inode *inode, int *c, int offset, int newblocknum,
558 	kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
/* Only unrecorded extents (allocated or not) need splitting. */
560 	if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) ||
561 		(laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
/* blen = extent length in blocks, rounded up. */
564 		int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) +
565 			inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
566 		int8_t etype = (laarr[curr].extLength >> 30);
/* Target at either end of the extent: split into two pieces. */
570 		else if (!offset || blen == offset + 1)
572 			laarr[curr+2] = laarr[curr+1];
573 			laarr[curr+1] = laarr[curr];
/* Target in the middle: make room for three pieces. */
577 			laarr[curr+3] = laarr[curr+1];
578 			laarr[curr+2] = laarr[curr+1] = laarr[curr];
/* Head piece: blocks before the target keep their old state. */
583 			if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
/* Unrecorded-but-allocated head is freed and downgraded to unallocated. */
585 				udf_free_blocks(inode->i_sb, inode, laarr[curr].extLocation, 0, offset);
586 				laarr[curr].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
587 					(offset << inode->i_sb->s_blocksize_bits);
588 				laarr[curr].extLocation.logicalBlockNum = 0;
589 				laarr[curr].extLocation.partitionReferenceNum = 0;
592 				laarr[curr].extLength = (etype << 30) |
593 					(offset << inode->i_sb->s_blocksize_bits);
/* Middle piece: the target block becomes recorded+allocated. */
599 		laarr[curr].extLocation.logicalBlockNum = newblocknum;
600 		if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
601 			laarr[curr].extLocation.partitionReferenceNum =
602 				UDF_I_LOCATION(inode).partitionReferenceNum;
603 		laarr[curr].extLength = EXT_RECORDED_ALLOCATED |
604 			inode->i_sb->s_blocksize;
/* Tail piece: blocks after the target, shifted past the split point. */
607 		if (blen != offset + 1)
609 			if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
610 				laarr[curr].extLocation.logicalBlockNum += (offset + 1);
611 			laarr[curr].extLength = (etype << 30) |
612 				((blen - (offset + 1)) << inode->i_sb->s_blocksize_bits);
/*
 * Preallocate up to UDF_DEFAULT_PREALLOC_BLOCKS blocks immediately after
 * the just-allocated extent laarr[c], so future sequential writes find
 * contiguous space.  Existing not-recorded extents following the target
 * are counted toward the budget; any blocks actually obtained are grafted
 * on as (or into) a not-recorded-allocated extent.
 */
619 static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
620 	kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
622 	int start, length = 0, currlength = 0, i;
624 	if (*endnum >= (c+1))
/* The following extent is already allocated: extend from its tail. */
633 		if ((laarr[c+1].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
636 			length = currlength = (((laarr[c+1].extLength & UDF_EXTENT_LENGTH_MASK) +
637 				inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
/* Count how many blocks worth of unrecorded extents follow. */
643 	for (i=start+1; i<=*endnum; i++)
648 			length += UDF_DEFAULT_PREALLOC_BLOCKS;
650 		else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
651 			length += (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
652 				inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
/* next = first block after the extent we are extending. */
659 		int next = laarr[start].extLocation.logicalBlockNum +
660 			(((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) +
661 			inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
662 		int numalloc = udf_prealloc_blocks(inode->i_sb, inode,
663 			laarr[start].extLocation.partitionReferenceNum,
664 			next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ? length :
665 				UDF_DEFAULT_PREALLOC_BLOCKS) - currlength);
/* Fold the new blocks into the existing allocated extent... */
670 				laarr[start].extLength +=
671 					(numalloc << inode->i_sb->s_blocksize_bits);
/* ...or insert a brand-new not-recorded-allocated extent after laarr[c]. */
674 					memmove(&laarr[c+2], &laarr[c+1],
675 						sizeof(long_ad) * (*endnum - (c+1)));
677 				laarr[c+1].extLocation.logicalBlockNum = next;
678 				laarr[c+1].extLocation.partitionReferenceNum =
679 					laarr[c].extLocation.partitionReferenceNum;
680 				laarr[c+1].extLength = EXT_NOT_RECORDED_ALLOCATED |
681 					(numalloc << inode->i_sb->s_blocksize_bits);
/* Shrink/remove following unrecorded extents consumed by the prealloc. */
685 			for (i=start+1; numalloc && i<*endnum; i++)
687 				int elen = ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
688 					inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
692 					laarr[i].extLength -=
693 						(numalloc << inode->i_sb->s_blocksize_bits);
700 					memmove(&laarr[i], &laarr[i+1],
701 						sizeof(long_ad) * (*endnum - (i+1)));
706 			UDF_I_LENEXTENTS(inode) += numalloc << inode->i_sb->s_blocksize_bits;
/*
 * Coalesce adjacent compatible extents in laarr[]: two neighbours of the
 * same type merge when they are logically contiguous (or both unallocated),
 * splitting again only if the combined length would overflow the 30-bit
 * extent-length field.  An allocated-but-unrecorded extent followed by an
 * unallocated one is freed and downgraded so the pair can merge.  *endnum
 * is decremented for every removed extent.
 */
711 static void udf_merge_extents(struct inode *inode,
712 	kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
716 	for (i=0; i<(*endnum-1); i++)
/* Same extent type: candidates for merging. */
718 		if ((laarr[i].extLength >> 30) == (laarr[i+1].extLength >> 30))
/* Mergeable if both are unallocated, or physically contiguous. */
720 			if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) ||
721 				((laarr[i+1].extLocation.logicalBlockNum - laarr[i].extLocation.logicalBlockNum) ==
722 				(((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
723 				inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits)))
/* Combined length exceeds the 30-bit field: cap extent i, keep the rest. */
725 				if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
726 					(laarr[i+1].extLength & UDF_EXTENT_LENGTH_MASK) +
727 					inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK)
729 					laarr[i+1].extLength = (laarr[i+1].extLength -
730 						(laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
731 						UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize-1);
732 					laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_FLAG_MASK) +
733 						(UDF_EXTENT_LENGTH_MASK + 1) - inode->i_sb->s_blocksize;
734 					laarr[i+1].extLocation.logicalBlockNum =
735 						laarr[i].extLocation.logicalBlockNum +
736 						((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) >>
737 							inode->i_sb->s_blocksize_bits);
/* Fits: absorb extent i+1 into i and close the gap in the array. */
741 					laarr[i].extLength = laarr[i+1].extLength +
742 						(((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
743 						inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize-1));
745 						memmove(&laarr[i+1], &laarr[i+2],
746 							sizeof(long_ad) * (*endnum - (i+2)));
/* Allocated-unrecorded followed by unallocated: free blocks, then merge. */
752 		else if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) &&
753 			((laarr[i+1].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)))
755 			udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0,
756 				((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
757 				inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
758 			laarr[i].extLocation.logicalBlockNum = 0;
759 			laarr[i].extLocation.partitionReferenceNum = 0;
761 			if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
762 				(laarr[i+1].extLength & UDF_EXTENT_LENGTH_MASK) +
763 				inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK)
765 				laarr[i+1].extLength = (laarr[i+1].extLength -
766 					(laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
767 					UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize-1);
768 				laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_FLAG_MASK) +
769 					(UDF_EXTENT_LENGTH_MASK + 1) - inode->i_sb->s_blocksize;
773 				laarr[i].extLength = laarr[i+1].extLength +
774 					(((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
775 					inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize-1));
777 					memmove(&laarr[i+1], &laarr[i+2],
778 						sizeof(long_ad) * (*endnum - (i+2)));
/* Lone allocated-unrecorded extent: free its blocks and mark unallocated. */
783 		else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
785 			udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0,
786 				((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
787 				inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
788 			laarr[i].extLocation.logicalBlockNum = 0;
789 			laarr[i].extLocation.partitionReferenceNum = 0;
790 			laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) |
791 				EXT_NOT_RECORDED_NOT_ALLOCATED;
/*
 * Write the rewritten scratch array back into the inode's on-disk extent
 * list starting at (pbloc, pextoffset).  If the extent count shrank,
 * surplus on-disk descriptors are deleted; if it grew, new descriptors
 * are inserted first; then every remaining slot is overwritten in place.
 */
796 static void udf_update_extents(struct inode *inode,
797 	kernel_long_ad laarr[EXTENT_MERGE_SIZE], int startnum, int endnum,
798 	kernel_lb_addr pbloc, uint32_t pextoffset, struct buffer_head **pbh)
801 	kernel_lb_addr tmploc;
/* Fewer extents than before: delete the on-disk surplus. */
804 	if (startnum > endnum)
806 		for (i=0; i<(startnum-endnum); i++)
808 			udf_delete_aext(inode, pbloc, pextoffset, laarr[i].extLocation,
809 				laarr[i].extLength, *pbh);
/* More extents than before: insert the extras, advancing the cursor. */
812 	else if (startnum < endnum)
814 		for (i=0; i<(endnum-startnum); i++)
816 			udf_insert_aext(inode, pbloc, pextoffset, laarr[i].extLocation,
817 				laarr[i].extLength, *pbh);
818 			udf_next_aext(inode, &pbloc, &pextoffset, &laarr[i].extLocation,
819 				&laarr[i].extLength, pbh, 1);
/* Overwrite the remaining descriptors one by one. */
824 	for (i=start; i<endnum; i++)
826 		udf_next_aext(inode, &pbloc, &pextoffset, &tmploc, &tmplen, pbh, 0);
827 		udf_write_aext(inode, pbloc, &pextoffset, laarr[i].extLocation,
828 			laarr[i].extLength, *pbh, 1);
/*
 * Like bread() for a UDF file block: map/allocate via udf_getblk(), then
 * read the buffer from disk if it is not already up to date.  Returns the
 * buffer_head or NULL; *err carries the mapping error.
 */
832 struct buffer_head * udf_bread(struct inode * inode, int block,
833 	int create, int * err)
835 	struct buffer_head * bh = NULL;
837 	bh = udf_getblk(inode, block, create, err);
/* Already cached and valid: no I/O needed. */
841 	if (buffer_uptodate(bh))
843 	ll_rw_block(READ, 1, &bh);
/* Re-check after the synchronous read completes. */
845 	if (buffer_uptodate(bh))
/*
 * Truncate an inode to its (already updated) i_size.  Only regular files,
 * directories and symlinks are handled, and append-only/immutable inodes
 * are left alone.  In-ICB files either stay embedded (tail is zeroed) or
 * are first expanded to extent form; extent-backed files get the partial
 * last page zeroed and their extent list trimmed.
 */
852 void udf_truncate(struct inode * inode)
857 	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
858 		S_ISLNK(inode->i_mode)))
860 	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
864 	if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
/* New size no longer fits in the ICB: convert to extent storage first. */
866 		if (inode->i_sb->s_blocksize < (udf_file_entry_alloc_offset(inode) +
869 			udf_expand_file_adinicb(inode, inode->i_size, &err);
/* Expansion could not leave in-ICB mode; clamp size to what fits. */
870 			if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
872 				inode->i_size = UDF_I_LENALLOC(inode);
877 			udf_truncate_extents(inode);
/* Still in-ICB: zero the discarded tail of the embedded data. */
881 			offset = inode->i_size & (inode->i_sb->s_blocksize - 1);
882 			memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode) + offset, 0x00, inode->i_sb->s_blocksize - offset - udf_file_entry_alloc_offset(inode));
883 			UDF_I_LENALLOC(inode) = inode->i_size;
/* Extent-backed: zero the tail of the last partial page, drop extents. */
888 		block_truncate_page(inode->i_mapping, inode->i_size, udf_get_block);
889 		udf_truncate_extents(inode);
892 	inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb);
894 		udf_sync_inode (inode);
896 		mark_inode_dirty(inode);
/*
 * Read an inode's (Extended) File Entry from disk into the in-core inode.
 * Handles ICB strategy 4096 by following one level of indirect-entry
 * redirection (recursing with the new location); only strategies 4 and
 * 4096 are supported.  On any failure the inode is marked bad.
 */
901 __udf_read_inode(struct inode *inode)
903 	struct buffer_head *bh = NULL;
904 	struct fileEntry *fe;
908 	 * Set defaults, but the inode is still incomplete!
909 	 * Note: get_new_inode() sets the following on a new inode:
912 	 *      i_flags = sb->s_flags
914 	 * clean_inode(): zero fills and sets
919 	bh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 0, &ident);
923 		printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed !bh\n",
925 		make_bad_inode(inode);
/* Only file entries, extended file entries and USE descriptors are valid. */
929 	if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
930 		ident != TAG_IDENT_USE)
932 		printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed ident=%d\n",
933 			inode->i_ino, ident);
934 		udf_release_data(bh);
935 		make_bad_inode(inode);
939 	fe = (struct fileEntry *)bh->b_data;
/* Strategy 4096: the real FE may sit behind an indirect entry. */
941 	if (le16_to_cpu(fe->icbTag.strategyType) == 4096)
943 		struct buffer_head *ibh = NULL, *nbh = NULL;
944 		struct indirectEntry *ie;
946 		ibh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 1, &ident);
947 		if (ident == TAG_IDENT_IE)
952 				ie = (struct indirectEntry *)ibh->b_data;
954 				loc = lelb_to_cpu(ie->indirectICB.extLocation);
956 				if (ie->indirectICB.extLength &&
957 					(nbh = udf_read_ptagged(inode->i_sb, loc, 0, &ident)))
959 					if (ident == TAG_IDENT_FE ||
960 						ident == TAG_IDENT_EFE)
/* Follow the redirection: adopt the new location and re-read. */
962 						memcpy(&UDF_I_LOCATION(inode), &loc, sizeof(kernel_lb_addr));
963 						udf_release_data(bh);
964 						udf_release_data(ibh);
965 						udf_release_data(nbh);
966 						__udf_read_inode(inode);
971 						udf_release_data(nbh);
972 						udf_release_data(ibh);
976 					udf_release_data(ibh);
980 			udf_release_data(ibh);
982 	else if (le16_to_cpu(fe->icbTag.strategyType) != 4)
984 		printk(KERN_ERR "udf: unsupported strategy type: %d\n",
985 			le16_to_cpu(fe->icbTag.strategyType));
986 		udf_release_data(bh);
987 		make_bad_inode(inode);
990 	udf_fill_inode(inode, bh);
991 	udf_release_data(bh);
/*
 * Populate the in-core inode from a File Entry / Extended File Entry /
 * Unallocated Space Entry buffer: copies the descriptor's trailing data
 * into UDF_I_DATA, converts ownership, permissions, link count, size,
 * block count and timestamps, then wires up the inode/file/address-space
 * operations according to the ICB file type.
 */
994 static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
996 	struct fileEntry *fe;
997 	struct extendedFileEntry *efe;
1002 	fe = (struct fileEntry *)bh->b_data;
1003 	efe = (struct extendedFileEntry *)bh->b_data;
1005 	if (le16_to_cpu(fe->icbTag.strategyType) == 4)
1006 		UDF_I_STRAT4096(inode) = 0;
1007 	else /* if (le16_to_cpu(fe->icbTag.strategyType) == 4096) */
1008 		UDF_I_STRAT4096(inode) = 1;
1010 	UDF_I_ALLOCTYPE(inode) = le16_to_cpu(fe->icbTag.flags) & ICBTAG_FLAG_AD_MASK;
1011 	UDF_I_UNIQUE(inode) = 0;
1012 	UDF_I_LENEATTR(inode) = 0;
1013 	UDF_I_LENEXTENTS(inode) = 0;
1014 	UDF_I_LENALLOC(inode) = 0;
1015 	UDF_I_NEXT_ALLOC_BLOCK(inode) = 0;
1016 	UDF_I_NEXT_ALLOC_GOAL(inode) = 0;
/*
 * Copy everything after the fixed descriptor header (EAs + allocation
 * descriptors) into the in-core buffer; the offset depends on which of
 * the three descriptor types we got.
 * NOTE(review): kmalloc results are used without a NULL check in the
 * visible lines — confirm against the full source.
 */
1017 	if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_EFE)
1019 		UDF_I_EFE(inode) = 1;
1020 		UDF_I_USE(inode) = 0;
1021 		UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry), GFP_KERNEL);
1022 		memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct extendedFileEntry), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
1024 	else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_FE)
1026 		UDF_I_EFE(inode) = 0;
1027 		UDF_I_USE(inode) = 0;
1028 		UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct fileEntry), GFP_KERNEL);
1029 		memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct fileEntry), inode->i_sb->s_blocksize - sizeof(struct fileEntry));
1031 	else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
1033 		UDF_I_EFE(inode) = 0;
1034 		UDF_I_USE(inode) = 1;
1035 		UDF_I_LENALLOC(inode) =
1037 			((struct unallocSpaceEntry *)bh->b_data)->lengthAllocDescs);
1038 		UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry), GFP_KERNEL);
1039 		memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct unallocSpaceEntry), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
/* -1 (0xFFFFFFFF) means "not specified"; fall back to the mount defaults. */
1043 	inode->i_uid = le32_to_cpu(fe->uid);
1044 	if (inode->i_uid == -1 || UDF_QUERY_FLAG(inode->i_sb,
1045 					UDF_FLAG_UID_IGNORE))
1046 		inode->i_uid = UDF_SB(inode->i_sb)->s_uid;
1048 	inode->i_gid = le32_to_cpu(fe->gid);
1049 	if (inode->i_gid == -1 || UDF_QUERY_FLAG(inode->i_sb,
1050 					UDF_FLAG_GID_IGNORE))
1051 		inode->i_gid = UDF_SB(inode->i_sb)->s_gid;
1053 	inode->i_nlink = le16_to_cpu(fe->fileLinkCount);
1054 	if (!inode->i_nlink)
1057 	inode->i_size = le64_to_cpu(fe->informationLength);
1058 	UDF_I_LENEXTENTS(inode) = inode->i_size;
1060 	inode->i_mode = udf_convert_permissions(fe);
1061 	inode->i_mode &= ~UDF_SB(inode->i_sb)->s_umask;
/* FE and EFE lay out timestamps/lengths at different offsets. */
1063 	if (UDF_I_EFE(inode) == 0)
/* i_blocks is in 512-byte sectors, hence the (blocksize_bits - 9) shift. */
1065 		inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
1066 			(inode->i_sb->s_blocksize_bits - 9);
/* Bad on-disk timestamps fall back to the volume recording time. */
1068 		if ( udf_stamp_to_time(&convtime, &convtime_usec,
1069 			lets_to_cpu(fe->accessTime)) )
1071 			inode->i_atime.tv_sec = convtime;
1072 			inode->i_atime.tv_nsec = convtime_usec * 1000;
1076 			inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
1079 		if ( udf_stamp_to_time(&convtime, &convtime_usec,
1080 			lets_to_cpu(fe->modificationTime)) )
1082 			inode->i_mtime.tv_sec = convtime;
1083 			inode->i_mtime.tv_nsec = convtime_usec * 1000;
1087 			inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
1090 		if ( udf_stamp_to_time(&convtime, &convtime_usec,
1091 			lets_to_cpu(fe->attrTime)) )
1093 			inode->i_ctime.tv_sec = convtime;
1094 			inode->i_ctime.tv_nsec = convtime_usec * 1000;
1098 			inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
1101 		UDF_I_UNIQUE(inode) = le64_to_cpu(fe->uniqueID);
1102 		UDF_I_LENEATTR(inode) = le32_to_cpu(fe->lengthExtendedAttr);
1103 		UDF_I_LENALLOC(inode) = le32_to_cpu(fe->lengthAllocDescs);
1104 		offset = sizeof(struct fileEntry) + UDF_I_LENEATTR(inode);
1108 		inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
1109 			(inode->i_sb->s_blocksize_bits - 9);
1111 		if ( udf_stamp_to_time(&convtime, &convtime_usec,
1112 			lets_to_cpu(efe->accessTime)) )
1114 			inode->i_atime.tv_sec = convtime;
1115 			inode->i_atime.tv_nsec = convtime_usec * 1000;
1119 			inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
1122 		if ( udf_stamp_to_time(&convtime, &convtime_usec,
1123 			lets_to_cpu(efe->modificationTime)) )
1125 			inode->i_mtime.tv_sec = convtime;
1126 			inode->i_mtime.tv_nsec = convtime_usec * 1000;
1130 			inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
/* EFE additionally records a creation time, kept in UDF_I_CRTIME. */
1133 		if ( udf_stamp_to_time(&convtime, &convtime_usec,
1134 			lets_to_cpu(efe->createTime)) )
1136 			UDF_I_CRTIME(inode).tv_sec = convtime;
1137 			UDF_I_CRTIME(inode).tv_nsec = convtime_usec * 1000;
1141 			UDF_I_CRTIME(inode) = UDF_SB_RECORDTIME(inode->i_sb);
1144 		if ( udf_stamp_to_time(&convtime, &convtime_usec,
1145 			lets_to_cpu(efe->attrTime)) )
1147 			inode->i_ctime.tv_sec = convtime;
1148 			inode->i_ctime.tv_nsec = convtime_usec * 1000;
1152 			inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
1155 		UDF_I_UNIQUE(inode) = le64_to_cpu(efe->uniqueID);
1156 		UDF_I_LENEATTR(inode) = le32_to_cpu(efe->lengthExtendedAttr);
1157 		UDF_I_LENALLOC(inode) = le32_to_cpu(efe->lengthAllocDescs);
1158 		offset = sizeof(struct extendedFileEntry) + UDF_I_LENEATTR(inode);
/* Install the right operations vector for the ICB file type. */
1161 	switch (fe->icbTag.fileType)
1163 		case ICBTAG_FILE_TYPE_DIRECTORY:
1165 			inode->i_op = &udf_dir_inode_operations;
1166 			inode->i_fop = &udf_dir_operations;
1167 			inode->i_mode |= S_IFDIR;
1171 		case ICBTAG_FILE_TYPE_REALTIME:
1172 		case ICBTAG_FILE_TYPE_REGULAR:
1173 		case ICBTAG_FILE_TYPE_UNDEF:
/* In-ICB files use the special embedded-data address space ops. */
1175 			if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
1176 				inode->i_data.a_ops = &udf_adinicb_aops;
1178 				inode->i_data.a_ops = &udf_aops;
1179 			inode->i_op = &udf_file_inode_operations;
1180 			inode->i_fop = &udf_file_operations;
1181 			inode->i_mode |= S_IFREG;
1184 		case ICBTAG_FILE_TYPE_BLOCK:
1186 			inode->i_mode |= S_IFBLK;
1189 		case ICBTAG_FILE_TYPE_CHAR:
1191 			inode->i_mode |= S_IFCHR;
1194 		case ICBTAG_FILE_TYPE_FIFO:
1196 			init_special_inode(inode, inode->i_mode | S_IFIFO, 0);
1199 		case ICBTAG_FILE_TYPE_SOCKET:
1201 			init_special_inode(inode, inode->i_mode | S_IFSOCK, 0);
1204 		case ICBTAG_FILE_TYPE_SYMLINK:
1206 			inode->i_data.a_ops = &udf_symlink_aops;
1207 			inode->i_op = &page_symlink_inode_operations;
1208 			inode->i_mode = S_IFLNK|S_IRWXUGO;
1213 			printk(KERN_ERR "udf: udf_fill_inode(ino %ld) failed unknown file type=%d\n",
1214 				inode->i_ino, fe->icbTag.fileType);
1215 			make_bad_inode(inode);
/* Device nodes: read major/minor from the Device Spec extended attribute. */
1219 	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1221 		struct deviceSpec *dsea =
1222 			(struct deviceSpec *)
1223 				udf_get_extendedattr(inode, 12, 1);
1227 			init_special_inode(inode, inode->i_mode, MKDEV(
1228 				le32_to_cpu(dsea->majorDeviceIdent),
1229 				le32_to_cpu(dsea->minorDeviceIdent)));
1230 			/* Developer ID ??? */
1234 			make_bad_inode(inode);
/*
 * Translate the on-disk UDF permission word and ICB-tag flag bits into
 * POSIX mode bits.  UDF stores rwx triplets in groups of 5 bits per class
 * (other/group/owner), so each class is shifted down into the usual
 * S_IRWXO/S_IRWXG/S_IRWXU positions; setuid/setgid/sticky come from the
 * ICB tag flags.
 */
1240 udf_convert_permissions(struct fileEntry *fe)
1243 	uint32_t permissions;
1246 	permissions = le32_to_cpu(fe->permissions);
1247 	flags = le16_to_cpu(fe->icbTag.flags);
1249 	mode =	(( permissions      ) & S_IRWXO) |
1250 		(( permissions >> 2 ) & S_IRWXG) |
1251 		(( permissions >> 4 ) & S_IRWXU) |
1252 		(( flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) |
1253 		(( flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) |
1254 		(( flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0);
1263 * Write out the specified inode.
1266 * This routine is called whenever an inode is synced.
1267 * Currently this routine is just a placeholder.
1270 * July 1, 1997 - Andrew E. Mileski
1271 * Written, tested, and released.
/* VFS write_inode hook: delegate to udf_update_inode with the sync flag. */
1274 int udf_write_inode(struct inode * inode, int sync)
1278 	ret = udf_update_inode(inode, sync);
/* Synchronously write the inode to disk. */
1283 int udf_sync_inode(struct inode * inode)
1285 	return udf_update_inode(inode, 1);
/*
 * udf_update_inode
 *
 * Serialize the in-core inode into its on-disk descriptor: an Unallocated
 * Space Entry (USE), a File Entry (FE), or an Extended File Entry (EFE),
 * then recompute the descriptor tag CRC/checksum and mark (or, when
 * do_sync, synchronously write) the buffer.
 *
 * NOTE(review): sampled fragment — braces, some declarations (i, err,
 * icbflags, crclen, udfperms, eid) and several guard lines are missing
 * from this view; comments below hedge where the missing lines matter.
 */
1289 udf_update_inode(struct inode *inode, int do_sync)
1291 struct buffer_head *bh = NULL;
1292 struct fileEntry *fe;
1293 struct extendedFileEntry *efe;
1298 kernel_timestamp cpu_time;
/* Read the block holding this inode's (extended) file entry. */
1301 bh = udf_tread(inode->i_sb,
1302 udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0));
1306 udf_debug("bread failure\n");
/* Rebuild the descriptor from scratch over a zeroed block. */
1310 memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
/* fe and efe alias the same buffer; which layout applies is decided later. */
1312 fe = (struct fileEntry *)bh->b_data;
1313 efe = (struct extendedFileEntry *)bh->b_data;
/*
 * Special case: an Unallocated Space Entry only carries allocation
 * descriptors — write it out and return early.
 */
1315 if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
1317 struct unallocSpaceEntry *use =
1318 (struct unallocSpaceEntry *)bh->b_data;
1320 use->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
1321 memcpy(bh->b_data + sizeof(struct unallocSpaceEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
1322 crclen = sizeof(struct unallocSpaceEntry) + UDF_I_LENALLOC(inode) -
1324 use->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
1325 use->descTag.descCRCLength = cpu_to_le16(crclen);
/* CRC covers everything after the 16-byte tag header. */
1326 use->descTag.descCRC = cpu_to_le16(udf_crc((char *)use + sizeof(tag), crclen, 0));
1328 use->descTag.tagChecksum = 0;
/*
 * Checksum = byte sum of the 16-byte tag. NOTE(review): line 1330 is not
 * visible; it presumably skips byte 4 (the checksum byte itself) — confirm.
 */
1329 for (i=0; i<16; i++)
1331 use->descTag.tagChecksum += ((uint8_t *)&(use->descTag))[i];
1333 mark_buffer_dirty(bh);
1334 udf_release_data(bh);
/* Regular FE/EFE path: identity, permissions, link count, size. */
/* *_FORGET mount options write -1 (invalid) instead of the real id. */
1338 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET))
1339 fe->uid = cpu_to_le32(-1);
1340 else fe->uid = cpu_to_le32(inode->i_uid);
1342 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_FORGET))
1343 fe->gid = cpu_to_le32(-1);
1344 else fe->gid = cpu_to_le32(inode->i_gid);
/* Inverse of udf_convert_permissions(): POSIX triads -> 5-bit UDF groups. */
1346 udfperms = ((inode->i_mode & S_IRWXO) ) |
1347 ((inode->i_mode & S_IRWXG) << 2) |
1348 ((inode->i_mode & S_IRWXU) << 4);
/* Preserve the UDF-only delete/chattr bits already on disk. */
1350 udfperms |= (le32_to_cpu(fe->permissions) &
1351 (FE_PERM_O_DELETE | FE_PERM_O_CHATTR |
1352 FE_PERM_G_DELETE | FE_PERM_G_CHATTR |
1353 FE_PERM_U_DELETE | FE_PERM_U_CHATTR));
1354 fe->permissions = cpu_to_le32(udfperms);
/* Directories store the link count without the "." self-link. */
1356 if (S_ISDIR(inode->i_mode))
1357 fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
1359 fe->fileLinkCount = cpu_to_le16(inode->i_nlink);
1361 fe->informationLength = cpu_to_le64(inode->i_size);
/*
 * Device nodes: record major/minor in a Device Specification extended
 * attribute (attribute type 12), creating it if absent.
 */
1363 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1366 struct deviceSpec *dsea =
1367 (struct deviceSpec *)
1368 udf_get_extendedattr(inode, 12, 1);
1372 dsea = (struct deviceSpec *)
1373 udf_add_extendedattr(inode,
1374 sizeof(struct deviceSpec) +
1375 sizeof(regid), 12, 0x3);
1376 dsea->attrType = cpu_to_le32(12);
1377 dsea->attrSubtype = 1;
1378 dsea->attrLength = cpu_to_le32(sizeof(struct deviceSpec) +
1380 dsea->impUseLength = cpu_to_le32(sizeof(regid));
/* Stamp the implementation-use regid with the Linux developer id. */
1382 eid = (regid *)dsea->impUse;
1383 memset(eid, 0, sizeof(regid));
1384 strcpy(eid->ident, UDF_ID_DEVELOPER);
1385 eid->identSuffix[0] = UDF_OS_CLASS_UNIX;
1386 eid->identSuffix[1] = UDF_OS_ID_LINUX;
1387 dsea->majorDeviceIdent = cpu_to_le32(imajor(inode));
1388 dsea->minorDeviceIdent = cpu_to_le32(iminor(inode));
/* Plain File Entry layout (UDF_I_EFE == 0) vs Extended File Entry below. */
1391 if (UDF_I_EFE(inode) == 0)
1393 memcpy(bh->b_data + sizeof(struct fileEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct fileEntry));
/* i_blocks is in 512-byte sectors; round up to filesystem blocks. */
1394 fe->logicalBlocksRecorded = cpu_to_le64(
1395 (inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
1396 (inode->i_sb->s_blocksize_bits - 9));
/* Timestamps are written only if representable as a UDF stamp. */
1398 if (udf_time_to_stamp(&cpu_time, inode->i_atime))
1399 fe->accessTime = cpu_to_lets(cpu_time);
1400 if (udf_time_to_stamp(&cpu_time, inode->i_mtime))
1401 fe->modificationTime = cpu_to_lets(cpu_time);
1402 if (udf_time_to_stamp(&cpu_time, inode->i_ctime))
1403 fe->attrTime = cpu_to_lets(cpu_time);
1404 memset(&(fe->impIdent), 0, sizeof(regid));
1405 strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER)
1406 fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1407 fe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1408 fe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode));
1409 fe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode));
1410 fe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
1411 fe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_FE);
1412 crclen = sizeof(struct fileEntry);
/* Extended File Entry layout. */
1416 memcpy(bh->b_data + sizeof(struct extendedFileEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
1417 efe->objectSize = cpu_to_le64(inode->i_size);
1418 efe->logicalBlocksRecorded = cpu_to_le64(
1419 (inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
1420 (inode->i_sb->s_blocksize_bits - 9));
/*
 * Clamp the cached creation time so it never postdates atime/mtime/ctime
 * (EFE stores createTime explicitly; creation can't be after any of them).
 */
1422 if (UDF_I_CRTIME(inode).tv_sec > inode->i_atime.tv_sec ||
1423 (UDF_I_CRTIME(inode).tv_sec == inode->i_atime.tv_sec &&
1424 UDF_I_CRTIME(inode).tv_nsec > inode->i_atime.tv_nsec))
1426 UDF_I_CRTIME(inode) = inode->i_atime;
1428 if (UDF_I_CRTIME(inode).tv_sec > inode->i_mtime.tv_sec ||
1429 (UDF_I_CRTIME(inode).tv_sec == inode->i_mtime.tv_sec &&
1430 UDF_I_CRTIME(inode).tv_nsec > inode->i_mtime.tv_nsec))
1432 UDF_I_CRTIME(inode) = inode->i_mtime;
1434 if (UDF_I_CRTIME(inode).tv_sec > inode->i_ctime.tv_sec ||
1435 (UDF_I_CRTIME(inode).tv_sec == inode->i_ctime.tv_sec &&
1436 UDF_I_CRTIME(inode).tv_nsec > inode->i_ctime.tv_nsec))
1438 UDF_I_CRTIME(inode) = inode->i_ctime;
1441 if (udf_time_to_stamp(&cpu_time, inode->i_atime))
1442 efe->accessTime = cpu_to_lets(cpu_time);
1443 if (udf_time_to_stamp(&cpu_time, inode->i_mtime))
1444 efe->modificationTime = cpu_to_lets(cpu_time);
1445 if (udf_time_to_stamp(&cpu_time, UDF_I_CRTIME(inode)))
1446 efe->createTime = cpu_to_lets(cpu_time);
1447 if (udf_time_to_stamp(&cpu_time, inode->i_ctime))
1448 efe->attrTime = cpu_to_lets(cpu_time);
1450 memset(&(efe->impIdent), 0, sizeof(regid));
1451 strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER);
1452 efe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1453 efe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1454 efe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode));
1455 efe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode));
1456 efe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
1457 efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE);
1458 crclen = sizeof(struct extendedFileEntry);
/* ICB strategy: 4096 (with one shadow entry) vs the common strategy 4. */
1460 if (UDF_I_STRAT4096(inode))
1462 fe->icbTag.strategyType = cpu_to_le16(4096);
1463 fe->icbTag.strategyParameter = cpu_to_le16(1);
1464 fe->icbTag.numEntries = cpu_to_le16(2);
1468 fe->icbTag.strategyType = cpu_to_le16(4);
1469 fe->icbTag.numEntries = cpu_to_le16(1);
/* Map the POSIX file type onto the ICB tag fileType byte. */
1472 if (S_ISDIR(inode->i_mode))
1473 fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY;
1474 else if (S_ISREG(inode->i_mode))
1475 fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR;
1476 else if (S_ISLNK(inode->i_mode))
1477 fe->icbTag.fileType = ICBTAG_FILE_TYPE_SYMLINK;
1478 else if (S_ISBLK(inode->i_mode))
1479 fe->icbTag.fileType = ICBTAG_FILE_TYPE_BLOCK;
1480 else if (S_ISCHR(inode->i_mode))
1481 fe->icbTag.fileType = ICBTAG_FILE_TYPE_CHAR;
1482 else if (S_ISFIFO(inode->i_mode))
1483 fe->icbTag.fileType = ICBTAG_FILE_TYPE_FIFO;
1484 else if (S_ISSOCK(inode->i_mode))
1485 fe->icbTag.fileType = ICBTAG_FILE_TYPE_SOCKET;
/*
 * Rebuild the tag flags: allocation type + setuid/setgid/sticky from
 * i_mode, keeping any other flag bits already present on disk.
 */
1487 icbflags = UDF_I_ALLOCTYPE(inode) |
1488 ((inode->i_mode & S_ISUID) ? ICBTAG_FLAG_SETUID : 0) |
1489 ((inode->i_mode & S_ISGID) ? ICBTAG_FLAG_SETGID : 0) |
1490 ((inode->i_mode & S_ISVTX) ? ICBTAG_FLAG_STICKY : 0) |
1491 (le16_to_cpu(fe->icbTag.flags) &
1492 ~(ICBTAG_FLAG_AD_MASK | ICBTAG_FLAG_SETUID |
1493 ICBTAG_FLAG_SETGID | ICBTAG_FLAG_STICKY));
1495 fe->icbTag.flags = cpu_to_le16(icbflags);
/* Descriptor version 3 for UDF >= 2.00 media, else version 2. */
1496 if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
1497 fe->descTag.descVersion = cpu_to_le16(3);
1499 fe->descTag.descVersion = cpu_to_le16(2);
1500 fe->descTag.tagSerialNum = cpu_to_le16(UDF_SB_SERIALNUM(inode->i_sb));
1501 fe->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
/* CRC covers the entry plus EAs and alloc descs, minus the tag header. */
1502 crclen += UDF_I_LENEATTR(inode) + UDF_I_LENALLOC(inode) - sizeof(tag);
1503 fe->descTag.descCRCLength = cpu_to_le16(crclen);
1504 fe->descTag.descCRC = cpu_to_le16(udf_crc((char *)fe + sizeof(tag), crclen, 0));
1506 fe->descTag.tagChecksum = 0;
/* NOTE(review): line 1508 (not visible) presumably skips byte 4 — confirm. */
1507 for (i=0; i<16; i++)
1509 fe->descTag.tagChecksum += ((uint8_t *)&(fe->descTag))[i];
1511 /* write the data blocks */
1512 mark_buffer_dirty(bh);
/* do_sync path: wait for the write and report I/O errors. */
1515 sync_dirty_buffer(bh);
1516 if (buffer_req(bh) && !buffer_uptodate(bh))
1518 printk("IO error syncing udf inode [%s:%08lx]\n",
1519 inode->i_sb->s_id, inode->i_ino);
1523 udf_release_data(bh);
/*
 * udf_iget
 *
 * Look up (or create and read) the in-core inode for the given UDF
 * logical-block address.  New inodes are filled by __udf_read_inode();
 * addresses past the end of their partition are marked bad.
 *
 * NOTE(review): sampled fragment — the iget_locked failure check, the
 * bad_inode error path and the return statements are not visible.
 */
1528 udf_iget(struct super_block *sb, kernel_lb_addr ino)
1530 unsigned long block = udf_get_lb_pblock(sb, ino, 0);
1531 struct inode *inode = iget_locked(sb, block);
/* Freshly allocated inode: stash its on-disk location, then read it. */
1536 if (inode->i_state & I_NEW) {
1537 memcpy(&UDF_I_LOCATION(inode), &ino, sizeof(kernel_lb_addr));
1538 __udf_read_inode(inode);
1539 unlock_new_inode(inode);
1542 if (is_bad_inode(inode))
/* Sanity check: block must lie inside its partition. */
1545 if (ino.logicalBlockNum >= UDF_SB_PARTLEN(sb, ino.partitionReferenceNum)) {
1546 udf_debug("block=%d, partition=%d out of range\n",
1547 ino.logicalBlockNum, ino.partitionReferenceNum);
1548 make_bad_inode(inode);
/*
 * udf_add_aext
 *
 * Append one allocation descriptor (eloc/elen) at *extoffset.  If the
 * current descriptor area cannot hold two more descriptors, allocate a
 * new Allocation Extent Descriptor block, chain to it with an
 * EXT_NEXT_EXTENT_ALLOCDECS pointer, and continue there.  The actual
 * descriptor write is delegated to udf_write_aext(); returns its extent
 * type, the top two bits of elen.
 *
 * NOTE(review): sampled fragment — braces, some declarations (ptr, sptr,
 * dptr, loffset, err, etype) and several error-return lines are missing
 * from this view.
 */
1559 int8_t udf_add_aext(struct inode *inode, kernel_lb_addr *bloc, int *extoffset,
1560 kernel_lb_addr eloc, uint32_t elen, struct buffer_head **bh, int inc)
1563 short_ad *sad = NULL;
1564 long_ad *lad = NULL;
1565 struct allocExtDesc *aed;
/* No *bh: descriptors live inline in the inode's ICB data area. */
1570 ptr = UDF_I_DATA(inode) + *extoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
1572 ptr = (*bh)->b_data + *extoffset;
1574 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
1575 adsize = sizeof(short_ad);
1576 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
1577 adsize = sizeof(long_ad);
/*
 * Need room for this descriptor AND a future chain pointer; otherwise
 * start a fresh allocation extent block.
 */
1581 if (*extoffset + (2 * adsize) > inode->i_sb->s_blocksize)
1584 struct buffer_head *nbh;
1586 kernel_lb_addr obloc = *bloc;
/* Allocate the new AED block near the old one, same partition. */
1588 if (!(bloc->logicalBlockNum = udf_new_block(inode->i_sb, NULL,
1589 obloc.partitionReferenceNum, obloc.logicalBlockNum, &err)))
1593 if (!(nbh = udf_tgetblk(inode->i_sb, udf_get_lb_pblock(inode->i_sb,
1599 memset(nbh->b_data, 0x00, inode->i_sb->s_blocksize);
1600 set_buffer_uptodate(nbh);
1602 mark_buffer_dirty_inode(nbh, inode);
1604 aed = (struct allocExtDesc *)(nbh->b_data);
1605 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT))
1606 aed->previousAllocExtLocation = cpu_to_le32(obloc.logicalBlockNum);
/*
 * If even one descriptor no longer fits in the old area, move the last
 * descriptor we wrote into the new block so the chain pointer can take
 * its slot.
 */
1607 if (*extoffset + adsize > inode->i_sb->s_blocksize)
1609 loffset = *extoffset;
1610 aed->lengthAllocDescs = cpu_to_le32(adsize);
1611 sptr = ptr - adsize;
1612 dptr = nbh->b_data + sizeof(struct allocExtDesc);
1613 memcpy(dptr, sptr, adsize);
1614 *extoffset = sizeof(struct allocExtDesc) + adsize;
/* Otherwise the chain pointer goes after the existing descriptors. */
1618 loffset = *extoffset + adsize;
1619 aed->lengthAllocDescs = cpu_to_le32(0);
1621 *extoffset = sizeof(struct allocExtDesc);
1625 aed = (struct allocExtDesc *)(*bh)->b_data;
1626 aed->lengthAllocDescs =
1627 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
/* Inline case: account the chain pointer in the in-ICB length. */
1631 UDF_I_LENALLOC(inode) += adsize;
1632 mark_inode_dirty(inode);
/* Tag the new AED block (descriptor version per UDF revision). */
1635 if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
1636 udf_new_tag(nbh->b_data, TAG_IDENT_AED, 3, 1,
1637 bloc->logicalBlockNum, sizeof(tag));
1639 udf_new_tag(nbh->b_data, TAG_IDENT_AED, 2, 1,
1640 bloc->logicalBlockNum, sizeof(tag));
/* Write the "next extent" pointer descriptor at sptr. */
1641 switch (UDF_I_ALLOCTYPE(inode))
1643 case ICBTAG_FLAG_AD_SHORT:
1645 sad = (short_ad *)sptr;
1646 sad->extLength = cpu_to_le32(
1647 EXT_NEXT_EXTENT_ALLOCDECS |
1648 inode->i_sb->s_blocksize);
1649 sad->extPosition = cpu_to_le32(bloc->logicalBlockNum);
1652 case ICBTAG_FLAG_AD_LONG:
1654 lad = (long_ad *)sptr;
1655 lad->extLength = cpu_to_le32(
1656 EXT_NEXT_EXTENT_ALLOCDECS |
1657 inode->i_sb->s_blocksize);
1658 lad->extLocation = cpu_to_lelb(*bloc);
1659 memset(lad->impUse, 0x00, sizeof(lad->impUse));
/* Refresh the old block's tag CRC over its (possibly shrunk) contents. */
1665 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1666 udf_update_tag((*bh)->b_data, loffset);
1668 udf_update_tag((*bh)->b_data, sizeof(struct allocExtDesc));
1669 mark_buffer_dirty_inode(*bh, inode);
1670 udf_release_data(*bh);
1673 mark_inode_dirty(inode);
/* Finally write the requested descriptor at the (possibly new) position. */
1677 etype = udf_write_aext(inode, *bloc, extoffset, eloc, elen, *bh, inc);
1681 UDF_I_LENALLOC(inode) += adsize;
1682 mark_inode_dirty(inode);
1686 aed = (struct allocExtDesc *)(*bh)->b_data;
1687 aed->lengthAllocDescs =
1688 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
1689 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1690 udf_update_tag((*bh)->b_data, *extoffset + (inc ? 0 : adsize));
1692 udf_update_tag((*bh)->b_data, sizeof(struct allocExtDesc));
1693 mark_buffer_dirty_inode(*bh, inode);
/*
 * udf_write_aext
 *
 * Write one allocation descriptor (eloc/elen) at *extoffset, either into
 * the inode's inline ICB data (bh == NULL) or into the given allocation
 * extent buffer, advancing *extoffset by the descriptor size when `inc`.
 * Returns the extent type encoded in the top two bits of elen.
 *
 * NOTE(review): sampled fragment — braces, the declarations of ptr/adsize
 * and a default error branch of the switch are not visible.
 */
1699 int8_t udf_write_aext(struct inode *inode, kernel_lb_addr bloc, int *extoffset,
1700 kernel_lb_addr eloc, uint32_t elen, struct buffer_head *bh, int inc)
1706 ptr = UDF_I_DATA(inode) + *extoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
1709 ptr = bh->b_data + *extoffset;
/* Balance the udf_release_data() below when a buffer was supplied. */
1710 atomic_inc(&bh->b_count);
1713 switch (UDF_I_ALLOCTYPE(inode))
1715 case ICBTAG_FLAG_AD_SHORT:
1717 short_ad *sad = (short_ad *)ptr;
1718 sad->extLength = cpu_to_le32(elen);
1719 sad->extPosition = cpu_to_le32(eloc.logicalBlockNum);
1720 adsize = sizeof(short_ad);
1723 case ICBTAG_FLAG_AD_LONG:
1725 long_ad *lad = (long_ad *)ptr;
1726 lad->extLength = cpu_to_le32(elen);
1727 lad->extLocation = cpu_to_lelb(eloc);
1728 memset(lad->impUse, 0x00, sizeof(lad->impUse));
1729 adsize = sizeof(long_ad);
/* External AED block: refresh its tag CRC, then mark and release. */
1738 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1740 struct allocExtDesc *aed = (struct allocExtDesc *)(bh)->b_data;
1741 udf_update_tag((bh)->b_data,
1742 le32_to_cpu(aed->lengthAllocDescs) + sizeof(struct allocExtDesc));
1744 mark_buffer_dirty_inode(bh, inode);
1745 udf_release_data(bh);
/* Inline case instead dirties the inode itself. */
1748 mark_inode_dirty(inode);
1751 *extoffset += adsize;
/* Top two bits of the extent length encode its type. */
1752 return (elen >> 30);
/*
 * udf_next_aext
 *
 * Fetch the next allocation descriptor, transparently following
 * EXT_NEXT_EXTENT_ALLOCDECS chain pointers into continuation AED blocks.
 * Returns the extent type, or -1 (not visible here) on end/error.
 *
 * NOTE(review): sampled fragment — the declaration of etype, the update
 * of *bloc from the chain pointer, and the final return are not visible.
 */
1755 int8_t udf_next_aext(struct inode *inode, kernel_lb_addr *bloc, int *extoffset,
1756 kernel_lb_addr *eloc, uint32_t *elen, struct buffer_head **bh, int inc)
/* Keep reading while the descriptor is a pointer to another AED block. */
1760 while ((etype = udf_current_aext(inode, bloc, extoffset, eloc, elen, bh, inc)) ==
1761 (EXT_NEXT_EXTENT_ALLOCDECS >> 30))
/* Restart just past the AED header of the chained block. */
1764 *extoffset = sizeof(struct allocExtDesc);
1765 udf_release_data(*bh);
1766 if (!(*bh = udf_tread(inode->i_sb, udf_get_lb_pblock(inode->i_sb, *bloc, 0))))
1768 udf_debug("reading block %d failed!\n",
1769 udf_get_lb_pblock(inode->i_sb, *bloc, 0));
/*
 * udf_current_aext
 *
 * Decode the allocation descriptor at *extoffset — from the inode's
 * inline ICB data when *bh is NULL, otherwise from the AED buffer —
 * filling *eloc/*elen and returning the extent type.
 *
 * NOTE(review): sampled fragment — declarations (alen, etype, ptr,
 * sad/lad), the NULL-*bh branch structure, the -1 returns on exhaustion
 * and the final `return etype;` are not visible.
 */
1777 int8_t udf_current_aext(struct inode *inode, kernel_lb_addr *bloc, int *extoffset,
1778 kernel_lb_addr *eloc, uint32_t *elen, struct buffer_head **bh, int inc)
/* Inline case: descriptors sit in UDF_I_DATA after the extended attrs. */
1787 *extoffset = udf_file_entry_alloc_offset(inode);
1788 ptr = UDF_I_DATA(inode) + *extoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
1789 alen = udf_file_entry_alloc_offset(inode) + UDF_I_LENALLOC(inode);
/* External AED block: length comes from the descriptor header itself. */
1794 *extoffset = sizeof(struct allocExtDesc);
1795 ptr = (*bh)->b_data + *extoffset;
1796 alen = sizeof(struct allocExtDesc) + le32_to_cpu(((struct allocExtDesc *)(*bh)->b_data)->lengthAllocDescs);
1799 switch (UDF_I_ALLOCTYPE(inode))
1801 case ICBTAG_FLAG_AD_SHORT:
1805 if (!(sad = udf_get_fileshortad(ptr, alen, extoffset, inc)))
/* Extent type is bits 31:30; length is the low 30 bits. */
1808 etype = le32_to_cpu(sad->extLength) >> 30;
1809 eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
/* short_ads carry no partition — inherit the inode's. */
1810 eloc->partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
1811 *elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
1814 case ICBTAG_FLAG_AD_LONG:
1818 if (!(lad = udf_get_filelongad(ptr, alen, extoffset, inc)))
1821 etype = le32_to_cpu(lad->extLength) >> 30;
1822 *eloc = lelb_to_cpu(lad->extLocation);
1823 *elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
1828 udf_debug("alloc_type = %d unsupported\n", UDF_I_ALLOCTYPE(inode));
/*
 * udf_insert_aext
 *
 * Insert a new extent (neloc/nelen) at the current position, shifting
 * every following descriptor one slot down: each iteration writes the
 * pending extent and carries the displaced one forward; the last
 * displaced extent is appended via udf_add_aext().
 *
 * NOTE(review): sampled fragment — declarations of oelen/etype and the
 * NULL-bh guard around atomic_inc are not visible.
 */
1837 udf_insert_aext(struct inode *inode, kernel_lb_addr bloc, int extoffset,
1838 kernel_lb_addr neloc, uint32_t nelen, struct buffer_head *bh)
1840 kernel_lb_addr oeloc;
/* Extra reference: the loop's writes release the buffer as they go. */
1845 atomic_inc(&bh->b_count);
1847 while ((etype = udf_next_aext(inode, &bloc, &extoffset, &oeloc, &oelen, &bh, 0)) != -1)
1849 udf_write_aext(inode, bloc, &extoffset, neloc, nelen, bh, 1);
/* The just-displaced extent becomes the next one to write. */
1852 nelen = (etype << 30) | oelen;
1854 udf_add_aext(inode, &bloc, &extoffset, neloc, nelen, &bh, 1);
1855 udf_release_data(bh);
1856 return (nelen >> 30);
/*
 * udf_delete_aext
 *
 * Delete the extent at the current position by copying every following
 * descriptor back one slot (obh/oextoffset trail nbh/nextoffset by one
 * descriptor), then erasing the vacated tail slot(s) and shrinking the
 * recorded allocation-descriptor length.
 *
 * NOTE(review): sampled fragment — braces, declarations (etype, elen
 * initialization), the obh = nbh / obloc = nbloc setup and the NULL-nbh
 * guards are not visible in this view.
 */
1859 int8_t udf_delete_aext(struct inode *inode, kernel_lb_addr nbloc, int nextoffset,
1860 kernel_lb_addr eloc, uint32_t elen, struct buffer_head *nbh)
1862 struct buffer_head *obh;
1863 kernel_lb_addr obloc;
1864 int oextoffset, adsize;
1866 struct allocExtDesc *aed;
/* Two extra refs: one for the trailing obh alias, one for the walk. */
1870 atomic_inc(&nbh->b_count);
1871 atomic_inc(&nbh->b_count);
1874 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
1875 adsize = sizeof(short_ad);
1876 else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
1877 adsize = sizeof(long_ad);
1883 oextoffset = nextoffset;
/* Skip over the extent being deleted. */
1885 if (udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1) == -1)
/* Copy each subsequent descriptor into the slot one behind it. */
1888 while ((etype = udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1)) != -1)
1890 udf_write_aext(inode, obloc, &oextoffset, eloc, (etype << 30) | elen, obh, 1);
/* Reader crossed into a chained AED block: advance the trailing cursor. */
1894 udf_release_data(obh);
1895 atomic_inc(&nbh->b_count);
1897 oextoffset = nextoffset - adsize;
/* Zeroed extent used to blank the now-unused tail slot(s). */
1900 memset(&eloc, 0x00, sizeof(kernel_lb_addr));
/*
 * The trailing block differs from the leading one: the last AED block
 * became empty — free it and blank both the moved-back chain pointer
 * slot and the old last-descriptor slot (hence two writes).
 */
1905 udf_free_blocks(inode->i_sb, inode, nbloc, 0, 1);
1906 udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
1907 udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
1910 UDF_I_LENALLOC(inode) -= (adsize * 2);
1911 mark_inode_dirty(inode);
1915 aed = (struct allocExtDesc *)(obh)->b_data;
1916 aed->lengthAllocDescs =
1917 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - (2*adsize));
1918 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1919 udf_update_tag((obh)->b_data, oextoffset - (2*adsize));
1921 udf_update_tag((obh)->b_data, sizeof(struct allocExtDesc));
1922 mark_buffer_dirty_inode(obh, inode);
/* Same block: only one descriptor slot was vacated. */
1927 udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
1930 UDF_I_LENALLOC(inode) -= adsize;
1931 mark_inode_dirty(inode);
1935 aed = (struct allocExtDesc *)(obh)->b_data;
1936 aed->lengthAllocDescs =
1937 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - adsize);
1938 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1939 udf_update_tag((obh)->b_data, oextoffset - adsize);
1941 udf_update_tag((obh)->b_data, sizeof(struct allocExtDesc));
1942 mark_buffer_dirty_inode(obh, inode);
1946 udf_release_data(nbh);
1947 udf_release_data(obh);
1948 return (elen >> 30);
/*
 * inode_bmap
 *
 * Walk the inode's extent list to find the extent covering file block
 * `block`: on success *eloc/*elen describe the extent and *offset is the
 * byte offset of the block within it.  Returns the extent type.
 *
 * NOTE(review): sampled fragment — the etype declaration, the loop
 * accumulation `lbcount += *elen`, braces and the returns (-1 on
 * past-EOF, etype on success) are not visible.
 */
1951 int8_t inode_bmap(struct inode *inode, int block, kernel_lb_addr *bloc, uint32_t *extoffset,
1952 kernel_lb_addr *eloc, uint32_t *elen, uint32_t *offset, struct buffer_head **bh)
/* Target byte position of the requested block. */
1954 uint64_t lbcount = 0, bcount = (uint64_t)block << inode->i_sb->s_blocksize_bits;
1959 printk(KERN_ERR "udf: inode_bmap: block < 0\n");
/* Start at the inode's own ICB. */
1965 *bloc = UDF_I_LOCATION(inode);
/* Ran out of extents before reaching bcount: record how far past EOF. */
1969 if ((etype = udf_next_aext(inode, bloc, extoffset, eloc, elen, bh, 1)) == -1)
1971 *offset = bcount - lbcount;
/* Cache the mapped length seen so far. */
1972 UDF_I_LENEXTENTS(inode) = lbcount;
1976 } while (lbcount <= bcount);
/* lbcount is now the end of the found extent; back off to its start. */
1978 *offset = bcount + *elen - lbcount;
1983 long udf_block_map(struct inode *inode, long block)
1985 kernel_lb_addr eloc, bloc;
1986 uint32_t offset, extoffset, elen;
1987 struct buffer_head *bh = NULL;
1992 if (inode_bmap(inode, block, &bloc, &extoffset, &eloc, &elen, &offset, &bh) == (EXT_RECORDED_ALLOCATED >> 30))
1993 ret = udf_get_lb_pblock(inode->i_sb, eloc, offset >> inode->i_sb->s_blocksize_bits);
1998 udf_release_data(bh);
2000 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_VARCONV))
2001 return udf_fixed_to_variable(ret);