5 * Inode handling routines for the OSTA-UDF(tm) filesystem.
8 * E-mail regarding any portion of the Linux UDF file system should be
9 * directed to the development team mailing list (run by majordomo):
10 * linux_udf@hpesjro.fc.hp.com
13 * This file is distributed under the terms of the GNU General Public
14 * License (GPL). Copies of the GPL can be obtained from:
15 * ftp://prep.ai.mit.edu/pub/gnu/GPL
16 * Each contributing author retains all rights to their own work.
18 * (C) 1998 Dave Boynton
19 * (C) 1998-2004 Ben Fennema
20 * (C) 1999-2000 Stelias Computing Inc
24 * 10/04/98 dgb Added rudimentary directory functions
25 * 10/07/98 Fully working udf_block_map! It works!
26 * 11/25/98 bmap altered to better support extents
27 * 12/06/98 blf partition support in udf_iget, udf_block_map and udf_read_inode
28 * 12/12/98 rewrote udf_block_map to handle next extents and descs across
29 * block boundaries (which is not actually allowed)
30 * 12/20/98 added support for strategy 4096
31 * 03/07/99 rewrote udf_block_map (again)
32 * New funcs, inode_bmap, udf_next_aext
33 * 04/19/99 Support for writing device EA's for major/minor #
38 #include <linux/smp_lock.h>
39 #include <linux/module.h>
40 #include <linux/pagemap.h>
41 #include <linux/buffer_head.h>
42 #include <linux/writeback.h>
43 #include <linux/slab.h>
/* Kernel module identification (author/description/license tags). */
48 MODULE_AUTHOR("Ben Fennema");
49 MODULE_DESCRIPTION("Universal Disk Format Filesystem");
50 MODULE_LICENSE("GPL");
/*
 * Forward declarations for this file's static helpers.
 * EXTENT_MERGE_SIZE bounds the laarr[] scratch array used when splitting,
 * preallocating and merging extents around a newly mapped block.
 */
52 #define EXTENT_MERGE_SIZE 5
54 static mode_t udf_convert_permissions(struct fileEntry *);
55 static int udf_update_inode(struct inode *, int);
56 static void udf_fill_inode(struct inode *, struct buffer_head *);
57 static struct buffer_head *inode_getblk(struct inode *, long, int *,
59 static int8_t udf_insert_aext(struct inode *, kernel_lb_addr, int,
60 kernel_lb_addr, uint32_t, struct buffer_head *);
61 static void udf_split_extents(struct inode *, int *, int, int,
62 kernel_long_ad [EXTENT_MERGE_SIZE], int *);
63 static void udf_prealloc_extents(struct inode *, int, int,
64 kernel_long_ad [EXTENT_MERGE_SIZE], int *);
65 static void udf_merge_extents(struct inode *,
66 kernel_long_ad [EXTENT_MERGE_SIZE], int *);
67 static void udf_update_extents(struct inode *,
68 kernel_long_ad [EXTENT_MERGE_SIZE], int, int,
69 kernel_lb_addr, uint32_t, struct buffer_head **);
70 static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int);
76 * Clean-up before the specified inode is destroyed.
79 * This routine is called when the kernel destroys an inode structure
80 * ie. when iput() finds i_count == 0.
83 * July 1, 1997 - Andrew E. Mileski
84 * Written, tested, and released.
86 * Called at the last iput() if i_nlink is zero.
/*
 * udf_delete_inode
 *
 * Called at the last iput() when i_nlink is zero (see header comment above):
 * writes the inode out one last time, then releases its on-disk allocation.
 * Bad inodes are detected first via is_bad_inode(); the early-exit path for
 * that case is not visible in this excerpt.
 */
88 void udf_delete_inode(struct inode * inode)
90 if (is_bad_inode(inode))
97 udf_update_inode(inode, IS_SYNC(inode));
98 udf_free_inode(inode);
/*
 * udf_clear_inode
 *
 * Per-inode teardown when the in-core inode is dropped: on a read-write
 * mount, discard any preallocated blocks; then free the in-core copy of
 * the file entry tail (UDF_I_DATA) and NULL the pointer to guard against
 * a double free.
 */
106 void udf_clear_inode(struct inode *inode)
108 if (!(inode->i_sb->s_flags & MS_RDONLY)) {
110 udf_discard_prealloc(inode);
114 kfree(UDF_I_DATA(inode));
115 UDF_I_DATA(inode) = NULL;
/* Writeback entry point: defer to the generic buffer-head path with
 * udf_get_block() supplying the disk mapping. */
118 static int udf_writepage(struct page *page, struct writeback_control *wbc)
120 return block_write_full_page(page, udf_get_block, wbc);
/* Readpage entry point: generic buffer-head read using udf_get_block(). */
123 static int udf_readpage(struct file *file, struct page *page)
125 return block_read_full_page(page, udf_get_block);
/* prepare_write hook: map/allocate blocks for [from, to) via udf_get_block(). */
128 static int udf_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
130 return block_prepare_write(page, from, to, udf_get_block);
/* FIBMAP support: translate a file block to a device block. */
133 static sector_t udf_bmap(struct address_space *mapping, sector_t block)
135 return generic_block_bmap(mapping,block,udf_get_block);
/* Address-space operations for regular (extent-mapped) UDF files; all
 * methods funnel through the generic helpers with udf_get_block(). */
138 struct address_space_operations udf_aops = {
139 .readpage = udf_readpage,
140 .writepage = udf_writepage,
141 .sync_page = block_sync_page,
142 .prepare_write = udf_prepare_write,
143 .commit_write = generic_commit_write,
/*
 * udf_expand_file_adinicb
 *
 * Convert a file whose data is embedded in the ICB (allocation type
 * AD_IN_ICB) into a normal extent-mapped file: copy the embedded bytes
 * into page 0 of the page cache, zero the embedded area, switch the
 * allocation type to short/long ADs per the mount flag, and write the
 * page out with a non-blocking writeback_control.
 */
147 void udf_expand_file_adinicb(struct inode * inode, int newsize, int * err)
151 struct writeback_control udf_wbc = {
152 .sync_mode = WB_SYNC_NONE,
156 /* from now on we have normal address_space methods */
157 inode->i_data.a_ops = &udf_aops;
/* Empty in-ICB file: just flip the allocation type, nothing to copy. */
159 if (!UDF_I_LENALLOC(inode))
161 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
162 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
164 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
165 mark_inode_dirty(inode);
169 page = grab_cache_page(inode->i_mapping, 0);
170 BUG_ON(!PageLocked(page));
172 if (!PageUptodate(page))
/* Copy the embedded data into the page and zero the remainder. */
175 memset(kaddr + UDF_I_LENALLOC(inode), 0x00,
176 PAGE_CACHE_SIZE - UDF_I_LENALLOC(inode));
177 memcpy(kaddr, UDF_I_DATA(inode) + UDF_I_LENEATTR(inode),
178 UDF_I_LENALLOC(inode));
179 flush_dcache_page(page);
180 SetPageUptodate(page);
/* Wipe the embedded copy now that the page cache owns the data. */
183 memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0x00,
184 UDF_I_LENALLOC(inode));
185 UDF_I_LENALLOC(inode) = 0;
186 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
187 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
189 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
191 inode->i_data.a_ops->writepage(page, &udf_wbc);
192 page_cache_release(page);
194 mark_inode_dirty(inode);
/*
 * udf_expand_dir_adinicb
 *
 * Convert a directory with in-ICB data to a single-block, extent-mapped
 * directory: allocate a fresh block, re-emit every file identifier into
 * it (re-stamping each descriptor's tagLocation), zero the embedded
 * area, and record the new block as the directory's first extent.
 * Returns the new block's buffer_head via dbh; *block receives the
 * logical block number and *err any allocation error.
 */
197 struct buffer_head * udf_expand_dir_adinicb(struct inode *inode, int *block, int *err)
200 struct buffer_head *sbh = NULL, *dbh = NULL;
201 kernel_lb_addr bloc, eloc;
202 uint32_t elen, extoffset;
205 struct udf_fileident_bh sfibh, dfibh;
206 loff_t f_pos = udf_ext0_offset(inode) >> 2;
207 int size = (udf_ext0_offset(inode) + inode->i_size) >> 2;
208 struct fileIdentDesc cfi, *sfi, *dfi;
210 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
211 alloctype = ICBTAG_FLAG_AD_SHORT;
213 alloctype = ICBTAG_FLAG_AD_LONG;
/* Empty directory: just switch the allocation type in place. */
217 UDF_I_ALLOCTYPE(inode) = alloctype;
218 mark_inode_dirty(inode);
222 /* alloc block, and copy data to it */
223 *block = udf_new_block(inode->i_sb, inode,
224 UDF_I_LOCATION(inode).partitionReferenceNum,
225 UDF_I_LOCATION(inode).logicalBlockNum, err);
229 newblock = udf_get_pblock(inode->i_sb, *block,
230 UDF_I_LOCATION(inode).partitionReferenceNum, 0);
233 dbh = udf_tgetblk(inode->i_sb, newblock);
237 memset(dbh->b_data, 0x00, inode->i_sb->s_blocksize);
238 set_buffer_uptodate(dbh);
240 mark_buffer_dirty_inode(dbh, inode);
/* Walk the in-ICB entries (source fibh) and rewrite them into dbh. */
242 sfibh.soffset = sfibh.eoffset = (f_pos & ((inode->i_sb->s_blocksize - 1) >> 2)) << 2;
243 sbh = sfibh.sbh = sfibh.ebh = NULL;
244 dfibh.soffset = dfibh.eoffset = 0;
245 dfibh.sbh = dfibh.ebh = dbh;
246 while ( (f_pos < size) )
/* Temporarily report AD_IN_ICB so udf_fileident_read parses the
 * embedded layout, then restore the target allocation type. */
248 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
249 sfi = udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL, NULL, NULL, NULL, NULL, NULL);
252 udf_release_data(dbh);
255 UDF_I_ALLOCTYPE(inode) = alloctype;
256 sfi->descTag.tagLocation = cpu_to_le32(*block);
257 dfibh.soffset = dfibh.eoffset;
258 dfibh.eoffset += (sfibh.eoffset - sfibh.soffset);
259 dfi = (struct fileIdentDesc *)(dbh->b_data + dfibh.soffset);
260 if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse,
261 sfi->fileIdent + le16_to_cpu(sfi->lengthOfImpUse)))
/* Write failure: revert to in-ICB and bail out (error path lines
 * not fully visible in this excerpt). */
263 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
264 udf_release_data(dbh);
268 mark_buffer_dirty_inode(dbh, inode);
/* Clear the embedded data and record the new block as extent 0. */
270 memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0, UDF_I_LENALLOC(inode));
271 UDF_I_LENALLOC(inode) = 0;
272 bloc = UDF_I_LOCATION(inode);
273 eloc.logicalBlockNum = *block;
274 eloc.partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
275 elen = inode->i_size;
276 UDF_I_LENEXTENTS(inode) = elen;
277 extoffset = udf_file_entry_alloc_offset(inode);
278 udf_add_aext(inode, &bloc, &extoffset, eloc, elen, &sbh, 0);
281 udf_release_data(sbh);
282 mark_inode_dirty(inode);
/*
 * udf_get_block
 *
 * get_block callback for the generic buffer-head helpers: map logical
 * file block 'block' to a physical block in bh_result. The read path
 * uses udf_block_map(); the create path goes through inode_getblk(),
 * which may allocate, and also maintains the sequential-allocation
 * hint (NEXT_ALLOC_BLOCK/GOAL). Negative block numbers are rejected
 * with a warning.
 */
286 static int udf_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create)
289 struct buffer_head *bh;
294 phys = udf_block_map(inode, block);
296 map_bh(bh_result, inode->i_sb, phys);
/* Sequential write detected: advance the allocation goal hint. */
309 if (block == UDF_I_NEXT_ALLOC_BLOCK(inode) + 1)
311 UDF_I_NEXT_ALLOC_BLOCK(inode) ++;
312 UDF_I_NEXT_ALLOC_GOAL(inode) ++;
317 bh = inode_getblk(inode, block, &err, &phys, &new);
326 set_buffer_new(bh_result);
327 map_bh(bh_result, inode->i_sb, phys);
333 udf_warning(inode->i_sb, "udf_get_block", "block < 0");
/*
 * udf_getblk
 *
 * Map (and optionally create) one block via udf_get_block() using a
 * dummy buffer_head, then hand back a real buffer_head from the block
 * cache. A freshly allocated block is zero-filled, marked uptodate and
 * dirtied against the inode before being returned.
 */
337 static struct buffer_head *
338 udf_getblk(struct inode *inode, long block, int create, int *err)
340 struct buffer_head dummy;
343 dummy.b_blocknr = -1000;
344 *err = udf_get_block(inode, block, &dummy, create);
345 if (!*err && buffer_mapped(&dummy))
347 struct buffer_head *bh;
348 bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
349 if (buffer_new(&dummy))
352 memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
353 set_buffer_uptodate(bh);
355 mark_buffer_dirty_inode(bh, inode);
/*
 * inode_getblk
 *
 * Core block-mapping/allocation routine. Walks the inode's allocation
 * extents to find the one containing the requested logical block
 * (tracking previous/current/next extent cursors p*/c*/n*), returns the
 * existing physical block when the extent is recorded+allocated, and
 * otherwise allocates a new block, then splits / preallocates / merges
 * the surrounding extents in laarr[] and writes the result back.
 * Outputs: *phys (physical block), *new (1 if newly allocated), *err.
 */
362 static struct buffer_head * inode_getblk(struct inode * inode, long block,
363 int *err, long *phys, int *new)
365 struct buffer_head *pbh = NULL, *cbh = NULL, *nbh = NULL, *result = NULL;
366 kernel_long_ad laarr[EXTENT_MERGE_SIZE];
367 uint32_t pextoffset = 0, cextoffset = 0, nextoffset = 0;
368 int count = 0, startnum = 0, endnum = 0;
370 kernel_lb_addr eloc, pbloc, cbloc, nbloc;
372 uint64_t lbcount = 0, b_off = 0;
373 uint32_t newblocknum, newblock, offset = 0;
375 int goal = 0, pgoal = UDF_I_LOCATION(inode).logicalBlockNum;
378 pextoffset = cextoffset = nextoffset = udf_file_entry_alloc_offset(inode);
379 b_off = (uint64_t)block << inode->i_sb->s_blocksize_bits;
380 pbloc = cbloc = nbloc = UDF_I_LOCATION(inode);
382 /* find the extent which contains the block we are looking for.
383 alternate between laarr[0] and laarr[1] for locations of the
384 current extent, and the previous extent */
389 udf_release_data(pbh);
390 atomic_inc(&cbh->b_count);
395 udf_release_data(cbh);
396 atomic_inc(&nbh->b_count);
405 pextoffset = cextoffset;
406 cextoffset = nextoffset;
408 if ((etype = udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1)) == -1)
413 laarr[c].extLength = (etype << 30) | elen;
414 laarr[c].extLocation = eloc;
/* Remember the block just past the last real extent as the next
 * allocation goal candidate. */
416 if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
417 pgoal = eloc.logicalBlockNum +
418 ((elen + inode->i_sb->s_blocksize - 1) >>
419 inode->i_sb->s_blocksize_bits);
422 } while (lbcount + elen <= b_off);
425 offset = b_off >> inode->i_sb->s_blocksize_bits;
427 /* if the extent is allocated and recorded, return the block
428 if the extent is not a multiple of the blocksize, round up */
430 if (etype == (EXT_RECORDED_ALLOCATED >> 30))
432 if (elen & (inode->i_sb->s_blocksize - 1))
434 elen = EXT_RECORDED_ALLOCATED |
435 ((elen + inode->i_sb->s_blocksize - 1) &
436 ~(inode->i_sb->s_blocksize - 1));
437 etype = udf_write_aext(inode, nbloc, &cextoffset, eloc, elen, nbh, 1);
439 udf_release_data(pbh);
440 udf_release_data(cbh);
441 udf_release_data(nbh);
442 newblock = udf_get_lb_pblock(inode->i_sb, eloc, offset);
449 endnum = startnum = ((count > 1) ? 1 : count);
/* Round the trailing extent up to a whole block before extending. */
450 if (laarr[c].extLength & (inode->i_sb->s_blocksize - 1))
453 (laarr[c].extLength & UDF_EXTENT_FLAG_MASK) |
454 (((laarr[c].extLength & UDF_EXTENT_LENGTH_MASK) +
455 inode->i_sb->s_blocksize - 1) &
456 ~(inode->i_sb->s_blocksize - 1));
457 UDF_I_LENEXTENTS(inode) =
458 (UDF_I_LENEXTENTS(inode) + inode->i_sb->s_blocksize - 1) &
459 ~(inode->i_sb->s_blocksize - 1);
/* Insert an unrecorded/unallocated extent covering the hole up to
 * and including the requested block. */
462 laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
463 ((offset + 1) << inode->i_sb->s_blocksize_bits);
464 memset(&laarr[c].extLocation, 0x00, sizeof(kernel_lb_addr));
470 endnum = startnum = ((count > 2) ? 2 : count);
472 /* if the current extent is in position 0, swap it with the previous */
473 if (!c && count != 1)
481 /* if the current block is located in an extent, read the next extent */
484 if ((etype = udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 0)) != -1)
486 laarr[c+1].extLength = (etype << 30) | elen;
487 laarr[c+1].extLocation = eloc;
495 udf_release_data(cbh);
496 udf_release_data(nbh);
498 /* if the current extent is not recorded but allocated, get the
499 block in the extent corresponding to the requested block */
500 if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
501 newblocknum = laarr[c].extLocation.logicalBlockNum + offset;
502 else /* otherwise, allocate a new block */
504 if (UDF_I_NEXT_ALLOC_BLOCK(inode) == block)
505 goal = UDF_I_NEXT_ALLOC_GOAL(inode);
510 goal = UDF_I_LOCATION(inode).logicalBlockNum + 1;
513 if (!(newblocknum = udf_new_block(inode->i_sb, inode,
514 UDF_I_LOCATION(inode).partitionReferenceNum, goal, err)))
516 udf_release_data(pbh);
520 UDF_I_LENEXTENTS(inode) += inode->i_sb->s_blocksize;
523 /* if the extent the requested block is located in contains multiple blocks,
524 split the extent into at most three extents. blocks prior to requested
525 block, requested block, and blocks after requested block */
526 udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum);
528 #ifdef UDF_PREALLOCATE
529 /* preallocate blocks */
530 udf_prealloc_extents(inode, c, lastblock, laarr, &endnum);
533 /* merge any continuous blocks in laarr */
534 udf_merge_extents(inode, laarr, &endnum);
536 /* write back the new extents, inserting new extents if the new number
537 of extents is greater than the old number, and deleting extents if
538 the new number of extents is less than the old number */
539 udf_update_extents(inode, laarr, startnum, endnum, pbloc, pextoffset, &pbh);
541 udf_release_data(pbh);
543 if (!(newblock = udf_get_pblock(inode->i_sb, newblocknum,
544 UDF_I_LOCATION(inode).partitionReferenceNum, 0)))
/* Record the allocation hint for the next sequential write. */
551 UDF_I_NEXT_ALLOC_BLOCK(inode) = block;
552 UDF_I_NEXT_ALLOC_GOAL(inode) = newblocknum;
553 inode->i_ctime = current_fs_time(inode->i_sb);
556 udf_sync_inode(inode);
558 mark_inode_dirty(inode);
/*
 * udf_split_extents
 *
 * Split the extent laarr[*c] (which covers the requested block at
 * 'offset' blocks in) into up to three pieces: blocks before the
 * requested one, the requested block itself (now recorded+allocated at
 * 'newblocknum'), and blocks after it. Adjusts *c and *endnum to
 * reflect the new layout. Only applies to not-recorded extents.
 */
562 static void udf_split_extents(struct inode *inode, int *c, int offset, int newblocknum,
563 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
565 if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) ||
566 (laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
569 int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) +
570 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
571 int8_t etype = (laarr[curr].extLength >> 30);
/* Requested block is at an edge: shift entries by one; otherwise
 * it is in the middle and two new slots are needed. */
575 else if (!offset || blen == offset + 1)
577 laarr[curr+2] = laarr[curr+1];
578 laarr[curr+1] = laarr[curr];
582 laarr[curr+3] = laarr[curr+1];
583 laarr[curr+2] = laarr[curr+1] = laarr[curr];
/* Leading piece: free its blocks if they were allocated, and mark
 * it not-recorded/not-allocated. */
588 if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
590 udf_free_blocks(inode->i_sb, inode, laarr[curr].extLocation, 0, offset);
591 laarr[curr].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
592 (offset << inode->i_sb->s_blocksize_bits);
593 laarr[curr].extLocation.logicalBlockNum = 0;
594 laarr[curr].extLocation.partitionReferenceNum = 0;
597 laarr[curr].extLength = (etype << 30) |
598 (offset << inode->i_sb->s_blocksize_bits);
/* Middle piece: one recorded+allocated block at newblocknum. */
604 laarr[curr].extLocation.logicalBlockNum = newblocknum;
605 if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
606 laarr[curr].extLocation.partitionReferenceNum =
607 UDF_I_LOCATION(inode).partitionReferenceNum;
608 laarr[curr].extLength = EXT_RECORDED_ALLOCATED |
609 inode->i_sb->s_blocksize;
/* Trailing piece: whatever follows the requested block. */
612 if (blen != offset + 1)
614 if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
615 laarr[curr].extLocation.logicalBlockNum += (offset + 1);
616 laarr[curr].extLength = (etype << 30) |
617 ((blen - (offset + 1)) << inode->i_sb->s_blocksize_bits);
/*
 * udf_prealloc_extents
 *
 * After a block allocation, try to preallocate up to
 * UDF_DEFAULT_PREALLOC_BLOCKS contiguous blocks following extent c,
 * either extending an existing not-recorded-allocated extent or
 * inserting a new one, and shrink/remove the following unallocated
 * extents by the amount preallocated. Updates *endnum and the inode's
 * recorded extent length accordingly.
 */
624 static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
625 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
627 int start, length = 0, currlength = 0, i;
629 if (*endnum >= (c+1))
/* Following extent already allocated-but-unrecorded: it can simply
 * be extended, so count its current length. */
638 if ((laarr[c+1].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
641 length = currlength = (((laarr[c+1].extLength & UDF_EXTENT_LENGTH_MASK) +
642 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
/* Sum the lengths of subsequent unallocated extents to bound how
 * much preallocation is useful. */
648 for (i=start+1; i<=*endnum; i++)
653 length += UDF_DEFAULT_PREALLOC_BLOCKS;
655 else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
656 length += (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
657 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
664 int next = laarr[start].extLocation.logicalBlockNum +
665 (((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) +
666 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
667 int numalloc = udf_prealloc_blocks(inode->i_sb, inode,
668 laarr[start].extLocation.partitionReferenceNum,
669 next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ? length :
670 UDF_DEFAULT_PREALLOC_BLOCKS) - currlength);
/* Either grow the existing allocated extent or shift the array and
 * insert a fresh EXT_NOT_RECORDED_ALLOCATED extent after c. */
675 laarr[start].extLength +=
676 (numalloc << inode->i_sb->s_blocksize_bits);
679 memmove(&laarr[c+2], &laarr[c+1],
680 sizeof(long_ad) * (*endnum - (c+1)));
682 laarr[c+1].extLocation.logicalBlockNum = next;
683 laarr[c+1].extLocation.partitionReferenceNum =
684 laarr[c].extLocation.partitionReferenceNum;
685 laarr[c+1].extLength = EXT_NOT_RECORDED_ALLOCATED |
686 (numalloc << inode->i_sb->s_blocksize_bits);
/* Consume the preallocated length out of the trailing unallocated
 * extents, dropping any that are fully consumed. */
690 for (i=start+1; numalloc && i<*endnum; i++)
692 int elen = ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
693 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
697 laarr[i].extLength -=
698 (numalloc << inode->i_sb->s_blocksize_bits);
705 memmove(&laarr[i], &laarr[i+1],
706 sizeof(long_ad) * (*endnum - (i+1)));
711 UDF_I_LENEXTENTS(inode) += numalloc << inode->i_sb->s_blocksize_bits;
/*
 * udf_merge_extents
 *
 * Coalesce adjacent entries of laarr[] when they have the same extent
 * type and are physically contiguous (or both unrecorded), respecting
 * the UDF maximum extent length (UDF_EXTENT_LENGTH_MASK): an oversize
 * merge caps the first extent at the maximum and moves the remainder
 * into the second. Also converts allocated-but-unrecorded extents
 * adjacent to unrecorded ones back to unallocated, freeing their
 * blocks. *endnum is decremented for each pair merged.
 */
716 static void udf_merge_extents(struct inode *inode,
717 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
721 for (i=0; i<(*endnum-1); i++)
723 if ((laarr[i].extLength >> 30) == (laarr[i+1].extLength >> 30))
725 if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) ||
726 ((laarr[i+1].extLocation.logicalBlockNum - laarr[i].extLocation.logicalBlockNum) ==
727 (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
728 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits)))
/* Combined length would exceed the max extent size: cap
 * extent i and push the overflow into extent i+1. */
730 if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
731 (laarr[i+1].extLength & UDF_EXTENT_LENGTH_MASK) +
732 inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK)
734 laarr[i+1].extLength = (laarr[i+1].extLength -
735 (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
736 UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize-1);
737 laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_FLAG_MASK) +
738 (UDF_EXTENT_LENGTH_MASK + 1) - inode->i_sb->s_blocksize;
739 laarr[i+1].extLocation.logicalBlockNum =
740 laarr[i].extLocation.logicalBlockNum +
741 ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) >>
742 inode->i_sb->s_blocksize_bits);
/* Full merge: fold i+1 into i and close the gap in laarr[]. */
746 laarr[i].extLength = laarr[i+1].extLength +
747 (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
748 inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize-1));
750 memmove(&laarr[i+1], &laarr[i+2],
751 sizeof(long_ad) * (*endnum - (i+2)));
/* Allocated-but-unrecorded followed by unrecorded: free the blocks
 * and merge both into a single unrecorded extent. */
757 else if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) &&
758 ((laarr[i+1].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)))
760 udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0,
761 ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
762 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
763 laarr[i].extLocation.logicalBlockNum = 0;
764 laarr[i].extLocation.partitionReferenceNum = 0;
766 if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
767 (laarr[i+1].extLength & UDF_EXTENT_LENGTH_MASK) +
768 inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK)
770 laarr[i+1].extLength = (laarr[i+1].extLength -
771 (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
772 UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize-1);
773 laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_FLAG_MASK) +
774 (UDF_EXTENT_LENGTH_MASK + 1) - inode->i_sb->s_blocksize;
778 laarr[i].extLength = laarr[i+1].extLength +
779 (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
780 inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize-1));
782 memmove(&laarr[i+1], &laarr[i+2],
783 sizeof(long_ad) * (*endnum - (i+2)));
/* Lone allocated-but-unrecorded extent: release its blocks and
 * downgrade it to not-recorded/not-allocated. */
788 else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
790 udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0,
791 ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
792 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
793 laarr[i].extLocation.logicalBlockNum = 0;
794 laarr[i].extLocation.partitionReferenceNum = 0;
795 laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) |
796 EXT_NOT_RECORDED_NOT_ALLOCATED;
/*
 * udf_update_extents
 *
 * Write the rebuilt extent list laarr[0..endnum) back to the inode's
 * allocation descriptors starting at (pbloc, pextoffset). If the list
 * shrank, delete the surplus on-disk extents; if it grew, insert the
 * new leading extents first; then overwrite the remaining slots in
 * place with udf_write_aext().
 */
801 static void udf_update_extents(struct inode *inode,
802 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int startnum, int endnum,
803 kernel_lb_addr pbloc, uint32_t pextoffset, struct buffer_head **pbh)
806 kernel_lb_addr tmploc;
809 if (startnum > endnum)
811 for (i=0; i<(startnum-endnum); i++)
813 udf_delete_aext(inode, pbloc, pextoffset, laarr[i].extLocation,
814 laarr[i].extLength, *pbh);
817 else if (startnum < endnum)
819 for (i=0; i<(endnum-startnum); i++)
821 udf_insert_aext(inode, pbloc, pextoffset, laarr[i].extLocation,
822 laarr[i].extLength, *pbh);
823 udf_next_aext(inode, &pbloc, &pextoffset, &laarr[i].extLocation,
824 &laarr[i].extLength, pbh, 1);
/* Overwrite the remaining descriptors in place. */
829 for (i=start; i<endnum; i++)
831 udf_next_aext(inode, &pbloc, &pextoffset, &tmploc, &tmplen, pbh, 0);
832 udf_write_aext(inode, pbloc, &pextoffset, laarr[i].extLocation,
833 laarr[i].extLength, *pbh, 1);
/*
 * udf_bread
 *
 * Like sb_bread() for a UDF file: get (optionally allocating) the
 * buffer for a logical file block via udf_getblk(), and read it from
 * disk if it is not already uptodate. Returns NULL on failure (path
 * not fully visible in this excerpt).
 */
837 struct buffer_head * udf_bread(struct inode * inode, int block,
838 int create, int * err)
840 struct buffer_head * bh = NULL;
842 bh = udf_getblk(inode, block, create, err);
846 if (buffer_uptodate(bh))
848 ll_rw_block(READ, 1, &bh);
850 if (buffer_uptodate(bh))
/*
 * udf_truncate
 *
 * Truncate a regular file, directory, or symlink to inode->i_size.
 * In-ICB inodes are either truncated in place (zeroing the embedded
 * tail) or first expanded to extent form if the new size no longer
 * fits in the ICB; extent-mapped inodes zero the partial last block
 * via block_truncate_page() and drop extents past the new size.
 * Append-only and immutable inodes are refused.
 */
857 void udf_truncate(struct inode * inode)
862 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
863 S_ISLNK(inode->i_mode)))
865 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
869 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
871 if (inode->i_sb->s_blocksize < (udf_file_entry_alloc_offset(inode) +
874 udf_expand_file_adinicb(inode, inode->i_size, &err);
875 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
/* Expansion failed and the inode is still in-ICB: fall back to
 * the embedded length. */
877 inode->i_size = UDF_I_LENALLOC(inode);
882 udf_truncate_extents(inode);
/* Still fits in the ICB: zero the tail of the embedded data. */
886 offset = inode->i_size & (inode->i_sb->s_blocksize - 1);
887 memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode) + offset, 0x00, inode->i_sb->s_blocksize - offset - udf_file_entry_alloc_offset(inode));
888 UDF_I_LENALLOC(inode) = inode->i_size;
893 block_truncate_page(inode->i_mapping, inode->i_size, udf_get_block);
894 udf_truncate_extents(inode);
897 inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb);
899 udf_sync_inode (inode);
901 mark_inode_dirty(inode);
/*
 * __udf_read_inode
 *
 * Read the on-disk (extended) file entry for the inode at
 * UDF_I_LOCATION(inode), validating the descriptor tag. Strategy-4096
 * inodes are followed through their indirect entry (recursing with the
 * new location); any unsupported strategy or bad tag marks the inode
 * bad. On success the descriptor is handed to udf_fill_inode().
 */
906 __udf_read_inode(struct inode *inode)
908 struct buffer_head *bh = NULL;
909 struct fileEntry *fe;
913 * Set defaults, but the inode is still incomplete!
914 * Note: get_new_inode() sets the following on a new inode:
917 * i_flags = sb->s_flags
919 * clean_inode(): zero fills and sets
924 inode->i_blksize = PAGE_SIZE;
926 bh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 0, &ident);
930 printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed !bh\n",
932 make_bad_inode(inode);
936 if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
937 ident != TAG_IDENT_USE)
939 printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed ident=%d\n",
940 inode->i_ino, ident);
941 udf_release_data(bh);
942 make_bad_inode(inode);
946 fe = (struct fileEntry *)bh->b_data;
948 if (le16_to_cpu(fe->icbTag.strategyType) == 4096)
950 struct buffer_head *ibh = NULL, *nbh = NULL;
951 struct indirectEntry *ie;
953 ibh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 1, &ident);
954 if (ident == TAG_IDENT_IE)
959 ie = (struct indirectEntry *)ibh->b_data;
961 loc = lelb_to_cpu(ie->indirectICB.extLocation);
963 if (ie->indirectICB.extLength &&
964 (nbh = udf_read_ptagged(inode->i_sb, loc, 0, &ident)))
966 if (ident == TAG_IDENT_FE ||
967 ident == TAG_IDENT_EFE)
/* Follow the indirect ICB: adopt the new location and
 * re-read the inode from there. */
969 memcpy(&UDF_I_LOCATION(inode), &loc, sizeof(kernel_lb_addr));
970 udf_release_data(bh);
971 udf_release_data(ibh);
972 udf_release_data(nbh);
973 __udf_read_inode(inode);
978 udf_release_data(nbh);
979 udf_release_data(ibh);
983 udf_release_data(ibh);
987 udf_release_data(ibh);
989 else if (le16_to_cpu(fe->icbTag.strategyType) != 4)
991 printk(KERN_ERR "udf: unsupported strategy type: %d\n",
992 le16_to_cpu(fe->icbTag.strategyType));
993 udf_release_data(bh);
994 make_bad_inode(inode);
997 udf_fill_inode(inode, bh);
998 udf_release_data(bh);
/*
 * udf_fill_inode
 *
 * Populate the in-core inode from an on-disk file entry (FE), extended
 * file entry (EFE), or unallocated space entry (USE) held in bh:
 * copies the descriptor tail into UDF_I_DATA, converts ownership,
 * permissions, link count, size, block count and timestamps, and wires
 * up the inode/file/address-space operations by ICB file type.
 */
1001 static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
1003 struct fileEntry *fe;
1004 struct extendedFileEntry *efe;
1009 fe = (struct fileEntry *)bh->b_data;
1010 efe = (struct extendedFileEntry *)bh->b_data;
1012 if (le16_to_cpu(fe->icbTag.strategyType) == 4)
1013 UDF_I_STRAT4096(inode) = 0;
1014 else /* if (le16_to_cpu(fe->icbTag.strategyType) == 4096) */
1015 UDF_I_STRAT4096(inode) = 1;
1017 UDF_I_ALLOCTYPE(inode) = le16_to_cpu(fe->icbTag.flags) & ICBTAG_FLAG_AD_MASK;
1018 UDF_I_UNIQUE(inode) = 0;
1019 UDF_I_LENEATTR(inode) = 0;
1020 UDF_I_LENEXTENTS(inode) = 0;
1021 UDF_I_LENALLOC(inode) = 0;
1022 UDF_I_NEXT_ALLOC_BLOCK(inode) = 0;
1023 UDF_I_NEXT_ALLOC_GOAL(inode) = 0;
/* Copy the variable-length tail (EAs + allocation descriptors) of
 * the descriptor into the in-core buffer; the tail size depends on
 * whether this is an EFE, FE or USE.
 * NOTE(review): the kmalloc() results are used without a NULL check
 * in this excerpt. */
1024 if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_EFE)
1026 UDF_I_EFE(inode) = 1;
1027 UDF_I_USE(inode) = 0;
1028 UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry), GFP_KERNEL);
1029 memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct extendedFileEntry), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
1031 else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_FE)
1033 UDF_I_EFE(inode) = 0;
1034 UDF_I_USE(inode) = 0;
1035 UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct fileEntry), GFP_KERNEL);
1036 memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct fileEntry), inode->i_sb->s_blocksize - sizeof(struct fileEntry));
1038 else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
1040 UDF_I_EFE(inode) = 0;
1041 UDF_I_USE(inode) = 1;
1042 UDF_I_LENALLOC(inode) =
1044 ((struct unallocSpaceEntry *)bh->b_data)->lengthAllocDescs);
1045 UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry), GFP_KERNEL);
1046 memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct unallocSpaceEntry), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
/* uid/gid of -1 on disk means "not set": fall back to mount defaults. */
1050 inode->i_uid = le32_to_cpu(fe->uid);
1051 if ( inode->i_uid == -1 ) inode->i_uid = UDF_SB(inode->i_sb)->s_uid;
1053 inode->i_gid = le32_to_cpu(fe->gid);
1054 if ( inode->i_gid == -1 ) inode->i_gid = UDF_SB(inode->i_sb)->s_gid;
1056 inode->i_nlink = le16_to_cpu(fe->fileLinkCount);
1057 if (!inode->i_nlink)
1060 inode->i_size = le64_to_cpu(fe->informationLength);
1061 UDF_I_LENEXTENTS(inode) = inode->i_size;
1063 inode->i_mode = udf_convert_permissions(fe);
1064 inode->i_mode &= ~UDF_SB(inode->i_sb)->s_umask;
/* FE and EFE lay out block counts and timestamps differently; bad
 * timestamps fall back to the volume's recording time. */
1066 if (UDF_I_EFE(inode) == 0)
1068 inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
1069 (inode->i_sb->s_blocksize_bits - 9);
1071 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1072 lets_to_cpu(fe->accessTime)) )
1074 inode->i_atime.tv_sec = convtime;
1075 inode->i_atime.tv_nsec = convtime_usec * 1000;
1079 inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
1082 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1083 lets_to_cpu(fe->modificationTime)) )
1085 inode->i_mtime.tv_sec = convtime;
1086 inode->i_mtime.tv_nsec = convtime_usec * 1000;
1090 inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
1093 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1094 lets_to_cpu(fe->attrTime)) )
1096 inode->i_ctime.tv_sec = convtime;
1097 inode->i_ctime.tv_nsec = convtime_usec * 1000;
1101 inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
1104 UDF_I_UNIQUE(inode) = le64_to_cpu(fe->uniqueID);
1105 UDF_I_LENEATTR(inode) = le32_to_cpu(fe->lengthExtendedAttr);
1106 UDF_I_LENALLOC(inode) = le32_to_cpu(fe->lengthAllocDescs);
1107 offset = sizeof(struct fileEntry) + UDF_I_LENEATTR(inode);
1111 inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
1112 (inode->i_sb->s_blocksize_bits - 9);
1114 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1115 lets_to_cpu(efe->accessTime)) )
1117 inode->i_atime.tv_sec = convtime;
1118 inode->i_atime.tv_nsec = convtime_usec * 1000;
1122 inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
1125 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1126 lets_to_cpu(efe->modificationTime)) )
1128 inode->i_mtime.tv_sec = convtime;
1129 inode->i_mtime.tv_nsec = convtime_usec * 1000;
1133 inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
1136 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1137 lets_to_cpu(efe->createTime)) )
1139 UDF_I_CRTIME(inode).tv_sec = convtime;
1140 UDF_I_CRTIME(inode).tv_nsec = convtime_usec * 1000;
1144 UDF_I_CRTIME(inode) = UDF_SB_RECORDTIME(inode->i_sb);
1147 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1148 lets_to_cpu(efe->attrTime)) )
1150 inode->i_ctime.tv_sec = convtime;
1151 inode->i_ctime.tv_nsec = convtime_usec * 1000;
1155 inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
1158 UDF_I_UNIQUE(inode) = le64_to_cpu(efe->uniqueID);
1159 UDF_I_LENEATTR(inode) = le32_to_cpu(efe->lengthExtendedAttr);
1160 UDF_I_LENALLOC(inode) = le32_to_cpu(efe->lengthAllocDescs);
1161 offset = sizeof(struct extendedFileEntry) + UDF_I_LENEATTR(inode);
/* Dispatch on the ICB file type to set up i_op/i_fop/a_ops and the
 * S_IF* mode bits. */
1164 switch (fe->icbTag.fileType)
1166 case ICBTAG_FILE_TYPE_DIRECTORY:
1168 inode->i_op = &udf_dir_inode_operations;
1169 inode->i_fop = &udf_dir_operations;
1170 inode->i_mode |= S_IFDIR;
1174 case ICBTAG_FILE_TYPE_REALTIME:
1175 case ICBTAG_FILE_TYPE_REGULAR:
1176 case ICBTAG_FILE_TYPE_UNDEF:
1178 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
1179 inode->i_data.a_ops = &udf_adinicb_aops;
1181 inode->i_data.a_ops = &udf_aops;
1182 inode->i_op = &udf_file_inode_operations;
1183 inode->i_fop = &udf_file_operations;
1184 inode->i_mode |= S_IFREG;
1187 case ICBTAG_FILE_TYPE_BLOCK:
1189 inode->i_mode |= S_IFBLK;
1192 case ICBTAG_FILE_TYPE_CHAR:
1194 inode->i_mode |= S_IFCHR;
1197 case ICBTAG_FILE_TYPE_FIFO:
1199 init_special_inode(inode, inode->i_mode | S_IFIFO, 0);
1202 case ICBTAG_FILE_TYPE_SOCKET:
1204 init_special_inode(inode, inode->i_mode | S_IFSOCK, 0);
1207 case ICBTAG_FILE_TYPE_SYMLINK:
1209 inode->i_data.a_ops = &udf_symlink_aops;
1210 inode->i_op = &page_symlink_inode_operations;
1211 inode->i_mode = S_IFLNK|S_IRWXUGO;
1216 printk(KERN_ERR "udf: udf_fill_inode(ino %ld) failed unknown file type=%d\n",
1217 inode->i_ino, fe->icbTag.fileType);
1218 make_bad_inode(inode);
/* Device nodes carry major/minor in a Device Specification EA
 * (attribute type 12). */
1222 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1224 struct deviceSpec *dsea =
1225 (struct deviceSpec *)
1226 udf_get_extendedattr(inode, 12, 1);
1230 init_special_inode(inode, inode->i_mode, MKDEV(
1231 le32_to_cpu(dsea->majorDeviceIdent),
1232 le32_to_cpu(dsea->minorDeviceIdent)));
1233 /* Developer ID ??? */
1237 make_bad_inode(inode);
/*
 * udf_convert_permissions
 *
 * Convert the on-disk UDF permission word (groups of 5 bits per class,
 * of which the low 3 map to rwx) plus the setuid/setgid/sticky ICB
 * flags into a POSIX mode. The return statement is not visible in this
 * excerpt.
 */
1243 udf_convert_permissions(struct fileEntry *fe)
1246 uint32_t permissions;
1249 permissions = le32_to_cpu(fe->permissions);
1250 flags = le16_to_cpu(fe->icbTag.flags);
1252 mode = (( permissions ) & S_IRWXO) |
1253 (( permissions >> 2 ) & S_IRWXG) |
1254 (( permissions >> 4 ) & S_IRWXU) |
1255 (( flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) |
1256 (( flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) |
1257 (( flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0);
1266 * Write out the specified inode.
1269 * This routine is called whenever an inode is synced.
1270 * Currently this routine is just a placeholder.
1273 * July 1, 1997 - Andrew E. Mileski
1274 * Written, tested, and released.
/* VFS write_inode hook: delegate to udf_update_inode() with the caller's
 * sync flag. */
1277 int udf_write_inode(struct inode * inode, int sync)
1281 ret = udf_update_inode(inode, sync);
/* Synchronously write the inode (udf_update_inode with do_sync = 1). */
1286 int udf_sync_inode(struct inode * inode)
1288 return udf_update_inode(inode, 1);
1292 udf_update_inode(struct inode *inode, int do_sync)
1294 struct buffer_head *bh = NULL;
1295 struct fileEntry *fe;
1296 struct extendedFileEntry *efe;
1301 kernel_timestamp cpu_time;
1304 bh = udf_tread(inode->i_sb,
1305 udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0));
1309 udf_debug("bread failure\n");
1313 memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
1315 fe = (struct fileEntry *)bh->b_data;
1316 efe = (struct extendedFileEntry *)bh->b_data;
1318 if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
1320 struct unallocSpaceEntry *use =
1321 (struct unallocSpaceEntry *)bh->b_data;
1323 use->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
1324 memcpy(bh->b_data + sizeof(struct unallocSpaceEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
1325 crclen = sizeof(struct unallocSpaceEntry) + UDF_I_LENALLOC(inode) -
1327 use->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
1328 use->descTag.descCRCLength = cpu_to_le16(crclen);
1329 use->descTag.descCRC = cpu_to_le16(udf_crc((char *)use + sizeof(tag), crclen, 0));
1331 use->descTag.tagChecksum = 0;
1332 for (i=0; i<16; i++)
1334 use->descTag.tagChecksum += ((uint8_t *)&(use->descTag))[i];
1336 mark_buffer_dirty(bh);
1337 udf_release_data(bh);
1341 if (inode->i_uid != UDF_SB(inode->i_sb)->s_uid)
1342 fe->uid = cpu_to_le32(inode->i_uid);
1344 if (inode->i_gid != UDF_SB(inode->i_sb)->s_gid)
1345 fe->gid = cpu_to_le32(inode->i_gid);
1347 udfperms = ((inode->i_mode & S_IRWXO) ) |
1348 ((inode->i_mode & S_IRWXG) << 2) |
1349 ((inode->i_mode & S_IRWXU) << 4);
1351 udfperms |= (le32_to_cpu(fe->permissions) &
1352 (FE_PERM_O_DELETE | FE_PERM_O_CHATTR |
1353 FE_PERM_G_DELETE | FE_PERM_G_CHATTR |
1354 FE_PERM_U_DELETE | FE_PERM_U_CHATTR));
1355 fe->permissions = cpu_to_le32(udfperms);
1357 if (S_ISDIR(inode->i_mode))
1358 fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
1360 fe->fileLinkCount = cpu_to_le16(inode->i_nlink);
1362 fe->informationLength = cpu_to_le64(inode->i_size);
1364 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1367 struct deviceSpec *dsea =
1368 (struct deviceSpec *)
1369 udf_get_extendedattr(inode, 12, 1);
1373 dsea = (struct deviceSpec *)
1374 udf_add_extendedattr(inode,
1375 sizeof(struct deviceSpec) +
1376 sizeof(regid), 12, 0x3);
1377 dsea->attrType = cpu_to_le32(12);
1378 dsea->attrSubtype = 1;
1379 dsea->attrLength = cpu_to_le32(sizeof(struct deviceSpec) +
1381 dsea->impUseLength = cpu_to_le32(sizeof(regid));
1383 eid = (regid *)dsea->impUse;
1384 memset(eid, 0, sizeof(regid));
1385 strcpy(eid->ident, UDF_ID_DEVELOPER);
1386 eid->identSuffix[0] = UDF_OS_CLASS_UNIX;
1387 eid->identSuffix[1] = UDF_OS_ID_LINUX;
1388 dsea->majorDeviceIdent = cpu_to_le32(imajor(inode));
1389 dsea->minorDeviceIdent = cpu_to_le32(iminor(inode));
1392 if (UDF_I_EFE(inode) == 0)
1394 memcpy(bh->b_data + sizeof(struct fileEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct fileEntry));
1395 fe->logicalBlocksRecorded = cpu_to_le64(
1396 (inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
1397 (inode->i_sb->s_blocksize_bits - 9));
1399 if (udf_time_to_stamp(&cpu_time, inode->i_atime))
1400 fe->accessTime = cpu_to_lets(cpu_time);
1401 if (udf_time_to_stamp(&cpu_time, inode->i_mtime))
1402 fe->modificationTime = cpu_to_lets(cpu_time);
1403 if (udf_time_to_stamp(&cpu_time, inode->i_ctime))
1404 fe->attrTime = cpu_to_lets(cpu_time);
1405 memset(&(fe->impIdent), 0, sizeof(regid));
1406 strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER);
1407 fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1408 fe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1409 fe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode));
1410 fe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode));
1411 fe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
1412 fe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_FE);
1413 crclen = sizeof(struct fileEntry);
1417 memcpy(bh->b_data + sizeof(struct extendedFileEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
1418 efe->objectSize = cpu_to_le64(inode->i_size);
1419 efe->logicalBlocksRecorded = cpu_to_le64(
1420 (inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
1421 (inode->i_sb->s_blocksize_bits - 9));
1423 if (UDF_I_CRTIME(inode).tv_sec > inode->i_atime.tv_sec ||
1424 (UDF_I_CRTIME(inode).tv_sec == inode->i_atime.tv_sec &&
1425 UDF_I_CRTIME(inode).tv_nsec > inode->i_atime.tv_nsec))
1427 UDF_I_CRTIME(inode) = inode->i_atime;
1429 if (UDF_I_CRTIME(inode).tv_sec > inode->i_mtime.tv_sec ||
1430 (UDF_I_CRTIME(inode).tv_sec == inode->i_mtime.tv_sec &&
1431 UDF_I_CRTIME(inode).tv_nsec > inode->i_mtime.tv_nsec))
1433 UDF_I_CRTIME(inode) = inode->i_mtime;
1435 if (UDF_I_CRTIME(inode).tv_sec > inode->i_ctime.tv_sec ||
1436 (UDF_I_CRTIME(inode).tv_sec == inode->i_ctime.tv_sec &&
1437 UDF_I_CRTIME(inode).tv_nsec > inode->i_ctime.tv_nsec))
1439 UDF_I_CRTIME(inode) = inode->i_ctime;
1442 if (udf_time_to_stamp(&cpu_time, inode->i_atime))
1443 efe->accessTime = cpu_to_lets(cpu_time);
1444 if (udf_time_to_stamp(&cpu_time, inode->i_mtime))
1445 efe->modificationTime = cpu_to_lets(cpu_time);
1446 if (udf_time_to_stamp(&cpu_time, UDF_I_CRTIME(inode)))
1447 efe->createTime = cpu_to_lets(cpu_time);
1448 if (udf_time_to_stamp(&cpu_time, inode->i_ctime))
1449 efe->attrTime = cpu_to_lets(cpu_time);
1451 memset(&(efe->impIdent), 0, sizeof(regid));
1452 strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER);
1453 efe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1454 efe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1455 efe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode));
1456 efe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode));
1457 efe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
1458 efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE);
1459 crclen = sizeof(struct extendedFileEntry);
1461 if (UDF_I_STRAT4096(inode))
1463 fe->icbTag.strategyType = cpu_to_le16(4096);
1464 fe->icbTag.strategyParameter = cpu_to_le16(1);
1465 fe->icbTag.numEntries = cpu_to_le16(2);
1469 fe->icbTag.strategyType = cpu_to_le16(4);
1470 fe->icbTag.numEntries = cpu_to_le16(1);
1473 if (S_ISDIR(inode->i_mode))
1474 fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY;
1475 else if (S_ISREG(inode->i_mode))
1476 fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR;
1477 else if (S_ISLNK(inode->i_mode))
1478 fe->icbTag.fileType = ICBTAG_FILE_TYPE_SYMLINK;
1479 else if (S_ISBLK(inode->i_mode))
1480 fe->icbTag.fileType = ICBTAG_FILE_TYPE_BLOCK;
1481 else if (S_ISCHR(inode->i_mode))
1482 fe->icbTag.fileType = ICBTAG_FILE_TYPE_CHAR;
1483 else if (S_ISFIFO(inode->i_mode))
1484 fe->icbTag.fileType = ICBTAG_FILE_TYPE_FIFO;
1485 else if (S_ISSOCK(inode->i_mode))
1486 fe->icbTag.fileType = ICBTAG_FILE_TYPE_SOCKET;
1488 icbflags = UDF_I_ALLOCTYPE(inode) |
1489 ((inode->i_mode & S_ISUID) ? ICBTAG_FLAG_SETUID : 0) |
1490 ((inode->i_mode & S_ISGID) ? ICBTAG_FLAG_SETGID : 0) |
1491 ((inode->i_mode & S_ISVTX) ? ICBTAG_FLAG_STICKY : 0) |
1492 (le16_to_cpu(fe->icbTag.flags) &
1493 ~(ICBTAG_FLAG_AD_MASK | ICBTAG_FLAG_SETUID |
1494 ICBTAG_FLAG_SETGID | ICBTAG_FLAG_STICKY));
1496 fe->icbTag.flags = cpu_to_le16(icbflags);
1497 if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
1498 fe->descTag.descVersion = cpu_to_le16(3);
1500 fe->descTag.descVersion = cpu_to_le16(2);
1501 fe->descTag.tagSerialNum = cpu_to_le16(UDF_SB_SERIALNUM(inode->i_sb));
1502 fe->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
1503 crclen += UDF_I_LENEATTR(inode) + UDF_I_LENALLOC(inode) - sizeof(tag);
1504 fe->descTag.descCRCLength = cpu_to_le16(crclen);
1505 fe->descTag.descCRC = cpu_to_le16(udf_crc((char *)fe + sizeof(tag), crclen, 0));
1507 fe->descTag.tagChecksum = 0;
1508 for (i=0; i<16; i++)
1510 fe->descTag.tagChecksum += ((uint8_t *)&(fe->descTag))[i];
1512 /* write the data blocks */
1513 mark_buffer_dirty(bh);
1516 sync_dirty_buffer(bh);
1517 if (buffer_req(bh) && !buffer_uptodate(bh))
1519 printk("IO error syncing udf inode [%s:%08lx]\n",
1520 inode->i_sb->s_id, inode->i_ino);
1524 udf_release_data(bh);
/*
 * udf_iget
 *
 * Return the in-core inode for the ICB at logical-block address @ino.
 * The icache inode number is the ICB's physical block number; a newly
 * created inode is filled from disc via __udf_read_inode() before
 * being unlocked.
 */
1529 udf_iget(struct super_block *sb, kernel_lb_addr ino)
1531 unsigned long block = udf_get_lb_pblock(sb, ino, 0);
1532 struct inode *inode = iget_locked(sb, block);
1537 if (inode->i_state & I_NEW) {
/* Record the on-disc location before reading the ICB contents. */
1538 memcpy(&UDF_I_LOCATION(inode), &ino, sizeof(kernel_lb_addr));
1539 __udf_read_inode(inode);
1540 unlock_new_inode(inode);
1543 if (is_bad_inode(inode))
/* Reject addresses beyond the partition: mark the inode bad. */
1546 if (ino.logicalBlockNum >= UDF_SB_PARTLEN(sb, ino.partitionReferenceNum)) {
1547 udf_debug("block=%d, partition=%d out of range\n",
1548 ino.logicalBlockNum, ino.partitionReferenceNum);
1549 make_bad_inode(inode);
/*
 * udf_add_aext
 *
 * Append one extent (@eloc, @elen) to the inode's allocation
 * descriptors.  The descriptors live either inside the ICB itself
 * (*bh == NULL) or in an external Allocation Extent Descriptor (AED)
 * block; when the current area cannot hold two more descriptors (the
 * new one plus a chain pointer), a fresh AED block is allocated and
 * linked in with an EXT_NEXT_EXTENT_ALLOCDECS descriptor.  On return
 * *bloc/*extoffset/*bh track the position after the written extent.
 */
1560 int8_t udf_add_aext(struct inode *inode, kernel_lb_addr *bloc, int *extoffset,
1561 kernel_lb_addr eloc, uint32_t elen, struct buffer_head **bh, int inc)
1564 short_ad *sad = NULL;
1565 long_ad *lad = NULL;
1566 struct allocExtDesc *aed;
/* Point at the next descriptor slot: in-ICB data area or AED block. */
1571 ptr = UDF_I_DATA(inode) + *extoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
1573 ptr = (*bh)->b_data + *extoffset;
1575 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
1576 adsize = sizeof(short_ad);
1577 else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
1578 adsize = sizeof(long_ad);
/*
 * Not enough room for the new descriptor plus a chaining descriptor:
 * allocate a new AED block near the current one and move there.
 */
1582 if (*extoffset + (2 * adsize) > inode->i_sb->s_blocksize)
1585 struct buffer_head *nbh;
1587 kernel_lb_addr obloc = *bloc;
1589 if (!(bloc->logicalBlockNum = udf_new_block(inode->i_sb, NULL,
1590 obloc.partitionReferenceNum, obloc.logicalBlockNum, &err)))
1594 if (!(nbh = udf_tgetblk(inode->i_sb, udf_get_lb_pblock(inode->i_sb,
1600 memset(nbh->b_data, 0x00, inode->i_sb->s_blocksize);
1601 set_buffer_uptodate(nbh);
1603 mark_buffer_dirty_inode(nbh, inode);
1605 aed = (struct allocExtDesc *)(nbh->b_data);
/* Back-pointer to the previous AED is a non-strict extension. */
1606 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT))
1607 aed->previousAllocExtLocation = cpu_to_le32(obloc.logicalBlockNum);
/*
 * If even one more descriptor would not fit, relocate the last
 * written descriptor into the new block so the chain pointer can
 * take its old slot; otherwise the new block starts out empty.
 */
1608 if (*extoffset + adsize > inode->i_sb->s_blocksize)
1610 loffset = *extoffset;
1611 aed->lengthAllocDescs = cpu_to_le32(adsize);
1612 sptr = ptr - adsize;
1613 dptr = nbh->b_data + sizeof(struct allocExtDesc);
1614 memcpy(dptr, sptr, adsize);
1615 *extoffset = sizeof(struct allocExtDesc) + adsize;
1619 loffset = *extoffset + adsize;
1620 aed->lengthAllocDescs = cpu_to_le32(0);
1622 *extoffset = sizeof(struct allocExtDesc);
/* Account the chain-pointer descriptor in the old location. */
1626 aed = (struct allocExtDesc *)(*bh)->b_data;
1627 aed->lengthAllocDescs =
1628 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
1632 UDF_I_LENALLOC(inode) += adsize;
1633 mark_inode_dirty(inode);
/* Stamp the new block as an AED (descriptor version per UDF rev). */
1636 if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
1637 udf_new_tag(nbh->b_data, TAG_IDENT_AED, 3, 1,
1638 bloc->logicalBlockNum, sizeof(tag));
1640 udf_new_tag(nbh->b_data, TAG_IDENT_AED, 2, 1,
1641 bloc->logicalBlockNum, sizeof(tag));
/* Write the EXT_NEXT_EXTENT_ALLOCDECS chain descriptor at the old slot. */
1642 switch (UDF_I_ALLOCTYPE(inode))
1644 case ICBTAG_FLAG_AD_SHORT:
1646 sad = (short_ad *)sptr;
1647 sad->extLength = cpu_to_le32(
1648 EXT_NEXT_EXTENT_ALLOCDECS |
1649 inode->i_sb->s_blocksize);
1650 sad->extPosition = cpu_to_le32(bloc->logicalBlockNum);
1653 case ICBTAG_FLAG_AD_LONG:
1655 lad = (long_ad *)sptr;
1656 lad->extLength = cpu_to_le32(
1657 EXT_NEXT_EXTENT_ALLOCDECS |
1658 inode->i_sb->s_blocksize);
1659 lad->extLocation = cpu_to_lelb(*bloc);
1660 memset(lad->impUse, 0x00, sizeof(lad->impUse));
/* Refresh the old location's tag (old AED block vs in-ICB data). */
1666 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1667 udf_update_tag((*bh)->b_data, loffset);
1669 udf_update_tag((*bh)->b_data, sizeof(struct allocExtDesc));
1670 mark_buffer_dirty_inode(*bh, inode);
1671 udf_release_data(*bh);
1674 mark_inode_dirty(inode);
/* Now there is room: write the caller's extent and account for it. */
1678 etype = udf_write_aext(inode, *bloc, extoffset, eloc, elen, *bh, inc);
1682 UDF_I_LENALLOC(inode) += adsize;
1683 mark_inode_dirty(inode);
1687 aed = (struct allocExtDesc *)(*bh)->b_data;
1688 aed->lengthAllocDescs =
1689 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
1690 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1691 udf_update_tag((*bh)->b_data, *extoffset + (inc ? 0 : adsize));
1693 udf_update_tag((*bh)->b_data, sizeof(struct allocExtDesc));
1694 mark_buffer_dirty_inode(*bh, inode);
/*
 * udf_write_aext
 *
 * Overwrite the allocation descriptor at *extoffset with
 * (@eloc, @elen), either in the in-ICB allocation area (bh == NULL)
 * or in an external AED block.  Advances *extoffset past the written
 * descriptor and returns the extent type encoded in the top two bits
 * of @elen.
 */
1700 int8_t udf_write_aext(struct inode *inode, kernel_lb_addr bloc, int *extoffset,
1701 kernel_lb_addr eloc, uint32_t elen, struct buffer_head *bh, int inc)
1707 ptr = UDF_I_DATA(inode) + *extoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
1710 ptr = bh->b_data + *extoffset;
/* Extra reference: balanced by udf_release_data() below. */
1711 atomic_inc(&bh->b_count);
/* Encode the descriptor according to the inode's allocation type. */
1714 switch (UDF_I_ALLOCTYPE(inode))
1716 case ICBTAG_FLAG_AD_SHORT:
1718 short_ad *sad = (short_ad *)ptr;
1719 sad->extLength = cpu_to_le32(elen);
1720 sad->extPosition = cpu_to_le32(eloc.logicalBlockNum);
1721 adsize = sizeof(short_ad);
1724 case ICBTAG_FLAG_AD_LONG:
1726 long_ad *lad = (long_ad *)ptr;
1727 lad->extLength = cpu_to_le32(elen);
1728 lad->extLocation = cpu_to_lelb(eloc);
1729 memset(lad->impUse, 0x00, sizeof(lad->impUse));
1730 adsize = sizeof(long_ad);
/* External AED: refresh its tag checksum/CRC when required. */
1739 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1741 struct allocExtDesc *aed = (struct allocExtDesc *)(bh)->b_data;
1742 udf_update_tag((bh)->b_data,
1743 le32_to_cpu(aed->lengthAllocDescs) + sizeof(struct allocExtDesc));
1745 mark_buffer_dirty_inode(bh, inode);
1746 udf_release_data(bh);
/* In-ICB descriptors are flushed with the inode itself. */
1749 mark_inode_dirty(inode);
1752 *extoffset += adsize;
1753 return (elen >> 30);
/*
 * udf_next_aext
 *
 * Fetch the next allocation descriptor via udf_current_aext(),
 * transparently following EXT_NEXT_EXTENT_ALLOCDECS chain descriptors
 * into continuation AED blocks (each read with udf_tread(), replacing
 * *bh).  Returns the extent type of a real extent.
 */
1756 int8_t udf_next_aext(struct inode *inode, kernel_lb_addr *bloc, int *extoffset,
1757 kernel_lb_addr *eloc, uint32_t *elen, struct buffer_head **bh, int inc)
1761 while ((etype = udf_current_aext(inode, bloc, extoffset, eloc, elen, bh, inc)) ==
1762 (EXT_NEXT_EXTENT_ALLOCDECS >> 30))
/* Chain descriptor: restart just past the new block's AED header. */
1765 *extoffset = sizeof(struct allocExtDesc);
1766 udf_release_data(*bh);
1767 if (!(*bh = udf_tread(inode->i_sb, udf_get_lb_pblock(inode->i_sb, *bloc, 0))))
1769 udf_debug("reading block %d failed!\n",
1770 udf_get_lb_pblock(inode->i_sb, *bloc, 0));
/*
 * udf_current_aext
 *
 * Decode the allocation descriptor at the current position
 * (in-ICB area when *bh == NULL, else inside the AED block *bh) into
 * *eloc/*elen and return its extent type (top two bits of the raw
 * length).  *extoffset is advanced by the short_ad/long_ad helpers
 * when @inc is set.
 */
1778 int8_t udf_current_aext(struct inode *inode, kernel_lb_addr *bloc, int *extoffset,
1779 kernel_lb_addr *eloc, uint32_t *elen, struct buffer_head **bh, int inc)
/* In-ICB case: limit the walk to the recorded alloc-descs length. */
1788 *extoffset = udf_file_entry_alloc_offset(inode);
1789 ptr = UDF_I_DATA(inode) + *extoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
1790 alen = udf_file_entry_alloc_offset(inode) + UDF_I_LENALLOC(inode);
/* External AED case: limit comes from the AED's own length field. */
1795 *extoffset = sizeof(struct allocExtDesc);
1796 ptr = (*bh)->b_data + *extoffset;
1797 alen = sizeof(struct allocExtDesc) + le32_to_cpu(((struct allocExtDesc *)(*bh)->b_data)->lengthAllocDescs);
1800 switch (UDF_I_ALLOCTYPE(inode))
1802 case ICBTAG_FLAG_AD_SHORT:
1806 if (!(sad = udf_get_fileshortad(ptr, alen, extoffset, inc)))
1809 etype = le32_to_cpu(sad->extLength) >> 30;
1810 eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
/* short_ad has no partition field: it is implicitly the ICB's. */
1811 eloc->partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
1812 *elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
1815 case ICBTAG_FLAG_AD_LONG:
1819 if (!(lad = udf_get_filelongad(ptr, alen, extoffset, inc)))
1822 etype = le32_to_cpu(lad->extLength) >> 30;
1823 *eloc = lelb_to_cpu(lad->extLocation);
1824 *elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
1829 udf_debug("alloc_type = %d unsupported\n", UDF_I_ALLOCTYPE(inode));
/*
 * udf_insert_aext
 *
 * Insert extent (@neloc, @nelen) at @extoffset.  Each existing
 * descriptor is overwritten with the pending value while its old
 * contents become the value pending for the next slot; the extent
 * displaced off the end is appended with udf_add_aext().  Returns the
 * type of the last extent handled.
 */
1838 udf_insert_aext(struct inode *inode, kernel_lb_addr bloc, int extoffset,
1839 kernel_lb_addr neloc, uint32_t nelen, struct buffer_head *bh)
1841 kernel_lb_addr oeloc;
/* Extra reference for our walk; dropped by udf_release_data() below. */
1846 atomic_inc(&bh->b_count);
/* Read the current slot (inc=0), then overwrite it (inc=1 advances). */
1848 while ((etype = udf_next_aext(inode, &bloc, &extoffset, &oeloc, &oelen, &bh, 0)) != -1)
1850 udf_write_aext(inode, bloc, &extoffset, neloc, nelen, bh, 1);
/* The displaced extent (type + length) becomes the next insertion. */
1853 nelen = (etype << 30) | oelen;
1855 udf_add_aext(inode, &bloc, &extoffset, neloc, nelen, &bh, 1);
1856 udf_release_data(bh);
1857 return (nelen >> 30);
/*
 * udf_delete_aext
 *
 * Delete the allocation descriptor at (@nbloc, @nextoffset) by
 * shifting every following descriptor back one slot.  Two cursors
 * walk the list: (nbloc/nextoffset/nbh) reads ahead while
 * (obloc/oextoffset/obh) rewrites one descriptor behind; the freed
 * trailing slot(s) are cleared and the allocation-descriptor lengths
 * (in-ICB LENALLOC or the AED header) are shrunk accordingly.
 */
1860 int8_t udf_delete_aext(struct inode *inode, kernel_lb_addr nbloc, int nextoffset,
1861 kernel_lb_addr eloc, uint32_t elen, struct buffer_head *nbh)
1863 struct buffer_head *obh;
1864 kernel_lb_addr obloc;
1865 int oextoffset, adsize;
1867 struct allocExtDesc *aed;
/* Both cursors start on the same buffer: take one reference each. */
1871 atomic_inc(&nbh->b_count);
1872 atomic_inc(&nbh->b_count);
1875 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
1876 adsize = sizeof(short_ad);
1877 else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
1878 adsize = sizeof(long_ad);
1884 oextoffset = nextoffset;
/* Step the read cursor past the descriptor being deleted. */
1886 if (udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1) == -1)
/* Copy each remaining extent back one slot via the write cursor. */
1889 while ((etype = udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1)) != -1)
1891 udf_write_aext(inode, obloc, &oextoffset, eloc, (etype << 30) | elen, obh, 1);
/* Write cursor crossed into a new AED block: retarget it. */
1895 udf_release_data(obh);
1896 atomic_inc(&nbh->b_count);
1898 oextoffset = nextoffset - adsize;
/* A zeroed extent acts as the list terminator below. */
1901 memset(&eloc, 0x00, sizeof(kernel_lb_addr));
/*
 * An AED block became redundant (presumably when the tail descriptor
 * was a chain pointer): free it and retire two descriptor slots —
 * hence two cleared writes and a 2*adsize length adjustment.
 */
1906 udf_free_blocks(inode->i_sb, inode, nbloc, 0, 1);
1907 udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
1908 udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
1911 UDF_I_LENALLOC(inode) -= (adsize * 2);
1912 mark_inode_dirty(inode);
1916 aed = (struct allocExtDesc *)(obh)->b_data;
1917 aed->lengthAllocDescs =
1918 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - (2*adsize));
1919 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1920 udf_update_tag((obh)->b_data, oextoffset - (2*adsize));
1922 udf_update_tag((obh)->b_data, sizeof(struct allocExtDesc));
1923 mark_buffer_dirty_inode(obh, inode);
/* Normal path: clear one trailing slot and shrink lengths by adsize. */
1928 udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
1931 UDF_I_LENALLOC(inode) -= adsize;
1932 mark_inode_dirty(inode);
1936 aed = (struct allocExtDesc *)(obh)->b_data;
1937 aed->lengthAllocDescs =
1938 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - adsize);
1939 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1940 udf_update_tag((obh)->b_data, oextoffset - adsize);
1942 udf_update_tag((obh)->b_data, sizeof(struct allocExtDesc));
1943 mark_buffer_dirty_inode(obh, inode);
1947 udf_release_data(nbh);
1948 udf_release_data(obh);
1949 return (elen >> 30);
/*
 * inode_bmap
 *
 * Walk the inode's extent list to find the extent covering byte
 * offset (@block << blocksize_bits).  On success *eloc/*elen describe
 * that extent, *offset is the byte offset into it (callers such as
 * udf_block_map() shift it down to blocks), and the extent type is
 * returned.  If the walk runs off the end of the list, *offset is set
 * past the recorded data and UDF_I_LENEXTENTS is refreshed from the
 * walked length.
 */
1952 int8_t inode_bmap(struct inode *inode, int block, kernel_lb_addr *bloc, uint32_t *extoffset,
1953 kernel_lb_addr *eloc, uint32_t *elen, struct buffer_head **bh, int inc)
1955 uint64_t lbcount = 0, bcount = (uint64_t)block << inode->i_sb->s_blocksize_bits;
/* Defensive argument checks. */
1960 printk(KERN_ERR "udf: inode_bmap: block < 0\n");
1965 printk(KERN_ERR "udf: inode_bmap: NULL inode\n");
/* Start the walk at the inode's own ICB. */
1971 *bloc = UDF_I_LOCATION(inode);
/* Accumulate extent lengths until the target byte offset is covered. */
1975 if ((etype = udf_next_aext(inode, bloc, extoffset, eloc, elen, bh, 1)) == -1)
1977 *offset = bcount - lbcount;
1978 UDF_I_LENEXTENTS(inode) = lbcount;
1982 } while (lbcount <= bcount);
/* lbcount has passed bcount: offset of bcount within this extent. */
1984 *offset = bcount + *elen - lbcount;
1989 long udf_block_map(struct inode *inode, long block)
1991 kernel_lb_addr eloc, bloc;
1992 uint32_t offset, extoffset, elen;
1993 struct buffer_head *bh = NULL;
1998 if (inode_bmap(inode, block, &bloc, &extoffset, &eloc, &elen, &offset, &bh) == (EXT_RECORDED_ALLOCATED >> 30))
1999 ret = udf_get_lb_pblock(inode->i_sb, eloc, offset >> inode->i_sb->s_blocksize_bits);
2004 udf_release_data(bh);
2006 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_VARCONV))
2007 return udf_fixed_to_variable(ret);