5 * Inode handling routines for the OSTA-UDF(tm) filesystem.
8 * E-mail regarding any portion of the Linux UDF file system should be
9 * directed to the development team mailing list (run by majordomo):
10 * linux_udf@hpesjro.fc.hp.com
13 * This file is distributed under the terms of the GNU General Public
14 * License (GPL). Copies of the GPL can be obtained from:
15 * ftp://prep.ai.mit.edu/pub/gnu/GPL
16 * Each contributing author retains all rights to their own work.
18 * (C) 1998 Dave Boynton
19 * (C) 1998-2004 Ben Fennema
20 * (C) 1999-2000 Stelias Computing Inc
24 * 10/04/98 dgb Added rudimentary directory functions
25 * 10/07/98 Fully working udf_block_map! It works!
26 * 11/25/98 bmap altered to better support extents
27 * 12/06/98 blf partition support in udf_iget, udf_block_map and udf_read_inode
28 * 12/12/98 rewrote udf_block_map to handle next extents and descs across
29 * block boundaries (which is not actually allowed)
30 * 12/20/98 added support for strategy 4096
31 * 03/07/99 rewrote udf_block_map (again)
32 * New funcs, inode_bmap, udf_next_aext
33 * 04/19/99 Support for writing device EA's for major/minor #
38 #include <linux/smp_lock.h>
39 #include <linux/module.h>
40 #include <linux/pagemap.h>
41 #include <linux/buffer_head.h>
42 #include <linux/writeback.h>
43 #include <linux/slab.h>
48 MODULE_AUTHOR("Ben Fennema");
49 MODULE_DESCRIPTION("Universal Disk Format Filesystem");
50 MODULE_LICENSE("GPL");
52 #define EXTENT_MERGE_SIZE 5
54 static mode_t udf_convert_permissions(struct fileEntry *);
55 static int udf_update_inode(struct inode *, int);
56 static void udf_fill_inode(struct inode *, struct buffer_head *);
57 static struct buffer_head *inode_getblk(struct inode *, long, int *,
59 static int8_t udf_insert_aext(struct inode *, kernel_lb_addr, int,
60 kernel_lb_addr, uint32_t, struct buffer_head *);
61 static void udf_split_extents(struct inode *, int *, int, int,
62 kernel_long_ad [EXTENT_MERGE_SIZE], int *);
63 static void udf_prealloc_extents(struct inode *, int, int,
64 kernel_long_ad [EXTENT_MERGE_SIZE], int *);
65 static void udf_merge_extents(struct inode *,
66 kernel_long_ad [EXTENT_MERGE_SIZE], int *);
67 static void udf_update_extents(struct inode *,
68 kernel_long_ad [EXTENT_MERGE_SIZE], int, int,
69 kernel_lb_addr, uint32_t, struct buffer_head **);
70 static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int);
76 * Clean-up before the specified inode is destroyed.
79 * This routine is called when the kernel destroys an inode structure
80 * ie. when iput() finds i_count == 0.
83 * July 1, 1997 - Andrew E. Mileski
84 * Written, tested, and released.
86 * Called at the last iput() if i_nlink is zero.
/*
 * udf_delete_inode - final removal of an inode whose i_nlink is zero.
 * Discards all cached pages, writes the inode back (synchronously if
 * IS_SYNC), then frees its on-disk allocation.
 * NOTE(review): interior lines are elided in this listing; the
 * is_bad_inode() test presumably branches around the update/free —
 * confirm against the full source.
 */
88 void udf_delete_inode(struct inode * inode)
90 truncate_inode_pages(&inode->i_data, 0);
92 if (is_bad_inode(inode))
99 udf_update_inode(inode, IS_SYNC(inode));
100 udf_free_inode(inode);
/*
 * udf_clear_inode - called when the inode is evicted from the icache.
 * On a writable mount, returns any preallocated blocks first; in all
 * cases releases the in-core copy of the allocation descriptors.
 */
108 void udf_clear_inode(struct inode *inode)
110 if (!(inode->i_sb->s_flags & MS_RDONLY)) {
112 udf_discard_prealloc(inode);
/* Free the extent/EA data buffer and clear the pointer so a stale
 * reference cannot be freed twice. */
116 kfree(UDF_I_DATA(inode));
117 UDF_I_DATA(inode) = NULL;
/* address_space writepage: delegate to the generic buffer-head path,
 * mapping blocks through udf_get_block(). */
120 static int udf_writepage(struct page *page, struct writeback_control *wbc)
122 return block_write_full_page(page, udf_get_block, wbc);
/* address_space readpage: generic buffer-head read via udf_get_block(). */
125 static int udf_readpage(struct file *file, struct page *page)
127 return block_read_full_page(page, udf_get_block);
/* address_space prepare_write: map/allocate blocks for the byte range
 * [from, to) of the page before the caller copies data in. */
130 static int udf_prepare_write(struct file *file, struct page *page, unsigned from, unsigned to)
132 return block_prepare_write(page, from, to, udf_get_block);
/* FIBMAP support: translate a file block to a device block number. */
135 static sector_t udf_bmap(struct address_space *mapping, sector_t block)
137 return generic_block_bmap(mapping,block,udf_get_block);
/*
 * Address-space operations for regular UDF files whose data lives in
 * extents (not in-ICB). In-ICB files use udf_adinicb_aops instead
 * (defined elsewhere).
 */
140 struct address_space_operations udf_aops = {
141 .readpage = udf_readpage,
142 .writepage = udf_writepage,
143 .sync_page = block_sync_page,
144 .prepare_write = udf_prepare_write,
145 .commit_write = generic_commit_write,
/*
 * udf_expand_file_adinicb
 *
 * Convert a file whose data is stored inside the ICB (allocation type
 * AD_IN_ICB) into a normal extent-based file: copy the in-ICB data into
 * page 0 of the page cache, clear the in-ICB area, switch the
 * allocation type to short/long ADs, and write the page out so an
 * extent gets allocated.
 * NOTE(review): several lines (locals, braces, kmap/kunmap) are elided
 * in this listing — confirm the page handling against the full source.
 */
149 void udf_expand_file_adinicb(struct inode * inode, int newsize, int * err)
153 struct writeback_control udf_wbc = {
154 .sync_mode = WB_SYNC_NONE,
158 /* from now on we have normal address_space methods */
159 inode->i_data.a_ops = &udf_aops;
/* Empty file: just flip the allocation type, nothing to copy. */
161 if (!UDF_I_LENALLOC(inode))
163 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
164 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
166 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
167 mark_inode_dirty(inode);
/* Copy the in-ICB data into page 0 and mark it up to date. */
171 page = grab_cache_page(inode->i_mapping, 0);
172 BUG_ON(!PageLocked(page));
174 if (!PageUptodate(page))
177 memset(kaddr + UDF_I_LENALLOC(inode), 0x00,
178 PAGE_CACHE_SIZE - UDF_I_LENALLOC(inode));
179 memcpy(kaddr, UDF_I_DATA(inode) + UDF_I_LENEATTR(inode),
180 UDF_I_LENALLOC(inode));
181 flush_dcache_page(page);
182 SetPageUptodate(page);
/* Wipe the in-ICB copy now that the page cache owns the data. */
185 memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0x00,
186 UDF_I_LENALLOC(inode));
187 UDF_I_LENALLOC(inode) = 0;
188 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
189 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
191 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
/* Write the page out; this allocates the first real extent. */
193 inode->i_data.a_ops->writepage(page, &udf_wbc);
194 page_cache_release(page);
196 mark_inode_dirty(inode);
/*
 * udf_expand_dir_adinicb
 *
 * Convert a directory stored inside the ICB into an extent-based
 * directory: allocate one new block, re-write every file identifier
 * descriptor from the ICB into that block, then record the block as
 * the directory's single extent.
 *
 * Returns the buffer_head of the new directory block (dbh) on success;
 * *block receives its logical block number, *err any allocation error.
 * NOTE(review): error-return lines are elided here — the failure paths
 * after udf_new_block()/udf_tgetblk() presumably return NULL.
 */
199 struct buffer_head * udf_expand_dir_adinicb(struct inode *inode, int *block, int *err)
202 struct buffer_head *sbh = NULL, *dbh = NULL;
203 kernel_lb_addr bloc, eloc;
204 uint32_t elen, extoffset;
207 struct udf_fileident_bh sfibh, dfibh;
/* f_pos/size are in 4-byte units (>> 2), matching udf_fileident_read. */
208 loff_t f_pos = udf_ext0_offset(inode) >> 2;
209 int size = (udf_ext0_offset(inode) + inode->i_size) >> 2;
210 struct fileIdentDesc cfi, *sfi, *dfi;
212 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
213 alloctype = ICBTAG_FLAG_AD_SHORT;
215 alloctype = ICBTAG_FLAG_AD_LONG;
/* Empty directory: only the allocation type changes. */
219 UDF_I_ALLOCTYPE(inode) = alloctype;
220 mark_inode_dirty(inode);
224 /* alloc block, and copy data to it */
225 *block = udf_new_block(inode->i_sb, inode,
226 UDF_I_LOCATION(inode).partitionReferenceNum,
227 UDF_I_LOCATION(inode).logicalBlockNum, err);
231 newblock = udf_get_pblock(inode->i_sb, *block,
232 UDF_I_LOCATION(inode).partitionReferenceNum, 0);
235 dbh = udf_tgetblk(inode->i_sb, newblock);
239 memset(dbh->b_data, 0x00, inode->i_sb->s_blocksize);
240 set_buffer_uptodate(dbh);
242 mark_buffer_dirty_inode(dbh, inode);
/* Walk the old (in-ICB) entries with sfibh, writing each into the new
 * block tracked by dfibh. */
244 sfibh.soffset = sfibh.eoffset = (f_pos & ((inode->i_sb->s_blocksize - 1) >> 2)) << 2;
245 sbh = sfibh.sbh = sfibh.ebh = NULL;
246 dfibh.soffset = dfibh.eoffset = 0;
247 dfibh.sbh = dfibh.ebh = dbh;
248 while ( (f_pos < size) )
/* Temporarily flip back to IN_ICB so the read helper looks in the ICB. */
250 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
251 sfi = udf_fileident_read(inode, &f_pos, &sfibh, &cfi, NULL, NULL, NULL, NULL, NULL, NULL);
254 udf_release_data(dbh);
257 UDF_I_ALLOCTYPE(inode) = alloctype;
258 sfi->descTag.tagLocation = cpu_to_le32(*block);
259 dfibh.soffset = dfibh.eoffset;
260 dfibh.eoffset += (sfibh.eoffset - sfibh.soffset);
261 dfi = (struct fileIdentDesc *)(dbh->b_data + dfibh.soffset);
262 if (udf_write_fi(inode, sfi, dfi, &dfibh, sfi->impUse,
263 sfi->fileIdent + le16_to_cpu(sfi->lengthOfImpUse)))
/* Write failure: restore IN_ICB state and bail out. */
265 UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
266 udf_release_data(dbh);
270 mark_buffer_dirty_inode(dbh, inode);
/* Clear the old in-ICB data and record the new block as one extent. */
272 memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), 0, UDF_I_LENALLOC(inode));
273 UDF_I_LENALLOC(inode) = 0;
274 bloc = UDF_I_LOCATION(inode);
275 eloc.logicalBlockNum = *block;
276 eloc.partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
277 elen = inode->i_size;
278 UDF_I_LENEXTENTS(inode) = elen;
279 extoffset = udf_file_entry_alloc_offset(inode);
280 udf_add_aext(inode, &bloc, &extoffset, eloc, elen, &sbh, 0);
283 udf_release_data(sbh);
284 mark_inode_dirty(inode);
/*
 * udf_get_block - get_block_t callback used by the generic buffer I/O
 * paths. Maps file block 'block' to a device block in bh_result; when
 * 'create' is set, allocates via inode_getblk() and marks the buffer
 * new. Also bumps the sequential-allocation goal hint when blocks are
 * requested in order.
 * NOTE(review): locking, the !create fast path's surrounding branches
 * and the error returns are elided in this listing.
 */
288 static int udf_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_result, int create)
291 struct buffer_head *bh;
/* Read-only lookup path: map an existing block, no allocation. */
296 phys = udf_block_map(inode, block);
298 map_bh(bh_result, inode->i_sb, phys);
/* Sequential access: advance the next-alloc hints so udf_new_block()
 * keeps allocations contiguous. */
311 if (block == UDF_I_NEXT_ALLOC_BLOCK(inode) + 1)
313 UDF_I_NEXT_ALLOC_BLOCK(inode) ++;
314 UDF_I_NEXT_ALLOC_GOAL(inode) ++;
319 bh = inode_getblk(inode, block, &err, &phys, &new);
328 set_buffer_new(bh_result);
329 map_bh(bh_result, inode->i_sb, phys);
335 udf_warning(inode->i_sb, "udf_get_block", "block < 0");
/*
 * udf_getblk - map (and optionally allocate) one file block and return
 * a buffer_head for it. Uses a dummy buffer_head to collect the
 * mapping from udf_get_block(); a freshly allocated block is zeroed
 * and marked uptodate before being dirtied.
 * NOTE(review): the NULL-return path and sb_getblk() failure handling
 * are elided in this listing.
 */
339 static struct buffer_head *
340 udf_getblk(struct inode *inode, long block, int create, int *err)
342 struct buffer_head dummy;
/* Sentinel so a stale b_blocknr can never look like a valid mapping. */
345 dummy.b_blocknr = -1000;
346 *err = udf_get_block(inode, block, &dummy, create);
347 if (!*err && buffer_mapped(&dummy))
349 struct buffer_head *bh;
350 bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
/* Newly allocated block: zero it so stale disk contents never leak. */
351 if (buffer_new(&dummy))
354 memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
355 set_buffer_uptodate(bh);
357 mark_buffer_dirty_inode(bh, inode);
/*
 * inode_getblk
 *
 * Core of block allocation for UDF files. Walks the inode's extent
 * list to find the extent containing 'block', then either returns the
 * existing physical block, materializes a not-recorded-but-allocated
 * block, or allocates a new one. The surrounding extents are loaded
 * into laarr[], split/preallocated/merged, and written back.
 *
 * Outputs: *phys = physical block, *new = 1 if freshly allocated,
 * *err = allocation error. Returns a buffer_head (or the elided
 * result path's value) — NOTE(review): many lines, including the 'do'
 * that opens the walk loop and several brace/return lines, are elided
 * in this listing.
 */
364 static struct buffer_head * inode_getblk(struct inode * inode, long block,
365 int *err, long *phys, int *new)
367 struct buffer_head *pbh = NULL, *cbh = NULL, *nbh = NULL, *result = NULL;
368 kernel_long_ad laarr[EXTENT_MERGE_SIZE];
369 uint32_t pextoffset = 0, cextoffset = 0, nextoffset = 0;
370 int count = 0, startnum = 0, endnum = 0;
372 kernel_lb_addr eloc, pbloc, cbloc, nbloc;
374 uint64_t lbcount = 0, b_off = 0;
375 uint32_t newblocknum, newblock, offset = 0;
377 int goal = 0, pgoal = UDF_I_LOCATION(inode).logicalBlockNum;
380 pextoffset = cextoffset = nextoffset = udf_file_entry_alloc_offset(inode);
381 b_off = (uint64_t)block << inode->i_sb->s_blocksize_bits;
382 pbloc = cbloc = nbloc = UDF_I_LOCATION(inode);
384 /* find the extent which contains the block we are looking for.
385 alternate between laarr[0] and laarr[1] for locations of the
386 current extent, and the previous extent */
391 udf_release_data(pbh);
392 atomic_inc(&cbh->b_count);
397 udf_release_data(cbh);
398 atomic_inc(&nbh->b_count);
/* Shift previous <- current <- next as the walk advances. */
407 pextoffset = cextoffset;
408 cextoffset = nextoffset;
410 if ((etype = udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1)) == -1)
415 laarr[c].extLength = (etype << 30) | elen;
416 laarr[c].extLocation = eloc;
/* Track a goal block just past the last allocated extent. */
418 if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
419 pgoal = eloc.logicalBlockNum +
420 ((elen + inode->i_sb->s_blocksize - 1) >>
421 inode->i_sb->s_blocksize_bits);
424 } while (lbcount + elen <= b_off);
/* offset = block index of 'block' within the found extent. */
427 offset = b_off >> inode->i_sb->s_blocksize_bits;
429 /* if the extent is allocated and recorded, return the block
430 if the extent is not a multiple of the blocksize, round up */
432 if (etype == (EXT_RECORDED_ALLOCATED >> 30))
434 if (elen & (inode->i_sb->s_blocksize - 1))
436 elen = EXT_RECORDED_ALLOCATED |
437 ((elen + inode->i_sb->s_blocksize - 1) &
438 ~(inode->i_sb->s_blocksize - 1));
439 etype = udf_write_aext(inode, nbloc, &cextoffset, eloc, elen, nbh, 1);
441 udf_release_data(pbh);
442 udf_release_data(cbh);
443 udf_release_data(nbh);
444 newblock = udf_get_lb_pblock(inode->i_sb, eloc, offset);
/* Past EOF or inside the last partial extent: round the last extent
 * up to a block boundary, or append a new unallocated extent covering
 * the hole up to and including the requested block. */
451 endnum = startnum = ((count > 1) ? 1 : count);
452 if (laarr[c].extLength & (inode->i_sb->s_blocksize - 1))
455 (laarr[c].extLength & UDF_EXTENT_FLAG_MASK) |
456 (((laarr[c].extLength & UDF_EXTENT_LENGTH_MASK) +
457 inode->i_sb->s_blocksize - 1) &
458 ~(inode->i_sb->s_blocksize - 1));
459 UDF_I_LENEXTENTS(inode) =
460 (UDF_I_LENEXTENTS(inode) + inode->i_sb->s_blocksize - 1) &
461 ~(inode->i_sb->s_blocksize - 1);
464 laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
465 ((offset + 1) << inode->i_sb->s_blocksize_bits);
466 memset(&laarr[c].extLocation, 0x00, sizeof(kernel_lb_addr));
472 endnum = startnum = ((count > 2) ? 2 : count);
474 /* if the current extent is in position 0, swap it with the previous */
475 if (!c && count != 1)
483 /* if the current block is located in an extent, read the next extent */
486 if ((etype = udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 0)) != -1)
488 laarr[c+1].extLength = (etype << 30) | elen;
489 laarr[c+1].extLocation = eloc;
497 udf_release_data(cbh);
498 udf_release_data(nbh);
500 /* if the current extent is not recorded but allocated, get the
501 block in the extent corresponding to the requested block */
502 if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
503 newblocknum = laarr[c].extLocation.logicalBlockNum + offset;
504 else /* otherwise, allocate a new block */
506 if (UDF_I_NEXT_ALLOC_BLOCK(inode) == block)
507 goal = UDF_I_NEXT_ALLOC_GOAL(inode);
512 goal = UDF_I_LOCATION(inode).logicalBlockNum + 1;
515 if (!(newblocknum = udf_new_block(inode->i_sb, inode,
516 UDF_I_LOCATION(inode).partitionReferenceNum, goal, err)))
518 udf_release_data(pbh);
522 UDF_I_LENEXTENTS(inode) += inode->i_sb->s_blocksize;
525 /* if the extent the requested block is located in contains multiple blocks,
526 split the extent into at most three extents. blocks prior to requested
527 block, requested block, and blocks after requested block */
528 udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum);
530 #ifdef UDF_PREALLOCATE
531 /* preallocate blocks */
532 udf_prealloc_extents(inode, c, lastblock, laarr, &endnum);
535 /* merge any continuous blocks in laarr */
536 udf_merge_extents(inode, laarr, &endnum);
538 /* write back the new extents, inserting new extents if the new number
539 of extents is greater than the old number, and deleting extents if
540 the new number of extents is less than the old number */
541 udf_update_extents(inode, laarr, startnum, endnum, pbloc, pextoffset, &pbh);
543 udf_release_data(pbh);
545 if (!(newblock = udf_get_pblock(inode->i_sb, newblocknum,
546 UDF_I_LOCATION(inode).partitionReferenceNum, 0)))
/* Remember where we allocated so the next sequential write reuses it. */
553 UDF_I_NEXT_ALLOC_BLOCK(inode) = block;
554 UDF_I_NEXT_ALLOC_GOAL(inode) = newblocknum;
555 inode->i_ctime = current_fs_time(inode->i_sb);
558 udf_sync_inode(inode);
560 mark_inode_dirty(inode);
/*
 * udf_split_extents
 *
 * Split the (not-recorded) extent laarr[*c] around block index
 * 'offset' into up to three pieces: blocks before the requested block,
 * the requested block itself (now recorded+allocated at newblocknum),
 * and blocks after it. *c and *endnum are adjusted to point at the
 * middle piece and the new extent count.
 * NOTE(review): several brace/assignment lines are elided; the exact
 * curr/*c bookkeeping should be checked against the full source.
 */
564 static void udf_split_extents(struct inode *inode, int *c, int offset, int newblocknum,
565 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
567 if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) ||
568 (laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
571 int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) +
572 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
573 int8_t etype = (laarr[curr].extLength >> 30);
/* Requested block at the start or end: split into two extents,
 * otherwise into three. */
577 else if (!offset || blen == offset + 1)
579 laarr[curr+2] = laarr[curr+1];
580 laarr[curr+1] = laarr[curr];
584 laarr[curr+3] = laarr[curr+1];
585 laarr[curr+2] = laarr[curr+1] = laarr[curr];
/* Leading piece: for an allocated-unrecorded extent, free the leading
 * blocks and turn the piece into a pure hole. */
590 if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
592 udf_free_blocks(inode->i_sb, inode, laarr[curr].extLocation, 0, offset);
593 laarr[curr].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
594 (offset << inode->i_sb->s_blocksize_bits);
595 laarr[curr].extLocation.logicalBlockNum = 0;
596 laarr[curr].extLocation.partitionReferenceNum = 0;
599 laarr[curr].extLength = (etype << 30) |
600 (offset << inode->i_sb->s_blocksize_bits);
/* Middle piece: one recorded+allocated block at newblocknum. */
606 laarr[curr].extLocation.logicalBlockNum = newblocknum;
607 if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
608 laarr[curr].extLocation.partitionReferenceNum =
609 UDF_I_LOCATION(inode).partitionReferenceNum;
610 laarr[curr].extLength = EXT_RECORDED_ALLOCATED |
611 inode->i_sb->s_blocksize;
/* Trailing piece: whatever remains after the requested block. */
614 if (blen != offset + 1)
616 if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
617 laarr[curr].extLocation.logicalBlockNum += (offset + 1);
618 laarr[curr].extLength = (etype << 30) |
619 ((blen - (offset + 1)) << inode->i_sb->s_blocksize_bits);
/*
 * udf_prealloc_extents
 *
 * After a block allocation, try to preallocate up to
 * UDF_DEFAULT_PREALLOC_BLOCKS contiguous blocks following extent
 * laarr[c], converting following hole extents into an allocated
 * (not-recorded) preallocation extent. laarr[] and *endnum are
 * adjusted in place.
 * NOTE(review): the branch lines choosing 'start' and several closing
 * braces are elided in this listing.
 */
626 static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
627 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
629 int start, length = 0, currlength = 0, i;
631 if (*endnum >= (c+1))
/* An existing allocated-unrecorded extent after c already counts
 * toward the preallocation length. */
640 if ((laarr[c+1].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
643 length = currlength = (((laarr[c+1].extLength & UDF_EXTENT_LENGTH_MASK) +
644 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
/* Count how many more blocks the following hole extents could absorb. */
650 for (i=start+1; i<=*endnum; i++)
655 length += UDF_DEFAULT_PREALLOC_BLOCKS;
657 else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
658 length += (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
659 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
/* Ask the allocator for the contiguous run right after 'start'. */
666 int next = laarr[start].extLocation.logicalBlockNum +
667 (((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) +
668 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
669 int numalloc = udf_prealloc_blocks(inode->i_sb, inode,
670 laarr[start].extLocation.partitionReferenceNum,
671 next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ? length :
672 UDF_DEFAULT_PREALLOC_BLOCKS) - currlength);
/* Either grow the existing allocated extent, or insert a new
 * allocated-unrecorded extent after c. */
677 laarr[start].extLength +=
678 (numalloc << inode->i_sb->s_blocksize_bits);
681 memmove(&laarr[c+2], &laarr[c+1],
682 sizeof(long_ad) * (*endnum - (c+1)));
684 laarr[c+1].extLocation.logicalBlockNum = next;
685 laarr[c+1].extLocation.partitionReferenceNum =
686 laarr[c].extLocation.partitionReferenceNum;
687 laarr[c+1].extLength = EXT_NOT_RECORDED_ALLOCATED |
688 (numalloc << inode->i_sb->s_blocksize_bits);
/* Shrink or remove the hole extents the preallocation replaced. */
692 for (i=start+1; numalloc && i<*endnum; i++)
694 int elen = ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
695 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits;
699 laarr[i].extLength -=
700 (numalloc << inode->i_sb->s_blocksize_bits);
707 memmove(&laarr[i], &laarr[i+1],
708 sizeof(long_ad) * (*endnum - (i+1)));
713 UDF_I_LENEXTENTS(inode) += numalloc << inode->i_sb->s_blocksize_bits;
/*
 * udf_merge_extents
 *
 * Coalesce adjacent extents in laarr[] where legal: two extents of the
 * same type that are logically contiguous are merged (splitting again
 * if the combined length would exceed UDF_EXTENT_LENGTH_MASK); an
 * allocated-unrecorded extent followed by a hole has its blocks freed
 * and is merged into the hole; a trailing allocated-unrecorded extent
 * whose blocks are no longer needed is freed and demoted to a hole.
 * *endnum is decremented for each removed extent.
 * NOTE(review): loop braces and the i-- re-scan lines are elided in
 * this listing.
 */
718 static void udf_merge_extents(struct inode *inode,
719 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int *endnum)
723 for (i=0; i<(*endnum-1); i++)
725 if ((laarr[i].extLength >> 30) == (laarr[i+1].extLength >> 30))
/* Same type and physically contiguous (holes are always mergeable). */
727 if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) ||
728 ((laarr[i+1].extLocation.logicalBlockNum - laarr[i].extLocation.logicalBlockNum) ==
729 (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
730 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits)))
/* Combined length overflows one extent: max out extent i and carry
 * the remainder into extent i+1. */
732 if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
733 (laarr[i+1].extLength & UDF_EXTENT_LENGTH_MASK) +
734 inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK)
736 laarr[i+1].extLength = (laarr[i+1].extLength -
737 (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
738 UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize-1);
739 laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_FLAG_MASK) +
740 (UDF_EXTENT_LENGTH_MASK + 1) - inode->i_sb->s_blocksize;
741 laarr[i+1].extLocation.logicalBlockNum =
742 laarr[i].extLocation.logicalBlockNum +
743 ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) >>
744 inode->i_sb->s_blocksize_bits);
/* Fits in one extent: merge i+1 into i and close the gap. */
748 laarr[i].extLength = laarr[i+1].extLength +
749 (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
750 inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize-1));
752 memmove(&laarr[i+1], &laarr[i+2],
753 sizeof(long_ad) * (*endnum - (i+2)));
/* Allocated-unrecorded followed by a hole: free the blocks and treat
 * the pair as one hole. */
759 else if (((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30)) &&
760 ((laarr[i+1].extLength >> 30) == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)))
762 udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0,
763 ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
764 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
765 laarr[i].extLocation.logicalBlockNum = 0;
766 laarr[i].extLocation.partitionReferenceNum = 0;
768 if (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
769 (laarr[i+1].extLength & UDF_EXTENT_LENGTH_MASK) +
770 inode->i_sb->s_blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK)
772 laarr[i+1].extLength = (laarr[i+1].extLength -
773 (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
774 UDF_EXTENT_LENGTH_MASK) & ~(inode->i_sb->s_blocksize-1);
775 laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_FLAG_MASK) +
776 (UDF_EXTENT_LENGTH_MASK + 1) - inode->i_sb->s_blocksize;
780 laarr[i].extLength = laarr[i+1].extLength +
781 (((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
782 inode->i_sb->s_blocksize - 1) & ~(inode->i_sb->s_blocksize-1));
784 memmove(&laarr[i+1], &laarr[i+2],
785 sizeof(long_ad) * (*endnum - (i+2)));
/* Lone allocated-unrecorded extent: free its blocks, keep it as a hole. */
790 else if ((laarr[i].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
792 udf_free_blocks(inode->i_sb, inode, laarr[i].extLocation, 0,
793 ((laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) +
794 inode->i_sb->s_blocksize - 1) >> inode->i_sb->s_blocksize_bits);
795 laarr[i].extLocation.logicalBlockNum = 0;
796 laarr[i].extLocation.partitionReferenceNum = 0;
797 laarr[i].extLength = (laarr[i].extLength & UDF_EXTENT_LENGTH_MASK) |
798 EXT_NOT_RECORDED_NOT_ALLOCATED;
/*
 * udf_update_extents
 *
 * Write laarr[0..endnum) back to the inode's allocation descriptors,
 * starting at (pbloc, pextoffset). If the extent count shrank
 * (startnum > endnum), delete the surplus descriptors first; if it
 * grew, insert the extra ones. Then overwrite the remaining slots in
 * place.
 * NOTE(review): the lines setting 'start' and the loop braces are
 * elided in this listing.
 */
803 static void udf_update_extents(struct inode *inode,
804 kernel_long_ad laarr[EXTENT_MERGE_SIZE], int startnum, int endnum,
805 kernel_lb_addr pbloc, uint32_t pextoffset, struct buffer_head **pbh)
808 kernel_lb_addr tmploc;
811 if (startnum > endnum)
813 for (i=0; i<(startnum-endnum); i++)
815 udf_delete_aext(inode, pbloc, pextoffset, laarr[i].extLocation,
816 laarr[i].extLength, *pbh);
819 else if (startnum < endnum)
821 for (i=0; i<(endnum-startnum); i++)
823 udf_insert_aext(inode, pbloc, pextoffset, laarr[i].extLocation,
824 laarr[i].extLength, *pbh);
/* Step the cursor past the descriptor we just inserted. */
825 udf_next_aext(inode, &pbloc, &pextoffset, &laarr[i].extLocation,
826 &laarr[i].extLength, pbh, 1);
831 for (i=start; i<endnum; i++)
833 udf_next_aext(inode, &pbloc, &pextoffset, &tmploc, &tmplen, pbh, 0);
834 udf_write_aext(inode, pbloc, &pextoffset, laarr[i].extLocation,
835 laarr[i].extLength, *pbh, 1);
/*
 * udf_bread - udf_getblk() plus a synchronous read: returns an
 * uptodate buffer_head for 'block', or (per the elided paths)
 * presumably NULL on failure, with *err set.
 */
839 struct buffer_head * udf_bread(struct inode * inode, int block,
840 int create, int * err)
842 struct buffer_head * bh = NULL;
844 bh = udf_getblk(inode, block, create, err);
/* Already valid (e.g. freshly zeroed new block): no disk read needed. */
848 if (buffer_uptodate(bh))
850 ll_rw_block(READ, 1, &bh);
/* Elided: wait_on_buffer() presumably sits between the read and this
 * recheck — confirm against the full source. */
852 if (buffer_uptodate(bh))
/*
 * udf_truncate - VFS truncate operation.
 * For in-ICB files: if the new size still fits in the ICB, just zero
 * the tail of the embedded data; otherwise expand to extent form first
 * and fall through to normal extent truncation. For extent files:
 * zero the partial last page and drop extents past i_size.
 */
859 void udf_truncate(struct inode * inode)
864 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
865 S_ISLNK(inode->i_mode)))
867 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
871 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
/* New size would overflow the ICB: convert to extent form. */
873 if (inode->i_sb->s_blocksize < (udf_file_entry_alloc_offset(inode) +
876 udf_expand_file_adinicb(inode, inode->i_size, &err);
/* Expansion failed (still in-ICB): clamp i_size to what fits. */
877 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
879 inode->i_size = UDF_I_LENALLOC(inode);
884 udf_truncate_extents(inode);
/* Still in-ICB: zero everything past the new size inside the ICB. */
888 offset = inode->i_size & (inode->i_sb->s_blocksize - 1);
889 memset(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode) + offset, 0x00, inode->i_sb->s_blocksize - offset - udf_file_entry_alloc_offset(inode));
890 UDF_I_LENALLOC(inode) = inode->i_size;
895 block_truncate_page(inode->i_mapping, inode->i_size, udf_get_block);
896 udf_truncate_extents(inode);
899 inode->i_mtime = inode->i_ctime = current_fs_time(inode->i_sb);
901 udf_sync_inode (inode);
903 mark_inode_dirty(inode);
/*
 * __udf_read_inode
 *
 * Read the on-disk (extended) file entry for the inode's ICB location
 * and populate the in-core inode via udf_fill_inode(). Handles
 * strategy-4096 ICBs by following the indirect entry to the real FE
 * and recursing; any unreadable or unrecognized descriptor marks the
 * inode bad.
 */
908 __udf_read_inode(struct inode *inode)
910 struct buffer_head *bh = NULL;
911 struct fileEntry *fe;
915 * Set defaults, but the inode is still incomplete!
916 * Note: get_new_inode() sets the following on a new inode:
919 * i_flags = sb->s_flags
921 * clean_inode(): zero fills and sets
926 inode->i_blksize = PAGE_SIZE;
928 bh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 0, &ident);
932 printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed !bh\n",
934 make_bad_inode(inode);
/* Only FE, EFE, and USE descriptors describe inodes. */
938 if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
939 ident != TAG_IDENT_USE)
941 printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed ident=%d\n",
942 inode->i_ino, ident);
943 udf_release_data(bh);
944 make_bad_inode(inode);
948 fe = (struct fileEntry *)bh->b_data;
/* Strategy 4096: the ICB is an indirect entry pointing at the FE. */
950 if (le16_to_cpu(fe->icbTag.strategyType) == 4096)
952 struct buffer_head *ibh = NULL, *nbh = NULL;
953 struct indirectEntry *ie;
955 ibh = udf_read_ptagged(inode->i_sb, UDF_I_LOCATION(inode), 1, &ident);
956 if (ident == TAG_IDENT_IE)
961 ie = (struct indirectEntry *)ibh->b_data;
963 loc = lelb_to_cpu(ie->indirectICB.extLocation);
965 if (ie->indirectICB.extLength &&
966 (nbh = udf_read_ptagged(inode->i_sb, loc, 0, &ident)))
968 if (ident == TAG_IDENT_FE ||
969 ident == TAG_IDENT_EFE)
/* Retarget the inode at the real FE location and re-read. */
971 memcpy(&UDF_I_LOCATION(inode), &loc, sizeof(kernel_lb_addr));
972 udf_release_data(bh);
973 udf_release_data(ibh);
974 udf_release_data(nbh);
975 __udf_read_inode(inode);
980 udf_release_data(nbh);
981 udf_release_data(ibh);
985 udf_release_data(ibh);
989 udf_release_data(ibh);
991 else if (le16_to_cpu(fe->icbTag.strategyType) != 4)
993 printk(KERN_ERR "udf: unsupported strategy type: %d\n",
994 le16_to_cpu(fe->icbTag.strategyType));
995 udf_release_data(bh);
996 make_bad_inode(inode);
999 udf_fill_inode(inode, bh);
1000 udf_release_data(bh);
/*
 * udf_fill_inode
 *
 * Populate the in-core inode from the descriptor in bh: detect
 * FE/EFE/USE, copy the trailing EA + allocation-descriptor data into
 * UDF_I_DATA, convert ownership, permissions, sizes and timestamps,
 * and wire up i_op/i_fop/a_ops according to the ICB file type.
 * NOTE(review): kmalloc() results are used without a NULL check on the
 * elided lines visible here — matches the era's kernel code, but worth
 * confirming against the full source.
 */
1003 static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
1005 struct fileEntry *fe;
1006 struct extendedFileEntry *efe;
1011 fe = (struct fileEntry *)bh->b_data;
1012 efe = (struct extendedFileEntry *)bh->b_data;
1014 if (le16_to_cpu(fe->icbTag.strategyType) == 4)
1015 UDF_I_STRAT4096(inode) = 0;
1016 else /* if (le16_to_cpu(fe->icbTag.strategyType) == 4096) */
1017 UDF_I_STRAT4096(inode) = 1;
1019 UDF_I_ALLOCTYPE(inode) = le16_to_cpu(fe->icbTag.flags) & ICBTAG_FLAG_AD_MASK;
1020 UDF_I_UNIQUE(inode) = 0;
1021 UDF_I_LENEATTR(inode) = 0;
1022 UDF_I_LENEXTENTS(inode) = 0;
1023 UDF_I_LENALLOC(inode) = 0;
1024 UDF_I_NEXT_ALLOC_BLOCK(inode) = 0;
1025 UDF_I_NEXT_ALLOC_GOAL(inode) = 0;
/* Copy everything after the fixed descriptor header (EAs + allocation
 * descriptors) into the in-core buffer; the copy size depends on the
 * descriptor variant. */
1026 if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_EFE)
1028 UDF_I_EFE(inode) = 1;
1029 UDF_I_USE(inode) = 0;
1030 UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry), GFP_KERNEL);
1031 memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct extendedFileEntry), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
1033 else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_FE)
1035 UDF_I_EFE(inode) = 0;
1036 UDF_I_USE(inode) = 0;
1037 UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct fileEntry), GFP_KERNEL);
1038 memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct fileEntry), inode->i_sb->s_blocksize - sizeof(struct fileEntry));
1040 else if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
1042 UDF_I_EFE(inode) = 0;
1043 UDF_I_USE(inode) = 1;
1044 UDF_I_LENALLOC(inode) =
1046 ((struct unallocSpaceEntry *)bh->b_data)->lengthAllocDescs);
1047 UDF_I_DATA(inode) = kmalloc(inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry), GFP_KERNEL);
1048 memcpy(UDF_I_DATA(inode), bh->b_data + sizeof(struct unallocSpaceEntry), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
/* -1 on disk means "not specified": fall back to the mount options. */
1052 inode->i_uid = le32_to_cpu(fe->uid);
1053 if ( inode->i_uid == -1 ) inode->i_uid = UDF_SB(inode->i_sb)->s_uid;
1055 inode->i_gid = le32_to_cpu(fe->gid);
1056 if ( inode->i_gid == -1 ) inode->i_gid = UDF_SB(inode->i_sb)->s_gid;
1058 inode->i_nlink = le16_to_cpu(fe->fileLinkCount);
1059 if (!inode->i_nlink)
1062 inode->i_size = le64_to_cpu(fe->informationLength);
1063 UDF_I_LENEXTENTS(inode) = inode->i_size;
1065 inode->i_mode = udf_convert_permissions(fe);
1066 inode->i_mode &= ~UDF_SB(inode->i_sb)->s_umask;
/* FE and EFE lay out timestamps/lengths at different offsets, hence
 * the duplicated blocks below. */
1068 if (UDF_I_EFE(inode) == 0)
1070 inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
1071 (inode->i_sb->s_blocksize_bits - 9);
1073 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1074 lets_to_cpu(fe->accessTime)) )
1076 inode->i_atime.tv_sec = convtime;
1077 inode->i_atime.tv_nsec = convtime_usec * 1000;
/* Unparseable timestamp: fall back to the volume's recording time. */
1081 inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
1084 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1085 lets_to_cpu(fe->modificationTime)) )
1087 inode->i_mtime.tv_sec = convtime;
1088 inode->i_mtime.tv_nsec = convtime_usec * 1000;
1092 inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
1095 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1096 lets_to_cpu(fe->attrTime)) )
1098 inode->i_ctime.tv_sec = convtime;
1099 inode->i_ctime.tv_nsec = convtime_usec * 1000;
1103 inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
1106 UDF_I_UNIQUE(inode) = le64_to_cpu(fe->uniqueID);
1107 UDF_I_LENEATTR(inode) = le32_to_cpu(fe->lengthExtendedAttr);
1108 UDF_I_LENALLOC(inode) = le32_to_cpu(fe->lengthAllocDescs);
1109 offset = sizeof(struct fileEntry) + UDF_I_LENEATTR(inode);
1113 inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
1114 (inode->i_sb->s_blocksize_bits - 9);
1116 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1117 lets_to_cpu(efe->accessTime)) )
1119 inode->i_atime.tv_sec = convtime;
1120 inode->i_atime.tv_nsec = convtime_usec * 1000;
1124 inode->i_atime = UDF_SB_RECORDTIME(inode->i_sb);
1127 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1128 lets_to_cpu(efe->modificationTime)) )
1130 inode->i_mtime.tv_sec = convtime;
1131 inode->i_mtime.tv_nsec = convtime_usec * 1000;
1135 inode->i_mtime = UDF_SB_RECORDTIME(inode->i_sb);
/* EFE additionally records a creation time, kept in UDF_I_CRTIME. */
1138 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1139 lets_to_cpu(efe->createTime)) )
1141 UDF_I_CRTIME(inode).tv_sec = convtime;
1142 UDF_I_CRTIME(inode).tv_nsec = convtime_usec * 1000;
1146 UDF_I_CRTIME(inode) = UDF_SB_RECORDTIME(inode->i_sb);
1149 if ( udf_stamp_to_time(&convtime, &convtime_usec,
1150 lets_to_cpu(efe->attrTime)) )
1152 inode->i_ctime.tv_sec = convtime;
1153 inode->i_ctime.tv_nsec = convtime_usec * 1000;
1157 inode->i_ctime = UDF_SB_RECORDTIME(inode->i_sb);
1160 UDF_I_UNIQUE(inode) = le64_to_cpu(efe->uniqueID);
1161 UDF_I_LENEATTR(inode) = le32_to_cpu(efe->lengthExtendedAttr);
1162 UDF_I_LENALLOC(inode) = le32_to_cpu(efe->lengthAllocDescs);
1163 offset = sizeof(struct extendedFileEntry) + UDF_I_LENEATTR(inode);
/* Dispatch operations vectors on the ICB file type. */
1166 switch (fe->icbTag.fileType)
1168 case ICBTAG_FILE_TYPE_DIRECTORY:
1170 inode->i_op = &udf_dir_inode_operations;
1171 inode->i_fop = &udf_dir_operations;
1172 inode->i_mode |= S_IFDIR;
1176 case ICBTAG_FILE_TYPE_REALTIME:
1177 case ICBTAG_FILE_TYPE_REGULAR:
1178 case ICBTAG_FILE_TYPE_UNDEF:
1180 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_IN_ICB)
1181 inode->i_data.a_ops = &udf_adinicb_aops;
1183 inode->i_data.a_ops = &udf_aops;
1184 inode->i_op = &udf_file_inode_operations;
1185 inode->i_fop = &udf_file_operations;
1186 inode->i_mode |= S_IFREG;
1189 case ICBTAG_FILE_TYPE_BLOCK:
1191 inode->i_mode |= S_IFBLK;
1194 case ICBTAG_FILE_TYPE_CHAR:
1196 inode->i_mode |= S_IFCHR;
1199 case ICBTAG_FILE_TYPE_FIFO:
1201 init_special_inode(inode, inode->i_mode | S_IFIFO, 0);
1204 case ICBTAG_FILE_TYPE_SOCKET:
1206 init_special_inode(inode, inode->i_mode | S_IFSOCK, 0);
1209 case ICBTAG_FILE_TYPE_SYMLINK:
1211 inode->i_data.a_ops = &udf_symlink_aops;
1212 inode->i_op = &page_symlink_inode_operations;
1213 inode->i_mode = S_IFLNK|S_IRWXUGO;
1218 printk(KERN_ERR "udf: udf_fill_inode(ino %ld) failed unknown file type=%d\n",
1219 inode->i_ino, fe->icbTag.fileType);
1220 make_bad_inode(inode);
/* Device nodes: major/minor live in a Device Specification EA
 * (attribute type 12). */
1224 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1226 struct deviceSpec *dsea =
1227 (struct deviceSpec *)
1228 udf_get_extendedattr(inode, 12, 1);
1232 init_special_inode(inode, inode->i_mode, MKDEV(
1233 le32_to_cpu(dsea->majorDeviceIdent),
1234 le32_to_cpu(dsea->minorDeviceIdent)));
1235 /* Developer ID ??? */
1239 make_bad_inode(inode);
/*
 * udf_convert_permissions - translate the UDF on-disk permission word
 * and ICB flags into a POSIX mode_t. UDF stores five bits per class
 * (other/group/owner), so each class is shifted down to the
 * three-bit rwx POSIX layout; setuid/setgid/sticky come from the ICB
 * tag flags.
 */
1245 udf_convert_permissions(struct fileEntry *fe)
1248 uint32_t permissions;
1251 permissions = le32_to_cpu(fe->permissions);
1252 flags = le16_to_cpu(fe->icbTag.flags);
1254 mode = (( permissions ) & S_IRWXO) |
1255 (( permissions >> 2 ) & S_IRWXG) |
1256 (( permissions >> 4 ) & S_IRWXU) |
1257 (( flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) |
1258 (( flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) |
1259 (( flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0);
1268 * Write out the specified inode.
1271 * This routine is called whenever an inode is synced.
1272 * Currently this routine is just a placeholder.
1275 * July 1, 1997 - Andrew E. Mileski
1276 * Written, tested, and released.
/*
 * udf_write_inode - super_operations write_inode hook; delegates to
 * udf_update_inode(). NOTE(review): locking around the call is elided
 * in this listing.
 */
1279 int udf_write_inode(struct inode * inode, int sync)
1283 ret = udf_update_inode(inode, sync);
/* Synchronously write the inode to disk (do_sync = 1). */
1288 int udf_sync_inode(struct inode * inode)
1290 return udf_update_inode(inode, 1);
/*
 * udf_update_inode
 *
 * Serialise the in-core inode into its on-disc (extended) file entry,
 * recompute the descriptor CRC and tag checksum, and write the buffer
 * back (waiting for the write when do_sync is set).
 *
 * NOTE(review): this listing is a sparse extraction of the original
 * file — structural lines (braces, else branches, declarations of i,
 * crclen, udfperms, icbflags, eid, and the early returns) are missing.
 * Code lines are left byte-identical; only comments were added.
 */
1294 udf_update_inode(struct inode *inode, int do_sync)
1296 struct buffer_head *bh = NULL;
1297 struct fileEntry *fe;
1298 struct extendedFileEntry *efe;
1303 kernel_timestamp cpu_time;
/* Read the block that holds this inode's (extended) file entry. */
1306 bh = udf_tread(inode->i_sb,
1307 udf_get_lb_pblock(inode->i_sb, UDF_I_LOCATION(inode), 0));
1311 udf_debug("bread failure\n");
/* Rebuild the descriptor from scratch in the buffer. */
1315 memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
1317 fe = (struct fileEntry *)bh->b_data;
1318 efe = (struct extendedFileEntry *)bh->b_data;
/*
 * Unallocated-space entries carry only allocation descriptors:
 * copy them in, finish the tag, and return early.
 */
1320 if (le16_to_cpu(fe->descTag.tagIdent) == TAG_IDENT_USE)
1322 struct unallocSpaceEntry *use =
1323 (struct unallocSpaceEntry *)bh->b_data;
1325 use->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
1326 memcpy(bh->b_data + sizeof(struct unallocSpaceEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry));
1327 crclen = sizeof(struct unallocSpaceEntry) + UDF_I_LENALLOC(inode) -
1329 use->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
1330 use->descTag.descCRCLength = cpu_to_le16(crclen);
1331 use->descTag.descCRC = cpu_to_le16(udf_crc((char *)use + sizeof(tag), crclen, 0));
/*
 * Tag checksum = byte sum of the 16-byte tag.  NOTE(review): upstream
 * skips byte 4 (the checksum field itself) via an "if (i != 4)" that
 * appears to have been lost in this listing — confirm before relying
 * on this loop as written.
 */
1333 use->descTag.tagChecksum = 0;
1334 for (i=0; i<16; i++)
1336 use->descTag.tagChecksum += ((uint8_t *)&(use->descTag))[i];
1338 mark_buffer_dirty(bh);
1339 udf_release_data(bh);
/* Record uid/gid only when they differ from the mount-option defaults. */
1343 if (inode->i_uid != UDF_SB(inode->i_sb)->s_uid)
1344 fe->uid = cpu_to_le32(inode->i_uid);
1346 if (inode->i_gid != UDF_SB(inode->i_sb)->s_gid)
1347 fe->gid = cpu_to_le32(inode->i_gid);
/*
 * POSIX rwx bits -> UDF layout (other in low bits, group << 2,
 * user << 4), while preserving the on-disc delete/chattr bits.
 */
1349 udfperms = ((inode->i_mode & S_IRWXO) ) |
1350 ((inode->i_mode & S_IRWXG) << 2) |
1351 ((inode->i_mode & S_IRWXU) << 4);
1353 udfperms |= (le32_to_cpu(fe->permissions) &
1354 (FE_PERM_O_DELETE | FE_PERM_O_CHATTR |
1355 FE_PERM_G_DELETE | FE_PERM_G_CHATTR |
1356 FE_PERM_U_DELETE | FE_PERM_U_CHATTR));
1357 fe->permissions = cpu_to_le32(udfperms);
/* UDF does not count the '.' entry in a directory's link count. */
1359 if (S_ISDIR(inode->i_mode))
1360 fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
1362 fe->fileLinkCount = cpu_to_le16(inode->i_nlink);
1364 fe->informationLength = cpu_to_le64(inode->i_size);
/*
 * Block/char devices keep major/minor numbers in a deviceSpec
 * extended attribute (attrType 12); create it if absent.
 */
1366 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
1369 struct deviceSpec *dsea =
1370 (struct deviceSpec *)
1371 udf_get_extendedattr(inode, 12, 1);
1375 dsea = (struct deviceSpec *)
1376 udf_add_extendedattr(inode,
1377 sizeof(struct deviceSpec) +
1378 sizeof(regid), 12, 0x3);
1379 dsea->attrType = cpu_to_le32(12);
1380 dsea->attrSubtype = 1;
1381 dsea->attrLength = cpu_to_le32(sizeof(struct deviceSpec) +
1383 dsea->impUseLength = cpu_to_le32(sizeof(regid));
1385 eid = (regid *)dsea->impUse;
1386 memset(eid, 0, sizeof(regid));
1387 strcpy(eid->ident, UDF_ID_DEVELOPER);
1388 eid->identSuffix[0] = UDF_OS_CLASS_UNIX;
1389 eid->identSuffix[1] = UDF_OS_ID_LINUX;
1390 dsea->majorDeviceIdent = cpu_to_le32(imajor(inode));
1391 dsea->minorDeviceIdent = cpu_to_le32(iminor(inode));
/* Plain fileEntry vs. extendedFileEntry layouts diverge here. */
1394 if (UDF_I_EFE(inode) == 0)
1396 memcpy(bh->b_data + sizeof(struct fileEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct fileEntry));
/* Round i_blocks (512-byte units) up to filesystem blocks. */
1397 fe->logicalBlocksRecorded = cpu_to_le64(
1398 (inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
1399 (inode->i_sb->s_blocksize_bits - 9));
1401 if (udf_time_to_stamp(&cpu_time, inode->i_atime))
1402 fe->accessTime = cpu_to_lets(cpu_time);
1403 if (udf_time_to_stamp(&cpu_time, inode->i_mtime))
1404 fe->modificationTime = cpu_to_lets(cpu_time);
1405 if (udf_time_to_stamp(&cpu_time, inode->i_ctime))
1406 fe->attrTime = cpu_to_lets(cpu_time);
1407 memset(&(fe->impIdent), 0, sizeof(regid));
1408 strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER);
1409 fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1410 fe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1411 fe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode));
1412 fe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode));
1413 fe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
1414 fe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_FE);
1415 crclen = sizeof(struct fileEntry);
/* Extended file entry branch (presumably the else of the EFE test above). */
1419 memcpy(bh->b_data + sizeof(struct extendedFileEntry), UDF_I_DATA(inode), inode->i_sb->s_blocksize - sizeof(struct extendedFileEntry));
1420 efe->objectSize = cpu_to_le64(inode->i_size);
1421 efe->logicalBlocksRecorded = cpu_to_le64(
1422 (inode->i_blocks + (1 << (inode->i_sb->s_blocksize_bits - 9)) - 1) >>
1423 (inode->i_sb->s_blocksize_bits - 9));
/*
 * Clamp the recorded creation time so it is never later than the
 * access, modification or change time.
 */
1425 if (UDF_I_CRTIME(inode).tv_sec > inode->i_atime.tv_sec ||
1426 (UDF_I_CRTIME(inode).tv_sec == inode->i_atime.tv_sec &&
1427 UDF_I_CRTIME(inode).tv_nsec > inode->i_atime.tv_nsec))
1429 UDF_I_CRTIME(inode) = inode->i_atime;
1431 if (UDF_I_CRTIME(inode).tv_sec > inode->i_mtime.tv_sec ||
1432 (UDF_I_CRTIME(inode).tv_sec == inode->i_mtime.tv_sec &&
1433 UDF_I_CRTIME(inode).tv_nsec > inode->i_mtime.tv_nsec))
1435 UDF_I_CRTIME(inode) = inode->i_mtime;
1437 if (UDF_I_CRTIME(inode).tv_sec > inode->i_ctime.tv_sec ||
1438 (UDF_I_CRTIME(inode).tv_sec == inode->i_ctime.tv_sec &&
1439 UDF_I_CRTIME(inode).tv_nsec > inode->i_ctime.tv_nsec))
1441 UDF_I_CRTIME(inode) = inode->i_ctime;
1444 if (udf_time_to_stamp(&cpu_time, inode->i_atime))
1445 efe->accessTime = cpu_to_lets(cpu_time);
1446 if (udf_time_to_stamp(&cpu_time, inode->i_mtime))
1447 efe->modificationTime = cpu_to_lets(cpu_time);
1448 if (udf_time_to_stamp(&cpu_time, UDF_I_CRTIME(inode)))
1449 efe->createTime = cpu_to_lets(cpu_time);
1450 if (udf_time_to_stamp(&cpu_time, inode->i_ctime))
1451 efe->attrTime = cpu_to_lets(cpu_time);
1453 memset(&(efe->impIdent), 0, sizeof(regid));
1454 strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER);
1455 efe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
1456 efe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
1457 efe->uniqueID = cpu_to_le64(UDF_I_UNIQUE(inode));
1458 efe->lengthExtendedAttr = cpu_to_le32(UDF_I_LENEATTR(inode));
1459 efe->lengthAllocDescs = cpu_to_le32(UDF_I_LENALLOC(inode));
1460 efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE);
1461 crclen = sizeof(struct extendedFileEntry);
/* Strategy 4096 uses two ICB entries; the default strategy 4 uses one. */
1463 if (UDF_I_STRAT4096(inode))
1465 fe->icbTag.strategyType = cpu_to_le16(4096);
1466 fe->icbTag.strategyParameter = cpu_to_le16(1);
1467 fe->icbTag.numEntries = cpu_to_le16(2);
1471 fe->icbTag.strategyType = cpu_to_le16(4);
1472 fe->icbTag.numEntries = cpu_to_le16(1);
/* Map the inode type to the on-disc ICB file type. */
1475 if (S_ISDIR(inode->i_mode))
1476 fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY;
1477 else if (S_ISREG(inode->i_mode))
1478 fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR;
1479 else if (S_ISLNK(inode->i_mode))
1480 fe->icbTag.fileType = ICBTAG_FILE_TYPE_SYMLINK;
1481 else if (S_ISBLK(inode->i_mode))
1482 fe->icbTag.fileType = ICBTAG_FILE_TYPE_BLOCK;
1483 else if (S_ISCHR(inode->i_mode))
1484 fe->icbTag.fileType = ICBTAG_FILE_TYPE_CHAR;
1485 else if (S_ISFIFO(inode->i_mode))
1486 fe->icbTag.fileType = ICBTAG_FILE_TYPE_FIFO;
1487 else if (S_ISSOCK(inode->i_mode))
1488 fe->icbTag.fileType = ICBTAG_FILE_TYPE_SOCKET;
/*
 * Merge the allocation type and the mode's suid/sgid/sticky bits into
 * the ICB flags, preserving every unrelated flag bit already on disc.
 */
1490 icbflags = UDF_I_ALLOCTYPE(inode) |
1491 ((inode->i_mode & S_ISUID) ? ICBTAG_FLAG_SETUID : 0) |
1492 ((inode->i_mode & S_ISGID) ? ICBTAG_FLAG_SETGID : 0) |
1493 ((inode->i_mode & S_ISVTX) ? ICBTAG_FLAG_STICKY : 0) |
1494 (le16_to_cpu(fe->icbTag.flags) &
1495 ~(ICBTAG_FLAG_AD_MASK | ICBTAG_FLAG_SETUID |
1496 ICBTAG_FLAG_SETGID | ICBTAG_FLAG_STICKY));
1498 fe->icbTag.flags = cpu_to_le16(icbflags);
/* Descriptor tag version 3 for UDF >= 2.00 media, else version 2. */
1499 if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
1500 fe->descTag.descVersion = cpu_to_le16(3);
1502 fe->descTag.descVersion = cpu_to_le16(2);
1503 fe->descTag.tagSerialNum = cpu_to_le16(UDF_SB_SERIALNUM(inode->i_sb));
1504 fe->descTag.tagLocation = cpu_to_le32(UDF_I_LOCATION(inode).logicalBlockNum);
/* CRC covers everything after the tag, incl. EAs and alloc descs. */
1505 crclen += UDF_I_LENEATTR(inode) + UDF_I_LENALLOC(inode) - sizeof(tag);
1506 fe->descTag.descCRCLength = cpu_to_le16(crclen);
1507 fe->descTag.descCRC = cpu_to_le16(udf_crc((char *)fe + sizeof(tag), crclen, 0));
/*
 * NOTE(review): as in the USE branch above, upstream skips byte 4 (the
 * checksum field) with "if (i != 4)" — that line seems lost here.
 */
1509 fe->descTag.tagChecksum = 0;
1510 for (i=0; i<16; i++)
1512 fe->descTag.tagChecksum += ((uint8_t *)&(fe->descTag))[i];
1514 /* write the data blocks */
1515 mark_buffer_dirty(bh);
/* do_sync path: wait for the write and report any I/O error. */
1518 sync_dirty_buffer(bh);
1519 if (buffer_req(bh) && !buffer_uptodate(bh))
1521 printk("IO error syncing udf inode [%s:%08lx]\n",
1522 inode->i_sb->s_id, inode->i_ino);
1526 udf_release_data(bh);
1531 udf_iget(struct super_block *sb, kernel_lb_addr ino)
1533 unsigned long block = udf_get_lb_pblock(sb, ino, 0);
1534 struct inode *inode = iget_locked(sb, block);
1539 if (inode->i_state & I_NEW) {
1540 memcpy(&UDF_I_LOCATION(inode), &ino, sizeof(kernel_lb_addr));
1541 __udf_read_inode(inode);
1542 unlock_new_inode(inode);
1545 if (is_bad_inode(inode))
1548 if (ino.logicalBlockNum >= UDF_SB_PARTLEN(sb, ino.partitionReferenceNum)) {
1549 udf_debug("block=%d, partition=%d out of range\n",
1550 ino.logicalBlockNum, ino.partitionReferenceNum);
1551 make_bad_inode(inode);
/*
 * udf_add_aext
 *
 * Append one allocation descriptor (eloc/elen) to the inode's extent
 * list.  When the current descriptor area cannot hold another entry
 * plus a chain pointer, a fresh allocation extent descriptor (AED)
 * block is allocated and chained in first, then the new descriptor is
 * written via udf_write_aext().
 *
 * NOTE(review): this listing is a sparse extraction — braces, else
 * branches, local declarations (ptr, adsize, loffset, sptr, dptr, err,
 * etype) and several returns are missing.  Code lines left
 * byte-identical; only comments were added.
 */
1562 int8_t udf_add_aext(struct inode *inode, kernel_lb_addr *bloc, int *extoffset,
1563 kernel_lb_addr eloc, uint32_t elen, struct buffer_head **bh, int inc)
1566 short_ad *sad = NULL;
1567 long_ad *lad = NULL;
1568 struct allocExtDesc *aed;
/* Descriptors live inside the ICB when no external AED block (*bh) is in use. */
1573 ptr = UDF_I_DATA(inode) + *extoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
1575 ptr = (*bh)->b_data + *extoffset;
1577 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
1578 adsize = sizeof(short_ad);
1579 else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
1580 adsize = sizeof(long_ad);
/*
 * Not enough room for this descriptor plus a chaining entry:
 * allocate a new AED block near the current one.
 */
1584 if (*extoffset + (2 * adsize) > inode->i_sb->s_blocksize)
1587 struct buffer_head *nbh;
1589 kernel_lb_addr obloc = *bloc;
1591 if (!(bloc->logicalBlockNum = udf_new_block(inode->i_sb, NULL,
1592 obloc.partitionReferenceNum, obloc.logicalBlockNum, &err)))
1596 if (!(nbh = udf_tgetblk(inode->i_sb, udf_get_lb_pblock(inode->i_sb,
1602 memset(nbh->b_data, 0x00, inode->i_sb->s_blocksize);
1603 set_buffer_uptodate(nbh);
1605 mark_buffer_dirty_inode(nbh, inode);
1607 aed = (struct allocExtDesc *)(nbh->b_data);
1608 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT))
1609 aed->previousAllocExtLocation = cpu_to_le32(obloc.logicalBlockNum);
/*
 * If even one more descriptor would overflow the old area, relocate
 * the last descriptor into the new block so the chain pointer fits
 * in its former slot; otherwise the chain pointer goes after the
 * existing descriptors.
 */
1610 if (*extoffset + adsize > inode->i_sb->s_blocksize)
1612 loffset = *extoffset;
1613 aed->lengthAllocDescs = cpu_to_le32(adsize);
1614 sptr = ptr - adsize;
1615 dptr = nbh->b_data + sizeof(struct allocExtDesc);
1616 memcpy(dptr, sptr, adsize);
1617 *extoffset = sizeof(struct allocExtDesc) + adsize;
1621 loffset = *extoffset + adsize;
1622 aed->lengthAllocDescs = cpu_to_le32(0);
1624 *extoffset = sizeof(struct allocExtDesc);
/* Account the chain pointer in the old AED's header (when chaining from one). */
1628 aed = (struct allocExtDesc *)(*bh)->b_data;
1629 aed->lengthAllocDescs =
1630 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
1634 UDF_I_LENALLOC(inode) += adsize;
1635 mark_inode_dirty(inode);
/* Tag the new AED block: descriptor version 3 for UDF >= 2.00, else 2. */
1638 if (UDF_SB_UDFREV(inode->i_sb) >= 0x0200)
1639 udf_new_tag(nbh->b_data, TAG_IDENT_AED, 3, 1,
1640 bloc->logicalBlockNum, sizeof(tag));
1642 udf_new_tag(nbh->b_data, TAG_IDENT_AED, 2, 1,
1643 bloc->logicalBlockNum, sizeof(tag));
/*
 * Overwrite the vacated slot with a "next extent of allocation
 * descriptors" entry pointing at the new block.
 */
1644 switch (UDF_I_ALLOCTYPE(inode))
1646 case ICBTAG_FLAG_AD_SHORT:
1648 sad = (short_ad *)sptr;
1649 sad->extLength = cpu_to_le32(
1650 EXT_NEXT_EXTENT_ALLOCDECS |
1651 inode->i_sb->s_blocksize);
1652 sad->extPosition = cpu_to_le32(bloc->logicalBlockNum);
1655 case ICBTAG_FLAG_AD_LONG:
1657 lad = (long_ad *)sptr;
1658 lad->extLength = cpu_to_le32(
1659 EXT_NEXT_EXTENT_ALLOCDECS |
1660 inode->i_sb->s_blocksize);
1661 lad->extLocation = cpu_to_lelb(*bloc);
1662 memset(lad->impUse, 0x00, sizeof(lad->impUse));
/* Re-CRC the old descriptor area and release it; continue in the new block. */
1668 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1669 udf_update_tag((*bh)->b_data, loffset)
1671 udf_update_tag((*bh)->b_data, sizeof(struct allocExtDesc));
1672 mark_buffer_dirty_inode(*bh, inode);
1673 udf_release_data(*bh);
1676 mark_inode_dirty(inode);
/* Room guaranteed now: write the new extent descriptor itself. */
1680 etype = udf_write_aext(inode, *bloc, extoffset, eloc, elen, *bh, inc);
/* Account the added descriptor in the ICB or in the AED's tag. */
1684 UDF_I_LENALLOC(inode) += adsize;
1685 mark_inode_dirty(inode);
1689 aed = (struct allocExtDesc *)(*bh)->b_data;
1690 aed->lengthAllocDescs =
1691 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) + adsize);
1692 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1693 udf_update_tag((*bh)->b_data, *extoffset + (inc ? 0 : adsize));
1695 udf_update_tag((*bh)->b_data, sizeof(struct allocExtDesc));
1696 mark_buffer_dirty_inode(*bh, inode);
1702 int8_t udf_write_aext(struct inode *inode, kernel_lb_addr bloc, int *extoffset,
1703 kernel_lb_addr eloc, uint32_t elen, struct buffer_head *bh, int inc)
1709 ptr = UDF_I_DATA(inode) + *extoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
1712 ptr = bh->b_data + *extoffset;
1713 atomic_inc(&bh->b_count);
1716 switch (UDF_I_ALLOCTYPE(inode))
1718 case ICBTAG_FLAG_AD_SHORT:
1720 short_ad *sad = (short_ad *)ptr;
1721 sad->extLength = cpu_to_le32(elen);
1722 sad->extPosition = cpu_to_le32(eloc.logicalBlockNum);
1723 adsize = sizeof(short_ad);
1726 case ICBTAG_FLAG_AD_LONG:
1728 long_ad *lad = (long_ad *)ptr;
1729 lad->extLength = cpu_to_le32(elen);
1730 lad->extLocation = cpu_to_lelb(eloc);
1731 memset(lad->impUse, 0x00, sizeof(lad->impUse));
1732 adsize = sizeof(long_ad);
1741 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1743 struct allocExtDesc *aed = (struct allocExtDesc *)(bh)->b_data;
1744 udf_update_tag((bh)->b_data,
1745 le32_to_cpu(aed->lengthAllocDescs) + sizeof(struct allocExtDesc));
1747 mark_buffer_dirty_inode(bh, inode);
1748 udf_release_data(bh);
1751 mark_inode_dirty(inode);
1754 *extoffset += adsize;
1755 return (elen >> 30);
1758 int8_t udf_next_aext(struct inode *inode, kernel_lb_addr *bloc, int *extoffset,
1759 kernel_lb_addr *eloc, uint32_t *elen, struct buffer_head **bh, int inc)
1763 while ((etype = udf_current_aext(inode, bloc, extoffset, eloc, elen, bh, inc)) ==
1764 (EXT_NEXT_EXTENT_ALLOCDECS >> 30))
1767 *extoffset = sizeof(struct allocExtDesc);
1768 udf_release_data(*bh);
1769 if (!(*bh = udf_tread(inode->i_sb, udf_get_lb_pblock(inode->i_sb, *bloc, 0))))
1771 udf_debug("reading block %d failed!\n",
1772 udf_get_lb_pblock(inode->i_sb, *bloc, 0));
1780 int8_t udf_current_aext(struct inode *inode, kernel_lb_addr *bloc, int *extoffset,
1781 kernel_lb_addr *eloc, uint32_t *elen, struct buffer_head **bh, int inc)
1790 *extoffset = udf_file_entry_alloc_offset(inode);
1791 ptr = UDF_I_DATA(inode) + *extoffset - udf_file_entry_alloc_offset(inode) + UDF_I_LENEATTR(inode);
1792 alen = udf_file_entry_alloc_offset(inode) + UDF_I_LENALLOC(inode);
1797 *extoffset = sizeof(struct allocExtDesc);
1798 ptr = (*bh)->b_data + *extoffset;
1799 alen = sizeof(struct allocExtDesc) + le32_to_cpu(((struct allocExtDesc *)(*bh)->b_data)->lengthAllocDescs);
1802 switch (UDF_I_ALLOCTYPE(inode))
1804 case ICBTAG_FLAG_AD_SHORT:
1808 if (!(sad = udf_get_fileshortad(ptr, alen, extoffset, inc)))
1811 etype = le32_to_cpu(sad->extLength) >> 30;
1812 eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
1813 eloc->partitionReferenceNum = UDF_I_LOCATION(inode).partitionReferenceNum;
1814 *elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
1817 case ICBTAG_FLAG_AD_LONG:
1821 if (!(lad = udf_get_filelongad(ptr, alen, extoffset, inc)))
1824 etype = le32_to_cpu(lad->extLength) >> 30;
1825 *eloc = lelb_to_cpu(lad->extLocation);
1826 *elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
1831 udf_debug("alloc_type = %d unsupported\n", UDF_I_ALLOCTYPE(inode));
1840 udf_insert_aext(struct inode *inode, kernel_lb_addr bloc, int extoffset,
1841 kernel_lb_addr neloc, uint32_t nelen, struct buffer_head *bh)
1843 kernel_lb_addr oeloc;
1848 atomic_inc(&bh->b_count);
1850 while ((etype = udf_next_aext(inode, &bloc, &extoffset, &oeloc, &oelen, &bh, 0)) != -1)
1852 udf_write_aext(inode, bloc, &extoffset, neloc, nelen, bh, 1);
1855 nelen = (etype << 30) | oelen;
1857 udf_add_aext(inode, &bloc, &extoffset, neloc, nelen, &bh, 1);
1858 udf_release_data(bh);
1859 return (nelen >> 30);
/*
 * udf_delete_aext
 *
 * Delete the extent at (nbloc, nextoffset) by copying every following
 * extent one slot towards the start (write cursor obloc/oextoffset/obh
 * trails the read cursor), then wiping the freed trailing slot(s) and
 * fixing up lengthAllocDescs / the AED tag.
 *
 * NOTE(review): this listing is a sparse extraction — braces, the
 * branch structure around the chain-entry case, local declarations
 * (etype) and the early returns are missing.  Code lines left
 * byte-identical; only comments were added.
 */
1862 int8_t udf_delete_aext(struct inode *inode, kernel_lb_addr nbloc, int nextoffset,
1863 kernel_lb_addr eloc, uint32_t elen, struct buffer_head *nbh)
1865 struct buffer_head *obh;
1866 kernel_lb_addr obloc;
1867 int oextoffset, adsize;
1869 struct allocExtDesc *aed;
/* Two extra references: one for the read cursor, one for the write cursor. */
1873 atomic_inc(&nbh->b_count);
1874 atomic_inc(&nbh->b_count);
1877 if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_SHORT)
1878 adsize = sizeof(short_ad);
1879 else if (UDF_I_ALLOCTYPE(inode) == ICBTAG_FLAG_AD_LONG)
1880 adsize = sizeof(long_ad);
1886 oextoffset = nextoffset;
/* Step past the extent being deleted; nothing to do if the list ends here. */
1888 if (udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1) == -1)
/* Shift every subsequent extent one slot towards the start of the list. */
1891 while ((etype = udf_next_aext(inode, &nbloc, &nextoffset, &eloc, &elen, &nbh, 1)) != -1)
1893 udf_write_aext(inode, obloc, &oextoffset, eloc, (etype << 30) | elen, obh, 1);
/* Read cursor crossed into another AED block: move the write cursor up, one slot behind. */
1897 udf_release_data(obh);
1898 atomic_inc(&nbh->b_count);
1900 oextoffset = nextoffset - adsize;
/* Wipe the now-unused trailing slot(s). */
1903 memset(&eloc, 0x00, sizeof(kernel_lb_addr));
/*
 * Deleting emptied an AED block: free the block itself and erase two
 * trailing descriptors (the moved entry plus the chain pointer),
 * presumably the chain-entry branch of the lost if/else — verify.
 */
1908 udf_free_blocks(inode->i_sb, inode, nbloc, 0, 1);
1909 udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
1910 udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
1913 UDF_I_LENALLOC(inode) -= (adsize * 2);
1914 mark_inode_dirty(inode);
1918 aed = (struct allocExtDesc *)(obh)->b_data;
1919 aed->lengthAllocDescs =
1920 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - (2*adsize));
1921 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1922 udf_update_tag((obh)->b_data, oextoffset - (2*adsize));
1924 udf_update_tag((obh)->b_data, sizeof(struct allocExtDesc));
1925 mark_buffer_dirty_inode(obh, inode);
/* Common case: erase a single trailing descriptor and adjust the counts. */
1930 udf_write_aext(inode, obloc, &oextoffset, eloc, elen, obh, 1);
1933 UDF_I_LENALLOC(inode) -= adsize;
1934 mark_inode_dirty(inode);
1938 aed = (struct allocExtDesc *)(obh)->b_data;
1939 aed->lengthAllocDescs =
1940 cpu_to_le32(le32_to_cpu(aed->lengthAllocDescs) - adsize);
1941 if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) || UDF_SB_UDFREV(inode->i_sb) >= 0x0201)
1942 udf_update_tag((obh)->b_data, oextoffset - adsize);
1944 udf_update_tag((obh)->b_data, sizeof(struct allocExtDesc));
1945 mark_buffer_dirty_inode(obh, inode);
1949 udf_release_data(nbh);
1950 udf_release_data(obh);
1951 return (elen >> 30);
1954 int8_t inode_bmap(struct inode *inode, int block, kernel_lb_addr *bloc, uint32_t *extoffset,
1955 kernel_lb_addr *eloc, uint32_t *elen, uint32_t *offset, struct buffer_head **bh)
1957 uint64_t lbcount = 0, bcount = (uint64_t)block << inode->i_sb->s_blocksize_bits;
1962 printk(KERN_ERR "udf: inode_bmap: block < 0\n");
1967 printk(KERN_ERR "udf: inode_bmap: NULL inode\n");
1973 *bloc = UDF_I_LOCATION(inode);
1977 if ((etype = udf_next_aext(inode, bloc, extoffset, eloc, elen, bh, 1)) == -1)
1979 *offset = bcount - lbcount;
1980 UDF_I_LENEXTENTS(inode) = lbcount;
1984 } while (lbcount <= bcount);
1986 *offset = bcount + *elen - lbcount;
1991 long udf_block_map(struct inode *inode, long block)
1993 kernel_lb_addr eloc, bloc;
1994 uint32_t offset, extoffset, elen;
1995 struct buffer_head *bh = NULL;
2000 if (inode_bmap(inode, block, &bloc, &extoffset, &eloc, &elen, &offset, &bh) == (EXT_RECORDED_ALLOCATED >> 30))
2001 ret = udf_get_lb_pblock(inode->i_sb, eloc, offset >> inode->i_sb->s_blocksize_bits);
2006 udf_release_data(bh);
2008 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_VARCONV))
2009 return udf_fixed_to_variable(ret);