/*
 * Partition handling routines for the OSTA-UDF(tm) filesystem.
 *
 * This file is distributed under the terms of the GNU General Public
 * License (GPL). Copies of the GPL can be obtained from:
 *	ftp://prep.ai.mit.edu/pub/gnu/GPL
 * Each contributing author retains all rights to their own work.
 *
 * (C) 1998-2001 Ben Fennema
 *
 * 12/06/98 blf  Created file.
 */
#include "udfdecl.h"
#include "udf_sb.h"
#include "udf_i.h"

#include <linux/fs.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
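
/*
 * Translate a (partition, block) pair into an absolute block number on the
 * medium. Partition maps that need special handling (virtual, sparable,
 * metadata) register a mapping callback in s_partition_func; plain physical
 * partitions are simply offset by the partition root.
 */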
uint32_t udf_get_pblock(struct super_block *sb, uint32_t block,
			uint16_t partition, uint32_t offset)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct udf_part_map *map;

	if (partition >= sbi->s_partitions) {
		udf_debug("block=%d, partition=%d, offset=%d: "
			  "invalid partition\n", block, partition, offset);
		return 0xFFFFFFFF;
	}
	map = &sbi->s_partmaps[partition];
	if (map->s_partition_func)
		return map->s_partition_func(sb, block, partition, offset);
	else
		return map->s_partition_root + block + offset;
}
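
/*
 * Map a block in a virtual (VAT, UDF 1.50) partition. The Virtual Allocation
 * Table stored in the VAT inode is consulted to turn the virtual block number
 * into a block in the underlying partition, which is then translated again
 * with udf_get_pblock().
 */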
uint32_t udf_get_pblock_virt15(struct super_block *sb, uint32_t block,
			       uint16_t partition, uint32_t offset)
{
	struct buffer_head *bh = NULL;
	uint32_t newblock, index, loc;
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct udf_part_map *map;
	struct udf_virtual_data *vdata;
	struct udf_inode_info *iinfo = UDF_I(sbi->s_vat_inode);

	map = &sbi->s_partmaps[partition];
	vdata = &map->s_type_specific.s_virtual;

	if (block > vdata->s_num_entries) {
		udf_debug("Trying to access block beyond end of VAT "
			  "(%d max %d)\n", block, vdata->s_num_entries);
		return 0xFFFFFFFF;
	}

	/* VAT stored inline in the VAT inode's ICB */
	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
		loc = le32_to_cpu(((__le32 *)(iinfo->i_ext.i_data +
			vdata->s_start_offset))[block]);
		goto translate;
	}

	/* Find which block of the VAT file holds the wanted entry */
	index = (sb->s_blocksize - vdata->s_start_offset) / sizeof(uint32_t);
	if (block >= index) {
		block -= index;
		newblock = 1 + (block / (sb->s_blocksize / sizeof(uint32_t)));
		index = block % (sb->s_blocksize / sizeof(uint32_t));
	} else {
		newblock = 0;
		index = vdata->s_start_offset / sizeof(uint32_t) + block;
	}

	loc = udf_block_map(sbi->s_vat_inode, newblock);

	bh = sb_bread(sb, loc);
	if (!bh) {
		udf_debug("get_pblock(UDF_VIRTUAL_MAP:%p,%d,%d) VAT: %d[%d]\n",
			  sb, block, partition, loc, index);
		return 0xFFFFFFFF;
	}

	loc = le32_to_cpu(((__le32 *)bh->b_data)[index]);
	brelse(bh);

translate:
	if (iinfo->i_location.partitionReferenceNum == partition) {
		udf_debug("recursive call to udf_get_pblock!\n");
		return 0xFFFFFFFF;
	}

	return udf_get_pblock(sb, loc,
			      iinfo->i_location.partitionReferenceNum,
			      offset);
}
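
/*
 * UDF 2.00 virtual partitions use the same VAT lookup as UDF 1.50; the
 * differences in on-disc VAT layout are absorbed into s_start_offset and
 * s_num_entries when the VAT is loaded at mount time.
 */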
inline uint32_t udf_get_pblock_virt20(struct super_block *sb, uint32_t block,
				      uint16_t partition, uint32_t offset)
{
	return udf_get_pblock_virt15(sb, block, partition, offset);
}
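
/*
 * Map a block in a sparable (UDF 1.50) partition. If the packet containing
 * the block has been remapped in the sparing table, return the corresponding
 * block in the spare area; otherwise fall back to a plain physical mapping.
 */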
uint32_t udf_get_pblock_spar15(struct super_block *sb, uint32_t block,
			       uint16_t partition, uint32_t offset)
{
	int i;
	struct sparingTable *st = NULL;
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct udf_part_map *map;
	uint32_t packet;
	struct udf_sparing_data *sdata;

	map = &sbi->s_partmaps[partition];
	sdata = &map->s_type_specific.s_sparing;
	packet = (block + offset) & ~(sdata->s_packet_len - 1);

	/* Use the first sparing table copy that was read at mount time */
	for (i = 0; i < 4; i++) {
		if (sdata->s_spar_map[i] != NULL) {
			st = (struct sparingTable *)
				sdata->s_spar_map[i]->b_data;
			break;
		}
	}

	if (st) {
		for (i = 0; i < le16_to_cpu(st->reallocationTableLen); i++) {
			struct sparingEntry *entry = &st->mapEntry[i];
			u32 origLoc = le32_to_cpu(entry->origLocation);

			if (origLoc >= 0xFFFFFFF0)
				break;
			else if (origLoc == packet)
				return le32_to_cpu(entry->mappedLocation) +
					((block + offset) &
					 (sdata->s_packet_len - 1));
			else if (origLoc > packet)
				break;
		}
	}

	return map->s_partition_root + block + offset;
}
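
/*
 * Relocate a defective packet on sparable media: find (or claim) a sparing
 * table entry for the packet containing old_block, update the cached sparing
 * table copies and mark them dirty, and return the relocated block number in
 * *new_block. Returns 0 on success and 1 if the block cannot be relocated.
 */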
int udf_relocate_blocks(struct super_block *sb, long old_block, long *new_block)
{
	struct udf_sparing_data *sdata;
	struct sparingTable *st = NULL;
	struct sparingEntry mapEntry;
	uint32_t packet;
	int i, j, k, l;
	struct udf_sb_info *sbi = UDF_SB(sb);
	u16 reallocationTableLen;
	struct buffer_head *bh;

	for (i = 0; i < sbi->s_partitions; i++) {
		struct udf_part_map *map = &sbi->s_partmaps[i];
		if (old_block > map->s_partition_root &&
		    old_block < map->s_partition_root + map->s_partition_len) {
			sdata = &map->s_type_specific.s_sparing;
			packet = (old_block - map->s_partition_root) &
						~(sdata->s_packet_len - 1);

			for (j = 0; j < 4; j++)
				if (sdata->s_spar_map[j] != NULL) {
					st = (struct sparingTable *)
						sdata->s_spar_map[j]->b_data;
					break;
				}

			if (!st)
				return 1;

			reallocationTableLen =
					le16_to_cpu(st->reallocationTableLen);
			for (k = 0; k < reallocationTableLen; k++) {
				struct sparingEntry *entry = &st->mapEntry[k];
				u32 origLoc = le32_to_cpu(entry->origLocation);

				if (origLoc == 0xFFFFFFFF) {
					/* Free entry: take it for this packet */
					for (; j < 4; j++) {
						int len;
						bh = sdata->s_spar_map[j];
						if (!bh)
							continue;

						st = (struct sparingTable *)
								bh->b_data;
						entry->origLocation =
							cpu_to_le32(packet);
						len =
						  sizeof(struct sparingTable) +
						  reallocationTableLen *
						  sizeof(struct sparingEntry);
						udf_update_tag((char *)st, len);
						mark_buffer_dirty(bh);
					}
					*new_block = le32_to_cpu(
							entry->mappedLocation) +
						((old_block -
						  map->s_partition_root) &
						 (sdata->s_packet_len - 1));
					return 0;
				} else if (origLoc == packet) {
					/* Packet already relocated */
					*new_block = le32_to_cpu(
							entry->mappedLocation) +
						((old_block -
						  map->s_partition_root) &
						 (sdata->s_packet_len - 1));
					return 0;
				} else if (origLoc > packet)
					break;
			}

			/* Claim a free entry and keep the table sorted by
			 * original location. */
			for (l = k; l < reallocationTableLen; l++) {
				struct sparingEntry *entry = &st->mapEntry[l];
				u32 origLoc = le32_to_cpu(entry->origLocation);

				if (origLoc != 0xFFFFFFFF)
					continue;

				for (; j < 4; j++) {
					bh = sdata->s_spar_map[j];
					if (!bh)
						continue;

					st = (struct sparingTable *)bh->b_data;
					mapEntry = st->mapEntry[l];
					mapEntry.origLocation =
							cpu_to_le32(packet);
					memmove(&st->mapEntry[k + 1],
						&st->mapEntry[k],
						(l - k) *
						sizeof(struct sparingEntry));
					st->mapEntry[k] = mapEntry;
					udf_update_tag((char *)st,
						sizeof(struct sparingTable) +
						reallocationTableLen *
						sizeof(struct sparingEntry));
					mark_buffer_dirty(bh);
				}

				*new_block = le32_to_cpu(
					      st->mapEntry[k].mappedLocation) +
					((old_block - map->s_partition_root) &
					 (sdata->s_packet_len - 1));
				return 0;
			}

			return 1;
		} /* if old_block */
	}

	if (i == sbi->s_partitions) {
		/* outside of partitions */
		/* for now, fail =) */
		return 1;
	}

	return 0;
}
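
/*
 * Look up a block through the metadata file: find the extent covering the
 * requested block and translate it via the underlying sparable/physical
 * partition. Returns 0xFFFFFFFF if the block is not backed by a recorded
 * extent.
 */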
static uint32_t udf_try_read_meta(struct inode *inode, uint32_t block,
				  uint16_t partition, uint32_t offset)
{
	struct super_block *sb = inode->i_sb;
	struct udf_part_map *map;
	struct kernel_lb_addr eloc;
	uint32_t elen;
	sector_t ext_offset;
	struct extent_position epos = {};
	uint32_t phyblock;

	if (inode_bmap(inode, block, &epos, &eloc, &elen, &ext_offset) !=
						(EXT_RECORDED_ALLOCATED >> 30))
		phyblock = 0xFFFFFFFF;
	else {
		map = &UDF_SB(sb)->s_partmaps[partition];
		/* map to sparable/physical partition desc */
		phyblock = udf_get_pblock(sb, eloc.logicalBlockNum,
					  map->s_partition_num,
					  ext_offset + offset);
	}

	brelse(epos.bh);
	return phyblock;
}
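
/*
 * Map a block in a metadata (UDF 2.50) partition. The metadata file is tried
 * first; on failure the lookup is retried through the metadata mirror file.
 */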
uint32_t udf_get_pblock_meta25(struct super_block *sb, uint32_t block,
			       uint16_t partition, uint32_t offset)
{
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct udf_part_map *map;
	struct udf_meta_data *mdata;
	uint32_t retblk;
	struct inode *inode;

	udf_debug("READING from METADATA\n");

	map = &sbi->s_partmaps[partition];
	mdata = &map->s_type_specific.s_metadata;
	inode = mdata->s_metadata_fe ? : mdata->s_mirror_fe;

	/* We shouldn't mount such media... */
	BUG_ON(!inode);
	retblk = udf_try_read_meta(inode, block, partition, offset);
	if (retblk == 0xFFFFFFFF) {
		udf_warning(sb, __func__, "error reading from METADATA, "
			    "trying to read from MIRROR");
		inode = mdata->s_mirror_fe;
		if (!inode)
			return 0xFFFFFFFF;
		retblk = udf_try_read_meta(inode, block, partition, offset);
	}

	return retblk;
}