5 * Partition handling routines for the OSTA-UDF(tm) filesystem.
8 * This file is distributed under the terms of the GNU General Public
9 * License (GPL). Copies of the GPL can be obtained from:
10 * ftp://prep.ai.mit.edu/pub/gnu/GPL
11 * Each contributing author retains all rights to their own work.
13 * (C) 1998-2001 Ben Fennema
17 * 12/06/98 blf Created file.
26 #include <linux/string.h>
27 #include <linux/udf_fs.h>
28 #include <linux/slab.h>
29 #include <linux/buffer_head.h>
/*
 * udf_get_pblock - translate a (partition, block, offset) triple into an
 * absolute block number on the medium.
 *
 * Validates the partition index against the partition count recorded in the
 * UDF superblock info, then either delegates to the partition-type-specific
 * translation hook (installed for virtual/sparable partitions) or, for a
 * plain physical partition, returns a linear mapping from the partition
 * root.
 *
 * NOTE(review): this excerpt is missing some original lines (the error
 * return after the invalid-partition debug message and the closing braces),
 * so the value returned for an out-of-range partition cannot be confirmed
 * from this view.
 */
31 inline uint32_t udf_get_pblock(struct super_block *sb, uint32_t block,
32 uint16_t partition, uint32_t offset)
34 struct udf_sb_info *sbi = UDF_SB(sb);
35 struct udf_part_map *map;
/* Reject partition references outside the mounted partition table. */
36 if (partition >= sbi->s_partitions) {
37 udf_debug("block=%d, partition=%d, offset=%d: invalid partition\n",
38 block, partition, offset);
41 map = &sbi->s_partmaps[partition];
/* Virtual/sparable partitions install their own translation function. */
42 if (map->s_partition_func)
43 return map->s_partition_func(sb, block, partition, offset);
/* Physical partition: simple linear offset from the partition root. */
45 return map->s_partition_root + block + offset;
/*
 * udf_get_pblock_virt15 - translate a block in a UDF 1.5 virtual partition.
 *
 * Looks the virtual block number up in the Virtual Allocation Table (VAT):
 * computes which VAT block and which 32-bit slot within it hold the mapping,
 * reads that block through the VAT inode, and then resolves the fetched
 * location recursively via udf_get_pblock() against the partition the VAT
 * inode itself lives on.
 *
 * NOTE(review): several original lines are missing from this excerpt (local
 * declarations of index/loc/newblock, the else branch pairing, brelse of bh,
 * error returns), so the exact control flow between the two index
 * computations cannot be fully confirmed here.
 */
48 uint32_t udf_get_pblock_virt15(struct super_block *sb, uint32_t block,
49 uint16_t partition, uint32_t offset)
51 struct buffer_head *bh = NULL;
55 struct udf_sb_info *sbi = UDF_SB(sb);
56 struct udf_part_map *map;
58 map = &sbi->s_partmaps[partition];
/* Number of 32-bit VAT entries that fit in the first block after the header. */
59 index = (sb->s_blocksize - map->s_type_specific.s_virtual.s_start_offset) / sizeof(uint32_t);
/*
 * Bounds check against the VAT size.
 * NOTE(review): '>' permits block == s_num_entries; confirm whether the
 * entry range is inclusive or this is an off-by-one.
 */
61 if (block > map->s_type_specific.s_virtual.s_num_entries) {
62 udf_debug("Trying to access block beyond end of VAT (%d max %d)\n",
63 block, map->s_type_specific.s_virtual.s_num_entries);
/* Entry lies beyond the first VAT block: pick the later block and slot. */
69 newblock = 1 + (block / (sb->s_blocksize / sizeof(uint32_t)));
70 index = block % (sb->s_blocksize / sizeof(uint32_t));
/* Entry is in the first VAT block: skip past the VAT header. */
73 index = map->s_type_specific.s_virtual.s_start_offset / sizeof(uint32_t) + block;
/* Map the VAT-file-relative block to a device block and read it. */
76 loc = udf_block_map(sbi->s_vat_inode, newblock);
78 if (!(bh = sb_bread(sb, loc))) {
79 udf_debug("get_pblock(UDF_VIRTUAL_MAP:%p,%d,%d) VAT: %d[%d]\n",
80 sb, block, partition, loc, index);
/* Fetch the little-endian mapping entry for this virtual block. */
84 loc = le32_to_cpu(((__le32 *)bh->b_data)[index]);
/* Guard against the VAT mapping pointing back into the virtual partition. */
88 if (UDF_I_LOCATION(sbi->s_vat_inode).partitionReferenceNum == partition) {
89 udf_debug("recursive call to udf_get_pblock!\n");
/* Resolve the physical location via the partition holding the VAT inode. */
93 return udf_get_pblock(sb, loc,
94 UDF_I_LOCATION(sbi->s_vat_inode).partitionReferenceNum,
98 inline uint32_t udf_get_pblock_virt20(struct super_block * sb, uint32_t block,
99 uint16_t partition, uint32_t offset)
101 return udf_get_pblock_virt15(sb, block, partition, offset);
/*
 * udf_get_pblock_spar15 - translate a block in a UDF 1.5 sparable partition.
 *
 * Rounds the requested block down to its packet boundary and scans the
 * sparing table (sorted by origLocation) for a remapping of that packet.
 * If an entry matches, the mapped packet location plus the offset within
 * the packet is returned; otherwise the plain physical mapping from the
 * partition root is used. s_packet_len is assumed to be a power of two
 * (it is used as a bitmask).
 *
 * NOTE(review): loop-exit lines (break statements) and the declarations of
 * i/packet are missing from this excerpt; up to 4 cached sparing-table
 * copies exist and the first non-NULL one is used.
 */
104 uint32_t udf_get_pblock_spar15(struct super_block *sb, uint32_t block,
105 uint16_t partition, uint32_t offset)
108 struct sparingTable *st = NULL;
109 struct udf_sb_info *sbi = UDF_SB(sb);
110 struct udf_part_map *map;
113 map = &sbi->s_partmaps[partition];
/* Round (block + offset) down to the start of its packet. */
114 packet = (block + offset) & ~(map->s_type_specific.s_sparing.s_packet_len - 1);
/* Pick the first of up to 4 cached copies of the sparing table. */
116 for (i = 0; i < 4; i++) {
117 if (map->s_type_specific.s_sparing.s_spar_map[i] != NULL) {
118 st = (struct sparingTable *)map->s_type_specific.s_sparing.s_spar_map[i]->b_data;
124 for (i = 0; i < le16_to_cpu(st->reallocationTableLen); i++) {
/* Entries >= 0xFFFFFFF0 mark unused/defective slots, not mappings. */
125 if (le32_to_cpu(st->mapEntry[i].origLocation) >= 0xFFFFFFF0) {
/* Exact packet match: return remapped packet plus in-packet offset. */
127 } else if (le32_to_cpu(st->mapEntry[i].origLocation) == packet) {
128 return le32_to_cpu(st->mapEntry[i].mappedLocation) +
129 ((block + offset) & (map->s_type_specific.s_sparing.s_packet_len - 1));
/* Table is sorted; passing the packet means no remapping exists. */
130 } else if (le32_to_cpu(st->mapEntry[i].origLocation) > packet) {
/* No sparing entry applies: fall back to the linear physical mapping. */
136 return map->s_partition_root + block + offset;
/*
 * udf_relocate_blocks - remap a (possibly defective) block to a spare one.
 *
 * Finds the sparable partition containing old_block, rounds the block down
 * to its packet boundary, and searches the sparing table:
 *   - a free entry (origLocation == 0xFFFFFFFF) is claimed for the packet,
 *     the updated table is written back to every cached copy and its
 *     buffer marked dirty;
 *   - an existing entry for the packet yields the already-remapped
 *     location;
 *   - since the table is sorted by origLocation, once an entry greater
 *     than the packet is seen, a later free slot is moved into position k
 *     with memmove to keep the table sorted, then claimed.
 * In all cases *new_block receives the mapped packet location plus the
 * offset of old_block within its packet.
 *
 * NOTE(review): many lines are missing from this excerpt (declarations of
 * i/j/k/l/packet, return statements, loop breaks, the final error return),
 * so return codes cannot be confirmed here. Also note the containment test
 * uses 'old_block > s_partition_root' (strict), which excludes the
 * partition's first block — confirm against the full source whether that
 * is intentional.
 */
139 int udf_relocate_blocks(struct super_block *sb, long old_block, long *new_block)
141 struct udf_sparing_data *sdata;
142 struct sparingTable *st = NULL;
143 struct sparingEntry mapEntry;
146 struct udf_sb_info *sbi = UDF_SB(sb);
/* Locate the partition whose on-disk range contains old_block. */
148 for (i = 0; i < sbi->s_partitions; i++) {
149 struct udf_part_map *map = &sbi->s_partmaps[i];
150 if (old_block > map->s_partition_root &&
151 old_block < map->s_partition_root + map->s_partition_len) {
152 sdata = &map->s_type_specific.s_sparing;
/* Packet-align the partition-relative block (s_packet_len is a power of 2). */
153 packet = (old_block - map->s_partition_root) & ~(sdata->s_packet_len - 1);
/* Use the first available cached copy of the sparing table. */
155 for (j = 0; j < 4; j++) {
156 if (map->s_type_specific.s_sparing.s_spar_map[j] != NULL) {
157 st = (struct sparingTable *)sdata->s_spar_map[j]->b_data;
165 for (k = 0; k < le16_to_cpu(st->reallocationTableLen); k++) {
/* Free slot: claim it for this packet in every cached table copy. */
166 if (le32_to_cpu(st->mapEntry[k].origLocation) == 0xFFFFFFFF) {
168 if (sdata->s_spar_map[j]) {
169 st = (struct sparingTable *)sdata->s_spar_map[j]->b_data;
170 st->mapEntry[k].origLocation = cpu_to_le32(packet);
/* Recompute the table's descriptor tag checksum after the edit. */
171 udf_update_tag((char *)st, sizeof(struct sparingTable) + le16_to_cpu(st->reallocationTableLen) * sizeof(struct sparingEntry));
172 mark_buffer_dirty(sdata->s_spar_map[j]);
175 *new_block = le32_to_cpu(st->mapEntry[k].mappedLocation) +
176 ((old_block - map->s_partition_root) & (sdata->s_packet_len - 1));
/* Packet already remapped: return the existing mapping. */
178 } else if (le32_to_cpu(st->mapEntry[k].origLocation) == packet) {
179 *new_block = le32_to_cpu(st->mapEntry[k].mappedLocation) +
180 ((old_block - map->s_partition_root) & (sdata->s_packet_len - 1));
/* Passed the insertion point in the sorted table. */
182 } else if (le32_to_cpu(st->mapEntry[k].origLocation) > packet) {
/* Find a later free slot to move into position k. */
187 for (l = k; l < le16_to_cpu(st->reallocationTableLen); l++) {
188 if (le32_to_cpu(st->mapEntry[l].origLocation) == 0xFFFFFFFF) {
190 if (sdata->s_spar_map[j]) {
191 st = (struct sparingTable *)sdata->s_spar_map[j]->b_data;
192 mapEntry = st->mapEntry[l];
193 mapEntry.origLocation = cpu_to_le32(packet);
/* Shift entries k..l-1 up one slot to keep the table sorted. */
194 memmove(&st->mapEntry[k + 1], &st->mapEntry[k], (l - k) * sizeof(struct sparingEntry));
195 st->mapEntry[k] = mapEntry;
196 udf_update_tag((char *)st, sizeof(struct sparingTable) + le16_to_cpu(st->reallocationTableLen) * sizeof(struct sparingEntry));
197 mark_buffer_dirty(sdata->s_spar_map[j]);
200 *new_block = le32_to_cpu(st->mapEntry[k].mappedLocation) +
201 ((old_block - map->s_partition_root) & (sdata->s_packet_len - 1));
/* old_block fell outside every known partition. */
210 if (i == sbi->s_partitions) {
211 /* outside of partitions */
212 /* for now, fail =) */