/*
 * Partition handling routines for the OSTA-UDF(tm) filesystem.
 *
 * This file is distributed under the terms of the GNU General Public
 * License (GPL). Copies of the GPL can be obtained from:
 *	ftp://prep.ai.mit.edu/pub/gnu/GPL
 * Each contributing author retains all rights to their own work.
 *
 * (C) 1998-2001 Ben Fennema
 *
 * HISTORY
 *
 * 12/06/98 blf  Created file.
 */
26 #include <linux/string.h>
27 #include <linux/udf_fs.h>
28 #include <linux/slab.h>
29 #include <linux/buffer_head.h>
31 inline uint32_t udf_get_pblock(struct super_block *sb, uint32_t block,
32 uint16_t partition, uint32_t offset)
34 if (partition >= UDF_SB_NUMPARTS(sb)) {
35 udf_debug("block=%d, partition=%d, offset=%d: invalid partition\n",
36 block, partition, offset);
39 if (UDF_SB_PARTFUNC(sb, partition))
40 return UDF_SB_PARTFUNC(sb, partition)(sb, block, partition, offset);
42 return UDF_SB_PARTROOT(sb, partition) + block + offset;
45 uint32_t udf_get_pblock_virt15(struct super_block *sb, uint32_t block,
46 uint16_t partition, uint32_t offset)
48 struct buffer_head *bh = NULL;
53 index = (sb->s_blocksize - UDF_SB_TYPEVIRT(sb,partition).s_start_offset) / sizeof(uint32_t);
55 if (block > UDF_SB_TYPEVIRT(sb,partition).s_num_entries) {
56 udf_debug("Trying to access block beyond end of VAT (%d max %d)\n",
57 block, UDF_SB_TYPEVIRT(sb,partition).s_num_entries);
63 newblock = 1 + (block / (sb->s_blocksize / sizeof(uint32_t)));
64 index = block % (sb->s_blocksize / sizeof(uint32_t));
67 index = UDF_SB_TYPEVIRT(sb,partition).s_start_offset / sizeof(uint32_t) + block;
70 loc = udf_block_map(UDF_SB_VAT(sb), newblock);
72 if (!(bh = sb_bread(sb, loc))) {
73 udf_debug("get_pblock(UDF_VIRTUAL_MAP:%p,%d,%d) VAT: %d[%d]\n",
74 sb, block, partition, loc, index);
78 loc = le32_to_cpu(((__le32 *)bh->b_data)[index]);
82 if (UDF_I_LOCATION(UDF_SB_VAT(sb)).partitionReferenceNum == partition) {
83 udf_debug("recursive call to udf_get_pblock!\n");
87 return udf_get_pblock(sb, loc,
88 UDF_I_LOCATION(UDF_SB_VAT(sb)).partitionReferenceNum,
/*
 * udf_get_pblock_virt20 - resolve a block through a UDF 2.00 VAT.
 *
 * The UDF 2.00 VAT differs from 1.50 only in on-disk layout details that
 * are absorbed by s_start_offset at mount time, so the 1.50 lookup is
 * reused unchanged.
 */
inline uint32_t udf_get_pblock_virt20(struct super_block *sb, uint32_t block,
				      uint16_t partition, uint32_t offset)
{
	return udf_get_pblock_virt15(sb, block, partition, offset);
}
98 uint32_t udf_get_pblock_spar15(struct super_block * sb, uint32_t block,
99 uint16_t partition, uint32_t offset)
102 struct sparingTable *st = NULL;
103 uint32_t packet = (block + offset) & ~(UDF_SB_TYPESPAR(sb,partition).s_packet_len - 1);
105 for (i = 0; i < 4; i++) {
106 if (UDF_SB_TYPESPAR(sb,partition).s_spar_map[i] != NULL) {
107 st = (struct sparingTable *)UDF_SB_TYPESPAR(sb,partition).s_spar_map[i]->b_data;
113 for (i = 0; i < le16_to_cpu(st->reallocationTableLen); i++) {
114 if (le32_to_cpu(st->mapEntry[i].origLocation) >= 0xFFFFFFF0) {
116 } else if (le32_to_cpu(st->mapEntry[i].origLocation) == packet) {
117 return le32_to_cpu(st->mapEntry[i].mappedLocation) +
118 ((block + offset) & (UDF_SB_TYPESPAR(sb,partition).s_packet_len - 1));
119 } else if (le32_to_cpu(st->mapEntry[i].origLocation) > packet) {
125 return UDF_SB_PARTROOT(sb,partition) + block + offset;
128 int udf_relocate_blocks(struct super_block *sb, long old_block, long *new_block)
130 struct udf_sparing_data *sdata;
131 struct sparingTable *st = NULL;
132 struct sparingEntry mapEntry;
136 for (i = 0; i < UDF_SB_NUMPARTS(sb); i++) {
137 if (old_block > UDF_SB_PARTROOT(sb,i) &&
138 old_block < UDF_SB_PARTROOT(sb,i) + UDF_SB_PARTLEN(sb,i)) {
139 sdata = &UDF_SB_TYPESPAR(sb,i);
140 packet = (old_block - UDF_SB_PARTROOT(sb,i)) & ~(sdata->s_packet_len - 1);
142 for (j = 0; j < 4; j++) {
143 if (UDF_SB_TYPESPAR(sb,i).s_spar_map[j] != NULL) {
144 st = (struct sparingTable *)sdata->s_spar_map[j]->b_data;
152 for (k = 0; k < le16_to_cpu(st->reallocationTableLen); k++) {
153 if (le32_to_cpu(st->mapEntry[k].origLocation) == 0xFFFFFFFF) {
155 if (sdata->s_spar_map[j]) {
156 st = (struct sparingTable *)sdata->s_spar_map[j]->b_data;
157 st->mapEntry[k].origLocation = cpu_to_le32(packet);
158 udf_update_tag((char *)st, sizeof(struct sparingTable) + le16_to_cpu(st->reallocationTableLen) * sizeof(struct sparingEntry));
159 mark_buffer_dirty(sdata->s_spar_map[j]);
162 *new_block = le32_to_cpu(st->mapEntry[k].mappedLocation) +
163 ((old_block - UDF_SB_PARTROOT(sb,i)) & (sdata->s_packet_len - 1));
165 } else if (le32_to_cpu(st->mapEntry[k].origLocation) == packet) {
166 *new_block = le32_to_cpu(st->mapEntry[k].mappedLocation) +
167 ((old_block - UDF_SB_PARTROOT(sb,i)) & (sdata->s_packet_len - 1));
169 } else if (le32_to_cpu(st->mapEntry[k].origLocation) > packet) {
174 for (l = k; l < le16_to_cpu(st->reallocationTableLen); l++) {
175 if (le32_to_cpu(st->mapEntry[l].origLocation) == 0xFFFFFFFF) {
177 if (sdata->s_spar_map[j]) {
178 st = (struct sparingTable *)sdata->s_spar_map[j]->b_data;
179 mapEntry = st->mapEntry[l];
180 mapEntry.origLocation = cpu_to_le32(packet);
181 memmove(&st->mapEntry[k + 1], &st->mapEntry[k], (l - k) * sizeof(struct sparingEntry));
182 st->mapEntry[k] = mapEntry;
183 udf_update_tag((char *)st, sizeof(struct sparingTable) + le16_to_cpu(st->reallocationTableLen) * sizeof(struct sparingEntry));
184 mark_buffer_dirty(sdata->s_spar_map[j]);
187 *new_block = le32_to_cpu(st->mapEntry[k].mappedLocation) +
188 ((old_block - UDF_SB_PARTROOT(sb,i)) & (sdata->s_packet_len - 1));
197 if (i == UDF_SB_NUMPARTS(sb)) {
198 /* outside of partitions */
199 /* for now, fail =) */