udf: create common function for tag checksumming
fs/udf/balloc.c
1 /*
2  * balloc.c
3  *
4  * PURPOSE
5  *      Block allocation handling routines for the OSTA-UDF(tm) filesystem.
6  *
7  * COPYRIGHT
8  *      This file is distributed under the terms of the GNU General Public
9  *      License (GPL). Copies of the GPL can be obtained from:
10  *              ftp://prep.ai.mit.edu/pub/gnu/GPL
11  *      Each contributing author retains all rights to their own work.
12  *
13  *  (C) 1999-2001 Ben Fennema
14  *  (C) 1999 Stelias Computing Inc
15  *
16  * HISTORY
17  *
18  *  02/24/99 blf  Created.
19  *
20  */
21
22 #include "udfdecl.h"
23
24 #include <linux/quotaops.h>
25 #include <linux/buffer_head.h>
26 #include <linux/bitops.h>
27
28 #include "udf_i.h"
29 #include "udf_sb.h"
30
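/*
 * UDF space bitmaps use little-endian bit ordering, so the ext2
 * little-endian bit operations can be reused directly.
 */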
31 #define udf_clear_bit(nr, addr) ext2_clear_bit(nr, addr)
32 #define udf_set_bit(nr, addr) ext2_set_bit(nr, addr)
33 #define udf_test_bit(nr, addr) ext2_test_bit(nr, addr)
34 #define udf_find_first_one_bit(addr, size) find_first_one_bit(addr, size)
35 #define udf_find_next_one_bit(addr, size, offset) \
36                 find_next_one_bit(addr, size, offset)
37
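/*
 * The helpers below expand to the __le type and le*_to_cpup() variant
 * that match the native word size (BITS_PER_LONG), so the bitmap can
 * be scanned one long at a time.
 */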
38 #define leBPL_to_cpup(x) leNUM_to_cpup(BITS_PER_LONG, x)
39 #define leNUM_to_cpup(x, y) xleNUM_to_cpup(x, y)
40 #define xleNUM_to_cpup(x, y) (le ## x ## _to_cpup(y))
41 #define uintBPL_t uint(BITS_PER_LONG)
42 #define uint(x) xuint(x)
43 #define xuint(x) __le ## x
44
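/*
 * Scan a little-endian bitmap for the next set bit at or after @offset;
 * this is the little-endian counterpart of find_next_bit().  Returns
 * @size when @offset is already past the end of the bitmap.
 */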
45 static inline int find_next_one_bit(void *addr, int size, int offset)
46 {
47         uintBPL_t *p = ((uintBPL_t *) addr) + (offset / BITS_PER_LONG);
48         int result = offset & ~(BITS_PER_LONG - 1);
49         unsigned long tmp;
50
51         if (offset >= size)
52                 return size;
53         size -= result;
54         offset &= (BITS_PER_LONG - 1);
55         if (offset) {
56                 tmp = leBPL_to_cpup(p++);
57                 tmp &= ~0UL << offset;
58                 if (size < BITS_PER_LONG)
59                         goto found_first;
60                 if (tmp)
61                         goto found_middle;
62                 size -= BITS_PER_LONG;
63                 result += BITS_PER_LONG;
64         }
65         while (size & ~(BITS_PER_LONG - 1)) {
66                 tmp = leBPL_to_cpup(p++);
67                 if (tmp)
68                         goto found_middle;
69                 result += BITS_PER_LONG;
70                 size -= BITS_PER_LONG;
71         }
72         if (!size)
73                 return result;
74         tmp = leBPL_to_cpup(p);
75 found_first:
76         tmp &= ~0UL >> (BITS_PER_LONG - size);
77 found_middle:
78         return result + ffz(~tmp);
79 }
80
81 #define find_first_one_bit(addr, size)\
82         find_next_one_bit((addr), (size), 0)
83
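/*
 * Read block @block of the space bitmap from disk and cache the
 * buffer_head in bitmap->s_block_bitmap[bitmap_nr].  Returns -EIO if
 * the read fails.
 */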
84 static int read_block_bitmap(struct super_block *sb,
85                              struct udf_bitmap *bitmap, unsigned int block,
86                              unsigned long bitmap_nr)
87 {
88         struct buffer_head *bh = NULL;
89         int retval = 0;
90         kernel_lb_addr loc;
91
92         loc.logicalBlockNum = bitmap->s_extPosition;
93         loc.partitionReferenceNum = UDF_SB(sb)->s_partition;
94
95         bh = udf_tread(sb, udf_get_lb_pblock(sb, loc, block));
96         if (!bh)
97                 retval = -EIO;
98
99         bitmap->s_block_bitmap[bitmap_nr] = bh;
100         return retval;
101 }
102
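/*
 * Make sure the bitmap block for @block_group is present in the cache,
 * reading it from disk if necessary.  Returns the slot that holds it
 * (the block group number) or a negative error code.
 */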
103 static int __load_block_bitmap(struct super_block *sb,
104                                struct udf_bitmap *bitmap,
105                                unsigned int block_group)
106 {
107         int retval = 0;
108         int nr_groups = bitmap->s_nr_groups;
109
110         if (block_group >= nr_groups) {
111                 udf_debug("block_group (%d) >= nr_groups (%d)\n", block_group,
112                           nr_groups);
113         }
114
115         if (bitmap->s_block_bitmap[block_group]) {
116                 return block_group;
117         } else {
118                 retval = read_block_bitmap(sb, bitmap, block_group,
119                                            block_group);
120                 if (retval < 0)
121                         return retval;
122                 return block_group;
123         }
124 }
125
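/*
 * Wrapper around __load_block_bitmap() that additionally checks that
 * the cached buffer_head really exists, returning -EIO when it does not.
 */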
126 static inline int load_block_bitmap(struct super_block *sb,
127                                     struct udf_bitmap *bitmap,
128                                     unsigned int block_group)
129 {
130         int slot;
131
132         slot = __load_block_bitmap(sb, bitmap, block_group);
133
134         if (slot < 0)
135                 return slot;
136
137         if (!bitmap->s_block_bitmap[slot])
138                 return -EIO;
139
140         return slot;
141 }
142
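/*
 * Free @count blocks starting at @bloc + @offset by setting the matching
 * bits in the space bitmap.  Quota and the free space count in the
 * logical volume integrity descriptor are updated for every block that
 * was actually in use.
 */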
143 static void udf_bitmap_free_blocks(struct super_block *sb,
144                                    struct inode *inode,
145                                    struct udf_bitmap *bitmap,
146                                    kernel_lb_addr bloc, uint32_t offset,
147                                    uint32_t count)
148 {
149         struct udf_sb_info *sbi = UDF_SB(sb);
150         struct buffer_head *bh = NULL;
151         unsigned long block;
152         unsigned long block_group;
153         unsigned long bit;
154         unsigned long i;
155         int bitmap_nr;
156         unsigned long overflow;
157
158         mutex_lock(&sbi->s_alloc_mutex);
159         if (bloc.logicalBlockNum < 0 ||
160             (bloc.logicalBlockNum + count) >
161                 sbi->s_partmaps[bloc.partitionReferenceNum].s_partition_len) {
162                 udf_debug("%d < %d || %d + %d > %d\n",
163                           bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
164                           sbi->s_partmaps[bloc.partitionReferenceNum].
165                                                         s_partition_len);
166                 goto error_return;
167         }
168
169         block = bloc.logicalBlockNum + offset +
170                 (sizeof(struct spaceBitmapDesc) << 3);
171
172 do_more:
173         overflow = 0;
174         block_group = block >> (sb->s_blocksize_bits + 3);
175         bit = block % (sb->s_blocksize << 3);
176
177         /*
178          * Check to see if we are freeing blocks across a group boundary.
179          */
180         if (bit + count > (sb->s_blocksize << 3)) {
181                 overflow = bit + count - (sb->s_blocksize << 3);
182                 count -= overflow;
183         }
184         bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
185         if (bitmap_nr < 0)
186                 goto error_return;
187
188         bh = bitmap->s_block_bitmap[bitmap_nr];
189         for (i = 0; i < count; i++) {
190                 if (udf_set_bit(bit + i, bh->b_data)) {
191                         udf_debug("bit %ld already set\n", bit + i);
192                         udf_debug("byte=%2x\n",
193                                   ((char *)bh->b_data)[(bit + i) >> 3]);
194                 } else {
195                         if (inode)
196                                 DQUOT_FREE_BLOCK(inode, 1);
197                         if (sbi->s_lvid_bh) {
198                                 struct logicalVolIntegrityDesc *lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
199                                 lvid->freeSpaceTable[sbi->s_partition] =
200                                         cpu_to_le32(le32_to_cpu(lvid->freeSpaceTable[sbi->s_partition]) + 1);
201                         }
202                 }
203         }
204         mark_buffer_dirty(bh);
205         if (overflow) {
206                 block += count;
207                 count = overflow;
208                 goto do_more;
209         }
210 error_return:
211         sb->s_dirt = 1;
212         if (sbi->s_lvid_bh)
213                 mark_buffer_dirty(sbi->s_lvid_bh);
214         mutex_unlock(&sbi->s_alloc_mutex);
215         return;
216 }
217
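/*
 * Preallocate up to @block_count blocks starting at @first_block by
 * clearing consecutive bits in the space bitmap.  The scan stops at the
 * first block that is already allocated; the number of blocks actually
 * reserved is returned.
 */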
218 static int udf_bitmap_prealloc_blocks(struct super_block *sb,
219                                       struct inode *inode,
220                                       struct udf_bitmap *bitmap,
221                                       uint16_t partition, uint32_t first_block,
222                                       uint32_t block_count)
223 {
224         struct udf_sb_info *sbi = UDF_SB(sb);
225         int alloc_count = 0;
226         int bit, block, block_group, group_start;
227         int nr_groups, bitmap_nr;
228         struct buffer_head *bh;
229         __u32 part_len;
230
231         mutex_lock(&sbi->s_alloc_mutex);
232         part_len = sbi->s_partmaps[partition].s_partition_len;
233         if (first_block < 0 || first_block >= part_len)
234                 goto out;
235
236         if (first_block + block_count > part_len)
237                 block_count = part_len - first_block;
238
239 repeat:
240         nr_groups = udf_compute_nr_groups(sb, partition);
241         block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
242         block_group = block >> (sb->s_blocksize_bits + 3);
243         group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);
244
245         bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
246         if (bitmap_nr < 0)
247                 goto out;
248         bh = bitmap->s_block_bitmap[bitmap_nr];
249
250         bit = block % (sb->s_blocksize << 3);
251
252         while (bit < (sb->s_blocksize << 3) && block_count > 0) {
253                 if (!udf_test_bit(bit, bh->b_data)) {
254                         goto out;
255                 } else if (DQUOT_PREALLOC_BLOCK(inode, 1)) {
256                         goto out;
257                 } else if (!udf_clear_bit(bit, bh->b_data)) {
258                         udf_debug("bit already cleared for block %d\n", bit);
259                         DQUOT_FREE_BLOCK(inode, 1);
260                         goto out;
261                 }
262                 block_count--;
263                 alloc_count++;
264                 bit++;
265                 block++;
266         }
267         mark_buffer_dirty(bh);
268         if (block_count > 0)
269                 goto repeat;
270 out:
271         if (sbi->s_lvid_bh) {
272                 struct logicalVolIntegrityDesc *lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
273                 lvid->freeSpaceTable[partition] =
274                         cpu_to_le32(le32_to_cpu(lvid->freeSpaceTable[partition]) - alloc_count);
275                 mark_buffer_dirty(sbi->s_lvid_bh);
276         }
277         sb->s_dirt = 1;
278         mutex_unlock(&sbi->s_alloc_mutex);
279         return alloc_count;
280 }
281
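/*
 * Allocate a single block, preferably near @goal.  The space bitmap is
 * scanned for a free bit, walking back up to seven bits so allocations
 * tend to start at the beginning of a free byte.  Returns the new block
 * number, or 0 with *err set on failure.
 */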
282 static int udf_bitmap_new_block(struct super_block *sb,
283                                 struct inode *inode,
284                                 struct udf_bitmap *bitmap, uint16_t partition,
285                                 uint32_t goal, int *err)
286 {
287         struct udf_sb_info *sbi = UDF_SB(sb);
288         int newbit, bit = 0, block, block_group, group_start;
289         int end_goal, nr_groups, bitmap_nr, i;
290         struct buffer_head *bh = NULL;
291         char *ptr;
292         int newblock = 0;
293
294         *err = -ENOSPC;
295         mutex_lock(&sbi->s_alloc_mutex);
296
297 repeat:
298         if (goal < 0 || goal >= sbi->s_partmaps[partition].s_partition_len)
299                 goal = 0;
300
301         nr_groups = bitmap->s_nr_groups;
302         block = goal + (sizeof(struct spaceBitmapDesc) << 3);
303         block_group = block >> (sb->s_blocksize_bits + 3);
304         group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);
305
306         bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
307         if (bitmap_nr < 0)
308                 goto error_return;
309         bh = bitmap->s_block_bitmap[bitmap_nr];
310         ptr = memscan((char *)bh->b_data + group_start, 0xFF,
311                       sb->s_blocksize - group_start);
312
313         if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
314                 bit = block % (sb->s_blocksize << 3);
315                 if (udf_test_bit(bit, bh->b_data))
316                         goto got_block;
317
318                 end_goal = (bit + 63) & ~63;
319                 bit = udf_find_next_one_bit(bh->b_data, end_goal, bit);
320                 if (bit < end_goal)
321                         goto got_block;
322
323                 ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF,
324                               sb->s_blocksize - ((bit + 7) >> 3));
325                 newbit = (ptr - ((char *)bh->b_data)) << 3;
326                 if (newbit < sb->s_blocksize << 3) {
327                         bit = newbit;
328                         goto search_back;
329                 }
330
331                 newbit = udf_find_next_one_bit(bh->b_data,
332                                                sb->s_blocksize << 3, bit);
333                 if (newbit < sb->s_blocksize << 3) {
334                         bit = newbit;
335                         goto got_block;
336                 }
337         }
338
339         for (i = 0; i < (nr_groups * 2); i++) {
340                 block_group++;
341                 if (block_group >= nr_groups)
342                         block_group = 0;
343                 group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);
344
345                 bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
346                 if (bitmap_nr < 0)
347                         goto error_return;
348                 bh = bitmap->s_block_bitmap[bitmap_nr];
349                 if (i < nr_groups) {
350                         ptr = memscan((char *)bh->b_data + group_start, 0xFF,
351                                       sb->s_blocksize - group_start);
352                         if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
353                                 bit = (ptr - ((char *)bh->b_data)) << 3;
354                                 break;
355                         }
356                 } else {
357                         bit = udf_find_next_one_bit((char *)bh->b_data,
358                                                     sb->s_blocksize << 3,
359                                                     group_start << 3);
360                         if (bit < sb->s_blocksize << 3)
361                                 break;
362                 }
363         }
364         if (i >= (nr_groups * 2)) {
365                 mutex_unlock(&sbi->s_alloc_mutex);
366                 return newblock;
367         }
368         if (bit < sb->s_blocksize << 3)
369                 goto search_back;
370         else
371                 bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3,
372                                             group_start << 3);
373         if (bit >= sb->s_blocksize << 3) {
374                 mutex_unlock(&sbi->s_alloc_mutex);
375                 return 0;
376         }
377
378 search_back:
379         i = 0;
380         while (i < 7 && bit > (group_start << 3) &&
381                udf_test_bit(bit - 1, bh->b_data)) {
382                 ++i;
383                 --bit;
384         }
385
386 got_block:
387
388         /*
389          * Check quota for allocation of this block.
390          */
391         if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) {
392                 mutex_unlock(&sbi->s_alloc_mutex);
393                 *err = -EDQUOT;
394                 return 0;
395         }
396
397         newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) -
398                 (sizeof(struct spaceBitmapDesc) << 3);
399
400         if (!udf_clear_bit(bit, bh->b_data)) {
401                 udf_debug("bit already cleared for block %d\n", bit);
402                 goto repeat;
403         }
404
405         mark_buffer_dirty(bh);
406
407         if (sbi->s_lvid_bh) {
408                 struct logicalVolIntegrityDesc *lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
409                 lvid->freeSpaceTable[partition] =
410                         cpu_to_le32(le32_to_cpu(lvid->freeSpaceTable[partition]) - 1);
411                 mark_buffer_dirty(sbi->s_lvid_bh);
412         }
413         sb->s_dirt = 1;
414         mutex_unlock(&sbi->s_alloc_mutex);
415         *err = 0;
416         return newblock;
417
418 error_return:
419         *err = -EIO;
420         mutex_unlock(&sbi->s_alloc_mutex);
421         return 0;
422 }
423
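/*
 * Free @count blocks starting at @bloc + @offset when free space is
 * tracked by an unallocated space table.  The range is merged into an
 * adjacent free extent where possible; otherwise a new extent is added,
 * stealing one of the freed blocks for a fresh allocation extent
 * descriptor if the current descriptor block is full.
 */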
424 static void udf_table_free_blocks(struct super_block *sb,
425                                   struct inode *inode,
426                                   struct inode *table,
427                                   kernel_lb_addr bloc, uint32_t offset,
428                                   uint32_t count)
429 {
430         struct udf_sb_info *sbi = UDF_SB(sb);
431         uint32_t start, end;
432         uint32_t elen;
433         kernel_lb_addr eloc;
434         struct extent_position oepos, epos;
435         int8_t etype;
436         int i;
437
438         mutex_lock(&sbi->s_alloc_mutex);
439         if (bloc.logicalBlockNum < 0 ||
440             (bloc.logicalBlockNum + count) >
441                 sbi->s_partmaps[bloc.partitionReferenceNum].s_partition_len) {
442                 udf_debug("%d < %d || %d + %d > %d\n",
443                           bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
444                           sbi->s_partmaps[bloc.partitionReferenceNum].
445                                                         s_partition_len);
446                 goto error_return;
447         }
448
449         /* We do this up front - there are some error conditions that
450            could occur, but.. oh well */
451         if (inode)
452                 DQUOT_FREE_BLOCK(inode, count);
453         if (sbi->s_lvid_bh) {
454                 struct logicalVolIntegrityDesc *lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
455                 lvid->freeSpaceTable[sbi->s_partition] =
456                         cpu_to_le32(le32_to_cpu(lvid->freeSpaceTable[sbi->s_partition]) + count);
457                 mark_buffer_dirty(sbi->s_lvid_bh);
458         }
459
460         start = bloc.logicalBlockNum + offset;
461         end = bloc.logicalBlockNum + offset + count - 1;
462
463         epos.offset = oepos.offset = sizeof(struct unallocSpaceEntry);
464         elen = 0;
465         epos.block = oepos.block = UDF_I_LOCATION(table);
466         epos.bh = oepos.bh = NULL;
467
468         while (count &&
469                (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
470                 if (((eloc.logicalBlockNum +
471                         (elen >> sb->s_blocksize_bits)) == start)) {
472                         if ((0x3FFFFFFF - elen) <
473                                         (count << sb->s_blocksize_bits)) {
474                                 uint32_t tmp = ((0x3FFFFFFF - elen) >>
475                                                         sb->s_blocksize_bits);
476                                 count -= tmp;
477                                 start += tmp;
478                                 elen = (etype << 30) |
479                                         (0x40000000 - sb->s_blocksize);
480                         } else {
481                                 elen = (etype << 30) |
482                                         (elen +
483                                         (count << sb->s_blocksize_bits));
484                                 start += count;
485                                 count = 0;
486                         }
487                         udf_write_aext(table, &oepos, eloc, elen, 1);
488                 } else if (eloc.logicalBlockNum == (end + 1)) {
489                         if ((0x3FFFFFFF - elen) <
490                                         (count << sb->s_blocksize_bits)) {
491                                 uint32_t tmp = ((0x3FFFFFFF - elen) >>
492                                                 sb->s_blocksize_bits);
493                                 count -= tmp;
494                                 end -= tmp;
495                                 eloc.logicalBlockNum -= tmp;
496                                 elen = (etype << 30) |
497                                         (0x40000000 - sb->s_blocksize);
498                         } else {
499                                 eloc.logicalBlockNum = start;
500                                 elen = (etype << 30) |
501                                         (elen +
502                                         (count << sb->s_blocksize_bits));
503                                 end -= count;
504                                 count = 0;
505                         }
506                         udf_write_aext(table, &oepos, eloc, elen, 1);
507                 }
508
509                 if (epos.bh != oepos.bh) {
510                         i = -1;
511                         oepos.block = epos.block;
512                         brelse(oepos.bh);
513                         get_bh(epos.bh);
514                         oepos.bh = epos.bh;
515                         oepos.offset = 0;
516                 } else {
517                         oepos.offset = epos.offset;
518                 }
519         }
520
521         if (count) {
522                 /*
523                  * NOTE: we CANNOT use udf_add_aext here, as it can try to
524                  * allocate a new block, and since we already hold the super
525                  * block lock, very bad things would happen :)
526                  *
527                  * We copy the behavior of udf_add_aext, but instead of
528                  * trying to allocate a new block close to the existing one,
529                  * we just steal a block from the extent we are trying to add.
530                  *
531                  * It would be nice if the blocks were close together, but it
532                  * isn't required.
533                  */
534
535                 int adsize;
536                 short_ad *sad = NULL;
537                 long_ad *lad = NULL;
538                 struct allocExtDesc *aed;
539
540                 eloc.logicalBlockNum = start;
541                 elen = EXT_RECORDED_ALLOCATED |
542                         (count << sb->s_blocksize_bits);
543
544                 if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT) {
545                         adsize = sizeof(short_ad);
546                 } else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG) {
547                         adsize = sizeof(long_ad);
548                 } else {
549                         brelse(oepos.bh);
550                         brelse(epos.bh);
551                         goto error_return;
552                 }
553
554                 if (epos.offset + (2 * adsize) > sb->s_blocksize) {
555                         char *sptr, *dptr;
556                         int loffset;
557
558                         brelse(oepos.bh);
559                         oepos = epos;
560
561                         /* Steal a block from the extent being freed */
562                         epos.block.logicalBlockNum = eloc.logicalBlockNum;
563                         eloc.logicalBlockNum++;
564                         elen -= sb->s_blocksize;
565
566                         epos.bh = udf_tread(sb,
567                                         udf_get_lb_pblock(sb, epos.block, 0));
568                         if (!epos.bh) {
569                                 brelse(oepos.bh);
570                                 goto error_return;
571                         }
572                         aed = (struct allocExtDesc *)(epos.bh->b_data);
573                         aed->previousAllocExtLocation =
574                                 cpu_to_le32(oepos.block.logicalBlockNum);
575                         if (epos.offset + adsize > sb->s_blocksize) {
576                                 loffset = epos.offset;
577                                 aed->lengthAllocDescs = cpu_to_le32(adsize);
578                                 sptr = UDF_I_DATA(table) + epos.offset - adsize;
579                                 dptr = epos.bh->b_data +
580                                         sizeof(struct allocExtDesc);
581                                 memcpy(dptr, sptr, adsize);
582                                 epos.offset = sizeof(struct allocExtDesc) +
583                                                 adsize;
584                         } else {
585                                 loffset = epos.offset + adsize;
586                                 aed->lengthAllocDescs = cpu_to_le32(0);
587                                 if (oepos.bh) {
588                                         sptr = oepos.bh->b_data + epos.offset;
589                                         aed = (struct allocExtDesc *)
590                                                 oepos.bh->b_data;
591                                         aed->lengthAllocDescs =
592                                                 cpu_to_le32(le32_to_cpu(
593                                                         aed->lengthAllocDescs) +
594                                                                 adsize);
595                                 } else {
596                                         sptr = UDF_I_DATA(table) + epos.offset;
597                                         UDF_I_LENALLOC(table) += adsize;
598                                         mark_inode_dirty(table);
599                                 }
600                                 epos.offset = sizeof(struct allocExtDesc);
601                         }
602                         if (sbi->s_udfrev >= 0x0200)
603                                 udf_new_tag(epos.bh->b_data, TAG_IDENT_AED,
604                                             3, 1, epos.block.logicalBlockNum,
605                                             sizeof(tag));
606                         else
607                                 udf_new_tag(epos.bh->b_data, TAG_IDENT_AED,
608                                             2, 1, epos.block.logicalBlockNum,
609                                             sizeof(tag));
610
611                         switch (UDF_I_ALLOCTYPE(table)) {
612                         case ICBTAG_FLAG_AD_SHORT:
613                                 sad = (short_ad *)sptr;
614                                 sad->extLength = cpu_to_le32(
615                                         EXT_NEXT_EXTENT_ALLOCDECS |
616                                         sb->s_blocksize);
617                                 sad->extPosition =
618                                         cpu_to_le32(epos.block.logicalBlockNum);
619                                 break;
620                         case ICBTAG_FLAG_AD_LONG:
621                                 lad = (long_ad *)sptr;
622                                 lad->extLength = cpu_to_le32(
623                                         EXT_NEXT_EXTENT_ALLOCDECS |
624                                         sb->s_blocksize);
625                                 lad->extLocation =
626                                         cpu_to_lelb(epos.block);
627                                 break;
628                         }
629                         if (oepos.bh) {
630                                 udf_update_tag(oepos.bh->b_data, loffset);
631                                 mark_buffer_dirty(oepos.bh);
632                         } else {
633                                 mark_inode_dirty(table);
634                         }
635                 }
636
637                 /* It's possible that stealing the block emptied the extent */
638                 if (elen) {
639                         udf_write_aext(table, &epos, eloc, elen, 1);
640
641                         if (!epos.bh) {
642                                 UDF_I_LENALLOC(table) += adsize;
643                                 mark_inode_dirty(table);
644                         } else {
645                                 aed = (struct allocExtDesc *)epos.bh->b_data;
646                                 aed->lengthAllocDescs =
647                                         cpu_to_le32(le32_to_cpu(
648                                             aed->lengthAllocDescs) + adsize);
649                                 udf_update_tag(epos.bh->b_data, epos.offset);
650                                 mark_buffer_dirty(epos.bh);
651                         }
652                 }
653         }
654
655         brelse(epos.bh);
656         brelse(oepos.bh);
657
658 error_return:
659         sb->s_dirt = 1;
660         mutex_unlock(&sbi->s_alloc_mutex);
661         return;
662 }
663
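/*
 * Preallocate up to @block_count blocks from the free space table by
 * locating the free extent that starts at @first_block and shrinking
 * (or deleting) it.  Returns the number of blocks actually reserved.
 */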
664 static int udf_table_prealloc_blocks(struct super_block *sb,
665                                      struct inode *inode,
666                                      struct inode *table, uint16_t partition,
667                                      uint32_t first_block, uint32_t block_count)
668 {
669         struct udf_sb_info *sbi = UDF_SB(sb);
670         int alloc_count = 0;
671         uint32_t elen, adsize;
672         kernel_lb_addr eloc;
673         struct extent_position epos;
674         int8_t etype = -1;
675
676         if (first_block < 0 ||
677                 first_block >= sbi->s_partmaps[partition].s_partition_len)
678                 return 0;
679
680         if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT)
681                 adsize = sizeof(short_ad);
682         else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG)
683                 adsize = sizeof(long_ad);
684         else
685                 return 0;
686
687         mutex_lock(&sbi->s_alloc_mutex);
688         epos.offset = sizeof(struct unallocSpaceEntry);
689         epos.block = UDF_I_LOCATION(table);
690         epos.bh = NULL;
691         eloc.logicalBlockNum = 0xFFFFFFFF;
692
693         while (first_block != eloc.logicalBlockNum &&
694                (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
695                 udf_debug("eloc=%d, elen=%d, first_block=%d\n",
696                           eloc.logicalBlockNum, elen, first_block);
697                 ; /* loop body only emits debug output */
698         }
699
700         if (first_block == eloc.logicalBlockNum) {
701                 epos.offset -= adsize;
702
703                 alloc_count = (elen >> sb->s_blocksize_bits);
704                 if (inode && DQUOT_PREALLOC_BLOCK(inode,
705                         alloc_count > block_count ? block_count : alloc_count))
706                         alloc_count = 0;
707                 else if (alloc_count > block_count) {
708                         alloc_count = block_count;
709                         eloc.logicalBlockNum += alloc_count;
710                         elen -= (alloc_count << sb->s_blocksize_bits);
711                         udf_write_aext(table, &epos, eloc,
712                                         (etype << 30) | elen, 1);
713                 } else
714                         udf_delete_aext(table, epos, eloc,
715                                         (etype << 30) | elen);
716         } else {
717                 alloc_count = 0;
718         }
719
720         brelse(epos.bh);
721
722         if (alloc_count && sbi->s_lvid_bh) {
723                 struct logicalVolIntegrityDesc *lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
724                 lvid->freeSpaceTable[partition] =
725                         cpu_to_le32(le32_to_cpu(lvid->freeSpaceTable[partition]) - alloc_count);
726                 mark_buffer_dirty(sbi->s_lvid_bh);
727                 sb->s_dirt = 1;
728         }
729         mutex_unlock(&sbi->s_alloc_mutex);
730         return alloc_count;
731 }
732
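/*
 * Allocate a single block from the free space table.  The free extent
 * containing @goal (or, failing that, the one closest to it) is chosen,
 * its first block is handed out, and the extent is shrunk or deleted
 * accordingly.
 */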
733 static int udf_table_new_block(struct super_block *sb,
734                                struct inode *inode,
735                                struct inode *table, uint16_t partition,
736                                uint32_t goal, int *err)
737 {
738         struct udf_sb_info *sbi = UDF_SB(sb);
739         uint32_t spread = 0xFFFFFFFF, nspread = 0xFFFFFFFF;
740         uint32_t newblock = 0, adsize;
741         uint32_t elen, goal_elen = 0;
742         kernel_lb_addr eloc, uninitialized_var(goal_eloc);
743         struct extent_position epos, goal_epos;
744         int8_t etype;
745
746         *err = -ENOSPC;
747
748         if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT)
749                 adsize = sizeof(short_ad);
750         else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG)
751                 adsize = sizeof(long_ad);
752         else
753                 return newblock;
754
755         mutex_lock(&sbi->s_alloc_mutex);
756         if (goal < 0 || goal >= sbi->s_partmaps[partition].s_partition_len)
757                 goal = 0;
758
759         /* We search for the closest matching block to goal. If we find
760            an exact hit, we stop. Otherwise we keep going until we run out
761            of extents. We store the buffer_head, bloc, and extoffset
762            of the current closest match and use that when we are done.
763          */
764         epos.offset = sizeof(struct unallocSpaceEntry);
765         epos.block = UDF_I_LOCATION(table);
766         epos.bh = goal_epos.bh = NULL;
767
768         while (spread &&
769                (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
770                 if (goal >= eloc.logicalBlockNum) {
771                         if (goal < eloc.logicalBlockNum +
772                                         (elen >> sb->s_blocksize_bits))
773                                 nspread = 0;
774                         else
775                                 nspread = goal - eloc.logicalBlockNum -
776                                         (elen >> sb->s_blocksize_bits);
777                 } else {
778                         nspread = eloc.logicalBlockNum - goal;
779                 }
780
781                 if (nspread < spread) {
782                         spread = nspread;
783                         if (goal_epos.bh != epos.bh) {
784                                 brelse(goal_epos.bh);
785                                 goal_epos.bh = epos.bh;
786                                 get_bh(goal_epos.bh);
787                         }
788                         goal_epos.block = epos.block;
789                         goal_epos.offset = epos.offset - adsize;
790                         goal_eloc = eloc;
791                         goal_elen = (etype << 30) | elen;
792                 }
793         }
794
795         brelse(epos.bh);
796
797         if (spread == 0xFFFFFFFF) {
798                 brelse(goal_epos.bh);
799                 mutex_unlock(&sbi->s_alloc_mutex);
800                 return 0;
801         }
802
803         /* Only allocate blocks from the beginning of the extent.
804            That way, we only delete (empty) extents and never have to
805            insert an extent because of splitting. */
806         /* This works, but very poorly.... */
807
808         newblock = goal_eloc.logicalBlockNum;
809         goal_eloc.logicalBlockNum++;
810         goal_elen -= sb->s_blocksize;
811
812         if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) {
813                 brelse(goal_epos.bh);
814                 mutex_unlock(&sbi->s_alloc_mutex);
815                 *err = -EDQUOT;
816                 return 0;
817         }
818
819         if (goal_elen)
820                 udf_write_aext(table, &goal_epos, goal_eloc, goal_elen, 1);
821         else
822                 udf_delete_aext(table, goal_epos, goal_eloc, goal_elen);
823         brelse(goal_epos.bh);
824
825         if (sbi->s_lvid_bh) {
826                 struct logicalVolIntegrityDesc *lvid = (struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
827                 lvid->freeSpaceTable[partition] =
828                         cpu_to_le32(le32_to_cpu(lvid->freeSpaceTable[partition]) - 1);
829                 mark_buffer_dirty(sbi->s_lvid_bh);
830         }
831
832         sb->s_dirt = 1;
833         mutex_unlock(&sbi->s_alloc_mutex);
834         *err = 0;
835         return newblock;
836 }
837
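/*
 * Free blocks on a partition, dispatching to the bitmap or table based
 * routine according to how the partition tracks unallocated and freed
 * space.
 */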
838 inline void udf_free_blocks(struct super_block *sb,
839                             struct inode *inode,
840                             kernel_lb_addr bloc, uint32_t offset,
841                             uint32_t count)
842 {
843         uint16_t partition = bloc.partitionReferenceNum;
844         struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
845
846         if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
847                 return udf_bitmap_free_blocks(sb, inode,
848                                               map->s_uspace.s_bitmap,
849                                               bloc, offset, count);
850         } else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
851                 return udf_table_free_blocks(sb, inode,
852                                              map->s_uspace.s_table,
853                                              bloc, offset, count);
854         } else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) {
855                 return udf_bitmap_free_blocks(sb, inode,
856                                               map->s_fspace.s_bitmap,
857                                               bloc, offset, count);
858         } else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) {
859                 return udf_table_free_blocks(sb, inode,
860                                              map->s_fspace.s_table,
861                                              bloc, offset, count);
862         } else {
863                 return;
864         }
865 }
866
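/*
 * Preallocate blocks on a partition, dispatching to the bitmap or table
 * based routine for that partition.
 */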
867 inline int udf_prealloc_blocks(struct super_block *sb,
868                                struct inode *inode,
869                                uint16_t partition, uint32_t first_block,
870                                uint32_t block_count)
871 {
872         struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
873
874         if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
875                 return udf_bitmap_prealloc_blocks(sb, inode,
876                                                   map->s_uspace.s_bitmap,
877                                                   partition, first_block,
878                                                   block_count);
879         else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
880                 return udf_table_prealloc_blocks(sb, inode,
881                                                  map->s_uspace.s_table,
882                                                  partition, first_block,
883                                                  block_count);
884         else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
885                 return udf_bitmap_prealloc_blocks(sb, inode,
886                                                   map->s_fspace.s_bitmap,
887                                                   partition, first_block,
888                                                   block_count);
889         else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
890                 return udf_table_prealloc_blocks(sb, inode,
891                                                  map->s_fspace.s_table,
892                                                  partition, first_block,
893                                                  block_count);
894         else
895                 return 0;
896 }
897
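/*
 * Allocate a single block on a partition, dispatching to the bitmap or
 * table based routine; *err reports the failure reason.
 */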
898 inline int udf_new_block(struct super_block *sb,
899                          struct inode *inode,
900                          uint16_t partition, uint32_t goal, int *err)
901 {
902         struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
903
904         if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
905                 return udf_bitmap_new_block(sb, inode,
906                                            map->s_uspace.s_bitmap,
907                                            partition, goal, err);
908         else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
909                 return udf_table_new_block(sb, inode,
910                                            map->s_uspace.s_table,
911                                            partition, goal, err);
912         else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
913                 return udf_bitmap_new_block(sb, inode,
914                                             map->s_fspace.s_bitmap,
915                                             partition, goal, err);
916         else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
917                 return udf_table_new_block(sb, inode,
918                                            map->s_fspace.s_table,
919                                            partition, goal, err);
920         else {
921                 *err = -EIO;
922                 return 0;
923         }
924 }