fs/udf/balloc.c
/*
 * balloc.c
 *
 * PURPOSE
 *      Block allocation handling routines for the OSTA-UDF(tm) filesystem.
 *
 * COPYRIGHT
 *      This file is distributed under the terms of the GNU General Public
 *      License (GPL). Copies of the GPL can be obtained from:
 *              ftp://prep.ai.mit.edu/pub/gnu/GPL
 *      Each contributing author retains all rights to their own work.
 *
 *  (C) 1999-2001 Ben Fennema
 *  (C) 1999 Stelias Computing Inc
 *
 * HISTORY
 *
 *  02/24/99 blf  Created.
 *
 */

#include "udfdecl.h"

#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/bitops.h>

#include "udf_i.h"
#include "udf_sb.h"

#define udf_clear_bit(nr, addr) ext2_clear_bit(nr, addr)
#define udf_set_bit(nr, addr) ext2_set_bit(nr, addr)
#define udf_test_bit(nr, addr) ext2_test_bit(nr, addr)
#define udf_find_first_one_bit(addr, size) find_first_one_bit(addr, size)
#define udf_find_next_one_bit(addr, size, offset) find_next_one_bit(addr, size, offset)

#define leBPL_to_cpup(x) leNUM_to_cpup(BITS_PER_LONG, x)
#define leNUM_to_cpup(x, y) xleNUM_to_cpup(x, y)
#define xleNUM_to_cpup(x, y) (le ## x ## _to_cpup(y))
#define uintBPL_t uint(BITS_PER_LONG)
#define uint(x) xuint(x)
#define xuint(x) __le ## x

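/*
 * Scan a little-endian space bitmap word by word and return the index of the
 * next set bit at or after 'offset', or 'size' if none is found.  The
 * leBPL_to_cpup()/uintBPL_t macros above pick the 32- or 64-bit little-endian
 * accessors to match BITS_PER_LONG.
 */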
static inline int find_next_one_bit(void *addr, int size, int offset)
{
        uintBPL_t *p = ((uintBPL_t *) addr) + (offset / BITS_PER_LONG);
        int result = offset & ~(BITS_PER_LONG - 1);
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= (BITS_PER_LONG - 1);
        if (offset) {
                tmp = leBPL_to_cpup(p++);
                tmp &= ~0UL << offset;
                if (size < BITS_PER_LONG)
                        goto found_first;
                if (tmp)
                        goto found_middle;
                size -= BITS_PER_LONG;
                result += BITS_PER_LONG;
        }
        while (size & ~(BITS_PER_LONG - 1)) {
                if ((tmp = leBPL_to_cpup(p++)))
                        goto found_middle;
                result += BITS_PER_LONG;
                size -= BITS_PER_LONG;
        }
        if (!size)
                return result;
        tmp = leBPL_to_cpup(p);
found_first:
        tmp &= ~0UL >> (BITS_PER_LONG - size);
found_middle:
        return result + ffz(~tmp);
}

#define find_first_one_bit(addr, size)\
        find_next_one_bit((addr), (size), 0)

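/*
 * Read one block of the partition's space bitmap and cache the buffer_head
 * in bitmap->s_block_bitmap[bitmap_nr].  Returns 0 on success or -EIO if the
 * block could not be read (the cache slot is then NULL).
 */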
static int read_block_bitmap(struct super_block *sb,
                             struct udf_bitmap *bitmap, unsigned int block,
                             unsigned long bitmap_nr)
{
        struct buffer_head *bh = NULL;
        int retval = 0;
        kernel_lb_addr loc;

        loc.logicalBlockNum = bitmap->s_extPosition;
        loc.partitionReferenceNum = UDF_SB_PARTITION(sb);

        bh = udf_tread(sb, udf_get_lb_pblock(sb, loc, block));
        if (!bh)
                retval = -EIO;

        bitmap->s_block_bitmap[bitmap_nr] = bh;
        return retval;
}

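/*
 * Make sure the bitmap block for 'block_group' is loaded, reading it on
 * demand.  On success the group number (which doubles as the cache slot
 * index) is returned; on failure the negative errno from read_block_bitmap()
 * is passed back.
 */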
static int __load_block_bitmap(struct super_block *sb,
                               struct udf_bitmap *bitmap,
                               unsigned int block_group)
{
        int retval = 0;
        int nr_groups = bitmap->s_nr_groups;

        if (block_group >= nr_groups) {
                udf_debug("block_group (%d) > nr_groups (%d)\n", block_group,
                          nr_groups);
        }

        if (bitmap->s_block_bitmap[block_group])
                return block_group;

        retval = read_block_bitmap(sb, bitmap, block_group, block_group);
        if (retval < 0)
                return retval;

        return block_group;
}

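/*
 * Wrapper around __load_block_bitmap() that also treats a missing buffer in
 * the cache slot as an I/O error.
 */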
static inline int load_block_bitmap(struct super_block *sb,
                                    struct udf_bitmap *bitmap,
                                    unsigned int block_group)
{
        int slot;

        slot = __load_block_bitmap(sb, bitmap, block_group);

        if (slot < 0)
                return slot;

        if (!bitmap->s_block_bitmap[slot])
                return -EIO;

        return slot;
}

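/*
 * Free 'count' blocks starting at 'bloc' + 'offset' by setting the
 * corresponding bits in the partition's space bitmap.  Freed blocks are
 * returned to the quota of 'inode' (if any) and the free-space count in the
 * logical volume integrity descriptor is updated.  Frees that cross a
 * bitmap-block boundary are split and handled one group at a time.
 */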
static void udf_bitmap_free_blocks(struct super_block *sb,
                                   struct inode *inode,
                                   struct udf_bitmap *bitmap,
                                   kernel_lb_addr bloc, uint32_t offset,
                                   uint32_t count)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        struct buffer_head *bh = NULL;
        unsigned long block;
        unsigned long block_group;
        unsigned long bit;
        unsigned long i;
        int bitmap_nr;
        unsigned long overflow;

        mutex_lock(&sbi->s_alloc_mutex);
        if (bloc.logicalBlockNum < 0 ||
            (bloc.logicalBlockNum + count) >
                UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)) {
                udf_debug("%d < %d || %d + %d > %d\n", bloc.logicalBlockNum, 0,
                          bloc.logicalBlockNum, count,
                          UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum));
                goto error_return;
        }

        block = bloc.logicalBlockNum + offset +
                (sizeof(struct spaceBitmapDesc) << 3);

do_more:
        overflow = 0;
        block_group = block >> (sb->s_blocksize_bits + 3);
        bit = block % (sb->s_blocksize << 3);

        /*
         * Check to see if we are freeing blocks across a group boundary.
         */
        if (bit + count > (sb->s_blocksize << 3)) {
                overflow = bit + count - (sb->s_blocksize << 3);
                count -= overflow;
        }
        bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
        if (bitmap_nr < 0)
                goto error_return;

        bh = bitmap->s_block_bitmap[bitmap_nr];
        for (i = 0; i < count; i++) {
                if (udf_set_bit(bit + i, bh->b_data)) {
                        udf_debug("bit %ld already set\n", bit + i);
                        udf_debug("byte=%2x\n",
                                  ((char *)bh->b_data)[(bit + i) >> 3]);
                } else {
                        if (inode)
                                DQUOT_FREE_BLOCK(inode, 1);
                        if (UDF_SB_LVIDBH(sb)) {
                                UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)] =
                                        cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->
                                                freeSpaceTable[UDF_SB_PARTITION(sb)]) + 1);
                        }
                }
        }
        mark_buffer_dirty(bh);
        if (overflow) {
                block += count;
                count = overflow;
                goto do_more;
        }
error_return:
        sb->s_dirt = 1;
        if (UDF_SB_LVIDBH(sb))
                mark_buffer_dirty(UDF_SB_LVIDBH(sb));
        mutex_unlock(&sbi->s_alloc_mutex);
        return;
}

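/*
 * Reserve up to 'block_count' free blocks starting at 'first_block' by
 * clearing their bits in the bitmap, so a file can grow contiguously.  The
 * scan stops at the first block that is already in use or when the quota
 * check fails, and the number of blocks actually reserved is returned.
 */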
static int udf_bitmap_prealloc_blocks(struct super_block *sb,
                                      struct inode *inode,
                                      struct udf_bitmap *bitmap,
                                      uint16_t partition, uint32_t first_block,
                                      uint32_t block_count)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        int alloc_count = 0;
        int bit, block, block_group, group_start;
        int nr_groups, bitmap_nr;
        struct buffer_head *bh;

        mutex_lock(&sbi->s_alloc_mutex);
        if (first_block < 0 || first_block >= UDF_SB_PARTLEN(sb, partition))
                goto out;

        if (first_block + block_count > UDF_SB_PARTLEN(sb, partition))
                block_count = UDF_SB_PARTLEN(sb, partition) - first_block;

repeat:
        nr_groups = (UDF_SB_PARTLEN(sb, partition) +
                     (sizeof(struct spaceBitmapDesc) << 3) +
                     (sb->s_blocksize * 8) - 1) / (sb->s_blocksize * 8);
        block = first_block + (sizeof(struct spaceBitmapDesc) << 3);
        block_group = block >> (sb->s_blocksize_bits + 3);
        group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

        bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
        if (bitmap_nr < 0)
                goto out;
        bh = bitmap->s_block_bitmap[bitmap_nr];

        bit = block % (sb->s_blocksize << 3);

        while (bit < (sb->s_blocksize << 3) && block_count > 0) {
                if (!udf_test_bit(bit, bh->b_data))
                        goto out;
                else if (DQUOT_PREALLOC_BLOCK(inode, 1))
                        goto out;
                else if (!udf_clear_bit(bit, bh->b_data)) {
                        udf_debug("bit already cleared for block %d\n", bit);
                        DQUOT_FREE_BLOCK(inode, 1);
                        goto out;
                }
                block_count--;
                alloc_count++;
                bit++;
                block++;
        }
        mark_buffer_dirty(bh);
        if (block_count > 0)
                goto repeat;
out:
        if (UDF_SB_LVIDBH(sb)) {
                UDF_SB_LVID(sb)->freeSpaceTable[partition] =
                        cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->
                                freeSpaceTable[partition]) - alloc_count);
                mark_buffer_dirty(UDF_SB_LVIDBH(sb));
        }
        sb->s_dirt = 1;
        mutex_unlock(&sbi->s_alloc_mutex);
        return alloc_count;
}

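/*
 * Allocate a single block, trying to stay close to 'goal'.  The search looks
 * at the goal bit itself, then within the same 64-bit word, then scans the
 * rest of the goal's bitmap block, and finally walks the remaining groups.
 * Once a free bit is found, search_back walks up to 7 bits backwards to the
 * start of the free run so small allocations pack together.
 */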
static int udf_bitmap_new_block(struct super_block *sb,
                                struct inode *inode,
                                struct udf_bitmap *bitmap, uint16_t partition,
                                uint32_t goal, int *err)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        int newbit, bit = 0, block, block_group, group_start;
        int end_goal, nr_groups, bitmap_nr, i;
        struct buffer_head *bh = NULL;
        char *ptr;
        int newblock = 0;

        *err = -ENOSPC;
        mutex_lock(&sbi->s_alloc_mutex);

repeat:
        if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition))
                goal = 0;

        nr_groups = bitmap->s_nr_groups;
        block = goal + (sizeof(struct spaceBitmapDesc) << 3);
        block_group = block >> (sb->s_blocksize_bits + 3);
        group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

        bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
        if (bitmap_nr < 0)
                goto error_return;
        bh = bitmap->s_block_bitmap[bitmap_nr];
        ptr = memscan((char *)bh->b_data + group_start, 0xFF,
                      sb->s_blocksize - group_start);

        if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
                bit = block % (sb->s_blocksize << 3);

                if (udf_test_bit(bit, bh->b_data))
                        goto got_block;

                end_goal = (bit + 63) & ~63;
                bit = udf_find_next_one_bit(bh->b_data, end_goal, bit);
                if (bit < end_goal)
                        goto got_block;

                ptr = memscan((char *)bh->b_data + (bit >> 3), 0xFF,
                              sb->s_blocksize - ((bit + 7) >> 3));
                newbit = (ptr - ((char *)bh->b_data)) << 3;
                if (newbit < sb->s_blocksize << 3) {
                        bit = newbit;
                        goto search_back;
                }

                newbit = udf_find_next_one_bit(bh->b_data,
                                               sb->s_blocksize << 3, bit);
                if (newbit < sb->s_blocksize << 3) {
                        bit = newbit;
                        goto got_block;
                }
        }

        for (i = 0; i < (nr_groups * 2); i++) {
                block_group++;
                if (block_group >= nr_groups)
                        block_group = 0;
                group_start = block_group ? 0 : sizeof(struct spaceBitmapDesc);

                bitmap_nr = load_block_bitmap(sb, bitmap, block_group);
                if (bitmap_nr < 0)
                        goto error_return;
                bh = bitmap->s_block_bitmap[bitmap_nr];
                if (i < nr_groups) {
                        ptr = memscan((char *)bh->b_data + group_start, 0xFF,
                                      sb->s_blocksize - group_start);
                        if ((ptr - ((char *)bh->b_data)) < sb->s_blocksize) {
                                bit = (ptr - ((char *)bh->b_data)) << 3;
                                break;
                        }
                } else {
                        bit = udf_find_next_one_bit((char *)bh->b_data,
                                                    sb->s_blocksize << 3,
                                                    group_start << 3);
                        if (bit < sb->s_blocksize << 3)
                                break;
                }
        }
        if (i >= (nr_groups * 2)) {
                mutex_unlock(&sbi->s_alloc_mutex);
                return newblock;
        }
        if (bit < sb->s_blocksize << 3)
                goto search_back;
        else
                bit = udf_find_next_one_bit(bh->b_data, sb->s_blocksize << 3,
                                            group_start << 3);
        if (bit >= sb->s_blocksize << 3) {
                mutex_unlock(&sbi->s_alloc_mutex);
                return 0;
        }

search_back:
        for (i = 0; i < 7 && bit > (group_start << 3) &&
             udf_test_bit(bit - 1, bh->b_data); i++, bit--)
                ;

got_block:

        /*
         * Check quota for allocation of this block.
         */
        if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) {
                mutex_unlock(&sbi->s_alloc_mutex);
                *err = -EDQUOT;
                return 0;
        }

        newblock = bit + (block_group << (sb->s_blocksize_bits + 3)) -
                (sizeof(struct spaceBitmapDesc) << 3);

        if (!udf_clear_bit(bit, bh->b_data)) {
                udf_debug("bit already cleared for block %d\n", bit);
                goto repeat;
        }

        mark_buffer_dirty(bh);

        if (UDF_SB_LVIDBH(sb)) {
                UDF_SB_LVID(sb)->freeSpaceTable[partition] =
                        cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->
                                freeSpaceTable[partition]) - 1);
                mark_buffer_dirty(UDF_SB_LVIDBH(sb));
        }
        sb->s_dirt = 1;
        mutex_unlock(&sbi->s_alloc_mutex);
        *err = 0;
        return newblock;

error_return:
        *err = -EIO;
        mutex_unlock(&sbi->s_alloc_mutex);
        return 0;
}

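/*
 * Free blocks when the partition tracks free space with an unallocated space
 * table (an inode whose extents list the free runs) rather than a bitmap.
 * The freed range is merged into an adjacent extent when possible; otherwise
 * a new extent is appended, stealing one block from the range being freed if
 * an additional allocation extent descriptor block is needed (udf_add_aext()
 * cannot be used here because it may itself try to allocate).
 */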
static void udf_table_free_blocks(struct super_block *sb,
                                  struct inode *inode,
                                  struct inode *table,
                                  kernel_lb_addr bloc, uint32_t offset,
                                  uint32_t count)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        uint32_t start, end;
        uint32_t elen;
        kernel_lb_addr eloc;
        struct extent_position oepos, epos;
        int8_t etype;
        int i;

        mutex_lock(&sbi->s_alloc_mutex);
        if (bloc.logicalBlockNum < 0 ||
            (bloc.logicalBlockNum + count) >
                UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum)) {
                udf_debug("%d < %d || %d + %d > %d\n", bloc.logicalBlockNum, 0,
                          bloc.logicalBlockNum, count,
                          UDF_SB_PARTLEN(sb, bloc.partitionReferenceNum));
                goto error_return;
        }

        /* We do this up front - there are some error conditions that could
           occur, but.. oh well */
        if (inode)
                DQUOT_FREE_BLOCK(inode, count);
        if (UDF_SB_LVIDBH(sb)) {
                UDF_SB_LVID(sb)->freeSpaceTable[UDF_SB_PARTITION(sb)] =
                        cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->
                                freeSpaceTable[UDF_SB_PARTITION(sb)]) + count);
                mark_buffer_dirty(UDF_SB_LVIDBH(sb));
        }

        start = bloc.logicalBlockNum + offset;
        end = bloc.logicalBlockNum + offset + count - 1;

        epos.offset = oepos.offset = sizeof(struct unallocSpaceEntry);
        elen = 0;
        epos.block = oepos.block = UDF_I_LOCATION(table);
        epos.bh = oepos.bh = NULL;

        while (count &&
               (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
                if ((eloc.logicalBlockNum + (elen >> sb->s_blocksize_bits)) ==
                    start) {
                        if ((0x3FFFFFFF - elen) <
                            (count << sb->s_blocksize_bits)) {
                                count -= ((0x3FFFFFFF - elen) >>
                                          sb->s_blocksize_bits);
                                start += ((0x3FFFFFFF - elen) >>
                                          sb->s_blocksize_bits);
                                elen = (etype << 30) |
                                        (0x40000000 - sb->s_blocksize);
                        } else {
                                elen = (etype << 30) |
                                        (elen + (count << sb->s_blocksize_bits));
                                start += count;
                                count = 0;
                        }
                        udf_write_aext(table, &oepos, eloc, elen, 1);
                } else if (eloc.logicalBlockNum == (end + 1)) {
                        if ((0x3FFFFFFF - elen) <
                            (count << sb->s_blocksize_bits)) {
                                count -= ((0x3FFFFFFF - elen) >>
                                          sb->s_blocksize_bits);
                                end -= ((0x3FFFFFFF - elen) >>
                                        sb->s_blocksize_bits);
                                eloc.logicalBlockNum -= ((0x3FFFFFFF - elen) >>
                                                         sb->s_blocksize_bits);
                                elen = (etype << 30) |
                                        (0x40000000 - sb->s_blocksize);
                        } else {
                                eloc.logicalBlockNum = start;
                                elen = (etype << 30) |
                                        (elen + (count << sb->s_blocksize_bits));
                                end -= count;
                                count = 0;
                        }
                        udf_write_aext(table, &oepos, eloc, elen, 1);
                }

                if (epos.bh != oepos.bh) {
                        i = -1;
                        oepos.block = epos.block;
                        brelse(oepos.bh);
                        get_bh(epos.bh);
                        oepos.bh = epos.bh;
                        oepos.offset = 0;
                } else
                        oepos.offset = epos.offset;
        }

        if (count) {
                /* NOTE: we CANNOT use udf_add_aext here, as it can try to
                   allocate a new block, and since we hold the super block
                   lock already very bad things would happen :)

                   We copy the behavior of udf_add_aext, but instead of
                   trying to allocate a new block close to the existing one,
                   we just steal a block from the extent we are trying to add.

                   It would be nice if the blocks were close together, but it
                   isn't required.
                 */

                int adsize;
                short_ad *sad = NULL;
                long_ad *lad = NULL;
                struct allocExtDesc *aed;

                eloc.logicalBlockNum = start;
                elen = EXT_RECORDED_ALLOCATED | (count << sb->s_blocksize_bits);

                if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT)
                        adsize = sizeof(short_ad);
                else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG)
                        adsize = sizeof(long_ad);
                else {
                        brelse(oepos.bh);
                        brelse(epos.bh);
                        goto error_return;
                }

                if (epos.offset + (2 * adsize) > sb->s_blocksize) {
                        char *sptr, *dptr;
                        int loffset;

                        brelse(oepos.bh);
                        oepos = epos;

                        /* Steal a block from the extent being freed */
                        epos.block.logicalBlockNum = eloc.logicalBlockNum;
                        eloc.logicalBlockNum++;
                        elen -= sb->s_blocksize;

                        if (!(epos.bh = udf_tread(sb,
                                udf_get_lb_pblock(sb, epos.block, 0)))) {
                                brelse(oepos.bh);
                                goto error_return;
                        }
                        aed = (struct allocExtDesc *)(epos.bh->b_data);
                        aed->previousAllocExtLocation =
                                cpu_to_le32(oepos.block.logicalBlockNum);
                        if (epos.offset + adsize > sb->s_blocksize) {
                                loffset = epos.offset;
                                aed->lengthAllocDescs = cpu_to_le32(adsize);
                                /* The descriptor being moved lives in the
                                   in-ICB data of 'table' ('inode' may be
                                   NULL here). */
                                sptr = UDF_I_DATA(table) + epos.offset -
                                        udf_file_entry_alloc_offset(table) +
                                        UDF_I_LENEATTR(table) - adsize;
                                dptr = epos.bh->b_data +
                                        sizeof(struct allocExtDesc);
                                memcpy(dptr, sptr, adsize);
                                epos.offset = sizeof(struct allocExtDesc) +
                                        adsize;
                        } else {
                                loffset = epos.offset + adsize;
                                aed->lengthAllocDescs = cpu_to_le32(0);
                                sptr = oepos.bh->b_data + epos.offset;
                                epos.offset = sizeof(struct allocExtDesc);

                                if (oepos.bh) {
                                        aed = (struct allocExtDesc *)
                                                oepos.bh->b_data;
                                        aed->lengthAllocDescs =
                                                cpu_to_le32(le32_to_cpu(
                                                        aed->lengthAllocDescs) +
                                                        adsize);
                                } else {
                                        UDF_I_LENALLOC(table) += adsize;
                                        mark_inode_dirty(table);
                                }
                        }
                        if (UDF_SB_UDFREV(sb) >= 0x0200)
                                udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 3,
                                            1, epos.block.logicalBlockNum,
                                            sizeof(tag));
                        else
                                udf_new_tag(epos.bh->b_data, TAG_IDENT_AED, 2,
                                            1, epos.block.logicalBlockNum,
                                            sizeof(tag));
                        switch (UDF_I_ALLOCTYPE(table)) {
                        case ICBTAG_FLAG_AD_SHORT:
                                sad = (short_ad *) sptr;
                                sad->extLength =
                                        cpu_to_le32(EXT_NEXT_EXTENT_ALLOCDECS |
                                                    sb->s_blocksize);
                                sad->extPosition =
                                        cpu_to_le32(epos.block.logicalBlockNum);
                                break;
                        case ICBTAG_FLAG_AD_LONG:
                                lad = (long_ad *) sptr;
                                lad->extLength =
                                        cpu_to_le32(EXT_NEXT_EXTENT_ALLOCDECS |
                                                    sb->s_blocksize);
                                lad->extLocation = cpu_to_lelb(epos.block);
                                break;
                        }
                        if (oepos.bh) {
                                udf_update_tag(oepos.bh->b_data, loffset);
                                mark_buffer_dirty(oepos.bh);
                        } else
                                mark_inode_dirty(table);
                }

                /* It's possible that stealing the block emptied the extent */
                if (elen) {
                        udf_write_aext(table, &epos, eloc, elen, 1);

                        if (!epos.bh) {
                                UDF_I_LENALLOC(table) += adsize;
                                mark_inode_dirty(table);
                        } else {
                                aed = (struct allocExtDesc *)epos.bh->b_data;
                                aed->lengthAllocDescs =
                                        cpu_to_le32(le32_to_cpu(
                                                aed->lengthAllocDescs) +
                                                adsize);
                                udf_update_tag(epos.bh->b_data, epos.offset);
                                mark_buffer_dirty(epos.bh);
                        }
                }
        }

        brelse(epos.bh);
        brelse(oepos.bh);

error_return:
        sb->s_dirt = 1;
        mutex_unlock(&sbi->s_alloc_mutex);
        return;
}

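/*
 * Table-based analogue of udf_bitmap_prealloc_blocks(): walk the free-space
 * table looking for an extent that starts exactly at 'first_block' and, if
 * found, carve up to 'block_count' blocks off its front, shrinking or
 * deleting the extent accordingly.
 */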
static int udf_table_prealloc_blocks(struct super_block *sb,
                                     struct inode *inode,
                                     struct inode *table, uint16_t partition,
                                     uint32_t first_block, uint32_t block_count)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        int alloc_count = 0;
        uint32_t elen, adsize;
        kernel_lb_addr eloc;
        struct extent_position epos;
        int8_t etype = -1;

        if (first_block < 0 || first_block >= UDF_SB_PARTLEN(sb, partition))
                return 0;

        if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT)
                adsize = sizeof(short_ad);
        else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG)
                adsize = sizeof(long_ad);
        else
                return 0;

        mutex_lock(&sbi->s_alloc_mutex);
        epos.offset = sizeof(struct unallocSpaceEntry);
        epos.block = UDF_I_LOCATION(table);
        epos.bh = NULL;
        eloc.logicalBlockNum = 0xFFFFFFFF;

        while (first_block != eloc.logicalBlockNum &&
               (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
                udf_debug("eloc=%d, elen=%d, first_block=%d\n",
                          eloc.logicalBlockNum, elen, first_block);
                /* keep walking the table until we hit first_block */
        }

        if (first_block == eloc.logicalBlockNum) {
                epos.offset -= adsize;

                alloc_count = (elen >> sb->s_blocksize_bits);
                if (inode && DQUOT_PREALLOC_BLOCK(inode,
                                alloc_count > block_count ?
                                        block_count : alloc_count))
                        alloc_count = 0;
                else if (alloc_count > block_count) {
                        alloc_count = block_count;
                        eloc.logicalBlockNum += alloc_count;
                        elen -= (alloc_count << sb->s_blocksize_bits);
                        udf_write_aext(table, &epos, eloc,
                                       (etype << 30) | elen, 1);
                } else
                        udf_delete_aext(table, epos, eloc,
                                        (etype << 30) | elen);
        } else
                alloc_count = 0;

        brelse(epos.bh);

        if (alloc_count && UDF_SB_LVIDBH(sb)) {
                UDF_SB_LVID(sb)->freeSpaceTable[partition] =
                        cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->
                                freeSpaceTable[partition]) - alloc_count);
                mark_buffer_dirty(UDF_SB_LVIDBH(sb));
                sb->s_dirt = 1;
        }
        mutex_unlock(&sbi->s_alloc_mutex);
        return alloc_count;
}

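/*
 * Allocate one block from the free-space table, choosing the extent whose
 * location is closest to 'goal'.  Blocks are always taken from the beginning
 * of the chosen extent so extents only ever shrink or disappear; they never
 * need to be split.
 */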
static int udf_table_new_block(struct super_block *sb,
                               struct inode *inode,
                               struct inode *table, uint16_t partition,
                               uint32_t goal, int *err)
{
        struct udf_sb_info *sbi = UDF_SB(sb);
        uint32_t spread = 0xFFFFFFFF, nspread = 0xFFFFFFFF;
        uint32_t newblock = 0, adsize;
        uint32_t elen, goal_elen = 0;
        kernel_lb_addr eloc, goal_eloc;
        struct extent_position epos, goal_epos;
        int8_t etype;

        *err = -ENOSPC;

        if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_SHORT)
                adsize = sizeof(short_ad);
        else if (UDF_I_ALLOCTYPE(table) == ICBTAG_FLAG_AD_LONG)
                adsize = sizeof(long_ad);
        else
                return newblock;

        mutex_lock(&sbi->s_alloc_mutex);
        if (goal < 0 || goal >= UDF_SB_PARTLEN(sb, partition))
                goal = 0;

        /* We search for the closest matching block to goal. If we find an
           exact hit, we stop. Otherwise we keep going till we run out of
           extents. We store the buffer_head, bloc, and extoffset of the
           current closest match and use that when we are done.
         */
        epos.offset = sizeof(struct unallocSpaceEntry);
        epos.block = UDF_I_LOCATION(table);
        epos.bh = goal_epos.bh = NULL;

        while (spread &&
               (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
                if (goal >= eloc.logicalBlockNum) {
                        if (goal < eloc.logicalBlockNum +
                                   (elen >> sb->s_blocksize_bits))
                                nspread = 0;
                        else
                                nspread = goal - eloc.logicalBlockNum -
                                        (elen >> sb->s_blocksize_bits);
                } else
                        nspread = eloc.logicalBlockNum - goal;

                if (nspread < spread) {
                        spread = nspread;
                        if (goal_epos.bh != epos.bh) {
                                brelse(goal_epos.bh);
                                goal_epos.bh = epos.bh;
                                get_bh(goal_epos.bh);
                        }
                        goal_epos.block = epos.block;
                        goal_epos.offset = epos.offset - adsize;
                        goal_eloc = eloc;
                        goal_elen = (etype << 30) | elen;
                }
        }

        brelse(epos.bh);

        if (spread == 0xFFFFFFFF) {
                brelse(goal_epos.bh);
                mutex_unlock(&sbi->s_alloc_mutex);
                return 0;
        }

        /* Only allocate blocks from the beginning of the extent.
           That way, we only delete (empty) extents, never have to insert an
           extent because of splitting */
        /* This works, but very poorly.... */

        newblock = goal_eloc.logicalBlockNum;
        goal_eloc.logicalBlockNum++;
        goal_elen -= sb->s_blocksize;

        if (inode && DQUOT_ALLOC_BLOCK(inode, 1)) {
                brelse(goal_epos.bh);
                mutex_unlock(&sbi->s_alloc_mutex);
                *err = -EDQUOT;
                return 0;
        }

        if (goal_elen)
                udf_write_aext(table, &goal_epos, goal_eloc, goal_elen, 1);
        else
                udf_delete_aext(table, goal_epos, goal_eloc, goal_elen);
        brelse(goal_epos.bh);

        if (UDF_SB_LVIDBH(sb)) {
                UDF_SB_LVID(sb)->freeSpaceTable[partition] =
                        cpu_to_le32(le32_to_cpu(UDF_SB_LVID(sb)->
                                freeSpaceTable[partition]) - 1);
                mark_buffer_dirty(UDF_SB_LVIDBH(sb));
        }

        sb->s_dirt = 1;
        mutex_unlock(&sbi->s_alloc_mutex);
        *err = 0;
        return newblock;
}

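/*
 * Free a run of blocks on the partition that owns 'bloc', dispatching to the
 * bitmap- or table-based implementation according to the partition map flags
 * (unallocated vs. freed space, bitmap vs. table).
 */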
inline void udf_free_blocks(struct super_block *sb,
                            struct inode *inode,
                            kernel_lb_addr bloc, uint32_t offset,
                            uint32_t count)
{
        uint16_t partition = bloc.partitionReferenceNum;

        if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) {
                return udf_bitmap_free_blocks(sb, inode,
                                              UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
                                              bloc, offset, count);
        } else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE) {
                return udf_table_free_blocks(sb, inode,
                                             UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
                                             bloc, offset, count);
        } else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP) {
                return udf_bitmap_free_blocks(sb, inode,
                                              UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
                                              bloc, offset, count);
        } else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE) {
                return udf_table_free_blocks(sb, inode,
                                             UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
                                             bloc, offset, count);
        } else
                return;
}

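/*
 * Reserve up to 'block_count' blocks contiguous with 'first_block' on the
 * given partition; returns the number of blocks actually obtained (possibly
 * 0), again dispatching on the partition map flags.
 */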
inline int udf_prealloc_blocks(struct super_block *sb,
                               struct inode *inode,
                               uint16_t partition, uint32_t first_block,
                               uint32_t block_count)
{
        if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) {
                return udf_bitmap_prealloc_blocks(sb, inode,
                                                  UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
                                                  partition, first_block, block_count);
        } else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE) {
                return udf_table_prealloc_blocks(sb, inode,
                                                 UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
                                                 partition, first_block, block_count);
        } else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP) {
                return udf_bitmap_prealloc_blocks(sb, inode,
                                                  UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
                                                  partition, first_block, block_count);
        } else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE) {
                return udf_table_prealloc_blocks(sb, inode,
                                                 UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
                                                 partition, first_block, block_count);
        } else
                return 0;
}

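/*
 * Allocate a single block near 'goal' on the given partition; returns the
 * new block number (or 0 on failure) and sets *err to 0 on success or a
 * negative errno otherwise.
 */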
inline int udf_new_block(struct super_block *sb,
                         struct inode *inode,
                         uint16_t partition, uint32_t goal, int *err)
{
        int ret;

        if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_BITMAP) {
                ret = udf_bitmap_new_block(sb, inode,
                                           UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_bitmap,
                                           partition, goal, err);
                return ret;
        } else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_UNALLOC_TABLE) {
                return udf_table_new_block(sb, inode,
                                           UDF_SB_PARTMAPS(sb)[partition].s_uspace.s_table,
                                           partition, goal, err);
        } else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_BITMAP) {
                return udf_bitmap_new_block(sb, inode,
                                            UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_bitmap,
                                            partition, goal, err);
        } else if (UDF_SB_PARTFLAGS(sb, partition) & UDF_PART_FLAG_FREED_TABLE) {
                return udf_table_new_block(sb, inode,
                                           UDF_SB_PARTMAPS(sb)[partition].s_fspace.s_table,
                                           partition, goal, err);
        } else {
                *err = -EIO;
                return 0;
        }
}