fs/fat/fatent.c
/*
 * Copyright (C) 2004, OGAWA Hirofumi
 * Released under GPL v2.
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/msdos_fs.h>

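/*
 * Method table for accessing FAT entries.  Each FAT variant (12/16/32)
 * supplies its own implementation; the right table is selected by
 * fat_ent_access_init() below.
 */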
struct fatent_operations {
        void (*ent_blocknr)(struct super_block *, int, int *, sector_t *);
        void (*ent_set_ptr)(struct fat_entry *, int);
        int (*ent_bread)(struct super_block *, struct fat_entry *,
                         int, sector_t);
        int (*ent_get)(struct fat_entry *);
        void (*ent_put)(struct fat_entry *, int);
        int (*ent_next)(struct fat_entry *);
};

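/*
 * On FAT12 two adjacent 12-bit entries can share a byte, so reads and
 * updates of the packed value are serialized with this spinlock.
 */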
static DEFINE_SPINLOCK(fat12_entry_lock);

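/*
 * A FAT12 entry is 12 bits (1.5 bytes) wide, so the byte offset of
 * entry N within the FAT is N + N/2.
 */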
static void fat12_ent_blocknr(struct super_block *sb, int entry,
                              int *offset, sector_t *blocknr)
{
        struct msdos_sb_info *sbi = MSDOS_SB(sb);
        int bytes = entry + (entry >> 1);
        WARN_ON(entry < FAT_START_ENT || sbi->max_cluster <= entry);
        *offset = bytes & (sb->s_blocksize - 1);
        *blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits);
}

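/*
 * Shared by FAT16 and FAT32: the entry size is a power of two, so the
 * byte offset is entry << fatent_shift (1 for FAT16, 2 for FAT32).
 */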
static void fat_ent_blocknr(struct super_block *sb, int entry,
                            int *offset, sector_t *blocknr)
{
        struct msdos_sb_info *sbi = MSDOS_SB(sb);
        int bytes = (entry << sbi->fatent_shift);
        WARN_ON(entry < FAT_START_ENT || sbi->max_cluster <= entry);
        *offset = bytes & (sb->s_blocksize - 1);
        *blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits);
}

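/*
 * Point ent12_p[] at the two bytes holding this 12-bit entry.  When the
 * entry straddles a block boundary, the second byte lives in bhs[1].
 */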
static void fat12_ent_set_ptr(struct fat_entry *fatent, int offset)
{
        struct buffer_head **bhs = fatent->bhs;
        if (fatent->nr_bhs == 1) {
                WARN_ON(offset >= (bhs[0]->b_size - 1));
                fatent->u.ent12_p[0] = bhs[0]->b_data + offset;
                fatent->u.ent12_p[1] = bhs[0]->b_data + (offset + 1);
        } else {
                WARN_ON(offset != (bhs[0]->b_size - 1));
                fatent->u.ent12_p[0] = bhs[0]->b_data + offset;
                fatent->u.ent12_p[1] = bhs[1]->b_data;
        }
}

static void fat16_ent_set_ptr(struct fat_entry *fatent, int offset)
{
        WARN_ON(offset & (2 - 1));
        fatent->u.ent16_p = (__le16 *)(fatent->bhs[0]->b_data + offset);
}

static void fat32_ent_set_ptr(struct fat_entry *fatent, int offset)
{
        WARN_ON(offset & (4 - 1));
        fatent->u.ent32_p = (__le32 *)(fatent->bhs[0]->b_data + offset);
}

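/*
 * Read the block containing a FAT12 entry; if the entry's first byte is
 * the last byte of that block, read the following block as well.
 */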
static int fat12_ent_bread(struct super_block *sb, struct fat_entry *fatent,
                           int offset, sector_t blocknr)
{
        struct buffer_head **bhs = fatent->bhs;

        WARN_ON(blocknr < MSDOS_SB(sb)->fat_start);
        bhs[0] = sb_bread(sb, blocknr);
        if (!bhs[0])
                goto err;

        if ((offset + 1) < sb->s_blocksize)
                fatent->nr_bhs = 1;
        else {
                /* This entry spans a block boundary, so it needs the next block */
                blocknr++;
                bhs[1] = sb_bread(sb, blocknr);
                if (!bhs[1])
                        goto err_brelse;
                fatent->nr_bhs = 2;
        }
        fat12_ent_set_ptr(fatent, offset);
        return 0;

err_brelse:
        brelse(bhs[0]);
err:
        printk(KERN_ERR "FAT: FAT read failed (blocknr %llu)\n",
               (unsigned long long)blocknr);
        return -EIO;
}

static int fat_ent_bread(struct super_block *sb, struct fat_entry *fatent,
                         int offset, sector_t blocknr)
{
        struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;

        WARN_ON(blocknr < MSDOS_SB(sb)->fat_start);
        fatent->bhs[0] = sb_bread(sb, blocknr);
        if (!fatent->bhs[0]) {
                printk(KERN_ERR "FAT: FAT read failed (blocknr %llu)\n",
                       (unsigned long long)blocknr);
                return -EIO;
        }
        fatent->nr_bhs = 1;
        ops->ent_set_ptr(fatent, offset);
        return 0;
}

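/*
 * Extract a 12-bit entry from its two bytes.  An odd-numbered entry is
 * built from the high nibble of the first byte plus the second byte; an
 * even one from the first byte plus the low nibble of the second byte.
 */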
static int fat12_ent_get(struct fat_entry *fatent)
{
        u8 **ent12_p = fatent->u.ent12_p;
        int next;

        spin_lock(&fat12_entry_lock);
        if (fatent->entry & 1)
                next = (*ent12_p[0] >> 4) | (*ent12_p[1] << 4);
        else
                next = (*ent12_p[1] << 8) | *ent12_p[0];
        spin_unlock(&fat12_entry_lock);

        next &= 0x0fff;
        if (next >= BAD_FAT12)
                next = FAT_ENT_EOF;
        return next;
}

static int fat16_ent_get(struct fat_entry *fatent)
{
        int next = le16_to_cpu(*fatent->u.ent16_p);
        WARN_ON((unsigned long)fatent->u.ent16_p & (2 - 1));
        if (next >= BAD_FAT16)
                next = FAT_ENT_EOF;
        return next;
}

static int fat32_ent_get(struct fat_entry *fatent)
{
        int next = le32_to_cpu(*fatent->u.ent32_p) & 0x0fffffff;
        WARN_ON((unsigned long)fatent->u.ent32_p & (4 - 1));
        if (next >= BAD_FAT32)
                next = FAT_ENT_EOF;
        return next;
}

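/*
 * Store a 12-bit entry back into its two bytes, preserving the nibble
 * that belongs to the neighbouring entry.
 */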
static void fat12_ent_put(struct fat_entry *fatent, int new)
{
        u8 **ent12_p = fatent->u.ent12_p;

        if (new == FAT_ENT_EOF)
                new = EOF_FAT12;

        spin_lock(&fat12_entry_lock);
        if (fatent->entry & 1) {
                *ent12_p[0] = (new << 4) | (*ent12_p[0] & 0x0f);
                *ent12_p[1] = new >> 4;
        } else {
                *ent12_p[0] = new & 0xff;
                *ent12_p[1] = (*ent12_p[1] & 0xf0) | (new >> 8);
        }
        spin_unlock(&fat12_entry_lock);

        mark_buffer_dirty(fatent->bhs[0]);
        if (fatent->nr_bhs == 2)
                mark_buffer_dirty(fatent->bhs[1]);
}

static void fat16_ent_put(struct fat_entry *fatent, int new)
{
        if (new == FAT_ENT_EOF)
                new = EOF_FAT16;

        *fatent->u.ent16_p = cpu_to_le16(new);
        mark_buffer_dirty(fatent->bhs[0]);
}

static void fat32_ent_put(struct fat_entry *fatent, int new)
{
        if (new == FAT_ENT_EOF)
                new = EOF_FAT32;

        WARN_ON(new & 0xf0000000);
        new |= le32_to_cpu(*fatent->u.ent32_p) & ~0x0fffffff;
        *fatent->u.ent32_p = cpu_to_le32(new);
        mark_buffer_dirty(fatent->bhs[0]);
}

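/*
 * Advance to the next FAT12 entry within the already-read block(s).
 * Returns 1 if the pointers could be advanced, 0 if the next entry
 * needs another block to be read.
 */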
static int fat12_ent_next(struct fat_entry *fatent)
{
        u8 **ent12_p = fatent->u.ent12_p;
        struct buffer_head **bhs = fatent->bhs;
        u8 *nextp = ent12_p[1] + 1 + (fatent->entry & 1);

        fatent->entry++;
        if (fatent->nr_bhs == 1) {
                WARN_ON(ent12_p[0] > (u8 *)(bhs[0]->b_data + (bhs[0]->b_size - 2)));
                WARN_ON(ent12_p[1] > (u8 *)(bhs[0]->b_data + (bhs[0]->b_size - 1)));
                if (nextp < (u8 *)(bhs[0]->b_data + (bhs[0]->b_size - 1))) {
                        ent12_p[0] = nextp - 1;
                        ent12_p[1] = nextp;
                        return 1;
                }
        } else {
                WARN_ON(ent12_p[0] != (u8 *)(bhs[0]->b_data + (bhs[0]->b_size - 1)));
                WARN_ON(ent12_p[1] != (u8 *)bhs[1]->b_data);
                ent12_p[0] = nextp - 1;
                ent12_p[1] = nextp;
                brelse(bhs[0]);
                bhs[0] = bhs[1];
                fatent->nr_bhs = 1;
                return 1;
        }
        ent12_p[0] = NULL;
        ent12_p[1] = NULL;
        return 0;
}

static int fat16_ent_next(struct fat_entry *fatent)
{
        const struct buffer_head *bh = fatent->bhs[0];
        fatent->entry++;
        if (fatent->u.ent16_p < (__le16 *)(bh->b_data + (bh->b_size - 2))) {
                fatent->u.ent16_p++;
                return 1;
        }
        fatent->u.ent16_p = NULL;
        return 0;
}

static int fat32_ent_next(struct fat_entry *fatent)
{
        const struct buffer_head *bh = fatent->bhs[0];
        fatent->entry++;
        if (fatent->u.ent32_p < (__le32 *)(bh->b_data + (bh->b_size - 4))) {
                fatent->u.ent32_p++;
                return 1;
        }
        fatent->u.ent32_p = NULL;
        return 0;
}

static struct fatent_operations fat12_ops = {
        .ent_blocknr    = fat12_ent_blocknr,
        .ent_set_ptr    = fat12_ent_set_ptr,
        .ent_bread      = fat12_ent_bread,
        .ent_get        = fat12_ent_get,
        .ent_put        = fat12_ent_put,
        .ent_next       = fat12_ent_next,
};

static struct fatent_operations fat16_ops = {
        .ent_blocknr    = fat_ent_blocknr,
        .ent_set_ptr    = fat16_ent_set_ptr,
        .ent_bread      = fat_ent_bread,
        .ent_get        = fat16_ent_get,
        .ent_put        = fat16_ent_put,
        .ent_next       = fat16_ent_next,
};

static struct fatent_operations fat32_ops = {
        .ent_blocknr    = fat_ent_blocknr,
        .ent_set_ptr    = fat32_ent_set_ptr,
        .ent_bread      = fat_ent_bread,
        .ent_get        = fat32_ent_get,
        .ent_put        = fat32_ent_put,
        .ent_next       = fat32_ent_next,
};

static inline void lock_fat(struct msdos_sb_info *sbi)
{
        mutex_lock(&sbi->fat_lock);
}

static inline void unlock_fat(struct msdos_sb_info *sbi)
{
        mutex_unlock(&sbi->fat_lock);
}

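/*
 * Pick the operations table and entry-size shift that match the FAT
 * width of this filesystem.
 */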
void fat_ent_access_init(struct super_block *sb)
{
        struct msdos_sb_info *sbi = MSDOS_SB(sb);

        mutex_init(&sbi->fat_lock);

        switch (sbi->fat_bits) {
        case 32:
                sbi->fatent_shift = 2;
                sbi->fatent_ops = &fat32_ops;
                break;
        case 16:
                sbi->fatent_shift = 1;
                sbi->fatent_ops = &fat16_ops;
                break;
        case 12:
                sbi->fatent_shift = -1;
                sbi->fatent_ops = &fat12_ops;
                break;
        }
}

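/*
 * If the blocks already held by this fatent contain the requested
 * blocknr/offset, just repoint it there and return 1; otherwise return
 * 0 so the caller reads the block(s) from disk.
 */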
static inline int fat_ent_update_ptr(struct super_block *sb,
                                     struct fat_entry *fatent,
                                     int offset, sector_t blocknr)
{
        struct msdos_sb_info *sbi = MSDOS_SB(sb);
        struct fatent_operations *ops = sbi->fatent_ops;
        struct buffer_head **bhs = fatent->bhs;

        /* Do this fatent's cached blocks include this entry? */
        if (!fatent->nr_bhs || bhs[0]->b_blocknr != blocknr)
                return 0;
        /* Does this entry need the next block? */
        if (sbi->fat_bits == 12 && (offset + 1) >= sb->s_blocksize) {
                if (fatent->nr_bhs != 2 || bhs[1]->b_blocknr != (blocknr + 1))
                        return 0;
        }
        ops->ent_set_ptr(fatent, offset);
        return 1;
}

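/*
 * Read the FAT entry for "entry" and return its value (FAT_ENT_EOF or a
 * negative error included), reusing the blocks already cached in fatent
 * when possible.
 */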
int fat_ent_read(struct inode *inode, struct fat_entry *fatent, int entry)
{
        struct super_block *sb = inode->i_sb;
        struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
        struct fatent_operations *ops = sbi->fatent_ops;
        int err, offset;
        sector_t blocknr;

        if (entry < FAT_START_ENT || sbi->max_cluster <= entry) {
                fatent_brelse(fatent);
                fat_fs_panic(sb, "invalid access to FAT (entry 0x%08x)", entry);
                return -EIO;
        }

        fatent_set_entry(fatent, entry);
        ops->ent_blocknr(sb, entry, &offset, &blocknr);

        if (!fat_ent_update_ptr(sb, fatent, offset, blocknr)) {
                fatent_brelse(fatent);
                err = ops->ent_bread(sb, fatent, offset, blocknr);
                if (err)
                        return err;
        }
        return ops->ent_get(fatent);
}

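/*
 * Copy the dirtied FAT blocks to every backup copy of the FAT.
 */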
/* FIXME: We could write the blocks in bigger chunks. */
static int fat_mirror_bhs(struct super_block *sb, struct buffer_head **bhs,
                          int nr_bhs)
{
        struct msdos_sb_info *sbi = MSDOS_SB(sb);
        struct buffer_head *c_bh;
        int err, n, copy;

        err = 0;
        for (copy = 1; copy < sbi->fats; copy++) {
                sector_t backup_fat = sbi->fat_length * copy;

                for (n = 0; n < nr_bhs; n++) {
                        c_bh = sb_getblk(sb, backup_fat + bhs[n]->b_blocknr);
                        if (!c_bh) {
                                err = -ENOMEM;
                                goto error;
                        }
                        memcpy(c_bh->b_data, bhs[n]->b_data, sb->s_blocksize);
                        set_buffer_uptodate(c_bh);
                        mark_buffer_dirty(c_bh);
                        if (sb->s_flags & MS_SYNCHRONOUS)
                                err = sync_dirty_buffer(c_bh);
                        brelse(c_bh);
                        if (err)
                                goto error;
                }
        }
error:
        return err;
}

int fat_ent_write(struct inode *inode, struct fat_entry *fatent,
                  int new, int wait)
{
        struct super_block *sb = inode->i_sb;
        struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
        int err;

        ops->ent_put(fatent, new);
        if (wait) {
                err = fat_sync_bhs(fatent->bhs, fatent->nr_bhs);
                if (err)
                        return err;
        }
        return fat_mirror_bhs(sb, fatent->bhs, fatent->nr_bhs);
}

static inline int fat_ent_next(struct msdos_sb_info *sbi,
                               struct fat_entry *fatent)
{
        if (sbi->fatent_ops->ent_next(fatent)) {
                if (fatent->entry < sbi->max_cluster)
                        return 1;
        }
        return 0;
}

static inline int fat_ent_read_block(struct super_block *sb,
                                     struct fat_entry *fatent)
{
        struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
        sector_t blocknr;
        int offset;

        fatent_brelse(fatent);
        ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr);
        return ops->ent_bread(sb, fatent, offset, blocknr);
}

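/*
 * Add fatent's buffer heads to the bhs[] array (taking a reference),
 * skipping any that have already been collected.
 */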
static void fat_collect_bhs(struct buffer_head **bhs, int *nr_bhs,
                            struct fat_entry *fatent)
{
        int n, i;

        for (n = 0; n < fatent->nr_bhs; n++) {
                for (i = 0; i < *nr_bhs; i++) {
                        if (fatent->bhs[n] == bhs[i])
                                break;
                }
                if (i == *nr_bhs) {
                        get_bh(fatent->bhs[n]);
                        bhs[i] = fatent->bhs[n];
                        (*nr_bhs)++;
                }
        }
}

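/*
 * Allocate nr_cluster free clusters, chaining them together and ending
 * the chain with FAT_ENT_EOF.  The allocated cluster numbers are
 * returned in cluster[]; on failure, any partially built chain is freed.
 */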
int fat_alloc_clusters(struct inode *inode, int *cluster, int nr_cluster)
{
        struct super_block *sb = inode->i_sb;
        struct msdos_sb_info *sbi = MSDOS_SB(sb);
        struct fatent_operations *ops = sbi->fatent_ops;
        struct fat_entry fatent, prev_ent;
        struct buffer_head *bhs[MAX_BUF_PER_PAGE];
        int i, count, err, nr_bhs, idx_clus;

        BUG_ON(nr_cluster > (MAX_BUF_PER_PAGE / 2));    /* fixed limit */

        lock_fat(sbi);
        if (sbi->free_clusters != -1 && sbi->free_clus_valid &&
            sbi->free_clusters < nr_cluster) {
                unlock_fat(sbi);
                return -ENOSPC;
        }

        err = nr_bhs = idx_clus = 0;
        count = FAT_START_ENT;
        fatent_init(&prev_ent);
        fatent_init(&fatent);
        fatent_set_entry(&fatent, sbi->prev_free + 1);
        while (count < sbi->max_cluster) {
                if (fatent.entry >= sbi->max_cluster)
                        fatent.entry = FAT_START_ENT;
                fatent_set_entry(&fatent, fatent.entry);
                err = fat_ent_read_block(sb, &fatent);
                if (err)
                        goto out;

                /* Find the free entries in a block */
                do {
                        if (ops->ent_get(&fatent) == FAT_ENT_FREE) {
                                int entry = fatent.entry;

                                /* make the cluster chain */
                                ops->ent_put(&fatent, FAT_ENT_EOF);
                                if (prev_ent.nr_bhs)
                                        ops->ent_put(&prev_ent, entry);

                                fat_collect_bhs(bhs, &nr_bhs, &fatent);

                                sbi->prev_free = entry;
                                if (sbi->free_clusters != -1)
                                        sbi->free_clusters--;
                                sb->s_dirt = 1;

                                cluster[idx_clus] = entry;
                                idx_clus++;
                                if (idx_clus == nr_cluster)
                                        goto out;

                                /*
                                 * fat_collect_bhs() takes a reference on
                                 * the bhs, so we can still use prev_ent.
                                 */
                                prev_ent = fatent;
                        }
                        count++;
                        if (count == sbi->max_cluster)
                                break;
                } while (fat_ent_next(sbi, &fatent));
        }

        /* Couldn't find enough free entries */
        sbi->free_clusters = 0;
        sbi->free_clus_valid = 1;
        sb->s_dirt = 1;
        err = -ENOSPC;

out:
        unlock_fat(sbi);
        fatent_brelse(&fatent);
        if (!err) {
                if (inode_needs_sync(inode))
                        err = fat_sync_bhs(bhs, nr_bhs);
                if (!err)
                        err = fat_mirror_bhs(sb, bhs, nr_bhs);
        }
        for (i = 0; i < nr_bhs; i++)
                brelse(bhs[i]);

        if (err && idx_clus)
                fat_free_clusters(inode, cluster[0]);

        return err;
}

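/*
 * Walk the cluster chain starting at "cluster" and mark every entry
 * free, mirroring and (if required) syncing the modified FAT blocks
 * along the way.
 */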
int fat_free_clusters(struct inode *inode, int cluster)
{
        struct super_block *sb = inode->i_sb;
        struct msdos_sb_info *sbi = MSDOS_SB(sb);
        struct fatent_operations *ops = sbi->fatent_ops;
        struct fat_entry fatent;
        struct buffer_head *bhs[MAX_BUF_PER_PAGE];
        int i, err, nr_bhs;

        nr_bhs = 0;
        fatent_init(&fatent);
        lock_fat(sbi);
        do {
                cluster = fat_ent_read(inode, &fatent, cluster);
                if (cluster < 0) {
                        err = cluster;
                        goto error;
                } else if (cluster == FAT_ENT_FREE) {
                        fat_fs_panic(sb, "%s: deleting FAT entry beyond EOF",
                                     __func__);
                        err = -EIO;
                        goto error;
                }

                ops->ent_put(&fatent, FAT_ENT_FREE);
                if (sbi->free_clusters != -1) {
                        sbi->free_clusters++;
                        sb->s_dirt = 1;
                }

                if (nr_bhs + fatent.nr_bhs > MAX_BUF_PER_PAGE) {
                        if (sb->s_flags & MS_SYNCHRONOUS) {
                                err = fat_sync_bhs(bhs, nr_bhs);
                                if (err)
                                        goto error;
                        }
                        err = fat_mirror_bhs(sb, bhs, nr_bhs);
                        if (err)
                                goto error;
                        for (i = 0; i < nr_bhs; i++)
                                brelse(bhs[i]);
                        nr_bhs = 0;
                }
                fat_collect_bhs(bhs, &nr_bhs, &fatent);
        } while (cluster != FAT_ENT_EOF);

        if (sb->s_flags & MS_SYNCHRONOUS) {
                err = fat_sync_bhs(bhs, nr_bhs);
                if (err)
                        goto error;
        }
        err = fat_mirror_bhs(sb, bhs, nr_bhs);
error:
        fatent_brelse(&fatent);
        for (i = 0; i < nr_bhs; i++)
                brelse(bhs[i]);
        unlock_fat(sbi);

        return err;
}

EXPORT_SYMBOL_GPL(fat_free_clusters);

/* 128kb is enough to cover the whole FAT for FAT12 and FAT16 */
#define FAT_READA_SIZE          (128 * 1024)

static void fat_ent_reada(struct super_block *sb, struct fat_entry *fatent,
                          unsigned long reada_blocks)
{
        struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
        sector_t blocknr;
        int i, offset;

        ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr);

        for (i = 0; i < reada_blocks; i++)
                sb_breadahead(sb, blocknr + i);
}

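/*
 * Scan the whole FAT to count the free clusters, using readahead, and
 * cache the result in the superblock info.
 */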
int fat_count_free_clusters(struct super_block *sb)
{
        struct msdos_sb_info *sbi = MSDOS_SB(sb);
        struct fatent_operations *ops = sbi->fatent_ops;
        struct fat_entry fatent;
        unsigned long reada_blocks, reada_mask, cur_block;
        int err = 0, free;

        lock_fat(sbi);
        if (sbi->free_clusters != -1 && sbi->free_clus_valid)
                goto out;

        reada_blocks = FAT_READA_SIZE >> sb->s_blocksize_bits;
        reada_mask = reada_blocks - 1;
        cur_block = 0;

        free = 0;
        fatent_init(&fatent);
        fatent_set_entry(&fatent, FAT_START_ENT);
        while (fatent.entry < sbi->max_cluster) {
                /* readahead of fat blocks */
                if ((cur_block & reada_mask) == 0) {
                        unsigned long rest = sbi->fat_length - cur_block;
                        fat_ent_reada(sb, &fatent, min(reada_blocks, rest));
                }
                cur_block++;

                err = fat_ent_read_block(sb, &fatent);
                if (err)
                        goto out;

                do {
                        if (ops->ent_get(&fatent) == FAT_ENT_FREE)
                                free++;
                } while (fat_ent_next(sbi, &fatent));
        }
        sbi->free_clusters = free;
        sbi->free_clus_valid = 1;
        sb->s_dirt = 1;
        fatent_brelse(&fatent);
out:
        unlock_fat(sbi);
        return err;
}