fs/jffs2/nodemgmt.c
1 /*
2  * JFFS2 -- Journalling Flash File System, Version 2.
3  *
4  * Copyright © 2001-2007 Red Hat, Inc.
5  *
6  * Created by David Woodhouse <dwmw2@infradead.org>
7  *
8  * For licensing information, see the file 'LICENCE' in this directory.
9  *
10  */
11
12 #include <linux/kernel.h>
13 #include <linux/slab.h>
14 #include <linux/mtd/mtd.h>
15 #include <linux/compiler.h>
16 #include <linux/sched.h> /* For cond_resched() */
17 #include "nodelist.h"
18 #include "debug.h"
19
20 /**
21  *      jffs2_reserve_space - request physical space to write nodes to flash
22  *      @c: superblock info
23  *      @minsize: Minimum acceptable size of allocation
24  *      @len: Returned value of allocation length
25  *      @prio: Allocation type - ALLOC_{NORMAL,DELETION}
26  *
27  *      Requests a block of physical space on the flash. Returns zero for success
28  *      and puts 'len' into the appropriate place, or returns -ENOSPC or other 
29  *      error if appropriate.
30  *
31  *      If it returns zero, jffs2_reserve_space() also downs the per-filesystem
32  *      allocation semaphore, to prevent more than one allocation from being
33  *      active at any time. The semaphore is later released by jffs2_complete_reservation()
34  *
35  *      jffs2_reserve_space() may trigger garbage collection in order to make room
36  *      for the requested allocation.
37  */
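/*
 * Illustrative caller pattern (a sketch only; 'totlen', 'flash_ofs' and 'f'
 * are placeholder names and error handling is trimmed -- see the real
 * callers in write.c and gc.c):
 *
 *	ret = jffs2_reserve_space(c, totlen, &alloclen,
 *				  ALLOC_NORMAL, JFFS2_SUMMARY_NOSUM_SIZE);
 *	if (ret)
 *		return ret;
 *	... write the node at the head of c->nextblock, then report it
 *	and drop the allocation lock:
 *	jffs2_add_physical_node_ref(c, flash_ofs | REF_NORMAL,
 *				    PAD(totlen), f->inocache);
 *	jffs2_complete_reservation(c);
 */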
38
39 static int jffs2_do_reserve_space(struct jffs2_sb_info *c,  uint32_t minsize,
40                                   uint32_t *len, uint32_t sumsize);
41
42 int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
43                         uint32_t *len, int prio, uint32_t sumsize)
44 {
45         int ret = -EAGAIN;
46         int blocksneeded = c->resv_blocks_write;
47         /* align it */
48         minsize = PAD(minsize);
49
50         D1(printk(KERN_DEBUG "jffs2_reserve_space(): Requested 0x%x bytes\n", minsize));
51         mutex_lock(&c->alloc_sem);
52
53         D1(printk(KERN_DEBUG "jffs2_reserve_space(): alloc sem got\n"));
54
55         spin_lock(&c->erase_completion_lock);
56
57         /* this needs a little more thought (true <tglx> :)) */
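        /* Outer loop: retry the actual reservation for as long as it returns
           -EAGAIN. Inner loop: while there are not enough free/erasing blocks,
           either give up with -ENOSPC or run garbage-collection passes
           (dropping the locks around each pass) until enough space appears. */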
58         while(ret == -EAGAIN) {
59                 while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
60                         uint32_t dirty, avail;
61
 62                         /* Calculate the real dirty size.
 63                          * dirty_size includes blocks on the erase_pending_list;
 64                          * those blocks are also counted in c->nr_erasing_blocks.
 65                          * Once a block is actually erased it is no longer counted as dirty_space,
 66                          * but it is still counted in c->nr_erasing_blocks, so we add the erasing size
 67                          * and subtract c->nr_erasing_blocks * c->sector_size again.
 68                          * Blocks on the erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks.
 69                          * This helps us to force gc and eventually pick a clean block, to spread the load.
 70                          * We add unchecked_size here, as we hopefully will find some space to use there.
 71                          * This affects the sum only once, as gc finishes checking
 72                          * the unchecked nodes first.
 73                          */
74                         dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
75                         if (dirty < c->nospc_dirty_size) {
76                                 if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
77                                         D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on dirty space to GC, but it's a deletion. Allowing...\n"));
78                                         break;
79                                 }
80                                 D1(printk(KERN_DEBUG "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
 81                                           dirty, c->unchecked_size, c->nospc_dirty_size));
82
83                                 spin_unlock(&c->erase_completion_lock);
84                                 mutex_unlock(&c->alloc_sem);
85                                 return -ENOSPC;
86                         }
87
 88                         /* Calculate the possibly available space. "Possibly available" means that we
 89                          * don't know whether the unchecked size contains obsoleted nodes, which could
 90                          * give us some more usable space. This affects the sum only once, as gc
 91                          * finishes checking the unchecked nodes first.
 92                          * Return -ENOSPC if the maximum possibly available space is less than or
 93                          * equal to blocksneeded * sector_size.
 94                          * This prevents endless gc looping on a filesystem which is nearly full,
 95                          * even if the check above passes.
 96                          */
97                         avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
98                         if ( (avail / c->sector_size) <= blocksneeded) {
99                                 if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
100                                         D1(printk(KERN_NOTICE "jffs2_reserve_space(): Low on possibly available space, but it's a deletion. Allowing...\n"));
101                                         break;
102                                 }
103
104                                 D1(printk(KERN_DEBUG "max. available size 0x%08x  < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
105                                           avail, blocksneeded * c->sector_size));
106                                 spin_unlock(&c->erase_completion_lock);
107                                 mutex_unlock(&c->alloc_sem);
108                                 return -ENOSPC;
109                         }
110
111                         mutex_unlock(&c->alloc_sem);
112
113                         D1(printk(KERN_DEBUG "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
114                                   c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size,
115                                   c->free_size + c->dirty_size + c->wasted_size + c->used_size + c->erasing_size + c->bad_size, c->flash_size));
116                         spin_unlock(&c->erase_completion_lock);
117
118                         ret = jffs2_garbage_collect_pass(c);
119
120                         if (ret == -EAGAIN)
121                                 jffs2_erase_pending_blocks(c, 1);
122                         else if (ret)
123                                 return ret;
124
125                         cond_resched();
126
127                         if (signal_pending(current))
128                                 return -EINTR;
129
130                         mutex_lock(&c->alloc_sem);
131                         spin_lock(&c->erase_completion_lock);
132                 }
133
134                 ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
135                 if (ret) {
136                         D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret));
137                 }
138         }
139         spin_unlock(&c->erase_completion_lock);
140         if (!ret)
141                 ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
142         if (ret)
143                 mutex_unlock(&c->alloc_sem);
144         return ret;
145 }
146
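/* Variant of jffs2_reserve_space() used by the garbage collector itself.
   The caller is expected to hold c->alloc_sem already, and we must not
   trigger another GC pass from here, so we simply retry
   jffs2_do_reserve_space() until it stops returning -EAGAIN. */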
147 int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize,
148                            uint32_t *len, uint32_t sumsize)
149 {
150         int ret = -EAGAIN;
151         minsize = PAD(minsize);
152
153         D1(printk(KERN_DEBUG "jffs2_reserve_space_gc(): Requested 0x%x bytes\n", minsize));
154
155         spin_lock(&c->erase_completion_lock);
156         while(ret == -EAGAIN) {
157                 ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
158                 if (ret) {
159                         D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret));
160                 }
161         }
162         spin_unlock(&c->erase_completion_lock);
163         if (!ret)
164                 ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
165
166         return ret;
167 }
168
169
170 /* Classify nextblock (clean, dirty or verydirty) and force selection of another one */
171
172 static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
173 {
174
175         if (c->nextblock == NULL) {
176                 D1(printk(KERN_DEBUG "jffs2_close_nextblock: Erase block at 0x%08x has already been placed in a list\n",
177                   jeb->offset));
178                 return;
179         }
180         /* Check whether we have a dirty block now, or whether it was dirty already */
181         if (ISDIRTY (jeb->wasted_size + jeb->dirty_size)) {
182                 c->dirty_size += jeb->wasted_size;
183                 c->wasted_size -= jeb->wasted_size;
184                 jeb->dirty_size += jeb->wasted_size;
185                 jeb->wasted_size = 0;
186                 if (VERYDIRTY(c, jeb->dirty_size)) {
187                         D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
188                           jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
189                         list_add_tail(&jeb->list, &c->very_dirty_list);
190                 } else {
191                         D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
192                           jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
193                         list_add_tail(&jeb->list, &c->dirty_list);
194                 }
195         } else {
196                 D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
197                   jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
198                 list_add_tail(&jeb->list, &c->clean_list);
199         }
200         c->nextblock = NULL;
201
202 }
203
204 /* Select a new jeb for nextblock */
205
206 static int jffs2_find_nextblock(struct jffs2_sb_info *c)
207 {
208         struct list_head *next;
209
210         /* Take the next block off the 'free' list */
211
212         if (list_empty(&c->free_list)) {
213
214                 if (!c->nr_erasing_blocks &&
215                         !list_empty(&c->erasable_list)) {
216                         struct jffs2_eraseblock *ejeb;
217
218                         ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
219                         list_move_tail(&ejeb->list, &c->erase_pending_list);
220                         c->nr_erasing_blocks++;
221                         jffs2_erase_pending_trigger(c);
222                         D1(printk(KERN_DEBUG "jffs2_find_nextblock: Triggering erase of erasable block at 0x%08x\n",
223                                   ejeb->offset));
224                 }
225
226                 if (!c->nr_erasing_blocks &&
227                         !list_empty(&c->erasable_pending_wbuf_list)) {
228                         D1(printk(KERN_DEBUG "jffs2_find_nextblock: Flushing write buffer\n"));
229                         /* c->nextblock is NULL, no update to c->nextblock allowed */
230                         spin_unlock(&c->erase_completion_lock);
231                         jffs2_flush_wbuf_pad(c);
232                         spin_lock(&c->erase_completion_lock);
233                         /* Have another go. It'll be on the erasable_list now */
234                         return -EAGAIN;
235                 }
236
237                 if (!c->nr_erasing_blocks) {
238                         /* Ouch. We're in GC, or we wouldn't have got here.
239                            And there's no space left. At all. */
240                         printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
241                                    c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no",
242                                    list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no");
243                         return -ENOSPC;
244                 }
245
246                 spin_unlock(&c->erase_completion_lock);
247                 /* Don't wait for it; just erase one right now */
248                 jffs2_erase_pending_blocks(c, 1);
249                 spin_lock(&c->erase_completion_lock);
250
251                 /* An erase may have failed, decreasing the
252                    amount of free space available. So we must
253                    restart from the beginning */
254                 return -EAGAIN;
255         }
256
257         next = c->free_list.next;
258         list_del(next);
259         c->nextblock = list_entry(next, struct jffs2_eraseblock, list);
260         c->nr_free_blocks--;
261
262         jffs2_sum_reset_collected(c->summary); /* reset collected summary */
263
264 #ifdef CONFIG_JFFS2_FS_WRITEBUFFER
265         /* adjust write buffer offset, else we get a non-contiguous write bug */
266         if (!(c->wbuf_ofs % c->sector_size) && !c->wbuf_len)
267                 c->wbuf_ofs = 0xffffffff;
268 #endif
269
270         D1(printk(KERN_DEBUG "jffs2_find_nextblock(): new nextblock = 0x%08x\n", c->nextblock->offset));
271
272         return 0;
273 }
274
275 /* Called with alloc sem _and_ erase_completion_lock */
276 static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
277                                   uint32_t *len, uint32_t sumsize)
278 {
279         struct jffs2_eraseblock *jeb = c->nextblock;
280         uint32_t reserved_size;                         /* for summary information at the end of the jeb */
281         int ret;
282
283  restart:
284         reserved_size = 0;
285
286         if (jffs2_sum_active() && (sumsize != JFFS2_SUMMARY_NOSUM_SIZE)) {
287                                                         /* NOSUM_SIZE means not to generate summary */
288
289                 if (jeb) {
290                         reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
291                         dbg_summary("minsize=%d , jeb->free=%d ,"
292                                                 "summary->size=%d , sumsize=%d\n",
293                                                 minsize, jeb->free_size,
294                                                 c->summary->sum_size, sumsize);
295                 }
296
297                 /* Is there enough space for writing out the current node, or do we have to
298                    write out the summary information now, close this jeb and select a new nextblock? */
299                 if (jeb && (PAD(minsize) + PAD(c->summary->sum_size + sumsize +
300                                         JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size)) {
301
302                         /* Has summary been disabled for this jeb? */
303                         if (jffs2_sum_is_disabled(c->summary)) {
304                                 sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
305                                 goto restart;
306                         }
307
308                         /* Writing out the collected summary information */
309                         dbg_summary("generating summary for 0x%08x.\n", jeb->offset);
310                         ret = jffs2_sum_write_sumnode(c);
311
312                         if (ret)
313                                 return ret;
314
315                         if (jffs2_sum_is_disabled(c->summary)) {
316                                 /* jffs2_sum_write_sumnode() couldn't write out the summary information;
317                                    disable summary for this jeb and free the collected information.
318                                  */
319                                 sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
320                                 goto restart;
321                         }
322
323                         jffs2_close_nextblock(c, jeb);
324                         jeb = NULL;
325                         /* always keep a valid value in reserved_size */
326                         reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
327                 }
328         } else {
329                 if (jeb && minsize > jeb->free_size) {
330                         uint32_t waste;
331
332                         /* Skip the end of this block and file it as having some dirty space */
333                         /* If there's a pending write to it, flush now */
334
335                         if (jffs2_wbuf_dirty(c)) {
336                                 spin_unlock(&c->erase_completion_lock);
337                                 D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
338                                 jffs2_flush_wbuf_pad(c);
339                                 spin_lock(&c->erase_completion_lock);
340                                 jeb = c->nextblock;
341                                 goto restart;
342                         }
343
344                         spin_unlock(&c->erase_completion_lock);
345
346                         ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);
347                         if (ret)
348                                 return ret;
349                         /* Just lock it again and continue. Nothing much can change because
350                            we hold c->alloc_sem anyway. In fact, it's not entirely clear why
351                            we hold c->erase_completion_lock in the majority of this function...
352                            but that's a question for another (more caffeine-rich) day. */
353                         spin_lock(&c->erase_completion_lock);
354
355                         waste = jeb->free_size;
356                         jffs2_link_node_ref(c, jeb,
357                                             (jeb->offset + c->sector_size - waste) | REF_OBSOLETE,
358                                             waste, NULL);
359                         /* FIXME: that made it count as dirty. Convert to wasted */
360                         jeb->dirty_size -= waste;
361                         c->dirty_size -= waste;
362                         jeb->wasted_size += waste;
363                         c->wasted_size += waste;
364
365                         jffs2_close_nextblock(c, jeb);
366                         jeb = NULL;
367                 }
368         }
369
370         if (!jeb) {
371
372                 ret = jffs2_find_nextblock(c);
373                 if (ret)
374                         return ret;
375
376                 jeb = c->nextblock;
377
378                 if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
379                         printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size);
380                         goto restart;
381                 }
382         }
383         /* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
384            enough space */
385         *len = jeb->free_size - reserved_size;
386
387         if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
388             !jeb->first_node->next_in_ino) {
389                 /* Only node in it beforehand was a CLEANMARKER node (we think).
390                    So mark it obsolete now that there's going to be another node
391                    in the block. This will reduce used_size to zero, but we've
392                    already set c->nextblock so that jffs2_mark_node_obsolete()
393                    won't try to refile it to the dirty_list.
394                 */
395                 spin_unlock(&c->erase_completion_lock);
396                 jffs2_mark_node_obsolete(c, jeb->first_node);
397                 spin_lock(&c->erase_completion_lock);
398         }
399
400         D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n",
401                   *len, jeb->offset + (c->sector_size - jeb->free_size)));
402         return 0;
403 }
404
405 /**
406  *      jffs2_add_physical_node_ref - add a physical node reference to the list
407  *      @c: superblock info
408  *      @ofs: flash offset of the new node, with the REF_* status in its low two bits
409  *      @len: length of this physical node
410  *
411  *      Should only be used to report nodes for which space has been allocated
412  *      by jffs2_reserve_space.
413  *
414  *      Must be called with the alloc_sem held.
415  */
416
417 struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c,
418                                                        uint32_t ofs, uint32_t len,
419                                                        struct jffs2_inode_cache *ic)
420 {
421         struct jffs2_eraseblock *jeb;
422         struct jffs2_raw_node_ref *new;
423
424         jeb = &c->blocks[ofs / c->sector_size];
425
426         D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n",
427                   ofs & ~3, ofs & 3, len));
428 #if 1
429         /* Only allow non-obsolete nodes to be added at the end of c->nextblock,
430            if c->nextblock is set. Note that wbuf.c will file obsolete nodes
431            even after refiling c->nextblock */
432         if ((c->nextblock || ((ofs & 3) != REF_OBSOLETE))
433             && (jeb != c->nextblock || (ofs & ~3) != jeb->offset + (c->sector_size - jeb->free_size))) {
434                 printk(KERN_WARNING "argh. node added in wrong place at 0x%08x(%d)\n", ofs & ~3, ofs & 3);
435                 if (c->nextblock)
436                         printk(KERN_WARNING "nextblock 0x%08x", c->nextblock->offset);
437                 else
438                         printk(KERN_WARNING "No nextblock");
439                 printk(", expected at %08x\n", jeb->offset + (c->sector_size - jeb->free_size));
440                 return ERR_PTR(-EINVAL);
441         }
442 #endif
443         spin_lock(&c->erase_completion_lock);
444
445         new = jffs2_link_node_ref(c, jeb, ofs, len, ic);
446
447         if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) {
448                 /* If it lives on the dirty_list, jffs2_reserve_space will put it there */
449                 D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
450                           jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
451                 if (jffs2_wbuf_dirty(c)) {
452                         /* Flush the last write in the block if it's outstanding */
453                         spin_unlock(&c->erase_completion_lock);
454                         jffs2_flush_wbuf_pad(c);
455                         spin_lock(&c->erase_completion_lock);
456                 }
457
458                 list_add_tail(&jeb->list, &c->clean_list);
459                 c->nextblock = NULL;
460         }
461         jffs2_dbg_acct_sanity_check_nolock(c,jeb);
462         jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
463
464         spin_unlock(&c->erase_completion_lock);
465
466         return new;
467 }
468
469
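/* Release the allocation semaphore taken by jffs2_reserve_space() and poke
   the garbage-collection thread, since the amount of free/dirty space may
   have changed. */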
470 void jffs2_complete_reservation(struct jffs2_sb_info *c)
471 {
472         D1(printk(KERN_DEBUG "jffs2_complete_reservation()\n"));
473         jffs2_garbage_collect_trigger(c);
474         mutex_unlock(&c->alloc_sem);
475 }
476
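/* Return 1 if @obj is currently linked on the list headed by @head.
   Used below only to check membership of the bad_used_list. */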
477 static inline int on_list(struct list_head *obj, struct list_head *head)
478 {
479         struct list_head *this;
480
481         list_for_each(this, head) {
482                 if (this == obj) {
483                         D1(printk("%p is on list at %p\n", obj, head));
484                         return 1;
485
486                 }
487         }
488         return 0;
489 }
490
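/* Mark the node referenced by @ref as obsolete: update the per-block and
   per-filesystem accounting, refile the eraseblock onto the appropriate list
   if necessary, and, where the flash type and mount state allow it, clear
   the ACCURATE bit in the node header on the medium so that a later scan
   will ignore the node. */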
491 void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
492 {
493         struct jffs2_eraseblock *jeb;
494         int blocknr;
495         struct jffs2_unknown_node n;
496         int ret, addedsize;
497         size_t retlen;
498         uint32_t freed_len;
499
500         if(unlikely(!ref)) {
501                 printk(KERN_NOTICE "EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
502                 return;
503         }
504         if (ref_obsolete(ref)) {
505                 D1(printk(KERN_DEBUG "jffs2_mark_node_obsolete called with already obsolete node at 0x%08x\n", ref_offset(ref)));
506                 return;
507         }
508         blocknr = ref->flash_offset / c->sector_size;
509         if (blocknr >= c->nr_blocks) {
510                 printk(KERN_NOTICE "raw node at 0x%08x is off the end of device!\n", ref->flash_offset);
511                 BUG();
512         }
513         jeb = &c->blocks[blocknr];
514
515         if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) &&
516             !(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) {
517                 /* Hm. This may confuse static lock analysis. If any of the above
518                    three conditions is false, we're going to return from this
519                    function without actually obliterating any nodes or freeing
520                    any jffs2_raw_node_refs. So we don't need to stop erases from
521                    happening, or protect against people holding an obsolete
522                    jffs2_raw_node_ref without the erase_completion_lock. */
523                 mutex_lock(&c->erase_free_sem);
524         }
525
526         spin_lock(&c->erase_completion_lock);
527
528         freed_len = ref_totlen(c, jeb, ref);
529
530         if (ref_flags(ref) == REF_UNCHECKED) {
531                 D1(if (unlikely(jeb->unchecked_size < freed_len)) {
532                         printk(KERN_NOTICE "raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
533                                freed_len, blocknr, ref->flash_offset, jeb->unchecked_size);
534                         BUG();
535                 })
536                 D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %x: ", ref_offset(ref), freed_len));
537                 jeb->unchecked_size -= freed_len;
538                 c->unchecked_size -= freed_len;
539         } else {
540                 D1(if (unlikely(jeb->used_size < freed_len)) {
541                         printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
542                                freed_len, blocknr, ref->flash_offset, jeb->used_size);
543                         BUG();
544                 })
545                 D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %#x: ", ref_offset(ref), freed_len));
546                 jeb->used_size -= freed_len;
547                 c->used_size -= freed_len;
548         }
549
550         // Take care that the wasted size is taken into account
551         if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + freed_len)) && jeb != c->nextblock) {
552                 D1(printk("Dirtying\n"));
553                 addedsize = freed_len;
554                 jeb->dirty_size += freed_len;
555                 c->dirty_size += freed_len;
556
557                 /* Convert wasted space to dirty, if not a bad block */
558                 if (jeb->wasted_size) {
559                         if (on_list(&jeb->list, &c->bad_used_list)) {
560                                 D1(printk(KERN_DEBUG "Leaving block at %08x on the bad_used_list\n",
561                                           jeb->offset));
562                                 addedsize = 0; /* To fool the refiling code later */
563                         } else {
564                                 D1(printk(KERN_DEBUG "Converting %d bytes of wasted space to dirty in block at %08x\n",
565                                           jeb->wasted_size, jeb->offset));
566                                 addedsize += jeb->wasted_size;
567                                 jeb->dirty_size += jeb->wasted_size;
568                                 c->dirty_size += jeb->wasted_size;
569                                 c->wasted_size -= jeb->wasted_size;
570                                 jeb->wasted_size = 0;
571                         }
572                 }
573         } else {
574                 D1(printk("Wasting\n"));
575                 addedsize = 0;
576                 jeb->wasted_size += freed_len;
577                 c->wasted_size += freed_len;
578         }
579         ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;
580
581         jffs2_dbg_acct_sanity_check_nolock(c, jeb);
582         jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
583
584         if (c->flags & JFFS2_SB_FLAG_SCANNING) {
585                 /* Flash scanning is in progress. Don't muck about with the block
586                    lists because they're not ready yet, and don't actually
587                    obliterate nodes that look obsolete. If they weren't
588                    marked obsolete on the flash at the time they _became_
589                    obsolete, there was probably a reason for that. */
590                 spin_unlock(&c->erase_completion_lock);
591                 /* We didn't lock the erase_free_sem */
592                 return;
593         }
594
595         if (jeb == c->nextblock) {
596                 D2(printk(KERN_DEBUG "Not moving nextblock 0x%08x to dirty/erase_pending list\n", jeb->offset));
597         } else if (!jeb->used_size && !jeb->unchecked_size) {
598                 if (jeb == c->gcblock) {
599                         D1(printk(KERN_DEBUG "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n", jeb->offset));
600                         c->gcblock = NULL;
601                 } else {
602                         D1(printk(KERN_DEBUG "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n", jeb->offset));
603                         list_del(&jeb->list);
604                 }
605                 if (jffs2_wbuf_dirty(c)) {
606                         D1(printk(KERN_DEBUG "...and adding to erasable_pending_wbuf_list\n"));
607                         list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
608                 } else {
609                         if (jiffies & 127) {
610                                 /* Most of the time, we just erase it immediately. Otherwise we
611                                    spend ages scanning it on mount, etc. */
612                                 D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
613                                 list_add_tail(&jeb->list, &c->erase_pending_list);
614                                 c->nr_erasing_blocks++;
615                                 jffs2_erase_pending_trigger(c);
616                         } else {
617                                 /* Sometimes, however, we leave it elsewhere so it doesn't get
618                                    immediately reused, and we spread the load a bit. */
619                                 D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
620                                 list_add_tail(&jeb->list, &c->erasable_list);
621                         }
622                 }
623                 D1(printk(KERN_DEBUG "Done OK\n"));
624         } else if (jeb == c->gcblock) {
625                 D2(printk(KERN_DEBUG "Not moving gcblock 0x%08x to dirty_list\n", jeb->offset));
626         } else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
627                 D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n", jeb->offset));
628                 list_del(&jeb->list);
629                 D1(printk(KERN_DEBUG "...and adding to dirty_list\n"));
630                 list_add_tail(&jeb->list, &c->dirty_list);
631         } else if (VERYDIRTY(c, jeb->dirty_size) &&
632                    !VERYDIRTY(c, jeb->dirty_size - addedsize)) {
633                 D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n", jeb->offset));
634                 list_del(&jeb->list);
635                 D1(printk(KERN_DEBUG "...and adding to very_dirty_list\n"));
636                 list_add_tail(&jeb->list, &c->very_dirty_list);
637         } else {
638                 D1(printk(KERN_DEBUG "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
639                           jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
640         }
641
642         spin_unlock(&c->erase_completion_lock);
643
644         if (!jffs2_can_mark_obsolete(c) || jffs2_is_readonly(c) ||
645                 (c->flags & JFFS2_SB_FLAG_BUILDING)) {
646                 /* We didn't lock the erase_free_sem */
647                 return;
648         }
649
650         /* The erase_free_sem is locked, and has been since before we marked the node obsolete
651            and potentially put its eraseblock onto the erase_pending_list. Thus, we know that
652            the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet
653            by jffs2_free_jeb_node_refs() in erase.c. Which is nice. */
654
655         D1(printk(KERN_DEBUG "obliterating obsoleted node at 0x%08x\n", ref_offset(ref)));
656         ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
657         if (ret) {
658                 printk(KERN_WARNING "Read error reading from obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
659                 goto out_erase_sem;
660         }
661         if (retlen != sizeof(n)) {
662                 printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
663                 goto out_erase_sem;
664         }
665         if (PAD(je32_to_cpu(n.totlen)) != PAD(freed_len)) {
666                 printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n", je32_to_cpu(n.totlen), freed_len);
667                 goto out_erase_sem;
668         }
669         if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
670                 D1(printk(KERN_DEBUG "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n", ref_offset(ref), je16_to_cpu(n.nodetype)));
671                 goto out_erase_sem;
672         }
673         /* XXX FIXME: This is ugly now */
674         n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
675         ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
676         if (ret) {
677                 printk(KERN_WARNING "Write error in obliterating obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
678                 goto out_erase_sem;
679         }
680         if (retlen != sizeof(n)) {
681                 printk(KERN_WARNING "Short write in obliterating obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
682                 goto out_erase_sem;
683         }
684
685         /* Nodes which have been marked obsolete no longer need to be
686            associated with any inode. Remove them from the per-inode list.
687
688            Note we can't do this for NAND at the moment because we need
689            obsolete dirent nodes to stay on the lists, because of the
690            horridness in jffs2_garbage_collect_deletion_dirent(). Also
691            because we delete the inocache, and on NAND we need that to
692            stay around until all the nodes are actually erased, in order
693            to stop us from giving the same inode number to another newly
694            created inode. */
695         if (ref->next_in_ino) {
696                 struct jffs2_inode_cache *ic;
697                 struct jffs2_raw_node_ref **p;
698
699                 spin_lock(&c->erase_completion_lock);
700
701                 ic = jffs2_raw_ref_to_ic(ref);
702                 for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
703                         ;
704
705                 *p = ref->next_in_ino;
706                 ref->next_in_ino = NULL;
707
708                 switch (ic->class) {
709 #ifdef CONFIG_JFFS2_FS_XATTR
710                         case RAWNODE_CLASS_XATTR_DATUM:
711                                 jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic);
712                                 break;
713                         case RAWNODE_CLASS_XATTR_REF:
714                                 jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic);
715                                 break;
716 #endif
717                         default:
718                                 if (ic->nodes == (void *)ic && ic->pino_nlink == 0)
719                                         jffs2_del_ino_cache(c, ic);
720                                 break;
721                 }
722                 spin_unlock(&c->erase_completion_lock);
723         }
724
725  out_erase_sem:
726         mutex_unlock(&c->erase_free_sem);
727 }
728
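/* Decide whether the background garbage-collection thread has work to do:
   returns 1 if there are still unchecked nodes, if free space is low while
   there is enough dirty space worth reclaiming, or if enough very dirty
   blocks have accumulated. */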
729 int jffs2_thread_should_wake(struct jffs2_sb_info *c)
730 {
731         int ret = 0;
732         uint32_t dirty;
733         int nr_very_dirty = 0;
734         struct jffs2_eraseblock *jeb;
735
736         if (c->unchecked_size) {
737                 D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): unchecked_size %d, checked_ino #%d\n",
738                           c->unchecked_size, c->checked_ino));
739                 return 1;
740         }
741
742         /* dirty_size includes blocks on the erase_pending_list;
743          * those blocks are also counted in c->nr_erasing_blocks.
744          * Once a block is actually erased it is no longer counted as dirty_space,
745          * but it is still counted in c->nr_erasing_blocks, so we add the erasing size
746          * and subtract c->nr_erasing_blocks * c->sector_size again.
747          * Blocks on the erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks.
748          * This helps us to force gc and eventually pick a clean block, to spread the load.
749          */
750         dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;
751
752         if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger &&
753                         (dirty > c->nospc_dirty_size))
754                 ret = 1;
755
756         list_for_each_entry(jeb, &c->very_dirty_list, list) {
757                 nr_very_dirty++;
758                 if (nr_very_dirty == c->vdirty_blocks_gctrigger) {
759                         ret = 1;
760                         /* In debug mode, actually go through and count them all */
761                         D1(continue);
762                         break;
763                 }
764         }
765
766         D1(printk(KERN_DEBUG "jffs2_thread_should_wake(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x, vdirty_blocks %d: %s\n",
767                   c->nr_free_blocks, c->nr_erasing_blocks, c->dirty_size, nr_very_dirty, ret?"yes":"no"));
768
769         return ret;
770 }