/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright (C) 2001-2003 Red Hat, Inc.
 * Copyright (C) 2004 Thomas Gleixner <tglx@linutronix.de>
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 * Modified, debugged and enhanced by Thomas Gleixner <tglx@linutronix.de>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 * $Id: wbuf.c,v 1.92 2005/04/05 12:51:54 dedekind Exp $
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/crc32.h>
#include <linux/mtd/nand.h>
#include <linux/jiffies.h>

#include "nodelist.h"
/* For testing write failures */
static unsigned char *brokenbuf;

/* max. erase failures before we mark a block bad */
#define MAX_ERASE_FAILURES	2

/* two seconds timeout for timed wbuf-flushing */
#define WBUF_FLUSH_TIMEOUT	(2 * HZ)
struct jffs2_inodirty {
	uint32_t ino;
	struct jffs2_inodirty *next;
};

static struct jffs2_inodirty inodirty_nomem;
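/* inodirty_nomem is a sentinel, not a real list entry: if we ever fail to
 * allocate a struct jffs2_inodirty, c->wbuf_inodes is pointed at it and
 * jffs2_wbuf_pending_for_ino() then treats _every_ inode as having data
 * pending in the write buffer, which is safe if pessimistic.
 */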
static int jffs2_wbuf_pending_for_ino(struct jffs2_sb_info *c, uint32_t ino)
{
	struct jffs2_inodirty *this = c->wbuf_inodes;

	/* If a malloc failed, consider _everything_ dirty */
	if (this == &inodirty_nomem)
		return 1;

	/* If ino == 0, _any_ non-GC writes mean 'yes' */
	if (this && !ino)
		return 1;

	/* Look to see if the inode in question is pending in the wbuf */
	while (this) {
		if (this->ino == ino)
			return 1;
		this = this->next;
	}
	return 0;
}
static void jffs2_clear_wbuf_ino_list(struct jffs2_sb_info *c)
{
	struct jffs2_inodirty *this;

	this = c->wbuf_inodes;

	if (this != &inodirty_nomem) {
		while (this) {
			struct jffs2_inodirty *next = this->next;
			kfree(this);
			this = next;
		}
	}
	c->wbuf_inodes = NULL;
}
static void jffs2_wbuf_dirties_inode(struct jffs2_sb_info *c, uint32_t ino)
{
	struct jffs2_inodirty *new;

	/* Mark the superblock dirty so that kupdated will flush... */
	jffs2_erase_pending_trigger(c);

	if (jffs2_wbuf_pending_for_ino(c, ino))
		return;

	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (!new) {
		D1(printk(KERN_DEBUG "No memory to allocate inodirty. Fallback to all considered dirty\n"));
		jffs2_clear_wbuf_ino_list(c);
		c->wbuf_inodes = &inodirty_nomem;
		return;
	}

	new->ino = ino;
	new->next = c->wbuf_inodes;
	c->wbuf_inodes = new;
}
static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c)
{
	struct list_head *this, *next;
	static int n;

	if (list_empty(&c->erasable_pending_wbuf_list))
		return;

	list_for_each_safe(this, next, &c->erasable_pending_wbuf_list) {
		struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);

		D1(printk(KERN_DEBUG "Removing eraseblock at 0x%08x from erasable_pending_wbuf_list...\n", jeb->offset));
		list_del(this);
		if ((jiffies + (n++)) & 127) {
			/* Most of the time, we just erase it immediately. Otherwise we
			   spend ages scanning it on mount, etc. */
			D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
			list_add_tail(&jeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			jffs2_erase_pending_trigger(c);
		} else {
			/* Sometimes, however, we leave it elsewhere so it doesn't get
			   immediately reused, and we spread the load a bit. */
			D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
			list_add_tail(&jeb->list, &c->erasable_list);
		}
	}
}
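/* Note on the (jiffies + (n++)) & 127 test above: it is non-zero roughly 127
 * times out of 128, so almost every block goes straight to the
 * erase_pending_list and only about one in 128 is parked on the erasable_list
 * to spread the load.
 */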
#define REFILE_NOTEMPTY 0
#define REFILE_ANYWAY 1
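/* REFILE_NOTEMPTY is used on the wbuf-recovery path, where the failing block
 * must already contain at least one node (the BUG_ON below enforces this).
 * REFILE_ANYWAY is used from jffs2_flash_writev(), where a write to a brand
 * new block may fail before any node ref has been added to it.
 */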
static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int allow_empty)
{
	D1(printk("About to refile bad block at %08x\n", jeb->offset));

	D2(jffs2_dump_block_lists(c));
	/* File the existing block on the bad_used_list.... */
	if (c->nextblock == jeb)
		c->nextblock = NULL;
	else /* Not sure this should ever happen... need more coffee */
		list_del(&jeb->list);
	if (jeb->first_node) {
		D1(printk("Refiling block at %08x to bad_used_list\n", jeb->offset));
		list_add(&jeb->list, &c->bad_used_list);
	} else {
		BUG_ON(allow_empty == REFILE_NOTEMPTY);
		/* It has to have had some nodes or we couldn't be here */
		D1(printk("Refiling block at %08x to erase_pending_list\n", jeb->offset));
		list_add(&jeb->list, &c->erase_pending_list);
		c->nr_erasing_blocks++;
		jffs2_erase_pending_trigger(c);
	}
	D2(jffs2_dump_block_lists(c));

	/* Adjust its size counts accordingly */
	c->wasted_size += jeb->free_size;
	c->free_size -= jeb->free_size;
	jeb->wasted_size += jeb->free_size;
	jeb->free_size = 0;

	ACCT_SANITY_CHECK(c,jeb);
	D1(ACCT_PARANOIA_CHECK(jeb));
}
171 /* Recover from failure to write wbuf. Recover the nodes up to the
172 * wbuf, not the one which we were starting to try to write. */
174 static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
176 struct jffs2_eraseblock *jeb, *new_jeb;
177 struct jffs2_raw_node_ref **first_raw, **raw;
181 uint32_t start, end, ofs, len;
183 spin_lock(&c->erase_completion_lock);
185 jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
187 jffs2_block_refile(c, jeb, REFILE_NOTEMPTY);
189 /* Find the first node to be recovered, by skipping over every
190 node which ends before the wbuf starts, or which is obsolete. */
191 first_raw = &jeb->first_node;
193 (ref_obsolete(*first_raw) ||
194 (ref_offset(*first_raw)+ref_totlen(c, jeb, *first_raw)) < c->wbuf_ofs)) {
195 D1(printk(KERN_DEBUG "Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n",
196 ref_offset(*first_raw), ref_flags(*first_raw),
197 (ref_offset(*first_raw) + ref_totlen(c, jeb, *first_raw)),
199 first_raw = &(*first_raw)->next_phys;
203 /* All nodes were obsolete. Nothing to recover. */
204 D1(printk(KERN_DEBUG "No non-obsolete nodes to be recovered. Just filing block bad\n"));
205 spin_unlock(&c->erase_completion_lock);
209 start = ref_offset(*first_raw);
210 end = ref_offset(*first_raw) + ref_totlen(c, jeb, *first_raw);
212 /* Find the last node to be recovered */
215 if (!ref_obsolete(*raw))
216 end = ref_offset(*raw) + ref_totlen(c, jeb, *raw);
218 raw = &(*raw)->next_phys;
220 spin_unlock(&c->erase_completion_lock);
222 D1(printk(KERN_DEBUG "wbuf recover %08x-%08x\n", start, end));
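	/* At this point [start, end) covers every non-obsolete node from the
	   first one that overlaps the write buffer up to the last node in the
	   block; this is the data that must be re-read (or copied out of the
	   wbuf) and rewritten into a freshly allocated block. */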
225 if (start < c->wbuf_ofs) {
226 /* First affected node was already partially written.
227 * Attempt to reread the old data into our buffer. */
229 buf = kmalloc(end - start, GFP_KERNEL);
231 printk(KERN_CRIT "Malloc failure in wbuf recovery. Data loss ensues.\n");
237 if (jffs2_cleanmarker_oob(c))
238 ret = c->mtd->read_ecc(c->mtd, start, c->wbuf_ofs - start, &retlen, buf, NULL, c->oobinfo);
240 ret = c->mtd->read(c->mtd, start, c->wbuf_ofs - start, &retlen, buf);
242 if (ret == -EBADMSG && retlen == c->wbuf_ofs - start) {
246 if (ret || retlen != c->wbuf_ofs - start) {
247 printk(KERN_CRIT "Old data are already lost in wbuf recovery. Data loss ensues.\n");
252 first_raw = &(*first_raw)->next_phys;
253 /* If this was the only node to be recovered, give up */
257 /* It wasn't. Go on and try to recover nodes complete in the wbuf */
258 start = ref_offset(*first_raw);
260 /* Read succeeded. Copy the remaining data from the wbuf */
261 memcpy(buf + (c->wbuf_ofs - start), c->wbuf, end - c->wbuf_ofs);
264 /* OK... we're to rewrite (end-start) bytes of data from first_raw onwards.
265 Either 'buf' contains the data, or we find it in the wbuf */
268 /* ... and get an allocation of space from a shiny new block instead */
269 ret = jffs2_reserve_space_gc(c, end-start, &ofs, &len);
271 printk(KERN_WARNING "Failed to allocate space for wbuf recovery. Data loss ensues.\n");
275 if (end-start >= c->wbuf_pagesize) {
276 /* Need to do another write immediately, but it's possible
277 that this is just because the wbuf itself is completely
278 full, and there's nothing earlier read back from the
279 flash. Hence 'buf' isn't necessarily what we're writing
281 unsigned char *rewrite_buf = buf?:c->wbuf;
282 uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize);
284 D1(printk(KERN_DEBUG "Write 0x%x bytes at 0x%08x in wbuf recover\n",
289 if (breakme++ == 20) {
290 printk(KERN_NOTICE "Faking write error at 0x%08x\n", ofs);
292 c->mtd->write_ecc(c->mtd, ofs, towrite, &retlen,
293 brokenbuf, NULL, c->oobinfo);
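			/* This branch is fault-injection only: it deliberately writes the
			   bogus 'brokenbuf' pattern once so that the second-write failure
			   path below can be exercised. It is presumably compiled in only
			   when the write-failure test knob near the top of this file is
			   enabled; the guard itself is not shown here. */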
297 if (jffs2_cleanmarker_oob(c))
298 ret = c->mtd->write_ecc(c->mtd, ofs, towrite, &retlen,
299 rewrite_buf, NULL, c->oobinfo);
301 ret = c->mtd->write(c->mtd, ofs, towrite, &retlen, rewrite_buf);
303 if (ret || retlen != towrite) {
304 /* Argh. We tried. Really we did. */
305 printk(KERN_CRIT "Recovery of wbuf failed due to a second write error\n");
309 struct jffs2_raw_node_ref *raw2;
311 raw2 = jffs2_alloc_raw_node_ref();
315 raw2->flash_offset = ofs | REF_OBSOLETE;
316 raw2->__totlen = ref_totlen(c, jeb, *first_raw);
317 raw2->next_phys = NULL;
318 raw2->next_in_ino = NULL;
320 jffs2_add_physical_node_ref(c, raw2);
324 printk(KERN_NOTICE "Recovery of wbuf succeeded to %08x\n", ofs);
326 c->wbuf_len = (end - start) - towrite;
327 c->wbuf_ofs = ofs + towrite;
328 memmove(c->wbuf, rewrite_buf + towrite, c->wbuf_len);
329 /* Don't muck about with c->wbuf_inodes. False positives are harmless. */
333 /* OK, now we're left with the dregs in whichever buffer we're using */
335 memcpy(c->wbuf, buf, end-start);
338 memmove(c->wbuf, c->wbuf + (start - c->wbuf_ofs), end - start);
341 c->wbuf_len = end - start;
344 /* Now sort out the jffs2_raw_node_refs, moving them from the old to the next block */
345 new_jeb = &c->blocks[ofs / c->sector_size];
347 spin_lock(&c->erase_completion_lock);
348 if (new_jeb->first_node) {
349 /* Odd, but possible with ST flash later maybe */
350 new_jeb->last_node->next_phys = *first_raw;
352 new_jeb->first_node = *first_raw;
357 uint32_t rawlen = ref_totlen(c, jeb, *raw);
359 D1(printk(KERN_DEBUG "Refiling block of %08x at %08x(%d) to %08x\n",
360 rawlen, ref_offset(*raw), ref_flags(*raw), ofs));
362 if (ref_obsolete(*raw)) {
363 /* Shouldn't really happen much */
364 new_jeb->dirty_size += rawlen;
365 new_jeb->free_size -= rawlen;
366 c->dirty_size += rawlen;
368 new_jeb->used_size += rawlen;
369 new_jeb->free_size -= rawlen;
370 jeb->dirty_size += rawlen;
371 jeb->used_size -= rawlen;
372 c->dirty_size += rawlen;
374 c->free_size -= rawlen;
375 (*raw)->flash_offset = ofs | ref_flags(*raw);
377 new_jeb->last_node = *raw;
379 raw = &(*raw)->next_phys;
382 /* Fix up the original jeb now it's on the bad_list */
384 if (first_raw == &jeb->first_node) {
385 jeb->last_node = NULL;
386 D1(printk(KERN_DEBUG "Failing block at %08x is now empty. Moving to erase_pending_list\n", jeb->offset));
387 list_del(&jeb->list);
388 list_add(&jeb->list, &c->erase_pending_list);
389 c->nr_erasing_blocks++;
390 jffs2_erase_pending_trigger(c);
393 jeb->last_node = container_of(first_raw, struct jffs2_raw_node_ref, next_phys);
395 ACCT_SANITY_CHECK(c,jeb);
396 D1(ACCT_PARANOIA_CHECK(jeb));
398 ACCT_SANITY_CHECK(c,new_jeb);
399 D1(ACCT_PARANOIA_CHECK(new_jeb));
401 spin_unlock(&c->erase_completion_lock);
403 D1(printk(KERN_DEBUG "wbuf recovery completed OK\n"));
/* Meaning of pad argument:
   0: Do not pad. Probably pointless - we only ever use this when we can't pad anyway.
   1: Pad, do not adjust nextblock free_size
   2: Pad, adjust nextblock free_size
*/
#define NOPAD		0
#define PAD_NOACCOUNT	1
#define PAD_ACCOUNTING	2
415 static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
420 /* Nothing to do if not write-buffering the flash. In particular, we shouldn't
421 del_timer() the timer we never initialised. */
422 if (!jffs2_is_writebuffered(c))
425 if (!down_trylock(&c->alloc_sem)) {
427 printk(KERN_CRIT "jffs2_flush_wbuf() called with alloc_sem not locked!\n");
431 if (!c->wbuf_len) /* already checked c->wbuf above */
	/* Claim the remaining space on the page. This happens if we have a
	   change to a new block, or if fsync forces us to flush the write
	   buffer. If we have a switch to the next page, we will not have
	   enough remaining space for this.
	*/
	if (pad && !jffs2_dataflash(c)) {
		c->wbuf_len = PAD(c->wbuf_len);

		/* Pad with JFFS2_DIRTY_BITMASK initially. This helps out ECC'd NOR
		   with 8 byte page size */
		memset(c->wbuf + c->wbuf_len, 0, c->wbuf_pagesize - c->wbuf_len);
447 if ( c->wbuf_len + sizeof(struct jffs2_unknown_node) < c->wbuf_pagesize) {
448 struct jffs2_unknown_node *padnode = (void *)(c->wbuf + c->wbuf_len);
449 padnode->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
450 padnode->nodetype = cpu_to_je16(JFFS2_NODETYPE_PADDING);
451 padnode->totlen = cpu_to_je32(c->wbuf_pagesize - c->wbuf_len);
452 padnode->hdr_crc = cpu_to_je32(crc32(0, padnode, sizeof(*padnode)-4));
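			/* The padding node is a real JFFS2 node header: its totlen covers
			   the rest of the page so the mount-time scan can step straight
			   over the padded area, and hdr_crc is computed over the header
			   minus its own 4-byte crc field. */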
455 /* else jffs2_flash_writev has actually filled in the rest of the
456 buffer for us, and will deal with the node refs etc. later. */
460 if (breakme++ == 20) {
461 printk(KERN_NOTICE "Faking write error at 0x%08x\n", c->wbuf_ofs);
463 c->mtd->write_ecc(c->mtd, c->wbuf_ofs, c->wbuf_pagesize,
464 &retlen, brokenbuf, NULL, c->oobinfo);
469 if (jffs2_cleanmarker_oob(c))
470 ret = c->mtd->write_ecc(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, c->wbuf, NULL, c->oobinfo);
472 ret = c->mtd->write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, c->wbuf);
474 if (ret || retlen != c->wbuf_pagesize) {
476 printk(KERN_WARNING "jffs2_flush_wbuf(): Write failed with %d\n",ret);
478 printk(KERN_WARNING "jffs2_flush_wbuf(): Write was short: %zd instead of %d\n",
479 retlen, c->wbuf_pagesize);
483 jffs2_wbuf_recover(c);
488 spin_lock(&c->erase_completion_lock);
490 /* Adjust free size of the block if we padded. */
491 if (pad && !jffs2_dataflash(c)) {
492 struct jffs2_eraseblock *jeb;
494 jeb = &c->blocks[c->wbuf_ofs / c->sector_size];
496 D1(printk(KERN_DEBUG "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n",
497 (jeb==c->nextblock)?"next":"", jeb->offset));
499 /* wbuf_pagesize - wbuf_len is the amount of space that's to be
500 padded. If there is less free space in the block than that,
501 something screwed up */
502 if (jeb->free_size < (c->wbuf_pagesize - c->wbuf_len)) {
503 printk(KERN_CRIT "jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n",
504 c->wbuf_ofs, c->wbuf_len, c->wbuf_pagesize-c->wbuf_len);
505 printk(KERN_CRIT "jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n",
506 jeb->offset, jeb->free_size);
509 jeb->free_size -= (c->wbuf_pagesize - c->wbuf_len);
510 c->free_size -= (c->wbuf_pagesize - c->wbuf_len);
511 jeb->wasted_size += (c->wbuf_pagesize - c->wbuf_len);
512 c->wasted_size += (c->wbuf_pagesize - c->wbuf_len);
515 /* Stick any now-obsoleted blocks on the erase_pending_list */
516 jffs2_refile_wbuf_blocks(c);
517 jffs2_clear_wbuf_ino_list(c);
518 spin_unlock(&c->erase_completion_lock);
520 memset(c->wbuf,0xff,c->wbuf_pagesize);
/* adjust write buffer offset, else we get a non-contiguous write bug */
522 c->wbuf_ofs += c->wbuf_pagesize;
527 /* Trigger garbage collection to flush the write-buffer.
528 If ino arg is zero, do it if _any_ real (i.e. not GC) writes are
529 outstanding. If ino arg non-zero, do it only if a write for the
530 given inode is outstanding. */
531 int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
533 uint32_t old_wbuf_ofs;
534 uint32_t old_wbuf_len;
537 D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() called for ino #%u...\n", ino));
543 if (!jffs2_wbuf_pending_for_ino(c, ino)) {
544 D1(printk(KERN_DEBUG "Ino #%d not pending in wbuf. Returning\n", ino));
549 old_wbuf_ofs = c->wbuf_ofs;
550 old_wbuf_len = c->wbuf_len;
552 if (c->unchecked_size) {
553 /* GC won't make any progress for a while */
554 D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() padding. Not finished checking\n"));
555 down_write(&c->wbuf_sem);
556 ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
557 /* retry flushing wbuf in case jffs2_wbuf_recover
558 left some data in the wbuf */
560 ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
561 up_write(&c->wbuf_sem);
562 } else while (old_wbuf_len &&
563 old_wbuf_ofs == c->wbuf_ofs) {
567 D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() calls gc pass\n"));
569 ret = jffs2_garbage_collect_pass(c);
571 /* GC failed. Flush it with padding instead */
573 down_write(&c->wbuf_sem);
574 ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
575 /* retry flushing wbuf in case jffs2_wbuf_recover
576 left some data in the wbuf */
578 ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
579 up_write(&c->wbuf_sem);
585 D1(printk(KERN_DEBUG "jffs2_flush_wbuf_gc() ends...\n"));
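	/* Each garbage-collection pass above performs real writes, and those
	   writes go through the write buffer; the loop therefore only runs
	   while there was data in the wbuf to begin with, and exits once
	   c->wbuf_ofs has moved on from the page it was on, i.e. once the old
	   contents have reached the flash. */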
591 /* Pad write-buffer to end and write it, wasting space. */
592 int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c)
599 down_write(&c->wbuf_sem);
600 ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
601 /* retry - maybe wbuf recover left some data in wbuf. */
603 ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
604 up_write(&c->wbuf_sem);
#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
#define PAGE_DIV(x) ( ((unsigned long)(x) / (unsigned long)(c->wbuf_pagesize)) * (unsigned long)(c->wbuf_pagesize) )
#define PAGE_MOD(x) ( (unsigned long)(x) % (unsigned long)(c->wbuf_pagesize) )
#else
#define PAGE_DIV(x) ( (x) & (~(c->wbuf_pagesize - 1)) )
#define PAGE_MOD(x) ( (x) & (c->wbuf_pagesize - 1) )
#endif
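/* The division-based forms above work for any wbuf_pagesize, including sizes
 * that are not a power of two (DataFlash-style pages, for instance); the
 * mask-based forms are a cheaper equivalent that assumes wbuf_pagesize is a
 * power of two.
 */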
617 int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, unsigned long count, loff_t to, size_t *retlen, uint32_t ino)
619 struct kvec outvecs[3];
621 uint32_t split_ofs = 0;
623 int ret, splitvec = -1;
626 unsigned char *wbuf_ptr;
628 uint32_t outvec_to = to;
/* If we're not actually write-buffering, don't bother */
631 if (!jffs2_is_writebuffered(c))
632 return jffs2_flash_direct_writev(c, invecs, count, to, retlen);
634 down_write(&c->wbuf_sem);
636 /* If wbuf_ofs is not initialized, set it to target address */
637 if (c->wbuf_ofs == 0xFFFFFFFF) {
638 c->wbuf_ofs = PAGE_DIV(to);
639 c->wbuf_len = PAGE_MOD(to);
640 memset(c->wbuf,0xff,c->wbuf_pagesize);
643 /* Fixup the wbuf if we are moving to a new eraseblock. The checks below
644 fail for ECC'd NOR because cleanmarker == 16, so a block starts at
646 if (jffs2_nor_ecc(c)) {
647 if (((c->wbuf_ofs % c->sector_size) == 0) && !c->wbuf_len) {
648 c->wbuf_ofs = PAGE_DIV(to);
649 c->wbuf_len = PAGE_MOD(to);
650 memset(c->wbuf,0xff,c->wbuf_pagesize);
654 /* Sanity checks on target address.
655 It's permitted to write at PAD(c->wbuf_len+c->wbuf_ofs),
656 and it's permitted to write at the beginning of a new
657 erase block. Anything else, and you die.
658 New block starts at xxx000c (0-b = block header)
660 if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) {
661 /* It's a write to a new block */
663 D1(printk(KERN_DEBUG "jffs2_flash_writev() to 0x%lx causes flush of wbuf at 0x%08x\n", (unsigned long)to, c->wbuf_ofs));
664 ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
666 /* the underlying layer has to check wbuf_len to do the cleanup */
667 D1(printk(KERN_WARNING "jffs2_flush_wbuf() called from jffs2_flash_writev() failed %d\n", ret));
672 /* set pointer to new block */
673 c->wbuf_ofs = PAGE_DIV(to);
674 c->wbuf_len = PAGE_MOD(to);
677 if (to != PAD(c->wbuf_ofs + c->wbuf_len)) {
678 /* We're not writing immediately after the writebuffer. Bad. */
679 printk(KERN_CRIT "jffs2_flash_writev(): Non-contiguous write to %08lx\n", (unsigned long)to);
681 printk(KERN_CRIT "wbuf was previously %08x-%08x\n",
682 c->wbuf_ofs, c->wbuf_ofs+c->wbuf_len);
686 /* Note outvecs[3] above. We know count is never greater than 2 */
688 printk(KERN_CRIT "jffs2_flash_writev(): count is %ld\n", count);
695 /* Fill writebuffer first, if already in use */
697 uint32_t invec_ofs = 0;
699 /* adjust alignment offset */
700 if (c->wbuf_len != PAGE_MOD(to)) {
701 c->wbuf_len = PAGE_MOD(to);
702 /* take care of alignment to next page */
704 c->wbuf_len = c->wbuf_pagesize;
707 while(c->wbuf_len < c->wbuf_pagesize) {
713 thislen = c->wbuf_pagesize - c->wbuf_len;
715 if (thislen >= invecs[invec].iov_len)
716 thislen = invecs[invec].iov_len;
720 memcpy(c->wbuf + c->wbuf_len, invecs[invec].iov_base, thislen);
721 c->wbuf_len += thislen;
/* Get next invec, if this one did not fill the buffer */
724 if (c->wbuf_len < c->wbuf_pagesize)
728 /* write buffer is full, flush buffer */
729 ret = __jffs2_flush_wbuf(c, NOPAD);
731 /* the underlying layer has to check wbuf_len to do the cleanup */
732 D1(printk(KERN_WARNING "jffs2_flush_wbuf() called from jffs2_flash_writev() failed %d\n", ret));
733 /* Retlen zero to make sure our caller doesn't mark the space dirty.
734 We've already done everything that's necessary */
738 outvec_to += donelen;
739 c->wbuf_ofs = outvec_to;
741 /* All invecs done ? */
745 /* Set up the first outvec, containing the remainder of the
746 invec we partially used */
747 if (invecs[invec].iov_len > invec_ofs) {
748 outvecs[0].iov_base = invecs[invec].iov_base+invec_ofs;
749 totlen = outvecs[0].iov_len = invecs[invec].iov_len-invec_ofs;
750 if (totlen > c->wbuf_pagesize) {
752 split_ofs = outvecs[0].iov_len - PAGE_MOD(totlen);
759 /* OK, now we've flushed the wbuf and the start of the bits
760 we have been asked to write, now to write the rest.... */
762 /* totlen holds the amount of data still to be written */
764 for ( ; invec < count; invec++,outvec++ ) {
765 outvecs[outvec].iov_base = invecs[invec].iov_base;
766 totlen += outvecs[outvec].iov_len = invecs[invec].iov_len;
767 if (PAGE_DIV(totlen) != PAGE_DIV(old_totlen)) {
769 split_ofs = outvecs[outvec].iov_len - PAGE_MOD(totlen);
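			/* A change in PAGE_DIV(totlen) means a page boundary falls inside
			   this outvec: split_ofs marks where the last whole page ends
			   within it. After the loop, everything up to splitvec/split_ofs
			   is written straight to flash and the remainder (plus any later
			   outvecs) is copied back into the now-empty wbuf. */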
774 /* Now the outvecs array holds all the remaining data to write */
775 /* Up to splitvec,split_ofs is to be written immediately. The rest
776 goes into the (now-empty) wbuf */
778 if (splitvec != -1) {
781 remainder = outvecs[splitvec].iov_len - split_ofs;
782 outvecs[splitvec].iov_len = split_ofs;
784 /* We did cross a page boundary, so we write some now */
785 if (jffs2_cleanmarker_oob(c))
786 ret = c->mtd->writev_ecc(c->mtd, outvecs, splitvec+1, outvec_to, &wbuf_retlen, NULL, c->oobinfo);
788 ret = jffs2_flash_direct_writev(c, outvecs, splitvec+1, outvec_to, &wbuf_retlen);
790 if (ret < 0 || wbuf_retlen != PAGE_DIV(totlen)) {
791 /* At this point we have no problem,
792 c->wbuf is empty. However refile nextblock to avoid
793 writing again to same address.
795 struct jffs2_eraseblock *jeb;
797 spin_lock(&c->erase_completion_lock);
799 jeb = &c->blocks[outvec_to / c->sector_size];
800 jffs2_block_refile(c, jeb, REFILE_ANYWAY);
803 spin_unlock(&c->erase_completion_lock);
807 donelen += wbuf_retlen;
808 c->wbuf_ofs = PAGE_DIV(outvec_to) + PAGE_DIV(totlen);
811 outvecs[splitvec].iov_base += split_ofs;
812 outvecs[splitvec].iov_len = remainder;
821 /* Now splitvec points to the start of the bits we have to copy
825 for ( ; splitvec < outvec; splitvec++) {
826 /* Don't copy the wbuf into itself */
827 if (outvecs[splitvec].iov_base == c->wbuf)
829 memcpy(wbuf_ptr, outvecs[splitvec].iov_base, outvecs[splitvec].iov_len);
830 wbuf_ptr += outvecs[splitvec].iov_len;
831 donelen += outvecs[splitvec].iov_len;
833 c->wbuf_len = wbuf_ptr - c->wbuf;
835 /* If there's a remainder in the wbuf and it's a non-GC write,
836 remember that the wbuf affects this ino */
840 if (c->wbuf_len && ino)
841 jffs2_wbuf_dirties_inode(c, ino);
846 up_write(&c->wbuf_sem);
/*
 * This is the entry for flash write.
 * Check if we are write-buffering; if so, build a kvec and write it
 * via jffs2_flash_writev().
 */
854 int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, const u_char *buf)
858 if (!jffs2_is_writebuffered(c))
859 return c->mtd->write(c->mtd, ofs, len, retlen, buf);
861 vecs[0].iov_base = (unsigned char *) buf;
862 vecs[0].iov_len = len;
863 return jffs2_flash_writev(c, vecs, 1, ofs, retlen, 0);
867 Handle readback from writebuffer and ECC failure return
869 int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, u_char *buf)
871 loff_t orbf = 0, owbf = 0, lwbf = 0;
874 if (!jffs2_is_writebuffered(c))
875 return c->mtd->read(c->mtd, ofs, len, retlen, buf);
878 down_read(&c->wbuf_sem);
879 if (jffs2_cleanmarker_oob(c))
880 ret = c->mtd->read_ecc(c->mtd, ofs, len, retlen, buf, NULL, c->oobinfo);
882 ret = c->mtd->read(c->mtd, ofs, len, retlen, buf);
884 if ( (ret == -EBADMSG) && (*retlen == len) ) {
885 printk(KERN_WARNING "mtd->read(0x%zx bytes from 0x%llx) returned ECC error\n",
		/*
		 * We have the raw data without ECC correction in the buffer;
		 * maybe we are lucky and all or part of the data is correct.
		 * The node CRC checks will sort out anything that is corrupted.
		 * We keep this block; it will fail on write or erase, and then
		 * we mark it bad. Or should we do that now? Better to give it
		 * a chance - maybe we had a system crash or power loss before
		 * the ECC write or an erase was completed.
		 * So we return success. :)
		 */
900 /* if no writebuffer available or write buffer empty, return */
901 if (!c->wbuf_pagesize || !c->wbuf_len)
904 /* if we read in a different block, return */
905 if (SECTOR_ADDR(ofs) != SECTOR_ADDR(c->wbuf_ofs))
908 if (ofs >= c->wbuf_ofs) {
909 owbf = (ofs - c->wbuf_ofs); /* offset in write buffer */
910 if (owbf > c->wbuf_len) /* is read beyond write buffer ? */
912 lwbf = c->wbuf_len - owbf; /* number of bytes to copy */
916 orbf = (c->wbuf_ofs - ofs); /* offset in read buffer */
if (orbf > len)			/* does the write buffer lie entirely beyond this read? */
919 lwbf = len - orbf; /* number of bytes to copy */
920 if (lwbf > c->wbuf_len)
924 memcpy(buf+orbf,c->wbuf+owbf,lwbf);
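	/* Whichever branch was taken above, the part of the read that overlaps
	   the not-yet-flushed write buffer has now been patched with the
	   in-memory copy, so callers always see the data they most recently
	   wrote. */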
927 up_read(&c->wbuf_sem);
 * Check if the out-of-band area is empty
934 int jffs2_check_oob_empty( struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int mode)
942 /* allocate a buffer for all oob data in this sector */
943 oob_size = c->mtd->oobsize;
945 buf = kmalloc(len, GFP_KERNEL);
947 printk(KERN_NOTICE "jffs2_check_oob_empty(): allocation of temporary data buffer for oob check failed\n");
 * If mode == 0, we scan for a totally empty OOB area; otherwise we have
 * to take care of the cleanmarker in the first page of the block.
954 ret = jffs2_flash_read_oob(c, jeb->offset, len , &retlen, buf);
956 D1(printk(KERN_WARNING "jffs2_check_oob_empty(): Read OOB failed %d for block at %08x\n", ret, jeb->offset));
961 D1(printk(KERN_WARNING "jffs2_check_oob_empty(): Read OOB return short read "
962 "(%zd bytes not %d) for block at %08x\n", retlen, len, jeb->offset));
	/* Special check for first page */
	for(i = 0; i < oob_size ; i++) {
		/* Yeah, we know about the cleanmarker. */
		if (mode && i >= c->fsdata_pos &&
		    i < c->fsdata_pos + c->fsdata_len)
			continue;

		if (buf[i] != 0xFF) {
			D2(printk(KERN_DEBUG "Found %02x at %x in OOB for %08x\n",
				  buf[i], i, jeb->offset));
982 /* we know, we are aligned :) */
983 for (page = oob_size; page < len; page += sizeof(long)) {
984 unsigned long dat = *(unsigned long *)(&buf[page]);
998 * Scan for a valid cleanmarker and for bad blocks
999 * For virtual blocks (concatenated physical blocks) check the cleanmarker
1000 * only in the first page of the first physical block, but scan for bad blocks in all
1003 int jffs2_check_nand_cleanmarker (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
1005 struct jffs2_unknown_node n;
1006 unsigned char buf[2 * NAND_MAX_OOBSIZE];
1008 int ret, i, cnt, retval = 0;
1009 size_t retlen, offset;
1012 offset = jeb->offset;
1013 oob_size = c->mtd->oobsize;
1015 /* Loop through the physical blocks */
1016 for (cnt = 0; cnt < (c->sector_size / c->mtd->erasesize); cnt++) {
1017 /* Check first if the block is bad. */
1018 if (c->mtd->block_isbad (c->mtd, offset)) {
1019 D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker(): Bad block at %08x\n", jeb->offset));
1023 * We read oob data from page 0 and 1 of the block.
1024 * page 0 contains cleanmarker and badblock info
1025 * page 1 contains failure count of this block
1027 ret = c->mtd->read_oob (c->mtd, offset, oob_size << 1, &retlen, buf);
1030 D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker(): Read OOB failed %d for block at %08x\n", ret, jeb->offset));
1033 if (retlen < (oob_size << 1)) {
1034 D1 (printk (KERN_WARNING "jffs2_check_nand_cleanmarker(): Read OOB return short read (%zd bytes not %d) for block at %08x\n", retlen, oob_size << 1, jeb->offset));
1038 /* Check cleanmarker only on the first physical block */
1040 n.magic = cpu_to_je16 (JFFS2_MAGIC_BITMASK);
1041 n.nodetype = cpu_to_je16 (JFFS2_NODETYPE_CLEANMARKER);
1042 n.totlen = cpu_to_je32 (8);
1043 p = (unsigned char *) &n;
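			/* The cleanmarker stored in the OOB area is simply the first
			   c->fsdata_len bytes of the JFFS2_NODETYPE_CLEANMARKER header
			   built above, so it can be compared byte by byte against this
			   freshly-constructed header. */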
1045 for (i = 0; i < c->fsdata_len; i++) {
1046 if (buf[c->fsdata_pos + i] != p[i]) {
1050 D1(if (retval == 1) {
1051 printk(KERN_WARNING "jffs2_check_nand_cleanmarker(): Cleanmarker node not detected in block at %08x\n", jeb->offset);
1052 printk(KERN_WARNING "OOB at %08x was ", offset);
1053 for (i=0; i < oob_size; i++) {
1054 printk("%02x ", buf[i]);
1059 offset += c->mtd->erasesize;
1064 int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
1066 struct jffs2_unknown_node n;
1070 n.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
1071 n.nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER);
1072 n.totlen = cpu_to_je32(8);
1074 ret = jffs2_flash_write_oob(c, jeb->offset + c->fsdata_pos, c->fsdata_len, &retlen, (unsigned char *)&n);
1077 D1(printk(KERN_WARNING "jffs2_write_nand_cleanmarker(): Write failed for block at %08x: error %d\n", jeb->offset, ret));
1080 if (retlen != c->fsdata_len) {
1081 D1(printk(KERN_WARNING "jffs2_write_nand_cleanmarker(): Short write for block at %08x: %zd not %d\n", jeb->offset, retlen, c->fsdata_len));
/*
 * On NAND we try to mark this block bad. If erasing it has failed more
 * than MAX_ERASE_FAILURES times, we finally mark it bad.
 * Don't care about failures here. The block remains on the erase-pending
 * or badblock list as long as nobody manipulates the flash with
 * a bootloader or something like that.
 */
1095 int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset)
1099 /* if the count is < max, we try to write the counter to the 2nd page oob area */
1100 if( ++jeb->bad_count < MAX_ERASE_FAILURES)
1103 if (!c->mtd->block_markbad)
1104 return 1; // What else can we do?
1106 D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Marking bad block at %08x\n", bad_offset));
1107 ret = c->mtd->block_markbad(c->mtd, bad_offset);
1110 D1(printk(KERN_WARNING "jffs2_write_nand_badblock(): Write failed for block at %08x: error %d\n", jeb->offset, ret));
1116 #define NAND_JFFS2_OOB16_FSDALEN 8
static struct nand_oobinfo jffs2_oobinfo_docecc = {
	.useecc = MTD_NANDECC_PLACE,
	.eccbytes = 6,
	.eccpos = {0,1,2,3,4,5}
};
1125 static int jffs2_nand_set_oobinfo(struct jffs2_sb_info *c)
1127 struct nand_oobinfo *oinfo = &c->mtd->oobinfo;
/* Do this only if the device has an OOB area */
1130 if (!c->mtd->oobsize)
1133 /* Cleanmarker is out-of-band, so inline size zero */
1134 c->cleanmarker_size = 0;
1136 /* Should we use autoplacement ? */
1137 if (oinfo && oinfo->useecc == MTD_NANDECC_AUTOPLACE) {
1138 D1(printk(KERN_DEBUG "JFFS2 using autoplace on NAND\n"));
1139 /* Get the position of the free bytes */
1140 if (!oinfo->oobfree[0][1]) {
1141 printk (KERN_WARNING "jffs2_nand_set_oobinfo(): Eeep. Autoplacement selected and no empty space in oob\n");
1144 c->fsdata_pos = oinfo->oobfree[0][0];
1145 c->fsdata_len = oinfo->oobfree[0][1];
if (c->fsdata_len > 8)
	c->fsdata_len = 8;
1149 /* This is just a legacy fallback and should go away soon */
1150 switch(c->mtd->ecctype) {
1151 case MTD_ECC_RS_DiskOnChip:
1152 printk(KERN_WARNING "JFFS2 using DiskOnChip hardware ECC without autoplacement. Fix it!\n");
1153 c->oobinfo = &jffs2_oobinfo_docecc;
1155 c->fsdata_len = NAND_JFFS2_OOB16_FSDALEN;
1156 c->badblock_pos = 15;
D1(printk(KERN_DEBUG "JFFS2 on NAND. No autoplacement info found\n"));
1167 int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
1171 /* Initialise write buffer */
1172 init_rwsem(&c->wbuf_sem);
1173 c->wbuf_pagesize = c->mtd->oobblock;
1174 c->wbuf_ofs = 0xFFFFFFFF;
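	/* 0xFFFFFFFF flags the write buffer as uninitialised;
	   jffs2_flash_writev() checks for this and aligns wbuf_ofs/wbuf_len
	   to the target of the first write. */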
1176 c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1180 res = jffs2_nand_set_oobinfo(c);
1184 brokenbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1189 memset(brokenbuf, 0xdb, c->wbuf_pagesize);
1194 void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c)
1199 int jffs2_dataflash_setup(struct jffs2_sb_info *c) {
1200 c->cleanmarker_size = 0; /* No cleanmarkers needed */
1202 /* Initialize write buffer */
1203 init_rwsem(&c->wbuf_sem);
1204 c->wbuf_pagesize = c->sector_size;
1205 c->wbuf_ofs = 0xFFFFFFFF;
1207 c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1211 printk(KERN_INFO "JFFS2 write-buffering enabled (%i)\n", c->wbuf_pagesize);
1216 void jffs2_dataflash_cleanup(struct jffs2_sb_info *c) {
1220 int jffs2_nor_ecc_flash_setup(struct jffs2_sb_info *c) {
1221 /* Cleanmarker is actually larger on the flashes */
1222 c->cleanmarker_size = 16;
1224 /* Initialize write buffer */
1225 init_rwsem(&c->wbuf_sem);
1226 c->wbuf_pagesize = c->mtd->eccsize;
1227 c->wbuf_ofs = 0xFFFFFFFF;
1229 c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
1236 void jffs2_nor_ecc_flash_cleanup(struct jffs2_sb_info *c) {