2 * Copyright (c) International Business Machines Corp., 2006
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
12 * the GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner
22 * UBI wear-leveling unit.
24 * This unit is responsible for wear-leveling. It works in terms of physical
25 * eraseblocks and erase counters and knows nothing about logical eraseblocks,
26 * volumes, etc. From this unit's perspective all physical eraseblocks are of
27 * two types - used and free. Used physical eraseblocks are those that were
28 * "get" by the 'ubi_wl_get_peb()' function, and free physical eraseblocks are
29 * those that were put by the 'ubi_wl_put_peb()' function.
31 * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only the erase
32 * counter header. The rest of the physical eraseblock contains only 0xFF bytes.
34 * When physical eraseblocks are returned to the WL unit by means of the
35 * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is
36 * done asynchronously in context of the per-UBI device background thread,
37 * which is also managed by the WL unit.
39 * The wear-leveling is ensured by means of moving the contents of used
40 * physical eraseblocks with low erase counter to free physical eraseblocks
41 * with high erase counter.
43 * The 'ubi_wl_get_peb()' function accepts data type hints which help to pick
44 * an "optimal" physical eraseblock. For example, when it is known that the
45 * physical eraseblock will be "put" soon because it contains short-term data,
46 * the WL unit may pick a free physical eraseblock with a low erase counter, and so on.
49 * If the WL unit fails to erase a physical eraseblock, it marks it as bad.
51 * This unit is also responsible for scrubbing. If a bit-flip is detected in a
52 * physical eraseblock, it has to be moved. Technically this is the same as
53 * moving it for wear-leveling reasons.
55 * As noted above, to the WL unit all physical eraseblocks are either "free"
56 * or "used". Free eraseblocks are kept in the @wl->free RB-tree, while used
57 * eraseblocks are kept in a set of different RB-trees: @wl->used,
58 * @wl->prot.pnum, @wl->prot.aec, and @wl->scrub.
60 * Note, in this implementation, we keep a small in-RAM object for each physical
61 * eraseblock. This is surely not a scalable solution. But it appears to be good
62 * enough for moderately large flashes and it is simple. In the future, one may
63 * re-work this unit and make it more scalable.
65 * At the moment this unit does not utilize the sequence number, which was
66 * introduced relatively recently. But it would be wise to do this because the
67 * sequence number of a logical eraseblock characterizes how old it is. For
68 * example, when we move a PEB with a low erase counter and need to pick the
69 * target PEB, we would pick a PEB with the highest EC if our PEB is "old" and a
70 * PEB with an average EC if our PEB is not very "old". This is room for
71 * future re-work of the WL unit.
73 * FIXME: looks too complex, should be simplified (later).
76 #include <linux/slab.h>
77 #include <linux/crc32.h>
78 #include <linux/freezer.h>
79 #include <linux/kthread.h>
82 /* Number of physical eraseblocks reserved for wear-leveling purposes */
83 #define WL_RESERVED_PEBS 1
86 * How many erase cycles short term, unknown, and long term physical
87 * eraseblocks are protected for.
89 #define ST_PROTECTION 16
90 #define U_PROTECTION 10
91 #define LT_PROTECTION 4
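
/*
 * Illustrative sketch (not part of the original driver): how a data type
 * hint passed to 'ubi_wl_get_peb()' could map to one of the protection
 * lengths above. The real selection happens inline in 'ubi_wl_get_peb()';
 * the helper name below is hypothetical.
 */
static inline int dtype_to_protection(int dtype)
{
	switch (dtype) {
	case UBI_SHORTTERM:
		/* Will likely be put soon - protect it against moves longest */
		return ST_PROTECTION;
	case UBI_LONGTERM:
		/* Long-lived data - a short protection window is enough */
		return LT_PROTECTION;
	default:
		ubi_assert(dtype == UBI_UNKNOWN);
		return U_PROTECTION;
	}
}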
94 * Maximum difference between two erase counters. If this threshold is
95 * exceeded, the WL unit starts moving data from used physical eraseblocks with
96 * low erase counter to free physical eraseblocks with high erase counter.
98 #define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD
101 * When a physical eraseblock is moved, the WL unit has to pick the target
102 * physical eraseblock to move to. The simplest way would be just to pick the
103 * one with the highest erase counter. But in certain workloads this could lead
104 * to unlimited wear of one or a few physical eraseblocks. Indeed, imagine a
105 * situation when the picked physical eraseblock is constantly erased after the
106 * data is written to it. So, we have a constant which limits the highest erase
107 * counter of the free physical eraseblock to pick. Namely, the WL unit does
108 * not pick eraseblocks with an erase counter greater than the lowest erase
109 * counter plus %WL_FREE_MAX_DIFF.
111 #define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)
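
/*
 * Illustrative sketch (assumption, not used elsewhere in this file): a free
 * PEB is an acceptable wear-leveling target only if its erase counter does
 * not exceed the lowest free erase counter by %WL_FREE_MAX_DIFF or more.
 * The helper name is hypothetical; 'find_wl_entry()' below enforces the
 * same bound while walking the free RB-tree.
 */
static inline int wl_target_acceptable(int lowest_free_ec, int candidate_ec)
{
	return candidate_ec < lowest_free_ec + WL_FREE_MAX_DIFF;
}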
114 * Maximum number of consecutive background thread failures after which UBI
115 * switches to read-only mode.
117 #define WL_MAX_FAILURES 32
120 * struct ubi_wl_entry - wear-leveling entry.
121 * @rb: link in the corresponding RB-tree
123 * @pnum: physical eraseblock number
125 * Each physical eraseblock has a corresponding &struct wl_entry object which
126 * may be kept in different RB-trees.
128 struct ubi_wl_entry {
135 * struct ubi_wl_prot_entry - PEB protection entry.
136 * @rb_pnum: link in the @wl->prot.pnum RB-tree
137 * @rb_aec: link in the @wl->prot.aec RB-tree
138 * @abs_ec: the absolute erase counter value when the protection ends
139 * @e: the wear-leveling entry of the physical eraseblock under protection
141 * When the WL unit returns a physical eraseblock, the physical eraseblock is
142 * protected from being moved for some "time". For this reason, the physical
143 * eraseblock is not directly moved from the @wl->free tree to the @wl->used
144 * tree. There is one more tree in between where this physical eraseblock is
145 * temporarily stored (@wl->prot).
147 * All this protection stuff is needed because:
148 * o we don't want to move physical eraseblocks just after we have given them
149 * to the user; instead, we first want to let users fill them up with data;
151 * o there is a chance that the user will put the physical eraseblock very
152 * soon, so it makes sense not to move it for some time, but wait; this is
153 * especially important in the case of "short term" physical eraseblocks.
155 * Physical eraseblocks stay protected only for a limited time. But this "time"
156 * is measured in erase cycles. This is implemented with the help of the
157 * absolute erase counter (@wl->abs_ec). When it reaches a certain value, the
158 * physical eraseblocks are moved from the protection trees (@wl->prot.*) to
159 * the @wl->used tree.
161 * Protected physical eraseblocks are searched by physical eraseblock number
162 * (when they are put) and by the absolute erase counter (to check if it is
163 * time to move them to the @wl->used tree). So there are actually 2 RB-trees
164 * storing the protected physical eraseblocks: @wl->prot.pnum and
165 * @wl->prot.aec. They are referred to as the "protection" trees. The
166 * first one is indexed by the physical eraseblock number. The second one is
167 * indexed by the absolute erase counter. Both trees store
168 * &struct ubi_wl_prot_entry objects.
170 * Each physical eraseblock has 2 main states: free and used. The former state
171 * corresponds to the @wl->free tree. The latter state is split into several sub-states:
173 * o the WL movement is allowed (@wl->used tree);
174 * o the WL movement is temporarily prohibited (@wl->prot.pnum and
175 * @wl->prot.aec trees);
176 * o scrubbing is needed (@wl->scrub tree).
178 * Depending on the sub-state, wear-leveling entries of the used physical
179 * eraseblocks may be kept in one of those trees.
181 struct ubi_wl_prot_entry {
182 struct rb_node rb_pnum;
183 struct rb_node rb_aec;
184 unsigned long long abs_ec;
185 struct ubi_wl_entry *e;
189 * struct ubi_work - UBI work description data structure.
190 * @list: a link in the list of pending works
191 * @func: worker function
192 * @priv: private data of the worker function
194 * @e: physical eraseblock to erase
195 * @torture: if the physical eraseblock has to be tortured
197 * The @func pointer points to the worker function. If the @cancel argument is
198 * not zero, the worker has to free the resources and exit immediately. The
199 * worker has to return zero in case of success and a negative error code in case of failure.
203 struct list_head list;
204 int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel);
205 /* The fields below are only relevant to erasure works */
206 struct ubi_wl_entry *e;
210 #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
211 static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec);
212 static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
213 struct rb_root *root);
215 #define paranoid_check_ec(ubi, pnum, ec) 0
216 #define paranoid_check_in_wl_tree(e, root)
219 /* Slab cache for wear-leveling entries */
220 static struct kmem_cache *wl_entries_slab;
223 * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
224 * @e: the wear-leveling entry to add
225 * @root: the root of the tree
227 * Note, we use (erase counter, physical eraseblock number) pairs as keys in
228 * the @ubi->used and @ubi->free RB-trees.
230 static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
232 struct rb_node **p, *parent = NULL;
236 struct ubi_wl_entry *e1;
239 e1 = rb_entry(parent, struct ubi_wl_entry, rb);
243 else if (e->ec > e1->ec)
246 ubi_assert(e->pnum != e1->pnum);
247 if (e->pnum < e1->pnum)
254 rb_link_node(&e->rb, parent, p);
255 rb_insert_color(&e->rb, root);
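
/*
 * Illustrative sketch (assumption): the key ordering used by the WL
 * RB-trees. Entries are ordered by erase counter first and by physical
 * eraseblock number second, so rb_first() of @wl->used or @wl->free always
 * yields the least worn-out PEB. The helper name is hypothetical and is not
 * used by wl_tree_add(), which open-codes the same comparison.
 */
static inline int wl_entry_less(const struct ubi_wl_entry *a,
				const struct ubi_wl_entry *b)
{
	if (a->ec != b->ec)
		return a->ec < b->ec;
	return a->pnum < b->pnum;
}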
259 * do_work - do one pending work.
260 * @ubi: UBI device description object
262 * This function returns zero in case of success and a negative error code in
265 static int do_work(struct ubi_device *ubi)
268 struct ubi_work *wrk;
270 spin_lock(&ubi->wl_lock);
272 if (list_empty(&ubi->works)) {
273 spin_unlock(&ubi->wl_lock);
277 wrk = list_entry(ubi->works.next, struct ubi_work, list);
278 list_del(&wrk->list);
279 spin_unlock(&ubi->wl_lock);
282 * Call the worker function. Do not touch the work structure
283 * after this call as it will have been freed or reused by that
284 * time by the worker function.
286 err = wrk->func(ubi, wrk, 0);
288 ubi_err("work failed with error code %d", err);
290 spin_lock(&ubi->wl_lock);
291 ubi->works_count -= 1;
292 ubi_assert(ubi->works_count >= 0);
293 spin_unlock(&ubi->wl_lock);
298 * produce_free_peb - produce a free physical eraseblock.
299 * @ubi: UBI device description object
301 * This function tries to make a free PEB by means of synchronous execution of
302 * pending works. This may be needed if, for example, the background thread is
303 * disabled. Returns zero in case of success and a negative error code in case
306 static int produce_free_peb(struct ubi_device *ubi)
310 spin_lock(&ubi->wl_lock);
311 while (!ubi->free.rb_node) {
312 spin_unlock(&ubi->wl_lock);
314 dbg_wl("do one work synchronously");
319 spin_lock(&ubi->wl_lock);
321 spin_unlock(&ubi->wl_lock);
327 * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree.
328 * @e: the wear-leveling entry to check
329 * @root: the root of the tree
331 * This function returns non-zero if @e is in the @root RB-tree and zero if it is not.
334 static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
340 struct ubi_wl_entry *e1;
342 e1 = rb_entry(p, struct ubi_wl_entry, rb);
344 if (e->pnum == e1->pnum) {
351 else if (e->ec > e1->ec)
354 ubi_assert(e->pnum != e1->pnum);
355 if (e->pnum < e1->pnum)
366 * prot_tree_add - add physical eraseblock to protection trees.
367 * @ubi: UBI device description object
368 * @e: the physical eraseblock to add
369 * @pe: protection entry object to use
370 * @abs_ec: absolute erase counter value when this physical eraseblock has
371 * to be removed from the protection trees.
373 * @wl->lock has to be locked.
375 static void prot_tree_add(struct ubi_device *ubi, struct ubi_wl_entry *e,
376 struct ubi_wl_prot_entry *pe, int abs_ec)
378 struct rb_node **p, *parent = NULL;
379 struct ubi_wl_prot_entry *pe1;
382 pe->abs_ec = ubi->abs_ec + abs_ec;
384 p = &ubi->prot.pnum.rb_node;
387 pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_pnum);
389 if (e->pnum < pe1->e->pnum)
394 rb_link_node(&pe->rb_pnum, parent, p);
395 rb_insert_color(&pe->rb_pnum, &ubi->prot.pnum);
397 p = &ubi->prot.aec.rb_node;
401 pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_aec);
403 if (pe->abs_ec < pe1->abs_ec)
408 rb_link_node(&pe->rb_aec, parent, p);
409 rb_insert_color(&pe->rb_aec, &ubi->prot.aec);
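
/*
 * Illustrative sketch (assumption): protection "time" is counted in erase
 * operations. prot_tree_add() stores @ubi->abs_ec + @abs_ec in @pe->abs_ec;
 * the entry stops being protected once the device-wide absolute erase
 * counter reaches that value (see 'check_protection_over()' below). The
 * helper name is hypothetical.
 */
static inline int protection_expired(const struct ubi_wl_prot_entry *pe,
				     unsigned long long cur_abs_ec)
{
	return pe->abs_ec <= cur_abs_ec;
}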
413 * find_wl_entry - find wear-leveling entry closest to certain erase counter.
414 * @root: the RB-tree where to look for
415 * @max: highest possible erase counter
417 * This function looks for a wear-leveling entry with an erase counter closest to
418 * @max and less than @max.
420 static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max)
423 struct ubi_wl_entry *e;
425 e = rb_entry(rb_first(root), struct ubi_wl_entry, rb);
430 struct ubi_wl_entry *e1;
432 e1 = rb_entry(p, struct ubi_wl_entry, rb);
445 * ubi_wl_get_peb - get a physical eraseblock.
446 * @ubi: UBI device description object
447 * @dtype: type of data which will be stored in this physical eraseblock
449 * This function returns a physical eraseblock number in case of success and a
450 * negative error code in case of failure. Might sleep.
452 int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)
454 int err, protect, medium_ec;
455 struct ubi_wl_entry *e, *first, *last;
456 struct ubi_wl_prot_entry *pe;
458 ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM ||
459 dtype == UBI_UNKNOWN);
461 pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
466 spin_lock(&ubi->wl_lock);
467 if (!ubi->free.rb_node) {
468 if (ubi->works_count == 0) {
469 ubi_assert(list_empty(&ubi->works));
470 ubi_err("no free eraseblocks");
471 spin_unlock(&ubi->wl_lock);
475 spin_unlock(&ubi->wl_lock);
477 err = produce_free_peb(ubi);
488 * For long term data we pick a physical eraseblock
489 * with high erase counter. But the highest erase
490 * counter we can pick is bounded by the lowest
491 * erase counter plus %WL_FREE_MAX_DIFF.
493 e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
494 protect = LT_PROTECTION;
498 * For unknown data we pick a physical eraseblock with
499 * a medium erase counter. But by no means can we pick a
500 * physical eraseblock with an erase counter greater than or
501 * equal to the lowest erase counter plus %WL_FREE_MAX_DIFF.
504 first = rb_entry(rb_first(&ubi->free),
505 struct ubi_wl_entry, rb);
506 last = rb_entry(rb_last(&ubi->free),
507 struct ubi_wl_entry, rb);
509 if (last->ec - first->ec < WL_FREE_MAX_DIFF)
510 e = rb_entry(ubi->free.rb_node,
511 struct ubi_wl_entry, rb);
513 medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2;
514 e = find_wl_entry(&ubi->free, medium_ec);
516 protect = U_PROTECTION;
520 * For short term data we pick a physical eraseblock
521 * with the lowest erase counter as we expect it will be erased soon.
524 e = rb_entry(rb_first(&ubi->free),
525 struct ubi_wl_entry, rb);
526 protect = ST_PROTECTION;
535 * Move the physical eraseblock to the protection trees where it will
536 * be protected from being moved for some time.
538 paranoid_check_in_wl_tree(e, &ubi->free);
539 rb_erase(&e->rb, &ubi->free);
540 prot_tree_add(ubi, e, pe, protect);
542 dbg_wl("PEB %d EC %d, protection %d", e->pnum, e->ec, protect);
543 spin_unlock(&ubi->wl_lock);
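
/*
 * Illustrative sketch (assumption, not part of the driver): the typical
 * life-cycle seen from a caller such as the EBA unit - obtain a PEB with a
 * data type hint, write to it through the I/O unit, and eventually return
 * it with 'ubi_wl_put_peb()' so that it is erased and becomes free again.
 * The function name and the omitted write step are hypothetical.
 */
static inline int wl_get_put_example(struct ubi_device *ubi)
{
	int pnum, err;

	pnum = ubi_wl_get_peb(ubi, UBI_SHORTTERM);
	if (pnum < 0)
		return pnum;

	/* ... write data to PEB @pnum via the I/O unit ... */

	err = ubi_wl_put_peb(ubi, pnum, 0);
	return err;
}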
549 * prot_tree_del - remove a physical eraseblock from the protection trees
550 * @ubi: UBI device description object
551 * @pnum: the physical eraseblock to remove
553 static void prot_tree_del(struct ubi_device *ubi, int pnum)
556 struct ubi_wl_prot_entry *pe = NULL;
558 p = ubi->prot.pnum.rb_node;
561 pe = rb_entry(p, struct ubi_wl_prot_entry, rb_pnum);
563 if (pnum == pe->e->pnum)
566 if (pnum < pe->e->pnum)
572 ubi_assert(pe->e->pnum == pnum);
573 rb_erase(&pe->rb_aec, &ubi->prot.aec);
574 rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
579 * sync_erase - synchronously erase a physical eraseblock.
580 * @ubi: UBI device description object
581 * @e: the physical eraseblock to erase
582 * @torture: if the physical eraseblock has to be tortured
584 * This function returns zero in case of success and a negative error code in
587 static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, int torture)
590 struct ubi_ec_hdr *ec_hdr;
591 unsigned long long ec = e->ec;
593 dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);
595 err = paranoid_check_ec(ubi, e->pnum, e->ec);
599 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
603 err = ubi_io_sync_erase(ubi, e->pnum, torture);
608 if (ec > UBI_MAX_ERASECOUNTER) {
610 * Erase counter overflow. Upgrade UBI and use 64-bit
611 * erase counters internally.
613 ubi_err("erase counter overflow at PEB %d, EC %llu",
619 dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);
621 ec_hdr->ec = cpu_to_be64(ec);
623 err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
628 spin_lock(&ubi->wl_lock);
629 if (e->ec > ubi->max_ec)
631 spin_unlock(&ubi->wl_lock);
639 * check_protection_over - check if it is time to stop protecting some
640 * physical eraseblocks.
641 * @ubi: UBI device description object
643 * This function is called after each erase operation, when the absolute erase
644 * counter is incremented, to check if some physical eraseblocks no longer
645 * have to be protected. These physical eraseblocks are moved from the
646 * protection trees to the used tree.
648 static void check_protection_over(struct ubi_device *ubi)
650 struct ubi_wl_prot_entry *pe;
653 * There may be several protected physical eraseblocks to remove.
657 spin_lock(&ubi->wl_lock);
658 if (!ubi->prot.aec.rb_node) {
659 spin_unlock(&ubi->wl_lock);
663 pe = rb_entry(rb_first(&ubi->prot.aec),
664 struct ubi_wl_prot_entry, rb_aec);
666 if (pe->abs_ec > ubi->abs_ec) {
667 spin_unlock(&ubi->wl_lock);
671 dbg_wl("PEB %d protection over, abs_ec %llu, PEB abs_ec %llu",
672 pe->e->pnum, ubi->abs_ec, pe->abs_ec);
673 rb_erase(&pe->rb_aec, &ubi->prot.aec);
674 rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
675 wl_tree_add(pe->e, &ubi->used);
676 spin_unlock(&ubi->wl_lock);
684 * schedule_ubi_work - schedule a work.
685 * @ubi: UBI device description object
686 * @wrk: the work to schedule
688 * This function enqueues a work defined by @wrk to the tail of the pending works list.
691 static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
693 spin_lock(&ubi->wl_lock);
694 list_add_tail(&wrk->list, &ubi->works);
695 ubi_assert(ubi->works_count >= 0);
696 ubi->works_count += 1;
697 if (ubi->thread_enabled)
698 wake_up_process(ubi->bgt_thread);
699 spin_unlock(&ubi->wl_lock);
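
/*
 * Illustrative sketch (assumption, mirrors schedule_erase() below): how a
 * work item is packaged and handed to the background thread. The worker
 * function has to free @wrk itself and return zero on success; a non-zero
 * @cancel argument tells it to just clean up and exit. All names except
 * schedule_ubi_work() and struct ubi_work are hypothetical.
 */
static int example_worker(struct ubi_device *ubi, struct ubi_work *wrk,
			  int cancel)
{
	kfree(wrk);
	if (cancel)
		return 0;
	/* ... do the actual deferred job here ... */
	return 0;
}

static inline int schedule_example_work(struct ubi_device *ubi)
{
	struct ubi_work *wrk;

	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wrk)
		return -ENOMEM;

	wrk->func = &example_worker;
	schedule_ubi_work(ubi, wrk);
	return 0;
}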
702 static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
706 * schedule_erase - schedule an erase work.
707 * @ubi: UBI device description object
708 * @e: the WL entry of the physical eraseblock to erase
709 * @torture: if the physical eraseblock has to be tortured
711 * This function returns zero in case of success and %-ENOMEM in case of failure.
714 static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
717 struct ubi_work *wl_wrk;
719 dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
720 e->pnum, e->ec, torture);
722 wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
726 wl_wrk->func = &erase_worker;
728 wl_wrk->torture = torture;
730 schedule_ubi_work(ubi, wl_wrk);
735 * wear_leveling_worker - wear-leveling worker function.
736 * @ubi: UBI device description object
737 * @wrk: the work object
738 * @cancel: non-zero if the worker has to free memory and exit
740 * This function copies the contents of a less worn-out physical eraseblock to a
741 * more worn-out one. Returns zero in case of success and a negative error code in case of failure.
744 static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
748 struct ubi_wl_entry *e1, *e2;
749 struct ubi_vid_hdr *vid_hdr;
756 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
760 spin_lock(&ubi->wl_lock);
763 * Only one WL worker at a time is supported in this implementation, so
764 * make sure a PEB is not being moved already.
766 if (ubi->move_to || !ubi->free.rb_node ||
767 (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
769 * Only one WL worker at a time is supported in this
770 * implementation, so if a LEB is already being moved, cancel.
772 * No free physical eraseblocks? Well, we cancel wear-leveling
773 * then. It will be triggered again when a free physical
774 * eraseblock appears.
776 * No used physical eraseblocks? They must be temporarily
777 * protected from being moved. They will be moved to the
778 * @ubi->used tree later and the wear-leveling will be triggered again.
781 dbg_wl("cancel WL, a list is empty: free %d, used %d",
782 !ubi->free.rb_node, !ubi->used.rb_node);
783 ubi->wl_scheduled = 0;
784 spin_unlock(&ubi->wl_lock);
785 ubi_free_vid_hdr(ubi, vid_hdr);
789 if (!ubi->scrub.rb_node) {
791 * Now pick the least worn-out used physical eraseblock and a
792 * highly worn-out free physical eraseblock. If the erase
793 * counters differ enough, start wear-leveling.
795 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb);
796 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
798 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
799 dbg_wl("no WL needed: min used EC %d, max free EC %d",
801 ubi->wl_scheduled = 0;
802 spin_unlock(&ubi->wl_lock);
803 ubi_free_vid_hdr(ubi, vid_hdr);
806 paranoid_check_in_wl_tree(e1, &ubi->used);
807 rb_erase(&e1->rb, &ubi->used);
808 dbg_wl("move PEB %d EC %d to PEB %d EC %d",
809 e1->pnum, e1->ec, e2->pnum, e2->ec);
811 e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, rb);
812 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
813 paranoid_check_in_wl_tree(e1, &ubi->scrub);
814 rb_erase(&e1->rb, &ubi->scrub);
815 dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
818 paranoid_check_in_wl_tree(e2, &ubi->free);
819 rb_erase(&e2->rb, &ubi->free);
820 ubi_assert(!ubi->move_from && !ubi->move_to);
821 ubi_assert(!ubi->move_to_put && !ubi->move_from_put);
824 spin_unlock(&ubi->wl_lock);
827 * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
828 * We so far do not know which logical eraseblock our physical
829 * eraseblock (@e1) belongs to. We have to read the volume identifier header first.
833 err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
834 if (err && err != UBI_IO_BITFLIPS) {
835 if (err == UBI_IO_PEB_FREE) {
837 * We are trying to move PEB without a VID header. UBI
838 * always writes VID headers shortly after the PEB was
839 * given, so we have a situation where it did not have a
840 * chance to write it because it was preempted.
841 * Just re-schedule the work, so that next time it will
842 * likely have the VID header in place.
844 dbg_wl("PEB %d has no VID header", e1->pnum);
847 ubi_err("error %d while reading VID header from PEB %d",
855 err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
857 if (err == UBI_IO_BITFLIPS)
862 ubi_free_vid_hdr(ubi, vid_hdr);
863 spin_lock(&ubi->wl_lock);
864 if (!ubi->move_to_put)
865 wl_tree_add(e2, &ubi->used);
868 ubi->move_from = ubi->move_to = NULL;
869 ubi->move_from_put = ubi->move_to_put = 0;
870 ubi->wl_scheduled = 0;
871 spin_unlock(&ubi->wl_lock);
875 * Well, the target PEB was put meanwhile, schedule it for erasure.
878 dbg_wl("PEB %d was put meanwhile, erase", e2->pnum);
879 err = schedule_erase(ubi, e2, 0);
881 kmem_cache_free(wl_entries_slab, e2);
886 err = schedule_erase(ubi, e1, 0);
888 kmem_cache_free(wl_entries_slab, e1);
896 * Some error occurred. @e1 was not changed, so return it to where it was. @e2
897 * might have been changed, schedule it for erasure.
901 dbg_wl("error %d occurred, cancel operation", err);
902 ubi_assert(err <= 0);
904 ubi_free_vid_hdr(ubi, vid_hdr);
905 spin_lock(&ubi->wl_lock);
906 ubi->wl_scheduled = 0;
907 if (ubi->move_from_put)
910 wl_tree_add(e1, &ubi->used);
911 ubi->move_from = ubi->move_to = NULL;
912 ubi->move_from_put = ubi->move_to_put = 0;
913 spin_unlock(&ubi->wl_lock);
917 * Well, the source PEB was put meanwhile, schedule it for erasure.
920 dbg_wl("PEB %d was put meanwhile, erase", e1->pnum);
921 err = schedule_erase(ubi, e1, 0);
923 kmem_cache_free(wl_entries_slab, e1);
928 err = schedule_erase(ubi, e2, 0);
930 kmem_cache_free(wl_entries_slab, e2);
939 * ensure_wear_leveling - schedule wear-leveling if it is needed.
940 * @ubi: UBI device description object
942 * This function checks if it is time to start wear-leveling and schedules it
943 * if yes. This function returns zero in case of success and a negative error
944 * code in case of failure.
946 static int ensure_wear_leveling(struct ubi_device *ubi)
949 struct ubi_wl_entry *e1;
950 struct ubi_wl_entry *e2;
951 struct ubi_work *wrk;
953 spin_lock(&ubi->wl_lock);
954 if (ubi->wl_scheduled)
955 /* Wear-leveling is already in the work queue */
959 * If the @ubi->scrub tree is not empty, scrubbing is needed, and the
960 * WL worker has to be scheduled anyway.
962 if (!ubi->scrub.rb_node) {
963 if (!ubi->used.rb_node || !ubi->free.rb_node)
964 /* No physical eraseblocks - no deal */
968 * We schedule wear-leveling only if the difference between the
969 * lowest erase counter of used physical eraseblocks and a high
970 * erase counter of free physical eraseblocks is at least %UBI_WL_THRESHOLD.
973 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb);
974 e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
976 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
978 dbg_wl("schedule wear-leveling");
980 dbg_wl("schedule scrubbing");
982 ubi->wl_scheduled = 1;
983 spin_unlock(&ubi->wl_lock);
985 wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
991 wrk->func = &wear_leveling_worker;
992 schedule_ubi_work(ubi, wrk);
996 spin_lock(&ubi->wl_lock);
997 ubi->wl_scheduled = 0;
999 spin_unlock(&ubi->wl_lock);
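
/*
 * Illustrative sketch (assumption): the trigger condition checked both by
 * ensure_wear_leveling() and by the WL worker - data is moved only when the
 * least worn-out used PEB and the most worn-out acceptable free PEB differ
 * by at least %UBI_WL_THRESHOLD erase cycles. The helper name is
 * hypothetical.
 */
static inline int wl_move_needed(int min_used_ec, int max_free_ec)
{
	return max_free_ec - min_used_ec >= UBI_WL_THRESHOLD;
}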
1004 * erase_worker - physical eraseblock erase worker function.
1005 * @ubi: UBI device description object
1006 * @wl_wrk: the work object
1007 * @cancel: non-zero if the worker has to free memory and exit
1009 * This function erases a physical eraseblock and performs torture testing if
1010 * needed. It also takes care of marking the physical eraseblock bad if
1011 * needed. Returns zero in case of success and a negative error code in case of failure.
1014 static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1017 struct ubi_wl_entry *e = wl_wrk->e;
1018 int pnum = e->pnum, err, need;
1021 dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
1023 kmem_cache_free(wl_entries_slab, e);
1027 dbg_wl("erase PEB %d EC %d", pnum, e->ec);
1029 err = sync_erase(ubi, e, wl_wrk->torture);
1031 /* Fine, we've erased it successfully */
1034 spin_lock(&ubi->wl_lock);
1036 wl_tree_add(e, &ubi->free);
1037 spin_unlock(&ubi->wl_lock);
1040 * One more erase operation has happened, take care of protected
1041 * physical eraseblocks.
1043 check_protection_over(ubi);
1045 /* And take care of wear-leveling */
1046 err = ensure_wear_leveling(ubi);
1050 ubi_err("failed to erase PEB %d, error %d", pnum, err);
1052 kmem_cache_free(wl_entries_slab, e);
1054 if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
1058 /* Re-schedule the PEB for erasure */
1059 err1 = schedule_erase(ubi, e, 0);
1065 } else if (err != -EIO) {
1067 * If this is not %-EIO, we have no idea what to do. Scheduling
1068 * this physical eraseblock for erasure again would cause
1069 * errors again and again. Well, let's switch to RO mode.
1074 /* It is %-EIO, the PEB went bad */
1076 if (!ubi->bad_allowed) {
1077 ubi_err("bad physical eraseblock %d detected", pnum);
1081 spin_lock(&ubi->volumes_lock);
1082 need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1;
1084 need = ubi->avail_pebs >= need ? need : ubi->avail_pebs;
1085 ubi->avail_pebs -= need;
1086 ubi->rsvd_pebs += need;
1087 ubi->beb_rsvd_pebs += need;
1089 ubi_msg("reserving %d more PEBs", need);
1092 if (ubi->beb_rsvd_pebs == 0) {
1093 spin_unlock(&ubi->volumes_lock);
1094 ubi_err("no reserved physical eraseblocks");
1098 spin_unlock(&ubi->volumes_lock);
1099 ubi_msg("mark PEB %d as bad", pnum);
1101 err = ubi_io_mark_bad(ubi, pnum);
1105 spin_lock(&ubi->volumes_lock);
1106 ubi->beb_rsvd_pebs -= 1;
1107 ubi->bad_peb_count += 1;
1108 ubi->good_peb_count -= 1;
1109 ubi_calculate_reserved(ubi);
1110 if (ubi->beb_rsvd_pebs == 0)
1111 ubi_warn("last PEB from the reserved pool was used");
1112 spin_unlock(&ubi->volumes_lock);
1122 * ubi_wl_put_peb - return a physical eraseblock to the wear-leveling unit.
1124 * @ubi: UBI device description object
1125 * @pnum: physical eraseblock to return
1126 * @torture: if this physical eraseblock has to be tortured
1128 * This function is called to return physical eraseblock @pnum to the pool of
1129 * free physical eraseblocks. The @torture flag has to be set if an I/O error
1130 * occurred on this PEB and it has to be tested. This function returns zero
1131 * in case of success and a negative error code in case of failure.
1133 int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
1136 struct ubi_wl_entry *e;
1138 dbg_wl("PEB %d", pnum);
1139 ubi_assert(pnum >= 0);
1140 ubi_assert(pnum < ubi->peb_count);
1142 spin_lock(&ubi->wl_lock);
1144 e = ubi->lookuptbl[pnum];
1145 if (e == ubi->move_from) {
1147 * User is putting the physical eraseblock which was selected to
1148 * be moved. It will be scheduled for erasure in the
1149 * wear-leveling worker.
1151 dbg_wl("PEB %d is being moved", pnum);
1152 ubi_assert(!ubi->move_from_put);
1153 ubi->move_from_put = 1;
1154 spin_unlock(&ubi->wl_lock);
1156 } else if (e == ubi->move_to) {
1158 * User is putting the physical eraseblock which was selected
1159 * as the target of a data move. This may happen if the EBA
1160 * unit has already re-mapped the LEB but the WL unit has not
1161 * yet put the PEB to the "used" tree.
1163 dbg_wl("PEB %d is the target of data moving", pnum);
1164 ubi_assert(!ubi->move_to_put);
1165 ubi->move_to_put = 1;
1166 spin_unlock(&ubi->wl_lock);
1169 if (in_wl_tree(e, &ubi->used)) {
1170 paranoid_check_in_wl_tree(e, &ubi->used);
1171 rb_erase(&e->rb, &ubi->used);
1172 } else if (in_wl_tree(e, &ubi->scrub)) {
1173 paranoid_check_in_wl_tree(e, &ubi->scrub);
1174 rb_erase(&e->rb, &ubi->scrub);
1176 prot_tree_del(ubi, e->pnum);
1178 spin_unlock(&ubi->wl_lock);
1180 err = schedule_erase(ubi, e, torture);
1182 spin_lock(&ubi->wl_lock);
1183 wl_tree_add(e, &ubi->used);
1184 spin_unlock(&ubi->wl_lock);
1191 * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
1192 * @ubi: UBI device description object
1193 * @pnum: the physical eraseblock to schedule
1195 * If a bit-flip in a physical eraseblock is detected, this physical eraseblock
1196 * needs scrubbing. This function schedules a physical eraseblock for
1197 * scrubbing, which is done in the background. This function returns zero in case of
1198 * success and a negative error code in case of failure.
1200 int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
1202 struct ubi_wl_entry *e;
1204 ubi_msg("schedule PEB %d for scrubbing", pnum);
1207 spin_lock(&ubi->wl_lock);
1208 e = ubi->lookuptbl[pnum];
1209 if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub)) {
1210 spin_unlock(&ubi->wl_lock);
1214 if (e == ubi->move_to) {
1216 * This physical eraseblock was used to move data to. The data
1217 * was moved but the PEB was not yet inserted to the proper
1218 * tree. We should just wait a little and let the WL worker proceed.
1221 spin_unlock(&ubi->wl_lock);
1222 dbg_wl("the PEB %d is not in proper tree, retry", pnum);
1227 if (in_wl_tree(e, &ubi->used)) {
1228 paranoid_check_in_wl_tree(e, &ubi->used);
1229 rb_erase(&e->rb, &ubi->used);
1231 prot_tree_del(ubi, pnum);
1233 wl_tree_add(e, &ubi->scrub);
1234 spin_unlock(&ubi->wl_lock);
1237 * Technically scrubbing is the same as wear-leveling, so it is done by the WL worker.
1240 return ensure_wear_leveling(ubi);
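
/*
 * Illustrative sketch (assumption, not part of the driver): how a reader
 * might react to a correctable bit-flip reported by the I/O unit - the data
 * is still valid, but the PEB should be scheduled for scrubbing so that it
 * is moved and erased in the background. The function name is hypothetical.
 */
static inline int handle_bitflip_example(struct ubi_device *ubi, int pnum)
{
	/* Data was read back successfully, only bit-flips were detected */
	return ubi_wl_scrub_peb(ubi, pnum);
}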
1244 * ubi_wl_flush - flush all pending works.
1245 * @ubi: UBI device description object
1247 * This function returns zero in case of success and a negative error code in
1250 int ubi_wl_flush(struct ubi_device *ubi)
1252 int err, pending_count;
1254 pending_count = ubi->works_count;
1256 dbg_wl("flush (%d pending works)", pending_count);
1259 * Erase while the pending works queue is not empty, but not more than
1260 * the number of currently pending works.
1262 while (pending_count-- > 0) {
1272 * tree_destroy - destroy an RB-tree.
1273 * @root: the root of the tree to destroy
1275 static void tree_destroy(struct rb_root *root)
1278 struct ubi_wl_entry *e;
1284 else if (rb->rb_right)
1287 e = rb_entry(rb, struct ubi_wl_entry, rb);
1291 if (rb->rb_left == &e->rb)
1294 rb->rb_right = NULL;
1297 kmem_cache_free(wl_entries_slab, e);
1303 * ubi_thread - UBI background thread.
1304 * @u: the UBI device description object pointer
1306 static int ubi_thread(void *u)
1309 struct ubi_device *ubi = u;
1311 ubi_msg("background thread \"%s\" started, PID %d",
1312 ubi->bgt_name, current->pid);
1318 if (kthread_should_stop())
1321 if (try_to_freeze())
1324 spin_lock(&ubi->wl_lock);
1325 if (list_empty(&ubi->works) || ubi->ro_mode ||
1326 !ubi->thread_enabled) {
1327 set_current_state(TASK_INTERRUPTIBLE);
1328 spin_unlock(&ubi->wl_lock);
1332 spin_unlock(&ubi->wl_lock);
1336 ubi_err("%s: work failed with error code %d",
1337 ubi->bgt_name, err);
1338 if (failures++ > WL_MAX_FAILURES) {
1340 * Too many failures, disable the thread and
1341 * switch to read-only mode.
1343 ubi_msg("%s: %d consecutive failures",
1344 ubi->bgt_name, WL_MAX_FAILURES);
1355 dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
1360 * cancel_pending - cancel all pending works.
1361 * @ubi: UBI device description object
1363 static void cancel_pending(struct ubi_device *ubi)
1365 while (!list_empty(&ubi->works)) {
1366 struct ubi_work *wrk;
1368 wrk = list_entry(ubi->works.next, struct ubi_work, list);
1369 list_del(&wrk->list);
1370 wrk->func(ubi, wrk, 1);
1371 ubi->works_count -= 1;
1372 ubi_assert(ubi->works_count >= 0);
1377 * ubi_wl_init_scan - initialize the wear-leveling unit using scanning information.
1379 * @ubi: UBI device description object
1380 * @si: scanning information
1382 * This function returns zero in case of success, and a negative error code in
1385 int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
1388 struct rb_node *rb1, *rb2;
1389 struct ubi_scan_volume *sv;
1390 struct ubi_scan_leb *seb, *tmp;
1391 struct ubi_wl_entry *e;
1394 ubi->used = ubi->free = ubi->scrub = RB_ROOT;
1395 ubi->prot.pnum = ubi->prot.aec = RB_ROOT;
1396 spin_lock_init(&ubi->wl_lock);
1397 ubi->max_ec = si->max_ec;
1398 INIT_LIST_HEAD(&ubi->works);
1400 sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
1402 ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name);
1403 if (IS_ERR(ubi->bgt_thread)) {
1404 err = PTR_ERR(ubi->bgt_thread);
1405 ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name,
1410 if (ubi_devices_cnt == 0) {
1411 wl_entries_slab = kmem_cache_create("ubi_wl_entry_slab",
1412 sizeof(struct ubi_wl_entry),
1414 if (!wl_entries_slab)
1419 ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
1420 if (!ubi->lookuptbl)
1423 list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
1426 e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
1430 e->pnum = seb->pnum;
1432 ubi->lookuptbl[e->pnum] = e;
1433 if (schedule_erase(ubi, e, 0)) {
1434 kmem_cache_free(wl_entries_slab, e);
1439 list_for_each_entry(seb, &si->free, u.list) {
1442 e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
1446 e->pnum = seb->pnum;
1448 ubi_assert(e->ec >= 0);
1449 wl_tree_add(e, &ubi->free);
1450 ubi->lookuptbl[e->pnum] = e;
1453 list_for_each_entry(seb, &si->corr, u.list) {
1456 e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
1460 e->pnum = seb->pnum;
1462 ubi->lookuptbl[e->pnum] = e;
1463 if (schedule_erase(ubi, e, 0)) {
1464 kmem_cache_free(wl_entries_slab, e);
1469 ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
1470 ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
1473 e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
1477 e->pnum = seb->pnum;
1479 ubi->lookuptbl[e->pnum] = e;
1481 dbg_wl("add PEB %d EC %d to the used tree",
1483 wl_tree_add(e, &ubi->used);
1485 dbg_wl("add PEB %d EC %d to the scrub tree",
1487 wl_tree_add(e, &ubi->scrub);
1492 if (ubi->avail_pebs < WL_RESERVED_PEBS) {
1493 ubi_err("not enough physical eraseblocks (%d, need %d)",
1494 ubi->avail_pebs, WL_RESERVED_PEBS);
1497 ubi->avail_pebs -= WL_RESERVED_PEBS;
1498 ubi->rsvd_pebs += WL_RESERVED_PEBS;
1500 /* Schedule wear-leveling if needed */
1501 err = ensure_wear_leveling(ubi);
1508 cancel_pending(ubi);
1509 tree_destroy(&ubi->used);
1510 tree_destroy(&ubi->free);
1511 tree_destroy(&ubi->scrub);
1512 kfree(ubi->lookuptbl);
1513 if (ubi_devices_cnt == 0)
1514 kmem_cache_destroy(wl_entries_slab);
1519 * protection_trees_destroy - destroy the protection RB-trees.
1520 * @ubi: UBI device description object
1522 static void protection_trees_destroy(struct ubi_device *ubi)
1525 struct ubi_wl_prot_entry *pe;
1527 rb = ubi->prot.aec.rb_node;
1531 else if (rb->rb_right)
1534 pe = rb_entry(rb, struct ubi_wl_prot_entry, rb_aec);
1538 if (rb->rb_left == &pe->rb_aec)
1541 rb->rb_right = NULL;
1544 kmem_cache_free(wl_entries_slab, pe->e);
1551 * ubi_wl_close - close the wear-leveling unit.
1552 * @ubi: UBI device description object
1554 void ubi_wl_close(struct ubi_device *ubi)
1556 dbg_wl("disable \"%s\"", ubi->bgt_name);
1557 if (ubi->bgt_thread)
1558 kthread_stop(ubi->bgt_thread);
1560 dbg_wl("close the UBI wear-leveling unit");
1562 cancel_pending(ubi);
1563 protection_trees_destroy(ubi);
1564 tree_destroy(&ubi->used);
1565 tree_destroy(&ubi->free);
1566 tree_destroy(&ubi->scrub);
1567 kfree(ubi->lookuptbl);
1568 if (ubi_devices_cnt == 1)
1569 kmem_cache_destroy(wl_entries_slab);
1572 #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
1575 * paranoid_check_ec - make sure that the erase counter of a physical eraseblock is correct.
1577 * @ubi: UBI device description object
1578 * @pnum: the physical eraseblock number to check
1579 * @ec: the erase counter to check
1581 * This function returns zero if the erase counter of physical eraseblock @pnum
1582 * is equivalent to @ec, %1 if not, and a negative error code if an error occurred.
1585 static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec)
1589 struct ubi_ec_hdr *ec_hdr;
1591 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
1595 err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
1596 if (err && err != UBI_IO_BITFLIPS) {
1597 /* The header does not have to exist */
1602 read_ec = be64_to_cpu(ec_hdr->ec);
1603 if (ec != read_ec) {
1604 ubi_err("paranoid check failed for PEB %d", pnum);
1605 ubi_err("read EC is %lld, should be %d", read_ec, ec);
1606 ubi_dbg_dump_stack();
1617 * paranoid_check_in_wl_tree - make sure that a wear-leveling entry is present in a WL RB-tree.
1619 * @e: the wear-leveling entry to check
1620 * @root: the root of the tree
1622 * This function returns zero if @e is in the @root RB-tree and %1 if it is not.
1625 static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
1626 struct rb_root *root)
1628 if (in_wl_tree(e, root))
1631 ubi_err("paranoid check failed for PEB %d, EC %d, RB-tree %p ",
1632 e->pnum, e->ec, root);
1633 ubi_dbg_dump_stack();
1637 #endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */