/*
 * MTD Oops/Panic logger
 *
 * Copyright (C) 2007 Nokia Corporation. All rights reserved.
 *
 * Author: Richard Purdie <rpurdie@openedhand.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/mtd/mtd.h>

#define OOPS_PAGE_SIZE 4096

static struct mtdoops_context {
        int mtd_index;
        struct work_struct work_erase;
        struct work_struct work_write;
        struct mtd_info *mtd;
        int oops_pages;
        int nextpage;
        int nextcount;

        void *oops_buf;

        /* writecount and the disabling of ready are protected by writecount_lock */
        spinlock_t writecount_lock;
        int ready;
        int writecount;
} oops_cxt;

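/*
 * Erase completion callback: the erase request's priv field carries a
 * pointer to the wait queue that mtdoops_erase_block() is sleeping on,
 * so simply wake the waiter.
 */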
static void mtdoops_erase_callback(struct erase_info *done)
{
        wait_queue_head_t *wait_q = (wait_queue_head_t *)done->priv;
        wake_up(wait_q);
}

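/*
 * Synchronously erase the eraseblock at the given offset: submit the
 * erase request and sleep until the callback above wakes us. Returns 0
 * on success or the error from mtd->erase().
 */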
static int mtdoops_erase_block(struct mtd_info *mtd, int offset)
{
        struct erase_info erase;
        DECLARE_WAITQUEUE(wait, current);
        wait_queue_head_t wait_q;
        int ret;

        init_waitqueue_head(&wait_q);
        erase.mtd = mtd;
        erase.callback = mtdoops_erase_callback;
        erase.addr = offset;
        erase.len = mtd->erasesize;
        erase.priv = (u_long)&wait_q;

        set_current_state(TASK_INTERRUPTIBLE);
        add_wait_queue(&wait_q, &wait);

        ret = mtd->erase(mtd, &erase);
        if (ret) {
                set_current_state(TASK_RUNNING);
                remove_wait_queue(&wait_q, &wait);
                printk(KERN_WARNING "mtdoops: erase of region [0x%x, 0x%x] "
                                    "on \"%s\" failed\n",
                        erase.addr, erase.len, mtd->name);
                return ret;
        }

        schedule();  /* Wait for erase to finish. */
        remove_wait_queue(&wait_q, &wait);

        return 0;
}

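/*
 * Advance to the next record slot: bump the page and the sequence
 * counter (both wrap), then peek at the first word of the new page. An
 * erased page reads back as 0xffffffff and can be used directly;
 * anything else means the containing block must be erased first, so the
 * erase work is scheduled and 'ready' stays clear until it completes.
 */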
static void mtdoops_inc_counter(struct mtdoops_context *cxt)
{
        struct mtd_info *mtd = cxt->mtd;
        size_t retlen;
        u32 count;
        int ret;

        cxt->nextpage++;
        if (cxt->nextpage >= cxt->oops_pages)
                cxt->nextpage = 0;
        cxt->nextcount++;
        if (cxt->nextcount == 0xffffffff)
                cxt->nextcount = 0;

        ret = mtd->read(mtd, cxt->nextpage * OOPS_PAGE_SIZE, 4,
                        &retlen, (u_char *) &count);
        if ((retlen != 4) || ((ret < 0) && (ret != -EUCLEAN))) {
                printk(KERN_ERR "mtdoops: Read failure at %d (%zu of 4 read)"
                                ", err %d.\n", cxt->nextpage * OOPS_PAGE_SIZE,
                                retlen, ret);
                schedule_work(&cxt->work_erase);
                return;
        }

        /* See if we need to erase the next block */
        if (count != 0xffffffff) {
                schedule_work(&cxt->work_erase);
                return;
        }

        printk(KERN_DEBUG "mtdoops: Ready %d, %d (no erase)\n",
                        cxt->nextpage, cxt->nextcount);
        cxt->ready = 1;
}

/* Scheduled work - when we can't proceed without erasing a block */
static void mtdoops_workfunc_erase(struct work_struct *work)
{
        struct mtdoops_context *cxt =
                        container_of(work, struct mtdoops_context, work_erase);
        struct mtd_info *mtd = cxt->mtd;
        int i = 0, j, ret, mod;

        /* We were unregistered */
        if (!mtd)
                return;

        /* Round the write position up to the next eraseblock boundary */
        mod = (cxt->nextpage * OOPS_PAGE_SIZE) % mtd->erasesize;
        if (mod != 0) {
                cxt->nextpage = cxt->nextpage + ((mtd->erasesize - mod) / OOPS_PAGE_SIZE);
                if (cxt->nextpage >= cxt->oops_pages)
                        cxt->nextpage = 0;
        }

        /* Skip over bad blocks, giving up if every block is bad */
        while (mtd->block_isbad) {
                ret = mtd->block_isbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
                if (!ret)
                        break;
                if (ret < 0) {
                        printk(KERN_ERR "mtdoops: block_isbad failed, aborting.\n");
                        return;
                }
badblock:
                printk(KERN_WARNING "mtdoops: Bad block at %08x\n",
                                cxt->nextpage * OOPS_PAGE_SIZE);
                i++;
                cxt->nextpage = cxt->nextpage + (mtd->erasesize / OOPS_PAGE_SIZE);
                if (cxt->nextpage >= cxt->oops_pages)
                        cxt->nextpage = 0;
                if (i == (cxt->oops_pages / (mtd->erasesize / OOPS_PAGE_SIZE))) {
                        printk(KERN_ERR "mtdoops: All blocks bad!\n");
                        return;
                }
        }

        /* Retry the erase up to three times before treating the block as bad */
        for (j = 0, ret = -1; (j < 3) && (ret < 0); j++)
                ret = mtdoops_erase_block(mtd, cxt->nextpage * OOPS_PAGE_SIZE);

        if (ret >= 0) {
                printk(KERN_DEBUG "mtdoops: Ready %d, %d\n", cxt->nextpage, cxt->nextcount);
                cxt->ready = 1;
                return;
        }

        if (mtd->block_markbad && (ret == -EIO)) {
                ret = mtd->block_markbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
                if (ret < 0) {
                        printk(KERN_ERR "mtdoops: block_markbad failed, aborting.\n");
                        return;
                }
        }
        goto badblock;
}

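/*
 * Flush the accumulated console output to flash. The unused tail of the
 * page is padded with 0xff so it still reads back as erased. In a panic,
 * mtd->panic_write() is used so that nothing sleeps or takes locks;
 * otherwise the regular mtd->write() path is used.
 */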
static void mtdoops_write(struct mtdoops_context *cxt, int panic)
{
        struct mtd_info *mtd = cxt->mtd;
        size_t retlen;
        int ret;

        if (cxt->writecount < OOPS_PAGE_SIZE)
                memset(cxt->oops_buf + cxt->writecount, 0xff,
                                        OOPS_PAGE_SIZE - cxt->writecount);

        if (panic)
                ret = mtd->panic_write(mtd, cxt->nextpage * OOPS_PAGE_SIZE,
                                        OOPS_PAGE_SIZE, &retlen, cxt->oops_buf);
        else
                ret = mtd->write(mtd, cxt->nextpage * OOPS_PAGE_SIZE,
                                        OOPS_PAGE_SIZE, &retlen, cxt->oops_buf);

        cxt->writecount = 0;

        if ((retlen != OOPS_PAGE_SIZE) || (ret < 0))
                printk(KERN_ERR "mtdoops: Write failure at %d (%zu of %d written), err %d.\n",
                        cxt->nextpage * OOPS_PAGE_SIZE, retlen, OOPS_PAGE_SIZE, ret);

        mtdoops_inc_counter(cxt);
}

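/* Scheduled work - writes out the buffered oops when not in a panic */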
static void mtdoops_workfunc_write(struct work_struct *work)
{
        struct mtdoops_context *cxt =
                        container_of(work, struct mtdoops_context, work_write);

        mtdoops_write(cxt, 0);
}

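/*
 * Scan every page for a saved sequence counter and pick the highest,
 * treating the 32-bit counter as wrapping arithmetic so a value that has
 * wrapped past zero still beats one just below 0xffffffff. The next
 * record goes in the slot after the winner; an entirely erased device
 * starts from page 0 with count 1.
 */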
static void find_next_position(struct mtdoops_context *cxt)
{
        struct mtd_info *mtd = cxt->mtd;
        int ret, page, maxpos = 0;
        u32 count, maxcount = 0xffffffff;
        size_t retlen;

        for (page = 0; page < cxt->oops_pages; page++) {
                ret = mtd->read(mtd, page * OOPS_PAGE_SIZE, 4, &retlen, (u_char *) &count);
                if ((retlen != 4) || ((ret < 0) && (ret != -EUCLEAN))) {
                        printk(KERN_ERR "mtdoops: Read failure at %d (%zu of 4 read)"
                                ", err %d.\n", page * OOPS_PAGE_SIZE, retlen, ret);
                        continue;
                }

                if (count == 0xffffffff)
                        continue;
                if (maxcount == 0xffffffff) {
                        maxcount = count;
                        maxpos = page;
                } else if ((count < 0x40000000) && (maxcount > 0xc0000000)) {
                        maxcount = count;
                        maxpos = page;
                } else if ((count > maxcount) && (count < 0xc0000000)) {
                        maxcount = count;
                        maxpos = page;
                } else if ((count > maxcount) && (count > 0xc0000000)
                                        && (maxcount > 0x80000000)) {
                        maxcount = count;
                        maxpos = page;
                }
        }
        if (maxcount == 0xffffffff) {
                cxt->nextpage = 0;
                cxt->nextcount = 1;
                cxt->ready = 1;
                printk(KERN_DEBUG "mtdoops: Ready %d, %d (first init)\n",
                                cxt->nextpage, cxt->nextcount);
                return;
        }

        cxt->nextpage = maxpos;
        cxt->nextcount = maxcount;

        mtdoops_inc_counter(cxt);
}

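/*
 * MTD notifier hook: called when an MTD device appears. If it is the
 * device selected on the console command line, sanity-check its geometry
 * (at least two eraseblocks, eraseblock no smaller than a record page)
 * and attach to it.
 */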
static void mtdoops_notify_add(struct mtd_info *mtd)
{
        struct mtdoops_context *cxt = &oops_cxt;

        if ((mtd->index != cxt->mtd_index) || cxt->mtd_index < 0)
                return;

        if (mtd->size < (mtd->erasesize * 2)) {
                printk(KERN_ERR "mtdoops: MTD partition %d not big enough\n",
                                mtd->index);
                return;
        }

        if (mtd->erasesize < OOPS_PAGE_SIZE) {
                printk(KERN_ERR "mtdoops: eraseblock size of MTD partition %d too small\n",
                                mtd->index);
                return;
        }

        cxt->mtd = mtd;
        cxt->oops_pages = mtd->size / OOPS_PAGE_SIZE;

        find_next_position(cxt);

        printk(KERN_INFO "mtdoops: Attached to MTD device %d\n", mtd->index);
}

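/*
 * MTD notifier hook: called when an MTD device goes away. Detach from it
 * and wait for any outstanding erase/write work to finish.
 */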
static void mtdoops_notify_remove(struct mtd_info *mtd)
{
        struct mtdoops_context *cxt = &oops_cxt;

        if ((mtd->index != cxt->mtd_index) || cxt->mtd_index < 0)
                return;

        cxt->mtd = NULL;
        flush_scheduled_work();
}

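/*
 * Flush the current buffer to flash. Clearing 'ready' under the lock
 * stops mtdoops_console_write() from adding anything more; if we are
 * panicking in interrupt context the write is done synchronously via
 * panic_write, otherwise it is deferred to the write workqueue.
 */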
static void mtdoops_console_sync(void)
{
        struct mtdoops_context *cxt = &oops_cxt;
        struct mtd_info *mtd = cxt->mtd;
        unsigned long flags;

        if (!cxt->ready || !mtd || cxt->writecount == 0)
                return;

        /*
         * Once ready is 0 and we've taken the lock, no further writes to the
         * buffer will happen
         */
        spin_lock_irqsave(&cxt->writecount_lock, flags);
        if (!cxt->ready) {
                spin_unlock_irqrestore(&cxt->writecount_lock, flags);
                return;
        }
        cxt->ready = 0;
        spin_unlock_irqrestore(&cxt->writecount_lock, flags);

        if (mtd->panic_write && in_interrupt())
                /* Interrupt context, we're going to panic so try and log */
                mtdoops_write(cxt, 1);
        else
                schedule_work(&cxt->work_write);
}

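/*
 * Console write hook. Outside of an oops this only triggers a sync of
 * whatever is buffered; during an oops the text is appended to oops_buf
 * (stamped with the sequence counter at offset 0) and flushed once a
 * full page has accumulated.
 */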
static void
mtdoops_console_write(struct console *co, const char *s, unsigned int count)
{
        struct mtdoops_context *cxt = co->data;
        struct mtd_info *mtd = cxt->mtd;
        unsigned long flags;

        if (!oops_in_progress) {
                mtdoops_console_sync();
                return;
        }

        if (!cxt->ready || !mtd)
                return;

        /* Locking on writecount ensures sequential writes to the buffer */
        spin_lock_irqsave(&cxt->writecount_lock, flags);

        /* Check ready status didn't change whilst waiting for the lock */
        if (!cxt->ready) {
                spin_unlock_irqrestore(&cxt->writecount_lock, flags);
                return;
        }

        if (cxt->writecount == 0) {
                u32 *stamp = cxt->oops_buf;
                *stamp = cxt->nextcount;
                cxt->writecount = 4;
        }

        if ((count + cxt->writecount) > OOPS_PAGE_SIZE)
                count = OOPS_PAGE_SIZE - cxt->writecount;

        memcpy(cxt->oops_buf + cxt->writecount, s, count);
        cxt->writecount += count;

        spin_unlock_irqrestore(&cxt->writecount_lock, flags);

        if (cxt->writecount == OOPS_PAGE_SIZE)
                mtdoops_console_sync();
}

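/*
 * Parse console=ttyMTDx: remember which MTD device index to attach to
 * once it shows up via the notifier.
 */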
static int __init mtdoops_console_setup(struct console *co, char *options)
{
        struct mtdoops_context *cxt = co->data;

        if (cxt->mtd_index != -1)
                return -EBUSY;
        if (co->index == -1)
                return -EINVAL;

        cxt->mtd_index = co->index;
        return 0;
}

static struct mtd_notifier mtdoops_notifier = {
        .add    = mtdoops_notify_add,
        .remove = mtdoops_notify_remove,
};

static struct console mtdoops_console = {
        .name           = "ttyMTD",
        .write          = mtdoops_console_write,
        .setup          = mtdoops_console_setup,
        .unblank        = mtdoops_console_sync,
        .index          = -1,
        .data           = &oops_cxt,
};

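/*
 * Module init: allocate the one-page staging buffer, set up the two work
 * items and register both the console and the MTD user notifier.
 */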
static int __init mtdoops_console_init(void)
{
        struct mtdoops_context *cxt = &oops_cxt;

        cxt->mtd_index = -1;
        cxt->oops_buf = vmalloc(OOPS_PAGE_SIZE);

        if (!cxt->oops_buf) {
                printk(KERN_ERR "mtdoops: failed to allocate buffer workspace\n");
                return -ENOMEM;
        }

        INIT_WORK(&cxt->work_erase, mtdoops_workfunc_erase);
        INIT_WORK(&cxt->work_write, mtdoops_workfunc_write);

        register_console(&mtdoops_console);
        register_mtd_user(&mtdoops_notifier);
        return 0;
}

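/* Module exit: tear everything down in the reverse order of init */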
static void __exit mtdoops_console_exit(void)
{
        struct mtdoops_context *cxt = &oops_cxt;

        unregister_mtd_user(&mtdoops_notifier);
        unregister_console(&mtdoops_console);
        vfree(cxt->oops_buf);
}

subsys_initcall(mtdoops_console_init);
module_exit(mtdoops_console_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Richard Purdie <rpurdie@openedhand.com>");
MODULE_DESCRIPTION("MTD Oops/Panic console logger/driver");