/*
 * $Id: mtd_blkdevs.c,v 1.27 2005/11/07 11:14:20 gleixner Exp $
 *
 * (C) 2003 David Woodhouse <dwmw2@infradead.org>
 *
 * Interface to Linux 2.5 block layer for MTD 'translation layers'.
 *
 */
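/*
 * Usage sketch (hypothetical "foo" driver, for illustration only; the
 * foo_* names and FOO_MAJOR are assumptions, not part of this file). A
 * translation layer fills in an mtd_blktrans_ops and registers it once
 * at init time:
 *
 *	static struct mtd_blktrans_ops foo_tr = {
 *		.name		= "foo",
 *		.major		= FOO_MAJOR,
 *		.part_bits	= 0,
 *		.blksize	= 512,
 *		.readsect	= foo_readsect,
 *		.writesect	= foo_writesect,
 *		.add_mtd	= foo_add_mtd,
 *		.remove_dev	= foo_remove_dev,
 *		.owner		= THIS_MODULE,
 *	};
 *	register_mtd_blktrans(&foo_tr);
 *
 * foo_add_mtd() would allocate an mtd_blktrans_dev, fill in its mtd,
 * devnum and size, and call add_mtd_blktrans_dev(); foo_remove_dev()
 * undoes that via del_mtd_blktrans_dev() and frees the device.
 */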

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <asm/uaccess.h>

static LIST_HEAD(blktrans_majors);

extern struct mutex mtd_table_mutex;
extern struct mtd_info *mtd_table[];

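/*
 * Per-major state shared by all devices of one translation layer: the
 * request queue, the spinlock protecting it, and the machinery used to
 * park and stop the per-major service thread.
 */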
struct mtd_blkcore_priv {
	struct completion thread_dead;
	int exiting;
	wait_queue_head_t thread_wq;
	struct request_queue *rq;
	spinlock_t queue_lock;
};

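/*
 * Service a single request: convert the 512-byte sector units used by
 * the block layer into tr->blksize units and feed each block to the
 * translation layer's readsect()/writesect() hook. Returns 1 on success
 * and 0 on failure, the convention end_request() expects.
 */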
static int do_blktrans_request(struct mtd_blktrans_ops *tr,
			       struct mtd_blktrans_dev *dev,
			       struct request *req)
{
	unsigned long block, nsect;
	char *buf;

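	/*
	 * req->sector << 9 is the byte offset; shifting down by
	 * tr->blkshift converts it to units of tr->blksize. With
	 * blksize 512 (blkshift 9), one block is exactly one sector.
	 */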
	block = req->sector << 9 >> tr->blkshift;
	nsect = req->current_nr_sectors << 9 >> tr->blkshift;

	buf = req->buffer;

	if (!blk_fs_request(req))
		return 0;

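	/* Reject requests that run past the end of the device. */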
	if (req->sector + req->current_nr_sectors > get_capacity(req->rq_disk))
		return 0;

	switch(rq_data_dir(req)) {
	case READ:
		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
			if (tr->readsect(dev, block, buf))
				return 0;
		return 1;

	case WRITE:
		if (!tr->writesect)
			return 0;

		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
			if (tr->writesect(dev, block, buf))
				return 0;
		return 1;

	default:
		printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
		return 0;
	}
}

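/*
 * One kernel thread per registered translation layer. The block layer's
 * request function only wakes this thread; the thread does the real work
 * with the queue lock dropped, since MTD I/O sleeps.
 */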
static int mtd_blktrans_thread(void *arg)
{
	struct mtd_blktrans_ops *tr = arg;
	struct request_queue *rq = tr->blkcore_priv->rq;

	/* we might get involved when memory gets low, so use PF_MEMALLOC */
	current->flags |= PF_MEMALLOC | PF_NOFREEZE;

	daemonize("%sd", tr->name);

	/* daemonize() doesn't do this for us since some kernel threads
	   actually want to deal with signals. We can't just call
	   exit_sighand() since that'll cause an oops when we finally
	   do exit. */
	spin_lock_irq(&current->sighand->siglock);
	sigfillset(&current->blocked);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	spin_lock_irq(rq->queue_lock);

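	/* queue_lock is held at the top of every iteration. */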
	while (!tr->blkcore_priv->exiting) {
		struct request *req;
		struct mtd_blktrans_dev *dev;
		int res = 0;
		DECLARE_WAITQUEUE(wait, current);

		req = elv_next_request(rq);

		if (!req) {
			add_wait_queue(&tr->blkcore_priv->thread_wq, &wait);
			set_current_state(TASK_INTERRUPTIBLE);

			spin_unlock_irq(rq->queue_lock);

			schedule();
			remove_wait_queue(&tr->blkcore_priv->thread_wq, &wait);

			spin_lock_irq(rq->queue_lock);

			continue;
		}

		dev = req->rq_disk->private_data;
		tr = dev->tr;

		spin_unlock_irq(rq->queue_lock);

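		/* Queue lock dropped: the translation layer may sleep. */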
		mutex_lock(&dev->lock);
		res = do_blktrans_request(tr, dev, req);
		mutex_unlock(&dev->lock);

		spin_lock_irq(rq->queue_lock);

		end_request(req, res);
	}
	spin_unlock_irq(rq->queue_lock);

	complete_and_exit(&tr->blkcore_priv->thread_dead, 0);
}

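/*
 * The block layer calls this with the queue lock held; just kick the
 * service thread.
 */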
static void mtd_blktrans_request(struct request_queue *rq)
{
	struct mtd_blktrans_ops *tr = rq->queuedata;
	wake_up(&tr->blkcore_priv->thread_wq);
}

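/*
 * Opening a device pins both the underlying MTD driver's module and the
 * translation layer's module, and bumps the MTD use count; release drops
 * them again in reverse order.
 */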
static int blktrans_open(struct inode *i, struct file *f)
{
	struct mtd_blktrans_dev *dev;
	struct mtd_blktrans_ops *tr;
	int ret = -ENODEV;

	dev = i->i_bdev->bd_disk->private_data;
	tr = dev->tr;

	if (!try_module_get(dev->mtd->owner))
		goto out;

	if (!try_module_get(tr->owner))
		goto out_tr;

	/* FIXME: Locking. A hot pluggable device can go away
	   (del_mtd_device can be called for it) without its module
	   being unloaded. */
	dev->mtd->usecount++;

	ret = 0;
	if (tr->open && (ret = tr->open(dev))) {
		dev->mtd->usecount--;
		module_put(dev->mtd->owner);
	out_tr:
		module_put(tr->owner);
	}
 out:
	return ret;
}

static int blktrans_release(struct inode *i, struct file *f)
{
	struct mtd_blktrans_dev *dev;
	struct mtd_blktrans_ops *tr;
	int ret = 0;

	dev = i->i_bdev->bd_disk->private_data;
	tr = dev->tr;

	if (tr->release)
		ret = tr->release(dev);

	if (!ret) {
		dev->mtd->usecount--;
		module_put(dev->mtd->owner);
		module_put(tr->owner);
	}

	return ret;
}

static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mtd_blktrans_dev *dev = bdev->bd_disk->private_data;

	if (dev->tr->getgeo)
		return dev->tr->getgeo(dev, geo);
	return -ENOTTY;
}

static int blktrans_ioctl(struct inode *inode, struct file *file,
			      unsigned int cmd, unsigned long arg)
{
	struct mtd_blktrans_dev *dev = inode->i_bdev->bd_disk->private_data;
	struct mtd_blktrans_ops *tr = dev->tr;

	switch (cmd) {
	case BLKFLSBUF:
		if (tr->flush)
			return tr->flush(dev);
		/* The core code did the work, we had nothing to do. */
		return 0;
	default:
		return -ENOTTY;
	}
}

struct block_device_operations mtd_blktrans_ops = {
	.owner		= THIS_MODULE,
	.open		= blktrans_open,
	.release	= blktrans_release,
	.ioctl		= blktrans_ioctl,
	.getgeo		= blktrans_getgeo,
};

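/*
 * Called (with mtd_table_mutex held) from a translation layer's add_mtd
 * hook: pick a device number, allocate the gendisk and expose the new
 * device to the block layer.
 */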
int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
	struct mtd_blktrans_ops *tr = new->tr;
	struct list_head *this;
	int last_devnum = -1;
	struct gendisk *gd;

	/* The caller must already hold mtd_table_mutex; if we can take
	   it here, they didn't. */
	if (mutex_trylock(&mtd_table_mutex)) {
		mutex_unlock(&mtd_table_mutex);
		BUG();
	}

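	/* Pick a device number: the first gap in the sorted list, or the
	   caller's requested devnum if it is still free. */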
	list_for_each(this, &tr->devs) {
		struct mtd_blktrans_dev *d = list_entry(this, struct mtd_blktrans_dev, list);
		if (new->devnum == -1) {
			/* Use first free number */
			if (d->devnum != last_devnum+1) {
				/* Found a free devnum. Plug it in here */
				new->devnum = last_devnum+1;
				list_add_tail(&new->list, &d->list);
				goto added;
			}
		} else if (d->devnum == new->devnum) {
			/* Required number taken */
			return -EBUSY;
		} else if (d->devnum > new->devnum) {
			/* Required number was free */
			list_add_tail(&new->list, &d->list);
			goto added;
		}
		last_devnum = d->devnum;
	}
	if (new->devnum == -1)
		new->devnum = last_devnum+1;

	/* Minor numbers 0-255: each device takes 1 << part_bits of them. */
	if ((new->devnum << tr->part_bits) >= 256)
		return -EBUSY;

	mutex_init(&new->lock);
	list_add_tail(&new->list, &tr->devs);
 added:
	if (!tr->writesect)
		new->readonly = 1;

	gd = alloc_disk(1 << tr->part_bits);
	if (!gd) {
		list_del(&new->list);
		return -ENOMEM;
	}
	gd->major = tr->major;
	gd->first_minor = (new->devnum) << tr->part_bits;
	gd->fops = &mtd_blktrans_ops;

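	/* Partitionable devices get sd-style letter names ("%sa".."%sz",
	   then "%saa" and up); non-partitionable ones get a numeric
	   suffix. */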
	if (tr->part_bits) {
		if (new->devnum < 26)
			snprintf(gd->disk_name, sizeof(gd->disk_name),
				 "%s%c", tr->name, 'a' + new->devnum);
		else
			snprintf(gd->disk_name, sizeof(gd->disk_name),
				 "%s%c%c", tr->name,
				 'a' - 1 + new->devnum / 26,
				 'a' + new->devnum % 26);
	} else {
		snprintf(gd->disk_name, sizeof(gd->disk_name),
			 "%s%d", tr->name, new->devnum);
	}

	/* 2.5 has capacity in units of 512 bytes while still
	   having BLOCK_SIZE_BITS set to 10. Just to keep us amused. */
	set_capacity(gd, (new->size * tr->blksize) >> 9);

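	/* Note: the per-device blkcore_priv is the gendisk; it is not
	   related to the per-major tr->blkcore_priv. */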
	gd->private_data = new;
	new->blkcore_priv = gd;
	gd->queue = tr->blkcore_priv->rq;

	if (new->readonly)
		set_disk_ro(gd, 1);

	add_disk(gd);

	return 0;
}

int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
	/* The caller must already hold mtd_table_mutex; if we can take
	   it here, they didn't. */
	if (mutex_trylock(&mtd_table_mutex)) {
		mutex_unlock(&mtd_table_mutex);
		BUG();
	}

	list_del(&old->list);

	del_gendisk(old->blkcore_priv);
	put_disk(old->blkcore_priv);

	return 0;
}

static void blktrans_notify_remove(struct mtd_info *mtd)
{
	struct list_head *this, *this2, *next;

	list_for_each(this, &blktrans_majors) {
		struct mtd_blktrans_ops *tr = list_entry(this, struct mtd_blktrans_ops, list);

		list_for_each_safe(this2, next, &tr->devs) {
			struct mtd_blktrans_dev *dev = list_entry(this2, struct mtd_blktrans_dev, list);

			if (dev->mtd == mtd)
				tr->remove_dev(dev);
		}
	}
}

static void blktrans_notify_add(struct mtd_info *mtd)
{
	struct list_head *this;

	if (mtd->type == MTD_ABSENT)
		return;

	list_for_each(this, &blktrans_majors) {
		struct mtd_blktrans_ops *tr = list_entry(this, struct mtd_blktrans_ops, list);

		tr->add_mtd(tr, mtd);
	}
}

static struct mtd_notifier blktrans_notifier = {
	.add = blktrans_notify_add,
	.remove = blktrans_notify_remove,
};

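/*
 * Register a translation layer: set up its request queue and service
 * thread, then call back into its add_mtd hook for every MTD device
 * already present.
 */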
int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	int ret, i;

	/* Register the notifier if/when the first device type is
	   registered, to prevent the link/init ordering from biting
	   us. */
	if (!blktrans_notifier.list.next)
		register_mtd_user(&blktrans_notifier);

	tr->blkcore_priv = kzalloc(sizeof(*tr->blkcore_priv), GFP_KERNEL);
	if (!tr->blkcore_priv)
		return -ENOMEM;

	mutex_lock(&mtd_table_mutex);

	ret = register_blkdev(tr->major, tr->name);
	if (ret) {
		printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
		       tr->name, tr->major, ret);
		kfree(tr->blkcore_priv);
		mutex_unlock(&mtd_table_mutex);
		return ret;
	}
	spin_lock_init(&tr->blkcore_priv->queue_lock);
	init_completion(&tr->blkcore_priv->thread_dead);
	init_waitqueue_head(&tr->blkcore_priv->thread_wq);

	tr->blkcore_priv->rq = blk_init_queue(mtd_blktrans_request, &tr->blkcore_priv->queue_lock);
	if (!tr->blkcore_priv->rq) {
		unregister_blkdev(tr->major, tr->name);
		kfree(tr->blkcore_priv);
		mutex_unlock(&mtd_table_mutex);
		return -ENOMEM;
	}

	tr->blkcore_priv->rq->queuedata = tr;
	blk_queue_hardsect_size(tr->blkcore_priv->rq, tr->blksize);
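	/* tr->blksize must be a power of two; ffs(blksize) - 1 is then
	   its exact log2. */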
	tr->blkshift = ffs(tr->blksize) - 1;

	ret = kernel_thread(mtd_blktrans_thread, tr, CLONE_KERNEL);
	if (ret < 0) {
		blk_cleanup_queue(tr->blkcore_priv->rq);
		unregister_blkdev(tr->major, tr->name);
		kfree(tr->blkcore_priv);
		mutex_unlock(&mtd_table_mutex);
		return ret;
	}

	INIT_LIST_HEAD(&tr->devs);
	list_add(&tr->list, &blktrans_majors);

	for (i=0; i<MAX_MTD_DEVICES; i++) {
		if (mtd_table[i] && mtd_table[i]->type != MTD_ABSENT)
			tr->add_mtd(tr, mtd_table[i]);
	}

	mutex_unlock(&mtd_table_mutex);

	return 0;
}

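/*
 * Tear down a translation layer: stop the service thread, detach every
 * remaining device, then release the queue and the major number.
 */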
int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	struct list_head *this, *next;

	mutex_lock(&mtd_table_mutex);

	/* Clean up the kernel thread */
	tr->blkcore_priv->exiting = 1;
	wake_up(&tr->blkcore_priv->thread_wq);
	wait_for_completion(&tr->blkcore_priv->thread_dead);

	/* Remove it from the list of active majors */
	list_del(&tr->list);

	list_for_each_safe(this, next, &tr->devs) {
		struct mtd_blktrans_dev *dev = list_entry(this, struct mtd_blktrans_dev, list);
		tr->remove_dev(dev);
	}

	blk_cleanup_queue(tr->blkcore_priv->rq);
	unregister_blkdev(tr->major, tr->name);

	mutex_unlock(&mtd_table_mutex);

	kfree(tr->blkcore_priv);

	BUG_ON(!list_empty(&tr->devs));
	return 0;
}

static void __exit mtd_blktrans_exit(void)
{
	/* No race here -- if someone's currently in register_mtd_blktrans
	   we're screwed anyway. */
	if (blktrans_notifier.list.next)
		unregister_mtd_user(&blktrans_notifier);
}

module_exit(mtd_blktrans_exit);

EXPORT_SYMBOL_GPL(register_mtd_blktrans);
EXPORT_SYMBOL_GPL(deregister_mtd_blktrans);
EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev);
EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev);

MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");