mtd: blktrans: Hotplug fixes
drivers/mtd/mtd_blkdevs.c
/*
 * (C) 2003 David Woodhouse <dwmw2@infradead.org>
 *
 * Interface to Linux 2.5 block layer for MTD 'translation layers'.
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <asm/uaccess.h>

#include "mtdcore.h"

static LIST_HEAD(blktrans_majors);
static DEFINE_MUTEX(blktrans_ref_mutex);

void blktrans_dev_release(struct kref *kref)
{
        struct mtd_blktrans_dev *dev =
                container_of(kref, struct mtd_blktrans_dev, ref);

        dev->disk->private_data = NULL;
        put_disk(dev->disk);
        list_del(&dev->list);
        kfree(dev);
}

static struct mtd_blktrans_dev *blktrans_dev_get(struct gendisk *disk)
{
        struct mtd_blktrans_dev *dev;

        mutex_lock(&blktrans_ref_mutex);
        dev = disk->private_data;

        if (!dev)
                goto unlock;
        kref_get(&dev->ref);
unlock:
        mutex_unlock(&blktrans_ref_mutex);
        return dev;
}

void blktrans_dev_put(struct mtd_blktrans_dev *dev)
{
        mutex_lock(&blktrans_ref_mutex);
        kref_put(&dev->ref, blktrans_dev_release);
        mutex_unlock(&blktrans_ref_mutex);
}
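
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * every block_device operation below follows the same pattern -- take a
 * reference with blktrans_dev_get(), check dev->mtd under dev->lock, do
 * the work, then drop the reference with blktrans_dev_put().
 */
#if 0
static int blktrans_example_op(struct gendisk *disk)
{
        struct mtd_blktrans_dev *dev = blktrans_dev_get(disk);
        int ret = -ENXIO;

        if (!dev)
                return ret;
        mutex_lock(&dev->lock);
        if (dev->mtd)
                ret = 0;        /* ... operate on dev->mtd here ... */
        mutex_unlock(&dev->lock);
        blktrans_dev_put(dev);
        return ret;
}
#endif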

static int do_blktrans_request(struct mtd_blktrans_ops *tr,
                               struct mtd_blktrans_dev *dev,
                               struct request *req)
{
        unsigned long block, nsect;
        char *buf;

        block = blk_rq_pos(req) << 9 >> tr->blkshift;
        nsect = blk_rq_cur_bytes(req) >> tr->blkshift;

        buf = req->buffer;

        if (!blk_fs_request(req))
                return -EIO;

        if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
            get_capacity(req->rq_disk))
                return -EIO;

        if (blk_discard_rq(req))
                return tr->discard(dev, block, nsect);

        switch (rq_data_dir(req)) {
        case READ:
                for (; nsect > 0; nsect--, block++, buf += tr->blksize)
                        if (tr->readsect(dev, block, buf))
                                return -EIO;
                rq_flush_dcache_pages(req);
                return 0;
        case WRITE:
                if (!tr->writesect)
                        return -EIO;

                rq_flush_dcache_pages(req);
                for (; nsect > 0; nsect--, block++, buf += tr->blksize)
                        if (tr->writesect(dev, block, buf))
                                return -EIO;
                return 0;
        default:
                printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
                return -EIO;
        }
}
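
/*
 * Worked example (editorial addition): with tr->blksize == 512, so
 * tr->blkshift == 9, a request whose current segment starts at 512-byte
 * sector 8 and covers 4096 bytes maps to block = (8 << 9) >> 9 = 8 and
 * nsect = 4096 >> 9 = 8 device blocks.
 */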

static int mtd_blktrans_thread(void *arg)
{
        struct mtd_blktrans_dev *dev = arg;
        struct request_queue *rq = dev->rq;
        struct request *req = NULL;

        spin_lock_irq(rq->queue_lock);

        while (!kthread_should_stop()) {
                int res;

                if (!req && !(req = blk_fetch_request(rq))) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        spin_unlock_irq(rq->queue_lock);
                        schedule();
                        spin_lock_irq(rq->queue_lock);
                        continue;
                }

                spin_unlock_irq(rq->queue_lock);

                mutex_lock(&dev->lock);
                res = do_blktrans_request(dev->tr, dev, req);
                mutex_unlock(&dev->lock);

                spin_lock_irq(rq->queue_lock);

                if (!__blk_end_request_cur(req, res))
                        req = NULL;
        }

        if (req)
                __blk_end_request_all(req, -EIO);

        spin_unlock_irq(rq->queue_lock);

        return 0;
}

static void mtd_blktrans_request(struct request_queue *rq)
{
        struct mtd_blktrans_dev *dev;
        struct request *req = NULL;

        dev = rq->queuedata;

        if (!dev)
                while ((req = blk_fetch_request(rq)) != NULL)
                        __blk_end_request_all(req, -ENODEV);
        else
                wake_up_process(dev->thread);
}
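
/*
 * Note (editorial addition): MTD sector operations may sleep, so the
 * request function above only fails requests (on hot-unplug, when
 * queuedata has been cleared) or wakes the per-device thread; the actual
 * I/O runs in mtd_blktrans_thread(), outside the queue lock.
 */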

static int blktrans_open(struct block_device *bdev, fmode_t mode)
{
        struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
        int ret;

        if (!dev)
                return -ERESTARTSYS;

        mutex_lock(&dev->lock);

        if (!dev->mtd) {
                ret = -ENXIO;
                goto unlock;
        }

        ret = !dev->open++ && dev->tr->open ? dev->tr->open(dev) : 0;

        /* Take another reference on the device so it won't go away
           until the last release */
        if (!ret)
                kref_get(&dev->ref);
unlock:
        mutex_unlock(&dev->lock);
        blktrans_dev_put(dev);
        return ret;
}

static int blktrans_release(struct gendisk *disk, fmode_t mode)
{
        struct mtd_blktrans_dev *dev = blktrans_dev_get(disk);
        int ret = -ENXIO;

        if (!dev)
                return ret;

        mutex_lock(&dev->lock);

        /* Release one reference; we are sure it's not the last one here */
        kref_put(&dev->ref, blktrans_dev_release);

        if (!dev->mtd)
                goto unlock;

        ret = !--dev->open && dev->tr->release ? dev->tr->release(dev) : 0;
unlock:
        mutex_unlock(&dev->lock);
        blktrans_dev_put(dev);
        return ret;
}

static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
        int ret = -ENXIO;

        if (!dev)
                return ret;

        mutex_lock(&dev->lock);

        if (!dev->mtd)
                goto unlock;

        ret = dev->tr->getgeo ? dev->tr->getgeo(dev, geo) : 0;
unlock:
        mutex_unlock(&dev->lock);
        blktrans_dev_put(dev);
        return ret;
}

static int blktrans_ioctl(struct block_device *bdev, fmode_t mode,
                              unsigned int cmd, unsigned long arg)
{
        struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
        int ret = -ENXIO;

        if (!dev)
                return ret;

        mutex_lock(&dev->lock);

        if (!dev->mtd)
                goto unlock;

        switch (cmd) {
        case BLKFLSBUF:
                ret = dev->tr->flush ? dev->tr->flush(dev) : 0;
                break;  /* without this, ret fell through to -ENOTTY */
        default:
                ret = -ENOTTY;
        }
unlock:
        mutex_unlock(&dev->lock);
        blktrans_dev_put(dev);
        return ret;
}

static const struct block_device_operations mtd_blktrans_ops = {
        .owner          = THIS_MODULE,
        .open           = blktrans_open,
        .release        = blktrans_release,
        .locked_ioctl   = blktrans_ioctl,
        .getgeo         = blktrans_getgeo,
};
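
/*
 * Note (editorial addition): .locked_ioctl is called by the block layer
 * of this era with the big kernel lock held; dev->lock still provides
 * the driver-level serialization.
 */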

int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
        struct mtd_blktrans_ops *tr = new->tr;
        struct mtd_blktrans_dev *d;
        int last_devnum = -1;
        struct gendisk *gd;
        int ret;

        if (mutex_trylock(&mtd_table_mutex)) {
                mutex_unlock(&mtd_table_mutex);
                BUG();
        }

        mutex_lock(&blktrans_ref_mutex);
        list_for_each_entry(d, &tr->devs, list) {
                if (new->devnum == -1) {
                        /* Use first free number */
                        if (d->devnum != last_devnum+1) {
                                /* Found a free devnum. Plug it in here */
                                new->devnum = last_devnum+1;
                                list_add_tail(&new->list, &d->list);
                                goto added;
                        }
                } else if (d->devnum == new->devnum) {
                        /* Required number taken */
                        mutex_unlock(&blktrans_ref_mutex);
                        return -EBUSY;
                } else if (d->devnum > new->devnum) {
                        /* Required number was free */
                        list_add_tail(&new->list, &d->list);
                        goto added;
                }
                last_devnum = d->devnum;
        }

        ret = -EBUSY;
        if (new->devnum == -1)
                new->devnum = last_devnum+1;

        /* Check that the device and any partitions will get valid
         * minor numbers and that the disk naming code below can cope
         * with this number. */
        if (new->devnum > (MINORMASK >> tr->part_bits) ||
            (tr->part_bits && new->devnum >= 27 * 26)) {
                mutex_unlock(&blktrans_ref_mutex);
                goto error1;
        }

        list_add_tail(&new->list, &tr->devs);
 added:
        mutex_unlock(&blktrans_ref_mutex);

        mutex_init(&new->lock);
        kref_init(&new->ref);
        if (!tr->writesect)
                new->readonly = 1;

        /* Create gendisk */
        ret = -ENOMEM;
        gd = alloc_disk(1 << tr->part_bits);

        if (!gd)
                goto error2;

        new->disk = gd;
        gd->private_data = new;
        gd->major = tr->major;
        gd->first_minor = (new->devnum) << tr->part_bits;
        gd->fops = &mtd_blktrans_ops;

        if (tr->part_bits) {
                if (new->devnum < 26)
                        snprintf(gd->disk_name, sizeof(gd->disk_name),
                                 "%s%c", tr->name, 'a' + new->devnum);
                else
                        snprintf(gd->disk_name, sizeof(gd->disk_name),
                                 "%s%c%c", tr->name,
                                 'a' - 1 + new->devnum / 26,
                                 'a' + new->devnum % 26);
        } else {
                snprintf(gd->disk_name, sizeof(gd->disk_name),
                         "%s%d", tr->name, new->devnum);
        }
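
        /*
         * Naming example (editorial addition): with part_bits set,
         * devnum 0..25 map to "<name>a".."<name>z" and devnum 26 becomes
         * "<name>aa"; without part_bits the name is simply "<name>0",
         * "<name>1", ...
         */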

        set_capacity(gd, (new->size * tr->blksize) >> 9);

        /* Create the request queue */
        spin_lock_init(&new->queue_lock);
        new->rq = blk_init_queue(mtd_blktrans_request, &new->queue_lock);

        if (!new->rq)
                goto error3;

        new->rq->queuedata = new;
        blk_queue_logical_block_size(new->rq, tr->blksize);

        if (tr->discard)
                queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
                                        new->rq);

        gd->queue = new->rq;

        __get_mtd_device(new->mtd);
        __module_get(tr->owner);

        /* Create processing thread */
        /* TODO: workqueue ? */
        new->thread = kthread_run(mtd_blktrans_thread, new,
                        "%s%d", tr->name, new->mtd->index);
        if (IS_ERR(new->thread)) {
                ret = PTR_ERR(new->thread);
                goto error4;
        }
        gd->driverfs_dev = &new->mtd->dev;

        if (new->readonly)
                set_disk_ro(gd, 1);

        add_disk(gd);
        return 0;
error4:
        module_put(tr->owner);
        __put_mtd_device(new->mtd);
        blk_cleanup_queue(new->rq);
error3:
        put_disk(new->disk);
error2:
        list_del(&new->list);
error1:
        kfree(new);
        return ret;
}

int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
        unsigned long flags;

        if (mutex_trylock(&mtd_table_mutex)) {
                mutex_unlock(&mtd_table_mutex);
                BUG();
        }

        /* Stop new requests from arriving */
        del_gendisk(old->disk);

        /* Stop the thread */
        kthread_stop(old->thread);

        /* Kill current requests */
        spin_lock_irqsave(&old->queue_lock, flags);
        old->rq->queuedata = NULL;
        blk_start_queue(old->rq);
        spin_unlock_irqrestore(&old->queue_lock, flags);
        blk_cleanup_queue(old->rq);

        /* Ask the trans driver to release the mtd device */
        mutex_lock(&old->lock);
        if (old->open && old->tr->release) {
                old->tr->release(old);
                old->open = 0;
        }

        __put_mtd_device(old->mtd);
        module_put(old->tr->owner);

        /* From this point on, we don't touch the mtd anymore */
        old->mtd = NULL;

        mutex_unlock(&old->lock);
        blktrans_dev_put(old);
        return 0;
}
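
/*
 * Note (editorial addition): the teardown order above matters --
 * del_gendisk() stops new opens, kthread_stop() halts the worker, and
 * clearing queuedata before restarting the queue makes
 * mtd_blktrans_request() fail any pending requests with -ENODEV before
 * the queue itself is destroyed.
 */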

static void blktrans_notify_remove(struct mtd_info *mtd)
{
        struct mtd_blktrans_ops *tr;
        struct mtd_blktrans_dev *dev, *next;

        list_for_each_entry(tr, &blktrans_majors, list)
                list_for_each_entry_safe(dev, next, &tr->devs, list)
                        if (dev->mtd == mtd)
                                tr->remove_dev(dev);
}

static void blktrans_notify_add(struct mtd_info *mtd)
{
        struct mtd_blktrans_ops *tr;

        if (mtd->type == MTD_ABSENT)
                return;

        list_for_each_entry(tr, &blktrans_majors, list)
                tr->add_mtd(tr, mtd);
}

static struct mtd_notifier blktrans_notifier = {
        .add = blktrans_notify_add,
        .remove = blktrans_notify_remove,
};

int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
        struct mtd_info *mtd;
        int ret;

        /* Register the notifier if/when the first device type is
           registered, to prevent the link/init ordering from biting
           us. */
        if (!blktrans_notifier.list.next)
                register_mtd_user(&blktrans_notifier);

        mutex_lock(&mtd_table_mutex);

        ret = register_blkdev(tr->major, tr->name);
        if (ret) {
                printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
                       tr->name, tr->major, ret);
                mutex_unlock(&mtd_table_mutex);
                return ret;
        }

        tr->blkshift = ffs(tr->blksize) - 1;

        INIT_LIST_HEAD(&tr->devs);
        list_add(&tr->list, &blktrans_majors);

        mtd_for_each_device(mtd)
                if (mtd->type != MTD_ABSENT)
                        tr->add_mtd(tr, mtd);

        mutex_unlock(&mtd_table_mutex);
        return 0;
}

int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
        struct mtd_blktrans_dev *dev, *next;

        mutex_lock(&mtd_table_mutex);

        /* Remove it from the list of active majors */
        list_del(&tr->list);

        list_for_each_entry_safe(dev, next, &tr->devs, list)
                tr->remove_dev(dev);

        unregister_blkdev(tr->major, tr->name);
        mutex_unlock(&mtd_table_mutex);

        BUG_ON(!list_empty(&tr->devs));
        return 0;
}
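
/*
 * Usage sketch (editorial addition; all "example_*" names and the major
 * number are hypothetical): a minimal translation layer fills in
 * mtd_blktrans_ops and registers it from its module init, mirroring what
 * drivers such as mtdblock and ftl do.
 */
#if 0
static struct mtd_blktrans_ops example_tr = {
        .name           = "example",
        .major          = 240,          /* hypothetical; local/experimental range */
        .part_bits      = 0,
        .blksize        = 512,
        .readsect       = example_readsect,     /* hypothetical helpers */
        .writesect      = example_writesect,
        .add_mtd        = example_add_mtd,      /* calls add_mtd_blktrans_dev() */
        .remove_dev     = example_remove_dev,   /* calls del_mtd_blktrans_dev() */
        .owner          = THIS_MODULE,
};

static int __init example_init(void)
{
        return register_mtd_blktrans(&example_tr);
}

static void __exit example_exit(void)
{
        deregister_mtd_blktrans(&example_tr);
}
#endif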

static void __exit mtd_blktrans_exit(void)
{
        /* No race here -- if someone's currently in register_mtd_blktrans
           we're screwed anyway. */
        if (blktrans_notifier.list.next)
                unregister_mtd_user(&blktrans_notifier);
}

module_exit(mtd_blktrans_exit);

EXPORT_SYMBOL_GPL(register_mtd_blktrans);
EXPORT_SYMBOL_GPL(deregister_mtd_blktrans);
EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev);
EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev);

MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");