drivers/block/xen-blkfront.c
/*
 * blkfront.c
 *
 * XenLinux virtual block device driver.
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
 * Copyright (c) 2004, Christian Limpach
 * Copyright (c) 2004, Andrew Warfield
 * Copyright (c) 2005, Christopher Clark
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/module.h>

#include <xen/xenbus.h>
#include <xen/grant_table.h>
#include <xen/events.h>
#include <xen/page.h>

#include <xen/interface/grant_table.h>
#include <xen/interface/io/blkif.h>

#include <asm/xen/hypervisor.h>

enum blkif_state {
        BLKIF_STATE_DISCONNECTED,
        BLKIF_STATE_CONNECTED,
        BLKIF_STATE_SUSPENDED,
};

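/*
 * Per-request bookkeeping.  We keep a private copy of each ring request
 * plus the page frames backing its segments, so in-flight requests can
 * be replayed after a suspend/resume (see blkif_recover()).
 */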
struct blk_shadow {
        struct blkif_request req;
        unsigned long request;
        unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
};

static struct block_device_operations xlvbd_block_fops;

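/* Number of request slots in the shared ring: __RING_SIZE rounds the
 * space left in one page after the ring header down to a power of two. */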
#define BLK_RING_SIZE __RING_SIZE((struct blkif_sring *)0, PAGE_SIZE)

/*
 * We have one of these per vbd, whether ide, scsi or 'other'.  They
 * hang in private_data off the gendisk structure. We may end up
 * putting all kinds of interesting stuff here :-)
 */
struct blkfront_info
{
        struct xenbus_device *xbdev;
        dev_t dev;
        struct gendisk *gd;
        int vdevice;
        blkif_vdev_t handle;
        enum blkif_state connected;
        int ring_ref;
        struct blkif_front_ring ring;
        unsigned int evtchn, irq;
        struct request_queue *rq;
        struct work_struct work;
        struct gnttab_free_callback callback;
        struct blk_shadow shadow[BLK_RING_SIZE];
        unsigned long shadow_free;
        int feature_barrier;

        /**
         * The number of people holding this device open.  We won't allow a
         * hot-unplug unless this is 0.
         */
        int users;
};

static DEFINE_SPINLOCK(blkif_io_lock);

#define MAXIMUM_OUTSTANDING_BLOCK_REQS \
        (BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE)
#define GRANT_INVALID_REF       0

#define PARTS_PER_DISK          16

#define BLKIF_MAJOR(dev) ((dev)>>8)
#define BLKIF_MINOR(dev) ((dev) & 0xff)
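/* The "virtual-device" value read from xenstore packs a major number in
 * the high bits and a minor in the low 8 bits; these macros unpack it. */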

#define DEV_NAME        "xvd"   /* name in /dev */

/* Information about our VBDs. */
#define MAX_VBDS 64
static LIST_HEAD(vbds_list);

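/*
 * Unused shadow entries are chained into a free list through their
 * req.id fields; shadow_free holds the head and an out-of-range id
 * (0x0fffffff) terminates the list (see blkfront_probe()).
 */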
static int get_id_from_freelist(struct blkfront_info *info)
{
        unsigned long free = info->shadow_free;
        BUG_ON(free >= BLK_RING_SIZE);
        info->shadow_free = info->shadow[free].req.id;
        info->shadow[free].req.id = 0x0fffffee; /* debug */
        return free;
}

static void add_id_to_freelist(struct blkfront_info *info,
                               unsigned long id)
{
        info->shadow[id].req.id  = info->shadow_free;
        info->shadow[id].request = 0;
        info->shadow_free = id;
}

static void blkif_restart_queue_callback(void *arg)
{
        struct blkfront_info *info = (struct blkfront_info *)arg;
        schedule_work(&info->work);
}

/*
 * blkif_queue_request
 *
 * request block io
 *
 * id: for guest use only.
 * operation: BLKIF_OP_{READ,WRITE,PROBE}
 * buffer: buffer to read/write into. this should be a
 *   virtual address in the guest os.
 */
static int blkif_queue_request(struct request *req)
{
        struct blkfront_info *info = req->rq_disk->private_data;
        unsigned long buffer_mfn;
        struct blkif_request *ring_req;
        struct req_iterator iter;
        struct bio_vec *bvec;
        unsigned long id;
        unsigned int fsect, lsect;
        int ref;
        grant_ref_t gref_head;

        if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
                return 1;

        if (gnttab_alloc_grant_references(
                BLKIF_MAX_SEGMENTS_PER_REQUEST, &gref_head) < 0) {
                gnttab_request_free_callback(
                        &info->callback,
                        blkif_restart_queue_callback,
                        info,
                        BLKIF_MAX_SEGMENTS_PER_REQUEST);
                return 1;
        }

        /* Fill out a communications ring structure. */
        ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
        id = get_id_from_freelist(info);
        info->shadow[id].request = (unsigned long)req;

        ring_req->id = id;
        ring_req->sector_number = (blkif_sector_t)req->sector;
        ring_req->handle = info->handle;

        ring_req->operation = rq_data_dir(req) ?
                BLKIF_OP_WRITE : BLKIF_OP_READ;
        if (blk_barrier_rq(req))
                ring_req->operation = BLKIF_OP_WRITE_BARRIER;

        ring_req->nr_segments = 0;
        rq_for_each_segment(bvec, req, iter) {
                BUG_ON(ring_req->nr_segments == BLKIF_MAX_SEGMENTS_PER_REQUEST);
                buffer_mfn = pfn_to_mfn(page_to_pfn(bvec->bv_page));
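                /* First and last 512-byte sectors of this bvec within its page. */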
                fsect = bvec->bv_offset >> 9;
                lsect = fsect + (bvec->bv_len >> 9) - 1;
                /* install a grant reference. */
                ref = gnttab_claim_grant_reference(&gref_head);
                BUG_ON(ref == -ENOSPC);

                gnttab_grant_foreign_access_ref(
                                ref,
                                info->xbdev->otherend_id,
                                buffer_mfn,
                                rq_data_dir(req) );

                info->shadow[id].frame[ring_req->nr_segments] =
                                mfn_to_pfn(buffer_mfn);

                ring_req->seg[ring_req->nr_segments] =
                                (struct blkif_request_segment) {
                                        .gref       = ref,
                                        .first_sect = fsect,
                                        .last_sect  = lsect };

                ring_req->nr_segments++;
        }

        info->ring.req_prod_pvt++;

        /* Keep a private copy so we can reissue requests when recovering. */
        info->shadow[id].req = *ring_req;

        gnttab_free_grant_references(gref_head);

        return 0;
}


static inline void flush_requests(struct blkfront_info *info)
{
        int notify;

        RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify);

        if (notify)
                notify_remote_via_irq(info->irq);
}

/*
 * do_blkif_request
 *  read a block; request is in a request queue
 */
static void do_blkif_request(struct request_queue *rq)
{
        struct blkfront_info *info = NULL;
        struct request *req;
        int queued;

        pr_debug("Entered do_blkif_request\n");

        queued = 0;

        while ((req = elv_next_request(rq)) != NULL) {
                info = req->rq_disk->private_data;
                if (!blk_fs_request(req)) {
                        end_request(req, 0);
                        continue;
                }

                if (RING_FULL(&info->ring))
                        goto wait;

                pr_debug("do_blk_req %p: cmd %p, sec %lx, "
                         "(%u/%li) buffer:%p [%s]\n",
                         req, req->cmd, (unsigned long)req->sector,
                         req->current_nr_sectors,
                         req->nr_sectors, req->buffer,
                         rq_data_dir(req) ? "write" : "read");


                blkdev_dequeue_request(req);
                if (blkif_queue_request(req)) {
                        blk_requeue_request(rq, req);
wait:
                        /* Avoid pointless unplugs. */
                        blk_stop_queue(rq);
                        break;
                }

                queued++;
        }

        if (queued != 0)
                flush_requests(info);
}

static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
{
        struct request_queue *rq;

        rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
        if (rq == NULL)
                return -1;

        elevator_init(rq, "noop");

        /* Hard sector size and max sectors impersonate the equiv. hardware. */
        blk_queue_hardsect_size(rq, sector_size);
        blk_queue_max_sectors(rq, 512);
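        /* (512 sectors of 512 bytes each, i.e. 256KiB per request.) */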

        /* Each segment in a request is up to an aligned page in size. */
        blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
        blk_queue_max_segment_size(rq, PAGE_SIZE);

        /* Ensure a merged request will fit in a single I/O ring slot. */
        blk_queue_max_phys_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
        blk_queue_max_hw_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);

        /* Make sure buffer addresses are sector-aligned. */
        blk_queue_dma_alignment(rq, 511);

        gd->queue = rq;

        return 0;
}


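/*
 * Tell the block layer how to order barrier requests.  With
 * QUEUE_ORDERED_DRAIN the queue is drained around the barrier rather
 * than relying on a backend cache flush; QUEUE_ORDERED_NONE withdraws
 * barrier support again if the backend turns out not to provide it.
 */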
static int xlvbd_barrier(struct blkfront_info *info)
{
        int err;

        err = blk_queue_ordered(info->rq,
                                info->feature_barrier ? QUEUE_ORDERED_DRAIN : QUEUE_ORDERED_NONE,
                                NULL);

        if (err)
                return err;

        printk(KERN_INFO "blkfront: %s: barriers %s\n",
               info->gd->disk_name,
               info->feature_barrier ? "enabled" : "disabled");
        return 0;
}


static int xlvbd_alloc_gendisk(int minor, blkif_sector_t capacity,
                               int vdevice, u16 vdisk_info, u16 sector_size,
                               struct blkfront_info *info)
{
        struct gendisk *gd;
        int nr_minors = 1;
        int err = -ENODEV;

        BUG_ON(info->gd != NULL);
        BUG_ON(info->rq != NULL);

        if ((minor % PARTS_PER_DISK) == 0)
                nr_minors = PARTS_PER_DISK;

        gd = alloc_disk(nr_minors);
        if (gd == NULL)
                goto out;

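        /* Whole disks (minor 0 of a group) are named xvda, xvdb, ...;
         * a device that is itself a partition gets an xvda1-style name. */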
        if (nr_minors > 1)
                sprintf(gd->disk_name, "%s%c", DEV_NAME,
                        'a' + minor / PARTS_PER_DISK);
        else
                sprintf(gd->disk_name, "%s%c%d", DEV_NAME,
                        'a' + minor / PARTS_PER_DISK,
                        minor % PARTS_PER_DISK);

        gd->major = XENVBD_MAJOR;
        gd->first_minor = minor;
        gd->fops = &xlvbd_block_fops;
        gd->private_data = info;
        gd->driverfs_dev = &(info->xbdev->dev);
        set_capacity(gd, capacity);

        if (xlvbd_init_blk_queue(gd, sector_size)) {
                del_gendisk(gd);
                goto out;
        }

        info->rq = gd->queue;
        info->gd = gd;

        if (info->feature_barrier)
                xlvbd_barrier(info);

        if (vdisk_info & VDISK_READONLY)
                set_disk_ro(gd, 1);

        if (vdisk_info & VDISK_REMOVABLE)
                gd->flags |= GENHD_FL_REMOVABLE;

        if (vdisk_info & VDISK_CDROM)
                gd->flags |= GENHD_FL_CD;

        return 0;

 out:
        return err;
}

static void kick_pending_request_queues(struct blkfront_info *info)
{
        if (!RING_FULL(&info->ring)) {
                /* Re-enable calldowns. */
                blk_start_queue(info->rq);
                /* Kick things off immediately. */
                do_blkif_request(info->rq);
        }
}

static void blkif_restart_queue(struct work_struct *work)
{
        struct blkfront_info *info = container_of(work, struct blkfront_info, work);

        spin_lock_irq(&blkif_io_lock);
        if (info->connected == BLKIF_STATE_CONNECTED)
                kick_pending_request_queues(info);
        spin_unlock_irq(&blkif_io_lock);
}

static void blkif_free(struct blkfront_info *info, int suspend)
{
        /* Prevent new requests being issued until we fix things up. */
        spin_lock_irq(&blkif_io_lock);
        info->connected = suspend ?
                BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
        /* No more blkif_request(). */
        if (info->rq)
                blk_stop_queue(info->rq);
        /* No more gnttab callback work. */
        gnttab_cancel_free_callback(&info->callback);
        spin_unlock_irq(&blkif_io_lock);

        /* Flush gnttab callback work. Must be done with no locks held. */
        flush_scheduled_work();

        /* Free resources associated with old device channel. */
        if (info->ring_ref != GRANT_INVALID_REF) {
                gnttab_end_foreign_access(info->ring_ref, 0,
                                          (unsigned long)info->ring.sring);
                info->ring_ref = GRANT_INVALID_REF;
                info->ring.sring = NULL;
        }
        if (info->irq)
                unbind_from_irqhandler(info->irq, info);
        info->evtchn = info->irq = 0;

}

static void blkif_completion(struct blk_shadow *s)
{
        int i;
        for (i = 0; i < s->req.nr_segments; i++)
                gnttab_end_foreign_access(s->req.seg[i].gref, 0, 0UL);
}

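/*
 * Event-channel interrupt handler: walk the shared ring consuming
 * responses, complete the matching block-layer requests, and restart
 * the queue if the ring has space again.
 */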
static irqreturn_t blkif_interrupt(int irq, void *dev_id)
{
        struct request *req;
        struct blkif_response *bret;
        RING_IDX i, rp;
        unsigned long flags;
        struct blkfront_info *info = (struct blkfront_info *)dev_id;
        int error;

        spin_lock_irqsave(&blkif_io_lock, flags);

        if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
                spin_unlock_irqrestore(&blkif_io_lock, flags);
                return IRQ_HANDLED;
        }

 again:
        rp = info->ring.sring->rsp_prod;
        rmb(); /* Ensure we see queued responses up to 'rp'. */

        for (i = info->ring.rsp_cons; i != rp; i++) {
                unsigned long id;
                int ret;

                bret = RING_GET_RESPONSE(&info->ring, i);
                id   = bret->id;
                req  = (struct request *)info->shadow[id].request;

                blkif_completion(&info->shadow[id]);

                add_id_to_freelist(info, id);

                error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
                switch (bret->operation) {
                case BLKIF_OP_WRITE_BARRIER:
                        if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
                                printk(KERN_WARNING "blkfront: %s: write barrier op failed\n",
                                       info->gd->disk_name);
                                error = -EOPNOTSUPP;
                                info->feature_barrier = 0;
                                xlvbd_barrier(info);
                        }
                        /* fall through */
                case BLKIF_OP_READ:
                case BLKIF_OP_WRITE:
                        if (unlikely(bret->status != BLKIF_RSP_OKAY))
                                dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
                                        "request: %x\n", bret->status);

                        ret = __blk_end_request(req, error, blk_rq_bytes(req));
                        BUG_ON(ret);
                        break;
                default:
                        BUG();
                }
        }

        info->ring.rsp_cons = i;

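        /* If requests are still outstanding, look again for responses that
         * raced with the loop above; otherwise tell the backend which
         * response index should raise the next event. */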
        if (i != info->ring.req_prod_pvt) {
                int more_to_do;
                RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
                if (more_to_do)
                        goto again;
        } else
                info->ring.sring->rsp_event = i + 1;

        kick_pending_request_queues(info);

        spin_unlock_irqrestore(&blkif_io_lock, flags);

        return IRQ_HANDLED;
}


static int setup_blkring(struct xenbus_device *dev,
                         struct blkfront_info *info)
{
        struct blkif_sring *sring;
        int err;

        info->ring_ref = GRANT_INVALID_REF;

        sring = (struct blkif_sring *)__get_free_page(GFP_KERNEL);
        if (!sring) {
                xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
                return -ENOMEM;
        }
        SHARED_RING_INIT(sring);
        FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);

        err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
        if (err < 0) {
                free_page((unsigned long)sring);
                info->ring.sring = NULL;
                goto fail;
        }
        info->ring_ref = err;

        err = xenbus_alloc_evtchn(dev, &info->evtchn);
        if (err)
                goto fail;

        err = bind_evtchn_to_irqhandler(info->evtchn,
                                        blkif_interrupt,
                                        IRQF_SAMPLE_RANDOM, "blkif", info);
        if (err <= 0) {
                xenbus_dev_fatal(dev, err,
                                 "bind_evtchn_to_irqhandler failed");
                goto fail;
        }
        info->irq = err;

        return 0;
fail:
        blkif_free(info, 0);
        return err;
}


/* Common code used when first setting up, and when resuming. */
static int talk_to_backend(struct xenbus_device *dev,
                           struct blkfront_info *info)
{
        const char *message = NULL;
        struct xenbus_transaction xbt;
        int err;

        /* Create shared ring, alloc event channel. */
        err = setup_blkring(dev, info);
        if (err)
                goto out;

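        /* Publish ring-ref and event-channel under our xenstore node so
         * the backend can map the ring and bind to the channel. */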
again:
        err = xenbus_transaction_start(&xbt);
        if (err) {
                xenbus_dev_fatal(dev, err, "starting transaction");
                goto destroy_blkring;
        }

        err = xenbus_printf(xbt, dev->nodename,
                            "ring-ref", "%u", info->ring_ref);
        if (err) {
                message = "writing ring-ref";
                goto abort_transaction;
        }
        err = xenbus_printf(xbt, dev->nodename,
                            "event-channel", "%u", info->evtchn);
        if (err) {
                message = "writing event-channel";
                goto abort_transaction;
        }

        err = xenbus_transaction_end(xbt, 0);
        if (err) {
                if (err == -EAGAIN)
                        goto again;
                xenbus_dev_fatal(dev, err, "completing transaction");
                goto destroy_blkring;
        }

        xenbus_switch_state(dev, XenbusStateInitialised);

        return 0;

 abort_transaction:
        xenbus_transaction_end(xbt, 1);
        if (message)
                xenbus_dev_fatal(dev, err, "%s", message);
 destroy_blkring:
        blkif_free(info, 0);
 out:
        return err;
}


/**
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures and the ring buffer for communication with the backend, and
 * inform the backend of the appropriate details for those.  Switch to
 * Initialised state.
 */
static int blkfront_probe(struct xenbus_device *dev,
                          const struct xenbus_device_id *id)
{
        int err, vdevice, i;
        struct blkfront_info *info;

        /* FIXME: Use dynamic device id if this is not set. */
        err = xenbus_scanf(XBT_NIL, dev->nodename,
                           "virtual-device", "%i", &vdevice);
        if (err != 1) {
                xenbus_dev_fatal(dev, err, "reading virtual-device");
                return err;
        }

        info = kzalloc(sizeof(*info), GFP_KERNEL);
        if (!info) {
                xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
                return -ENOMEM;
        }

        info->xbdev = dev;
        info->vdevice = vdevice;
        info->connected = BLKIF_STATE_DISCONNECTED;
        INIT_WORK(&info->work, blkif_restart_queue);

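        /* Chain all shadow entries into the free list; the out-of-range id
         * on the last entry terminates it. */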
        for (i = 0; i < BLK_RING_SIZE; i++)
                info->shadow[i].req.id = i+1;
        info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;

        /* Front end dir is a number, which is used as the id. */
        info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
        dev->dev.driver_data = info;

        err = talk_to_backend(dev, info);
        if (err) {
                kfree(info);
                dev->dev.driver_data = NULL;
                return err;
        }

        return 0;
}


static int blkif_recover(struct blkfront_info *info)
{
        int i;
        struct blkif_request *req;
        struct blk_shadow *copy;
        int j;

        /* Stage 1: Make a safe copy of the shadow state. */
        copy = kmalloc(sizeof(info->shadow), GFP_KERNEL);
        if (!copy)
                return -ENOMEM;
        memcpy(copy, info->shadow, sizeof(info->shadow));

        /* Stage 2: Set up free list. */
        memset(&info->shadow, 0, sizeof(info->shadow));
        for (i = 0; i < BLK_RING_SIZE; i++)
                info->shadow[i].req.id = i+1;
        info->shadow_free = info->ring.req_prod_pvt;
        info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;

        /* Stage 3: Find pending requests and requeue them. */
        for (i = 0; i < BLK_RING_SIZE; i++) {
                /* Not in use? */
                if (copy[i].request == 0)
                        continue;

                /* Grab a request slot and copy shadow state into it. */
                req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
                *req = copy[i].req;

                /* We get a new request id, and must reset the shadow state. */
                req->id = get_id_from_freelist(info);
                memcpy(&info->shadow[req->id], &copy[i], sizeof(copy[i]));

                /* Rewrite any grant references invalidated by susp/resume. */
                for (j = 0; j < req->nr_segments; j++)
                        gnttab_grant_foreign_access_ref(
                                req->seg[j].gref,
                                info->xbdev->otherend_id,
                                pfn_to_mfn(info->shadow[req->id].frame[j]),
                                rq_data_dir(
                                        (struct request *)
                                        info->shadow[req->id].request));
                info->shadow[req->id].req = *req;

                info->ring.req_prod_pvt++;
        }

        kfree(copy);

        xenbus_switch_state(info->xbdev, XenbusStateConnected);

        spin_lock_irq(&blkif_io_lock);

        /* Now safe for us to use the shared ring */
        info->connected = BLKIF_STATE_CONNECTED;

        /* Send off requeued requests */
        flush_requests(info);

        /* Kick any other new requests queued since we resumed */
        kick_pending_request_queues(info);

        spin_unlock_irq(&blkif_io_lock);

        return 0;
}

/**
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart.  We tear down our blkif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to the
 * rest of the kernel.
 */
static int blkfront_resume(struct xenbus_device *dev)
{
        struct blkfront_info *info = dev->dev.driver_data;
        int err;

        dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);

        blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);

        err = talk_to_backend(dev, info);
        if (info->connected == BLKIF_STATE_SUSPENDED && !err)
                err = blkif_recover(info);

        return err;
}


/*
 * Invoked when the backend is finally 'ready' (and has produced the
 * details about the physical device - #sectors, size, etc).
 */
static void blkfront_connect(struct blkfront_info *info)
{
        unsigned long long sectors;
        unsigned long sector_size;
        unsigned int binfo;
        int err;

        if ((info->connected == BLKIF_STATE_CONNECTED) ||
            (info->connected == BLKIF_STATE_SUSPENDED) )
                return;

        dev_dbg(&info->xbdev->dev, "%s:%s.\n",
                __func__, info->xbdev->otherend);

        err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
                            "sectors", "%llu", &sectors,
                            "info", "%u", &binfo,
                            "sector-size", "%lu", &sector_size,
                            NULL);
        if (err) {
                xenbus_dev_fatal(info->xbdev, err,
                                 "reading backend fields at %s",
                                 info->xbdev->otherend);
                return;
        }

        err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
                            "feature-barrier", "%lu", &info->feature_barrier,
                            NULL);
        if (err)
                info->feature_barrier = 0;

        err = xlvbd_alloc_gendisk(BLKIF_MINOR(info->vdevice),
                                  sectors, info->vdevice,
                                  binfo, sector_size, info);
        if (err) {
                xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
                                 info->xbdev->otherend);
                return;
        }

        xenbus_switch_state(info->xbdev, XenbusStateConnected);

        /* Kick pending requests. */
        spin_lock_irq(&blkif_io_lock);
        info->connected = BLKIF_STATE_CONNECTED;
        kick_pending_request_queues(info);
        spin_unlock_irq(&blkif_io_lock);

        add_disk(info->gd);
}

/**
 * Handle the change of state of the backend to Closing.  We must delete our
 * device-layer structures now, to ensure that writes are flushed through to
 * the backend.  Once this is done, we can switch to Closed in
 * acknowledgement.
 */
static void blkfront_closing(struct xenbus_device *dev)
{
        struct blkfront_info *info = dev->dev.driver_data;
        unsigned long flags;

        dev_dbg(&dev->dev, "blkfront_closing: %s removed\n", dev->nodename);

        if (info->rq == NULL)
                goto out;

        spin_lock_irqsave(&blkif_io_lock, flags);

        del_gendisk(info->gd);

        /* No more blkif_request(). */
        blk_stop_queue(info->rq);

        /* No more gnttab callback work. */
        gnttab_cancel_free_callback(&info->callback);
        spin_unlock_irqrestore(&blkif_io_lock, flags);

        /* Flush gnttab callback work. Must be done with no locks held. */
        flush_scheduled_work();

        blk_cleanup_queue(info->rq);
        info->rq = NULL;

 out:
        xenbus_frontend_closed(dev);
}

/**
 * Callback received when the backend's state changes.
 */
static void backend_changed(struct xenbus_device *dev,
                            enum xenbus_state backend_state)
{
        struct blkfront_info *info = dev->dev.driver_data;
        struct block_device *bd;

        dev_dbg(&dev->dev, "blkfront:backend_changed.\n");

        switch (backend_state) {
        case XenbusStateInitialising:
        case XenbusStateInitWait:
        case XenbusStateInitialised:
        case XenbusStateUnknown:
        case XenbusStateClosed:
                break;

        case XenbusStateConnected:
                blkfront_connect(info);
                break;

        case XenbusStateClosing:
                bd = bdget(info->dev);
                if (bd == NULL) {
                        xenbus_dev_fatal(dev, -ENODEV, "bdget failed");
                        break;
                }

                mutex_lock(&bd->bd_mutex);
                if (info->users > 0)
                        xenbus_dev_error(dev, -EBUSY,
                                         "Device in use; refusing to close");
                else
                        blkfront_closing(dev);
                mutex_unlock(&bd->bd_mutex);
                bdput(bd);
                break;
        }
}

static int blkfront_remove(struct xenbus_device *dev)
{
        struct blkfront_info *info = dev->dev.driver_data;

        dev_dbg(&dev->dev, "blkfront_remove: %s removed\n", dev->nodename);

        blkif_free(info, 0);

        kfree(info);

        return 0;
}

static int blkif_open(struct inode *inode, struct file *filep)
{
        struct blkfront_info *info = inode->i_bdev->bd_disk->private_data;
        info->users++;
        return 0;
}

static int blkif_release(struct inode *inode, struct file *filep)
{
        struct blkfront_info *info = inode->i_bdev->bd_disk->private_data;
        info->users--;
        if (info->users == 0) {
                /* Check whether we have been instructed to close.  We will
                   have ignored this request initially, as the device was
                   still mounted. */
                struct xenbus_device *dev = info->xbdev;
                enum xenbus_state state = xenbus_read_driver_state(dev->otherend);

                if (state == XenbusStateClosing)
                        blkfront_closing(dev);
        }
        return 0;
}

static struct block_device_operations xlvbd_block_fops =
{
        .owner = THIS_MODULE,
        .open = blkif_open,
        .release = blkif_release,
};


static struct xenbus_device_id blkfront_ids[] = {
        { "vbd" },
        { "" }
};

static struct xenbus_driver blkfront = {
        .name = "vbd",
        .owner = THIS_MODULE,
        .ids = blkfront_ids,
        .probe = blkfront_probe,
        .remove = blkfront_remove,
        .resume = blkfront_resume,
        .otherend_changed = backend_changed,
};

static int __init xlblk_init(void)
{
        if (!is_running_on_xen())
                return -ENODEV;

        if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
                printk(KERN_WARNING "xen_blk: can't get major %d with name %s\n",
                       XENVBD_MAJOR, DEV_NAME);
                return -ENODEV;
        }

        return xenbus_register_frontend(&blkfront);
}
module_init(xlblk_init);


static void xlblk_exit(void)
{
        xenbus_unregister_driver(&blkfront);
}
module_exit(xlblk_exit);

MODULE_DESCRIPTION("Xen virtual block device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(XENVBD_MAJOR);