/*
 * Authors: Dave Boutcher <boutcher@us.ibm.com>
 *          Ryan Arnold <ryanarn@us.ibm.com>
 *          Colin Devilbiss <devilbis@us.ibm.com>
 *          Stephen Rothwell <sfr@au1.ibm.com>
 *
 * (C) Copyright 2000-2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * This driver provides access to disk space (termed "DASD" in historical
 * IBM terms) owned and managed by an OS/400 partition running on the
 * same box as this Linux partition.
 *
 * All disk operations are performed by sending messages back and forth to
 * the OS/400 partition.
 */
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/hdreg.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/scatterlist.h>

#include <asm/uaccess.h>

#include <asm/iseries/hv_types.h>
#include <asm/iseries/hv_lp_event.h>
#include <asm/iseries/hv_lp_config.h>
#include <asm/iseries/vio.h>
#include <asm/firmware.h>
MODULE_DESCRIPTION("iSeries Virtual DASD");
MODULE_AUTHOR("Dave Boutcher");
MODULE_LICENSE("GPL");
/*
 * We only support 7 partitions per physical disk, so with minor
 * numbers 0-255 we get a maximum of 32 disks.
 */
#define VIOD_GENHD_NAME		"iseries/vd"
#define VIOD_VERS		"1.64"

#define VIOD_KERN_WARNING	KERN_WARNING "viod: "
#define VIOD_KERN_INFO		KERN_INFO "viod: "

enum {
	PARTITION_SHIFT = 3,
	MAX_DISKNO = HVMAXARCHITECTEDVIRTUALDISKS,
	MAX_DISK_NAME = sizeof(((struct gendisk *)0)->disk_name)
};

static DEFINE_SPINLOCK(viodasd_spinlock);

#define VIOMAXREQ		16

#define DEVICE_NO(cell)	((struct viodasd_device *)(cell) - &viodasd_devices[0])
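
/*
 * Note: DEVICE_NO() recovers a disk's index purely by pointer arithmetic
 * against the viodasd_devices[] array defined below; that index doubles as
 * the disk number OS/400 expects in bits 48-63 of the LP event data (see
 * the HvCallEvent_signalLpEventFast() calls later in this file).
 */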
struct viodasd_waitevent {
	struct completion	com;
	int			rc;
	u16			sub_result;
	int			max_disk;	/* open */
};
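
/*
 * A viodasd_waitevent lives on the requester's stack: its address travels to
 * OS/400 as the LP event correlation token, and the open-response path in
 * handle_block_event() fills in rc/sub_result (and max_disk for opens)
 * before completing 'com' to wake the waiter.
 */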
static const struct vio_error_entry viodasd_err_table[] = {
	{ 0x0201, EINVAL, "Invalid Range" },
	{ 0x0202, EINVAL, "Invalid Token" },
	{ 0x0203, EIO, "DMA Error" },
	{ 0x0204, EIO, "Use Error" },
	{ 0x0205, EIO, "Release Error" },
	{ 0x0206, EINVAL, "Invalid Disk" },
	{ 0x0207, EBUSY, "Can't Lock" },
	{ 0x0208, EIO, "Already Locked" },
	{ 0x0209, EIO, "Already Unlocked" },
	{ 0x020A, EIO, "Invalid Arg" },
	{ 0x020B, EIO, "Bad IFS File" },
	{ 0x020C, EROFS, "Read Only Device" },
	{ 0x02FF, EIO, "Internal Error" },
	{ 0x0000, 0, NULL },	/* end-of-table sentinel */
};
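
/*
 * OS/400 reports failures as 16-bit sub_result codes; the vio_lookup_rc()
 * calls below translate them through this table into a printable
 * description when logging errors.
 */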
/*
 * Figure out the biggest I/O request (in sectors) we can accept:
 * 8 sectors per 4096-byte DMA block, times the maximum number of
 * DMA blocks we can describe in one event.
 */
#define VIODASD_MAXSECTORS (4096 / 512 * VIOMAXBLOCKDMA)

/*
 * Number of disk I/O requests we've sent to OS/400
 */
static int num_req_outstanding;
/*
 * This is our internal structure for keeping track of disk devices
 */
struct viodasd_device {
	u16		cylinders;
	u16		tracks;
	u16		sectors;
	u16		bytes_per_sector;
	u64		size;
	int		read_only;
	spinlock_t	q_lock;
	struct gendisk	*disk;
	struct device	*dev;
} viodasd_devices[MAX_DISKNO];
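
/*
 * viodasd_devices[] is indexed by disk number, which is also the vio unit
 * address (see viodasd_probe()).  The geometry fields are filled in from
 * the open response sent back by OS/400 in handle_block_event().
 */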
/*
 * External open entry point.
 */
static int viodasd_open(struct inode *ino, struct file *fil)
{
	struct viodasd_device *d = ino->i_bdev->bd_disk->private_data;
	HvLpEvent_Rc hvrc;
	struct viodasd_waitevent we;
	u16 flags = 0;

	if (d->read_only) {
		if ((fil != NULL) && (fil->f_mode & FMODE_WRITE))
			return -EROFS;
		flags = vioblockflags_ro;
	}

	init_completion(&we.com);

	/* Send the open event to OS/400 */
	hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
			HvLpEvent_Type_VirtualIo,
			viomajorsubtype_blockio | vioblockopen,
			HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
			viopath_sourceinst(viopath_hostLp),
			viopath_targetinst(viopath_hostLp),
			(u64)(unsigned long)&we, VIOVERSION << 16,
			((u64)DEVICE_NO(d) << 48) | ((u64)flags << 32),
			0, 0, 0);
	if (hvrc != 0) {
		printk(VIOD_KERN_WARNING "HV open failed %d\n", (int)hvrc);
		return -EIO;
	}

	wait_for_completion(&we.com);

	/* Check the return code */
	if (we.rc != 0) {
		const struct vio_error_entry *err =
			vio_lookup_rc(viodasd_err_table, we.sub_result);

		printk(VIOD_KERN_WARNING
				"bad rc opening disk: %d:0x%04x (%s)\n",
				(int)we.rc, we.sub_result, err->msg);
		return -EIO;
	}

	return 0;
}
/*
 * External release entry point.
 */
static int viodasd_release(struct inode *ino, struct file *fil)
{
	struct viodasd_device *d = ino->i_bdev->bd_disk->private_data;
	HvLpEvent_Rc hvrc;

	/* Send the event to OS/400.  We DON'T expect a response */
	hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
			HvLpEvent_Type_VirtualIo,
			viomajorsubtype_blockio | vioblockclose,
			HvLpEvent_AckInd_NoAck, HvLpEvent_AckType_ImmediateAck,
			viopath_sourceinst(viopath_hostLp),
			viopath_targetinst(viopath_hostLp),
			0, VIOVERSION << 16,
			((u64)DEVICE_NO(d) << 48) /* | ((u64)flags << 32) */,
			0, 0, 0);
	if (hvrc != 0)
		printk(VIOD_KERN_WARNING "HV close call failed %d\n",
				(int)hvrc);
	return 0;
}
/*
 * External getgeo entry point.
 */
static int viodasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct gendisk *disk = bdev->bd_disk;
	struct viodasd_device *d = disk->private_data;

	geo->sectors = d->sectors ? d->sectors : 32;
	geo->heads = d->tracks ? d->tracks : 64;
	geo->cylinders = d->cylinders ? d->cylinders :
		get_capacity(disk) / (geo->sectors * geo->heads);

	return 0;
}
/*
 * Our file operations table
 */
static struct block_device_operations viodasd_fops = {
	.owner = THIS_MODULE,
	.open = viodasd_open,
	.release = viodasd_release,
	.getgeo = viodasd_getgeo,
};
static void viodasd_end_request(struct request *req, int error,
		int num_sectors)
{
	__blk_end_request(req, error, num_sectors << 9);
}
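
/*
 * send_request() below transmits a request in one of two ways.  A
 * single-segment request goes through the "fast" signal call, with
 * everything packed into the five 64-bit event data words:
 *   data1: VIOVERSION << 16
 *   data2: disk number in bits 48-63
 *   data3: starting byte offset on the disk
 *   data4: DMA token of the lone data block (in the high 32 bits)
 *   data5: length of that block in bytes
 * Multi-segment requests instead go through an event buffer carrying an
 * explicit dma_info[] array.
 */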
/*
 * Send an actual I/O request to OS/400
 */
static int send_request(struct request *req)
{
	u64 start;
	int direction;
	int nsg;
	u16 viocmd;
	HvLpEvent_Rc hvrc;
	struct vioblocklpevent *bevent;
	struct HvLpEvent *hev;
	struct scatterlist sg[VIOMAXBLOCKDMA];
	int sgindex;
	struct viodasd_device *d;
	unsigned long flags;

	start = (u64)req->sector << 9;

	if (rq_data_dir(req) == READ) {
		direction = DMA_FROM_DEVICE;
		viocmd = viomajorsubtype_blockio | vioblockread;
	} else {
		direction = DMA_TO_DEVICE;
		viocmd = viomajorsubtype_blockio | vioblockwrite;
	}

	d = req->rq_disk->private_data;

	/* Now build the scatter-gather list */
	sg_init_table(sg, VIOMAXBLOCKDMA);
	nsg = blk_rq_map_sg(req->q, req, sg);
	nsg = dma_map_sg(d->dev, sg, nsg, direction);

	spin_lock_irqsave(&viodasd_spinlock, flags);
	num_req_outstanding++;

	/* This optimization handles a single DMA block */
	if (nsg == 1)
		hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
				HvLpEvent_Type_VirtualIo, viocmd,
				HvLpEvent_AckInd_DoAck,
				HvLpEvent_AckType_ImmediateAck,
				viopath_sourceinst(viopath_hostLp),
				viopath_targetinst(viopath_hostLp),
				(u64)(unsigned long)req, VIOVERSION << 16,
				((u64)DEVICE_NO(d) << 48), start,
				((u64)sg_dma_address(&sg[0])) << 32,
				sg_dma_len(&sg[0]));
	else {
		bevent = (struct vioblocklpevent *)
			vio_get_event_buffer(viomajorsubtype_blockio);
		if (bevent == NULL) {
			printk(VIOD_KERN_WARNING
			       "error allocating disk event buffer\n");
			goto error_ret;
		}

		/*
		 * Now build up the actual request.  Note that we store
		 * the pointer to the request in the correlation
		 * token so we can match the response up later
		 */
		memset(bevent, 0, sizeof(struct vioblocklpevent));
		hev = &bevent->event;
		hev->flags = HV_LP_EVENT_VALID | HV_LP_EVENT_DO_ACK |
			HV_LP_EVENT_INT;
		hev->xType = HvLpEvent_Type_VirtualIo;
		hev->xSubtype = viocmd;
		hev->xSourceLp = HvLpConfig_getLpIndex();
		hev->xTargetLp = viopath_hostLp;
		hev->xSizeMinus1 =
			offsetof(struct vioblocklpevent, u.rw_data.dma_info) +
			(sizeof(bevent->u.rw_data.dma_info[0]) * nsg) - 1;
		hev->xSourceInstanceId = viopath_sourceinst(viopath_hostLp);
		hev->xTargetInstanceId = viopath_targetinst(viopath_hostLp);
		hev->xCorrelationToken = (u64)req;
		bevent->version = VIOVERSION;
		bevent->disk = DEVICE_NO(d);
		bevent->u.rw_data.offset = start;

		/*
		 * Copy just the dma information from the sg list
		 * into the request
		 */
		for (sgindex = 0; sgindex < nsg; sgindex++) {
			bevent->u.rw_data.dma_info[sgindex].token =
				sg_dma_address(&sg[sgindex]);
			bevent->u.rw_data.dma_info[sgindex].len =
				sg_dma_len(&sg[sgindex]);
		}

		/* Send the request */
		hvrc = HvCallEvent_signalLpEvent(&bevent->event);
		vio_free_event_buffer(viomajorsubtype_blockio, bevent);
	}

	if (hvrc != HvLpEvent_Rc_Good) {
		printk(VIOD_KERN_WARNING
		       "error sending disk event to OS/400 (rc %d)\n",
		       (int)hvrc);
		goto error_ret;
	}
	spin_unlock_irqrestore(&viodasd_spinlock, flags);
	return 0;

error_ret:
	num_req_outstanding--;
	spin_unlock_irqrestore(&viodasd_spinlock, flags);
	dma_unmap_sg(d->dev, sg, nsg, direction);
	return -1;
}
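
/*
 * send_request() returns 0 once the event has been handed to the hypervisor
 * and nonzero on failure; in the failure case it has already dropped its
 * count in num_req_outstanding and unmapped the scatterlist, so the caller
 * below only has to complete the request with an error.
 */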
/*
 * This is the external request processing routine
 */
static void do_viodasd_request(struct request_queue *q)
{
	struct request *req;

	/*
	 * If we already have the maximum number of requests
	 * outstanding to OS/400 just bail out.  We'll come
	 * back later.
	 */
	while (num_req_outstanding < VIOMAXREQ) {
		req = elv_next_request(q);
		if (req == NULL)
			return;
		/* dequeue the current request from the queue */
		blkdev_dequeue_request(req);
		/* check that request contains a valid command */
		if (!blk_fs_request(req)) {
			viodasd_end_request(req, -EIO, req->hard_nr_sectors);
			continue;
		}
		/* Try sending the request */
		if (send_request(req) != 0)
			viodasd_end_request(req, -EIO, req->hard_nr_sectors);
	}
}
/*
 * Probe a single disk and fill in the viodasd_device structure
 * for it.
 */
static int probe_disk(struct viodasd_device *d)
{
	HvLpEvent_Rc hvrc;
	struct viodasd_waitevent we;
	int dev_no = DEVICE_NO(d);
	u16 flags = 0;
	struct request_queue *q;
	struct gendisk *g;

retry:
	init_completion(&we.com);

	/* Send the open event to OS/400 */
	hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
			HvLpEvent_Type_VirtualIo,
			viomajorsubtype_blockio | vioblockopen,
			HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
			viopath_sourceinst(viopath_hostLp),
			viopath_targetinst(viopath_hostLp),
			(u64)(unsigned long)&we, VIOVERSION << 16,
			((u64)dev_no << 48) | ((u64)flags << 32),
			0, 0, 0);
	if (hvrc != 0) {
		printk(VIOD_KERN_WARNING "bad rc on HV open %d\n", (int)hvrc);
		return 0;
	}

	wait_for_completion(&we.com);

	if (we.rc != 0) {
		if (flags != 0)
			return 0;
		/* try again with read only flag set */
		flags = vioblockflags_ro;
		goto retry;
	}
	if (we.max_disk > (MAX_DISKNO - 1)) {
		static int warned;

		if (warned == 0) {
			warned++;
			printk(VIOD_KERN_INFO
				"Only examining the first %d "
				"of %d disks connected\n",
				MAX_DISKNO, we.max_disk + 1);
		}
	}

	/* Send the close event to OS/400.  We DON'T expect a response */
	hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
			HvLpEvent_Type_VirtualIo,
			viomajorsubtype_blockio | vioblockclose,
			HvLpEvent_AckInd_NoAck, HvLpEvent_AckType_ImmediateAck,
			viopath_sourceinst(viopath_hostLp),
			viopath_targetinst(viopath_hostLp),
			0, VIOVERSION << 16,
			((u64)dev_no << 48) | ((u64)flags << 32),
			0, 0, 0);
	if (hvrc != 0) {
		printk(VIOD_KERN_WARNING
		       "bad rc sending event to OS/400 %d\n", (int)hvrc);
		return 0;
	}

	if (d->dev == NULL) {
		/* this is when we reprobe for new disks */
		if (vio_create_viodasd(dev_no) == NULL) {
			printk(VIOD_KERN_WARNING
				"cannot allocate virtual device for disk %d\n",
				dev_no);
			return 0;
		}
		/*
		 * The vio_create_viodasd will have recursed into this
		 * routine with d->dev set to the new vio device and
		 * will finish the setup of the disk below.
		 */
		return 1;
	}

	/* create the request queue for the disk */
	spin_lock_init(&d->q_lock);
	q = blk_init_queue(do_viodasd_request, &d->q_lock);
	if (q == NULL) {
		printk(VIOD_KERN_WARNING "cannot allocate queue for disk %d\n",
				dev_no);
		return 0;
	}
	g = alloc_disk(1 << PARTITION_SHIFT);
	if (g == NULL) {
		printk(VIOD_KERN_WARNING
				"cannot allocate disk structure for disk %d\n",
				dev_no);
		blk_cleanup_queue(q);
		return 0;
	}

	d->disk = g;
	blk_queue_max_hw_segments(q, VIOMAXBLOCKDMA);
	blk_queue_max_phys_segments(q, VIOMAXBLOCKDMA);
	blk_queue_max_sectors(q, VIODASD_MAXSECTORS);
	g->major = VIODASD_MAJOR;
	g->first_minor = dev_no << PARTITION_SHIFT;
	if (dev_no >= 26)
		snprintf(g->disk_name, sizeof(g->disk_name),
				VIOD_GENHD_NAME "%c%c",
				'a' + (dev_no / 26) - 1, 'a' + (dev_no % 26));
	else
		snprintf(g->disk_name, sizeof(g->disk_name),
				VIOD_GENHD_NAME "%c", 'a' + (dev_no % 26));
	g->fops = &viodasd_fops;
	g->queue = q;
	g->private_data = d;
	g->driverfs_dev = d->dev;
	set_capacity(g, d->size >> 9);

	printk(VIOD_KERN_INFO "disk %d: %lu sectors (%lu MB) "
			"CHS=%d/%d/%d sector size %d%s\n",
			dev_no, (unsigned long)(d->size >> 9),
			(unsigned long)(d->size >> 20),
			(int)d->cylinders, (int)d->tracks,
			(int)d->sectors, (int)d->bytes_per_sector,
			d->read_only ? " (RO)" : "");

	/* register us in the global list */
	add_disk(g);
	return 1;
}
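
/*
 * Completion path: viodasd_handle_read_write() uses
 * block_event_to_scatterlist() below to recover the DMA descriptors carried
 * in the response event, so the mappings set up in send_request() can be
 * unmapped and the transferred byte count totalled up.
 */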
/* returns the total number of scatterlist elements converted */
static int block_event_to_scatterlist(const struct vioblocklpevent *bevent,
		struct scatterlist *sg, int *total_len)
{
	int i, numsg;
	const struct rw_data *rw_data = &bevent->u.rw_data;
	static const int offset =
		offsetof(struct vioblocklpevent, u.rw_data.dma_info);
	static const int element_size = sizeof(rw_data->dma_info[0]);

	numsg = ((bevent->event.xSizeMinus1 + 1) - offset) / element_size;
	if (numsg > VIOMAXBLOCKDMA)
		numsg = VIOMAXBLOCKDMA;

	*total_len = 0;
	memset(sg, 0, sizeof(sg[0]) * VIOMAXBLOCKDMA);

	for (i = 0; (i < numsg) && (rw_data->dma_info[i].len > 0); ++i) {
		sg_dma_address(&sg[i]) = rw_data->dma_info[i].token;
		sg_dma_len(&sg[i]) = rw_data->dma_info[i].len;
		*total_len += rw_data->dma_info[i].len;
	}
	return i;
}
/*
 * Restart all queues, starting with the one _after_ the disk given,
 * thus reducing the chance of starvation of higher numbered disks.
 */
static void viodasd_restart_all_queues_starting_from(int first_index)
{
	int i;

	for (i = first_index + 1; i < MAX_DISKNO; ++i)
		if (viodasd_devices[i].disk)
			blk_run_queue(viodasd_devices[i].disk->queue);
	for (i = 0; i <= first_index; ++i)
		if (viodasd_devices[i].disk)
			blk_run_queue(viodasd_devices[i].disk->queue);
}
/*
 * For read and write requests, decrement the number of outstanding
 * requests and free the DMA buffers we allocated.
 */
static int viodasd_handle_read_write(struct vioblocklpevent *bevent)
{
	int num_sg, num_sect, pci_direction, total_len;
	struct request *req;
	struct scatterlist sg[VIOMAXBLOCKDMA];
	struct HvLpEvent *event = &bevent->event;
	unsigned long irq_flags;
	struct viodasd_device *d;
	int error;
	spinlock_t *qlock;

	num_sg = block_event_to_scatterlist(bevent, sg, &total_len);
	num_sect = total_len >> 9;
	if (event->xSubtype == (viomajorsubtype_blockio | vioblockread))
		pci_direction = DMA_FROM_DEVICE;
	else
		pci_direction = DMA_TO_DEVICE;
	req = (struct request *)bevent->event.xCorrelationToken;
	d = req->rq_disk->private_data;

	dma_unmap_sg(d->dev, sg, num_sg, pci_direction);

	/*
	 * Since this is running in interrupt mode, we need to make sure
	 * we're not stepping on any global I/O operations
	 */
	spin_lock_irqsave(&viodasd_spinlock, irq_flags);
	num_req_outstanding--;
	spin_unlock_irqrestore(&viodasd_spinlock, irq_flags);

	error = (event->xRc == HvLpEvent_Rc_Good) ? 0 : -EIO;
	if (error) {
		const struct vio_error_entry *err;
		err = vio_lookup_rc(viodasd_err_table, bevent->sub_result);
		printk(VIOD_KERN_WARNING "read/write error %d:0x%04x (%s)\n",
				event->xRc, bevent->sub_result, err->msg);
		num_sect = req->hard_nr_sectors;
	}
	qlock = req->q->queue_lock;
	spin_lock_irqsave(qlock, irq_flags);
	viodasd_end_request(req, error, num_sect);
	spin_unlock_irqrestore(qlock, irq_flags);

	/* Finally, try to get more requests off of this device's queue */
	viodasd_restart_all_queues_starting_from(DEVICE_NO(d));

	return 0;
}
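
/*
 * handle_block_event() below is the callback registered with
 * vio_setHandler() in viodasd_init(); it runs in interrupt context, which
 * is why the completion handling above takes the queue lock with irqsave.
 */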
/* This routine handles incoming block LP events */
static void handle_block_event(struct HvLpEvent *event)
{
	struct vioblocklpevent *bevent = (struct vioblocklpevent *)event;
	struct viodasd_waitevent *pwe;

	if (event == NULL)
		/* Notification that a partition went away! */
		return;
	/* First, we should NEVER get an int here...only acks */
	if (hvlpevent_is_int(event)) {
		printk(VIOD_KERN_WARNING
		       "Yikes! got an int in viodasd event handler!\n");
		if (hvlpevent_need_ack(event)) {
			event->xRc = HvLpEvent_Rc_InvalidSubtype;
			HvCallEvent_ackLpEvent(event);
		}
		return;
	}

	switch (event->xSubtype & VIOMINOR_SUBTYPE_MASK) {
	case vioblockopen:
		/*
		 * Handle a response to an open request.  We get all the
		 * disk information in the response, so update it.  The
		 * correlation token contains a pointer to a waitevent
		 * structure that has a completion in it.  Update the
		 * return code in the waitevent structure and post the
		 * completion to wake up the guy who sent the request.
		 */
		pwe = (struct viodasd_waitevent *)event->xCorrelationToken;
		pwe->rc = event->xRc;
		pwe->sub_result = bevent->sub_result;
		if (event->xRc == HvLpEvent_Rc_Good) {
			const struct open_data *data = &bevent->u.open_data;
			struct viodasd_device *device =
				&viodasd_devices[bevent->disk];
			device->read_only =
				bevent->flags & vioblockflags_ro;
			device->size = data->disk_size;
			device->cylinders = data->cylinders;
			device->tracks = data->tracks;
			device->sectors = data->sectors;
			device->bytes_per_sector = data->bytes_per_sector;
			pwe->max_disk = data->max_disk;
		}
		complete(&pwe->com);
		break;
	case vioblockclose:
		break;
	case vioblockread:
	case vioblockwrite:
		viodasd_handle_read_write(bevent);
		break;

	default:
		printk(VIOD_KERN_WARNING "invalid subtype!\n");
		if (hvlpevent_need_ack(event)) {
			event->xRc = HvLpEvent_Rc_InvalidSubtype;
			HvCallEvent_ackLpEvent(event);
		}
	}
}
/*
 * Get the driver to reprobe for more disks.
 */
static ssize_t probe_disks(struct device_driver *drv, const char *buf,
		size_t count)
{
	struct viodasd_device *d;

	for (d = viodasd_devices; d < &viodasd_devices[MAX_DISKNO]; d++) {
		if (d->disk == NULL)
			probe_disk(d);
	}
	return count;
}
static DRIVER_ATTR(probe, S_IWUSR, NULL, probe_disks);
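
/*
 * Note: with the driver registered on the vio bus, the attribute above is
 * normally reachable through sysfs (for example under
 * /sys/bus/vio/drivers/<driver name>/probe; the exact path depends on the
 * bus and driver names used at registration).  Writing anything to it
 * rescans for newly added virtual disks.
 */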
static int viodasd_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
	struct viodasd_device *d = &viodasd_devices[vdev->unit_address];

	d->dev = &vdev->dev;
	if (!probe_disk(d))
		return -ENODEV;
	return 0;
}

static int viodasd_remove(struct vio_dev *vdev)
{
	struct viodasd_device *d;

	d = &viodasd_devices[vdev->unit_address];
	if (d->disk) {
		del_gendisk(d->disk);
		blk_cleanup_queue(d->disk->queue);
		put_disk(d->disk);
		d->disk = NULL;
	}
	d->dev = NULL;
	return 0;
}
/*
 * viodasd_device_table: Used by vio.c to match devices that we
 * support.
 */
static struct vio_device_id viodasd_device_table[] __devinitdata = {
	{ "block", "IBM,iSeries-viodasd" },
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, viodasd_device_table);

static struct vio_driver viodasd_driver = {
	.id_table = viodasd_device_table,
	.probe = viodasd_probe,
	.remove = viodasd_remove,
	.driver = {
		.name = "viodasd",
		.owner = THIS_MODULE,
	}
};
static int need_delete_probe;

/*
 * Initialize the whole device driver.  Handle module and non-module
 * versions.
 */
static int __init viodasd_init(void)
{
	int rc;

	if (!firmware_has_feature(FW_FEATURE_ISERIES)) {
		rc = -ENODEV;
		goto early_fail;
	}

	/* Try to open to our host lp */
	if (viopath_hostLp == HvLpIndexInvalid)
		vio_set_hostlp();

	if (viopath_hostLp == HvLpIndexInvalid) {
		printk(VIOD_KERN_WARNING "invalid hosting partition\n");
		rc = -EIO;
		goto early_fail;
	}

	printk(VIOD_KERN_INFO "vers " VIOD_VERS ", hosting partition %d\n",
			viopath_hostLp);

	/* register the block device */
	rc = register_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME);
	if (rc) {
		printk(VIOD_KERN_WARNING
				"Unable to get major number %d for %s\n",
				VIODASD_MAJOR, VIOD_GENHD_NAME);
		goto early_fail;
	}
	/* Actually open the path to the hosting partition */
	rc = viopath_open(viopath_hostLp, viomajorsubtype_blockio,
			VIOMAXREQ + 2);
	if (rc) {
		printk(VIOD_KERN_WARNING
		       "error opening path to host partition %d\n",
		       viopath_hostLp);
		goto unregister_blk;
	}

	/* Initialize our request handler */
	vio_setHandler(viomajorsubtype_blockio, handle_block_event);

	rc = vio_register_driver(&viodasd_driver);
	if (rc) {
		printk(VIOD_KERN_WARNING "vio_register_driver failed\n");
		goto unset_handler;
	}

	/*
	 * If this call fails, it just means that we cannot dynamically
	 * add virtual disks, but the driver will still work fine for
	 * all existing disks, so ignore the failure.
	 */
	if (!driver_create_file(&viodasd_driver.driver, &driver_attr_probe))
		need_delete_probe = 1;

	return 0;

unset_handler:
	vio_clearHandler(viomajorsubtype_blockio);
	viopath_close(viopath_hostLp, viomajorsubtype_blockio, VIOMAXREQ + 2);
unregister_blk:
	unregister_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME);
early_fail:
	return rc;
}
module_init(viodasd_init);
void __exit viodasd_exit(void)
{
	if (need_delete_probe)
		driver_remove_file(&viodasd_driver.driver, &driver_attr_probe);
	vio_unregister_driver(&viodasd_driver);
	vio_clearHandler(viomajorsubtype_blockio);
	viopath_close(viopath_hostLp, viomajorsubtype_blockio, VIOMAXREQ + 2);
	unregister_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME);
}
module_exit(viodasd_exit);