/*
 * Authors: Dave Boutcher <boutcher@us.ibm.com>
 *          Ryan Arnold <ryanarn@us.ibm.com>
 *          Colin Devilbiss <devilbis@us.ibm.com>
 *          Stephen Rothwell <sfr@au1.ibm.com>
 *
 * (C) Copyright 2000-2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * This driver provides access to disk space (termed "DASD" in historical
 * IBM terms) owned and managed by an OS/400 partition running on the
 * same box as this Linux partition.
 *
 * All disk operations are performed by sending messages back and forth to
 * the OS/400 partition.
 */
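
/*
 * Overview of the flow implemented below: each block I/O request is
 * packaged into an HvLpEvent carrying the disk number, the byte offset
 * and a short list of DMA descriptors, and is signalled to the hosting
 * OS/400 partition over the viopath.  The response arrives asynchronously
 * and is handled by handle_block_event(), which completes the original
 * Linux request.
 */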

#include <linux/major.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/hdreg.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/device.h>

#include <asm/uaccess.h>

#include <asm/iseries/hv_types.h>
#include <asm/iseries/hv_lp_event.h>
#include <asm/iseries/hv_lp_config.h>
#include <asm/iseries/vio.h>

MODULE_DESCRIPTION("iSeries Virtual DASD");
MODULE_AUTHOR("Dave Boutcher");
MODULE_LICENSE("GPL");

/*
 * We only support 7 partitions per physical disk, so with minor
 * numbers 0-255 we get a maximum of 32 disks (8 minors per disk).
 */

#define VIOD_GENHD_NAME		"iseries/vd"
#define VIOD_GENHD_DEVFS_NAME	"iseries/disc"

#define VIOD_VERS		"1.64"

#define VIOD_KERN_WARNING	KERN_WARNING "viod: "
#define VIOD_KERN_INFO		KERN_INFO "viod: "

enum {
	PARTITION_SHIFT = 3,
	MAX_DISKNO = HVMAXARCHITECTEDVIRTUALDISKS,
	MAX_DISK_NAME = sizeof(((struct gendisk *)0)->disk_name)
};

static DEFINE_SPINLOCK(viodasd_spinlock);

#define VIOMAXBLOCKDMA	12
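
/*
 * Map a viodasd_device pointer back to its index in viodasd_devices[]
 * (simple pointer arithmetic).  This index is what gets packed into the
 * event data as the disk number when talking to OS/400.
 */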
#define DEVICE_NO(cell)	((struct viodasd_device *)(cell) - &viodasd_devices[0])

	} dma_info[VIOMAXBLOCKDMA];
};

struct vioblocklpevent {
	struct HvLpEvent	event;
	u16			version;
	u16			sub_result;
	u16			disk;
	u16			flags;
	union {
		struct open_data	open_data;
		struct rw_data		rw_data;
	} u;
};

#define vioblockflags_ro	0x0001

enum vioblocksubtype {
	vioblockopen = 0x0001,
	vioblockclose = 0x0002,
	vioblockread = 0x0003,
	vioblockwrite = 0x0004,
	vioblockflush = 0x0005,
	vioblockcheck = 0x0007
};
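
/*
 * These subtype values are OR'd with viomajorsubtype_blockio to form the
 * xSubtype of each HvLpEvent sent to the hosting partition (see the
 * HvCallEvent_signalLpEventFast() calls below); the low bits are masked
 * back out with VIOMINOR_SUBTYPE_MASK when the response is handled.
 */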

struct viodasd_waitevent {
	struct completion	com;
	int			rc;
	u16			sub_result;
	int			max_disk;	/* open */
};

static const struct vio_error_entry viodasd_err_table[] = {
	{ 0x0201, EINVAL, "Invalid Range" },
	{ 0x0202, EINVAL, "Invalid Token" },
	{ 0x0203, EIO, "DMA Error" },
	{ 0x0204, EIO, "Use Error" },
	{ 0x0205, EIO, "Release Error" },
	{ 0x0206, EINVAL, "Invalid Disk" },
	{ 0x0207, EBUSY, "Can't Lock" },
	{ 0x0208, EIO, "Already Locked" },
	{ 0x0209, EIO, "Already Unlocked" },
	{ 0x020A, EIO, "Invalid Arg" },
	{ 0x020B, EIO, "Bad IFS File" },
	{ 0x020C, EROFS, "Read Only Device" },
	{ 0x02FF, EIO, "Internal Error" },
	{ 0x0000, 0, NULL },
};

/*
 * Figure out the biggest I/O request (in sectors) we can accept
 */
#define VIODASD_MAXSECTORS (4096 / 512 * VIOMAXBLOCKDMA)
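
/*
 * With VIOMAXBLOCKDMA = 12 descriptors of (presumably) one 4096-byte page
 * each, this works out to 4096 / 512 * 12 = 96 sectors, i.e. 48KB per
 * request.
 */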

/*
 * Number of disk I/O requests we've sent to OS/400
 */
static int num_req_outstanding;

/*
 * This is our internal structure for keeping track of disk devices
 */
struct viodasd_device {
	u16		cylinders;
	u16		tracks;
	u16		sectors;
	u16		bytes_per_sector;
	u64		size;
	int		read_only;
	spinlock_t	q_lock;
	struct gendisk	*disk;
	struct device	*dev;
} viodasd_devices[MAX_DISKNO];
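
/*
 * viodasd_devices[] is indexed by disk number: the same value that is
 * packed into the top 16 bits of the event data sent to OS/400
 * (DEVICE_NO()), used to derive the first minor and the disk name, and
 * used by viodasd_probe() via the vio device's unit_address.
 */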

/*
 * External open entry point.
 */
static int viodasd_open(struct inode *ino, struct file *fil)
{
	struct viodasd_device *d = ino->i_bdev->bd_disk->private_data;
	HvLpEvent_Rc hvrc;
	struct viodasd_waitevent we;
	u16 flags = 0;

	if (d->read_only) {
		if ((fil != NULL) && (fil->f_mode & FMODE_WRITE))
			return -EROFS;
		flags = vioblockflags_ro;
	}

	init_completion(&we.com);

	/* Send the open event to OS/400 */
	hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
			HvLpEvent_Type_VirtualIo,
			viomajorsubtype_blockio | vioblockopen,
			HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
			viopath_sourceinst(viopath_hostLp),
			viopath_targetinst(viopath_hostLp),
			(u64)(unsigned long)&we, VIOVERSION << 16,
			((u64)DEVICE_NO(d) << 48) | ((u64)flags << 32),
			0, 0, 0);
	if (hvrc != 0) {
		printk(VIOD_KERN_WARNING "HV open failed %d\n", (int)hvrc);
		return -EIO;
	}

	wait_for_completion(&we.com);

	/* Check the return code */
	if (we.rc != 0) {
		const struct vio_error_entry *err =
			vio_lookup_rc(viodasd_err_table, we.sub_result);

		printk(VIOD_KERN_WARNING
				"bad rc opening disk: %d:0x%04x (%s)\n",
				(int)we.rc, we.sub_result, err->msg);
		return -EIO;
	}

	return 0;
}

/*
 * External release entry point.
 */
static int viodasd_release(struct inode *ino, struct file *fil)
{
	struct viodasd_device *d = ino->i_bdev->bd_disk->private_data;
	HvLpEvent_Rc hvrc;

	/* Send the event to OS/400.  We DON'T expect a response */
	hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
			HvLpEvent_Type_VirtualIo,
			viomajorsubtype_blockio | vioblockclose,
			HvLpEvent_AckInd_NoAck, HvLpEvent_AckType_ImmediateAck,
			viopath_sourceinst(viopath_hostLp),
			viopath_targetinst(viopath_hostLp),
			0, VIOVERSION << 16,
			((u64)DEVICE_NO(d) << 48) /* | ((u64)flags << 32) */,
			0, 0, 0);
	if (hvrc != 0)
		printk(VIOD_KERN_WARNING "HV close call failed %d\n",
				(int)hvrc);
	return 0;
}

/*
 * External ioctl entry point.
 */
static int viodasd_ioctl(struct inode *ino, struct file *fil,
		unsigned int cmd, unsigned long arg)
{
	unsigned char sectors;
	unsigned char heads;
	unsigned short cylinders;
	struct hd_geometry *geo;
	struct gendisk *gendisk;
	struct viodasd_device *d;

	switch (cmd) {
	case HDIO_GETGEO:
		geo = (struct hd_geometry *)arg;
		if (geo == NULL)
			return -EINVAL;
		if (!access_ok(VERIFY_WRITE, geo, sizeof(*geo)))
			return -EFAULT;
		gendisk = ino->i_bdev->bd_disk;
		d = gendisk->private_data;
		sectors = d->sectors;
		heads = d->tracks;
		cylinders = d->cylinders;
		if (cylinders == 0)
			cylinders = get_capacity(gendisk) / (sectors * heads);
		if (__put_user(sectors, &geo->sectors) ||
		    __put_user(heads, &geo->heads) ||
		    __put_user(cylinders, &geo->cylinders) ||
		    __put_user(get_start_sect(ino->i_bdev), &geo->start))
			return -EFAULT;
		return 0;
	}

	return -EINVAL;
}

/*
 * Our file operations table
 */
static struct block_device_operations viodasd_fops = {
	.owner = THIS_MODULE,
	.open = viodasd_open,
	.release = viodasd_release,
	.ioctl = viodasd_ioctl,
};

/*
 * End a request
 */
static void viodasd_end_request(struct request *req, int uptodate,
		int num_sectors)
{
	if (end_that_request_first(req, uptodate, num_sectors))
		return;
	add_disk_randomness(req->rq_disk);
	end_that_request_last(req, uptodate);
}
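
/*
 * Note: end_that_request_last() expects its caller to hold the request
 * queue lock, which is why the interrupt-side completion path below
 * (viodasd_handle_read_write()) takes req->q->queue_lock before calling
 * viodasd_end_request().
 */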

/*
 * Send an actual I/O request to OS/400
 */
static int send_request(struct request *req)
{
	u64 start;
	int direction;
	int nsg;
	u16 viocmd;
	HvLpEvent_Rc hvrc;
	struct vioblocklpevent *bevent;
	struct scatterlist sg[VIOMAXBLOCKDMA];
	int sgindex;
	struct viodasd_device *d;
	unsigned long flags;

	start = (u64)req->sector << 9;

	if (rq_data_dir(req) == READ) {
		direction = DMA_FROM_DEVICE;
		viocmd = viomajorsubtype_blockio | vioblockread;
	} else {
		direction = DMA_TO_DEVICE;
		viocmd = viomajorsubtype_blockio | vioblockwrite;
	}

	d = req->rq_disk->private_data;

	/* Now build the scatter-gather list */
	nsg = blk_rq_map_sg(req->q, req, sg);
	nsg = dma_map_sg(d->dev, sg, nsg, direction);

	spin_lock_irqsave(&viodasd_spinlock, flags);
	num_req_outstanding++;

	/* This optimization handles a single DMA block */
	if (nsg == 1)
		hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
				HvLpEvent_Type_VirtualIo, viocmd,
				HvLpEvent_AckInd_DoAck,
				HvLpEvent_AckType_ImmediateAck,
				viopath_sourceinst(viopath_hostLp),
				viopath_targetinst(viopath_hostLp),
				(u64)(unsigned long)req, VIOVERSION << 16,
				((u64)DEVICE_NO(d) << 48), start,
				((u64)sg_dma_address(&sg[0])) << 32,
				sg_dma_len(&sg[0]));
	else {
		bevent = (struct vioblocklpevent *)
			vio_get_event_buffer(viomajorsubtype_blockio);
		if (bevent == NULL) {
			printk(VIOD_KERN_WARNING
			       "error allocating disk event buffer\n");
			goto error_ret;
		}

		/*
		 * Now build up the actual request.  Note that we store
		 * the pointer to the request in the correlation
		 * token so we can match the response up later
		 */
		memset(bevent, 0, sizeof(struct vioblocklpevent));
		bevent->event.xFlags.xValid = 1;
		bevent->event.xFlags.xFunction = HvLpEvent_Function_Int;
		bevent->event.xFlags.xAckInd = HvLpEvent_AckInd_DoAck;
		bevent->event.xFlags.xAckType = HvLpEvent_AckType_ImmediateAck;
		bevent->event.xType = HvLpEvent_Type_VirtualIo;
		bevent->event.xSubtype = viocmd;
		bevent->event.xSourceLp = HvLpConfig_getLpIndex();
		bevent->event.xTargetLp = viopath_hostLp;
		bevent->event.xSizeMinus1 =
			offsetof(struct vioblocklpevent, u.rw_data.dma_info) +
			(sizeof(bevent->u.rw_data.dma_info[0]) * nsg) - 1;
		bevent->event.xSourceInstanceId =
			viopath_sourceinst(viopath_hostLp);
		bevent->event.xTargetInstanceId =
			viopath_targetinst(viopath_hostLp);
		bevent->event.xCorrelationToken = (u64)req;
		bevent->version = VIOVERSION;
		bevent->disk = DEVICE_NO(d);
		bevent->u.rw_data.offset = start;

		/*
		 * Copy just the dma information from the sg list
		 */
		for (sgindex = 0; sgindex < nsg; sgindex++) {
			bevent->u.rw_data.dma_info[sgindex].token =
				sg_dma_address(&sg[sgindex]);
			bevent->u.rw_data.dma_info[sgindex].len =
				sg_dma_len(&sg[sgindex]);
		}

		/* Send the request */
		hvrc = HvCallEvent_signalLpEvent(&bevent->event);
		vio_free_event_buffer(viomajorsubtype_blockio, bevent);
	}

	if (hvrc != HvLpEvent_Rc_Good) {
		printk(VIOD_KERN_WARNING
		       "error sending disk event to OS/400 (rc %d)\n",
		       (int)hvrc);
		goto error_ret;
	}
	spin_unlock_irqrestore(&viodasd_spinlock, flags);
	return 0;

error_ret:
	num_req_outstanding--;
	spin_unlock_irqrestore(&viodasd_spinlock, flags);
	dma_unmap_sg(d->dev, sg, nsg, direction);
	return -1;
}

/*
 * This is the external request processing routine
 */
static void do_viodasd_request(request_queue_t *q)
{
	struct request *req;

	/*
	 * If we already have the maximum number of requests
	 * outstanding to OS/400 just bail out.  We'll come
	 * back in later.
	 */
	while (num_req_outstanding < VIOMAXREQ) {
		req = elv_next_request(q);
		if (req == NULL)
			return;
		/* dequeue the current request from the queue */
		blkdev_dequeue_request(req);
		/* check that request contains a valid command */
		if (!blk_fs_request(req)) {
			viodasd_end_request(req, 0, req->hard_nr_sectors);
			continue;
		}
		/* Try sending the request */
		if (send_request(req) != 0)
			viodasd_end_request(req, 0, req->hard_nr_sectors);
	}
}
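
/*
 * Flow control: once VIOMAXREQ requests are outstanding, the loop above
 * simply stops pulling requests off the queue.  The read/write completion
 * path (viodasd_handle_read_write()) decrements num_req_outstanding and
 * then restarts the queues via
 * viodasd_restart_all_queues_starting_from(), so a stalled queue gets
 * picked up again without any further kicking from this side.
 */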

/*
 * Probe a single disk and fill in the viodasd_device structure
 */
static void probe_disk(struct viodasd_device *d)
{
	HvLpEvent_Rc hvrc;
	struct viodasd_waitevent we;
	int dev_no = DEVICE_NO(d);
	struct gendisk *g;
	struct request_queue *q;
	u16 flags = 0;

retry:
	init_completion(&we.com);

	/* Send the open event to OS/400 */
	hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
			HvLpEvent_Type_VirtualIo,
			viomajorsubtype_blockio | vioblockopen,
			HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
			viopath_sourceinst(viopath_hostLp),
			viopath_targetinst(viopath_hostLp),
			(u64)(unsigned long)&we, VIOVERSION << 16,
			((u64)dev_no << 48) | ((u64)flags << 32),
			0, 0, 0);
	if (hvrc != 0) {
		printk(VIOD_KERN_WARNING "bad rc on HV open %d\n", (int)hvrc);
		return;
	}

	wait_for_completion(&we.com);

	if (we.rc != 0) {
		if (flags != 0)
			return;
		/* try again with read only flag set */
		flags = vioblockflags_ro;
		goto retry;
	}
	if (we.max_disk > (MAX_DISKNO - 1)) {
		printk(VIOD_KERN_INFO
			"Only examining the first %d "
			"of %d disks connected\n",
			MAX_DISKNO, we.max_disk + 1);
	}

	/* Send the close event to OS/400.  We DON'T expect a response */
	hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
			HvLpEvent_Type_VirtualIo,
			viomajorsubtype_blockio | vioblockclose,
			HvLpEvent_AckInd_NoAck, HvLpEvent_AckType_ImmediateAck,
			viopath_sourceinst(viopath_hostLp),
			viopath_targetinst(viopath_hostLp),
			0, VIOVERSION << 16,
			((u64)dev_no << 48) | ((u64)flags << 32),
			0, 0, 0);
	if (hvrc != 0) {
		printk(VIOD_KERN_WARNING
		       "bad rc sending event to OS/400 %d\n", (int)hvrc);
		return;
	}

	/* create the request queue for the disk */
	spin_lock_init(&d->q_lock);
	q = blk_init_queue(do_viodasd_request, &d->q_lock);
	if (q == NULL) {
		printk(VIOD_KERN_WARNING "cannot allocate queue for disk %d\n",
				dev_no);
		return;
	}
	g = alloc_disk(1 << PARTITION_SHIFT);
	if (g == NULL) {
		printk(VIOD_KERN_WARNING
				"cannot allocate disk structure for disk %d\n",
				dev_no);
		blk_cleanup_queue(q);
		return;
	}

	d->disk = g;
	blk_queue_max_hw_segments(q, VIOMAXBLOCKDMA);
	blk_queue_max_phys_segments(q, VIOMAXBLOCKDMA);
	blk_queue_max_sectors(q, VIODASD_MAXSECTORS);
	g->major = VIODASD_MAJOR;
	g->first_minor = dev_no << PARTITION_SHIFT;
	if (dev_no >= 26)
		snprintf(g->disk_name, sizeof(g->disk_name),
				VIOD_GENHD_NAME "%c%c",
				'a' + (dev_no / 26) - 1, 'a' + (dev_no % 26));
	else
		snprintf(g->disk_name, sizeof(g->disk_name),
				VIOD_GENHD_NAME "%c", 'a' + (dev_no % 26));
	snprintf(g->devfs_name, sizeof(g->devfs_name),
			"%s%d", VIOD_GENHD_DEVFS_NAME, dev_no);
	g->fops = &viodasd_fops;
	g->queue = q;
	g->private_data = d;
	g->driverfs_dev = d->dev;
	set_capacity(g, d->size >> 9);
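
	/*
	 * Naming recap: disks 0-25 become iseries/vda .. iseries/vdz,
	 * and disk 26 onwards gets a two-letter suffix (26 -> iseries/vdaa),
	 * while the devfs name is simply iseries/disc<dev_no>.
	 */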

	printk(VIOD_KERN_INFO "disk %d: %lu sectors (%lu MB) "
			"CHS=%d/%d/%d sector size %d%s\n",
			dev_no, (unsigned long)(d->size >> 9),
			(unsigned long)(d->size >> 20),
			(int)d->cylinders, (int)d->tracks,
			(int)d->sectors, (int)d->bytes_per_sector,
			d->read_only ? " (RO)" : "");

	/* register us in the global list */
	add_disk(g);
}

/* returns the total number of scatterlist elements converted */
static int block_event_to_scatterlist(const struct vioblocklpevent *bevent,
		struct scatterlist *sg, int *total_len)
{
	int i, numsg;
	const struct rw_data *rw_data = &bevent->u.rw_data;
	static const int offset =
		offsetof(struct vioblocklpevent, u.rw_data.dma_info);
	static const int element_size = sizeof(rw_data->dma_info[0]);

	numsg = ((bevent->event.xSizeMinus1 + 1) - offset) / element_size;
	if (numsg > VIOMAXBLOCKDMA)
		numsg = VIOMAXBLOCKDMA;

	*total_len = 0;
	memset(sg, 0, sizeof(sg[0]) * VIOMAXBLOCKDMA);

	for (i = 0; (i < numsg) && (rw_data->dma_info[i].len > 0); ++i) {
		sg_dma_address(&sg[i]) = rw_data->dma_info[i].token;
		sg_dma_len(&sg[i]) = rw_data->dma_info[i].len;
		*total_len += rw_data->dma_info[i].len;
	}
	return i;
}

/*
 * Restart all queues, starting with the one _after_ the disk given,
 * thus reducing the chance of starvation of higher numbered disks.
 */
static void viodasd_restart_all_queues_starting_from(int first_index)
{
	int i;

	for (i = first_index + 1; i < MAX_DISKNO; ++i)
		if (viodasd_devices[i].disk)
			blk_run_queue(viodasd_devices[i].disk->queue);
	for (i = 0; i <= first_index; ++i)
		if (viodasd_devices[i].disk)
			blk_run_queue(viodasd_devices[i].disk->queue);
}

/*
 * For read and write requests, decrement the number of outstanding
 * requests and free the DMA buffers we allocated.
 */
static int viodasd_handle_read_write(struct vioblocklpevent *bevent)
{
	int num_sg, num_sect, pci_direction, total_len;
	struct request *req;
	struct scatterlist sg[VIOMAXBLOCKDMA];
	struct HvLpEvent *event = &bevent->event;
	unsigned long irq_flags;
	struct viodasd_device *d;
	int error;
	spinlock_t *qlock;

	num_sg = block_event_to_scatterlist(bevent, sg, &total_len);
	num_sect = total_len >> 9;
	if (event->xSubtype == (viomajorsubtype_blockio | vioblockread))
		pci_direction = DMA_FROM_DEVICE;
	else
		pci_direction = DMA_TO_DEVICE;
	req = (struct request *)bevent->event.xCorrelationToken;
	d = req->rq_disk->private_data;

	dma_unmap_sg(d->dev, sg, num_sg, pci_direction);

	/*
	 * Since this is running in interrupt mode, we need to make sure
	 * we're not stepping on any global I/O operations
	 */
	spin_lock_irqsave(&viodasd_spinlock, irq_flags);
	num_req_outstanding--;
	spin_unlock_irqrestore(&viodasd_spinlock, irq_flags);

	error = event->xRc != HvLpEvent_Rc_Good;
	if (error) {
		const struct vio_error_entry *err;
		err = vio_lookup_rc(viodasd_err_table, bevent->sub_result);
		printk(VIOD_KERN_WARNING "read/write error %d:0x%04x (%s)\n",
				event->xRc, bevent->sub_result, err->msg);
		num_sect = req->hard_nr_sectors;
	}
	qlock = req->q->queue_lock;
	spin_lock_irqsave(qlock, irq_flags);
	viodasd_end_request(req, !error, num_sect);
	spin_unlock_irqrestore(qlock, irq_flags);

	/* Finally, try to get more requests off of this device's queue */
	viodasd_restart_all_queues_starting_from(DEVICE_NO(d));

	return 0;
}

/* This routine handles incoming block LP events */
static void handle_block_event(struct HvLpEvent *event)
{
	struct vioblocklpevent *bevent = (struct vioblocklpevent *)event;
	struct viodasd_waitevent *pwe;

	if (event == NULL)
		/* Notification that a partition went away! */
		return;
	/* First, we should NEVER get an int here...only acks */
	if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
		printk(VIOD_KERN_WARNING
		       "Yikes! got an int in viodasd event handler!\n");
		if (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck) {
			event->xRc = HvLpEvent_Rc_InvalidSubtype;
			HvCallEvent_ackLpEvent(event);
		}
		return;
	}

	switch (event->xSubtype & VIOMINOR_SUBTYPE_MASK) {
	case vioblockopen:
		/*
		 * Handle a response to an open request.  We get all the
		 * disk information in the response, so update it.  The
		 * correlation token contains a pointer to a waitevent
		 * structure that has a completion in it.  Update the
		 * return code in the waitevent structure and post the
		 * completion to wake up the guy who sent the request
		 */
		pwe = (struct viodasd_waitevent *)event->xCorrelationToken;
		pwe->rc = event->xRc;
		pwe->sub_result = bevent->sub_result;
		if (event->xRc == HvLpEvent_Rc_Good) {
			const struct open_data *data = &bevent->u.open_data;
			struct viodasd_device *device =
				&viodasd_devices[bevent->disk];

			device->read_only =
				bevent->flags & vioblockflags_ro;
			device->size = data->disk_size;
			device->cylinders = data->cylinders;
			device->tracks = data->tracks;
			device->sectors = data->sectors;
			device->bytes_per_sector = data->bytes_per_sector;
			pwe->max_disk = data->max_disk;
		}
		complete(&pwe->com);
		break;
	case vioblockclose:
		break;
	case vioblockread:
	case vioblockwrite:
		viodasd_handle_read_write(bevent);
		break;

	default:
		printk(VIOD_KERN_WARNING "invalid subtype!");
		if (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck) {
			event->xRc = HvLpEvent_Rc_InvalidSubtype;
			HvCallEvent_ackLpEvent(event);
		}
	}
}

/*
 * Get the driver to reprobe for more disks.
 */
static ssize_t probe_disks(struct device_driver *drv, const char *buf,
		size_t count)
{
	struct viodasd_device *d;

	for (d = viodasd_devices; d < &viodasd_devices[MAX_DISKNO]; d++) {
		if (d->disk == NULL)
			probe_disk(d);
	}
	return count;
}
static DRIVER_ATTR(probe, S_IWUSR, NULL, probe_disks);
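
/*
 * Writing to this driver attribute (root-writable, S_IWUSR) invokes
 * probe_disks() above, which re-scans for disks added on the OS/400 side
 * after the driver was loaded.  Assuming the usual vio bus sysfs layout,
 * the attribute appears under /sys/bus/vio/drivers/<driver-name>/probe and
 * can be poked with something like:
 *
 *	echo 1 > /sys/bus/vio/drivers/viodasd/probe
 */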

static int viodasd_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
	struct viodasd_device *d = &viodasd_devices[vdev->unit_address];

	d->dev = &vdev->dev;
	probe_disk(d);
	return d->disk ? 0 : -ENODEV;
}

static int viodasd_remove(struct vio_dev *vdev)
{
	struct viodasd_device *d;

	d = &viodasd_devices[vdev->unit_address];
	if (d->disk) {
		del_gendisk(d->disk);
		blk_cleanup_queue(d->disk->queue);
		put_disk(d->disk);
		d->disk = NULL;
	}
	return 0;
}

/*
 * viodasd_device_table: Used by vio.c to match devices that we
 * support.
 */
static struct vio_device_id viodasd_device_table[] __devinitdata = {
	{ "viodasd", "" },
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, viodasd_device_table);

static struct vio_driver viodasd_driver = {
	.id_table = viodasd_device_table,
	.probe = viodasd_probe,
	.remove = viodasd_remove,
	.driver = {
		.name = "viodasd",
		.owner = THIS_MODULE,
	}
};

/*
 * Initialize the whole device driver.  Handle module and non-module
 * versions.
 */
static int __init viodasd_init(void)
{
	int rc;

	/* Try to open to our host lp */
	if (viopath_hostLp == HvLpIndexInvalid)
		vio_set_hostlp();

	if (viopath_hostLp == HvLpIndexInvalid) {
		printk(VIOD_KERN_WARNING "invalid hosting partition\n");
		return -EIO;
	}

	printk(VIOD_KERN_INFO "vers " VIOD_VERS ", hosting partition %d\n",
			viopath_hostLp);

	/* register the block device */
	if (register_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME)) {
		printk(VIOD_KERN_WARNING
				"Unable to get major number %d for %s\n",
				VIODASD_MAJOR, VIOD_GENHD_NAME);
		return -EIO;
	}
	/* Actually open the path to the hosting partition */
	if (viopath_open(viopath_hostLp, viomajorsubtype_blockio,
				VIOMAXREQ + 2)) {
		printk(VIOD_KERN_WARNING
				"error opening path to host partition %d\n",
				viopath_hostLp);
		unregister_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME);
		return -EIO;
	}

	/* Initialize our request handler */
	vio_setHandler(viomajorsubtype_blockio, handle_block_event);

	rc = vio_register_driver(&viodasd_driver);
	if (rc == 0)
		driver_create_file(&viodasd_driver.driver, &driver_attr_probe);
	return rc;
}

module_init(viodasd_init);

void viodasd_exit(void)
{
	driver_remove_file(&viodasd_driver.driver, &driver_attr_probe);
	vio_unregister_driver(&viodasd_driver);
	vio_clearHandler(viomajorsubtype_blockio);
	unregister_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME);
	viopath_close(viopath_hostLp, viomajorsubtype_blockio, VIOMAXREQ + 2);
}

module_exit(viodasd_exit);