 * Copyright (C) 1999-2002 Red Hat Software
 *
 * Written by Alan Cox, Building Number Three Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * For the purpose of avoiding doubt the preferred form of the work
 * for making modifications shall be a standards compliant form such as
 * gzipped tar and not one requiring a proprietary or patent encumbered
 * tool to unpack.
 *
 * Fixes/additions:
 *	Multiple device handling error fixes,
 *	Added a queue depth.
 *	FC920 has an rmw bug. Don't OR in the end marker.
 *	Removed queue walk, fixed for 64bitness.
 *	Rewrote much of the code over time
 *	Added indirect block lists
 *	Handle 64K limits on many controllers
 *	Don't use indirects on the Promise (breaks)
 *	Heavily chop down the queue depths
 *	Independent queues per IOP
 *	Support for dynamic device creation/deletion
 *	Support for larger I/Os through merge* functions
 *	(taken from DAC960 driver)
 *	Boji T Kannanthanam:
 *		Set the I2O Block devices to be detected in increasing
 *		order of TIDs during boot.
 *		Search and set the I2O block device that we boot off
 *		from as the first device to be claimed (as /dev/i2o/hda).
 *		Properly attach/detach I2O gendisk structure from the
 *		system gendisk list. The I2O block devices now appear in
 *		/proc/partitions.
 *	Markus Lidel <Markus.Lidel@shadowconnect.com>:
 *		Minor bugfixes for 2.6.
 */
#include <linux/module.h>
#include <linux/i2o.h>

#include <linux/mempool.h>

#include <linux/genhd.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>

#include "i2o_block.h"

#define OSM_NAME	"block-osm"
#define OSM_VERSION	"$Rev$"
#define OSM_DESCRIPTION	"I2O Block Device OSM"

static struct i2o_driver i2o_block_driver;

/* global Block OSM request mempool */
static struct i2o_block_mempool i2o_blk_req_pool;

/* Block OSM class handling definition */
static struct i2o_class_id i2o_block_class_id[] = {
	{I2O_CLASS_RANDOM_BLOCK_STORAGE},
	{I2O_CLASS_END}
};

/**
 *	i2o_block_device_free - free the memory of the I2O Block device
 *	@dev: I2O Block device which should be cleaned up
 *
 *	Frees the request queue, gendisk and the i2o_block_device structure.
 */
static void i2o_block_device_free(struct i2o_block_device *dev)
{
	blk_cleanup_queue(dev->gd->queue);

	put_disk(dev->gd);

	kfree(dev);
}

/**
 *	i2o_block_remove - remove the I2O Block device from the system again
 *	@dev: I2O Block device which should be removed
 *
 *	Remove gendisk from system and free all allocated memory.
 *
 *	Returns 0 on success.
 */
static int i2o_block_remove(struct device *dev)
{
	struct i2o_device *i2o_dev = to_i2o_device(dev);
	struct i2o_block_device *i2o_blk_dev = dev_get_drvdata(dev);

	osm_info("Device removed %s\n", i2o_blk_dev->gd->disk_name);

	i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0);

	del_gendisk(i2o_blk_dev->gd);

	dev_set_drvdata(dev, NULL);

	i2o_device_claim_release(i2o_dev);

	i2o_block_device_free(i2o_blk_dev);

	return 0;
}

/**
 *	i2o_block_device_flush - Flush all dirty data of I2O device dev
 *	@dev: I2O device which should be flushed
 *
 *	Flushes all dirty data on device dev.
 *
 *	Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_flush(struct i2o_device *dev)
{
	struct i2o_message __iomem *msg;
	u32 m;

	m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
	if (m == I2O_QUEUE_EMPTY)
		return -ETIMEDOUT;

	writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
	writel(I2O_CMD_BLOCK_CFLUSH << 24 | HOST_TID << 12 | dev->lct_data.tid,
	       &msg->u.head[1]);
	writel(60 << 16, &msg->body[0]);
	osm_debug("Flushing...\n");

	return i2o_msg_post_wait(dev->iop, m, 60);
}
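
/*
 * Note on the message setup above and in the mount/lock/unlock helpers
 * below: head[0] carries the frame size and the SGL offset, while head[1]
 * packs the command code (bits 31-24), the initiator TID (bits 23-12,
 * always HOST_TID here) and the target TID from the LCT entry (bits 11-0).
 */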

/**
 *	i2o_block_device_mount - Mount (load) the media of device dev
 *	@dev: I2O device which should receive the mount request
 *	@media_id: Media Identifier
 *
 *	Load media into the drive. The identifier should be set to -1,
 *	because the spec does not support any other value.
 *
 *	Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_mount(struct i2o_device *dev, u32 media_id)
{
	struct i2o_message __iomem *msg;
	u32 m;

	m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
	if (m == I2O_QUEUE_EMPTY)
		return -ETIMEDOUT;

	writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
	writel(I2O_CMD_BLOCK_MMOUNT << 24 | HOST_TID << 12 | dev->lct_data.tid,
	       &msg->u.head[1]);
	writel(-1, &msg->body[0]);
	writel(0, &msg->body[1]);
	osm_debug("Mounting...\n");

	return i2o_msg_post_wait(dev->iop, m, 2);
}

/**
 *	i2o_block_device_lock - Locks the media of device dev
 *	@dev: I2O device which should receive the lock request
 *	@media_id: Media Identifier
 *
 *	Lock media of device dev to prevent removal. The media identifier
 *	should be set to -1, because the spec does not support any other
 *	value.
 *
 *	Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_lock(struct i2o_device *dev, u32 media_id)
{
	struct i2o_message __iomem *msg;
	u32 m;

	m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
	if (m == I2O_QUEUE_EMPTY)
		return -ETIMEDOUT;

	writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
	writel(I2O_CMD_BLOCK_MLOCK << 24 | HOST_TID << 12 | dev->lct_data.tid,
	       &msg->u.head[1]);
	writel(-1, &msg->body[0]);
	osm_debug("Locking...\n");

	return i2o_msg_post_wait(dev->iop, m, 2);
}

/**
 *	i2o_block_device_unlock - Unlocks the media of device dev
 *	@dev: I2O device which should receive the unlock request
 *	@media_id: Media Identifier
 *
 *	Unlocks the media in device dev. The media identifier should be set
 *	to -1, because the spec does not support any other value.
 *
 *	Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_unlock(struct i2o_device *dev, u32 media_id)
{
	struct i2o_message __iomem *msg;
	u32 m;

	m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
	if (m == I2O_QUEUE_EMPTY)
		return -ETIMEDOUT;

	writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
	writel(I2O_CMD_BLOCK_MUNLOCK << 24 | HOST_TID << 12 | dev->lct_data.tid,
	       &msg->u.head[1]);
	writel(media_id, &msg->body[0]);
	osm_debug("Unlocking...\n");

	return i2o_msg_post_wait(dev->iop, m, 2);
}

/**
 *	i2o_block_device_power - Power management for device dev
 *	@dev: I2O device which should receive the power management request
 *	@op: Operation which should be sent
 *
 *	Send a power management request to the device dev.
 *
 *	Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_power(struct i2o_block_device *dev, u8 op)
{
	struct i2o_device *i2o_dev = dev->i2o_dev;
	struct i2o_controller *c = i2o_dev->iop;
	struct i2o_message __iomem *msg;
	u32 m;
	int rc;

	m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
	if (m == I2O_QUEUE_EMPTY)
		return -ETIMEDOUT;

	writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
	writel(I2O_CMD_BLOCK_POWER << 24 | HOST_TID << 12 |
	       i2o_dev->lct_data.tid, &msg->u.head[1]);
	writel(op << 24, &msg->body[0]);
	osm_debug("Power...\n");

	rc = i2o_msg_post_wait(c, m, 60);
	if (!rc)
		dev->power = op;

	return rc;
}

/**
 *	i2o_block_request_alloc - Allocate an I2O block request struct
 *
 *	Allocates an I2O block request struct and initializes the list.
 *
 *	Returns an i2o_block_request pointer on success or negative error
 *	code on failure.
 */
static inline struct i2o_block_request *i2o_block_request_alloc(void)
{
	struct i2o_block_request *ireq;

	ireq = mempool_alloc(i2o_blk_req_pool.pool, GFP_ATOMIC);
	if (!ireq)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ireq->queue);

	return ireq;
}

/**
 *	i2o_block_request_free - Frees an I2O block request
 *	@ireq: I2O block request which should be freed
 *
 *	Frees the allocated memory (gives it back to the request mempool).
 */
static inline void i2o_block_request_free(struct i2o_block_request *ireq)
{
	mempool_free(ireq, i2o_blk_req_pool.pool);
}

/**
 *	i2o_block_sglist_alloc - Allocate the SG list and map it
 *	@ireq: I2O block request
 *
 *	Builds the SG list and maps it so that it can be accessed by the
 *	controller.
 *
 *	Returns the number of elements in the SG list or 0 on failure.
 */
static inline int i2o_block_sglist_alloc(struct i2o_block_request *ireq)
{
	struct device *dev = &ireq->i2o_blk_dev->i2o_dev->iop->pdev->dev;
	int nents;

	nents = blk_rq_map_sg(ireq->req->q, ireq->req, ireq->sg_table);

	if (rq_data_dir(ireq->req) == READ)
		ireq->sg_dma_direction = PCI_DMA_FROMDEVICE;
	else
		ireq->sg_dma_direction = PCI_DMA_TODEVICE;

	/* dma_map_sg() may coalesce entries, so use its return value */
	ireq->sg_nents = dma_map_sg(dev, ireq->sg_table, nents,
				    ireq->sg_dma_direction);

	return ireq->sg_nents;
}

/**
 *	i2o_block_sglist_free - Frees the SG list
 *	@ireq: I2O block request from which the SG list should be freed
 *
 *	Frees the SG list from the I2O block request.
 */
static inline void i2o_block_sglist_free(struct i2o_block_request *ireq)
{
	struct device *dev = &ireq->i2o_blk_dev->i2o_dev->iop->pdev->dev;

	dma_unmap_sg(dev, ireq->sg_table, ireq->sg_nents,
		     ireq->sg_dma_direction);
}

/**
 *	i2o_block_prep_req_fn - Allocates I2O block device specific struct
 *	@q: request queue for the request
 *	@req: the request to prepare
 *
 *	Allocate the necessary i2o_block_request struct and connect it to
 *	the request. This is needed so that we do not lose the SG list
 *	later on.
 *
 *	Returns BLKPREP_OK on success or BLKPREP_DEFER on failure.
 */
static int i2o_block_prep_req_fn(struct request_queue *q, struct request *req)
{
	struct i2o_block_device *i2o_blk_dev = q->queuedata;
	struct i2o_block_request *ireq;

	/* request is already processed by us, so return */
	if (req->flags & REQ_SPECIAL) {
		osm_debug("REQ_SPECIAL already set!\n");
		req->flags |= REQ_DONTPREP;
		return BLKPREP_OK;
	}

	/* connect the i2o_block_request to the request */
	ireq = i2o_block_request_alloc();
	if (unlikely(IS_ERR(ireq))) {
		osm_debug("unable to allocate i2o_block_request!\n");
		return BLKPREP_DEFER;
	}

	ireq->i2o_blk_dev = i2o_blk_dev;
	req->special = ireq;
	ireq->req = req;

	/* do not come back here */
	req->flags |= REQ_DONTPREP | REQ_SPECIAL;

	return BLKPREP_OK;
}
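
/*
 * Note: REQ_DONTPREP tells the block layer not to run the prep function
 * on this request again, while REQ_SPECIAL marks that req->special
 * already carries our i2o_block_request.
 */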

/**
 *	i2o_block_delayed_request_fn - delayed request queue function
 *	@delayed_request: the delayed request with the queue to start
 *
 *	If the request queue is stopped for a disk, and there is no open
 *	request, a new event is created, which calls this function to start
 *	the queue after I2O_BLOCK_REQUEST_TIME. Otherwise the queue will
 *	never be started again.
 */
static void i2o_block_delayed_request_fn(void *delayed_request)
{
	struct i2o_block_delayed_request *dreq = delayed_request;
	struct request_queue *q = dreq->queue;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
	kfree(dreq);
}

/**
 *	i2o_block_reply - Block OSM reply handler.
 *	@c: I2O controller from which the message arrives
 *	@m: message id of reply
 *	@msg: the actual I2O message reply
 *
 *	This function gets all the message replies.
 */
static int i2o_block_reply(struct i2o_controller *c, u32 m,
			   struct i2o_message *msg)
{
	struct i2o_block_request *ireq;
	struct request *req;
	struct i2o_block_device *dev;
	struct request_queue *q;
	u8 st;
	unsigned long flags;

	/* FAILed message */
	if (unlikely(le32_to_cpu(msg->u.head[0]) & (1 << 13))) {
		struct i2o_message *pmsg;
		u32 pm;

		/*
		 * FAILed message from controller
		 * We increment the error count and abort it
		 *
		 * In theory this will never happen. The I2O block class
		 * specification states that block devices never return
		 * FAILs but instead use the REQ status field...but
		 * better be on the safe side since no one really follows
		 * the spec to the book :)
		 */
		pm = le32_to_cpu(msg->body[3]);
		pmsg = i2o_msg_in_to_virt(c, pm);

		req = i2o_cntxt_list_get(c, le32_to_cpu(pmsg->u.s.tcntxt));
		if (unlikely(!req)) {
			osm_err("NULL reply received!\n");
			return -1;
		}

		ireq = req->special;
		dev = ireq->i2o_blk_dev;
		q = dev->gd->queue;

		req->errors++;

		spin_lock_irqsave(q->queue_lock, flags);

		while (end_that_request_chunk(req, !req->errors,
					      le32_to_cpu(pmsg->body[1]))) ;
		end_that_request_last(req);

		dev->open_queue_depth--;
		list_del(&ireq->queue);
		blk_start_queue(q);

		spin_unlock_irqrestore(q->queue_lock, flags);

		/* Now flush the message by making it a NOP */
		i2o_msg_nop(c, pm);

		return -1;
	}

	req = i2o_cntxt_list_get(c, le32_to_cpu(msg->u.s.tcntxt));
	if (unlikely(!req)) {
		osm_err("NULL reply received!\n");
		return -1;
	}

	ireq = req->special;
	dev = ireq->i2o_blk_dev;
	q = dev->gd->queue;

	if (unlikely(!dev->i2o_dev)) {
		/*
		 * This is a HACK, but Intel Integrated RAID allows the
		 * user to delete a volume that is claimed, locked, and in
		 * use by the OS. We have to check for a reply from a
		 * non-existent device and flag it as an error or the
		 * system goes nuts.
		 */
		osm_warn("Data transfer to deleted device!\n");
		spin_lock_irqsave(q->queue_lock, flags);
		while (end_that_request_chunk
		       (req, !req->errors, le32_to_cpu(msg->body[1]))) ;
		end_that_request_last(req);

		dev->open_queue_depth--;
		list_del(&ireq->queue);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
		return -1;
	}

	/*
	 * Let's see what is cooking. We stuffed the
	 * request in the context.
	 */
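
	/*
	 * The BSA reply packs three fields into body[0], decoded below:
	 * bits 31-24 carry the block status, bits 23-16 the number of
	 * retries the DDM attempted and bits 15-0 the detailed error code
	 * that indexes bsa_errors[].
	 */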
	st = le32_to_cpu(msg->body[0]) >> 24;

	if (st) {
		int err;
		char *bsa_errors[] = {
			"Success",
			"Media Error",
			"Failure communicating to device",
			"Device Failure",
			"Device is not ready",
			"Media not present",
			"Media is locked by another user",
			"Media has failed",
			"Failure communicating to device",
			"Device bus failure",
			"Device is locked by another user",
			"Device is write protected",
			"Device has reset",
			"Volume has changed, waiting for acknowledgement"
		};

		err = le32_to_cpu(msg->body[0]) & 0xffff;

		/*
		 * Device not ready means two things. One is that the
		 * thing went offline (but not removable media).
		 *
		 * The second is that you have a SuperTrak 100 and the
		 * firmware got constipated. Unlike standard i2o card
		 * setups the supertrak returns an error rather than
		 * blocking for the timeout in these cases.
		 *
		 * Don't stick a supertrak100 into cache aggressive modes
		 */
		osm_err("block-osm: /dev/%s error: %s", dev->gd->disk_name,
			bsa_errors[err]);
		if (le32_to_cpu(msg->body[0]) & 0x00ff0000)
			printk(KERN_ERR " - DDM attempted %d retries",
			       (le32_to_cpu(msg->body[0]) >> 16) & 0x00ff);
		printk(KERN_ERR ".\n");
		req->errors++;
	} else
		req->errors = 0;

	if (!end_that_request_chunk
	    (req, !req->errors, le32_to_cpu(msg->body[1]))) {
		add_disk_randomness(req->rq_disk);
		spin_lock_irqsave(q->queue_lock, flags);

		end_that_request_last(req);

		dev->open_queue_depth--;
		list_del(&ireq->queue);
		blk_start_queue(q);

		spin_unlock_irqrestore(q->queue_lock, flags);

		i2o_block_sglist_free(ireq);
		i2o_block_request_free(ireq);
	} else
		osm_err("still remaining chunks\n");

	return 1;
}

static void i2o_block_event(struct i2o_event *evt)
{
	osm_info("block-osm: event received\n");
}

/*
 *	SCSI-CAM for ioctl geometry mapping
 *	Duplicated with SCSI - this should be moved into somewhere common
 *	perhaps genhd?
 *
 *	LBA -> CHS mapping table taken from:
 *
 *	"Incorporating the I2O Architecture into BIOS for Intel Architecture
 *	 Platforms"
 *
 *	This is an I2O document that is only available to I2O members,
 *	not developers.
 *
 *	From my understanding, this is how all the I2O cards do this
 *
 *	Disk Size	 | Sectors | Heads | Cylinders
 *	-----------------+---------+-------+-------------------
 *	1 < X <= 528M	 | 63	   | 16	   | X/(63 * 16 * 512)
 *	528M < X <= 1G	 | 63	   | 32	   | X/(63 * 32 * 512)
 *	1G < X <= 21G	 | 63	   | 64	   | X/(63 * 64 * 512)
 *	21G < X <= 42G	 | 63	   | 128   | X/(63 * 128 * 512)
 *	42G < X		 | 63	   | 255   | X/(63 * 255 * 512)
 */

#define BLOCK_SIZE_528M		1081344
#define BLOCK_SIZE_1G		2097152
#define BLOCK_SIZE_21G		4403200
#define BLOCK_SIZE_42G		8806400
#define BLOCK_SIZE_84G		17612800

static void i2o_block_biosparam(unsigned long capacity, unsigned short *cyls,
				unsigned char *hds, unsigned char *secs)
{
	unsigned long heads, sectors, cylinders;

	sectors = 63L;		/* Maximize sectors per track */
	if (capacity <= BLOCK_SIZE_528M)
		heads = 16;
	else if (capacity <= BLOCK_SIZE_1G)
		heads = 32;
	else if (capacity <= BLOCK_SIZE_21G)
		heads = 64;
	else if (capacity <= BLOCK_SIZE_42G)
		heads = 128;
	else
		heads = 255;

	cylinders = (unsigned long)capacity / (heads * sectors);

	*cyls = (unsigned short)cylinders;	/* Stuff return values */
	*secs = (unsigned char)sectors;
	*hds = (unsigned char)heads;
}
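
/*
 * Worked example for the mapping above (capacity is counted in 512-byte
 * sectors): a 1 GiB disk has capacity 2097152, which falls into the
 * BLOCK_SIZE_1G bracket, so heads = 32, sectors = 63 and
 * cylinders = 2097152 / (32 * 63) = 1040.
 */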

/**
 *	i2o_block_open - Open the block device
 *
 *	Power up the device, mount and lock the media. This function is
 *	called if the block device is opened for access.
 *
 *	Returns 0 on success or negative error code on failure.
 */
static int i2o_block_open(struct inode *inode, struct file *file)
{
	struct i2o_block_device *dev = inode->i_bdev->bd_disk->private_data;

	if (!dev->i2o_dev)
		return -ENODEV;

	if (dev->power > 0x1f)
		i2o_block_device_power(dev, 0x02);

	i2o_block_device_mount(dev->i2o_dev, -1);

	i2o_block_device_lock(dev->i2o_dev, -1);

	osm_debug("Ready.\n");

	return 0;
}

/**
 *	i2o_block_release - Release the I2O block device
 *
 *	Unlock and unmount the media, and power down the device. Gets called
 *	if the block device is closed.
 *
 *	Returns 0 on success or negative error code on failure.
 */
static int i2o_block_release(struct inode *inode, struct file *file)
{
	struct gendisk *disk = inode->i_bdev->bd_disk;
	struct i2o_block_device *dev = disk->private_data;
	u8 operation;

	/*
	 * This is to deal with the case of an application
	 * opening a device and then the device disappearing while
	 * it's in use, and then the application trying to release
	 * it. ex: Unmounting a deleted RAID volume at reboot.
	 * If we send messages, it will just cause FAILs since
	 * the TID no longer exists.
	 */
	if (!dev->i2o_dev)
		return 0;

	i2o_block_device_flush(dev->i2o_dev);

	i2o_block_device_unlock(dev->i2o_dev, -1);

	if (dev->flags & (1 << 3 | 1 << 4))	/* Removable */
		operation = 0x21;
	else
		operation = 0x24;

	i2o_block_device_power(dev, operation);

	return 0;
}

/**
 *	i2o_block_ioctl - Issue device specific ioctl calls
 *	@cmd: ioctl command
 *	@arg: ioctl argument
 *
 *	Handles ioctl requests for the block device.
 *
 *	Returns 0 on success or negative error on failure.
 */
static int i2o_block_ioctl(struct inode *inode, struct file *file,
			   unsigned int cmd, unsigned long arg)
{
	struct gendisk *disk = inode->i_bdev->bd_disk;
	struct i2o_block_device *dev = disk->private_data;
	void __user *argp = (void __user *)arg;

	/* Anyone capable of this syscall can do *real bad* things */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (cmd) {
	case HDIO_GETGEO:{
			struct hd_geometry g;
			i2o_block_biosparam(get_capacity(disk),
					    &g.cylinders, &g.heads, &g.sectors);
			g.start = get_start_sect(inode->i_bdev);
			return copy_to_user(argp, &g, sizeof(g)) ? -EFAULT : 0;
		}
	case BLKI2OGRSTRAT:
		return put_user(dev->rcache, (int __user *)arg);
	case BLKI2OGWSTRAT:
		return put_user(dev->wcache, (int __user *)arg);
	case BLKI2OSRSTRAT:
		if (arg < 0 || arg > CACHE_SMARTFETCH)
			return -EINVAL;
		dev->rcache = arg;
		break;
	case BLKI2OSWSTRAT:
		if (arg != 0
		    && (arg < CACHE_WRITETHROUGH || arg > CACHE_SMARTBACK))
			return -EINVAL;
		dev->wcache = arg;
		break;
	}
	return -ENOTTY;
}
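
/*
 * Userspace view (a sketch, not part of this driver): the geometry
 * computed above is reachable through the standard HDIO_GETGEO ioctl,
 * e.g.
 *
 *	struct hd_geometry g;
 *	int fd = open("/dev/i2o/hda", O_RDONLY);
 *
 *	if (fd >= 0 && ioctl(fd, HDIO_GETGEO, &g) == 0)
 *		printf("%u cyls, %u heads, %u secs\n",
 *		       (unsigned)g.cylinders, g.heads, g.sectors);
 */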

/**
 *	i2o_block_media_changed - Have we seen a media change?
 *	@disk: gendisk which should be verified
 *
 *	Verifies if the media has changed.
 *
 *	Returns 1 if the media was changed or 0 otherwise.
 */
static int i2o_block_media_changed(struct gendisk *disk)
{
	struct i2o_block_device *p = disk->private_data;

	if (p->media_change_flag) {
		p->media_change_flag = 0;
		return 1;
	}
	return 0;
}

/**
 *	i2o_block_transfer - Transfer a request to/from the I2O controller
 *	@req: the request which should be transferred
 *
 *	This function converts the request into an I2O message. The necessary
 *	DMA buffers are allocated and after everything is set up the message
 *	is posted to the I2O controller. No cleanup is done by this function.
 *	It is done on the interrupt side when the reply arrives.
 *
 *	Returns 0 on success or negative error code on failure.
 */
static int i2o_block_transfer(struct request *req)
{
	struct i2o_block_device *dev = req->rq_disk->private_data;
	struct i2o_controller *c = dev->i2o_dev->iop;
	int tid = dev->i2o_dev->lct_data.tid;
	struct i2o_message __iomem *msg;
	void __iomem *mptr;
	struct i2o_block_request *ireq = req->special;
	struct scatterlist *sg;
	int sgnum;
	int i;
	u32 m;
	u32 tcntxt;
	u32 sg_flags;
	int rc;

	m = i2o_msg_get(c, &msg);
	if (m == I2O_QUEUE_EMPTY) {
		rc = -EBUSY;
		goto exit;
	}

	tcntxt = i2o_cntxt_list_add(c, req);
	if (!tcntxt) {
		rc = -ENOMEM;
		goto nop_msg;
	}

	if ((sgnum = i2o_block_sglist_alloc(ireq)) <= 0) {
		rc = -ENOMEM;
		goto context_remove;
	}

	/* Build the message based on the request. */
	writel(i2o_block_driver.context, &msg->u.s.icntxt);
	writel(tcntxt, &msg->u.s.tcntxt);
	writel(req->nr_sectors << 9, &msg->body[1]);

	writel((((u64) req->sector) << 9) & 0xffffffff, &msg->body[2]);
	writel(req->sector >> 23, &msg->body[3]);

	mptr = &msg->body[4];

	sg = ireq->sg_table;

	if (rq_data_dir(req) == READ) {
		writel(I2O_CMD_BLOCK_READ << 24 | HOST_TID << 12 | tid,
		       &msg->u.head[1]);
		sg_flags = 0x10000000;
		switch (dev->rcache) {
		case CACHE_NULL:
			writel(0, &msg->body[0]);
			break;
		case CACHE_PREFETCH:
			writel(0x201F0008, &msg->body[0]);
			break;
		case CACHE_SMARTFETCH:
			if (req->nr_sectors > 16)
				writel(0x201F0008, &msg->body[0]);
			else
				writel(0x001F0000, &msg->body[0]);
			break;
		}
	} else {
		writel(I2O_CMD_BLOCK_WRITE << 24 | HOST_TID << 12 | tid,
		       &msg->u.head[1]);
		sg_flags = 0x14000000;
		switch (dev->wcache) {
		case CACHE_NULL:
			writel(0, &msg->body[0]);
			break;
		case CACHE_WRITETHROUGH:
			writel(0x001F0008, &msg->body[0]);
			break;
		case CACHE_WRITEBACK:
			writel(0x001F0010, &msg->body[0]);
			break;
		case CACHE_SMARTBACK:
			if (req->nr_sectors > 16)
				writel(0x001F0004, &msg->body[0]);
			else
				writel(0x001F0010, &msg->body[0]);
			break;
		case CACHE_SMARTTHROUGH:
			if (req->nr_sectors > 16)
				writel(0x001F0004, &msg->body[0]);
			else
				writel(0x001F0010, &msg->body[0]);
			break;
		}
	}
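
	/*
	 * Build one two-word SGL element per scatterlist entry: the first
	 * word carries the flags and the byte length, the second the DMA
	 * address. 0x80000000 marks the final element.
	 */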
	for (i = sgnum; i > 0; i--) {
		if (i == 1)
			sg_flags |= 0x80000000;
		writel(sg_flags | sg_dma_len(sg), mptr);
		writel(sg_dma_address(sg), mptr + 4);
		mptr += 8;
		sg++;
	}

	writel(I2O_MESSAGE_SIZE
	       (((unsigned long)mptr -
		 (unsigned long)&msg->u.head[0]) >> 2) | SGL_OFFSET_8,
	       &msg->u.head[0]);

	list_add_tail(&ireq->queue, &dev->open_queue);
	dev->open_queue_depth++;

	i2o_msg_post(c, m);

	return 0;

      context_remove:
	i2o_cntxt_list_remove(c, req);

      nop_msg:
	i2o_msg_nop(c, m);

      exit:
	return rc;
}

/**
 *	i2o_block_request_fn - request queue handling function
 *	@q: request queue from which the request could be fetched
 *
 *	Takes the next request from the queue, transfers it and if no error
 *	occurs dequeues it from the queue. On arrival of the reply the
 *	message will be processed further. If an error occurs the request
 *	is requeued.
 */
static void i2o_block_request_fn(struct request_queue *q)
{
	struct request *req;

	while (!blk_queue_plugged(q)) {
		req = elv_next_request(q);
		if (!req)
			break;

		if (blk_fs_request(req)) {
			struct i2o_block_delayed_request *dreq;
			struct i2o_block_request *ireq = req->special;
			unsigned int queue_depth;

			queue_depth = ireq->i2o_blk_dev->open_queue_depth;

			if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS)
				if (!i2o_block_transfer(req)) {
					blkdev_dequeue_request(req);
					continue;
				}

			if (queue_depth)
				break;

			/* stop the queue and retry later */
			dreq = kmalloc(sizeof(*dreq), GFP_ATOMIC);
			if (!dreq)
				continue;

			dreq->queue = q;
			INIT_WORK(&dreq->work, i2o_block_delayed_request_fn,
				  dreq);

			osm_info("transfer error\n");
			if (!queue_delayed_work(i2o_block_driver.event_queue,
						&dreq->work,
						I2O_BLOCK_RETRY_TIME))
				kfree(dreq);
			else {
				blk_stop_queue(q);
				break;
			}
		} else
			end_request(req, 0);
	}
}

/* I2O Block device operations definition */
static struct block_device_operations i2o_block_fops = {
	.owner = THIS_MODULE,
	.open = i2o_block_open,
	.release = i2o_block_release,
	.ioctl = i2o_block_ioctl,
	.media_changed = i2o_block_media_changed
};

/**
 *	i2o_block_device_alloc - Allocate memory for an I2O Block device
 *
 *	Allocate memory for the i2o_block_device struct, gendisk and request
 *	queue and initialize them as far as no additional information is
 *	needed.
 *
 *	Returns a pointer to the allocated I2O Block device on success or a
 *	negative error code on failure.
 */
static struct i2o_block_device *i2o_block_device_alloc(void)
{
	struct i2o_block_device *dev;
	struct gendisk *gd;
	struct request_queue *queue;
	int rc;

	dev = kmalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		osm_err("Insufficient memory to allocate I2O Block disk.\n");
		rc = -ENOMEM;
		goto exit;
	}
	memset(dev, 0, sizeof(*dev));

	INIT_LIST_HEAD(&dev->open_queue);
	spin_lock_init(&dev->lock);
	dev->rcache = CACHE_PREFETCH;
	dev->wcache = CACHE_WRITEBACK;

	/* allocate a gendisk with 16 partitions */
	gd = alloc_disk(16);
	if (!gd) {
		osm_err("Insufficient memory to allocate gendisk.\n");
		rc = -ENOMEM;
		goto cleanup_dev;
	}

	/* initialize the request queue */
	queue = blk_init_queue(i2o_block_request_fn, &dev->lock);
	if (!queue) {
		osm_err("Insufficient memory to allocate request queue.\n");
		rc = -ENOMEM;
		goto cleanup_queue;
	}

	blk_queue_prep_rq(queue, i2o_block_prep_req_fn);

	gd->major = I2O_MAJOR;
	gd->queue = queue;
	gd->fops = &i2o_block_fops;
	gd->private_data = dev;

	dev->gd = gd;

	return dev;

      cleanup_queue:
	put_disk(gd);

      cleanup_dev:
	kfree(dev);

      exit:
	return ERR_PTR(rc);
}

/**
 *	i2o_block_probe - verify if dev is a I2O Block device and install it
 *	@dev: device to verify if it is an I2O Block device
 *
 *	We only verify if the user_tid of the device is 0xfff and then
 *	install the device. Otherwise it is used by some other device
 *	(e.g. RAID).
 *
 *	Returns 0 on success or negative error code on failure.
 */
static int i2o_block_probe(struct device *dev)
{
	struct i2o_device *i2o_dev = to_i2o_device(dev);
	struct i2o_block_device *i2o_blk_dev;
	struct i2o_controller *c = i2o_dev->iop;
	struct gendisk *gd;
	struct request_queue *queue;
	static int unit = 0;
	int rc;
	u64 size;
	u32 blocksize;
	u16 power;
	u32 flags, status;
	int segments;

	/* skip devices which are used by IOP */
	if (i2o_dev->lct_data.user_tid != 0xfff) {
		osm_debug("skipping used device %03x\n", i2o_dev->lct_data.tid);
		return -ENODEV;
	}

	osm_info("New device detected (TID: %03x)\n", i2o_dev->lct_data.tid);

	if (i2o_device_claim(i2o_dev)) {
		osm_warn("Unable to claim device. Installation aborted\n");
		rc = -EFAULT;
		goto exit;
	}

	i2o_blk_dev = i2o_block_device_alloc();
	if (IS_ERR(i2o_blk_dev)) {
		osm_err("could not alloc a new I2O block device");
		rc = PTR_ERR(i2o_blk_dev);
		goto claim_release;
	}

	i2o_blk_dev->i2o_dev = i2o_dev;
	dev_set_drvdata(dev, i2o_blk_dev);

	/* setup gendisk */
	gd = i2o_blk_dev->gd;
	gd->first_minor = unit << 4;
	sprintf(gd->disk_name, "i2o/hd%c", 'a' + unit);
	sprintf(gd->devfs_name, "i2o/hd%c", 'a' + unit);
	gd->driverfs_dev = &i2o_dev->device;

	/* setup request queue */
	queue = gd->queue;
	queue->queuedata = i2o_blk_dev;

	blk_queue_max_phys_segments(queue, I2O_MAX_SEGMENTS);
	blk_queue_max_sectors(queue, I2O_MAX_SECTORS);

	if (c->short_req)
		segments = 8;
	else {
		i2o_status_block *sb;

		sb = c->status_block.virt;

		segments = (sb->inbound_frame_size -
			    sizeof(struct i2o_message) / 4 - 4) / 2;
	}
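
	/*
	 * The inbound frame size is counted in 32-bit words: subtract the
	 * message header and the four-word block command body, then divide
	 * by two, since each SGL element (flags+length and DMA address)
	 * takes two words.
	 */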
	blk_queue_max_hw_segments(queue, segments);

	osm_debug("max sectors = %d\n", I2O_MAX_SECTORS);
	osm_debug("phys segments = %d\n", I2O_MAX_SEGMENTS);
	osm_debug("hw segments = %d\n", segments);

	/*
	 * Ask for the current media data. If that isn't supported
	 * then we ask for the device capacity data
	 */
	if (i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) != 0
	    || i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8) != 0) {
		i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4);
		i2o_parm_field_get(i2o_dev, 0x0000, 4, &size, 8);
	}
	osm_debug("blocksize = %d\n", blocksize);

	if (i2o_parm_field_get(i2o_dev, 0x0000, 2, &power, 2))
		power = 0;
	i2o_parm_field_get(i2o_dev, 0x0000, 5, &flags, 4);
	i2o_parm_field_get(i2o_dev, 0x0000, 6, &status, 4);
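
	/* set_capacity() counts 512-byte sectors; size is in bytes */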
	set_capacity(gd, size >> 9);

	i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0xffffffff);

	add_disk(gd);

	unit++;

	return 0;

      claim_release:
	i2o_device_claim_release(i2o_dev);

      exit:
	return rc;
}

/* Block OSM driver struct */
static struct i2o_driver i2o_block_driver = {
	.name = OSM_NAME,
	.event = i2o_block_event,
	.reply = i2o_block_reply,
	.classes = i2o_block_class_id,
	.driver = {
		   .probe = i2o_block_probe,
		   .remove = i2o_block_remove,
		   },
};

/**
 *	i2o_block_init - Block OSM initialization function
 *
 *	Allocates the slab and mempool for request structs, registers the
 *	i2o_block block device and finally registers the Block OSM with the
 *	I2O core.
 *
 *	Returns 0 on success or negative error code on failure.
 */
static int __init i2o_block_init(void)
{
	int rc;
	int size;

	printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n");

	/* Allocate request mempool and slab */
	size = sizeof(struct i2o_block_request);
	i2o_blk_req_pool.slab = kmem_cache_create("i2o_block_req", size, 0,
						  SLAB_HWCACHE_ALIGN, NULL,
						  NULL);
	if (!i2o_blk_req_pool.slab) {
		osm_err("can't init request slab\n");
		rc = -ENOMEM;
		goto exit;
	}

	i2o_blk_req_pool.pool = mempool_create(I2O_REQ_MEMPOOL_SIZE,
					       mempool_alloc_slab,
					       mempool_free_slab,
					       i2o_blk_req_pool.slab);
	if (!i2o_blk_req_pool.pool) {
		osm_err("can't init request mempool\n");
		rc = -ENOMEM;
		goto free_slab;
	}

	/* Register the block device interfaces */
	rc = register_blkdev(I2O_MAJOR, "i2o_block");
	if (rc) {
		osm_err("unable to register block device\n");
		goto free_mempool;
	}

	osm_info("registered device at major %d\n", I2O_MAJOR);

	/* Register Block OSM into I2O core */
	rc = i2o_driver_register(&i2o_block_driver);
	if (rc) {
		osm_err("Could not register Block driver\n");
		goto unregister_blkdev;
	}

	return 0;

      unregister_blkdev:
	unregister_blkdev(I2O_MAJOR, "i2o_block");

      free_mempool:
	mempool_destroy(i2o_blk_req_pool.pool);

      free_slab:
	kmem_cache_destroy(i2o_blk_req_pool.slab);

      exit:
	return rc;
}

/**
 *	i2o_block_exit - Block OSM exit function
 *
 *	Unregisters Block OSM from I2O core, unregisters the i2o_block block
 *	device and frees the mempool and slab.
 */
static void __exit i2o_block_exit(void)
{
	/* Unregister I2O Block OSM from I2O core */
	i2o_driver_unregister(&i2o_block_driver);

	/* Unregister block device */
	unregister_blkdev(I2O_MAJOR, "i2o_block");

	/* Free request mempool and slab */
	mempool_destroy(i2o_blk_req_pool.pool);
	kmem_cache_destroy(i2o_blk_req_pool.slab);
}

MODULE_AUTHOR("Red Hat");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(OSM_DESCRIPTION);
MODULE_VERSION(OSM_VERSION);

module_init(i2o_block_init);
module_exit(i2o_block_exit);