/*
 * bsg.c - block layer implementation of the sg v4 interface
 *
 * Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs
 * Copyright (C) 2004 Peter M. Jones <pjones@redhat.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License version 2.  See the file "COPYING" in the main directory of this
 * archive for more details.
 */

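/*
 * Illustrative userspace sketch (documentation only, not part of this
 * driver): a bsg node is driven with struct sg_io_v4.  Assuming a node
 * such as /dev/bsg/0:0:0:0 exists, a synchronous SCSI INQUIRY via the
 * SG_IO ioctl looks roughly like this:
 *
 *	unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };    (INQUIRY)
 *	unsigned char buf[96], sense[32];
 *	struct sg_io_v4 hdr = { 0 };
 *
 *	hdr.guard = 'Q';
 *	hdr.protocol = BSG_PROTOCOL_SCSI;
 *	hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
 *	hdr.request = (__u64)(unsigned long) cdb;
 *	hdr.request_len = sizeof(cdb);
 *	hdr.din_xferp = (__u64)(unsigned long) buf;
 *	hdr.din_xfer_len = sizeof(buf);
 *	hdr.response = (__u64)(unsigned long) sense;
 *	hdr.max_response_len = sizeof(sense);
 *	hdr.timeout = 30000;                                (milliseconds)
 *
 *	fd = open("/dev/bsg/0:0:0:0", O_RDWR);
 *	ret = ioctl(fd, SG_IO, &hdr);
 *
 * Asynchronous submission instead write()s one or more struct sg_io_v4
 * and read()s completed headers back; see bsg_write() and bsg_read().
 */
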
#include <linux/module.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/percpu.h>
#include <linux/uio.h>
#include <linux/idr.h>
#include <linux/bsg.h>
#include <linux/smp_lock.h>

#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/sg.h>

#define BSG_DESCRIPTION	"Block layer SCSI generic (bsg) driver"
#define BSG_VERSION	"0.4"

struct bsg_device {
        struct request_queue *queue;
        spinlock_t lock;
        struct list_head busy_list;
        struct list_head done_list;
        struct hlist_node dev_list;
        atomic_t ref_count;
        int queued_cmds;
        int done_cmds;
        wait_queue_head_t wq_done;
        wait_queue_head_t wq_free;
        char name[20];
        int max_queue;
        unsigned long flags;
};

enum {
        BSG_F_BLOCK	= 1,
};

#define BSG_DEFAULT_CMDS	64
#define BSG_MAX_DEVS		32768

#undef BSG_DEBUG

#ifdef BSG_DEBUG
#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ##args)
#else
#define dprintk(fmt, args...)
#endif

static DEFINE_MUTEX(bsg_mutex);
static DEFINE_IDR(bsg_minor_idr);

#define BSG_LIST_ARRAY_SIZE	8
static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE];

static struct class *bsg_class;
static int bsg_major;

static struct kmem_cache *bsg_cmd_cachep;

/*
 * our internal command type
 */
struct bsg_command {
        struct bsg_device *bd;
        struct list_head list;
        struct request *rq;
        struct bio *bio;
        struct bio *bidi_bio;
        int err;
        struct sg_io_v4 hdr;
        char sense[SCSI_SENSE_BUFFERSIZE];
};

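/*
 * Release a completed command: return it to the slab cache, drop it from
 * the device's queued count and wake any writer waiting for a free slot.
 */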
static void bsg_free_command(struct bsg_command *bc)
{
        struct bsg_device *bd = bc->bd;
        unsigned long flags;

        kmem_cache_free(bsg_cmd_cachep, bc);

        spin_lock_irqsave(&bd->lock, flags);
        bd->queued_cmds--;
        spin_unlock_irqrestore(&bd->lock, flags);

        wake_up(&bd->wq_free);
}

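/*
 * Allocate a zeroed command, enforcing the per-device max_queue limit.
 * Returns ERR_PTR(-EINVAL) when the queue is already full and
 * ERR_PTR(-ENOMEM) when the slab allocation fails.
 */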
static struct bsg_command *bsg_alloc_command(struct bsg_device *bd)
{
        struct bsg_command *bc = ERR_PTR(-EINVAL);

        spin_lock_irq(&bd->lock);

        if (bd->queued_cmds >= bd->max_queue)
                goto out;

        bd->queued_cmds++;
        spin_unlock_irq(&bd->lock);

        bc = kmem_cache_zalloc(bsg_cmd_cachep, GFP_KERNEL);
        if (unlikely(!bc)) {
                spin_lock_irq(&bd->lock);
                bd->queued_cmds--;
                bc = ERR_PTR(-ENOMEM);
                goto out;
        }

        bc->bd = bd;
        INIT_LIST_HEAD(&bc->list);
        dprintk("%s: returning free cmd %p\n", bd->name, bc);
        return bc;
out:
        spin_unlock_irq(&bd->lock);
        return bc;
}

static inline struct hlist_head *bsg_dev_idx_hash(int index)
{
        return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)];
}

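/*
 * Sleep until at least one queued command completes.  Returns 0 after a
 * completion, -ENODATA when nothing is in flight, and -EAGAIN when the
 * open is non-blocking but commands are still pending.
 */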
static int bsg_io_schedule(struct bsg_device *bd)
{
        DEFINE_WAIT(wait);
        int ret = 0;

        spin_lock_irq(&bd->lock);

        BUG_ON(bd->done_cmds > bd->queued_cmds);

        /*
         * -ENOSPC or -ENODATA?  I'm going for -ENODATA, meaning "I have no
         * work to do", even though we return -ENOSPC after this same test
         * during bsg_write() -- there, it means our buffer can't have more
         * bsg_commands added to it, thus has no space left.
         */
        if (bd->done_cmds == bd->queued_cmds) {
                ret = -ENODATA;
                goto unlock;
        }

        if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
                ret = -EAGAIN;
                goto unlock;
        }

        prepare_to_wait(&bd->wq_done, &wait, TASK_UNINTERRUPTIBLE);
        spin_unlock_irq(&bd->lock);
        io_schedule();
        finish_wait(&bd->wq_done, &wait);

        return ret;
unlock:
        spin_unlock_irq(&bd->lock);
        return ret;
}

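/*
 * Copy the CDB in from userspace, verify the command is permitted for
 * this opener, and fill in the command length, type and timeout of the
 * request.
 */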
static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
				struct sg_io_v4 *hdr, struct bsg_device *bd,
				fmode_t has_write_perm)
{
        if (hdr->request_len > BLK_MAX_CDB) {
                rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
                if (!rq->cmd)
                        return -ENOMEM;
        }

        if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
                           hdr->request_len))
                return -EFAULT;

        if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
                if (blk_verify_command(&q->cmd_filter, rq->cmd, has_write_perm))
                        return -EPERM;
        } else if (!capable(CAP_SYS_RAWIO))
                return -EPERM;

        /*
         * fill in request structure
         */
        rq->cmd_len = hdr->request_len;
        rq->cmd_type = REQ_TYPE_BLOCK_PC;

        rq->timeout = (hdr->timeout * HZ) / 1000;
        if (!rq->timeout)
                rq->timeout = q->sg_timeout;
        if (!rq->timeout)
                rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
        if (rq->timeout < BLK_MIN_SG_TIMEOUT)
                rq->timeout = BLK_MIN_SG_TIMEOUT;

        return 0;
}

/*
 * Check if sg_io_v4 from user is allowed and valid
 */
static int
bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw)
{
        int ret = 0;

        if (hdr->guard != 'Q')
                return -EINVAL;
        if (hdr->dout_xfer_len > (q->max_sectors << 9) ||
            hdr->din_xfer_len > (q->max_sectors << 9))
                return -EIO;

        switch (hdr->protocol) {
        case BSG_PROTOCOL_SCSI:
                switch (hdr->subprotocol) {
                case BSG_SUB_PROTOCOL_SCSI_CMD:
                case BSG_SUB_PROTOCOL_SCSI_TRANSPORT:
                        break;
                default:
                        ret = -EINVAL;
                }
                break;
        default:
                ret = -EINVAL;
        }

        *rw = hdr->dout_xfer_len ? WRITE : READ;
        return ret;
}

/*
 * map sg_io_v4 to a request.
 */
static struct request *
bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
	    u8 *sense)
{
        struct request_queue *q = bd->queue;
        struct request *rq, *next_rq = NULL;
        int ret, rw;
        unsigned int dxfer_len;
        void *dxferp = NULL;

        dprintk("map hdr %llx/%u %llx/%u\n", (unsigned long long) hdr->dout_xferp,
                hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp,
                hdr->din_xfer_len);

        ret = bsg_validate_sgv4_hdr(q, hdr, &rw);
        if (ret)
                return ERR_PTR(ret);

        /*
         * map scatter-gather elements separately and string them to request
         */
        rq = blk_get_request(q, rw, GFP_KERNEL);
        if (!rq)
                return ERR_PTR(-ENOMEM);
        ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, has_write_perm);
        if (ret)
                goto out;

        if (rw == WRITE && hdr->din_xfer_len) {
                if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
                        ret = -EOPNOTSUPP;
                        goto out;
                }
                next_rq = blk_get_request(q, READ, GFP_KERNEL);
                if (!next_rq) {
                        ret = -ENOMEM;
                        goto out;
                }
                rq->next_rq = next_rq;
                next_rq->cmd_type = rq->cmd_type;

                dxferp = (void*)(unsigned long)hdr->din_xferp;
                ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
                                      hdr->din_xfer_len, GFP_KERNEL);
                if (ret)
                        goto out;
        }

        if (hdr->dout_xfer_len) {
                dxfer_len = hdr->dout_xfer_len;
                dxferp = (void*)(unsigned long)hdr->dout_xferp;
        } else if (hdr->din_xfer_len) {
                dxfer_len = hdr->din_xfer_len;
                dxferp = (void*)(unsigned long)hdr->din_xferp;
        } else
                dxfer_len = 0;

        if (dxfer_len) {
                ret = blk_rq_map_user(q, rq, NULL, dxferp, dxfer_len,
                                      GFP_KERNEL);
                if (ret)
                        goto out;
        }

        rq->sense = sense;
        rq->sense_len = 0;
        return rq;
out:
        if (rq->cmd != rq->__cmd)
                kfree(rq->cmd);
        blk_put_request(rq);
        if (next_rq) {
                blk_rq_unmap_user(next_rq->bio);
                blk_put_request(next_rq);
        }
        return ERR_PTR(ret);
}

/*
 * async completion call-back from the block layer, when scsi/ide/whatever
 * calls end_that_request_last() on a request
 */
static void bsg_rq_end_io(struct request *rq, int uptodate)
{
        struct bsg_command *bc = rq->end_io_data;
        struct bsg_device *bd = bc->bd;
        unsigned long flags;

        dprintk("%s: finished rq %p bc %p, bio %p stat %d\n",
                bd->name, rq, bc, bc->bio, uptodate);

        bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);

        spin_lock_irqsave(&bd->lock, flags);
        list_move_tail(&bc->list, &bd->done_list);
        bd->done_cmds++;
        spin_unlock_irqrestore(&bd->lock, flags);

        wake_up(&bd->wq_done);
}

/*
 * do final setup of a 'bc' and submit the matching 'rq' to the block
 * layer for io
 */
static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
			    struct bsg_command *bc, struct request *rq)
{
        /*
         * add bc command to busy queue and submit rq for io
         */
        bc->rq = rq;
        bc->bio = rq->bio;
        if (rq->next_rq)
                bc->bidi_bio = rq->next_rq->bio;
        bc->hdr.duration = jiffies;
        spin_lock_irq(&bd->lock);
        list_add_tail(&bc->list, &bd->busy_list);
        spin_unlock_irq(&bd->lock);

        dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc);

        rq->end_io_data = bc;
        blk_execute_rq_nowait(q, NULL, rq, 1, bsg_rq_end_io);
}

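/*
 * Pop the oldest completed command off the done list, or NULL if nothing
 * has finished yet.
 */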
static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
{
        struct bsg_command *bc = NULL;

        spin_lock_irq(&bd->lock);
        if (bd->done_cmds) {
                bc = list_first_entry(&bd->done_list, struct bsg_command, list);
                list_del(&bc->list);
                bd->done_cmds--;
        }
        spin_unlock_irq(&bd->lock);

        return bc;
}

/*
 * Get a finished command from the done list
 */
static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd)
{
        struct bsg_command *bc;
        int ret;

        do {
                bc = bsg_next_done_cmd(bd);
                if (bc)
                        break;

                if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
                        bc = ERR_PTR(-EAGAIN);
                        break;
                }

                ret = wait_event_interruptible(bd->wq_done, bd->done_cmds);
                if (ret) {
                        bc = ERR_PTR(-ERESTARTSYS);
                        break;
                }
        } while (1);

        dprintk("%s: returning done %p\n", bd->name, bc);

        return bc;
}

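/*
 * Copy completion status, sense data and residual counts back into the
 * user-visible sg_io_v4 header, then unmap and release the request(s).
 */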
static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
				    struct bio *bio, struct bio *bidi_bio)
{
        int ret = 0;

        dprintk("rq %p bio %p 0x%x\n", rq, bio, rq->errors);
        /*
         * fill in all the output members
         */
        hdr->device_status = status_byte(rq->errors);
        hdr->transport_status = host_byte(rq->errors);
        hdr->driver_status = driver_byte(rq->errors);
        hdr->info = 0;
        if (hdr->device_status || hdr->transport_status || hdr->driver_status)
                hdr->info |= SG_INFO_CHECK;
        hdr->response_len = 0;

        if (rq->sense_len && hdr->response) {
                int len = min_t(unsigned int, hdr->max_response_len,
                                        rq->sense_len);

                ret = copy_to_user((void*)(unsigned long)hdr->response,
                                   rq->sense, len);
                if (!ret)
                        hdr->response_len = len;
                else
                        ret = -EFAULT;
        }

        if (rq->next_rq) {
                hdr->dout_resid = rq->data_len;
                hdr->din_resid = rq->next_rq->data_len;
                blk_rq_unmap_user(bidi_bio);
                blk_put_request(rq->next_rq);
        } else if (rq_data_dir(rq) == READ)
                hdr->din_resid = rq->data_len;
        else
                hdr->dout_resid = rq->data_len;

        /*
         * If the request generated a negative error number, return it
         * (providing we aren't already returning an error); if it's
         * just a protocol response (i.e. non negative), that gets
         * processed above.
         */
        if (!ret && rq->errors < 0)
                ret = rq->errors;

        blk_rq_unmap_user(bio);
        if (rq->cmd != rq->__cmd)
                kfree(rq->cmd);
        blk_put_request(rq);

        return ret;
}

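/*
 * Drain the device on release: wait until nothing is in flight, then
 * reap and free every completed command, keeping the first error seen.
 */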
static int bsg_complete_all_commands(struct bsg_device *bd)
{
        struct bsg_command *bc;
        int ret, tret;

        dprintk("%s: entered\n", bd->name);

        /*
         * wait for all commands to complete
         */
        ret = 0;
        do {
                ret = bsg_io_schedule(bd);
                /*
                 * look for -ENODATA specifically -- we'll sometimes get
                 * -ERESTARTSYS when we've taken a signal, but we can't
                 * return until we're done freeing the queue, so ignore
                 * it.  The signal will get handled when we're done freeing
                 * the bsg_device.
                 */
        } while (ret != -ENODATA);

        /*
         * discard done commands
         */
        ret = 0;
        do {
                spin_lock_irq(&bd->lock);
                if (!bd->queued_cmds) {
                        spin_unlock_irq(&bd->lock);
                        break;
                }
                spin_unlock_irq(&bd->lock);

                bc = bsg_get_done_cmd(bd);
                if (IS_ERR(bc))
                        break;

                tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
                                                bc->bidi_bio);
                if (!ret)
                        ret = tret;

                bsg_free_command(bc);
        } while (1);

        return ret;
}

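/*
 * Core of read(): hand back one completed sg_io_v4 header per
 * sizeof(struct sg_io_v4) chunk of the user buffer.
 */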
static int
__bsg_read(char __user *buf, size_t count, struct bsg_device *bd,
	   const struct iovec *iov, ssize_t *bytes_read)
{
        struct bsg_command *bc;
        int nr_commands, ret;

        if (count % sizeof(struct sg_io_v4))
                return -EINVAL;

        ret = 0;
        nr_commands = count / sizeof(struct sg_io_v4);
        while (nr_commands) {
                bc = bsg_get_done_cmd(bd);
                if (IS_ERR(bc)) {
                        ret = PTR_ERR(bc);
                        break;
                }

                /*
                 * this is the only case where we need to copy data back
                 * after completing the request. so do that here,
                 * bsg_complete_work() cannot do that for us
                 */
                ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
                                               bc->bidi_bio);

                if (copy_to_user(buf, &bc->hdr, sizeof(bc->hdr)))
                        ret = -EFAULT;

                bsg_free_command(bc);

                if (ret)
                        break;

                buf += sizeof(struct sg_io_v4);
                *bytes_read += sizeof(struct sg_io_v4);
                nr_commands--;
        }

        return ret;
}

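/*
 * Mirror the file's O_NONBLOCK state into the device's BSG_F_BLOCK flag.
 */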
static inline void bsg_set_block(struct bsg_device *bd, struct file *file)
{
        if (file->f_flags & O_NONBLOCK)
                clear_bit(BSG_F_BLOCK, &bd->flags);
        else
                set_bit(BSG_F_BLOCK, &bd->flags);
}

/*
 * Check if the error is a "real" error that we should return.
 */
static inline int err_block_err(int ret)
{
        if (ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN)
                return 1;

        return 0;
}

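/*
 * read() returns completed commands as an array of sg_io_v4 headers;
 * partial progress is reported unless a fatal error occurred.
 */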
static ssize_t
bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
        struct bsg_device *bd = file->private_data;
        int ret;
        ssize_t bytes_read;

        dprintk("%s: read %Zd bytes\n", bd->name, count);

        bsg_set_block(bd, file);

        bytes_read = 0;
        ret = __bsg_read(buf, count, bd, NULL, &bytes_read);
        *ppos = bytes_read;

        if (!bytes_read || (bytes_read && err_block_err(ret)))
                bytes_read = ret;

        return bytes_read;
}

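/*
 * Core of write(): parse one sg_io_v4 header per chunk of the user
 * buffer, map each into a request and queue it asynchronously.
 */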
static int __bsg_write(struct bsg_device *bd, const char __user *buf,
		       size_t count, ssize_t *bytes_written,
		       fmode_t has_write_perm)
{
        struct bsg_command *bc;
        struct request *rq;
        int ret, nr_commands;

        if (count % sizeof(struct sg_io_v4))
                return -EINVAL;

        nr_commands = count / sizeof(struct sg_io_v4);
        rq = NULL;
        bc = NULL;
        ret = 0;
        while (nr_commands) {
                struct request_queue *q = bd->queue;

                bc = bsg_alloc_command(bd);
                if (IS_ERR(bc)) {
                        ret = PTR_ERR(bc);
                        bc = NULL;
                        break;
                }

                if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) {
                        ret = -EFAULT;
                        break;
                }

                /*
                 * get a request, fill in the blanks, and add to request queue
                 */
                rq = bsg_map_hdr(bd, &bc->hdr, has_write_perm, bc->sense);
                if (IS_ERR(rq)) {
                        ret = PTR_ERR(rq);
                        rq = NULL;
                        break;
                }

                bsg_add_command(bd, q, bc, rq);
                bc = NULL;
                rq = NULL;
                nr_commands--;
                buf += sizeof(struct sg_io_v4);
                *bytes_written += sizeof(struct sg_io_v4);
        }

        if (bc)
                bsg_free_command(bc);

        return ret;
}

static ssize_t
bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
        struct bsg_device *bd = file->private_data;
        ssize_t bytes_written;
        int ret;

        dprintk("%s: write %Zd bytes\n", bd->name, count);

        bsg_set_block(bd, file);

        bytes_written = 0;
        ret = __bsg_write(bd, buf, count, &bytes_written,
                          file->f_mode & FMODE_WRITE);

        *ppos = bytes_written;

        /*
         * return bytes written on non-fatal errors
         */
        if (!bytes_written || (bytes_written && err_block_err(ret)))
                bytes_written = ret;

        dprintk("%s: returning %Zd\n", bd->name, bytes_written);
        return bytes_written;
}

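/*
 * Allocate and initialise the per-open bsg_device state.
 */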
static struct bsg_device *bsg_alloc_device(void)
{
        struct bsg_device *bd;

        bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);
        if (unlikely(!bd))
                return NULL;

        spin_lock_init(&bd->lock);

        bd->max_queue = BSG_DEFAULT_CMDS;

        INIT_LIST_HEAD(&bd->busy_list);
        INIT_LIST_HEAD(&bd->done_list);
        INIT_HLIST_NODE(&bd->dev_list);

        init_waitqueue_head(&bd->wq_free);
        init_waitqueue_head(&bd->wq_done);
        return bd;
}

static void bsg_kref_release_function(struct kref *kref)
{
        struct bsg_class_device *bcd =
                container_of(kref, struct bsg_class_device, ref);
        struct device *parent = bcd->parent;

        if (bcd->release)
                bcd->release(bcd->parent);

        put_device(parent);
}

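/*
 * Drop a reference on the per-open device; the final put waits for any
 * outstanding commands and frees everything.
 */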
static int bsg_put_device(struct bsg_device *bd)
{
        int ret = 0, do_free;
        struct request_queue *q = bd->queue;

        mutex_lock(&bsg_mutex);

        do_free = atomic_dec_and_test(&bd->ref_count);
        if (!do_free) {
                mutex_unlock(&bsg_mutex);
                goto out;
        }

        hlist_del(&bd->dev_list);
        mutex_unlock(&bsg_mutex);

        dprintk("%s: tearing down\n", bd->name);

        /*
         * close can always block
         */
        set_bit(BSG_F_BLOCK, &bd->flags);

        /*
         * we can't reliably report per-command errors here; it's the
         * responsibility of the app to properly reap commands before
         * close() if it wants fool-proof error detection
         */
        ret = bsg_complete_all_commands(bd);

        kfree(bd);
out:
        kref_put(&q->bsg_dev.ref, bsg_kref_release_function);
        if (do_free)
                blk_put_queue(q);
        return ret;
}

static struct bsg_device *bsg_add_device(struct inode *inode,
					 struct request_queue *rq,
					 struct file *file)
{
        struct bsg_device *bd;
        int ret;
#ifdef BSG_DEBUG
        unsigned char buf[32];
#endif
        ret = blk_get_queue(rq);
        if (ret)
                return ERR_PTR(-ENXIO);

        bd = bsg_alloc_device();
        if (!bd) {
                blk_put_queue(rq);
                return ERR_PTR(-ENOMEM);
        }

        bd->queue = rq;

        bsg_set_block(bd, file);

        atomic_set(&bd->ref_count, 1);
        mutex_lock(&bsg_mutex);
        hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));

        strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1);
        dprintk("bound to <%s>, max queue %d\n",
                format_dev_t(buf, inode->i_rdev), bd->max_queue);

        mutex_unlock(&bsg_mutex);
        return bd;
}

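/*
 * Look up an already-open bsg_device for this queue in the minor hash,
 * taking a reference if one is found.
 */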
static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
{
        struct bsg_device *bd;
        struct hlist_node *entry;

        mutex_lock(&bsg_mutex);

        hlist_for_each_entry(bd, entry, bsg_dev_idx_hash(minor), dev_list) {
                if (bd->queue == q) {
                        atomic_inc(&bd->ref_count);
                        goto found;
                }
        }
        bd = NULL;
found:
        mutex_unlock(&bsg_mutex);
        return bd;
}

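/*
 * Translate a minor number into a bsg_device, creating the per-open
 * state on first open of that node.
 */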
static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
{
        struct bsg_device *bd;
        struct bsg_class_device *bcd;

        /*
         * find the class device
         */
        mutex_lock(&bsg_mutex);
        bcd = idr_find(&bsg_minor_idr, iminor(inode));
        if (bcd)
                kref_get(&bcd->ref);
        mutex_unlock(&bsg_mutex);

        if (!bcd)
                return ERR_PTR(-ENODEV);

        bd = __bsg_get_device(iminor(inode), bcd->queue);
        if (bd)
                return bd;

        bd = bsg_add_device(inode, bcd->queue, file);
        if (IS_ERR(bd))
                kref_put(&bcd->ref, bsg_kref_release_function);

        return bd;
}

static int bsg_open(struct inode *inode, struct file *file)
{
        struct bsg_device *bd;

        lock_kernel();
        bd = bsg_get_device(inode, file);
        unlock_kernel();

        if (IS_ERR(bd))
                return PTR_ERR(bd);

        file->private_data = bd;
        return 0;
}

static int bsg_release(struct inode *inode, struct file *file)
{
        struct bsg_device *bd = file->private_data;

        file->private_data = NULL;
        return bsg_put_device(bd);
}

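/*
 * poll(): readable when a completed command is waiting, writable while
 * another command can still be queued.
 */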
static unsigned int bsg_poll(struct file *file, poll_table *wait)
{
        struct bsg_device *bd = file->private_data;
        unsigned int mask = 0;

        poll_wait(file, &bd->wq_done, wait);
        poll_wait(file, &bd->wq_free, wait);

        spin_lock_irq(&bd->lock);
        if (!list_empty(&bd->done_list))
                mask |= POLLIN | POLLRDNORM;
        if (bd->queued_cmds < bd->max_queue)
                mask |= POLLOUT;
        spin_unlock_irq(&bd->lock);

        return mask;
}

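/*
 * Handle bsg's own queue-depth ioctls, the common SCSI ioctls, and the
 * synchronous SG_IO submission path.
 */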
static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct bsg_device *bd = file->private_data;
        int __user *uarg = (int __user *) arg;
        int ret;

        switch (cmd) {
                /*
                 * our own ioctls
                 */
        case SG_GET_COMMAND_Q:
                return put_user(bd->max_queue, uarg);
        case SG_SET_COMMAND_Q: {
                int queue;

                if (get_user(queue, uarg))
                        return -EFAULT;
                if (queue < 1)
                        return -EINVAL;

                spin_lock_irq(&bd->lock);
                bd->max_queue = queue;
                spin_unlock_irq(&bd->lock);
                return 0;
        }

        /*
         * SCSI/sg ioctls
         */
        case SG_GET_VERSION_NUM:
        case SCSI_IOCTL_GET_IDLUN:
        case SCSI_IOCTL_GET_BUS_NUMBER:
        case SG_SET_TIMEOUT:
        case SG_GET_TIMEOUT:
        case SG_GET_RESERVED_SIZE:
        case SG_SET_RESERVED_SIZE:
        case SG_EMULATED_HOST:
        case SCSI_IOCTL_SEND_COMMAND: {
                void __user *uarg = (void __user *) arg;
                return scsi_cmd_ioctl(bd->queue, NULL, file->f_mode, cmd, uarg);
        }
        case SG_IO: {
                struct request *rq;
                struct bio *bio, *bidi_bio = NULL;
                struct sg_io_v4 hdr;
                u8 sense[SCSI_SENSE_BUFFERSIZE];

                if (copy_from_user(&hdr, uarg, sizeof(hdr)))
                        return -EFAULT;

                rq = bsg_map_hdr(bd, &hdr, file->f_mode & FMODE_WRITE, sense);
                if (IS_ERR(rq))
                        return PTR_ERR(rq);

                bio = rq->bio;
                if (rq->next_rq)
                        bidi_bio = rq->next_rq->bio;
                blk_execute_rq(bd->queue, NULL, rq, 0);
                ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);

                if (copy_to_user(uarg, &hdr, sizeof(hdr)))
                        return -EFAULT;

                return ret;
        }
        /*
         * block device ioctls
         */
        default:
#if 0
                return ioctl_by_bdev(bd->bdev, cmd, arg);
#else
                return -ENOTTY;
#endif
        }
}

static const struct file_operations bsg_fops = {
        .read		=	bsg_read,
        .write		=	bsg_write,
        .poll		=	bsg_poll,
        .open		=	bsg_open,
        .release	=	bsg_release,
        .unlocked_ioctl	=	bsg_ioctl,
        .owner		=	THIS_MODULE,
};

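/*
 * Remove the bsg node for a queue: release the minor, the sysfs link and
 * the class device.
 */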
void bsg_unregister_queue(struct request_queue *q)
{
        struct bsg_class_device *bcd = &q->bsg_dev;

        if (!bcd->class_dev)
                return;

        mutex_lock(&bsg_mutex);
        idr_remove(&bsg_minor_idr, bcd->minor);
        sysfs_remove_link(&q->kobj, "bsg");
        device_unregister(bcd->class_dev);
        bcd->class_dev = NULL;
        kref_put(&bcd->ref, bsg_kref_release_function);
        mutex_unlock(&bsg_mutex);
}
EXPORT_SYMBOL_GPL(bsg_unregister_queue);

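/*
 * Expose a request queue as a /dev/bsg node: allocate a minor, create
 * the class device and link it from the queue's sysfs directory.
 */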
int bsg_register_queue(struct request_queue *q, struct device *parent,
		       const char *name, void (*release)(struct device *))
{
        struct bsg_class_device *bcd;
        dev_t dev;
        int ret, minor;
        struct device *class_dev = NULL;
        const char *devname;

        if (name)
                devname = name;
        else
                devname = dev_name(parent);

        /*
         * we need a proper transport to send commands, not a stacked device
         */
        if (!q->request_fn)
                return 0;

        bcd = &q->bsg_dev;
        memset(bcd, 0, sizeof(*bcd));

        mutex_lock(&bsg_mutex);

        ret = idr_pre_get(&bsg_minor_idr, GFP_KERNEL);
        if (!ret) {
                ret = -ENOMEM;
                goto unlock;
        }

        ret = idr_get_new(&bsg_minor_idr, bcd, &minor);
        if (ret < 0)
                goto unlock;

        if (minor >= BSG_MAX_DEVS) {
                printk(KERN_ERR "bsg: too many bsg devices\n");
                ret = -EINVAL;
                goto remove_idr;
        }

        bcd->minor = minor;
        bcd->queue = q;
        bcd->parent = get_device(parent);
        bcd->release = release;
        kref_init(&bcd->ref);
        dev = MKDEV(bsg_major, bcd->minor);
        class_dev = device_create(bsg_class, parent, dev, NULL, "%s", devname);
        if (IS_ERR(class_dev)) {
                ret = PTR_ERR(class_dev);
                goto put_dev;
        }
        bcd->class_dev = class_dev;

        if (q->kobj.sd) {
                ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg");
                if (ret)
                        goto unregister_class_dev;
        }

        mutex_unlock(&bsg_mutex);
        return 0;

unregister_class_dev:
        device_unregister(class_dev);
put_dev:
        put_device(parent);
remove_idr:
        idr_remove(&bsg_minor_idr, minor);
unlock:
        mutex_unlock(&bsg_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(bsg_register_queue);

static struct cdev bsg_cdev;

static int __init bsg_init(void)
{
        int ret, i;
        dev_t devid;

        bsg_cmd_cachep = kmem_cache_create("bsg_cmd",
                                sizeof(struct bsg_command), 0, 0, NULL);
        if (!bsg_cmd_cachep) {
                printk(KERN_ERR "bsg: failed creating slab cache\n");
                return -ENOMEM;
        }

        for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++)
                INIT_HLIST_HEAD(&bsg_device_list[i]);

        bsg_class = class_create(THIS_MODULE, "bsg");
        if (IS_ERR(bsg_class)) {
                ret = PTR_ERR(bsg_class);
                goto destroy_kmemcache;
        }

        ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg");
        if (ret)
                goto destroy_bsg_class;

        bsg_major = MAJOR(devid);

        cdev_init(&bsg_cdev, &bsg_fops);
        ret = cdev_add(&bsg_cdev, MKDEV(bsg_major, 0), BSG_MAX_DEVS);
        if (ret)
                goto unregister_chrdev;

        printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION
               " loaded (major %d)\n", bsg_major);
        return 0;
unregister_chrdev:
        unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS);
destroy_bsg_class:
        class_destroy(bsg_class);
destroy_kmemcache:
        kmem_cache_destroy(bsg_cmd_cachep);
        return ret;
}

MODULE_AUTHOR("Jens Axboe");
MODULE_DESCRIPTION(BSG_DESCRIPTION);
MODULE_LICENSE("GPL");

device_initcall(bsg_init);