/*
 * dcssblk.c -- the S/390 block driver for dcss memory
 *
 * Authors: Carsten Otte, Stefan Weinhuber, Gerald Schaefer
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <asm/extmem.h>
#include <asm/io.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <asm/s390_rdev.h>
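
/*
 * A DCSS (discontiguous saved segment) is z/VM memory that is made
 * addressable in the kernel through the extmem interface (segment_load()
 * and friends, see <asm/extmem.h>).  This driver presents one or more
 * such segments as a block device.
 */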

//#define DCSSBLK_DEBUG		/* Debug messages on/off */
#define DCSSBLK_NAME "dcssblk"
#define DCSSBLK_MINORS_PER_DISK 1
#define DCSSBLK_PARM_LEN 400

#ifdef DCSSBLK_DEBUG
#define PRINT_DEBUG(x...) printk(KERN_DEBUG DCSSBLK_NAME " debug: " x)
#else
#define PRINT_DEBUG(x...) do {} while (0)
#endif
#define PRINT_INFO(x...) printk(KERN_INFO DCSSBLK_NAME " info: " x)
#define PRINT_WARN(x...) printk(KERN_WARNING DCSSBLK_NAME " warning: " x)
#define PRINT_ERR(x...) printk(KERN_ERR DCSSBLK_NAME " error: " x)

static int dcssblk_open(struct block_device *bdev, fmode_t mode);
static int dcssblk_release(struct gendisk *disk, fmode_t mode);
static int dcssblk_make_request(struct request_queue *q, struct bio *bio);
static int dcssblk_direct_access(struct block_device *bdev, sector_t secnum,
				 void **kaddr, unsigned long *pfn);

static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0";

static int dcssblk_major;
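
/*
 * All I/O goes through dcssblk_make_request(), i.e. the driver bypasses
 * the request queue and elevator completely; dcssblk_direct_access()
 * provides page-granular direct access to the segment memory (used by
 * execute-in-place capable file systems).
 */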
static struct block_device_operations dcssblk_devops = {
	.owner		= THIS_MODULE,
	.open		= dcssblk_open,
	.release	= dcssblk_release,
	.direct_access	= dcssblk_direct_access,
};
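
/*
 * One dcssblk_dev_info describes one block device.  A device may be built
 * from several address-contiguous DCSSs; each individual segment is tracked
 * by a struct segment_info on the seg_list of its device.
 */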
struct dcssblk_dev_info {
	struct list_head lh;
	struct device dev;
	char segment_name[BUS_ID_SIZE];
	atomic_t use_count;
	struct gendisk *gd;
	unsigned long start;
	unsigned long end;
	int segment_type;
	unsigned char save_pending;
	unsigned char is_shared;
	struct request_queue *dcssblk_queue;
	int num_of_segments;
	struct list_head seg_list;
};

struct segment_info {
	struct list_head lh;
	char segment_name[BUS_ID_SIZE];
	unsigned long start;
	unsigned long end;
	int segment_type;
};

static ssize_t dcssblk_add_store(struct device * dev, struct device_attribute *attr, const char * buf,
				  size_t count);
static ssize_t dcssblk_remove_store(struct device * dev, struct device_attribute *attr, const char * buf,
				  size_t count);
static ssize_t dcssblk_save_store(struct device * dev, struct device_attribute *attr, const char * buf,
				  size_t count);
static ssize_t dcssblk_save_show(struct device *dev, struct device_attribute *attr, char *buf);
static ssize_t dcssblk_shared_store(struct device * dev, struct device_attribute *attr, const char * buf,
				  size_t count);
static ssize_t dcssblk_shared_show(struct device *dev, struct device_attribute *attr, char *buf);
static ssize_t dcssblk_seglist_show(struct device *dev,
				struct device_attribute *attr,
				char *buf);

static DEVICE_ATTR(add, S_IWUSR, NULL, dcssblk_add_store);
static DEVICE_ATTR(remove, S_IWUSR, NULL, dcssblk_remove_store);
static DEVICE_ATTR(save, S_IWUSR | S_IRUSR, dcssblk_save_show,
		   dcssblk_save_store);
static DEVICE_ATTR(shared, S_IWUSR | S_IRUSR, dcssblk_shared_show,
		   dcssblk_shared_store);
static DEVICE_ATTR(seglist, S_IRUSR, dcssblk_seglist_show, NULL);
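/*
 * sysfs interface: "add" and "remove" live on the dcssblk root device and
 * take segment names (colon-separated names form one multi-segment device);
 * "shared", "save" and "seglist" are created per block device in
 * dcssblk_add_store().
 */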

static struct device *dcssblk_root_dev;

static LIST_HEAD(dcssblk_devices);
static struct rw_semaphore dcssblk_devices_sem;

/*
 * release function for segment device.
 */
static void
dcssblk_release_segment(struct device *dev)
{
	struct dcssblk_dev_info *dev_info;
	struct segment_info *entry, *temp;

	dev_info = container_of(dev, struct dcssblk_dev_info, dev);
	list_for_each_entry_safe(entry, temp, &dev_info->seg_list, lh) {
		list_del(&entry->lh);
		kfree(entry);
	}
	kfree(dev_info);
	module_put(THIS_MODULE);
}

/*
 * get a minor number. needs to be called with
 * down_write(&dcssblk_devices_sem) and the
 * device needs to be enqueued before the semaphore is
 * released.
 */
static int
dcssblk_assign_free_minor(struct dcssblk_dev_info *dev_info)
{
	int minor, found;
	struct dcssblk_dev_info *entry;

	if (dev_info == NULL)
		return -ENODEV;
	for (minor = 0; minor < (1<<MINORBITS); minor++) {
		found = 0;
		// test if minor available
		list_for_each_entry(entry, &dcssblk_devices, lh)
			if (minor == MINOR(disk_devt(entry->gd)))
				found++;
		if (!found) break; // got unused minor
	}
	if (found)
		return -EBUSY;
	dev_info->gd->first_minor = minor;
	return 0;
}

/*
 * get the struct dcssblk_dev_info from dcssblk_devices
 * for the given name.
 * down_read(&dcssblk_devices_sem) must be held.
 */
static struct dcssblk_dev_info *
dcssblk_get_device_by_name(char *name)
{
	struct dcssblk_dev_info *entry;

	list_for_each_entry(entry, &dcssblk_devices, lh) {
		if (!strcmp(name, entry->segment_name)) {
			return entry;
		}
	}
	return NULL;
}

/*
 * get the struct segment_info from seg_list
 * for the given name.
 * down_read(&dcssblk_devices_sem) must be held.
 */
static struct segment_info *
dcssblk_get_segment_by_name(char *name)
{
	struct dcssblk_dev_info *dev_info;
	struct segment_info *entry;

	list_for_each_entry(dev_info, &dcssblk_devices, lh) {
		list_for_each_entry(entry, &dev_info->seg_list, lh) {
			if (!strcmp(name, entry->segment_name))
				return entry;
		}
	}
	return NULL;
}

/*
 * get the highest address of the multi-segment block.
 */
static unsigned long
dcssblk_find_highest_addr(struct dcssblk_dev_info *dev_info)
{
	unsigned long highest_addr;
	struct segment_info *entry;

	highest_addr = 0;
	list_for_each_entry(entry, &dev_info->seg_list, lh) {
		if (highest_addr < entry->end)
			highest_addr = entry->end;
	}
	return highest_addr;
}

/*
 * get the lowest address of the multi-segment block.
 */
static unsigned long
dcssblk_find_lowest_addr(struct dcssblk_dev_info *dev_info)
{
	int set_first;
	unsigned long lowest_addr;
	struct segment_info *entry;

	set_first = 0;
	lowest_addr = 0;
	list_for_each_entry(entry, &dev_info->seg_list, lh) {
		if (set_first == 0) {
			lowest_addr = entry->start;
			set_first = 1;
		} else {
			if (lowest_addr > entry->start)
				lowest_addr = entry->start;
		}
	}
	return lowest_addr;
}

/*
 * Check continuity of segments.
 */
static int
dcssblk_is_continuous(struct dcssblk_dev_info *dev_info)
{
	int i, j, rc;
	struct segment_info *sort_list, *entry, temp;

	if (dev_info->num_of_segments <= 1)
		return 0;

	sort_list = kzalloc(
			sizeof(struct segment_info) * dev_info->num_of_segments,
			GFP_KERNEL);
	if (sort_list == NULL)
		return -ENOMEM;
	i = 0;
	list_for_each_entry(entry, &dev_info->seg_list, lh) {
		memcpy(&sort_list[i], entry, sizeof(struct segment_info));
		i++;
	}

	/* sort segments by start address */
	for (i = 0; i < dev_info->num_of_segments; i++)
		for (j = 0; j < dev_info->num_of_segments; j++)
			if (sort_list[j].start > sort_list[i].start) {
				memcpy(&temp, &sort_list[i],
					sizeof(struct segment_info));
				memcpy(&sort_list[i], &sort_list[j],
					sizeof(struct segment_info));
				memcpy(&sort_list[j], &temp,
					sizeof(struct segment_info));
			}

	/* check continuity */
	for (i = 0; i < dev_info->num_of_segments - 1; i++) {
		if ((sort_list[i].end + 1) != sort_list[i+1].start) {
			PRINT_ERR("Segment %s is not contiguous with "
				"segment %s\n",
				sort_list[i].segment_name,
				sort_list[i+1].segment_name);
			rc = -EINVAL;
			goto out;
		}
		/* EN and EW are allowed in a block device */
		if (sort_list[i].segment_type != sort_list[i+1].segment_type) {
			if (!(sort_list[i].segment_type & SEGMENT_EXCLUSIVE) ||
				(sort_list[i].segment_type == SEG_TYPE_ER) ||
				!(sort_list[i+1].segment_type &
					SEGMENT_EXCLUSIVE) ||
				(sort_list[i+1].segment_type == SEG_TYPE_ER)) {
				PRINT_ERR("Segment %s has different type from "
					"segment %s\n",
					sort_list[i].segment_name,
					sort_list[i+1].segment_name);
				rc = -EINVAL;
				goto out;
			}
		}
	}
	rc = 0;
out:
	kfree(sort_list);
	return rc;
}

/*
 * Load a segment.
 */
static int
dcssblk_load_segment(char *name, struct segment_info **seg_info)
{
	int rc;

	/* already loaded? */
	down_read(&dcssblk_devices_sem);
	*seg_info = dcssblk_get_segment_by_name(name);
	up_read(&dcssblk_devices_sem);
	if (*seg_info != NULL)
		return -EEXIST;

	/* get a struct segment_info */
	*seg_info = kzalloc(sizeof(struct segment_info), GFP_KERNEL);
	if (*seg_info == NULL)
		return -ENOMEM;

	strcpy((*seg_info)->segment_name, name);

	/* load the segment */
	rc = segment_load(name, SEGMENT_SHARED,
			&(*seg_info)->start, &(*seg_info)->end);
	if (rc < 0) {
		segment_warning(rc, (*seg_info)->segment_name);
		kfree(*seg_info);
	} else {
		INIT_LIST_HEAD(&(*seg_info)->lh);
		(*seg_info)->segment_type = rc;
	}
	return rc;
}
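
/*
 * Deferred unregister: dcssblk_shared_store() must not unregister the
 * device from within its own sysfs store method, so it schedules this
 * callback via device_schedule_callback().
 */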
static void dcssblk_unregister_callback(struct device *dev)
{
	device_unregister(dev);
	put_device(dev);
}

/*
 * device attribute for switching shared/nonshared (exclusive)
 * operation (show + store)
 */
static ssize_t
dcssblk_shared_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dcssblk_dev_info *dev_info;

	dev_info = container_of(dev, struct dcssblk_dev_info, dev);
	return sprintf(buf, dev_info->is_shared ? "1\n" : "0\n");
}

static ssize_t
dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const char *inbuf, size_t count)
{
	struct dcssblk_dev_info *dev_info;
	struct segment_info *entry, *temp;
	int rc;

	if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0'))
		return -EINVAL;
	down_write(&dcssblk_devices_sem);
	dev_info = container_of(dev, struct dcssblk_dev_info, dev);
	if (atomic_read(&dev_info->use_count)) {
		rc = -EBUSY;
		goto out;
	}
	if (inbuf[0] == '1') {
		/* reload segments in shared mode */
		list_for_each_entry(entry, &dev_info->seg_list, lh) {
			rc = segment_modify_shared(entry->segment_name,
						   SEGMENT_SHARED);
			if (rc < 0) {
				BUG_ON(rc == -EINVAL);
				if (rc != -EAGAIN)
					goto removeseg;
			}
		}
		dev_info->is_shared = 1;
		switch (dev_info->segment_type) {
		case SEG_TYPE_SR:
		case SEG_TYPE_ER:
		case SEG_TYPE_SC:
			set_disk_ro(dev_info->gd, 1);
		}
	} else if (inbuf[0] == '0') {
		/* reload segments in exclusive mode */
		if (dev_info->segment_type == SEG_TYPE_SC) {
			PRINT_ERR("Segment type SC (%s) cannot be loaded in "
				"non-shared mode\n", dev_info->segment_name);
			rc = -EINVAL;
			goto out;
		}
		list_for_each_entry(entry, &dev_info->seg_list, lh) {
			rc = segment_modify_shared(entry->segment_name,
						   SEGMENT_EXCLUSIVE);
			if (rc < 0) {
				BUG_ON(rc == -EINVAL);
				if (rc != -EAGAIN)
					goto removeseg;
			}
		}
		dev_info->is_shared = 0;
		set_disk_ro(dev_info->gd, 0);
	} else {
		rc = -EINVAL;
		goto out;
	}
	rc = count;
	goto out;

removeseg:
	PRINT_ERR("Could not reload segment(s) of the device %s, removing "
		"it now!\n",
		dev_info->segment_name);
	temp = entry;
	list_for_each_entry(entry, &dev_info->seg_list, lh) {
		if (entry != temp)
			segment_unload(entry->segment_name);
	}
	list_del(&dev_info->lh);

	del_gendisk(dev_info->gd);
	blk_cleanup_queue(dev_info->dcssblk_queue);
	dev_info->gd->queue = NULL;
	put_disk(dev_info->gd);
	rc = device_schedule_callback(dev, dcssblk_unregister_callback);
out:
	up_write(&dcssblk_devices_sem);
	return rc;
}

/*
 * device attribute for save operation on current copy
 * of the segment. If the segment is busy, saving will
 * become pending until it gets released, which can be
 * undone by storing a non-true value to this entry.
 * (show + store)
 */
static ssize_t
dcssblk_save_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dcssblk_dev_info *dev_info;

	dev_info = container_of(dev, struct dcssblk_dev_info, dev);
	return sprintf(buf, dev_info->save_pending ? "1\n" : "0\n");
}

static ssize_t
dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char *inbuf, size_t count)
{
	struct dcssblk_dev_info *dev_info;
	struct segment_info *entry;

	if ((count > 1) && (inbuf[1] != '\n') && (inbuf[1] != '\0'))
		return -EINVAL;
	dev_info = container_of(dev, struct dcssblk_dev_info, dev);

	down_write(&dcssblk_devices_sem);
	if (inbuf[0] == '1') {
		if (atomic_read(&dev_info->use_count) == 0) {
			// device is idle => we save immediately
			PRINT_INFO("Saving segment(s) of the device %s\n",
				   dev_info->segment_name);
			list_for_each_entry(entry, &dev_info->seg_list, lh) {
				segment_save(entry->segment_name);
			}
		} else {
			// device is busy => we save it when it becomes
			// idle in dcssblk_release
			PRINT_INFO("Device %s is currently busy, segment(s) "
				   "will be saved when it becomes idle...\n",
				   dev_info->segment_name);
			dev_info->save_pending = 1;
		}
	} else if (inbuf[0] == '0') {
		if (dev_info->save_pending) {
			// device is busy & the user wants to undo his save
			// request
			dev_info->save_pending = 0;
			PRINT_INFO("Pending save for segment(s) of the device "
				   "%s deactivated\n",
				   dev_info->segment_name);
		}
	} else {
		up_write(&dcssblk_devices_sem);
		return -EINVAL;
	}
	up_write(&dcssblk_devices_sem);
	return count;
}

/*
 * device attribute for showing all segments in a device
 */
static ssize_t
dcssblk_seglist_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	int i;

	struct dcssblk_dev_info *dev_info;
	struct segment_info *entry;

	down_read(&dcssblk_devices_sem);
	dev_info = container_of(dev, struct dcssblk_dev_info, dev);
	i = 0;
	buf[0] = '\0';
	list_for_each_entry(entry, &dev_info->seg_list, lh) {
		strcpy(&buf[i], entry->segment_name);
		i += strlen(entry->segment_name);
		buf[i] = '\n';
		i++;
	}
	up_read(&dcssblk_devices_sem);
	return i;
}

/*
 * device attribute for adding devices
 */
static ssize_t
dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	int rc, i, j, num_of_segments;
	struct dcssblk_dev_info *dev_info;
	struct segment_info *seg_info, *temp;
	char *local_buf;
	unsigned long seg_byte_size;

	dev_info = NULL;
	seg_info = NULL;
	if (dev != dcssblk_root_dev) {
		rc = -EINVAL;
		goto out_nobuf;
	}
	if ((count < 1) || (buf[0] == '\0') || (buf[0] == '\n')) {
		rc = -ENAMETOOLONG;
		goto out_nobuf;
	}

	local_buf = kmalloc(count + 1, GFP_KERNEL);
	if (local_buf == NULL) {
		rc = -ENOMEM;
		goto out_nobuf;
	}

	/*
	 * parse input
	 */
	num_of_segments = 0;
	for (i = 0; ((buf[i] != '\0') && (buf[i] != '\n') && i < count); i++) {
		for (j = i; (buf[j] != ':') &&
			(buf[j] != '\0') &&
			(buf[j] != '\n') &&
			j < count; j++) {
			local_buf[j-i] = toupper(buf[j]);
		}
		local_buf[j-i] = '\0';
		if (((j - i) == 0) || ((j - i) > 8)) {
			rc = -ENAMETOOLONG;
			goto seg_list_del;
		}

		rc = dcssblk_load_segment(local_buf, &seg_info);
		if (rc < 0)
			goto seg_list_del;
		/*
		 * get a struct dcssblk_dev_info
		 */
		if (num_of_segments == 0) {
			dev_info = kzalloc(sizeof(struct dcssblk_dev_info),
					GFP_KERNEL);
			if (dev_info == NULL) {
				rc = -ENOMEM;
				goto out;
			}
			strcpy(dev_info->segment_name, local_buf);
			dev_info->segment_type = seg_info->segment_type;
			INIT_LIST_HEAD(&dev_info->seg_list);
		}
		list_add_tail(&seg_info->lh, &dev_info->seg_list);
		num_of_segments++;
		i = j;

		if ((buf[j] == '\0') || (buf[j] == '\n'))
			break;
	}

	/* no trailing colon at the end of the input */
	if ((i > 0) && (buf[i-1] == ':')) {
		rc = -ENAMETOOLONG;
		goto seg_list_del;
	}
	strlcpy(local_buf, buf, i + 1);
	dev_info->num_of_segments = num_of_segments;
	rc = dcssblk_is_continuous(dev_info);
	if (rc < 0)
		goto seg_list_del;

	dev_info->start = dcssblk_find_lowest_addr(dev_info);
	dev_info->end = dcssblk_find_highest_addr(dev_info);

	dev_set_name(&dev_info->dev, dev_info->segment_name);
	dev_info->dev.release = dcssblk_release_segment;
	INIT_LIST_HEAD(&dev_info->lh);
	dev_info->gd = alloc_disk(DCSSBLK_MINORS_PER_DISK);
	if (dev_info->gd == NULL) {
		rc = -ENOMEM;
		goto seg_list_del;
	}
	dev_info->gd->major = dcssblk_major;
	dev_info->gd->fops = &dcssblk_devops;
	dev_info->dcssblk_queue = blk_alloc_queue(GFP_KERNEL);
	dev_info->gd->queue = dev_info->dcssblk_queue;
	dev_info->gd->private_data = dev_info;
	dev_info->gd->driverfs_dev = &dev_info->dev;
	blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request);
	blk_queue_hardsect_size(dev_info->dcssblk_queue, 4096);

	seg_byte_size = (dev_info->end - dev_info->start + 1);
	set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors
	PRINT_INFO("Loaded segment(s) %s, size = %lu Byte, "
		   "capacity = %lu (512 Byte) sectors\n", local_buf,
		   seg_byte_size, seg_byte_size >> 9);

	dev_info->save_pending = 0;
	dev_info->is_shared = 1;
	dev_info->dev.parent = dcssblk_root_dev;

	/*
	 *get minor, add to list
	 */
	down_write(&dcssblk_devices_sem);
	if (dcssblk_get_segment_by_name(local_buf)) {
		rc = -EEXIST;
		goto release_gd;
	}
	rc = dcssblk_assign_free_minor(dev_info);
	if (rc)
		goto release_gd;
	sprintf(dev_info->gd->disk_name, "dcssblk%d",
		MINOR(disk_devt(dev_info->gd)));
	list_add_tail(&dev_info->lh, &dcssblk_devices);

	if (!try_module_get(THIS_MODULE)) {
		rc = -ENODEV;
		goto dev_list_del;
	}
	/*
	 * register the device
	 */
	rc = device_register(&dev_info->dev);
	if (rc) {
		module_put(THIS_MODULE);
		goto dev_list_del;
	}
	get_device(&dev_info->dev);
	rc = device_create_file(&dev_info->dev, &dev_attr_shared);
	if (rc)
		goto unregister_dev;
	rc = device_create_file(&dev_info->dev, &dev_attr_save);
	if (rc)
		goto unregister_dev;
	rc = device_create_file(&dev_info->dev, &dev_attr_seglist);
	if (rc)
		goto unregister_dev;

	add_disk(dev_info->gd);

	switch (dev_info->segment_type) {
	case SEG_TYPE_SR:
	case SEG_TYPE_ER:
	case SEG_TYPE_SC:
		set_disk_ro(dev_info->gd,1);
		break;
	default:
		set_disk_ro(dev_info->gd,0);
		break;
	}
	up_write(&dcssblk_devices_sem);
	rc = count;
	goto out;

unregister_dev:
	list_del(&dev_info->lh);
	blk_cleanup_queue(dev_info->dcssblk_queue);
	dev_info->gd->queue = NULL;
	put_disk(dev_info->gd);
	device_unregister(&dev_info->dev);
	list_for_each_entry(seg_info, &dev_info->seg_list, lh) {
		segment_unload(seg_info->segment_name);
	}
	put_device(&dev_info->dev);
	up_write(&dcssblk_devices_sem);
	goto out;
dev_list_del:
	list_del(&dev_info->lh);
release_gd:
	blk_cleanup_queue(dev_info->dcssblk_queue);
	dev_info->gd->queue = NULL;
	put_disk(dev_info->gd);
	up_write(&dcssblk_devices_sem);
seg_list_del:
	if (dev_info == NULL)
		goto out;
	list_for_each_entry_safe(seg_info, temp, &dev_info->seg_list, lh) {
		list_del(&seg_info->lh);
		segment_unload(seg_info->segment_name);
		kfree(seg_info);
	}
	kfree(dev_info);
out:
	kfree(local_buf);
out_nobuf:
	return rc;
}

/*
 * device attribute for removing devices
 */
static ssize_t
dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct dcssblk_dev_info *dev_info;
	struct segment_info *entry;
	int rc, i;
	char *local_buf;

	if (dev != dcssblk_root_dev) {
		return -EINVAL;
	}
	local_buf = kmalloc(count + 1, GFP_KERNEL);
	if (local_buf == NULL) {
		return -ENOMEM;
	}
	/*
	 * parse input
	 */
	for (i = 0; ((*(buf+i)!='\0') && (*(buf+i)!='\n') && i < count); i++) {
		local_buf[i] = toupper(buf[i]);
	}
	local_buf[i] = '\0';
	if ((i == 0) || (i > 8)) {
		rc = -ENAMETOOLONG;
		goto out_buf;
	}

	down_write(&dcssblk_devices_sem);
	dev_info = dcssblk_get_device_by_name(local_buf);
	if (dev_info == NULL) {
		up_write(&dcssblk_devices_sem);
		PRINT_WARN("Device %s is not loaded!\n", local_buf);
		rc = -ENODEV;
		goto out_buf;
	}
	if (atomic_read(&dev_info->use_count) != 0) {
		up_write(&dcssblk_devices_sem);
		PRINT_WARN("Device %s is in use!\n", local_buf);
		rc = -EBUSY;
		goto out_buf;
	}

	list_del(&dev_info->lh);
	del_gendisk(dev_info->gd);
	blk_cleanup_queue(dev_info->dcssblk_queue);
	dev_info->gd->queue = NULL;
	put_disk(dev_info->gd);
	device_unregister(&dev_info->dev);

	/* unload all related segments */
	list_for_each_entry(entry, &dev_info->seg_list, lh)
		segment_unload(entry->segment_name);

	put_device(&dev_info->dev);
	up_write(&dcssblk_devices_sem);

	rc = count;
out_buf:
	kfree(local_buf);
	return rc;
}
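
/*
 * open/release only track the number of openers.  If a save was requested
 * while the device was busy (see dcssblk_save_store()), it is carried out
 * in dcssblk_release() once the last opener is gone.
 */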
static int
dcssblk_open(struct block_device *bdev, fmode_t mode)
{
	struct dcssblk_dev_info *dev_info;
	int rc;

	dev_info = bdev->bd_disk->private_data;
	if (NULL == dev_info) {
		rc = -ENODEV;
		goto out;
	}
	atomic_inc(&dev_info->use_count);
	bdev->bd_block_size = 4096;
	rc = 0;
out:
	return rc;
}

static int
dcssblk_release(struct gendisk *disk, fmode_t mode)
{
	struct dcssblk_dev_info *dev_info = disk->private_data;
	struct segment_info *entry;
	int rc;

	if (!dev_info) {
		rc = -ENODEV;
		goto out;
	}
	down_write(&dcssblk_devices_sem);
	if (atomic_dec_and_test(&dev_info->use_count)
	    && (dev_info->save_pending)) {
		PRINT_INFO("Device %s became idle and is being saved now\n",
			   dev_info->segment_name);
		list_for_each_entry(entry, &dev_info->seg_list, lh) {
			segment_save(entry->segment_name);
		}
		dev_info->save_pending = 0;
	}
	up_write(&dcssblk_devices_sem);
	rc = 0;
out:
	return rc;
}
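
/*
 * All I/O is synchronous: each bio is served by copying between the bio
 * pages and the DCSS memory that segment_load() mapped into the kernel
 * address space.  Requests must be 4K aligned, matching the 4096 byte
 * hard sector size set up in dcssblk_add_store().
 */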
static int
dcssblk_make_request(struct request_queue *q, struct bio *bio)
{
	struct dcssblk_dev_info *dev_info;
	struct bio_vec *bvec;
	unsigned long index;
	unsigned long page_addr;
	unsigned long source_addr;
	unsigned long bytes_done;
	int i;

	bytes_done = 0;
	dev_info = bio->bi_bdev->bd_disk->private_data;
	if (dev_info == NULL)
		goto fail;
	if ((bio->bi_sector & 7) != 0 || (bio->bi_size & 4095) != 0)
		/* Request is not page-aligned. */
		goto fail;
	if (((bio->bi_size >> 9) + bio->bi_sector)
			> get_capacity(bio->bi_bdev->bd_disk)) {
		/* Request beyond end of DCSS segment. */
		goto fail;
	}
	/* verify data transfer direction */
	if (dev_info->is_shared) {
		switch (dev_info->segment_type) {
		case SEG_TYPE_SR:
		case SEG_TYPE_ER:
		case SEG_TYPE_SC:
			/* cannot write to these segments */
			if (bio_data_dir(bio) == WRITE) {
				PRINT_WARN("rejecting write to ro device %s\n",
					   dev_name(&dev_info->dev));
				goto fail;
			}
		}
	}

	index = (bio->bi_sector >> 3);
	bio_for_each_segment(bvec, bio, i) {
		page_addr = (unsigned long)
			page_address(bvec->bv_page) + bvec->bv_offset;
		source_addr = dev_info->start + (index<<12) + bytes_done;
		if (unlikely((page_addr & 4095) != 0) || (bvec->bv_len & 4095) != 0)
			// More paranoia.
			goto fail;
		if (bio_data_dir(bio) == READ) {
			memcpy((void*)page_addr, (void*)source_addr,
			       bvec->bv_len);
		} else {
			memcpy((void*)source_addr, (void*)page_addr,
			       bvec->bv_len);
		}
		bytes_done += bvec->bv_len;
	}
	bio_endio(bio, 0);
	return 0;
fail:
	bio_io_error(bio);
	return 0;
}
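
/*
 * Translate a 512-byte sector number into a kernel virtual address and pfn
 * inside the DCSS, so that execute-in-place (XIP) capable file systems can
 * access the memory directly, without going through the block layer.
 */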
static int
dcssblk_direct_access (struct block_device *bdev, sector_t secnum,
			void **kaddr, unsigned long *pfn)
{
	struct dcssblk_dev_info *dev_info;
	unsigned long pgoff;

	dev_info = bdev->bd_disk->private_data;
	if (!dev_info)
		return -ENODEV;
	if (secnum % (PAGE_SIZE/512))
		return -EINVAL;
	pgoff = secnum / (PAGE_SIZE / 512);
	if ((pgoff+1)*PAGE_SIZE-1 > dev_info->end - dev_info->start)
		return -ERANGE;
	*kaddr = (void *) (dev_info->start+pgoff*PAGE_SIZE);
	*pfn = virt_to_phys(*kaddr) >> PAGE_SHIFT;

	return 0;
}
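
/*
 * Parse the "segments=" module/kernel parameter and add each entry via
 * dcssblk_add_store(); a trailing "(local)" switches the new device to
 * exclusive (non-shared) mode through dcssblk_shared_store().
 */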
static void
dcssblk_check_params(void)
{
	int rc, i, j, k;
	char buf[DCSSBLK_PARM_LEN + 1];
	struct dcssblk_dev_info *dev_info;

	for (i = 0; (i < DCSSBLK_PARM_LEN) && (dcssblk_segments[i] != '\0');
	     i++) {
		for (j = i; (dcssblk_segments[j] != ',') &&
			    (dcssblk_segments[j] != '\0') &&
			    (dcssblk_segments[j] != '(') &&
			    (j < DCSSBLK_PARM_LEN); j++)
		{
			buf[j-i] = dcssblk_segments[j];
		}
		buf[j-i] = '\0';
		rc = dcssblk_add_store(dcssblk_root_dev, NULL, buf, j-i);
		if ((rc >= 0) && (dcssblk_segments[j] == '(')) {
			for (k = 0; (buf[k] != ':') && (buf[k] != '\0'); k++)
				buf[k] = toupper(buf[k]);
			buf[k] = '\0';
			if (!strncmp(&dcssblk_segments[j], "(local)", 7)) {
				down_read(&dcssblk_devices_sem);
				dev_info = dcssblk_get_device_by_name(buf);
				up_read(&dcssblk_devices_sem);
				if (dev_info)
					dcssblk_shared_store(&dev_info->dev,
							     NULL, "0\n", 3);
			}
		}
		while ((dcssblk_segments[j] != ',') &&
		       (dcssblk_segments[j] != '\0'))
		{
			j++;
		}
		if (dcssblk_segments[j] == '\0')
			break;
		i = j;
	}
}

/*
 * The init/exit functions.
 */
static void __exit
dcssblk_exit(void)
{
	s390_root_dev_unregister(dcssblk_root_dev);
	unregister_blkdev(dcssblk_major, DCSSBLK_NAME);
}

static int __init
dcssblk_init(void)
{
	int rc;

	dcssblk_root_dev = s390_root_dev_register("dcssblk");
	if (IS_ERR(dcssblk_root_dev))
		return PTR_ERR(dcssblk_root_dev);
	rc = device_create_file(dcssblk_root_dev, &dev_attr_add);
	if (rc) {
		s390_root_dev_unregister(dcssblk_root_dev);
		return rc;
	}
	rc = device_create_file(dcssblk_root_dev, &dev_attr_remove);
	if (rc) {
		s390_root_dev_unregister(dcssblk_root_dev);
		return rc;
	}
	rc = register_blkdev(0, DCSSBLK_NAME);
	if (rc < 0) {
		s390_root_dev_unregister(dcssblk_root_dev);
		return rc;
	}
	dcssblk_major = rc;
	init_rwsem(&dcssblk_devices_sem);

	dcssblk_check_params();

	return 0;
}

module_init(dcssblk_init);
module_exit(dcssblk_exit);

module_param_string(segments, dcssblk_segments, DCSSBLK_PARM_LEN, 0444);
MODULE_PARM_DESC(segments, "Name of DCSS segment(s) to be loaded, "
		 "comma-separated list of sets, names within a set "
		 "separated by colons; each set contains names of "
		 "contiguous segments and each name max. 8 chars.\n"
		 "Adding \"(local)\" to the end of each set equals echoing 0 "
		 "to /sys/devices/dcssblk/<device name>/shared after loading "
		 "the contiguous segments - \n"
		 "e.g. segments=\"mydcss1,mydcss2:mydcss3,mydcss4(local)\"");

MODULE_LICENSE("GPL");