/*
 *  drivers/s390/char/tape_core.c
 *    basic function of the tape device driver
 *
 *  S390 and zSeries version
 *    Copyright (C) 2001,2005 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *		 Michael Holzheu <holzheu@de.ibm.com>
 *		 Tuan Ngo-Anh <ngoanh@de.ibm.com>
 *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
 *		 Stefan Bader <shbader@de.ibm.com>
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>		// for kernel parameters
#include <linux/kmod.h>		// for requesting modules
#include <linux/spinlock.h>	// for locks
#include <linux/vmalloc.h>
#include <linux/list.h>

#include <asm/types.h>		// for variable types
#define TAPE_DBF_AREA	tape_core_dbf

#define PRINTK_HEADER "TAPE_CORE: "

static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *);
static void tape_delayed_next_request(void * data);
/*
 * One list to contain all tape devices of all disciplines, so
 * we can assign the devices to minor numbers of the same major.
 * The list is protected by the rwlock.
 */
static struct list_head tape_device_list = LIST_HEAD_INIT(tape_device_list);
static DEFINE_RWLOCK(tape_device_lock);
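/*
 * Minor number layout, as implied by tape_assign_minor() and
 * tape_get_device() below: each device owns a block of
 * TAPE_MINORS_PER_DEV consecutive minor numbers starting at
 * first_minor, so a device index maps to first_minor / TAPE_MINORS_PER_DEV.
 */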
/*
 * Pointer to debug area.
 */
debug_info_t *TAPE_DBF_AREA = NULL;
EXPORT_SYMBOL(TAPE_DBF_AREA);
/*
 * Printable strings for tape enumerations.
 */
const char *tape_state_verbose[TS_SIZE] =
	[TS_UNUSED]   = "UNUSED",
	[TS_IN_USE]   = "IN_USE",
	[TS_BLKUSE]   = "BLKUSE",
	[TS_NOT_OPER] = "NOT_OP"

const char *tape_op_verbose[TO_SIZE] =
	[TO_BLOCK] = "BLK",	[TO_BSB] = "BSB",
	[TO_BSF] = "BSF",	[TO_DSE] = "DSE",
	[TO_FSB] = "FSB",	[TO_FSF] = "FSF",
	[TO_LBL] = "LBL",	[TO_NOP] = "NOP",
	[TO_RBA] = "RBA",	[TO_RBI] = "RBI",
	[TO_RFO] = "RFO",	[TO_REW] = "REW",
	[TO_RUN] = "RUN",	[TO_WRI] = "WRI",
	[TO_WTM] = "WTM",	[TO_MSEN] = "MSN",
	[TO_LOAD] = "LOA",	[TO_READ_CONFIG] = "RCF",
	[TO_READ_ATTMSG] = "RAT",
	[TO_DIS] = "DIS",	[TO_ASSIGN] = "ASS",
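/*
 * Convert a ccw bus id of the form "css.ssid.devno" (e.g. "0.0.1234")
 * into an integer: the first two components are parsed as decimal,
 * the device number as hexadecimal digits.
 */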
busid_to_int(char *bus_id)
	for(s = bus_id, d = 0; *s != '\0' && *s != '.'; s++)
		d = (d * 10) + (*s - '0');

	for(s++, d = 0; *s != '\0' && *s != '.'; s++)
		d = (d * 10) + (*s - '0');

	for(s++; *s != '\0'; s++) {
		if (*s >= '0' && *s <= '9') {
		} else if (*s >= 'a' && *s <= 'f') {
/*
 * Some channel attached tape specific attributes.
 *
 * FIXME: In the future the first_minor and blocksize attribute should be
 *	  replaced by a link to the cdev tree.
 */
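/*
 * These attributes are readable through sysfs, e.g. (illustrative path
 * only, following the usual ccw device layout):
 *
 *	cat /sys/bus/ccw/devices/0.0.0180/state
 */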
tape_medium_state_show(struct device *dev, struct device_attribute *attr, char *buf)
	struct tape_device *tdev;

	tdev = (struct tape_device *) dev->driver_data;
	return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->medium_state);

DEVICE_ATTR(medium_state, 0444, tape_medium_state_show, NULL);

tape_first_minor_show(struct device *dev, struct device_attribute *attr, char *buf)
	struct tape_device *tdev;

	tdev = (struct tape_device *) dev->driver_data;
	return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->first_minor);

DEVICE_ATTR(first_minor, 0444, tape_first_minor_show, NULL);

tape_state_show(struct device *dev, struct device_attribute *attr, char *buf)
	struct tape_device *tdev;

	tdev = (struct tape_device *) dev->driver_data;
	return scnprintf(buf, PAGE_SIZE, "%s\n", (tdev->first_minor < 0) ?
		"OFFLINE" : tape_state_verbose[tdev->tape_state]);

DEVICE_ATTR(state, 0444, tape_state_show, NULL);

tape_operation_show(struct device *dev, struct device_attribute *attr, char *buf)
	struct tape_device *tdev;

	tdev = (struct tape_device *) dev->driver_data;
	if (tdev->first_minor < 0)
		return scnprintf(buf, PAGE_SIZE, "N/A\n");

	spin_lock_irq(get_ccwdev_lock(tdev->cdev));
	if (list_empty(&tdev->req_queue))
		rc = scnprintf(buf, PAGE_SIZE, "---\n");
		struct tape_request *req;

		req = list_entry(tdev->req_queue.next, struct tape_request,
			list);
		rc = scnprintf(buf, PAGE_SIZE, "%s\n", tape_op_verbose[req->op]);
	spin_unlock_irq(get_ccwdev_lock(tdev->cdev));

DEVICE_ATTR(operation, 0444, tape_operation_show, NULL);

tape_blocksize_show(struct device *dev, struct device_attribute *attr, char *buf)
	struct tape_device *tdev;

	tdev = (struct tape_device *) dev->driver_data;

	return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->char_data.block_size);

DEVICE_ATTR(blocksize, 0444, tape_blocksize_show, NULL);

static struct attribute *tape_attrs[] = {
	&dev_attr_medium_state.attr,
	&dev_attr_first_minor.attr,
	&dev_attr_state.attr,
	&dev_attr_operation.attr,
	&dev_attr_blocksize.attr,

static struct attribute_group tape_attr_group = {
/*
 * Tape state functions
 */
tape_state_set(struct tape_device *device, enum tape_state newstate)
	if (device->tape_state == TS_NOT_OPER) {
		DBF_EVENT(3, "ts_set err: not oper\n");
	DBF_EVENT(4, "ts. dev: %x\n", device->first_minor);
	if (device->tape_state < TS_SIZE && device->tape_state >= 0)
		str = tape_state_verbose[device->tape_state];
	DBF_EVENT(4, "old ts: %s\n", str);
	if (device->tape_state < TS_SIZE && device->tape_state >= 0)
		str = tape_state_verbose[device->tape_state];
	DBF_EVENT(4, "%s\n", str);
	DBF_EVENT(4, "new ts:\t\n");
	if (newstate < TS_SIZE && newstate >= 0)
		str = tape_state_verbose[newstate];
	DBF_EVENT(4, "%s\n", str);
	device->tape_state = newstate;
	wake_up(&device->state_change_wq);
tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate)
	if (device->medium_state == newstate)

		device->tape_generic_status |= GMT_DR_OPEN(~0);
		PRINT_INFO("(%s): Tape is unloaded\n",
			   device->cdev->dev.bus_id);

		device->tape_generic_status &= ~GMT_DR_OPEN(~0);
		PRINT_INFO("(%s): Tape has been mounted\n",
			   device->cdev->dev.bus_id);

	device->medium_state = newstate;
	wake_up(&device->state_change_wq);
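/*
 * tape_med_state_set() above also mirrors the medium state into the
 * GMT_DR_OPEN bit of tape_generic_status: the bit is set while the tape
 * is unloaded and cleared once a cartridge has been mounted.
 */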
/*
 * Stop running ccw. Has to be called with the device lock held.
 */
__tape_cancel_io(struct tape_device *device, struct tape_request *request)
	/* Check if interrupt has already been processed */
	if (request->callback == NULL)

	for (retries = 0; retries < 5; retries++) {
		rc = ccw_device_clear(device->cdev, (long) request);

			request->status = TAPE_REQUEST_DONE;

			request->status = TAPE_REQUEST_CANCEL;
			schedule_work(&device->tape_dnr);

			DBF_EXCEPTION(2, "device gone, retry\n");

			DBF_EXCEPTION(2, "I/O error, retry\n");
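/*
 * Note: when the clear cannot be issued right away, the request is
 * marked TAPE_REQUEST_CANCEL and device->tape_dnr is scheduled, so
 * tape_delayed_next_request() retries the cancellation later (see
 * __tape_start_next_request() below).
 */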
/*
 * Add device into the sorted list, giving it the first
 * available minor number.
 */
tape_assign_minor(struct tape_device *device)
	struct tape_device *tmp;

	write_lock(&tape_device_lock);
	list_for_each_entry(tmp, &tape_device_list, node) {
		if (minor < tmp->first_minor)
		minor += TAPE_MINORS_PER_DEV;

		write_unlock(&tape_device_lock);

	device->first_minor = minor;
	list_add_tail(&device->node, &tmp->node);
	write_unlock(&tape_device_lock);

/* remove device from the list */
tape_remove_minor(struct tape_device *device)
	write_lock(&tape_device_lock);
	list_del_init(&device->node);
	device->first_minor = -1;
	write_unlock(&tape_device_lock);
/*
 * Set a device online.
 *
 * This function is called by the common I/O layer to move a device from the
 * detected but offline into the online state.
 * If we return an error (RC < 0) the device remains in the offline state. This
 * can happen if the device is assigned somewhere else, for example.
 */
tape_generic_online(struct tape_device *device,
		    struct tape_discipline *discipline)
	DBF_LH(6, "tape_enable_device(%p, %p)\n", device, discipline);

	if (device->tape_state != TS_INIT) {
		DBF_LH(3, "Tapestate not INIT (%d)\n", device->tape_state);

	/* Let the discipline have a go at the device. */
	device->discipline = discipline;
	if (!try_module_get(discipline->owner)) {
		PRINT_ERR("Cannot get module. Module gone.\n");

	rc = discipline->setup_device(device);

	rc = tape_assign_minor(device);

	rc = tapechar_setup_device(device);

	rc = tapeblock_setup_device(device);

	tape_state_set(device, TS_UNUSED);

	DBF_LH(3, "(%08x): Drive set online\n", device->cdev_id);

	tapechar_cleanup_device(device);

	device->discipline->cleanup_device(device);
	device->discipline = NULL;

	tape_remove_minor(device);

	module_put(discipline->owner);
tape_cleanup_device(struct tape_device *device)
	tapeblock_cleanup_device(device);
	tapechar_cleanup_device(device);
	device->discipline->cleanup_device(device);
	module_put(device->discipline->owner);
	tape_remove_minor(device);
	tape_med_state_set(device, MS_UNKNOWN);
/*
 * Set device offline.
 *
 * Called by the common I/O layer if the drive should be set offline on user
 * request. We may prevent this by returning an error.
 * Manual offline is only allowed while the drive is not in use.
 */
tape_generic_offline(struct tape_device *device)
	PRINT_ERR("tape_generic_offline: no such device\n");

	DBF_LH(3, "(%08x): tape_generic_offline(%p)\n",
		device->cdev_id, device);

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	switch (device->tape_state) {

		spin_unlock_irq(get_ccwdev_lock(device->cdev));

		tape_state_set(device, TS_INIT);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		tape_cleanup_device(device);

		DBF_EVENT(3, "(%08x): Set offline failed "

		PRINT_WARN("(%s): Set offline failed "

			device->cdev->dev.bus_id);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));

	DBF_LH(3, "(%08x): Drive set offline.\n", device->cdev_id);
/*
 * Allocate memory for a new device structure.
 */
static struct tape_device *
tape_alloc_device(void)
	struct tape_device *device;

	device = (struct tape_device *)
		kmalloc(sizeof(struct tape_device), GFP_KERNEL);
	if (device == NULL) {
		DBF_EXCEPTION(2, "ti:no mem\n");
		PRINT_INFO("can't allocate memory for "
			   "tape info structure\n");
		return ERR_PTR(-ENOMEM);
	memset(device, 0, sizeof(struct tape_device));
	device->modeset_byte = (char *) kmalloc(1, GFP_KERNEL | GFP_DMA);
	if (device->modeset_byte == NULL) {
		DBF_EXCEPTION(2, "ti:no mem\n");
		PRINT_INFO("can't allocate memory for modeset byte\n");
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&device->req_queue);
	INIT_LIST_HEAD(&device->node);
	init_waitqueue_head(&device->state_change_wq);
	device->tape_state = TS_INIT;
	device->medium_state = MS_UNKNOWN;
	*device->modeset_byte = 0;
	device->first_minor = -1;
	atomic_set(&device->ref_count, 1);
	INIT_WORK(&device->tape_dnr, tape_delayed_next_request, device);
/*
 * Get a reference to an existing device structure. This will automatically
 * increment the reference count.
 */
tape_get_device_reference(struct tape_device *device)
	DBF_EVENT(4, "tape_get_device_reference(%p) = %i\n", device,
		atomic_inc_return(&device->ref_count));

/*
 * Decrease the reference counter of a device structure. If the
 * reference counter reaches zero free the device structure.
 * The function returns a NULL pointer to be used by the caller
 * for clearing reference pointers.
 */
tape_put_device(struct tape_device *device)
	remain = atomic_dec_return(&device->ref_count);

	DBF_EVENT(4, "tape_put_device(%p) -> %i\n", device, remain);

		DBF_EVENT(4, "put device without reference\n");
		PRINT_ERR("put device without reference\n");

	DBF_EVENT(4, "tape_free_device(%p)\n", device);
	kfree(device->modeset_byte);
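/*
 * Usage sketch for the reference counting helpers (illustrative only,
 * mirroring how request->device is handled elsewhere in this file):
 *
 *	request->device = tape_get_device_reference(device);
 *	...
 *	request->device = tape_put_device(request->device); // yields NULL
 */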
/*
 * Find tape device by a device index.
 */
tape_get_device(int devindex)
	struct tape_device *device, *tmp;

	device = ERR_PTR(-ENODEV);
	read_lock(&tape_device_lock);
	list_for_each_entry(tmp, &tape_device_list, node) {
		if (tmp->first_minor / TAPE_MINORS_PER_DEV == devindex) {
			device = tape_get_device_reference(tmp);

	read_unlock(&tape_device_lock);
/*
 * Driverfs tape probe function.
 */
tape_generic_probe(struct ccw_device *cdev)
	struct tape_device *device;

	device = tape_alloc_device();

	PRINT_INFO("tape device %s found\n", cdev->dev.bus_id);
	cdev->dev.driver_data = device;

	device->cdev_id = busid_to_int(cdev->dev.bus_id);
	cdev->handler = __tape_do_irq;

	ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
	sysfs_create_group(&cdev->dev.kobj, &tape_attr_group);
__tape_discard_requests(struct tape_device *device)
	struct tape_request *	request;
	struct list_head *	l, *n;

	list_for_each_safe(l, n, &device->req_queue) {
		request = list_entry(l, struct tape_request, list);
		if (request->status == TAPE_REQUEST_IN_IO)
			request->status = TAPE_REQUEST_DONE;
		list_del(&request->list);

		/* Decrease ref_count for removed request. */
		request->device = tape_put_device(device);

		if (request->callback != NULL)
			request->callback(request, request->callback_data);
/*
 * Driverfs tape remove function.
 *
 * This function is called whenever the common I/O layer detects that the
 * device is gone. This can happen at any time and we cannot refuse.
 */
tape_generic_remove(struct ccw_device *cdev)
	struct tape_device *	device;

	device = cdev->dev.driver_data;

	PRINT_ERR("No device pointer in tape_generic_remove!\n");

	DBF_LH(3, "(%08x): tape_generic_remove(%p)\n", device->cdev_id, cdev);

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	switch (device->tape_state) {

		tape_state_set(device, TS_NOT_OPER);

		spin_unlock_irq(get_ccwdev_lock(device->cdev));

		/*
		 * Need only to release the device.
		 */
		tape_state_set(device, TS_NOT_OPER);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		tape_cleanup_device(device);

		/*
		 * There may be requests on the queue. We will not get
		 * an interrupt for a request that was running. So we
		 * just post them all as I/O errors.
		 */
		DBF_EVENT(3, "(%08x): Drive in use vanished!\n",

		PRINT_WARN("(%s): Drive in use vanished - "

			device->cdev->dev.bus_id);
		PRINT_WARN("State was %i\n", device->tape_state);
		tape_state_set(device, TS_NOT_OPER);
		__tape_discard_requests(device);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		tape_cleanup_device(device);

	if (cdev->dev.driver_data != NULL) {
		sysfs_remove_group(&cdev->dev.kobj, &tape_attr_group);
		cdev->dev.driver_data = tape_put_device(cdev->dev.driver_data);
/*
 * Allocate a new tape ccw request
 */
struct tape_request *
tape_alloc_request(int cplength, int datasize)
	struct tape_request *request;

	if (datasize > PAGE_SIZE || (cplength*sizeof(struct ccw1)) > PAGE_SIZE)

	DBF_LH(6, "tape_alloc_request(%d, %d)\n", cplength, datasize);

	request = (struct tape_request *) kmalloc(sizeof(struct tape_request),
	if (request == NULL) {
		DBF_EXCEPTION(1, "cqra nomem\n");
		return ERR_PTR(-ENOMEM);
	memset(request, 0, sizeof(struct tape_request));

	/* allocate channel program */
	request->cpaddr = kmalloc(cplength*sizeof(struct ccw1),
				  GFP_ATOMIC | GFP_DMA);
	if (request->cpaddr == NULL) {
		DBF_EXCEPTION(1, "cqra nomem\n");
		return ERR_PTR(-ENOMEM);
	memset(request->cpaddr, 0, cplength*sizeof(struct ccw1));

	/* alloc small kernel buffer */
	request->cpdata = kmalloc(datasize, GFP_KERNEL | GFP_DMA);
	if (request->cpdata == NULL) {
		DBF_EXCEPTION(1, "cqra nomem\n");
		kfree(request->cpaddr);
		return ERR_PTR(-ENOMEM);
	memset(request->cpdata, 0, datasize);

	DBF_LH(6, "New request %p(%p/%p)\n", request, request->cpaddr,

/*
 * Free tape ccw request
 */
tape_free_request(struct tape_request *request)
	DBF_LH(6, "Free request %p\n", request);

	if (request->device != NULL) {
		request->device = tape_put_device(request->device);
	kfree(request->cpdata);
	kfree(request->cpaddr);
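/*
 * Typical request life cycle (sketch only; the CCW setup helpers used by
 * the disciplines are omitted here):
 *
 *	struct tape_request *request;
 *
 *	request = tape_alloc_request(1, 0);
 *	if (IS_ERR(request))
 *		return PTR_ERR(request);
 *	request->op = TO_NOP;
 *	// ... build a one-CCW channel program in request->cpaddr ...
 *	rc = tape_do_io(device, request);
 *	tape_free_request(request);
 */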
__tape_start_io(struct tape_device *device, struct tape_request *request)
#ifdef CONFIG_S390_TAPE_BLOCK
	if (request->op == TO_BLOCK)
		device->discipline->check_locate(device, request);

	rc = ccw_device_start(

		(unsigned long) request,

		request->status = TAPE_REQUEST_IN_IO;
	} else if (rc == -EBUSY) {
		/* The common I/O subsystem is currently busy. Retry later. */
		request->status = TAPE_REQUEST_QUEUED;
		schedule_work(&device->tape_dnr);

		/* Start failed. Remove request and indicate failure. */
		DBF_EVENT(1, "tape: start request failed with RC = %i\n", rc);
__tape_start_next_request(struct tape_device *device)
	struct list_head *l, *n;
	struct tape_request *request;

	DBF_LH(6, "__tape_start_next_request(%p)\n", device);
	/*
	 * Try to start each request on request queue until one is
	 * started successfully.
	 */
	list_for_each_safe(l, n, &device->req_queue) {
		request = list_entry(l, struct tape_request, list);

		/*
		 * Avoid race condition if bottom-half was triggered more than
		 * once.
		 */
		if (request->status == TAPE_REQUEST_IN_IO)

		/*
		 * We wanted to cancel the request but the common I/O layer
		 * was busy at that time. This can only happen if this
		 * function is called by delayed_next_request.
		 * Otherwise we start the next request on the queue.
		 */
		if (request->status == TAPE_REQUEST_CANCEL) {
			rc = __tape_cancel_io(device, request);

			rc = __tape_start_io(device, request);

		/* Set ending status. */
		request->status = TAPE_REQUEST_DONE;

		/* Remove from request queue. */
		list_del(&request->list);

		if (request->callback != NULL)
			request->callback(request, request->callback_data);
tape_delayed_next_request(void *data)
	struct tape_device *	device;

	device = (struct tape_device *) data;
	DBF_LH(6, "tape_delayed_next_request(%p)\n", device);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	__tape_start_next_request(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	struct tape_device *	device,
	struct tape_request *	request,

	DBF_LH(6, "__tape_end_request(%p, %p, %i)\n", device, request, rc);

	request->status = TAPE_REQUEST_DONE;

	/* Remove from request queue. */
	list_del(&request->list);

	if (request->callback != NULL)
		request->callback(request, request->callback_data);

	/* Start next request. */
	if (!list_empty(&device->req_queue))
		__tape_start_next_request(device);
/*
 * Write sense data to console/dbf
 */
tape_dump_sense(struct tape_device* device, struct tape_request *request,
	PRINT_INFO("-------------------------------------------------\n");
	PRINT_INFO("DSTAT : %02x CSTAT: %02x CPA: %04x\n",
		   irb->scsw.dstat, irb->scsw.cstat, irb->scsw.cpa);
	PRINT_INFO("DEVICE: %s\n", device->cdev->dev.bus_id);

	PRINT_INFO("OP : %s\n", tape_op_verbose[request->op]);

	sptr = (unsigned int *) irb->ecw;
	PRINT_INFO("Sense data: %08X %08X %08X %08X \n",
		   sptr[0], sptr[1], sptr[2], sptr[3]);
	PRINT_INFO("Sense data: %08X %08X %08X %08X \n",
		   sptr[4], sptr[5], sptr[6], sptr[7]);
	PRINT_INFO("-------------------------------------------------\n");

/*
 * Write sense data to dbf
 */
tape_dump_sense_dbf(struct tape_device *device, struct tape_request *request,
	op = tape_op_verbose[request->op];

	DBF_EVENT(3, "DSTAT : %02x CSTAT: %02x\n",
		  irb->scsw.dstat, irb->scsw.cstat);
	DBF_EVENT(3, "DEVICE: %08x OP\t: %s\n", device->cdev_id, op);
	sptr = (unsigned int *) irb->ecw;
	DBF_EVENT(3, "%08x %08x\n", sptr[0], sptr[1]);
	DBF_EVENT(3, "%08x %08x\n", sptr[2], sptr[3]);
	DBF_EVENT(3, "%08x %08x\n", sptr[4], sptr[5]);
	DBF_EVENT(3, "%08x %08x\n", sptr[6], sptr[7]);
/*
 * I/O helper function. Adds the request to the request queue
 * and starts it if the tape is idle. Has to be called with
 * the device lock held.
 */
__tape_start_request(struct tape_device *device, struct tape_request *request)
	switch (request->op) {

		if (device->tape_state == TS_INIT)

		if (device->tape_state == TS_UNUSED)

		if (device->tape_state == TS_BLKUSE)

		if (device->tape_state != TS_IN_USE)

	/* Increase use count of device for the added request. */
	request->device = tape_get_device_reference(device);

	if (list_empty(&device->req_queue)) {
		/* No other requests are on the queue. Start this one. */
		rc = __tape_start_io(device, request);

		DBF_LH(5, "Request %p added for execution.\n", request);
		list_add(&request->list, &device->req_queue);

		DBF_LH(5, "Request %p add to queue.\n", request);
		request->status = TAPE_REQUEST_QUEUED;
		list_add_tail(&request->list, &device->req_queue);
/*
 * Add the request to the request queue, try to start it if the
 * tape is idle. Return without waiting for end of i/o.
 */
tape_do_io_async(struct tape_device *device, struct tape_request *request)
	DBF_LH(6, "tape_do_io_async(%p, %p)\n", device, request);

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Add request to request queue and try to start it. */
	rc = __tape_start_request(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
/*
 * tape_do_io/__tape_wake_up
 * Add the request to the request queue, try to start it if the
 * tape is idle and wait uninterruptibly for its completion.
 */
__tape_wake_up(struct tape_request *request, void *data)
	request->callback = NULL;
	wake_up((wait_queue_head_t *) data);

tape_do_io(struct tape_device *device, struct tape_request *request)
	wait_queue_head_t wq;

	init_waitqueue_head(&wq);
	spin_lock_irq(get_ccwdev_lock(device->cdev));

	request->callback = __tape_wake_up;
	request->callback_data = &wq;
	/* Add request to request queue and try to start it. */
	rc = __tape_start_request(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	/* Request added to the queue. Wait for its completion. */
	wait_event(wq, (request->callback == NULL));
	/* Get rc from request */
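/*
 * Note on the blocking scheme above: the request is queued with
 * __tape_wake_up as its callback, and wait_event() returns once the
 * callback has run and reset request->callback to NULL.
 */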
/*
 * tape_do_io_interruptible/__tape_wake_up_interruptible
 * Add the request to the request queue, try to start it if the
 * tape is idle and wait interruptibly for its completion.
 */
__tape_wake_up_interruptible(struct tape_request *request, void *data)
	request->callback = NULL;
	wake_up_interruptible((wait_queue_head_t *) data);

tape_do_io_interruptible(struct tape_device *device,
			 struct tape_request *request)
	wait_queue_head_t wq;

	init_waitqueue_head(&wq);
	spin_lock_irq(get_ccwdev_lock(device->cdev));

	request->callback = __tape_wake_up_interruptible;
	request->callback_data = &wq;
	rc = __tape_start_request(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	/* Request added to the queue. Wait for its completion. */
	rc = wait_event_interruptible(wq, (request->callback == NULL));
	if (rc != -ERESTARTSYS)
		/* Request finished normally. */

	/* Interrupted by a signal. We have to stop the current request. */
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = __tape_cancel_io(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	/* Wait for the interrupt that acknowledges the halt. */

		rc = wait_event_interruptible(

			(request->callback == NULL)

	} while (rc != -ERESTARTSYS);

	DBF_EVENT(3, "IO stopped on %08x\n", device->cdev_id);
/*
 * Tape interrupt routine, called from the ccw_device layer
 */
__tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
	struct tape_device *device;
	struct tape_request *request;

	device = (struct tape_device *) cdev->dev.driver_data;
	if (device == NULL) {
		PRINT_ERR("could not get device structure for %s "
			  "in interrupt\n", cdev->dev.bus_id);

	request = (struct tape_request *) intparm;

	DBF_LH(6, "__tape_do_irq(device=%p, request=%p)\n", device, request);

	/* On special conditions irb is an error pointer */

		/* FIXME: What to do with the request? */
		switch (PTR_ERR(irb)) {

				PRINT_WARN("(%s): Request timed out\n",

				__tape_end_request(device, request, -EIO);

				PRINT_ERR("(%s): Unexpected i/o error %li\n",

	/*
	 * If the condition code is not zero and the start function bit is
	 * still set, this is a deferred error and the last start I/O did
	 * not succeed. Restart the request now.
	 */
	if (irb->scsw.cc != 0 && (irb->scsw.fctl & SCSW_FCTL_START_FUNC)) {
		PRINT_WARN("(%s): deferred cc=%i. restarting\n",

		rc = __tape_start_io(device, request);

			__tape_end_request(device, request, rc);

	/* May be an unsolicited irq */

		request->rescnt = irb->scsw.count;

	if (irb->scsw.dstat != 0x0c) {
		/* Set the 'ONLINE' flag depending on sense byte 1 */
		if (*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE)
			device->tape_generic_status |= GMT_ONLINE(~0);

			device->tape_generic_status &= ~GMT_ONLINE(~0);

		/*
		 * Any request that does not come back with channel end
		 * and device end is unusual. Log the sense data.
		 */
		DBF_EVENT(3, "-- Tape Interrupthandler --\n");
		tape_dump_sense_dbf(device, request, irb);

		/* Upon normal completion the device _is_ online */
		device->tape_generic_status |= GMT_ONLINE(~0);

	if (device->tape_state == TS_NOT_OPER) {
		DBF_EVENT(6, "tape:device is not operational\n");

	/*
	 * Requests that were canceled still come back with an interrupt.
	 * To detect these requests the state will be set to TAPE_REQUEST_DONE.
	 */
	if (request != NULL && request->status == TAPE_REQUEST_DONE) {
		__tape_end_request(device, request, -EIO);

	rc = device->discipline->irq(device, request, irb);
	/*
	 * rc < 0 : request finished unsuccessfully.
	 * rc == TAPE_IO_SUCCESS: request finished successfully.
	 * rc == TAPE_IO_PENDING: request is still running. Ignore rc.
	 * rc == TAPE_IO_RETRY: request finished but needs another go.
	 * rc == TAPE_IO_STOP: request needs to get terminated.
	 */

	case TAPE_IO_SUCCESS:
		/* Upon normal completion the device _is_ online */
		device->tape_generic_status |= GMT_ONLINE(~0);
		__tape_end_request(device, request, rc);

	case TAPE_IO_PENDING:

		rc = __tape_start_io(device, request);

			__tape_end_request(device, request, rc);

		rc = __tape_cancel_io(device, request);

			__tape_end_request(device, request, rc);

		DBF_EVENT(6, "xunknownrc\n");
		PRINT_ERR("Invalid return code from discipline "
			  "interrupt function.\n");
		__tape_end_request(device, request, -EIO);

		__tape_end_request(device, request, rc);
/*
 * Tape device open function used by tape_char & tape_block frontends.
 */
tape_open(struct tape_device *device)
	spin_lock(get_ccwdev_lock(device->cdev));
	if (device->tape_state == TS_NOT_OPER) {
		DBF_EVENT(6, "TAPE:nodev\n");

	} else if (device->tape_state == TS_IN_USE) {
		DBF_EVENT(6, "TAPE:dbusy\n");

	} else if (device->tape_state == TS_BLKUSE) {
		DBF_EVENT(6, "TAPE:dbusy\n");

	} else if (device->discipline != NULL &&
		   !try_module_get(device->discipline->owner)) {
		DBF_EVENT(6, "TAPE:nodisc\n");

		tape_state_set(device, TS_IN_USE);

	spin_unlock(get_ccwdev_lock(device->cdev));

/*
 * Tape device release function used by tape_char & tape_block frontends.
 */
tape_release(struct tape_device *device)
	spin_lock(get_ccwdev_lock(device->cdev));
	if (device->tape_state == TS_IN_USE)
		tape_state_set(device, TS_UNUSED);
	module_put(device->discipline->owner);
	spin_unlock(get_ccwdev_lock(device->cdev));
/*
 * Execute a magnetic tape command a number of times.
 */
tape_mtop(struct tape_device *device, int mt_op, int mt_count)
	DBF_EVENT(6, "TAPE:mtio\n");
	DBF_EVENT(6, "TAPE:ioop: %x\n", mt_op);
	DBF_EVENT(6, "TAPE:arg: %x\n", mt_count);

	if (mt_op < 0 || mt_op >= TAPE_NR_MTOPS)

	fn = device->discipline->mtop_array[mt_op];

	/* We assume that the backends can handle count up to 500. */
	if (mt_op == MTBSR || mt_op == MTFSR || mt_op == MTFSF ||
	    mt_op == MTBSF || mt_op == MTFSFM || mt_op == MTBSFM) {

		for (; mt_count > 500; mt_count -= 500)
			if ((rc = fn(device, 500)) != 0)

		rc = fn(device, mt_count);

		rc = fn(device, mt_count);
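/*
 * Example of the chunking above: a repeat count of 1200 for MTFSF is
 * passed to the discipline as three calls, 500 + 500 + 200.
 */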
/*
 * Tape init function.
 */
	TAPE_DBF_AREA = debug_register("tape", 2, 2, 4 * sizeof(long));
	debug_register_view(TAPE_DBF_AREA, &debug_sprintf_view);
#ifdef DBF_LIKE_HELL
	debug_set_level(TAPE_DBF_AREA, 6);

	DBF_EVENT(3, "tape init\n");

/*
 * Tape exit function.
 */
	DBF_EVENT(6, "tape exit\n");

	/* Get rid of the frontends */

	tape_proc_cleanup();
	debug_unregister(TAPE_DBF_AREA);
MODULE_AUTHOR("(C) 2001 IBM Deutschland Entwicklung GmbH by Carsten Otte and "
	      "Michael Holzheu (cotte@de.ibm.com,holzheu@de.ibm.com)");
MODULE_DESCRIPTION("Linux on zSeries channel attached tape device driver");
MODULE_LICENSE("GPL");

module_init(tape_init);
module_exit(tape_exit);

EXPORT_SYMBOL(tape_generic_remove);
EXPORT_SYMBOL(tape_generic_probe);
EXPORT_SYMBOL(tape_generic_online);
EXPORT_SYMBOL(tape_generic_offline);
EXPORT_SYMBOL(tape_put_device);
EXPORT_SYMBOL(tape_get_device_reference);
EXPORT_SYMBOL(tape_state_verbose);
EXPORT_SYMBOL(tape_op_verbose);
EXPORT_SYMBOL(tape_state_set);
EXPORT_SYMBOL(tape_med_state_set);
EXPORT_SYMBOL(tape_alloc_request);
EXPORT_SYMBOL(tape_free_request);
EXPORT_SYMBOL(tape_dump_sense);
EXPORT_SYMBOL(tape_dump_sense_dbf);
EXPORT_SYMBOL(tape_do_io);
EXPORT_SYMBOL(tape_do_io_async);
EXPORT_SYMBOL(tape_do_io_interruptible);
EXPORT_SYMBOL(tape_mtop);