2 * IDE ATAPI streaming tape driver.
4 * Copyright (C) 1995-1999 Gadi Oxman <gadio@netvision.net.il>
5 * Copyright (C) 2003-2005 Bartlomiej Zolnierkiewicz
7 * This driver was constructed as a student project in the software laboratory
8 * of the faculty of electrical engineering at the Technion - Israel Institute
9 * of Technology, under the guidance of Avner Lottem and Dr. Ilana David.
11 * It is hereby placed under the terms of the GNU General Public License.
12 * (See linux/COPYING).
14 * For a historical changelog see
15 * Documentation/ide/ChangeLog.ide-tape.1995-2002
18 #define IDETAPE_VERSION "1.20"
20 #include <linux/module.h>
21 #include <linux/types.h>
22 #include <linux/string.h>
23 #include <linux/kernel.h>
24 #include <linux/delay.h>
25 #include <linux/timer.h>
27 #include <linux/interrupt.h>
28 #include <linux/jiffies.h>
29 #include <linux/major.h>
30 #include <linux/errno.h>
31 #include <linux/genhd.h>
32 #include <linux/slab.h>
33 #include <linux/pci.h>
34 #include <linux/ide.h>
35 #include <linux/smp_lock.h>
36 #include <linux/completion.h>
37 #include <linux/bitops.h>
38 #include <linux/mutex.h>
39 #include <scsi/scsi.h>
41 #include <asm/byteorder.h>
42 #include <linux/irq.h>
43 #include <linux/uaccess.h>
45 #include <asm/unaligned.h>
46 #include <linux/mtio.h>
49 /* output errors only */
51 /* output all sense key/asc */
53 /* info regarding all chrdev-related procedures */
54 DBG_CHRDEV = (1 << 2),
55 /* all remaining procedures */
57 /* buffer alloc info (pc_stack & rq_stack) */
58 DBG_PCRQ_STACK = (1 << 4),
61 /* set to 1 to see debug info */
62 #define IDETAPE_DEBUG_LOG 0
65 #define debug_log(lvl, fmt, args...) \
67 if (tape->debug_mask & lvl) \
68 printk(KERN_INFO "ide-tape: " fmt, ## args); \
71 #define debug_log(lvl, fmt, args...) do {} while (0)
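/*
 * Usage sketch (illustrative, not a new call site in the driver): throughout
 * this file the macro is invoked as, e.g.,
 *
 *	debug_log(DBG_CHRDEV, "Enter %s\n", __func__);
 *
 * and expands to nothing when IDETAPE_DEBUG_LOG is 0.
 */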
74 /**************************** Tunable parameters *****************************/
78 * Pipelined mode parameters.
80 * We try to use the minimum number of stages which is enough to keep the tape
81 * constantly streaming. To accomplish that, we implement a feedback loop around
82 * the maximum number of stages:
84 * We start from MIN maximum stages (we will not even use MIN stages if we don't
85 * need them), increment it by RATE*(MAX-MIN) whenever we sense that the
86 * pipeline is empty, until we reach the optimum value or until we reach MAX.
88 #define IDETAPE_MIN_PIPELINE_STAGES 1
89 #define IDETAPE_MAX_PIPELINE_STAGES 400
90 #define IDETAPE_INCREASE_STAGES_RATE 20
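/*
 * Illustrative sketch only (not part of the original driver): one possible
 * shape of the feedback step described above - grow the current stage limit
 * by a fraction of the MIN..MAX span and clamp it to the bounds. The helper
 * name and the exact step size are assumptions, not the driver's actual code.
 */
static inline int idetape_grow_max_stages_sketch(int cur_max_stages)
{
	int step = (IDETAPE_MAX_PIPELINE_STAGES - IDETAPE_MIN_PIPELINE_STAGES) /
		   IDETAPE_INCREASE_STAGES_RATE;

	if (step < 1)
		step = 1;
	cur_max_stages += step;
	if (cur_max_stages > IDETAPE_MAX_PIPELINE_STAGES)
		cur_max_stages = IDETAPE_MAX_PIPELINE_STAGES;
	return cur_max_stages;
}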
93 * After each failed packet command we issue a request sense command and retry
94 * the packet command IDETAPE_MAX_PC_RETRIES times.
96 * Setting IDETAPE_MAX_PC_RETRIES to 0 will disable retries.
98 #define IDETAPE_MAX_PC_RETRIES 3
101 * With each packet command, we allocate a buffer of IDETAPE_PC_BUFFER_SIZE
102 * bytes. This is used for several packet commands (Not for READ/WRITE commands)
104 #define IDETAPE_PC_BUFFER_SIZE 256
107 * In various places in the driver, we need to allocate storage
108 * for packet commands and requests, which will remain valid while
109 * we leave the driver to wait for an interrupt or a timeout event.
111 #define IDETAPE_PC_STACK (10 + IDETAPE_MAX_PC_RETRIES)
114 * Some drives (for example, Seagate STT3401A Travan) require a very long
115 * timeout, because they don't return an interrupt or clear their busy bit
116 * until after the command completes (even retension commands).
118 #define IDETAPE_WAIT_CMD (900*HZ)
121 * The following parameter is used to select the point in the internal tape fifo
122 * at which we will start to refill the buffer. Decreasing the following
123 * parameter will improve the system's latency and interactive response, while
124 * using a high value might improve system throughput.
126 #define IDETAPE_FIFO_THRESHOLD 2
129 * DSC polling parameters.
131 * Polling for DSC (a single bit in the status register) is a very important
132 * function in ide-tape. There are two cases in which we poll for DSC:
134 * 1. Before a read/write packet command, to ensure that we can transfer data
135 * from/to the tape's data buffers, without causing an actual media access.
136 * In case the tape is not ready yet, we remove our request from the device
137 * request queue, so that ide.c can service requests from the other device
138 * on the same interface in the meantime.
140 * 2. After the successful initialization of a "media access packet command",
141 * which is a command that can take a long time to complete (the interval can
142 * range from several seconds to even an hour). Again, we postpone our request
143 * in the middle to free the bus for the other device. The polling frequency
144 * here should be lower than the read/write frequency since those media access
145 * commands are slow. We start from a "fast" frequency - IDETAPE_DSC_MA_FAST
146 * (2 seconds), and if we don't receive DSC after IDETAPE_DSC_MA_THRESHOLD
147 * (5 min), we switch to a lower frequency - IDETAPE_DSC_MA_SLOW (30 seconds).
149 * We also set a timeout for the timer, in case something goes wrong. The
150 * timeout should be longer than the maximum execution time of a tape operation.
154 #define IDETAPE_DSC_RW_MIN 5*HZ/100 /* 50 msec */
155 #define IDETAPE_DSC_RW_MAX 40*HZ/100 /* 400 msec */
156 #define IDETAPE_DSC_RW_TIMEOUT 2*60*HZ /* 2 minutes */
157 #define IDETAPE_DSC_MA_FAST 2*HZ /* 2 seconds */
158 #define IDETAPE_DSC_MA_THRESHOLD 5*60*HZ /* 5 minutes */
159 #define IDETAPE_DSC_MA_SLOW 30*HZ /* 30 seconds */
160 #define IDETAPE_DSC_MA_TIMEOUT 2*60*60*HZ /* 2 hours */
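/*
 * Illustrative sketch only (not part of the original driver): how the media
 * access DSC poll frequency described above is chosen from the time at which
 * we started polling. The helper name is hypothetical; equivalent logic
 * appears inline in idetape_do_request().
 */
static inline unsigned long idetape_dsc_ma_poll_freq_sketch(unsigned long polling_start)
{
	/* After IDETAPE_DSC_MA_THRESHOLD of polling, drop to the slow rate */
	if (time_after(jiffies, polling_start + IDETAPE_DSC_MA_THRESHOLD))
		return IDETAPE_DSC_MA_SLOW;
	/* Otherwise keep polling at the fast rate */
	return IDETAPE_DSC_MA_FAST;
}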
162 /*************************** End of tunable parameters ***********************/
164 /* Read/Write error simulation */
165 #define SIMULATE_ERRORS 0
167 /* tape directions */
169 IDETAPE_DIR_NONE = (1 << 0),
170 IDETAPE_DIR_READ = (1 << 1),
171 IDETAPE_DIR_WRITE = (1 << 2),
177 struct idetape_bh *b_reqnext;
181 /* Tape door status */
182 #define DOOR_UNLOCKED 0
183 #define DOOR_LOCKED 1
184 #define DOOR_EXPLICITLY_LOCKED 2
186 /* Some defines for the SPACE command */
187 #define IDETAPE_SPACE_OVER_FILEMARK 1
188 #define IDETAPE_SPACE_TO_EOD 3
190 /* Some defines for the LOAD UNLOAD command */
191 #define IDETAPE_LU_LOAD_MASK 1
192 #define IDETAPE_LU_RETENSION_MASK 2
193 #define IDETAPE_LU_EOT_MASK 4
196 * Special requests for our block device strategy routine.
198 * In order to service a character device command, we add special requests to
199 * the tail of our block device request queue and wait for their completion.
203 REQ_IDETAPE_PC1 = (1 << 0), /* packet command (first stage) */
204 REQ_IDETAPE_PC2 = (1 << 1), /* packet command (second stage) */
205 REQ_IDETAPE_READ = (1 << 2),
206 REQ_IDETAPE_WRITE = (1 << 3),
209 /* Error codes returned in rq->errors to the higher part of the driver. */
210 #define IDETAPE_ERROR_GENERAL 101
211 #define IDETAPE_ERROR_FILEMARK 102
212 #define IDETAPE_ERROR_EOD 103
214 /* Structures related to the SELECT SENSE / MODE SENSE packet commands. */
215 #define IDETAPE_BLOCK_DESCRIPTOR 0
216 #define IDETAPE_CAPABILITIES_PAGE 0x2a
218 /* Tape flag bits values. */
220 IDETAPE_FLAG_IGNORE_DSC = (1 << 0),
221 /* 0 When the tape position is unknown */
222 IDETAPE_FLAG_ADDRESS_VALID = (1 << 1),
223 /* Device already opened */
224 IDETAPE_FLAG_BUSY = (1 << 2),
225 /* Attempt to auto-detect the current user block size */
226 IDETAPE_FLAG_DETECT_BS = (1 << 3),
227 /* Currently on a filemark */
228 IDETAPE_FLAG_FILEMARK = (1 << 4),
229 /* DRQ interrupt device */
230 IDETAPE_FLAG_DRQ_INTERRUPT = (1 << 5),
231 /* 0 = no tape is loaded, so we don't rewind after ejecting */
232 IDETAPE_FLAG_MEDIUM_PRESENT = (1 << 6),
235 /* A pipeline stage. */
236 typedef struct idetape_stage_s {
237 struct request rq; /* The corresponding request */
238 struct idetape_bh *bh; /* The data buffers */
239 struct idetape_stage_s *next; /* Pointer to the next stage */
243 * Most of our global data which we need to save even as we leave the driver due
244 * to an interrupt or a timer event is stored in the struct defined below.
246 typedef struct ide_tape_obj {
248 ide_driver_t *driver;
249 struct gendisk *disk;
253 * Since a typical character device operation requires more
254 * than one packet command, we provide here enough memory
255 * for the maximum number of interconnected packet commands.
256 * The packet commands are stored in the circular array pc_stack.
257 * pc_stack_index points to the next free entry, and wraps around
258 * to the start when we get to the last array entry.
260 * pc points to the current processed packet command.
262 * failed_pc points to the last failed packet command, or contains
263 * NULL if we do not need to retry any packet command. This is
264 * required since an additional packet command is needed before the
265 * retry, to get detailed information on what went wrong.
267 /* Current packet command */
268 struct ide_atapi_pc *pc;
269 /* Last failed packet command */
270 struct ide_atapi_pc *failed_pc;
271 /* Packet command stack */
272 struct ide_atapi_pc pc_stack[IDETAPE_PC_STACK];
273 /* Next free packet command storage space */
275 struct request rq_stack[IDETAPE_PC_STACK];
276 /* We implement a circular array */
280 * DSC polling variables.
282 * While polling for DSC we use postponed_rq to postpone the current
283 * request so that ide.c will be able to service pending requests on the
284 * other device. Note that at most we will have only one DSC (usually
285 * data transfer) request in the device request queue. Additional
286 * requests can be queued in our internal pipeline, but they will be
287 * visible to ide.c only one at a time.
289 struct request *postponed_rq;
290 /* The time in which we started polling for DSC */
291 unsigned long dsc_polling_start;
292 /* Timer used to poll for dsc */
293 struct timer_list dsc_timer;
294 /* Read/Write dsc polling frequency */
295 unsigned long best_dsc_rw_freq;
296 unsigned long dsc_poll_freq;
297 unsigned long dsc_timeout;
299 /* Read position information */
302 unsigned int first_frame;
304 /* Last error information */
305 u8 sense_key, asc, ascq;
307 /* Character device operation */
311 /* Current character device data transfer direction */
314 /* tape block size, usually 512 or 1024 bytes */
315 unsigned short blk_size;
318 /* Copy of the tape's Capabilities and Mechanical Page */
322 * Active data transfer request parameters.
324 * At most, there is only one ide-tape originated data transfer request
325 * in the device request queue. This allows ide.c to easily service
326 * requests from the other device when we postpone our active request.
327 * In the pipelined operation mode, we use our internal pipeline
328 * structure to hold more data requests. The data buffer size is chosen
329 * based on the tape's recommendation.
331 /* ptr to the request which is waiting in the device request queue */
332 struct request *active_data_rq;
333 /* Data buffer size chosen based on the tape's recommendation */
335 idetape_stage_t *merge_stage;
336 int merge_stage_size;
337 struct idetape_bh *bh;
342 * Pipeline parameters.
344 * To accomplish non-pipelined mode, we simply set the following
345 * variables to zero (or NULL, where appropriate).
347 /* Number of currently used stages */
349 /* Number of pending stages */
350 int nr_pending_stages;
351 /* We will not allocate more than this number of stages */
352 int max_stages, min_pipeline, max_pipeline;
353 /* The first stage which will be removed from the pipeline */
354 idetape_stage_t *first_stage;
355 /* The currently active stage */
356 idetape_stage_t *active_stage;
357 /* Will be serviced after the currently active request */
358 idetape_stage_t *next_stage;
359 /* New requests will be added to the pipeline here */
360 idetape_stage_t *last_stage;
362 /* Wasted space in each stage */
365 /* Status/Action flags: long for set_bit */
367 /* protects the ide-tape queue */
370 /* Measures average tape speed */
371 unsigned long avg_time;
375 /* the door is currently locked */
377 /* the tape hardware is write protected */
379 /* the tape is write protected (hardware or opened as read-only) */
383 * Limit the number of times a request can be postponed, to avoid an
384 * infinite postpone deadlock.
388 /* Speed control at the tape buffers input/output */
389 unsigned long insert_time;
392 int measure_insert_time;
397 static DEFINE_MUTEX(idetape_ref_mutex);
399 static struct class *idetape_sysfs_class;
401 #define to_ide_tape(obj) container_of(obj, struct ide_tape_obj, kref)
403 #define ide_tape_g(disk) \
404 container_of((disk)->private_data, struct ide_tape_obj, driver)
406 static struct ide_tape_obj *ide_tape_get(struct gendisk *disk)
408 struct ide_tape_obj *tape = NULL;
410 mutex_lock(&idetape_ref_mutex);
411 tape = ide_tape_g(disk);
413 kref_get(&tape->kref);
414 mutex_unlock(&idetape_ref_mutex);
418 static void ide_tape_release(struct kref *);
420 static void ide_tape_put(struct ide_tape_obj *tape)
422 mutex_lock(&idetape_ref_mutex);
423 kref_put(&tape->kref, ide_tape_release);
424 mutex_unlock(&idetape_ref_mutex);
428 * The variables below are used for the character device interface. Additional
429 * state variables are defined in our ide_drive_t structure.
431 static struct ide_tape_obj *idetape_devs[MAX_HWIFS * MAX_DRIVES];
433 #define ide_tape_f(file) ((file)->private_data)
435 static struct ide_tape_obj *ide_tape_chrdev_get(unsigned int i)
437 struct ide_tape_obj *tape = NULL;
439 mutex_lock(&idetape_ref_mutex);
440 tape = idetape_devs[i];
442 kref_get(&tape->kref);
443 mutex_unlock(&idetape_ref_mutex);
447 static void idetape_input_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
450 struct idetape_bh *bh = pc->bh;
455 printk(KERN_ERR "ide-tape: bh == NULL in "
456 "idetape_input_buffers\n");
457 ide_atapi_discard_data(drive, bcount);
461 (unsigned int)(bh->b_size - atomic_read(&bh->b_count)),
463 HWIF(drive)->atapi_input_bytes(drive, bh->b_data +
464 atomic_read(&bh->b_count), count);
466 atomic_add(count, &bh->b_count);
467 if (atomic_read(&bh->b_count) == bh->b_size) {
470 atomic_set(&bh->b_count, 0);
476 static void idetape_output_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
479 struct idetape_bh *bh = pc->bh;
484 printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
488 count = min((unsigned int)pc->b_count, (unsigned int)bcount);
489 HWIF(drive)->atapi_output_bytes(drive, pc->b_data, count);
492 pc->b_count -= count;
497 pc->b_data = bh->b_data;
498 pc->b_count = atomic_read(&bh->b_count);
504 static void idetape_update_buffers(struct ide_atapi_pc *pc)
506 struct idetape_bh *bh = pc->bh;
508 unsigned int bcount = pc->xferred;
510 if (pc->flags & PC_FLAG_WRITING)
514 printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
518 count = min((unsigned int)bh->b_size, (unsigned int)bcount);
519 atomic_set(&bh->b_count, count);
520 if (atomic_read(&bh->b_count) == bh->b_size)
528 * idetape_next_pc_storage returns a pointer to a place in which we can
529 * safely store a packet command, even though we intend to leave the
530 * driver. A storage space for a maximum of IDETAPE_PC_STACK packet
531 * commands is allocated at initialization time.
533 static struct ide_atapi_pc *idetape_next_pc_storage(ide_drive_t *drive)
535 idetape_tape_t *tape = drive->driver_data;
537 debug_log(DBG_PCRQ_STACK, "pc_stack_index=%d\n", tape->pc_stack_index);
539 if (tape->pc_stack_index == IDETAPE_PC_STACK)
540 tape->pc_stack_index = 0;
541 return (&tape->pc_stack[tape->pc_stack_index++]);
545 * idetape_next_rq_storage is used along with idetape_next_pc_storage.
546 * Since we queue packet commands in the request queue, we need to
547 * allocate a request, along with the allocation of a packet command.
550 /**************************************************************
552 * This should get fixed to use kmalloc(.., GFP_ATOMIC) *
553 * followed later on by kfree(). -ml *
555 **************************************************************/
557 static struct request *idetape_next_rq_storage(ide_drive_t *drive)
559 idetape_tape_t *tape = drive->driver_data;
561 debug_log(DBG_PCRQ_STACK, "rq_stack_index=%d\n", tape->rq_stack_index);
563 if (tape->rq_stack_index == IDETAPE_PC_STACK)
564 tape->rq_stack_index = 0;
565 return (&tape->rq_stack[tape->rq_stack_index++]);
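/*
 * Illustrative sketch only, not part of the driver: what the fix suggested
 * in the note above might look like - allocating the request dynamically
 * with GFP_ATOMIC instead of handing out a slot from the static rq_stack.
 * The helper name is hypothetical, and the completion path would then have
 * to kfree() the request once it is finished.
 */
static struct request *idetape_alloc_rq_sketch(void)
{
	return kzalloc(sizeof(struct request), GFP_ATOMIC);
}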
568 static void idetape_init_pc(struct ide_atapi_pc *pc)
570 memset(pc->c, 0, 12);
574 pc->buf = pc->pc_buf;
575 pc->buf_size = IDETAPE_PC_BUFFER_SIZE;
581 * called on each failed packet command retry to analyze the request sense. We
582 * currently do not utilize this information.
584 static void idetape_analyze_error(ide_drive_t *drive, u8 *sense)
586 idetape_tape_t *tape = drive->driver_data;
587 struct ide_atapi_pc *pc = tape->failed_pc;
589 tape->sense_key = sense[2] & 0xF;
590 tape->asc = sense[12];
591 tape->ascq = sense[13];
593 debug_log(DBG_ERR, "pc = %x, sense key = %x, asc = %x, ascq = %x\n",
594 pc->c[0], tape->sense_key, tape->asc, tape->ascq);
596 /* Correct pc->xferred by asking the tape. */
597 if (pc->flags & PC_FLAG_DMA_ERROR) {
598 pc->xferred = pc->req_xfer -
600 be32_to_cpu(get_unaligned((u32 *)&sense[3]));
601 idetape_update_buffers(pc);
605 * If the error was the result of a zero-length read or write command,
606 * with sense key=5, asc=0x22, ascq=0, let it slide. Some drives
607 * (i.e. Seagate STT3401A Travan) don't support 0-length read/writes.
609 if ((pc->c[0] == READ_6 || pc->c[0] == WRITE_6)
611 && pc->c[4] == 0 && pc->c[3] == 0 && pc->c[2] == 0) {
612 if (tape->sense_key == 5) {
613 /* don't report an error, everything's ok */
615 /* don't retry read/write */
616 pc->flags |= PC_FLAG_ABORT;
619 if (pc->c[0] == READ_6 && (sense[2] & 0x80)) {
620 pc->error = IDETAPE_ERROR_FILEMARK;
621 pc->flags |= PC_FLAG_ABORT;
623 if (pc->c[0] == WRITE_6) {
624 if ((sense[2] & 0x40) || (tape->sense_key == 0xd
625 && tape->asc == 0x0 && tape->ascq == 0x2)) {
626 pc->error = IDETAPE_ERROR_EOD;
627 pc->flags |= PC_FLAG_ABORT;
630 if (pc->c[0] == READ_6 || pc->c[0] == WRITE_6) {
631 if (tape->sense_key == 8) {
632 pc->error = IDETAPE_ERROR_EOD;
633 pc->flags |= PC_FLAG_ABORT;
635 if (!(pc->flags & PC_FLAG_ABORT) &&
637 pc->retries = IDETAPE_MAX_PC_RETRIES + 1;
641 /* Free a stage along with its related buffers completely. */
642 static void __idetape_kfree_stage(idetape_stage_t *stage)
644 struct idetape_bh *prev_bh, *bh = stage->bh;
648 if (bh->b_data != NULL) {
649 size = (int) bh->b_size;
651 free_page((unsigned long) bh->b_data);
653 bh->b_data += PAGE_SIZE;
664 * Finish servicing a request and insert a pending pipeline request into the
667 static int idetape_end_request(ide_drive_t *drive, int uptodate, int nr_sects)
669 struct request *rq = HWGROUP(drive)->rq;
670 idetape_tape_t *tape = drive->driver_data;
674 debug_log(DBG_PROCS, "Enter %s\n", __func__);
677 case 0: error = IDETAPE_ERROR_GENERAL; break;
678 case 1: error = 0; break;
679 default: error = uptodate;
683 tape->failed_pc = NULL;
685 if (!blk_special_request(rq)) {
686 ide_end_request(drive, uptodate, nr_sects);
690 spin_lock_irqsave(&tape->lock, flags);
692 ide_end_drive_cmd(drive, 0, 0);
694 spin_unlock_irqrestore(&tape->lock, flags);
698 static ide_startstop_t idetape_request_sense_callback(ide_drive_t *drive)
700 idetape_tape_t *tape = drive->driver_data;
702 debug_log(DBG_PROCS, "Enter %s\n", __func__);
704 if (!tape->pc->error) {
705 idetape_analyze_error(drive, tape->pc->buf);
706 idetape_end_request(drive, 1, 0);
708 printk(KERN_ERR "ide-tape: Error in REQUEST SENSE itself - "
709 "Aborting request!\n");
710 idetape_end_request(drive, 0, 0);
715 static void idetape_create_request_sense_cmd(struct ide_atapi_pc *pc)
718 pc->c[0] = REQUEST_SENSE;
721 pc->idetape_callback = &idetape_request_sense_callback;
724 static void idetape_init_rq(struct request *rq, u8 cmd)
726 memset(rq, 0, sizeof(*rq));
727 rq->cmd_type = REQ_TYPE_SPECIAL;
732 * Generate a new packet command request in front of the request queue, before
733 * the current request, so that it will be processed immediately, on the next
734 * pass through the driver. The function below is called from the request
735 * handling part of the driver (the "bottom" part). Safe storage for the request
736 * should be allocated with idetape_next_{pc,rq}_storage() prior to that.
738 * Memory for those requests is pre-allocated at initialization time, and is
739 * limited to IDETAPE_PC_STACK requests. We assume that we have enough space for
740 * the maximum possible number of inter-dependent packet commands.
742 * The higher level of the driver - the ioctl handler and the character device
743 * handling functions - should queue requests to the lower level part and wait
744 * for their completion using idetape_queue_pc_tail or idetape_queue_rw_tail.
746 static void idetape_queue_pc_head(ide_drive_t *drive, struct ide_atapi_pc *pc,
749 struct ide_tape_obj *tape = drive->driver_data;
751 idetape_init_rq(rq, REQ_IDETAPE_PC1);
752 rq->buffer = (char *) pc;
753 rq->rq_disk = tape->disk;
754 (void) ide_do_drive_cmd(drive, rq, ide_preempt);
758 * idetape_retry_pc is called when an error was detected during the
759 * last packet command. We queue a request sense packet command in
760 * the head of the request list.
762 static ide_startstop_t idetape_retry_pc (ide_drive_t *drive)
764 idetape_tape_t *tape = drive->driver_data;
765 struct ide_atapi_pc *pc;
768 (void)ide_read_error(drive);
769 pc = idetape_next_pc_storage(drive);
770 rq = idetape_next_rq_storage(drive);
771 idetape_create_request_sense_cmd(pc);
772 set_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags);
773 idetape_queue_pc_head(drive, pc, rq);
778 * Postpone the current request so that ide.c will be able to service requests
779 * from another device on the same hwgroup while we are polling for DSC.
781 static void idetape_postpone_request(ide_drive_t *drive)
783 idetape_tape_t *tape = drive->driver_data;
785 debug_log(DBG_PROCS, "Enter %s\n", __func__);
787 tape->postponed_rq = HWGROUP(drive)->rq;
788 ide_stall_queue(drive, tape->dsc_poll_freq);
791 typedef void idetape_io_buf(ide_drive_t *, struct ide_atapi_pc *, unsigned int);
794 * This is the usual interrupt handler which will be called during a packet
795 * command. We will transfer some of the data (as requested by the drive) and
796 * will re-point the interrupt handler to us. When data transfer is finished, we
797 * will act according to the algorithm described before
800 static ide_startstop_t idetape_pc_intr(ide_drive_t *drive)
802 ide_hwif_t *hwif = drive->hwif;
803 idetape_tape_t *tape = drive->driver_data;
804 struct ide_atapi_pc *pc = tape->pc;
805 xfer_func_t *xferfunc;
806 idetape_io_buf *iobuf;
809 static int error_sim_count;
814 debug_log(DBG_PROCS, "Enter %s - interrupt handler\n", __func__);
816 /* Clear the interrupt */
817 stat = ide_read_status(drive);
819 if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) {
820 if (hwif->dma_ops->dma_end(drive) || (stat & ERR_STAT)) {
822 * A DMA error is sometimes expected. For example,
823 * if the tape is crossing a filemark during a
824 * READ command, it will issue an irq and position
825 * itself before the filemark, so that only a partial
826 * data transfer will occur (which causes the DMA
827 * error). In that case, we will later ask the tape
828 * how many bytes of the original request were
829 * actually transferred (we can't receive that
830 * information from the DMA engine on most chipsets).
834 * On the contrary, a DMA error is never expected;
835 * it usually indicates a hardware error or abort.
836 * If the tape crosses a filemark during a READ
837 * command, it will issue an irq and position itself
838 * after the filemark (not before). Only a partial
839 * data transfer will occur, but no DMA error.
842 pc->flags |= PC_FLAG_DMA_ERROR;
844 pc->xferred = pc->req_xfer;
845 idetape_update_buffers(pc);
847 debug_log(DBG_PROCS, "DMA finished\n");
851 /* No more interrupts */
852 if ((stat & DRQ_STAT) == 0) {
853 debug_log(DBG_SENSE, "Packet command completed, %d bytes"
854 " transferred\n", pc->xferred);
856 pc->flags &= ~PC_FLAG_DMA_IN_PROGRESS;
860 if ((pc->c[0] == WRITE_6 || pc->c[0] == READ_6) &&
861 (++error_sim_count % 100) == 0) {
862 printk(KERN_INFO "ide-tape: %s: simulating error\n",
867 if ((stat & ERR_STAT) && pc->c[0] == REQUEST_SENSE)
869 if ((stat & ERR_STAT) || (pc->flags & PC_FLAG_DMA_ERROR)) {
871 debug_log(DBG_ERR, "%s: I/O error\n", tape->name);
873 if (pc->c[0] == REQUEST_SENSE) {
874 printk(KERN_ERR "ide-tape: I/O error in request"
876 return ide_do_reset(drive);
878 debug_log(DBG_ERR, "[cmd %x]: check condition\n",
881 /* Retry operation */
882 return idetape_retry_pc(drive);
885 if ((pc->flags & PC_FLAG_WAIT_FOR_DSC) &&
886 (stat & SEEK_STAT) == 0) {
887 /* Media access command */
888 tape->dsc_polling_start = jiffies;
889 tape->dsc_poll_freq = IDETAPE_DSC_MA_FAST;
890 tape->dsc_timeout = jiffies + IDETAPE_DSC_MA_TIMEOUT;
891 /* Allow ide.c to handle other requests */
892 idetape_postpone_request(drive);
895 if (tape->failed_pc == pc)
896 tape->failed_pc = NULL;
897 /* Command finished - Call the callback function */
898 return pc->idetape_callback(drive);
901 if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) {
902 pc->flags &= ~PC_FLAG_DMA_IN_PROGRESS;
903 printk(KERN_ERR "ide-tape: The tape wants to issue more "
904 "interrupts in DMA mode\n");
905 printk(KERN_ERR "ide-tape: DMA disabled, reverting to PIO\n");
907 return ide_do_reset(drive);
909 /* Get the number of bytes to transfer on this interrupt. */
910 bcount = (hwif->INB(hwif->io_ports[IDE_BCOUNTH_OFFSET]) << 8) |
911 hwif->INB(hwif->io_ports[IDE_BCOUNTL_OFFSET]);
913 ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
916 printk(KERN_ERR "ide-tape: CoD != 0 in %s\n", __func__);
917 return ide_do_reset(drive);
919 if (((ireason & IO) == IO) == !!(pc->flags & PC_FLAG_WRITING)) {
920 /* Hopefully, we will never get here */
921 printk(KERN_ERR "ide-tape: We wanted to %s, ",
922 (ireason & IO) ? "Write" : "Read");
923 printk(KERN_ERR "ide-tape: but the tape wants us to %s !\n",
924 (ireason & IO) ? "Read" : "Write");
925 return ide_do_reset(drive);
927 if (!(pc->flags & PC_FLAG_WRITING)) {
928 /* Reading - Check that we have enough space */
929 temp = pc->xferred + bcount;
930 if (temp > pc->req_xfer) {
931 if (temp > pc->buf_size) {
932 printk(KERN_ERR "ide-tape: The tape wants to "
933 "send us more data than expected "
934 "- discarding data\n");
935 ide_atapi_discard_data(drive, bcount);
936 ide_set_handler(drive, &idetape_pc_intr,
937 IDETAPE_WAIT_CMD, NULL);
940 debug_log(DBG_SENSE, "The tape wants to send us more "
941 "data than expected - allowing transfer\n");
943 iobuf = &idetape_input_buffers;
944 xferfunc = hwif->atapi_input_bytes;
946 iobuf = &idetape_output_buffers;
947 xferfunc = hwif->atapi_output_bytes;
951 iobuf(drive, pc, bcount);
953 xferfunc(drive, pc->cur_pos, bcount);
955 /* Update the current position */
956 pc->xferred += bcount;
957 pc->cur_pos += bcount;
959 debug_log(DBG_SENSE, "[cmd %x] transferred %d bytes on that intr.\n",
962 /* And set the interrupt handler again */
963 ide_set_handler(drive, &idetape_pc_intr, IDETAPE_WAIT_CMD, NULL);
968 * Packet Command Interface
970 * The current Packet Command is available in tape->pc, and will not change
971 * until we finish handling it. Each packet command is associated with a
972 * callback function that will be called when the command is finished.
974 * The handling will be done in several stages:
976 * 1. idetape_issue_pc will send the packet command to the drive, and will set
977 * the interrupt handler to idetape_pc_intr.
979 * 2. On each interrupt, idetape_pc_intr will be called. This step will be
980 * repeated until the device signals us that no more interrupts will be issued.
982 * 3. ATAPI Tape media access commands have immediate status with a delayed
983 * process. In case of a successful initiation of a media access packet command,
984 * the DSC bit will be set when the actual execution of the command is finished.
985 * Since the tape drive will not issue an interrupt, we have to poll for this
986 * event. In this case, we define the request as "low priority request" by
987 * setting rq_status to IDETAPE_RQ_POSTPONED, set a timer to poll for DSC and
990 * ide.c will then give higher priority to requests which originate from the
991 * other device, until we change rq_status to RQ_ACTIVE.
993 * 4. When the packet command is finished, it will be checked for errors.
995 * 5. In case an error was found, we queue a request sense packet command in
996 * front of the request queue and retry the operation up to
997 * IDETAPE_MAX_PC_RETRIES times.
999 * 6. In case no error was found, or we decided to give up and not to retry
1000 * again, the callback function will be called and then we will handle the next
1003 static ide_startstop_t idetape_transfer_pc(ide_drive_t *drive)
1005 ide_hwif_t *hwif = drive->hwif;
1006 idetape_tape_t *tape = drive->driver_data;
1007 struct ide_atapi_pc *pc = tape->pc;
1009 ide_startstop_t startstop;
1012 if (ide_wait_stat(&startstop, drive, DRQ_STAT, BUSY_STAT, WAIT_READY)) {
1013 printk(KERN_ERR "ide-tape: Strange, packet command initiated "
1014 "yet DRQ isn't asserted\n");
1017 ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
1018 while (retries-- && ((ireason & CD) == 0 || (ireason & IO))) {
1019 printk(KERN_ERR "ide-tape: (IO,CoD) != (0,1) while issuing "
1020 "a packet command, retrying\n");
1022 ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
1024 printk(KERN_ERR "ide-tape: (IO,CoD) != (0,1) while "
1025 "issuing a packet command, ignoring\n");
1030 if ((ireason & CD) == 0 || (ireason & IO)) {
1031 printk(KERN_ERR "ide-tape: (IO,CoD) != (0,1) while issuing "
1032 "a packet command\n");
1033 return ide_do_reset(drive);
1035 /* Set the interrupt routine */
1036 ide_set_handler(drive, &idetape_pc_intr, IDETAPE_WAIT_CMD, NULL);
1037 #ifdef CONFIG_BLK_DEV_IDEDMA
1038 /* Begin DMA, if necessary */
1039 if (pc->flags & PC_FLAG_DMA_IN_PROGRESS)
1040 hwif->dma_ops->dma_start(drive);
1042 /* Send the actual packet */
1043 HWIF(drive)->atapi_output_bytes(drive, pc->c, 12);
1047 static ide_startstop_t idetape_issue_pc(ide_drive_t *drive,
1048 struct ide_atapi_pc *pc)
1050 ide_hwif_t *hwif = drive->hwif;
1051 idetape_tape_t *tape = drive->driver_data;
1055 if (tape->pc->c[0] == REQUEST_SENSE &&
1056 pc->c[0] == REQUEST_SENSE) {
1057 printk(KERN_ERR "ide-tape: possible ide-tape.c bug - "
1058 "Two request sense in serial were issued\n");
1061 if (tape->failed_pc == NULL && pc->c[0] != REQUEST_SENSE)
1062 tape->failed_pc = pc;
1063 /* Set the current packet command */
1066 if (pc->retries > IDETAPE_MAX_PC_RETRIES ||
1067 (pc->flags & PC_FLAG_ABORT)) {
1069 * We will "abort" retrying a packet command in case a legitimate
1070 * error code was received (crossing a filemark, or end of the
1071 * media, for example).
1073 if (!(pc->flags & PC_FLAG_ABORT)) {
1074 if (!(pc->c[0] == TEST_UNIT_READY &&
1075 tape->sense_key == 2 && tape->asc == 4 &&
1076 (tape->ascq == 1 || tape->ascq == 8))) {
1077 printk(KERN_ERR "ide-tape: %s: I/O error, "
1078 "pc = %2x, key = %2x, "
1079 "asc = %2x, ascq = %2x\n",
1080 tape->name, pc->c[0],
1081 tape->sense_key, tape->asc,
1085 pc->error = IDETAPE_ERROR_GENERAL;
1087 tape->failed_pc = NULL;
1088 return pc->idetape_callback(drive);
1090 debug_log(DBG_SENSE, "Retry #%d, cmd = %02X\n", pc->retries, pc->c[0]);
1093 /* We haven't transferred any data yet */
1095 pc->cur_pos = pc->buf;
1096 /* Request to transfer the entire buffer at once */
1097 bcount = pc->req_xfer;
1099 if (pc->flags & PC_FLAG_DMA_ERROR) {
1100 pc->flags &= ~PC_FLAG_DMA_ERROR;
1101 printk(KERN_WARNING "ide-tape: DMA disabled, "
1102 "reverting to PIO\n");
1105 if ((pc->flags & PC_FLAG_DMA_RECOMMENDED) && drive->using_dma)
1106 dma_ok = !hwif->dma_ops->dma_setup(drive);
1108 ide_pktcmd_tf_load(drive, IDE_TFLAG_NO_SELECT_MASK |
1109 IDE_TFLAG_OUT_DEVICE, bcount, dma_ok);
1112 /* Will begin DMA later */
1113 pc->flags |= PC_FLAG_DMA_IN_PROGRESS;
1114 if (test_bit(IDETAPE_FLAG_DRQ_INTERRUPT, &tape->flags)) {
1115 ide_execute_command(drive, WIN_PACKETCMD, &idetape_transfer_pc,
1116 IDETAPE_WAIT_CMD, NULL);
1119 hwif->OUTB(WIN_PACKETCMD, hwif->io_ports[IDE_COMMAND_OFFSET]);
1120 return idetape_transfer_pc(drive);
1124 static ide_startstop_t idetape_pc_callback(ide_drive_t *drive)
1126 idetape_tape_t *tape = drive->driver_data;
1128 debug_log(DBG_PROCS, "Enter %s\n", __func__);
1130 idetape_end_request(drive, tape->pc->error ? 0 : 1, 0);
1134 /* A mode sense command is used to "sense" tape parameters. */
1135 static void idetape_create_mode_sense_cmd(struct ide_atapi_pc *pc, u8 page_code)
1137 idetape_init_pc(pc);
1138 pc->c[0] = MODE_SENSE;
1139 if (page_code != IDETAPE_BLOCK_DESCRIPTOR)
1140 /* DBD = 1 - Don't return block descriptors */
1142 pc->c[2] = page_code;
1144 * Changed pc->c[3] to 0 (255 will at best return unused info).
1146 * For SCSI this byte is defined as subpage instead of high byte
1147 * of length and some IDE drives seem to interpret it this way
1148 * and return an error when 255 is used.
1151 /* We will just discard data in that case */
1153 if (page_code == IDETAPE_BLOCK_DESCRIPTOR)
1155 else if (page_code == IDETAPE_CAPABILITIES_PAGE)
1159 pc->idetape_callback = &idetape_pc_callback;
1162 static ide_startstop_t idetape_media_access_finished(ide_drive_t *drive)
1164 idetape_tape_t *tape = drive->driver_data;
1165 struct ide_atapi_pc *pc = tape->pc;
1168 stat = ide_read_status(drive);
1170 if (stat & SEEK_STAT) {
1171 if (stat & ERR_STAT) {
1172 /* Error detected */
1173 if (pc->c[0] != TEST_UNIT_READY)
1174 printk(KERN_ERR "ide-tape: %s: I/O error, ",
1176 /* Retry operation */
1177 return idetape_retry_pc(drive);
1180 if (tape->failed_pc == pc)
1181 tape->failed_pc = NULL;
1183 pc->error = IDETAPE_ERROR_GENERAL;
1184 tape->failed_pc = NULL;
1186 return pc->idetape_callback(drive);
1189 static ide_startstop_t idetape_rw_callback(ide_drive_t *drive)
1191 idetape_tape_t *tape = drive->driver_data;
1192 struct request *rq = HWGROUP(drive)->rq;
1193 int blocks = tape->pc->xferred / tape->blk_size;
1195 tape->avg_size += blocks * tape->blk_size;
1196 tape->insert_size += blocks * tape->blk_size;
1197 if (tape->insert_size > 1024 * 1024)
1198 tape->measure_insert_time = 1;
1199 if (tape->measure_insert_time) {
1200 tape->measure_insert_time = 0;
1201 tape->insert_time = jiffies;
1202 tape->insert_size = 0;
1204 if (time_after(jiffies, tape->insert_time))
1205 tape->insert_speed = tape->insert_size / 1024 * HZ /
1206 (jiffies - tape->insert_time);
1207 if (time_after_eq(jiffies, tape->avg_time + HZ)) {
1208 tape->avg_speed = tape->avg_size * HZ /
1209 (jiffies - tape->avg_time) / 1024;
1211 tape->avg_time = jiffies;
1213 debug_log(DBG_PROCS, "Enter %s\n", __func__);
1215 tape->first_frame += blocks;
1216 rq->current_nr_sectors -= blocks;
1218 if (!tape->pc->error)
1219 idetape_end_request(drive, 1, 0);
1221 idetape_end_request(drive, tape->pc->error, 0);
1225 static void idetape_create_read_cmd(idetape_tape_t *tape,
1226 struct ide_atapi_pc *pc,
1227 unsigned int length, struct idetape_bh *bh)
1229 idetape_init_pc(pc);
1231 put_unaligned(cpu_to_be32(length), (unsigned int *) &pc->c[1]);
1233 pc->idetape_callback = &idetape_rw_callback;
1235 atomic_set(&bh->b_count, 0);
1237 pc->buf_size = length * tape->blk_size;
1238 pc->req_xfer = pc->buf_size;
1239 if (pc->req_xfer == tape->stage_size)
1240 pc->flags |= PC_FLAG_DMA_RECOMMENDED;
1243 static void idetape_create_write_cmd(idetape_tape_t *tape,
1244 struct ide_atapi_pc *pc,
1245 unsigned int length, struct idetape_bh *bh)
1247 idetape_init_pc(pc);
1249 put_unaligned(cpu_to_be32(length), (unsigned int *) &pc->c[1]);
1251 pc->idetape_callback = &idetape_rw_callback;
1252 pc->flags |= PC_FLAG_WRITING;
1254 pc->b_data = bh->b_data;
1255 pc->b_count = atomic_read(&bh->b_count);
1257 pc->buf_size = length * tape->blk_size;
1258 pc->req_xfer = pc->buf_size;
1259 if (pc->req_xfer == tape->stage_size)
1260 pc->flags |= PC_FLAG_DMA_RECOMMENDED;
1263 static ide_startstop_t idetape_do_request(ide_drive_t *drive,
1264 struct request *rq, sector_t block)
1266 idetape_tape_t *tape = drive->driver_data;
1267 struct ide_atapi_pc *pc = NULL;
1268 struct request *postponed_rq = tape->postponed_rq;
1271 debug_log(DBG_SENSE, "sector: %ld, nr_sectors: %ld,"
1272 " current_nr_sectors: %d\n",
1273 rq->sector, rq->nr_sectors, rq->current_nr_sectors);
1275 if (!blk_special_request(rq)) {
1276 /* We do not support buffer cache originated requests. */
1277 printk(KERN_NOTICE "ide-tape: %s: Unsupported request in "
1278 "request queue (%d)\n", drive->name, rq->cmd_type);
1279 ide_end_request(drive, 0, 0);
1283 /* Retry a failed packet command */
1284 if (tape->failed_pc && tape->pc->c[0] == REQUEST_SENSE)
1285 return idetape_issue_pc(drive, tape->failed_pc);
1287 if (postponed_rq != NULL)
1288 if (rq != postponed_rq) {
1289 printk(KERN_ERR "ide-tape: ide-tape.c bug - "
1290 "Two DSC requests were queued\n");
1291 idetape_end_request(drive, 0, 0);
1295 tape->postponed_rq = NULL;
1298 * If the tape is still busy, postpone our request and service
1299 * the other device meanwhile.
1301 stat = ide_read_status(drive);
1303 if (!drive->dsc_overlap && !(rq->cmd[0] & REQ_IDETAPE_PC2))
1304 set_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags);
1306 if (drive->post_reset == 1) {
1307 set_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags);
1308 drive->post_reset = 0;
1311 if (time_after(jiffies, tape->insert_time))
1312 tape->insert_speed = tape->insert_size / 1024 * HZ /
1313 (jiffies - tape->insert_time);
1314 if (!test_and_clear_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags) &&
1315 (stat & SEEK_STAT) == 0) {
1316 if (postponed_rq == NULL) {
1317 tape->dsc_polling_start = jiffies;
1318 tape->dsc_poll_freq = tape->best_dsc_rw_freq;
1319 tape->dsc_timeout = jiffies + IDETAPE_DSC_RW_TIMEOUT;
1320 } else if (time_after(jiffies, tape->dsc_timeout)) {
1321 printk(KERN_ERR "ide-tape: %s: DSC timeout\n",
1323 if (rq->cmd[0] & REQ_IDETAPE_PC2) {
1324 idetape_media_access_finished(drive);
1327 return ide_do_reset(drive);
1329 } else if (time_after(jiffies,
1330 tape->dsc_polling_start +
1331 IDETAPE_DSC_MA_THRESHOLD))
1332 tape->dsc_poll_freq = IDETAPE_DSC_MA_SLOW;
1333 idetape_postpone_request(drive);
1336 if (rq->cmd[0] & REQ_IDETAPE_READ) {
1337 tape->postpone_cnt = 0;
1338 pc = idetape_next_pc_storage(drive);
1339 idetape_create_read_cmd(tape, pc, rq->current_nr_sectors,
1340 (struct idetape_bh *)rq->special);
1343 if (rq->cmd[0] & REQ_IDETAPE_WRITE) {
1344 tape->postpone_cnt = 0;
1345 pc = idetape_next_pc_storage(drive);
1346 idetape_create_write_cmd(tape, pc, rq->current_nr_sectors,
1347 (struct idetape_bh *)rq->special);
1350 if (rq->cmd[0] & REQ_IDETAPE_PC1) {
1351 pc = (struct ide_atapi_pc *) rq->buffer;
1352 rq->cmd[0] &= ~(REQ_IDETAPE_PC1);
1353 rq->cmd[0] |= REQ_IDETAPE_PC2;
1356 if (rq->cmd[0] & REQ_IDETAPE_PC2) {
1357 idetape_media_access_finished(drive);
1362 return idetape_issue_pc(drive, pc);
1365 /* Pipeline related functions */
1368 * The function below uses __get_free_page to allocate a pipeline stage, along
1369 * with all the necessary small buffers which together make a buffer of size
1370 * tape->stage_size (or a bit more). We attempt to combine sequential pages as
1373 * It returns a pointer to the newly allocated stage, or NULL if we can't (or
1374 * don't want to) allocate a stage.
1376 * Pipeline stages are optional and are used to increase performance. If we
1377 * can't allocate them, we'll manage without them.
1379 static idetape_stage_t *__idetape_kmalloc_stage(idetape_tape_t *tape, int full,
1382 idetape_stage_t *stage;
1383 struct idetape_bh *prev_bh, *bh;
1384 int pages = tape->pages_per_stage;
1385 char *b_data = NULL;
1387 stage = kmalloc(sizeof(idetape_stage_t), GFP_KERNEL);
1392 stage->bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
1396 bh->b_reqnext = NULL;
1397 bh->b_data = (char *) __get_free_page(GFP_KERNEL);
1401 memset(bh->b_data, 0, PAGE_SIZE);
1402 bh->b_size = PAGE_SIZE;
1403 atomic_set(&bh->b_count, full ? bh->b_size : 0);
1406 b_data = (char *) __get_free_page(GFP_KERNEL);
1410 memset(b_data, 0, PAGE_SIZE);
1411 if (bh->b_data == b_data + PAGE_SIZE) {
1412 bh->b_size += PAGE_SIZE;
1413 bh->b_data -= PAGE_SIZE;
1415 atomic_add(PAGE_SIZE, &bh->b_count);
1418 if (b_data == bh->b_data + bh->b_size) {
1419 bh->b_size += PAGE_SIZE;
1421 atomic_add(PAGE_SIZE, &bh->b_count);
1425 bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
1427 free_page((unsigned long) b_data);
1430 bh->b_reqnext = NULL;
1431 bh->b_data = b_data;
1432 bh->b_size = PAGE_SIZE;
1433 atomic_set(&bh->b_count, full ? bh->b_size : 0);
1434 prev_bh->b_reqnext = bh;
1436 bh->b_size -= tape->excess_bh_size;
1438 atomic_sub(tape->excess_bh_size, &bh->b_count);
1441 __idetape_kfree_stage(stage);
1445 static int idetape_copy_stage_from_user(idetape_tape_t *tape,
1446 const char __user *buf, int n)
1448 struct idetape_bh *bh = tape->bh;
1454 printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
1458 count = min((unsigned int)
1459 (bh->b_size - atomic_read(&bh->b_count)),
1461 if (copy_from_user(bh->b_data + atomic_read(&bh->b_count), buf,
1465 atomic_add(count, &bh->b_count);
1467 if (atomic_read(&bh->b_count) == bh->b_size) {
1470 atomic_set(&bh->b_count, 0);
1477 static int idetape_copy_stage_to_user(idetape_tape_t *tape, char __user *buf,
1480 struct idetape_bh *bh = tape->bh;
1486 printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
1490 count = min(tape->b_count, n);
1491 if (copy_to_user(buf, tape->b_data, count))
1494 tape->b_data += count;
1495 tape->b_count -= count;
1497 if (!tape->b_count) {
1501 tape->b_data = bh->b_data;
1502 tape->b_count = atomic_read(&bh->b_count);
1509 static void idetape_init_merge_stage(idetape_tape_t *tape)
1511 struct idetape_bh *bh = tape->merge_stage->bh;
1514 if (tape->chrdev_dir == IDETAPE_DIR_WRITE)
1515 atomic_set(&bh->b_count, 0);
1517 tape->b_data = bh->b_data;
1518 tape->b_count = atomic_read(&bh->b_count);
1522 static ide_startstop_t idetape_read_position_callback(ide_drive_t *drive)
1524 idetape_tape_t *tape = drive->driver_data;
1525 u8 *readpos = tape->pc->buf;
1527 debug_log(DBG_PROCS, "Enter %s\n", __func__);
1529 if (!tape->pc->error) {
1530 debug_log(DBG_SENSE, "BOP - %s\n",
1531 (readpos[0] & 0x80) ? "Yes" : "No");
1532 debug_log(DBG_SENSE, "EOP - %s\n",
1533 (readpos[0] & 0x40) ? "Yes" : "No");
1535 if (readpos[0] & 0x4) {
1536 printk(KERN_INFO "ide-tape: Block location is unknown"
1538 clear_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags);
1539 idetape_end_request(drive, 0, 0);
1541 debug_log(DBG_SENSE, "Block Location - %u\n",
1542 be32_to_cpu(*(u32 *)&readpos[4]));
1544 tape->partition = readpos[1];
1546 be32_to_cpu(*(u32 *)&readpos[4]);
1547 set_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags);
1548 idetape_end_request(drive, 1, 0);
1551 idetape_end_request(drive, 0, 0);
1557 * Write a filemark if write_filemark=1. Flush the device buffers without
1558 * writing a filemark otherwise.
1560 static void idetape_create_write_filemark_cmd(ide_drive_t *drive,
1561 struct ide_atapi_pc *pc, int write_filemark)
1563 idetape_init_pc(pc);
1564 pc->c[0] = WRITE_FILEMARKS;
1565 pc->c[4] = write_filemark;
1566 pc->flags |= PC_FLAG_WAIT_FOR_DSC;
1567 pc->idetape_callback = &idetape_pc_callback;
1570 static void idetape_create_test_unit_ready_cmd(struct ide_atapi_pc *pc)
1572 idetape_init_pc(pc);
1573 pc->c[0] = TEST_UNIT_READY;
1574 pc->idetape_callback = &idetape_pc_callback;
1578 * We add a special packet command request to the tail of the request queue, and
1579 * wait for it to be serviced. This is not to be called from within the request
1580 * handling part of the driver! Here we allocate data on the stack and it is
1581 * valid until the request is finished. This is not the case for the bottom part
1582 * of the driver, where we are always leaving the functions to wait for an
1583 * interrupt or a timer event.
1585 * From the bottom part of the driver, we should allocate safe memory using
1586 * idetape_next_pc_storage() and idetape_next_rq_storage(), and add the request
1587 * to the request list without waiting for it to be serviced! In that case, we
1588 * usually use idetape_queue_pc_head().
1590 static int idetape_queue_pc_tail(ide_drive_t *drive, struct ide_atapi_pc *pc)
1592 struct ide_tape_obj *tape = drive->driver_data;
1595 idetape_init_rq(&rq, REQ_IDETAPE_PC1);
1596 rq.buffer = (char *) pc;
1597 rq.rq_disk = tape->disk;
1598 return ide_do_drive_cmd(drive, &rq, ide_wait);
1601 static void idetape_create_load_unload_cmd(ide_drive_t *drive,
1602 struct ide_atapi_pc *pc, int cmd)
1604 idetape_init_pc(pc);
1605 pc->c[0] = START_STOP;
1607 pc->flags |= PC_FLAG_WAIT_FOR_DSC;
1608 pc->idetape_callback = &idetape_pc_callback;
1611 static int idetape_wait_ready(ide_drive_t *drive, unsigned long timeout)
1613 idetape_tape_t *tape = drive->driver_data;
1614 struct ide_atapi_pc pc;
1615 int load_attempted = 0;
1617 /* Wait for the tape to become ready */
1618 set_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags);
1620 while (time_before(jiffies, timeout)) {
1621 idetape_create_test_unit_ready_cmd(&pc);
1622 if (!idetape_queue_pc_tail(drive, &pc))
1624 if ((tape->sense_key == 2 && tape->asc == 4 && tape->ascq == 2)
1625 || (tape->asc == 0x3A)) {
1629 idetape_create_load_unload_cmd(drive, &pc,
1630 IDETAPE_LU_LOAD_MASK);
1631 idetape_queue_pc_tail(drive, &pc);
1633 /* not about to be ready */
1634 } else if (!(tape->sense_key == 2 && tape->asc == 4 &&
1635 (tape->ascq == 1 || tape->ascq == 8)))
1642 static int idetape_flush_tape_buffers(ide_drive_t *drive)
1644 struct ide_atapi_pc pc;
1647 idetape_create_write_filemark_cmd(drive, &pc, 0);
1648 rc = idetape_queue_pc_tail(drive, &pc);
1651 idetape_wait_ready(drive, 60 * 5 * HZ);
1655 static void idetape_create_read_position_cmd(struct ide_atapi_pc *pc)
1657 idetape_init_pc(pc);
1658 pc->c[0] = READ_POSITION;
1660 pc->idetape_callback = &idetape_read_position_callback;
1663 static int idetape_read_position(ide_drive_t *drive)
1665 idetape_tape_t *tape = drive->driver_data;
1666 struct ide_atapi_pc pc;
1669 debug_log(DBG_PROCS, "Enter %s\n", __func__);
1671 idetape_create_read_position_cmd(&pc);
1672 if (idetape_queue_pc_tail(drive, &pc))
1674 position = tape->first_frame;
1678 static void idetape_create_locate_cmd(ide_drive_t *drive,
1679 struct ide_atapi_pc *pc,
1680 unsigned int block, u8 partition, int skip)
1682 idetape_init_pc(pc);
1683 pc->c[0] = POSITION_TO_ELEMENT;
1685 put_unaligned(cpu_to_be32(block), (unsigned int *) &pc->c[3]);
1686 pc->c[8] = partition;
1687 pc->flags |= PC_FLAG_WAIT_FOR_DSC;
1688 pc->idetape_callback = &idetape_pc_callback;
1691 static int idetape_create_prevent_cmd(ide_drive_t *drive,
1692 struct ide_atapi_pc *pc, int prevent)
1694 idetape_tape_t *tape = drive->driver_data;
1696 /* device supports locking according to capabilities page */
1697 if (!(tape->caps[6] & 0x01))
1700 idetape_init_pc(pc);
1701 pc->c[0] = ALLOW_MEDIUM_REMOVAL;
1703 pc->idetape_callback = &idetape_pc_callback;
1707 static int __idetape_discard_read_pipeline(ide_drive_t *drive)
1709 idetape_tape_t *tape = drive->driver_data;
1710 unsigned long flags;
1713 if (tape->chrdev_dir != IDETAPE_DIR_READ)
1716 /* Remove merge stage. */
1717 cnt = tape->merge_stage_size / tape->blk_size;
1718 if (test_and_clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags))
1719 ++cnt; /* Filemarks count as 1 sector */
1720 tape->merge_stage_size = 0;
1721 if (tape->merge_stage != NULL) {
1722 __idetape_kfree_stage(tape->merge_stage);
1723 tape->merge_stage = NULL;
1726 tape->chrdev_dir = IDETAPE_DIR_NONE;
1728 /* Remove pipeline stages. */
1729 if (tape->first_stage == NULL)
1732 spin_lock_irqsave(&tape->lock, flags);
1733 tape->next_stage = NULL;
1734 spin_unlock_irqrestore(&tape->lock, flags);
1736 while (tape->first_stage != NULL) {
1737 struct request *rq_ptr = &tape->first_stage->rq;
1739 cnt += rq_ptr->nr_sectors - rq_ptr->current_nr_sectors;
1740 if (rq_ptr->errors == IDETAPE_ERROR_FILEMARK)
1743 tape->nr_pending_stages = 0;
1744 tape->max_stages = tape->min_pipeline;
1749 * Position the tape to the requested block using the LOCATE packet command.
1750 * A READ POSITION command is then issued to check where we are positioned. Like
1751 * all higher level operations, we queue the commands at the tail of the request
1752 * queue and wait for their completion.
1754 static int idetape_position_tape(ide_drive_t *drive, unsigned int block,
1755 u8 partition, int skip)
1757 idetape_tape_t *tape = drive->driver_data;
1759 struct ide_atapi_pc pc;
1761 if (tape->chrdev_dir == IDETAPE_DIR_READ)
1762 __idetape_discard_read_pipeline(drive);
1763 idetape_wait_ready(drive, 60 * 5 * HZ);
1764 idetape_create_locate_cmd(drive, &pc, block, partition, skip);
1765 retval = idetape_queue_pc_tail(drive, &pc);
1769 idetape_create_read_position_cmd(&pc);
1770 return (idetape_queue_pc_tail(drive, &pc));
1773 static void idetape_discard_read_pipeline(ide_drive_t *drive,
1774 int restore_position)
1776 idetape_tape_t *tape = drive->driver_data;
1780 cnt = __idetape_discard_read_pipeline(drive);
1781 if (restore_position) {
1782 position = idetape_read_position(drive);
1783 seek = position > cnt ? position - cnt : 0;
1784 if (idetape_position_tape(drive, seek, 0, 0)) {
1785 printk(KERN_INFO "ide-tape: %s: position_tape failed in"
1786 " discard_pipeline()\n", tape->name);
1793 * Generate a read/write request for the block device interface and wait for it
1796 static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int blocks,
1797 struct idetape_bh *bh)
1799 idetape_tape_t *tape = drive->driver_data;
1802 debug_log(DBG_SENSE, "%s: cmd=%d\n", __func__, cmd);
1804 idetape_init_rq(&rq, cmd);
1805 rq.rq_disk = tape->disk;
1806 rq.special = (void *)bh;
1807 rq.sector = tape->first_frame;
1808 rq.nr_sectors = blocks;
1809 rq.current_nr_sectors = blocks;
1810 (void) ide_do_drive_cmd(drive, &rq, ide_wait);
1812 if ((cmd & (REQ_IDETAPE_READ | REQ_IDETAPE_WRITE)) == 0)
1815 if (tape->merge_stage)
1816 idetape_init_merge_stage(tape);
1817 if (rq.errors == IDETAPE_ERROR_GENERAL)
1819 return (tape->blk_size * (blocks-rq.current_nr_sectors));
1822 static void idetape_create_inquiry_cmd(struct ide_atapi_pc *pc)
1824 idetape_init_pc(pc);
1828 pc->idetape_callback = &idetape_pc_callback;
1831 static void idetape_create_rewind_cmd(ide_drive_t *drive,
1832 struct ide_atapi_pc *pc)
1834 idetape_init_pc(pc);
1835 pc->c[0] = REZERO_UNIT;
1836 pc->flags |= PC_FLAG_WAIT_FOR_DSC;
1837 pc->idetape_callback = &idetape_pc_callback;
1840 static void idetape_create_erase_cmd(struct ide_atapi_pc *pc)
1842 idetape_init_pc(pc);
1845 pc->flags |= PC_FLAG_WAIT_FOR_DSC;
1846 pc->idetape_callback = &idetape_pc_callback;
1849 static void idetape_create_space_cmd(struct ide_atapi_pc *pc, int count, u8 cmd)
1851 idetape_init_pc(pc);
1853 put_unaligned(cpu_to_be32(count), (unsigned int *) &pc->c[1]);
1855 pc->flags |= PC_FLAG_WAIT_FOR_DSC;
1856 pc->idetape_callback = &idetape_pc_callback;
1859 /* Queue up a character device originated write request. */
1860 static int idetape_add_chrdev_write_request(ide_drive_t *drive, int blocks)
1862 idetape_tape_t *tape = drive->driver_data;
1864 debug_log(DBG_CHRDEV, "Enter %s\n", __func__);
1866 return idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE,
1867 blocks, tape->merge_stage->bh);
1870 static void idetape_empty_write_pipeline(ide_drive_t *drive)
1872 idetape_tape_t *tape = drive->driver_data;
1874 struct idetape_bh *bh;
1876 if (tape->chrdev_dir != IDETAPE_DIR_WRITE) {
1877 printk(KERN_ERR "ide-tape: bug: Trying to empty write pipeline,"
1878 " but we are not writing.\n");
1881 if (tape->merge_stage_size > tape->stage_size) {
1882 printk(KERN_ERR "ide-tape: bug: merge_buffer too big\n");
1883 tape->merge_stage_size = tape->stage_size;
1885 if (tape->merge_stage_size) {
1886 blocks = tape->merge_stage_size / tape->blk_size;
1887 if (tape->merge_stage_size % tape->blk_size) {
1891 i = tape->blk_size - tape->merge_stage_size %
1893 bh = tape->bh->b_reqnext;
1895 atomic_set(&bh->b_count, 0);
1901 printk(KERN_INFO "ide-tape: bug,"
1905 min = min(i, (unsigned int)(bh->b_size -
1906 atomic_read(&bh->b_count)));
1907 memset(bh->b_data + atomic_read(&bh->b_count),
1909 atomic_add(min, &bh->b_count);
1914 (void) idetape_add_chrdev_write_request(drive, blocks);
1915 tape->merge_stage_size = 0;
1917 if (tape->merge_stage != NULL) {
1918 __idetape_kfree_stage(tape->merge_stage);
1919 tape->merge_stage = NULL;
1921 tape->chrdev_dir = IDETAPE_DIR_NONE;
1924 * On the next backup, perform the feedback loop again. (I don't want to
1925 * keep sense information between backups, as some systems are
1926 * constantly on, and the system load can be totally different on the
1929 tape->max_stages = tape->min_pipeline;
1930 if (tape->first_stage != NULL ||
1931 tape->next_stage != NULL ||
1932 tape->last_stage != NULL ||
1933 tape->nr_stages != 0) {
1934 printk(KERN_ERR "ide-tape: ide-tape pipeline bug, "
1935 "first_stage %p, next_stage %p, "
1936 "last_stage %p, nr_stages %d\n",
1937 tape->first_stage, tape->next_stage,
1938 tape->last_stage, tape->nr_stages);
1942 static int idetape_init_read(ide_drive_t *drive, int max_stages)
1944 idetape_tape_t *tape = drive->driver_data;
1947 /* Initialize read operation */
1948 if (tape->chrdev_dir != IDETAPE_DIR_READ) {
1949 if (tape->chrdev_dir == IDETAPE_DIR_WRITE) {
1950 idetape_empty_write_pipeline(drive);
1951 idetape_flush_tape_buffers(drive);
1953 if (tape->merge_stage || tape->merge_stage_size) {
1954 printk(KERN_ERR "ide-tape: merge_stage_size should be"
1956 tape->merge_stage_size = 0;
1958 tape->merge_stage = __idetape_kmalloc_stage(tape, 0, 0);
1959 if (!tape->merge_stage)
1961 tape->chrdev_dir = IDETAPE_DIR_READ;
1964 * Issue a read 0 command to ensure that DSC handshake is
1965 * switched from completion mode to buffer available mode.
1966 * No point in issuing this if DSC overlap isn't supported; some
1967 * drives (Seagate STT3401A) will return an error.
1969 if (drive->dsc_overlap) {
1970 bytes_read = idetape_queue_rw_tail(drive,
1971 REQ_IDETAPE_READ, 0,
1972 tape->merge_stage->bh);
1973 if (bytes_read < 0) {
1974 __idetape_kfree_stage(tape->merge_stage);
1975 tape->merge_stage = NULL;
1976 tape->chrdev_dir = IDETAPE_DIR_NONE;
1982 if (tape->nr_pending_stages >= 3 * max_stages / 4) {
1983 tape->measure_insert_time = 1;
1984 tape->insert_time = jiffies;
1985 tape->insert_size = 0;
1986 tape->insert_speed = 0;
1993 * Called from idetape_chrdev_read() to service a character device read request
1994 * and add read-ahead requests to our pipeline.
1996 static int idetape_add_chrdev_read_request(ide_drive_t *drive, int blocks)
1998 idetape_tape_t *tape = drive->driver_data;
2000 debug_log(DBG_PROCS, "Enter %s, %d blocks\n", __func__, blocks);
2002 /* If we are at a filemark, return a read length of 0 */
2003 if (test_bit(IDETAPE_FLAG_FILEMARK, &tape->flags))
2006 idetape_init_read(drive, tape->max_stages);
2008 return idetape_queue_rw_tail(drive, REQ_IDETAPE_READ, blocks,
2009 tape->merge_stage->bh);
2012 static void idetape_pad_zeros(ide_drive_t *drive, int bcount)
2014 idetape_tape_t *tape = drive->driver_data;
2015 struct idetape_bh *bh;
2021 bh = tape->merge_stage->bh;
2022 count = min(tape->stage_size, bcount);
2024 blocks = count / tape->blk_size;
2026 atomic_set(&bh->b_count,
2027 min(count, (unsigned int)bh->b_size));
2028 memset(bh->b_data, 0, atomic_read(&bh->b_count));
2029 count -= atomic_read(&bh->b_count);
2032 idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, blocks,
2033 tape->merge_stage->bh);
2038 * Rewinds the tape to the Beginning Of the current Partition (BOP). We
2039 * currently support only one partition.
2041 static int idetape_rewind_tape(ide_drive_t *drive)
2044 struct ide_atapi_pc pc;
2045 idetape_tape_t *tape;
2046 tape = drive->driver_data;
2048 debug_log(DBG_SENSE, "Enter %s\n", __func__);
2050 idetape_create_rewind_cmd(drive, &pc);
2051 retval = idetape_queue_pc_tail(drive, &pc);
2055 idetape_create_read_position_cmd(&pc);
2056 retval = idetape_queue_pc_tail(drive, &pc);
2062 /* mtio.h compatible commands should be issued to the chrdev interface. */
2063 static int idetape_blkdev_ioctl(ide_drive_t *drive, unsigned int cmd,
2066 idetape_tape_t *tape = drive->driver_data;
2067 void __user *argp = (void __user *)arg;
2069 struct idetape_config {
2070 int dsc_rw_frequency;
2071 int dsc_media_access_frequency;
2075 debug_log(DBG_PROCS, "Enter %s\n", __func__);
2079 if (copy_from_user(&config, argp, sizeof(config)))
2081 tape->best_dsc_rw_freq = config.dsc_rw_frequency;
2082 tape->max_stages = config.nr_stages;
2085 config.dsc_rw_frequency = (int) tape->best_dsc_rw_freq;
2086 config.nr_stages = tape->max_stages;
2087 if (copy_to_user(argp, &config, sizeof(config)))
2096 static int idetape_space_over_filemarks(ide_drive_t *drive, short mt_op,
2099 idetape_tape_t *tape = drive->driver_data;
2100 struct ide_atapi_pc pc;
2101 int retval, count = 0;
2102 int sprev = !!(tape->caps[4] & 0x20);
2106 if (MTBSF == mt_op || MTBSFM == mt_op) {
2109 mt_count = -mt_count;
2112 if (tape->chrdev_dir == IDETAPE_DIR_READ) {
2113 tape->merge_stage_size = 0;
2114 if (test_and_clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags))
2116 idetape_discard_read_pipeline(drive, 0);
2120 * The filemark was not found in our internal pipeline; now we can issue
2121 * the space command.
2126 idetape_create_space_cmd(&pc, mt_count - count,
2127 IDETAPE_SPACE_OVER_FILEMARK);
2128 return idetape_queue_pc_tail(drive, &pc);
2133 retval = idetape_space_over_filemarks(drive, MTFSF,
2137 count = (MTBSFM == mt_op ? 1 : -1);
2138 return idetape_space_over_filemarks(drive, MTFSF, count);
2140 printk(KERN_ERR "ide-tape: MTIO operation %d not supported\n",
2147 * Our character device read / write functions.
2149 * The tape is optimized to maximize throughput when it is transferring an
2150 * integral multiple of the "continuous transfer limit", which is a parameter of
2151 * the specific tape (26kB on my particular tape, 32kB for Onstream).
2153 * As of version 1.3 of the driver, the character device provides an abstract
2154 * continuous view of the media - any mix of block sizes (even 1 byte) on the
2155 * same backup/restore procedure is supported. The driver will internally
2156 * convert the requests to the recommended transfer unit, so that a mismatch
2157 * between the user's block size and the recommended size will only result in
2158 * (slightly) increased driver overhead, but will no longer hurt performance.
2159 * This is not applicable to Onstream.
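 *
 * Purely illustrative user-space sketch (not part of the driver; the device
 * name and sizes are just examples): thanks to this abstraction, a restore
 * may use whatever read size is convenient, e.g.
 *
 *	int fd = open("/dev/nht0", O_RDONLY);
 *	char buf[512];
 *	ssize_t n = read(fd, buf, sizeof(buf));
 *
 * and the driver re-blocks the request to the recommended transfer unit
 * internally.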
2161 static ssize_t idetape_chrdev_read(struct file *file, char __user *buf,
2162 size_t count, loff_t *ppos)
2164 struct ide_tape_obj *tape = ide_tape_f(file);
2165 ide_drive_t *drive = tape->drive;
2166 ssize_t bytes_read, temp, actually_read = 0, rc;
2168 u16 ctl = *(u16 *)&tape->caps[12];
2170 debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count);
2172 if (tape->chrdev_dir != IDETAPE_DIR_READ) {
2173 if (test_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags))
2174 if (count > tape->blk_size &&
2175 (count % tape->blk_size) == 0)
2176 tape->user_bs_factor = count / tape->blk_size;
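/* Illustrative example (assumed values): a first read of 32768 bytes against
 * a 512-byte drive block size infers user_bs_factor = 64. */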
2178 rc = idetape_init_read(drive, tape->max_stages);
2183 if (tape->merge_stage_size) {
2184 actually_read = min((unsigned int)(tape->merge_stage_size),
2185 (unsigned int)count);
2186 if (idetape_copy_stage_to_user(tape, buf, actually_read))
2188 buf += actually_read;
2189 tape->merge_stage_size -= actually_read;
2190 count -= actually_read;
2192 while (count >= tape->stage_size) {
2193 bytes_read = idetape_add_chrdev_read_request(drive, ctl);
2194 if (bytes_read <= 0)
2196 if (idetape_copy_stage_to_user(tape, buf, bytes_read))
2199 count -= bytes_read;
2200 actually_read += bytes_read;
2203 bytes_read = idetape_add_chrdev_read_request(drive, ctl);
2204 if (bytes_read <= 0)
2206 temp = min((unsigned long)count, (unsigned long)bytes_read);
2207 if (idetape_copy_stage_to_user(tape, buf, temp))
2209 actually_read += temp;
2210 tape->merge_stage_size = bytes_read-temp;
2213 if (!actually_read && test_bit(IDETAPE_FLAG_FILEMARK, &tape->flags)) {
2214 debug_log(DBG_SENSE, "%s: spacing over filemark\n", tape->name);
2216 idetape_space_over_filemarks(drive, MTFSF, 1);
2220 return ret ? ret : actually_read;
2223 static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf,
2224 size_t count, loff_t *ppos)
2226 struct ide_tape_obj *tape = ide_tape_f(file);
2227 ide_drive_t *drive = tape->drive;
2228 ssize_t actually_written = 0;
2230 u16 ctl = *(u16 *)&tape->caps[12];
2232 /* The drive is write protected. */
2233 if (tape->write_prot)
2236 debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count);
2238 /* Initialize write operation */
2239 if (tape->chrdev_dir != IDETAPE_DIR_WRITE) {
2240 if (tape->chrdev_dir == IDETAPE_DIR_READ)
2241 idetape_discard_read_pipeline(drive, 1);
2242 if (tape->merge_stage || tape->merge_stage_size) {
2243 printk(KERN_ERR "ide-tape: merge_stage_size "
2244 "should be 0 now\n");
2245 tape->merge_stage_size = 0;
2247 tape->merge_stage = __idetape_kmalloc_stage(tape, 0, 0);
2248 if (!tape->merge_stage)
2250 tape->chrdev_dir = IDETAPE_DIR_WRITE;
2251 idetape_init_merge_stage(tape);
2254 * Issue a write 0 command to ensure that DSC handshake is
2255 * switched from completion mode to buffer available mode. No
2256 * point in issuing this if DSC overlap isn't supported, since some
2257 * drives (Seagate STT3401A) will return an error.
2259 if (drive->dsc_overlap) {
2260 ssize_t retval = idetape_queue_rw_tail(drive,
2261 REQ_IDETAPE_WRITE, 0,
2262 tape->merge_stage->bh);
2264 __idetape_kfree_stage(tape->merge_stage);
2265 tape->merge_stage = NULL;
2266 tape->chrdev_dir = IDETAPE_DIR_NONE;
2273 if (tape->merge_stage_size) {
2274 if (tape->merge_stage_size >= tape->stage_size) {
2275 printk(KERN_ERR "ide-tape: bug: merge buf too big\n");
2276 tape->merge_stage_size = 0;
2278 actually_written = min((unsigned int)
2279 (tape->stage_size - tape->merge_stage_size),
2280 (unsigned int)count);
2281 if (idetape_copy_stage_from_user(tape, buf, actually_written))
2283 buf += actually_written;
2284 tape->merge_stage_size += actually_written;
2285 count -= actually_written;
2287 if (tape->merge_stage_size == tape->stage_size) {
2289 tape->merge_stage_size = 0;
2290 retval = idetape_add_chrdev_write_request(drive, ctl);
2295 while (count >= tape->stage_size) {
2297 if (idetape_copy_stage_from_user(tape, buf, tape->stage_size))
2299 buf += tape->stage_size;
2300 count -= tape->stage_size;
2301 retval = idetape_add_chrdev_write_request(drive, ctl);
2302 actually_written += tape->stage_size;
2307 actually_written += count;
2308 if (idetape_copy_stage_from_user(tape, buf, count))
2310 tape->merge_stage_size += count;
2312 return ret ? ret : actually_written;
2315 static int idetape_write_filemark(ide_drive_t *drive)
2317 struct ide_atapi_pc pc;
2319 /* Write a filemark */
2320 idetape_create_write_filemark_cmd(drive, &pc, 1);
2321 if (idetape_queue_pc_tail(drive, &pc)) {
2322 printk(KERN_ERR "ide-tape: Couldn't write a filemark\n");
2329 * Called from idetape_chrdev_ioctl when the general mtio MTIOCTOP ioctl is
2332 * Note: MTBSF and MTBSFM are not supported when the tape doesn't support
2333 * spacing over filemarks in the reverse direction. In this case, MTFSFM is also
2334 * usually not supported (it is supported in the rare case in which we crossed
2335 * the filemark during our pipelined read-ahead operation).
2337 * The following commands are currently not supported:
2339 * MTFSS, MTBSS, MTWSM, MTSETDENSITY, MTSETDRVBUFFER, MT_ST_BOOLEANS,
2340 * MT_ST_WRITE_THRESHOLD.
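 *
 * Illustrative user-space sketch only (standard mtio.h usage, not part of
 * this driver): rewinding and then writing one filemark through MTIOCTOP
 * could look like
 *
 *	struct mtop op = { .mt_op = MTREW, .mt_count = 1 };
 *	ioctl(fd, MTIOCTOP, &op);
 *	op.mt_op = MTWEOF;
 *	ioctl(fd, MTIOCTOP, &op);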
2342 static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
2344 idetape_tape_t *tape = drive->driver_data;
2345 struct ide_atapi_pc pc;
2348 debug_log(DBG_ERR, "Handling MTIOCTOP ioctl: mt_op=%d, mt_count=%d\n",
2351 /* Commands which need our pipelined read-ahead stages. */
2359 return idetape_space_over_filemarks(drive, mt_op, mt_count);
2366 if (tape->write_prot)
2368 idetape_discard_read_pipeline(drive, 1);
2369 for (i = 0; i < mt_count; i++) {
2370 retval = idetape_write_filemark(drive);
2376 idetape_discard_read_pipeline(drive, 0);
2377 if (idetape_rewind_tape(drive))
2381 idetape_discard_read_pipeline(drive, 0);
2382 idetape_create_load_unload_cmd(drive, &pc,
2383 IDETAPE_LU_LOAD_MASK);
2384 return idetape_queue_pc_tail(drive, &pc);
2388 * If door is locked, attempt to unlock before
2389 * attempting to eject.
2391 if (tape->door_locked) {
2392 if (idetape_create_prevent_cmd(drive, &pc, 0))
2393 if (!idetape_queue_pc_tail(drive, &pc))
2394 tape->door_locked = DOOR_UNLOCKED;
2396 idetape_discard_read_pipeline(drive, 0);
2397 idetape_create_load_unload_cmd(drive, &pc,
2398 !IDETAPE_LU_LOAD_MASK);
2399 retval = idetape_queue_pc_tail(drive, &pc);
2401 clear_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags);
2404 idetape_discard_read_pipeline(drive, 0);
2405 return idetape_flush_tape_buffers(drive);
2407 idetape_discard_read_pipeline(drive, 0);
2408 idetape_create_load_unload_cmd(drive, &pc,
2409 IDETAPE_LU_RETENSION_MASK | IDETAPE_LU_LOAD_MASK);
2410 return idetape_queue_pc_tail(drive, &pc);
2412 idetape_create_space_cmd(&pc, 0, IDETAPE_SPACE_TO_EOD);
2413 return idetape_queue_pc_tail(drive, &pc);
2415 (void)idetape_rewind_tape(drive);
2416 idetape_create_erase_cmd(&pc);
2417 return idetape_queue_pc_tail(drive, &pc);
2420 if (mt_count < tape->blk_size ||
2421 mt_count % tape->blk_size)
2423 tape->user_bs_factor = mt_count / tape->blk_size;
2424 clear_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags);
2426 set_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags);
2429 idetape_discard_read_pipeline(drive, 0);
2430 return idetape_position_tape(drive,
2431 mt_count * tape->user_bs_factor, tape->partition, 0);
2433 idetape_discard_read_pipeline(drive, 0);
2434 return idetape_position_tape(drive, 0, mt_count, 0);
2438 if (!idetape_create_prevent_cmd(drive, &pc, 1))
2440 retval = idetape_queue_pc_tail(drive, &pc);
2443 tape->door_locked = DOOR_EXPLICITLY_LOCKED;
2446 if (!idetape_create_prevent_cmd(drive, &pc, 0))
2448 retval = idetape_queue_pc_tail(drive, &pc);
2451 tape->door_locked = DOOR_UNLOCKED;
2454 printk(KERN_ERR "ide-tape: MTIO operation %d not supported\n",
2461 * Our character device ioctls. General mtio.h magnetic io commands are
2462 * supported here, and not in the corresponding block interface. Our own
2463 * ide-tape ioctls are supported on both interfaces.
2465 static int idetape_chrdev_ioctl(struct inode *inode, struct file *file,
2466 unsigned int cmd, unsigned long arg)
2468 struct ide_tape_obj *tape = ide_tape_f(file);
2469 ide_drive_t *drive = tape->drive;
2473 int block_offset = 0, position = tape->first_frame;
2474 void __user *argp = (void __user *)arg;
2476 debug_log(DBG_CHRDEV, "Enter %s, cmd=%u\n", __func__, cmd);
2478 if (tape->chrdev_dir == IDETAPE_DIR_WRITE) {
2479 idetape_empty_write_pipeline(drive);
2480 idetape_flush_tape_buffers(drive);
2482 if (cmd == MTIOCGET || cmd == MTIOCPOS) {
2483 block_offset = tape->merge_stage_size /
2484 (tape->blk_size * tape->user_bs_factor);
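/* Descriptive note: data still sitting in the merge buffer has already been
 * read ahead from (or not yet flushed to) the tape, so the drive-reported
 * position is adjusted by block_offset below to match the position the user
 * actually sees. */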
2485 position = idetape_read_position(drive);
2491 if (copy_from_user(&mtop, argp, sizeof(struct mtop)))
2493 return idetape_mtioctop(drive, mtop.mt_op, mtop.mt_count);
2495 memset(&mtget, 0, sizeof(struct mtget));
2496 mtget.mt_type = MT_ISSCSI2;
2497 mtget.mt_blkno = position / tape->user_bs_factor - block_offset;
2499 ((tape->blk_size * tape->user_bs_factor)
2500 << MT_ST_BLKSIZE_SHIFT) & MT_ST_BLKSIZE_MASK;
2502 if (tape->drv_write_prot)
2503 mtget.mt_gstat |= GMT_WR_PROT(0xffffffff);
2505 if (copy_to_user(argp, &mtget, sizeof(struct mtget)))
2509 mtpos.mt_blkno = position / tape->user_bs_factor - block_offset;
2510 if (copy_to_user(argp, &mtpos, sizeof(struct mtpos)))
2514 if (tape->chrdev_dir == IDETAPE_DIR_READ)
2515 idetape_discard_read_pipeline(drive, 1);
2516 return idetape_blkdev_ioctl(drive, cmd, arg);
2521 * Do a mode sense page 0 with block descriptor and, if it succeeds, set the
2522 * tape block size to the reported value.
2524 static void ide_tape_get_bsize_from_bdesc(ide_drive_t *drive)
2526 idetape_tape_t *tape = drive->driver_data;
2527 struct ide_atapi_pc pc;
2529 idetape_create_mode_sense_cmd(&pc, IDETAPE_BLOCK_DESCRIPTOR);
2530 if (idetape_queue_pc_tail(drive, &pc)) {
2531 printk(KERN_ERR "ide-tape: Can't get block descriptor\n");
2532 if (tape->blk_size == 0) {
2533 printk(KERN_WARNING "ide-tape: Cannot deal with zero "
2534 "block size, assuming 32k\n");
2535 tape->blk_size = 32768;
2539 tape->blk_size = (pc.buf[4 + 5] << 16) +
2540 (pc.buf[4 + 6] << 8) +
2542 tape->drv_write_prot = (pc.buf[2] & 0x80) >> 7;
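/* The offsets above follow the MODE SENSE data format: a 4-byte mode
 * parameter header (write-protect is bit 7 of header byte 2) followed by the
 * block descriptor, whose bytes 5..7 carry the block length in bytes. */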
2545 static int idetape_chrdev_open(struct inode *inode, struct file *filp)
2547 unsigned int minor = iminor(inode), i = minor & ~0xc0;
2549 idetape_tape_t *tape;
2550 struct ide_atapi_pc pc;
2553 if (i >= MAX_HWIFS * MAX_DRIVES)
2556 tape = ide_tape_chrdev_get(i);
2560 debug_log(DBG_CHRDEV, "Enter %s\n", __func__);
2563 * We really want to do nonseekable_open(inode, filp); here, but some
2564 * versions of tar incorrectly call lseek on tapes and bail out if that
2565 * fails. So we disallow pread() and pwrite(), but permit lseeks.
2567 filp->f_mode &= ~(FMODE_PREAD | FMODE_PWRITE);
2569 drive = tape->drive;
2571 filp->private_data = tape;
2573 if (test_and_set_bit(IDETAPE_FLAG_BUSY, &tape->flags)) {
2578 retval = idetape_wait_ready(drive, 60 * HZ);
2580 clear_bit(IDETAPE_FLAG_BUSY, &tape->flags);
2581 printk(KERN_ERR "ide-tape: %s: drive not ready\n", tape->name);
2585 idetape_read_position(drive);
2586 if (!test_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags))
2587 (void)idetape_rewind_tape(drive);
2589 /* Read block size and write protect status from drive. */
2590 ide_tape_get_bsize_from_bdesc(drive);
2592 /* Set write protect flag if device is opened as read-only. */
2593 if ((filp->f_flags & O_ACCMODE) == O_RDONLY)
2594 tape->write_prot = 1;
2596 tape->write_prot = tape->drv_write_prot;
2598 /* Make sure drive isn't write protected if user wants to write. */
2599 if (tape->write_prot) {
2600 if ((filp->f_flags & O_ACCMODE) == O_WRONLY ||
2601 (filp->f_flags & O_ACCMODE) == O_RDWR) {
2602 clear_bit(IDETAPE_FLAG_BUSY, &tape->flags);
2608 /* Lock the tape drive door so user can't eject. */
2609 if (tape->chrdev_dir == IDETAPE_DIR_NONE) {
2610 if (idetape_create_prevent_cmd(drive, &pc, 1)) {
2611 if (!idetape_queue_pc_tail(drive, &pc)) {
2612 if (tape->door_locked != DOOR_EXPLICITLY_LOCKED)
2613 tape->door_locked = DOOR_LOCKED;
2624 static void idetape_write_release(ide_drive_t *drive, unsigned int minor)
2626 idetape_tape_t *tape = drive->driver_data;
2628 idetape_empty_write_pipeline(drive);
2629 tape->merge_stage = __idetape_kmalloc_stage(tape, 1, 0);
2630 if (tape->merge_stage != NULL) {
2631 idetape_pad_zeros(drive, tape->blk_size *
2632 (tape->user_bs_factor - 1));
2633 __idetape_kfree_stage(tape->merge_stage);
2634 tape->merge_stage = NULL;
2636 idetape_write_filemark(drive);
2637 idetape_flush_tape_buffers(drive);
2638 idetape_flush_tape_buffers(drive);
2641 static int idetape_chrdev_release(struct inode *inode, struct file *filp)
2643 struct ide_tape_obj *tape = ide_tape_f(filp);
2644 ide_drive_t *drive = tape->drive;
2645 struct ide_atapi_pc pc;
2646 unsigned int minor = iminor(inode);
2649 tape = drive->driver_data;
2651 debug_log(DBG_CHRDEV, "Enter %s\n", __func__);
2653 if (tape->chrdev_dir == IDETAPE_DIR_WRITE)
2654 idetape_write_release(drive, minor);
2655 if (tape->chrdev_dir == IDETAPE_DIR_READ) {
2657 idetape_discard_read_pipeline(drive, 1);
2660 if (minor < 128 && test_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags))
2661 (void) idetape_rewind_tape(drive);
2662 if (tape->chrdev_dir == IDETAPE_DIR_NONE) {
2663 if (tape->door_locked == DOOR_LOCKED) {
2664 if (idetape_create_prevent_cmd(drive, &pc, 0)) {
2665 if (!idetape_queue_pc_tail(drive, &pc))
2666 tape->door_locked = DOOR_UNLOCKED;
2670 clear_bit(IDETAPE_FLAG_BUSY, &tape->flags);
2677 * Check the contents of the ATAPI IDENTIFY command results. We return:
2679 * 1 - If the tape can be supported by us, based on the information we have so far.
2682 * 0 - If this tape drive is not currently supported by us.
2684 static int idetape_identify_device(ide_drive_t *drive)
2686 u8 gcw[2], protocol, device_type, removable, packet_size;
2688 if (drive->id_read == 0)
2691 *((unsigned short *) &gcw) = drive->id->config;
2693 protocol = (gcw[1] & 0xC0) >> 6;
2694 device_type = gcw[1] & 0x1F;
2695 removable = !!(gcw[0] & 0x80);
2696 packet_size = gcw[0] & 0x3;
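/* Field layout of IDENTIFY word 0 (general configuration), as decoded above:
 * bits 15-14 protocol (2 = ATAPI), bits 12-8 device type (1 = sequential
 * access, i.e. tape), bit 7 removable medium, bits 1-0 packet size
 * (0 = 12-byte command packets). */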
2698 /* Check that we can support this device */
2700 printk(KERN_ERR "ide-tape: Protocol (0x%02x) is not ATAPI\n",
2702 else if (device_type != 1)
2703 printk(KERN_ERR "ide-tape: Device type (0x%02x) is not set "
2704 "to tape\n", device_type);
2705 else if (!removable)
2706 printk(KERN_ERR "ide-tape: The removable flag is not set\n");
2707 else if (packet_size != 0) {
2708 printk(KERN_ERR "ide-tape: Packet size (0x%02x) is not 12"
2709 " bytes\n", packet_size);
2715 static void idetape_get_inquiry_results(ide_drive_t *drive)
2717 idetape_tape_t *tape = drive->driver_data;
2718 struct ide_atapi_pc pc;
2719 char fw_rev[6], vendor_id[10], product_id[18];
2721 idetape_create_inquiry_cmd(&pc);
2722 if (idetape_queue_pc_tail(drive, &pc)) {
2723 printk(KERN_ERR "ide-tape: %s: can't get INQUIRY results\n",
2727 memcpy(vendor_id, &pc.buf[8], 8);
2728 memcpy(product_id, &pc.buf[16], 16);
2729 memcpy(fw_rev, &pc.buf[32], 4);
2731 ide_fixstring(vendor_id, 10, 0);
2732 ide_fixstring(product_id, 18, 0);
2733 ide_fixstring(fw_rev, 6, 0);
2735 printk(KERN_INFO "ide-tape: %s <-> %s: %s %s rev %s\n",
2736 drive->name, tape->name, vendor_id, product_id, fw_rev);
2740 * Ask the tape about its various parameters. In particular, we will adjust our
2741 * data transfer buffer size to the recommended value as returned by the tape.
2743 static void idetape_get_mode_sense_results(ide_drive_t *drive)
2745 idetape_tape_t *tape = drive->driver_data;
2746 struct ide_atapi_pc pc;
2748 u16 speed, max_speed; /* KB/s values can exceed 255, so u8 would truncate */
2750 idetape_create_mode_sense_cmd(&pc, IDETAPE_CAPABILITIES_PAGE);
2751 if (idetape_queue_pc_tail(drive, &pc)) {
2752 printk(KERN_ERR "ide-tape: Can't get tape parameters - assuming"
2753 " some default values\n");
2754 tape->blk_size = 512;
2755 put_unaligned(52, (u16 *)&tape->caps[12]);
2756 put_unaligned(540, (u16 *)&tape->caps[14]);
2757 put_unaligned(6*52, (u16 *)&tape->caps[16]);
2760 caps = pc.buf + 4 + pc.buf[3];
2762 /* convert to host order and save for later use */
2763 speed = be16_to_cpu(*(u16 *)&caps[14]);
2764 max_speed = be16_to_cpu(*(u16 *)&caps[8]);
2766 put_unaligned(max_speed, (u16 *)&caps[8]);
2767 put_unaligned(be16_to_cpu(*(u16 *)&caps[12]), (u16 *)&caps[12]);
2768 put_unaligned(speed, (u16 *)&caps[14]);
2769 put_unaligned(be16_to_cpu(*(u16 *)&caps[16]), (u16 *)&caps[16]);
2772 printk(KERN_INFO "ide-tape: %s: invalid tape speed "
2773 "(assuming 650KB/sec)\n", drive->name);
2774 put_unaligned(650, (u16 *)&caps[14]);
2777 printk(KERN_INFO "ide-tape: %s: invalid max_speed "
2778 "(assuming 650KB/sec)\n", drive->name);
2779 put_unaligned(650, (u16 *)&caps[8]);
2782 memcpy(&tape->caps, caps, 20);
2784 tape->blk_size = 512;
2785 else if (caps[7] & 0x04)
2786 tape->blk_size = 1024;
2789 #ifdef CONFIG_IDE_PROC_FS
2790 static void idetape_add_settings(ide_drive_t *drive)
2792 idetape_tape_t *tape = drive->driver_data;
2794 ide_add_setting(drive, "buffer", SETTING_READ, TYPE_SHORT, 0, 0xffff,
2795 1, 2, (u16 *)&tape->caps[16], NULL);
2796 ide_add_setting(drive, "pipeline_min", SETTING_RW, TYPE_INT, 1, 0xffff,
2797 tape->stage_size / 1024, 1, &tape->min_pipeline, NULL);
2798 ide_add_setting(drive, "pipeline", SETTING_RW, TYPE_INT, 1, 0xffff,
2799 tape->stage_size / 1024, 1, &tape->max_stages, NULL);
2800 ide_add_setting(drive, "pipeline_max", SETTING_RW, TYPE_INT, 1, 0xffff,
2801 tape->stage_size / 1024, 1, &tape->max_pipeline, NULL);
2802 ide_add_setting(drive, "pipeline_used", SETTING_READ, TYPE_INT, 0,
2803 0xffff, tape->stage_size / 1024, 1, &tape->nr_stages,
2805 ide_add_setting(drive, "pipeline_pending", SETTING_READ, TYPE_INT, 0,
2806 0xffff, tape->stage_size / 1024, 1,
2807 &tape->nr_pending_stages, NULL);
2808 ide_add_setting(drive, "speed", SETTING_READ, TYPE_SHORT, 0, 0xffff,
2809 1, 1, (u16 *)&tape->caps[14], NULL);
2810 ide_add_setting(drive, "stage", SETTING_READ, TYPE_INT, 0, 0xffff, 1,
2811 1024, &tape->stage_size, NULL);
2812 ide_add_setting(drive, "tdsc", SETTING_RW, TYPE_INT, IDETAPE_DSC_RW_MIN,
2813 IDETAPE_DSC_RW_MAX, 1000, HZ, &tape->best_dsc_rw_freq,
2815 ide_add_setting(drive, "dsc_overlap", SETTING_RW, TYPE_BYTE, 0, 1, 1,
2816 1, &drive->dsc_overlap, NULL);
2817 ide_add_setting(drive, "avg_speed", SETTING_READ, TYPE_INT, 0, 0xffff,
2818 1, 1, &tape->avg_speed, NULL);
2819 ide_add_setting(drive, "debug_mask", SETTING_RW, TYPE_INT, 0, 0xffff, 1,
2820 1, &tape->debug_mask, NULL);
2823 static inline void idetape_add_settings(ide_drive_t *drive) { ; }
2827 * The function below is called to:
2829 * 1. Initialize our various state variables.
2830 * 2. Ask the tape for its capabilities.
2831 * 3. Allocate a buffer which will be used for data transfer. The buffer size
2832 * is chosen based on the recommendation which we received in step 2.
2834 * Note that at this point ide.c already assigned us an irq, so that we can
2835 * queue requests here and wait for their completion.
2837 static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
2839 unsigned long t1, tmid, tn, t;
2844 u16 *ctl = (u16 *)&tape->caps[12];
2846 spin_lock_init(&tape->lock);
2847 drive->dsc_overlap = 1;
2848 if (drive->hwif->host_flags & IDE_HFLAG_NO_DSC) {
2849 printk(KERN_INFO "ide-tape: %s: disabling DSC overlap\n",
2851 drive->dsc_overlap = 0;
2853 /* Seagate Travan drives do not support DSC overlap. */
2854 if (strstr(drive->id->model, "Seagate STT3401"))
2855 drive->dsc_overlap = 0;
2856 tape->minor = minor;
2857 tape->name[0] = 'h';
2858 tape->name[1] = 't';
2859 tape->name[2] = '0' + minor;
2860 tape->chrdev_dir = IDETAPE_DIR_NONE;
2861 tape->pc = tape->pc_stack;
2862 *((unsigned short *) &gcw) = drive->id->config;
2864 /* Command packet DRQ type */
2865 if (((gcw[0] & 0x60) >> 5) == 1)
2866 set_bit(IDETAPE_FLAG_DRQ_INTERRUPT, &tape->flags);
2868 tape->min_pipeline = 10;
2869 tape->max_pipeline = 10;
2870 tape->max_stages = 10;
2872 idetape_get_inquiry_results(drive);
2873 idetape_get_mode_sense_results(drive);
2874 ide_tape_get_bsize_from_bdesc(drive);
2875 tape->user_bs_factor = 1;
2876 tape->stage_size = *ctl * tape->blk_size;
2877 while (tape->stage_size > 0xffff) {
2878 printk(KERN_NOTICE "ide-tape: decreasing stage size\n");
2880 tape->stage_size = *ctl * tape->blk_size;
2882 stage_size = tape->stage_size;
2883 tape->pages_per_stage = stage_size / PAGE_SIZE;
2884 if (stage_size % PAGE_SIZE) {
2885 tape->pages_per_stage++;
2886 tape->excess_bh_size = PAGE_SIZE - stage_size % PAGE_SIZE;
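/* Example with assumed numbers: a 26 kB stage on 4 kB pages needs 7 pages,
 * with excess_bh_size = 2 kB of the last page left unused. */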
2889 /* Select the "best" DSC read/write polling freq and pipeline size. */
2890 speed = max(*(u16 *)&tape->caps[14], *(u16 *)&tape->caps[8]);
2892 tape->max_stages = speed * 1000 * 10 / tape->stage_size;
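/* In other words, start with enough stages to buffer roughly ten seconds of
 * streaming at the drive's reported speed (speed is in KB/s, stage_size in
 * bytes). */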
2894 /* Limit memory use for pipeline to 10% of physical memory */
2896 if (tape->max_stages * tape->stage_size >
2897 si.totalram * si.mem_unit / 10)
2899 si.totalram * si.mem_unit / (10 * tape->stage_size);
2901 tape->max_stages = min(tape->max_stages, IDETAPE_MAX_PIPELINE_STAGES);
2902 tape->min_pipeline = min(tape->max_stages, IDETAPE_MIN_PIPELINE_STAGES);
2903 tape->max_pipeline =
2904 min(tape->max_stages * 2, IDETAPE_MAX_PIPELINE_STAGES);
2905 if (tape->max_stages == 0) {
2906 tape->max_stages = 1;
2907 tape->min_pipeline = 1;
2908 tape->max_pipeline = 1;
2911 t1 = (tape->stage_size * HZ) / (speed * 1000);
2912 tmid = (*(u16 *)&tape->caps[16] * 32 * HZ) / (speed * 125);
2913 tn = (IDETAPE_FIFO_THRESHOLD * tape->stage_size * HZ) / (speed * 1000);
2915 if (tape->max_stages)
2921 * Ensure that the number we got makes sense; limit it within
2922 * IDETAPE_DSC_RW_MIN and IDETAPE_DSC_RW_MAX.
2924 tape->best_dsc_rw_freq = max_t(unsigned long,
2925 min_t(unsigned long, t, IDETAPE_DSC_RW_MAX),
2926 IDETAPE_DSC_RW_MIN);
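/* Worked example with assumed numbers: a 650 KB/s drive and a 32 kB stage
 * give t1 = 32768 * HZ / 650000, roughly HZ/20 (about 50 ms); whichever
 * candidate is chosen is then clamped above to the
 * [IDETAPE_DSC_RW_MIN, IDETAPE_DSC_RW_MAX] range. */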
2927 printk(KERN_INFO "ide-tape: %s <-> %s: %dKBps, %d*%dkB buffer, "
2928 "%dkB pipeline, %lums tDSC%s\n",
2929 drive->name, tape->name, *(u16 *)&tape->caps[14],
2930 (*(u16 *)&tape->caps[16] * 512) / tape->stage_size,
2931 tape->stage_size / 1024,
2932 tape->max_stages * tape->stage_size / 1024,
2933 tape->best_dsc_rw_freq * 1000 / HZ,
2934 drive->using_dma ? ", DMA":"");
2936 idetape_add_settings(drive);
2939 static void ide_tape_remove(ide_drive_t *drive)
2941 idetape_tape_t *tape = drive->driver_data;
2943 ide_proc_unregister_driver(drive, tape->driver);
2945 ide_unregister_region(tape->disk);
2950 static void ide_tape_release(struct kref *kref)
2952 struct ide_tape_obj *tape = to_ide_tape(kref);
2953 ide_drive_t *drive = tape->drive;
2954 struct gendisk *g = tape->disk;
2956 BUG_ON(tape->first_stage != NULL || tape->merge_stage_size);
2958 drive->dsc_overlap = 0;
2959 drive->driver_data = NULL;
2960 device_destroy(idetape_sysfs_class, MKDEV(IDETAPE_MAJOR, tape->minor));
2961 device_destroy(idetape_sysfs_class,
2962 MKDEV(IDETAPE_MAJOR, tape->minor + 128));
2963 idetape_devs[tape->minor] = NULL;
2964 g->private_data = NULL;
2969 #ifdef CONFIG_IDE_PROC_FS
2970 static int proc_idetape_read_name
2971 (char *page, char **start, off_t off, int count, int *eof, void *data)
2973 ide_drive_t *drive = (ide_drive_t *) data;
2974 idetape_tape_t *tape = drive->driver_data;
2978 len = sprintf(out, "%s\n", tape->name);
2979 PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
2982 static ide_proc_entry_t idetape_proc[] = {
2983 { "capacity", S_IFREG|S_IRUGO, proc_ide_read_capacity, NULL },
2984 { "name", S_IFREG|S_IRUGO, proc_idetape_read_name, NULL },
2985 { NULL, 0, NULL, NULL }
2989 static int ide_tape_probe(ide_drive_t *);
2991 static ide_driver_t idetape_driver = {
2993 .owner = THIS_MODULE,
2995 .bus = &ide_bus_type,
2997 .probe = ide_tape_probe,
2998 .remove = ide_tape_remove,
2999 .version = IDETAPE_VERSION,
3001 .supports_dsc_overlap = 1,
3002 .do_request = idetape_do_request,
3003 .end_request = idetape_end_request,
3004 .error = __ide_error,
3005 .abort = __ide_abort,
3006 #ifdef CONFIG_IDE_PROC_FS
3007 .proc = idetape_proc,
3011 /* Our character device supporting functions, passed to register_chrdev. */
3012 static const struct file_operations idetape_fops = {
3013 .owner = THIS_MODULE,
3014 .read = idetape_chrdev_read,
3015 .write = idetape_chrdev_write,
3016 .ioctl = idetape_chrdev_ioctl,
3017 .open = idetape_chrdev_open,
3018 .release = idetape_chrdev_release,
3021 static int idetape_open(struct inode *inode, struct file *filp)
3023 struct gendisk *disk = inode->i_bdev->bd_disk;
3024 struct ide_tape_obj *tape;
3026 tape = ide_tape_get(disk);
3033 static int idetape_release(struct inode *inode, struct file *filp)
3035 struct gendisk *disk = inode->i_bdev->bd_disk;
3036 struct ide_tape_obj *tape = ide_tape_g(disk);
3043 static int idetape_ioctl(struct inode *inode, struct file *file,
3044 unsigned int cmd, unsigned long arg)
3046 struct block_device *bdev = inode->i_bdev;
3047 struct ide_tape_obj *tape = ide_tape_g(bdev->bd_disk);
3048 ide_drive_t *drive = tape->drive;
3049 int err = generic_ide_ioctl(drive, file, bdev, cmd, arg);
3051 err = idetape_blkdev_ioctl(drive, cmd, arg);
3055 static struct block_device_operations idetape_block_ops = {
3056 .owner = THIS_MODULE,
3057 .open = idetape_open,
3058 .release = idetape_release,
3059 .ioctl = idetape_ioctl,
3062 static int ide_tape_probe(ide_drive_t *drive)
3064 idetape_tape_t *tape;
3068 if (!strstr("ide-tape", drive->driver_req))
3070 if (!drive->present)
3072 if (drive->media != ide_tape)
3074 if (!idetape_identify_device(drive)) {
3075 printk(KERN_ERR "ide-tape: %s: not supported by this version of"
3076 " the driver\n", drive->name);
3080 printk(KERN_INFO "ide-tape: passing drive %s to ide-scsi"
3081 " emulation.\n", drive->name);
3084 tape = kzalloc(sizeof(idetape_tape_t), GFP_KERNEL);
3086 printk(KERN_ERR "ide-tape: %s: Can't allocate a tape struct\n",
3091 g = alloc_disk(1 << PARTN_BITS);
3095 ide_init_disk(g, drive);
3097 ide_proc_register_driver(drive, &idetape_driver);
3099 kref_init(&tape->kref);
3101 tape->drive = drive;
3102 tape->driver = &idetape_driver;
3105 g->private_data = &tape->driver;
3107 drive->driver_data = tape;
3109 mutex_lock(&idetape_ref_mutex);
3110 for (minor = 0; idetape_devs[minor]; minor++)
3112 idetape_devs[minor] = tape;
3113 mutex_unlock(&idetape_ref_mutex);
3115 idetape_setup(drive, tape, minor);
3117 device_create(idetape_sysfs_class, &drive->gendev,
3118 MKDEV(IDETAPE_MAJOR, minor), "%s", tape->name);
3119 device_create(idetape_sysfs_class, &drive->gendev,
3120 MKDEV(IDETAPE_MAJOR, minor + 128), "n%s", tape->name);
3122 g->fops = &idetape_block_ops;
3123 ide_register_region(g);
3133 static void __exit idetape_exit(void)
3135 driver_unregister(&idetape_driver.gen_driver);
3136 class_destroy(idetape_sysfs_class);
3137 unregister_chrdev(IDETAPE_MAJOR, "ht");
3140 static int __init idetape_init(void)
3143 idetape_sysfs_class = class_create(THIS_MODULE, "ide_tape");
3144 if (IS_ERR(idetape_sysfs_class)) {
3145 idetape_sysfs_class = NULL;
3146 printk(KERN_ERR "Unable to create sysfs class for ide tapes\n");
3151 if (register_chrdev(IDETAPE_MAJOR, "ht", &idetape_fops)) {
3152 printk(KERN_ERR "ide-tape: Failed to register chrdev"
3155 goto out_free_class;
3158 error = driver_register(&idetape_driver.gen_driver);
3160 goto out_free_driver;
3165 driver_unregister(&idetape_driver.gen_driver);
3167 class_destroy(idetape_sysfs_class);
3172 MODULE_ALIAS("ide:*m-tape*");
3173 module_init(idetape_init);
3174 module_exit(idetape_exit);
3175 MODULE_ALIAS_CHARDEV_MAJOR(IDETAPE_MAJOR);
3176 MODULE_DESCRIPTION("ATAPI Streaming TAPE Driver");
3177 MODULE_LICENSE("GPL");