/*
 *  scsi_lib.c Copyright (C) 1999 Eric Youngdale
 *
 *  SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *                        Based upon conversations with large numbers
 *                        of people at Linux Expo.
 */
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_request.h>

#include "scsi_priv.h"
#include "scsi_logging.h"
#define SG_MEMPOOL_NR		(sizeof(scsi_sg_pools)/sizeof(struct scsi_host_sg_pool))
#define SG_MEMPOOL_SIZE		32
struct scsi_host_sg_pool {
	size_t		size;
	char		*name;
	kmem_cache_t	*slab;
	mempool_t	*pool;
};
#if (SCSI_MAX_PHYS_SEGMENTS < 32)
#error SCSI_MAX_PHYS_SEGMENTS is too small
#endif
#define SP(x) { x, "sgpool-" #x }
static struct scsi_host_sg_pool scsi_sg_pools[] = {
	SP(8),
	SP(16),
	SP(32),
#if (SCSI_MAX_PHYS_SEGMENTS > 32)
	SP(64),
#if (SCSI_MAX_PHYS_SEGMENTS > 64)
	SP(128),
#if (SCSI_MAX_PHYS_SEGMENTS > 128)
	SP(256),
#if (SCSI_MAX_PHYS_SEGMENTS > 256)
#error SCSI_MAX_PHYS_SEGMENTS is too large
#endif
#endif
#endif
#endif
};
#undef SP
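/*
 * For illustration: SP(8) expands to { 8, "sgpool-8" }, i.e. a pool
 * entry sized for eight scatterlist segments with a matching slab-cache
 * name; scsi_init_queue() below creates one slab and mempool per entry.
 */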
/*
 * Function:    scsi_insert_special_req()
 *
 * Purpose:     Insert pre-formed request into request queue.
 *
 * Arguments:   sreq	- request that is ready to be queued.
 *              at_head	- boolean.  True if we should insert at head
 *                        of queue, false if we should insert at tail.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       This function is called from character device and from
 *              ioctl types of functions where the caller knows exactly
 *              what SCSI command needs to be issued.  The idea is that
 *              we merely inject the command into the queue (at the head
 *              for now), and then call the queue request function to actually
 *              process it.
 */
int scsi_insert_special_req(struct scsi_request *sreq, int at_head)
{
	/*
	 * Because users of this function are apt to reuse requests with no
	 * modification, we have to sanitise the request flags here
	 */
	sreq->sr_request->flags &= ~REQ_DONTPREP;
	blk_insert_request(sreq->sr_device->request_queue, sreq->sr_request,
			   at_head, sreq);
	return 0;
}
static void scsi_run_queue(struct request_queue *q);
/*
 * Function:	scsi_unprep_request()
 *
 * Purpose:	Remove all preparation done for a request, including its
 *		associated scsi_cmnd, so that it can be requeued.
 *
 * Arguments:	req	- request to unprepare
 *
 * Lock status:	Assumed that no locks are held upon entry.
 *
 * Returns:	Nothing.
 */
static void scsi_unprep_request(struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	req->flags &= ~REQ_DONTPREP;
	req->special = (req->flags & REQ_SPECIAL) ? cmd->sc_request : NULL;

	scsi_put_command(cmd);
}
/*
 * Function:    scsi_queue_insert()
 *
 * Purpose:     Insert a command in the midlevel queue.
 *
 * Arguments:   cmd    - command that we are adding to queue.
 *              reason - why we are inserting command to queue.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       We do this for one of two cases.  Either the host is busy
 *              and it cannot accept any more commands for the time being,
 *              or the device returned QUEUE_FULL and can accept no more
 *              commands.
 * Notes:       This could be called either from an interrupt context or a
 *              normal process context.
 */
int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_device *device = cmd->device;
	struct request_queue *q = device->request_queue;
	unsigned long flags;

	SCSI_LOG_MLQUEUE(1,
		 printk("Inserting command %p into mlqueue\n", cmd));
	/*
	 * Set the appropriate busy bit for the device/host.
	 *
	 * If the host/device isn't busy, assume that something actually
	 * completed, and that we should be able to queue a command now.
	 *
	 * Note that the prior mid-layer assumption that any host could
	 * always queue at least one command is now broken.  The mid-layer
	 * will implement a user specifiable stall (see
	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
	 * if a command is requeued with no other commands outstanding
	 * either for the device or for the host.
	 */
	if (reason == SCSI_MLQUEUE_HOST_BUSY)
		host->host_blocked = host->max_host_blocked;
	else if (reason == SCSI_MLQUEUE_DEVICE_BUSY)
		device->device_blocked = device->max_device_blocked;
	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	scsi_device_unbusy(device);
	/*
	 * Requeue this command.  It will go before all other commands
	 * that are already in the queue.
	 *
	 * NOTE: there is magic here about the way the queue is plugged if
	 * we have no outstanding commands.
	 *
	 * Although we *don't* plug the queue, we call the request
	 * function.  The SCSI request function detects the blocked condition
	 * and plugs the queue appropriately.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, cmd->request);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);

	return 0;
}
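/*
 * Example (illustrative sketch, not part of the original file): this path
 * is typically reached when a low-level driver's queuecommand() refuses a
 * command.  The hypothetical LLD below reports a full adapter, and the
 * midlayer reacts by requeueing via scsi_queue_insert().
 */
static inline int example_queuecommand(struct scsi_cmnd *cmd,
				       void (*done)(struct scsi_cmnd *))
{
	int adapter_full = 1;	/* stand-in for real adapter state */

	if (adapter_full)
		return SCSI_MLQUEUE_HOST_BUSY;

	/* ... otherwise hand cmd to the hardware and call done(cmd) later */
	return 0;
}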
/*
 * Function:    scsi_do_req
 *
 * Purpose:     Queue a SCSI request
 *
 * Arguments:   sreq	  - command descriptor.
 *              cmnd      - actual SCSI command to be performed.
 *              buffer    - data buffer.
 *              bufflen   - size of data buffer.
 *              done      - completion function to be run.
 *              timeout   - how long to let it run before timeout.
 *              retries   - number of retries we allow.
 *
 * Lock status: No locks held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:	This function is only used for queueing requests for things
 *		like ioctls and character device requests - this is because
 *		we essentially just inject a request into the queue for the
 *		device.
 *
 *		In order to support the scsi_device_quiesce function, we
 *		now inject requests on the *head* of the device queue
 *		rather than the tail.
 */
void scsi_do_req(struct scsi_request *sreq, const void *cmnd,
		 void *buffer, unsigned bufflen,
		 void (*done)(struct scsi_cmnd *),
		 int timeout, int retries)
{
	/*
	 * If the upper level driver is reusing these things, then
	 * we should release the low-level block now.  Another one will
	 * be allocated later when this request is getting queued.
	 */
	__scsi_release_request(sreq);
	/*
	 * Our own function scsi_done (which marks the host as not busy,
	 * disables the timeout counter, etc) will be called by us or by the
	 * host's queuecommand() path, and it in turn calls the completion
	 * function for the high level driver.
	 */
	memcpy(sreq->sr_cmnd, cmnd, sizeof(sreq->sr_cmnd));
	sreq->sr_bufflen = bufflen;
	sreq->sr_buffer = buffer;
	sreq->sr_allowed = retries;
	sreq->sr_done = done;
	sreq->sr_timeout_per_command = timeout;

	if (sreq->sr_cmd_len == 0)
		sreq->sr_cmd_len = COMMAND_SIZE(sreq->sr_cmnd[0]);
	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	scsi_insert_special_req(sreq, 1);
}
EXPORT_SYMBOL(scsi_do_req);
/* This is the end routine we get to if a command was never attached
 * to the request.  Simply complete the request without changing
 * rq_status; this will cause a DRIVER_ERROR. */
static void scsi_wait_req_end_io(struct request *req)
{
	BUG_ON(!req->waiting);

	complete(req->waiting);
}
void scsi_wait_req(struct scsi_request *sreq, const void *cmnd, void *buffer,
		   unsigned bufflen, int timeout, int retries)
{
	DECLARE_COMPLETION(wait);
	int write = (sreq->sr_data_direction == DMA_TO_DEVICE);
	struct request *req;

	req = blk_get_request(sreq->sr_device->request_queue, write,
			      __GFP_WAIT);
	if (bufflen && blk_rq_map_kern(sreq->sr_device->request_queue, req,
				       buffer, bufflen, __GFP_WAIT)) {
		sreq->sr_result = DRIVER_ERROR << 24;
		blk_put_request(req);
		return;
	}

	req->flags |= REQ_NOMERGE;
	req->waiting = &wait;
	req->end_io = scsi_wait_req_end_io;
	req->cmd_len = COMMAND_SIZE(((u8 *)cmnd)[0]);
	req->sense = sreq->sr_sense_buffer;
	req->sense_len = 0;
	memcpy(req->cmd, cmnd, req->cmd_len);
	req->timeout = timeout;
	req->flags |= REQ_BLOCK_PC;

	blk_insert_request(sreq->sr_device->request_queue, req,
			   sreq->sr_data_direction == DMA_TO_DEVICE, NULL);
	wait_for_completion(&wait);
	sreq->sr_request->waiting = NULL;
	sreq->sr_result = req->errors;
	if (req->errors)
		sreq->sr_result |= (DRIVER_ERROR << 24);

	blk_put_request(req);
}
EXPORT_SYMBOL(scsi_wait_req);
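/*
 * Example (illustrative sketch, not part of the original file): how an
 * upper-level driver of this era issues a synchronous TEST UNIT READY
 * through scsi_wait_req().  Assumes scsi_allocate_request() and
 * scsi_release_request() from <scsi/scsi_request.h>.
 */
static inline int example_sync_tur(struct scsi_device *sdev)
{
	unsigned char cmd[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
	struct scsi_request *sreq;
	int result;

	sreq = scsi_allocate_request(sdev, GFP_KERNEL);
	if (!sreq)
		return -ENOMEM;

	sreq->sr_data_direction = DMA_NONE;
	scsi_wait_req(sreq, cmd, NULL, 0, 30 * HZ, 3);
	result = sreq->sr_result;
	scsi_release_request(sreq);
	return result;
}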
/**
 * scsi_execute - insert request and wait for the result
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @data_direction: data direction
 * @buffer:	data buffer
 * @bufflen:	len of buffer
 * @sense:	optional sense buffer
 * @timeout:	request timeout in jiffies
 * @retries:	number of times to retry request
 * @flags:	or into request flags
 *
 * returns the req->errors value which is the scsi_cmnd result
 * field.
 **/
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
		 int data_direction, void *buffer, unsigned bufflen,
		 unsigned char *sense, int timeout, int retries, int flags)
{
	struct request *req;
	int write = (data_direction == DMA_TO_DEVICE);
	int ret = DRIVER_ERROR << 24;

	req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);

	if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
				       buffer, bufflen, __GFP_WAIT))
		goto out;

	req->cmd_len = COMMAND_SIZE(cmd[0]);
	memcpy(req->cmd, cmd, req->cmd_len);
	req->sense = sense;
	req->sense_len = 0;
	req->timeout = timeout;
	req->flags |= flags | REQ_BLOCK_PC | REQ_SPECIAL | REQ_QUIET;

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	blk_execute_rq(req->q, NULL, req, 1);

	ret = req->errors;
 out:
	blk_put_request(req);

	return ret;
}
EXPORT_SYMBOL(scsi_execute);
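/*
 * Example (illustrative sketch, not part of the original file): issuing a
 * standard INQUIRY through scsi_execute() with a raw sense buffer.  The
 * timeout and retry counts are arbitrary; a real caller decodes the return
 * value with the status/host/driver byte macros from <scsi/scsi.h>.
 */
static inline int example_do_inquiry(struct scsi_device *sdev,
				     unsigned char *buf, unsigned int len)
{
	unsigned char cmd[6] = { INQUIRY, 0, 0, 0, 0, 0 };
	unsigned char sense[SCSI_SENSE_BUFFERSIZE];

	cmd[4] = len & 0xff;	/* allocation length, single-byte CDB field */
	return scsi_execute(sdev, cmd, DMA_FROM_DEVICE, buf, len,
			    sense, 30 * HZ, 3, 0);
}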
int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
		     int data_direction, void *buffer, unsigned bufflen,
		     struct scsi_sense_hdr *sshdr, int timeout, int retries)
{
	char *sense = NULL;
	int result;

	if (sshdr) {
		sense = kmalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
		if (!sense)
			return DRIVER_ERROR << 24;
		memset(sense, 0, SCSI_SENSE_BUFFERSIZE);
	}
	result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
			      sense, timeout, retries, 0);
	if (sshdr)
		scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);

	kfree(sense);
	return result;
}
EXPORT_SYMBOL(scsi_execute_req);
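/*
 * Example (illustrative sketch, not part of the original file): with
 * scsi_execute_req() the caller never touches the raw sense buffer; any
 * sense data comes back pre-decoded in a scsi_sense_hdr.  Here, a READ
 * CAPACITY whose 8-byte payload lands in buf8.
 */
static inline int example_read_capacity(struct scsi_device *sdev,
					unsigned char *buf8,
					struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[10] = { READ_CAPACITY, 0, 0, 0, 0, 0, 0, 0, 0, 0 };

	return scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf8, 8,
				sshdr, 30 * HZ, 3);
}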
/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd	- command that is ready to be queued.
 *
 * Returns:     Nothing
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.  Typically this will
 *              be called once for each command, as required.
 */
static int scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
	cmd->serial_number = 0;

	memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer);

	if (cmd->cmd_len == 0)
		cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);

	/*
	 * We need saved copies of a number of fields - this is because
	 * error handling may need to overwrite these with different values
	 * to run different commands, and once error handling is complete,
	 * we will need to restore these values prior to running the actual
	 * command.
	 */
	cmd->old_use_sg = cmd->use_sg;
	cmd->old_cmd_len = cmd->cmd_len;
	cmd->sc_old_data_direction = cmd->sc_data_direction;
	cmd->old_underflow = cmd->underflow;
	memcpy(cmd->data_cmnd, cmd->cmnd, sizeof(cmd->cmnd));
	cmd->buffer = cmd->request_buffer;
	cmd->bufflen = cmd->request_bufflen;

	return 1;
}
/*
 * Function:   scsi_setup_cmd_retry()
 *
 * Purpose:    Restore the command state for a retry
 *
 * Arguments:  cmd	- command to be restored
 *
 * Returns:    Nothing
 *
 * Notes:      Immediately prior to retrying a command, we need
 *             to restore certain fields that we saved above.
 */
void scsi_setup_cmd_retry(struct scsi_cmnd *cmd)
{
	memcpy(cmd->cmnd, cmd->data_cmnd, sizeof(cmd->data_cmnd));
	cmd->request_buffer = cmd->buffer;
	cmd->request_bufflen = cmd->bufflen;
	cmd->use_sg = cmd->old_use_sg;
	cmd->cmd_len = cmd->old_cmd_len;
	cmd->sc_data_direction = cmd->sc_old_data_direction;
	cmd->underflow = cmd->old_underflow;
}
void scsi_device_unbusy(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	shost->host_busy--;
	if (unlikely(scsi_host_in_recovery(shost) &&
		     shost->host_failed))
		scsi_eh_wakeup(shost);
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);
	sdev->device_busy--;
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}
/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_device *sdev, *tmp;
	struct scsi_target *starget = scsi_target(current_sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	starget->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Call blk_run_queue for all LUNs on the target, starting with
	 * current_sdev. We race with others (to set starget_sdev_user),
	 * but in most cases, we will be first. Ideally, each LU on the
	 * target would get some limited time or requests on the target.
	 */
	blk_run_queue(current_sdev->request_queue);

	spin_lock_irqsave(shost->host_lock, flags);
	if (starget->starget_sdev_user)
		goto out;
	list_for_each_entry_safe(sdev, tmp, &starget->devices,
			same_target_siblings) {
		if (sdev == current_sdev)
			continue;
		if (scsi_device_get(sdev))
			continue;

		spin_unlock_irqrestore(shost->host_lock, flags);
		blk_run_queue(sdev->request_queue);
		spin_lock_irqsave(shost->host_lock, flags);

		scsi_device_put(sdev);
	}
 out:
	spin_unlock_irqrestore(shost->host_lock, flags);
}
/*
 * Function:	scsi_run_queue()
 *
 * Purpose:	Select a proper request queue to serve next
 *
 * Arguments:	q	- last request's queue
 *
 * Returns:	Nothing
 *
 * Notes:	The previous command was completely finished, start
 *		a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	if (sdev->single_lun)
		scsi_single_lun_run(sdev);

	spin_lock_irqsave(shost->host_lock, flags);
	while (!list_empty(&shost->starved_list) &&
	       !shost->host_blocked && !shost->host_self_blocked &&
		!((shost->can_queue > 0) &&
		  (shost->host_busy >= shost->can_queue))) {
		/*
		 * As long as shost is accepting commands and we have
		 * starved queues, call blk_run_queue. scsi_request_fn
		 * drops the queue_lock and can add us back to the
		 * starved_list.
		 *
		 * host_lock protects the starved_list and starved_entry.
		 * scsi_request_fn must get the host_lock before checking
		 * or modifying starved_list or starved_entry.
		 */
		sdev = list_entry(shost->starved_list.next,
				  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		spin_unlock_irqrestore(shost->host_lock, flags);

		blk_run_queue(sdev->request_queue);

		spin_lock_irqsave(shost->host_lock, flags);
		if (unlikely(!list_empty(&sdev->starved_entry)))
			/*
			 * sdev lost a race, and was put back on the
			 * starved list. This is unlikely but without this
			 * in theory we could loop forever.
			 */
			break;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);

	blk_run_queue(q);
}
/*
 * Function:	scsi_requeue_command()
 *
 * Purpose:	Handle post-processing of completed commands.
 *
 * Arguments:	q	- queue to operate on
 *		cmd	- command that may need to be requeued.
 *
 * Returns:	Nothing
 *
 * Notes:	After command completion, there may be blocks left
 *		over which weren't finished by the previous command
 *		this can be for a number of reasons - the main one is
 *		I/O errors in the middle of the request, in which case
 *		we need to request the blocks that come after the bad
 *		sector.
 * Notes:	Upon return, cmd is a stale pointer.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
	struct request *req = cmd->request;
	unsigned long flags;

	scsi_unprep_request(req);
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);
}
void scsi_next_command(struct scsi_cmnd *cmd)
{
	struct request_queue *q = cmd->device->request_queue;

	scsi_put_command(cmd);
	scsi_run_queue(q);
}
void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}
/*
 * Function:    scsi_end_request()
 *
 * Purpose:     Post-processing of completed commands (usually invoked at end
 *		of upper level post-processing and scsi_io_completion).
 *
 * Arguments:   cmd	 - command that is complete.
 *              uptodate - 1 if I/O indicates success, <= 0 for I/O error.
 *              bytes    - number of bytes of completed I/O
 *		requeue  - indicates whether we should requeue leftovers.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     cmd if requeue required, NULL otherwise.
 *
 * Notes:       This is called for block device requests in order to
 *              mark some number of sectors as complete.
 *
 *		We are guaranteeing that the request queue will be goosed
 *		at some point during this call.
 * Notes:	If cmd was requeued, upon return it will be a stale pointer.
 */
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
					  int bytes, int requeue)
{
	request_queue_t *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	unsigned long flags;

	/*
	 * If there are blocks left over at the end, set up the command
	 * to queue the remainder of them.
	 */
	if (end_that_request_chunk(req, uptodate, bytes)) {
		int leftover = (req->hard_nr_sectors << 9);

		if (blk_pc_request(req))
			leftover = req->data_len;

		/* kill remainder if no retries */
		if (!uptodate && blk_noretry_request(req))
			end_that_request_chunk(req, 0, leftover);
		else {
			if (requeue) {
				/*
				 * Bleah.  Leftovers again.  Stick the
				 * leftovers in the front of the
				 * queue, and goose the queue again.
				 */
				scsi_requeue_command(q, cmd);
				cmd = NULL;
			}
			return cmd;
		}
	}

	add_disk_randomness(req->rq_disk);

	spin_lock_irqsave(q->queue_lock, flags);
	if (blk_rq_tagged(req))
		blk_queue_end_tag(q, req);
	end_that_request_last(req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	/*
	 * This will goose the queue request function at the end, so we don't
	 * need to worry about launching another command.
	 */
	scsi_next_command(cmd);
	return NULL;
}
static struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
{
	struct scsi_host_sg_pool *sgp;
	struct scatterlist *sgl;

	BUG_ON(!cmd->use_sg);

	switch (cmd->use_sg) {
	case 1 ... 8:
		cmd->sglist_len = 0;
		break;
	case 9 ... 16:
		cmd->sglist_len = 1;
		break;
	case 17 ... 32:
		cmd->sglist_len = 2;
		break;
#if (SCSI_MAX_PHYS_SEGMENTS > 32)
	case 33 ... 64:
		cmd->sglist_len = 3;
		break;
#if (SCSI_MAX_PHYS_SEGMENTS > 64)
	case 65 ... 128:
		cmd->sglist_len = 4;
		break;
#if (SCSI_MAX_PHYS_SEGMENTS > 128)
	case 129 ... 256:
		cmd->sglist_len = 5;
		break;
#endif
#endif
#endif
	default:
		return NULL;
	}

	sgp = scsi_sg_pools + cmd->sglist_len;
	sgl = mempool_alloc(sgp->pool, gfp_mask);
	return sgl;
}
static void scsi_free_sgtable(struct scatterlist *sgl, int index)
{
	struct scsi_host_sg_pool *sgp;

	BUG_ON(index >= SG_MEMPOOL_NR);

	sgp = scsi_sg_pools + index;
	mempool_free(sgl, sgp->pool);
}
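/*
 * For illustration: the two helpers above are always used as a pair, with
 * cmd->sglist_len carrying the pool index between them.  A command with
 * use_sg == 40, for instance, maps to sglist_len 3 and is served from the
 * "sgpool-64" mempool (on kernels built with SCSI_MAX_PHYS_SEGMENTS > 32).
 */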
/*
 * Function:    scsi_release_buffers()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd	- command that we are bailing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       In the event that an upper level driver rejects a
 *		command, we must release resources allocated during
 *		the __init_io() function.  Primarily this would involve
 *		the scatter-gather table, and potentially any bounce
 *		buffers.
 */
static void scsi_release_buffers(struct scsi_cmnd *cmd)
{
	struct request *req = cmd->request;

	/*
	 * Free up any indirection buffers we allocated for DMA purposes.
	 */
	if (cmd->use_sg)
		scsi_free_sgtable(cmd->request_buffer, cmd->sglist_len);
	else if (cmd->request_buffer != req->buffer)
		kfree(cmd->request_buffer);

	/*
	 * Zero these out.  They now point to freed memory, and it is
	 * dangerous to hang onto the pointers.
	 */
	cmd->buffer  = NULL;
	cmd->bufflen = 0;
	cmd->request_buffer = NULL;
	cmd->request_bufflen = 0;
}
/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd   - command that is finished.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       This function is matched in terms of capabilities to
 *              the function that created the scatter-gather list.
 *              In other words, if there are no bounce buffers
 *              (the normal case for most drivers), we don't need
 *              the logic to deal with cleaning up afterwards.
 *
 *		We must do one of several things here:
 *
 *		a) Call scsi_end_request.  This will finish off the
 *		   specified number of sectors.  If we are done, the
 *		   command block will be released, and the queue
 *		   function will be goosed.  If we are not done, then
 *		   scsi_end_request will directly goose the queue.
 *
 *		b) We can just use scsi_requeue_command() here.  This would
 *		   be used if we just wanted to retry, for example.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes,
			unsigned int block_bytes)
{
	int result = cmd->result;
	int this_count = cmd->bufflen;
	request_queue_t *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	int clear_errors = 1;
	struct scsi_sense_hdr sshdr;
	int sense_valid = 0;
	int sense_deferred = 0;

	if (blk_complete_barrier_rq(q, req, good_bytes >> 9))
		return;

	/*
	 * Free up any indirection buffers we allocated for DMA purposes.
	 * For the case of a READ, we need to copy the data out of the
	 * bounce buffer and into the real buffer.
	 */
	if (cmd->use_sg)
		scsi_free_sgtable(cmd->buffer, cmd->sglist_len);
	else if (cmd->buffer != req->buffer) {
		if (rq_data_dir(req) == READ) {
			unsigned long flags;
			char *to = bio_kmap_irq(req->bio, &flags);
			memcpy(to, cmd->buffer, cmd->bufflen);
			bio_kunmap_irq(to, &flags);
		}
		kfree(cmd->buffer);
	}

	if (result) {
		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
		if (sense_valid)
			sense_deferred = scsi_sense_is_deferred(&sshdr);
	}
	if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
		req->errors = result;
		if (result) {
			clear_errors = 0;
			if (sense_valid && req->sense) {
				/*
				 * SG_IO wants current and deferred errors
				 */
				int len = 8 + cmd->sense_buffer[7];

				if (len > SCSI_SENSE_BUFFERSIZE)
					len = SCSI_SENSE_BUFFERSIZE;
				memcpy(req->sense, cmd->sense_buffer, len);
				req->sense_len = len;
			}
		} else
			req->data_len = cmd->resid;
	}

	/*
	 * Zero these out.  They now point to freed memory, and it is
	 * dangerous to hang onto the pointers.
	 */
	cmd->buffer  = NULL;
	cmd->bufflen = 0;
	cmd->request_buffer = NULL;
	cmd->request_bufflen = 0;

	/*
	 * Next deal with any sectors which we were able to correctly
	 * handle.
	 */
	if (good_bytes >= 0) {
		SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, %d bytes done.\n",
					      req->nr_sectors, good_bytes));
		SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n", cmd->use_sg));

		if (clear_errors)
			req->errors = 0;
		/*
		 * If multiple sectors are requested in one buffer, then
		 * they will have been finished off by the first command.
		 * If not, then we have a multi-buffer command.
		 *
		 * If block_bytes != 0, it means we had a medium error
		 * of some sort, and that we want to mark some number of
		 * sectors as not uptodate.  Thus we want to inhibit
		 * requeueing right here - we will requeue down below
		 * when we handle the bad sectors.
		 */

		/*
		 * If the command completed without error, then either
		 * finish off the rest of the command, or start a new one.
		 */
		if (scsi_end_request(cmd, 1, good_bytes, result == 0) == NULL)
			return;
	}
	/*
	 * Now, if we were good little boys and girls, Santa left us a request
	 * sense buffer.  We can extract information from this, so we
	 * can choose a block to remap, etc.
	 */
	if (sense_valid && !sense_deferred) {
		switch (sshdr.sense_key) {
		case UNIT_ATTENTION:
			if (cmd->device->removable) {
				/* detected disc change.  set a bit
				 * and quietly refuse further access.
				 */
				cmd->device->changed = 1;
				scsi_end_request(cmd, 0,
						this_count, 1);
				return;
			} else {
				/*
				 * Must have been a power glitch, or a
				 * bus reset.  Could not have been a
				 * media change, so we just retry the
				 * request and see what happens.
				 */
				scsi_requeue_command(q, cmd);
				return;
			}
			break;
		case ILLEGAL_REQUEST:
			/*
			 * If we had an ILLEGAL REQUEST returned, then we may
			 * have performed an unsupported command.  The only
			 * thing this should be would be a ten byte read where
			 * only a six byte read was supported.  Also, on a
			 * system where READ CAPACITY failed, we may have read
			 * past the end of the disk.
			 */
			if (cmd->device->use_10_for_rw &&
			    (cmd->cmnd[0] == READ_10 ||
			     cmd->cmnd[0] == WRITE_10)) {
				cmd->device->use_10_for_rw = 0;
				/*
				 * This will cause a retry with a 6-byte
				 * command.
				 */
				scsi_requeue_command(q, cmd);
				result = 0;
			} else {
				scsi_end_request(cmd, 0, this_count, 1);
				return;
			}
			break;
		case NOT_READY:
			/*
			 * If the device is in the process of becoming ready,
			 * retry.
			 */
			if (sshdr.asc == 0x04 && sshdr.ascq == 0x01) {
				scsi_requeue_command(q, cmd);
				return;
			}
			if (!(req->flags & REQ_QUIET))
				dev_printk(KERN_INFO,
					   &cmd->device->sdev_gendev,
					   "Device not ready.\n");
			scsi_end_request(cmd, 0, this_count, 1);
			return;
		case VOLUME_OVERFLOW:
			if (!(req->flags & REQ_QUIET)) {
				dev_printk(KERN_INFO,
					   &cmd->device->sdev_gendev,
					   "Volume overflow, CDB: ");
				__scsi_print_command(cmd->data_cmnd);
				scsi_print_sense("", cmd);
			}
			scsi_end_request(cmd, 0, block_bytes, 1);
			return;
		default:
			break;
		}
	} /* driver byte != 0 */
	if (host_byte(result) == DID_RESET) {
		/*
		 * Third party bus reset or reset for error
		 * recovery reasons.  Just retry the request
		 * and see what happens.
		 */
		scsi_requeue_command(q, cmd);
		return;
	}
	if (result) {
		if (!(req->flags & REQ_QUIET)) {
			dev_printk(KERN_INFO, &cmd->device->sdev_gendev,
				   "SCSI error: return code = 0x%x\n", result);

			if (driver_byte(result) & DRIVER_SENSE)
				scsi_print_sense("", cmd);
		}
		/*
		 * Mark a single buffer as not uptodate.  Queue the remainder.
		 * We sometimes get this cruft in the event that a medium error
		 * isn't properly reported.
		 */
		block_bytes = req->hard_cur_sectors << 9;
		if (!block_bytes)
			block_bytes = req->data_len;
		scsi_end_request(cmd, 0, block_bytes, 1);
	}
}
EXPORT_SYMBOL(scsi_io_completion);
/*
 * Function:    scsi_init_io()
 *
 * Purpose:     SCSI I/O initialize function.
 *
 * Arguments:   cmd   - Command descriptor we wish to initialize
 *
 * Returns:     0 on success
 *		BLKPREP_DEFER if the failure is retryable
 *		BLKPREP_KILL if the failure is fatal
 */
static int scsi_init_io(struct scsi_cmnd *cmd)
{
	struct request     *req = cmd->request;
	struct scatterlist *sgpnt;
	int		   count;

	/*
	 * if this is a rq->data based REQ_BLOCK_PC, setup for a non-sg xfer
	 */
	if ((req->flags & REQ_BLOCK_PC) && !req->bio) {
		cmd->request_bufflen = req->data_len;
		cmd->request_buffer = req->data;
		req->buffer = req->data;
		cmd->use_sg = 0;
		return 0;
	}

	/*
	 * we used to not use scatter-gather for single segment request,
	 * but now we do (it makes highmem I/O easier to support without
	 * kmapping pages)
	 */
	cmd->use_sg = req->nr_phys_segments;

	/*
	 * if sg table allocation fails, requeue request later.
	 */
	sgpnt = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
	if (unlikely(!sgpnt)) {
		scsi_unprep_request(req);
		return BLKPREP_DEFER;
	}

	cmd->request_buffer = (char *) sgpnt;
	cmd->request_bufflen = req->nr_sectors << 9;
	if (blk_pc_request(req))
		cmd->request_bufflen = req->data_len;
	req->buffer = NULL;

	/*
	 * Next, walk the list, and fill in the addresses and sizes of
	 * each segment.
	 */
	count = blk_rq_map_sg(req->q, req, cmd->request_buffer);

	/*
	 * mapped well, send it off
	 */
	if (likely(count <= cmd->use_sg)) {
		cmd->use_sg = count;
		return 0;
	}

	printk(KERN_ERR "Incorrect number of segments after building list\n");
	printk(KERN_ERR "counted %d, received %d\n", count, cmd->use_sg);
	printk(KERN_ERR "req nr_sec %lu, cur_nr_sec %u\n", req->nr_sectors,
			req->current_nr_sectors);

	/* release the command and kill it */
	scsi_release_buffers(cmd);
	scsi_put_command(cmd);
	return BLKPREP_KILL;
}
static int scsi_prepare_flush_fn(request_queue_t *q, struct request *rq)
{
	struct scsi_device *sdev = q->queuedata;
	struct scsi_driver *drv;

	if (sdev->sdev_state == SDEV_RUNNING) {
		drv = *(struct scsi_driver **) rq->rq_disk->private_data;

		if (drv->prepare_flush)
			return drv->prepare_flush(q, rq);
	}

	return 0;
}
static void scsi_end_flush_fn(request_queue_t *q, struct request *rq)
{
	struct scsi_device *sdev = q->queuedata;
	struct request *flush_rq = rq->end_io_data;
	struct scsi_driver *drv;

	if (flush_rq->errors) {
		printk("scsi: barrier error, disabling flush support\n");
		blk_queue_ordered(q, QUEUE_ORDERED_NONE);
	}

	if (sdev->sdev_state == SDEV_RUNNING) {
		drv = *(struct scsi_driver **) rq->rq_disk->private_data;
		drv->end_flush(q, rq);
	}
}
static int scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
			       sector_t *error_sector)
{
	struct scsi_device *sdev = q->queuedata;
	struct scsi_driver *drv;

	if (sdev->sdev_state != SDEV_RUNNING)
		return -ENXIO;

	drv = *(struct scsi_driver **) disk->private_data;
	if (drv->issue_flush)
		return drv->issue_flush(&sdev->sdev_gendev, error_sector);

	return -EOPNOTSUPP;
}
static void scsi_generic_done(struct scsi_cmnd *cmd)
{
	BUG_ON(!blk_pc_request(cmd->request));
	scsi_io_completion(cmd, cmd->result == 0 ? cmd->bufflen : 0, 0);
}
static int scsi_prep_fn(struct request_queue *q, struct request *req)
{
	struct scsi_device *sdev = q->queuedata;
	struct scsi_cmnd *cmd;
	int specials_only = 0;

	/*
	 * Just check to see if the device is online.  If it isn't, we
	 * refuse to process any commands.  The device must be brought
	 * online before trying any recovery commands
	 */
	if (unlikely(!scsi_device_online(sdev))) {
		printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to offline device\n",
		       sdev->host->host_no, sdev->id, sdev->lun);
		goto kill;
	}
	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
		/* OK, we're not in a running state don't prep
		 * user commands */
		if (sdev->sdev_state == SDEV_DEL) {
			/* Device is fully deleted, no commands
			 * at all allowed down */
			printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to dead device\n",
			       sdev->host->host_no, sdev->id, sdev->lun);
			goto kill;
		}
		/* OK, we only allow special commands (i.e. not
		 * user initiated ones */
		specials_only = sdev->sdev_state;
	}

	/*
	 * Find the actual device driver associated with this command.
	 * The SPECIAL requests are things like character device or
	 * ioctls, which did not originate from ll_rw_blk.  Note that
	 * the special field is also used to indicate the cmd for
	 * the remainder of a partially fulfilled request that can
	 * come up when there is a medium error.  We have to treat
	 * these two cases differently.  We differentiate by looking
	 * at request->cmd, as this tells us the real story.
	 */
	if (req->flags & REQ_SPECIAL && req->special) {
		struct scsi_request *sreq = req->special;

		if (sreq->sr_magic == SCSI_REQ_MAGIC) {
			cmd = scsi_get_command(sreq->sr_device, GFP_ATOMIC);
			if (unlikely(!cmd))
				goto defer;
			scsi_init_cmd_from_req(cmd, sreq);
		} else
			cmd = req->special;
	} else if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {

		if(unlikely(specials_only) && !(req->flags & REQ_SPECIAL)) {
			if(specials_only == SDEV_QUIESCE ||
			   specials_only == SDEV_BLOCK)
				goto defer;

			printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to device being removed\n",
			       sdev->host->host_no, sdev->id, sdev->lun);
			goto kill;
		}

		/*
		 * Now try and find a command block that we can use.
		 */
		if (!req->special) {
			cmd = scsi_get_command(sdev, GFP_ATOMIC);
			if (unlikely(!cmd))
				goto defer;
		} else
			cmd = req->special;

		/* pull a tag out of the request if we have one */
		cmd->tag = req->tag;
	} else {
		blk_dump_rq_flags(req, "SCSI bad req");
		goto kill;
	}

	/* note the overloading of req->special.  When the tag
	 * is active it always means cmd.  If the tag goes
	 * back for re-queueing, it may be reset */
	req->special = cmd;
	cmd->request = req;

	/*
	 * FIXME: drop the lock here because the functions below
	 * expect to be called without the queue lock held.  Also,
	 * previously, we dequeued the request before dropping the
	 * lock.  We hope REQ_STARTED prevents anything untoward from
	 * happening now.
	 */
	if (req->flags & (REQ_CMD | REQ_BLOCK_PC)) {
		struct scsi_driver *drv;
		int ret;

		/*
		 * This will do a couple of things:
		 *  1) Fill in the actual SCSI command.
		 *  2) Fill in any other upper-level specific fields
		 * (timeout).
		 *
		 * If this returns 0, it means that the request failed
		 * (reading past end of disk, reading offline device,
		 * etc).   This won't actually talk to the device, but
		 * some kinds of consistency checking may cause the
		 * request to be rejected immediately.
		 */

		/*
		 * This sets up the scatter-gather table (allocating if
		 * required).
		 */
		ret = scsi_init_io(cmd);
		switch(ret) {
			/* For BLKPREP_KILL/DEFER the cmd was released */
		case BLKPREP_KILL:
			goto kill;
		case BLKPREP_DEFER:
			goto defer;
		}

		/*
		 * Initialize the actual SCSI command for this request.
		 */
		if (req->rq_disk) {
			drv = *(struct scsi_driver **)req->rq_disk->private_data;
			if (unlikely(!drv->init_command(cmd))) {
				scsi_release_buffers(cmd);
				scsi_put_command(cmd);
				goto kill;
			}
		} else {
			memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
			cmd->cmd_len = req->cmd_len;
			if (rq_data_dir(req) == WRITE)
				cmd->sc_data_direction = DMA_TO_DEVICE;
			else if (req->data_len)
				cmd->sc_data_direction = DMA_FROM_DEVICE;
			else
				cmd->sc_data_direction = DMA_NONE;

			cmd->transfersize = req->data_len;
			cmd->allowed = 3;
			cmd->timeout_per_command = req->timeout;
			cmd->done = scsi_generic_done;
		}
	}

	/*
	 * The request is now prepped, no need to come back here
	 */
	req->flags |= REQ_DONTPREP;
	return BLKPREP_OK;

 defer:
	/* If we defer, the elv_next_request() returns NULL, but the
	 * queue must be restarted, so we plug here if no returning
	 * command will automatically do that. */
	if (sdev->device_busy == 0)
		blk_plug_device(q);
	return BLKPREP_DEFER;
 kill:
	req->errors = DID_NO_CONNECT << 16;
	return BLKPREP_KILL;
}
/*
 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
 * return 0.
 *
 * Called with the queue_lock held.
 */
static inline int scsi_dev_queue_ready(struct request_queue *q,
				  struct scsi_device *sdev)
{
	if (sdev->device_busy >= sdev->queue_depth)
		return 0;
	if (sdev->device_busy == 0 && sdev->device_blocked) {
		/*
		 * unblock after device_blocked iterates to zero
		 */
		if (--sdev->device_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
			   printk("scsi%d (%d:%d) unblocking device at"
				  " zero depth\n", sdev->host->host_no,
				  sdev->id, sdev->lun));
		} else {
			blk_plug_device(q);
			return 0;
		}
	}
	if (sdev->device_blocked)
		return 0;

	return 1;
}
/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0. We must end up running the queue again whenever 0 is
 * returned, else IO can hang.
 *
 * Called with host_lock held.
 */
static inline int scsi_host_queue_ready(struct request_queue *q,
				   struct Scsi_Host *shost,
				   struct scsi_device *sdev)
{
	if (scsi_host_in_recovery(shost))
		return 0;
	if (shost->host_busy == 0 && shost->host_blocked) {
		/*
		 * unblock after host_blocked iterates to zero
		 */
		if (--shost->host_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				printk("scsi%d unblocking host at zero depth\n",
					shost->host_no));
		} else {
			blk_plug_device(q);
			return 0;
		}
	}
	if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
	    shost->host_blocked || shost->host_self_blocked) {
		if (list_empty(&sdev->starved_entry))
			list_add_tail(&sdev->starved_entry, &shost->starved_list);
		return 0;
	}

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry))
		list_del_init(&sdev->starved_entry);

	return 1;
}
/*
 * Kill a request for a dead device
 */
static void scsi_kill_request(struct request *req, request_queue_t *q)
{
	struct scsi_cmnd *cmd = req->special;

	blkdev_dequeue_request(req);

	if (unlikely(cmd == NULL)) {
		printk(KERN_CRIT "impossible request in %s.\n",
				 __FUNCTION__);
		BUG();
	}

	scsi_init_cmd_errh(cmd);
	cmd->result = DID_NO_CONNECT << 16;
	atomic_inc(&cmd->device->iorequest_cnt);
	__scsi_done(cmd);
}
/*
 * Function:    scsi_request_fn()
 *
 * Purpose:     Main strategy routine for SCSI.
 *
 * Arguments:   q       - Pointer to actual queue.
 *
 * Returns:     Nothing
 *
 * Lock status: IO request lock assumed to be held when called.
 */
static void scsi_request_fn(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;
	struct scsi_cmnd *cmd;
	struct request *req;

	if (!sdev) {
		printk("scsi: killing requests for dead queue\n");
		while ((req = elv_next_request(q)) != NULL)
			scsi_kill_request(req, q);
		return;
	}

	if(!get_device(&sdev->sdev_gendev))
		/* We must be tearing the block queue down already */
		return;

	/*
	 * To start with, we keep looping until the queue is empty, or until
	 * the host is no longer able to accept any more requests.
	 */
	shost = sdev->host;
	while (!blk_queue_plugged(q)) {
		int rtn;
		/*
		 * get next queueable request.  We do this early to make sure
		 * that the request is fully prepared even if we cannot
		 * accept it.
		 */
		req = elv_next_request(q);
		if (!req || !scsi_dev_queue_ready(q, sdev))
			break;

		if (unlikely(!scsi_device_online(sdev))) {
			printk(KERN_ERR "scsi%d (%d:%d): rejecting I/O to offline device\n",
			       sdev->host->host_no, sdev->id, sdev->lun);
			scsi_kill_request(req, q);
			continue;
		}

		/*
		 * Remove the request from the request list.
		 */
		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
			blkdev_dequeue_request(req);
		sdev->device_busy++;

		spin_unlock(q->queue_lock);
		cmd = req->special;
		if (unlikely(cmd == NULL)) {
			printk(KERN_CRIT "impossible request in %s.\n"
					 "please mail a stack trace to "
					 "linux-scsi@vger.kernel.org",
					 __FUNCTION__);
			BUG();
		}
		spin_lock(shost->host_lock);

		if (!scsi_host_queue_ready(q, shost, sdev))
			goto not_ready;
		if (sdev->single_lun) {
			if (scsi_target(sdev)->starget_sdev_user &&
			    scsi_target(sdev)->starget_sdev_user != sdev)
				goto not_ready;
			scsi_target(sdev)->starget_sdev_user = sdev;
		}
		shost->host_busy++;

		/*
		 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
		 *		take the lock again.
		 */
		spin_unlock_irq(shost->host_lock);

		/*
		 * Finally, initialize any error handling parameters, and set up
		 * the timers for timeouts.
		 */
		scsi_init_cmd_errh(cmd);

		/*
		 * Dispatch the command to the low-level driver.
		 */
		rtn = scsi_dispatch_cmd(cmd);
		spin_lock_irq(q->queue_lock);
		if(rtn) {
			/* we're refusing the command; because of
			 * the way locks get dropped, we need to
			 * check here if plugging is required */
			if(sdev->device_busy == 0)
				blk_plug_device(q);

			break;
		}
	}

	goto out;

 not_ready:
	spin_unlock_irq(shost->host_lock);

	/*
	 * lock q, handle tag, requeue req, and decrement device_busy. We
	 * must return with queue_lock held.
	 *
	 * Decrementing device_busy without checking it is OK, as all such
	 * cases (host limits or settings) should run the queue at some
	 * later time.
	 */
	spin_lock_irq(q->queue_lock);
	blk_requeue_request(q, req);
	sdev->device_busy--;
	if(sdev->device_busy == 0)
		blk_plug_device(q);
 out:
	/* must be careful here...if we trigger the ->remove() function
	 * we cannot be holding the q lock */
	spin_unlock_irq(q->queue_lock);
	put_device(&sdev->sdev_gendev);
	spin_lock_irq(q->queue_lock);
}
u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
{
	struct device *host_dev;
	u64 bounce_limit = 0xffffffff;

	if (shost->unchecked_isa_dma)
		return BLK_BOUNCE_ISA;
	/*
	 * Platforms with virtual-DMA translation
	 * hardware have no practical limit.
	 */
	if (!PCI_DMA_BUS_IS_PHYS)
		return BLK_BOUNCE_ANY;

	host_dev = scsi_get_device(shost);
	if (host_dev && host_dev->dma_mask)
		bounce_limit = *host_dev->dma_mask;

	return bounce_limit;
}
EXPORT_SYMBOL(scsi_calculate_bounce_limit);
struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct request_queue *q;

	q = blk_init_queue(scsi_request_fn, NULL);
	if (!q)
		return NULL;

	blk_queue_prep_rq(q, scsi_prep_fn);

	blk_queue_max_hw_segments(q, shost->sg_tablesize);
	blk_queue_max_phys_segments(q, SCSI_MAX_PHYS_SEGMENTS);
	blk_queue_max_sectors(q, shost->max_sectors);
	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
	blk_queue_segment_boundary(q, shost->dma_boundary);
	blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);

	/*
	 * ordered tags are superior to flush ordering
	 */
	if (shost->ordered_tag)
		blk_queue_ordered(q, QUEUE_ORDERED_TAG);
	else if (shost->ordered_flush) {
		blk_queue_ordered(q, QUEUE_ORDERED_FLUSH);
		q->prepare_flush_fn = scsi_prepare_flush_fn;
		q->end_flush_fn = scsi_end_flush_fn;
	}

	if (!shost->use_clustering)
		clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
	return q;
}
void scsi_free_queue(struct request_queue *q)
{
	blk_cleanup_queue(q);
}
/*
 * Function:    scsi_block_requests()
 *
 * Purpose:     Utility function used by low-level drivers to prevent further
 *		commands from being queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 */
void scsi_block_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 1;
}
EXPORT_SYMBOL(scsi_block_requests);
/*
 * Function:    scsi_unblock_requests()
 *
 * Purpose:     Utility function used by low-level drivers to allow further
 *		commands to be queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 *
 *		This is done as an API function so that changes to the
 *		internals of the scsi mid-layer won't require wholesale
 *		changes to drivers that use this feature.
 */
void scsi_unblock_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 0;
	scsi_run_host_queues(shost);
}
EXPORT_SYMBOL(scsi_unblock_requests);
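/*
 * Example (illustrative sketch, not part of the original file): the
 * intended LLD pattern is to bracket a host-adapter reset, firmware
 * reload, or similar outage with a block/unblock pair.
 */
static inline void example_adapter_reset(struct Scsi_Host *shost)
{
	scsi_block_requests(shost);
	/* ... reset the adapter and wait for it to settle ... */
	scsi_unblock_requests(shost);
}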
int __init scsi_init_queue(void)
{
	int i;

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		int size = sgp->size * sizeof(struct scatterlist);

		sgp->slab = kmem_cache_create(sgp->name, size, 0,
				SLAB_HWCACHE_ALIGN, NULL, NULL);
		if (!sgp->slab) {
			printk(KERN_ERR "SCSI: can't init sg slab %s\n",
					sgp->name);
		}

		sgp->pool = mempool_create(SG_MEMPOOL_SIZE,
				mempool_alloc_slab, mempool_free_slab,
				sgp->slab);
		if (!sgp->pool) {
			printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
					sgp->name);
		}
	}

	return 0;
}
void scsi_exit_queue(void)
{
	int i;

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		mempool_destroy(sgp->pool);
		kmem_cache_destroy(sgp->slab);
	}
}
/**
 *	scsi_mode_sense - issue a mode sense, falling back from ten to
 *		six bytes if necessary.
 *	@sdev:	SCSI device to be queried
 *	@dbd:	set if mode sense will allow block descriptors to be returned
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data:	returns a structure abstracting the mode header data
 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
 *		must be SCSI_SENSE_BUFFERSIZE big.
 *
 *	Returns the scsi_execute_req() result: zero if successful, non-zero
 *	otherwise.  On success, the header offset (either 4 or 8 depending
 *	on whether a six or ten byte command was issued) is reported in
 *	data->header_length.
 **/
int
scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
		unsigned char *buffer, int len, int timeout, int retries,
		struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) {
	unsigned char cmd[12];
	int use_10_for_ms;
	int header_length;
	int result;
	struct scsi_sense_hdr my_sshdr;

	memset(data, 0, sizeof(*data));
	memset(&cmd[0], 0, 12);
	cmd[1] = dbd & 0x18;	/* allows DBD and LLBA bits */
	cmd[2] = modepage;

	/* caller might not be interested in sense, but we need it */
	if (!sshdr)
		sshdr = &my_sshdr;

 retry:
	use_10_for_ms = sdev->use_10_for_ms;

	if (use_10_for_ms) {
		if (len < 8)
			len = 8;

		cmd[0] = MODE_SENSE_10;
		cmd[8] = len;
		header_length = 8;
	} else {
		if (len < 4)
			len = 4;

		cmd[0] = MODE_SENSE;
		cmd[4] = len;
		header_length = 4;
	}

	memset(buffer, 0, len);

	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
				  sshdr, timeout, retries);

	/* This code looks awful: what it's doing is making sure an
	 * ILLEGAL REQUEST sense return identifies the actual command
	 * byte as the problem.  MODE_SENSE commands can return
	 * ILLEGAL REQUEST if the code page isn't supported */

	if (use_10_for_ms && !scsi_status_is_good(result) &&
	    (driver_byte(result) & DRIVER_SENSE)) {
		if (scsi_sense_valid(sshdr)) {
			if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
			    (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
				/*
				 * Invalid command operation code
				 */
				sdev->use_10_for_ms = 0;
				goto retry;
			}
		}
	}

	if(scsi_status_is_good(result)) {
		data->header_length = header_length;
		if(use_10_for_ms) {
			data->length = buffer[0]*256 + buffer[1] + 2;
			data->medium_type = buffer[2];
			data->device_specific = buffer[3];
			data->longlba = buffer[4] & 0x01;
			data->block_descriptor_length = buffer[6]*256
				+ buffer[7];
		} else {
			data->length = buffer[0] + 1;
			data->medium_type = buffer[1];
			data->device_specific = buffer[2];
			data->block_descriptor_length = buffer[3];
		}
	}

	return result;
}
EXPORT_SYMBOL(scsi_mode_sense);
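/*
 * Example (illustrative sketch, not part of the original file): reading
 * the caching mode page (0x08).  On success, data.header_length gives the
 * offset of the mode pages within buf.
 */
static inline int example_read_caching_page(struct scsi_device *sdev,
					    unsigned char *buf, int len)
{
	struct scsi_mode_data data;
	struct scsi_sense_hdr sshdr;

	return scsi_mode_sense(sdev, 0 /* dbd */, 0x08 /* caching page */,
			       buf, len, 30 * HZ, 3, &data, &sshdr);
}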
int
scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries)
{
	char cmd[] = {
		TEST_UNIT_READY, 0, 0, 0, 0, 0,
	};
	struct scsi_sense_hdr sshdr;
	int result;

	result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
				  timeout, retries);

	if ((driver_byte(result) & DRIVER_SENSE) && sdev->removable) {

		if ((scsi_sense_valid(&sshdr)) &&
		    ((sshdr.sense_key == UNIT_ATTENTION) ||
		     (sshdr.sense_key == NOT_READY))) {
			sdev->changed = 1;
			result = 0;
		}
	}
	return result;
}
EXPORT_SYMBOL(scsi_test_unit_ready);
/**
 *	scsi_device_set_state - Take the given device through the device
 *		state model.
 *	@sdev:	scsi device to change the state of.
 *	@state:	state to change to.
 *
 *	Returns zero if successful or an error if the requested
 *	transition is illegal.
 **/
int
scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
{
	enum scsi_device_state oldstate = sdev->sdev_state;

	if (state == oldstate)
		return 0;

	switch (state) {
	case SDEV_CREATED:
		/* There are no legal states that come back to
		 * created.  This is the manually initialised start
		 * state */
		goto illegal;

	/* (the per-state legality checks for the remaining target
	 * states are elided in this excerpt) */
	}
	sdev->sdev_state = state;
	return 0;

 illegal:
	SCSI_LOG_ERROR_RECOVERY(1,
				dev_printk(KERN_ERR, &sdev->sdev_gendev,
					   "Illegal state transition %s->%s\n",
					   scsi_device_state_name(oldstate),
					   scsi_device_state_name(state))
				);
	return -EINVAL;
}
EXPORT_SYMBOL(scsi_device_set_state);
/**
 *	scsi_device_quiesce - Block user issued commands.
 *	@sdev:	scsi device to quiesce.
 *
 *	This works by trying to transition to the SDEV_QUIESCE state
 *	(which must be a legal transition).  When the device is in this
 *	state, only special requests will be accepted, all others will
 *	be deferred.  Since special requests may also be requeued requests,
 *	a successful return doesn't guarantee the device will be
 *	totally quiescent.
 *
 *	Must be called with user context, may sleep.
 *
 *	Returns zero if successful or an error if not.
 **/
int
scsi_device_quiesce(struct scsi_device *sdev)
{
	int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
	if (err)
		return err;

	scsi_run_queue(sdev->request_queue);
	while (sdev->device_busy) {
		msleep_interruptible(200);
		scsi_run_queue(sdev->request_queue);
	}
	return 0;
}
EXPORT_SYMBOL(scsi_device_quiesce);
/**
 *	scsi_device_resume - Restart user issued commands to a quiesced device.
 *	@sdev:	scsi device to resume.
 *
 *	Moves the device from quiesced back to running and restarts the
 *	queues.
 *
 *	Must be called with user context, may sleep.
 **/
void
scsi_device_resume(struct scsi_device *sdev)
{
	if(scsi_device_set_state(sdev, SDEV_RUNNING))
		return;
	scsi_run_queue(sdev->request_queue);
}
EXPORT_SYMBOL(scsi_device_resume);
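/*
 * Example (illustrative sketch, not part of the original file): a
 * maintenance operation, e.g. a firmware download issued through
 * scsi_execute(), wants the device quiet but still able to accept
 * specially-injected commands.
 */
static inline int example_quiet_maintenance(struct scsi_device *sdev)
{
	int err = scsi_device_quiesce(sdev);
	if (err)
		return err;

	/* ... issue maintenance commands via scsi_execute() here ... */

	scsi_device_resume(sdev);
	return 0;
}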
static void
device_quiesce_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_quiesce(sdev);
}

void
scsi_target_quiesce(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_quiesce_fn);
}
EXPORT_SYMBOL(scsi_target_quiesce);
static void
device_resume_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_resume(sdev);
}

void
scsi_target_resume(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_resume_fn);
}
EXPORT_SYMBOL(scsi_target_resume);
/**
 * scsi_internal_device_block - internal function to put a device
 *				temporarily into the SDEV_BLOCK state
 * @sdev:	device to block
 *
 * Block request made by scsi lld's to temporarily stop all
 * scsi commands on the specified device.  Called from interrupt
 * or normal process context.
 *
 * Returns zero if successful or error if not
 *
 * Notes:
 *	This routine transitions the device to the SDEV_BLOCK state
 *	(which must be a legal transition).  When the device is in this
 *	state, all commands are deferred until the scsi lld reenables
 *	the device with scsi_device_unblock or device_block_tmo fires.
 *	This routine assumes the host_lock is held on entry.
 **/
int
scsi_internal_device_block(struct scsi_device *sdev)
{
	request_queue_t *q = sdev->request_queue;
	unsigned long flags;
	int err = 0;

	err = scsi_device_set_state(sdev, SDEV_BLOCK);
	if (err)
		return err;

	/*
	 * The device has transitioned to SDEV_BLOCK.  Stop the
	 * block layer from calling the midlayer with this device's
	 * request queue.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_stop_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_block);
/**
 * scsi_internal_device_unblock - resume a device after a block request
 * @sdev:	device to resume
 *
 * Called by scsi lld's or the midlayer to restart the device queue
 * for the previously suspended scsi device.  Called from interrupt or
 * normal process context.
 *
 * Returns zero if successful or error if not.
 *
 * Notes:
 *	This routine transitions the device to the SDEV_RUNNING state
 *	(which must be a legal transition) allowing the midlayer to
 *	goose the queue for this device.  This routine assumes the
 *	host_lock is held upon entry.
 **/
int
scsi_internal_device_unblock(struct scsi_device *sdev)
{
	request_queue_t *q = sdev->request_queue;
	int err;
	unsigned long flags;

	/*
	 * Try to transition the scsi device to SDEV_RUNNING
	 * and goose the device queue if successful.
	 */
	err = scsi_device_set_state(sdev, SDEV_RUNNING);
	if (err)
		return err;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
static void
device_block(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_block(sdev);
}

static int
target_block(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	return 0;
}

void
scsi_target_block(struct device *dev)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	else
		device_for_each_child(dev, NULL, target_block);
}
EXPORT_SYMBOL_GPL(scsi_target_block);
static void
device_unblock(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_unblock(sdev);
}

static int
target_unblock(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_unblock);
	return 0;
}

void
scsi_target_unblock(struct device *dev)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_unblock);
	else
		device_for_each_child(dev, NULL, target_unblock);
}
EXPORT_SYMBOL_GPL(scsi_target_unblock);
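/*
 * Example (illustrative sketch, not part of the original file): a
 * transport class noticing a temporary link loss can freeze every LUN
 * behind a target and thaw them once the link returns.
 */
static inline void example_link_bounce(struct device *target_dev)
{
	scsi_target_block(target_dev);
	/* ... wait for the transport to re-establish the link ... */
	scsi_target_unblock(target_dev);
}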