1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of version 2 of the GNU General *
11 * Public License as published by the Free Software Foundation. *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID. See the GNU General Public License for *
18 * more details, a copy of which can be found in the file COPYING *
19 * included with this package. *
20 *******************************************************************/
21 #include <linux/pci.h>
22 #include <linux/interrupt.h>
23 #include <linux/delay.h>
24 #include <asm/unaligned.h>
26 #include <scsi/scsi.h>
27 #include <scsi/scsi_device.h>
28 #include <scsi/scsi_eh.h>
29 #include <scsi/scsi_host.h>
30 #include <scsi/scsi_tcq.h>
31 #include <scsi/scsi_transport_fc.h>
33 #include "lpfc_version.h"
37 #include "lpfc_disc.h"
38 #include "lpfc_scsi.h"
40 #include "lpfc_logmsg.h"
41 #include "lpfc_crtn.h"
42 #include "lpfc_vport.h"
44 #define LPFC_RESET_WAIT 2
45 #define LPFC_ABORT_WAIT 2
49 static char *dif_op_str[] = {
51 "SCSI_PROT_READ_INSERT",
52 "SCSI_PROT_WRITE_STRIP",
53 "SCSI_PROT_READ_STRIP",
54 "SCSI_PROT_WRITE_INSERT",
55 "SCSI_PROT_READ_PASS",
56 "SCSI_PROT_WRITE_PASS",
57 "SCSI_PROT_READ_CONVERT",
58 "SCSI_PROT_WRITE_CONVERT"
62 lpfc_debug_save_data(struct scsi_cmnd *cmnd)
65 struct scatterlist *sgde = scsi_sglist(cmnd);
67 if (!_dump_buf_data) {
68 printk(KERN_ERR "BLKGRD ERROR %s _dump_buf_data is NULL\n",
75 printk(KERN_ERR "BLKGRD ERROR: data scatterlist is null\n");
79 dst = (void *) _dump_buf_data;
82 memcpy(dst, src, sgde->length);
89 lpfc_debug_save_dif(struct scsi_cmnd *cmnd)
92 struct scatterlist *sgde = scsi_prot_sglist(cmnd);
95 printk(KERN_ERR "BLKGRD ERROR %s _dump_buf_dif is NULL\n",
101 printk(KERN_ERR "BLKGRD ERROR: prot scatterlist is null\n");
108 memcpy(dst, src, sgde->length);
110 sgde = sg_next(sgde);
115 * lpfc_update_stats - Update statistical data for the command completion
116 * @phba: Pointer to HBA object.
117 * @lpfc_cmd: lpfc scsi command object pointer.
119 * This function is called on command completion and updates the
120 * statistical data for that completion.
123 lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
125 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
126 struct lpfc_nodelist *pnode = rdata->pnode;
127 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
129 struct Scsi_Host *shost = cmd->device->host;
130 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
131 unsigned long latency;
137 latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
139 spin_lock_irqsave(shost->host_lock, flags);
140 if (!vport->stat_data_enabled ||
141 vport->stat_data_blocked ||
143 (phba->bucket_type == LPFC_NO_BUCKET)) {
144 spin_unlock_irqrestore(shost->host_lock, flags);
148 if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
149 i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
151 /* check array subscript bounds */
154 else if (i >= LPFC_MAX_BUCKET_COUNT)
155 i = LPFC_MAX_BUCKET_COUNT - 1;
157 for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
158 if (latency <= (phba->bucket_base +
159 ((1<<i)*phba->bucket_step)))
163 pnode->lat_data[i].cmd_count++;
164 spin_unlock_irqrestore(shost->host_lock, flags);
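/*
 * Illustrative sketch (not from the driver source): with hypothetical config
 * values bucket_base = 0 and bucket_step = 10 (ms), the linear bucket index
 * computed above works out as
 *
 *	i = (latency + 10 - 1 - 0) / 10;
 *
 * so a 25 ms completion lands in bucket 3 and a 5 ms completion in bucket 1,
 * with anything past the end clamped to LPFC_MAX_BUCKET_COUNT - 1.
 */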
168 * lpfc_send_sdev_queuedepth_change_event - Posts a queuedepth change event
169 * @phba: Pointer to HBA context object.
170 * @vport: Pointer to vport object.
171 * @ndlp: Pointer to FC node associated with the target.
172 * @lun: Lun number of the scsi device.
173 * @old_val: Old value of the queue depth.
174 * @new_val: New value of the queue depth.
176 * This function sends an event to the mgmt application indicating
177 * there is a change in the scsi device queue depth.
180 lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
181 struct lpfc_vport *vport,
182 struct lpfc_nodelist *ndlp,
187 struct lpfc_fast_path_event *fast_path_evt;
190 fast_path_evt = lpfc_alloc_fast_evt(phba);
194 fast_path_evt->un.queue_depth_evt.scsi_event.event_type =
196 fast_path_evt->un.queue_depth_evt.scsi_event.subcategory =
197 LPFC_EVENT_VARQUEDEPTH;
199 /* Report all luns with change in queue depth */
200 fast_path_evt->un.queue_depth_evt.scsi_event.lun = lun;
201 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
202 memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwpn,
203 &ndlp->nlp_portname, sizeof(struct lpfc_name));
204 memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwnn,
205 &ndlp->nlp_nodename, sizeof(struct lpfc_name));
208 fast_path_evt->un.queue_depth_evt.oldval = old_val;
209 fast_path_evt->un.queue_depth_evt.newval = new_val;
210 fast_path_evt->vport = vport;
212 fast_path_evt->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
213 spin_lock_irqsave(&phba->hbalock, flags);
214 list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
215 spin_unlock_irqrestore(&phba->hbalock, flags);
216 lpfc_worker_wake_up(phba);
222 * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
223 * @phba: The Hba for which this call is being executed.
225 * This routine is called when there is a resource error in the driver or
226 * firmware. It posts a WORKER_RAMP_DOWN_QUEUE event for @phba, at most
227 * one event each second, and wakes up the worker thread of @phba to
228 * process the event.
230 * This routine should be called with no lock held.
233 lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
238 spin_lock_irqsave(&phba->hbalock, flags);
239 atomic_inc(&phba->num_rsrc_err);
240 phba->last_rsrc_error_time = jiffies;
242 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
243 spin_unlock_irqrestore(&phba->hbalock, flags);
247 phba->last_ramp_down_time = jiffies;
249 spin_unlock_irqrestore(&phba->hbalock, flags);
251 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
252 evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
254 phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
255 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
258 lpfc_worker_wake_up(phba);
263 * lpfc_rampup_queue_depth - Post RAMP_UP_QUEUE event for worker thread
264 * @phba: The Hba for which this call is being executed.
266 * This routine posts a WORKER_RAMP_UP_QUEUE event for the @phba vport. It
267 * posts at most 1 event every 5 minutes after last_ramp_up_time or
268 * last_rsrc_error_time. This routine wakes up the worker thread of @phba
269 * to process the WORKER_RAMP_UP_QUEUE event.
271 * This routine should be called with no lock held.
274 lpfc_rampup_queue_depth(struct lpfc_vport *vport,
275 uint32_t queue_depth)
278 struct lpfc_hba *phba = vport->phba;
280 atomic_inc(&phba->num_cmd_success);
282 if (vport->cfg_lun_queue_depth <= queue_depth)
284 spin_lock_irqsave(&phba->hbalock, flags);
285 if (((phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) > jiffies) ||
286 ((phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL) > jiffies)) {
287 spin_unlock_irqrestore(&phba->hbalock, flags);
290 phba->last_ramp_up_time = jiffies;
291 spin_unlock_irqrestore(&phba->hbalock, flags);
293 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
294 evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE;
296 phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
297 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
300 lpfc_worker_wake_up(phba);
305 * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
306 * @phba: The Hba for which this call is being executed.
308 * This routine is called to process the WORKER_RAMP_DOWN_QUEUE event for the
309 * worker thread. It reduces the queue depth for all scsi devices on each
310 * vport associated with @phba.
313 lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
315 struct lpfc_vport **vports;
316 struct Scsi_Host *shost;
317 struct scsi_device *sdev;
318 unsigned long new_queue_depth, old_queue_depth;
319 unsigned long num_rsrc_err, num_cmd_success;
321 struct lpfc_rport_data *rdata;
323 num_rsrc_err = atomic_read(&phba->num_rsrc_err);
324 num_cmd_success = atomic_read(&phba->num_cmd_success);
326 vports = lpfc_create_vport_work_array(phba);
328 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
329 shost = lpfc_shost_from_vport(vports[i]);
330 shost_for_each_device(sdev, shost) {
332 sdev->queue_depth * num_rsrc_err /
333 (num_rsrc_err + num_cmd_success);
334 if (!new_queue_depth)
335 new_queue_depth = sdev->queue_depth - 1;
337 new_queue_depth = sdev->queue_depth -
339 old_queue_depth = sdev->queue_depth;
340 if (sdev->ordered_tags)
341 scsi_adjust_queue_depth(sdev,
345 scsi_adjust_queue_depth(sdev,
348 rdata = sdev->hostdata;
350 lpfc_send_sdev_queuedepth_change_event(
353 sdev->lun, old_queue_depth,
357 lpfc_destroy_vport_work_array(phba, vports);
358 atomic_set(&phba->num_rsrc_err, 0);
359 atomic_set(&phba->num_cmd_success, 0);
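/*
 * Illustrative sketch (hypothetical counts, not from the source): the
 * proportional cut computed in the loop above. With a current queue depth
 * of 30, num_rsrc_err = 5 and num_cmd_success = 45:
 *
 *	cut = 30 * 5 / (5 + 45);	so cut = 3
 *	new depth = 30 - 3 = 27
 *
 * If the scaled cut rounds down to zero, the depth is simply reduced by one.
 */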
363 * lpfc_ramp_up_queue_handler - WORKER_RAMP_UP_QUEUE event handler
364 * @phba: The Hba for which this call is being executed.
366 * This routine is called to process the WORKER_RAMP_UP_QUEUE event for the
367 * worker thread. It increases the queue depth for all scsi devices on each
368 * vport associated with @phba by 1. This routine also resets @phba
369 * num_rsrc_err and num_cmd_success to zero.
372 lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
374 struct lpfc_vport **vports;
375 struct Scsi_Host *shost;
376 struct scsi_device *sdev;
378 struct lpfc_rport_data *rdata;
380 vports = lpfc_create_vport_work_array(phba);
382 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
383 shost = lpfc_shost_from_vport(vports[i]);
384 shost_for_each_device(sdev, shost) {
385 if (vports[i]->cfg_lun_queue_depth <=
388 if (sdev->ordered_tags)
389 scsi_adjust_queue_depth(sdev,
391 sdev->queue_depth+1);
393 scsi_adjust_queue_depth(sdev,
395 sdev->queue_depth+1);
396 rdata = sdev->hostdata;
398 lpfc_send_sdev_queuedepth_change_event(
402 sdev->queue_depth - 1,
406 lpfc_destroy_vport_work_array(phba, vports);
407 atomic_set(&phba->num_rsrc_err, 0);
408 atomic_set(&phba->num_cmd_success, 0);
412 * lpfc_scsi_dev_block - set all scsi hosts to block state
413 * @phba: Pointer to HBA context object.
415 * This function walks the vport list and sets each SCSI host to block state
416 * by invoking the fc_remote_port_delete() routine. This function is invoked
417 * by EEH when the device's PCI slot has been permanently disabled.
420 lpfc_scsi_dev_block(struct lpfc_hba *phba)
422 struct lpfc_vport **vports;
423 struct Scsi_Host *shost;
424 struct scsi_device *sdev;
425 struct fc_rport *rport;
428 vports = lpfc_create_vport_work_array(phba);
430 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
431 shost = lpfc_shost_from_vport(vports[i]);
432 shost_for_each_device(sdev, shost) {
433 rport = starget_to_rport(scsi_target(sdev));
434 fc_remote_port_delete(rport);
437 lpfc_destroy_vport_work_array(phba, vports);
441 * lpfc_new_scsi_buf - Scsi buffer allocator
442 * @vport: The virtual port for which this call is being executed.
444 * This routine allocates a scsi buffer, which contains all the necessary
445 * information needed to initiate a SCSI I/O. The non-DMAable buffer region
446 * contains information to build the IOCB. The DMAable region contains
447 * memory for the FCP CMND, FCP RSP, and the initial BPL. In addition to
448 * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL
449 * and the BPL BDE is setup in the IOCB.
453 * Pointer to lpfc_scsi_buf data structure - Success
455 static struct lpfc_scsi_buf *
456 lpfc_new_scsi_buf(struct lpfc_vport *vport)
458 struct lpfc_hba *phba = vport->phba;
459 struct lpfc_scsi_buf *psb;
460 struct ulp_bde64 *bpl;
462 dma_addr_t pdma_phys_fcp_cmd;
463 dma_addr_t pdma_phys_fcp_rsp;
464 dma_addr_t pdma_phys_bpl;
467 psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
472 * Get memory from the pci pool to map the virt space to pci bus space
473 * for an I/O. The DMA buffer includes space for the struct fcp_cmnd,
474 * struct fcp_rsp and the number of bde's necessary to support the
477 psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
484 /* Initialize virtual ptrs to dma_buf region. */
485 memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
487 /* Allocate iotag for psb->cur_iocbq. */
488 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
490 pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
491 psb->data, psb->dma_handle);
495 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
497 psb->fcp_cmnd = psb->data;
498 psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
499 psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
500 sizeof(struct fcp_rsp);
502 /* Initialize local short-hand pointers. */
504 pdma_phys_fcp_cmd = psb->dma_handle;
505 pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
506 pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
507 sizeof(struct fcp_rsp);
510 * The first two bdes are the FCP_CMD and FCP_RSP. The balance are sg
511 * list bdes. Initialize the first two and leave the rest for
514 bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
515 bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
516 bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
517 bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
518 bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
520 /* Setup the physical region for the FCP RSP */
521 bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
522 bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
523 bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
524 bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
525 bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
528 * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
529 * initialize it with all known data now.
531 iocb = &psb->cur_iocbq.iocb;
532 iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
533 if ((phba->sli_rev == 3) &&
534 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
535 /* fill in immediate fcp command BDE */
536 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
537 iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
538 iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
540 iocb->un.fcpi64.bdl.addrHigh = 0;
541 iocb->ulpBdeCount = 0;
543 /* fill in response BDE */
544 iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
545 iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
546 sizeof(struct fcp_rsp);
547 iocb->unsli3.fcp_ext.rbde.addrLow =
548 putPaddrLow(pdma_phys_fcp_rsp);
549 iocb->unsli3.fcp_ext.rbde.addrHigh =
550 putPaddrHigh(pdma_phys_fcp_rsp);
552 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
553 iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
554 iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_bpl);
555 iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_bpl);
556 iocb->ulpBdeCount = 1;
559 iocb->ulpClass = CLASS3;
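/*
 * Layout sketch of the single DMA buffer carved up above (offsets follow
 * directly from the struct sizes used in this function):
 *
 *	psb->data + 0                                  : struct fcp_cmnd
 *	psb->data + sizeof(struct fcp_cmnd)            : struct fcp_rsp
 *	psb->data + sizeof(struct fcp_cmnd)
 *	          + sizeof(struct fcp_rsp)             : BPL, with bpl[0] and
 *	                                                 bpl[1] describing the
 *	                                                 CMND and RSP buffers
 *
 * The bus addresses pdma_phys_fcp_cmd/rsp/bpl are the same offsets applied
 * to psb->dma_handle.
 */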
565 * lpfc_get_scsi_buf - Get a scsi buffer from the HBA's lpfc_scsi_buf_list
566 * @phba: The Hba for which this call is being executed.
568 * This routine removes a scsi buffer from the head of the @phba
569 * lpfc_scsi_buf_list and returns it to the caller.
573 * Pointer to lpfc_scsi_buf - Success
575 static struct lpfc_scsi_buf*
576 lpfc_get_scsi_buf(struct lpfc_hba * phba)
578 struct lpfc_scsi_buf * lpfc_cmd = NULL;
579 struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
580 unsigned long iflag = 0;
582 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
583 list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
585 lpfc_cmd->seg_cnt = 0;
586 lpfc_cmd->nonsg_phys = 0;
587 lpfc_cmd->prot_seg_cnt = 0;
589 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
594 * lpfc_release_scsi_buf - Return a scsi buffer back to hba's lpfc_scsi_buf_list
595 * @phba: The Hba for which this call is being executed.
596 * @psb: The scsi buffer which is being released.
598 * This routine releases the @psb scsi buffer by adding it to the tail of the
599 * @phba lpfc_scsi_buf_list.
602 lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
604 unsigned long iflag = 0;
606 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
608 list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
609 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
613 * lpfc_scsi_prep_dma_buf - Routine to do DMA mapping for scsi buffer
614 * @phba: The Hba for which this call is being executed.
615 * @lpfc_cmd: The scsi buffer which is going to be mapped.
617 * This routine does the pci dma mapping for the scatter-gather list of the
618 * scsi cmnd field of @lpfc_cmd. It scans through the sg elements and formats
619 * the BDEs. It also initializes all IOCB fields that are dependent on the
620 * scsi command request buffer.
627 lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
629 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
630 struct scatterlist *sgel = NULL;
631 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
632 struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
633 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
634 struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
636 uint32_t num_bde = 0;
637 int nseg, datadir = scsi_cmnd->sc_data_direction;
640 * There are three possibilities here - use scatter-gather segment, use
641 * the single mapping, or neither. Start the lpfc command prep by
642 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
646 if (scsi_sg_count(scsi_cmnd)) {
648 * The driver stores the segment count returned from pci_map_sg
649 * because this is a count of dma-mappings used to map the use_sg
650 * pages. They are not guaranteed to be the same for those
651 * architectures that implement an IOMMU.
654 nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
655 scsi_sg_count(scsi_cmnd), datadir);
659 lpfc_cmd->seg_cnt = nseg;
660 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
661 printk(KERN_ERR "%s: Too many sg segments from "
662 "dma_map_sg. Config %d, seg_cnt %d\n",
663 __func__, phba->cfg_sg_seg_cnt,
665 scsi_dma_unmap(scsi_cmnd);
670 * The driver established a maximum scatter-gather segment count
671 * during probe that limits the number of sg elements in any
672 * single scsi command. Just run through the seg_cnt and format
674 * When using SLI-3 the driver will try to fit all the BDEs into
675 * the IOCB. If it can't then the BDEs get added to a BPL as it
676 * does for SLI-2 mode.
678 scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
679 physaddr = sg_dma_address(sgel);
680 if (phba->sli_rev == 3 &&
681 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
682 nseg <= LPFC_EXT_DATA_BDE_COUNT) {
683 data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
684 data_bde->tus.f.bdeSize = sg_dma_len(sgel);
685 data_bde->addrLow = putPaddrLow(physaddr);
686 data_bde->addrHigh = putPaddrHigh(physaddr);
689 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
690 bpl->tus.f.bdeSize = sg_dma_len(sgel);
691 bpl->tus.w = le32_to_cpu(bpl->tus.w);
693 le32_to_cpu(putPaddrLow(physaddr));
695 le32_to_cpu(putPaddrHigh(physaddr));
702 * Finish initializing those IOCB fields that are dependent on the
703 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
704 * explicitly reinitialized and for SLI-3 the extended bde count is
705 * explicitly reinitialized since all iocb memory resources are reused.
707 if (phba->sli_rev == 3 &&
708 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
709 if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
711 * The extended IOCB format can only fit 3 BDEs or a BPL.
712 * This I/O has more than 3 BDEs, so the 1st data bde will
713 * be a BPL that is filled in here.
715 physaddr = lpfc_cmd->dma_handle;
716 data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
717 data_bde->tus.f.bdeSize = (num_bde *
718 sizeof(struct ulp_bde64));
719 physaddr += (sizeof(struct fcp_cmnd) +
720 sizeof(struct fcp_rsp) +
721 (2 * sizeof(struct ulp_bde64)));
722 data_bde->addrHigh = putPaddrHigh(physaddr);
723 data_bde->addrLow = putPaddrLow(physaddr);
724 /* ebde count includes the response bde and data bpl */
725 iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
727 /* ebde count includes the response bde and data bdes */
728 iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
731 iocb_cmd->un.fcpi64.bdl.bdeSize =
732 ((num_bde + 2) * sizeof(struct ulp_bde64));
734 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
737 * Due to difference in data length between DIF/non-DIF paths,
738 * we need to set word 4 of IOCB here
740 iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
745 * Given a scsi cmnd, determine the BlockGuard profile to be used
749 lpfc_sc_to_sli_prof(struct scsi_cmnd *sc)
751 uint8_t guard_type = scsi_host_get_guard(sc->device->host);
752 uint8_t ret_prof = LPFC_PROF_INVALID;
754 if (guard_type == SHOST_DIX_GUARD_IP) {
755 switch (scsi_get_prot_op(sc)) {
756 case SCSI_PROT_READ_INSERT:
757 case SCSI_PROT_WRITE_STRIP:
758 ret_prof = LPFC_PROF_AST2;
761 case SCSI_PROT_READ_STRIP:
762 case SCSI_PROT_WRITE_INSERT:
763 ret_prof = LPFC_PROF_A1;
766 case SCSI_PROT_READ_CONVERT:
767 case SCSI_PROT_WRITE_CONVERT:
768 ret_prof = LPFC_PROF_AST1;
771 case SCSI_PROT_READ_PASS:
772 case SCSI_PROT_WRITE_PASS:
773 case SCSI_PROT_NORMAL:
775 printk(KERN_ERR "Bad op/guard:%d/%d combination\n",
776 scsi_get_prot_op(sc), guard_type);
780 } else if (guard_type == SHOST_DIX_GUARD_CRC) {
781 switch (scsi_get_prot_op(sc)) {
782 case SCSI_PROT_READ_STRIP:
783 case SCSI_PROT_WRITE_INSERT:
784 ret_prof = LPFC_PROF_A1;
787 case SCSI_PROT_READ_PASS:
788 case SCSI_PROT_WRITE_PASS:
789 ret_prof = LPFC_PROF_C1;
792 case SCSI_PROT_READ_CONVERT:
793 case SCSI_PROT_WRITE_CONVERT:
794 case SCSI_PROT_READ_INSERT:
795 case SCSI_PROT_WRITE_STRIP:
796 case SCSI_PROT_NORMAL:
798 printk(KERN_ERR "Bad op/guard:%d/%d combination\n",
799 scsi_get_prot_op(sc), guard_type);
803 /* unsupported format */
810 struct scsi_dif_tuple {
811 __be16 guard_tag; /* Checksum */
812 __be16 app_tag; /* Opaque storage */
813 __be32 ref_tag; /* Target LBA or indirect LBA */
816 static inline unsigned
817 lpfc_cmd_blksize(struct scsi_cmnd *sc)
819 return sc->device->sector_size;
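/*
 * Illustrative sketch (not part of the driver source): each 8-byte DIF tuple
 * in the protection scatterlist matches struct scsi_dif_tuple above. For
 * example, one way to read the reference tag of the first protection segment:
 *
 *	struct scsi_dif_tuple *spt =
 *		page_address(sg_page(scsi_prot_sglist(sc))) +
 *		scsi_prot_sglist(sc)[0].offset;
 *	uint32_t reftag = be32_to_cpu(spt->ref_tag);
 *
 * lpfc_get_cmd_dif_parms() below pulls the same field for WRITE operations
 * that carry protection data.
 */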
823 * lpfc_get_cmd_dif_parms - Extract DIF parameters from SCSI command
824 * @sc: in: SCSI command
825 * @apptagmask: out: app tag mask
826 * @apptagval: out: app tag value
827 * @reftag: out: ref tag (reference tag)
830 * Extract DIF parameters from the command if possible. Otherwise,
831 * use default parameters.
835 lpfc_get_cmd_dif_parms(struct scsi_cmnd *sc, uint16_t *apptagmask,
836 uint16_t *apptagval, uint32_t *reftag)
838 struct scsi_dif_tuple *spt;
839 unsigned char op = scsi_get_prot_op(sc);
840 unsigned int protcnt = scsi_prot_sg_count(sc);
843 if (protcnt && (op == SCSI_PROT_WRITE_STRIP ||
844 op == SCSI_PROT_WRITE_PASS ||
845 op == SCSI_PROT_WRITE_CONVERT)) {
848 spt = page_address(sg_page(scsi_prot_sglist(sc))) +
849 scsi_prot_sglist(sc)[0].offset;
852 *reftag = cpu_to_be32(spt->ref_tag);
855 /* SBC defines the ref tag to be the lower 32 bits of the LBA */
856 *reftag = (uint32_t) (0xffffffff & scsi_get_lba(sc));
863 * This function sets up buffer list for protection groups of
864 * type LPFC_PG_TYPE_NO_DIF
866 * This is usually used when the HBA is instructed to generate
867 * DIFs and insert them into data stream (or strip DIF from
868 * incoming data stream)
870 * The buffer list consists of just one protection group described
872 * +-------------------------+
873 * start of prot group --> | PDE_1 |
874 * +-------------------------+
876 * +-------------------------+
877 * |more Data BDE's ... (opt)|
878 * +-------------------------+
880 * @sc: pointer to scsi command we're working on
881 * @bpl: pointer to buffer list for protection groups
882 * @datasegcnt: number of segments of data that have been dma mapped
884 * Note: Data s/g buffers have been dma mapped
887 lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
888 struct ulp_bde64 *bpl, int datasegcnt)
890 struct scatterlist *sgde = NULL; /* s/g data entry */
891 struct lpfc_pde *pde1 = NULL;
893 int i = 0, num_bde = 0;
894 int datadir = sc->sc_data_direction;
895 int prof = LPFC_PROF_INVALID;
898 uint16_t apptagmask, apptagval;
900 pde1 = (struct lpfc_pde *) bpl;
901 prof = lpfc_sc_to_sli_prof(sc);
903 if (prof == LPFC_PROF_INVALID)
906 /* extract some info from the scsi command for PDE1 */
907 blksize = lpfc_cmd_blksize(sc);
908 lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag);
910 /* setup PDE1 with what we have */
911 lpfc_pde_set_bg_parms(pde1, LPFC_PDE1_DESCRIPTOR, prof, blksize,
913 lpfc_pde_set_dif_parms(pde1, apptagmask, apptagval, reftag);
918 /* assumption: caller has already run dma_map_sg on command data */
919 scsi_for_each_sg(sc, sgde, datasegcnt, i) {
920 physaddr = sg_dma_address(sgde);
921 bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
922 bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
923 bpl->tus.f.bdeSize = sg_dma_len(sgde);
924 if (datadir == DMA_TO_DEVICE)
925 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
927 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
928 bpl->tus.w = le32_to_cpu(bpl->tus.w);
938 * This function sets up buffer list for protection groups of
939 * type LPFC_PG_TYPE_DIF_BUF
941 * This is usually used when DIFs are in their own buffers,
942 * separate from the data. The HBA can then be instructed
943 * to place the DIFs in the outgoing stream. For read operations,
944 * the HBA could extract the DIFs and place them in DIF buffers.
946 * The buffer list for this type consists of one or more of the
947 * protection groups described below:
948 * +-------------------------+
949 * start of first prot group --> | PDE_1 |
950 * +-------------------------+
951 * | PDE_3 (Prot BDE) |
952 * +-------------------------+
954 * +-------------------------+
955 * |more Data BDE's ... (opt)|
956 * +-------------------------+
957 * start of new prot group --> | PDE_1 |
958 * +-------------------------+
960 * +-------------------------+
962 * @sc: pointer to scsi command we're working on
963 * @bpl: pointer to buffer list for protection groups
964 * @datacnt: number of segments of data that have been dma mapped
965 * @protcnt: number of segments of protection data that have been dma mapped
967 * Note: It is assumed that both data and protection s/g buffers have been
971 lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
972 struct ulp_bde64 *bpl, int datacnt, int protcnt)
974 struct scatterlist *sgde = NULL; /* s/g data entry */
975 struct scatterlist *sgpe = NULL; /* s/g prot entry */
976 struct lpfc_pde *pde1 = NULL;
977 struct ulp_bde64 *prot_bde = NULL;
978 dma_addr_t dataphysaddr, protphysaddr;
979 unsigned short curr_data = 0, curr_prot = 0;
980 unsigned int split_offset, protgroup_len;
981 unsigned int protgrp_blks, protgrp_bytes;
982 unsigned int remainder, subtotal;
983 int prof = LPFC_PROF_INVALID;
984 int datadir = sc->sc_data_direction;
985 unsigned char pgdone = 0, alldone = 0;
988 uint16_t apptagmask, apptagval;
991 sgpe = scsi_prot_sglist(sc);
992 sgde = scsi_sglist(sc);
994 if (!sgpe || !sgde) {
995 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
996 "9020 Invalid s/g entry: data=0x%p prot=0x%p\n",
1001 prof = lpfc_sc_to_sli_prof(sc);
1002 if (prof == LPFC_PROF_INVALID)
1005 /* extract some info from the scsi command for PDE1 */
1006 blksize = lpfc_cmd_blksize(sc);
1007 lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag);
1011 /* setup the first PDE_1 */
1012 pde1 = (struct lpfc_pde *) bpl;
1014 lpfc_pde_set_bg_parms(pde1, LPFC_PDE1_DESCRIPTOR, prof, blksize,
1016 lpfc_pde_set_dif_parms(pde1, apptagmask, apptagval, reftag);
1021 /* setup the first BDE that points to protection buffer */
1022 prot_bde = (struct ulp_bde64 *) bpl;
1023 protphysaddr = sg_dma_address(sgpe);
1024 prot_bde->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
1025 prot_bde->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
1026 protgroup_len = sg_dma_len(sgpe);
1029 /* must be integer multiple of the DIF block length */
1030 BUG_ON(protgroup_len % 8);
1032 protgrp_blks = protgroup_len / 8;
1033 protgrp_bytes = protgrp_blks * blksize;
1035 prot_bde->tus.f.bdeSize = protgroup_len;
1036 if (datadir == DMA_TO_DEVICE)
1037 prot_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1039 prot_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
1040 prot_bde->tus.w = le32_to_cpu(bpl->tus.w);
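/*
 * Illustrative numbers (hypothetical): a protection segment of 1024 bytes
 * holds 1024 / 8 = 128 DIF tuples, so with 512-byte logical blocks this
 * protection group covers 128 * 512 = 65536 bytes of data; the data s/g
 * entries handled below are split on that boundary.
 */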
1045 /* setup BDE's for data blocks associated with DIF data */
1047 subtotal = 0; /* total bytes processed for current prot grp */
1050 printk(KERN_ERR "%s Invalid data segment\n",
1055 dataphysaddr = sg_dma_address(sgde) + split_offset;
1056 bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
1057 bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));
1059 remainder = sg_dma_len(sgde) - split_offset;
1061 if ((subtotal + remainder) <= protgrp_bytes) {
1062 /* we can use this whole buffer */
1063 bpl->tus.f.bdeSize = remainder;
1066 if ((subtotal + remainder) == protgrp_bytes)
1069 /* must split this buffer with next prot grp */
1070 bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
1071 split_offset += bpl->tus.f.bdeSize;
1074 subtotal += bpl->tus.f.bdeSize;
1076 if (datadir == DMA_TO_DEVICE)
1077 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1079 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
1080 bpl->tus.w = le32_to_cpu(bpl->tus.w);
1088 /* Move to the next s/g segment if possible */
1089 sgde = sg_next(sgde);
1093 if (curr_prot == protcnt) {
1095 } else if (curr_prot < protcnt) {
1096 /* advance to next prot buffer */
1097 sgpe = sg_next(sgpe);
1100 /* update the reference tag */
1101 reftag += protgrp_blks;
1103 /* if we're here, we have a bug */
1104 printk(KERN_ERR "BLKGRD: bug in %s\n", __func__);
1115 * Given a SCSI command that supports DIF, determine composition of protection
1116 * groups involved in setting up buffer lists
1119 * for DIF (for both read and write)
1122 lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
1124 int ret = LPFC_PG_TYPE_INVALID;
1125 unsigned char op = scsi_get_prot_op(sc);
1128 case SCSI_PROT_READ_STRIP:
1129 case SCSI_PROT_WRITE_INSERT:
1130 ret = LPFC_PG_TYPE_NO_DIF;
1132 case SCSI_PROT_READ_INSERT:
1133 case SCSI_PROT_WRITE_STRIP:
1134 case SCSI_PROT_READ_PASS:
1135 case SCSI_PROT_WRITE_PASS:
1136 case SCSI_PROT_WRITE_CONVERT:
1137 case SCSI_PROT_READ_CONVERT:
1138 ret = LPFC_PG_TYPE_DIF_BUF;
1141 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1142 "9021 Unsupported protection op:%d\n", op);
1150 * This is the protection/DIF aware version of
1151 * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
1152 * two functions eventually, but for now, it's here
1155 lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba,
1156 struct lpfc_scsi_buf *lpfc_cmd)
1158 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
1159 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
1160 struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
1161 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
1162 uint32_t num_bde = 0;
1163 int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
1164 int prot_group_type = 0;
1169 * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
1170 * fcp_rsp regions to the first data bde entry
1173 if (scsi_sg_count(scsi_cmnd)) {
1175 * The driver stores the segment count returned from pci_map_sg
1176 * because this is a count of dma-mappings used to map the use_sg
1177 * pages. They are not guaranteed to be the same for those
1178 * architectures that implement an IOMMU.
1180 datasegcnt = dma_map_sg(&phba->pcidev->dev,
1181 scsi_sglist(scsi_cmnd),
1182 scsi_sg_count(scsi_cmnd), datadir);
1183 if (unlikely(!datasegcnt))
1186 lpfc_cmd->seg_cnt = datasegcnt;
1187 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
1188 printk(KERN_ERR "%s: Too many sg segments from "
1189 "dma_map_sg. Config %d, seg_cnt %d\n",
1190 __func__, phba->cfg_sg_seg_cnt,
1192 scsi_dma_unmap(scsi_cmnd);
1196 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
1198 switch (prot_group_type) {
1199 case LPFC_PG_TYPE_NO_DIF:
1200 num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
1202 /* we should have 2 or more entries in buffer list */
1206 case LPFC_PG_TYPE_DIF_BUF:{
1208 * This type indicates that protection buffers are
1209 * passed to the driver, so that needs to be prepared
1212 protsegcnt = dma_map_sg(&phba->pcidev->dev,
1213 scsi_prot_sglist(scsi_cmnd),
1214 scsi_prot_sg_count(scsi_cmnd), datadir);
1215 if (unlikely(!protsegcnt)) {
1216 scsi_dma_unmap(scsi_cmnd);
1220 lpfc_cmd->prot_seg_cnt = protsegcnt;
1221 if (lpfc_cmd->prot_seg_cnt
1222 > phba->cfg_prot_sg_seg_cnt) {
1223 printk(KERN_ERR "%s: Too many prot sg segments "
1224 "from dma_map_sg. Config %d,"
1225 "prot_seg_cnt %d\n", __func__,
1226 phba->cfg_prot_sg_seg_cnt,
1227 lpfc_cmd->prot_seg_cnt);
1228 dma_unmap_sg(&phba->pcidev->dev,
1229 scsi_prot_sglist(scsi_cmnd),
1230 scsi_prot_sg_count(scsi_cmnd),
1232 scsi_dma_unmap(scsi_cmnd);
1236 num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
1237 datasegcnt, protsegcnt);
1238 /* we should have 3 or more entries in buffer list */
1243 case LPFC_PG_TYPE_INVALID:
1245 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1246 "9022 Unexpected protection group %i\n",
1253 * Finish initializing those IOCB fields that are dependent on the
1254 * scsi_cmnd request_buffer. Note that the bdeSize is explicitly
1255 * reinitialized since all iocb memory resources are used many times
1256 * for transmit, receive, and continuation bpl's.
1258 iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
1259 iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
1260 iocb_cmd->ulpBdeCount = 1;
1261 iocb_cmd->ulpLe = 1;
1263 fcpdl = scsi_bufflen(scsi_cmnd);
1265 if (scsi_get_prot_type(scsi_cmnd) == SCSI_PROT_DIF_TYPE1) {
1267 * We are in DIF Type 1 mode.
1268 * Every data block has an 8 byte DIF (trailer)
1269 * attached to it. Must adjust the FCP data length.
1271 blksize = lpfc_cmd_blksize(scsi_cmnd);
1272 diflen = (fcpdl / blksize) * 8;
1275 fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
1278 * Due to difference in data length between DIF/non-DIF paths,
1279 * we need to set word 4 of IOCB here
1281 iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
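/*
 * Worked example (hypothetical sizes, not from the source): for a 64 KB
 * transfer with 512-byte logical blocks under DIF Type 1, the wire length
 * grows by one 8-byte DIF tuple per block:
 *
 *	fcpdl  = 65536
 *	diflen = (65536 / 512) * 8 = 1024
 *	fcpdl  = 65536 + 1024 = 66560
 *
 * and this adjusted total is what ends up in fcpDl and fcpi_parm above.
 */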
1285 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1286 "9023 Could not setup all needed BDE's"
1287 "prot_group_type=%d, num_bde=%d\n",
1288 prot_group_type, num_bde);
1293 * This function checks for BlockGuard errors detected by
1294 * the HBA. In case of errors, the ASC/ASCQ fields in the
1295 * sense buffer will be set accordingly, paired with
1296 * ILLEGAL_REQUEST to signal to the kernel that the HBA
1297 * detected corruption.
1300 * 0 - No error found
1301 * 1 - BlockGuard error found
1302 * -1 - Internal error (bad profile, ...etc)
1305 lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
1306 struct lpfc_iocbq *pIocbOut)
1308 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
1309 struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
1311 uint32_t bghm = bgf->bghm;
1312 uint32_t bgstat = bgf->bgstat;
1313 uint64_t failing_sector = 0;
1315 printk(KERN_ERR "BG ERROR in cmd 0x%x lba 0x%llx blk cnt 0x%lx "
1316 "bgstat=0x%x bghm=0x%x\n",
1317 cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd),
1318 cmd->request->nr_sectors, bgstat, bghm);
1320 spin_lock(&_dump_buf_lock);
1321 if (!_dump_buf_done) {
1322 printk(KERN_ERR "Saving Data for %u blocks to debugfs\n",
1323 (cmd->cmnd[7] << 8 | cmd->cmnd[8]));
1324 lpfc_debug_save_data(cmd);
1326 /* If we have a prot sgl, save the DIF buffer */
1327 if (lpfc_prot_group_type(phba, cmd) ==
1328 LPFC_PG_TYPE_DIF_BUF) {
1329 printk(KERN_ERR "Saving DIF for %u blocks to debugfs\n",
1330 (cmd->cmnd[7] << 8 | cmd->cmnd[8]));
1331 lpfc_debug_save_dif(cmd);
1336 spin_unlock(&_dump_buf_lock);
1338 if (lpfc_bgs_get_invalid_prof(bgstat)) {
1339 cmd->result = ScsiResult(DID_ERROR, 0);
1340 printk(KERN_ERR "Invalid BlockGuard profile. bgstat:0x%x\n",
1346 if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
1347 cmd->result = ScsiResult(DID_ERROR, 0);
1348 printk(KERN_ERR "Invalid BlockGuard DIF Block. bgstat:0x%x\n",
1354 if (lpfc_bgs_get_guard_err(bgstat)) {
1357 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1359 cmd->result = DRIVER_SENSE << 24
1360 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
1361 phba->bg_guard_err_cnt++;
1362 printk(KERN_ERR "BLKGRD: guard_tag error\n");
1365 if (lpfc_bgs_get_reftag_err(bgstat)) {
1368 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1370 cmd->result = DRIVER_SENSE << 24
1371 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
1373 phba->bg_reftag_err_cnt++;
1374 printk(KERN_ERR "BLKGRD: ref_tag error\n");
1377 if (lpfc_bgs_get_apptag_err(bgstat)) {
1380 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1382 cmd->result = DRIVER_SENSE << 24
1383 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
1385 phba->bg_apptag_err_cnt++;
1386 printk(KERN_ERR "BLKGRD: app_tag error\n");
1389 if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
1391 * setup sense data descriptor 0 per SPC-4 as an information
1392 * field, and put the failing LBA in it
1394 cmd->sense_buffer[8] = 0; /* Information */
1395 cmd->sense_buffer[9] = 0xa; /* Add. length */
1396 bghm /= cmd->device->sector_size;
1398 failing_sector = scsi_get_lba(cmd);
1399 failing_sector += bghm;
1401 put_unaligned_be64(failing_sector, &cmd->sense_buffer[10]);
1405 /* No error was reported - problem in FW? */
1406 cmd->result = ScsiResult(DID_ERROR, 0);
1407 printk(KERN_ERR "BLKGRD: no errors reported!\n");
1415 * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
1416 * @phba: Pointer to hba context object.
1417 * @vport: Pointer to vport object.
1418 * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
1419 * @rsp_iocb: Pointer to response iocb object which reported error.
1421 * This function posts an event when there is a SCSI command reporting
1422 * an error from the scsi device.
1425 lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
1426 struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
1427 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
1428 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
1429 uint32_t resp_info = fcprsp->rspStatus2;
1430 uint32_t scsi_status = fcprsp->rspStatus3;
1431 uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
1432 struct lpfc_fast_path_event *fast_path_evt = NULL;
1433 struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
1434 unsigned long flags;
1436 /* If there is a queue full or busy condition, send a scsi event */
1437 if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
1438 (cmnd->result == SAM_STAT_BUSY)) {
1439 fast_path_evt = lpfc_alloc_fast_evt(phba);
1442 fast_path_evt->un.scsi_evt.event_type =
1444 fast_path_evt->un.scsi_evt.subcategory =
1445 (cmnd->result == SAM_STAT_TASK_SET_FULL) ?
1446 LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
1447 fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
1448 memcpy(&fast_path_evt->un.scsi_evt.wwpn,
1449 &pnode->nlp_portname, sizeof(struct lpfc_name));
1450 memcpy(&fast_path_evt->un.scsi_evt.wwnn,
1451 &pnode->nlp_nodename, sizeof(struct lpfc_name));
1452 } else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
1453 ((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
1454 fast_path_evt = lpfc_alloc_fast_evt(phba);
1457 fast_path_evt->un.check_cond_evt.scsi_event.event_type =
1459 fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
1460 LPFC_EVENT_CHECK_COND;
1461 fast_path_evt->un.check_cond_evt.scsi_event.lun =
1463 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
1464 &pnode->nlp_portname, sizeof(struct lpfc_name));
1465 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
1466 &pnode->nlp_nodename, sizeof(struct lpfc_name));
1467 fast_path_evt->un.check_cond_evt.sense_key =
1468 cmnd->sense_buffer[2] & 0xf;
1469 fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
1470 fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
1471 } else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
1473 ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
1474 ((scsi_status == SAM_STAT_GOOD) &&
1475 !(resp_info & (RESID_UNDER | RESID_OVER))))) {
1477 * If fcpi_parm is valid, and either the resid does not match it or the
1478 * status is good with no resid reported, then there is a read_check error
1480 fast_path_evt = lpfc_alloc_fast_evt(phba);
1483 fast_path_evt->un.read_check_error.header.event_type =
1484 FC_REG_FABRIC_EVENT;
1485 fast_path_evt->un.read_check_error.header.subcategory =
1486 LPFC_EVENT_FCPRDCHKERR;
1487 memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
1488 &pnode->nlp_portname, sizeof(struct lpfc_name));
1489 memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
1490 &pnode->nlp_nodename, sizeof(struct lpfc_name));
1491 fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
1492 fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
1493 fast_path_evt->un.read_check_error.fcpiparam =
1498 fast_path_evt->vport = vport;
1499 spin_lock_irqsave(&phba->hbalock, flags);
1500 list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
1501 spin_unlock_irqrestore(&phba->hbalock, flags);
1502 lpfc_worker_wake_up(phba);
1507 * lpfc_scsi_unprep_dma_buf - Routine to un-map DMA mapping of scatter gather
1508 * @phba: The Hba for which this call is being executed.
1509 * @psb: The scsi buffer which is going to be un-mapped.
1511 * This routine does DMA un-mapping of the scatter gather list of the scsi
1512 * command held in @psb.
1515 lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
1518 * There are only two special cases to consider. (1) the scsi command
1519 * requested scatter-gather usage or (2) the scsi command allocated
1520 * a request buffer, but did not request use_sg. There is a third
1521 * case, but it does not require resource deallocation.
1523 if (psb->seg_cnt > 0)
1524 scsi_dma_unmap(psb->pCmd);
1525 if (psb->prot_seg_cnt > 0)
1526 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
1527 scsi_prot_sg_count(psb->pCmd),
1528 psb->pCmd->sc_data_direction);
1532 * lpfc_handle_fcp_err - FCP response handler
1533 * @vport: The virtual port for which this call is being executed.
1534 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
1535 * @rsp_iocb: The response IOCB which contains FCP error.
1537 * This routine is called to process a response IOCB with status field
1538 * IOSTAT_FCP_RSP_ERROR. It sets the result field of the scsi command
1539 * based upon the SCSI and FCP error.
1542 lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
1543 struct lpfc_iocbq *rsp_iocb)
1545 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
1546 struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
1547 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
1548 uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
1549 uint32_t resp_info = fcprsp->rspStatus2;
1550 uint32_t scsi_status = fcprsp->rspStatus3;
1552 uint32_t host_status = DID_OK;
1553 uint32_t rsplen = 0;
1554 uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
1558 * If this is a task management command, there is no
1559 * scsi packet associated with this lpfc_cmd. The driver
1562 if (fcpcmd->fcpCntl2) {
1567 if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
1568 uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
1569 if (snslen > SCSI_SENSE_BUFFERSIZE)
1570 snslen = SCSI_SENSE_BUFFERSIZE;
1572 if (resp_info & RSP_LEN_VALID)
1573 rsplen = be32_to_cpu(fcprsp->rspRspLen);
1574 memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
1576 lp = (uint32_t *)cmnd->sense_buffer;
1578 if (!scsi_status && (resp_info & RESID_UNDER))
1581 lpfc_printf_vlog(vport, KERN_WARNING, logit,
1582 "9024 FCP command x%x failed: x%x SNS x%x x%x "
1583 "Data: x%x x%x x%x x%x x%x\n",
1584 cmnd->cmnd[0], scsi_status,
1585 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
1586 be32_to_cpu(fcprsp->rspResId),
1587 be32_to_cpu(fcprsp->rspSnsLen),
1588 be32_to_cpu(fcprsp->rspRspLen),
1591 if (resp_info & RSP_LEN_VALID) {
1592 rsplen = be32_to_cpu(fcprsp->rspRspLen);
1593 if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
1594 (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
1595 host_status = DID_ERROR;
1600 scsi_set_resid(cmnd, 0);
1601 if (resp_info & RESID_UNDER) {
1602 scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
1604 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
1605 "9025 FCP Read Underrun, expected %d, "
1606 "residual %d Data: x%x x%x x%x\n",
1607 be32_to_cpu(fcpcmd->fcpDl),
1608 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
1612 * If there is an underrun, check whether the underrun reported by
1613 * the storage array is the same as the underrun reported by the HBA.
1614 * If they are not the same, there is a dropped frame.
1616 if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
1618 (scsi_get_resid(cmnd) != fcpi_parm)) {
1619 lpfc_printf_vlog(vport, KERN_WARNING,
1620 LOG_FCP | LOG_FCP_ERROR,
1621 "9026 FCP Read Check Error "
1622 "and Underrun Data: x%x x%x x%x x%x\n",
1623 be32_to_cpu(fcpcmd->fcpDl),
1624 scsi_get_resid(cmnd), fcpi_parm,
1626 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
1627 host_status = DID_ERROR;
1630 * The cmnd->underflow is the minimum number of bytes that must
1631 * be transferred for this command. Provided a sense condition
1632 * is not present, make sure the actual amount transferred is at
1633 * least the underflow value or fail.
1635 if (!(resp_info & SNS_LEN_VALID) &&
1636 (scsi_status == SAM_STAT_GOOD) &&
1637 (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
1638 < cmnd->underflow)) {
1639 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
1640 "9027 FCP command x%x residual "
1641 "underrun converted to error "
1642 "Data: x%x x%x x%x\n",
1643 cmnd->cmnd[0], scsi_bufflen(cmnd),
1644 scsi_get_resid(cmnd), cmnd->underflow);
1645 host_status = DID_ERROR;
1647 } else if (resp_info & RESID_OVER) {
1648 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
1649 "9028 FCP command x%x residual overrun error. "
1650 "Data: x%x x%x \n", cmnd->cmnd[0],
1651 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
1652 host_status = DID_ERROR;
1655 * Check SLI validation that all the transfer was actually done
1656 * (fcpi_parm should be zero). Apply check only to reads.
1658 } else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
1659 (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
1660 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
1661 "9029 FCP Read Check Error Data: "
1662 "x%x x%x x%x x%x\n",
1663 be32_to_cpu(fcpcmd->fcpDl),
1664 be32_to_cpu(fcprsp->rspResId),
1665 fcpi_parm, cmnd->cmnd[0]);
1666 host_status = DID_ERROR;
1667 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
1671 cmnd->result = ScsiResult(host_status, scsi_status);
1672 lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
1676 * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
1677 * @phba: The Hba for which this call is being executed.
1678 * @pIocbIn: The command IOCBQ for the scsi cmnd.
1679 * @pIocbOut: The response IOCBQ for the scsi cmnd.
1681 * This routine assigns the scsi command result by examining the response
1682 * IOCB status field. It also handles the QUEUE FULL condition by ramping
1683 * down the device queue depth.
1686 lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
1687 struct lpfc_iocbq *pIocbOut)
1689 struct lpfc_scsi_buf *lpfc_cmd =
1690 (struct lpfc_scsi_buf *) pIocbIn->context1;
1691 struct lpfc_vport *vport = pIocbIn->vport;
1692 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
1693 struct lpfc_nodelist *pnode = rdata->pnode;
1694 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
1696 struct scsi_device *tmp_sdev;
1698 unsigned long flags;
1699 struct lpfc_fast_path_event *fast_path_evt;
1700 struct Scsi_Host *shost = cmd->device->host;
1701 uint32_t queue_depth, scsi_id;
1703 lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
1704 lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
1705 if (pnode && NLP_CHK_NODE_ACT(pnode))
1706 atomic_dec(&pnode->cmd_pending);
1708 if (lpfc_cmd->status) {
1709 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
1710 (lpfc_cmd->result & IOERR_DRVR_MASK))
1711 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
1712 else if (lpfc_cmd->status >= IOSTAT_CNT)
1713 lpfc_cmd->status = IOSTAT_DEFAULT;
1715 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
1716 "9030 FCP cmd x%x failed <%d/%d> "
1717 "status: x%x result: x%x Data: x%x x%x\n",
1719 cmd->device ? cmd->device->id : 0xffff,
1720 cmd->device ? cmd->device->lun : 0xffff,
1721 lpfc_cmd->status, lpfc_cmd->result,
1722 pIocbOut->iocb.ulpContext,
1723 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
1725 switch (lpfc_cmd->status) {
1726 case IOSTAT_FCP_RSP_ERROR:
1727 /* Call FCP RSP handler to determine result */
1728 lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
1730 case IOSTAT_NPORT_BSY:
1731 case IOSTAT_FABRIC_BSY:
1732 cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
1733 fast_path_evt = lpfc_alloc_fast_evt(phba);
1736 fast_path_evt->un.fabric_evt.event_type =
1737 FC_REG_FABRIC_EVENT;
1738 fast_path_evt->un.fabric_evt.subcategory =
1739 (lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
1740 LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
1741 if (pnode && NLP_CHK_NODE_ACT(pnode)) {
1742 memcpy(&fast_path_evt->un.fabric_evt.wwpn,
1743 &pnode->nlp_portname,
1744 sizeof(struct lpfc_name));
1745 memcpy(&fast_path_evt->un.fabric_evt.wwnn,
1746 &pnode->nlp_nodename,
1747 sizeof(struct lpfc_name));
1749 fast_path_evt->vport = vport;
1750 fast_path_evt->work_evt.evt =
1751 LPFC_EVT_FASTPATH_MGMT_EVT;
1752 spin_lock_irqsave(&phba->hbalock, flags);
1753 list_add_tail(&fast_path_evt->work_evt.evt_listp,
1755 spin_unlock_irqrestore(&phba->hbalock, flags);
1756 lpfc_worker_wake_up(phba);
1758 case IOSTAT_LOCAL_REJECT:
1759 if (lpfc_cmd->result == IOERR_INVALID_RPI ||
1760 lpfc_cmd->result == IOERR_NO_RESOURCES ||
1761 lpfc_cmd->result == IOERR_ABORT_REQUESTED) {
1762 cmd->result = ScsiResult(DID_REQUEUE, 0);
1766 if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
1767 lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
1768 pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
1769 if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
1771 * This is a response for a BG enabled
1772 * cmd. Parse BG error
1774 lpfc_parse_bg_err(phba, lpfc_cmd,
1778 lpfc_printf_vlog(vport, KERN_WARNING,
1780 "9031 non-zero BGSTAT "
1781 "on unprotected cmd");
1785 /* else: fall through */
1787 cmd->result = ScsiResult(DID_ERROR, 0);
1791 if (!pnode || !NLP_CHK_NODE_ACT(pnode)
1792 || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
1793 cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
1796 cmd->result = ScsiResult(DID_OK, 0);
1799 if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
1800 uint32_t *lp = (uint32_t *)cmd->sense_buffer;
1802 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
1803 "0710 Iodone <%d/%d> cmd %p, error "
1804 "x%x SNS x%x x%x Data: x%x x%x\n",
1805 cmd->device->id, cmd->device->lun, cmd,
1806 cmd->result, *lp, *(lp + 3), cmd->retries,
1807 scsi_get_resid(cmd));
1810 lpfc_update_stats(phba, lpfc_cmd);
1811 result = cmd->result;
1812 if (vport->cfg_max_scsicmpl_time &&
1813 time_after(jiffies, lpfc_cmd->start_time +
1814 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
1815 spin_lock_irqsave(shost->host_lock, flags);
1816 if (pnode && NLP_CHK_NODE_ACT(pnode)) {
1817 if (pnode->cmd_qdepth >
1818 atomic_read(&pnode->cmd_pending) &&
1819 (atomic_read(&pnode->cmd_pending) >
1820 LPFC_MIN_TGT_QDEPTH) &&
1821 ((cmd->cmnd[0] == READ_10) ||
1822 (cmd->cmnd[0] == WRITE_10)))
1824 atomic_read(&pnode->cmd_pending);
1826 pnode->last_change_time = jiffies;
1828 spin_unlock_irqrestore(shost->host_lock, flags);
1829 } else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
1830 if ((pnode->cmd_qdepth < LPFC_MAX_TGT_QDEPTH) &&
1831 time_after(jiffies, pnode->last_change_time +
1832 msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
1833 spin_lock_irqsave(shost->host_lock, flags);
1834 pnode->cmd_qdepth += pnode->cmd_qdepth *
1835 LPFC_TGTQ_RAMPUP_PCENT / 100;
1836 if (pnode->cmd_qdepth > LPFC_MAX_TGT_QDEPTH)
1837 pnode->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
1838 pnode->last_change_time = jiffies;
1839 spin_unlock_irqrestore(shost->host_lock, flags);
1843 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
1845 /* The sdev is not guaranteed to be valid post scsi_done upcall. */
1846 queue_depth = cmd->device->queue_depth;
1847 scsi_id = cmd->device->id;
1848 cmd->scsi_done(cmd);
1850 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
1852 * If there is a thread waiting for command completion
1853 * wake up the thread.
1855 spin_lock_irqsave(shost->host_lock, flags);
1856 lpfc_cmd->pCmd = NULL;
1857 if (lpfc_cmd->waitq)
1858 wake_up(lpfc_cmd->waitq);
1859 spin_unlock_irqrestore(shost->host_lock, flags);
1860 lpfc_release_scsi_buf(phba, lpfc_cmd);
1866 lpfc_rampup_queue_depth(vport, queue_depth);
1868 if (!result && pnode && NLP_CHK_NODE_ACT(pnode) &&
1869 ((jiffies - pnode->last_ramp_up_time) >
1870 LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
1871 ((jiffies - pnode->last_q_full_time) >
1872 LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
1873 (vport->cfg_lun_queue_depth > queue_depth)) {
1874 shost_for_each_device(tmp_sdev, shost) {
1875 if (vport->cfg_lun_queue_depth > tmp_sdev->queue_depth){
1876 if (tmp_sdev->id != scsi_id)
1878 if (tmp_sdev->ordered_tags)
1879 scsi_adjust_queue_depth(tmp_sdev,
1881 tmp_sdev->queue_depth+1);
1883 scsi_adjust_queue_depth(tmp_sdev,
1885 tmp_sdev->queue_depth+1);
1887 pnode->last_ramp_up_time = jiffies;
1890 lpfc_send_sdev_queuedepth_change_event(phba, vport, pnode,
1892 queue_depth , queue_depth + 1);
1896 * Check for queue full. If the lun is reporting queue full, then
1897 * back off the lun queue depth to prevent target overloads.
1899 if (result == SAM_STAT_TASK_SET_FULL && pnode &&
1900 NLP_CHK_NODE_ACT(pnode)) {
1901 pnode->last_q_full_time = jiffies;
1903 shost_for_each_device(tmp_sdev, shost) {
1904 if (tmp_sdev->id != scsi_id)
1906 depth = scsi_track_queue_full(tmp_sdev,
1907 tmp_sdev->queue_depth - 1);
1910 * The queue depth cannot be lowered any more.
1911 * Modify the returned error code to store
1912 * the final depth value set by
1913 * scsi_track_queue_full.
1916 depth = shost->cmd_per_lun;
1919 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
1920 "0711 detected queue full - lun queue "
1921 "depth adjusted to %d.\n", depth);
1922 lpfc_send_sdev_queuedepth_change_event(phba, vport,
1929 * If there is a thread waiting for command completion
1930 * wake up the thread.
1932 spin_lock_irqsave(shost->host_lock, flags);
1933 lpfc_cmd->pCmd = NULL;
1934 if (lpfc_cmd->waitq)
1935 wake_up(lpfc_cmd->waitq);
1936 spin_unlock_irqrestore(shost->host_lock, flags);
1938 lpfc_release_scsi_buf(phba, lpfc_cmd);
1942 * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
1943 * @data: A pointer to the immediate command data portion of the IOCB.
1944 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
1946 * The routine copies the entire FCP command from @fcp_cmnd to @data while
1947 * byte swapping the data to big endian format for transmission on the wire.
1950 lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
1953 for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
1954 i += sizeof(uint32_t), j++) {
1955 ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
1960 * lpfc_scsi_prep_cmnd - Routine to convert scsi cmnd to FCP information unit
1961 * @vport: The virtual port for which this call is being executed.
1962 * @lpfc_cmd: The scsi command which needs to be sent.
1963 * @pnode: Pointer to lpfc_nodelist.
1965 * This routine initializes the fcp_cmnd and iocb data structures from the scsi command
1969 lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
1970 struct lpfc_nodelist *pnode)
1972 struct lpfc_hba *phba = vport->phba;
1973 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
1974 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
1975 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
1976 struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
1977 int datadir = scsi_cmnd->sc_data_direction;
1980 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
1983 lpfc_cmd->fcp_rsp->rspSnsLen = 0;
1984 /* clear task management bits */
1985 lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
1987 int_to_scsilun(lpfc_cmd->pCmd->device->lun,
1988 &lpfc_cmd->fcp_cmnd->fcp_lun);
1990 memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);
1992 if (scsi_populate_tag_msg(scsi_cmnd, tag)) {
1994 case HEAD_OF_QUEUE_TAG:
1995 fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
1997 case ORDERED_QUEUE_TAG:
1998 fcp_cmnd->fcpCntl1 = ORDERED_Q;
2001 fcp_cmnd->fcpCntl1 = SIMPLE_Q;
2005 fcp_cmnd->fcpCntl1 = 0;
2008 * There are three possibilities here - use scatter-gather segments, use
2009 * the single mapping, or neither. Start the lpfc command prep by
2010 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
2011 * data bde entry.
2013 if (scsi_sg_count(scsi_cmnd)) {
2014 if (datadir == DMA_TO_DEVICE) {
2015 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
2016 iocb_cmd->un.fcpi.fcpi_parm = 0;
2017 iocb_cmd->ulpPU = 0;
2018 fcp_cmnd->fcpCntl3 = WRITE_DATA;
2019 phba->fc4OutputRequests++;
2020 } else {
2021 iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
2022 iocb_cmd->ulpPU = PARM_READ_CHECK;
2023 fcp_cmnd->fcpCntl3 = READ_DATA;
2024 phba->fc4InputRequests++;
2025 }
2026 } else {
2027 iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
2028 iocb_cmd->un.fcpi.fcpi_parm = 0;
2029 iocb_cmd->ulpPU = 0;
2030 fcp_cmnd->fcpCntl3 = 0;
2031 phba->fc4ControlRequests++;
2032 }
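/*
 * For SLI-3 HBAs that do not have BlockGuard enabled, the byte-swapped
 * FCP_CMND is embedded directly in the extended IOCB data area below.
 */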
2033 if (phba->sli_rev == 3 &&
2034 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
2035 lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
2037 * Finish initializing those IOCB fields that are independent
2038 * of the scsi_cmnd request_buffer
2040 piocbq->iocb.ulpContext = pnode->nlp_rpi;
2041 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
2042 piocbq->iocb.ulpFCP2Rcvy = 1;
2043 else
2044 piocbq->iocb.ulpFCP2Rcvy = 0;
2046 piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
2047 piocbq->context1 = lpfc_cmd;
2048 piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
2049 piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
2050 piocbq->vport = vport;
2054 * lpfc_scsi_prep_task_mgmt_cmnd - Convert scsi TM cmnd to FCP information unit
2055 * @vport: The virtual port for which this call is being executed.
2056 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
2057 * @lun: Logical unit number.
2058 * @task_mgmt_cmd: SCSI task management command.
2060 * This routine creates FCP information unit corresponding to @task_mgmt_cmd.
2067 lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
2068 struct lpfc_scsi_buf *lpfc_cmd,
2070 uint8_t task_mgmt_cmd)
2072 struct lpfc_iocbq *piocbq;
2074 struct fcp_cmnd *fcp_cmnd;
2075 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
2076 struct lpfc_nodelist *ndlp = rdata->pnode;
2078 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2079 ndlp->nlp_state != NLP_STE_MAPPED_NODE)
2082 piocbq = &(lpfc_cmd->cur_iocbq);
2083 piocbq->vport = vport;
2085 piocb = &piocbq->iocb;
2087 fcp_cmnd = lpfc_cmd->fcp_cmnd;
2088 /* Clear out any old data in the FCP command area */
2089 memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
2090 int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
2091 fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
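/*
 * fcpCntl2 carries the task management flags of the FCP_CMND
 * (e.g. target or LUN reset); apart from the LUN set above, the
 * rest of the payload stays zeroed from the memset.
 */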
2092 if (vport->phba->sli_rev == 3 &&
2093 !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
2094 lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
2095 piocb->ulpCommand = CMD_FCP_ICMND64_CR;
2096 piocb->ulpContext = ndlp->nlp_rpi;
2097 if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
2098 piocb->ulpFCP2Rcvy = 1;
2099 }
2100 piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
2102 /* ulpTimeout is only one byte */
2103 if (lpfc_cmd->timeout > 0xff) {
2105 * Do not timeout the command at the firmware level.
2106 * The driver will provide the timeout mechanism.
2108 piocb->ulpTimeout = 0;
2110 piocb->ulpTimeout = lpfc_cmd->timeout;
2117 * lpfc_tskmgmt_def_cmpl - IOCB completion routine for task management command
2118 * @phba: The Hba for which this call is being executed.
2119 * @cmdiocbq: Pointer to lpfc_iocbq data structure.
2120 * @rspiocbq: Pointer to lpfc_iocbq data structure.
2122 * This routine is the IOCB completion routine used by the device reset and
2123 * target reset handlers. It releases the scsi buffer associated with lpfc_cmd.
2126 lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
2127 struct lpfc_iocbq *cmdiocbq,
2128 struct lpfc_iocbq *rspiocbq)
2130 struct lpfc_scsi_buf *lpfc_cmd =
2131 (struct lpfc_scsi_buf *) cmdiocbq->context1;
2133 lpfc_release_scsi_buf(phba, lpfc_cmd);
2138 * lpfc_scsi_tgt_reset - Target reset handler
2139 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure
2140 * @vport: The virtual port for which this call is being executed.
2141 * @tgt_id: Target ID.
2143 * @rdata: Pointer to lpfc_rport_data.
2145 * This routine issues a TARGET RESET iocb to reset the target identified by @tgt_id.
2152 lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
2153 unsigned tgt_id, unsigned int lun,
2154 struct lpfc_rport_data *rdata)
2156 struct lpfc_hba *phba = vport->phba;
2157 struct lpfc_iocbq *iocbq;
2158 struct lpfc_iocbq *iocbqrsp;
2159 int ret;
2160 int status;
2162 if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
2163 return FAILED;
2165 lpfc_cmd->rdata = rdata;
2166 status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
2167 FCP_TARGET_RESET);
2168 if (!status)
2169 return FAILED;
2171 iocbq = &lpfc_cmd->cur_iocbq;
2172 iocbqrsp = lpfc_sli_get_iocbq(phba);
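/*
 * The reset is issued synchronously below via lpfc_sli_issue_iocb_wait();
 * the response IOCB allocated above receives the completion status.
 */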
2177 /* Issue Target Reset to TGT <num> */
2178 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
2179 "0702 Issue Target Reset to TGT %d Data: x%x x%x\n",
2180 tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
2181 status = lpfc_sli_issue_iocb_wait(phba,
2182 &phba->sli.ring[phba->sli.fcp_ring],
2183 iocbq, iocbqrsp, lpfc_cmd->timeout);
2184 if (status != IOCB_SUCCESS) {
2185 if (status == IOCB_TIMEDOUT) {
2186 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
2187 ret = TIMEOUT_ERROR;
2188 } else
2189 ret = FAILED;
2190 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
2193 lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
2194 lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
2195 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
2196 (lpfc_cmd->result & IOERR_DRVR_MASK))
2197 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
2200 lpfc_sli_release_iocbq(phba, iocbqrsp);
2205 * lpfc_info - Info entry point of scsi_host_template data structure
2206 * @host: The scsi host for which this call is being executed.
2208 * This routine provides module information about the HBA.
2211 * Pointer to char - Success.
2214 lpfc_info(struct Scsi_Host *host)
2216 struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
2217 struct lpfc_hba *phba = vport->phba;
2219 static char lpfcinfobuf[384];
2221 memset(lpfcinfobuf, 0, 384);
2222 if (phba && phba->pcidev) {
2223 strncpy(lpfcinfobuf, phba->ModelDesc, 256);
2224 len = strlen(lpfcinfobuf);
2225 snprintf(lpfcinfobuf + len,
2226 384-len,
2227 " on PCI bus %02x device %02x irq %d",
2228 phba->pcidev->bus->number,
2229 phba->pcidev->devfn,
2230 phba->pcidev->irq);
2231 len = strlen(lpfcinfobuf);
2232 if (phba->Port[0]) {
2233 snprintf(lpfcinfobuf + len,
2243 * lpfc_poll_rearm_timer - Routine to modify the fcp_poll timer of the hba
2244 * @phba: The Hba for which this call is being executed.
2246 * This routine modifies fcp_poll_timer field of @phba by cfg_poll_tmo.
2247 * The default value of cfg_poll_tmo is 10 milliseconds.
2249 static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
2251 unsigned long poll_tmo_expires =
2252 (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
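/*
 * Rearm the poll timer only while FCP commands are still outstanding
 * on the txcmplq; otherwise let the timer lapse.
 */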
2254 if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
2255 mod_timer(&phba->fcp_poll_timer,
2256 poll_tmo_expires);
2260 * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA
2261 * @phba: The Hba for which this call is being executed.
2263 * This routine starts the fcp_poll_timer of @phba.
2265 void lpfc_poll_start_timer(struct lpfc_hba * phba)
2267 lpfc_poll_rearm_timer(phba);
2271 * lpfc_poll_timeout - Restart polling timer
2272 * @ptr: Pointer to lpfc_hba data structure (passed as an unsigned long).
2274 * This routine restarts the fcp_poll timer when FCP ring polling is enabled
2275 * and the FCP ring interrupt is disabled.
2278 void lpfc_poll_timeout(unsigned long ptr)
2280 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
2282 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
2283 lpfc_sli_poll_fcp_ring (phba);
2284 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
2285 lpfc_poll_rearm_timer(phba);
2290 * lpfc_queuecommand - scsi_host_template queuecommand entry point
2291 * @cmnd: Pointer to scsi_cmnd data structure.
2292 * @done: Pointer to done routine.
2294 * Driver registers this routine to scsi midlayer to submit a @cmd to process.
2295 * This routine prepares an IOCB from the scsi command and submits it to the firmware.
2296 * The @done callback is invoked after the driver has finished processing the command.
2300 * SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
2303 lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2305 struct Scsi_Host *shost = cmnd->device->host;
2306 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2307 struct lpfc_hba *phba = vport->phba;
2308 struct lpfc_sli *psli = &phba->sli;
2309 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
2310 struct lpfc_nodelist *ndlp = rdata->pnode;
2311 struct lpfc_scsi_buf *lpfc_cmd;
2312 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
2315 err = fc_remote_port_chkready(rport);
2318 goto out_fail_command;
2321 if (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
2322 scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
2324 printk(KERN_ERR "BLKGRD ERROR: rcvd protected cmd:%02x op:%02x "
2325 "str=%s without registering for BlockGuard - "
2326 "Rejecting command\n",
2327 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
2328 dif_op_str[scsi_get_prot_op(cmnd)]);
2329 goto out_fail_command;
2333 * Catch race where our node has transitioned, but the
2334 * transport is still transitioning.
2336 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
2337 cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
2338 goto out_fail_command;
2340 if (vport->cfg_max_scsicmpl_time &&
2341 (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth))
2342 goto out_host_busy;
2344 lpfc_cmd = lpfc_get_scsi_buf(phba);
2345 if (lpfc_cmd == NULL) {
2346 lpfc_rampdown_queue_depth(phba);
2348 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
2349 "0707 driver's buffer pool is empty, "
2355 * Store the midlayer's command structure for the completion phase
2356 * and complete the command initialization.
2358 lpfc_cmd->pCmd = cmnd;
2359 lpfc_cmd->rdata = rdata;
2360 lpfc_cmd->timeout = 0;
2361 lpfc_cmd->start_time = jiffies;
2362 cmnd->host_scribble = (unsigned char *)lpfc_cmd;
2363 cmnd->scsi_done = done;
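/*
 * Commands with a DIF/BlockGuard protection operation take the
 * BlockGuard DMA prep path below; all others use the normal
 * scatter-gather prep.
 */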
2365 if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
2366 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2367 "9033 BLKGRD: rcvd protected cmd:%02x op:%02x "
2369 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
2370 dif_op_str[scsi_get_prot_op(cmnd)]);
2371 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2372 "9034 BLKGRD: CDB: %02x %02x %02x %02x %02x "
2373 "%02x %02x %02x %02x %02x \n",
2374 cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2],
2375 cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5],
2376 cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8],
2378 if (cmnd->cmnd[0] == READ_10)
2379 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2380 "9035 BLKGRD: READ @ sector %llu, "
2382 (unsigned long long)scsi_get_lba(cmnd),
2383 cmnd->request->nr_sectors);
2384 else if (cmnd->cmnd[0] == WRITE_10)
2385 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2386 "9036 BLKGRD: WRITE @ sector %llu, "
2387 "count %lu cmd=%p\n",
2388 (unsigned long long)scsi_get_lba(cmnd),
2389 cmnd->request->nr_sectors,
2392 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
2394 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2395 "9038 BLKGRD: rcvd unprotected cmd:%02x op:%02x"
2397 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
2398 dif_op_str[scsi_get_prot_op(cmnd)]);
2399 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2400 "9039 BLKGRD: CDB: %02x %02x %02x %02x %02x "
2401 "%02x %02x %02x %02x %02x \n",
2402 cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2],
2403 cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5],
2404 cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8],
2406 if (cmnd->cmnd[0] == READ_10)
2407 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2408 "9040 dbg: READ @ sector %llu, "
2410 (unsigned long long)scsi_get_lba(cmnd),
2411 cmnd->request->nr_sectors);
2412 else if (cmnd->cmnd[0] == WRITE_10)
2413 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2414 "9041 dbg: WRITE @ sector %llu, "
2415 "count %lu cmd=%p\n",
2416 (unsigned long long)scsi_get_lba(cmnd),
2417 cmnd->request->nr_sectors, cmnd);
2419 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2420 "9042 dbg: parser not implemented\n");
2421 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
2425 goto out_host_busy_free_buf;
2427 lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
2429 atomic_inc(&ndlp->cmd_pending);
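/*
 * Issue the IOCB on the FCP ring. SLI_IOCB_RET_IOCB requests that the
 * IOCB be handed back rather than queued internally when it cannot be
 * issued, so a failure here is reported to the midlayer as host-busy.
 */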
2430 err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
2431 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
2432 if (err) {
2433 atomic_dec(&ndlp->cmd_pending);
2434 goto out_host_busy_free_buf;
2435 }
2436 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
2437 lpfc_sli_poll_fcp_ring(phba);
2438 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
2439 lpfc_poll_rearm_timer(phba);
2444 out_host_busy_free_buf:
2445 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
2446 lpfc_release_scsi_buf(phba, lpfc_cmd);
2447 out_host_busy:
2448 return SCSI_MLQUEUE_HOST_BUSY;
2456 * lpfc_block_error_handler - Routine to block error handler
2457 * @cmnd: Pointer to scsi_cmnd data structure.
2459 * This routine blocks execution until the fc_rport state is no longer FC_PORTSTATE_BLOCKED.
2462 lpfc_block_error_handler(struct scsi_cmnd *cmnd)
2464 struct Scsi_Host *shost = cmnd->device->host;
2465 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
2467 spin_lock_irq(shost->host_lock);
2468 while (rport->port_state == FC_PORTSTATE_BLOCKED) {
2469 spin_unlock_irq(shost->host_lock);
2471 spin_lock_irq(shost->host_lock);
2473 spin_unlock_irq(shost->host_lock);
2478 * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
2479 * @cmnd: Pointer to scsi_cmnd data structure.
2481 * This routine aborts @cmnd pending in base driver.
2488 lpfc_abort_handler(struct scsi_cmnd *cmnd)
2490 struct Scsi_Host *shost = cmnd->device->host;
2491 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2492 struct lpfc_hba *phba = vport->phba;
2493 struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
2494 struct lpfc_iocbq *iocb;
2495 struct lpfc_iocbq *abtsiocb;
2496 struct lpfc_scsi_buf *lpfc_cmd;
2497 IOCB_t *cmd, *icmd;
2498 int ret = SUCCESS;
2499 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
2501 lpfc_block_error_handler(cmnd);
2502 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
2506 * If pCmd field of the corresponding lpfc_scsi_buf structure
2507 * points to a different SCSI command, then the driver has
2508 * already completed this command, but the midlayer did not
2509 * see the completion before the eh fired. Just return
2510 * SUCCESS.
2512 iocb = &lpfc_cmd->cur_iocbq;
2513 if (lpfc_cmd->pCmd != cmnd)
2514 goto out;
2516 BUG_ON(iocb->context1 != lpfc_cmd);
2518 abtsiocb = lpfc_sli_get_iocbq(phba);
2519 if (abtsiocb == NULL) {
2525 * The scsi command cannot be in the txq, and it is in flight because the
2526 * pCmd is still pointing at the SCSI command we have to abort. There
2527 * is no need to search the txcmplq. Just send an abort to the FW.
2530 cmd = &iocb->iocb;
2531 icmd = &abtsiocb->iocb;
2532 icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
2533 icmd->un.acxri.abortContextTag = cmd->ulpContext;
2534 icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
2537 icmd->ulpClass = cmd->ulpClass;
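/*
 * With the link up a real ABTS can be sent (ABORT_XRI_CN); if the link
 * is down the exchange is simply closed locally (CLOSE_XRI_CN).
 */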
2538 if (lpfc_is_link_up(phba))
2539 icmd->ulpCommand = CMD_ABORT_XRI_CN;
2541 icmd->ulpCommand = CMD_CLOSE_XRI_CN;
2543 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
2544 abtsiocb->vport = vport;
2545 if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) {
2546 lpfc_sli_release_iocbq(phba, abtsiocb);
2551 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
2552 lpfc_sli_poll_fcp_ring (phba);
2554 lpfc_cmd->waitq = &waitq;
2555 /* Wait for abort to complete */
2556 wait_event_timeout(waitq,
2557 (lpfc_cmd->pCmd != cmnd),
2558 (2*vport->cfg_devloss_tmo*HZ));
2560 spin_lock_irq(shost->host_lock);
2561 lpfc_cmd->waitq = NULL;
2562 spin_unlock_irq(shost->host_lock);
2564 if (lpfc_cmd->pCmd == cmnd) {
2566 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2567 "0748 abort handler timed out waiting "
2568 "for abort to complete: ret %#x, ID %d, "
2569 "LUN %d, snum %#lx\n",
2570 ret, cmnd->device->id, cmnd->device->lun,
2571 cmnd->serial_number);
2575 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
2576 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
2577 "LUN %d snum %#lx\n", ret, cmnd->device->id,
2578 cmnd->device->lun, cmnd->serial_number);
2583 * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
2584 * @cmnd: Pointer to scsi_cmnd data structure.
2586 * This routine does a device reset by sending a TARGET_RESET task management
2587 * command.
2594 lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
2596 struct Scsi_Host *shost = cmnd->device->host;
2597 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2598 struct lpfc_hba *phba = vport->phba;
2599 struct lpfc_scsi_buf *lpfc_cmd;
2600 struct lpfc_iocbq *iocbq, *iocbqrsp;
2601 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
2602 struct lpfc_nodelist *pnode = rdata->pnode;
2603 unsigned long later;
2604 int ret = SUCCESS;
2605 int status;
2606 int cnt;
2607 struct lpfc_scsi_event_header scsi_event;
2609 lpfc_block_error_handler(cmnd);
2611 * If the target is not in a MAPPED state, delay issuing the reset until
2612 * the target is rediscovered or the devloss timeout expires.
2614 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
2615 while (time_after(later, jiffies)) {
2616 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
2618 if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
2620 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
2621 rdata = cmnd->device->hostdata;
2624 pnode = rdata->pnode;
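/*
 * Post a vendor-unique SCSI target-reset event to the FC transport
 * before attempting the reset.
 */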
2627 scsi_event.event_type = FC_REG_SCSI_EVENT;
2628 scsi_event.subcategory = LPFC_EVENT_TGTRESET;
2630 memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
2631 memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));
2633 fc_host_post_vendor_event(shost,
2634 fc_get_event_number(),
2636 (char *)&scsi_event,
2639 if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) {
2640 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2641 "0721 LUN Reset rport "
2642 "failure: msec x%x rdata x%p\n",
2643 jiffies_to_msecs(jiffies - later), rdata);
2646 lpfc_cmd = lpfc_get_scsi_buf(phba);
2647 if (lpfc_cmd == NULL)
2649 lpfc_cmd->timeout = 60;
2650 lpfc_cmd->rdata = rdata;
2652 status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd,
2656 lpfc_release_scsi_buf(phba, lpfc_cmd);
2659 iocbq = &lpfc_cmd->cur_iocbq;
2661 /* get a buffer for this IOCB command response */
2662 iocbqrsp = lpfc_sli_get_iocbq(phba);
2663 if (iocbqrsp == NULL) {
2664 lpfc_release_scsi_buf(phba, lpfc_cmd);
2667 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
2668 "0703 Issue target reset to TGT %d LUN %d "
2669 "rpi x%x nlp_flag x%x\n", cmnd->device->id,
2670 cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
2671 status = lpfc_sli_issue_iocb_wait(phba,
2672 &phba->sli.ring[phba->sli.fcp_ring],
2673 iocbq, iocbqrsp, lpfc_cmd->timeout);
2674 if (status == IOCB_TIMEDOUT) {
2675 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
2676 ret = TIMEOUT_ERROR;
2677 } else {
2678 if (status != IOCB_SUCCESS)
2679 ret = FAILED;
2680 lpfc_release_scsi_buf(phba, lpfc_cmd);
2681 }
2682 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2683 "0713 SCSI layer issued device reset (%d, %d) "
2684 "return x%x status x%x result x%x\n",
2685 cmnd->device->id, cmnd->device->lun, ret,
2686 iocbqrsp->iocb.ulpStatus,
2687 iocbqrsp->iocb.un.ulpWord[4]);
2688 lpfc_sli_release_iocbq(phba, iocbqrsp);
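/*
 * Count any I/O still outstanding to this target, abort it on the FCP
 * ring, and poll until the flush completes or the devloss window expires.
 */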
2689 cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id, cmnd->device->lun,
2692 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
2693 cmnd->device->id, cmnd->device->lun,
2695 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
2696 while (time_after(later, jiffies) && cnt) {
2697 schedule_timeout_uninterruptible(msecs_to_jiffies(20));
2698 cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id,
2699 cmnd->device->lun, LPFC_CTX_TGT);
2702 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2703 "0719 device reset I/O flush failure: "
2711 * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
2712 * @cmnd: Pointer to scsi_cmnd data structure.
2714 * This routine does a target reset for every target on @cmnd->device->host.
2721 lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
2723 struct Scsi_Host *shost = cmnd->device->host;
2724 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2725 struct lpfc_hba *phba = vport->phba;
2726 struct lpfc_nodelist *ndlp = NULL;
2727 int match;
2728 int ret = SUCCESS, status = SUCCESS, i;
2729 int cnt;
2730 struct lpfc_scsi_buf * lpfc_cmd;
2731 unsigned long later;
2732 struct lpfc_scsi_event_header scsi_event;
2734 scsi_event.event_type = FC_REG_SCSI_EVENT;
2735 scsi_event.subcategory = LPFC_EVENT_BUSRESET;
2737 memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
2738 memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));
2740 fc_host_post_vendor_event(shost,
2741 fc_get_event_number(),
2743 (char *)&scsi_event,
2746 lpfc_block_error_handler(cmnd);
2748 * Since the driver manages a single bus device, reset all
2749 * targets known to the driver. Should any target reset
2750 * fail, this routine returns failure to the midlayer.
2752 for (i = 0; i < LPFC_MAX_TARGET; i++) {
2753 /* Search for mapped node by target ID */
2755 spin_lock_irq(shost->host_lock);
2756 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
2757 if (!NLP_CHK_NODE_ACT(ndlp))
2759 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
2760 ndlp->nlp_sid == i &&
2766 spin_unlock_irq(shost->host_lock);
2769 lpfc_cmd = lpfc_get_scsi_buf(phba);
2771 lpfc_cmd->timeout = 60;
2772 status = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i,
2774 ndlp->rport->dd_data);
2775 if (status != TIMEOUT_ERROR)
2776 lpfc_release_scsi_buf(phba, lpfc_cmd);
2778 if (!lpfc_cmd || status != SUCCESS) {
2779 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2780 "0700 Bus Reset on target %d failed\n",
2786 * All outstanding txcmplq I/Os should have been aborted by
2787 * the targets. Unfortunately, some targets do not abide by
2788 * this, forcing the driver to double-check.
2790 cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
2792 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
2793 0, 0, LPFC_CTX_HOST);
2794 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
2795 while (time_after(later, jiffies) && cnt) {
2796 schedule_timeout_uninterruptible(msecs_to_jiffies(20));
2797 cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
2800 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2801 "0715 Bus Reset I/O flush failure: "
2802 "cnt x%x left x%x\n", cnt, i);
2805 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2806 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
2811 * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
2812 * @sdev: Pointer to scsi_device.
2814 * This routine allocates cfg_lun_queue_depth + 2 scsi_bufs and adds them to
2815 * this host's globally available list of scsi buffers. It also makes sure no
2816 * more scsi buffers are allocated than the HBA limit conveyed to the midlayer.
2817 * This list of scsi buffers exists for the lifetime of the driver.
2824 lpfc_slave_alloc(struct scsi_device *sdev)
2826 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
2827 struct lpfc_hba *phba = vport->phba;
2828 struct lpfc_scsi_buf *scsi_buf = NULL;
2829 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
2830 uint32_t total = 0, i;
2831 uint32_t num_to_alloc = 0;
2832 unsigned long flags;
2834 if (!rport || fc_remote_port_chkready(rport))
2837 sdev->hostdata = rport->dd_data;
2840 * Populate the cmds_per_lun count scsi_bufs into this host's globally
2841 * available list of scsi buffers. Don't allocate more than the
2842 * HBA limit conveyed to the midlayer via the host structure. The
2843 * formula accounts for the lun_queue_depth + error handlers + 1
2844 * extra. This list of scsi bufs exists for the lifetime of the driver.
2846 total = phba->total_scsi_bufs;
2847 num_to_alloc = vport->cfg_lun_queue_depth + 2;
2849 /* Allow some exchanges to be available always to complete discovery */
2850 if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
2851 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
2852 "0704 At limitation of %d preallocated "
2853 "command buffers\n", total);
2855 /* Allow some exchanges to be available always to complete discovery */
2856 } else if (total + num_to_alloc >
2857 phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
2858 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
2859 "0705 Allocation request of %d "
2860 "command buffers will exceed max of %d. "
2861 "Reducing allocation request to %d.\n",
2862 num_to_alloc, phba->cfg_hba_queue_depth,
2863 (phba->cfg_hba_queue_depth - total));
2864 num_to_alloc = phba->cfg_hba_queue_depth - total;
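/*
 * Allocate the scsi buffers and add each one to the HBA-wide free list
 * under the scsi_buf_list lock.
 */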
2867 for (i = 0; i < num_to_alloc; i++) {
2868 scsi_buf = lpfc_new_scsi_buf(vport);
2870 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
2871 "0706 Failed to allocate "
2872 "command buffer\n");
2876 spin_lock_irqsave(&phba->scsi_buf_list_lock, flags);
2877 phba->total_scsi_bufs++;
2878 list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
2879 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, flags);
2885 * lpfc_slave_configure - scsi_host_template slave_configure entry point
2886 * @sdev: Pointer to scsi_device.
2888 * This routine configures the following items:
2889 * - Tag command queuing support for @sdev if supported.
2890 * - Dev loss time out value of fc_rport.
2891 * - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
2897 lpfc_slave_configure(struct scsi_device *sdev)
2899 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
2900 struct lpfc_hba *phba = vport->phba;
2901 struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
2903 if (sdev->tagged_supported)
2904 scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth);
2906 scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth);
2909 * Initialize the fc transport attributes for the target
2910 * containing this scsi device. Also note that the driver's
2911 * target pointer is stored in the starget_data for the
2912 * driver's sysfs entry point functions.
2914 rport->dev_loss_tmo = vport->cfg_devloss_tmo;
2916 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
2917 lpfc_sli_poll_fcp_ring(phba);
2918 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
2919 lpfc_poll_rearm_timer(phba);
2926 * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
2927 * @sdev: Pointer to scsi_device.
2929 * This routine sets the @sdev hostdata field to NULL.
2932 lpfc_slave_destroy(struct scsi_device *sdev)
2934 sdev->hostdata = NULL;
2939 struct scsi_host_template lpfc_template = {
2940 .module = THIS_MODULE,
2941 .name = LPFC_DRIVER_NAME,
2943 .queuecommand = lpfc_queuecommand,
2944 .eh_abort_handler = lpfc_abort_handler,
2945 .eh_device_reset_handler= lpfc_device_reset_handler,
2946 .eh_bus_reset_handler = lpfc_bus_reset_handler,
2947 .slave_alloc = lpfc_slave_alloc,
2948 .slave_configure = lpfc_slave_configure,
2949 .slave_destroy = lpfc_slave_destroy,
2950 .scan_finished = lpfc_scan_finished,
2952 .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
2953 .cmd_per_lun = LPFC_CMD_PER_LUN,
2954 .use_clustering = ENABLE_CLUSTERING,
2955 .shost_attrs = lpfc_hba_attrs,
2956 .max_sectors = 0xFFFF,
2959 struct scsi_host_template lpfc_vport_template = {
2960 .module = THIS_MODULE,
2961 .name = LPFC_DRIVER_NAME,
2963 .queuecommand = lpfc_queuecommand,
2964 .eh_abort_handler = lpfc_abort_handler,
2965 .eh_device_reset_handler= lpfc_device_reset_handler,
2966 .eh_bus_reset_handler = lpfc_bus_reset_handler,
2967 .slave_alloc = lpfc_slave_alloc,
2968 .slave_configure = lpfc_slave_configure,
2969 .slave_destroy = lpfc_slave_destroy,
2970 .scan_finished = lpfc_scan_finished,
2972 .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
2973 .cmd_per_lun = LPFC_CMD_PER_LUN,
2974 .use_clustering = ENABLE_CLUSTERING,
2975 .shost_attrs = lpfc_vport_attrs,
2976 .max_sectors = 0xFFFF,