/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"

#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2
/**
 * lpfc_update_stats: Update statistical data for the command completion.
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called when there is a command completion and this
 * function updates the statistical data for the command completion.
 **/
static void
lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	unsigned long flags;
	struct Scsi_Host *shost = cmd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	unsigned long latency;
	int i;

	if (cmd->result)
		return;

	spin_lock_irqsave(shost->host_lock, flags);
	if (!vport->stat_data_enabled ||
		vport->stat_data_blocked ||
		!pnode->lat_data ||
		(phba->bucket_type == LPFC_NO_BUCKET)) {
		spin_unlock_irqrestore(shost->host_lock, flags);
		return;
	}
	latency = jiffies_to_msecs(jiffies - lpfc_cmd->start_time);

	if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
		i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
			phba->bucket_step;
		if (i >= LPFC_MAX_BUCKET_COUNT)
			i = LPFC_MAX_BUCKET_COUNT - 1;
	} else {
		for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
			if (latency <= (phba->bucket_base +
				((1<<i)*phba->bucket_step)))
				break;
	}

	pnode->lat_data[i].cmd_count++;
	spin_unlock_irqrestore(shost->host_lock, flags);
}
/**
 * lpfc_send_sdev_queuedepth_change_event: Posts a queuedepth change
 *                   event.
 * @phba: Pointer to HBA context object.
 * @vport: Pointer to vport object.
 * @ndlp: Pointer to FC node associated with the target.
 * @lun: Lun number of the scsi device.
 * @old_val: Old value of the queue depth.
 * @new_val: New value of the queue depth.
 *
 * This function sends an event to the mgmt application indicating
 * there is a change in the scsi device queue depth.
 **/
static void
lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
		struct lpfc_vport *vport,
		struct lpfc_nodelist *ndlp,
		uint32_t lun,
		uint32_t old_val,
		uint32_t new_val)
{
	struct lpfc_fast_path_event *fast_path_evt;
	unsigned long flags;

	fast_path_evt = lpfc_alloc_fast_evt(phba);
	if (!fast_path_evt)
		return;

	fast_path_evt->un.queue_depth_evt.scsi_event.event_type =
		FC_REG_SCSI_EVENT;
	fast_path_evt->un.queue_depth_evt.scsi_event.subcategory =
		LPFC_EVENT_VARQUEDEPTH;

	/* Report all luns with change in queue depth */
	fast_path_evt->un.queue_depth_evt.scsi_event.lun = lun;
	if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
		memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwpn,
			&ndlp->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwnn,
			&ndlp->nlp_nodename, sizeof(struct lpfc_name));
	}

	fast_path_evt->un.queue_depth_evt.oldval = old_val;
	fast_path_evt->un.queue_depth_evt.newval = new_val;
	fast_path_evt->vport = vport;

	fast_path_evt->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	lpfc_worker_wake_up(phba);

	return;
}
/**
 * lpfc_adjust_queue_depth: Post RAMP_DOWN_QUEUE event for worker thread.
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held when there is a resource
 * error in the driver or in the firmware. It posts at most one
 * WORKER_RAMP_DOWN_QUEUE event for the worker thread to process.
 **/
void
lpfc_adjust_queue_depth(struct lpfc_hba *phba)
{
	unsigned long flags;
	uint32_t evt_posted;

	spin_lock_irqsave(&phba->hbalock, flags);
	atomic_inc(&phba->num_rsrc_err);
	phba->last_rsrc_error_time = jiffies;

	if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	phba->last_ramp_down_time = jiffies;

	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
	if (!evt_posted)
		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	if (!evt_posted)
		lpfc_worker_wake_up(phba);
	return;
}
/**
 * lpfc_rampup_queue_depth: Post RAMP_UP_QUEUE event for worker thread.
 * @vport: Pointer to vport object.
 * @sdev: Pointer to the scsi device which completed the command.
 *
 * This function is called with no lock held when there is a successful
 * SCSI command completion. It posts at most one WORKER_RAMP_UP_QUEUE
 * event for the worker thread to process.
 **/
static inline void
lpfc_rampup_queue_depth(struct lpfc_vport *vport,
			struct scsi_device *sdev)
{
	unsigned long flags;
	struct lpfc_hba *phba = vport->phba;
	uint32_t evt_posted;

	atomic_inc(&phba->num_cmd_success);

	if (vport->cfg_lun_queue_depth <= sdev->queue_depth)
		return;
	spin_lock_irqsave(&phba->hbalock, flags);
	if (((phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) > jiffies) ||
	    ((phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL) > jiffies)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}
	phba->last_ramp_up_time = jiffies;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE;
	if (!evt_posted)
		phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	if (!evt_posted)
		lpfc_worker_wake_up(phba);
	return;
}
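
/**
 * lpfc_ramp_down_queue_handler: WORKER_RAMP_DOWN_QUEUE event handler.
 * @phba: Pointer to HBA context object.
 *
 * This function is called from the worker thread when a RAMP_DOWN_QUEUE
 * event is posted. It walks every scsi device on every vport and lowers
 * each queue depth in proportion to the resource errors seen since the
 * last ramp down, then clears the error and success counters.
 **/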
void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host  *shost;
	struct scsi_device *sdev;
	unsigned long new_queue_depth, old_queue_depth;
	unsigned long num_rsrc_err, num_cmd_success;
	int i;
	struct lpfc_rport_data *rdata;

	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
	num_cmd_success = atomic_read(&phba->num_cmd_success);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				new_queue_depth =
					sdev->queue_depth * num_rsrc_err /
					(num_rsrc_err + num_cmd_success);
				if (!new_queue_depth)
					new_queue_depth = sdev->queue_depth - 1;
				else
					new_queue_depth = sdev->queue_depth -
								new_queue_depth;
				old_queue_depth = sdev->queue_depth;
				if (sdev->ordered_tags)
					scsi_adjust_queue_depth(sdev,
							MSG_ORDERED_TAG,
							new_queue_depth);
				else
					scsi_adjust_queue_depth(sdev,
							MSG_SIMPLE_TAG,
							new_queue_depth);
				rdata = sdev->hostdata;
				if (rdata)
					lpfc_send_sdev_queuedepth_change_event(
						phba, vports[i],
						rdata->pnode,
						sdev->lun, old_queue_depth,
						new_queue_depth);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}
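
/**
 * lpfc_ramp_up_queue_handler: WORKER_RAMP_UP_QUEUE event handler.
 * @phba: Pointer to HBA context object.
 *
 * This function is called from the worker thread when a RAMP_UP_QUEUE
 * event is posted. It walks every scsi device on every vport and bumps
 * each queue depth by one, up to the configured lun_queue_depth limit,
 * then clears the error and success counters.
 **/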
void
lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host  *shost;
	struct scsi_device *sdev;
	int i;
	struct lpfc_rport_data *rdata;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				if (vports[i]->cfg_lun_queue_depth <=
				    sdev->queue_depth)
					continue;
				if (sdev->ordered_tags)
					scsi_adjust_queue_depth(sdev,
							MSG_ORDERED_TAG,
							sdev->queue_depth+1);
				else
					scsi_adjust_queue_depth(sdev,
							MSG_SIMPLE_TAG,
							sdev->queue_depth+1);
				rdata = sdev->hostdata;
				if (rdata)
					lpfc_send_sdev_queuedepth_change_event(
						phba, vports[i],
						rdata->pnode,
						sdev->lun,
						sdev->queue_depth - 1,
						sdev->queue_depth);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}
/**
 * lpfc_scsi_dev_block: set all scsi hosts to block state.
 * @phba: Pointer to HBA context object.
 *
 * This function walks the vport list and sets each SCSI host to block state
 * by invoking the fc_remote_port_delete() routine. This function is invoked
 * by EEH when the device's PCI slot has been permanently disabled.
 **/
void
lpfc_scsi_dev_block(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host  *shost;
	struct scsi_device *sdev;
	struct fc_rport *rport;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				rport = starget_to_rport(scsi_target(sdev));
				fc_remote_port_delete(rport);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
}
/**
 * lpfc_new_scsi_buf: Allocates a scsi buffer.
 * @vport: The virtual port for which this call is being executed.
 *
 * This routine allocates a scsi buffer, which contains all the necessary
 * information needed to initiate a SCSI I/O.  The non-DMAable buffer region
 * contains information to build the IOCB.  The DMAable region contains
 * memory for the FCP CMND, FCP RSP, and the initial BPL.  In addition to
 * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL
 * and the BPL BDE is setup in the IOCB.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf data structure - Success
 **/
static struct lpfc_scsi_buf *
lpfc_new_scsi_buf(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_bpl;
	uint16_t iotag;

	psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
	if (!psb)
		return NULL;

	/*
	 * Get memory from the pci pool to map the virt space to pci bus space
	 * for an I/O.  The DMA buffer includes space for the struct fcp_cmnd,
	 * struct fcp_rsp and the number of bde's necessary to support the
	 * sg_tablesize.
	 */
	psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
				   &psb->dma_handle);
	if (!psb->data) {
		kfree(psb);
		return NULL;
	}

	/* Initialize virtual ptrs to dma_buf region. */
	memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

	/* Allocate iotag for psb->cur_iocbq. */
	iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
	if (iotag == 0) {
		pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
			      psb->data, psb->dma_handle);
		kfree(psb);
		return NULL;
	}
	psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

	psb->fcp_cmnd = psb->data;
	psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
	psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
		       sizeof(struct fcp_rsp);

	/* Initialize local short-hand pointers. */
	bpl = psb->fcp_bpl;
	pdma_phys_fcp_cmd = psb->dma_handle;
	pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
	pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

	/*
	 * The first two bdes are the FCP_CMD and FCP_RSP.  The balance are sg
	 * list bdes.  Initialize the first two and leave the rest for
	 * queuecommand.
	 */
	bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
	bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
	bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
	bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	bpl[0].tus.w = le32_to_cpu(bpl->tus.w);

	/* Setup the physical region for the FCP RSP */
	bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
	bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
	bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
	bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	bpl[1].tus.w = le32_to_cpu(bpl->tus.w);

	/*
	 * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
	 * initialize it with all known data now.
	 */
	iocb = &psb->cur_iocbq.iocb;
	iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
	if (phba->sli_rev == 3) {
		/* fill in immediate fcp command BDE */
		iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
		iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
		iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
						       unsli3.fcp_ext.icd);
		iocb->un.fcpi64.bdl.addrHigh = 0;
		iocb->ulpBdeCount = 0;
		iocb->ulpLe = 0;
		/* fill in response BDE */
		iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
						sizeof(struct fcp_rsp);
		iocb->unsli3.fcp_ext.rbde.addrLow =
						putPaddrLow(pdma_phys_fcp_rsp);
		iocb->unsli3.fcp_ext.rbde.addrHigh =
						putPaddrHigh(pdma_phys_fcp_rsp);
	} else {
		iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
		iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
		iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_bpl);
		iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_bpl);
		iocb->ulpBdeCount = 1;
		iocb->ulpLe = 1;
	}
	iocb->ulpClass = CLASS3;

	return psb;
}
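
/**
 * lpfc_get_scsi_buf: Gets a scsi buffer from lpfc_scsi_buf_list of the HBA.
 * @phba: Pointer to HBA context object.
 *
 * This routine removes a scsi buffer from the head of @phba's
 * lpfc_scsi_buf_list list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/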
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf(struct lpfc_hba * phba)
{
	struct lpfc_scsi_buf *lpfc_cmd = NULL;
	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
	if (lpfc_cmd) {
		lpfc_cmd->seg_cnt = 0;
		lpfc_cmd->nonsg_phys = 0;
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return lpfc_cmd;
}
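
/**
 * lpfc_release_scsi_buf: Returns a scsi buffer to the HBA free list.
 * @phba: Pointer to HBA context object.
 * @psb: Pointer to the scsi buffer to be released.
 *
 * This routine releases @psb by adding it to the tail of @phba's
 * lpfc_scsi_buf_list list.
 **/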
static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	psb->pCmd = NULL;
	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
}
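
/**
 * lpfc_scsi_prep_dma_buf: Routine to do DMA mapping for scsi buffer.
 * @phba: Pointer to HBA context object.
 * @lpfc_cmd: Pointer to lpfc scsi command.
 *
 * This routine does the DMA mapping for the scatter-gather list of the
 * scsi command held in @lpfc_cmd and builds the data BDEs either in the
 * extended IOCB (SLI-3, when they fit) or in the BPL.
 *
 * Return codes:
 *   1 - Error
 *   0 - Success
 **/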
static int
lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	int nseg, datadir = scsi_cmnd->sc_data_direction;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from dma_map_sg
		 * because this is a count of dma-mappings used to map the
		 * use_sg pages.  They are not guaranteed to be the same for
		 * those architectures that implement an IOMMU.
		 */

		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
				  scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!nseg))
			return 1;

		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			printk(KERN_ERR "%s: Too many sg segments from "
			       "dma_map_sg.  Config %d, seg_cnt %d\n",
			       __func__, phba->cfg_sg_seg_cnt,
			       lpfc_cmd->seg_cnt);
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single scsi command.  Just run through the seg_cnt and format
		 * the bde's.
		 * When using SLI-3 the driver will try to fit all the BDEs into
		 * the IOCB. If it can't then the BDEs get added to a BPL as it
		 * does for SLI-2 mode.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
			physaddr = sg_dma_address(sgel);
			if (phba->sli_rev == 3 &&
			    nseg <= LPFC_EXT_DATA_BDE_COUNT) {
				data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				data_bde->tus.f.bdeSize = sg_dma_len(sgel);
				data_bde->addrLow = putPaddrLow(physaddr);
				data_bde->addrHigh = putPaddrHigh(physaddr);
				data_bde++;
			} else {
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				bpl->tus.f.bdeSize = sg_dma_len(sgel);
				bpl->tus.w = le32_to_cpu(bpl->tus.w);
				bpl->addrLow =
					le32_to_cpu(putPaddrLow(physaddr));
				bpl->addrHigh =
					le32_to_cpu(putPaddrHigh(physaddr));
				bpl++;
			}
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
	 * explicitly reinitialized and for SLI-3 the extended bde count is
	 * explicitly reinitialized since all iocb memory resources are reused.
	 */
	if (phba->sli_rev == 3) {
		if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
			/*
			 * The extended IOCB format can only fit 3 BDE or a BPL.
			 * This I/O has more than 3 BDE so the 1st data bde will
			 * be a BPL that is filled in here.
			 */
			physaddr = lpfc_cmd->dma_handle;
			data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
			data_bde->tus.f.bdeSize = (num_bde *
						   sizeof(struct ulp_bde64));
			physaddr += (sizeof(struct fcp_cmnd) +
				     sizeof(struct fcp_rsp) +
				     (2 * sizeof(struct ulp_bde64)));
			data_bde->addrHigh = putPaddrHigh(physaddr);
			data_bde->addrLow = putPaddrLow(physaddr);
			/* ebde count includes the response bde and data bpl */
			iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
		} else {
			/* ebde count includes the response bde and data bdes */
			iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
		}
	} else {
		iocb_cmd->un.fcpi64.bdl.bdeSize =
			((num_bde + 2) * sizeof(struct ulp_bde64));
	}
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
	return 0;
}
/**
 * lpfc_send_scsi_error_event: Posts an event when there is SCSI error.
 * @phba: Pointer to hba context object.
 * @vport: Pointer to vport object.
 * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
 * @rsp_iocb: Pointer to response iocb object which reported error.
 *
 * This function posts an event when there is a SCSI command reporting
 * error from the scsi device.
 **/
static void
lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
		struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
	struct lpfc_fast_path_event *fast_path_evt = NULL;
	struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
	unsigned long flags;

	/* If there is queuefull or busy condition send a scsi event */
	if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
		(cmnd->result == SAM_STAT_BUSY)) {
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.scsi_evt.event_type =
			FC_REG_SCSI_EVENT;
		fast_path_evt->un.scsi_evt.subcategory =
			(cmnd->result == SAM_STAT_TASK_SET_FULL) ?
			LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
		fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
		memcpy(&fast_path_evt->un.scsi_evt.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.scsi_evt.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
	} else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
		((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.check_cond_evt.scsi_event.event_type =
			FC_REG_SCSI_EVENT;
		fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
			LPFC_EVENT_CHECK_COND;
		fast_path_evt->un.check_cond_evt.scsi_event.lun =
			cmnd->device->lun;
		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
		fast_path_evt->un.check_cond_evt.sense_key =
			cmnd->sense_buffer[2] & 0xf;
		fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
		fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
	} else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
		     fcpi_parm &&
		     ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
			((scsi_status == SAM_STAT_GOOD) &&
			!(resp_info & (RESID_UNDER | RESID_OVER))))) {
		/*
		 * If status is good or resid does not match with fcp_param and
		 * there is valid fcpi_parm, then there is a read_check error
		 */
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.read_check_error.header.event_type =
			FC_REG_FABRIC_EVENT;
		fast_path_evt->un.read_check_error.header.subcategory =
			LPFC_EVENT_FCPRDCHKERR;
		memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
		fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
		fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
		fast_path_evt->un.read_check_error.fcpiparam =
			fcpi_parm;
	} else
		return;

	fast_path_evt->vport = vport;
	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	lpfc_worker_wake_up(phba);
	return;
}
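
/**
 * lpfc_scsi_unprep_dma_buf: Routine to un-map DMA mapping of scatter gather.
 * @phba: Pointer to HBA context object.
 * @psb: Pointer to lpfc scsi command to be un-mapped.
 *
 * This routine releases the DMA mapping set up by lpfc_scsi_prep_dma_buf
 * for the scatter-gather list of the scsi command.
 **/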
static void
lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
{
	/*
	 * There are only two special cases to consider.  (1) the scsi command
	 * requested scatter-gather usage or (2) the scsi command allocated
	 * a request buffer, but did not request use_sg.  There is a third
	 * case, but it does not require resource deallocation.
	 */
	if (psb->seg_cnt > 0)
		scsi_dma_unmap(psb->pCmd);
}
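
/**
 * lpfc_handle_fcp_err: FCP response handler.
 * @vport: Pointer to vport object.
 * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
 * @rsp_iocb: Pointer to the response IOCB.
 *
 * This routine examines the FCP RSP of a completed command, copies any
 * sense data into the midlayer buffer, validates residual counts and
 * sets the host and scsi status in the scsi command of @lpfc_cmd.
 **/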
static void
lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
		    struct lpfc_iocbq *rsp_iocb)
{
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t *lp;
	uint32_t host_status = DID_OK;
	uint32_t rsplen = 0;
	uint32_t logit = LOG_FCP | LOG_FCP_ERROR;

	/*
	 * If this is a task management command, there is no
	 * scsi packet associated with this lpfc_cmd.  The driver
	 * consumes it.
	 */
	if (fcpcmd->fcpCntl2) {
		scsi_status = 0;
		goto out;
	}

	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
		if (snslen > SCSI_SENSE_BUFFERSIZE)
			snslen = SCSI_SENSE_BUFFERSIZE;

		if (resp_info & RSP_LEN_VALID)
			rsplen = be32_to_cpu(fcprsp->rspRspLen);
		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
	}
	lp = (uint32_t *)cmnd->sense_buffer;

	if (!scsi_status && (resp_info & RESID_UNDER))
		logit = LOG_FCP;

	lpfc_printf_vlog(vport, KERN_WARNING, logit,
			 "0730 FCP command x%x failed: x%x SNS x%x x%x "
			 "Data: x%x x%x x%x x%x x%x\n",
			 cmnd->cmnd[0], scsi_status,
			 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
			 be32_to_cpu(fcprsp->rspResId),
			 be32_to_cpu(fcprsp->rspSnsLen),
			 be32_to_cpu(fcprsp->rspRspLen),
			 fcprsp->rspInfo3);

	if (resp_info & RSP_LEN_VALID) {
		rsplen = be32_to_cpu(fcprsp->rspRspLen);
		if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
		    (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
			host_status = DID_ERROR;
			goto out;
		}
	}

	scsi_set_resid(cmnd, 0);
	if (resp_info & RESID_UNDER) {
		scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0716 FCP Read Underrun, expected %d, "
				 "residual %d Data: x%x x%x x%x\n",
				 be32_to_cpu(fcpcmd->fcpDl),
				 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
				 cmnd->underflow);

		/*
		 * If there is an under run check if under run reported by
		 * storage array is same as the under run reported by HBA.
		 * If this is not same, there is a dropped frame.
		 */
		if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
		    fcpi_parm &&
		    (scsi_get_resid(cmnd) != fcpi_parm)) {
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_FCP | LOG_FCP_ERROR,
					 "0735 FCP Read Check Error "
					 "and Underrun Data: x%x x%x x%x x%x\n",
					 be32_to_cpu(fcpcmd->fcpDl),
					 scsi_get_resid(cmnd), fcpi_parm,
					 cmnd->cmnd[0]);
			scsi_set_resid(cmnd, scsi_bufflen(cmnd));
			host_status = DID_ERROR;
		}
		/*
		 * The cmnd->underflow is the minimum number of bytes that must
		 * be transferred for this command.  Provided a sense condition
		 * is not present, make sure the actual amount transferred is at
		 * least the underflow value or fail.
		 */
		if (!(resp_info & SNS_LEN_VALID) &&
		    (scsi_status == SAM_STAT_GOOD) &&
		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
		     < cmnd->underflow)) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
					 "0717 FCP command x%x residual "
					 "underrun converted to error "
					 "Data: x%x x%x x%x\n",
					 cmnd->cmnd[0], scsi_bufflen(cmnd),
					 scsi_get_resid(cmnd), cmnd->underflow);
			host_status = DID_ERROR;
		}
	} else if (resp_info & RESID_OVER) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0720 FCP command x%x residual overrun error. "
				 "Data: x%x x%x \n", cmnd->cmnd[0],
				 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
		host_status = DID_ERROR;

	/*
	 * Check SLI validation that all the transfer was actually done
	 * (fcpi_parm should be zero). Apply check only to reads.
	 */
	} else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
			(cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
				 "0734 FCP Read Check Error Data: "
				 "x%x x%x x%x x%x\n",
				 be32_to_cpu(fcpcmd->fcpDl),
				 be32_to_cpu(fcprsp->rspResId),
				 fcpi_parm, cmnd->cmnd[0]);
		host_status = DID_ERROR;
		scsi_set_resid(cmnd, scsi_bufflen(cmnd));
	}

 out:
	cmnd->result = ScsiResult(host_status, scsi_status);
	lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
}
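
/**
 * lpfc_scsi_cmd_iocb_cmpl: Scsi cmnd IOCB completion routine.
 * @phba: Pointer to HBA context object.
 * @pIocbIn: Pointer to the command IOCBQ.
 * @pIocbOut: Pointer to the response IOCBQ.
 *
 * This routine assigns the scsi command result, posts fast-path events
 * on failures, updates statistics and queue depths, and returns the scsi
 * buffer to the free list after completing the command to the midlayer.
 **/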
static void
lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
			struct lpfc_iocbq *pIocbOut)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) pIocbIn->context1;
	struct lpfc_vport      *vport = pIocbIn->vport;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	int result;
	struct scsi_device *sdev, *tmp_sdev;
	int depth = 0;
	unsigned long flags;
	struct lpfc_fast_path_event *fast_path_evt;

	lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
	atomic_dec(&pnode->cmd_pending);

	if (lpfc_cmd->status) {
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		else if (lpfc_cmd->status >= IOSTAT_CNT)
			lpfc_cmd->status = IOSTAT_DEFAULT;

		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0729 FCP cmd x%x failed <%d/%d> "
				 "status: x%x result: x%x Data: x%x x%x\n",
				 cmd->cmnd[0],
				 cmd->device ? cmd->device->id : 0xffff,
				 cmd->device ? cmd->device->lun : 0xffff,
				 lpfc_cmd->status, lpfc_cmd->result,
				 pIocbOut->iocb.ulpContext,
				 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);

		switch (lpfc_cmd->status) {
		case IOSTAT_FCP_RSP_ERROR:
			/* Call FCP RSP handler to determine result */
			lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
			break;
		case IOSTAT_NPORT_BSY:
		case IOSTAT_FABRIC_BSY:
			cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
			fast_path_evt = lpfc_alloc_fast_evt(phba);
			if (!fast_path_evt)
				break;
			fast_path_evt->un.fabric_evt.event_type =
				FC_REG_FABRIC_EVENT;
			fast_path_evt->un.fabric_evt.subcategory =
				(lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
				LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
			if (pnode && NLP_CHK_NODE_ACT(pnode)) {
				memcpy(&fast_path_evt->un.fabric_evt.wwpn,
					&pnode->nlp_portname,
					sizeof(struct lpfc_name));
				memcpy(&fast_path_evt->un.fabric_evt.wwnn,
					&pnode->nlp_nodename,
					sizeof(struct lpfc_name));
			}
			fast_path_evt->vport = vport;
			fast_path_evt->work_evt.evt =
				LPFC_EVT_FASTPATH_MGMT_EVT;
			spin_lock_irqsave(&phba->hbalock, flags);
			list_add_tail(&fast_path_evt->work_evt.evt_listp,
				&phba->work_list);
			spin_unlock_irqrestore(&phba->hbalock, flags);
			lpfc_worker_wake_up(phba);
			break;
		case IOSTAT_LOCAL_REJECT:
			if (lpfc_cmd->result == IOERR_INVALID_RPI ||
			    lpfc_cmd->result == IOERR_NO_RESOURCES ||
			    lpfc_cmd->result == IOERR_ABORT_REQUESTED) {
				cmd->result = ScsiResult(DID_REQUEUE, 0);
				break;
			} /* else: fall through */
		default:
			cmd->result = ScsiResult(DID_ERROR, 0);
			break;
		}

		if (!pnode || !NLP_CHK_NODE_ACT(pnode)
		    || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
			cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
						 SAM_STAT_BUSY);
	} else {
		cmd->result = ScsiResult(DID_OK, 0);
	}

	if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
		uint32_t *lp = (uint32_t *)cmd->sense_buffer;

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0710 Iodone <%d/%d> cmd %p, error "
				 "x%x SNS x%x x%x Data: x%x x%x\n",
				 cmd->device->id, cmd->device->lun, cmd,
				 cmd->result, *lp, *(lp + 3), cmd->retries,
				 scsi_get_resid(cmd));
	}

	lpfc_update_stats(phba, lpfc_cmd);
	result = cmd->result;
	sdev = cmd->device;
	if (vport->cfg_max_scsicmpl_time &&
	    time_after(jiffies, lpfc_cmd->start_time +
		msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
		spin_lock_irqsave(sdev->host->host_lock, flags);
		if ((pnode->cmd_qdepth > atomic_read(&pnode->cmd_pending) &&
		    (atomic_read(&pnode->cmd_pending) > LPFC_MIN_TGT_QDEPTH) &&
		    ((cmd->cmnd[0] == READ_10) || (cmd->cmnd[0] == WRITE_10))))
			pnode->cmd_qdepth = atomic_read(&pnode->cmd_pending);

		pnode->last_change_time = jiffies;
		spin_unlock_irqrestore(sdev->host->host_lock, flags);
	} else if ((pnode->cmd_qdepth < LPFC_MAX_TGT_QDEPTH) &&
		   time_after(jiffies, pnode->last_change_time +
			      msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
		spin_lock_irqsave(sdev->host->host_lock, flags);
		pnode->cmd_qdepth += pnode->cmd_qdepth *
			LPFC_TGTQ_RAMPUP_PCENT / 100;
		if (pnode->cmd_qdepth > LPFC_MAX_TGT_QDEPTH)
			pnode->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
		pnode->last_change_time = jiffies;
		spin_unlock_irqrestore(sdev->host->host_lock, flags);
	}

	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
	cmd->scsi_done(cmd);

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		/*
		 * If there is a thread waiting for command completion
		 * wake up the thread.
		 */
		spin_lock_irqsave(sdev->host->host_lock, flags);
		lpfc_cmd->pCmd = NULL;
		if (lpfc_cmd->waitq)
			wake_up(lpfc_cmd->waitq);
		spin_unlock_irqrestore(sdev->host->host_lock, flags);
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return;
	}

	if (!result)
		lpfc_rampup_queue_depth(vport, sdev);

	if (!result && pnode && NLP_CHK_NODE_ACT(pnode) &&
	    ((jiffies - pnode->last_ramp_up_time) >
		LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
	    ((jiffies - pnode->last_q_full_time) >
		LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
	    (vport->cfg_lun_queue_depth > sdev->queue_depth)) {
		shost_for_each_device(tmp_sdev, sdev->host) {
			if (vport->cfg_lun_queue_depth > tmp_sdev->queue_depth){
				if (tmp_sdev->id != sdev->id)
					continue;
				if (tmp_sdev->ordered_tags)
					scsi_adjust_queue_depth(tmp_sdev,
						MSG_ORDERED_TAG,
						tmp_sdev->queue_depth+1);
				else
					scsi_adjust_queue_depth(tmp_sdev,
						MSG_SIMPLE_TAG,
						tmp_sdev->queue_depth+1);

				pnode->last_ramp_up_time = jiffies;
			}
		}
		lpfc_send_sdev_queuedepth_change_event(phba, vport, pnode,
			0xFFFFFFFF,
			sdev->queue_depth - 1, sdev->queue_depth);
	}

	/*
	 * Check for queue full.  If the lun is reporting queue full, then
	 * back off the lun queue depth to prevent target overloads.
	 */
	if (result == SAM_STAT_TASK_SET_FULL && pnode &&
	    NLP_CHK_NODE_ACT(pnode)) {
		pnode->last_q_full_time = jiffies;

		shost_for_each_device(tmp_sdev, sdev->host) {
			if (tmp_sdev->id != sdev->id)
				continue;
			depth = scsi_track_queue_full(tmp_sdev,
					tmp_sdev->queue_depth - 1);
		}
		/*
		 * The queue depth cannot be lowered any more.
		 * Modify the returned error code to store
		 * the final depth value set by
		 * scsi_track_queue_full.
		 */
		if (depth == -1)
			depth = sdev->host->cmd_per_lun;

		if (depth) {
			lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
					 "0711 detected queue full - lun queue "
					 "depth adjusted to %d.\n", depth);
			lpfc_send_sdev_queuedepth_change_event(phba, vport,
				pnode, 0xFFFFFFFF,
				depth+1, depth);
		}
	}

	/*
	 * If there is a thread waiting for command completion
	 * wake up the thread.
	 */
	spin_lock_irqsave(sdev->host->host_lock, flags);
	lpfc_cmd->pCmd = NULL;
	if (lpfc_cmd->waitq)
		wake_up(lpfc_cmd->waitq);
	spin_unlock_irqrestore(sdev->host->host_lock, flags);

	lpfc_release_scsi_buf(phba, lpfc_cmd);
}
/**
 * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB.
 * @data: A pointer to the immediate command data portion of the IOCB.
 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
 *
 * The routine copies the entire FCP command from @fcp_cmnd to @data while
 * byte swapping the data to big endian format for transmission on the wire.
 **/
static void
lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
{
	int i, j;
	for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
	     i += sizeof(uint32_t), j++) {
		((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
	}
}
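
/**
 * lpfc_scsi_prep_cmnd: Routine to convert scsi cmnd to FCP information unit.
 * @vport: Pointer to vport object.
 * @lpfc_cmd: Pointer to lpfc scsi command.
 * @pnode: Pointer to the node on which the command will be issued.
 *
 * This routine initializes the fcp_cmnd and iocb data structures in a
 * scsi buffer from the scsi command and the FC node it is destined for.
 **/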
static void
lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
		    struct lpfc_nodelist *pnode)
{
	struct lpfc_hba *phba = vport->phba;
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
	int datadir = scsi_cmnd->sc_data_direction;
	char tag[2];

	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
		return;

	lpfc_cmd->fcp_rsp->rspSnsLen = 0;
	/* clear task management bits */
	lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;

	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
			&lpfc_cmd->fcp_cmnd->fcp_lun);

	memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);

	if (scsi_populate_tag_msg(scsi_cmnd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
			break;
		case ORDERED_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = ORDERED_Q;
			break;
		default:
			fcp_cmnd->fcpCntl1 = SIMPLE_Q;
			break;
		}
	} else
		fcp_cmnd->fcpCntl1 = 0;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		if (datadir == DMA_TO_DEVICE) {
			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
			iocb_cmd->un.fcpi.fcpi_parm = 0;
			iocb_cmd->ulpPU = 0;
			fcp_cmnd->fcpCntl3 = WRITE_DATA;
			phba->fc4OutputRequests++;
		} else {
			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
			fcp_cmnd->fcpCntl3 = READ_DATA;
			phba->fc4InputRequests++;
		}
	} else {
		iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
		iocb_cmd->un.fcpi.fcpi_parm = 0;
		iocb_cmd->ulpPU = 0;
		fcp_cmnd->fcpCntl3 = 0;
		phba->fc4ControlRequests++;
	}
	if (phba->sli_rev == 3)
		lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
	/*
	 * Finish initializing those IOCB fields that are independent
	 * of the scsi_cmnd request_buffer
	 */
	piocbq->iocb.ulpContext = pnode->nlp_rpi;
	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
		piocbq->iocb.ulpFCP2Rcvy = 1;
	else
		piocbq->iocb.ulpFCP2Rcvy = 0;

	piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
	piocbq->context1  = lpfc_cmd;
	piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
	piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
	piocbq->vport = vport;
}
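
/**
 * lpfc_scsi_prep_task_mgmt_cmd: Convert scsi TM cmnd to FCP information unit.
 * @vport: Pointer to vport object.
 * @lpfc_cmd: Pointer to lpfc scsi command.
 * @lun: Lun number.
 * @task_mgmt_cmd: SCSI task management command.
 *
 * This routine creates the FCP information unit for a SCSI task
 * management command in @lpfc_cmd.
 *
 * Return codes:
 *   0 - Error
 *   1 - Success
 **/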
static int
lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
			     struct lpfc_scsi_buf *lpfc_cmd,
			     unsigned int lun,
			     uint8_t task_mgmt_cmd)
{
	struct lpfc_iocbq *piocbq;
	IOCB_t *piocb;
	struct fcp_cmnd *fcp_cmnd;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;

	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ndlp->nlp_state != NLP_STE_MAPPED_NODE)
		return 0;

	piocbq = &(lpfc_cmd->cur_iocbq);
	piocbq->vport = vport;

	piocb = &piocbq->iocb;

	fcp_cmnd = lpfc_cmd->fcp_cmnd;
	/* Clear out any old data in the FCP command area */
	memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
	int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
	if (vport->phba->sli_rev == 3)
		lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
	piocb->ulpCommand = CMD_FCP_ICMND64_CR;
	piocb->ulpContext = ndlp->nlp_rpi;
	if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
		piocb->ulpFCP2Rcvy = 1;
	}
	piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);

	/* ulpTimeout is only one byte */
	if (lpfc_cmd->timeout > 0xff) {
		/*
		 * Do not timeout the command at the firmware level.
		 * The driver will provide the timeout mechanism.
		 */
		piocb->ulpTimeout = 0;
	} else {
		piocb->ulpTimeout = lpfc_cmd->timeout;
	}

	return 1;
}
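
/**
 * lpfc_tskmgmt_def_cmpl: Default completion routine for task management cmd.
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to the command IOCBQ.
 * @rspiocbq: Pointer to the response IOCBQ.
 *
 * This routine is the completion handler for a timed-out task management
 * command; it simply frees the associated scsi buffer.
 **/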
static void
lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) cmdiocbq->context1;
	if (lpfc_cmd)
		lpfc_release_scsi_buf(phba, lpfc_cmd);
	return;
}
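
/**
 * lpfc_scsi_tgt_reset: Target reset handler.
 * @lpfc_cmd: Pointer to lpfc scsi command.
 * @vport: Pointer to vport object.
 * @tgt_id: Target id.
 * @lun: Lun number.
 * @rdata: Pointer to the remote port data.
 *
 * This routine issues an FCP target reset IOCB and waits for its
 * completion.
 *
 * Return codes:
 *   SUCCESS / FAILED / TIMEOUT_ERROR
 **/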
static int
lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
		    unsigned tgt_id, unsigned int lun,
		    struct lpfc_rport_data *rdata)
{
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *iocbqrsp;
	int ret;
	int status;

	if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
		return FAILED;

	lpfc_cmd->rdata = rdata;
	status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
					      FCP_TARGET_RESET);
	if (!status)
		return FAILED;

	iocbq = &lpfc_cmd->cur_iocbq;
	iocbqrsp = lpfc_sli_get_iocbq(phba);
	if (!iocbqrsp)
		return FAILED;

	/* Issue Target Reset to TGT <num> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "0702 Issue Target Reset to TGT %d Data: x%x x%x\n",
			 tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
	status = lpfc_sli_issue_iocb_wait(phba,
					  &phba->sli.ring[phba->sli.fcp_ring],
					  iocbq, iocbqrsp, lpfc_cmd->timeout);
	if (status != IOCB_SUCCESS) {
		if (status == IOCB_TIMEDOUT) {
			iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
			ret = TIMEOUT_ERROR;
		} else
			ret = FAILED;
		lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
	} else {
		ret = SUCCESS;
		lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
		lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
	}

	lpfc_sli_release_iocbq(phba, iocbqrsp);
	return ret;
}
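
/**
 * lpfc_info: Info entry point of scsi_host_template data structure.
 * @host: The scsi host for which this call is being executed.
 *
 * This routine provides module information about the hba, formatting the
 * model description, PCI location and port name into a static buffer.
 *
 * Return value:
 *   Pointer to a static buffer holding the adapter description string.
 **/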
const char *
lpfc_info(struct Scsi_Host *host)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	int len;
	static char lpfcinfobuf[384];

	memset(lpfcinfobuf, 0, 384);
	if (phba && phba->pcidev) {
		strncpy(lpfcinfobuf, phba->ModelDesc, 256);
		len = strlen(lpfcinfobuf);
		snprintf(lpfcinfobuf + len,
			 384-len,
			 " on PCI bus %02x device %02x irq %d",
			 phba->pcidev->bus->number,
			 phba->pcidev->devfn,
			 phba->pcidev->irq);
		len = strlen(lpfcinfobuf);
		if (phba->Port[0]) {
			snprintf(lpfcinfobuf + len,
				 384-len,
				 " port %s",
				 phba->Port);
		}
	}
	return lpfcinfobuf;
}
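
/**
 * lpfc_poll_rearm_timer: Routine to modify fcp_poll timer of hba.
 * @phba: Pointer to HBA context object.
 *
 * This routine re-arms the fcp_poll timer if the FCP ring still has
 * outstanding commands in its txcmplq.
 **/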
static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
{
	unsigned long poll_tmo_expires =
		(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));

	if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
		mod_timer(&phba->fcp_poll_timer,
			  poll_tmo_expires);
}
void lpfc_poll_start_timer(struct lpfc_hba * phba)
{
	lpfc_poll_rearm_timer(phba);
}
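
/**
 * lpfc_poll_timeout: Restart polling timer.
 * @ptr: Map to lpfc_hba data structure pointer.
 *
 * This routine is the timer handler for the fcp_poll timer. It polls the
 * FCP ring and re-arms the timer when ring polling is enabled.
 **/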
void lpfc_poll_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *) ptr;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring (phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}
}
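
/**
 * lpfc_queuecommand: Queuecommand entry point of scsi_host_template.
 * @cmnd: Pointer to scsi_cmnd data structure.
 * @done: Pointer to done routine.
 *
 * Driver registers this routine with the scsi midlayer to submit a @cmnd
 * to the LPFC HBA. It prepares the DMA mapping and the FCP information
 * unit, then issues the IOCB to the firmware.
 *
 * Return value:
 *   0 - Success
 *   SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host
 *                            temporarily
 **/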
static int
lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_sli   *psli = &phba->sli;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
	int err;

	err = fc_remote_port_chkready(rport);
	if (err) {
		cmnd->result = err;
		goto out_fail_command;
	}

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
		goto out_fail_command;
	}
	if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)
		goto out_host_busy;

	lpfc_cmd = lpfc_get_scsi_buf(phba);
	if (lpfc_cmd == NULL) {
		lpfc_adjust_queue_depth(phba);

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0707 driver's buffer pool is empty, "
				 "IO busied\n");
		goto out_host_busy;
	}

	lpfc_cmd->start_time = jiffies;
	/*
	 * Store the midlayer's command structure for the completion phase
	 * and complete the command initialization.
	 */
	lpfc_cmd->pCmd  = cmnd;
	lpfc_cmd->rdata = rdata;
	lpfc_cmd->timeout = 0;
	lpfc_cmd->start_time = jiffies;
	cmnd->host_scribble = (unsigned char *)lpfc_cmd;
	cmnd->scsi_done = done;

	err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
	if (err)
		goto out_host_busy_free_buf;

	lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);

	atomic_inc(&ndlp->cmd_pending);
	err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
	if (err)
		goto out_host_busy_free_buf;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring(phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;

 out_host_busy_free_buf:
	atomic_dec(&ndlp->cmd_pending);
	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
	lpfc_release_scsi_buf(phba, lpfc_cmd);
 out_host_busy:
	return SCSI_MLQUEUE_HOST_BUSY;

 out_fail_command:
	done(cmnd);
	return 0;
}
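
/**
 * lpfc_block_error_handler: Block error handler while rport is blocked.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine blocks execution until the rport leaves the
 * FC_PORTSTATE_BLOCKED state, polling once per second.
 **/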
static void
lpfc_block_error_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));

	spin_lock_irq(shost->host_lock);
	while (rport->port_state == FC_PORTSTATE_BLOCKED) {
		spin_unlock_irq(shost->host_lock);
		msleep(1000);
		spin_lock_irq(shost->host_lock);
	}
	spin_unlock_irq(shost->host_lock);
	return;
}
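
/**
 * lpfc_abort_handler: eh_abort_handler entry point of scsi_host_template.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine aborts @cmnd pending in the base driver by issuing an
 * ABORT_XRI_CN or CLOSE_XRI_CN IOCB and waiting for its completion.
 *
 * Return value:
 *   SUCCESS - Abort completed
 *   FAILED - Abort failed or timed out
 **/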
static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
	struct lpfc_iocbq *iocb;
	struct lpfc_iocbq *abtsiocb;
	struct lpfc_scsi_buf *lpfc_cmd;
	IOCB_t *cmd, *icmd;
	int ret = SUCCESS;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);

	lpfc_block_error_handler(cmnd);
	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
	BUG_ON(!lpfc_cmd);

	/*
	 * If pCmd field of the corresponding lpfc_scsi_buf structure
	 * points to a different SCSI command, then the driver has
	 * already completed this command, but the midlayer did not
	 * see the completion before the eh fired.  Just return
	 * SUCCESS.
	 */
	iocb = &lpfc_cmd->cur_iocbq;
	if (lpfc_cmd->pCmd != cmnd)
		goto out;

	BUG_ON(iocb->context1 != lpfc_cmd);

	abtsiocb = lpfc_sli_get_iocbq(phba);
	if (abtsiocb == NULL) {
		ret = FAILED;
		goto out;
	}

	/*
	 * The scsi command cannot be in txq and it is in flight because the
	 * pCmd is still pointing at the SCSI command we have to abort. There
	 * is no need to search the txcmplq. Just send an abort to the FW.
	 */

	cmd = &iocb->iocb;
	icmd = &abtsiocb->iocb;
	icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
	icmd->un.acxri.abortContextTag = cmd->ulpContext;
	icmd->un.acxri.abortIoTag = cmd->ulpIoTag;

	icmd->ulpLe = 1;
	icmd->ulpClass = cmd->ulpClass;
	if (lpfc_is_link_up(phba))
		icmd->ulpCommand = CMD_ABORT_XRI_CN;
	else
		icmd->ulpCommand = CMD_CLOSE_XRI_CN;

	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
	abtsiocb->vport = vport;
	if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) {
		lpfc_sli_release_iocbq(phba, abtsiocb);
		ret = FAILED;
		goto out;
	}

	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
		lpfc_sli_poll_fcp_ring (phba);

	lpfc_cmd->waitq = &waitq;
	/* Wait for abort to complete */
	wait_event_timeout(waitq,
			  (lpfc_cmd->pCmd != cmnd),
			   (2*vport->cfg_devloss_tmo*HZ));

	spin_lock_irq(shost->host_lock);
	lpfc_cmd->waitq = NULL;
	spin_unlock_irq(shost->host_lock);

	if (lpfc_cmd->pCmd == cmnd) {
		ret = FAILED;
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0748 abort handler timed out waiting "
				 "for abort to complete: ret %#x, ID %d, "
				 "LUN %d, snum %#lx\n",
				 ret, cmnd->device->id, cmnd->device->lun,
				 cmnd->serial_number);
	}

 out:
	lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
			 "LUN %d snum %#lx\n", ret, cmnd->device->id,
			 cmnd->device->lun, cmnd->serial_number);
	return ret;
}
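
/**
 * lpfc_device_reset_handler: eh_device_reset entry point of
 * scsi_host_template.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does a device reset by sending an FCP_TARGET_RESET task
 * management command to the target and then flushing any I/O still
 * outstanding against it.
 *
 * Return value:
 *   SUCCESS / FAILED / TIMEOUT_ERROR
 **/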
static int
lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_iocbq *iocbq, *iocbqrsp;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	unsigned long later;
	int ret = SUCCESS;
	int status;
	int cnt;
	struct lpfc_scsi_event_header scsi_event;

	lpfc_block_error_handler(cmnd);
	/*
	 * If target is not in a MAPPED state, delay the reset until
	 * target is rediscovered or devloss timeout expires.
	 */
	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
	while (time_after(later, jiffies)) {
		if (!pnode || !NLP_CHK_NODE_ACT(pnode))
			return FAILED;
		if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
			break;
		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
		rdata = cmnd->device->hostdata;
		if (!rdata)
			break;
		pnode = rdata->pnode;
	}

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_TGTRESET;
	scsi_event.lun = 0;
	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost,
		fc_get_event_number(),
		sizeof(scsi_event),
		(char *)&scsi_event,
		SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

	if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0721 LUN Reset rport "
				 "failure: msec x%x rdata x%p\n",
				 jiffies_to_msecs(jiffies - later), rdata);
		return FAILED;
	}
	lpfc_cmd = lpfc_get_scsi_buf(phba);
	if (lpfc_cmd == NULL)
		return FAILED;
	lpfc_cmd->timeout = 60;
	lpfc_cmd->rdata = rdata;

	status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd,
					      cmnd->device->lun,
					      FCP_TARGET_RESET);
	if (!status) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return FAILED;
	}
	iocbq = &lpfc_cmd->cur_iocbq;

	/* get a buffer for this IOCB command response */
	iocbqrsp = lpfc_sli_get_iocbq(phba);
	if (iocbqrsp == NULL) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return FAILED;
	}
	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "0703 Issue target reset to TGT %d LUN %d "
			 "rpi x%x nlp_flag x%x\n", cmnd->device->id,
			 cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
	status = lpfc_sli_issue_iocb_wait(phba,
					  &phba->sli.ring[phba->sli.fcp_ring],
					  iocbq, iocbqrsp, lpfc_cmd->timeout);
	if (status == IOCB_TIMEDOUT) {
		iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
		ret = TIMEOUT_ERROR;
	} else {
		if (status != IOCB_SUCCESS)
			ret = FAILED;
		lpfc_release_scsi_buf(phba, lpfc_cmd);
	}
	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0713 SCSI layer issued device reset (%d, %d) "
			 "return x%x status x%x result x%x\n",
			 cmnd->device->id, cmnd->device->lun, ret,
			 iocbqrsp->iocb.ulpStatus,
			 iocbqrsp->iocb.un.ulpWord[4]);
	lpfc_sli_release_iocbq(phba, iocbqrsp);
	cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id, cmnd->device->lun,
				LPFC_CTX_TGT);
	if (cnt)
		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
				    cmnd->device->id, cmnd->device->lun,
				    LPFC_CTX_TGT);
	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
	while (time_after(later, jiffies) && cnt) {
		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
		cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id,
					cmnd->device->lun, LPFC_CTX_TGT);
	}
	if (cnt) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0719 device reset I/O flush failure: "
				 "cnt x%x\n", cnt);
		ret = FAILED;
	}
	return ret;
}
static int
lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_nodelist *ndlp = NULL;
	int match;
	int ret = SUCCESS, status = SUCCESS, i;
	int cnt;
	struct lpfc_scsi_buf * lpfc_cmd;
	unsigned long later;
	struct lpfc_scsi_event_header scsi_event;

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_BUSRESET;
	scsi_event.lun = 0;
	memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost,
		fc_get_event_number(),
		sizeof(scsi_event),
		(char *)&scsi_event,
		SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

	lpfc_block_error_handler(cmnd);
	/*
	 * Since the driver manages a single bus device, reset all
	 * targets known to the driver.  Should any target reset
	 * fail, this routine returns failure to the midlayer.
	 */
	for (i = 0; i < LPFC_MAX_TARGET; i++) {
		/* Search for mapped node by target ID */
		match = 0;
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
			    ndlp->nlp_sid == i &&
			    ndlp->rport) {
				match = 1;
				break;
			}
		}
		spin_unlock_irq(shost->host_lock);
		if (!match)
			continue;
		lpfc_cmd = lpfc_get_scsi_buf(phba);
		if (lpfc_cmd) {
			lpfc_cmd->timeout = 60;
			status = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i,
						     cmnd->device->lun,
						     ndlp->rport->dd_data);
			if (status != TIMEOUT_ERROR)
				lpfc_release_scsi_buf(phba, lpfc_cmd);
		}
		if (!lpfc_cmd || status != SUCCESS) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
					 "0700 Bus Reset on target %d failed\n",
					 i);
			ret = FAILED;
		}
	}
	/*
	 * All outstanding txcmplq I/Os should have been aborted by
	 * the targets.  Unfortunately, some targets do not abide by
	 * this forcing the driver to double check.
	 */
	cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
	if (cnt)
		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
				    0, 0, LPFC_CTX_HOST);
	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
	while (time_after(later, jiffies) && cnt) {
		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
		cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
	}
	if (cnt) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0715 Bus Reset I/O flush failure: "
				 "cnt x%x left x%x\n", cnt, i);
		ret = FAILED;
	}
	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
	return ret;
}
static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_scsi_buf *scsi_buf = NULL;
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	uint32_t total = 0, i;
	uint32_t num_to_alloc = 0;
	unsigned long flags;

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	sdev->hostdata = rport->dd_data;

	/*
	 * Populate the cmds_per_lun count scsi_bufs into this host's globally
	 * available list of scsi buffers.  Don't allocate more than the
	 * HBA limit conveyed to the midlayer via the host structure.  The
	 * formula accounts for the lun_queue_depth + error handlers + 1
	 * extra.  This list of scsi bufs exists for the lifetime of the driver.
	 */
	total = phba->total_scsi_bufs;
	num_to_alloc = vport->cfg_lun_queue_depth + 2;

	/* Allow some exchanges to be available always to complete discovery */
	if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0704 At limitation of %d preallocated "
				 "command buffers\n", total);
		return 0;
	/* Allow some exchanges to be available always to complete discovery */
	} else if (total + num_to_alloc >
		phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0705 Allocation request of %d "
				 "command buffers will exceed max of %d.  "
				 "Reducing allocation request to %d.\n",
				 num_to_alloc, phba->cfg_hba_queue_depth,
				 (phba->cfg_hba_queue_depth - total));
		num_to_alloc = phba->cfg_hba_queue_depth - total;
	}

	for (i = 0; i < num_to_alloc; i++) {
		scsi_buf = lpfc_new_scsi_buf(vport);
		if (!scsi_buf) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
					 "0706 Failed to allocate "
					 "command buffer\n");
			break;
		}

		spin_lock_irqsave(&phba->scsi_buf_list_lock, flags);
		phba->total_scsi_bufs++;
		list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
		spin_unlock_irqrestore(&phba->scsi_buf_list_lock, flags);
	}
	return 0;
}
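
/**
 * lpfc_slave_configure: slave_configure entry point of scsi_host_template.
 * @sdev: Pointer to scsi_device.
 *
 * This routine configures tagged queuing for @sdev, sets the rport
 * dev_loss_tmo and, when ring polling is enabled, kicks the FCP ring
 * poll.
 **/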
static int
lpfc_slave_configure(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct fc_rport   *rport = starget_to_rport(sdev->sdev_target);

	if (sdev->tagged_supported)
		scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth);
	else
		scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth);

	/*
	 * Initialize the fc transport attributes for the target
	 * containing this scsi device.  Also note that the driver's
	 * target pointer is stored in the starget_data for the
	 * driver's sysfs entry point functions.
	 */
	rport->dev_loss_tmo = vport->cfg_devloss_tmo;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring(phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;
}
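
/**
 * lpfc_slave_destroy: slave_destroy entry point of scsi_host_template.
 * @sdev: Pointer to scsi_device.
 *
 * This routine clears @sdev hostdata when the device is going away.
 **/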
static void
lpfc_slave_destroy(struct scsi_device *sdev)
{
	sdev->hostdata = NULL;
	return;
}
struct scsi_host_template lpfc_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler= lpfc_device_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_hba_attrs,
	.max_sectors		= 0xFFFF,
};

struct scsi_host_template lpfc_vport_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler= lpfc_device_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_vport_attrs,
	.max_sectors		= 0xFFFF,
};