/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Enterprise Fibre Channel Host Bus Adapters.                     *
 * Refer to the README file included with this package for         *
 * driver version and adapter support.                             *
 * Copyright (C) 2004 Emulex Corporation.                          *
 * www.emulex.com                                                  *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of the GNU General Public License     *
 * as published by the Free Software Foundation; either version 2  *
 * of the License, or (at your option) any later version.          *
 *                                                                 *
 * This program is distributed in the hope that it will be useful, *
 * but WITHOUT ANY WARRANTY; without even the implied warranty of  *
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the   *
 * GNU General Public License for more details, a copy of which    *
 * can be found in the file COPYING included with this package.    *
 *******************************************************************/

/*
 * $Id: lpfc_scsi.c 1.37 2005/04/13 14:27:09EDT sf_support Exp $
 */

#include <linux/pci.h>
#include <linux/interrupt.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"

#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2

static inline void
lpfc_put_lun(struct fcp_cmnd *fcmd, unsigned int lun)
{
	fcmd->fcpLunLsl = 0;
	fcmd->fcpLunMsl = swab16((uint16_t)lun);
}

/*
 * This routine allocates a scsi buffer, which contains all the necessary
 * information needed to initiate a SCSI I/O.  The non-DMAable buffer region
 * contains information to build the IOCB.  The DMAable region contains
 * memory for the FCP CMND, FCP RSP, and the initial BPL.  In addition to
 * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL
 * and the BPL BDE is setup in the IOCB.
 */
static struct lpfc_scsi_buf *
lpfc_get_scsi_buf(struct lpfc_hba * phba)
{
	struct lpfc_scsi_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys;

	psb = kmalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
	if (!psb)
		return NULL;
	memset(psb, 0, sizeof (struct lpfc_scsi_buf));
	psb->scsi_hba = phba;

	/*
	 * Get memory from the pci pool to map the virt space to pci bus space
	 * for an I/O.  The DMA buffer includes space for the struct fcp_cmnd,
	 * struct fcp_rsp and the number of bde's necessary to support the
	 * sg_tablesize.
	 */
	psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
				   &psb->dma_handle);
	if (!psb->data) {
		kfree(psb);
		return NULL;
	}

	/* Initialize virtual ptrs to dma_buf region. */
	memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

	psb->fcp_cmnd = psb->data;
	psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
	psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

	/* Initialize local short-hand pointers. */
	bpl = psb->fcp_bpl;
	pdma_phys = psb->dma_handle;

	/*
	 * The first two bdes are the FCP_CMD and FCP_RSP.  The balance are sg
	 * list bdes.  Initialize the first two and leave the rest for
	 * queuecommand.
	 */
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
	bpl->tus.f.bdeSize = sizeof (struct fcp_cmnd);
	bpl->tus.f.bdeFlags = BUFF_USE_CMND;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);
	bpl++;

	/* Setup the physical region for the FCP RSP */
	pdma_phys += sizeof (struct fcp_cmnd);
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pdma_phys));
	bpl->tus.f.bdeSize = sizeof (struct fcp_rsp);
	bpl->tus.f.bdeFlags = (BUFF_USE_CMND | BUFF_USE_RCV);
	bpl->tus.w = le32_to_cpu(bpl->tus.w);

	/*
	 * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
	 * initialize it with all known data now.
	 */
	pdma_phys += (sizeof (struct fcp_rsp));
	iocb = &psb->cur_iocbq.iocb;
	iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
	iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys);
	iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys);
	iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof (struct ulp_bde64));
	iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDL;
	iocb->ulpBdeCount = 1;
	iocb->ulpClass = CLASS3;

	return psb;
}

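/*
 * Release a scsi buffer: unmap any DMA resources still held for the
 * command it carried and return the buffer to the hba's free list.
 */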
static void
lpfc_free_scsi_buf(struct lpfc_scsi_buf * psb)
{
	struct lpfc_hba *phba = psb->scsi_hba;

	/*
	 * There are only two special cases to consider.  (1) the scsi command
	 * requested scatter-gather usage or (2) the scsi command allocated
	 * a request buffer, but did not request use_sg.  There is a third
	 * case, but it does not require resource deallocation.
	 */
	if ((psb->seg_cnt > 0) && (psb->pCmd->use_sg)) {
		dma_unmap_sg(&phba->pcidev->dev, psb->pCmd->request_buffer,
				psb->seg_cnt, psb->pCmd->sc_data_direction);
	} else {
		if ((psb->nonsg_phys) && (psb->pCmd->request_bufflen)) {
			dma_unmap_single(&phba->pcidev->dev, psb->nonsg_phys,
					psb->pCmd->request_bufflen,
					psb->pCmd->sc_data_direction);
		}
	}

	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
}

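/*
 * Map the data buffer of a scsi command for DMA and build the data BDEs
 * in the buffer's BPL.  Returns 0 on success, non-zero if the mapping
 * fails or exceeds the configured scatter-gather segment count.
 */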
static int
lpfc_scsi_prep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	dma_addr_t physaddr = 0;
	uint32_t i, num_bde = 0;
	int datadir = scsi_cmnd->sc_data_direction;
	int dma_error;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	bpl += 2;
	if (scsi_cmnd->use_sg) {
		/*
		 * The driver stores the segment count returned from dma_map_sg
		 * because this is a count of dma-mappings used to map the
		 * use_sg pages.  They are not guaranteed to be the same for
		 * those architectures that implement an IOMMU.
		 */
		sgel = (struct scatterlist *)scsi_cmnd->request_buffer;
		lpfc_cmd->seg_cnt = dma_map_sg(&phba->pcidev->dev, sgel,
						scsi_cmnd->use_sg, datadir);
		if (lpfc_cmd->seg_cnt == 0)
			return 1;

		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			printk(KERN_ERR "%s: Too many sg segments from "
			       "dma_map_sg. Config %d, seg_cnt %d",
			       __FUNCTION__, phba->cfg_sg_seg_cnt,
			       lpfc_cmd->seg_cnt);
			dma_unmap_sg(&phba->pcidev->dev, sgel,
				     lpfc_cmd->seg_cnt, datadir);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single scsi command.  Just run through the seg_cnt and format
		 * the bde's.
		 */
		for (i = 0; i < lpfc_cmd->seg_cnt; i++) {
			physaddr = sg_dma_address(sgel);
			bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
			bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
			bpl->tus.f.bdeSize = sg_dma_len(sgel);
			if (datadir == DMA_TO_DEVICE)
				bpl->tus.f.bdeFlags = 0;
			else
				bpl->tus.f.bdeFlags = BUFF_USE_RCV;
			bpl->tus.w = le32_to_cpu(bpl->tus.w);
			bpl++;
			sgel++;
			num_bde++;
		}
	} else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) {
		physaddr = dma_map_single(&phba->pcidev->dev,
					  scsi_cmnd->request_buffer,
					  scsi_cmnd->request_bufflen,
					  datadir);
		dma_error = dma_mapping_error(physaddr);
		if (dma_error) {
			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"%d:0718 Unable to dma_map_single "
					"request_buffer: x%x\n",
					phba->brd_no, dma_error);
			return 1;
		}

		lpfc_cmd->nonsg_phys = physaddr;
		bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		bpl->tus.f.bdeSize = scsi_cmnd->request_bufflen;
		if (datadir == DMA_TO_DEVICE)
			bpl->tus.f.bdeFlags = 0;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		num_bde = 1;
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer
	 */
	iocb_cmd->un.fcpi64.bdl.bdeSize +=
			(num_bde * sizeof (struct ulp_bde64));
	iocb_cmd->ulpBdeCount = 1;
	iocb_cmd->ulpLe = 1;
	fcp_cmnd->fcpDl = be32_to_cpu(scsi_cmnd->request_bufflen);
	return 0;
}

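/*
 * Decode the FCP RSP fields (response length, sense data, residual
 * counts) of a failed command into a midlayer host/scsi status.
 */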
static void
lpfc_handle_fcp_err(struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	struct lpfc_hba *phba = lpfc_cmd->scsi_hba;
	uint32_t fcpi_parm = lpfc_cmd->cur_iocbq.iocb.un.fcpi.fcpi_parm;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t host_status = DID_OK;
	uint32_t rsplen = 0;

	/*
	 * If this is a task management command, there is no
	 * scsi packet associated with this lpfc_cmd.  The driver
	 * consumes it.
	 */
	if (fcpcmd->fcpCntl2) {
		scsi_status = 0;
		goto out;
	}

	lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
			"%d:0730 FCP command failed: RSP "
			"Data: x%x x%x x%x x%x x%x x%x\n",
			phba->brd_no, resp_info, scsi_status,
			be32_to_cpu(fcprsp->rspResId),
			be32_to_cpu(fcprsp->rspSnsLen),
			be32_to_cpu(fcprsp->rspRspLen),
			fcprsp->rspInfo3);

	if (resp_info & RSP_LEN_VALID) {
		rsplen = be32_to_cpu(fcprsp->rspRspLen);
		if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
		    (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
			host_status = DID_ERROR;
			goto out;
		}
	}

	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
		if (snslen > SCSI_SENSE_BUFFERSIZE)
			snslen = SCSI_SENSE_BUFFERSIZE;

		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
	}

	cmnd->resid = 0;
	if (resp_info & RESID_UNDER) {
		cmnd->resid = be32_to_cpu(fcprsp->rspResId);

		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
				"%d:0716 FCP Read Underrun, expected %d, "
				"residual %d Data: x%x x%x x%x\n", phba->brd_no,
				be32_to_cpu(fcpcmd->fcpDl), cmnd->resid,
				fcpi_parm, cmnd->cmnd[0], cmnd->underflow);

		/*
		 * The cmnd->underflow is the minimum number of bytes that must
		 * be transferred for this command.  Provided a sense condition
		 * is not present, make sure the actual amount transferred is at
		 * least the underflow value or fail.
		 */
		if (!(resp_info & SNS_LEN_VALID) &&
		    (scsi_status == SAM_STAT_GOOD) &&
		    (cmnd->request_bufflen - cmnd->resid) < cmnd->underflow) {
			lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
					"%d:0717 FCP command x%x residual "
					"underrun converted to error "
					"Data: x%x x%x x%x\n", phba->brd_no,
					cmnd->cmnd[0], cmnd->request_bufflen,
					cmnd->resid, cmnd->underflow);

			host_status = DID_ERROR;
		}
	} else if (resp_info & RESID_OVER) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
				"%d:0720 FCP command x%x residual "
				"overrun error. Data: x%x x%x \n",
				phba->brd_no, cmnd->cmnd[0],
				cmnd->request_bufflen, cmnd->resid);
		host_status = DID_ERROR;

	/*
	 * Check SLI validation that all the transfer was actually done
	 * (fcpi_parm should be zero). Apply check only to reads.
	 */
	} else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
			(cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
				"%d:0734 FCP Read Check Error Data: "
				"x%x x%x x%x x%x\n", phba->brd_no,
				be32_to_cpu(fcpcmd->fcpDl),
				be32_to_cpu(fcprsp->rspResId),
				fcpi_parm, cmnd->cmnd[0]);
		host_status = DID_ERROR;
		cmnd->resid = cmnd->request_bufflen;
	}

 out:
	cmnd->result = ScsiResult(host_status, scsi_status);
}

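/*
 * IOCB completion handler for FCP commands: map the IOCB status to a
 * SCSI result, let lpfc_handle_fcp_err() decode FCP RSP errors, and
 * release the lpfc_scsi_buf back to the free list.
 */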
static void
lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
			struct lpfc_iocbq *pIocbOut)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) pIocbIn->context1;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	unsigned long iflag;

	lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;

	if (lpfc_cmd->status) {
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		else if (lpfc_cmd->status >= IOSTAT_CNT)
			lpfc_cmd->status = IOSTAT_DEFAULT;

		lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
				"%d:0729 FCP cmd x%x failed <%d/%d> status: "
				"x%x result: x%x Data: x%x x%x\n",
				phba->brd_no, cmd->cmnd[0], cmd->device->id,
				cmd->device->lun, lpfc_cmd->status,
				lpfc_cmd->result, pIocbOut->iocb.ulpContext,
				lpfc_cmd->cur_iocbq.iocb.ulpIoTag);

		switch (lpfc_cmd->status) {
		case IOSTAT_FCP_RSP_ERROR:
			/* Call FCP RSP handler to determine result */
			lpfc_handle_fcp_err(lpfc_cmd);
			break;
		case IOSTAT_NPORT_BSY:
		case IOSTAT_FABRIC_BSY:
			cmd->result = ScsiResult(DID_BUS_BUSY, 0);
			break;
		default:
			cmd->result = ScsiResult(DID_ERROR, 0);
			break;
		}

		if (pnode) {
			if (pnode->nlp_state != NLP_STE_MAPPED_NODE)
				cmd->result = ScsiResult(DID_BUS_BUSY,
							 SAM_STAT_BUSY);
		} else {
			cmd->result = ScsiResult(DID_NO_CONNECT, 0);
		}
	} else {
		cmd->result = ScsiResult(DID_OK, 0);
	}

	if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
		uint32_t *lp = (uint32_t *)cmd->sense_buffer;

		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
				"%d:0710 Iodone <%d/%d> cmd %p, error x%x "
				"SNS x%x x%x Data: x%x x%x\n",
				phba->brd_no, cmd->device->id,
				cmd->device->lun, cmd, cmd->result,
				*lp, *(lp + 3), cmd->retries, cmd->resid);
	}

	spin_lock_irqsave(phba->host->host_lock, iflag);
	lpfc_free_scsi_buf(lpfc_cmd);
	cmd->host_scribble = NULL;
	spin_unlock_irqrestore(phba->host->host_lock, iflag);

	cmd->scsi_done(cmd);
}

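/*
 * Fill in the FCP CMND and the remaining IOCB fields (command type,
 * queue tag, read/write direction, RPI, class, timeout) for a SCSI
 * command that is about to be issued to the adapter.
 */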
static void
lpfc_scsi_prep_cmnd(struct lpfc_hba * phba, struct lpfc_scsi_buf * lpfc_cmd,
			struct lpfc_nodelist *pnode)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
	int datadir = scsi_cmnd->sc_data_direction;

	lpfc_cmd->fcp_rsp->rspSnsLen = 0;

	lpfc_put_lun(lpfc_cmd->fcp_cmnd, lpfc_cmd->pCmd->device->lun);

	memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);

	if (scsi_cmnd->device->tagged_supported) {
		switch (scsi_cmnd->tag) {
		case HEAD_OF_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
			break;
		case ORDERED_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = ORDERED_Q;
			break;
		default:
			fcp_cmnd->fcpCntl1 = SIMPLE_Q;
			break;
		}
	} else
		fcp_cmnd->fcpCntl1 = 0;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	if (scsi_cmnd->use_sg) {
		if (datadir == DMA_TO_DEVICE) {
			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
			iocb_cmd->un.fcpi.fcpi_parm = 0;
			iocb_cmd->ulpPU = 0;
			fcp_cmnd->fcpCntl3 = WRITE_DATA;
			phba->fc4OutputRequests++;
		} else {
			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			iocb_cmd->un.fcpi.fcpi_parm =
				scsi_cmnd->request_bufflen;
			fcp_cmnd->fcpCntl3 = READ_DATA;
			phba->fc4InputRequests++;
		}
	} else if (scsi_cmnd->request_buffer && scsi_cmnd->request_bufflen) {
		if (datadir == DMA_TO_DEVICE) {
			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
			iocb_cmd->un.fcpi.fcpi_parm = 0;
			iocb_cmd->ulpPU = 0;
			fcp_cmnd->fcpCntl3 = WRITE_DATA;
			phba->fc4OutputRequests++;
		} else {
			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			iocb_cmd->un.fcpi.fcpi_parm =
				scsi_cmnd->request_bufflen;
			fcp_cmnd->fcpCntl3 = READ_DATA;
			phba->fc4InputRequests++;
		}
	} else {
		iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
		iocb_cmd->un.fcpi.fcpi_parm = 0;
		iocb_cmd->ulpPU = 0;
		fcp_cmnd->fcpCntl3 = 0;
		phba->fc4ControlRequests++;
	}

	/*
	 * Finish initializing those IOCB fields that are independent
	 * of the scsi_cmnd request_buffer
	 */
	piocbq->iocb.ulpContext = pnode->nlp_rpi;
	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
		piocbq->iocb.ulpFCP2Rcvy = 1;

	piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
	piocbq->context1 = lpfc_cmd;
	piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
	piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
}

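/*
 * Build an FCP task management IOCB (LUN reset, abort task set or
 * target reset) for the device behind lpfc_cmd.  Returns non-zero if
 * the IOCB was prepared, 0 if the node is not in a usable state.
 */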
static int
lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_hba *phba,
			     struct lpfc_scsi_buf *lpfc_cmd,
			     uint8_t task_mgmt_cmd)
{
	struct lpfc_sli *psli;
	struct lpfc_iocbq *piocbq;
	IOCB_t *piocb;
	struct fcp_cmnd *fcp_cmnd;
	struct scsi_device *scsi_dev = lpfc_cmd->pCmd->device;
	struct lpfc_rport_data *rdata = scsi_dev->hostdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;

	if ((ndlp == 0) || (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
		return 0;
	}

	psli = &phba->sli;
	piocbq = &(lpfc_cmd->cur_iocbq);
	piocb = &piocbq->iocb;

	fcp_cmnd = lpfc_cmd->fcp_cmnd;
	lpfc_put_lun(lpfc_cmd->fcp_cmnd, lpfc_cmd->pCmd->device->lun);
	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;

	piocb->ulpCommand = CMD_FCP_ICMND64_CR;

	piocb->ulpContext = ndlp->nlp_rpi;
	if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
		piocb->ulpFCP2Rcvy = 1;
	}
	piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);

	/* ulpTimeout is only one byte */
	if (lpfc_cmd->timeout > 0xff) {
		/*
		 * Do not timeout the command at the firmware level.
		 * The driver will provide the timeout mechanism.
		 */
		piocb->ulpTimeout = 0;
	} else {
		piocb->ulpTimeout = lpfc_cmd->timeout;
	}

	lpfc_cmd->rdata = rdata;

	switch (task_mgmt_cmd) {
	case FCP_LUN_RESET:
		/* Issue LUN Reset to TGT <num> LUN <num> */
		lpfc_printf_log(phba,
				KERN_INFO,
				LOG_FCP,
				"%d:0703 Issue LUN Reset to TGT %d LUN %d "
				"Data: x%x x%x\n",
				phba->brd_no,
				scsi_dev->id, scsi_dev->lun,
				ndlp->nlp_rpi, ndlp->nlp_flag);
		break;
	case FCP_ABORT_TASK_SET:
		/* Issue Abort Task Set to TGT <num> LUN <num> */
		lpfc_printf_log(phba,
				KERN_INFO,
				LOG_FCP,
				"%d:0701 Issue Abort Task Set to TGT %d LUN %d "
				"Data: x%x x%x\n",
				phba->brd_no,
				scsi_dev->id, scsi_dev->lun,
				ndlp->nlp_rpi, ndlp->nlp_flag);
		break;
	case FCP_TARGET_RESET:
		/* Issue Target Reset to TGT <num> */
		lpfc_printf_log(phba,
				KERN_INFO,
				LOG_FCP,
				"%d:0702 Issue Target Reset to TGT %d "
				"Data: x%x x%x\n",
				phba->brd_no,
				scsi_dev->id, ndlp->nlp_rpi,
				ndlp->nlp_flag);
		break;
	}

	return (1);
}

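/*
 * Issue a target reset task management command, poll for its
 * completion, then flush any I/O still outstanding to the target.
 */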
static int
lpfc_scsi_tgt_reset(struct lpfc_scsi_buf * lpfc_cmd, struct lpfc_hba * phba)
{
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *iocbqrsp = NULL;
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	int ret;

	ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_TARGET_RESET);
	if (!ret)
		return FAILED;

	lpfc_cmd->scsi_hba = phba;
	iocbq = &lpfc_cmd->cur_iocbq;
	list_remove_head(lpfc_iocb_list, iocbqrsp, struct lpfc_iocbq, list);
	if (!iocbqrsp)
		return FAILED;
	memset(iocbqrsp, 0, sizeof (struct lpfc_iocbq));

	iocbq->iocb_flag |= LPFC_IO_POLL;
	ret = lpfc_sli_issue_iocb_wait_high_priority(phba,
		     &phba->sli.ring[phba->sli.fcp_ring],
		     iocbq, SLI_IOCB_HIGH_PRIORITY,
		     iocbqrsp, lpfc_cmd->timeout);

	if (ret != IOCB_SUCCESS) {
		lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		ret = FAILED;
	} else {
		ret = SUCCESS;
		lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
		lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
	}

	/*
	 * All outstanding txcmplq I/Os should have been aborted by the target.
	 * Unfortunately, some targets do not abide by this forcing the driver
	 * to double check.
	 */
	lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
			    lpfc_cmd->pCmd->device->id,
			    lpfc_cmd->pCmd->device->lun, 0, LPFC_CTX_TGT);

	/* Return response IOCB to free list. */
	list_add_tail(&iocbqrsp->list, lpfc_iocb_list);
	return ret;
}

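/*
 * Completion handlers used while aborting a command: free the
 * associated scsi buffer and, on the aborted path, also clear the
 * midlayer's host_scribble pointer.
 */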
static void
lpfc_scsi_cmd_iocb_cleanup (struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
			    struct lpfc_iocbq *pIocbOut)
{
	unsigned long iflag;
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) pIocbIn->context1;

	spin_lock_irqsave(phba->host->host_lock, iflag);
	lpfc_free_scsi_buf(lpfc_cmd);
	spin_unlock_irqrestore(phba->host->host_lock, iflag);
}

static void
lpfc_scsi_cmd_iocb_cmpl_aborted(struct lpfc_hba *phba,
				struct lpfc_iocbq *pIocbIn,
				struct lpfc_iocbq *pIocbOut)
{
	struct scsi_cmnd *ml_cmd =
		((struct lpfc_scsi_buf *) pIocbIn->context1)->pCmd;

	lpfc_scsi_cmd_iocb_cleanup (phba, pIocbIn, pIocbOut);
	ml_cmd->host_scribble = NULL;
}

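/* Return the adapter description string reported to the SCSI midlayer. */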
const char *
lpfc_info(struct Scsi_Host *host)
{
	struct lpfc_hba *phba = (struct lpfc_hba *) host->hostdata[0];
	int len;
	static char lpfcinfobuf[384];

	memset(lpfcinfobuf, 0, 384);
	if (phba && phba->pcidev) {
		strncpy(lpfcinfobuf, phba->ModelDesc, 256);
		len = strlen(lpfcinfobuf);
		snprintf(lpfcinfobuf + len,
			 384 - len,
			 " on PCI bus %02x device %02x irq %d",
			 phba->pcidev->bus->number,
			 phba->pcidev->devfn,
			 phba->pcidev->irq);
		len = strlen(lpfcinfobuf);
		if (phba->Port[0]) {
			snprintf(lpfcinfobuf + len,
				 384 - len,
				 " port %s",
				 phba->Port);
		}
	}
	return lpfcinfobuf;
}

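/*
 * queuecommand entry point: validate the target state, grab a free
 * scsi buffer, map the data buffer, build the FCP IOCB and hand it
 * to the SLI layer.
 */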
static int
lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
{
	struct lpfc_hba *phba =
		(struct lpfc_hba *) cmnd->device->host->hostdata[0];
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct lpfc_scsi_buf *lpfc_cmd = NULL;
	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
	int err = 0;

	/*
	 * The target pointer is guaranteed not to be NULL because the driver
	 * only clears the device->hostdata field in lpfc_slave_destroy.  This
	 * approach guarantees no further IO calls on this target.
	 */
	if (!ndlp) {
		cmnd->result = ScsiResult(DID_NO_CONNECT, 0);
		goto out_fail_command;
	}

	/*
	 * A Fibre Channel target is present and functioning only when the node
	 * state is MAPPED.  Any other state is a failure.
	 */
	if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) {
		if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
		    (ndlp->nlp_state == NLP_STE_UNUSED_NODE)) {
			cmnd->result = ScsiResult(DID_NO_CONNECT, 0);
			goto out_fail_command;
		}
		/*
		 * The device is most likely recovered and the driver
		 * needs a bit more time to finish.  Ask the midlayer
		 * to retry.
		 */
		goto out_host_busy;
	}

	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
	if (lpfc_cmd == NULL) {
		printk(KERN_WARNING "%s: No buffer available - list empty, "
		       "total count %d\n", __FUNCTION__, phba->total_scsi_bufs);
		goto out_host_busy;
	}

	/*
	 * Store the midlayer's command structure for the completion phase
	 * and complete the command initialization.
	 */
	lpfc_cmd->pCmd = cmnd;
	lpfc_cmd->rdata = rdata;
	lpfc_cmd->timeout = 0;
	cmnd->host_scribble = (unsigned char *)lpfc_cmd;
	cmnd->scsi_done = done;

	err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
	if (err)
		goto out_host_busy_free_buf;

	lpfc_scsi_prep_cmnd(phba, lpfc_cmd, ndlp);

	err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
	if (err)
		goto out_host_busy_free_buf;
	return 0;

 out_host_busy_free_buf:
	lpfc_free_scsi_buf(lpfc_cmd);
	cmnd->host_scribble = NULL;
 out_host_busy:
	return SCSI_MLQUEUE_HOST_BUSY;

 out_fail_command:
	done(cmnd);
	return 0;
}

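/*
 * Abort a single command: remove it from the txq if it has not been
 * issued yet, otherwise send an ABORT/CLOSE XRI for it and wait for
 * the firmware to complete the abort.
 */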
static int
__lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
	struct lpfc_hba *phba =
			(struct lpfc_hba *)cmnd->device->host->hostdata[0];
	struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
	struct lpfc_iocbq *iocb, *next_iocb;
	struct lpfc_iocbq *abtsiocb = NULL;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	IOCB_t *cmd, *icmd;
	unsigned long snum;
	unsigned int id, lun;
	unsigned int loop_count = 0;
	int ret = IOCB_SUCCESS;

	/*
	 * If the host_scribble data area is NULL, then the driver has already
	 * completed this command, but the midlayer did not see the completion
	 * before the eh fired.  Just return SUCCESS.
	 */
	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
	if (!lpfc_cmd)
		return SUCCESS;

	/* save these now since lpfc_cmd can be freed */
	id = lpfc_cmd->pCmd->device->id;
	lun = lpfc_cmd->pCmd->device->lun;
	snum = lpfc_cmd->pCmd->serial_number;

	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		cmd = &iocb->iocb;
		if (iocb->context1 != lpfc_cmd)
			continue;

		list_del_init(&iocb->list);
		pring->txq_cnt--;
		if (!iocb->iocb_cmpl) {
			list_add_tail(&iocb->list, lpfc_iocb_list);
		} else {
			cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
			cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
			lpfc_scsi_cmd_iocb_cmpl_aborted(phba, iocb, iocb);
		}

		goto out;
	}

	list_remove_head(lpfc_iocb_list, abtsiocb, struct lpfc_iocbq, list);
	if (abtsiocb == NULL)
		return FAILED;

	memset(abtsiocb, 0, sizeof (struct lpfc_iocbq));

	/*
	 * The scsi command was not in the txq.  Check the txcmplq and if it is
	 * found, send an abort to the FW.
	 */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		if (iocb->context1 != lpfc_cmd)
			continue;

		iocb->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl_aborted;
		cmd = &iocb->iocb;
		icmd = &abtsiocb->iocb;
		icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
		icmd->un.acxri.abortContextTag = cmd->ulpContext;
		icmd->un.acxri.abortIoTag = cmd->ulpIoTag;

		icmd->ulpLe = 1;
		icmd->ulpClass = cmd->ulpClass;
		if (phba->hba_state >= LPFC_LINK_UP)
			icmd->ulpCommand = CMD_ABORT_XRI_CN;
		else
			icmd->ulpCommand = CMD_CLOSE_XRI_CN;

		if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) ==
								IOCB_ERROR) {
			list_add_tail(&abtsiocb->list, lpfc_iocb_list);
			ret = IOCB_ERROR;
			break;
		}

		/* Wait for abort to complete */
		while (cmnd->host_scribble)
		{
			spin_unlock_irq(phba->host->host_lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(LPFC_ABORT_WAIT*HZ);
			spin_lock_irq(phba->host->host_lock);
			if (++loop_count
			    > (2 * phba->cfg_nodev_tmo)/LPFC_ABORT_WAIT)
				break;
		}

		if (cmnd->host_scribble) {
			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
					"%d:0748 abort handler timed "
					"out waiting for abort to "
					"complete. Data: "
					"x%x x%x x%x x%lx\n",
					phba->brd_no, ret, id, lun, snum);
			cmnd->host_scribble = NULL;
			iocb->iocb_cmpl = lpfc_scsi_cmd_iocb_cleanup;
			ret = IOCB_ERROR;
		}

		break;
	}

 out:
	lpfc_printf_log(phba, KERN_WARNING, LOG_FCP,
			"%d:0749 SCSI layer issued abort device "
			"Data: x%x x%x x%x x%lx\n",
			phba->brd_no, ret, id, lun, snum);

	return ret == IOCB_SUCCESS ? SUCCESS : FAILED;
}

static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
	int rc;
	spin_lock_irq(cmnd->device->host->host_lock);
	rc = __lpfc_abort_handler(cmnd);
	spin_unlock_irq(cmnd->device->host->host_lock);
	return rc;
}

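/*
 * LUN reset handler: wait for the target to reach MAPPED state, send
 * an FCP LUN reset, then flush any I/O still outstanding to the LUN.
 */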
static int
__lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata[0];
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_scsi_buf *lpfc_cmd = NULL;
	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq, *iocbqrsp = NULL;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	int ret = FAILED;
	int cnt, loopcnt;

	/*
	 * If target is not in a MAPPED state, delay the reset until
	 * target is rediscovered or nodev timeout expires.
	 */
	while (1) {
		if (!pnode)
			break;

		if (pnode->nlp_state != NLP_STE_MAPPED_NODE) {
			spin_unlock_irq(phba->host->host_lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout( HZ/2);
			spin_lock_irq(phba->host->host_lock);
		}
		if ((pnode) && (pnode->nlp_state == NLP_STE_MAPPED_NODE))
			break;
	}

	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
	if (lpfc_cmd == NULL)
		goto out;

	lpfc_cmd->pCmd = cmnd;
	lpfc_cmd->timeout = 60;
	lpfc_cmd->scsi_hba = phba;

	ret = lpfc_scsi_prep_task_mgmt_cmd(phba, lpfc_cmd, FCP_LUN_RESET);
	if (!ret)
		goto out_free_scsi_buf;

	iocbq = &lpfc_cmd->cur_iocbq;

	/* get a buffer for this IOCB command response */
	list_remove_head(lpfc_iocb_list, iocbqrsp, struct lpfc_iocbq, list);
	if (iocbqrsp == NULL)
		goto out_free_scsi_buf;

	memset(iocbqrsp, 0, sizeof (struct lpfc_iocbq));

	iocbq->iocb_flag |= LPFC_IO_POLL;
	iocbq->iocb_cmpl = lpfc_sli_wake_iocb_high_priority;

	ret = lpfc_sli_issue_iocb_wait_high_priority(phba,
		     &phba->sli.ring[psli->fcp_ring],
		     iocbq, 0, iocbqrsp, 60);
	if (ret == IOCB_SUCCESS)
		ret = SUCCESS;

	lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
	lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
	if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT)
		if (lpfc_cmd->result & IOERR_DRVR_MASK)
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;

	/*
	 * All outstanding txcmplq I/Os should have been aborted by the target.
	 * Unfortunately, some targets do not abide by this forcing the driver
	 * to double check.
	 */
	lpfc_sli_abort_iocb(phba, &phba->sli.ring[phba->sli.fcp_ring],
			    cmnd->device->id, cmnd->device->lun, 0,
			    LPFC_CTX_LUN);

	loopcnt = 0;
	while ((cnt = lpfc_sli_sum_iocb(phba,
				&phba->sli.ring[phba->sli.fcp_ring],
				cmnd->device->id, cmnd->device->lun,
				LPFC_CTX_LUN))) {
		spin_unlock_irq(phba->host->host_lock);
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(LPFC_RESET_WAIT*HZ);
		spin_lock_irq(phba->host->host_lock);

		if (++loopcnt
		    > (2 * phba->cfg_nodev_tmo)/LPFC_RESET_WAIT)
			break;
	}

	if (cnt) {
		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
			"%d:0719 LUN Reset I/O flush failure: cnt x%x\n",
			phba->brd_no, cnt);
	}

	list_add_tail(&iocbqrsp->list, lpfc_iocb_list);

 out_free_scsi_buf:
	lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
			"%d:0713 SCSI layer issued LUN reset (%d, %d) "
			"Data: x%x x%x x%x\n",
			phba->brd_no, lpfc_cmd->pCmd->device->id,
			lpfc_cmd->pCmd->device->lun, ret, lpfc_cmd->status,
			lpfc_cmd->result);
	lpfc_free_scsi_buf(lpfc_cmd);
 out:
	return ret;
}

static int
lpfc_reset_lun_handler(struct scsi_cmnd *cmnd)
{
	int rc;
	spin_lock_irq(cmnd->device->host->host_lock);
	rc = __lpfc_reset_lun_handler(cmnd);
	spin_unlock_irq(cmnd->device->host->host_lock);
	return rc;
}

/*
 * Note: midlayer calls this function with the host_lock held
 */
static int
__lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct lpfc_hba *phba = (struct lpfc_hba *)shost->hostdata[0];
	struct lpfc_nodelist *ndlp = NULL;
	int match;
	int ret = FAILED, i, err_count = 0;
	int cnt, loopcnt;
	unsigned int midlayer_id = 0;
	struct lpfc_scsi_buf * lpfc_cmd = NULL;
	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;

	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
	if (lpfc_cmd == NULL)
		goto out;

	/* The lpfc_cmd storage is reused.  Set all loop invariants. */
	lpfc_cmd->timeout = 60;
	lpfc_cmd->pCmd = cmnd;
	lpfc_cmd->scsi_hba = phba;

	/*
	 * Since the driver manages a single bus device, reset all
	 * targets known to the driver.  Should any target reset
	 * fail, this routine returns failure to the midlayer.
	 */
	midlayer_id = cmnd->device->id;
	for (i = 0; i < MAX_FCP_TARGET; i++) {
		/* Search the mapped list for this target ID */
		match = 0;
		list_for_each_entry(ndlp, &phba->fc_nlpmap_list, nlp_listp) {
			if ((i == ndlp->nlp_sid) && ndlp->rport) {
				match = 1;
				break;
			}
		}
		if (!match)
			continue;

		lpfc_cmd->pCmd->device->id = i;
		lpfc_cmd->pCmd->device->hostdata = ndlp->rport->dd_data;
		ret = lpfc_scsi_tgt_reset(lpfc_cmd, phba);
		if (ret != SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
				"%d:0713 Bus Reset on target %d failed\n",
				phba->brd_no, i);
			err_count++;
		}
	}

	cmnd->device->id = midlayer_id;
	loopcnt = 0;
	while ((cnt = lpfc_sli_sum_iocb(phba,
				&phba->sli.ring[phba->sli.fcp_ring],
				0, 0, LPFC_CTX_HOST))) {
		spin_unlock_irq(phba->host->host_lock);
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(LPFC_RESET_WAIT*HZ);
		spin_lock_irq(phba->host->host_lock);

		if (++loopcnt
		    > (2 * phba->cfg_nodev_tmo)/LPFC_RESET_WAIT)
			break;
	}

	if (cnt) {
		/* flush all outstanding commands on the host */
		i = lpfc_sli_abort_iocb(phba,
				&phba->sli.ring[phba->sli.fcp_ring], 0, 0, 0,
				LPFC_CTX_HOST);

		lpfc_printf_log(phba, KERN_INFO, LOG_FCP,
			"%d:0715 Bus Reset I/O flush failure: cnt x%x left x%x\n",
			phba->brd_no, cnt, i);
	}

	if (!err_count)
		ret = SUCCESS;

	lpfc_free_scsi_buf(lpfc_cmd);
	lpfc_printf_log(phba,
			KERN_ERR,
			LOG_FCP,
			"%d:0714 SCSI layer issued Bus Reset Data: x%x\n",
			phba->brd_no, ret);
 out:
	return ret;
}

static int
lpfc_reset_bus_handler(struct scsi_cmnd *cmnd)
{
	int rc;
	spin_lock_irq(cmnd->device->host->host_lock);
	rc = __lpfc_reset_bus_handler(cmnd);
	spin_unlock_irq(cmnd->device->host->host_lock);
	return rc;
}

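/*
 * slave_alloc entry point: associate the scsi_device with its remote
 * port and grow the global scsi buffer pool up to the HBA queue depth.
 */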
static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)sdev->host->hostdata[0];
	struct lpfc_nodelist *ndlp = NULL;
	int match = 0;
	struct lpfc_scsi_buf *scsi_buf = NULL;
	uint32_t total = 0, i;
	uint32_t num_to_alloc = 0;
	unsigned long flags;
	struct list_head *listp;
	struct list_head *node_list[6];

	/*
	 * Store the target pointer in the scsi_device hostdata pointer provided
	 * the driver has already discovered the target id.
	 */

	/* Search the nlp lists other than unmap_list for this target ID */
	node_list[0] = &phba->fc_npr_list;
	node_list[1] = &phba->fc_nlpmap_list;
	node_list[2] = &phba->fc_prli_list;
	node_list[3] = &phba->fc_reglogin_list;
	node_list[4] = &phba->fc_adisc_list;
	node_list[5] = &phba->fc_plogi_list;

	for (i = 0; i < 6 && !match; i++) {
		listp = node_list[i];
		if (list_empty(listp))
			continue;
		list_for_each_entry(ndlp, listp, nlp_listp) {
			if ((sdev->id == ndlp->nlp_sid) && ndlp->rport) {
				match = 1;
				break;
			}
		}
	}

	if (!match)
		return -ENXIO;

	sdev->hostdata = ndlp->rport->dd_data;

	/*
	 * Populate the cmds_per_lun count scsi_bufs into this host's globally
	 * available list of scsi buffers.  Don't allocate more than the
	 * HBA limit conveyed to the midlayer via the host structure.  Note
	 * that this list of scsi bufs exists for the lifetime of the driver.
	 */
	total = phba->total_scsi_bufs;
	num_to_alloc = LPFC_CMD_PER_LUN;
	if (total >= phba->cfg_hba_queue_depth) {
		printk(KERN_WARNING "%s, At config limitation of "
		       "%d allocated scsi_bufs\n", __FUNCTION__, total);
		return 0;
	} else if (total + num_to_alloc > phba->cfg_hba_queue_depth) {
		num_to_alloc = phba->cfg_hba_queue_depth - total;
	}

	for (i = 0; i < num_to_alloc; i++) {
		scsi_buf = lpfc_get_scsi_buf(phba);
		if (!scsi_buf) {
			printk(KERN_ERR "%s, failed to allocate "
			       "scsi_buf\n", __FUNCTION__);
			break;
		}

		spin_lock_irqsave(phba->host->host_lock, flags);
		phba->total_scsi_bufs++;
		list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
		spin_unlock_irqrestore(phba->host->host_lock, flags);
	}
	return 0;
}

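/*
 * slave_configure entry point: set the queue depth for the device and
 * the dev_loss timeout for its remote port.
 */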
static int
lpfc_slave_configure(struct scsi_device *sdev)
{
	struct lpfc_hba *phba = (struct lpfc_hba *) sdev->host->hostdata[0];
	struct fc_rport *rport = starget_to_rport(sdev->sdev_target);

	if (sdev->tagged_supported)
		scsi_activate_tcq(sdev, phba->cfg_lun_queue_depth);
	else
		scsi_deactivate_tcq(sdev, phba->cfg_lun_queue_depth);

	/*
	 * Initialize the fc transport attributes for the target
	 * containing this scsi device.  Also note that the driver's
	 * target pointer is stored in the starget_data for the
	 * driver's sysfs entry point functions.
	 */
	rport->dev_loss_tmo = phba->cfg_nodev_tmo + 5;

	return 0;
}

static void
lpfc_slave_destroy(struct scsi_device *sdev)
{
	sdev->hostdata = NULL;
	return;
}

struct scsi_host_template lpfc_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler= lpfc_reset_lun_handler,
	.eh_bus_reset_handler	= lpfc_reset_bus_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.this_id		= -1,
	.sg_tablesize		= LPFC_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_host_attrs,
};