[linux-2.6] drivers/scsi/lpfc/lpfc_scsi.c
1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
5  * EMULEX and SLI are trademarks of Emulex.                        *
6  * www.emulex.com                                                  *
7  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
8  *                                                                 *
9  * This program is free software; you can redistribute it and/or   *
10  * modify it under the terms of version 2 of the GNU General       *
11  * Public License as published by the Free Software Foundation.    *
12  * This program is distributed in the hope that it will be useful. *
13  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
14  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
15  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
16  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
18  * more details, a copy of which can be found in the file COPYING  *
19  * included with this package.                                     *
20  *******************************************************************/
21 #include <linux/pci.h>
22 #include <linux/interrupt.h>
23 #include <linux/delay.h>
24 #include <asm/unaligned.h>
25
26 #include <scsi/scsi.h>
27 #include <scsi/scsi_device.h>
28 #include <scsi/scsi_eh.h>
29 #include <scsi/scsi_host.h>
30 #include <scsi/scsi_tcq.h>
31 #include <scsi/scsi_transport_fc.h>
32
33 #include "lpfc_version.h"
34 #include "lpfc_hw.h"
35 #include "lpfc_sli.h"
36 #include "lpfc_nl.h"
37 #include "lpfc_disc.h"
38 #include "lpfc_scsi.h"
39 #include "lpfc.h"
40 #include "lpfc_logmsg.h"
41 #include "lpfc_crtn.h"
42 #include "lpfc_vport.h"
43
44 #define LPFC_RESET_WAIT  2
45 #define LPFC_ABORT_WAIT  2
46
47 int _dump_buf_done;
48
49 static char *dif_op_str[] = {
50         "SCSI_PROT_NORMAL",
51         "SCSI_PROT_READ_INSERT",
52         "SCSI_PROT_WRITE_STRIP",
53         "SCSI_PROT_READ_STRIP",
54         "SCSI_PROT_WRITE_INSERT",
55         "SCSI_PROT_READ_PASS",
56         "SCSI_PROT_WRITE_PASS",
57         "SCSI_PROT_READ_CONVERT",
58         "SCSI_PROT_WRITE_CONVERT"
59 };
60
61 static void
62 lpfc_debug_save_data(struct scsi_cmnd *cmnd)
63 {
64         void *src, *dst;
65         struct scatterlist *sgde = scsi_sglist(cmnd);
66
67         if (!_dump_buf_data) {
68                 printk(KERN_ERR "BLKGRD ERROR %s _dump_buf_data is NULL\n",
69                                 __func__);
70                 return;
71         }
72
73
74         if (!sgde) {
75                 printk(KERN_ERR "BLKGRD ERROR: data scatterlist is null\n");
76                 return;
77         }
78
79         dst = (void *) _dump_buf_data;
80         while (sgde) {
81                 src = sg_virt(sgde);
82                 memcpy(dst, src, sgde->length);
83                 dst += sgde->length;
84                 sgde = sg_next(sgde);
85         }
86 }
87
88 static void
89 lpfc_debug_save_dif(struct scsi_cmnd *cmnd)
90 {
91         void *src, *dst;
92         struct scatterlist *sgde = scsi_prot_sglist(cmnd);
93
94         if (!_dump_buf_dif) {
95         printk(KERN_ERR "BLKGRD ERROR %s _dump_buf_dif is NULL\n",
96                                 __func__);
97                 return;
98         }
99
100         if (!sgde) {
101                 printk(KERN_ERR "BLKGRD ERROR: prot scatterlist is null\n");
102                 return;
103         }
104
105         dst = _dump_buf_dif;
106         while (sgde) {
107                 src = sg_virt(sgde);
108                 memcpy(dst, src, sgde->length);
109                 dst += sgde->length;
110                 sgde = sg_next(sgde);
111         }
112 }
113
114 /**
115  * lpfc_update_stats - Update statistical data for the command completion
116  * @phba: Pointer to HBA object.
117  * @lpfc_cmd: lpfc scsi command object pointer.
118  *
119  * This function is called when there is a command completion and this
120  * function updates the statistical data for the command completion.
121  **/
122 static void
123 lpfc_update_stats(struct lpfc_hba *phba, struct  lpfc_scsi_buf *lpfc_cmd)
124 {
125         struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
126         struct lpfc_nodelist *pnode = rdata->pnode;
127         struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
128         unsigned long flags;
129         struct Scsi_Host  *shost = cmd->device->host;
130         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
131         unsigned long latency;
132         int i;
133
134         if (cmd->result)
135                 return;
136
137         latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
138
139         spin_lock_irqsave(shost->host_lock, flags);
140         if (!vport->stat_data_enabled ||
141                 vport->stat_data_blocked ||
142                 !pnode->lat_data ||
143                 (phba->bucket_type == LPFC_NO_BUCKET)) {
144                 spin_unlock_irqrestore(shost->host_lock, flags);
145                 return;
146         }
147
148         if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
149                 i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
150                         phba->bucket_step;
151                 /* check array subscript bounds */
152                 if (i < 0)
153                         i = 0;
154                 else if (i >= LPFC_MAX_BUCKET_COUNT)
155                         i = LPFC_MAX_BUCKET_COUNT - 1;
156         } else {
157                 for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
158                         if (latency <= (phba->bucket_base +
159                                 ((1<<i)*phba->bucket_step)))
160                                 break;
161         }
162
163         pnode->lat_data[i].cmd_count++;
164         spin_unlock_irqrestore(shost->host_lock, flags);
165 }
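/*
 * Worked example for the bucket selection above (illustrative values, not
 * taken from the driver): with LPFC_LINEAR_BUCKET, bucket_base = 0 and
 * bucket_step = 50, a latency of 120 ms gives
 *
 *     i = (120 + 50 - 1 - 0) / 50 = 3
 *
 * clamped to [0, LPFC_MAX_BUCKET_COUNT - 1].  With the power-of-2 buckets,
 * the same parameters select the first i with 120 <= 0 + (1 << i) * 50,
 * i.e. i = 2.
 */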
166
167 /**
168  * lpfc_send_sdev_queuedepth_change_event - Posts a queuedepth change event
169  * @phba: Pointer to HBA context object.
170  * @vport: Pointer to vport object.
171  * @ndlp: Pointer to FC node associated with the target.
172  * @lun: Lun number of the scsi device.
173  * @old_val: Old value of the queue depth.
174  * @new_val: New value of the queue depth.
175  *
176  * This function sends an event to the mgmt application indicating
177  * there is a change in the scsi device queue depth.
178  **/
179 static void
180 lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
181                 struct lpfc_vport  *vport,
182                 struct lpfc_nodelist *ndlp,
183                 uint32_t lun,
184                 uint32_t old_val,
185                 uint32_t new_val)
186 {
187         struct lpfc_fast_path_event *fast_path_evt;
188         unsigned long flags;
189
190         fast_path_evt = lpfc_alloc_fast_evt(phba);
191         if (!fast_path_evt)
192                 return;
193
194         fast_path_evt->un.queue_depth_evt.scsi_event.event_type =
195                 FC_REG_SCSI_EVENT;
196         fast_path_evt->un.queue_depth_evt.scsi_event.subcategory =
197                 LPFC_EVENT_VARQUEDEPTH;
198
199         /* Report all luns with change in queue depth */
200         fast_path_evt->un.queue_depth_evt.scsi_event.lun = lun;
201         if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
202                 memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwpn,
203                         &ndlp->nlp_portname, sizeof(struct lpfc_name));
204                 memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwnn,
205                         &ndlp->nlp_nodename, sizeof(struct lpfc_name));
206         }
207
208         fast_path_evt->un.queue_depth_evt.oldval = old_val;
209         fast_path_evt->un.queue_depth_evt.newval = new_val;
210         fast_path_evt->vport = vport;
211
212         fast_path_evt->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
213         spin_lock_irqsave(&phba->hbalock, flags);
214         list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
215         spin_unlock_irqrestore(&phba->hbalock, flags);
216         lpfc_worker_wake_up(phba);
217
218         return;
219 }
220
221 /**
222  * lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
223  * @phba: The Hba for which this call is being executed.
224  *
225  * This routine is called when there is resource error in driver or firmware.
226  * This routine posts WORKER_RAMP_DOWN_QUEUE event for @phba. This routine
227  * posts at most 1 event each second. This routine wakes up worker thread of
228  * @phba to process the WORKER_RAMP_DOWN_QUEUE event.
229  *
230  * This routine should be called with no lock held.
231  **/
232 void
233 lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
234 {
235         unsigned long flags;
236         uint32_t evt_posted;
237
238         spin_lock_irqsave(&phba->hbalock, flags);
239         atomic_inc(&phba->num_rsrc_err);
240         phba->last_rsrc_error_time = jiffies;
241
242         if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
243                 spin_unlock_irqrestore(&phba->hbalock, flags);
244                 return;
245         }
246
247         phba->last_ramp_down_time = jiffies;
248
249         spin_unlock_irqrestore(&phba->hbalock, flags);
250
251         spin_lock_irqsave(&phba->pport->work_port_lock, flags);
252         evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
253         if (!evt_posted)
254                 phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
255         spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
256
257         if (!evt_posted)
258                 lpfc_worker_wake_up(phba);
259         return;
260 }
261
262 /**
263  * lpfc_rampup_queue_depth - Post RAMP_UP_QUEUE event for worker thread
264  * @vport: The virtual port for which this call is being executed.
265  * @queue_depth: Current queue depth of the scsi device.
266  *
267  * This routine posts a WORKER_RAMP_UP_QUEUE event for the @vport's phba. It
268  * posts at most 1 event every 5 minutes after last_ramp_up_time or
269  * last_rsrc_error_time, and wakes up the phba worker thread to process it.
270  *
271  * This routine should be called with no lock held.
272  **/
273 static inline void
274 lpfc_rampup_queue_depth(struct lpfc_vport  *vport,
275                         uint32_t queue_depth)
276 {
277         unsigned long flags;
278         struct lpfc_hba *phba = vport->phba;
279         uint32_t evt_posted;
280         atomic_inc(&phba->num_cmd_success);
281
282         if (vport->cfg_lun_queue_depth <= queue_depth)
283                 return;
284         spin_lock_irqsave(&phba->hbalock, flags);
285         if (((phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) > jiffies) ||
286             ((phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL) > jiffies)) {
287                 spin_unlock_irqrestore(&phba->hbalock, flags);
288                 return;
289         }
290         phba->last_ramp_up_time = jiffies;
291         spin_unlock_irqrestore(&phba->hbalock, flags);
292
293         spin_lock_irqsave(&phba->pport->work_port_lock, flags);
294         evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE;
295         if (!evt_posted)
296                 phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
297         spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
298
299         if (!evt_posted)
300                 lpfc_worker_wake_up(phba);
301         return;
302 }
303
304 /**
305  * lpfc_ramp_down_queue_handler - WORKER_RAMP_DOWN_QUEUE event handler
306  * @phba: The Hba for which this call is being executed.
307  *
308  * This routine is called to process the WORKER_RAMP_DOWN_QUEUE event for the
309  * worker thread. This routine reduces the queue depth for all scsi devices
310  * on each vport associated with @phba.
311  **/
312 void
313 lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
314 {
315         struct lpfc_vport **vports;
316         struct Scsi_Host  *shost;
317         struct scsi_device *sdev;
318         unsigned long new_queue_depth, old_queue_depth;
319         unsigned long num_rsrc_err, num_cmd_success;
320         int i;
321         struct lpfc_rport_data *rdata;
322
323         num_rsrc_err = atomic_read(&phba->num_rsrc_err);
324         num_cmd_success = atomic_read(&phba->num_cmd_success);
325
326         vports = lpfc_create_vport_work_array(phba);
327         if (vports != NULL)
328                 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
329                         shost = lpfc_shost_from_vport(vports[i]);
330                         shost_for_each_device(sdev, shost) {
331                                 new_queue_depth =
332                                         sdev->queue_depth * num_rsrc_err /
333                                         (num_rsrc_err + num_cmd_success);
334                                 if (!new_queue_depth)
335                                         new_queue_depth = sdev->queue_depth - 1;
336                                 else
337                                         new_queue_depth = sdev->queue_depth -
338                                                                 new_queue_depth;
339                                 old_queue_depth = sdev->queue_depth;
340                                 if (sdev->ordered_tags)
341                                         scsi_adjust_queue_depth(sdev,
342                                                         MSG_ORDERED_TAG,
343                                                         new_queue_depth);
344                                 else
345                                         scsi_adjust_queue_depth(sdev,
346                                                         MSG_SIMPLE_TAG,
347                                                         new_queue_depth);
348                                 rdata = sdev->hostdata;
349                                 if (rdata)
350                                         lpfc_send_sdev_queuedepth_change_event(
351                                                 phba, vports[i],
352                                                 rdata->pnode,
353                                                 sdev->lun, old_queue_depth,
354                                                 new_queue_depth);
355                         }
356                 }
357         lpfc_destroy_vport_work_array(phba, vports);
358         atomic_set(&phba->num_rsrc_err, 0);
359         atomic_set(&phba->num_cmd_success, 0);
360 }
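/*
 * Worked example for the ramp-down scaling above (illustrative counts):
 * with queue_depth = 32, num_rsrc_err = 8 and num_cmd_success = 24,
 *
 *     new_queue_depth = 32 * 8 / (8 + 24) = 8
 *     adjusted depth  = 32 - 8 = 24
 *
 * If the scaled value truncates to zero, the depth is instead reduced by
 * exactly one.
 */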
361
362 /**
363  * lpfc_ramp_up_queue_handler - WORKER_RAMP_UP_QUEUE event handler
364  * @phba: The Hba for which this call is being executed.
365  *
366  * This routine is called to process the WORKER_RAMP_UP_QUEUE event for the
367  * worker thread. This routine increases the queue depth of all scsi devices
368  * on each vport associated with @phba by 1. This routine also resets @phba's
369  * num_rsrc_err and num_cmd_success to zero.
370  **/
371 void
372 lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
373 {
374         struct lpfc_vport **vports;
375         struct Scsi_Host  *shost;
376         struct scsi_device *sdev;
377         int i;
378         struct lpfc_rport_data *rdata;
379
380         vports = lpfc_create_vport_work_array(phba);
381         if (vports != NULL)
382                 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
383                         shost = lpfc_shost_from_vport(vports[i]);
384                         shost_for_each_device(sdev, shost) {
385                                 if (vports[i]->cfg_lun_queue_depth <=
386                                     sdev->queue_depth)
387                                         continue;
388                                 if (sdev->ordered_tags)
389                                         scsi_adjust_queue_depth(sdev,
390                                                         MSG_ORDERED_TAG,
391                                                         sdev->queue_depth+1);
392                                 else
393                                         scsi_adjust_queue_depth(sdev,
394                                                         MSG_SIMPLE_TAG,
395                                                         sdev->queue_depth+1);
396                                 rdata = sdev->hostdata;
397                                 if (rdata)
398                                         lpfc_send_sdev_queuedepth_change_event(
399                                                 phba, vports[i],
400                                                 rdata->pnode,
401                                                 sdev->lun,
402                                                 sdev->queue_depth - 1,
403                                                 sdev->queue_depth);
404                         }
405                 }
406         lpfc_destroy_vport_work_array(phba, vports);
407         atomic_set(&phba->num_rsrc_err, 0);
408         atomic_set(&phba->num_cmd_success, 0);
409 }
410
411 /**
412  * lpfc_scsi_dev_block - set all scsi hosts to block state
413  * @phba: Pointer to HBA context object.
414  *
415  * This function walks the vport list and sets each SCSI host to the block
416  * state by invoking the fc_remote_port_delete() routine. This function is
417  * invoked by EEH when the device's PCI slot has been permanently disabled.
418  **/
419 void
420 lpfc_scsi_dev_block(struct lpfc_hba *phba)
421 {
422         struct lpfc_vport **vports;
423         struct Scsi_Host  *shost;
424         struct scsi_device *sdev;
425         struct fc_rport *rport;
426         int i;
427
428         vports = lpfc_create_vport_work_array(phba);
429         if (vports != NULL)
430                 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
431                         shost = lpfc_shost_from_vport(vports[i]);
432                         shost_for_each_device(sdev, shost) {
433                                 rport = starget_to_rport(scsi_target(sdev));
434                                 fc_remote_port_delete(rport);
435                         }
436                 }
437         lpfc_destroy_vport_work_array(phba, vports);
438 }
439
440 /**
441  * lpfc_new_scsi_buf - Scsi buffer allocator
442  * @vport: The virtual port for which this call is being executed.
443  *
444  * This routine allocates a scsi buffer, which contains all the necessary
445  * information needed to initiate a SCSI I/O.  The non-DMAable buffer region
446  * contains information to build the IOCB.  The DMAable region contains
447  * memory for the FCP CMND, FCP RSP, and the initial BPL.  In addition to
448  * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL
449  * and the BPL BDE is setup in the IOCB.
450  *
451  * Return codes:
452  *   NULL - Error
453  *   Pointer to lpfc_scsi_buf data structure - Success
454  **/
455 static struct lpfc_scsi_buf *
456 lpfc_new_scsi_buf(struct lpfc_vport *vport)
457 {
458         struct lpfc_hba *phba = vport->phba;
459         struct lpfc_scsi_buf *psb;
460         struct ulp_bde64 *bpl;
461         IOCB_t *iocb;
462         dma_addr_t pdma_phys_fcp_cmd;
463         dma_addr_t pdma_phys_fcp_rsp;
464         dma_addr_t pdma_phys_bpl;
465         uint16_t iotag;
466
467         psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
468         if (!psb)
469                 return NULL;
470
471         /*
472          * Get memory from the pci pool to map the virt space to pci bus space
473          * for an I/O.  The DMA buffer includes space for the struct fcp_cmnd,
474          * struct fcp_rsp and the number of bde's necessary to support the
475          * sg_tablesize.
476          */
477         psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
478                                                         &psb->dma_handle);
479         if (!psb->data) {
480                 kfree(psb);
481                 return NULL;
482         }
483
484         /* Initialize virtual ptrs to dma_buf region. */
485         memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
486
487         /* Allocate iotag for psb->cur_iocbq. */
488         iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
489         if (iotag == 0) {
490                 pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
491                               psb->data, psb->dma_handle);
492                 kfree(psb);
493                 return NULL;
494         }
495         psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
496
497         psb->fcp_cmnd = psb->data;
498         psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
499         psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
500                                                         sizeof(struct fcp_rsp);
501
502         /* Initialize local short-hand pointers. */
503         bpl = psb->fcp_bpl;
504         pdma_phys_fcp_cmd = psb->dma_handle;
505         pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
506         pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
507                         sizeof(struct fcp_rsp);
508
509         /*
510          * The first two bdes are the FCP_CMD and FCP_RSP.  The balance are sg
511          * list bdes.  Initialize the first two and leave the rest for
512          * queuecommand.
513          */
514         bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
515         bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
516         bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
517         bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
518         bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
519
520         /* Setup the physical region for the FCP RSP */
521         bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
522         bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
523         bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
524         bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
525         bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
526
527         /*
528          * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
529          * initialize it with all known data now.
530          */
531         iocb = &psb->cur_iocbq.iocb;
532         iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
533         if ((phba->sli_rev == 3) &&
534             !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
535                 /* fill in immediate fcp command BDE */
536                 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
537                 iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
538                 iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
539                                                        unsli3.fcp_ext.icd);
540                 iocb->un.fcpi64.bdl.addrHigh = 0;
541                 iocb->ulpBdeCount = 0;
542                 iocb->ulpLe = 0;
543                 /* fill in response BDE */
544                 iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
545                 iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
546                                                 sizeof(struct fcp_rsp);
547                 iocb->unsli3.fcp_ext.rbde.addrLow =
548                                                 putPaddrLow(pdma_phys_fcp_rsp);
549                 iocb->unsli3.fcp_ext.rbde.addrHigh =
550                                                 putPaddrHigh(pdma_phys_fcp_rsp);
551         } else {
552                 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
553                 iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
554                 iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_bpl);
555                 iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_bpl);
556                 iocb->ulpBdeCount = 1;
557                 iocb->ulpLe = 1;
558         }
559         iocb->ulpClass = CLASS3;
560
561         return psb;
562 }
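/*
 * Sketch of the DMA buffer carved up above (one allocation from
 * lpfc_scsi_dma_buf_pool; the BPL region fills the remainder of
 * cfg_sg_dma_buf_size):
 *
 *   psb->data: [ struct fcp_cmnd ][ struct fcp_rsp ][ BPL entries ... ]
 *                ^fcp_cmnd          ^fcp_rsp          ^fcp_bpl
 *
 * bpl[0] and bpl[1] describe the FCP CMND and FCP RSP; the remaining
 * entries are filled per-I/O by the dma-buf prep routines.
 */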
563
564 /**
565  * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list list of Hba
566  * @phba: The Hba for which this call is being executed.
567  *
568  * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
569  * and returns to caller.
570  *
571  * Return codes:
572  *   NULL - Error
573  *   Pointer to lpfc_scsi_buf - Success
574  **/
575 static struct lpfc_scsi_buf*
576 lpfc_get_scsi_buf(struct lpfc_hba * phba)
577 {
578         struct  lpfc_scsi_buf * lpfc_cmd = NULL;
579         struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
580         unsigned long iflag = 0;
581
582         spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
583         list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
584         if (lpfc_cmd) {
585                 lpfc_cmd->seg_cnt = 0;
586                 lpfc_cmd->nonsg_phys = 0;
587                 lpfc_cmd->prot_seg_cnt = 0;
588         }
589         spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
590         return  lpfc_cmd;
591 }
592
593 /**
594  * lpfc_release_scsi_buf - Return a scsi buffer back to hba's lpfc_scsi_buf_list
595  * @phba: The Hba for which this call is being executed.
596  * @psb: The scsi buffer which is being released.
597  *
598  * This routine releases @psb scsi buffer by adding it to tail of @phba
599  * lpfc_scsi_buf_list list.
600  **/
601 static void
602 lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
603 {
604         unsigned long iflag = 0;
605
606         spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
607         psb->pCmd = NULL;
608         list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
609         spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
610 }
611
612 /**
613  * lpfc_scsi_prep_dma_buf - Routine to do DMA mapping for scsi buffer
614  * @phba: The Hba for which this call is being executed.
615  * @lpfc_cmd: The scsi buffer which is going to be mapped.
616  *
617  * This routine does the pci dma mapping for the scatter-gather list of the
618  * scsi cmnd field of @lpfc_cmd. This routine scans through the sg elements
619  * and formats the BDEs. This routine also initializes all IOCB fields which
620  * are dependent on the scsi command request buffer.
621  *
622  * Return codes:
623  *   1 - Error
624  *   0 - Success
625  **/
626 static int
627 lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
628 {
629         struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
630         struct scatterlist *sgel = NULL;
631         struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
632         struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
633         IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
634         struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
635         dma_addr_t physaddr;
636         uint32_t num_bde = 0;
637         int nseg, datadir = scsi_cmnd->sc_data_direction;
638
639         /*
640          * There are three possibilities here - use scatter-gather segment, use
641          * the single mapping, or neither.  Start the lpfc command prep by
642          * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
643          * data bde entry.
644          */
645         bpl += 2;
646         if (scsi_sg_count(scsi_cmnd)) {
647                 /*
648                  * The driver stores the segment count returned from pci_map_sg
649                  * because this is a count of dma-mappings used to map the use_sg
650                  * pages.  They are not guaranteed to be the same for those
651                  * architectures that implement an IOMMU.
652                  */
653
654                 nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
655                                   scsi_sg_count(scsi_cmnd), datadir);
656                 if (unlikely(!nseg))
657                         return 1;
658
659                 lpfc_cmd->seg_cnt = nseg;
660                 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
661                         printk(KERN_ERR "%s: Too many sg segments from "
662                                "dma_map_sg.  Config %d, seg_cnt %d\n",
663                                __func__, phba->cfg_sg_seg_cnt,
664                                lpfc_cmd->seg_cnt);
665                         scsi_dma_unmap(scsi_cmnd);
666                         return 1;
667                 }
668
669                 /*
670                  * The driver established a maximum scatter-gather segment count
671                  * during probe that limits the number of sg elements in any
672                  * single scsi command.  Just run through the seg_cnt and format
673                  * the bde's.
674                  * When using SLI-3 the driver will try to fit all the BDEs into
675                  * the IOCB. If it can't then the BDEs get added to a BPL as it
676                  * does for SLI-2 mode.
677                  */
678                 scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
679                         physaddr = sg_dma_address(sgel);
680                         if (phba->sli_rev == 3 &&
681                             !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
682                             nseg <= LPFC_EXT_DATA_BDE_COUNT) {
683                                 data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
684                                 data_bde->tus.f.bdeSize = sg_dma_len(sgel);
685                                 data_bde->addrLow = putPaddrLow(physaddr);
686                                 data_bde->addrHigh = putPaddrHigh(physaddr);
687                                 data_bde++;
688                         } else {
689                                 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
690                                 bpl->tus.f.bdeSize = sg_dma_len(sgel);
691                                 bpl->tus.w = le32_to_cpu(bpl->tus.w);
692                                 bpl->addrLow =
693                                         le32_to_cpu(putPaddrLow(physaddr));
694                                 bpl->addrHigh =
695                                         le32_to_cpu(putPaddrHigh(physaddr));
696                                 bpl++;
697                         }
698                 }
699         }
700
701         /*
702          * Finish initializing those IOCB fields that are dependent on the
703          * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
704          * explicitly reinitialized and for SLI-3 the extended bde count is
705          * explicitly reinitialized since all iocb memory resources are reused.
706          */
707         if (phba->sli_rev == 3 &&
708             !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
709                 if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
710                         /*
711                          * The extended IOCB format can only fit 3 BDE or a BPL.
712                          * This I/O has more than 3 BDE so the 1st data bde will
713                          * be a BPL that is filled in here.
714                          */
715                         physaddr = lpfc_cmd->dma_handle;
716                         data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
717                         data_bde->tus.f.bdeSize = (num_bde *
718                                                    sizeof(struct ulp_bde64));
719                         physaddr += (sizeof(struct fcp_cmnd) +
720                                      sizeof(struct fcp_rsp) +
721                                      (2 * sizeof(struct ulp_bde64)));
722                         data_bde->addrHigh = putPaddrHigh(physaddr);
723                         data_bde->addrLow = putPaddrLow(physaddr);
724                         /* ebde count includes the response bde and data bpl */
725                         iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
726                 } else {
727                         /* ebde count includes the response bde and data bdes */
728                         iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
729                 }
730         } else {
731                 iocb_cmd->un.fcpi64.bdl.bdeSize =
732                         ((num_bde + 2) * sizeof(struct ulp_bde64));
733         }
734         fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
735
736         /*
737          * Due to difference in data length between DIF/non-DIF paths,
738          * we need to set word 4 of IOCB here
739          */
740         iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
741         return 0;
742 }
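/*
 * Illustrative sizing for the bdeSize computation above (assumed values):
 * a non-BlockGuard SLI-2 command with num_bde = 4 data BDEs yields
 *
 *     bdl.bdeSize = (4 + 2) * sizeof(struct ulp_bde64) = 6 * 12 = 72
 *
 * since the BPL length also counts the FCP CMND and FCP RSP entries.  On
 * SLI-3 without BlockGuard, up to LPFC_EXT_DATA_BDE_COUNT (3, per the
 * comment above) data BDEs fit directly in the extended IOCB; a fourth
 * forces the BPL path with ebde_count = 2.
 */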
743
744 /*
745  * Given a scsi cmnd, determine the BlockGuard profile to be used
746  * with the cmd
747  */
748 static int
749 lpfc_sc_to_sli_prof(struct scsi_cmnd *sc)
750 {
751         uint8_t guard_type = scsi_host_get_guard(sc->device->host);
752         uint8_t ret_prof = LPFC_PROF_INVALID;
753
754         if (guard_type == SHOST_DIX_GUARD_IP) {
755                 switch (scsi_get_prot_op(sc)) {
756                 case SCSI_PROT_READ_INSERT:
757                 case SCSI_PROT_WRITE_STRIP:
758                         ret_prof = LPFC_PROF_AST2;
759                         break;
760
761                 case SCSI_PROT_READ_STRIP:
762                 case SCSI_PROT_WRITE_INSERT:
763                         ret_prof = LPFC_PROF_A1;
764                         break;
765
766                 case SCSI_PROT_READ_CONVERT:
767                 case SCSI_PROT_WRITE_CONVERT:
768                         ret_prof = LPFC_PROF_AST1;
769                         break;
770
771                 case SCSI_PROT_READ_PASS:
772                 case SCSI_PROT_WRITE_PASS:
773                 case SCSI_PROT_NORMAL:
774                 default:
775                         printk(KERN_ERR "Bad op/guard:%d/%d combination\n",
776                                         scsi_get_prot_op(sc), guard_type);
777                         break;
778
779                 }
780         } else if (guard_type == SHOST_DIX_GUARD_CRC) {
781                 switch (scsi_get_prot_op(sc)) {
782                 case SCSI_PROT_READ_STRIP:
783                 case SCSI_PROT_WRITE_INSERT:
784                         ret_prof = LPFC_PROF_A1;
785                         break;
786
787                 case SCSI_PROT_READ_PASS:
788                 case SCSI_PROT_WRITE_PASS:
789                         ret_prof = LPFC_PROF_C1;
790                         break;
791
792                 case SCSI_PROT_READ_CONVERT:
793                 case SCSI_PROT_WRITE_CONVERT:
794                 case SCSI_PROT_READ_INSERT:
795                 case SCSI_PROT_WRITE_STRIP:
796                 case SCSI_PROT_NORMAL:
797                 default:
798                         printk(KERN_ERR "Bad op/guard:%d/%d combination\n",
799                                         scsi_get_prot_op(sc), guard_type);
800                         break;
801                 }
802         } else {
803                 /* unsupported format */
804                 BUG();
805         }
806
807         return ret_prof;
808 }
809
810 struct scsi_dif_tuple {
811         __be16 guard_tag;       /* Checksum */
812         __be16 app_tag;         /* Opaque storage */
813         __be32 ref_tag;         /* Target LBA or indirect LBA */
814 };
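/*
 * Example tuple for a Type 1 write of the block at LBA 0x1234 (illustrative
 * values): guard_tag holds the CRC (or IP checksum) of the 512-byte data
 * block, app_tag is 0x0000 here, and ref_tag is 0x00001234, the low 32 bits
 * of the LBA.  One such 8-byte tuple trails every logical block.
 */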
815
816 static inline unsigned
817 lpfc_cmd_blksize(struct scsi_cmnd *sc)
818 {
819         return sc->device->sector_size;
820 }
821
822 /**
823  * lpfc_get_cmd_dif_parms - Extract DIF parameters from SCSI command
824  * @sc:             in: SCSI command
825  * @apptagmask:     out: app tag mask
826  * @apptagval:      out: app tag value
827  * @reftag:         out: ref tag (reference tag)
828  *
829  * Description:
830  *   Extract DIF parameters from the command if possible.  Otherwise,
831  *   use default parameters.
832  *
833  **/
834 static inline void
835 lpfc_get_cmd_dif_parms(struct scsi_cmnd *sc, uint16_t *apptagmask,
836                 uint16_t *apptagval, uint32_t *reftag)
837 {
838         struct  scsi_dif_tuple *spt;
839         unsigned char op = scsi_get_prot_op(sc);
840         unsigned int protcnt = scsi_prot_sg_count(sc);
841         static int cnt;
842
843         if (protcnt && (op == SCSI_PROT_WRITE_STRIP ||
844                                 op == SCSI_PROT_WRITE_PASS ||
845                                 op == SCSI_PROT_WRITE_CONVERT)) {
846
847                 cnt++;
848                 spt = page_address(sg_page(scsi_prot_sglist(sc))) +
849                         scsi_prot_sglist(sc)[0].offset;
850                 *apptagmask = 0;
851                 *apptagval = 0;
852                 *reftag = be32_to_cpu(spt->ref_tag);
853
854         } else {
855                 /* SBC defines ref tag to be lower 32bits of LBA */
856                 *reftag = (uint32_t) (0xffffffff & scsi_get_lba(sc));
857                 *apptagmask = 0;
858                 *apptagval = 0;
859         }
860 }
861
862 /*
863  * This function sets up buffer list for protection groups of
864  * type LPFC_PG_TYPE_NO_DIF
865  *
866  * This is usually used when the HBA is instructed to generate
867  * DIFs and insert them into data stream (or strip DIF from
868  * incoming data stream)
869  *
870  * The buffer list consists of just one protection group described
871  * below:
872  *                                +-------------------------+
873  *   start of prot group  -->     |          PDE_1          |
874  *                                +-------------------------+
875  *                                |         Data BDE        |
876  *                                +-------------------------+
877  *                                |more Data BDE's ... (opt)|
878  *                                +-------------------------+
879  *
880  * @sc: pointer to scsi command we're working on
881  * @bpl: pointer to buffer list for protection groups
882  * @datasegcnt: number of segments of data that have been dma mapped
883  *
884  * Note: Data s/g buffers have been dma mapped
885  */
886 static int
887 lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
888                 struct ulp_bde64 *bpl, int datasegcnt)
889 {
890         struct scatterlist *sgde = NULL; /* s/g data entry */
891         struct lpfc_pde *pde1 = NULL;
892         dma_addr_t physaddr;
893         int i = 0, num_bde = 0;
894         int datadir = sc->sc_data_direction;
895         int prof = LPFC_PROF_INVALID;
896         unsigned blksize;
897         uint32_t reftag;
898         uint16_t apptagmask, apptagval;
899
900         pde1 = (struct lpfc_pde *) bpl;
901         prof = lpfc_sc_to_sli_prof(sc);
902
903         if (prof == LPFC_PROF_INVALID)
904                 goto out;
905
906         /* extract some info from the scsi command for PDE1 */
907         blksize = lpfc_cmd_blksize(sc);
908         lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag);
909
910         /* setup PDE1 with what we have */
911         lpfc_pde_set_bg_parms(pde1, LPFC_PDE1_DESCRIPTOR, prof, blksize,
912                         BG_EC_STOP_ERR);
913         lpfc_pde_set_dif_parms(pde1, apptagmask, apptagval, reftag);
914
915         num_bde++;
916         bpl++;
917
918         /* assumption: caller has already run dma_map_sg on command data */
919         scsi_for_each_sg(sc, sgde, datasegcnt, i) {
920                 physaddr = sg_dma_address(sgde);
921                 bpl->addrLow = le32_to_cpu(putPaddrLow(physaddr));
922                 bpl->addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
923                 bpl->tus.f.bdeSize = sg_dma_len(sgde);
924                 if (datadir == DMA_TO_DEVICE)
925                         bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
926                 else
927                         bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
928                 bpl->tus.w = le32_to_cpu(bpl->tus.w);
929                 bpl++;
930                 num_bde++;
931         }
932
933 out:
934         return num_bde;
935 }
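/*
 * Example (illustrative): a WRITE_INSERT with 3 mapped data segments
 * produces num_bde = 4 buffer-list entries here: one PDE_1 carrying the
 * profile, block size and DIF seed, followed by 3 data BDEs flagged
 * BUFF_TYPE_BDE_64 for DMA_TO_DEVICE.
 */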
936
937 /*
938  * This function sets up buffer list for protection groups of
939  * type LPFC_PG_TYPE_DIF_BUF
940  *
941  * This is usually used when DIFs are in their own buffers,
942  * separate from the data. The HBA can then be instructed
943  * to place the DIFs in the outgoing stream.  For read operations,
944  * the HBA can extract the DIFs and place them in DIF buffers.
945  *
946  * The buffer list for this type consists of one or more of the
947  * protection groups described below:
948  *                                    +-------------------------+
949  *   start of first prot group  -->   |          PDE_1          |
950  *                                    +-------------------------+
951  *                                    |      PDE_3 (Prot BDE)   |
952  *                                    +-------------------------+
953  *                                    |        Data BDE         |
954  *                                    +-------------------------+
955  *                                    |more Data BDE's ... (opt)|
956  *                                    +-------------------------+
957  *   start of new  prot group  -->    |          PDE_1          |
958  *                                    +-------------------------+
959  *                                    |          ...            |
960  *                                    +-------------------------+
961  *
962  * @sc: pointer to scsi command we're working on
963  * @bpl: pointer to buffer list for protection groups
964  * @datacnt: number of segments of data that have been dma mapped
965  * @protcnt: number of segment of protection data that have been dma mapped
966  *
967  * Note: It is assumed that both data and protection s/g buffers have been
968  *       mapped for DMA
969  */
970 static int
971 lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
972                 struct ulp_bde64 *bpl, int datacnt, int protcnt)
973 {
974         struct scatterlist *sgde = NULL; /* s/g data entry */
975         struct scatterlist *sgpe = NULL; /* s/g prot entry */
976         struct lpfc_pde *pde1 = NULL;
977         struct ulp_bde64 *prot_bde = NULL;
978         dma_addr_t dataphysaddr, protphysaddr;
979         unsigned short curr_data = 0, curr_prot = 0;
980         unsigned int split_offset, protgroup_len;
981         unsigned int protgrp_blks, protgrp_bytes;
982         unsigned int remainder, subtotal;
983         int prof = LPFC_PROF_INVALID;
984         int datadir = sc->sc_data_direction;
985         unsigned char pgdone = 0, alldone = 0;
986         unsigned blksize;
987         uint32_t reftag;
988         uint16_t apptagmask, apptagval;
989         int num_bde = 0;
990
991         sgpe = scsi_prot_sglist(sc);
992         sgde = scsi_sglist(sc);
993
994         if (!sgpe || !sgde) {
995                 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
996                                 "9020 Invalid s/g entry: data=0x%p prot=0x%p\n",
997                                 sgpe, sgde);
998                 return 0;
999         }
1000
1001         prof = lpfc_sc_to_sli_prof(sc);
1002         if (prof == LPFC_PROF_INVALID)
1003                 goto out;
1004
1005         /* extract some info from the scsi command for PDE1 */
1006         blksize = lpfc_cmd_blksize(sc);
1007         lpfc_get_cmd_dif_parms(sc, &apptagmask, &apptagval, &reftag);
1008
1009         split_offset = 0;
1010         do {
1011                 /* setup the first PDE_1 */
1012                 pde1 = (struct lpfc_pde *) bpl;
1013
1014                 lpfc_pde_set_bg_parms(pde1, LPFC_PDE1_DESCRIPTOR, prof, blksize,
1015                                 BG_EC_STOP_ERR);
1016                 lpfc_pde_set_dif_parms(pde1, apptagmask, apptagval, reftag);
1017
1018                 num_bde++;
1019                 bpl++;
1020
1021                 /* setup the first BDE that points to protection buffer */
1022                 prot_bde = (struct ulp_bde64 *) bpl;
1023                 protphysaddr = sg_dma_address(sgpe);
1024                 prot_bde->addrLow = le32_to_cpu(putPaddrLow(protphysaddr));
1025                 prot_bde->addrHigh = le32_to_cpu(putPaddrHigh(protphysaddr));
1026                 protgroup_len = sg_dma_len(sgpe);
1027
1028
1029                 /* must be integer multiple of the DIF block length */
1030                 BUG_ON(protgroup_len % 8);
1031
1032                 protgrp_blks = protgroup_len / 8;
1033                 protgrp_bytes = protgrp_blks * blksize;
1034
1035                 prot_bde->tus.f.bdeSize = protgroup_len;
1036                 if (datadir == DMA_TO_DEVICE)
1037                         prot_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1038                 else
1039                         prot_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
1040                 prot_bde->tus.w = le32_to_cpu(prot_bde->tus.w);
1041
1042                 curr_prot++;
1043                 num_bde++;
1044
1045                 /* setup BDE's for data blocks associated with DIF data */
1046                 pgdone = 0;
1047                 subtotal = 0; /* total bytes processed for current prot grp */
1048                 while (!pgdone) {
1049                         if (!sgde) {
1050                                 printk(KERN_ERR "%s Invalid data segment\n",
1051                                                 __func__);
1052                                 return 0;
1053                         }
1054                         bpl++;
1055                         dataphysaddr = sg_dma_address(sgde) + split_offset;
1056                         bpl->addrLow = le32_to_cpu(putPaddrLow(dataphysaddr));
1057                         bpl->addrHigh = le32_to_cpu(putPaddrHigh(dataphysaddr));
1058
1059                         remainder = sg_dma_len(sgde) - split_offset;
1060
1061                         if ((subtotal + remainder) <= protgrp_bytes) {
1062                                 /* we can use this whole buffer */
1063                                 bpl->tus.f.bdeSize = remainder;
1064                                 split_offset = 0;
1065
1066                                 if ((subtotal + remainder) == protgrp_bytes)
1067                                         pgdone = 1;
1068                         } else {
1069                                 /* must split this buffer with next prot grp */
1070                                 bpl->tus.f.bdeSize = protgrp_bytes - subtotal;
1071                                 split_offset += bpl->tus.f.bdeSize;
1072                         }
1073
1074                         subtotal += bpl->tus.f.bdeSize;
1075
1076                         if (datadir == DMA_TO_DEVICE)
1077                                 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1078                         else
1079                                 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
1080                         bpl->tus.w = le32_to_cpu(bpl->tus.w);
1081
1082                         num_bde++;
1083                         curr_data++;
1084
1085                         if (split_offset)
1086                                 break;
1087
1088                         /* Move to the next s/g segment if possible */
1089                         sgde = sg_next(sgde);
1090                 }
1091
1092                 /* are we done ? */
1093                 if (curr_prot == protcnt) {
1094                         alldone = 1;
1095                 } else if (curr_prot < protcnt) {
1096                         /* advance to next prot buffer */
1097                         sgpe = sg_next(sgpe);
1098                         bpl++;
1099
1100                         /* update the reference tag */
1101                         reftag += protgrp_blks;
1102                 } else {
1103                         /* if we're here, we have a bug */
1104                         printk(KERN_ERR "BLKGRD: bug in %s\n", __func__);
1105                 }
1106
1107         } while (!alldone);
1108
1109 out:
1110
1111
1112         return num_bde;
1113 }
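/*
 * Worked example for the protection-group math above (assuming 512-byte
 * blocks): a protection s/g entry with sg_dma_len = 64 covers
 * protgrp_blks = 64 / 8 = 8 blocks, i.e. protgrp_bytes = 8 * 512 = 4096
 * data bytes.  Data BDEs are emitted until those 4096 bytes are consumed,
 * with split_offset carving up a data segment that straddles the group
 * boundary, and reftag then advances by 8 for the next group.
 */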
1114 /*
1115  * Given a SCSI command that supports DIF, determine composition of protection
1116  * groups involved in setting up buffer lists
1117  *
1118  * Returns: LPFC_PG_TYPE_NO_DIF, LPFC_PG_TYPE_DIF_BUF, or
1119  *          LPFC_PG_TYPE_INVALID (for unsupported protection operations)
1120  */
1121 static int
1122 lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
1123 {
1124         int ret = LPFC_PG_TYPE_INVALID;
1125         unsigned char op = scsi_get_prot_op(sc);
1126
1127         switch (op) {
1128         case SCSI_PROT_READ_STRIP:
1129         case SCSI_PROT_WRITE_INSERT:
1130                 ret = LPFC_PG_TYPE_NO_DIF;
1131                 break;
1132         case SCSI_PROT_READ_INSERT:
1133         case SCSI_PROT_WRITE_STRIP:
1134         case SCSI_PROT_READ_PASS:
1135         case SCSI_PROT_WRITE_PASS:
1136         case SCSI_PROT_WRITE_CONVERT:
1137         case SCSI_PROT_READ_CONVERT:
1138                 ret = LPFC_PG_TYPE_DIF_BUF;
1139                 break;
1140         default:
1141                 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1142                                 "9021 Unsupported protection op:%d\n", op);
1143                 break;
1144         }
1145
1146         return ret;
1147 }
1148
1149 /*
1150  * This is the protection/DIF aware version of
1151  * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the
1152  * two functions eventually, but for now, it's here
1153  */
1154 static int
1155 lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba,
1156                 struct lpfc_scsi_buf *lpfc_cmd)
1157 {
1158         struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
1159         struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
1160         struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
1161         IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
1162         uint32_t num_bde = 0;
1163         int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
1164         int prot_group_type = 0;
1165         int diflen, fcpdl;
1166         unsigned blksize;
1167
1168         /*
1169          * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd
1170          *  fcp_rsp regions to the first data bde entry
1171          */
1172         bpl += 2;
1173         if (scsi_sg_count(scsi_cmnd)) {
1174                 /*
1175                  * The driver stores the segment count returned from pci_map_sg
1176                  * because this is a count of dma-mappings used to map the use_sg
1177                  * pages.  They are not guaranteed to be the same for those
1178                  * architectures that implement an IOMMU.
1179                  */
1180                 datasegcnt = dma_map_sg(&phba->pcidev->dev,
1181                                         scsi_sglist(scsi_cmnd),
1182                                         scsi_sg_count(scsi_cmnd), datadir);
1183                 if (unlikely(!datasegcnt))
1184                         return 1;
1185
1186                 lpfc_cmd->seg_cnt = datasegcnt;
1187                 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
1188                         printk(KERN_ERR "%s: Too many sg segments from "
1189                                         "dma_map_sg.  Config %d, seg_cnt %d\n",
1190                                         __func__, phba->cfg_sg_seg_cnt,
1191                                         lpfc_cmd->seg_cnt);
1192                         scsi_dma_unmap(scsi_cmnd);
1193                         return 1;
1194                 }
1195
1196                 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
1197
1198                 switch (prot_group_type) {
1199                 case LPFC_PG_TYPE_NO_DIF:
1200                         num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
1201                                         datasegcnt);
1202                         /* we should have 2 or more entries in buffer list */
1203                         if (num_bde < 2)
1204                                 goto err;
1205                         break;
1206                 case LPFC_PG_TYPE_DIF_BUF:{
1207                         /*
1208                          * This type indicates that protection buffers are
1209                          * passed to the driver, so that needs to be prepared
1210                          * for DMA
1211                          */
1212                         protsegcnt = dma_map_sg(&phba->pcidev->dev,
1213                                         scsi_prot_sglist(scsi_cmnd),
1214                                         scsi_prot_sg_count(scsi_cmnd), datadir);
1215                         if (unlikely(!protsegcnt)) {
1216                                 scsi_dma_unmap(scsi_cmnd);
1217                                 return 1;
1218                         }
1219
1220                         lpfc_cmd->prot_seg_cnt = protsegcnt;
1221                         if (lpfc_cmd->prot_seg_cnt
1222                             > phba->cfg_prot_sg_seg_cnt) {
1223                                 printk(KERN_ERR "%s: Too many prot sg segments "
1224                                                 "from dma_map_sg.  Config %d,"
1225                                                 "prot_seg_cnt %d\n", __func__,
1226                                                 phba->cfg_prot_sg_seg_cnt,
1227                                                 lpfc_cmd->prot_seg_cnt);
1228                                 dma_unmap_sg(&phba->pcidev->dev,
1229                                              scsi_prot_sglist(scsi_cmnd),
1230                                              scsi_prot_sg_count(scsi_cmnd),
1231                                              datadir);
1232                                 scsi_dma_unmap(scsi_cmnd);
1233                                 return 1;
1234                         }
1235
1236                         num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
1237                                         datasegcnt, protsegcnt);
1238                         /* we should have 3 or more entries in buffer list */
1239                         if (num_bde < 3)
1240                                 goto err;
1241                         break;
1242                 }
1243                 case LPFC_PG_TYPE_INVALID:
1244                 default:
1245                         lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1246                                         "9022 Unexpected protection group %i\n",
1247                                         prot_group_type);
1248                         return 1;
1249                 }
1250         }
1251
1252         /*
1253          * Finish initializing those IOCB fields that are dependent on the
1254          * scsi_cmnd request_buffer.  Note that the bdeSize is explicitly
1255          * reinitialized since all iocb memory resources are used many times
1256          * for transmit, receive, and continuation bpl's.
1257          */
1258         iocb_cmd->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
1259         iocb_cmd->un.fcpi64.bdl.bdeSize += (num_bde * sizeof(struct ulp_bde64));
1260         iocb_cmd->ulpBdeCount = 1;
1261         iocb_cmd->ulpLe = 1;
1262
1263         fcpdl = scsi_bufflen(scsi_cmnd);
1264
1265         if (scsi_get_prot_type(scsi_cmnd) == SCSI_PROT_DIF_TYPE1) {
1266                 /*
1267                  * We are in DIF Type 1 mode
1268                  * Every data block has an 8 byte DIF (trailer)
1269                  * attached to it.  Must adjust the FCP data length.
1270                  */
1271                 blksize = lpfc_cmd_blksize(scsi_cmnd);
1272                 diflen = (fcpdl / blksize) * 8;
1273                 fcpdl += diflen;
1274         }
1275         fcp_cmnd->fcpDl = cpu_to_be32(fcpdl);
1276
1277         /*
1278          * Due to difference in data length between DIF/non-DIF paths,
1279          * we need to set word 4 of IOCB here
1280          */
1281         iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
1282
1283         return 0;
1284 err:
1285         lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
1286                         "9023 Could not setup all needed BDE's "
1287                         "prot_group_type=%d, num_bde=%d\n",
1288                         prot_group_type, num_bde);
1289         return 1;
1290 }
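/*
 * Illustrative fcpDl adjustment for DIF Type 1 (assuming 512-byte blocks):
 * a 4096-byte transfer spans 4096 / 512 = 8 blocks, so
 *
 *     diflen = 8 * 8     = 64
 *     fcpdl  = 4096 + 64 = 4160
 *
 * bytes travel on the wire for the nominally 4096-byte request.
 */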
1291
1292 /*
1293  * This function checks for BlockGuard errors detected by
1294  * the HBA.  In case of errors, the ASC/ASCQ fields in the
1295  * sense buffer will be set accordingly, paired with
1296  * ILLEGAL_REQUEST to signal to the kernel that the HBA
1297  * detected corruption.
1298  *
1299  * Returns:
1300  *  0 - No error found
1301  *  1 - BlockGuard error found
1302  * -1 - Internal error (bad profile, ...etc)
1303  */
1304 static int
1305 lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
1306                         struct lpfc_iocbq *pIocbOut)
1307 {
1308         struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
1309         struct sli3_bg_fields *bgf = &pIocbOut->iocb.unsli3.sli3_bg;
1310         int ret = 0;
1311         uint32_t bghm = bgf->bghm;
1312         uint32_t bgstat = bgf->bgstat;
1313         uint64_t failing_sector = 0;
1314
1315         printk(KERN_ERR "BG ERROR in cmd 0x%x lba 0x%llx blk cnt 0x%lx "
1316                         "bgstat=0x%x bghm=0x%x\n",
1317                         cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd),
1318                         cmd->request->nr_sectors, bgstat, bghm);
1319
1320         spin_lock(&_dump_buf_lock);
1321         if (!_dump_buf_done) {
1322                 printk(KERN_ERR "Saving Data for %u blocks to debugfs\n",
1323                                 (cmd->cmnd[7] << 8 | cmd->cmnd[8]));
1324                 lpfc_debug_save_data(cmd);
1325
1326                 /* If we have a prot sgl, save the DIF buffer */
1327                 if (lpfc_prot_group_type(phba, cmd) ==
1328                                 LPFC_PG_TYPE_DIF_BUF) {
1329                         printk(KERN_ERR "Saving DIF for %u blocks to debugfs\n",
1330                                         (cmd->cmnd[7] << 8 | cmd->cmnd[8]));
1331                         lpfc_debug_save_dif(cmd);
1332                 }
1333
1334                 _dump_buf_done = 1;
1335         }
1336         spin_unlock(&_dump_buf_lock);
1337
1338         if (lpfc_bgs_get_invalid_prof(bgstat)) {
1339                 cmd->result = ScsiResult(DID_ERROR, 0);
1340                 printk(KERN_ERR "Invalid BlockGuard profile. bgstat:0x%x\n",
1341                                 bgstat);
1342                 ret = (-1);
1343                 goto out;
1344         }
1345
1346         if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
1347                 cmd->result = ScsiResult(DID_ERROR, 0);
1348                 printk(KERN_ERR "Invalid BlockGuard DIF Block. bgstat:0x%x\n",
1349                                 bgstat);
1350                 ret = (-1);
1351                 goto out;
1352         }
1353
1354         if (lpfc_bgs_get_guard_err(bgstat)) {
1355                 ret = 1;
1356
1357                 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1358                                 0x10, 0x1);
1359                 cmd->result = DRIVER_SENSE << 24
1360                         | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
1361                 phba->bg_guard_err_cnt++;
1362                 printk(KERN_ERR "BLKGRD: guard_tag error\n");
1363         }
1364
1365         if (lpfc_bgs_get_reftag_err(bgstat)) {
1366                 ret = 1;
1367
1368                 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1369                                 0x10, 0x3);
1370                 cmd->result = DRIVER_SENSE << 24
1371                         | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
1372
1373                 phba->bg_reftag_err_cnt++;
1374                 printk(KERN_ERR "BLKGRD: ref_tag error\n");
1375         }
1376
1377         if (lpfc_bgs_get_apptag_err(bgstat)) {
1378                 ret = 1;
1379
1380                 scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
1381                                 0x10, 0x2);
1382                 cmd->result = DRIVER_SENSE << 24
1383                         | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
1384
1385                 phba->bg_apptag_err_cnt++;
1386                 printk(KERN_ERR "BLKGRD: app_tag error\n");
1387         }
1388
1389         if (lpfc_bgs_get_hi_water_mark_present(bgstat)) {
1390                 /*
1391                  * Set up sense data descriptor 0 per SPC-4 as an information
1392                  * field, and put the failing LBA in it.
1393                  */
1394                 cmd->sense_buffer[8] = 0;     /* Information */
1395                 cmd->sense_buffer[9] = 0xa;   /* Add. length */
1396                 bghm /= cmd->device->sector_size;
1397
1398                 failing_sector = scsi_get_lba(cmd);
1399                 failing_sector += bghm;
1400
1401                 put_unaligned_be64(failing_sector, &cmd->sense_buffer[10]);
1402         }
1403
1404         if (!ret) {
1405                 /* No error was reported - problem in FW? */
1406                 cmd->result = ScsiResult(DID_ERROR, 0);
1407                 printk(KERN_ERR "BLKGRD: no errors reported!\n");
1408         }
1409
1410 out:
1411         return ret;
1412 }
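
/*
 * Illustration only (hypothetical helper, not driver code): how a
 * caller maps the three lpfc_parse_bg_err() return values described
 * in the comment block above.
 */
static inline const char *lpfc_example_bg_result(int rc)
{
        if (rc < 0)
                return "internal error (e.g. bad profile)";
        if (rc > 0)
                return "BlockGuard error, sense data prepared";
        return "no BlockGuard error decoded";
}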
1413
1414 /**
1415  * lpfc_send_scsi_error_event - Posts an event when there is a SCSI error
1416  * @phba: Pointer to hba context object.
1417  * @vport: Pointer to vport object.
1418  * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
1419  * @rsp_iocb: Pointer to response iocb object which reported error.
1420  *
1421  * This function posts an event when a SCSI command reports an error
1422  * from the scsi device.
1423  **/
1424 static void
1425 lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
1426                 struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb) {
1427         struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
1428         struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
1429         uint32_t resp_info = fcprsp->rspStatus2;
1430         uint32_t scsi_status = fcprsp->rspStatus3;
1431         uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
1432         struct lpfc_fast_path_event *fast_path_evt = NULL;
1433         struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
1434         unsigned long flags;
1435
1436         /* If there is a queue full or busy condition, send a scsi event */
1437         if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
1438                 (cmnd->result == SAM_STAT_BUSY)) {
1439                 fast_path_evt = lpfc_alloc_fast_evt(phba);
1440                 if (!fast_path_evt)
1441                         return;
1442                 fast_path_evt->un.scsi_evt.event_type =
1443                         FC_REG_SCSI_EVENT;
1444                 fast_path_evt->un.scsi_evt.subcategory =
1445                 (cmnd->result == SAM_STAT_TASK_SET_FULL) ?
1446                 LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
1447                 fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
1448                 memcpy(&fast_path_evt->un.scsi_evt.wwpn,
1449                         &pnode->nlp_portname, sizeof(struct lpfc_name));
1450                 memcpy(&fast_path_evt->un.scsi_evt.wwnn,
1451                         &pnode->nlp_nodename, sizeof(struct lpfc_name));
1452         } else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
1453                 ((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
1454                 fast_path_evt = lpfc_alloc_fast_evt(phba);
1455                 if (!fast_path_evt)
1456                         return;
1457                 fast_path_evt->un.check_cond_evt.scsi_event.event_type =
1458                         FC_REG_SCSI_EVENT;
1459                 fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
1460                         LPFC_EVENT_CHECK_COND;
1461                 fast_path_evt->un.check_cond_evt.scsi_event.lun =
1462                         cmnd->device->lun;
1463                 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
1464                         &pnode->nlp_portname, sizeof(struct lpfc_name));
1465                 memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
1466                         &pnode->nlp_nodename, sizeof(struct lpfc_name));
1467                 fast_path_evt->un.check_cond_evt.sense_key =
1468                         cmnd->sense_buffer[2] & 0xf;
1469                 fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
1470                 fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
1471         } else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
1472                      fcpi_parm &&
1473                      ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
1474                         ((scsi_status == SAM_STAT_GOOD) &&
1475                         !(resp_info & (RESID_UNDER | RESID_OVER))))) {
1476                 /*
1477                  * If status is good or resid does not match with fcp_param and
1478                  * there is valid fcpi_parm, then there is a read_check error
1479                  */
1480                 fast_path_evt = lpfc_alloc_fast_evt(phba);
1481                 if (!fast_path_evt)
1482                         return;
1483                 fast_path_evt->un.read_check_error.header.event_type =
1484                         FC_REG_FABRIC_EVENT;
1485                 fast_path_evt->un.read_check_error.header.subcategory =
1486                         LPFC_EVENT_FCPRDCHKERR;
1487                 memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
1488                         &pnode->nlp_portname, sizeof(struct lpfc_name));
1489                 memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
1490                         &pnode->nlp_nodename, sizeof(struct lpfc_name));
1491                 fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
1492                 fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
1493                 fast_path_evt->un.read_check_error.fcpiparam =
1494                         fcpi_parm;
1495         } else
1496                 return;
1497
1498         fast_path_evt->vport = vport;
1499         spin_lock_irqsave(&phba->hbalock, flags);
1500         list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
1501         spin_unlock_irqrestore(&phba->hbalock, flags);
1502         lpfc_worker_wake_up(phba);
1503         return;
1504 }
1505
1506 /**
1507  * lpfc_scsi_unprep_dma_buf - Routine to un-map DMA mapping of scatter gather
1508  * @phba: The Hba for which this call is being executed.
1509  * @psb: The scsi buffer which is going to be un-mapped.
1510  *
1511  * This routine does DMA un-mapping of the scatter gather list of the
1512  * scsi command field of @psb.
1513  **/
1514 static void
1515 lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
1516 {
1517         /*
1518          * There are only two special cases to consider.  (1) the scsi command
1519          * requested scatter-gather usage or (2) the scsi command allocated
1520          * a request buffer, but did not request use_sg.  There is a third
1521          * case, but it does not require resource deallocation.
1522          */
1523         if (psb->seg_cnt > 0)
1524                 scsi_dma_unmap(psb->pCmd);
1525         if (psb->prot_seg_cnt > 0)
1526                 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
1527                                 scsi_prot_sg_count(psb->pCmd),
1528                                 psb->pCmd->sc_data_direction);
1529 }
1530
1531 /**
1532  * lpfc_handle_fcp_err - FCP response handler
1533  * @vport: The virtual port for which this call is being executed.
1534  * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
1535  * @rsp_iocb: The response IOCB which contains FCP error.
1536  *
1537  * This routine is called to process response IOCB with status field
1538  * IOSTAT_FCP_RSP_ERROR. This routine sets the result field of the scsi
1539  * command based upon the SCSI and FCP error.
1540  **/
1541 static void
1542 lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
1543                     struct lpfc_iocbq *rsp_iocb)
1544 {
1545         struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
1546         struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
1547         struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
1548         uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
1549         uint32_t resp_info = fcprsp->rspStatus2;
1550         uint32_t scsi_status = fcprsp->rspStatus3;
1551         uint32_t *lp;
1552         uint32_t host_status = DID_OK;
1553         uint32_t rsplen = 0;
1554         uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
1555
1556
1557         /*
1558          *  If this is a task management command, there is no
1559          *  scsi packet associated with this lpfc_cmd.  The driver
1560          *  consumes it.
1561          */
1562         if (fcpcmd->fcpCntl2) {
1563                 scsi_status = 0;
1564                 goto out;
1565         }
1566
1567         if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
1568                 uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
1569                 if (snslen > SCSI_SENSE_BUFFERSIZE)
1570                         snslen = SCSI_SENSE_BUFFERSIZE;
1571
1572                 if (resp_info & RSP_LEN_VALID)
1573                         rsplen = be32_to_cpu(fcprsp->rspRspLen);
1574                 memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
1575         }
1576         lp = (uint32_t *)cmnd->sense_buffer;
1577
1578         if (!scsi_status && (resp_info & RESID_UNDER))
1579                 logit = LOG_FCP;
1580
1581         lpfc_printf_vlog(vport, KERN_WARNING, logit,
1582                          "9024 FCP command x%x failed: x%x SNS x%x x%x "
1583                          "Data: x%x x%x x%x x%x x%x\n",
1584                          cmnd->cmnd[0], scsi_status,
1585                          be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
1586                          be32_to_cpu(fcprsp->rspResId),
1587                          be32_to_cpu(fcprsp->rspSnsLen),
1588                          be32_to_cpu(fcprsp->rspRspLen),
1589                          fcprsp->rspInfo3);
1590
1591         if (resp_info & RSP_LEN_VALID) {
1592                 rsplen = be32_to_cpu(fcprsp->rspRspLen);
1593                 if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
1594                     (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
1595                         host_status = DID_ERROR;
1596                         goto out;
1597                 }
1598         }
1599
1600         scsi_set_resid(cmnd, 0);
1601         if (resp_info & RESID_UNDER) {
1602                 scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
1603
1604                 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
1605                                  "9025 FCP Read Underrun, expected %d, "
1606                                  "residual %d Data: x%x x%x x%x\n",
1607                                  be32_to_cpu(fcpcmd->fcpDl),
1608                                  scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
1609                                  cmnd->underflow);
1610
1611                 /*
1612                  * If there is an underrun, check whether the underrun reported
1613                  * by the storage array matches the underrun reported by the HBA.
1614                  * If they differ, a frame was dropped.
1615                  */
1616                 if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
1617                         fcpi_parm &&
1618                         (scsi_get_resid(cmnd) != fcpi_parm)) {
1619                         lpfc_printf_vlog(vport, KERN_WARNING,
1620                                          LOG_FCP | LOG_FCP_ERROR,
1621                                          "9026 FCP Read Check Error "
1622                                          "and Underrun Data: x%x x%x x%x x%x\n",
1623                                          be32_to_cpu(fcpcmd->fcpDl),
1624                                          scsi_get_resid(cmnd), fcpi_parm,
1625                                          cmnd->cmnd[0]);
1626                         scsi_set_resid(cmnd, scsi_bufflen(cmnd));
1627                         host_status = DID_ERROR;
1628                 }
1629                 /*
1630                  * The cmnd->underflow is the minimum number of bytes that must
1631                  * be transferred for this command.  Provided a sense condition
1632                  * is not present, make sure the actual amount transferred is at
1633                  * least the underflow value or fail.
1634                  */
1635                 if (!(resp_info & SNS_LEN_VALID) &&
1636                     (scsi_status == SAM_STAT_GOOD) &&
1637                     (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
1638                      < cmnd->underflow)) {
1639                         lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
1640                                          "9027 FCP command x%x residual "
1641                                          "underrun converted to error "
1642                                          "Data: x%x x%x x%x\n",
1643                                          cmnd->cmnd[0], scsi_bufflen(cmnd),
1644                                          scsi_get_resid(cmnd), cmnd->underflow);
1645                         host_status = DID_ERROR;
1646                 }
1647         } else if (resp_info & RESID_OVER) {
1648                 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
1649                                  "9028 FCP command x%x residual overrun error. "
1650                                  "Data: x%x x%x\n", cmnd->cmnd[0],
1651                                  scsi_bufflen(cmnd), scsi_get_resid(cmnd));
1652                 host_status = DID_ERROR;
1653
1654         /*
1655          * Check SLI validation that all the transfer was actually done
1656          * (fcpi_parm should be zero). Apply check only to reads.
1657          */
1658         } else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
1659                         (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
1660                 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
1661                                  "9029 FCP Read Check Error Data: "
1662                                  "x%x x%x x%x x%x\n",
1663                                  be32_to_cpu(fcpcmd->fcpDl),
1664                                  be32_to_cpu(fcprsp->rspResId),
1665                                  fcpi_parm, cmnd->cmnd[0]);
1666                 host_status = DID_ERROR;
1667                 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
1668         }
1669
1670  out:
1671         cmnd->result = ScsiResult(host_status, scsi_status);
1672         lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
1673 }
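
/*
 * Sketch with hypothetical numbers (illustration only, helper is not
 * driver code): the dropped frame check above. If the target reports
 * rspResId = 4096 but the HBA counted a residual of 8192 in
 * fcpi_parm, the two underruns disagree and the command is failed
 * with DID_ERROR.
 */
static inline int lpfc_example_dropped_frame(uint32_t target_resid,
                                             uint32_t hba_resid)
{
        /* the target- and HBA-reported underruns must agree */
        return target_resid != hba_resid;
}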
1674
1675 /**
1676  * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
1677  * @phba: The Hba for which this call is being executed.
1678  * @pIocbIn: The command IOCBQ for the scsi cmnd.
1679  * @pIocbOut: The response IOCBQ for the scsi cmnd.
1680  *
1681  * This routine assigns the scsi command result by examining the response
1682  * IOCB status field. It also handles the QUEUE FULL condition by ramping
1683  * down the device queue depth.
1684  **/
1685 static void
1686 lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
1687                         struct lpfc_iocbq *pIocbOut)
1688 {
1689         struct lpfc_scsi_buf *lpfc_cmd =
1690                 (struct lpfc_scsi_buf *) pIocbIn->context1;
1691         struct lpfc_vport      *vport = pIocbIn->vport;
1692         struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
1693         struct lpfc_nodelist *pnode = rdata->pnode;
1694         struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
1695         int result;
1696         struct scsi_device *tmp_sdev;
1697         int depth = 0;
1698         unsigned long flags;
1699         struct lpfc_fast_path_event *fast_path_evt;
1700         struct Scsi_Host *shost = cmd->device->host;
1701         uint32_t queue_depth, scsi_id;
1702
1703         lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
1704         lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
1705         if (pnode && NLP_CHK_NODE_ACT(pnode))
1706                 atomic_dec(&pnode->cmd_pending);
1707
1708         if (lpfc_cmd->status) {
1709                 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
1710                     (lpfc_cmd->result & IOERR_DRVR_MASK))
1711                         lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
1712                 else if (lpfc_cmd->status >= IOSTAT_CNT)
1713                         lpfc_cmd->status = IOSTAT_DEFAULT;
1714
1715                 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
1716                                  "9030 FCP cmd x%x failed <%d/%d> "
1717                                  "status: x%x result: x%x Data: x%x x%x\n",
1718                                  cmd->cmnd[0],
1719                                  cmd->device ? cmd->device->id : 0xffff,
1720                                  cmd->device ? cmd->device->lun : 0xffff,
1721                                  lpfc_cmd->status, lpfc_cmd->result,
1722                                  pIocbOut->iocb.ulpContext,
1723                                  lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
1724
1725                 switch (lpfc_cmd->status) {
1726                 case IOSTAT_FCP_RSP_ERROR:
1727                         /* Call FCP RSP handler to determine result */
1728                         lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
1729                         break;
1730                 case IOSTAT_NPORT_BSY:
1731                 case IOSTAT_FABRIC_BSY:
1732                         cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
1733                         fast_path_evt = lpfc_alloc_fast_evt(phba);
1734                         if (!fast_path_evt)
1735                                 break;
1736                         fast_path_evt->un.fabric_evt.event_type =
1737                                 FC_REG_FABRIC_EVENT;
1738                         fast_path_evt->un.fabric_evt.subcategory =
1739                                 (lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
1740                                 LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
1741                         if (pnode && NLP_CHK_NODE_ACT(pnode)) {
1742                                 memcpy(&fast_path_evt->un.fabric_evt.wwpn,
1743                                         &pnode->nlp_portname,
1744                                         sizeof(struct lpfc_name));
1745                                 memcpy(&fast_path_evt->un.fabric_evt.wwnn,
1746                                         &pnode->nlp_nodename,
1747                                         sizeof(struct lpfc_name));
1748                         }
1749                         fast_path_evt->vport = vport;
1750                         fast_path_evt->work_evt.evt =
1751                                 LPFC_EVT_FASTPATH_MGMT_EVT;
1752                         spin_lock_irqsave(&phba->hbalock, flags);
1753                         list_add_tail(&fast_path_evt->work_evt.evt_listp,
1754                                 &phba->work_list);
1755                         spin_unlock_irqrestore(&phba->hbalock, flags);
1756                         lpfc_worker_wake_up(phba);
1757                         break;
1758                 case IOSTAT_LOCAL_REJECT:
1759                         if (lpfc_cmd->result == IOERR_INVALID_RPI ||
1760                             lpfc_cmd->result == IOERR_NO_RESOURCES ||
1761                             lpfc_cmd->result == IOERR_ABORT_REQUESTED) {
1762                                 cmd->result = ScsiResult(DID_REQUEUE, 0);
1763                                 break;
1764                         }
1765
1766                         if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
1767                              lpfc_cmd->result == IOERR_TX_DMA_FAILED) &&
1768                              pIocbOut->iocb.unsli3.sli3_bg.bgstat) {
1769                                 if (scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
1770                                         /*
1771                                          * This is a response for a BG enabled
1772                                          * cmd. Parse BG error
1773                                          */
1774                                         lpfc_parse_bg_err(phba, lpfc_cmd,
1775                                                         pIocbOut);
1776                                         break;
1777                                 } else {
1778                                         lpfc_printf_vlog(vport, KERN_WARNING,
1779                                                         LOG_BG,
1780                                                         "9031 non-zero BGSTAT "
1781                                                         "on unprotected cmd\n");
1782                                 }
1783                         }
1784
1785                 /* else: fall through */
1786                 default:
1787                         cmd->result = ScsiResult(DID_ERROR, 0);
1788                         break;
1789                 }
1790
1791                 if (!pnode || !NLP_CHK_NODE_ACT(pnode)
1792                     || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
1793                         cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
1794                                                  SAM_STAT_BUSY);
1795         } else {
1796                 cmd->result = ScsiResult(DID_OK, 0);
1797         }
1798
1799         if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
1800                 uint32_t *lp = (uint32_t *)cmd->sense_buffer;
1801
1802                 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
1803                                  "0710 Iodone <%d/%d> cmd %p, error "
1804                                  "x%x SNS x%x x%x Data: x%x x%x\n",
1805                                  cmd->device->id, cmd->device->lun, cmd,
1806                                  cmd->result, *lp, *(lp + 3), cmd->retries,
1807                                  scsi_get_resid(cmd));
1808         }
1809
1810         lpfc_update_stats(phba, lpfc_cmd);
1811         result = cmd->result;
1812         if (vport->cfg_max_scsicmpl_time &&
1813            time_after(jiffies, lpfc_cmd->start_time +
1814                 msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
1815                 spin_lock_irqsave(shost->host_lock, flags);
1816                 if (pnode && NLP_CHK_NODE_ACT(pnode)) {
1817                         if (pnode->cmd_qdepth >
1818                                 atomic_read(&pnode->cmd_pending) &&
1819                                 (atomic_read(&pnode->cmd_pending) >
1820                                 LPFC_MIN_TGT_QDEPTH) &&
1821                                 ((cmd->cmnd[0] == READ_10) ||
1822                                 (cmd->cmnd[0] == WRITE_10)))
1823                                 pnode->cmd_qdepth =
1824                                         atomic_read(&pnode->cmd_pending);
1825
1826                         pnode->last_change_time = jiffies;
1827                 }
1828                 spin_unlock_irqrestore(shost->host_lock, flags);
1829         } else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
1830                 if ((pnode->cmd_qdepth < LPFC_MAX_TGT_QDEPTH) &&
1831                    time_after(jiffies, pnode->last_change_time +
1832                               msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
1833                         spin_lock_irqsave(shost->host_lock, flags);
1834                         pnode->cmd_qdepth += pnode->cmd_qdepth *
1835                                 LPFC_TGTQ_RAMPUP_PCENT / 100;
1836                         if (pnode->cmd_qdepth > LPFC_MAX_TGT_QDEPTH)
1837                                 pnode->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
1838                         pnode->last_change_time = jiffies;
1839                         spin_unlock_irqrestore(shost->host_lock, flags);
1840                 }
1841         }
1842
1843         lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
1844
1845         /* The sdev is not guaranteed to be valid post scsi_done upcall. */
1846         queue_depth = cmd->device->queue_depth;
1847         scsi_id = cmd->device->id;
1848         cmd->scsi_done(cmd);
1849
1850         if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
1851                 /*
1852                  * If there is a thread waiting for command completion
1853                  * wake up the thread.
1854                  */
1855                 spin_lock_irqsave(shost->host_lock, flags);
1856                 lpfc_cmd->pCmd = NULL;
1857                 if (lpfc_cmd->waitq)
1858                         wake_up(lpfc_cmd->waitq);
1859                 spin_unlock_irqrestore(shost->host_lock, flags);
1860                 lpfc_release_scsi_buf(phba, lpfc_cmd);
1861                 return;
1862         }
1863
1864
1865         if (!result)
1866                 lpfc_rampup_queue_depth(vport, queue_depth);
1867
1868         if (!result && pnode && NLP_CHK_NODE_ACT(pnode) &&
1869            ((jiffies - pnode->last_ramp_up_time) >
1870                 LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
1871            ((jiffies - pnode->last_q_full_time) >
1872                 LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
1873            (vport->cfg_lun_queue_depth > queue_depth)) {
1874                 shost_for_each_device(tmp_sdev, shost) {
1875                         if (vport->cfg_lun_queue_depth > tmp_sdev->queue_depth) {
1876                                 if (tmp_sdev->id != scsi_id)
1877                                         continue;
1878                                 if (tmp_sdev->ordered_tags)
1879                                         scsi_adjust_queue_depth(tmp_sdev,
1880                                                 MSG_ORDERED_TAG,
1881                                                 tmp_sdev->queue_depth+1);
1882                                 else
1883                                         scsi_adjust_queue_depth(tmp_sdev,
1884                                                 MSG_SIMPLE_TAG,
1885                                                 tmp_sdev->queue_depth+1);
1886
1887                                 pnode->last_ramp_up_time = jiffies;
1888                         }
1889                 }
1890                 lpfc_send_sdev_queuedepth_change_event(phba, vport, pnode,
1891                         0xFFFFFFFF,
1892                         queue_depth, queue_depth + 1);
1893         }
1894
1895         /*
1896          * Check for queue full.  If the lun is reporting queue full, then
1897          * back off the lun queue depth to prevent target overloads.
1898          */
1899         if (result == SAM_STAT_TASK_SET_FULL && pnode &&
1900             NLP_CHK_NODE_ACT(pnode)) {
1901                 pnode->last_q_full_time = jiffies;
1902
1903                 shost_for_each_device(tmp_sdev, shost) {
1904                         if (tmp_sdev->id != scsi_id)
1905                                 continue;
1906                         depth = scsi_track_queue_full(tmp_sdev,
1907                                         tmp_sdev->queue_depth - 1);
1908                 }
1909                 /*
1910                  * The queue depth cannot be lowered any more.
1911                  * Modify the returned error code to store
1912                  * the final depth value set by
1913                  * scsi_track_queue_full.
1914                  */
1915                 if (depth == -1)
1916                         depth = shost->cmd_per_lun;
1917
1918                 if (depth) {
1919                         lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
1920                                          "0711 detected queue full - lun queue "
1921                                          "depth adjusted to %d.\n", depth);
1922                         lpfc_send_sdev_queuedepth_change_event(phba, vport,
1923                                 pnode, 0xFFFFFFFF,
1924                                 depth+1, depth);
1925                 }
1926         }
1927
1928         /*
1929          * If there is a thread waiting for command completion
1930          * wake up the thread.
1931          */
1932         spin_lock_irqsave(shost->host_lock, flags);
1933         lpfc_cmd->pCmd = NULL;
1934         if (lpfc_cmd->waitq)
1935                 wake_up(lpfc_cmd->waitq);
1936         spin_unlock_irqrestore(shost->host_lock, flags);
1937
1938         lpfc_release_scsi_buf(phba, lpfc_cmd);
1939 }
1940
1941 /**
1942  * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
1943  * @data: A pointer to the immediate command data portion of the IOCB.
1944  * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
1945  *
1946  * The routine copies the entire FCP command from @fcp_cmnd to @data while
1947  * byte swapping the data to big endian format for transmission on the wire.
1948  **/
1949 static void
1950 lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
1951 {
1952         int i, j;
1953         for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
1954              i += sizeof(uint32_t), j++) {
1955                 ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
1956         }
1957 }
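
/*
 * Byte-order illustration (hypothetical helper, not driver code):
 * the same conversion the loop above applies to every 32-bit word of
 * the FCP command. On a little-endian host, a CPU-order word
 * 0x12345678 is emitted on the wire as the bytes 12 34 56 78.
 */
static inline __be32 lpfc_example_wire_word(uint32_t cpu_word)
{
        return cpu_to_be32(cpu_word);
}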
1958
1959 /**
1960  * lpfc_scsi_prep_cmnd - Routine to convert scsi cmnd to FCP information unit
1961  * @vport: The virtual port for which this call is being executed.
1962  * @lpfc_cmd: The scsi command which needs to be sent.
1963  * @pnode: Pointer to lpfc_nodelist.
1964  *
1965  * This routine initializes the fcp_cmnd and iocb data structures from the
1966  * scsi command to be transferred.
1967  **/
1968 static void
1969 lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
1970                     struct lpfc_nodelist *pnode)
1971 {
1972         struct lpfc_hba *phba = vport->phba;
1973         struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
1974         struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
1975         IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
1976         struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
1977         int datadir = scsi_cmnd->sc_data_direction;
1978         char tag[2];
1979
1980         if (!pnode || !NLP_CHK_NODE_ACT(pnode))
1981                 return;
1982
1983         lpfc_cmd->fcp_rsp->rspSnsLen = 0;
1984         /* clear task management bits */
1985         lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
1986
1987         int_to_scsilun(lpfc_cmd->pCmd->device->lun,
1988                         &lpfc_cmd->fcp_cmnd->fcp_lun);
1989
1990         memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);
1991
1992         if (scsi_populate_tag_msg(scsi_cmnd, tag)) {
1993                 switch (tag[0]) {
1994                 case HEAD_OF_QUEUE_TAG:
1995                         fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
1996                         break;
1997                 case ORDERED_QUEUE_TAG:
1998                         fcp_cmnd->fcpCntl1 = ORDERED_Q;
1999                         break;
2000                 default:
2001                         fcp_cmnd->fcpCntl1 = SIMPLE_Q;
2002                         break;
2003                 }
2004         } else
2005                 fcp_cmnd->fcpCntl1 = 0;
2006
2007         /*
2008          * There are three possibilities here - use scatter-gather segment, use
2009          * the single mapping, or neither.  Start the lpfc command prep by
2010          * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
2011          * data bde entry.
2012          */
2013         if (scsi_sg_count(scsi_cmnd)) {
2014                 if (datadir == DMA_TO_DEVICE) {
2015                         iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
2016                         iocb_cmd->un.fcpi.fcpi_parm = 0;
2017                         iocb_cmd->ulpPU = 0;
2018                         fcp_cmnd->fcpCntl3 = WRITE_DATA;
2019                         phba->fc4OutputRequests++;
2020                 } else {
2021                         iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
2022                         iocb_cmd->ulpPU = PARM_READ_CHECK;
2023                         fcp_cmnd->fcpCntl3 = READ_DATA;
2024                         phba->fc4InputRequests++;
2025                 }
2026         } else {
2027                 iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
2028                 iocb_cmd->un.fcpi.fcpi_parm = 0;
2029                 iocb_cmd->ulpPU = 0;
2030                 fcp_cmnd->fcpCntl3 = 0;
2031                 phba->fc4ControlRequests++;
2032         }
2033         if (phba->sli_rev == 3 &&
2034             !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
2035                 lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
2036         /*
2037          * Finish initializing those IOCB fields that are independent
2038          * of the scsi_cmnd request_buffer
2039          */
2040         piocbq->iocb.ulpContext = pnode->nlp_rpi;
2041         if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
2042                 piocbq->iocb.ulpFCP2Rcvy = 1;
2043         else
2044                 piocbq->iocb.ulpFCP2Rcvy = 0;
2045
2046         piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
2047         piocbq->context1  = lpfc_cmd;
2048         piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
2049         piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
2050         piocbq->vport = vport;
2051 }
2052
2053 /**
2054  * lpfc_scsi_prep_task_mgmt_cmnd - Convert scsi TM cmnd to FCP information unit
2055  * @vport: The virtual port for which this call is being executed.
2056  * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
2057  * @lun: Logical unit number.
2058  * @task_mgmt_cmd: SCSI task management command.
2059  *
2060  * This routine creates an FCP information unit corresponding to @task_mgmt_cmd.
2061  *
2062  * Return codes:
2063  *   0 - Error
2064  *   1 - Success
2065  **/
2066 static int
2067 lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
2068                              struct lpfc_scsi_buf *lpfc_cmd,
2069                              unsigned int lun,
2070                              uint8_t task_mgmt_cmd)
2071 {
2072         struct lpfc_iocbq *piocbq;
2073         IOCB_t *piocb;
2074         struct fcp_cmnd *fcp_cmnd;
2075         struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
2076         struct lpfc_nodelist *ndlp = rdata->pnode;
2077
2078         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
2079             ndlp->nlp_state != NLP_STE_MAPPED_NODE)
2080                 return 0;
2081
2082         piocbq = &(lpfc_cmd->cur_iocbq);
2083         piocbq->vport = vport;
2084
2085         piocb = &piocbq->iocb;
2086
2087         fcp_cmnd = lpfc_cmd->fcp_cmnd;
2088         /* Clear out any old data in the FCP command area */
2089         memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
2090         int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
2091         fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
2092         if (vport->phba->sli_rev == 3 &&
2093             !(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
2094                 lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
2095         piocb->ulpCommand = CMD_FCP_ICMND64_CR;
2096         piocb->ulpContext = ndlp->nlp_rpi;
2097         if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
2098                 piocb->ulpFCP2Rcvy = 1;
2099         }
2100         piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
2101
2102         /* ulpTimeout is only one byte */
2103         if (lpfc_cmd->timeout > 0xff) {
2104                 /*
2105                  * Do not timeout the command at the firmware level.
2106                  * The driver will provide the timeout mechanism.
2107                  */
2108                 piocb->ulpTimeout = 0;
2109         } else {
2110                 piocb->ulpTimeout = lpfc_cmd->timeout;
2111         }
2112
2113         return 1;
2114 }
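
/*
 * Sketch (hypothetical helper, illustration only): the one-byte
 * ulpTimeout clamp above. A timeout of, say, 300 seconds does not
 * fit in the 8-bit field, so the firmware timer is disabled (0) and
 * the driver enforces the timeout itself; anything up to 255 seconds
 * is passed through.
 */
static inline uint8_t lpfc_example_ulp_timeout(uint32_t timeout)
{
        return (timeout > 0xff) ? 0 : (uint8_t)timeout;
}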
2115
2116 /**
2117  * lpfc_tskmgmt_def_cmpl - IOCB completion routine for task management command
2118  * @phba: The Hba for which this call is being executed.
2119  * @cmdiocbq: Pointer to lpfc_iocbq data structure.
2120  * @rspiocbq: Pointer to lpfc_iocbq data structure.
2121  *
2122  * This routine is the IOCB completion routine for the device reset and
2123  * target reset routines. It releases the scsi buffer associated with the command.
2124  **/
2125 static void
2126 lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
2127                         struct lpfc_iocbq *cmdiocbq,
2128                         struct lpfc_iocbq *rspiocbq)
2129 {
2130         struct lpfc_scsi_buf *lpfc_cmd =
2131                 (struct lpfc_scsi_buf *) cmdiocbq->context1;
2132         if (lpfc_cmd)
2133                 lpfc_release_scsi_buf(phba, lpfc_cmd);
2134         return;
2135 }
2136
2137 /**
2138  * lpfc_scsi_tgt_reset - Target reset handler
2139  * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure
2140  * @vport: The virtual port for which this call is being executed.
2141  * @tgt_id: Target ID.
2142  * @lun: Lun number.
2143  * @rdata: Pointer to lpfc_rport_data.
2144  *
2145  * This routine issues a TARGET RESET iocb to reset a target with @tgt_id ID.
2146  *
2147  * Return Code:
2148  *   0x2003 - Error
2149  *   0x2002 - Success.
2150  **/
2151 static int
2152 lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
2153                     unsigned int tgt_id, unsigned int lun,
2154                     struct lpfc_rport_data *rdata)
2155 {
2156         struct lpfc_hba   *phba = vport->phba;
2157         struct lpfc_iocbq *iocbq;
2158         struct lpfc_iocbq *iocbqrsp;
2159         int ret;
2160         int status;
2161
2162         if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
2163                 return FAILED;
2164
2165         lpfc_cmd->rdata = rdata;
2166         status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
2167                                            FCP_TARGET_RESET);
2168         if (!status)
2169                 return FAILED;
2170
2171         iocbq = &lpfc_cmd->cur_iocbq;
2172         iocbqrsp = lpfc_sli_get_iocbq(phba);
2173
2174         if (!iocbqrsp)
2175                 return FAILED;
2176
2177         /* Issue Target Reset to TGT <num> */
2178         lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
2179                          "0702 Issue Target Reset to TGT %d Data: x%x x%x\n",
2180                          tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
2181         status = lpfc_sli_issue_iocb_wait(phba,
2182                                        &phba->sli.ring[phba->sli.fcp_ring],
2183                                        iocbq, iocbqrsp, lpfc_cmd->timeout);
2184         if (status != IOCB_SUCCESS) {
2185                 if (status == IOCB_TIMEDOUT) {
2186                         iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
2187                         ret = TIMEOUT_ERROR;
2188                 } else
2189                         ret = FAILED;
2190                 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
2191         } else {
2192                 ret = SUCCESS;
2193                 lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
2194                 lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
2195                 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
2196                         (lpfc_cmd->result & IOERR_DRVR_MASK))
2197                                 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
2198         }
2199
2200         lpfc_sli_release_iocbq(phba, iocbqrsp);
2201         return ret;
2202 }
2203
2204 /**
2205  * lpfc_info - Info entry point of scsi_host_template data structure
2206  * @host: The scsi host for which this call is being executed.
2207  *
2208  * This routine provides module information about the hba.
2209  *
2210  * Return code:
2211  *   Pointer to char - Success.
2212  **/
2213 const char *
2214 lpfc_info(struct Scsi_Host *host)
2215 {
2216         struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
2217         struct lpfc_hba   *phba = vport->phba;
2218         int len;
2219         static char  lpfcinfobuf[384];
2220
2221         memset(lpfcinfobuf, 0, 384);
2222         if (phba && phba->pcidev) {
2223                 strncpy(lpfcinfobuf, phba->ModelDesc, 256);
2224                 len = strlen(lpfcinfobuf);
2225                 snprintf(lpfcinfobuf + len,
2226                         384-len,
2227                         " on PCI bus %02x device %02x irq %d",
2228                         phba->pcidev->bus->number,
2229                         phba->pcidev->devfn,
2230                         phba->pcidev->irq);
2231                 len = strlen(lpfcinfobuf);
2232                 if (phba->Port[0]) {
2233                         snprintf(lpfcinfobuf + len,
2234                                  384-len,
2235                                  " port %s",
2236                                  phba->Port);
2237                 }
2238         }
2239         return lpfcinfobuf;
2240 }
2241
2242 /**
2243  * lpfc_poll_rearm_timer - Routine to modify the fcp_poll timer of the hba
2244  * @phba: The Hba for which this call is being executed.
2245  *
2246  * This routine modifies the fcp_poll_timer field of @phba by cfg_poll_tmo.
2247  * The default value of cfg_poll_tmo is 10 milliseconds.
2248  **/
2249 static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
2250 {
2251         unsigned long  poll_tmo_expires =
2252                 (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
2253
2254         if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
2255                 mod_timer(&phba->fcp_poll_timer,
2256                           poll_tmo_expires);
2257 }
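
/*
 * Timing illustration (hypothetical helper; assumes the 10 ms default
 * cfg_poll_tmo noted above): each rearm pushes the timer out
 * cfg_poll_tmo milliseconds from "now", but only while commands
 * remain on the FCP txcmplq.
 */
static inline unsigned long lpfc_example_poll_expiry(unsigned int tmo_ms)
{
        /* same arithmetic as lpfc_poll_rearm_timer() */
        return jiffies + msecs_to_jiffies(tmo_ms);
}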
2258
2259 /**
2260  * lpfc_poll_start_timer - Routine to start fcp_poll_timer of HBA
2261  * @phba: The Hba for which this call is being executed.
2262  *
2263  * This routine starts the fcp_poll_timer of @phba.
2264  **/
2265 void lpfc_poll_start_timer(struct lpfc_hba * phba)
2266 {
2267         lpfc_poll_rearm_timer(phba);
2268 }
2269
2270 /**
2271  * lpfc_poll_timeout - Restart polling timer
2272  * @ptr: Map to lpfc_hba data structure pointer.
2273  *
2274  * This routine restarts the fcp_poll timer when FCP ring polling is
2275  * enabled and the FCP ring interrupt is disabled.
2276  **/
2277
2278 void lpfc_poll_timeout(unsigned long ptr)
2279 {
2280         struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
2281
2282         if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
2283                 lpfc_sli_poll_fcp_ring (phba);
2284                 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
2285                         lpfc_poll_rearm_timer(phba);
2286         }
2287 }
2288
2289 /**
2290  * lpfc_queuecommand - scsi_host_template queuecommand entry point
2291  * @cmnd: Pointer to scsi_cmnd data structure.
2292  * @done: Pointer to done routine.
2293  *
2294  * The driver registers this routine with the scsi midlayer to submit a @cmnd
2295  * for processing. It prepares an IOCB from the scsi command and provides it
2296  * to the firmware; the @done callback is invoked once processing completes.
2297  *
2298  * Return value :
2299  *   0 - Success
2300  *   SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
2301  **/
2302 static int
2303 lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
2304 {
2305         struct Scsi_Host  *shost = cmnd->device->host;
2306         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2307         struct lpfc_hba   *phba = vport->phba;
2308         struct lpfc_sli   *psli = &phba->sli;
2309         struct lpfc_rport_data *rdata = cmnd->device->hostdata;
2310         struct lpfc_nodelist *ndlp = rdata->pnode;
2311         struct lpfc_scsi_buf *lpfc_cmd;
2312         struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
2313         int err;
2314
2315         err = fc_remote_port_chkready(rport);
2316         if (err) {
2317                 cmnd->result = err;
2318                 goto out_fail_command;
2319         }
2320
2321         if (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
2322                 scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
2323
2324                 printk(KERN_ERR "BLKGRD ERROR: rcvd protected cmd:%02x op:%02x "
2325                                 "str=%s without registering for BlockGuard - "
2326                                 "Rejecting command\n",
2327                                 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
2328                                 dif_op_str[scsi_get_prot_op(cmnd)]);
2329                 goto out_fail_command;
2330         }
2331
2332         /*
2333          * Catch race where our node has transitioned, but the
2334          * transport is still transitioning.
2335          */
2336         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
2337                 cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
2338                 goto out_fail_command;
2339         }
2340         if (vport->cfg_max_scsicmpl_time &&
2341                 (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth))
2342                 goto out_host_busy;
2343
2344         lpfc_cmd = lpfc_get_scsi_buf(phba);
2345         if (lpfc_cmd == NULL) {
2346                 lpfc_rampdown_queue_depth(phba);
2347
2348                 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
2349                                  "0707 driver's buffer pool is empty, "
2350                                  "IO busied\n");
2351                 goto out_host_busy;
2352         }
2353
2354         /*
2355          * Store the midlayer's command structure for the completion phase
2356          * and complete the command initialization.
2357          */
2358         lpfc_cmd->pCmd  = cmnd;
2359         lpfc_cmd->rdata = rdata;
2360         lpfc_cmd->timeout = 0;
2361         lpfc_cmd->start_time = jiffies;
2362         cmnd->host_scribble = (unsigned char *)lpfc_cmd;
2363         cmnd->scsi_done = done;
2364
2365         if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
2366                 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2367                                 "9033 BLKGRD: rcvd protected cmd:%02x op:%02x "
2368                                 "str=%s\n",
2369                                 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
2370                                 dif_op_str[scsi_get_prot_op(cmnd)]);
2371                 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2372                                 "9034 BLKGRD: CDB: %02x %02x %02x %02x %02x "
2373                                 "%02x %02x %02x %02x %02x\n",
2374                                 cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2],
2375                                 cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5],
2376                                 cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8],
2377                                 cmnd->cmnd[9]);
2378                 if (cmnd->cmnd[0] == READ_10)
2379                         lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2380                                         "9035 BLKGRD: READ @ sector %llu, "
2381                                          "count %lu\n",
2382                                          (unsigned long long)scsi_get_lba(cmnd),
2383                                         cmnd->request->nr_sectors);
2384                 else if (cmnd->cmnd[0] == WRITE_10)
2385                         lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2386                                         "9036 BLKGRD: WRITE @ sector %llu, "
2387                                         "count %lu cmd=%p\n",
2388                                         (unsigned long long)scsi_get_lba(cmnd),
2389                                         cmnd->request->nr_sectors,
2390                                         cmnd);
2391
2392                 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
2393         } else {
2394                 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2395                                 "9038 BLKGRD: rcvd unprotected cmd:%02x op:%02x"
2396                                 " str=%s\n",
2397                                 cmnd->cmnd[0], scsi_get_prot_op(cmnd),
2398                                 dif_op_str[scsi_get_prot_op(cmnd)]);
2399                 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2400                                  "9039 BLKGRD: CDB: %02x %02x %02x %02x %02x "
2401                                  "%02x %02x %02x %02x %02x\n",
2402                                  cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2],
2403                                  cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5],
2404                                  cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8],
2405                                  cmnd->cmnd[9]);
2406                 if (cmnd->cmnd[0] == READ_10)
2407                         lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2408                                          "9040 dbg: READ @ sector %llu, "
2409                                          "count %lu\n",
2410                                          (unsigned long long)scsi_get_lba(cmnd),
2411                                          cmnd->request->nr_sectors);
2412                 else if (cmnd->cmnd[0] == WRITE_10)
2413                         lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2414                                          "9041 dbg: WRITE @ sector %llu, "
2415                                          "count %lu cmd=%p\n",
2416                                          (unsigned long long)scsi_get_lba(cmnd),
2417                                          cmnd->request->nr_sectors, cmnd);
2418                 else
2419                         lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
2420                                          "9042 dbg: parser not implemented\n");
2421                 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
2422         }
2423
2424         if (err)
2425                 goto out_host_busy_free_buf;
2426
2427         lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
2428
2429         atomic_inc(&ndlp->cmd_pending);
2430         err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
2431                                   &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
2432         if (err) {
2433                 atomic_dec(&ndlp->cmd_pending);
2434                 goto out_host_busy_free_buf;
2435         }
2436         if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
2437                 lpfc_sli_poll_fcp_ring(phba);
2438                 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
2439                         lpfc_poll_rearm_timer(phba);
2440         }
2441
2442         return 0;
2443
2444  out_host_busy_free_buf:
2445         lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
2446         lpfc_release_scsi_buf(phba, lpfc_cmd);
2447  out_host_busy:
2448         return SCSI_MLQUEUE_HOST_BUSY;
2449
2450  out_fail_command:
2451         done(cmnd);
2452         return 0;
2453 }
2454
2455 /**
2456  * lpfc_block_error_handler - Routine to block the error handler
2457  * @cmnd: Pointer to scsi_cmnd data structure.
2458  *
2459  * This routine blocks execution until fc_rport state is not FC_PORTSTATE_BLOCKED.
2460  **/
2461 static void
2462 lpfc_block_error_handler(struct scsi_cmnd *cmnd)
2463 {
2464         struct Scsi_Host *shost = cmnd->device->host;
2465         struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
2466
2467         spin_lock_irq(shost->host_lock);
2468         while (rport->port_state == FC_PORTSTATE_BLOCKED) {
2469                 spin_unlock_irq(shost->host_lock);
2470                 msleep(1000);
2471                 spin_lock_irq(shost->host_lock);
2472         }
2473         spin_unlock_irq(shost->host_lock);
2474         return;
2475 }
2476
2477 /**
2478  * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point
2479  * @cmnd: Pointer to scsi_cmnd data structure.
2480  *
2481  * This routine aborts @cmnd pending in the base driver.
2482  *
2483  * Return code :
2484  *   0x2003 - Error
2485  *   0x2002 - Success
2486  **/
static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
        struct Scsi_Host  *shost = cmnd->device->host;
        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
        struct lpfc_hba   *phba = vport->phba;
        struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
        struct lpfc_iocbq *iocb;
        struct lpfc_iocbq *abtsiocb;
        struct lpfc_scsi_buf *lpfc_cmd;
        IOCB_t *cmd, *icmd;
        int ret = SUCCESS;
        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);

        lpfc_block_error_handler(cmnd);
        lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
        BUG_ON(!lpfc_cmd);

        /*
         * If pCmd field of the corresponding lpfc_scsi_buf structure
         * points to a different SCSI command, then the driver has
         * already completed this command, but the midlayer did not
         * see the completion before the eh fired.  Just return
         * SUCCESS.
         */
        iocb = &lpfc_cmd->cur_iocbq;
        if (lpfc_cmd->pCmd != cmnd)
                goto out;

        BUG_ON(iocb->context1 != lpfc_cmd);

        abtsiocb = lpfc_sli_get_iocbq(phba);
        if (abtsiocb == NULL) {
                ret = FAILED;
                goto out;
        }

        /*
         * The SCSI command cannot be in the txq; it must be in flight,
         * because pCmd is still pointing at the SCSI command we have to
         * abort.  There is no need to search the txcmplq.  Just send an
         * abort to the FW.
         */

        cmd = &iocb->iocb;
        icmd = &abtsiocb->iocb;
        icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
        icmd->un.acxri.abortContextTag = cmd->ulpContext;
        icmd->un.acxri.abortIoTag = cmd->ulpIoTag;

        icmd->ulpLe = 1;
        icmd->ulpClass = cmd->ulpClass;
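        /*
         * With the link up, send a real ABTS for the exchange; with the
         * link down, just close the exchange locally.
         */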
        if (lpfc_is_link_up(phba))
                icmd->ulpCommand = CMD_ABORT_XRI_CN;
        else
                icmd->ulpCommand = CMD_CLOSE_XRI_CN;

        abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
        abtsiocb->vport = vport;
        if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) {
                lpfc_sli_release_iocbq(phba, abtsiocb);
                ret = FAILED;
                goto out;
        }

        if (phba->cfg_poll & DISABLE_FCP_RING_INT)
                lpfc_sli_poll_fcp_ring(phba);

        lpfc_cmd->waitq = &waitq;
        /* Wait for abort to complete */
        wait_event_timeout(waitq,
                           (lpfc_cmd->pCmd != cmnd),
                           (2*vport->cfg_devloss_tmo*HZ));

        spin_lock_irq(shost->host_lock);
        lpfc_cmd->waitq = NULL;
        spin_unlock_irq(shost->host_lock);

        if (lpfc_cmd->pCmd == cmnd) {
                ret = FAILED;
                lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
                                 "0748 abort handler timed out waiting "
                                 "for abort to complete: ret %#x, ID %d, "
                                 "LUN %d, snum %#lx\n",
                                 ret, cmnd->device->id, cmnd->device->lun,
                                 cmnd->serial_number);
        }

 out:
        lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
                         "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
                         "LUN %d snum %#lx\n", ret, cmnd->device->id,
                         cmnd->device->lun, cmnd->serial_number);
        return ret;
}

/**
 * lpfc_device_reset_handler - scsi_host_template eh_device_reset entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does a device reset by sending a TARGET_RESET task management
 * command.
 *
 * Return code:
 *   0x2003 (FAILED) - Error
 *   0x2002 (SUCCESS) - Success
 **/
static int
lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
{
        struct Scsi_Host  *shost = cmnd->device->host;
        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
        struct lpfc_hba   *phba = vport->phba;
        struct lpfc_scsi_buf *lpfc_cmd;
        struct lpfc_iocbq *iocbq, *iocbqrsp;
        struct lpfc_rport_data *rdata = cmnd->device->hostdata;
        struct lpfc_nodelist *pnode = rdata->pnode;
        unsigned long later;
        int ret = SUCCESS;
        int status;
        int cnt;
        struct lpfc_scsi_event_header scsi_event;

        lpfc_block_error_handler(cmnd);
        /*
         * If target is not in a MAPPED state, delay the reset until
         * target is rediscovered or devloss timeout expires.
         */
        later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
        while (time_after(later, jiffies)) {
                if (!pnode || !NLP_CHK_NODE_ACT(pnode))
                        return FAILED;
                if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
                        break;
                schedule_timeout_uninterruptible(msecs_to_jiffies(500));
                rdata = cmnd->device->hostdata;
                if (!rdata)
                        break;
                pnode = rdata->pnode;
        }

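        /* Post a vendor-unique target-reset event to the fc transport. */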
        scsi_event.event_type = FC_REG_SCSI_EVENT;
        scsi_event.subcategory = LPFC_EVENT_TGTRESET;
        scsi_event.lun = 0;
        memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
        memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));

        fc_host_post_vendor_event(shost,
                fc_get_event_number(),
                sizeof(scsi_event),
                (char *)&scsi_event,
                LPFC_NL_VENDOR_ID);

        if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
                                 "0721 LUN Reset rport "
                                 "failure: msec x%x rdata x%p\n",
                                 jiffies_to_msecs(jiffies - later), rdata);
                return FAILED;
        }
        lpfc_cmd = lpfc_get_scsi_buf(phba);
        if (lpfc_cmd == NULL)
                return FAILED;
        lpfc_cmd->timeout = 60;
        lpfc_cmd->rdata = rdata;

        status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd,
                                              cmnd->device->lun,
                                              FCP_TARGET_RESET);
        if (!status) {
                lpfc_release_scsi_buf(phba, lpfc_cmd);
                return FAILED;
        }
        iocbq = &lpfc_cmd->cur_iocbq;

        /* get a buffer for this IOCB command response */
        iocbqrsp = lpfc_sli_get_iocbq(phba);
        if (iocbqrsp == NULL) {
                lpfc_release_scsi_buf(phba, lpfc_cmd);
                return FAILED;
        }
        lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
                         "0703 Issue target reset to TGT %d LUN %d "
                         "rpi x%x nlp_flag x%x\n", cmnd->device->id,
                         cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
        status = lpfc_sli_issue_iocb_wait(phba,
                                          &phba->sli.ring[phba->sli.fcp_ring],
                                          iocbq, iocbqrsp, lpfc_cmd->timeout);
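        /*
         * On a timeout the firmware still owns the IOCB, so hand it to
         * the default task-management completion handler instead of
         * releasing the scsi buffer here.
         */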
        if (status == IOCB_TIMEDOUT) {
                iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
                ret = TIMEOUT_ERROR;
        } else {
                if (status != IOCB_SUCCESS)
                        ret = FAILED;
                lpfc_release_scsi_buf(phba, lpfc_cmd);
        }
        lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
                         "0713 SCSI layer issued device reset (%d, %d) "
                         "return x%x status x%x result x%x\n",
                         cmnd->device->id, cmnd->device->lun, ret,
                         iocbqrsp->iocb.ulpStatus,
                         iocbqrsp->iocb.un.ulpWord[4]);
        lpfc_sli_release_iocbq(phba, iocbqrsp);
        cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id, cmnd->device->lun,
                                LPFC_CTX_TGT);
        if (cnt)
                lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
                                    cmnd->device->id, cmnd->device->lun,
                                    LPFC_CTX_TGT);
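        /* Give the aborted I/Os up to twice devloss_tmo to drain. */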
        later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
        while (time_after(later, jiffies) && cnt) {
                schedule_timeout_uninterruptible(msecs_to_jiffies(20));
                cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id,
                                        cmnd->device->lun, LPFC_CTX_TGT);
        }
        if (cnt) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
                                 "0719 device reset I/O flush failure: "
                                 "cnt x%x\n", cnt);
                ret = FAILED;
        }
        return ret;
}

/**
 * lpfc_bus_reset_handler - scsi_host_template eh_bus_reset_handler entry point
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine issues a target reset to every target on @cmnd->device->host.
 *
 * Return code:
 *   0x2003 (FAILED) - Error
 *   0x2002 (SUCCESS) - Success
 **/
static int
lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
{
        struct Scsi_Host  *shost = cmnd->device->host;
        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
        struct lpfc_hba   *phba = vport->phba;
        struct lpfc_nodelist *ndlp = NULL;
        int match;
        int ret = SUCCESS, status = SUCCESS, i;
        int cnt;
        struct lpfc_scsi_buf *lpfc_cmd;
        unsigned long later;
        struct lpfc_scsi_event_header scsi_event;

        scsi_event.event_type = FC_REG_SCSI_EVENT;
        scsi_event.subcategory = LPFC_EVENT_BUSRESET;
        scsi_event.lun = 0;
        memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
        memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));

        fc_host_post_vendor_event(shost,
                fc_get_event_number(),
                sizeof(scsi_event),
                (char *)&scsi_event,
                LPFC_NL_VENDOR_ID);

        lpfc_block_error_handler(cmnd);
        /*
         * Since the driver manages a single bus device, reset all
         * targets known to the driver.  Should any target reset
         * fail, this routine returns failure to the midlayer.
         */
        for (i = 0; i < LPFC_MAX_TARGET; i++) {
                /* Search for mapped node by target ID */
                match = 0;
                spin_lock_irq(shost->host_lock);
                list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
                        if (!NLP_CHK_NODE_ACT(ndlp))
                                continue;
                        if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
                            ndlp->nlp_sid == i &&
                            ndlp->rport) {
                                match = 1;
                                break;
                        }
                }
                spin_unlock_irq(shost->host_lock);
                if (!match)
                        continue;
                lpfc_cmd = lpfc_get_scsi_buf(phba);
                if (lpfc_cmd) {
                        lpfc_cmd->timeout = 60;
                        status = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i,
                                                     cmnd->device->lun,
                                                     ndlp->rport->dd_data);
                        if (status != TIMEOUT_ERROR)
                                lpfc_release_scsi_buf(phba, lpfc_cmd);
                }
                if (!lpfc_cmd || status != SUCCESS) {
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
                                         "0700 Bus Reset on target %d failed\n",
                                         i);
                        ret = FAILED;
                }
        }
        /*
         * All outstanding txcmplq I/Os should have been aborted by
         * the targets.  Unfortunately, some targets do not abide by
         * this, forcing the driver to double-check.
         */
        cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
        if (cnt)
                lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
                                    0, 0, LPFC_CTX_HOST);
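        /* Give the aborted I/Os up to twice devloss_tmo to drain. */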
        later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
        while (time_after(later, jiffies) && cnt) {
                schedule_timeout_uninterruptible(msecs_to_jiffies(20));
                cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
        }
        if (cnt) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
                                 "0715 Bus Reset I/O flush failure: "
                                 "cnt x%x left x%x\n", cnt, i);
                ret = FAILED;
        }
        lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
                         "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
        return ret;
}

/**
 * lpfc_slave_alloc - scsi_host_template slave_alloc entry point
 * @sdev: Pointer to scsi_device.
 *
 * This routine adds lun_queue_depth + 2 scsi_bufs to this host's globally
 * available list of scsi buffers, and ensures that no more scsi buffers
 * are allocated than the HBA limit conveyed to the midlayer.  This list
 * of scsi buffers exists for the lifetime of the driver.
 *
 * Return codes:
 *   non-0 - Error
 *   0 - Success
 **/
static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
        struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
        struct lpfc_hba   *phba = vport->phba;
        struct lpfc_scsi_buf *scsi_buf = NULL;
        struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
        uint32_t total = 0, i;
        uint32_t num_to_alloc = 0;
        unsigned long flags;

        if (!rport || fc_remote_port_chkready(rport))
                return -ENXIO;

        sdev->hostdata = rport->dd_data;

        /*
         * Populate the cmds_per_lun count of scsi_bufs into this host's
         * globally available list of scsi buffers.  Don't allocate more
         * than the HBA limit conveyed to the midlayer via the host
         * structure.  The formula accounts for the lun_queue_depth +
         * error handlers + 1 extra.  This list of scsi bufs exists for
         * the lifetime of the driver.
         */
        total = phba->total_scsi_bufs;
        num_to_alloc = vport->cfg_lun_queue_depth + 2;

        /* Allow some exchanges to be available always to complete discovery */
        if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
                lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
                                 "0704 At limitation of %d preallocated "
                                 "command buffers\n", total);
                return 0;
        } else if (total + num_to_alloc >
                phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
                lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
                                 "0705 Allocation request of %d "
                                 "command buffers will exceed max of %d.  "
                                 "Reducing allocation request to %d.\n",
                                 num_to_alloc, phba->cfg_hba_queue_depth,
                                 (phba->cfg_hba_queue_depth - total));
                num_to_alloc = phba->cfg_hba_queue_depth - total;
        }

        for (i = 0; i < num_to_alloc; i++) {
                scsi_buf = lpfc_new_scsi_buf(vport);
                if (!scsi_buf) {
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
                                         "0706 Failed to allocate "
                                         "command buffer\n");
                        break;
                }

                spin_lock_irqsave(&phba->scsi_buf_list_lock, flags);
                phba->total_scsi_bufs++;
                list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
                spin_unlock_irqrestore(&phba->scsi_buf_list_lock, flags);
        }
        return 0;
}

/**
 * lpfc_slave_configure - scsi_host_template slave_configure entry point
 * @sdev: Pointer to scsi_device.
 *
 * This routine configures the following items:
 *   - Tag command queuing support for @sdev, if supported.
 *   - Dev loss time out value of fc_rport.
 *   - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
 *
 * Return codes:
 *   0 - Success
 **/
static int
lpfc_slave_configure(struct scsi_device *sdev)
{
        struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
        struct lpfc_hba   *phba = vport->phba;
        struct fc_rport   *rport = starget_to_rport(sdev->sdev_target);

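        /*
         * Enable tagged command queuing if the device supports it;
         * either way, set the queue depth to cfg_lun_queue_depth.
         */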
        if (sdev->tagged_supported)
                scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth);
        else
                scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth);

        /*
         * Initialize the fc transport attributes for the target
         * containing this scsi device.  Also note that the driver's
         * target pointer is stored in the starget_data for the
         * driver's sysfs entry point functions.
         */
        rport->dev_loss_tmo = vport->cfg_devloss_tmo;

        if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
                lpfc_sli_poll_fcp_ring(phba);
                if (phba->cfg_poll & DISABLE_FCP_RING_INT)
                        lpfc_poll_rearm_timer(phba);
        }

        return 0;
}

/**
 * lpfc_slave_destroy - slave_destroy entry point of SHT data structure
 * @sdev: Pointer to scsi_device.
 *
 * This routine sets the @sdev hostdata field to NULL.
 **/
static void
lpfc_slave_destroy(struct scsi_device *sdev)
{
        sdev->hostdata = NULL;
        return;
}


struct scsi_host_template lpfc_template = {
        .module                 = THIS_MODULE,
        .name                   = LPFC_DRIVER_NAME,
        .info                   = lpfc_info,
        .queuecommand           = lpfc_queuecommand,
        .eh_abort_handler       = lpfc_abort_handler,
        .eh_device_reset_handler= lpfc_device_reset_handler,
        .eh_bus_reset_handler   = lpfc_bus_reset_handler,
        .slave_alloc            = lpfc_slave_alloc,
        .slave_configure        = lpfc_slave_configure,
        .slave_destroy          = lpfc_slave_destroy,
        .scan_finished          = lpfc_scan_finished,
        .this_id                = -1,
        .sg_tablesize           = LPFC_DEFAULT_SG_SEG_CNT,
        .cmd_per_lun            = LPFC_CMD_PER_LUN,
        .use_clustering         = ENABLE_CLUSTERING,
        .shost_attrs            = lpfc_hba_attrs,
        .max_sectors            = 0xFFFF,
};

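/*
 * The vport template below is identical to lpfc_template except for
 * shost_attrs, which exposes the vport attribute set instead of the
 * physical-port (HBA) attributes.
 */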
struct scsi_host_template lpfc_vport_template = {
        .module                 = THIS_MODULE,
        .name                   = LPFC_DRIVER_NAME,
        .info                   = lpfc_info,
        .queuecommand           = lpfc_queuecommand,
        .eh_abort_handler       = lpfc_abort_handler,
        .eh_device_reset_handler= lpfc_device_reset_handler,
        .eh_bus_reset_handler   = lpfc_bus_reset_handler,
        .slave_alloc            = lpfc_slave_alloc,
        .slave_configure        = lpfc_slave_configure,
        .slave_destroy          = lpfc_slave_destroy,
        .scan_finished          = lpfc_scan_finished,
        .this_id                = -1,
        .sg_tablesize           = LPFC_DEFAULT_SG_SEG_CNT,
        .cmd_per_lun            = LPFC_CMD_PER_LUN,
        .use_clustering         = ENABLE_CLUSTERING,
        .shost_attrs            = lpfc_vport_attrs,
        .max_sectors            = 0xFFFF,
};
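
/*
 * Usage note (illustrative sketch only, not part of this file): these
 * templates are handed to the midlayer when a host is allocated.  The
 * driver's real wiring lives in its port-creation path; a minimal,
 * hypothetical example of how a template such as lpfc_template is
 * consumed by the standard SCSI midlayer API:
 *
 *     struct Scsi_Host *shost;
 *
 *     shost = scsi_host_alloc(&lpfc_template, sizeof(struct lpfc_vport));
 *     if (!shost)
 *             return NULL;
 *     // ... initialize host limits, then scsi_add_host(shost, dev) ...
 */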