[SCSI] lpfc 8.3.0: Add active interrupt test for enabling MSI/MSI-X/INTx
linux-2.6: drivers/scsi/lpfc/lpfc_init.c
1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
5  * EMULEX and SLI are trademarks of Emulex.                        *
6  * www.emulex.com                                                  *
7  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
8  *                                                                 *
9  * This program is free software; you can redistribute it and/or   *
10  * modify it under the terms of version 2 of the GNU General       *
11  * Public License as published by the Free Software Foundation.    *
12  * This program is distributed in the hope that it will be useful. *
13  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
14  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
15  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
16  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
18  * more details, a copy of which can be found in the file COPYING  *
19  * included with this package.                                     *
20  *******************************************************************/
21
22 #include <linux/blkdev.h>
23 #include <linux/delay.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/idr.h>
26 #include <linux/interrupt.h>
27 #include <linux/kthread.h>
28 #include <linux/pci.h>
29 #include <linux/spinlock.h>
30 #include <linux/ctype.h>
31
32 #include <scsi/scsi.h>
33 #include <scsi/scsi_device.h>
34 #include <scsi/scsi_host.h>
35 #include <scsi/scsi_transport_fc.h>
36
37 #include "lpfc_hw.h"
38 #include "lpfc_sli.h"
39 #include "lpfc_nl.h"
40 #include "lpfc_disc.h"
41 #include "lpfc_scsi.h"
42 #include "lpfc.h"
43 #include "lpfc_logmsg.h"
44 #include "lpfc_crtn.h"
45 #include "lpfc_vport.h"
46 #include "lpfc_version.h"
47
48 static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
49 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
50 static int lpfc_post_rcv_buf(struct lpfc_hba *);
51
52 static struct scsi_transport_template *lpfc_transport_template = NULL;
53 static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
54 static DEFINE_IDR(lpfc_hba_index);
55
56 /**
57  * lpfc_config_port_prep: Perform lpfc initialization prior to config port.
58  * @phba: pointer to lpfc hba data structure.
59  *
60  * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
61  * mailbox command. It retrieves the revision information from the HBA and
62  * collects the Vital Product Data (VPD) about the HBA for preparing the
63  * configuration of the HBA.
64  *
65  * Return codes:
66  *   0 - success.
67  *   -ERESTART - requests the SLI layer to reset the HBA and try again.
68  *   Any other value - indicates an error.
69  **/
70 int
71 lpfc_config_port_prep(struct lpfc_hba *phba)
72 {
73         lpfc_vpd_t *vp = &phba->vpd;
74         int i = 0, rc;
75         LPFC_MBOXQ_t *pmb;
76         MAILBOX_t *mb;
77         char *lpfc_vpd_data = NULL;
78         uint16_t offset = 0;
79         static char licensed[56] =
80                     "key unlock for use with gnu public licensed code only\0";
81         static int init_key = 1;
82
83         pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
84         if (!pmb) {
85                 phba->link_state = LPFC_HBA_ERROR;
86                 return -ENOMEM;
87         }
88
89         mb = &pmb->mb;
90         phba->link_state = LPFC_INIT_MBX_CMDS;
91
92         if (lpfc_is_LC_HBA(phba->pcidev->device)) {
93                 if (init_key) {
94                         uint32_t *ptext = (uint32_t *) licensed;
95
96                         for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
97                                 *ptext = cpu_to_be32(*ptext);
98                         init_key = 0;
99                 }
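                /*
                 * The one-time swap above leaves the license text as
                 * big-endian 32-bit words; it is copied into READ_NVPARM's
                 * reserved words below for LC-type HBAs (lpfc_is_LC_HBA()).
                 */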
100
101                 lpfc_read_nv(phba, pmb);
102                 memset((char*)mb->un.varRDnvp.rsvd3, 0,
103                         sizeof (mb->un.varRDnvp.rsvd3));
104                 memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
105                          sizeof (licensed));
106
107                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
108
109                 if (rc != MBX_SUCCESS) {
110                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
111                                         "0324 Config Port initialization "
112                                         "error, mbxCmd x%x READ_NVPARM, "
113                                         "mbxStatus x%x\n",
114                                         mb->mbxCommand, mb->mbxStatus);
115                         mempool_free(pmb, phba->mbox_mem_pool);
116                         return -ERESTART;
117                 }
118                 memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
119                        sizeof(phba->wwnn));
120                 memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
121                        sizeof(phba->wwpn));
122         }
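        /*
         * phba->wwnn and phba->wwpn now hold the adapter's factory node and
         * port names as returned by READ_NVPARM.
         */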
123
124         phba->sli3_options = 0x0;
125
126         /* Setup and issue mailbox READ REV command */
127         lpfc_read_rev(phba, pmb);
128         rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
129         if (rc != MBX_SUCCESS) {
130                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
131                                 "0439 Adapter failed to init, mbxCmd x%x "
132                                 "READ_REV, mbxStatus x%x\n",
133                                 mb->mbxCommand, mb->mbxStatus);
134                 mempool_free(pmb, phba->mbox_mem_pool);
135                 return -ERESTART;
136         }
137
138
139         /*
140          * The value of rr must be 1 since the driver set the cv field to 1.
141          * This setting requires the FW to set all revision fields.
142          */
143         if (mb->un.varRdRev.rr == 0) {
144                 vp->rev.rBit = 0;
145                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
146                                 "0440 Adapter failed to init, READ_REV has "
147                                 "missing revision information.\n");
148                 mempool_free(pmb, phba->mbox_mem_pool);
149                 return -ERESTART;
150         }
151
152         if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
153                 mempool_free(pmb, phba->mbox_mem_pool);
154                 return -EINVAL;
155         }
156
157         /* Save information as VPD data */
158         vp->rev.rBit = 1;
159         memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
160         vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
161         memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
162         vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
163         memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
164         vp->rev.biuRev = mb->un.varRdRev.biuRev;
165         vp->rev.smRev = mb->un.varRdRev.smRev;
166         vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
167         vp->rev.endecRev = mb->un.varRdRev.endecRev;
168         vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
169         vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
170         vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
171         vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
172         vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
173         vp->rev.opFwRev = mb->un.varRdRev.opFwRev;
174
175         /* If the SLI feature level is less than 9, we must
176          * tear down all RPIs and VPIs on link down if NPIV
177          * is enabled.
178          */
179         if (vp->rev.feaLevelHigh < 9)
180                 phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;
181
182         if (lpfc_is_LC_HBA(phba->pcidev->device))
183                 memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
184                                                 sizeof (phba->RandomData));
185
186         /* Get adapter VPD information */
187         lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
188         if (!lpfc_vpd_data)
189                 goto out_free_mbox;
190
191         do {
192                 lpfc_dump_mem(phba, pmb, offset);
193                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
194
195                 if (rc != MBX_SUCCESS) {
196                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
197                                         "0441 VPD not present on adapter, "
198                                         "mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
199                                         mb->mbxCommand, mb->mbxStatus);
200                         mb->un.varDmp.word_cnt = 0;
201                 }
202                 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
203                         mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
204                 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
205                                       lpfc_vpd_data + offset,
206                                       mb->un.varDmp.word_cnt);
207                 offset += mb->un.varDmp.word_cnt;
208         } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
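        /*
         * The dump mailbox command (lpfc_dump_mem()) returns the VPD in
         * chunks; the loop above keeps re-issuing it at increasing offsets
         * until the adapter reports no more data or DMP_VPD_SIZE bytes have
         * been collected.
         */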
209         lpfc_parse_vpd(phba, lpfc_vpd_data, offset);
210
211         kfree(lpfc_vpd_data);
212 out_free_mbox:
213         mempool_free(pmb, phba->mbox_mem_pool);
214         return 0;
215 }
216
217 /**
218  * lpfc_config_async_cmpl: Completion handler for config async event mbox cmd.
219  * @phba: pointer to lpfc hba data structure.
220  * @pmboxq: pointer to the driver internal queue element for mailbox command.
221  *
222  * This is the completion handler for the driver's asynchronous event
223  * configuration mailbox command to the device. If the mailbox command returns
224  * successfully, the internal async event support flag is set to 1; otherwise,
225  * it is set to 0.
226  **/
227 static void
228 lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
229 {
230         if (pmboxq->mb.mbxStatus == MBX_SUCCESS)
231                 phba->temp_sensor_support = 1;
232         else
233                 phba->temp_sensor_support = 0;
234         mempool_free(pmboxq, phba->mbox_mem_pool);
235         return;
236 }
237
238 /**
239  * lpfc_dump_wakeup_param_cmpl: Completion handler for dump memory mailbox
240  *     command used for getting wake up parameters.
241  * @phba: pointer to lpfc hba data structure.
242  * @pmboxq: pointer to the driver internal queue element for mailbox command.
243  *
244  * This is the completion handler for the dump mailbox command used to get
245  * wake up parameters. When this command completes, the response contains the
246  * Option ROM version of the HBA. This function translates the version number
247  * into a human readable string and stores it in OptionROMVersion.
248  **/
249 static void
250 lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
251 {
252         struct prog_id *prg;
253         uint32_t prog_id_word;
254         char dist = ' ';
255         /* character array used for decoding dist type. */
256         char dist_char[] = "nabx";
257
258         if (pmboxq->mb.mbxStatus != MBX_SUCCESS)
259                 return;
260
261         prg = (struct prog_id *) &prog_id_word;
262
263         /* word 7 contains the option rom version */
264         prog_id_word = pmboxq->mb.un.varWords[7];
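        /*
         * prg overlays prog_id_word so the packed version fields (ver, rev,
         * lev, dist, num) can be pulled out of the single response word below.
         */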
265
266         /* Decode the Option rom version word to a readable string */
267         if (prg->dist < 4)
268                 dist = dist_char[prg->dist];
269
270         if ((prg->dist == 3) && (prg->num == 0))
271                 sprintf(phba->OptionROMVersion, "%d.%d%d",
272                         prg->ver, prg->rev, prg->lev);
273         else
274                 sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
275                         prg->ver, prg->rev, prg->lev,
276                         dist, prg->num);
277         return;
278 }
279
280 /**
281  * lpfc_config_port_post: Perform lpfc initialization after config port.
282  * @phba: pointer to lpfc hba data structure.
283  *
284  * This routine will do LPFC initialization after the CONFIG_PORT mailbox
285  * command call. It performs all internal resource and state setups on the
286  * port: post IOCB buffers, enable appropriate host interrupt attentions,
287  * ELS ring timers, etc.
288  *
289  * Return codes
290  *   0 - success.
291  *   Any other value - error.
292  **/
293 int
294 lpfc_config_port_post(struct lpfc_hba *phba)
295 {
296         struct lpfc_vport *vport = phba->pport;
297         LPFC_MBOXQ_t *pmb;
298         MAILBOX_t *mb;
299         struct lpfc_dmabuf *mp;
300         struct lpfc_sli *psli = &phba->sli;
301         uint32_t status, timeout;
302         int i, j;
303         int rc;
304
305         spin_lock_irq(&phba->hbalock);
306         /*
307          * If the CONFIG_PORT mailbox command completed correctly, the HBA
308          * is no longer overheated.
309          */
310         if (phba->over_temp_state == HBA_OVER_TEMP)
311                 phba->over_temp_state = HBA_NORMAL_TEMP;
312         spin_unlock_irq(&phba->hbalock);
313
314         pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
315         if (!pmb) {
316                 phba->link_state = LPFC_HBA_ERROR;
317                 return -ENOMEM;
318         }
319         mb = &pmb->mb;
320
321         /* Get login parameters for NID.  */
322         lpfc_read_sparam(phba, pmb, 0);
323         pmb->vport = vport;
324         if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
325                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
326                                 "0448 Adapter failed init, mbxCmd x%x "
327                                 "READ_SPARM mbxStatus x%x\n",
328                                 mb->mbxCommand, mb->mbxStatus);
329                 phba->link_state = LPFC_HBA_ERROR;
330                 mp = (struct lpfc_dmabuf *) pmb->context1;
331                 mempool_free(pmb, phba->mbox_mem_pool);
332                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
333                 kfree(mp);
334                 return -EIO;
335         }
336
337         mp = (struct lpfc_dmabuf *) pmb->context1;
338
339         memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
340         lpfc_mbuf_free(phba, mp->virt, mp->phys);
341         kfree(mp);
342         pmb->context1 = NULL;
343
344         if (phba->cfg_soft_wwnn)
345                 u64_to_wwn(phba->cfg_soft_wwnn,
346                            vport->fc_sparam.nodeName.u.wwn);
347         if (phba->cfg_soft_wwpn)
348                 u64_to_wwn(phba->cfg_soft_wwpn,
349                            vport->fc_sparam.portName.u.wwn);
350         memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
351                sizeof (struct lpfc_name));
352         memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
353                sizeof (struct lpfc_name));
354         /* If no serial number in VPD data, use low 6 bytes of WWNN */
355         /* This should be consolidated into parse_vpd ? - mr */
356         if (phba->SerialNumber[0] == 0) {
357                 uint8_t *outptr;
358
359                 outptr = &vport->fc_nodename.u.s.IEEE[0];
360                 for (i = 0; i < 12; i++) {
361                         status = *outptr++;
362                         j = ((status & 0xf0) >> 4);
363                         if (j <= 9)
364                                 phba->SerialNumber[i] =
365                                     (char)((uint8_t) 0x30 + (uint8_t) j);
366                         else
367                                 phba->SerialNumber[i] =
368                                     (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
369                         i++;
370                         j = (status & 0xf);
371                         if (j <= 9)
372                                 phba->SerialNumber[i] =
373                                     (char)((uint8_t) 0x30 + (uint8_t) j);
374                         else
375                                 phba->SerialNumber[i] =
376                                     (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
377                 }
378         }
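        /*
         * The loop above splits each of the six IEEE bytes of the WWNN into
         * two nibbles and maps them to '0'-'9' (0x30 base) or 'a'-'f' (0x61
         * base), giving a 12-character fallback serial number.
         */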
379
380         lpfc_read_config(phba, pmb);
381         pmb->vport = vport;
382         if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
383                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
384                                 "0453 Adapter failed to init, mbxCmd x%x "
385                                 "READ_CONFIG, mbxStatus x%x\n",
386                                 mb->mbxCommand, mb->mbxStatus);
387                 phba->link_state = LPFC_HBA_ERROR;
388                 mempool_free(pmb, phba->mbox_mem_pool);
389                 return -EIO;
390         }
391
392         /* Reset the DFT_HBA_Q_DEPTH to the max xri  */
393         if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
394                 phba->cfg_hba_queue_depth =
395                         mb->un.varRdConfig.max_xri + 1;
396
397         phba->lmt = mb->un.varRdConfig.lmt;
398
399         /* Get the default values for Model Name and Description */
400         lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
401
402         if ((phba->cfg_link_speed > LINK_SPEED_10G)
403             || ((phba->cfg_link_speed == LINK_SPEED_1G)
404                 && !(phba->lmt & LMT_1Gb))
405             || ((phba->cfg_link_speed == LINK_SPEED_2G)
406                 && !(phba->lmt & LMT_2Gb))
407             || ((phba->cfg_link_speed == LINK_SPEED_4G)
408                 && !(phba->lmt & LMT_4Gb))
409             || ((phba->cfg_link_speed == LINK_SPEED_8G)
410                 && !(phba->lmt & LMT_8Gb))
411             || ((phba->cfg_link_speed == LINK_SPEED_10G)
412                 && !(phba->lmt & LMT_10Gb))) {
413                 /* Reset link speed to auto */
414                 lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT,
415                         "1302 Invalid speed for this board: "
416                         "Reset link speed to auto: x%x\n",
417                         phba->cfg_link_speed);
418                 phba->cfg_link_speed = LINK_SPEED_AUTO;
419         }
420
421         phba->link_state = LPFC_LINK_DOWN;
422
423         /* Only process IOCBs on ELS ring until hba_state is READY */
424         if (psli->ring[psli->extra_ring].cmdringaddr)
425                 psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
426         if (psli->ring[psli->fcp_ring].cmdringaddr)
427                 psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
428         if (psli->ring[psli->next_ring].cmdringaddr)
429                 psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;
430
431         /* Post receive buffers for desired rings */
432         if (phba->sli_rev != 3)
433                 lpfc_post_rcv_buf(phba);
434
435         /*
436          * Configure HBA MSI-X attention conditions to messages if MSI-X mode
437          */
438         if (phba->intr_type == MSIX) {
439                 rc = lpfc_config_msi(phba, pmb);
440                 if (rc) {
441                         mempool_free(pmb, phba->mbox_mem_pool);
442                         return -EIO;
443                 }
444                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
445                 if (rc != MBX_SUCCESS) {
446                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
447                                         "0352 Config MSI mailbox command "
448                                         "failed, mbxCmd x%x, mbxStatus x%x\n",
449                                         pmb->mb.mbxCommand, pmb->mb.mbxStatus);
450                         mempool_free(pmb, phba->mbox_mem_pool);
451                         return -EIO;
452                 }
453         }
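        /*
         * With MSI-X, the CONFIG_MSI mailbox command tells the HBA which
         * attention conditions to signal on which message vector (this appears
         * to be what allows slow-path and fast-path events to be delivered on
         * separate vectors).
         */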
454
455         /* Initialize ERATT handling flag */
456         phba->hba_flag &= ~HBA_ERATT_HANDLED;
457
458         /* Enable appropriate host interrupts */
459         spin_lock_irq(&phba->hbalock);
460         status = readl(phba->HCregaddr);
461         status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
462         if (psli->num_rings > 0)
463                 status |= HC_R0INT_ENA;
464         if (psli->num_rings > 1)
465                 status |= HC_R1INT_ENA;
466         if (psli->num_rings > 2)
467                 status |= HC_R2INT_ENA;
468         if (psli->num_rings > 3)
469                 status |= HC_R3INT_ENA;
470
471         if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
472             (phba->cfg_poll & DISABLE_FCP_RING_INT))
473                 status &= ~(HC_R0INT_ENA);
474
475         writel(status, phba->HCregaddr);
476         readl(phba->HCregaddr); /* flush */
477         spin_unlock_irq(&phba->hbalock);
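        /*
         * The HC register value written above enables mailbox, error and link
         * attention interrupts plus one enable bit per configured ring;
         * ring-0 (FCP) interrupts are left masked when the driver is
         * configured to poll the FCP ring instead.
         */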
478
479         /* Set up ring-0 (ELS) timer */
480         timeout = phba->fc_ratov * 2;
481         mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
482         /* Set up heart beat (HB) timer */
483         mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
484         phba->hb_outstanding = 0;
485         phba->last_completion_time = jiffies;
486         /* Set up error attention (ERATT) polling timer */
487         mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
488
489         lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
490         pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
491         lpfc_set_loopback_flag(phba);
492         rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
493         if (rc != MBX_SUCCESS) {
494                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
495                                 "0454 Adapter failed to init, mbxCmd x%x "
496                                 "INIT_LINK, mbxStatus x%x\n",
497                                 mb->mbxCommand, mb->mbxStatus);
498
499                 /* Clear all interrupt enable conditions */
500                 writel(0, phba->HCregaddr);
501                 readl(phba->HCregaddr); /* flush */
502                 /* Clear all pending interrupts */
503                 writel(0xffffffff, phba->HAregaddr);
504                 readl(phba->HAregaddr); /* flush */
505
506                 phba->link_state = LPFC_HBA_ERROR;
507                 if (rc != MBX_BUSY)
508                         mempool_free(pmb, phba->mbox_mem_pool);
509                 return -EIO;
510         }
511         /* MBOX buffer will be freed in mbox compl */
512         pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
513         lpfc_config_async(phba, pmb, LPFC_ELS_RING);
514         pmb->mbox_cmpl = lpfc_config_async_cmpl;
515         pmb->vport = phba->pport;
516         rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
517
518         if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
519                 lpfc_printf_log(phba,
520                                 KERN_ERR,
521                                 LOG_INIT,
522                                 "0456 Adapter failed to issue "
523                                 "ASYNCEVT_ENABLE mbox status x%x\n",
524                                 rc);
525                 mempool_free(pmb, phba->mbox_mem_pool);
526         }
527
528         /* Get Option rom version */
529         pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
530         lpfc_dump_wakeup_param(phba, pmb);
531         pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
532         pmb->vport = phba->pport;
533         rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
534
535         if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
536                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
537                                 "to get Option ROM version status x%x\n", rc);
538                 mempool_free(pmb, phba->mbox_mem_pool);
539         }
540
541         return 0;
542 }
543
544 /**
545  * lpfc_hba_down_prep: Perform lpfc uninitialization prior to HBA reset.
546  * @phba: pointer to lpfc HBA data structure.
547  *
548  * This routine will do LPFC uninitialization before the HBA is reset when
549  * bringing down the SLI Layer.
550  *
551  * Return codes
552  *   0 - success.
553  *   Any other value - error.
554  **/
555 int
556 lpfc_hba_down_prep(struct lpfc_hba *phba)
557 {
558         struct lpfc_vport **vports;
559         int i;
560         /* Disable interrupts */
561         writel(0, phba->HCregaddr);
562         readl(phba->HCregaddr); /* flush */
563
564         if (phba->pport->load_flag & FC_UNLOADING)
565                 lpfc_cleanup_discovery_resources(phba->pport);
566         else {
567                 vports = lpfc_create_vport_work_array(phba);
568                 if (vports != NULL)
569                         for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++)
570                                 lpfc_cleanup_discovery_resources(vports[i]);
571                 lpfc_destroy_vport_work_array(phba, vports);
572         }
573         return 0;
574 }
575
576 /**
577  * lpfc_hba_down_post: Perform lpfc uninitialization after HBA reset.
578  * @phba: pointer to lpfc HBA data structure.
579  *
580  * This routine will do uninitialization after the HBA is reset when bringing
581  * down the SLI Layer.
582  *
583  * Return codes
584  *   0 - success.
585  *   Any other value - error.
586  **/
587 int
588 lpfc_hba_down_post(struct lpfc_hba *phba)
589 {
590         struct lpfc_sli *psli = &phba->sli;
591         struct lpfc_sli_ring *pring;
592         struct lpfc_dmabuf *mp, *next_mp;
593         struct lpfc_iocbq *iocb;
594         IOCB_t *cmd = NULL;
595         LIST_HEAD(completions);
596         int i;
597
598         if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
599                 lpfc_sli_hbqbuf_free_all(phba);
600         else {
601                 /* Cleanup preposted buffers on the ELS ring */
602                 pring = &psli->ring[LPFC_ELS_RING];
603                 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
604                         list_del(&mp->list);
605                         pring->postbufq_cnt--;
606                         lpfc_mbuf_free(phba, mp->virt, mp->phys);
607                         kfree(mp);
608                 }
609         }
610
611         spin_lock_irq(&phba->hbalock);
612         for (i = 0; i < psli->num_rings; i++) {
613                 pring = &psli->ring[i];
614
615                 /* At this point in time the HBA is either reset or DOA. Either
616                  * way, nothing should be on txcmplq as it will NEVER complete.
617                  */
618                 list_splice_init(&pring->txcmplq, &completions);
619                 pring->txcmplq_cnt = 0;
620                 spin_unlock_irq(&phba->hbalock);
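                /*
                 * hbalock is dropped while the local completions list is
                 * walked so that the iocb completion handlers invoked below
                 * may take the lock themselves if they need to.
                 */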
621
622                 while (!list_empty(&completions)) {
623                         iocb = list_get_first(&completions, struct lpfc_iocbq,
624                                 list);
625                         cmd = &iocb->iocb;
626                         list_del_init(&iocb->list);
627
628                         if (!iocb->iocb_cmpl)
629                                 lpfc_sli_release_iocbq(phba, iocb);
630                         else {
631                                 cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
632                                 cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
633                                 (iocb->iocb_cmpl) (phba, iocb, iocb);
634                         }
635                 }
636
637                 lpfc_sli_abort_iocb_ring(phba, pring);
638                 spin_lock_irq(&phba->hbalock);
639         }
640         spin_unlock_irq(&phba->hbalock);
641
642         return 0;
643 }
644
645 /**
646  * lpfc_hb_timeout: The HBA-timer timeout handler.
647  * @ptr: unsigned long holds the pointer to lpfc hba data structure.
648  *
649  * This is the HBA-timer timeout handler registered to the lpfc driver. When
650  * this timer fires, a HBA timeout event shall be posted to the lpfc driver
651  * work-port-events bitmap and the worker thread is notified. This timeout
652  * event will be used by the worker thread to invoke the actual timeout
653  * handler routine, lpfc_hb_timeout_handler. Any periodic operations will
654  * be performed in the timeout handler, and the HBA timeout event bit shall
655  * be cleared by the worker thread after it has taken the event bitmap out.
656  **/
657 static void
658 lpfc_hb_timeout(unsigned long ptr)
659 {
660         struct lpfc_hba *phba;
661         uint32_t tmo_posted;
662         unsigned long iflag;
663
664         phba = (struct lpfc_hba *)ptr;
665
666         /* Check for heart beat timeout conditions */
667         spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
668         tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
669         if (!tmo_posted)
670                 phba->pport->work_port_events |= WORKER_HB_TMO;
671         spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
672
673         /* Tell the worker thread there is work to do */
674         if (!tmo_posted)
675                 lpfc_worker_wake_up(phba);
676         return;
677 }
678
679 /**
680  * lpfc_hb_mbox_cmpl: The lpfc heart-beat mailbox command callback function.
681  * @phba: pointer to lpfc hba data structure.
682  * @pmboxq: pointer to the driver internal queue element for mailbox command.
683  *
684  * This is the callback function to the lpfc heart-beat mailbox command.
685  * If configured, the lpfc driver issues the heart-beat mailbox command to
686  * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
687  * the HBA every LPFC_HB_MBOX_INTERVAL (currently 5) seconds. When the
688  * heart-beat mailbox command is issued, the driver sets the heart-beat
689  * timeout timer to LPFC_HB_MBOX_TIMEOUT (currently 30) seconds and marks
690  * the heart-beat as outstanding. Once the mailbox command comes back with
691  * no error conditions detected, the heart-beat mailbox command timer is
692  * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
693  * state is cleared for the next heart-beat. If the timer expires with the
694  * heart-beat outstanding state set, the driver will put the HBA offline.
695 static void
696 lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
697 {
698         unsigned long drvr_flag;
699
700         spin_lock_irqsave(&phba->hbalock, drvr_flag);
701         phba->hb_outstanding = 0;
702         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
703
704         /* Check and reset the heart-beat timer if necessary */
705         mempool_free(pmboxq, phba->mbox_mem_pool);
706         if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
707                 !(phba->link_state == LPFC_HBA_ERROR) &&
708                 !(phba->pport->load_flag & FC_UNLOADING))
709                 mod_timer(&phba->hb_tmofunc,
710                         jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
711         return;
712 }
713
714 /**
715  * lpfc_hb_timeout_handler: The HBA-timer timeout handler.
716  * @phba: pointer to lpfc hba data structure.
717  *
718  * This is the actual HBA-timer timeout handler to be invoked by the worker
719  * thread whenever the HBA timer fires and an HBA-timeout event is posted. This
720  * handler performs any periodic operations needed for the device. If such a
721  * periodic event has already been attended to either in the interrupt handler
722  * or by processing slow-ring or fast-ring events within the HBA-timer
723  * timeout window (LPFC_HB_MBOX_INTERVAL), this handler simply resets the
724  * timer for the next timeout period. If the lpfc heart-beat mailbox command
725  * is configured and there is no heart-beat mailbox command outstanding, a
726  * heart-beat mailbox command is issued and the timer is set properly.
727  * Otherwise, if there has been a heart-beat mailbox command outstanding, the
728  * HBA shall be put offline.
729  **/
730 void
731 lpfc_hb_timeout_handler(struct lpfc_hba *phba)
732 {
733         LPFC_MBOXQ_t *pmboxq;
734         struct lpfc_dmabuf *buf_ptr;
735         int retval;
736         struct lpfc_sli *psli = &phba->sli;
737         LIST_HEAD(completions);
738
739         if ((phba->link_state == LPFC_HBA_ERROR) ||
740                 (phba->pport->load_flag & FC_UNLOADING) ||
741                 (phba->pport->fc_flag & FC_OFFLINE_MODE))
742                 return;
743
744         spin_lock_irq(&phba->pport->work_port_lock);
745
746         if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
747                 jiffies)) {
748                 spin_unlock_irq(&phba->pport->work_port_lock);
749                 if (!phba->hb_outstanding)
750                         mod_timer(&phba->hb_tmofunc,
751                                 jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
752                 else
753                         mod_timer(&phba->hb_tmofunc,
754                                 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
755                 return;
756         }
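        /*
         * An I/O completion seen within the last heart-beat interval is taken
         * as proof of life, so the heart-beat mailbox command is skipped and
         * the timer is simply re-armed (using the longer timeout while a
         * heart-beat is still outstanding).
         */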
757         spin_unlock_irq(&phba->pport->work_port_lock);
758
759         if (phba->elsbuf_cnt &&
760                 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
761                 spin_lock_irq(&phba->hbalock);
762                 list_splice_init(&phba->elsbuf, &completions);
763                 phba->elsbuf_cnt = 0;
764                 phba->elsbuf_prev_cnt = 0;
765                 spin_unlock_irq(&phba->hbalock);
766
767                 while (!list_empty(&completions)) {
768                         list_remove_head(&completions, buf_ptr,
769                                 struct lpfc_dmabuf, list);
770                         lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
771                         kfree(buf_ptr);
772                 }
773         }
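        /*
         * ELS buffers whose count has not changed since the previous
         * heart-beat pass are treated as stale and freed by the block above.
         */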
774         phba->elsbuf_prev_cnt = phba->elsbuf_cnt;
775
776         /* If there is no heart beat outstanding, issue a heartbeat command */
777         if (phba->cfg_enable_hba_heartbeat) {
778                 if (!phba->hb_outstanding) {
779                         pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL);
780                         if (!pmboxq) {
781                                 mod_timer(&phba->hb_tmofunc,
782                                           jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
783                                 return;
784                         }
785
786                         lpfc_heart_beat(phba, pmboxq);
787                         pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
788                         pmboxq->vport = phba->pport;
789                         retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
790
791                         if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
792                                 mempool_free(pmboxq, phba->mbox_mem_pool);
793                                 mod_timer(&phba->hb_tmofunc,
794                                           jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
795                                 return;
796                         }
797                         mod_timer(&phba->hb_tmofunc,
798                                   jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
799                         phba->hb_outstanding = 1;
800                         return;
801                 } else {
802                         /*
803                          * If the heart-beat timeout fired with hb_outstanding
804                          * set, we need to take the HBA offline.
805                         */
806                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
807                                         "0459 Adapter heartbeat failure, "
808                                         "taking this port offline.\n");
809
810                         spin_lock_irq(&phba->hbalock);
811                         psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
812                         spin_unlock_irq(&phba->hbalock);
813
814                         lpfc_offline_prep(phba);
815                         lpfc_offline(phba);
816                         lpfc_unblock_mgmt_io(phba);
817                         phba->link_state = LPFC_HBA_ERROR;
818                         lpfc_hba_down_post(phba);
819                 }
820         }
821 }
822
823 /**
824  * lpfc_offline_eratt: Bring lpfc offline on hardware error attention.
825  * @phba: pointer to lpfc hba data structure.
826  *
827  * This routine is called to bring the HBA offline when HBA hardware error
828  * other than Port Error 6 has been detected.
829  **/
830 static void
831 lpfc_offline_eratt(struct lpfc_hba *phba)
832 {
833         struct lpfc_sli   *psli = &phba->sli;
834
835         spin_lock_irq(&phba->hbalock);
836         psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
837         spin_unlock_irq(&phba->hbalock);
838         lpfc_offline_prep(phba);
839
840         lpfc_offline(phba);
841         lpfc_reset_barrier(phba);
842         lpfc_sli_brdreset(phba);
843         lpfc_hba_down_post(phba);
844         lpfc_sli_brdready(phba, HS_MBRDY);
845         lpfc_unblock_mgmt_io(phba);
846         phba->link_state = LPFC_HBA_ERROR;
847         return;
848 }
849
850 /**
851  * lpfc_handle_eratt: The HBA hardware error handler.
852  * @phba: pointer to lpfc hba data structure.
853  *
854  * This routine is invoked to handle the following HBA hardware error
855  * conditions:
856  * 1 - HBA error attention interrupt
857  * 2 - DMA ring index out of range
858  * 3 - Mailbox command came back as unknown
859  **/
860 void
861 lpfc_handle_eratt(struct lpfc_hba *phba)
862 {
863         struct lpfc_vport *vport = phba->pport;
864         struct lpfc_sli   *psli = &phba->sli;
865         struct lpfc_sli_ring  *pring;
866         uint32_t event_data;
867         unsigned long temperature;
868         struct temp_event temp_event_data;
869         struct Scsi_Host  *shost;
870         struct lpfc_board_event_header board_event;
871
872         /* If the pci channel is offline, ignore possible errors,
873          * since we cannot communicate with the pci card anyway. */
874         if (pci_channel_offline(phba->pcidev))
875                 return;
876         /* If resets are disabled then leave the HBA alone and return */
877         if (!phba->cfg_enable_hba_reset)
878                 return;
879
880         /* Send an internal error event to mgmt application */
881         board_event.event_type = FC_REG_BOARD_EVENT;
882         board_event.subcategory = LPFC_EVENT_PORTINTERR;
883         shost = lpfc_shost_from_vport(phba->pport);
884         fc_host_post_vendor_event(shost, fc_get_event_number(),
885                                   sizeof(board_event),
886                                   (char *) &board_event,
887                                   LPFC_NL_VENDOR_ID);
888
889         if (phba->work_hs & HS_FFER6) {
890                 /* Re-establishing Link */
891                 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
892                                 "1301 Re-establishing Link "
893                                 "Data: x%x x%x x%x\n",
894                                 phba->work_hs,
895                                 phba->work_status[0], phba->work_status[1]);
896
897                 spin_lock_irq(&phba->hbalock);
898                 psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
899                 spin_unlock_irq(&phba->hbalock);
900
901                 /*
902                  * Firmware stops when it triggers an error attention with
903                  * HS_FFER6. That could cause I/Os to be dropped by the
904                  * firmware. Error out the iocbs (I/Os) on the txcmplq and
905                  * let the SCSI layer retry them once the link is re-established.
906                 */
907                 pring = &psli->ring[psli->fcp_ring];
908                 lpfc_sli_abort_iocb_ring(phba, pring);
909
910                 /*
911                  * There was a firmware error.  Take the hba offline and then
912                  * attempt to restart it.
913                  */
914                 lpfc_offline_prep(phba);
915                 lpfc_offline(phba);
916                 lpfc_sli_brdrestart(phba);
917                 if (lpfc_online(phba) == 0) {   /* Initialize the HBA */
918                         lpfc_unblock_mgmt_io(phba);
919                         return;
920                 }
921                 lpfc_unblock_mgmt_io(phba);
922         } else if (phba->work_hs & HS_CRIT_TEMP) {
923                 temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
924                 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
925                 temp_event_data.event_code = LPFC_CRIT_TEMP;
926                 temp_event_data.data = (uint32_t)temperature;
927
928                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
929                                 "0406 Adapter maximum temperature exceeded "
930                                 "(%ld), taking this port offline "
931                                 "Data: x%x x%x x%x\n",
932                                 temperature, phba->work_hs,
933                                 phba->work_status[0], phba->work_status[1]);
934
935                 shost = lpfc_shost_from_vport(phba->pport);
936                 fc_host_post_vendor_event(shost, fc_get_event_number(),
937                                           sizeof(temp_event_data),
938                                           (char *) &temp_event_data,
939                                           SCSI_NL_VID_TYPE_PCI
940                                           | PCI_VENDOR_ID_EMULEX);
941
942                 spin_lock_irq(&phba->hbalock);
943                 phba->over_temp_state = HBA_OVER_TEMP;
944                 spin_unlock_irq(&phba->hbalock);
945                 lpfc_offline_eratt(phba);
946
947         } else {
948                 /* The if clause above forces this code path when the status
949                  * failure is a value other than FFER6. Do not take the adapter
950                  * offline twice. This is the adapter hardware error path.
951                  */
952                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
953                                 "0457 Adapter Hardware Error "
954                                 "Data: x%x x%x x%x\n",
955                                 phba->work_hs,
956                                 phba->work_status[0], phba->work_status[1]);
957
958                 event_data = FC_REG_DUMP_EVENT;
959                 shost = lpfc_shost_from_vport(vport);
960                 fc_host_post_vendor_event(shost, fc_get_event_number(),
961                                 sizeof(event_data), (char *) &event_data,
962                                 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
963
964                 lpfc_offline_eratt(phba);
965         }
966         return;
967 }
968
969 /**
970  * lpfc_handle_latt: The HBA link event handler.
971  * @phba: pointer to lpfc hba data structure.
972  *
973  * This routine is invoked from the worker thread to handle a HBA host
974  * attention link event.
975  **/
976 void
977 lpfc_handle_latt(struct lpfc_hba *phba)
978 {
979         struct lpfc_vport *vport = phba->pport;
980         struct lpfc_sli   *psli = &phba->sli;
981         LPFC_MBOXQ_t *pmb;
982         volatile uint32_t control;
983         struct lpfc_dmabuf *mp;
984         int rc = 0;
985
986         pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
987         if (!pmb) {
988                 rc = 1;
989                 goto lpfc_handle_latt_err_exit;
990         }
991
992         mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
993         if (!mp) {
994                 rc = 2;
995                 goto lpfc_handle_latt_free_pmb;
996         }
997
998         mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
999         if (!mp->virt) {
1000                 rc = 3;
1001                 goto lpfc_handle_latt_free_mp;
1002         }
1003
1004         /* Cleanup any outstanding ELS commands */
1005         lpfc_els_flush_all_cmd(phba);
1006
1007         psli->slistat.link_event++;
1008         lpfc_read_la(phba, pmb, mp);
1009         pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
1010         pmb->vport = vport;
1011         /* Block ELS IOCBs until we have processed this mbox command */
1012         phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
1013         rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
1014         if (rc == MBX_NOT_FINISHED) {
1015                 rc = 4;
1016                 goto lpfc_handle_latt_free_mbuf;
1017         }
1018
1019         /* Clear Link Attention in HA REG */
1020         spin_lock_irq(&phba->hbalock);
1021         writel(HA_LATT, phba->HAregaddr);
1022         readl(phba->HAregaddr); /* flush */
1023         spin_unlock_irq(&phba->hbalock);
1024
1025         return;
1026
1027 lpfc_handle_latt_free_mbuf:
1028         phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
1029         lpfc_mbuf_free(phba, mp->virt, mp->phys);
1030 lpfc_handle_latt_free_mp:
1031         kfree(mp);
1032 lpfc_handle_latt_free_pmb:
1033         mempool_free(pmb, phba->mbox_mem_pool);
1034 lpfc_handle_latt_err_exit:
1035         /* Enable Link attention interrupts */
1036         spin_lock_irq(&phba->hbalock);
1037         psli->sli_flag |= LPFC_PROCESS_LA;
1038         control = readl(phba->HCregaddr);
1039         control |= HC_LAINT_ENA;
1040         writel(control, phba->HCregaddr);
1041         readl(phba->HCregaddr); /* flush */
1042
1043         /* Clear Link Attention in HA REG */
1044         writel(HA_LATT, phba->HAregaddr);
1045         readl(phba->HAregaddr); /* flush */
1046         spin_unlock_irq(&phba->hbalock);
1047         lpfc_linkdown(phba);
1048         phba->link_state = LPFC_HBA_ERROR;
1049
1050         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1051                      "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
1052
1053         return;
1054 }
1055
1056 /**
1057  * lpfc_parse_vpd: Parse VPD (Vital Product Data).
1058  * @phba: pointer to lpfc hba data structure.
1059  * @vpd: pointer to the vital product data.
1060  * @len: length of the vital product data in bytes.
1061  *
1062  * This routine parses the Vital Product Data (VPD). The VPD is treated as
1063  * an array of characters. In this routine, the ModelName, ProgramType, and
1064  * ModelDesc, etc. fields of the phba data structure will be populated.
1065  *
1066  * Return codes
1067  *   0 - pointer to the VPD passed in is NULL
1068  *   1 - success
1069  **/
1070 static int
1071 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
1072 {
1073         uint8_t lenlo, lenhi;
1074         int Length;
1075         int i, j;
1076         int finished = 0;
1077         int index = 0;
1078
1079         if (!vpd)
1080                 return 0;
1081
1082         /* Vital Product */
1083         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1084                         "0455 Vital Product Data: x%x x%x x%x x%x\n",
1085                         (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
1086                         (uint32_t) vpd[3]);
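        /*
         * Walk the PCI VPD resource tags: 0x82 and 0x91 resources are skipped
         * over, 0x90 (VPD-R) is scanned for the SN and V1-V4 keywords decoded
         * below, and 0x78 marks the end of the VPD.
         */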
1087         while (!finished && (index < (len - 4))) {
1088                 switch (vpd[index]) {
1089                 case 0x82:
1090                 case 0x91:
1091                         index += 1;
1092                         lenlo = vpd[index];
1093                         index += 1;
1094                         lenhi = vpd[index];
1095                         index += 1;
1096                         i = ((((unsigned short)lenhi) << 8) + lenlo);
1097                         index += i;
1098                         break;
1099                 case 0x90:
1100                         index += 1;
1101                         lenlo = vpd[index];
1102                         index += 1;
1103                         lenhi = vpd[index];
1104                         index += 1;
1105                         Length = ((((unsigned short)lenhi) << 8) + lenlo);
1106                         if (Length > len - index)
1107                                 Length = len - index;
1108                         while (Length > 0) {
1109                         /* Look for Serial Number */
1110                         if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
1111                                 index += 2;
1112                                 i = vpd[index];
1113                                 index += 1;
1114                                 j = 0;
1115                                 Length -= (3+i);
1116                                 while(i--) {
1117                                         phba->SerialNumber[j++] = vpd[index++];
1118                                         if (j == 31)
1119                                                 break;
1120                                 }
1121                                 phba->SerialNumber[j] = 0;
1122                                 continue;
1123                         }
1124                         else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
1125                                 phba->vpd_flag |= VPD_MODEL_DESC;
1126                                 index += 2;
1127                                 i = vpd[index];
1128                                 index += 1;
1129                                 j = 0;
1130                                 Length -= (3+i);
1131                                 while(i--) {
1132                                         phba->ModelDesc[j++] = vpd[index++];
1133                                         if (j == 255)
1134                                                 break;
1135                                 }
1136                                 phba->ModelDesc[j] = 0;
1137                                 continue;
1138                         }
1139                         else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
1140                                 phba->vpd_flag |= VPD_MODEL_NAME;
1141                                 index += 2;
1142                                 i = vpd[index];
1143                                 index += 1;
1144                                 j = 0;
1145                                 Length -= (3+i);
1146                                 while(i--) {
1147                                         phba->ModelName[j++] = vpd[index++];
1148                                         if (j == 79)
1149                                                 break;
1150                                 }
1151                                 phba->ModelName[j] = 0;
1152                                 continue;
1153                         }
1154                         else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
1155                                 phba->vpd_flag |= VPD_PROGRAM_TYPE;
1156                                 index += 2;
1157                                 i = vpd[index];
1158                                 index += 1;
1159                                 j = 0;
1160                                 Length -= (3+i);
1161                                 while(i--) {
1162                                         phba->ProgramType[j++] = vpd[index++];
1163                                         if (j == 255)
1164                                                 break;
1165                                 }
1166                                 phba->ProgramType[j] = 0;
1167                                 continue;
1168                         }
1169                         else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
1170                                 phba->vpd_flag |= VPD_PORT;
1171                                 index += 2;
1172                                 i = vpd[index];
1173                                 index += 1;
1174                                 j = 0;
1175                                 Length -= (3+i);
1176                                 while(i--) {
1177                                 phba->Port[j++] = vpd[index++];
1178                                 if (j == 19)
1179                                         break;
1180                                 }
1181                                 phba->Port[j] = 0;
1182                                 continue;
1183                         }
1184                         else {
1185                                 index += 2;
1186                                 i = vpd[index];
1187                                 index += 1;
1188                                 index += i;
1189                                 Length -= (3 + i);
1190                         }
1191                 }
1192                 finished = 0;
1193                 break;
1194                 case 0x78:
1195                         finished = 1;
1196                         break;
1197                 default:
1198                         index ++;
1199                         break;
1200                 }
1201         }
1202
1203         return(1);
1204 }
1205
1206 /**
1207  * lpfc_get_hba_model_desc: Retrieve HBA device model name and description.
1208  * @phba: pointer to lpfc hba data structure.
1209  * @mdp: pointer to the data structure to hold the derived model name.
1210  * @descp: pointer to the data structure to hold the derived description.
1211  *
1212  * This routine retrieves HBA's description based on its registered PCI device
1213  * ID. The @descp passed into this function points to an array of 256 chars. It
1214  * shall be returned with the model name, maximum speed, and the host bus type.
1215  * The @mdp passed into this function points to an array of 80 chars. When the
1216  * function returns, the @mdp will be filled with the model name.
1217  **/
1218 static void
1219 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1220 {
1221         lpfc_vpd_t *vp;
1222         uint16_t dev_id = phba->pcidev->device;
1223         int max_speed;
1224         int GE = 0;
1225         struct {
1226                 char * name;
1227                 int    max_speed;
1228                 char * bus;
1229         } m = {"<Unknown>", 0, ""};
1230
1231         if (mdp && mdp[0] != '\0'
1232                 && descp && descp[0] != '\0')
1233                 return;
1234
1235         if (phba->lmt & LMT_10Gb)
1236                 max_speed = 10;
1237         else if (phba->lmt & LMT_8Gb)
1238                 max_speed = 8;
1239         else if (phba->lmt & LMT_4Gb)
1240                 max_speed = 4;
1241         else if (phba->lmt & LMT_2Gb)
1242                 max_speed = 2;
1243         else
1244                 max_speed = 1;
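        /*
         * max_speed is derived from the link-module type bits (phba->lmt)
         * reported by READ_CONFIG and is folded into the model information
         * selected below.
         */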
1245
1246         vp = &phba->vpd;
1247
1248         switch (dev_id) {
1249         case PCI_DEVICE_ID_FIREFLY:
1250                 m = (typeof(m)){"LP6000", max_speed, "PCI"};
1251                 break;
1252         case PCI_DEVICE_ID_SUPERFLY:
1253                 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
1254                         m = (typeof(m)){"LP7000", max_speed,  "PCI"};
1255                 else
1256                         m = (typeof(m)){"LP7000E", max_speed, "PCI"};
1257                 break;
1258         case PCI_DEVICE_ID_DRAGONFLY:
1259                 m = (typeof(m)){"LP8000", max_speed, "PCI"};
1260                 break;
1261         case PCI_DEVICE_ID_CENTAUR:
1262                 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
1263                         m = (typeof(m)){"LP9002", max_speed, "PCI"};
1264                 else
1265                         m = (typeof(m)){"LP9000", max_speed, "PCI"};
1266                 break;
1267         case PCI_DEVICE_ID_RFLY:
1268                 m = (typeof(m)){"LP952", max_speed, "PCI"};
1269                 break;
1270         case PCI_DEVICE_ID_PEGASUS:
1271                 m = (typeof(m)){"LP9802", max_speed, "PCI-X"};
1272                 break;
1273         case PCI_DEVICE_ID_THOR:
1274                 m = (typeof(m)){"LP10000", max_speed, "PCI-X"};
1275                 break;
1276         case PCI_DEVICE_ID_VIPER:
1277                 m = (typeof(m)){"LPX1000", max_speed,  "PCI-X"};
1278                 break;
1279         case PCI_DEVICE_ID_PFLY:
1280                 m = (typeof(m)){"LP982", max_speed, "PCI-X"};
1281                 break;
1282         case PCI_DEVICE_ID_TFLY:
1283                 m = (typeof(m)){"LP1050", max_speed, "PCI-X"};
1284                 break;
1285         case PCI_DEVICE_ID_HELIOS:
1286                 m = (typeof(m)){"LP11000", max_speed, "PCI-X2"};
1287                 break;
1288         case PCI_DEVICE_ID_HELIOS_SCSP:
1289                 m = (typeof(m)){"LP11000-SP", max_speed, "PCI-X2"};
1290                 break;
1291         case PCI_DEVICE_ID_HELIOS_DCSP:
1292                 m = (typeof(m)){"LP11002-SP", max_speed, "PCI-X2"};
1293                 break;
1294         case PCI_DEVICE_ID_NEPTUNE:
1295                 m = (typeof(m)){"LPe1000", max_speed, "PCIe"};
1296                 break;
1297         case PCI_DEVICE_ID_NEPTUNE_SCSP:
1298                 m = (typeof(m)){"LPe1000-SP", max_speed, "PCIe"};
1299                 break;
1300         case PCI_DEVICE_ID_NEPTUNE_DCSP:
1301                 m = (typeof(m)){"LPe1002-SP", max_speed, "PCIe"};
1302                 break;
1303         case PCI_DEVICE_ID_BMID:
1304                 m = (typeof(m)){"LP1150", max_speed, "PCI-X2"};
1305                 break;
1306         case PCI_DEVICE_ID_BSMB:
1307                 m = (typeof(m)){"LP111", max_speed, "PCI-X2"};
1308                 break;
1309         case PCI_DEVICE_ID_ZEPHYR:
1310                 m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
1311                 break;
1312         case PCI_DEVICE_ID_ZEPHYR_SCSP:
1313                 m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
1314                 break;
1315         case PCI_DEVICE_ID_ZEPHYR_DCSP:
1316                 m = (typeof(m)){"LPe11002-SP", max_speed, "PCIe"};
1317                 break;
1318         case PCI_DEVICE_ID_ZMID:
1319                 m = (typeof(m)){"LPe1150", max_speed, "PCIe"};
1320                 break;
1321         case PCI_DEVICE_ID_ZSMB:
1322                 m = (typeof(m)){"LPe111", max_speed, "PCIe"};
1323                 break;
1324         case PCI_DEVICE_ID_LP101:
1325                 m = (typeof(m)){"LP101", max_speed, "PCI-X"};
1326                 break;
1327         case PCI_DEVICE_ID_LP10000S:
1328                 m = (typeof(m)){"LP10000-S", max_speed, "PCI"};
1329                 break;
1330         case PCI_DEVICE_ID_LP11000S:
1331                 m = (typeof(m)){"LP11000-S", max_speed,
1332                         "PCI-X2"};
1333                 break;
1334         case PCI_DEVICE_ID_LPE11000S:
1335                 m = (typeof(m)){"LPe11000-S", max_speed,
1336                         "PCIe"};
1337                 break;
1338         case PCI_DEVICE_ID_SAT:
1339                 m = (typeof(m)){"LPe12000", max_speed, "PCIe"};
1340                 break;
1341         case PCI_DEVICE_ID_SAT_MID:
1342                 m = (typeof(m)){"LPe1250", max_speed, "PCIe"};
1343                 break;
1344         case PCI_DEVICE_ID_SAT_SMB:
1345                 m = (typeof(m)){"LPe121", max_speed, "PCIe"};
1346                 break;
1347         case PCI_DEVICE_ID_SAT_DCSP:
1348                 m = (typeof(m)){"LPe12002-SP", max_speed, "PCIe"};
1349                 break;
1350         case PCI_DEVICE_ID_SAT_SCSP:
1351                 m = (typeof(m)){"LPe12000-SP", max_speed, "PCIe"};
1352                 break;
1353         case PCI_DEVICE_ID_SAT_S:
1354                 m = (typeof(m)){"LPe12000-S", max_speed, "PCIe"};
1355                 break;
1356         case PCI_DEVICE_ID_HORNET:
1357                 m = (typeof(m)){"LP21000", max_speed, "PCIe"};
1358                 GE = 1;
1359                 break;
1360         case PCI_DEVICE_ID_PROTEUS_VF:
1361                 m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"};
1362                 break;
1363         case PCI_DEVICE_ID_PROTEUS_PF:
1364                 m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"};
1365                 break;
1366         case PCI_DEVICE_ID_PROTEUS_S:
1367                 m = (typeof(m)) {"LPemv12002-S", max_speed, "PCIe IOV"};
1368                 break;
1369         default:
1370                 m = (typeof(m)){ NULL };
1371                 break;
1372         }
1373
1374         if (mdp && mdp[0] == '\0')
1375                 snprintf(mdp, 79,"%s", m.name);
1376         if (descp && descp[0] == '\0')
1377                 snprintf(descp, 255,
1378                         "Emulex %s %d%s %s %s",
1379                         m.name, m.max_speed,
1380                         (GE) ? "GE" : "Gb",
1381                         m.bus,
1382                         (GE) ? "FCoE Adapter" : "Fibre Channel Adapter");
1383 }
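/*
 * Caller sketch (hypothetical, not taken from the driver): the buffers must
 * match the sizes documented above, and buffers that already carry data are
 * left untouched because of the early return at the top of the routine.
 *
 *	uint8_t model[80] = "", desc[256] = "";
 *
 *	lpfc_get_hba_model_desc(phba, model, desc);
 */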
1384
1385 /**
1386  * lpfc_post_buffer: Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring.
1387  * @phba: pointer to lpfc hba data structure.
1388  * @pring: pointer to an IOCB ring.
1389  * @cnt: the number of IOCBs to be posted to the IOCB ring.
1390  *
1391  * This routine posts a given number of IOCBs with the associated DMA buffer
1392  * descriptors specified by the cnt argument to the given IOCB ring.
1393  *
1394  * Return codes
1395  *   The number of IOCBs NOT able to be posted to the IOCB ring.
1396  **/
1397 int
1398 lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
1399 {
1400         IOCB_t *icmd;
1401         struct lpfc_iocbq *iocb;
1402         struct lpfc_dmabuf *mp1, *mp2;
1403
1404         cnt += pring->missbufcnt;
1405
1406         /* While there are buffers to post */
1407         while (cnt > 0) {
1408                 /* Allocate buffer for  command iocb */
1409                 iocb = lpfc_sli_get_iocbq(phba);
1410                 if (iocb == NULL) {
1411                         pring->missbufcnt = cnt;
1412                         return cnt;
1413                 }
1414                 icmd = &iocb->iocb;
1415
1416                 /* 2 buffers can be posted per command */
1417                 /* Allocate buffer to post */
1418                 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
1419                 if (mp1)
1420                     mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
1421                 if (!mp1 || !mp1->virt) {
1422                         kfree(mp1);
1423                         lpfc_sli_release_iocbq(phba, iocb);
1424                         pring->missbufcnt = cnt;
1425                         return cnt;
1426                 }
1427
1428                 INIT_LIST_HEAD(&mp1->list);
1429                 /* Allocate buffer to post */
1430                 if (cnt > 1) {
1431                         mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
1432                         if (mp2)
1433                                 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
1434                                                             &mp2->phys);
1435                         if (!mp2 || !mp2->virt) {
1436                                 kfree(mp2);
1437                                 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
1438                                 kfree(mp1);
1439                                 lpfc_sli_release_iocbq(phba, iocb);
1440                                 pring->missbufcnt = cnt;
1441                                 return cnt;
1442                         }
1443
1444                         INIT_LIST_HEAD(&mp2->list);
1445                 } else {
1446                         mp2 = NULL;
1447                 }
1448
1449                 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
1450                 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
1451                 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
1452                 icmd->ulpBdeCount = 1;
1453                 cnt--;
1454                 if (mp2) {
1455                         icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
1456                         icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
1457                         icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
1458                         cnt--;
1459                         icmd->ulpBdeCount = 2;
1460                 }
1461
1462                 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
1463                 icmd->ulpLe = 1;
1464
1465                 if (lpfc_sli_issue_iocb(phba, pring, iocb, 0) == IOCB_ERROR) {
1466                         lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
1467                         kfree(mp1);
1468                         cnt++;
1469                         if (mp2) {
1470                                 lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
1471                                 kfree(mp2);
1472                                 cnt++;
1473                         }
1474                         lpfc_sli_release_iocbq(phba, iocb);
1475                         pring->missbufcnt = cnt;
1476                         return cnt;
1477                 }
1478                 lpfc_sli_ringpostbuf_put(phba, pring, mp1);
1479                 if (mp2)
1480                         lpfc_sli_ringpostbuf_put(phba, pring, mp2);
1481         }
1482         pring->missbufcnt = 0;
1483         return 0;
1484 }
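/*
 * Note on the return value: any shortfall is remembered in pring->missbufcnt
 * and folded back into the count on the next call, so a caller may simply
 * retry later; lpfc_post_rcv_buf() below shows the initial-posting use.
 */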
1485
1486 /**
1487  * lpfc_post_rcv_buf: Post the initial receive IOCB buffers to ELS ring.
1488  * @phba: pointer to lpfc hba data structure.
1489  *
1490  * This routine posts initial receive IOCB buffers to the ELS ring. The
1491  * current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
1492  * set to 64 IOCBs.
1493  *
1494  * Return codes
1495  *   0 - success (currently always success)
1496  **/
1497 static int
1498 lpfc_post_rcv_buf(struct lpfc_hba *phba)
1499 {
1500         struct lpfc_sli *psli = &phba->sli;
1501
1502         /* Ring 0, ELS / CT buffers */
1503         lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
1504         /* Ring 2 - FCP no buffers needed */
1505
1506         return 0;
1507 }
1508
1509 #define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
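/*
 * S(N, V) rotates the 32-bit value V left by N bits (rotl32); for example,
 * S(1, 0x80000000) yields 0x00000001. It is the rotation primitive used by
 * the SHA-1 style hashing below.
 */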
1510
1511 /**
1512  * lpfc_sha_init: Set up initial array of hash table entries.
1513  * @HashResultPointer: pointer to an array as hash table.
1514  *
1515  * This routine sets up the initial values to the array of hash table entries
1516  * for the LC HBAs.
1517  **/
1518 static void
1519 lpfc_sha_init(uint32_t * HashResultPointer)
1520 {
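        /* Standard SHA-1 initial chaining values H0..H4 (FIPS 180-1) */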
1521         HashResultPointer[0] = 0x67452301;
1522         HashResultPointer[1] = 0xEFCDAB89;
1523         HashResultPointer[2] = 0x98BADCFE;
1524         HashResultPointer[3] = 0x10325476;
1525         HashResultPointer[4] = 0xC3D2E1F0;
1526 }
1527
1528 /**
1529  * lpfc_sha_iterate: Iterate initial hash table with the working hash table.
1530  * @HashResultPointer: pointer to an initial/result hash table.
1531  * @HashWorkingPointer: pointer to a working hash table.
1532  *
1533  * This routine iterates over an initial hash table pointed to by
1534  * @HashResultPointer with the values from the working hash table pointed to
1535  * by @HashWorkingPointer. The results are put back into the initial hash
1536  * table and returned through @HashResultPointer as the result hash table.
1537  **/
1538 static void
1539 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
1540 {
1541         int t;
1542         uint32_t TEMP;
1543         uint32_t A, B, C, D, E;
1544         t = 16;
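        /*
         * Expand the 16 input words into the 80-entry message schedule
         * W[16..79], exactly as in the SHA-1 compression function.
         */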
1545         do {
1546                 HashWorkingPointer[t] =
1547                     S(1,
1548                       HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
1549                                                                      8] ^
1550                       HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
1551         } while (++t <= 79);
1552         t = 0;
1553         A = HashResultPointer[0];
1554         B = HashResultPointer[1];
1555         C = HashResultPointer[2];
1556         D = HashResultPointer[3];
1557         E = HashResultPointer[4];
1558
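        /*
         * 80 SHA-1 style rounds; the boolean function and additive constant
         * change every 20 rounds (Ch/0x5A827999, Parity/0x6ED9EBA1,
         * Maj/0x8F1BBCDC, Parity/0xCA62C1D6).
         */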
1559         do {
1560                 if (t < 20) {
1561                         TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
1562                 } else if (t < 40) {
1563                         TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
1564                 } else if (t < 60) {
1565                         TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
1566                 } else {
1567                         TEMP = (B ^ C ^ D) + 0xCA62C1D6;
1568                 }
1569                 TEMP += S(5, A) + E + HashWorkingPointer[t];
1570                 E = D;
1571                 D = C;
1572                 C = S(30, B);
1573                 B = A;
1574                 A = TEMP;
1575         } while (++t <= 79);
1576
1577         HashResultPointer[0] += A;
1578         HashResultPointer[1] += B;
1579         HashResultPointer[2] += C;
1580         HashResultPointer[3] += D;
1581         HashResultPointer[4] += E;
1582
1583 }
1584
1585 /**
1586  * lpfc_challenge_key: Create challenge key based on WWPN of the HBA.
1587  * @RandomChallenge: pointer to the entry of host challenge random number array.
1588  * @HashWorking: pointer to the entry of the working hash array.
1589  *
1590  * This routine calculates the working hash array entry referred to by
1591  * @HashWorking from the challenge random number associated with the host,
1592  * referred to by @RandomChallenge. The result is put into the entry of the
1593  * working hash array and returned by reference through @HashWorking.
1594  **/
1595 static void
1596 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
1597 {
1598         *HashWorking = (*RandomChallenge ^ *HashWorking);
1599 }
1600
1601 /**
1602  * lpfc_hba_init: Perform special handling for LC HBA initialization.
1603  * @phba: pointer to lpfc hba data structure.
1604  * @hbainit: pointer to an array of unsigned 32-bit integers.
1605  *
1606  * This routine performs the special handling for LC HBA initialization. It
 * mixes the adapter WWNN and the random challenge data into a SHA-1 style
 * digest and returns the result through @hbainit.
1607  **/
1608 void
1609 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
1610 {
1611         int t;
1612         uint32_t *HashWorking;
1613         uint32_t *pwwnn = (uint32_t *) phba->wwnn;
1614
1615         HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
1616         if (!HashWorking)
1617                 return;
1618
1619         HashWorking[0] = HashWorking[78] = *pwwnn++;
1620         HashWorking[1] = HashWorking[79] = *pwwnn;
1621
1622         for (t = 0; t < 7; t++)
1623                 lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
1624
1625         lpfc_sha_init(hbainit);
1626         lpfc_sha_iterate(hbainit, HashWorking);
1627         kfree(HashWorking);
1628 }
1629
1630 /**
1631  * lpfc_cleanup: Performs vport cleanups before deleting a vport.
1632  * @vport: pointer to a virtual N_Port data structure.
1633  *
1634  * This routine performs the necessary cleanups before deleting the @vport.
1635  * It invokes the discovery state machine to perform necessary state
1636  * transitions and to release the ndlps associated with the @vport. Note,
1637  * the physical port is treated as @vport 0.
1638  **/
1639 void
1640 lpfc_cleanup(struct lpfc_vport *vport)
1641 {
1642         struct lpfc_hba   *phba = vport->phba;
1643         struct lpfc_nodelist *ndlp, *next_ndlp;
1644         int i = 0;
1645
1646         if (phba->link_state > LPFC_LINK_DOWN)
1647                 lpfc_port_link_failure(vport);
1648
1649         list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
1650                 if (!NLP_CHK_NODE_ACT(ndlp)) {
1651                         ndlp = lpfc_enable_node(vport, ndlp,
1652                                                 NLP_STE_UNUSED_NODE);
1653                         if (!ndlp)
1654                                 continue;
1655                         spin_lock_irq(&phba->ndlp_lock);
1656                         NLP_SET_FREE_REQ(ndlp);
1657                         spin_unlock_irq(&phba->ndlp_lock);
1658                         /* Trigger the release of the ndlp memory */
1659                         lpfc_nlp_put(ndlp);
1660                         continue;
1661                 }
1662                 spin_lock_irq(&phba->ndlp_lock);
1663                 if (NLP_CHK_FREE_REQ(ndlp)) {
1664                         /* The ndlp should not be in memory free mode already */
1665                         spin_unlock_irq(&phba->ndlp_lock);
1666                         continue;
1667                 } else
1668                         /* Indicate request for freeing ndlp memory */
1669                         NLP_SET_FREE_REQ(ndlp);
1670                 spin_unlock_irq(&phba->ndlp_lock);
1671
1672                 if (vport->port_type != LPFC_PHYSICAL_PORT &&
1673                     ndlp->nlp_DID == Fabric_DID) {
1674                         /* Just free up ndlp with Fabric_DID for vports */
1675                         lpfc_nlp_put(ndlp);
1676                         continue;
1677                 }
1678
1679                 if (ndlp->nlp_type & NLP_FABRIC)
1680                         lpfc_disc_state_machine(vport, ndlp, NULL,
1681                                         NLP_EVT_DEVICE_RECOVERY);
1682
1683                 lpfc_disc_state_machine(vport, ndlp, NULL,
1684                                              NLP_EVT_DEVICE_RM);
1685
1686         }
1687
1688         /* At this point, ALL ndlp's should be gone
1689          * because of the previous NLP_EVT_DEVICE_RM.
1690          * Let's wait for this to happen, if needed.
1691          */
1692         while (!list_empty(&vport->fc_nodes)) {
1693
1694                 if (i++ > 3000) {
1695                         lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
1696                                 "0233 Nodelist not empty\n");
1697                         list_for_each_entry_safe(ndlp, next_ndlp,
1698                                                 &vport->fc_nodes, nlp_listp) {
1699                                 lpfc_printf_vlog(ndlp->vport, KERN_ERR,
1700                                                 LOG_NODE,
1701                                                 "0282 did:x%x ndlp:x%p "
1702                                                 "usgmap:x%x refcnt:%d\n",
1703                                                 ndlp->nlp_DID, (void *)ndlp,
1704                                                 ndlp->nlp_usg_map,
1705                                                 atomic_read(
1706                                                         &ndlp->kref.refcount));
1707                         }
1708                         break;
1709                 }
1710
1711                 /* Wait for any activity on ndlps to settle */
1712                 msleep(10);
1713         }
1714         return;
1715 }
1716
1717 /**
1718  * lpfc_stop_vport_timers: Stop all the timers associated with a vport.
1719  * @vport: pointer to a virtual N_Port data structure.
1720  *
1721  * This routine stops all the timers associated with a @vport. This function
1722  * is invoked before disabling or deleting a @vport. Note that the physical
1723  * port is treated as @vport 0.
1724  **/
1725 void
1726 lpfc_stop_vport_timers(struct lpfc_vport *vport)
1727 {
1728         del_timer_sync(&vport->els_tmofunc);
1729         del_timer_sync(&vport->fc_fdmitmo);
1730         lpfc_can_disctmo(vport);
1731         return;
1732 }
1733
1734 /**
1735  * lpfc_stop_phba_timers: Stop all the timers associated with an HBA.
1736  * @phba: pointer to lpfc hba data structure.
1737  *
1738  * This routine stops all the timers associated with an HBA. This function is
1739  * invoked before either putting an HBA offline or unloading the driver.
1740  **/
1741 static void
1742 lpfc_stop_phba_timers(struct lpfc_hba *phba)
1743 {
1744         del_timer_sync(&phba->fcp_poll_timer);
1745         lpfc_stop_vport_timers(phba->pport);
1746         del_timer_sync(&phba->sli.mbox_tmo);
1747         del_timer_sync(&phba->fabric_block_timer);
1748         phba->hb_outstanding = 0;
1749         del_timer_sync(&phba->hb_tmofunc);
1750         del_timer_sync(&phba->eratt_poll);
1751         return;
1752 }
1753
1754 /**
1755  * lpfc_block_mgmt_io: Mark an HBA's management interface as blocked.
1756  * @phba: pointer to lpfc hba data structure.
1757  *
1758  * This routine marks an HBA's management interface as blocked. Once the HBA's
1759  * management interface is marked as blocked, all user space access to the
1760  * HBA, whether from the sysfs interface or the libdfc interface, will be
1761  * blocked. The HBA is set to block the management interface when the driver
1762  * prepares the HBA interface for online or offline.
1763  **/
1764 static void
1765 lpfc_block_mgmt_io(struct lpfc_hba * phba)
1766 {
1767         unsigned long iflag;
1768
1769         spin_lock_irqsave(&phba->hbalock, iflag);
1770         phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
1771         spin_unlock_irqrestore(&phba->hbalock, iflag);
1772 }
1773
1774 /**
1775  * lpfc_online: Initialize and bring an HBA online.
1776  * @phba: pointer to lpfc hba data structure.
1777  *
1778  * This routine initializes the HBA and brings it online. During this
1779  * process, the management interface is blocked to prevent user space access
1780  * to the HBA interfering with the driver initialization.
1781  *
1782  * Return codes
1783  *   0 - successful
1784  *   1 - failed
1785  **/
1786 int
1787 lpfc_online(struct lpfc_hba *phba)
1788 {
1789         struct lpfc_vport *vport = phba->pport;
1790         struct lpfc_vport **vports;
1791         int i;
1792
1793         if (!phba)
1794                 return 0;
1795
1796         if (!(vport->fc_flag & FC_OFFLINE_MODE))
1797                 return 0;
1798
1799         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1800                         "0458 Bring Adapter online\n");
1801
1802         lpfc_block_mgmt_io(phba);
1803
1804         if (!lpfc_sli_queue_setup(phba)) {
1805                 lpfc_unblock_mgmt_io(phba);
1806                 return 1;
1807         }
1808
1809         if (lpfc_sli_hba_setup(phba)) { /* Initialize the HBA */
1810                 lpfc_unblock_mgmt_io(phba);
1811                 return 1;
1812         }
1813
1814         vports = lpfc_create_vport_work_array(phba);
1815         if (vports != NULL)
1816                 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1817                         struct Scsi_Host *shost;
1818                         shost = lpfc_shost_from_vport(vports[i]);
1819                         spin_lock_irq(shost->host_lock);
1820                         vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
1821                         if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
1822                                 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
1823                         spin_unlock_irq(shost->host_lock);
1824                 }
1825         lpfc_destroy_vport_work_array(phba, vports);
1826
1827         lpfc_unblock_mgmt_io(phba);
1828         return 0;
1829 }
1830
1831 /**
1832  * lpfc_unblock_mgmt_io: Mark an HBA's management interface as unblocked.
1833  * @phba: pointer to lpfc hba data structure.
1834  *
1835  * This routine marks an HBA's management interface as not blocked. Once the
1836  * HBA's management interface is marked as not blocked, all user space
1837  * access to the HBA, whether from the sysfs interface or the libdfc
1838  * interface, will be allowed. The HBA is set to block the management interface
1839  * when the driver prepares the HBA interface for online or offline and then
1840  * set to unblock the management interface afterwards.
1841  **/
1842 void
1843 lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
1844 {
1845         unsigned long iflag;
1846
1847         spin_lock_irqsave(&phba->hbalock, iflag);
1848         phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
1849         spin_unlock_irqrestore(&phba->hbalock, iflag);
1850 }
1851
1852 /**
1853  * lpfc_offline_prep: Prepare an HBA to be brought offline.
1854  * @phba: pointer to lpfc hba data structure.
1855  *
1856  * This routine is invoked to prepare an HBA to be brought offline. It
1857  * unregisters the login (unreg_rpi) for all the nodes on all vports and
1858  * flushes the mailbox queue to make the HBA ready to be brought offline.
1859  **/
1860 void
1861 lpfc_offline_prep(struct lpfc_hba * phba)
1862 {
1863         struct lpfc_vport *vport = phba->pport;
1864         struct lpfc_nodelist  *ndlp, *next_ndlp;
1865         struct lpfc_vport **vports;
1866         int i;
1867
1868         if (vport->fc_flag & FC_OFFLINE_MODE)
1869                 return;
1870
1871         lpfc_block_mgmt_io(phba);
1872
1873         lpfc_linkdown(phba);
1874
1875         /* Issue an unreg_login to all nodes on all vports */
1876         vports = lpfc_create_vport_work_array(phba);
1877         if (vports != NULL) {
1878                 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1879                         struct Scsi_Host *shost;
1880
1881                         if (vports[i]->load_flag & FC_UNLOADING)
1882                                 continue;
1883                         shost = lpfc_shost_from_vport(vports[i]);
1884                         list_for_each_entry_safe(ndlp, next_ndlp,
1885                                                  &vports[i]->fc_nodes,
1886                                                  nlp_listp) {
1887                                 if (!NLP_CHK_NODE_ACT(ndlp))
1888                                         continue;
1889                                 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
1890                                         continue;
1891                                 if (ndlp->nlp_type & NLP_FABRIC) {
1892                                         lpfc_disc_state_machine(vports[i], ndlp,
1893                                                 NULL, NLP_EVT_DEVICE_RECOVERY);
1894                                         lpfc_disc_state_machine(vports[i], ndlp,
1895                                                 NULL, NLP_EVT_DEVICE_RM);
1896                                 }
1897                                 spin_lock_irq(shost->host_lock);
1898                                 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
1899                                 spin_unlock_irq(shost->host_lock);
1900                                 lpfc_unreg_rpi(vports[i], ndlp);
1901                         }
1902                 }
1903         }
1904         lpfc_destroy_vport_work_array(phba, vports);
1905
1906         lpfc_sli_flush_mbox_queue(phba);
1907 }
1908
1909 /**
1910  * lpfc_offline: Bring an HBA offline.
1911  * @phba: pointer to lpfc hba data structure.
1912  *
1913  * This routine actually brings an HBA offline. It stops all the timers
1914  * associated with the HBA, brings down the SLI layer, and eventually
1915  * marks the HBA as being in the offline state for the upper layer protocol.
1916  **/
1917 void
1918 lpfc_offline(struct lpfc_hba *phba)
1919 {
1920         struct Scsi_Host  *shost;
1921         struct lpfc_vport **vports;
1922         int i;
1923
1924         if (phba->pport->fc_flag & FC_OFFLINE_MODE)
1925                 return;
1926
1927         /* stop all timers associated with this hba */
1928         lpfc_stop_phba_timers(phba);
1929         vports = lpfc_create_vport_work_array(phba);
1930         if (vports != NULL)
1931                 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++)
1932                         lpfc_stop_vport_timers(vports[i]);
1933         lpfc_destroy_vport_work_array(phba, vports);
1934         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1935                         "0460 Bring Adapter offline\n");
1936         /* Bring down the SLI Layer and cleanup.  The HBA is offline
1937            now.  */
1938         lpfc_sli_hba_down(phba);
1939         spin_lock_irq(&phba->hbalock);
1940         phba->work_ha = 0;
1941         spin_unlock_irq(&phba->hbalock);
1942         vports = lpfc_create_vport_work_array(phba);
1943         if (vports != NULL)
1944                 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1945                         shost = lpfc_shost_from_vport(vports[i]);
1946                         spin_lock_irq(shost->host_lock);
1947                         vports[i]->work_port_events = 0;
1948                         vports[i]->fc_flag |= FC_OFFLINE_MODE;
1949                         spin_unlock_irq(shost->host_lock);
1950                 }
1951         lpfc_destroy_vport_work_array(phba, vports);
1952 }
1953
1954 /**
1955  * lpfc_scsi_free: Free all the SCSI buffers and IOCBs from driver lists.
1956  * @phba: pointer to lpfc hba data structure.
1957  *
1958  * This routine frees all the SCSI buffers and IOCBs from the driver
1959  * lists back to the kernel. It is called from lpfc_pci_remove_one to free
1960  * the internal resources before the device is removed from the system.
1961  *
1962  * Return codes
1963  *   0 - successful (for now, it always returns 0)
1964  **/
1965 static int
1966 lpfc_scsi_free(struct lpfc_hba *phba)
1967 {
1968         struct lpfc_scsi_buf *sb, *sb_next;
1969         struct lpfc_iocbq *io, *io_next;
1970
1971         spin_lock_irq(&phba->hbalock);
1972         /* Release all the lpfc_scsi_bufs maintained by this host. */
1973         list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
1974                 list_del(&sb->list);
1975                 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
1976                               sb->dma_handle);
1977                 kfree(sb);
1978                 phba->total_scsi_bufs--;
1979         }
1980
1981         /* Release all the lpfc_iocbq entries maintained by this host. */
1982         list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
1983                 list_del(&io->list);
1984                 kfree(io);
1985                 phba->total_iocbq_bufs--;
1986         }
1987
1988         spin_unlock_irq(&phba->hbalock);
1989
1990         return 0;
1991 }
1992
1993 /**
1994  * lpfc_create_port: Create an FC port.
1995  * @phba: pointer to lpfc hba data structure.
1996  * @instance: a unique integer ID to this FC port.
1997  * @dev: pointer to the device data structure.
1998  *
1999  * This routine creates an FC port for the upper layer protocol. The FC port
2000  * can be created on top of either a physical port or a virtual port provided
2001  * by the HBA. This routine also allocates a SCSI host data structure (shost)
2002  * and associates it with the FC port before adding the shost to the SCSI
2003  * layer.
2004  *
2005  * Return codes
2006  *   @vport - pointer to the virtual N_Port data structure.
2007  *   NULL - port create failed.
2008  **/
2009 struct lpfc_vport *
2010 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
2011 {
2012         struct lpfc_vport *vport;
2013         struct Scsi_Host  *shost;
2014         int error = 0;
2015
2016         if (dev != &phba->pcidev->dev)
2017                 shost = scsi_host_alloc(&lpfc_vport_template,
2018                                         sizeof(struct lpfc_vport));
2019         else
2020                 shost = scsi_host_alloc(&lpfc_template,
2021                                         sizeof(struct lpfc_vport));
2022         if (!shost)
2023                 goto out;
2024
2025         vport = (struct lpfc_vport *) shost->hostdata;
2026         vport->phba = phba;
2027         vport->load_flag |= FC_LOADING;
2028         vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2029         vport->fc_rscn_flush = 0;
2030
2031         lpfc_get_vport_cfgparam(vport);
2032         shost->unique_id = instance;
2033         shost->max_id = LPFC_MAX_TARGET;
2034         shost->max_lun = vport->cfg_max_luns;
2035         shost->this_id = -1;
2036         shost->max_cmd_len = 16;
2037         /*
2038          * Set initial can_queue value since 0 is no longer supported and
2039          * scsi_add_host will fail. This will be adjusted later based on the
2040          * max xri value determined in hba setup.
2041          */
2042         shost->can_queue = phba->cfg_hba_queue_depth - 10;
2043         if (dev != &phba->pcidev->dev) {
2044                 shost->transportt = lpfc_vport_transport_template;
2045                 vport->port_type = LPFC_NPIV_PORT;
2046         } else {
2047                 shost->transportt = lpfc_transport_template;
2048                 vport->port_type = LPFC_PHYSICAL_PORT;
2049         }
2050
2051         /* Initialize all internally managed lists. */
2052         INIT_LIST_HEAD(&vport->fc_nodes);
2053         spin_lock_init(&vport->work_port_lock);
2054
2055         init_timer(&vport->fc_disctmo);
2056         vport->fc_disctmo.function = lpfc_disc_timeout;
2057         vport->fc_disctmo.data = (unsigned long)vport;
2058
2059         init_timer(&vport->fc_fdmitmo);
2060         vport->fc_fdmitmo.function = lpfc_fdmi_tmo;
2061         vport->fc_fdmitmo.data = (unsigned long)vport;
2062
2063         init_timer(&vport->els_tmofunc);
2064         vport->els_tmofunc.function = lpfc_els_timeout;
2065         vport->els_tmofunc.data = (unsigned long)vport;
2066
2067         error = scsi_add_host(shost, dev);
2068         if (error)
2069                 goto out_put_shost;
2070
2071         spin_lock_irq(&phba->hbalock);
2072         list_add_tail(&vport->listentry, &phba->port_list);
2073         spin_unlock_irq(&phba->hbalock);
2074         return vport;
2075
2076 out_put_shost:
2077         scsi_host_put(shost);
2078 out:
2079         return NULL;
2080 }
2081
2082 /**
2083  * destroy_port: Destroy an FC port.
2084  * @vport: pointer to an lpfc virtual N_Port data structure.
2085  *
2086  * This routine destroys an FC port from the upper layer protocol. All the
2087  * resources associated with the port are released.
2088  **/
2089 void
2090 destroy_port(struct lpfc_vport *vport)
2091 {
2092         struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2093         struct lpfc_hba  *phba = vport->phba;
2094
2095         lpfc_debugfs_terminate(vport);
2096         fc_remove_host(shost);
2097         scsi_remove_host(shost);
2098
2099         spin_lock_irq(&phba->hbalock);
2100         list_del_init(&vport->listentry);
2101         spin_unlock_irq(&phba->hbalock);
2102
2103         lpfc_cleanup(vport);
2104         return;
2105 }
2106
2107 /**
2108  * lpfc_get_instance: Get a unique integer ID.
2109  *
2110  * This routine allocates a unique integer ID from the lpfc_hba_index pool. It
2111  * uses the kernel idr facility to perform the task.
2112  *
2113  * Return codes:
2114  *   instance - a unique integer ID allocated as the new instance.
2115  *   -1 - lpfc get instance failed.
2116  **/
2117 int
2118 lpfc_get_instance(void)
2119 {
2120         int instance = 0;
2121
2122         /* Assign an unused number */
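        /* Legacy two-step idr API: idr_pre_get() preallocates memory for the
         * idr, then idr_get_new() hands out the next free integer ID.
         */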
2123         if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
2124                 return -1;
2125         if (idr_get_new(&lpfc_hba_index, NULL, &instance))
2126                 return -1;
2127         return instance;
2128 }
2129
2130 /**
2131  * lpfc_scan_finished: method for SCSI layer to detect whether scan is done.
2132  * @shost: pointer to SCSI host data structure.
2133  * @time: elapsed time of the scan in jiffies.
2134  *
2135  * This routine is called by the SCSI layer with a SCSI host to determine
2136  * whether the host scan is finished.
2137  *
2138  * Note: there is no scan_start function as adapter initialization will have
2139  * asynchronously kicked off the link initialization.
2140  *
2141  * Return codes
2142  *   0 - SCSI host scan is not over yet.
2143  *   1 - SCSI host scan is over.
2144  **/
2145 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
2146 {
2147         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2148         struct lpfc_hba   *phba = vport->phba;
2149         int stat = 0;
2150
2151         spin_lock_irq(shost->host_lock);
2152
2153         if (vport->load_flag & FC_UNLOADING) {
2154                 stat = 1;
2155                 goto finished;
2156         }
2157         if (time >= 30 * HZ) {
2158                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2159                                 "0461 Scanning longer than 30 "
2160                                 "seconds.  Continuing initialization\n");
2161                 stat = 1;
2162                 goto finished;
2163         }
2164         if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) {
2165                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2166                                 "0465 Link down longer than 15 "
2167                                 "seconds.  Continuing initialization\n");
2168                 stat = 1;
2169                 goto finished;
2170         }
2171
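        /*
         * Otherwise the scan is reported complete only when the vport is
         * ready, no discovery or PRLI activity is outstanding, a mapped node
         * exists (or at least 2 seconds have elapsed), and no mailbox command
         * is active.
         */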
2172         if (vport->port_state != LPFC_VPORT_READY)
2173                 goto finished;
2174         if (vport->num_disc_nodes || vport->fc_prli_sent)
2175                 goto finished;
2176         if (vport->fc_map_cnt == 0 && time < 2 * HZ)
2177                 goto finished;
2178         if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
2179                 goto finished;
2180
2181         stat = 1;
2182
2183 finished:
2184         spin_unlock_irq(shost->host_lock);
2185         return stat;
2186 }
2187
2188 /**
2189  * lpfc_host_attrib_init: Initialize SCSI host attributes on an FC port.
2190  * @shost: pointer to SCSI host data structure.
2191  *
2192  * This routine initializes the SCSI host attributes for a given FC port. The
2193  * SCSI host can be on top of either a physical port or a virtual port.
2194  **/
2195 void lpfc_host_attrib_init(struct Scsi_Host *shost)
2196 {
2197         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2198         struct lpfc_hba   *phba = vport->phba;
2199         /*
2200          * Set fixed host attributes.  Must be done after lpfc_sli_hba_setup().
2201          */
2202
2203         fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
2204         fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
2205         fc_host_supported_classes(shost) = FC_COS_CLASS3;
2206
2207         memset(fc_host_supported_fc4s(shost), 0,
2208                sizeof(fc_host_supported_fc4s(shost)));
2209         fc_host_supported_fc4s(shost)[2] = 1;
2210         fc_host_supported_fc4s(shost)[7] = 1;
2211
2212         lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
2213                                  sizeof fc_host_symbolic_name(shost));
2214
2215         fc_host_supported_speeds(shost) = 0;
2216         if (phba->lmt & LMT_10Gb)
2217                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
2218         if (phba->lmt & LMT_8Gb)
2219                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
2220         if (phba->lmt & LMT_4Gb)
2221                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
2222         if (phba->lmt & LMT_2Gb)
2223                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
2224         if (phba->lmt & LMT_1Gb)
2225                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
2226
2227         fc_host_maxframe_size(shost) =
2228                 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
2229                 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
2230
2231         /* This value is also unchanging */
2232         memset(fc_host_active_fc4s(shost), 0,
2233                sizeof(fc_host_active_fc4s(shost)));
2234         fc_host_active_fc4s(shost)[2] = 1;
2235         fc_host_active_fc4s(shost)[7] = 1;
2236
2237         fc_host_max_npiv_vports(shost) = phba->max_vpi;
2238         spin_lock_irq(shost->host_lock);
2239         vport->load_flag &= ~FC_LOADING;
2240         spin_unlock_irq(shost->host_lock);
2241 }
2242
2243 /**
2244  * lpfc_enable_msix: Enable MSI-X interrupt mode.
2245  * @phba: pointer to lpfc hba data structure.
2246  *
2247  * This routine is invoked to enable the MSI-X interrupt vectors. The kernel
2248  * function pci_enable_msix() is called to enable the MSI-X vectors. Note that
2249  * pci_enable_msix(), once invoked, enables either all or nothing, depending
2250  * on the current availability of PCI vector resources. The device driver is
2251  * responsible for calling the individual request_irq() to register each MSI-X
2252  * vector with an interrupt handler, which is done in this function. Note that
2253  * later, when the device is unloading, the driver should always call free_irq()
2254  * on all MSI-X vectors it has done request_irq() on before calling
2255  * pci_disable_msix(). Failure to do so results in a BUG_ON() and the device
2256  * is left with MSI-X enabled, leaking its vectors.
2257  *
2258  * Return codes
2259  *   0 - successful
2260  *   other values - error
2261  **/
2262 static int
2263 lpfc_enable_msix(struct lpfc_hba *phba)
2264 {
2265         int rc, i;
2266         LPFC_MBOXQ_t *pmb;
2267
2268         /* Set up MSI-X multi-message vectors */
2269         for (i = 0; i < LPFC_MSIX_VECTORS; i++)
2270                 phba->msix_entries[i].entry = i;
2271
2272         /* Configure MSI-X capability structure */
2273         rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
2274                                 ARRAY_SIZE(phba->msix_entries));
2275         if (rc) {
2276                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2277                                 "0420 PCI enable MSI-X failed (%d)\n", rc);
2278                 goto msi_fail_out;
2279         } else
2280                 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
2281                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2282                                         "0477 MSI-X entry[%d]: vector=x%x "
2283                                         "message=%d\n", i,
2284                                         phba->msix_entries[i].vector,
2285                                         phba->msix_entries[i].entry);
2286         /*
2287          * Assign MSI-X vectors to interrupt handlers
2288          */
2289
2290         /* vector-0 is associated to slow-path handler */
2291         rc = request_irq(phba->msix_entries[0].vector, &lpfc_sp_intr_handler,
2292                          IRQF_SHARED, LPFC_SP_DRIVER_HANDLER_NAME, phba);
2293         if (rc) {
2294                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2295                                 "0421 MSI-X slow-path request_irq failed "
2296                                 "(%d)\n", rc);
2297                 goto msi_fail_out;
2298         }
2299
2300         /* vector-1 is associated to fast-path handler */
2301         rc = request_irq(phba->msix_entries[1].vector, &lpfc_fp_intr_handler,
2302                          IRQF_SHARED, LPFC_FP_DRIVER_HANDLER_NAME, phba);
2303
2304         if (rc) {
2305                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2306                                 "0429 MSI-X fast-path request_irq failed "
2307                                 "(%d)\n", rc);
2308                 goto irq_fail_out;
2309         }
2310
2311         /*
2312          * Configure HBA MSI-X attention conditions to messages
2313          */
2314         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2315
2316         if (!pmb) {
2317                 rc = -ENOMEM;
2318                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2319                                 "0474 Unable to allocate memory for issuing "
2320                                 "MBOX_CONFIG_MSI command\n");
2321                 goto mem_fail_out;
2322         }
2323         rc = lpfc_config_msi(phba, pmb);
2324         if (rc)
2325                 goto mbx_fail_out;
2326         rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
2327         if (rc != MBX_SUCCESS) {
2328                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
2329                                 "0351 Config MSI mailbox command failed, "
2330                                 "mbxCmd x%x, mbxStatus x%x\n",
2331                                 pmb->mb.mbxCommand, pmb->mb.mbxStatus);
2332                 goto mbx_fail_out;
2333         }
2334
2335         /* Free memory allocated for mailbox command */
2336         mempool_free(pmb, phba->mbox_mem_pool);
2337         return rc;
2338
2339 mbx_fail_out:
2340         /* Free memory allocated for mailbox command */
2341         mempool_free(pmb, phba->mbox_mem_pool);
2342
2343 mem_fail_out:
2344         /* free the irq already requested */
2345         free_irq(phba->msix_entries[1].vector, phba);
2346
2347 irq_fail_out:
2348         /* free the irq already requested */
2349         free_irq(phba->msix_entries[0].vector, phba);
2350
2351 msi_fail_out:
2352         /* Unconfigure MSI-X capability structure */
2353         pci_disable_msix(phba->pcidev);
2354         return rc;
2355 }
2356
2357 /**
2358  * lpfc_disable_msix: Disable MSI-X interrupt mode.
2359  * @phba: pointer to lpfc hba data structure.
2360  *
2361  * This routine is invoked to release the MSI-X vectors and then disable the
2362  * MSI-X interrupt mode.
2363  **/
2364 static void
2365 lpfc_disable_msix(struct lpfc_hba *phba)
2366 {
2367         int i;
2368
2369         /* Free up MSI-X multi-message vectors */
2370         for (i = 0; i < LPFC_MSIX_VECTORS; i++)
2371                 free_irq(phba->msix_entries[i].vector, phba);
2372         /* Disable MSI-X */
2373         pci_disable_msix(phba->pcidev);
2374 }
2375
2376 /**
2377  * lpfc_enable_msi: Enable MSI interrupt mode.
2378  * @phba: pointer to lpfc hba data structure.
2379  *
2380  * This routine is invoked to enable the MSI interrupt mode. The kernel
2381  * function pci_enable_msi() is called to enable the MSI vector. The
2382  * device driver is responsible for calling request_irq() to register the
2383  * MSI vector with an interrupt handler, which is done in this function.
2384  *
2385  * Return codes
2386  *      0 - successful
2387  *      other values - error
2388  */
2389 static int
2390 lpfc_enable_msi(struct lpfc_hba *phba)
2391 {
2392         int rc;
2393
2394         rc = pci_enable_msi(phba->pcidev);
2395         if (!rc)
2396                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2397                                 "0462 PCI enable MSI mode success.\n");
2398         else {
2399                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2400                                 "0471 PCI enable MSI mode failed (%d)\n", rc);
2401                 return rc;
2402         }
2403
2404         rc = request_irq(phba->pcidev->irq, lpfc_intr_handler,
2405                          IRQF_SHARED, LPFC_DRIVER_NAME, phba);
2406         if (rc) {
2407                 pci_disable_msi(phba->pcidev);
2408                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2409                                 "0478 MSI request_irq failed (%d)\n", rc);
2410         }
2411         return rc;
2412 }
2413
2414 /**
2415  * lpfc_disable_msi: Disable MSI interrupt mode.
2416  * @phba: pointer to lpfc hba data structure.
2417  *
2418  * This routine is invoked to disable the MSI interrupt mode. The driver
2419  * calls free_irq() on the MSI vector it has done request_irq() on before
2420  * calling pci_disable_msi(). Failure to do so results in a BUG_ON() and
2421  * the device is left with MSI enabled, leaking its vector.
2422  */
2423
2424 static void
2425 lpfc_disable_msi(struct lpfc_hba *phba)
2426 {
2427         free_irq(phba->pcidev->irq, phba);
2428         pci_disable_msi(phba->pcidev);
2429         return;
2430 }
2431
2432 /**
2433  * lpfc_log_intr_mode: Log the active interrupt mode
2434  * @phba: pointer to lpfc hba data structure.
2435  * @intr_mode: active interrupt mode adopted.
2436  *
2437  * This routine is invoked to log the currently used active interrupt mode
2438  * of the device.
2439  */
2440 static void
2441 lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
2442 {
2443         switch (intr_mode) {
2444         case 0:
2445                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2446                                 "0470 Enable INTx interrupt mode.\n");
2447                 break;
2448         case 1:
2449                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2450                                 "0481 Enabled MSI interrupt mode.\n");
2451                 break;
2452         case 2:
2453                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2454                                 "0480 Enabled MSI-X interrupt mode.\n");
2455                 break;
2456         default:
2457                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2458                                 "0482 Illegal interrupt mode.\n");
2459                 break;
2460         }
2461         return;
2462 }
2463
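/**
 * lpfc_stop_port: Stop an HBA port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine quiesces the port: it clears all interrupt enable conditions
 * in the Host Control register, acknowledges any pending Host Attention
 * bits, stops the HBA timers, and clears the outstanding work port events.
 **/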
2464 static void
2465 lpfc_stop_port(struct lpfc_hba *phba)
2466 {
2467         /* Clear all interrupt enable conditions */
2468         writel(0, phba->HCregaddr);
2469         readl(phba->HCregaddr); /* flush */
2470         /* Clear all pending interrupts */
2471         writel(0xffffffff, phba->HAregaddr);
2472         readl(phba->HAregaddr); /* flush */
2473
2474         /* Reset some HBA SLI setup states */
2475         lpfc_stop_phba_timers(phba);
2476         phba->pport->work_port_events = 0;
2477
2478         return;
2479 }
2480
2481 /**
2482  * lpfc_enable_intr: Enable device interrupt.
2483  * @phba: pointer to lpfc hba data structure.
2484  * @phba: pointer to lpfc hba data structure.
 * @cfg_mode: the configured interrupt mode (2: MSI-X, 1: MSI, 0: INTx).
2485  *
2486  * This routine is invoked to enable device interrupt and associate the
2487  * driver's interrupt handler(s) to interrupt vector(s). Depending on the
2488  * interrupt mode configured for the driver, the driver will try to fall back
2489  * from the configured interrupt mode to an interrupt mode that is supported
2490  * by the platform, kernel, and device, in the order: MSI-X -> MSI -> IRQ.
2491  *
2492  * Return codes
2493  *   intr_mode - the interrupt mode actually enabled (0: INTx, 1: MSI, 2: MSI-X)
2494  *   LPFC_INTR_ERROR - failed to enable any interrupt mode
2495 static uint32_t
2496 lpfc_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
2497 {
2498         uint32_t intr_mode = LPFC_INTR_ERROR;
2499         int retval;
2500
2501         if (cfg_mode == 2) {
2502                 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
2503                 retval = lpfc_sli_config_port(phba, 3);
2504                 if (!retval) {
2505                         /* Now, try to enable MSI-X interrupt mode */
2506                         retval = lpfc_enable_msix(phba);
2507                         if (!retval) {
2508                                 /* Indicate initialization to MSI-X mode */
2509                                 phba->intr_type = MSIX;
2510                                 intr_mode = 2;
2511                         }
2512                 }
2513         }
2514
2515         /* Fallback to MSI if MSI-X initialization failed */
2516         if (cfg_mode >= 1 && phba->intr_type == NONE) {
2517                 retval = lpfc_enable_msi(phba);
2518                 if (!retval) {
2519                         /* Indicate initialization to MSI mode */
2520                         phba->intr_type = MSI;
2521                         intr_mode = 1;
2522                 }
2523         }
2524
2525         /* Fallback to INTx if both MSI-X/MSI initialization failed */
2526         if (phba->intr_type == NONE) {
2527                 retval = request_irq(phba->pcidev->irq, lpfc_intr_handler,
2528                                      IRQF_SHARED, LPFC_DRIVER_NAME, phba);
2529                 if (!retval) {
2530                         /* Indicate initialization to INTx mode */
2531                         phba->intr_type = INTx;
2532                         intr_mode = 0;
2533                 }
2534         }
2535         return intr_mode;
2536 }
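/*
 * Sketch of the intended probe-time usage (hypothetical and simplified; the
 * label and surrounding error handling are illustrative only):
 *
 *	intr_mode = lpfc_enable_intr(phba, phba->cfg_use_msi);
 *	if (intr_mode == LPFC_INTR_ERROR)
 *		goto out_free_resources;
 *	lpfc_log_intr_mode(phba, intr_mode);
 *	...
 *	lpfc_disable_intr(phba);	(error or unload path)
 */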
2537
2538 /**
2539  * lpfc_disable_intr: Disable device interrupt.
2540  * @phba: pointer to lpfc hba data structure.
2541  *
2542  * This routine is invoked to disable device interrupt and disassociate the
2543  * driver's interrupt handler(s) from interrupt vector(s). Depending on the
2544  * interrupt mode, the driver will release the interrupt vector(s) for the
2545  * message signaled interrupt.
2546  **/
2547 static void
2548 lpfc_disable_intr(struct lpfc_hba *phba)
2549 {
2550         /* Disable the currently initialized interrupt mode */
2551         if (phba->intr_type == MSIX)
2552                 lpfc_disable_msix(phba);
2553         else if (phba->intr_type == MSI)
2554                 lpfc_disable_msi(phba);
2555         else if (phba->intr_type == INTx)
2556                 free_irq(phba->pcidev->irq, phba);
2557
2558         /* Reset interrupt management states */
2559         phba->intr_type = NONE;
2560         phba->sli.slistat.sli_intr = 0;
2561
2562         return;
2563 }
2564
2565 /**
2566  * lpfc_pci_probe_one: lpfc PCI probe func to register device to PCI subsystem.
2567  * @pdev: pointer to PCI device
2568  * @pid: pointer to PCI device identifier
2569  *
2570  * This routine is to be registered to the kernel's PCI subsystem. When an
2571  * Emulex HBA is present on the PCI bus, the kernel PCI subsystem looks at the
2572  * PCI device-specific information of the device and driver to see if the
2573  * driver states that it can support this kind of device. If the match is
2574  * successful, the driver core invokes this routine. If this routine
2575  * determines it can claim the HBA, it does all the initialization that it
2576  * needs to do to handle the HBA properly.
2577  *
2578  * Return code
2579  *   0 - driver can claim the device
2580  *   negative value - driver cannot claim the device
2581  **/
2582 static int __devinit
2583 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
2584 {
2585         struct lpfc_vport *vport = NULL;
2586         struct lpfc_hba   *phba;
2587         struct lpfc_sli   *psli;
2588         struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
2589         struct Scsi_Host  *shost = NULL;
2590         void *ptr;
2591         unsigned long bar0map_len, bar2map_len;
2592         int error = -ENODEV, retval;
2593         int  i, hbq_count;
2594         uint16_t iotag;
2595         uint32_t cfg_mode, intr_mode;
2596         int bars = pci_select_bars(pdev, IORESOURCE_MEM);
2597         struct lpfc_adapter_event_header adapter_event;
2598
2599         if (pci_enable_device_mem(pdev))
2600                 goto out;
2601         if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
2602                 goto out_disable_device;
2603
2604         phba = kzalloc(sizeof (struct lpfc_hba), GFP_KERNEL);
2605         if (!phba)
2606                 goto out_release_regions;
2607
2608         atomic_set(&phba->fast_event_count, 0);
2609         spin_lock_init(&phba->hbalock);
2610
2611         /* Initialize ndlp management spinlock */
2612         spin_lock_init(&phba->ndlp_lock);
2613
2614         phba->pcidev = pdev;
2615
2616         /* Assign an unused board number */
2617         if ((phba->brd_no = lpfc_get_instance()) < 0)
2618                 goto out_free_phba;
2619
2620         INIT_LIST_HEAD(&phba->port_list);
2621         init_waitqueue_head(&phba->wait_4_mlo_m_q);
2622         /*
2623          * Get all the module params for configuring this host and then
2624          * establish the host.
2625          */
2626         lpfc_get_cfgparam(phba);
2627         phba->max_vpi = LPFC_MAX_VPI;
2628
2629         /* Initialize timers used by driver */
2630         init_timer(&phba->hb_tmofunc);
2631         phba->hb_tmofunc.function = lpfc_hb_timeout;
2632         phba->hb_tmofunc.data = (unsigned long)phba;
2633
2634         psli = &phba->sli;
2635         init_timer(&psli->mbox_tmo);
2636         psli->mbox_tmo.function = lpfc_mbox_timeout;
2637         psli->mbox_tmo.data = (unsigned long) phba;
2638         init_timer(&phba->fcp_poll_timer);
2639         phba->fcp_poll_timer.function = lpfc_poll_timeout;
2640         phba->fcp_poll_timer.data = (unsigned long) phba;
2641         init_timer(&phba->fabric_block_timer);
2642         phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
2643         phba->fabric_block_timer.data = (unsigned long) phba;
2644         init_timer(&phba->eratt_poll);
2645         phba->eratt_poll.function = lpfc_poll_eratt;
2646         phba->eratt_poll.data = (unsigned long) phba;
2647
2648         pci_set_master(pdev);
2649         pci_save_state(pdev);
2650         pci_try_set_mwi(pdev);
2651
2652         if (pci_set_dma_mask(phba->pcidev, DMA_64BIT_MASK) != 0)
2653                 if (pci_set_dma_mask(phba->pcidev, DMA_32BIT_MASK) != 0)
2654                         goto out_idr_remove;
2655
2656         /*
2657          * Get the bus address of Bar0 and Bar2 and the number of bytes
2658          * required by each mapping.
2659          */
2660         phba->pci_bar0_map = pci_resource_start(phba->pcidev, 0);
2661         bar0map_len        = pci_resource_len(phba->pcidev, 0);
2662
2663         phba->pci_bar2_map = pci_resource_start(phba->pcidev, 2);
2664         bar2map_len        = pci_resource_len(phba->pcidev, 2);
2665
2666         /* Map HBA SLIM to a kernel virtual address. */
2667         phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
2668         if (!phba->slim_memmap_p) {
2669                 error = -ENODEV;
2670                 dev_printk(KERN_ERR, &pdev->dev,
2671                            "ioremap failed for SLIM memory.\n");
2672                 goto out_idr_remove;
2673         }
2674
2675         /* Map HBA Control Registers to a kernel virtual address. */
2676         phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
2677         if (!phba->ctrl_regs_memmap_p) {
2678                 error = -ENODEV;
2679                 dev_printk(KERN_ERR, &pdev->dev,
2680                            "ioremap failed for HBA control registers.\n");
2681                 goto out_iounmap_slim;
2682         }
2683
2684         /* Allocate memory for SLI-2 structures */
2685         phba->slim2p.virt = dma_alloc_coherent(&phba->pcidev->dev,
2686                                                SLI2_SLIM_SIZE,
2687                                                &phba->slim2p.phys,
2688                                                GFP_KERNEL);
2689         if (!phba->slim2p.virt)
2690                 goto out_iounmap;
2691
2692         memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
2693         phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
2694         phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
2695         phba->IOCBs = (phba->slim2p.virt +
2696                        offsetof(struct lpfc_sli2_slim, IOCBs));
2697
2698         phba->hbqslimp.virt = dma_alloc_coherent(&phba->pcidev->dev,
2699                                                  lpfc_sli_hbq_size(),
2700                                                  &phba->hbqslimp.phys,
2701                                                  GFP_KERNEL);
2702         if (!phba->hbqslimp.virt)
2703                 goto out_free_slim;
2704
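        /*
         * Carve the HBQ slim region up into one array of entries per
         * host buffer queue, sized by each queue's entry count.
         */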
2705         hbq_count = lpfc_sli_hbq_count();
2706         ptr = phba->hbqslimp.virt;
2707         for (i = 0; i < hbq_count; ++i) {
2708                 phba->hbqs[i].hbq_virt = ptr;
2709                 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
2710                 ptr += (lpfc_hbq_defs[i]->entry_count *
2711                         sizeof(struct lpfc_hbq_entry));
2712         }
2713         phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
2714         phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer  = lpfc_els_hbq_free;
2715
2716         memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
2717
2718         INIT_LIST_HEAD(&phba->hbqbuf_in_list);
2719
2720         /* Initialize the SLI Layer to run with lpfc HBAs. */
2721         lpfc_sli_setup(phba);
2722         lpfc_sli_queue_setup(phba);
2723
2724         retval = lpfc_mem_alloc(phba);
2725         if (retval) {
2726                 error = retval;
2727                 goto out_free_hbqslimp;
2728         }
2729
2730         /* Initialize and populate the iocb list per host.  */
2731         INIT_LIST_HEAD(&phba->lpfc_iocb_list);
2732         for (i = 0; i < LPFC_IOCB_LIST_CNT; i++) {
2733                 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
2734                 if (iocbq_entry == NULL) {
2735                         printk(KERN_ERR "%s: only allocated %d iocbs of "
2736                                 "expected %d count. Unloading driver.\n",
2737                                 __func__, i, LPFC_IOCB_LIST_CNT);
2738                         error = -ENOMEM;
2739                         goto out_free_iocbq;
2740                 }
2741
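                /*
                 * Assign a unique iotag to this iocb; a return value of
                 * zero means no tag could be allocated.
                 */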
2742                 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
2743                 if (iotag == 0) {
2744                         kfree (iocbq_entry);
2745                         printk(KERN_ERR "%s: failed to allocate IOTAG. "
2746                                "Unloading driver.\n",
2747                                 __func__);
2748                         error = -ENOMEM;
2749                         goto out_free_iocbq;
2750                 }
2751
2752                 spin_lock_irq(&phba->hbalock);
2753                 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
2754                 phba->total_iocbq_bufs++;
2755                 spin_unlock_irq(&phba->hbalock);
2756         }
2757
2758         /* Initialize HBA structure */
2759         phba->fc_edtov = FF_DEF_EDTOV;
2760         phba->fc_ratov = FF_DEF_RATOV;
2761         phba->fc_altov = FF_DEF_ALTOV;
2762         phba->fc_arbtov = FF_DEF_ARBTOV;
2763
2764         INIT_LIST_HEAD(&phba->work_list);
2765         phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
2766         phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
2767
2768         /* Initialize the wait queue head for the kernel thread */
2769         init_waitqueue_head(&phba->work_waitq);
2770
2771         /* Start the kernel thread for this host adapter. */
2772         phba->worker_thread = kthread_run(lpfc_do_work, phba,
2773                                        "lpfc_worker_%d", phba->brd_no);
2774         if (IS_ERR(phba->worker_thread)) {
2775                 error = PTR_ERR(phba->worker_thread);
2776                 goto out_free_iocbq;
2777         }
2778
2779         /* Initialize the list of scsi buffers used by driver for scsi IO. */
2780         spin_lock_init(&phba->scsi_buf_list_lock);
2781         INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
2782
2783         /* Initialize list of fabric iocbs */
2784         INIT_LIST_HEAD(&phba->fabric_iocb_list);
2785
2786         /* Initialize list to save ELS buffers */
2787         INIT_LIST_HEAD(&phba->elsbuf);
2788
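        /* Create the physical port and its associated SCSI host. */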
2789         vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
2790         if (!vport)
2791                 goto out_kthread_stop;
2792
2793         shost = lpfc_shost_from_vport(vport);
2794         phba->pport = vport;
2795         lpfc_debugfs_initialize(vport);
2796
2797         pci_set_drvdata(pdev, shost);
2798
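        /*
         * Derive the mailbox and host/chip attention, status, and control
         * register addresses from the SLIM and control register mappings.
         */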
2799         phba->MBslimaddr = phba->slim_memmap_p;
2800         phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
2801         phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
2802         phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
2803         phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
2804
2805         /* Configure sysfs attributes */
2806         if (lpfc_alloc_sysfs_attr(vport)) {
2807                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2808                                 "1476 Failed to allocate sysfs attr\n");
2809                 error = -ENOMEM;
2810                 goto out_destroy_port;
2811         }
2812
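        /*
         * Enable the interrupt mode requested by the cfg_use_msi
         * configuration parameter and verify it with an active interrupt
         * test: by the time the SLI HBA setup completes, the completed
         * mailbox commands should already have generated a number of
         * interrupts.  If too few were counted, stop the port, disable the
         * current mode, and retry with the next lower mode (typically
         * MSI-X, then MSI, then INTx); failing the test in INTx mode is
         * treated as fatal.
         */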
2813         cfg_mode = phba->cfg_use_msi;
2814         while (true) {
2815                 /* Configure and enable interrupt */
2816                 intr_mode = lpfc_enable_intr(phba, cfg_mode);
2817                 if (intr_mode == LPFC_INTR_ERROR) {
2818                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2819                                         "0426 Failed to enable interrupt.\n");
2820                         goto out_free_sysfs_attr;
2821                 }
2822                 /* HBA SLI setup */
2823                 if (lpfc_sli_hba_setup(phba)) {
2824                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2825                                         "1477 Failed to set up hba\n");
2826                         error = -ENODEV;
2827                         goto out_remove_device;
2828                 }
2829
2830                 /* Wait 50ms for interrupts from the previously issued mailbox commands */
2831                 msleep(50);
2832                 /* Check active interrupts received */
2833                 if (phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
2834                         /* Log the current active interrupt mode */
2835                         phba->intr_mode = intr_mode;
2836                         lpfc_log_intr_mode(phba, intr_mode);
2837                         break;
2838                 } else {
2839                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2840                                         "0451 Configure interrupt mode (%d) "
2841                                         "failed active interrupt test.\n",
2842                                         intr_mode);
2843                         if (intr_mode == 0) {
2844                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2845                                                 "0479 Failed to enable "
2846                                                 "interrupt.\n");
2847                                 error = -ENODEV;
2848                                 goto out_remove_device;
2849                         }
2850                         /* Stop the port to undo the HBA SLI setup */
2851                         lpfc_stop_port(phba);
2852                         /* Disable the current interrupt mode */
2853                         lpfc_disable_intr(phba);
2854                         /* Try next level of interrupt mode */
2855                         cfg_mode = --intr_mode;
2856                 }
2857         }
2858
2859         /*
2860          * HBA setup may have changed cfg_hba_queue_depth, so adjust the
2861          * value of can_queue accordingly.
2862          */
2863         shost->can_queue = phba->cfg_hba_queue_depth - 10;
2864
2865         lpfc_host_attrib_init(shost);
2866
2867         if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
2868                 spin_lock_irq(shost->host_lock);
2869                 lpfc_poll_start_timer(phba);
2870                 spin_unlock_irq(shost->host_lock);
2871         }
2872
2873         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2874                         "0428 Perform SCSI scan\n");
2875         /* Send board arrival event to upper layer */
2876         adapter_event.event_type = FC_REG_ADAPTER_EVENT;
2877         adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
2878         fc_host_post_vendor_event(shost, fc_get_event_number(),
2879                 sizeof(adapter_event),
2880                 (char *) &adapter_event,
2881                 LPFC_NL_VENDOR_ID);
2882
2883         return 0;
2884
2885 out_remove_device:
2886         spin_lock_irq(shost->host_lock);
2887         vport->load_flag |= FC_UNLOADING;
2888         spin_unlock_irq(shost->host_lock);
2889         lpfc_stop_phba_timers(phba);
2890         phba->pport->work_port_events = 0;
2891         lpfc_disable_intr(phba);
2892 out_free_sysfs_attr:
2893         lpfc_free_sysfs_attr(vport);
2894 out_destroy_port:
2895         destroy_port(vport);
2896 out_kthread_stop:
2897         kthread_stop(phba->worker_thread);
2898 out_free_iocbq:
2899         list_for_each_entry_safe(iocbq_entry, iocbq_next,
2900                                                 &phba->lpfc_iocb_list, list) {
2901                 kfree(iocbq_entry);
2902                 phba->total_iocbq_bufs--;
2903         }
2904         lpfc_mem_free(phba);
2905 out_free_hbqslimp:
2906         dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
2907                           phba->hbqslimp.virt, phba->hbqslimp.phys);
2908 out_free_slim:
2909         dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
2910                           phba->slim2p.virt, phba->slim2p.phys);
2911 out_iounmap:
2912         iounmap(phba->ctrl_regs_memmap_p);
2913 out_iounmap_slim:
2914         iounmap(phba->slim_memmap_p);
2915 out_idr_remove:
2916         idr_remove(&lpfc_hba_index, phba->brd_no);
2917 out_free_phba:
2918         kfree(phba);
2919 out_release_regions:
2920         pci_release_selected_regions(pdev, bars);
2921 out_disable_device:
2922         pci_disable_device(pdev);
2923 out:
2924         pci_set_drvdata(pdev, NULL);
2925         if (shost)
2926                 scsi_host_put(shost);
2927         return error;
2928 }
2929
2930 /**
2931  * lpfc_pci_remove_one: lpfc PCI func to unregister device from PCI subsystem.
2932  * @pdev: pointer to PCI device
2933  *
2934  * This routine is to be registered to the kernel's PCI subsystem. When an
2935  * Emulex HBA is removed from the PCI bus, this routine performs all the
2936  * cleanup necessary for the HBA device to be removed from the PCI subsystem.
2937  **/
2938 static void __devexit
2939 lpfc_pci_remove_one(struct pci_dev *pdev)
2940 {
2941         struct Scsi_Host  *shost = pci_get_drvdata(pdev);
2942         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2943         struct lpfc_vport **vports;
2944         struct lpfc_hba   *phba = vport->phba;
2945         int i;
2946         int bars = pci_select_bars(pdev, IORESOURCE_MEM);
2947
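        /* Flag the physical port as unloading so the rest of the driver stops issuing new work. */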
2948         spin_lock_irq(&phba->hbalock);
2949         vport->load_flag |= FC_UNLOADING;
2950         spin_unlock_irq(&phba->hbalock);
2951
2952         lpfc_free_sysfs_attr(vport);
2953
2954         kthread_stop(phba->worker_thread);
2955
2956         /* Release all the vports against this physical port */
2957         vports = lpfc_create_vport_work_array(phba);
2958         if (vports != NULL)
2959                 for (i = 1; i <= phba->max_vpi && vports[i] != NULL; i++)
2960                         fc_vport_terminate(vports[i]->fc_vport);
2961         lpfc_destroy_vport_work_array(phba, vports);
2962
2963         /* Remove FC host and then SCSI host with the physical port */
2964         fc_remove_host(shost);
2965         scsi_remove_host(shost);
2966         lpfc_cleanup(vport);
2967
2968         /*
2969          * Bring down the SLI Layer. This step disables all interrupts,
2970          * clears the rings, discards all mailbox commands, and resets
2971          * the HBA.
2972          */
2973         lpfc_sli_hba_down(phba);
2974         lpfc_sli_brdrestart(phba);
2975
2976         lpfc_stop_phba_timers(phba);
2977         spin_lock_irq(&phba->hbalock);
2978         list_del_init(&vport->listentry);
2979         spin_unlock_irq(&phba->hbalock);
2980
2981         lpfc_debugfs_terminate(vport);
2982
2983         /* Disable interrupt */
2984         lpfc_disable_intr(phba);
2985
2986         pci_set_drvdata(pdev, NULL);
2987         scsi_host_put(shost);
2988
2989         /*
2990          * Call scsi_free before mem_free since scsi bufs are released to their
2991          * corresponding pools here.
2992          */
2993         lpfc_scsi_free(phba);
2994         lpfc_mem_free(phba);
2995
2996         dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
2997                           phba->hbqslimp.virt, phba->hbqslimp.phys);
2998
2999         /* Free resources associated with SLI2 interface */
3000         dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
3001                           phba->slim2p.virt, phba->slim2p.phys);
3002
3003         /* unmap adapter SLIM and Control Registers */
3004         iounmap(phba->ctrl_regs_memmap_p);
3005         iounmap(phba->slim_memmap_p);
3006
3007         idr_remove(&lpfc_hba_index, phba->brd_no);
3008
3009         kfree(phba);
3010
3011         pci_release_selected_regions(pdev, bars);
3012         pci_disable_device(pdev);
3013 }
3014
3015 /**
3016  * lpfc_pci_suspend_one: lpfc PCI func to suspend device for power management.
3017  * @pdev: pointer to PCI device
3018  * @msg: power management message
3019  *
3020  * This routine is to be registered to the kernel's PCI subsystem to support
3021  * system Power Management (PM). When PM invokes this method, it quiesces the
3022  * device by stopping the driver's worker thread for the device, turning off
3023  * the device's interrupt and DMA, and bringing the device offline. Note that
3024  * the driver implements only the minimum PM requirements for a power-aware
3025  * driver's suspend/resume support: all possible PM messages (SUSPEND,
3026  * HIBERNATE, FREEZE) passed to the suspend() method are treated as SUSPEND,
3027  * and the driver fully reinitializes its device in the resume() method.
3028  * Consequently, the driver sets the device to the PCI_D3hot state in PCI
3029  * config space instead of setting it according to the @msg provided by PM.
3030  *
3031  * Return code
3032  *   0 - driver suspended the device
3033  *   Error otherwise
3034  **/
3035 static int
3036 lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
3037 {
3038         struct Scsi_Host *shost = pci_get_drvdata(pdev);
3039         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3040
3041         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3042                         "0473 PCI device Power Management suspend.\n");
3043
3044         /* Bring down the device */
3045         lpfc_offline_prep(phba);
3046         lpfc_offline(phba);
3047         kthread_stop(phba->worker_thread);
3048
3049         /* Disable interrupt from device */
3050         lpfc_disable_intr(phba);
3051
3052         /* Save device state to PCI config space */
3053         pci_save_state(pdev);
3054         pci_set_power_state(pdev, PCI_D3hot);
3055
3056         return 0;
3057 }
3058
3059 /**
3060  * lpfc_pci_resume_one: lpfc PCI func to resume device for power management.
3061  * @pdev: pointer to PCI device
3062  *
3063  * This routine is to be registered to the kernel's PCI subsystem to support
3064  * system Power Management (PM). When PM invokes this method, it restores
3065  * the device's PCI config space state and fully reinitializes the device
3066  * and brings it online. Note that the driver implements only the minimum PM
3067  * requirements for a power-aware driver's suspend/resume support: all
3068  * possible PM messages (SUSPEND, HIBERNATE, FREEZE) passed to the suspend()
3069  * method are treated as SUSPEND, and the driver fully reinitializes its
3070  * device in the resume() method. Consequently, the device is set to PCI_D0
3071  * directly in PCI config space before its state is restored.
3072  *
3073  * Return code
3074  *   0 - driver resumed the device
3075  *   Error otherwise
3076  **/
3077 static int
3078 lpfc_pci_resume_one(struct pci_dev *pdev)
3079 {
3080         struct Scsi_Host *shost = pci_get_drvdata(pdev);
3081         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3082         uint32_t intr_mode;
3083         int error;
3084
3085         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3086                         "0452 PCI device Power Management resume.\n");
3087
3088         /* Restore device state from PCI config space */
3089         pci_set_power_state(pdev, PCI_D0);
3090         pci_restore_state(pdev);
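        /* Re-enable bus mastering if the device had it enabled before suspend. */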
3091         if (pdev->is_busmaster)
3092                 pci_set_master(pdev);
3093
3094         /* Start the kernel thread for this host adapter. */
3095         phba->worker_thread = kthread_run(lpfc_do_work, phba,
3096                                         "lpfc_worker_%d", phba->brd_no);
3097         if (IS_ERR(phba->worker_thread)) {
3098                 error = PTR_ERR(phba->worker_thread);
3099                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3100                                 "0434 PM resume failed to start worker "
3101                                 "thread: error=x%x.\n", error);
3102                 return error;
3103         }
3104
3105         /* Configure and enable interrupt */
3106         intr_mode = lpfc_enable_intr(phba, phba->intr_mode);
3107         if (intr_mode == LPFC_INTR_ERROR) {
3108                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3109                                 "0430 PM resume Failed to enable interrupt\n");
3110                 return -EIO;
3111         } else
3112                 phba->intr_mode = intr_mode;
3113
3114         /* Restart HBA and bring it online */
3115         lpfc_sli_brdrestart(phba);
3116         lpfc_online(phba);
3117
3118         /* Log the current active interrupt mode */
3119         lpfc_log_intr_mode(phba, phba->intr_mode);
3120
3121         return 0;
3122 }
3123
3124 /**
3125  * lpfc_io_error_detected: Driver method for handling PCI I/O error detected.
3126  * @pdev: pointer to PCI device.
3127  * @state: the current PCI connection state.
3128  *
3129  * This routine is registered to the PCI subsystem for error handling. This
3130  * function is called by the PCI subsystem after a PCI bus error affecting
3131  * this device has been detected. When this function is invoked, it stops
3132  * all I/O and disables the interrupt(s) to the device. Once that is done,
3133  * it returns PCI_ERS_RESULT_NEED_RESET so that the PCI subsystem can
3134  * perform proper recovery.
3135  *
3136  * Return codes
3137  *   PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
3138  *   PCI_ERS_RESULT_DISCONNECT - device could not be recovered
3139  **/
3140 static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev,
3141                                 pci_channel_state_t state)
3142 {
3143         struct Scsi_Host *shost = pci_get_drvdata(pdev);
3144         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3145         struct lpfc_sli *psli = &phba->sli;
3146         struct lpfc_sli_ring  *pring;
3147
3148         if (state == pci_channel_io_perm_failure) {
3149                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3150                                 "0472 PCI channel I/O permanent failure\n");
3151                 /* Block all SCSI devices' I/Os on the host */
3152                 lpfc_scsi_dev_block(phba);
3153                 /* Clean up all driver's outstanding SCSI I/Os */
3154                 lpfc_sli_flush_fcp_rings(phba);
3155                 return PCI_ERS_RESULT_DISCONNECT;
3156         }
3157
3158         pci_disable_device(pdev);
3159         /*
3160          * There may be I/Os dropped by the firmware.
3161          * Error out the iocbs (I/Os) on the txcmplq and let the
3162          * SCSI layer retry them after the link is re-established.
3163          */
3164         pring = &psli->ring[psli->fcp_ring];
3165         lpfc_sli_abort_iocb_ring(phba, pring);
3166
3167         /* Disable interrupt */
3168         lpfc_disable_intr(phba);
3169
3170         /* Request a slot reset. */
3171         return PCI_ERS_RESULT_NEED_RESET;
3172 }
3173
3174 /**
3175  * lpfc_io_slot_reset: Restart a PCI device from scratch.
3176  * @pdev: pointer to PCI device.
3177  *
3178  * This routine is registered to the PCI subsystem for error handling. This is
3179  * called after PCI bus has been reset to restart the PCI card from scratch,
3180  * as if from a cold-boot. During the PCI subsystem error recovery, after the
3181  * driver returns PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform
3182  * proper error recovery and then call this routine before calling the .resume
3183  * method to recover the device. This function will initialize the HBA device,
3184  * enable the interrupt, but it will just put the HBA to offline state without
3185  * passing any I/O traffic.
3186  *
3187  * Return codes
3188  *   PCI_ERS_RESULT_RECOVERED - the device has been recovered
3189  *   PCI_ERS_RESULT_DISCONNECT - device could not be recovered
3190  */
3191 static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
3192 {
3193         struct Scsi_Host *shost = pci_get_drvdata(pdev);
3194         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3195         struct lpfc_sli *psli = &phba->sli;
3196         uint32_t intr_mode;
3197
3198         dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
3199         if (pci_enable_device_mem(pdev)) {
3200                 printk(KERN_ERR "lpfc: Cannot re-enable "
3201                         "PCI device after reset.\n");
3202                 return PCI_ERS_RESULT_DISCONNECT;
3203         }
3204
3205         pci_restore_state(pdev);
3206         if (pdev->is_busmaster)
3207                 pci_set_master(pdev);
3208
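        /*
         * Clear the SLI2 active flag so that the SLI layer brings the
         * port back up from scratch after the bus reset.
         */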
3209         spin_lock_irq(&phba->hbalock);
3210         psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
3211         spin_unlock_irq(&phba->hbalock);
3212
3213         /* Configure and enable interrupt */
3214         intr_mode = lpfc_enable_intr(phba, phba->intr_mode);
3215         if (intr_mode == LPFC_INTR_ERROR) {
3216                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3217                                 "0427 Cannot re-enable interrupt after "
3218                                 "slot reset.\n");
3219                 return PCI_ERS_RESULT_DISCONNECT;
3220         } else
3221                 phba->intr_mode = intr_mode;
3222
3223         /* Take device offline; this will perform cleanup */
3224         lpfc_offline(phba);
3225         lpfc_sli_brdrestart(phba);
3226
3227         /* Log the current active interrupt mode */
3228         lpfc_log_intr_mode(phba, phba->intr_mode);
3229
3230         return PCI_ERS_RESULT_RECOVERED;
3231 }
3232
3233 /**
3234  * lpfc_io_resume: Resume PCI I/O operation.
3235  * @pdev: pointer to PCI device
3236  *
3237  * This routine is registered to the PCI subsystem for error handling. It is
3238  * called when the kernel's error recovery tells the lpfc driver that it is OK to
3239  * resume normal PCI operation after PCI bus error recovery. After this call,
3240  * traffic can start to flow from this device again.
3241  */
3242 static void lpfc_io_resume(struct pci_dev *pdev)
3243 {
3244         struct Scsi_Host *shost = pci_get_drvdata(pdev);
3245         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3246
3247         lpfc_online(phba);
3248 }
3249
3250 static struct pci_device_id lpfc_id_table[] = {
3251         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
3252                 PCI_ANY_ID, PCI_ANY_ID, },
3253         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
3254                 PCI_ANY_ID, PCI_ANY_ID, },
3255         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
3256                 PCI_ANY_ID, PCI_ANY_ID, },
3257         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
3258                 PCI_ANY_ID, PCI_ANY_ID, },
3259         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
3260                 PCI_ANY_ID, PCI_ANY_ID, },
3261         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
3262                 PCI_ANY_ID, PCI_ANY_ID, },
3263         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
3264                 PCI_ANY_ID, PCI_ANY_ID, },
3265         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
3266                 PCI_ANY_ID, PCI_ANY_ID, },
3267         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
3268                 PCI_ANY_ID, PCI_ANY_ID, },
3269         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
3270                 PCI_ANY_ID, PCI_ANY_ID, },
3271         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
3272                 PCI_ANY_ID, PCI_ANY_ID, },
3273         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
3274                 PCI_ANY_ID, PCI_ANY_ID, },
3275         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
3276                 PCI_ANY_ID, PCI_ANY_ID, },
3277         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
3278                 PCI_ANY_ID, PCI_ANY_ID, },
3279         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
3280                 PCI_ANY_ID, PCI_ANY_ID, },
3281         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
3282                 PCI_ANY_ID, PCI_ANY_ID, },
3283         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
3284                 PCI_ANY_ID, PCI_ANY_ID, },
3285         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
3286                 PCI_ANY_ID, PCI_ANY_ID, },
3287         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
3288                 PCI_ANY_ID, PCI_ANY_ID, },
3289         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
3290                 PCI_ANY_ID, PCI_ANY_ID, },
3291         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
3292                 PCI_ANY_ID, PCI_ANY_ID, },
3293         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
3294                 PCI_ANY_ID, PCI_ANY_ID, },
3295         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
3296                 PCI_ANY_ID, PCI_ANY_ID, },
3297         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
3298                 PCI_ANY_ID, PCI_ANY_ID, },
3299         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
3300                 PCI_ANY_ID, PCI_ANY_ID, },
3301         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
3302                 PCI_ANY_ID, PCI_ANY_ID, },
3303         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
3304                 PCI_ANY_ID, PCI_ANY_ID, },
3305         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
3306                 PCI_ANY_ID, PCI_ANY_ID, },
3307         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
3308                 PCI_ANY_ID, PCI_ANY_ID, },
3309         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
3310                 PCI_ANY_ID, PCI_ANY_ID, },
3311         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
3312                 PCI_ANY_ID, PCI_ANY_ID, },
3313         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
3314                 PCI_ANY_ID, PCI_ANY_ID, },
3315         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
3316                 PCI_ANY_ID, PCI_ANY_ID, },
3317         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
3318                 PCI_ANY_ID, PCI_ANY_ID, },
3319         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
3320                 PCI_ANY_ID, PCI_ANY_ID, },
3321         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
3322                 PCI_ANY_ID, PCI_ANY_ID, },
3323         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
3324                 PCI_ANY_ID, PCI_ANY_ID, },
3325         { 0 }
3326 };
3327
3328 MODULE_DEVICE_TABLE(pci, lpfc_id_table);
3329
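/* PCI error recovery handlers invoked by the kernel's PCI error recovery core. */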
3330 static struct pci_error_handlers lpfc_err_handler = {
3331         .error_detected = lpfc_io_error_detected,
3332         .slot_reset = lpfc_io_slot_reset,
3333         .resume = lpfc_io_resume,
3334 };
3335
3336 static struct pci_driver lpfc_driver = {
3337         .name           = LPFC_DRIVER_NAME,
3338         .id_table       = lpfc_id_table,
3339         .probe          = lpfc_pci_probe_one,
3340         .remove         = __devexit_p(lpfc_pci_remove_one),
3341         .suspend        = lpfc_pci_suspend_one,
3342         .resume         = lpfc_pci_resume_one,
3343         .err_handler    = &lpfc_err_handler,
3344 };
3345
3346 /**
3347  * lpfc_init: lpfc module initialization routine.
3348  *
3349  * This routine is to be invoked when the lpfc module is loaded into the
3350  * kernel. The special kernel macro module_init() is used to indicate the
3351  * role of this routine to the kernel as the lpfc module entry point.
3352  *
3353  * Return codes
3354  *   0 - successful
3355  *   -ENOMEM - FC attach transport failed
3356  *   all others - failed
3357  */
3358 static int __init
3359 lpfc_init(void)
3360 {
3361         int error = 0;
3362
3363         printk(LPFC_MODULE_DESC "\n");
3364         printk(LPFC_COPYRIGHT "\n");
3365
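        /* Hook up vport create/delete handlers only when NPIV is enabled. */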
3366         if (lpfc_enable_npiv) {
3367                 lpfc_transport_functions.vport_create = lpfc_vport_create;
3368                 lpfc_transport_functions.vport_delete = lpfc_vport_delete;
3369         }
3370         lpfc_transport_template =
3371                                 fc_attach_transport(&lpfc_transport_functions);
3372         if (lpfc_transport_template == NULL)
3373                 return -ENOMEM;
3374         if (lpfc_enable_npiv) {
3375                 lpfc_vport_transport_template =
3376                         fc_attach_transport(&lpfc_vport_transport_functions);
3377                 if (lpfc_vport_transport_template == NULL) {
3378                         fc_release_transport(lpfc_transport_template);
3379                         return -ENOMEM;
3380                 }
3381         }
3382         error = pci_register_driver(&lpfc_driver);
3383         if (error) {
3384                 fc_release_transport(lpfc_transport_template);
3385                 if (lpfc_enable_npiv)
3386                         fc_release_transport(lpfc_vport_transport_template);
3387         }
3388
3389         return error;
3390 }
3391
3392 /**
3393  * lpfc_exit: lpfc module removal routine.
3394  *
3395  * This routine is invoked when the lpfc module is removed from the kernel.
3396  * The special kernel macro module_exit() is used to indicate the role of
3397  * this routine to the kernel as the lpfc module exit point.
3398  */
3399 static void __exit
3400 lpfc_exit(void)
3401 {
3402         pci_unregister_driver(&lpfc_driver);
3403         fc_release_transport(lpfc_transport_template);
3404         if (lpfc_enable_npiv)
3405                 fc_release_transport(lpfc_vport_transport_template);
3406 }
3407
3408 module_init(lpfc_init);
3409 module_exit(lpfc_exit);
3410 MODULE_LICENSE("GPL");
3411 MODULE_DESCRIPTION(LPFC_MODULE_DESC);
3412 MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
3413 MODULE_VERSION("0:" LPFC_DRIVER_VERSION);