drivers/scsi/lpfc/lpfc_init.c
1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
5  * EMULEX and SLI are trademarks of Emulex.                        *
6  * www.emulex.com                                                  *
7  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
8  *                                                                 *
9  * This program is free software; you can redistribute it and/or   *
10  * modify it under the terms of version 2 of the GNU General       *
11  * Public License as published by the Free Software Foundation.    *
12  * This program is distributed in the hope that it will be useful. *
13  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
14  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
15  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
16  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
18  * more details, a copy of which can be found in the file COPYING  *
19  * included with this package.                                     *
20  *******************************************************************/
21
22 #include <linux/blkdev.h>
23 #include <linux/delay.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/idr.h>
26 #include <linux/interrupt.h>
27 #include <linux/kthread.h>
28 #include <linux/pci.h>
29 #include <linux/spinlock.h>
30 #include <linux/ctype.h>
31
32 #include <scsi/scsi.h>
33 #include <scsi/scsi_device.h>
34 #include <scsi/scsi_host.h>
35 #include <scsi/scsi_transport_fc.h>
36
37 #include "lpfc_hw.h"
38 #include "lpfc_sli.h"
39 #include "lpfc_nl.h"
40 #include "lpfc_disc.h"
41 #include "lpfc_scsi.h"
42 #include "lpfc.h"
43 #include "lpfc_logmsg.h"
44 #include "lpfc_crtn.h"
45 #include "lpfc_vport.h"
46 #include "lpfc_version.h"
47
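   /*
    * Scratch buffers, apparently used for dumping I/O payload and DIF
    * (protection) data when debugging BlockGuard errors elsewhere in the
    * driver; access is serialized by _dump_buf_lock.
    */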
48 char *_dump_buf_data;
49 unsigned long _dump_buf_data_order;
50 char *_dump_buf_dif;
51 unsigned long _dump_buf_dif_order;
52 spinlock_t _dump_buf_lock;
53
54 static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
55 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
56 static int lpfc_post_rcv_buf(struct lpfc_hba *);
57
58 static struct scsi_transport_template *lpfc_transport_template = NULL;
59 static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
60 static DEFINE_IDR(lpfc_hba_index);
61
62 /**
63  * lpfc_config_port_prep - Perform lpfc initialization prior to config port
64  * @phba: pointer to lpfc hba data structure.
65  *
66  * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
67  * mailbox command. It retrieves the revision information from the HBA and
68  * collects the Vital Product Data (VPD) about the HBA for preparing the
69  * configuration of the HBA.
70  *
71  * Return codes:
72  *   0 - success.
73  *   -ERESTART - requests the SLI layer to reset the HBA and try again.
74  *   Any other value - indicates an error.
75  **/
76 int
77 lpfc_config_port_prep(struct lpfc_hba *phba)
78 {
79         lpfc_vpd_t *vp = &phba->vpd;
80         int i = 0, rc;
81         LPFC_MBOXQ_t *pmb;
82         MAILBOX_t *mb;
83         char *lpfc_vpd_data = NULL;
84         uint16_t offset = 0;
85         static char licensed[56] =
86                     "key unlock for use with gnu public licensed code only\0";
87         static int init_key = 1;
88
89         pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
90         if (!pmb) {
91                 phba->link_state = LPFC_HBA_ERROR;
92                 return -ENOMEM;
93         }
94
95         mb = &pmb->mb;
96         phba->link_state = LPFC_INIT_MBX_CMDS;
97
98         if (lpfc_is_LC_HBA(phba->pcidev->device)) {
99                 if (init_key) {
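                            /*
                             * One-time, in-place conversion of the license key string
                             * to big-endian 32-bit words (presumably the byte order the
                             * firmware expects) before it is copied into the
                             * READ_NVPARM mailbox below.
                             */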
100                         uint32_t *ptext = (uint32_t *) licensed;
101
102                         for (i = 0; i < 56; i += sizeof (uint32_t), ptext++)
103                                 *ptext = cpu_to_be32(*ptext);
104                         init_key = 0;
105                 }
106
107                 lpfc_read_nv(phba, pmb);
108                 memset((char*)mb->un.varRDnvp.rsvd3, 0,
109                         sizeof (mb->un.varRDnvp.rsvd3));
110                 memcpy((char*)mb->un.varRDnvp.rsvd3, licensed,
111                          sizeof (licensed));
112
113                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
114
115                 if (rc != MBX_SUCCESS) {
116                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
117                                         "0324 Config Port initialization "
118                                         "error, mbxCmd x%x READ_NVPARM, "
119                                         "mbxStatus x%x\n",
120                                         mb->mbxCommand, mb->mbxStatus);
121                         mempool_free(pmb, phba->mbox_mem_pool);
122                         return -ERESTART;
123                 }
124                 memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
125                        sizeof(phba->wwnn));
126                 memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
127                        sizeof(phba->wwpn));
128         }
129
130         phba->sli3_options = 0x0;
131
132         /* Setup and issue mailbox READ REV command */
133         lpfc_read_rev(phba, pmb);
134         rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
135         if (rc != MBX_SUCCESS) {
136                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
137                                 "0439 Adapter failed to init, mbxCmd x%x "
138                                 "READ_REV, mbxStatus x%x\n",
139                                 mb->mbxCommand, mb->mbxStatus);
140                 mempool_free( pmb, phba->mbox_mem_pool);
141                 return -ERESTART;
142         }
143
144
145         /*
146          * The value of rr must be 1 since the driver sets the cv field to 1.
147          * This setting requires the FW to set all revision fields.
148          */
149         if (mb->un.varRdRev.rr == 0) {
150                 vp->rev.rBit = 0;
151                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
152                                 "0440 Adapter failed to init, READ_REV has "
153                                 "missing revision information.\n");
154                 mempool_free(pmb, phba->mbox_mem_pool);
155                 return -ERESTART;
156         }
157
158         if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
159                 mempool_free(pmb, phba->mbox_mem_pool);
160                 return -EINVAL;
161         }
162
163         /* Save information as VPD data */
164         vp->rev.rBit = 1;
165         memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
166         vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
167         memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
168         vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
169         memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
170         vp->rev.biuRev = mb->un.varRdRev.biuRev;
171         vp->rev.smRev = mb->un.varRdRev.smRev;
172         vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
173         vp->rev.endecRev = mb->un.varRdRev.endecRev;
174         vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
175         vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
176         vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
177         vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
178         vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
179         vp->rev.opFwRev = mb->un.varRdRev.opFwRev;
180
181         /* If the sli feature level is less than 9, we must
182          * tear down all RPIs and VPIs on link down if NPIV
183          * is enabled.
184          */
185         if (vp->rev.feaLevelHigh < 9)
186                 phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;
187
188         if (lpfc_is_LC_HBA(phba->pcidev->device))
189                 memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
190                                                 sizeof (phba->RandomData));
191
192         /* Get adapter VPD information */
193         lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
194         if (!lpfc_vpd_data)
195                 goto out_free_mbox;
196
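            /*
             * Fetch the adapter VPD in chunks: issue DUMP mailbox commands repeatedly,
             * advancing 'offset' by the count returned in each response, until the
             * adapter returns no more data or the DMP_VPD_SIZE buffer is full.
             */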
197         do {
198                 lpfc_dump_mem(phba, pmb, offset);
199                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
200
201                 if (rc != MBX_SUCCESS) {
202                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
203                                         "0441 VPD not present on adapter, "
204                                         "mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
205                                         mb->mbxCommand, mb->mbxStatus);
206                         mb->un.varDmp.word_cnt = 0;
207                 }
208                 if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
209                         mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
210                 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
211                                       lpfc_vpd_data + offset,
212                                       mb->un.varDmp.word_cnt);
213                 offset += mb->un.varDmp.word_cnt;
214         } while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
215         lpfc_parse_vpd(phba, lpfc_vpd_data, offset);
216
217         kfree(lpfc_vpd_data);
218 out_free_mbox:
219         mempool_free(pmb, phba->mbox_mem_pool);
220         return 0;
221 }
222
223 /**
224  * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
225  * @phba: pointer to lpfc hba data structure.
226  * @pmboxq: pointer to the driver internal queue element for mailbox command.
227  *
228  * This is the completion handler for the driver's mailbox command that
229  * configures asynchronous event notification on the device. If the mailbox
230  * command completes successfully, the internal async event support flag is
231  * set to 1; otherwise, it is set to 0.
232  **/
233 static void
234 lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
235 {
236         if (pmboxq->mb.mbxStatus == MBX_SUCCESS)
237                 phba->temp_sensor_support = 1;
238         else
239                 phba->temp_sensor_support = 0;
240         mempool_free(pmboxq, phba->mbox_mem_pool);
241         return;
242 }
243
244 /**
245  * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
246  * @phba: pointer to lpfc hba data structure.
247  * @pmboxq: pointer to the driver internal queue element for mailbox command.
248  *
249  * This is the completion handler for the dump mailbox command used to get the
250  * wake up parameters. When the command completes, the response contains the
251  * Option ROM version of the HBA. This function translates the version number
252  * into a human readable string and stores it in OptionROMVersion.
253  **/
254 static void
255 lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
256 {
257         struct prog_id *prg;
258         uint32_t prog_id_word;
259         char dist = ' ';
260         /* character array used for decoding dist type. */
261         char dist_char[] = "nabx";
262
263         if (pmboxq->mb.mbxStatus != MBX_SUCCESS) {
264                 mempool_free(pmboxq, phba->mbox_mem_pool);
265                 return;
266         }
267
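            /*
             * Overlay the prog_id bit-field layout on the raw mailbox word so the
             * individual version fields can be decoded below; e.g. ver=5, rev=0,
             * lev=2, dist='a', num=1 renders as "5.02a1".
             */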
268         prg = (struct prog_id *) &prog_id_word;
269
270         /* word 7 contains the option rom version */
271         prog_id_word = pmboxq->mb.un.varWords[7];
272
273         /* Decode the Option rom version word to a readable string */
274         if (prg->dist < 4)
275                 dist = dist_char[prg->dist];
276
277         if ((prg->dist == 3) && (prg->num == 0))
278                 sprintf(phba->OptionROMVersion, "%d.%d%d",
279                         prg->ver, prg->rev, prg->lev);
280         else
281                 sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
282                         prg->ver, prg->rev, prg->lev,
283                         dist, prg->num);
284         mempool_free(pmboxq, phba->mbox_mem_pool);
285         return;
286 }
287
288 /**
289  * lpfc_config_port_post - Perform lpfc initialization after config port
290  * @phba: pointer to lpfc hba data structure.
291  *
292  * This routine will do LPFC initialization after the CONFIG_PORT mailbox
293  * command call. It performs all internal resource and state setups on the
294  * port: post IOCB buffers, enable appropriate host interrupt attentions,
295  * ELS ring timers, etc.
296  *
297  * Return codes
298  *   0 - success.
299  *   Any other value - error.
300  **/
301 int
302 lpfc_config_port_post(struct lpfc_hba *phba)
303 {
304         struct lpfc_vport *vport = phba->pport;
305         struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
306         LPFC_MBOXQ_t *pmb;
307         MAILBOX_t *mb;
308         struct lpfc_dmabuf *mp;
309         struct lpfc_sli *psli = &phba->sli;
310         uint32_t status, timeout;
311         int i, j;
312         int rc;
313
314         spin_lock_irq(&phba->hbalock);
315         /*
316          * If the Config port completed correctly, the HBA is no
317          * longer overheated.
318          */
319         if (phba->over_temp_state == HBA_OVER_TEMP)
320                 phba->over_temp_state = HBA_NORMAL_TEMP;
321         spin_unlock_irq(&phba->hbalock);
322
323         pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
324         if (!pmb) {
325                 phba->link_state = LPFC_HBA_ERROR;
326                 return -ENOMEM;
327         }
328         mb = &pmb->mb;
329
330         /* Get login parameters for NID.  */
331         lpfc_read_sparam(phba, pmb, 0);
332         pmb->vport = vport;
333         if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
334                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
335                                 "0448 Adapter failed init, mbxCmd x%x "
336                                 "READ_SPARM mbxStatus x%x\n",
337                                 mb->mbxCommand, mb->mbxStatus);
338                 phba->link_state = LPFC_HBA_ERROR;
339                 mp = (struct lpfc_dmabuf *) pmb->context1;
340                 mempool_free( pmb, phba->mbox_mem_pool);
341                 lpfc_mbuf_free(phba, mp->virt, mp->phys);
342                 kfree(mp);
343                 return -EIO;
344         }
345
346         mp = (struct lpfc_dmabuf *) pmb->context1;
347
348         memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
349         lpfc_mbuf_free(phba, mp->virt, mp->phys);
350         kfree(mp);
351         pmb->context1 = NULL;
352
353         if (phba->cfg_soft_wwnn)
354                 u64_to_wwn(phba->cfg_soft_wwnn,
355                            vport->fc_sparam.nodeName.u.wwn);
356         if (phba->cfg_soft_wwpn)
357                 u64_to_wwn(phba->cfg_soft_wwpn,
358                            vport->fc_sparam.portName.u.wwn);
359         memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
360                sizeof (struct lpfc_name));
361         memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
362                sizeof (struct lpfc_name));
363
364         /* Update the fc_host data structures with new wwn. */
365         fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
366         fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
367
368         /* If no serial number in VPD data, use low 6 bytes of WWNN */
369         /* This should be consolidated into parse_vpd ? - mr */
370         if (phba->SerialNumber[0] == 0) {
371                 uint8_t *outptr;
372
373                 outptr = &vport->fc_nodename.u.s.IEEE[0];
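                    /*
                     * Convert each nibble of the 6-byte IEEE portion of the WWNN to a
                     * lower-case hex ASCII character: 0x0-0x9 become '0'-'9' (0x30 + j)
                     * and 0xa-0xf become 'a'-'f' (0x61 + j - 10), e.g. byte 0x3A
                     * becomes "3a".
                     */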
374                 for (i = 0; i < 12; i++) {
375                         status = *outptr++;
376                         j = ((status & 0xf0) >> 4);
377                         if (j <= 9)
378                                 phba->SerialNumber[i] =
379                                     (char)((uint8_t) 0x30 + (uint8_t) j);
380                         else
381                                 phba->SerialNumber[i] =
382                                     (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
383                         i++;
384                         j = (status & 0xf);
385                         if (j <= 9)
386                                 phba->SerialNumber[i] =
387                                     (char)((uint8_t) 0x30 + (uint8_t) j);
388                         else
389                                 phba->SerialNumber[i] =
390                                     (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
391                 }
392         }
393
394         lpfc_read_config(phba, pmb);
395         pmb->vport = vport;
396         if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
397                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
398                                 "0453 Adapter failed to init, mbxCmd x%x "
399                                 "READ_CONFIG, mbxStatus x%x\n",
400                                 mb->mbxCommand, mb->mbxStatus);
401                 phba->link_state = LPFC_HBA_ERROR;
402                 mempool_free( pmb, phba->mbox_mem_pool);
403                 return -EIO;
404         }
405
406         /* Reset the DFT_HBA_Q_DEPTH to the max xri  */
407         if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
408                 phba->cfg_hba_queue_depth =
409                         mb->un.varRdConfig.max_xri + 1;
410
411         phba->lmt = mb->un.varRdConfig.lmt;
412
413         /* Get the default values for Model Name and Description */
414         lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
415
416         if ((phba->cfg_link_speed > LINK_SPEED_10G)
417             || ((phba->cfg_link_speed == LINK_SPEED_1G)
418                 && !(phba->lmt & LMT_1Gb))
419             || ((phba->cfg_link_speed == LINK_SPEED_2G)
420                 && !(phba->lmt & LMT_2Gb))
421             || ((phba->cfg_link_speed == LINK_SPEED_4G)
422                 && !(phba->lmt & LMT_4Gb))
423             || ((phba->cfg_link_speed == LINK_SPEED_8G)
424                 && !(phba->lmt & LMT_8Gb))
425             || ((phba->cfg_link_speed == LINK_SPEED_10G)
426                 && !(phba->lmt & LMT_10Gb))) {
427                 /* Reset link speed to auto */
428                 lpfc_printf_log(phba, KERN_WARNING, LOG_LINK_EVENT,
429                         "1302 Invalid speed for this board: "
430                         "Reset link speed to auto: x%x\n",
431                         phba->cfg_link_speed);
432                 phba->cfg_link_speed = LINK_SPEED_AUTO;
433         }
434
435         phba->link_state = LPFC_LINK_DOWN;
436
437         /* Only process IOCBs on ELS ring till hba_state is READY */
438         if (psli->ring[psli->extra_ring].cmdringaddr)
439                 psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
440         if (psli->ring[psli->fcp_ring].cmdringaddr)
441                 psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
442         if (psli->ring[psli->next_ring].cmdringaddr)
443                 psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;
444
445         /* Post receive buffers for desired rings */
446         if (phba->sli_rev != 3)
447                 lpfc_post_rcv_buf(phba);
448
449         /*
450          * Configure HBA MSI-X attention conditions to messages if in MSI-X mode
451          */
452         if (phba->intr_type == MSIX) {
453                 rc = lpfc_config_msi(phba, pmb);
454                 if (rc) {
455                         mempool_free(pmb, phba->mbox_mem_pool);
456                         return -EIO;
457                 }
458                 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
459                 if (rc != MBX_SUCCESS) {
460                         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
461                                         "0352 Config MSI mailbox command "
462                                         "failed, mbxCmd x%x, mbxStatus x%x\n",
463                                         pmb->mb.mbxCommand, pmb->mb.mbxStatus);
464                         mempool_free(pmb, phba->mbox_mem_pool);
465                         return -EIO;
466                 }
467         }
468
469         /* Initialize ERATT handling flag */
470         phba->hba_flag &= ~HBA_ERATT_HANDLED;
471
472         /* Enable appropriate host interrupts */
473         spin_lock_irq(&phba->hbalock);
474         status = readl(phba->HCregaddr);
475         status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
476         if (psli->num_rings > 0)
477                 status |= HC_R0INT_ENA;
478         if (psli->num_rings > 1)
479                 status |= HC_R1INT_ENA;
480         if (psli->num_rings > 2)
481                 status |= HC_R2INT_ENA;
482         if (psli->num_rings > 3)
483                 status |= HC_R3INT_ENA;
484
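            /*
             * When FCP ring polling is configured with its interrupt disabled, leave
             * HC_R0INT_ENA clear so the FCP ring (ring 0) is serviced by polling
             * rather than by interrupts.
             */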
485         if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
486             (phba->cfg_poll & DISABLE_FCP_RING_INT))
487                 status &= ~(HC_R0INT_ENA);
488
489         writel(status, phba->HCregaddr);
490         readl(phba->HCregaddr); /* flush */
491         spin_unlock_irq(&phba->hbalock);
492
493         /* Set up ring-0 (ELS) timer */
494         timeout = phba->fc_ratov * 2;
495         mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
496         /* Set up heart beat (HB) timer */
497         mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
498         phba->hb_outstanding = 0;
499         phba->last_completion_time = jiffies;
500         /* Set up error attention (ERATT) polling timer */
501         mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
502
503         lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
504         pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
505         lpfc_set_loopback_flag(phba);
506         rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
507         if (rc != MBX_SUCCESS) {
508                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
509                                 "0454 Adapter failed to init, mbxCmd x%x "
510                                 "INIT_LINK, mbxStatus x%x\n",
511                                 mb->mbxCommand, mb->mbxStatus);
512
513                 /* Clear all interrupt enable conditions */
514                 writel(0, phba->HCregaddr);
515                 readl(phba->HCregaddr); /* flush */
516                 /* Clear all pending interrupts */
517                 writel(0xffffffff, phba->HAregaddr);
518                 readl(phba->HAregaddr); /* flush */
519
520                 phba->link_state = LPFC_HBA_ERROR;
521                 if (rc != MBX_BUSY)
522                         mempool_free(pmb, phba->mbox_mem_pool);
523                 return -EIO;
524         }
525         /* MBOX buffer will be freed in mbox compl */
526         pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
527         lpfc_config_async(phba, pmb, LPFC_ELS_RING);
528         pmb->mbox_cmpl = lpfc_config_async_cmpl;
529         pmb->vport = phba->pport;
530         rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
531
532         if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
533                 lpfc_printf_log(phba,
534                                 KERN_ERR,
535                                 LOG_INIT,
536                                 "0456 Adapter failed to issue "
537                                 "ASYNCEVT_ENABLE mbox status x%x\n",
538                                 rc);
539                 mempool_free(pmb, phba->mbox_mem_pool);
540         }
541
542         /* Get Option rom version */
543         pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
544         lpfc_dump_wakeup_param(phba, pmb);
545         pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
546         pmb->vport = phba->pport;
547         rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
548
549         if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
550                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
551                                 "to get Option ROM version status x%x\n", rc);
552                 mempool_free(pmb, phba->mbox_mem_pool);
553         }
554
555         return 0;
556 }
557
558 /**
559  * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
560  * @phba: pointer to lpfc HBA data structure.
561  *
562  * This routine will do LPFC uninitialization before the HBA is reset when
563  * bringing down the SLI Layer.
564  *
565  * Return codes
566  *   0 - success.
567  *   Any other value - error.
568  **/
569 int
570 lpfc_hba_down_prep(struct lpfc_hba *phba)
571 {
572         struct lpfc_vport **vports;
573         int i;
574         /* Disable interrupts */
575         writel(0, phba->HCregaddr);
576         readl(phba->HCregaddr); /* flush */
577
578         if (phba->pport->load_flag & FC_UNLOADING)
579                 lpfc_cleanup_discovery_resources(phba->pport);
580         else {
581                 vports = lpfc_create_vport_work_array(phba);
582                 if (vports != NULL)
583                         for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++)
584                                 lpfc_cleanup_discovery_resources(vports[i]);
585                 lpfc_destroy_vport_work_array(phba, vports);
586         }
587         return 0;
588 }
589
590 /**
591  * lpfc_hba_down_post - Perform lpfc uninitialization after HBA reset
592  * @phba: pointer to lpfc HBA data structure.
593  *
594  * This routine will do uninitialization after the HBA is reset when bringing
595  * down the SLI Layer.
596  *
597  * Return codes
598  *   0 - success.
599  *   Any other value - error.
600  **/
601 int
602 lpfc_hba_down_post(struct lpfc_hba *phba)
603 {
604         struct lpfc_sli *psli = &phba->sli;
605         struct lpfc_sli_ring *pring;
606         struct lpfc_dmabuf *mp, *next_mp;
607         LIST_HEAD(completions);
608         int i;
609
610         if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
611                 lpfc_sli_hbqbuf_free_all(phba);
612         else {
613                 /* Cleanup preposted buffers on the ELS ring */
614                 pring = &psli->ring[LPFC_ELS_RING];
615                 list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
616                         list_del(&mp->list);
617                         pring->postbufq_cnt--;
618                         lpfc_mbuf_free(phba, mp->virt, mp->phys);
619                         kfree(mp);
620                 }
621         }
622
623         spin_lock_irq(&phba->hbalock);
624         for (i = 0; i < psli->num_rings; i++) {
625                 pring = &psli->ring[i];
626
627                 /* At this point in time the HBA is either reset or DOA. Either
628                  * way, nothing should be on txcmplq as it will NEVER complete.
629                  */
630                 list_splice_init(&pring->txcmplq, &completions);
631                 pring->txcmplq_cnt = 0;
632                 spin_unlock_irq(&phba->hbalock);
633
634                 /* Cancel all the IOCBs from the completions list */
635                 lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
636                                       IOERR_SLI_ABORTED);
637
638                 lpfc_sli_abort_iocb_ring(phba, pring);
639                 spin_lock_irq(&phba->hbalock);
640         }
641         spin_unlock_irq(&phba->hbalock);
642
643         return 0;
644 }
645
646 /**
647  * lpfc_hb_timeout - The HBA-timer timeout handler
648  * @ptr: unsigned long holds the pointer to lpfc hba data structure.
649  *
650  * This is the HBA-timer timeout handler registered to the lpfc driver. When
651  * this timer fires, a HBA timeout event shall be posted to the lpfc driver
652  * work-port-events bitmap and the worker thread is notified. This timeout
653  * event will be used by the worker thread to invoke the actual timeout
654  * handler routine, lpfc_hb_timeout_handler. Any periodic operations will
655  * be performed in the timeout handler and the HBA timeout event bit shall
656  * be cleared by the worker thread after it has taken the event bitmap out.
657  **/
658 static void
659 lpfc_hb_timeout(unsigned long ptr)
660 {
661         struct lpfc_hba *phba;
662         uint32_t tmo_posted;
663         unsigned long iflag;
664
665         phba = (struct lpfc_hba *)ptr;
666
667         /* Check for heart beat timeout conditions */
668         spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
669         tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
670         if (!tmo_posted)
671                 phba->pport->work_port_events |= WORKER_HB_TMO;
672         spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
673
674         /* Tell the worker thread there is work to do */
675         if (!tmo_posted)
676                 lpfc_worker_wake_up(phba);
677         return;
678 }
679
680 /**
681  * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
682  * @phba: pointer to lpfc hba data structure.
683  * @pmboxq: pointer to the driver internal queue element for mailbox command.
684  *
685  * This is the callback function to the lpfc heart-beat mailbox command.
686  * If configured, the lpfc driver issues the heart-beat mailbox command to
687  * the HBA every LPFC_HB_MBOX_INTERVAL (currently 5) seconds. When the
688  * heart-beat mailbox command is issued, the driver sets the heart-beat
689  * timeout timer to LPFC_HB_MBOX_TIMEOUT (currently 30) seconds and marks
690  * the heart-beat outstanding state. Once the mailbox command comes back and
691  * no error conditions are detected, the heart-beat mailbox command timer is
692  * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
693  * state is cleared for the next heart-beat. If the timer expires with the
694  * heart-beat outstanding state set, the driver will take the HBA offline.
695  **/
696 static void
697 lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
698 {
699         unsigned long drvr_flag;
700
701         spin_lock_irqsave(&phba->hbalock, drvr_flag);
702         phba->hb_outstanding = 0;
703         spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
704
705         /* Check and reset heart-beat timer if necessary */
706         mempool_free(pmboxq, phba->mbox_mem_pool);
707         if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
708                 !(phba->link_state == LPFC_HBA_ERROR) &&
709                 !(phba->pport->load_flag & FC_UNLOADING))
710                 mod_timer(&phba->hb_tmofunc,
711                         jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
712         return;
713 }
714
715 /**
716  * lpfc_hb_timeout_handler - The HBA-timer timeout handler
717  * @phba: pointer to lpfc hba data structure.
718  *
719  * This is the actual HBA-timer timeout handler to be invoked by the worker
720  * thread whenever the HBA timer has fired and the HBA-timeout event has been
721  * posted. This handler performs any periodic operations needed for the device.
722  * If such a periodic event has already been attended to, either in the
723  * interrupt handler or by processing slow-ring or fast-ring events within the
724  * HBA-timer timeout window (LPFC_HB_MBOX_INTERVAL), this handler simply
725  * resets the timer for the next timeout period. If the lpfc heart-beat
726  * mailbox command is configured and there is no heart-beat mailbox command
727  * outstanding, a heart-beat mailbox command is issued and the timer is set
728  * properly. Otherwise, if a heart-beat mailbox command has been outstanding,
729  * the HBA is taken offline.
730  **/
731 void
732 lpfc_hb_timeout_handler(struct lpfc_hba *phba)
733 {
734         LPFC_MBOXQ_t *pmboxq;
735         struct lpfc_dmabuf *buf_ptr;
736         int retval;
737         struct lpfc_sli *psli = &phba->sli;
738         LIST_HEAD(completions);
739
740         if ((phba->link_state == LPFC_HBA_ERROR) ||
741                 (phba->pport->load_flag & FC_UNLOADING) ||
742                 (phba->pport->fc_flag & FC_OFFLINE_MODE))
743                 return;
744
745         spin_lock_irq(&phba->pport->work_port_lock);
746
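            /*
             * If an I/O completion has been seen within the last heart-beat interval,
             * the adapter is clearly responsive; just rearm the timer instead of
             * issuing another heart-beat mailbox command.
             */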
747         if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ,
748                 jiffies)) {
749                 spin_unlock_irq(&phba->pport->work_port_lock);
750                 if (!phba->hb_outstanding)
751                         mod_timer(&phba->hb_tmofunc,
752                                 jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
753                 else
754                         mod_timer(&phba->hb_tmofunc,
755                                 jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
756                 return;
757         }
758         spin_unlock_irq(&phba->pport->work_port_lock);
759
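            /*
             * If the count of deferred ELS buffers has not changed since the last
             * interval, release the accumulated buffers on the elsbuf list back to
             * the mbuf pool.
             */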
760         if (phba->elsbuf_cnt &&
761                 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
762                 spin_lock_irq(&phba->hbalock);
763                 list_splice_init(&phba->elsbuf, &completions);
764                 phba->elsbuf_cnt = 0;
765                 phba->elsbuf_prev_cnt = 0;
766                 spin_unlock_irq(&phba->hbalock);
767
768                 while (!list_empty(&completions)) {
769                         list_remove_head(&completions, buf_ptr,
770                                 struct lpfc_dmabuf, list);
771                         lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
772                         kfree(buf_ptr);
773                 }
774         }
775         phba->elsbuf_prev_cnt = phba->elsbuf_cnt;
776
777         /* If there is no heart beat outstanding, issue a heartbeat command */
778         if (phba->cfg_enable_hba_heartbeat) {
779                 if (!phba->hb_outstanding) {
780                         pmboxq = mempool_alloc(phba->mbox_mem_pool,GFP_KERNEL);
781                         if (!pmboxq) {
782                                 mod_timer(&phba->hb_tmofunc,
783                                           jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
784                                 return;
785                         }
786
787                         lpfc_heart_beat(phba, pmboxq);
788                         pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
789                         pmboxq->vport = phba->pport;
790                         retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
791
792                         if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
793                                 mempool_free(pmboxq, phba->mbox_mem_pool);
794                                 mod_timer(&phba->hb_tmofunc,
795                                           jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
796                                 return;
797                         }
798                         mod_timer(&phba->hb_tmofunc,
799                                   jiffies + HZ * LPFC_HB_MBOX_TIMEOUT);
800                         phba->hb_outstanding = 1;
801                         return;
802                 } else {
803                         /*
804                          * If the heart beat timeout is called with hb_outstanding
805                          * set, we need to take the HBA offline.
806                          */
807                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
808                                         "0459 Adapter heartbeat failure, "
809                                         "taking this port offline.\n");
810
811                         spin_lock_irq(&phba->hbalock);
812                         psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
813                         spin_unlock_irq(&phba->hbalock);
814
815                         lpfc_offline_prep(phba);
816                         lpfc_offline(phba);
817                         lpfc_unblock_mgmt_io(phba);
818                         phba->link_state = LPFC_HBA_ERROR;
819                         lpfc_hba_down_post(phba);
820                 }
821         }
822 }
823
824 /**
825  * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
826  * @phba: pointer to lpfc hba data structure.
827  *
828  * This routine is called to bring the HBA offline when HBA hardware error
829  * other than Port Error 6 has been detected.
830  **/
831 static void
832 lpfc_offline_eratt(struct lpfc_hba *phba)
833 {
834         struct lpfc_sli   *psli = &phba->sli;
835
836         spin_lock_irq(&phba->hbalock);
837         psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
838         spin_unlock_irq(&phba->hbalock);
839         lpfc_offline_prep(phba);
840
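            /*
             * Take the port offline, reset the board, and wait for it to become
             * mailbox-ready again before unblocking management I/O; the HBA is
             * left in the error state.
             */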
841         lpfc_offline(phba);
842         lpfc_reset_barrier(phba);
843         lpfc_sli_brdreset(phba);
844         lpfc_hba_down_post(phba);
845         lpfc_sli_brdready(phba, HS_MBRDY);
846         lpfc_unblock_mgmt_io(phba);
847         phba->link_state = LPFC_HBA_ERROR;
848         return;
849 }
850
851 /**
852  * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
853  * @phba: pointer to lpfc hba data structure.
854  *
855  * This routine is invoked to handle the deferred HBA hardware error
856  * conditions. This type of error is indicated by the HBA setting ER1
857  * and another ER bit in the host status register. The driver will
858  * wait until the ER1 bit clears before handling the error condition.
859  **/
860 static void
861 lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
862 {
863         uint32_t old_host_status = phba->work_hs;
864         struct lpfc_sli_ring  *pring;
865         struct lpfc_sli *psli = &phba->sli;
866
867         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
868                 "0479 Deferred Adapter Hardware Error "
869                 "Data: x%x x%x x%x\n",
870                 phba->work_hs,
871                 phba->work_status[0], phba->work_status[1]);
872
873         spin_lock_irq(&phba->hbalock);
874         psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
875         spin_unlock_irq(&phba->hbalock);
876
877
878         /*
879          * Firmware stops when it triggers an error attention. That could cause
880          * I/Os to be dropped by the firmware. Error out the IOCBs (I/Os) on the
881          * txcmplq and let the SCSI layer retry them after re-establishing the link.
882          */
883         pring = &psli->ring[psli->fcp_ring];
884         lpfc_sli_abort_iocb_ring(phba, pring);
885
886         /*
887          * There was a firmware error. Take the hba offline and then
888          * attempt to restart it.
889          */
890         lpfc_offline_prep(phba);
891         lpfc_offline(phba);
892
893         /* Wait for the ER1 bit to clear.*/
894         while (phba->work_hs & HS_FFER1) {
895                 msleep(100);
896                 phba->work_hs = readl(phba->HSregaddr);
897                 /* If driver is unloading let the worker thread continue */
898                 if (phba->pport->load_flag & FC_UNLOADING) {
899                         phba->work_hs = 0;
900                         break;
901                 }
902         }
903
904         /*
905          * This is to protect against a race condition in which the
906          * first write to the host attention register clears the
907          * host status register.
908          */
909         if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
910                 phba->work_hs = old_host_status & ~HS_FFER1;
911
912         phba->hba_flag &= ~DEFER_ERATT;
913         phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
914         phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
915 }
916
917 /**
918  * lpfc_handle_eratt - The HBA hardware error handler
919  * @phba: pointer to lpfc hba data structure.
920  *
921  * This routine is invoked to handle the following HBA hardware error
922  * conditions:
923  * 1 - HBA error attention interrupt
924  * 2 - DMA ring index out of range
925  * 3 - Mailbox command came back as unknown
926  **/
927 void
928 lpfc_handle_eratt(struct lpfc_hba *phba)
929 {
930         struct lpfc_vport *vport = phba->pport;
931         struct lpfc_sli   *psli = &phba->sli;
932         struct lpfc_sli_ring  *pring;
933         uint32_t event_data;
934         unsigned long temperature;
935         struct temp_event temp_event_data;
936         struct Scsi_Host  *shost;
937         struct lpfc_board_event_header board_event;
938
939         /* If the pci channel is offline, ignore possible errors,
940          * since we cannot communicate with the pci card anyway. */
941         if (pci_channel_offline(phba->pcidev))
942                 return;
943         /* If resets are disabled then leave the HBA alone and return */
944         if (!phba->cfg_enable_hba_reset)
945                 return;
946
947         /* Send an internal error event to mgmt application */
948         board_event.event_type = FC_REG_BOARD_EVENT;
949         board_event.subcategory = LPFC_EVENT_PORTINTERR;
950         shost = lpfc_shost_from_vport(phba->pport);
951         fc_host_post_vendor_event(shost, fc_get_event_number(),
952                                   sizeof(board_event),
953                                   (char *) &board_event,
954                                   LPFC_NL_VENDOR_ID);
955
956         if (phba->hba_flag & DEFER_ERATT)
957                 lpfc_handle_deferred_eratt(phba);
958
959         if (phba->work_hs & HS_FFER6) {
960                 /* Re-establishing Link */
961                 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
962                                 "1301 Re-establishing Link "
963                                 "Data: x%x x%x x%x\n",
964                                 phba->work_hs,
965                                 phba->work_status[0], phba->work_status[1]);
966
967                 spin_lock_irq(&phba->hbalock);
968                 psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
969                 spin_unlock_irq(&phba->hbalock);
970
971                 /*
972                  * Firmware stops when it triggers an error attention with HS_FFER6.
973                  * That could cause I/Os to be dropped by the firmware.
974                  * Error out the IOCBs (I/Os) on the txcmplq and let the SCSI layer
975                  * retry them after the link is re-established.
976                  */
977                 pring = &psli->ring[psli->fcp_ring];
978                 lpfc_sli_abort_iocb_ring(phba, pring);
979
980                 /*
981                  * There was a firmware error.  Take the hba offline and then
982                  * attempt to restart it.
983                  */
984                 lpfc_offline_prep(phba);
985                 lpfc_offline(phba);
986                 lpfc_sli_brdrestart(phba);
987                 if (lpfc_online(phba) == 0) {   /* Initialize the HBA */
988                         lpfc_unblock_mgmt_io(phba);
989                         return;
990                 }
991                 lpfc_unblock_mgmt_io(phba);
992         } else if (phba->work_hs & HS_CRIT_TEMP) {
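                    /*
                     * Critical over-temperature condition: read the temperature from
                     * SLIM, post a temperature vendor event to management applications,
                     * mark the HBA over-temp, and take the port offline.
                     */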
993                 temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
994                 temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
995                 temp_event_data.event_code = LPFC_CRIT_TEMP;
996                 temp_event_data.data = (uint32_t)temperature;
997
998                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
999                                 "0406 Adapter maximum temperature exceeded "
1000                                 "(%ld), taking this port offline "
1001                                 "Data: x%x x%x x%x\n",
1002                                 temperature, phba->work_hs,
1003                                 phba->work_status[0], phba->work_status[1]);
1004
1005                 shost = lpfc_shost_from_vport(phba->pport);
1006                 fc_host_post_vendor_event(shost, fc_get_event_number(),
1007                                           sizeof(temp_event_data),
1008                                           (char *) &temp_event_data,
1009                                           SCSI_NL_VID_TYPE_PCI
1010                                           | PCI_VENDOR_ID_EMULEX);
1011
1012                 spin_lock_irq(&phba->hbalock);
1013                 phba->over_temp_state = HBA_OVER_TEMP;
1014                 spin_unlock_irq(&phba->hbalock);
1015                 lpfc_offline_eratt(phba);
1016
1017         } else {
1018                 /* The if clause above forces this code path when the status
1019                  * failure is a value other than FFER6. Do not call the offline
1020                  * handler twice. This is the adapter hardware error path.
1021                  */
1022                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1023                                 "0457 Adapter Hardware Error "
1024                                 "Data: x%x x%x x%x\n",
1025                                 phba->work_hs,
1026                                 phba->work_status[0], phba->work_status[1]);
1027
1028                 event_data = FC_REG_DUMP_EVENT;
1029                 shost = lpfc_shost_from_vport(vport);
1030                 fc_host_post_vendor_event(shost, fc_get_event_number(),
1031                                 sizeof(event_data), (char *) &event_data,
1032                                 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
1033
1034                 lpfc_offline_eratt(phba);
1035         }
1036         return;
1037 }
1038
1039 /**
1040  * lpfc_handle_latt - The HBA link event handler
1041  * @phba: pointer to lpfc hba data structure.
1042  *
1043  * This routine is invoked from the worker thread to handle a HBA host
1044  * attention link event.
1045  **/
1046 void
1047 lpfc_handle_latt(struct lpfc_hba *phba)
1048 {
1049         struct lpfc_vport *vport = phba->pport;
1050         struct lpfc_sli   *psli = &phba->sli;
1051         LPFC_MBOXQ_t *pmb;
1052         volatile uint32_t control;
1053         struct lpfc_dmabuf *mp;
1054         int rc = 0;
1055
1056         pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1057         if (!pmb) {
1058                 rc = 1;
1059                 goto lpfc_handle_latt_err_exit;
1060         }
1061
1062         mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1063         if (!mp) {
1064                 rc = 2;
1065                 goto lpfc_handle_latt_free_pmb;
1066         }
1067
1068         mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
1069         if (!mp->virt) {
1070                 rc = 3;
1071                 goto lpfc_handle_latt_free_mp;
1072         }
1073
1074         /* Cleanup any outstanding ELS commands */
1075         lpfc_els_flush_all_cmd(phba);
1076
1077         psli->slistat.link_event++;
1078         lpfc_read_la(phba, pmb, mp);
1079         pmb->mbox_cmpl = lpfc_mbx_cmpl_read_la;
1080         pmb->vport = vport;
1081         /* Block ELS IOCBs until we have processed this mbox command */
1082         phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
1083         rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
1084         if (rc == MBX_NOT_FINISHED) {
1085                 rc = 4;
1086                 goto lpfc_handle_latt_free_mbuf;
1087         }
1088
1089         /* Clear Link Attention in HA REG */
1090         spin_lock_irq(&phba->hbalock);
1091         writel(HA_LATT, phba->HAregaddr);
1092         readl(phba->HAregaddr); /* flush */
1093         spin_unlock_irq(&phba->hbalock);
1094
1095         return;
1096
1097 lpfc_handle_latt_free_mbuf:
1098         phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
1099         lpfc_mbuf_free(phba, mp->virt, mp->phys);
1100 lpfc_handle_latt_free_mp:
1101         kfree(mp);
1102 lpfc_handle_latt_free_pmb:
1103         mempool_free(pmb, phba->mbox_mem_pool);
1104 lpfc_handle_latt_err_exit:
1105         /* Enable Link attention interrupts */
1106         spin_lock_irq(&phba->hbalock);
1107         psli->sli_flag |= LPFC_PROCESS_LA;
1108         control = readl(phba->HCregaddr);
1109         control |= HC_LAINT_ENA;
1110         writel(control, phba->HCregaddr);
1111         readl(phba->HCregaddr); /* flush */
1112
1113         /* Clear Link Attention in HA REG */
1114         writel(HA_LATT, phba->HAregaddr);
1115         readl(phba->HAregaddr); /* flush */
1116         spin_unlock_irq(&phba->hbalock);
1117         lpfc_linkdown(phba);
1118         phba->link_state = LPFC_HBA_ERROR;
1119
1120         lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1121                      "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
1122
1123         return;
1124 }
1125
1126 /**
1127  * lpfc_parse_vpd - Parse VPD (Vital Product Data)
1128  * @phba: pointer to lpfc hba data structure.
1129  * @vpd: pointer to the vital product data.
1130  * @len: length of the vital product data in bytes.
1131  *
1132  * This routine parses the Vital Product Data (VPD). The VPD is treated as
1133  * an array of characters. In this routine, the ModelName, ProgramType,
1134  * ModelDesc, and related fields of the phba data structure will be populated.
1135  *
1136  * Return codes
1137  *   0 - pointer to the VPD passed in is NULL
1138  *   1 - success
1139  **/
1140 static int
1141 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
1142 {
1143         uint8_t lenlo, lenhi;
1144         int Length;
1145         int i, j;
1146         int finished = 0;
1147         int index = 0;
1148
1149         if (!vpd)
1150                 return 0;
1151
1152         /* Vital Product */
1153         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1154                         "0455 Vital Product Data: x%x x%x x%x x%x\n",
1155                         (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
1156                         (uint32_t) vpd[3]);
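             /*
              * Walk the standard PCI VPD resource tags: 0x82 (Identifier String) and
              * 0x91 (read/write VPD data) are skipped using their 16-bit little-endian
              * length, 0x90 (read-only VPD data) is scanned for the SN and V1-V4
              * keywords, and 0x78 (End tag) terminates the walk.
              */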
1157         while (!finished && (index < (len - 4))) {
1158                 switch (vpd[index]) {
1159                 case 0x82:
1160                 case 0x91:
1161                         index += 1;
1162                         lenlo = vpd[index];
1163                         index += 1;
1164                         lenhi = vpd[index];
1165                         index += 1;
1166                         i = ((((unsigned short)lenhi) << 8) + lenlo);
1167                         index += i;
1168                         break;
1169                 case 0x90:
1170                         index += 1;
1171                         lenlo = vpd[index];
1172                         index += 1;
1173                         lenhi = vpd[index];
1174                         index += 1;
1175                         Length = ((((unsigned short)lenhi) << 8) + lenlo);
1176                         if (Length > len - index)
1177                                 Length = len - index;
1178                         while (Length > 0) {
1179                         /* Look for Serial Number */
1180                         if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
1181                                 index += 2;
1182                                 i = vpd[index];
1183                                 index += 1;
1184                                 j = 0;
1185                                 Length -= (3+i);
1186                                 while(i--) {
1187                                         phba->SerialNumber[j++] = vpd[index++];
1188                                         if (j == 31)
1189                                                 break;
1190                                 }
1191                                 phba->SerialNumber[j] = 0;
1192                                 continue;
1193                         }
1194                         else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
1195                                 phba->vpd_flag |= VPD_MODEL_DESC;
1196                                 index += 2;
1197                                 i = vpd[index];
1198                                 index += 1;
1199                                 j = 0;
1200                                 Length -= (3+i);
1201                                 while(i--) {
1202                                         phba->ModelDesc[j++] = vpd[index++];
1203                                         if (j == 255)
1204                                                 break;
1205                                 }
1206                                 phba->ModelDesc[j] = 0;
1207                                 continue;
1208                         }
1209                         else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
1210                                 phba->vpd_flag |= VPD_MODEL_NAME;
1211                                 index += 2;
1212                                 i = vpd[index];
1213                                 index += 1;
1214                                 j = 0;
1215                                 Length -= (3+i);
1216                                 while(i--) {
1217                                         phba->ModelName[j++] = vpd[index++];
1218                                         if (j == 79)
1219                                                 break;
1220                                 }
1221                                 phba->ModelName[j] = 0;
1222                                 continue;
1223                         }
1224                         else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
1225                                 phba->vpd_flag |= VPD_PROGRAM_TYPE;
1226                                 index += 2;
1227                                 i = vpd[index];
1228                                 index += 1;
1229                                 j = 0;
1230                                 Length -= (3+i);
1231                                 while(i--) {
1232                                         phba->ProgramType[j++] = vpd[index++];
1233                                         if (j == 255)
1234                                                 break;
1235                                 }
1236                                 phba->ProgramType[j] = 0;
1237                                 continue;
1238                         }
1239                         else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
1240                                 phba->vpd_flag |= VPD_PORT;
1241                                 index += 2;
1242                                 i = vpd[index];
1243                                 index += 1;
1244                                 j = 0;
1245                                 Length -= (3+i);
1246                                 while(i--) {
1247                                 phba->Port[j++] = vpd[index++];
1248                                 if (j == 19)
1249                                         break;
1250                                 }
1251                                 phba->Port[j] = 0;
1252                                 continue;
1253                         }
1254                         else {
1255                                 index += 2;
1256                                 i = vpd[index];
1257                                 index += 1;
1258                                 index += i;
1259                                 Length -= (3 + i);
1260                         }
1261                 }
1262                 finished = 0;
1263                 break;
1264                 case 0x78:
1265                         finished = 1;
1266                         break;
1267                 default:
1268                         index ++;
1269                         break;
1270                 }
1271         }
1272
1273         return(1);
1274 }
1275
1276 /**
1277  * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
1278  * @phba: pointer to lpfc hba data structure.
1279  * @mdp: pointer to the data structure to hold the derived model name.
1280  * @descp: pointer to the data structure to hold the derived description.
1281  *
1282  * This routine retrieves HBA's description based on its registered PCI device
1283  * ID. The @descp passed into this function points to an array of 256 chars. It
1284  * shall be returned with the model name, maximum speed, and the host bus type.
1285  * The @mdp passed into this function points to an array of 80 chars. When the
1286  * function returns, the @mdp will be filled with the model name.
1287  **/
1288 static void
1289 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
1290 {
1291         lpfc_vpd_t *vp;
1292         uint16_t dev_id = phba->pcidev->device;
1293         int max_speed;
1294         int GE = 0;
1295         struct {
1296                 char * name;
1297                 int    max_speed;
1298                 char * bus;
1299         } m = {"<Unknown>", 0, ""};
1300
1301         if (mdp && mdp[0] != '\0'
1302                 && descp && descp[0] != '\0')
1303                 return;
1304
1305         if (phba->lmt & LMT_10Gb)
1306                 max_speed = 10;
1307         else if (phba->lmt & LMT_8Gb)
1308                 max_speed = 8;
1309         else if (phba->lmt & LMT_4Gb)
1310                 max_speed = 4;
1311         else if (phba->lmt & LMT_2Gb)
1312                 max_speed = 2;
1313         else
1314                 max_speed = 1;
1315
1316         vp = &phba->vpd;
1317
1318         switch (dev_id) {
1319         case PCI_DEVICE_ID_FIREFLY:
1320                 m = (typeof(m)){"LP6000", max_speed, "PCI"};
1321                 break;
1322         case PCI_DEVICE_ID_SUPERFLY:
1323                 if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
1324                         m = (typeof(m)){"LP7000", max_speed,  "PCI"};
1325                 else
1326                         m = (typeof(m)){"LP7000E", max_speed, "PCI"};
1327                 break;
1328         case PCI_DEVICE_ID_DRAGONFLY:
1329                 m = (typeof(m)){"LP8000", max_speed, "PCI"};
1330                 break;
1331         case PCI_DEVICE_ID_CENTAUR:
1332                 if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
1333                         m = (typeof(m)){"LP9002", max_speed, "PCI"};
1334                 else
1335                         m = (typeof(m)){"LP9000", max_speed, "PCI"};
1336                 break;
1337         case PCI_DEVICE_ID_RFLY:
1338                 m = (typeof(m)){"LP952", max_speed, "PCI"};
1339                 break;
1340         case PCI_DEVICE_ID_PEGASUS:
1341                 m = (typeof(m)){"LP9802", max_speed, "PCI-X"};
1342                 break;
1343         case PCI_DEVICE_ID_THOR:
1344                 m = (typeof(m)){"LP10000", max_speed, "PCI-X"};
1345                 break;
1346         case PCI_DEVICE_ID_VIPER:
1347                 m = (typeof(m)){"LPX1000", max_speed,  "PCI-X"};
1348                 break;
1349         case PCI_DEVICE_ID_PFLY:
1350                 m = (typeof(m)){"LP982", max_speed, "PCI-X"};
1351                 break;
1352         case PCI_DEVICE_ID_TFLY:
1353                 m = (typeof(m)){"LP1050", max_speed, "PCI-X"};
1354                 break;
1355         case PCI_DEVICE_ID_HELIOS:
1356                 m = (typeof(m)){"LP11000", max_speed, "PCI-X2"};
1357                 break;
1358         case PCI_DEVICE_ID_HELIOS_SCSP:
1359                 m = (typeof(m)){"LP11000-SP", max_speed, "PCI-X2"};
1360                 break;
1361         case PCI_DEVICE_ID_HELIOS_DCSP:
1362                 m = (typeof(m)){"LP11002-SP", max_speed, "PCI-X2"};
1363                 break;
1364         case PCI_DEVICE_ID_NEPTUNE:
1365                 m = (typeof(m)){"LPe1000", max_speed, "PCIe"};
1366                 break;
1367         case PCI_DEVICE_ID_NEPTUNE_SCSP:
1368                 m = (typeof(m)){"LPe1000-SP", max_speed, "PCIe"};
1369                 break;
1370         case PCI_DEVICE_ID_NEPTUNE_DCSP:
1371                 m = (typeof(m)){"LPe1002-SP", max_speed, "PCIe"};
1372                 break;
1373         case PCI_DEVICE_ID_BMID:
1374                 m = (typeof(m)){"LP1150", max_speed, "PCI-X2"};
1375                 break;
1376         case PCI_DEVICE_ID_BSMB:
1377                 m = (typeof(m)){"LP111", max_speed, "PCI-X2"};
1378                 break;
1379         case PCI_DEVICE_ID_ZEPHYR:
1380                 m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
1381                 break;
1382         case PCI_DEVICE_ID_ZEPHYR_SCSP:
1383                 m = (typeof(m)){"LPe11000", max_speed, "PCIe"};
1384                 break;
1385         case PCI_DEVICE_ID_ZEPHYR_DCSP:
1386                 m = (typeof(m)){"LP2105", max_speed, "PCIe"};
1387                 GE = 1;
1388                 break;
1389         case PCI_DEVICE_ID_ZMID:
1390                 m = (typeof(m)){"LPe1150", max_speed, "PCIe"};
1391                 break;
1392         case PCI_DEVICE_ID_ZSMB:
1393                 m = (typeof(m)){"LPe111", max_speed, "PCIe"};
1394                 break;
1395         case PCI_DEVICE_ID_LP101:
1396                 m = (typeof(m)){"LP101", max_speed, "PCI-X"};
1397                 break;
1398         case PCI_DEVICE_ID_LP10000S:
1399                 m = (typeof(m)){"LP10000-S", max_speed, "PCI"};
1400                 break;
1401         case PCI_DEVICE_ID_LP11000S:
1402                 m = (typeof(m)){"LP11000-S", max_speed,
1403                         "PCI-X2"};
1404                 break;
1405         case PCI_DEVICE_ID_LPE11000S:
1406                 m = (typeof(m)){"LPe11000-S", max_speed,
1407                         "PCIe"};
1408                 break;
1409         case PCI_DEVICE_ID_SAT:
1410                 m = (typeof(m)){"LPe12000", max_speed, "PCIe"};
1411                 break;
1412         case PCI_DEVICE_ID_SAT_MID:
1413                 m = (typeof(m)){"LPe1250", max_speed, "PCIe"};
1414                 break;
1415         case PCI_DEVICE_ID_SAT_SMB:
1416                 m = (typeof(m)){"LPe121", max_speed, "PCIe"};
1417                 break;
1418         case PCI_DEVICE_ID_SAT_DCSP:
1419                 m = (typeof(m)){"LPe12002-SP", max_speed, "PCIe"};
1420                 break;
1421         case PCI_DEVICE_ID_SAT_SCSP:
1422                 m = (typeof(m)){"LPe12000-SP", max_speed, "PCIe"};
1423                 break;
1424         case PCI_DEVICE_ID_SAT_S:
1425                 m = (typeof(m)){"LPe12000-S", max_speed, "PCIe"};
1426                 break;
1427         case PCI_DEVICE_ID_HORNET:
1428                 m = (typeof(m)){"LP21000", max_speed, "PCIe"};
1429                 GE = 1;
1430                 break;
1431         case PCI_DEVICE_ID_PROTEUS_VF:
1432                 m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"};
1433                 break;
1434         case PCI_DEVICE_ID_PROTEUS_PF:
1435                 m = (typeof(m)) {"LPev12000", max_speed, "PCIe IOV"};
1436                 break;
1437         case PCI_DEVICE_ID_PROTEUS_S:
1438                 m = (typeof(m)) {"LPemv12002-S", max_speed, "PCIe IOV"};
1439                 break;
1440         default:
1441                 m = (typeof(m)){ NULL };
1442                 break;
1443         }
1444
1445         if (mdp && mdp[0] == '\0')
1446                 snprintf(mdp, 79,"%s", m.name);
1447         if (descp && descp[0] == '\0')
1448                 snprintf(descp, 255,
1449                         "Emulex %s %d%s %s %s",
1450                         m.name, m.max_speed,
1451                         (GE) ? "GE" : "Gb",
1452                         m.bus,
1453                         (GE) ? "FCoE Adapter" : "Fibre Channel Adapter");
1454 }
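/*
 * Illustrative example of the strings built above: for a PCI_DEVICE_ID_SAT
 * adapter whose link mask reports LMT_8Gb, mdp would become "LPe12000" and
 * descp would become "Emulex LPe12000 8Gb PCIe Fibre Channel Adapter".
 */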
1455
1456 /**
1457  * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to an IOCB ring
1458  * @phba: pointer to lpfc hba data structure.
1459  * @pring: pointer to an IOCB ring.
1460  * @cnt: the number of IOCBs to be posted to the IOCB ring.
1461  *
1462  * This routine posts a given number of IOCBs with the associated DMA buffer
1463  * descriptors specified by the cnt argument to the given IOCB ring.
1464  *
1465  * Return codes
1466  *   The number of IOCBs that could not be posted to the IOCB ring.
1467  **/
1468 int
1469 lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
1470 {
1471         IOCB_t *icmd;
1472         struct lpfc_iocbq *iocb;
1473         struct lpfc_dmabuf *mp1, *mp2;
1474
1475         cnt += pring->missbufcnt;
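        /* Include any buffers that could not be posted on a previous call */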
1476
1477         /* While there are buffers to post */
1478         while (cnt > 0) {
1479                 /* Allocate buffer for  command iocb */
1480                 iocb = lpfc_sli_get_iocbq(phba);
1481                 if (iocb == NULL) {
1482                         pring->missbufcnt = cnt;
1483                         return cnt;
1484                 }
1485                 icmd = &iocb->iocb;
1486
1487                 /* 2 buffers can be posted per command */
1488                 /* Allocate buffer to post */
1489                 mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
1490                 if (mp1)
1491                     mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
1492                 if (!mp1 || !mp1->virt) {
1493                         kfree(mp1);
1494                         lpfc_sli_release_iocbq(phba, iocb);
1495                         pring->missbufcnt = cnt;
1496                         return cnt;
1497                 }
1498
1499                 INIT_LIST_HEAD(&mp1->list);
1500                 /* Allocate buffer to post */
1501                 if (cnt > 1) {
1502                         mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
1503                         if (mp2)
1504                                 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
1505                                                             &mp2->phys);
1506                         if (!mp2 || !mp2->virt) {
1507                                 kfree(mp2);
1508                                 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
1509                                 kfree(mp1);
1510                                 lpfc_sli_release_iocbq(phba, iocb);
1511                                 pring->missbufcnt = cnt;
1512                                 return cnt;
1513                         }
1514
1515                         INIT_LIST_HEAD(&mp2->list);
1516                 } else {
1517                         mp2 = NULL;
1518                 }
1519
1520                 icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
1521                 icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
1522                 icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
1523                 icmd->ulpBdeCount = 1;
1524                 cnt--;
1525                 if (mp2) {
1526                         icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
1527                         icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
1528                         icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
1529                         cnt--;
1530                         icmd->ulpBdeCount = 2;
1531                 }
1532
1533                 icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
1534                 icmd->ulpLe = 1;
1535
1536                 if (lpfc_sli_issue_iocb(phba, pring, iocb, 0) == IOCB_ERROR) {
1537                         lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
1538                         kfree(mp1);
1539                         cnt++;
1540                         if (mp2) {
1541                                 lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
1542                                 kfree(mp2);
1543                                 cnt++;
1544                         }
1545                         lpfc_sli_release_iocbq(phba, iocb);
1546                         pring->missbufcnt = cnt;
1547                         return cnt;
1548                 }
1549                 lpfc_sli_ringpostbuf_put(phba, pring, mp1);
1550                 if (mp2)
1551                         lpfc_sli_ringpostbuf_put(phba, pring, mp2);
1552         }
1553         pring->missbufcnt = 0;
1554         return 0;
1555 }
1556
1557 /**
1558  * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to the ELS ring
1559  * @phba: pointer to lpfc hba data structure.
1560  *
1561  * This routine posts the initial receive IOCB buffers to the ELS ring. The
1562  * number of initial IOCB buffers posted is specified by LPFC_BUF_RING0,
1563  * currently set to 64 IOCBs.
1564  *
1565  * Return codes
1566  *   0 - success (currently always success)
1567  **/
1568 static int
1569 lpfc_post_rcv_buf(struct lpfc_hba *phba)
1570 {
1571         struct lpfc_sli *psli = &phba->sli;
1572
1573         /* Ring 0, ELS / CT buffers */
1574         lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
1575         /* Ring 2 - FCP no buffers needed */
1576
1577         return 0;
1578 }
1579
1580 #define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
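/* S(N,V) rotates the 32-bit value V left by N bits, e.g. S(1, 0x80000000) == 1 */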
1581
1582 /**
1583  * lpfc_sha_init - Set up initial array of hash table entries
1584  * @HashResultPointer: pointer to an array as hash table.
1585  *
1586  * This routine sets up the initial values to the array of hash table entries
1587  * for the LC HBAs.
1588  **/
1589 static void
1590 lpfc_sha_init(uint32_t * HashResultPointer)
1591 {
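        /* Standard SHA-1 initial hash values H0-H4 (FIPS 180-1) */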
1592         HashResultPointer[0] = 0x67452301;
1593         HashResultPointer[1] = 0xEFCDAB89;
1594         HashResultPointer[2] = 0x98BADCFE;
1595         HashResultPointer[3] = 0x10325476;
1596         HashResultPointer[4] = 0xC3D2E1F0;
1597 }
1598
1599 /**
1600  * lpfc_sha_iterate - Iterate initial hash table with the working hash table
1601  * @HashResultPointer: pointer to an initial/result hash table.
1602  * @HashWorkingPointer: pointer to a working hash table.
1603  *
1604  * This routine iterates an initial hash table pointed to by @HashResultPointer
1605  * with the values from the working hash table pointed to by
1606  * @HashWorkingPointer. The results are put back into the initial hash table
1607  * and returned through @HashResultPointer as the result hash table.
1608  **/
1609 static void
1610 lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
1611 {
1612         int t;
1613         uint32_t TEMP;
1614         uint32_t A, B, C, D, E;
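        /* Expand words 16-79 of the 80-word SHA-1 message schedule */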
1615         t = 16;
1616         do {
1617                 HashWorkingPointer[t] =
1618                     S(1,
1619                       HashWorkingPointer[t - 3] ^ HashWorkingPointer[t -
1620                                                                      8] ^
1621                       HashWorkingPointer[t - 14] ^ HashWorkingPointer[t - 16]);
1622         } while (++t <= 79);
1623         t = 0;
1624         A = HashResultPointer[0];
1625         B = HashResultPointer[1];
1626         C = HashResultPointer[2];
1627         D = HashResultPointer[3];
1628         E = HashResultPointer[4];
1629
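        /* Eighty rounds of the SHA-1 compression function */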
1630         do {
1631                 if (t < 20) {
1632                         TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
1633                 } else if (t < 40) {
1634                         TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
1635                 } else if (t < 60) {
1636                         TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
1637                 } else {
1638                         TEMP = (B ^ C ^ D) + 0xCA62C1D6;
1639                 }
1640                 TEMP += S(5, A) + E + HashWorkingPointer[t];
1641                 E = D;
1642                 D = C;
1643                 C = S(30, B);
1644                 B = A;
1645                 A = TEMP;
1646         } while (++t <= 79);
1647
1648         HashResultPointer[0] += A;
1649         HashResultPointer[1] += B;
1650         HashResultPointer[2] += C;
1651         HashResultPointer[3] += D;
1652         HashResultPointer[4] += E;
1653
1654 }
1655
1656 /**
1657  * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
1658  * @RandomChallenge: pointer to the entry of host challenge random number array.
1659  * @HashWorking: pointer to the entry of the working hash array.
1660  *
1661  * This routine calculates the working hash array referred to by @HashWorking
1662  * from the challenge random numbers associated with the host, referred to by
1663  * @RandomChallenge. The result is put into the entry of the working hash
1664  * array and returned by reference through @HashWorking.
1665  **/
1666 static void
1667 lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
1668 {
1669         *HashWorking = (*RandomChallenge ^ *HashWorking);
1670 }
1671
1672 /**
1673  * lpfc_hba_init - Perform special handling for LC HBA initialization
1674  * @phba: pointer to lpfc hba data structure.
1675  * @hbainit: pointer to an array of unsigned 32-bit integers.
1676  *
1677  * This routine performs the special handling for LC HBA initialization.
1678  **/
1679 void
1680 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
1681 {
1682         int t;
1683         uint32_t *HashWorking;
1684         uint32_t *pwwnn = (uint32_t *) phba->wwnn;
1685
1686         HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
1687         if (!HashWorking)
1688                 return;
1689
1690         HashWorking[0] = HashWorking[78] = *pwwnn++;
1691         HashWorking[1] = HashWorking[79] = *pwwnn;
1692
1693         for (t = 0; t < 7; t++)
1694                 lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
1695
1696         lpfc_sha_init(hbainit);
1697         lpfc_sha_iterate(hbainit, HashWorking);
1698         kfree(HashWorking);
1699 }
1700
1701 /**
1702  * lpfc_cleanup - Performs vport cleanups before deleting a vport
1703  * @vport: pointer to a virtual N_Port data structure.
1704  *
1705  * This routine performs the necessary cleanups before deleting the @vport.
1706  * It invokes the discovery state machine to perform necessary state
1707  * transitions and to release the ndlps associated with the @vport. Note,
1708  * the physical port is treated as @vport 0.
1709  **/
1710 void
1711 lpfc_cleanup(struct lpfc_vport *vport)
1712 {
1713         struct lpfc_hba   *phba = vport->phba;
1714         struct lpfc_nodelist *ndlp, *next_ndlp;
1715         int i = 0;
1716
1717         if (phba->link_state > LPFC_LINK_DOWN)
1718                 lpfc_port_link_failure(vport);
1719
1720         list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
1721                 if (!NLP_CHK_NODE_ACT(ndlp)) {
1722                         ndlp = lpfc_enable_node(vport, ndlp,
1723                                                 NLP_STE_UNUSED_NODE);
1724                         if (!ndlp)
1725                                 continue;
1726                         spin_lock_irq(&phba->ndlp_lock);
1727                         NLP_SET_FREE_REQ(ndlp);
1728                         spin_unlock_irq(&phba->ndlp_lock);
1729                         /* Trigger the release of the ndlp memory */
1730                         lpfc_nlp_put(ndlp);
1731                         continue;
1732                 }
1733                 spin_lock_irq(&phba->ndlp_lock);
1734                 if (NLP_CHK_FREE_REQ(ndlp)) {
1735                         /* Free request already pending for this ndlp; skip it */
1736                         spin_unlock_irq(&phba->ndlp_lock);
1737                         continue;
1738                 } else
1739                         /* Indicate request for freeing ndlp memory */
1740                         NLP_SET_FREE_REQ(ndlp);
1741                 spin_unlock_irq(&phba->ndlp_lock);
1742
1743                 if (vport->port_type != LPFC_PHYSICAL_PORT &&
1744                     ndlp->nlp_DID == Fabric_DID) {
1745                         /* Just free up ndlp with Fabric_DID for vports */
1746                         lpfc_nlp_put(ndlp);
1747                         continue;
1748                 }
1749
1750                 if (ndlp->nlp_type & NLP_FABRIC)
1751                         lpfc_disc_state_machine(vport, ndlp, NULL,
1752                                         NLP_EVT_DEVICE_RECOVERY);
1753
1754                 lpfc_disc_state_machine(vport, ndlp, NULL,
1755                                              NLP_EVT_DEVICE_RM);
1756
1757         }
1758
1759         /* At this point, ALL ndlps should be gone
1760          * because of the previous NLP_EVT_DEVICE_RM. Let's wait for that
1761          * to happen if needed, polling every 10 ms for up to ~30 seconds.
1762          */
1763         while (!list_empty(&vport->fc_nodes)) {
1764
1765                 if (i++ > 3000) {
1766                         lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
1767                                 "0233 Nodelist not empty\n");
1768                         list_for_each_entry_safe(ndlp, next_ndlp,
1769                                                 &vport->fc_nodes, nlp_listp) {
1770                                 lpfc_printf_vlog(ndlp->vport, KERN_ERR,
1771                                                 LOG_NODE,
1772                                                 "0282 did:x%x ndlp:x%p "
1773                                                 "usgmap:x%x refcnt:%d\n",
1774                                                 ndlp->nlp_DID, (void *)ndlp,
1775                                                 ndlp->nlp_usg_map,
1776                                                 atomic_read(
1777                                                         &ndlp->kref.refcount));
1778                         }
1779                         break;
1780                 }
1781
1782                 /* Wait for any activity on ndlps to settle */
1783                 msleep(10);
1784         }
1785         return;
1786 }
1787
1788 /**
1789  * lpfc_stop_vport_timers - Stop all the timers associated with a vport
1790  * @vport: pointer to a virtual N_Port data structure.
1791  *
1792  * This routine stops all the timers associated with a @vport. This function
1793  * is invoked before disabling or deleting a @vport. Note that the physical
1794  * port is treated as @vport 0.
1795  **/
1796 void
1797 lpfc_stop_vport_timers(struct lpfc_vport *vport)
1798 {
1799         del_timer_sync(&vport->els_tmofunc);
1800         del_timer_sync(&vport->fc_fdmitmo);
1801         lpfc_can_disctmo(vport);
1802         return;
1803 }
1804
1805 /**
1806  * lpfc_stop_phba_timers - Stop all the timers associated with an HBA
1807  * @phba: pointer to lpfc hba data structure.
1808  *
1809  * This routine stops all the timers associated with an HBA. This function is
1810  * invoked before either taking an HBA offline or unloading the driver.
1811  **/
1812 static void
1813 lpfc_stop_phba_timers(struct lpfc_hba *phba)
1814 {
1815         del_timer_sync(&phba->fcp_poll_timer);
1816         lpfc_stop_vport_timers(phba->pport);
1817         del_timer_sync(&phba->sli.mbox_tmo);
1818         del_timer_sync(&phba->fabric_block_timer);
1819         phba->hb_outstanding = 0;
1820         del_timer_sync(&phba->hb_tmofunc);
1821         del_timer_sync(&phba->eratt_poll);
1822         return;
1823 }
1824
1825 /**
1826  * lpfc_block_mgmt_io - Mark an HBA's management interface as blocked
1827  * @phba: pointer to lpfc hba data structure.
1828  *
1829  * This routine marks an HBA's management interface as blocked. Once the HBA's
1830  * management interface is marked as blocked, all user space access to the
1831  * HBA, whether through the sysfs interface or the libdfc interface, is
1832  * blocked. The HBA is set to block the management interface when the driver
1833  * prepares the HBA interface for online or offline operation.
1834  **/
1835 static void
1836 lpfc_block_mgmt_io(struct lpfc_hba * phba)
1837 {
1838         unsigned long iflag;
1839
1840         spin_lock_irqsave(&phba->hbalock, iflag);
1841         phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
1842         spin_unlock_irqrestore(&phba->hbalock, iflag);
1843 }
1844
1845 /**
1846  * lpfc_online - Initialize and bring an HBA online
1847  * @phba: pointer to lpfc hba data structure.
1848  *
1849  * This routine initializes the HBA and brings it online. During this
1850  * process, the management interface is blocked to prevent user space access
1851  * to the HBA from interfering with the driver initialization.
1852  *
1853  * Return codes
1854  *   0 - successful
1855  *   1 - failed
1856  **/
1857 int
1858 lpfc_online(struct lpfc_hba *phba)
1859 {
1860         struct lpfc_vport *vport;
1861         struct lpfc_vport **vports;
1862         int i;
1863
1864         if (!phba)
1865                 return 0;
1866         vport = phba->pport;
1867
1868         if (!(vport->fc_flag & FC_OFFLINE_MODE))
1869                 return 0;
1870
1871         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1872                         "0458 Bring Adapter online\n");
1873
1874         lpfc_block_mgmt_io(phba);
1875
1876         if (!lpfc_sli_queue_setup(phba)) {
1877                 lpfc_unblock_mgmt_io(phba);
1878                 return 1;
1879         }
1880
1881         if (lpfc_sli_hba_setup(phba)) { /* Initialize the HBA */
1882                 lpfc_unblock_mgmt_io(phba);
1883                 return 1;
1884         }
1885
1886         vports = lpfc_create_vport_work_array(phba);
1887         if (vports != NULL)
1888                 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1889                         struct Scsi_Host *shost;
1890                         shost = lpfc_shost_from_vport(vports[i]);
1891                         spin_lock_irq(shost->host_lock);
1892                         vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
1893                         if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
1894                                 vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
1895                         spin_unlock_irq(shost->host_lock);
1896                 }
1897         lpfc_destroy_vport_work_array(phba, vports);
1898
1899         lpfc_unblock_mgmt_io(phba);
1900         return 0;
1901 }
1902
1903 /**
1904  * lpfc_unblock_mgmt_io - Mark an HBA's management interface as not blocked
1905  * @phba: pointer to lpfc hba data structure.
1906  *
1907  * This routine marks an HBA's management interface as not blocked. Once the
1908  * HBA's management interface is marked as not blocked, all user space access
1909  * to the HBA, whether through the sysfs interface or the libdfc interface,
1910  * is allowed. The HBA is set to block the management interface when the
1911  * driver prepares the HBA interface for online or offline operation and is
1912  * then set to unblock the management interface afterwards.
1913  **/
1914 void
1915 lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
1916 {
1917         unsigned long iflag;
1918
1919         spin_lock_irqsave(&phba->hbalock, iflag);
1920         phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
1921         spin_unlock_irqrestore(&phba->hbalock, iflag);
1922 }
1923
1924 /**
1925  * lpfc_offline_prep - Prepare an HBA to be brought offline
1926  * @phba: pointer to lpfc hba data structure.
1927  *
1928  * This routine is invoked to prepare an HBA to be brought offline. It issues
1929  * an unreg_login to all the nodes on all vports and flushes the mailbox
1930  * queue to make the HBA ready to be brought offline.
1931  **/
1932 void
1933 lpfc_offline_prep(struct lpfc_hba * phba)
1934 {
1935         struct lpfc_vport *vport = phba->pport;
1936         struct lpfc_nodelist  *ndlp, *next_ndlp;
1937         struct lpfc_vport **vports;
1938         int i;
1939
1940         if (vport->fc_flag & FC_OFFLINE_MODE)
1941                 return;
1942
1943         lpfc_block_mgmt_io(phba);
1944
1945         lpfc_linkdown(phba);
1946
1947         /* Issue an unreg_login to all nodes on all vports */
1948         vports = lpfc_create_vport_work_array(phba);
1949         if (vports != NULL) {
1950                 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1951                         struct Scsi_Host *shost;
1952
1953                         if (vports[i]->load_flag & FC_UNLOADING)
1954                                 continue;
1955                         shost = lpfc_shost_from_vport(vports[i]);
1956                         list_for_each_entry_safe(ndlp, next_ndlp,
1957                                                  &vports[i]->fc_nodes,
1958                                                  nlp_listp) {
1959                                 if (!NLP_CHK_NODE_ACT(ndlp))
1960                                         continue;
1961                                 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
1962                                         continue;
1963                                 if (ndlp->nlp_type & NLP_FABRIC) {
1964                                         lpfc_disc_state_machine(vports[i], ndlp,
1965                                                 NULL, NLP_EVT_DEVICE_RECOVERY);
1966                                         lpfc_disc_state_machine(vports[i], ndlp,
1967                                                 NULL, NLP_EVT_DEVICE_RM);
1968                                 }
1969                                 spin_lock_irq(shost->host_lock);
1970                                 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
1971                                 spin_unlock_irq(shost->host_lock);
1972                                 lpfc_unreg_rpi(vports[i], ndlp);
1973                         }
1974                 }
1975         }
1976         lpfc_destroy_vport_work_array(phba, vports);
1977
1978         lpfc_sli_flush_mbox_queue(phba);
1979 }
1980
1981 /**
1982  * lpfc_offline - Bring an HBA offline
1983  * @phba: pointer to lpfc hba data structure.
1984  *
1985  * This routine actually brings an HBA offline. It stops all the timers
1986  * associated with the HBA, brings down the SLI layer, and eventually
1987  * marks the HBA as in offline state for the upper layer protocol.
1988  **/
1989 void
1990 lpfc_offline(struct lpfc_hba *phba)
1991 {
1992         struct Scsi_Host  *shost;
1993         struct lpfc_vport **vports;
1994         int i;
1995
1996         if (phba->pport->fc_flag & FC_OFFLINE_MODE)
1997                 return;
1998
1999         /* stop all timers associated with this hba */
2000         lpfc_stop_phba_timers(phba);
2001         vports = lpfc_create_vport_work_array(phba);
2002         if (vports != NULL)
2003                 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++)
2004                         lpfc_stop_vport_timers(vports[i]);
2005         lpfc_destroy_vport_work_array(phba, vports);
2006         lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2007                         "0460 Bring Adapter offline\n");
2008         /* Bring down the SLI Layer and cleanup.  The HBA is offline
2009            now.  */
2010         lpfc_sli_hba_down(phba);
2011         spin_lock_irq(&phba->hbalock);
2012         phba->work_ha = 0;
2013         spin_unlock_irq(&phba->hbalock);
2014         vports = lpfc_create_vport_work_array(phba);
2015         if (vports != NULL)
2016                 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
2017                         shost = lpfc_shost_from_vport(vports[i]);
2018                         spin_lock_irq(shost->host_lock);
2019                         vports[i]->work_port_events = 0;
2020                         vports[i]->fc_flag |= FC_OFFLINE_MODE;
2021                         spin_unlock_irq(shost->host_lock);
2022                 }
2023         lpfc_destroy_vport_work_array(phba, vports);
2024 }
2025
2026 /**
2027  * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
2028  * @phba: pointer to lpfc hba data structure.
2029  *
2030  * This routine frees all the SCSI buffers and IOCBs from the driver
2031  * lists back to the kernel. It is called from lpfc_pci_remove_one to free
2032  * the internal resources before the device is removed from the system.
2033  *
2034  * Return codes
2035  *   0 - successful (for now, it always returns 0)
2036  **/
2037 static int
2038 lpfc_scsi_free(struct lpfc_hba *phba)
2039 {
2040         struct lpfc_scsi_buf *sb, *sb_next;
2041         struct lpfc_iocbq *io, *io_next;
2042
2043         spin_lock_irq(&phba->hbalock);
2044         /* Release all the lpfc_scsi_bufs maintained by this host. */
2045         list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) {
2046                 list_del(&sb->list);
2047                 pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
2048                               sb->dma_handle);
2049                 kfree(sb);
2050                 phba->total_scsi_bufs--;
2051         }
2052
2053         /* Release all the lpfc_iocbq entries maintained by this host. */
2054         list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
2055                 list_del(&io->list);
2056                 kfree(io);
2057                 phba->total_iocbq_bufs--;
2058         }
2059
2060         spin_unlock_irq(&phba->hbalock);
2061
2062         return 0;
2063 }
2064
2065 /**
2066  * lpfc_create_port - Create an FC port
2067  * @phba: pointer to lpfc hba data structure.
2068  * @instance: a unique integer ID for this FC port.
2069  * @dev: pointer to the device data structure.
2070  *
2071  * This routine creates an FC port for the upper layer protocol. The FC port
2072  * can be created on top of either a physical port or a virtual port provided
2073  * by the HBA. This routine also allocates a SCSI host data structure (shost)
2074  * and associates it with the newly created FC port before adding the shost
2075  * to the SCSI layer.
2076  *
2077  * Return codes
2078  *   @vport - pointer to the virtual N_Port data structure.
2079  *   NULL - port create failed.
2080  **/
2081 struct lpfc_vport *
2082 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
2083 {
2084         struct lpfc_vport *vport;
2085         struct Scsi_Host  *shost;
2086         int error = 0;
2087
2088         if (dev != &phba->pcidev->dev)
2089                 shost = scsi_host_alloc(&lpfc_vport_template,
2090                                         sizeof(struct lpfc_vport));
2091         else
2092                 shost = scsi_host_alloc(&lpfc_template,
2093                                         sizeof(struct lpfc_vport));
2094         if (!shost)
2095                 goto out;
2096
2097         vport = (struct lpfc_vport *) shost->hostdata;
2098         vport->phba = phba;
2099         vport->load_flag |= FC_LOADING;
2100         vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2101         vport->fc_rscn_flush = 0;
2102
2103         lpfc_get_vport_cfgparam(vport);
2104         shost->unique_id = instance;
2105         shost->max_id = LPFC_MAX_TARGET;
2106         shost->max_lun = vport->cfg_max_luns;
2107         shost->this_id = -1;
2108         shost->max_cmd_len = 16;
2109
2110         /*
2111          * Set initial can_queue value since 0 is no longer supported and
2112          * scsi_add_host will fail. This will be adjusted later based on the
2113          * max xri value determined in hba setup.
2114          */
2115         shost->can_queue = phba->cfg_hba_queue_depth - 10;
2116         if (dev != &phba->pcidev->dev) {
2117                 shost->transportt = lpfc_vport_transport_template;
2118                 vport->port_type = LPFC_NPIV_PORT;
2119         } else {
2120                 shost->transportt = lpfc_transport_template;
2121                 vport->port_type = LPFC_PHYSICAL_PORT;
2122         }
2123
2124         /* Initialize all internally managed lists. */
2125         INIT_LIST_HEAD(&vport->fc_nodes);
2126         spin_lock_init(&vport->work_port_lock);
2127
2128         init_timer(&vport->fc_disctmo);
2129         vport->fc_disctmo.function = lpfc_disc_timeout;
2130         vport->fc_disctmo.data = (unsigned long)vport;
2131
2132         init_timer(&vport->fc_fdmitmo);
2133         vport->fc_fdmitmo.function = lpfc_fdmi_tmo;
2134         vport->fc_fdmitmo.data = (unsigned long)vport;
2135
2136         init_timer(&vport->els_tmofunc);
2137         vport->els_tmofunc.function = lpfc_els_timeout;
2138         vport->els_tmofunc.data = (unsigned long)vport;
2139
2140         error = scsi_add_host(shost, dev);
2141         if (error)
2142                 goto out_put_shost;
2143
2144         spin_lock_irq(&phba->hbalock);
2145         list_add_tail(&vport->listentry, &phba->port_list);
2146         spin_unlock_irq(&phba->hbalock);
2147         return vport;
2148
2149 out_put_shost:
2150         scsi_host_put(shost);
2151 out:
2152         return NULL;
2153 }
2154
2155 /**
2156  * destroy_port - Destroy an FC port
2157  * @vport: pointer to an lpfc virtual N_Port data structure.
2158  *
2159  * This routine destroys an FC port from the upper layer protocol. All the
2160  * resources associated with the port are released.
2161  **/
2162 void
2163 destroy_port(struct lpfc_vport *vport)
2164 {
2165         struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2166         struct lpfc_hba  *phba = vport->phba;
2167
2168         lpfc_debugfs_terminate(vport);
2169         fc_remove_host(shost);
2170         scsi_remove_host(shost);
2171
2172         spin_lock_irq(&phba->hbalock);
2173         list_del_init(&vport->listentry);
2174         spin_unlock_irq(&phba->hbalock);
2175
2176         lpfc_cleanup(vport);
2177         return;
2178 }
2179
2180 /**
2181  * lpfc_get_instance - Get a unique integer ID
2182  *
2183  * This routine allocates a unique integer ID from the lpfc_hba_index pool. It
2184  * uses the kernel idr facility to perform the task.
2185  *
2186  * Return codes:
2187  *   instance - a unique integer ID allocated as the new instance.
2188  *   -1 - lpfc get instance failed.
2189  **/
2190 int
2191 lpfc_get_instance(void)
2192 {
2193         int instance = 0;
2194
2195         /* Assign an unused number */
2196         if (!idr_pre_get(&lpfc_hba_index, GFP_KERNEL))
2197                 return -1;
2198         if (idr_get_new(&lpfc_hba_index, NULL, &instance))
2199                 return -1;
2200         return instance;
2201 }
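/*
 * Note: the legacy idr_pre_get()/idr_get_new() pair used above hands out the
 * lowest available ID starting from 0, so the first HBA probed is normally
 * assigned instance 0.
 */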
2202
2203 /**
2204  * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
2205  * @shost: pointer to SCSI host data structure.
2206  * @time: elapsed time of the scan in jiffies.
2207  *
2208  * This routine is called by the SCSI layer with a SCSI host to determine
2209  * whether the host scan is finished.
2210  *
2211  * Note: there is no scan_start function as adapter initialization will have
2212  * asynchronously kicked off the link initialization.
2213  *
2214  * Return codes
2215  *   0 - SCSI host scan is not over yet.
2216  *   1 - SCSI host scan is over.
2217  **/
2218 int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
2219 {
2220         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2221         struct lpfc_hba   *phba = vport->phba;
2222         int stat = 0;
2223
2224         spin_lock_irq(shost->host_lock);
2225
2226         if (vport->load_flag & FC_UNLOADING) {
2227                 stat = 1;
2228                 goto finished;
2229         }
2230         if (time >= 30 * HZ) {
2231                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2232                                 "0461 Scanning longer than 30 "
2233                                 "seconds.  Continuing initialization\n");
2234                 stat = 1;
2235                 goto finished;
2236         }
2237         if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) {
2238                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2239                                 "0465 Link down longer than 15 "
2240                                 "seconds.  Continuing initialization\n");
2241                 stat = 1;
2242                 goto finished;
2243         }
2244
2245         if (vport->port_state != LPFC_VPORT_READY)
2246                 goto finished;
2247         if (vport->num_disc_nodes || vport->fc_prli_sent)
2248                 goto finished;
2249         if (vport->fc_map_cnt == 0 && time < 2 * HZ)
2250                 goto finished;
2251         if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
2252                 goto finished;
2253
2254         stat = 1;
2255
2256 finished:
2257         spin_unlock_irq(shost->host_lock);
2258         return stat;
2259 }
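/*
 * Summary of the criteria above: the scan is reported finished when the
 * driver is unloading, when scanning has run for more than 30 seconds, when
 * the link has been down for more than 15 seconds, or when the vport is
 * ready with no outstanding discovery, PRLI, or mailbox activity (allowing
 * a short grace period when no targets have been mapped yet).
 */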
2260
2261 /**
2262  * lpfc_host_attrib_init - Initialize SCSI host attributes on a FC port
2263  * @shost: pointer to SCSI host data structure.
2264  *
2265  * This routine initializes the attributes of a given SCSI host on an FC port.
2266  * The SCSI host can be either on top of a physical port or a virtual port.
2267  **/
2268 void lpfc_host_attrib_init(struct Scsi_Host *shost)
2269 {
2270         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
2271         struct lpfc_hba   *phba = vport->phba;
2272         /*
2273          * Set fixed host attributes.  Must be done after lpfc_sli_hba_setup().
2274          */
2275
2276         fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
2277         fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
2278         fc_host_supported_classes(shost) = FC_COS_CLASS3;
2279
2280         memset(fc_host_supported_fc4s(shost), 0,
2281                sizeof(fc_host_supported_fc4s(shost)));
2282         fc_host_supported_fc4s(shost)[2] = 1;
2283         fc_host_supported_fc4s(shost)[7] = 1;
2284
2285         lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
2286                                  sizeof fc_host_symbolic_name(shost));
2287
2288         fc_host_supported_speeds(shost) = 0;
2289         if (phba->lmt & LMT_10Gb)
2290                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
2291         if (phba->lmt & LMT_8Gb)
2292                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
2293         if (phba->lmt & LMT_4Gb)
2294                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
2295         if (phba->lmt & LMT_2Gb)
2296                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
2297         if (phba->lmt & LMT_1Gb)
2298                 fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;
2299
2300         fc_host_maxframe_size(shost) =
2301                 (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
2302                 (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;
2303
2304         /* This value is also unchanging */
2305         memset(fc_host_active_fc4s(shost), 0,
2306                sizeof(fc_host_active_fc4s(shost)));
2307         fc_host_active_fc4s(shost)[2] = 1;
2308         fc_host_active_fc4s(shost)[7] = 1;
2309
2310         fc_host_max_npiv_vports(shost) = phba->max_vpi;
2311         spin_lock_irq(shost->host_lock);
2312         vport->load_flag &= ~FC_LOADING;
2313         spin_unlock_irq(shost->host_lock);
2314 }
2315
2316 /**
2317  * lpfc_enable_msix - Enable MSI-X interrupt mode
2318  * @phba: pointer to lpfc hba data structure.
2319  *
2320  * This routine is invoked to enable the MSI-X interrupt vectors. The kernel
2321  * function pci_enable_msix() is called to enable the MSI-X vectors. Note that
2322  * pci_enable_msix(), once invoked, enables either all or nothing, depending
2323  * on the current availability of PCI vector resources. The device driver is
2324  * responsible for calling the individual request_irq() to register each MSI-X
2325  * vector with an interrupt handler, which is done in this function. Note that
2326  * later, when the device is unloading, the driver should always call free_irq()
2327  * on every MSI-X vector it has done request_irq() on before calling
2328  * pci_disable_msix(). Failure to do so results in a BUG_ON() and leaves the
2329  * device with MSI-X enabled, leaking its vectors.
2330  *
2331  * Return codes
2332  *   0 - successful
2333  *   other values - error
2334  **/
2335 static int
2336 lpfc_enable_msix(struct lpfc_hba *phba)
2337 {
2338         int rc, i;
2339         LPFC_MBOXQ_t *pmb;
2340
2341         /* Set up MSI-X multi-message vectors */
2342         for (i = 0; i < LPFC_MSIX_VECTORS; i++)
2343                 phba->msix_entries[i].entry = i;
2344
2345         /* Configure MSI-X capability structure */
2346         rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
2347                                 ARRAY_SIZE(phba->msix_entries));
2348         if (rc) {
2349                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2350                                 "0420 PCI enable MSI-X failed (%d)\n", rc);
2351                 goto msi_fail_out;
2352         } else
2353                 for (i = 0; i < LPFC_MSIX_VECTORS; i++)
2354                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2355                                         "0477 MSI-X entry[%d]: vector=x%x "
2356                                         "message=%d\n", i,
2357                                         phba->msix_entries[i].vector,
2358                                         phba->msix_entries[i].entry);
2359         /*
2360          * Assign MSI-X vectors to interrupt handlers
2361          */
2362
2363         /* vector-0 is associated to slow-path handler */
2364         rc = request_irq(phba->msix_entries[0].vector, &lpfc_sp_intr_handler,
2365                          IRQF_SHARED, LPFC_SP_DRIVER_HANDLER_NAME, phba);
2366         if (rc) {
2367                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2368                                 "0421 MSI-X slow-path request_irq failed "
2369                                 "(%d)\n", rc);
2370                 goto msi_fail_out;
2371         }
2372
2373         /* vector-1 is associated to fast-path handler */
2374         rc = request_irq(phba->msix_entries[1].vector, &lpfc_fp_intr_handler,
2375                          IRQF_SHARED, LPFC_FP_DRIVER_HANDLER_NAME, phba);
2376
2377         if (rc) {
2378                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2379                                 "0429 MSI-X fast-path request_irq failed "
2380                                 "(%d)\n", rc);
2381                 goto irq_fail_out;
2382         }
2383
2384         /*
2385          * Configure HBA MSI-X attention conditions to messages
2386          */
2387         pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2388
2389         if (!pmb) {
2390                 rc = -ENOMEM;
2391                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2392                                 "0474 Unable to allocate memory for issuing "
2393                                 "MBOX_CONFIG_MSI command\n");
2394                 goto mem_fail_out;
2395         }
2396         rc = lpfc_config_msi(phba, pmb);
2397         if (rc)
2398                 goto mbx_fail_out;
2399         rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
2400         if (rc != MBX_SUCCESS) {
2401                 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
2402                                 "0351 Config MSI mailbox command failed, "
2403                                 "mbxCmd x%x, mbxStatus x%x\n",
2404                                 pmb->mb.mbxCommand, pmb->mb.mbxStatus);
2405                 goto mbx_fail_out;
2406         }
2407
2408         /* Free memory allocated for mailbox command */
2409         mempool_free(pmb, phba->mbox_mem_pool);
2410         return rc;
2411
2412 mbx_fail_out:
2413         /* Free memory allocated for mailbox command */
2414         mempool_free(pmb, phba->mbox_mem_pool);
2415
2416 mem_fail_out:
2417         /* free the irq already requested */
2418         free_irq(phba->msix_entries[1].vector, phba);
2419
2420 irq_fail_out:
2421         /* free the irq already requested */
2422         free_irq(phba->msix_entries[0].vector, phba);
2423
2424 msi_fail_out:
2425         /* Unconfigure MSI-X capability structure */
2426         pci_disable_msix(phba->pcidev);
2427         return rc;
2428 }
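/*
 * Note the unwind order in the error paths above: a mailbox configuration
 * failure frees the fast-path vector and then the slow-path vector before
 * pci_disable_msix() is called, as required by the note in the function
 * header.
 */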
2429
2430 /**
2431  * lpfc_disable_msix - Disable MSI-X interrupt mode
2432  * @phba: pointer to lpfc hba data structure.
2433  *
2434  * This routine is invoked to release the MSI-X vectors and then disable the
2435  * MSI-X interrupt mode.
2436  **/
2437 static void
2438 lpfc_disable_msix(struct lpfc_hba *phba)
2439 {
2440         int i;
2441
2442         /* Free up MSI-X multi-message vectors */
2443         for (i = 0; i < LPFC_MSIX_VECTORS; i++)
2444                 free_irq(phba->msix_entries[i].vector, phba);
2445         /* Disable MSI-X */
2446         pci_disable_msix(phba->pcidev);
2447 }
2448
2449 /**
2450  * lpfc_enable_msi - Enable MSI interrupt mode
2451  * @phba: pointer to lpfc hba data structure.
2452  *
2453  * This routine is invoked to enable the MSI interrupt mode. The kernel
2454  * function pci_enable_msi() is called to enable the MSI vector. The
2455  * device driver is responsible for calling request_irq() to register the
2456  * MSI vector with an interrupt handler, which is done in this function.
2457  *
2458  * Return codes
2459  *      0 - successful
2460  *      other values - error
2461  */
2462 static int
2463 lpfc_enable_msi(struct lpfc_hba *phba)
2464 {
2465         int rc;
2466
2467         rc = pci_enable_msi(phba->pcidev);
2468         if (!rc)
2469                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2470                                 "0462 PCI enable MSI mode success.\n");
2471         else {
2472                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2473                                 "0471 PCI enable MSI mode failed (%d)\n", rc);
2474                 return rc;
2475         }
2476
2477         rc = request_irq(phba->pcidev->irq, lpfc_intr_handler,
2478                          IRQF_SHARED, LPFC_DRIVER_NAME, phba);
2479         if (rc) {
2480                 pci_disable_msi(phba->pcidev);
2481                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2482                                 "0478 MSI request_irq failed (%d)\n", rc);
2483         }
2484         return rc;
2485 }
2486
2487 /**
2488  * lpfc_disable_msi - Disable MSI interrupt mode
2489  * @phba: pointer to lpfc hba data structure.
2490  *
2491  * This routine is invoked to disable the MSI interrupt mode. The driver
2492  * calls free_irq() on the MSI vector it has done request_irq() on before
2493  * calling pci_disable_msi(). Failure to do so results in a BUG_ON() and
2494  * leaves the device with MSI enabled, leaking its vector.
2495  */
2496
2497 static void
2498 lpfc_disable_msi(struct lpfc_hba *phba)
2499 {
2500         free_irq(phba->pcidev->irq, phba);
2501         pci_disable_msi(phba->pcidev);
2502         return;
2503 }
2504
2505 /**
2506  * lpfc_log_intr_mode - Log the active interrupt mode
2507  * @phba: pointer to lpfc hba data structure.
2508  * @intr_mode: active interrupt mode adopted.
2509  *
2510  * This routine is invoked to log the interrupt mode currently in use for
2511  * the device.
2512  */
2513 static void
2514 lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
2515 {
2516         switch (intr_mode) {
2517         case 0:
2518                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2519                                 "0470 Enable INTx interrupt mode.\n");
2520                 break;
2521         case 1:
2522                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2523                                 "0481 Enabled MSI interrupt mode.\n");
2524                 break;
2525         case 2:
2526                 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2527                                 "0480 Enabled MSI-X interrupt mode.\n");
2528                 break;
2529         default:
2530                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2531                                 "0482 Illegal interrupt mode.\n");
2532                 break;
2533         }
2534         return;
2535 }
2536
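/**
 * lpfc_stop_port - Stop HBA port activity
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine clears all interrupt enable conditions, clears any pending
 * host attention interrupts, stops the HBA timers, and clears the pending
 * work events for the physical port.
 **/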
2537 static void
2538 lpfc_stop_port(struct lpfc_hba *phba)
2539 {
2540         /* Clear all interrupt enable conditions */
2541         writel(0, phba->HCregaddr);
2542         readl(phba->HCregaddr); /* flush */
2543         /* Clear all pending interrupts */
2544         writel(0xffffffff, phba->HAregaddr);
2545         readl(phba->HAregaddr); /* flush */
2546
2547         /* Reset some HBA SLI setup states */
2548         lpfc_stop_phba_timers(phba);
2549         phba->pport->work_port_events = 0;
2550
2551         return;
2552 }
2553
2554 /**
2555  * lpfc_enable_intr - Enable device interrupt
2556  * @phba: pointer to lpfc hba data structure.
 * @cfg_mode: interrupt mode to be configured (0 = INTx, 1 = MSI, 2 = MSI-X).
2557  *
2558  * This routine is invoked to enable the device interrupt and associate the
2559  * driver's interrupt handler(s) with interrupt vector(s). Depending on the
2560  * interrupt mode configured for the driver, the driver will try to fall back
2561  * from the configured interrupt mode to an interrupt mode supported by the
2562  * platform, kernel, and device, in the order: MSI-X -> MSI -> IRQ.
2563  *
2564  * Return codes
2565  *   intr_mode - the interrupt mode actually enabled (0 = INTx, 1 = MSI, 2 = MSI-X)
2566  *   LPFC_INTR_ERROR - no interrupt mode could be enabled
2567  **/
2568 static uint32_t
2569 lpfc_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
2570 {
2571         uint32_t intr_mode = LPFC_INTR_ERROR;
2572         int retval;
2573
2574         if (cfg_mode == 2) {
2575                 /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
2576                 retval = lpfc_sli_config_port(phba, 3);
2577                 if (!retval) {
2578                         /* Now, try to enable MSI-X interrupt mode */
2579                         retval = lpfc_enable_msix(phba);
2580                         if (!retval) {
2581                                 /* Indicate initialization to MSI-X mode */
2582                                 phba->intr_type = MSIX;
2583                                 intr_mode = 2;
2584                         }
2585                 }
2586         }
2587
2588         /* Fallback to MSI if MSI-X initialization failed */
2589         if (cfg_mode >= 1 && phba->intr_type == NONE) {
2590                 retval = lpfc_enable_msi(phba);
2591                 if (!retval) {
2592                         /* Indicate initialization to MSI mode */
2593                         phba->intr_type = MSI;
2594                         intr_mode = 1;
2595                 }
2596         }
2597
2598         /* Fallback to INTx if both MSI-X/MSI initalization failed */
2599         if (phba->intr_type == NONE) {
2600                 retval = request_irq(phba->pcidev->irq, lpfc_intr_handler,
2601                                      IRQF_SHARED, LPFC_DRIVER_NAME, phba);
2602                 if (!retval) {
2603                         /* Indicate initialization to INTx mode */
2604                         phba->intr_type = INTx;
2605                         intr_mode = 0;
2606                 }
2607         }
2608         return intr_mode;
2609 }
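/*
 * Fallback behaviour of lpfc_enable_intr() above: cfg_mode 2 tries MSI-X,
 * then MSI, then INTx; cfg_mode 1 tries MSI, then INTx; cfg_mode 0 uses
 * INTx only.
 */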
2610
2611 /**
2612  * lpfc_disable_intr - Disable device interrupt
2613  * @phba: pointer to lpfc hba data structure.
2614  *
2615  * This routine is invoked to disable device interrupt and disassociate the
2616  * driver's interrupt handler(s) from interrupt vector(s). Depending on the
2617  * interrupt mode, the driver will release the interrupt vector(s) for the
2618  * message signaled interrupt.
2619  **/
2620 static void
2621 lpfc_disable_intr(struct lpfc_hba *phba)
2622 {
2623         /* Disable the currently initialized interrupt mode */
2624         if (phba->intr_type == MSIX)
2625                 lpfc_disable_msix(phba);
2626         else if (phba->intr_type == MSI)
2627                 lpfc_disable_msi(phba);
2628         else if (phba->intr_type == INTx)
2629                 free_irq(phba->pcidev->irq, phba);
2630
2631         /* Reset interrupt management states */
2632         phba->intr_type = NONE;
2633         phba->sli.slistat.sli_intr = 0;
2634
2635         return;
2636 }
2637
2638 /**
2639  * lpfc_pci_probe_one - lpfc PCI probe func to register device to PCI subsystem
2640  * @pdev: pointer to PCI device
2641  * @pid: pointer to PCI device identifier
2642  *
2643  * This routine is to be registered with the kernel's PCI subsystem. When an
2644  * Emulex HBA is present on the PCI bus, the kernel PCI subsystem matches the
2645  * PCI device-specific information of the device against the device IDs that
2646  * the driver states it can support. If the match is successful, the driver
2647  * core invokes this routine. If this routine determines it can claim the
2648  * HBA, it does all the initialization that it needs to do to handle the
2649  * HBA properly.
2650  *
2651  * Return code
2652  *   0 - driver can claim the device
2653  *   negative value - driver can not claim the device
2654  **/
2655 static int __devinit
2656 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
2657 {
2658         struct lpfc_vport *vport = NULL;
2659         struct lpfc_hba   *phba;
2660         struct lpfc_sli   *psli;
2661         struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
2662         struct Scsi_Host  *shost = NULL;
2663         void *ptr;
2664         unsigned long bar0map_len, bar2map_len;
2665         int error = -ENODEV, retval;
2666         int  i, hbq_count;
2667         uint16_t iotag;
2668         uint32_t cfg_mode, intr_mode;
2669         int bars = pci_select_bars(pdev, IORESOURCE_MEM);
2670         struct lpfc_adapter_event_header adapter_event;
2671
2672         if (pci_enable_device_mem(pdev))
2673                 goto out;
2674         if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
2675                 goto out_disable_device;
2676
2677         phba = kzalloc(sizeof (struct lpfc_hba), GFP_KERNEL);
2678         if (!phba)
2679                 goto out_release_regions;
2680
2681         atomic_set(&phba->fast_event_count, 0);
2682         spin_lock_init(&phba->hbalock);
2683
2684         /* Initialize ndlp management spinlock */
2685         spin_lock_init(&phba->ndlp_lock);
2686
2687         phba->pcidev = pdev;
2688
2689         /* Assign an unused board number */
2690         if ((phba->brd_no = lpfc_get_instance()) < 0)
2691                 goto out_free_phba;
2692
2693         INIT_LIST_HEAD(&phba->port_list);
2694         init_waitqueue_head(&phba->wait_4_mlo_m_q);
2695         /*
2696          * Get all the module params for configuring this host and then
2697          * establish the host.
2698          */
2699         lpfc_get_cfgparam(phba);
2700         phba->max_vpi = LPFC_MAX_VPI;
2701
2702         /* Initialize timers used by driver */
2703         init_timer(&phba->hb_tmofunc);
2704         phba->hb_tmofunc.function = lpfc_hb_timeout;
2705         phba->hb_tmofunc.data = (unsigned long)phba;
2706
2707         psli = &phba->sli;
2708         init_timer(&psli->mbox_tmo);
2709         psli->mbox_tmo.function = lpfc_mbox_timeout;
2710         psli->mbox_tmo.data = (unsigned long) phba;
2711         init_timer(&phba->fcp_poll_timer);
2712         phba->fcp_poll_timer.function = lpfc_poll_timeout;
2713         phba->fcp_poll_timer.data = (unsigned long) phba;
2714         init_timer(&phba->fabric_block_timer);
2715         phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
2716         phba->fabric_block_timer.data = (unsigned long) phba;
2717         init_timer(&phba->eratt_poll);
2718         phba->eratt_poll.function = lpfc_poll_eratt;
2719         phba->eratt_poll.data = (unsigned long) phba;
2720
2721         pci_set_master(pdev);
2722         pci_save_state(pdev);
2723         pci_try_set_mwi(pdev);
2724
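        /*
         * Prefer 64-bit DMA addressing; if the platform cannot support it,
         * fall back to a 32-bit DMA mask before giving up.
         */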
2725         if (pci_set_dma_mask(phba->pcidev, DMA_BIT_MASK(64)) != 0)
2726                 if (pci_set_dma_mask(phba->pcidev, DMA_BIT_MASK(32)) != 0)
2727                         goto out_idr_remove;
2728
2729         /*
2730          * Get the bus address of Bar0 and Bar2 and the number of bytes
2731          * required by each mapping.
2732          */
2733         phba->pci_bar0_map = pci_resource_start(phba->pcidev, 0);
2734         bar0map_len        = pci_resource_len(phba->pcidev, 0);
2735
2736         phba->pci_bar2_map = pci_resource_start(phba->pcidev, 2);
2737         bar2map_len        = pci_resource_len(phba->pcidev, 2);
2738
2739         /* Map HBA SLIM to a kernel virtual address. */
2740         phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
2741         if (!phba->slim_memmap_p) {
2742                 error = -ENODEV;
2743                 dev_printk(KERN_ERR, &pdev->dev,
2744                            "ioremap failed for SLIM memory.\n");
2745                 goto out_idr_remove;
2746         }
2747
2748         /* Map HBA Control Registers to a kernel virtual address. */
2749         phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
2750         if (!phba->ctrl_regs_memmap_p) {
2751                 error = -ENODEV;
2752                 dev_printk(KERN_ERR, &pdev->dev,
2753                            "ioremap failed for HBA control registers.\n");
2754                 goto out_iounmap_slim;
2755         }
2756
2757         /* Allocate memory for SLI-2 structures */
2758         phba->slim2p.virt = dma_alloc_coherent(&phba->pcidev->dev,
2759                                                SLI2_SLIM_SIZE,
2760                                                &phba->slim2p.phys,
2761                                                GFP_KERNEL);
2762         if (!phba->slim2p.virt)
2763                 goto out_iounmap;
2764
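        /*
         * Zero the SLI-2 shared memory region and record pointers to the
         * mailbox, port control block (PCB) and IOCB areas within it.
         */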
2765         memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
2766         phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
2767         phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
2768         phba->IOCBs = (phba->slim2p.virt +
2769                        offsetof(struct lpfc_sli2_slim, IOCBs));
2770
2771         phba->hbqslimp.virt = dma_alloc_coherent(&phba->pcidev->dev,
2772                                                  lpfc_sli_hbq_size(),
2773                                                  &phba->hbqslimp.phys,
2774                                                  GFP_KERNEL);
2775         if (!phba->hbqslimp.virt)
2776                 goto out_free_slim;
2777
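        /*
         * Carve the single hbqslimp DMA buffer into one entry array per
         * host buffer queue (HBQ), sized by that HBQ's entry_count.
         */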
2778         hbq_count = lpfc_sli_hbq_count();
2779         ptr = phba->hbqslimp.virt;
2780         for (i = 0; i < hbq_count; ++i) {
2781                 phba->hbqs[i].hbq_virt = ptr;
2782                 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
2783                 ptr += (lpfc_hbq_defs[i]->entry_count *
2784                         sizeof(struct lpfc_hbq_entry));
2785         }
2786         phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
2787         phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer  = lpfc_els_hbq_free;
2788
2789         memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
2790
2791         INIT_LIST_HEAD(&phba->hbqbuf_in_list);
2792
2793         /* Initialize the SLI Layer to run with lpfc HBAs. */
2794         lpfc_sli_setup(phba);
2795         lpfc_sli_queue_setup(phba);
2796
2797         retval = lpfc_mem_alloc(phba);
2798         if (retval) {
2799                 error = retval;
2800                 goto out_free_hbqslimp;
2801         }
2802
2803         /* Initialize and populate the iocb list per host.  */
2804         INIT_LIST_HEAD(&phba->lpfc_iocb_list);
2805         for (i = 0; i < LPFC_IOCB_LIST_CNT; i++) {
2806                 iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
2807                 if (iocbq_entry == NULL) {
2808                         printk(KERN_ERR "%s: only allocated %d iocbs of "
2809                                 "expected %d count. Unloading driver.\n",
2810                                 __func__, i, LPFC_IOCB_LIST_CNT);
2811                         error = -ENOMEM;
2812                         goto out_free_iocbq;
2813                 }
2814
2815                 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
2816                 if (iotag == 0) {
2817                         kfree (iocbq_entry);
2818                         printk(KERN_ERR "%s: failed to allocate IOTAG. "
2819                                "Unloading driver.\n",
2820                                 __func__);
2821                         error = -ENOMEM;
2822                         goto out_free_iocbq;
2823                 }
2824
2825                 spin_lock_irq(&phba->hbalock);
2826                 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
2827                 phba->total_iocbq_bufs++;
2828                 spin_unlock_irq(&phba->hbalock);
2829         }
2830
2831         /* Initialize HBA structure */
2832         phba->fc_edtov = FF_DEF_EDTOV;
2833         phba->fc_ratov = FF_DEF_RATOV;
2834         phba->fc_altov = FF_DEF_ALTOV;
2835         phba->fc_arbtov = FF_DEF_ARBTOV;
2836
2837         INIT_LIST_HEAD(&phba->work_list);
2838         phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
2839         phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
2840
2841         /* Initialize the wait queue head for the kernel thread */
2842         init_waitqueue_head(&phba->work_waitq);
2843
2844         /* Startup the kernel thread for this host adapter. */
2845         phba->worker_thread = kthread_run(lpfc_do_work, phba,
2846                                        "lpfc_worker_%d", phba->brd_no);
2847         if (IS_ERR(phba->worker_thread)) {
2848                 error = PTR_ERR(phba->worker_thread);
2849                 goto out_free_iocbq;
2850         }
2851
2852         /* Initialize the list of scsi buffers used by driver for scsi IO. */
2853         spin_lock_init(&phba->scsi_buf_list_lock);
2854         INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
2855
2856         /* Initialize list of fabric iocbs */
2857         INIT_LIST_HEAD(&phba->fabric_iocb_list);
2858
2859         /* Initialize list to save ELS buffers */
2860         INIT_LIST_HEAD(&phba->elsbuf);
2861
2862         vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
2863         if (!vport)
2864                 goto out_kthread_stop;
2865
2866         shost = lpfc_shost_from_vport(vport);
2867         phba->pport = vport;
2868         lpfc_debugfs_initialize(vport);
2869
2870         pci_set_drvdata(pdev, shost);
2871
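        /*
         * Derive kernel virtual addresses for the SLIM mailbox area and the
         * host attention, chip attention, host status and host control
         * registers from the mapped BAR0/BAR2 regions.
         */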
2872         phba->MBslimaddr = phba->slim_memmap_p;
2873         phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
2874         phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
2875         phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
2876         phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
2877
2878         /* Configure sysfs attributes */
2879         if (lpfc_alloc_sysfs_attr(vport)) {
2880                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2881                                 "1476 Failed to allocate sysfs attr\n");
2882                 error = -ENOMEM;
2883                 goto out_destroy_port;
2884         }
2885
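        /*
         * Interrupt mode setup: start from the configured mode and, if the
         * active-interrupt test below fails, step down one level at a time
         * (MSI-X -> MSI -> INTx) until a mode passes or INTx also fails.
         */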
2886         cfg_mode = phba->cfg_use_msi;
2887         while (true) {
2888                 /* Configure and enable interrupt */
2889                 intr_mode = lpfc_enable_intr(phba, cfg_mode);
2890                 if (intr_mode == LPFC_INTR_ERROR) {
2891                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2892                                         "0426 Failed to enable interrupt.\n");
2893                         goto out_free_sysfs_attr;
2894                 }
2895                 /* HBA SLI setup */
2896                 if (lpfc_sli_hba_setup(phba)) {
2897                         lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2898                                         "1477 Failed to set up hba\n");
2899                         error = -ENODEV;
2900                         goto out_remove_device;
2901                 }
2902
2903                 /* Wait 50ms for the interrupts of previous mailbox commands */
2904                 msleep(50);
2905                 /* Check active interrupts received */
2906                 if (phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
2907                         /* Log the current active interrupt mode */
2908                         phba->intr_mode = intr_mode;
2909                         lpfc_log_intr_mode(phba, intr_mode);
2910                         break;
2911                 } else {
2912                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2913                                         "0451 Configure interrupt mode (%d) "
2914                                         "failed active interrupt test.\n",
2915                                         intr_mode);
2916                         if (intr_mode == 0) {
2917                                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2918                                                 "0479 Failed to enable "
2919                                                 "interrupt.\n");
2920                                 error = -ENODEV;
2921                                 goto out_remove_device;
2922                         }
2923                         /* Stop HBA SLI setups */
2924                         lpfc_stop_port(phba);
2925                         /* Disable the current interrupt mode */
2926                         lpfc_disable_intr(phba);
2927                         /* Try next level of interrupt mode */
2928                         cfg_mode = --intr_mode;
2929                 }
2930         }
2931
2932         /*
2933          * hba setup may have changed the hba_queue_depth so we need to adjust
2934          * the value of can_queue.
2935          */
2936         shost->can_queue = phba->cfg_hba_queue_depth - 10;
2937         if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
2938
2939                 if (lpfc_prot_mask && lpfc_prot_guard) {
2940                         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2941                                         "1478 Registering BlockGuard with the "
2942                                         "SCSI layer\n");
2943
2944                         scsi_host_set_prot(shost, lpfc_prot_mask);
2945                         scsi_host_set_guard(shost, lpfc_prot_guard);
2946                 }
2947         }
2948
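        /*
         * Allocate the BlockGuard data dump buffer: try an order-10
         * allocation first and halve the size (decrement the order) on
         * failure until an allocation succeeds or the order reaches zero.
         */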
2949         if (!_dump_buf_data) {
2950                 int pagecnt = 10;
2951                 while (pagecnt) {
2952                         spin_lock_init(&_dump_buf_lock);
2953                         _dump_buf_data =
2954                                 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
2955                         if (_dump_buf_data) {
2956                                 printk(KERN_ERR "BLKGRD allocated %d pages for "
2957                                                 "_dump_buf_data at 0x%p\n",
2958                                                 (1 << pagecnt), _dump_buf_data);
2959                                 _dump_buf_data_order = pagecnt;
2960                                 memset(_dump_buf_data, 0, ((1 << PAGE_SHIFT)
2961                                                            << pagecnt));
2962                                 break;
2963                         } else {
2964                                 --pagecnt;
2965                         }
2966
2967                 }
2968
2969                 if (!_dump_buf_data_order)
2970                         printk(KERN_ERR "BLKGRD ERROR unable to allocate "
2971                                         "memory for hexdump\n");
2972
2973         } else {
2974                 printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p"
2975                        "\n", _dump_buf_data);
2976         }
2977
2978
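        /* Repeat the same order-shrinking allocation for the DIF dump buffer */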
2979         if (!_dump_buf_dif) {
2980                 int pagecnt = 10;
2981                 while (pagecnt) {
2982                         _dump_buf_dif =
2983                                 (char *) __get_free_pages(GFP_KERNEL, pagecnt);
2984                         if (_dump_buf_dif) {
2985                                 printk(KERN_ERR "BLKGRD allocated %d pages for "
2986                                                 "_dump_buf_dif at 0x%p\n",
2987                                                 (1 << pagecnt), _dump_buf_dif);
2988                                 _dump_buf_dif_order = pagecnt;
2989                                 memset(_dump_buf_dif, 0, ((1 << PAGE_SHIFT)
2990                                                           << pagecnt));
2991                                 break;
2992                         } else {
2993                                 --pagecnt;
2994                         }
2995
2996                 }
2997
2998                 if (!_dump_buf_dif_order)
2999                         printk(KERN_ERR "BLKGRD ERROR unable to allocate "
3000                                         "memory for hexdump\n");
3001
3002         } else {
3003                 printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n",
3004                                 _dump_buf_dif);
3005         }
3006
3007         lpfc_host_attrib_init(shost);
3008
3009         if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
3010                 spin_lock_irq(shost->host_lock);
3011                 lpfc_poll_start_timer(phba);
3012                 spin_unlock_irq(shost->host_lock);
3013         }
3014
3015         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3016                         "0428 Perform SCSI scan\n");
3017         /* Send board arrival event to upper layer */
3018         adapter_event.event_type = FC_REG_ADAPTER_EVENT;
3019         adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
3020         fc_host_post_vendor_event(shost, fc_get_event_number(),
3021                 sizeof(adapter_event),
3022                 (char *) &adapter_event,
3023                 LPFC_NL_VENDOR_ID);
3024
3025         return 0;
3026
3027 out_remove_device:
3028         spin_lock_irq(shost->host_lock);
3029         vport->load_flag |= FC_UNLOADING;
3030         spin_unlock_irq(shost->host_lock);
3031         lpfc_stop_phba_timers(phba);
3032         phba->pport->work_port_events = 0;
3033         lpfc_disable_intr(phba);
3034         lpfc_sli_hba_down(phba);
3035         lpfc_sli_brdrestart(phba);
3036 out_free_sysfs_attr:
3037         lpfc_free_sysfs_attr(vport);
3038 out_destroy_port:
3039         destroy_port(vport);
3040 out_kthread_stop:
3041         kthread_stop(phba->worker_thread);
3042 out_free_iocbq:
3043         list_for_each_entry_safe(iocbq_entry, iocbq_next,
3044                                                 &phba->lpfc_iocb_list, list) {
3045                 kfree(iocbq_entry);
3046                 phba->total_iocbq_bufs--;
3047         }
3048         lpfc_mem_free(phba);
3049 out_free_hbqslimp:
3050         dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
3051                           phba->hbqslimp.virt, phba->hbqslimp.phys);
3052 out_free_slim:
3053         dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
3054                           phba->slim2p.virt, phba->slim2p.phys);
3055 out_iounmap:
3056         iounmap(phba->ctrl_regs_memmap_p);
3057 out_iounmap_slim:
3058         iounmap(phba->slim_memmap_p);
3059 out_idr_remove:
3060         idr_remove(&lpfc_hba_index, phba->brd_no);
3061 out_free_phba:
3062         kfree(phba);
3063 out_release_regions:
3064         pci_release_selected_regions(pdev, bars);
3065 out_disable_device:
3066         pci_disable_device(pdev);
3067 out:
3068         pci_set_drvdata(pdev, NULL);
3069         if (shost)
3070                 scsi_host_put(shost);
3071         return error;
3072 }
3073
3074 /**
3075  * lpfc_pci_remove_one - lpfc PCI func to unregister device from PCI subsystem
3076  * @pdev: pointer to PCI device
3077  *
3078  * This routine is to be registered to the kernel's PCI subsystem. When an
3079  * Emulex HBA is removed from the PCI bus, it performs all the necessary cleanup
3080  * for the HBA device to be removed from the PCI subsystem properly.
3081  **/
3082 static void __devexit
3083 lpfc_pci_remove_one(struct pci_dev *pdev)
3084 {
3085         struct Scsi_Host  *shost = pci_get_drvdata(pdev);
3086         struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
3087         struct lpfc_vport **vports;
3088         struct lpfc_hba   *phba = vport->phba;
3089         int i;
3090         int bars = pci_select_bars(pdev, IORESOURCE_MEM);
3091
3092         spin_lock_irq(&phba->hbalock);
3093         vport->load_flag |= FC_UNLOADING;
3094         spin_unlock_irq(&phba->hbalock);
3095
3096         lpfc_free_sysfs_attr(vport);
3097
3098         /* Release all the vports against this physical port */
3099         vports = lpfc_create_vport_work_array(phba);
3100         if (vports != NULL)
3101                 for (i = 1; i <= phba->max_vpi && vports[i] != NULL; i++)
3102                         fc_vport_terminate(vports[i]->fc_vport);
3103         lpfc_destroy_vport_work_array(phba, vports);
3104
3105         /* Remove FC host and then SCSI host with the physical port */
3106         fc_remove_host(shost);
3107         scsi_remove_host(shost);
3108         lpfc_cleanup(vport);
3109
3110         /*
3111          * Bring down the SLI Layer. This step disables all interrupts,
3112          * clears the rings, discards all mailbox commands, and resets
3113          * the HBA.
3114          */
3115
3116         /* HBA interrupt will be disabled after this call */
3117         lpfc_sli_hba_down(phba);
3118         /* Stopping the kthread will trigger work_done one more time */
3119         kthread_stop(phba->worker_thread);
3120         /* Final cleanup of txcmplq and reset the HBA */
3121         lpfc_sli_brdrestart(phba);
3122
3123         lpfc_stop_phba_timers(phba);
3124         spin_lock_irq(&phba->hbalock);
3125         list_del_init(&vport->listentry);
3126         spin_unlock_irq(&phba->hbalock);
3127
3128         lpfc_debugfs_terminate(vport);
3129
3130         /* Disable interrupt */
3131         lpfc_disable_intr(phba);
3132
3133         pci_set_drvdata(pdev, NULL);
3134         scsi_host_put(shost);
3135
3136         /*
3137          * Call scsi_free before mem_free since scsi bufs are released to their
3138          * corresponding pools here.
3139          */
3140         lpfc_scsi_free(phba);
3141         lpfc_mem_free(phba);
3142
3143         dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
3144                           phba->hbqslimp.virt, phba->hbqslimp.phys);
3145
3146         /* Free resources associated with SLI2 interface */
3147         dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
3148                           phba->slim2p.virt, phba->slim2p.phys);
3149
3150         /* unmap adapter SLIM and Control Registers */
3151         iounmap(phba->ctrl_regs_memmap_p);
3152         iounmap(phba->slim_memmap_p);
3153
3154         idr_remove(&lpfc_hba_index, phba->brd_no);
3155
3156         kfree(phba);
3157
3158         pci_release_selected_regions(pdev, bars);
3159         pci_disable_device(pdev);
3160 }
3161
3162 /**
3163  * lpfc_pci_suspend_one - lpfc PCI func to suspend device for power management
3164  * @pdev: pointer to PCI device
3165  * @msg: power management message
3166  *
3167  * This routine is to be registered to the kernel's PCI subsystem to support
3168  * system Power Management (PM). When PM invokes this method, it quiesces the
3169  * device by stopping the driver's worker thread for the device, turning off
3170  * the device's interrupt and DMA, and bringing the device offline. Note that as the
3171  * driver implements the minimum PM requirements to a power-aware driver's PM
3172  * support for suspend/resume -- all the possible PM messages (SUSPEND,
3173  * HIBERNATE, FREEZE) to the suspend() method call will be treated as SUSPEND
3174  * and the driver will fully reinitialize its device during the resume() method
3175  * call, the driver will set the device to the PCI_D3hot state in PCI config space
3176  * instead of setting it according to the @msg provided by the PM.
3177  *
3178  * Return code
3179  *   0 - driver suspended the device
3180  *   Error otherwise
3181  **/
3182 static int
3183 lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
3184 {
3185         struct Scsi_Host *shost = pci_get_drvdata(pdev);
3186         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3187
3188         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3189                         "0473 PCI device Power Management suspend.\n");
3190
3191         /* Bring down the device */
3192         lpfc_offline_prep(phba);
3193         lpfc_offline(phba);
3194         kthread_stop(phba->worker_thread);
3195
3196         /* Disable interrupt from device */
3197         lpfc_disable_intr(phba);
3198
3199         /* Save device state to PCI config space */
3200         pci_save_state(pdev);
3201         pci_set_power_state(pdev, PCI_D3hot);
3202
3203         return 0;
3204 }
3205
3206 /**
3207  * lpfc_pci_resume_one - lpfc PCI func to resume device for power management
3208  * @pdev: pointer to PCI device
3209  *
3210  * This routine is to be registered to the kernel's PCI subsystem to support
3211  * system Power Management (PM). When PM invokes this method, it restores
3212  * the device's PCI config space state and fully reinitializes the device
3213  * and brings it online. Note that as the driver implements the minimum PM
3214  * requirements to a power-aware driver's PM for suspend/resume -- all
3215  * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend()
3216  * method call will be treated as SUSPEND and the driver will fully
3217  * reinitialize its device during the resume() method call, the device will be
3218  * set to PCI_D0 directly in PCI config space before restoring the state.
3219  *
3220  * Return code
3221  *   0 - driver resumed the device
3222  *   Error otherwise
3223  **/
3224 static int
3225 lpfc_pci_resume_one(struct pci_dev *pdev)
3226 {
3227         struct Scsi_Host *shost = pci_get_drvdata(pdev);
3228         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3229         uint32_t intr_mode;
3230         int error;
3231
3232         lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3233                         "0452 PCI device Power Management resume.\n");
3234
3235         /* Restore device state from PCI config space */
3236         pci_set_power_state(pdev, PCI_D0);
3237         pci_restore_state(pdev);
3238         if (pdev->is_busmaster)
3239                 pci_set_master(pdev);
3240
3241         /* Startup the kernel thread for this host adapter. */
3242         phba->worker_thread = kthread_run(lpfc_do_work, phba,
3243                                         "lpfc_worker_%d", phba->brd_no);
3244         if (IS_ERR(phba->worker_thread)) {
3245                 error = PTR_ERR(phba->worker_thread);
3246                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3247                                 "0434 PM resume failed to start worker "
3248                                 "thread: error=x%x.\n", error);
3249                 return error;
3250         }
3251
3252         /* Configure and enable interrupt */
3253         intr_mode = lpfc_enable_intr(phba, phba->intr_mode);
3254         if (intr_mode == LPFC_INTR_ERROR) {
3255                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3256                                 "0430 PM resume Failed to enable interrupt\n");
3257                 return -EIO;
3258         } else
3259                 phba->intr_mode = intr_mode;
3260
3261         /* Restart HBA and bring it online */
3262         lpfc_sli_brdrestart(phba);
3263         lpfc_online(phba);
3264
3265         /* Log the current active interrupt mode */
3266         lpfc_log_intr_mode(phba, phba->intr_mode);
3267
3268         return 0;
3269 }
3270
3271 /**
3272  * lpfc_io_error_detected - Driver method for handling PCI I/O error detected
3273  * @pdev: pointer to PCI device.
3274  * @state: the current PCI connection state.
3275  *
3276  * This routine is registered to the PCI subsystem for error handling. This
3277  * function is called by the PCI subsystem after a PCI bus error affecting
3278  * this device has been detected. When this function is invoked, it will
3279  * need to stop all the I/Os and interrupt(s) to the device. Once that is
3280  * done, it will return PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to
3281  * perform proper recovery as desired.
3282  *
3283  * Return codes
3284  *   PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
3285  *   PCI_ERS_RESULT_DISCONNECT - device could not be recovered
3286  **/
3287 static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev,
3288                                 pci_channel_state_t state)
3289 {
3290         struct Scsi_Host *shost = pci_get_drvdata(pdev);
3291         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3292         struct lpfc_sli *psli = &phba->sli;
3293         struct lpfc_sli_ring  *pring;
3294
3295         if (state == pci_channel_io_perm_failure) {
3296                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3297                                 "0472 PCI channel I/O permanent failure\n");
3298                 /* Block all SCSI devices' I/Os on the host */
3299                 lpfc_scsi_dev_block(phba);
3300                 /* Clean up all driver's outstanding SCSI I/Os */
3301                 lpfc_sli_flush_fcp_rings(phba);
3302                 return PCI_ERS_RESULT_DISCONNECT;
3303         }
3304
3305         pci_disable_device(pdev);
3306         /*
3307          * There may be I/Os dropped by the firmware.
3308          * Error out the iocbs (I/Os) on the txcmplq and let the SCSI layer
3309          * retry them after the link is re-established.
3310          */
3311         pring = &psli->ring[psli->fcp_ring];
3312         lpfc_sli_abort_iocb_ring(phba, pring);
3313
3314         /* Disable interrupt */
3315         lpfc_disable_intr(phba);
3316
3317         /* Request a slot reset. */
3318         return PCI_ERS_RESULT_NEED_RESET;
3319 }
3320
3321 /**
3322  * lpfc_io_slot_reset - Restart a PCI device from scratch
3323  * @pdev: pointer to PCI device.
3324  *
3325  * This routine is registered to the PCI subsystem for error handling. This is
3326  * called after the PCI bus has been reset to restart the PCI card from scratch,
3327  * as if from a cold-boot. During the PCI subsystem error recovery, after the
3328  * driver returns PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform
3329  * proper error recovery and then call this routine before calling the .resume
3330  * method to recover the device. This function will initialize the HBA device,
3331  * enable the interrupt, but it will just put the HBA into the offline state without
3332  * passing any I/O traffic.
3333  *
3334  * Return codes
3335  *   PCI_ERS_RESULT_RECOVERED - the device has been recovered
3336  *   PCI_ERS_RESULT_DISCONNECT - device could not be recovered
3337  */
3338 static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
3339 {
3340         struct Scsi_Host *shost = pci_get_drvdata(pdev);
3341         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3342         struct lpfc_sli *psli = &phba->sli;
3343         uint32_t intr_mode;
3344
3345         dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
3346         if (pci_enable_device_mem(pdev)) {
3347                 printk(KERN_ERR "lpfc: Cannot re-enable "
3348                         "PCI device after reset.\n");
3349                 return PCI_ERS_RESULT_DISCONNECT;
3350         }
3351
3352         pci_restore_state(pdev);
3353         if (pdev->is_busmaster)
3354                 pci_set_master(pdev);
3355
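        /*
         * The adapter must be reinitialized from scratch after the slot
         * reset, so mark SLI-2 mode as no longer active.
         */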
3356         spin_lock_irq(&phba->hbalock);
3357         psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
3358         spin_unlock_irq(&phba->hbalock);
3359
3360         /* Configure and enable interrupt */
3361         intr_mode = lpfc_enable_intr(phba, phba->intr_mode);
3362         if (intr_mode == LPFC_INTR_ERROR) {
3363                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3364                                 "0427 Cannot re-enable interrupt after "
3365                                 "slot reset.\n");
3366                 return PCI_ERS_RESULT_DISCONNECT;
3367         } else
3368                 phba->intr_mode = intr_mode;
3369
3370         /* Take device offline; this will perform cleanup */
3371         lpfc_offline(phba);
3372         lpfc_sli_brdrestart(phba);
3373
3374         /* Log the current active interrupt mode */
3375         lpfc_log_intr_mode(phba, phba->intr_mode);
3376
3377         return PCI_ERS_RESULT_RECOVERED;
3378 }
3379
3380 /**
3381  * lpfc_io_resume - Resume PCI I/O operation
3382  * @pdev: pointer to PCI device
3383  *
3384  * This routine is registered to the PCI subsystem for error handling. It is
3385  * called when kernel error recovery tells the lpfc driver that it is ok to
3386  * resume normal PCI operation after PCI bus error recovery. After this call,
3387  * traffic can start to flow from this device again.
3388  */
3389 static void lpfc_io_resume(struct pci_dev *pdev)
3390 {
3391         struct Scsi_Host *shost = pci_get_drvdata(pdev);
3392         struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
3393
3394         lpfc_online(phba);
3395 }
3396
3397 static struct pci_device_id lpfc_id_table[] = {
3398         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_VIPER,
3399                 PCI_ANY_ID, PCI_ANY_ID, },
3400         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_FIREFLY,
3401                 PCI_ANY_ID, PCI_ANY_ID, },
3402         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_THOR,
3403                 PCI_ANY_ID, PCI_ANY_ID, },
3404         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PEGASUS,
3405                 PCI_ANY_ID, PCI_ANY_ID, },
3406         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_CENTAUR,
3407                 PCI_ANY_ID, PCI_ANY_ID, },
3408         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_DRAGONFLY,
3409                 PCI_ANY_ID, PCI_ANY_ID, },
3410         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SUPERFLY,
3411                 PCI_ANY_ID, PCI_ANY_ID, },
3412         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_RFLY,
3413                 PCI_ANY_ID, PCI_ANY_ID, },
3414         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PFLY,
3415                 PCI_ANY_ID, PCI_ANY_ID, },
3416         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE,
3417                 PCI_ANY_ID, PCI_ANY_ID, },
3418         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_SCSP,
3419                 PCI_ANY_ID, PCI_ANY_ID, },
3420         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_NEPTUNE_DCSP,
3421                 PCI_ANY_ID, PCI_ANY_ID, },
3422         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS,
3423                 PCI_ANY_ID, PCI_ANY_ID, },
3424         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_SCSP,
3425                 PCI_ANY_ID, PCI_ANY_ID, },
3426         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HELIOS_DCSP,
3427                 PCI_ANY_ID, PCI_ANY_ID, },
3428         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BMID,
3429                 PCI_ANY_ID, PCI_ANY_ID, },
3430         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_BSMB,
3431                 PCI_ANY_ID, PCI_ANY_ID, },
3432         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR,
3433                 PCI_ANY_ID, PCI_ANY_ID, },
3434         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_HORNET,
3435                 PCI_ANY_ID, PCI_ANY_ID, },
3436         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_SCSP,
3437                 PCI_ANY_ID, PCI_ANY_ID, },
3438         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZEPHYR_DCSP,
3439                 PCI_ANY_ID, PCI_ANY_ID, },
3440         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZMID,
3441                 PCI_ANY_ID, PCI_ANY_ID, },
3442         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_ZSMB,
3443                 PCI_ANY_ID, PCI_ANY_ID, },
3444         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_TFLY,
3445                 PCI_ANY_ID, PCI_ANY_ID, },
3446         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP101,
3447                 PCI_ANY_ID, PCI_ANY_ID, },
3448         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP10000S,
3449                 PCI_ANY_ID, PCI_ANY_ID, },
3450         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LP11000S,
3451                 PCI_ANY_ID, PCI_ANY_ID, },
3452         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_LPE11000S,
3453                 PCI_ANY_ID, PCI_ANY_ID, },
3454         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT,
3455                 PCI_ANY_ID, PCI_ANY_ID, },
3456         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_MID,
3457                 PCI_ANY_ID, PCI_ANY_ID, },
3458         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SMB,
3459                 PCI_ANY_ID, PCI_ANY_ID, },
3460         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_DCSP,
3461                 PCI_ANY_ID, PCI_ANY_ID, },
3462         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_SCSP,
3463                 PCI_ANY_ID, PCI_ANY_ID, },
3464         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_SAT_S,
3465                 PCI_ANY_ID, PCI_ANY_ID, },
3466         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_VF,
3467                 PCI_ANY_ID, PCI_ANY_ID, },
3468         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_PF,
3469                 PCI_ANY_ID, PCI_ANY_ID, },
3470         {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
3471                 PCI_ANY_ID, PCI_ANY_ID, },
3472         { 0 }
3473 };
3474
3475 MODULE_DEVICE_TABLE(pci, lpfc_id_table);
3476
3477 static struct pci_error_handlers lpfc_err_handler = {
3478         .error_detected = lpfc_io_error_detected,
3479         .slot_reset = lpfc_io_slot_reset,
3480         .resume = lpfc_io_resume,
3481 };
3482
3483 static struct pci_driver lpfc_driver = {
3484         .name           = LPFC_DRIVER_NAME,
3485         .id_table       = lpfc_id_table,
3486         .probe          = lpfc_pci_probe_one,
3487         .remove         = __devexit_p(lpfc_pci_remove_one),
3488         .suspend        = lpfc_pci_suspend_one,
3489         .resume         = lpfc_pci_resume_one,
3490         .err_handler    = &lpfc_err_handler,
3491 };
3492
3493 /**
3494  * lpfc_init - lpfc module initialization routine
3495  *
3496  * This routine is to be invoked when the lpfc module is loaded into the
3497  * kernel. The special kernel macro module_init() is used to indicate the
3498  * role of this routine to the kernel as lpfc module entry point.
3499  *
3500  * Return codes
3501  *   0 - successful
3502  *   -ENOMEM - FC attach transport failed
3503  *   all others - failed
3504  */
3505 static int __init
3506 lpfc_init(void)
3507 {
3508         int error = 0;
3509
3510         printk(LPFC_MODULE_DESC "\n");
3511         printk(LPFC_COPYRIGHT "\n");
3512
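        /* Hook up vport create/delete handlers only when NPIV is enabled */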
3513         if (lpfc_enable_npiv) {
3514                 lpfc_transport_functions.vport_create = lpfc_vport_create;
3515                 lpfc_transport_functions.vport_delete = lpfc_vport_delete;
3516         }
3517         lpfc_transport_template =
3518                                 fc_attach_transport(&lpfc_transport_functions);
3519         if (lpfc_transport_template == NULL)
3520                 return -ENOMEM;
3521         if (lpfc_enable_npiv) {
3522                 lpfc_vport_transport_template =
3523                         fc_attach_transport(&lpfc_vport_transport_functions);
3524                 if (lpfc_vport_transport_template == NULL) {
3525                         fc_release_transport(lpfc_transport_template);
3526                         return -ENOMEM;
3527                 }
3528         }
3529         error = pci_register_driver(&lpfc_driver);
3530         if (error) {
3531                 fc_release_transport(lpfc_transport_template);
3532                 if (lpfc_enable_npiv)
3533                         fc_release_transport(lpfc_vport_transport_template);
3534         }
3535
3536         return error;
3537 }
3538
3539 /**
3540  * lpfc_exit - lpfc module removal routine
3541  *
3542  * This routine is invoked when the lpfc module is removed from the kernel.
3543  * The special kernel macro module_exit() is used to indicate the role of
3544  * this routine to the kernel as lpfc module exit point.
3545  */
3546 static void __exit
3547 lpfc_exit(void)
3548 {
3549         pci_unregister_driver(&lpfc_driver);
3550         fc_release_transport(lpfc_transport_template);
3551         if (lpfc_enable_npiv)
3552                 fc_release_transport(lpfc_vport_transport_template);
3553         if (_dump_buf_data) {
3554                 printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_data "
3555                                 "at 0x%p\n",
3556                                 (1L << _dump_buf_data_order), _dump_buf_data);
3557                 free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
3558         }
3559
3560         if (_dump_buf_dif) {
3561                 printk(KERN_ERR "BLKGRD freeing %lu pages for _dump_buf_dif "
3562                                 "at 0x%p\n",
3563                                 (1L << _dump_buf_dif_order), _dump_buf_dif);
3564                 free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
3565         }
3566 }
3567
3568 module_init(lpfc_init);
3569 module_exit(lpfc_exit);
3570 MODULE_LICENSE("GPL");
3571 MODULE_DESCRIPTION(LPFC_MODULE_DESC);
3572 MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
3573 MODULE_VERSION("0:" LPFC_DRIVER_VERSION);