2 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35 #include "firmware_exports.h"
38 * t3_wait_op_done_val - wait until an operation is completed
39 * @adapter: the adapter performing the operation
40 * @reg: the register to check for completion
41 * @mask: a single-bit field within @reg that indicates completion
42 * @polarity: the value of the field when the operation is completed
43 * @attempts: number of check iterations
44 * @delay: delay in usecs between iterations
45 * @valp: where to store the value of the register at completion time
47 * Wait until an operation is completed by checking a bit in a register
48 * up to @attempts times. If @valp is not NULL the value of the register
49 * at the time it indicated completion is stored there. Returns 0 if the
50 * operation completes and -EAGAIN otherwise.
53 int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
54 int polarity, int attempts, int delay, u32 *valp)
57 u32 val = t3_read_reg(adapter, reg);
59 if (!!(val & mask) == polarity) {
72 * t3_write_regs - write a bunch of registers
73 * @adapter: the adapter to program
74 * @p: an array of register address/register value pairs
75 * @n: the number of address/value pairs
76 * @offset: register address offset
78 * Takes an array of register address/register value pairs and writes each
79 * value to the corresponding register. Register addresses are adjusted
80 * by the supplied offset.
82 void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
83 int n, unsigned int offset)
86 t3_write_reg(adapter, p->reg_addr + offset, p->val);
92 * t3_set_reg_field - set a register field to a value
93 * @adapter: the adapter to program
94 * @addr: the register address
95 * @mask: specifies the portion of the register to modify
96 * @val: the new value for the register field
98 * Sets a register field specified by the supplied mask to the
101 void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
104 u32 v = t3_read_reg(adapter, addr) & ~mask;
106 t3_write_reg(adapter, addr, v | val);
107 t3_read_reg(adapter, addr); /* flush */
111 * t3_read_indirect - read indirectly addressed registers
113 * @addr_reg: register holding the indirect address
114 * @data_reg: register holding the value of the indirect register
115 * @vals: where the read register values are stored
116 * @start_idx: index of first indirect register to read
117 * @nregs: how many indirect registers to read
119 * Reads registers that are accessed indirectly through an address/data
122 static void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
123 unsigned int data_reg, u32 *vals,
124 unsigned int nregs, unsigned int start_idx)
127 t3_write_reg(adap, addr_reg, start_idx);
128 *vals++ = t3_read_reg(adap, data_reg);
134 * t3_mc7_bd_read - read from MC7 through backdoor accesses
135 * @mc7: identifies MC7 to read from
136 * @start: index of first 64-bit word to read
137 * @n: number of 64-bit words to read
138 * @buf: where to store the read result
140 * Read n 64-bit words from MC7 starting at word start, using backdoor
/*
 * t3_mc7_bd_read: read 64-bit words from MC7 memory via backdoor accesses.
 * NOTE(review): this extraction is missing interior lines (local
 * declarations, the range-check return, busy-timeout error handling and the
 * store into the result buffer) — left byte-for-byte as found; do not
 * restyle until the full text is available.
 */
143 int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
146 static const int shift[] = { 0, 0, 16, 24 };
147 static const int step[] = { 0, 32, 16, 8 };
149 unsigned int size64 = mc7->size / 8; /* # of 64-bit words */
150 struct adapter *adap = mc7->adapter;
/* reject reads that fall outside the part's 64-bit word range */
152 if (start >= size64 || start + n > size64)
/* scale the word index to a byte address for the configured bus width */
155 start *= (8 << mc7->width);
160 for (i = (1 << mc7->width) - 1; i >= 0; --i) {
164 t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
165 t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
166 val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
/* poll until the backdoor operation completes or attempts run out */
167 while ((val & F_BUSY) && attempts--)
168 val = t3_read_reg(adap,
169 mc7->offset + A_MC7_BD_OP);
173 val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
174 if (mc7->width == 0) {
175 val64 = t3_read_reg(adap,
178 val64 |= (u64) val << 32;
/* narrower widths assemble the 64-bit word from partial reads */
181 val >>= shift[mc7->width];
182 val64 |= (u64) val << (step[mc7->width] * i);
194 static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
196 u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
197 u32 val = F_PREEN | V_CLKDIV(clkdiv);
199 t3_write_reg(adap, A_MI1_CFG, val);
/* max polls of the MI1 busy bit before an MDIO operation is declared failed */
202 #define MDIO_ATTEMPTS 20
205 * MI1 read/write operations for clause 22 PHYs.
/*
 * Clause-22 MI1 register read.  NOTE(review): extraction is missing the
 * local declarations, the success check before reading A_MI1_DATA, and the
 * final return — left byte-for-byte as found.
 */
207 static int t3_mi1_read(struct adapter *adapter, int phy_addr, int mmd_addr,
208 int reg_addr, unsigned int *valp)
211 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
/* serialize MDIO access and select clause-22 (ST = 1) framing */
216 mutex_lock(&adapter->mdio_lock);
217 t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
218 t3_write_reg(adapter, A_MI1_ADDR, addr);
219 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
220 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
222 *valp = t3_read_reg(adapter, A_MI1_DATA);
223 mutex_unlock(&adapter->mdio_lock);
/*
 * Clause-22 MI1 register write.  NOTE(review): extraction is missing the
 * local declarations and the final return — left byte-for-byte as found.
 */
227 static int t3_mi1_write(struct adapter *adapter, int phy_addr, int mmd_addr,
228 int reg_addr, unsigned int val)
231 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
/* serialize MDIO access and select clause-22 (ST = 1) framing */
236 mutex_lock(&adapter->mdio_lock);
237 t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
238 t3_write_reg(adapter, A_MI1_ADDR, addr);
239 t3_write_reg(adapter, A_MI1_DATA, val);
240 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
241 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
242 mutex_unlock(&adapter->mdio_lock);
/* Clause-22 MDIO operations table; initializer body truncated in this
 * extraction. */
246 static const struct mdio_ops mi1_mdio_ops = {
252 * Performs the address cycle for clause 45 PHYs.
253 * Must be called with the MDIO_LOCK held.
255 static int mi1_wr_addr(struct adapter *adapter, int phy_addr, int mmd_addr,
258 u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
260 t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
261 t3_write_reg(adapter, A_MI1_ADDR, addr);
262 t3_write_reg(adapter, A_MI1_DATA, reg_addr);
263 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
264 return t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
269 * MI1 read/write operations for indirect-addressed PHYs.
/*
 * Indirect (clause-45) MDIO read: address cycle via mi1_wr_addr, then a
 * read cycle.  NOTE(review): extraction is missing declarations, success
 * checks and the final return — left byte-for-byte as found.
 */
271 static int mi1_ext_read(struct adapter *adapter, int phy_addr, int mmd_addr,
272 int reg_addr, unsigned int *valp)
276 mutex_lock(&adapter->mdio_lock);
277 ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
279 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
280 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
283 *valp = t3_read_reg(adapter, A_MI1_DATA);
285 mutex_unlock(&adapter->mdio_lock);
/*
 * Indirect (clause-45) MDIO write: address cycle via mi1_wr_addr, then a
 * write cycle.  NOTE(review): extraction is missing declarations, success
 * checks and the final return — left byte-for-byte as found.
 */
289 static int mi1_ext_write(struct adapter *adapter, int phy_addr, int mmd_addr,
290 int reg_addr, unsigned int val)
294 mutex_lock(&adapter->mdio_lock);
295 ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
297 t3_write_reg(adapter, A_MI1_DATA, val);
298 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
299 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
302 mutex_unlock(&adapter->mdio_lock);
/* Clause-45 (indirect) MDIO operations table; initializer body truncated in
 * this extraction. */
306 static const struct mdio_ops mi1_mdio_ext_ops = {
312 * t3_mdio_change_bits - modify the value of a PHY register
313 * @phy: the PHY to operate on
314 * @mmd: the device address
315 * @reg: the register address
316 * @clear: what part of the register value to mask off
317 * @set: what part of the register value to set
319 * Changes the value of a PHY register by applying a mask to its current
320 * value and ORing the result with a new value.
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
			unsigned int set)
{
	int ret;
	unsigned int val;

	ret = mdio_read(phy, mmd, reg, &val);
	if (!ret) {
		val &= ~clear;		/* drop the masked-off bits first */
		ret = mdio_write(phy, mmd, reg, val | set);
	}
	return ret;
}
337 * t3_phy_reset - reset a PHY block
338 * @phy: the PHY to operate on
339 * @mmd: the device address of the PHY block to reset
340 * @wait: how long to wait for the reset to complete in 1ms increments
342 * Resets a PHY block and optionally waits for the reset to complete.
343 * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
346 int t3_phy_reset(struct cphy *phy, int mmd, int wait)
351 err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
356 err = mdio_read(phy, mmd, MII_BMCR, &ctl);
362 } while (ctl && --wait);
368 * t3_phy_advertise - set the PHY advertisement registers for autoneg
369 * @phy: the PHY to operate on
370 * @advert: bitmap of capabilities the PHY should advertise
372 * Sets a 10/100/1000 PHY's advertisement registers to advertise the
373 * requested capabilities.
/*
 * Program a copper PHY's autoneg advertisement registers (MII_CTRL1000 for
 * GigE bits, MII_ADVERTISE for 10/100 and pause bits).  NOTE(review):
 * extraction is missing declarations, error-return checks after each MDIO
 * access, and the re-initialization of val before the 10/100 section —
 * left byte-for-byte as found.
 */
375 int t3_phy_advertise(struct cphy *phy, unsigned int advert)
378 unsigned int val = 0;
380 err = mdio_read(phy, 0, MII_CTRL1000, &val);
/* rewrite only the 1000BASE-T half/full bits */
384 val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
385 if (advert & ADVERTISED_1000baseT_Half)
386 val |= ADVERTISE_1000HALF;
387 if (advert & ADVERTISED_1000baseT_Full)
388 val |= ADVERTISE_1000FULL;
390 err = mdio_write(phy, 0, MII_CTRL1000, val);
/* translate ethtool ADVERTISED_* capability bits into MII register bits */
395 if (advert & ADVERTISED_10baseT_Half)
396 val |= ADVERTISE_10HALF;
397 if (advert & ADVERTISED_10baseT_Full)
398 val |= ADVERTISE_10FULL;
399 if (advert & ADVERTISED_100baseT_Half)
400 val |= ADVERTISE_100HALF;
401 if (advert & ADVERTISED_100baseT_Full)
402 val |= ADVERTISE_100FULL;
403 if (advert & ADVERTISED_Pause)
404 val |= ADVERTISE_PAUSE_CAP;
405 if (advert & ADVERTISED_Asym_Pause)
406 val |= ADVERTISE_PAUSE_ASYM;
407 return mdio_write(phy, 0, MII_ADVERTISE, val);
411 * t3_phy_advertise_fiber - set fiber PHY advertisement register
412 * @phy: the PHY to operate on
413 * @advert: bitmap of capabilities the PHY should advertise
415 * Sets a fiber PHY's advertisement register to advertise the
416 * requested capabilities.
418 int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert)
420 unsigned int val = 0;
422 if (advert & ADVERTISED_1000baseT_Half)
423 val |= ADVERTISE_1000XHALF;
424 if (advert & ADVERTISED_1000baseT_Full)
425 val |= ADVERTISE_1000XFULL;
426 if (advert & ADVERTISED_Pause)
427 val |= ADVERTISE_1000XPAUSE;
428 if (advert & ADVERTISED_Asym_Pause)
429 val |= ADVERTISE_1000XPSE_ASYM;
430 return mdio_write(phy, 0, MII_ADVERTISE, val);
434 * t3_set_phy_speed_duplex - force PHY speed and duplex
435 * @phy: the PHY to operate on
436 * @speed: requested PHY speed
437 * @duplex: requested PHY duplex
439 * Force a 10/100/1000 PHY's speed and duplex. This also disables
440 * auto-negotiation except for GigE, where auto-negotiation is mandatory.
442 int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
447 err = mdio_read(phy, 0, MII_BMCR, &ctl);
452 ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
453 if (speed == SPEED_100)
454 ctl |= BMCR_SPEED100;
455 else if (speed == SPEED_1000)
456 ctl |= BMCR_SPEED1000;
459 ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
460 if (duplex == DUPLEX_FULL)
461 ctl |= BMCR_FULLDPLX;
463 if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
464 ctl |= BMCR_ANENABLE;
465 return mdio_write(phy, 0, MII_BMCR, ctl);
468 int t3_phy_lasi_intr_enable(struct cphy *phy)
470 return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 1);
473 int t3_phy_lasi_intr_disable(struct cphy *phy)
475 return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 0);
478 int t3_phy_lasi_intr_clear(struct cphy *phy)
482 return mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &val);
485 int t3_phy_lasi_intr_handler(struct cphy *phy)
488 int err = mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &status);
492 return (status & 1) ? cphy_cause_link_change : 0;
/*
 * Per-board static configuration: GPIO output-enable/value masks, GPIO
 * interrupt pins, extra supported-mode flags, MDIO ops and product name.
 * NOTE(review): this extraction is missing the leading fields of each
 * initializer row (port counts / caps before the GPIO masks) — left
 * byte-for-byte as found.
 */
495 static const struct adapter_info t3_adap_info[] = {
497 F_GPIO2_OEN | F_GPIO4_OEN |
498 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
499 &mi1_mdio_ops, "Chelsio PE9000"},
501 F_GPIO2_OEN | F_GPIO4_OEN |
502 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
503 &mi1_mdio_ops, "Chelsio T302"},
505 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
506 F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
507 { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
508 &mi1_mdio_ext_ops, "Chelsio T310"},
510 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
511 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
512 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
513 { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
514 &mi1_mdio_ext_ops, "Chelsio T320"},
518 * Return the adapter_info structure with a given index. Out-of-range indices
521 const struct adapter_info *t3_get_adapter_info(unsigned int id)
523 return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
/* Maps a VPD port-type code to the matching PHY driver's prep routine. */
struct port_type_info {
	int (*phy_prep)(struct cphy *phy, struct adapter *adapter,
			int phy_addr, const struct mdio_ops *ops);
};
531 static const struct port_type_info port_types[] = {
533 { t3_ael1002_phy_prep },
534 { t3_vsc8211_phy_prep },
536 { t3_xaui_direct_phy_prep },
537 { t3_ael2005_phy_prep },
538 { t3_qt2045_phy_prep },
539 { t3_ael1006_phy_prep },
/*
 * Lay out one VPD keyword entry: a 2-byte keyword, a 1-byte length, and
 * @len bytes of data.
 */
543 #define VPD_ENTRY(name, len) \
544 u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
547 * Partial EEPROM Vital Product Data structure. Includes only the ID and
/*
 * Body of the partial VPD EEPROM structure.  NOTE(review): this extraction
 * is missing the "struct t3_vpd {" header and the leading ID-tag fields —
 * the member list is left byte-for-byte as found.
 */
556 VPD_ENTRY(pn, 16); /* part number */
557 VPD_ENTRY(ec, 16); /* EC level */
558 VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
559 VPD_ENTRY(na, 12); /* MAC address base */
560 VPD_ENTRY(cclk, 6); /* core clock */
561 VPD_ENTRY(mclk, 6); /* mem clock */
562 VPD_ENTRY(uclk, 6); /* uP clk */
563 VPD_ENTRY(mdc, 6); /* MDIO clk */
564 VPD_ENTRY(mt, 2); /* mem timing */
565 VPD_ENTRY(xaui0cfg, 6); /* XAUI0 config */
566 VPD_ENTRY(xaui1cfg, 6); /* XAUI1 config */
567 VPD_ENTRY(port0, 2); /* PHY0 complex */
568 VPD_ENTRY(port1, 2); /* PHY1 complex */
569 VPD_ENTRY(port2, 2); /* PHY2 complex */
570 VPD_ENTRY(port3, 2); /* PHY3 complex */
571 VPD_ENTRY(rv, 1); /* csum */
572 u32 pad; /* for multiple-of-4 sizing and alignment */
/* max polls of the PCI_VPD_ADDR flag bit before giving up */
575 #define EEPROM_MAX_POLL 40
/* pseudo-address used to toggle EEPROM write protection */
576 #define EEPROM_STAT_ADDR 0x4000
/* offset of the VPD data block within the EEPROM */
577 #define VPD_BASE 0xc00
580 * t3_seeprom_read - read a VPD EEPROM location
581 * @adapter: adapter to read
582 * @addr: EEPROM address
583 * @data: where to store the read data
585 * Read a 32-bit word from a location in VPD EEPROM using the card's PCI
586 * VPD ROM capability. A zero is written to the flag bit when the
587 * addres is written to the control register. The hardware device will
588 * set the flag to 1 when 4 bytes have been read into the data register.
590 int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data)
593 int attempts = EEPROM_MAX_POLL;
595 unsigned int base = adapter->params.pci.vpd_cap_addr;
597 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
600 pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
603 pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
604 } while (!(val & PCI_VPD_ADDR_F) && --attempts);
606 if (!(val & PCI_VPD_ADDR_F)) {
607 CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
610 pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, &v);
611 *data = cpu_to_le32(v);
616 * t3_seeprom_write - write a VPD EEPROM location
617 * @adapter: adapter to write
618 * @addr: EEPROM address
619 * @data: value to write
621 * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
622 * VPD ROM capability.
624 int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data)
627 int attempts = EEPROM_MAX_POLL;
628 unsigned int base = adapter->params.pci.vpd_cap_addr;
630 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
633 pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
635 pci_write_config_word(adapter->pdev,base + PCI_VPD_ADDR,
636 addr | PCI_VPD_ADDR_F);
639 pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
640 } while ((val & PCI_VPD_ADDR_F) && --attempts);
642 if (val & PCI_VPD_ADDR_F) {
643 CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
650 * t3_seeprom_wp - enable/disable EEPROM write protection
651 * @adapter: the adapter
652 * @enable: 1 to enable write protection, 0 to disable it
654 * Enables or disables write protection on the serial EEPROM.
656 int t3_seeprom_wp(struct adapter *adapter, int enable)
658 return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
662 * Convert a character holding a hex digit to a number.
/*
 * Convert a character holding a hex digit to its numeric value.
 * Assumes @c is a valid hex digit ([0-9a-fA-F]); any other input yields a
 * meaningless value.
 */
static unsigned int hex2int(unsigned char c)
{
	return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
}
670 * get_vpd_params - read VPD parameters from VPD EEPROM
671 * @adapter: adapter to read
672 * @p: where to store the parameters
674 * Reads card parameters stored in VPD EEPROM.
676 static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
682 * Card information is normally at VPD_BASE but some early cards had
685 ret = t3_seeprom_read(adapter, VPD_BASE, (__le32 *)&vpd);
688 addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;
690 for (i = 0; i < sizeof(vpd); i += 4) {
691 ret = t3_seeprom_read(adapter, addr + i,
692 (__le32 *)((u8 *)&vpd + i));
697 p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
698 p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
699 p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
700 p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
701 p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
702 memcpy(p->sn, vpd.sn_data, SERNUM_LEN);
704 /* Old eeproms didn't have port information */
705 if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
706 p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
707 p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
709 p->port_type[0] = hex2int(vpd.port0_data[0]);
710 p->port_type[1] = hex2int(vpd.port1_data[0]);
711 p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
712 p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
715 for (i = 0; i < 6; i++)
716 p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
717 hex2int(vpd.na_data[2 * i + 1]);
/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 5,		/* max retries for SF1 operations */
	SF_SEC_SIZE = 64 * 1024,	/* serial flash sector size */
	SF_SIZE = SF_SEC_SIZE * 8,	/* serial flash size */

	/* flash command opcodes */
	SF_PROG_PAGE = 2,		/* program page */
	SF_WR_DISABLE = 4,		/* disable writes */
	SF_RD_STATUS = 5,		/* read status register */
	SF_WR_ENABLE = 6,		/* enable writes */
	SF_RD_DATA_FAST = 0xb,		/* read flash */
	SF_ERASE_SECTOR = 0xd8,		/* erase sector */

	FW_FLASH_BOOT_ADDR = 0x70000,	/* start address of FW in flash */
	FW_VERS_ADDR = 0x7fffc,		/* flash address holding FW version */
	FW_MIN_SIZE = 8			/* at least version and csum */
};
741 * sf1_read - read data from the serial flash
742 * @adapter: the adapter
743 * @byte_cnt: number of bytes to read
744 * @cont: whether another operation will be chained
745 * @valp: where to store the read data
747 * Reads up to 4 bytes of data from the serial flash. The location of
748 * the read needs to be specified prior to calling this by issuing the
749 * appropriate commands to the serial flash.
751 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
756 if (!byte_cnt || byte_cnt > 4)
758 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
760 t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
761 ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
763 *valp = t3_read_reg(adapter, A_SF_DATA);
768 * sf1_write - write data to the serial flash
769 * @adapter: the adapter
770 * @byte_cnt: number of bytes to write
771 * @cont: whether another operation will be chained
772 * @val: value to write
774 * Writes up to 4 bytes of data to the serial flash. The location of
775 * the write needs to be specified prior to calling this by issuing the
776 * appropriate commands to the serial flash.
778 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
781 if (!byte_cnt || byte_cnt > 4)
783 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
785 t3_write_reg(adapter, A_SF_DATA, val);
786 t3_write_reg(adapter, A_SF_OP,
787 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
788 return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
792 * flash_wait_op - wait for a flash operation to complete
793 * @adapter: the adapter
794 * @attempts: max number of polls of the status register
795 * @delay: delay between polls in ms
797 * Wait for a flash operation to complete by polling the status register.
799 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
805 if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
806 (ret = sf1_read(adapter, 1, 0, &status)) != 0)
818 * t3_read_flash - read words from serial flash
819 * @adapter: the adapter
820 * @addr: the start address for the read
821 * @nwords: how many 32-bit words to read
822 * @data: where to store the read data
823 * @byte_oriented: whether to store data as bytes or as words
825 * Read the specified number of 32-bit words from the serial flash.
826 * If @byte_oriented is set the read data is stored as a byte array
827 * (i.e., big-endian), otherwise as 32-bit words in the platform's
830 int t3_read_flash(struct adapter *adapter, unsigned int addr,
831 unsigned int nwords, u32 *data, int byte_oriented)
835 if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
838 addr = swab32(addr) | SF_RD_DATA_FAST;
840 if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
841 (ret = sf1_read(adapter, 1, 1, data)) != 0)
844 for (; nwords; nwords--, data++) {
845 ret = sf1_read(adapter, 4, nwords > 1, data);
849 *data = htonl(*data);
855 * t3_write_flash - write up to a page of data to the serial flash
856 * @adapter: the adapter
857 * @addr: the start address to write
858 * @n: length of data to write
859 * @data: the data to write
861 * Writes up to a page of data (256 bytes) to the serial flash starting
862 * at the given address.
864 static int t3_write_flash(struct adapter *adapter, unsigned int addr,
865 unsigned int n, const u8 *data)
869 unsigned int i, c, left, val, offset = addr & 0xff;
871 if (addr + n > SF_SIZE || offset + n > 256)
874 val = swab32(addr) | SF_PROG_PAGE;
876 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
877 (ret = sf1_write(adapter, 4, 1, val)) != 0)
880 for (left = n; left; left -= c) {
882 for (val = 0, i = 0; i < c; ++i)
883 val = (val << 8) + *data++;
885 ret = sf1_write(adapter, c, c != left, val);
889 if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
892 /* Read the page to verify the write succeeded */
893 ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
897 if (memcmp(data - n, (u8 *) buf + offset, n))
903 * t3_get_tp_version - read the tp sram version
904 * @adapter: the adapter
905 * @vers: where to place the version
907 * Reads the protocol sram version from sram.
909 int t3_get_tp_version(struct adapter *adapter, u32 *vers)
913 /* Get version loaded in SRAM */
914 t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
915 ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
920 *vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);
926 * t3_check_tpsram_version - read the tp sram version
927 * @adapter: the adapter
929 * Reads the protocol sram version from flash.
931 int t3_check_tpsram_version(struct adapter *adapter)
935 unsigned int major, minor;
937 if (adapter->params.rev == T3_REV_A)
941 ret = t3_get_tp_version(adapter, &vers);
945 major = G_TP_VERSION_MAJOR(vers);
946 minor = G_TP_VERSION_MINOR(vers);
948 if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
951 CH_ERR(adapter, "found wrong TP version (%u.%u), "
952 "driver compiled for version %d.%d\n", major, minor,
953 TP_VERSION_MAJOR, TP_VERSION_MINOR);
959 * t3_check_tpsram - check if provided protocol SRAM
960 * is compatible with this driver
961 * @adapter: the adapter
962 * @tp_sram: the firmware image to write
965 * Checks if an adapter's tp sram is compatible with the driver.
966 * Returns 0 if the versions are compatible, a negative error otherwise.
968 int t3_check_tpsram(struct adapter *adapter, const u8 *tp_sram,
973 const __be32 *p = (const __be32 *)tp_sram;
975 /* Verify checksum */
976 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
978 if (csum != 0xffffffff) {
979 CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
/* Firmware build targets encoded in the flashed version word. */
enum fw_version_type {
	FW_VERSION_N3,
	FW_VERSION_T3
};
993 * t3_get_fw_version - read the firmware version
994 * @adapter: the adapter
995 * @vers: where to place the version
997 * Reads the FW version from flash.
999 int t3_get_fw_version(struct adapter *adapter, u32 *vers)
1001 return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
1005 * t3_check_fw_version - check if the FW is compatible with this driver
1006 * @adapter: the adapter
1008 * Checks if an adapter's FW is compatible with the driver. Returns 0
1009 * if the versions are compatible, a negative error otherwise.
1011 int t3_check_fw_version(struct adapter *adapter)
1015 unsigned int type, major, minor;
1017 ret = t3_get_fw_version(adapter, &vers);
1021 type = G_FW_VERSION_TYPE(vers);
1022 major = G_FW_VERSION_MAJOR(vers);
1023 minor = G_FW_VERSION_MINOR(vers);
1025 if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
1026 minor == FW_VERSION_MINOR)
1028 else if (major != FW_VERSION_MAJOR || minor < FW_VERSION_MINOR)
1029 CH_WARN(adapter, "found old FW minor version(%u.%u), "
1030 "driver compiled for version %u.%u\n", major, minor,
1031 FW_VERSION_MAJOR, FW_VERSION_MINOR);
1033 CH_WARN(adapter, "found newer FW version(%u.%u), "
1034 "driver compiled for version %u.%u\n", major, minor,
1035 FW_VERSION_MAJOR, FW_VERSION_MINOR);
1042 * t3_flash_erase_sectors - erase a range of flash sectors
1043 * @adapter: the adapter
1044 * @start: the first sector to erase
1045 * @end: the last sector to erase
1047 * Erases the sectors in the given range.
1049 static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
1051 while (start <= end) {
1054 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
1055 (ret = sf1_write(adapter, 4, 0,
1056 SF_ERASE_SECTOR | (start << 8))) != 0 ||
1057 (ret = flash_wait_op(adapter, 5, 500)) != 0)
1065 * t3_load_fw - download firmware
1066 * @adapter: the adapter
1067 * @fw_data: the firmware image to write
1070 * Write the supplied firmware image to the card's serial flash.
1071 * The FW image has the following sections: @size - 8 bytes of code and
1072 * data, followed by 4 bytes of FW version, followed by the 32-bit
1073 * 1's complement checksum of the whole image.
1075 int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
1079 const __be32 *p = (const __be32 *)fw_data;
1080 int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;
1082 if ((size & 3) || size < FW_MIN_SIZE)
1084 if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
1087 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1088 csum += ntohl(p[i]);
1089 if (csum != 0xffffffff) {
1090 CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
1095 ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
1099 size -= 8; /* trim off version and checksum */
1100 for (addr = FW_FLASH_BOOT_ADDR; size;) {
1101 unsigned int chunk_size = min(size, 256U);
1103 ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
1108 fw_data += chunk_size;
1112 ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
1115 CH_ERR(adapter, "firmware download failed, error %d\n", ret);
/* base offset of the CIM control region within the CIM address space */
1119 #define CIM_CTL_BASE 0x2000
1122 * t3_cim_ctl_blk_read - read a block from CIM control region
1124 * @adap: the adapter
1125 * @addr: the start address within the CIM control region
1126 * @n: number of words to read
1127 * @valp: where to store the result
1129 * Reads a block of 4-byte words from the CIM control region.
1131 int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
1132 unsigned int n, unsigned int *valp)
1136 if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
1139 for ( ; !ret && n--; addr += 4) {
1140 t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
1141 ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
1144 *valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
1151 * t3_link_changed - handle interface link changes
1152 * @adapter: the adapter
1153 * @port_id: the port index that changed link state
1155 * Called when a port's link settings change to propagate the new values
1156 * to the associated PHY and MAC. After performing the common tasks it
1157 * invokes an OS-specific handler.
/*
 * Propagate a PHY link-state change to the MAC and the OS handler.
 * NOTE(review): this extraction is missing interior lines (including the
 * body of the XAUI block before the A_XGM_XAUI_ACT_CTRL write and the
 * lc->fc update) — left byte-for-byte as found; do not restyle until the
 * full text is available.
 */
1159 void t3_link_changed(struct adapter *adapter, int port_id)
1161 int link_ok, speed, duplex, fc;
1162 struct port_info *pi = adap2pinfo(adapter, port_id);
1163 struct cphy *phy = &pi->phy;
1164 struct cmac *mac = &pi->mac;
1165 struct link_config *lc = &pi->link_config;
1167 phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
/* honor negotiated pause only when autoneg of flow control is requested */
1169 if (lc->requested_fc & PAUSE_AUTONEG)
1170 fc &= lc->requested_fc;
1172 fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1174 if (link_ok == lc->link_ok && speed == lc->speed &&
1175 duplex == lc->duplex && fc == lc->fc)
1176 return; /* nothing changed */
1178 if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
1179 uses_xaui(adapter)) {
1182 t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
1183 link_ok ? F_TXACTENABLE | F_RXEN : 0);
/* cache the new state so unchanged events can be ignored next time */
1185 lc->link_ok = link_ok;
1186 lc->speed = speed < 0 ? SPEED_INVALID : speed;
1187 lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
1189 if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
1190 /* Set MAC speed, duplex, and flow control to match PHY. */
1191 t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
1195 t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
1199 * t3_link_start - apply link configuration to MAC/PHY
1200 * @phy: the PHY to setup
1201 * @mac: the MAC to setup
1202 * @lc: the requested link configuration
1204 * Set up a port's MAC and PHY according to a desired link configuration.
1205 * - If the PHY can auto-negotiate first decide what to advertise, then
1206 * enable/disable auto-negotiation as desired, and reset.
1207 * - If the PHY does not auto-negotiate just reset it.
1208 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1209 * otherwise do it later based on the outcome of auto-negotiation.
/*
 * Apply the requested link configuration to a port's PHY and MAC.
 * NOTE(review): this extraction is missing interior lines (the conditions
 * guarding the pause-advertisement bits, else branches, and the final
 * return) — left byte-for-byte as found; do not restyle until the full
 * text is available.
 */
1211 int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
1213 unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1216 if (lc->supported & SUPPORTED_Autoneg) {
/* rebuild the pause advertisement from the requested flow control */
1217 lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
1219 lc->advertising |= ADVERTISED_Asym_Pause;
1221 lc->advertising |= ADVERTISED_Pause;
1223 phy->ops->advertise(phy, lc->advertising);
1225 if (lc->autoneg == AUTONEG_DISABLE) {
/* forced mode: program MAC first, then the PHY (which kills autoneg) */
1226 lc->speed = lc->requested_speed;
1227 lc->duplex = lc->requested_duplex;
1228 lc->fc = (unsigned char)fc;
1229 t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
1231 /* Also disables autoneg */
1232 phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
1234 phy->ops->autoneg_enable(phy);
/* non-autoneg PHY: set MAC to wildcard speed/duplex and just reset */
1236 t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
1237 lc->fc = (unsigned char)fc;
1238 phy->ops->reset(phy, 0);
1244 * t3_set_vlan_accel - control HW VLAN extraction
1245 * @adapter: the adapter
1246 * @ports: bitmap of adapter ports to operate on
1247 * @on: enable (1) or disable (0) HW VLAN extraction
1249 * Enables or disables HW extraction of VLAN tags for the given port.
1251 void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
1253 t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
1254 ports << S_VLANEXTRACTIONENABLE,
1255 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
1259 unsigned int mask; /* bits to check in interrupt status */
1260 const char *msg; /* message to print or NULL */
1261 short stat_idx; /* stat counter to increment or -1 */
1262 unsigned short fatal; /* whether the condition reported is fatal */
1266 * t3_handle_intr_status - table driven interrupt handler
1267 * @adapter: the adapter that generated the interrupt
1268 * @reg: the interrupt status register to process
1269 * @mask: a mask to apply to the interrupt status
1270 * @acts: table of interrupt actions
1271 * @stats: statistics counters tracking interrupt occurences
1273 * A table driven interrupt handler that applies a set of masks to an
1274 * interrupt status word and performs the corresponding actions if the
1275 * interrupts described by the mask have occured. The actions include
1276 * optionally printing a warning or alert message, and optionally
1277 * incrementing a stat counter. The table is terminated by an entry
1278 * specifying mask 0. Returns the number of fatal interrupt conditions.
1280 static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
1282 const struct intr_info *acts,
1283 unsigned long *stats)
/* Raw cause bits, restricted to the caller-supplied mask. */
1286 unsigned int status = t3_read_reg(adapter, reg) & mask;
/* The action table is terminated by an entry whose mask is 0. */
1288 for (; acts->mask; ++acts) {
1289 if (!(status & acts->mask))
/* NOTE(review): the "continue" and the fatal-condition test/counting live in
 * lines elided from this view; fatal entries are logged at ALERT level. */
1293 CH_ALERT(adapter, "%s (0x%x)\n",
1294 acts->msg, status & acts->mask);
/* Non-fatal entries with a message are only warnings. */
1295 } else if (acts->msg)
1296 CH_WARN(adapter, "%s (0x%x)\n",
1297 acts->msg, status & acts->mask);
/* Bump the associated statistics counter, if this entry has one. */
1298 if (acts->stat_idx >= 0)
1299 stats[acts->stat_idx]++;
1301 if (status) /* clear processed interrupts */
1302 t3_write_reg(adapter, reg, status);
1306 #define SGE_INTR_MASK (F_RSPQDISABLED | \
1307 F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
1308 F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
1309 F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
1310 V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
1311 F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
1313 #define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
1314 F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
1316 #define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
1317 #define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1318 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
1319 F_TXFIFO_UNDERRUN | F_RXFIFO_OVERFLOW)
1320 #define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
1321 F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
1322 F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
1323 F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
1324 V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
1325 V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
1326 #define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
1327 F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
1328 /* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
1329 F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
1330 F_TXPARERR | V_BISTERR(M_BISTERR))
1331 #define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
1332 F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
1333 F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
1334 #define ULPTX_INTR_MASK 0xfc
1335 #define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
1336 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
1337 F_ZERO_SWITCH_ERROR)
1338 #define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
1339 F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
1340 F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
1341 F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
1342 F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
1343 F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
1344 F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
1345 F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
1346 #define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
1347 V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
1348 V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
1349 #define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
1350 V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
1351 V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
1352 #define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
1353 V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
1354 V_RXTPPARERRENB(M_RXTPPARERRENB) | \
1355 V_MCAPARERRENB(M_MCAPARERRENB))
1356 #define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
1357 F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
1358 F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
1359 F_MPS0 | F_CPL_SWITCH)
1362 * Interrupt handler for the PCIX1 module.
1364 static void pci_intr_handler(struct adapter *adapter)
/* Cause-bit table: {mask, message, stat index (-1 = none), fatal flag}. */
1366 static const struct intr_info pcix1_intr_info[] = {
1367 {F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
1368 {F_SIGTARABT, "PCI signaled target abort", -1, 1},
1369 {F_RCVTARABT, "PCI received target abort", -1, 1},
1370 {F_RCVMSTABT, "PCI received master abort", -1, 1},
1371 {F_SIGSYSERR, "PCI signaled system error", -1, 1},
1372 {F_DETPARERR, "PCI detected parity error", -1, 1},
1373 {F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
1374 {F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
1375 {F_RCVSPLCMPERR, "PCI received split completion error", -1,
/* Correctable ECC errors are only counted, not treated as fatal. */
1377 {F_DETCORECCERR, "PCI correctable ECC error",
1378 STAT_PCI_CORR_ECC, 0},
1379 {F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
1380 {F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
1381 {V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
1383 {V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
1385 {V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
1387 {V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
/* Any fatal PCI-X condition takes the adapter down. */
1392 if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
1393 pcix1_intr_info, adapter->irq_stats))
1394 t3_fatal_err(adapter);
1398 * Interrupt handler for the PCIE module.
1400 static void pcie_intr_handler(struct adapter *adapter)
/* Cause-bit table: {mask, message, stat index (-1 = none), fatal flag}. */
1402 static const struct intr_info pcie_intr_info[] = {
1403 {F_PEXERR, "PCI PEX error", -1, 1},
1405 "PCI unexpected split completion DMA read error", -1, 1},
1407 "PCI unexpected split completion DMA command error", -1, 1},
1408 {F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
1409 {F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
1410 {F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
1411 {F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
1412 {V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
1413 "PCI MSI-X table/PBA parity error", -1, 1},
1414 {F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1},
1415 {F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1},
1416 {F_RXPARERR, "PCI Rx parity error", -1, 1},
1417 {F_TXPARERR, "PCI Tx parity error", -1, 1},
1418 {V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
/* A PEX error carries an auxiliary error code in A_PCIE_PEX_ERR; log it
 * before the table-driven handling clears the cause register. */
1422 if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
1423 CH_ALERT(adapter, "PEX error code 0x%x\n",
1424 t3_read_reg(adapter, A_PCIE_PEX_ERR));
1426 if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
1427 pcie_intr_info, adapter->irq_stats))
1428 t3_fatal_err(adapter);
1432 * TP interrupt handler.
1434 static void tp_intr_handler(struct adapter *adapter)
1436 static const struct intr_info tp_intr_info[] = {
1437 {0xffffff, "TP parity error", -1, 1},
1438 {0x1000000, "TP out of Rx pages", -1, 1},
1439 {0x2000000, "TP out of Tx pages", -1, 1},
1443 static struct intr_info tp_intr_info_t3c[] = {
1444 {0x1fffffff, "TP parity error", -1, 1},
1445 {F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1},
1446 {F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1},
1450 if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
1451 adapter->params.rev < T3_REV_C ?
1452 tp_intr_info : tp_intr_info_t3c, NULL))
1453 t3_fatal_err(adapter);
1457 * CIM interrupt handler.
1459 static void cim_intr_handler(struct adapter *adapter)
/* Every CIM cause in this table is fatal; no stats are kept (stat_idx -1). */
1461 static const struct intr_info cim_intr_info[] = {
1462 {F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
1463 {F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
1464 {F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
1465 {F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
1466 {F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
1467 {F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
1468 {F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
1469 {F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
1470 {F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
1471 {F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
1472 {F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
1473 {F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
1474 {F_DRAMPARERR, "CIM DRAM parity error", -1, 1},
1475 {F_ICACHEPARERR, "CIM icache parity error", -1, 1},
1476 {F_DCACHEPARERR, "CIM dcache parity error", -1, 1},
1477 {F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1},
1478 {F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1},
1479 {F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1},
1480 {F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1},
1481 {F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1},
1482 {F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1},
1483 {F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1},
1484 {F_ITAGPARERR, "CIM itag parity error", -1, 1},
1485 {F_DTAGPARERR, "CIM dtag parity error", -1, 1},
/* Full 32-bit mask: process every cause bit the hardware reports. */
1489 if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
1490 cim_intr_info, NULL))
1491 t3_fatal_err(adapter);
1495 * ULP RX interrupt handler.
1497 static void ulprx_intr_handler(struct adapter *adapter)
/* All ULP-RX parity/framing causes are fatal; no stat counters. */
1499 static const struct intr_info ulprx_intr_info[] = {
1500 {F_PARERRDATA, "ULP RX data parity error", -1, 1},
1501 {F_PARERRPCMD, "ULP RX command parity error", -1, 1},
1502 {F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1},
1503 {F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1},
1504 {F_ARBFPERR, "ULP RX ArbF parity error", -1, 1},
1505 {F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1},
1506 {F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1},
1507 {F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1},
1511 if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
1512 ulprx_intr_info, NULL))
1513 t3_fatal_err(adapter);
1517 * ULP TX interrupt handler.
1519 static void ulptx_intr_handler(struct adapter *adapter)
/* PBL out-of-bounds conditions are counted but non-fatal; only the 0xfc
 * parity-error bits are fatal. */
1521 static const struct intr_info ulptx_intr_info[] = {
1522 {F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
1523 STAT_ULP_CH0_PBL_OOB, 0},
1524 {F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
1525 STAT_ULP_CH1_PBL_OOB, 0},
1526 {0xfc, "ULP TX parity error", -1, 1},
1530 if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
1531 ulptx_intr_info, adapter->irq_stats))
1532 t3_fatal_err(adapter);
1535 #define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
1536 F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
1537 F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
1538 F_ICSPI1_TX_FRAMING_ERROR)
1539 #define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
1540 F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
1541 F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
1542 F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
1545 * PM TX interrupt handler.
1547 static void pmtx_intr_handler(struct adapter *adapter)
/* PM-TX causes: zero-length pcmd, ispi/ospi framing and parity — all fatal. */
1549 static const struct intr_info pmtx_intr_info[] = {
1550 {F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
1551 {ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
1552 {OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
1553 {V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
1554 "PMTX ispi parity error", -1, 1},
1555 {V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
1556 "PMTX ospi parity error", -1, 1},
1560 if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
1561 pmtx_intr_info, NULL))
1562 t3_fatal_err(adapter);
1565 #define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
1566 F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
1567 F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
1568 F_IESPI1_TX_FRAMING_ERROR)
1569 #define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
1570 F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
1571 F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
1572 F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
1575 * PM RX interrupt handler.
1577 static void pmrx_intr_handler(struct adapter *adapter)
/* PM-RX causes: zero-length pcmd, ispi/ospi framing and parity — all fatal. */
1579 static const struct intr_info pmrx_intr_info[] = {
1580 {F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
1581 {IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
1582 {OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
1583 {V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
1584 "PMRX ispi parity error", -1, 1},
1585 {V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
1586 "PMRX ospi parity error", -1, 1},
1590 if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
1591 pmrx_intr_info, NULL))
1592 t3_fatal_err(adapter);
1596 * CPL switch interrupt handler.
1598 static void cplsw_intr_handler(struct adapter *adapter)
/* All CPL-switch causes are fatal; no stat counters. */
1600 static const struct intr_info cplsw_intr_info[] = {
1601 {F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1},
1602 {F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1},
1603 {F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
1604 {F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
1605 {F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
1606 {F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
1610 if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
1611 cplsw_intr_info, NULL))
1612 t3_fatal_err(adapter);
1616 * MPS interrupt handler.
1618 static void mps_intr_handler(struct adapter *adapter)
/* Any of the low 9 cause bits indicates a fatal MPS parity error. */
1620 static const struct intr_info mps_intr_info[] = {
1621 {0x1ff, "MPS parity error", -1, 1},
1625 if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
1626 mps_intr_info, NULL))
1627 t3_fatal_err(adapter);
1630 #define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)
1633 * MC7 interrupt handler.
1635 static void mc7_intr_handler(struct mc7 *mc7)
1637 struct adapter *adapter = mc7->adapter;
/* Each MC7 instance (PMRX/PMTX/CM) has its own register block at mc7->offset. */
1638 u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);
/* Correctable ECC error: count it and warn with address/data details.
 * NOTE(review): the guarding cause-bit tests for each section below fall
 * outside this view (presumably F_CE / F_UE / F_PE / F_AE); confirm. */
1641 mc7->stats.corr_err++;
1642 CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
1643 "data 0x%x 0x%x 0x%x\n", mc7->name,
1644 t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
1645 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
1646 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
1647 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
/* Uncorrectable ECC error: alert-level, fatal via MC7_INTR_FATAL below. */
1651 mc7->stats.uncorr_err++;
1652 CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
1653 "data 0x%x 0x%x 0x%x\n", mc7->name,
1654 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
1655 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
1656 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
1657 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
/* Parity error: the failing byte lanes are encoded in the PE field. */
1661 mc7->stats.parity_err++;
1662 CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
1663 mc7->name, G_PE(cause));
/* Address error: the offending address register exists only on rev > 0. */
1669 if (adapter->params.rev > 0)
1670 addr = t3_read_reg(adapter,
1671 mc7->offset + A_MC7_ERR_ADDR);
1672 mc7->stats.addr_err++;
1673 CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
/* Take the adapter down on any fatal condition, then ack the causes. */
1677 if (cause & MC7_INTR_FATAL)
1678 t3_fatal_err(adapter);
1680 t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
1683 #define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1684 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
1686 * XGMAC interrupt handler.
1688 static int mac_intr_handler(struct adapter *adap, unsigned int idx)
1690 struct cmac *mac = &adap2pinfo(adap, idx)->mac;
/* Per-port XGMAC cause register lives at the MAC's register offset. */
1691 u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset);
/* FIFO parity errors are alerted and counted; they are fatal (see below). */
1693 if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
1694 mac->stats.tx_fifo_parity_err++;
1695 CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
1697 if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
1698 mac->stats.rx_fifo_parity_err++;
1699 CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
/* The remaining conditions only bump statistics counters. */
1701 if (cause & F_TXFIFO_UNDERRUN)
1702 mac->stats.tx_fifo_urun++;
1703 if (cause & F_RXFIFO_OVERFLOW)
1704 mac->stats.rx_fifo_ovfl++;
1705 if (cause & V_SERDES_LOS(M_SERDES_LOS))
1706 mac->stats.serdes_signal_loss++;
1707 if (cause & F_XAUIPCSCTCERR)
1708 mac->stats.xaui_pcs_ctc_err++;
1709 if (cause & F_XAUIPCSALIGNCHANGE)
1710 mac->stats.xaui_pcs_align_change++;
/* Ack all processed causes, then escalate if any fatal bit was set.
 * NOTE(review): the fatal-handling tail of this function is outside view. */
1712 t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
1713 if (cause & XGM_INTR_FATAL)
1719 * Interrupt handler for PHY events.
1721 int t3_phy_intr_handler(struct adapter *adapter)
1723 u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
1725 for_each_port(adapter, i) {
1726 struct port_info *p = adap2pinfo(adapter, i);
/* Skip PHYs that cannot raise interrupts. */
1728 if (!(p->phy.caps & SUPPORTED_IRQ))
/* Each port's PHY interrupt arrives on a board-specific GPIO line. */
1731 if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) {
1732 int phy_cause = p->phy.ops->intr_handler(&p->phy);
/* Dispatch on the cause bits the PHY driver reports. */
1734 if (phy_cause & cphy_cause_link_change)
1735 t3_link_changed(adapter, i);
1736 if (phy_cause & cphy_cause_fifo_error)
1737 p->phy.fifo_errors++;
1738 if (phy_cause & cphy_cause_module_change)
1739 t3_os_phymod_changed(adapter, i);
/* Ack the GPIO causes we just serviced. */
1743 t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause)
1748 * T3 slow path (non-data) interrupt handler.
1750 int t3_slow_intr_handler(struct adapter *adapter)
1752 u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
/* Consider only the sources we actually enabled. */
1754 cause &= adapter->slow_intr_mask;
/* Dispatch to the per-module handlers for each pending source.
 * NOTE(review): a few guards (e.g. for SGE3, CIM, TP1, MPS0, MC5A) fall in
 * lines elided from this view. */
1757 if (cause & F_PCIM0) {
1758 if (is_pcie(adapter))
1759 pcie_intr_handler(adapter);
1761 pci_intr_handler(adapter);
1764 t3_sge_err_intr_handler(adapter);
1765 if (cause & F_MC7_PMRX)
1766 mc7_intr_handler(&adapter->pmrx);
1767 if (cause & F_MC7_PMTX)
1768 mc7_intr_handler(&adapter->pmtx);
1769 if (cause & F_MC7_CM)
1770 mc7_intr_handler(&adapter->cm);
1772 cim_intr_handler(adapter);
1774 tp_intr_handler(adapter);
1775 if (cause & F_ULP2_RX)
1776 ulprx_intr_handler(adapter);
1777 if (cause & F_ULP2_TX)
1778 ulptx_intr_handler(adapter);
1779 if (cause & F_PM1_RX)
1780 pmrx_intr_handler(adapter);
1781 if (cause & F_PM1_TX)
1782 pmtx_intr_handler(adapter);
1783 if (cause & F_CPL_SWITCH)
1784 cplsw_intr_handler(adapter);
1786 mps_intr_handler(adapter);
1788 t3_mc5_intr_handler(&adapter->mc5);
1789 if (cause & F_XGMAC0_0)
1790 mac_intr_handler(adapter, 0);
1791 if (cause & F_XGMAC0_1)
1792 mac_intr_handler(adapter, 1);
1793 if (cause & F_T3DBG)
1794 t3_os_ext_intr_handler(adapter);
1796 /* Clear the interrupts just processed. */
1797 t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
1798 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
/* Build the T3DBG GPIO interrupt-enable mask: one bit per port whose PHY
 * both supports interrupts and has a GPIO line assigned. */
1802 static unsigned int calc_gpio_intr(struct adapter *adap)
1804 unsigned int i, gpi_intr = 0;
1806 for_each_port(adap, i)
1807 if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) &&
1808 adapter_info(adap)->gpio_intr[i])
1809 gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i];
1814 * t3_intr_enable - enable interrupts
1815 * @adapter: the adapter whose interrupts should be enabled
1817 * Enable interrupts by setting the interrupt enable registers of the
1818 * various HW modules and then enabling the top-level interrupt
1821 void t3_intr_enable(struct adapter *adapter)
/* {register, value} pairs written in bulk by t3_write_regs() below. */
1823 static const struct addr_val_pair intr_en_avp[] = {
1824 {A_SG_INT_ENABLE, SGE_INTR_MASK},
1825 {A_MC7_INT_ENABLE, MC7_INTR_MASK},
/* The three MC7 instances share a layout at different base addresses. */
1826 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1828 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1830 {A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
1831 {A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
1832 {A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
1833 {A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
1834 {A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
1835 {A_MPS_INT_ENABLE, MPS_INTR_MASK},
1838 adapter->slow_intr_mask = PL_INTR_MASK;
1840 t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
/* TP enable mask differs by chip revision (T3C drops one cause bit). */
1841 t3_write_reg(adapter, A_TP_INT_ENABLE,
1842 adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);
/* Later revisions support extra CPL/ULP-TX causes. */
1844 if (adapter->params.rev > 0) {
1845 t3_write_reg(adapter, A_CPL_INTR_ENABLE,
1846 CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
1847 t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
1848 ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
1849 F_PBL_BOUND_ERR_CH1);
1851 t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
1852 t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
/* GPIO lines used for PHY interrupts depend on the board. */
1855 t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter));
1857 if (is_pcie(adapter))
1858 t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
1860 t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
/* Finally open the top-level concentrator and flush the write. */
1861 t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
1862 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1866 * t3_intr_disable - disable a card's interrupts
1867 * @adapter: the adapter whose interrupts should be disabled
1869 * Disable interrupts. We only disable the top-level interrupt
1870 * concentrator and the SGE data interrupts.
1872 void t3_intr_disable(struct adapter *adapter)
/* Close the top-level concentrator; per-module enables are left alone. */
1874 t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
1875 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
/* Remember that nothing is enabled so the slow handler ignores causes. */
1876 adapter->slow_intr_mask = 0;
1880 * t3_intr_clear - clear all interrupts
1881 * @adapter: the adapter whose interrupts should be cleared
1883 * Clears all interrupts.
1885 void t3_intr_clear(struct adapter *adapter)
/* Module cause registers to clear wholesale.
 * NOTE(review): most of this register list is elided from this view. */
1887 static const unsigned int cause_reg_addr[] = {
1889 A_SG_RSPQ_FL_STATUS,
1892 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1893 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1894 A_CIM_HOST_INT_CAUSE,
1907 /* Clear PHY and MAC interrupts for each port. */
1908 for_each_port(adapter, i)
1909 t3_port_intr_clear(adapter, i);
/* Writing all-ones acks every pending cause bit in each module. */
1911 for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
1912 t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
/* PEX error register exists only on PCIe adapters. */
1914 if (is_pcie(adapter))
1915 t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
1916 t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
1917 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1921 * t3_port_intr_enable - enable port-specific interrupts
1922 * @adapter: associated adapter
1923 * @idx: index of port whose interrupts should be enabled
1925 * Enable port-specific (i.e., MAC and PHY) interrupts for the given
1928 void t3_port_intr_enable(struct adapter *adapter, int idx)
1930 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
/* Enable the port's XGMAC causes, then its PHY-level interrupts. */
1932 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
1933 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
1934 phy->ops->intr_enable(phy);
1938 * t3_port_intr_disable - disable port-specific interrupts
1939 * @adapter: associated adapter
1940 * @idx: index of port whose interrupts should be disabled
1942 * Disable port-specific (i.e., MAC and PHY) interrupts for the given
1945 void t3_port_intr_disable(struct adapter *adapter, int idx)
1947 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
/* Mask the port's XGMAC causes, then its PHY-level interrupts. */
1949 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
1950 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
1951 phy->ops->intr_disable(phy);
1955 * t3_port_intr_clear - clear port-specific interrupts
1956 * @adapter: associated adapter
1957 * @idx: index of port whose interrupts to clear
1959 * Clear port-specific (i.e., MAC and PHY) interrupts for the given
1962 void t3_port_intr_clear(struct adapter *adapter, int idx)
1964 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
/* Ack all pending XGMAC causes for this port, then the PHY's. */
1966 t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
1967 t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
1968 phy->ops->intr_clear(phy);
1971 #define SG_CONTEXT_CMD_ATTEMPTS 100
1974 * t3_sge_write_context - write an SGE context
1975 * @adapter: the adapter
1976 * @id: the context id
1977 * @type: the context type
1979 * Program an SGE context with the values already loaded in the
1980 * CONTEXT_DATA? registers.
1982 static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
/* All-ones masks select every bit of the staged CONTEXT_DATA0-3 values. */
1985 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
1986 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
1987 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
1988 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
/* Opcode 1 = write context of the given type/id. */
1989 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1990 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
/* Poll until the command engine clears its busy flag (up to 100 x 1us). */
1991 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1992 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
/* Zero all four staged data words and write them to context @id, effectively
 * clearing the context. Returns the t3_sge_write_context() result. */
1995 static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
1998 t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
1999 t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
2000 t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
2001 t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
2002 return t3_sge_write_context(adap, id, type);
2006 * t3_sge_init_ecntxt - initialize an SGE egress context
2007 * @adapter: the adapter to configure
2008 * @id: the context id
2009 * @gts_enable: whether to enable GTS for the context
2010 * @type: the egress context type
2011 * @respq: associated response queue
2012 * @base_addr: base address of queue
2013 * @size: number of queue entries
2015 * @gen: initial generation value for the context
2016 * @cidx: consumer pointer
2018 * Initialize an SGE egress context and make it ready for use. If the
2019 * platform allows concurrent context operations, the caller is
2020 * responsible for appropriate locking.
2022 int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
2023 enum sge_context_type type, int respq, u64 base_addr,
2024 unsigned int size, unsigned int token, int gen,
/* Offload queues start with 0 credits; others get the FW_WR_NUM budget. */
2027 unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
2029 if (base_addr & 0xfff) /* must be 4K aligned */
/* Fail fast if a previous context command is still in flight. */
2031 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Stage the context image across the four CONTEXT_DATA words.
 * NOTE(review): the base_addr >>= shifts between the writes are elided
 * from this view; the low/high address split below relies on them. */
2035 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
2036 V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
2037 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
2038 V_EC_BASE_LO(base_addr & 0xffff));
2040 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
2042 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2043 V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
2044 V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
/* Commit the staged image as an egress context. */
2046 return t3_sge_write_context(adapter, id, F_EGRESS);
2050 * t3_sge_init_flcntxt - initialize an SGE free-buffer list context
2051 * @adapter: the adapter to configure
2052 * @id: the context id
2053 * @gts_enable: whether to enable GTS for the context
2054 * @base_addr: base address of queue
2055 * @size: number of queue entries
2056 * @bsize: size of each buffer for this queue
2057 * @cong_thres: threshold to signal congestion to upstream producers
2058 * @gen: initial generation value for the context
2059 * @cidx: consumer pointer
2061 * Initialize an SGE free list context and make it ready for use. The
2062 * caller is responsible for ensuring only one context operation occurs
2065 int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
2066 int gts_enable, u64 base_addr, unsigned int size,
2067 unsigned int bsize, unsigned int cong_thres, int gen,
2070 if (base_addr & 0xfff) /* must be 4K aligned */
/* Fail fast if a previous context command is still in flight. */
2072 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Stage the free-list context image.
 * NOTE(review): a base_addr shift between DATA0 and DATA1 is elided from
 * this view; DATA1 carries the upper address bits. */
2076 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
2078 t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
2079 V_FL_BASE_HI((u32) base_addr) |
2080 V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
/* Entry size and consumer index are each split across two fields. */
2081 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
2082 V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
2083 V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
2084 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2085 V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
2086 V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
/* Commit the staged image as a free-list context. */
2087 return t3_sge_write_context(adapter, id, F_FREELIST);
2091 * t3_sge_init_rspcntxt - initialize an SGE response queue context
2092 * @adapter: the adapter to configure
2093 * @id: the context id
2094 * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
2095 * @base_addr: base address of queue
2096 * @size: number of queue entries
2097 * @fl_thres: threshold for selecting the normal or jumbo free list
2098 * @gen: initial generation value for the context
2099 * @cidx: consumer pointer
2101 * Initialize an SGE response queue context and make it ready for use.
2102 * The caller is responsible for ensuring only one context operation
2105 int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
2106 int irq_vec_idx, u64 base_addr, unsigned int size,
2107 unsigned int fl_thres, int gen, unsigned int cidx)
/* Interrupt-delivery bits for DATA2; stays 0 when irq_vec_idx < 0. */
2109 unsigned int intr = 0;
2111 if (base_addr & 0xfff) /* must be 4K aligned */
/* Fail fast if a previous context command is still in flight. */
2113 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2117 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
2119 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
/* irq_vec_idx >= 0 enables interrupts on the given MSI-X vector. */
2121 if (irq_vec_idx >= 0)
2122 intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
2123 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2124 V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
2125 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
/* Commit the staged image as a response-queue context. */
2126 return t3_sge_write_context(adapter, id, F_RESPONSEQ);
2130 * t3_sge_init_cqcntxt - initialize an SGE completion queue context
2131 * @adapter: the adapter to configure
2132 * @id: the context id
2133 * @base_addr: base address of queue
2134 * @size: number of queue entries
2135 * @rspq: response queue for async notifications
2136 * @ovfl_mode: CQ overflow mode
2137 * @credits: completion queue credits
2138 * @credit_thres: the credit threshold
2140 * Initialize an SGE completion queue context and make it ready for use.
2141 * The caller is responsible for ensuring only one context operation
2144 int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
2145 unsigned int size, int rspq, int ovfl_mode,
2146 unsigned int credits, unsigned int credit_thres)
2148 if (base_addr & 0xfff) /* must be 4K aligned */
/* Fail fast if a previous context command is still in flight. */
2150 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Stage the completion-queue context image; generation starts at 1. */
2154 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
2155 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
2157 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2158 V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
2159 V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
2160 V_CQ_ERR(ovfl_mode));
2161 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
2162 V_CQ_CREDIT_THRES(credit_thres));
/* Commit the staged image as a completion-queue context. */
2163 return t3_sge_write_context(adapter, id, F_CQ);
2167 * t3_sge_enable_ecntxt - enable/disable an SGE egress context
2168 * @adapter: the adapter
2169 * @id: the egress context id
2170 * @enable: enable (1) or disable (0) the context
2172 * Enable or disable an SGE egress context. The caller is responsible for
2173 * ensuring only one context operation occurs at a time.
2175 int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
/* Fail fast if a previous context command is still in flight. */
2177 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Mask so the write touches only the EC_VALID bit of the context. */
2180 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2181 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2182 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2183 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
2184 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
2185 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2186 V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
/* Wait for the command engine to go idle. */
2187 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2188 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2192 * t3_sge_disable_fl - disable an SGE free-buffer list
2193 * @adapter: the adapter
2194 * @id: the free list context id
2196 * Disable an SGE free-buffer list. The caller is responsible for
2197 * ensuring only one context operation occurs at a time.
2199 int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
/* Fail fast if a previous context command is still in flight. */
2201 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Mask so only the FL_SIZE field is updated; zero size disables the list. */
2204 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2205 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2206 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
2207 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2208 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
2209 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2210 V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
/* Wait for the command engine to go idle. */
2211 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2212 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2216 * t3_sge_disable_rspcntxt - disable an SGE response queue
2217 * @adapter: the adapter
2218 * @id: the response queue context id
2220 * Disable an SGE response queue. The caller is responsible for
2221 * ensuring only one context operation occurs at a time.
2223 int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
2225 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Unmask only the CQ_SIZE field (word 0) and zero it to disable the
 * response queue. */
2228 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2229 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2230 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2231 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2232 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2233 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2234 V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2235 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2236 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2240 * t3_sge_disable_cqcntxt - disable an SGE completion queue
2241 * @adapter: the adapter
2242 * @id: the completion queue context id
2244 * Disable an SGE completion queue. The caller is responsible for
2245 * ensuring only one context operation occurs at a time.
2247 int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
2249 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Same pattern as the response-queue disable: unmask CQ_SIZE only and
 * write 0, but target the CQ context type. */
2252 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2253 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2254 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2255 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2256 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2257 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2258 V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2259 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2260 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2264 * t3_sge_cqcntxt_op - perform an operation on a completion queue context
2265 * @adapter: the adapter
2266 * @id: the context id
2267 * @op: the operation to perform
2269 * Perform the selected operation on an SGE completion queue context.
2270 * The caller is responsible for ensuring only one context operation
2273 int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
2274 unsigned int credits)
2278 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Credits ride in the upper half of DATA0 alongside the command. */
2281 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2282 t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2283 V_CONTEXT(id) | F_CQ);
2284 if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2285 0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
/* Ops 2..6 return the CQ index.  On rev > 0 silicon the index comes back
 * directly in the command result; on rev 0 the context must be re-read
 * (opcode 0) and the index extracted from DATA0. */
2288 if (op >= 2 && op < 7) {
2289 if (adapter->params.rev > 0)
2290 return G_CQ_INDEX(val);
2292 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2293 V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2294 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2295 F_CONTEXT_CMD_BUSY, 0,
2296 SG_CONTEXT_CMD_ATTEMPTS, 1))
2298 return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2304 * t3_sge_read_context - read an SGE context
2305 * @type: the context type
2306 * @adapter: the adapter
2307 * @id: the context id
2308 * @data: holds the retrieved context
2310 * Read an SGE egress context. The caller is responsible for ensuring
2311 * only one context operation occurs at a time.
2313 static int t3_sge_read_context(unsigned int type, struct adapter *adapter,
2314 unsigned int id, u32 data[4])
2316 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Opcode 0 = read; once the command completes the four context words are
 * latched into the DATA0..DATA3 registers. */
2319 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2320 V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
2321 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
2322 SG_CONTEXT_CMD_ATTEMPTS, 1))
2324 data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
2325 data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
2326 data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
2327 data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
2332 * t3_sge_read_ecntxt - read an SGE egress context
2333 * @adapter: the adapter
2334 * @id: the context id
2335 * @data: holds the retrieved context
2337 * Read an SGE egress context. The caller is responsible for ensuring
2338 * only one context operation occurs at a time.
/* Thin wrapper: selects the EGRESS context type.  (An id range check is
 * elided from this excerpt.) */
2340 int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4])
2344 return t3_sge_read_context(F_EGRESS, adapter, id, data);
2348 * t3_sge_read_cq - read an SGE CQ context
2349 * @adapter: the adapter
2350 * @id: the context id
2351 * @data: holds the retrieved context
2353 * Read an SGE CQ context. The caller is responsible for ensuring
2354 * only one context operation occurs at a time.
/* Thin wrapper: selects the CQ context type.  (An id range check is
 * elided from this excerpt.) */
2356 int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4])
2360 return t3_sge_read_context(F_CQ, adapter, id, data);
2364 * t3_sge_read_fl - read an SGE free-list context
2365 * @adapter: the adapter
2366 * @id: the context id
2367 * @data: holds the retrieved context
2369 * Read an SGE free-list context. The caller is responsible for ensuring
2370 * only one context operation occurs at a time.
2372 int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4])
/* Two free lists per queue set, hence the SGE_QSETS * 2 bound. */
2374 if (id >= SGE_QSETS * 2)
2376 return t3_sge_read_context(F_FREELIST, adapter, id, data);
2380 * t3_sge_read_rspq - read an SGE response queue context
2381 * @adapter: the adapter
2382 * @id: the context id
2383 * @data: holds the retrieved context
2385 * Read an SGE response queue context. The caller is responsible for
2386 * ensuring only one context operation occurs at a time.
2388 int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4])
/* One response queue per queue set. */
2390 if (id >= SGE_QSETS)
2392 return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2396 * t3_config_rss - configure Rx packet steering
2397 * @adapter: the adapter
2398 * @rss_config: RSS settings (written to TP_RSS_CONFIG)
2399 * @cpus: values for the CPU lookup table (0xff terminated)
2400 * @rspq: values for the response queue lookup table (0xffff terminated)
2402 * Programs the receive packet steering logic. @cpus and @rspq provide
2403 * the values for the CPU and response queue lookup tables. If they
2404 * provide fewer values than the size of the tables the supplied values
2405 * are used repeatedly until the tables are fully populated.
2407 void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
2408 const u8 * cpus, const u16 *rspq)
2410 int i, j, cpu_idx = 0, q_idx = 0;
/* Fill the CPU lookup table, packing two 6-bit CPU entries per write.
 * NOTE(review): the index-wrap statements at the 0xff/0xffff sentinels
 * are elided from this excerpt. */
2413 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2416 for (j = 0; j < 2; ++j) {
2417 val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
2418 if (cpus[cpu_idx] == 0xff)
2421 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
/* Fill the response queue map table; the entry index goes in bits 16+. */
2425 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2426 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2427 (i << 16) | rspq[q_idx++]);
2428 if (rspq[q_idx] == 0xffff)
/* Finally commit the global RSS configuration. */
2432 t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
2436 * t3_read_rss - read the contents of the RSS tables
2437 * @adapter: the adapter
2438 * @lkup: holds the contents of the RSS lookup table
2439 * @map: holds the contents of the RSS map table
2441 * Reads the contents of the receive packet steering tables.
2443 int t3_read_rss(struct adapter *adapter, u8 * lkup, u16 *map)
/* Read back the CPU lookup table.  Bit 31 of the readback acts as a
 * valid flag; its absence is treated as an error.  (The written index
 * values and error returns are elided from this excerpt.) */
2449 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2450 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
2452 val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
2453 if (!(val & 0x80000000))
2456 *lkup++ = (val >> 8);
/* Read back the response-queue map table the same way. */
2460 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2461 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2463 val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
2464 if (!(val & 0x80000000))
2472 * t3_tp_set_offload_mode - put TP in NIC/offload mode
2473 * @adap: the adapter
2474 * @enable: 1 to select offload mode, 0 for regular NIC
2476 * Switches TP to NIC/offload mode.
2478 void t3_tp_set_offload_mode(struct adapter *adap, int enable)
/* Only flip NICMODE when the device supports offload or when disabling
 * offload; enabling offload on a NIC-only adapter is a no-op. */
2480 if (is_offload(adap) || !enable)
2481 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2482 V_NICMODE(!enable));
2486 * pm_num_pages - calculate the number of pages of the payload memory
2487 * @mem_size: the size of the payload memory
2488 * @pg_size: the size of each payload memory page
2490 * Calculate the number of pages, each of the given size, that fit in a
2491 * memory of the specified size, respecting the HW requirement that the
2492 * number of pages must be a multiple of 24.
2494 static inline unsigned int pm_num_pages(unsigned int mem_size,
2495 unsigned int pg_size)
/* NOTE(review): the rounding-to-a-multiple-of-24 and return statements
 * are elided from this excerpt. */
2497 unsigned int n = mem_size / pg_size;
/* Helper macro: program a memory region's base address register and
 * advance the running offset.  (The offset-advance line is elided from
 * this excerpt.) */
2502 #define mem_region(adap, start, size, reg) \
2503 t3_write_reg((adap), A_ ## reg, (start)); \
2507 * partition_mem - partition memory and configure TP memory settings
2508 * @adap: the adapter
2509 * @p: the TP parameters
2511 * Partitions context and payload memory and configures TP's memory
2514 static void partition_mem(struct adapter *adap, const struct tp_params *p)
2516 unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
2517 unsigned int timers = 0, timers_shift = 22;
/* Size the connection-timer region by TID count on rev > 0 silicon.
 * (The assignments inside each branch are elided from this excerpt.) */
2519 if (adap->params.rev > 0) {
2520 if (tids <= 16 * 1024) {
2523 } else if (tids <= 64 * 1024) {
2526 } else if (tids <= 256 * 1024) {
/* Per-channel payload memory split between Rx and Tx. */
2532 t3_write_reg(adap, A_TP_PMM_SIZE,
2533 p->chan_rx_size | (p->chan_tx_size >> 16));
2535 t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
2536 t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
2537 t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
2538 t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
2539 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
2541 t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
2542 t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
2543 t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
2545 pstructs = p->rx_num_pgs + p->tx_num_pgs;
2546 /* Add a bit of headroom and make multiple of 24 */
2548 pstructs -= pstructs % 24;
2549 t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
/* Lay out context memory: TCBs, SGE contexts, timers, pstructs and the
 * free-list regions, packing them back-to-back via mem_region(). */
2551 m = tids * TCB_SIZE;
2552 mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
2553 mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
2554 t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
2555 m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
2556 mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
2557 mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
2558 mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
2559 mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
/* Round up to a 4KB boundary and hand the remainder of CM to the CIM. */
2561 m = (m + 4095) & ~0xfff;
2562 t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
2563 t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
/* If more TIDs were requested than fit, give the surplus MC5 entries
 * back to the server region. */
2565 tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
2566 m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
2567 adap->params.mc5.nfilters - adap->params.mc5.nroutes;
2569 adap->params.mc5.nservers += m - tids;
/* Write a TP register through the PIO indirect-access window:
 * address first, then data. */
2572 static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
2575 t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2576 t3_write_reg(adap, A_TP_PIO_DATA, val);
/* One-time TP (Transport Processor) configuration: global options,
 * TCP option handling, delayed-ACK behavior, congestion settings and
 * revision-specific tweaks. */
2579 static void tp_config(struct adapter *adap, const struct tp_params *p)
2581 t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
2582 F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
2583 F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
2584 t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
2585 F_MTUENABLE | V_WINDOWSCALEMODE(1) |
2586 V_TIMESTAMPSMODE(0) | V_SACKMODE(1) | V_SACKRX(1));
2587 t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
2588 V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
2589 V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
2590 F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
2591 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
2592 F_IPV6ENABLE | F_NICMODE);
2593 t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
2594 t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
2595 t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
2596 adap->params.rev > 0 ? F_ENABLEESND :
2599 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2601 F_ENABLEOCSPIFULL |F_TXDEFERENABLE | F_HEARBEATDACK |
2602 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
2603 t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
2604 F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
2605 F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
2606 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
2607 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);
/* Revision-dependent Tx pacing: auto on rev > 0, fixed otherwise. */
2609 if (adap->params.rev > 0) {
2610 tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
2611 t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
2613 t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
2614 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
2616 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
2618 if (adap->params.rev == T3_REV_C)
2619 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2620 V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
2621 V_TABLELATENCYDELTA(4));
/* Clear Tx modulation queue weights/limits to their defaults. */
2623 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
2624 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
2625 t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
2626 t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
2629 /* Desired TP timer resolution in usec */
2630 #define TP_TMR_RES 50
2632 /* TCP timer values in ms */
2633 #define TP_DACK_TIMER 50
2634 #define TP_RTO_MIN 250
2637 * tp_set_timers - set TP timing parameters
2638 * @adap: the adapter to set
2639 * @core_clk: the core clock frequency in Hz
2641 * Set TP's timing parameters, such as the various timer resolutions and
2642 * the TCP timer values.
2644 static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
/* Resolutions are log2 clock dividers derived from the core clock:
 * tre ~ TP_TMR_RES us ticks, dack_re ~ 200us, tstamp_re >= 1ms. */
2646 unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
2647 unsigned int dack_re = fls(core_clk / 5000) - 1; /* 200us */
2648 unsigned int tstamp_re = fls(core_clk / 1000); /* 1ms, at least */
2649 unsigned int tps = core_clk >> tre; /* timer ticks per second */
2651 t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
2652 V_DELAYEDACKRESOLUTION(dack_re) |
2653 V_TIMESTAMPRESOLUTION(tstamp_re));
2654 t3_write_reg(adap, A_TP_DACK_TIMER,
2655 (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
/* Exponential backoff shift table, 16 entries packed 4 per register. */
2656 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
2657 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
2658 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
2659 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
2660 t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
2661 V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
2662 V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
/* SECONDS converts a second count to timer ticks (tps = ticks/sec). */
2665 #define SECONDS * tps
2667 t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
2668 t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
2669 t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
2670 t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
2671 t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
2672 t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
2673 t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
2674 t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
2675 t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
2681 * t3_tp_set_coalescing_size - set receive coalescing size
2682 * @adap: the adapter
2683 * @size: the receive coalescing size
2684 * @psh: whether a set PSH bit should deliver coalesced data
2686 * Set the receive coalescing size and PSH bit handling.
2688 int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh)
/* Reject sizes beyond the hardware limit.  (The error return is elided
 * from this excerpt.) */
2692 if (size > MAX_RX_COALESCING_LEN)
2695 val = t3_read_reg(adap, A_TP_PARA_REG3);
2696 val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
/* size != 0 enables coalescing; psh additionally enables PSH delivery. */
2699 val |= F_RXCOALESCEENABLE;
2701 val |= F_RXCOALESCEPSHEN;
2702 size = min(MAX_RX_COALESCING_LEN, size);
2703 t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
2704 V_MAXRXDATA(MAX_RX_COALESCING_LEN));
2706 t3_write_reg(adap, A_TP_PARA_REG3, val);
2711 * t3_tp_set_max_rxsize - set the max receive size
2712 * @adap: the adapter
2713 * @size: the max receive size
2715 * Set TP's max receive size. This is the limit that applies when
2716 * receive coalescing is disabled.
2718 void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
/* The same limit is programmed for both PM transfer-length fields. */
2720 t3_write_reg(adap, A_TP_PARA_REG7,
2721 V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
/* Populate the default MTU table.  NOTE(review): the table entries
 * themselves are elided from this excerpt; only the rationale comment
 * is visible. */
2724 static void init_mtus(unsigned short mtus[])
2727 * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
2728 * it can accomodate max size TCP/IP headers when SACK and timestamps
2729 * are enabled and still have at least 8 bytes of payload.
2750 * Initial congestion control parameters.
2752 static void init_cong_ctrl(unsigned short *a, unsigned short *b)
/* a[] holds additive-increase (alpha) values, b[] the beta exponents.
 * (Entries between the visible index runs are elided from this excerpt.) */
2754 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2779 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2782 b[13] = b[14] = b[15] = b[16] = 3;
2783 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2784 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2789 /* The minimum additive increment value for the congestion control table */
2790 #define CC_MIN_INCR 2U
2793 * t3_load_mtus - write the MTU and congestion control HW tables
2794 * @adap: the adapter
2795 * @mtus: the unrestricted values for the MTU table
2796 * @alphs: the values for the congestion control alpha parameter
2797 * @beta: the values for the congestion control beta parameter
2798 * @mtu_cap: the maximum permitted effective MTU
2800 * Write the MTU table with the supplied MTUs capping each at &mtu_cap.
2801 * Update the high-speed congestion control table with the supplied alpha,
2804 void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
2805 unsigned short alpha[NCCTRL_WIN],
2806 unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
/* Average packets per congestion window, one entry per window size. */
2808 static const unsigned int avg_pkts[NCCTRL_WIN] = {
2809 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2810 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2811 28672, 40960, 57344, 81920, 114688, 163840, 229376
/* For each MTU: cap it, compute its rounded log2, program the MTU table
 * entry, then fill the congestion-control increments for every window. */
2816 for (i = 0; i < NMTUS; ++i) {
2817 unsigned int mtu = min(mtus[i], mtu_cap);
2818 unsigned int log2 = fls(mtu);
2820 if (!(mtu & ((1 << log2) >> 2))) /* round */
2822 t3_write_reg(adap, A_TP_MTU_TABLE,
2823 (i << 24) | (log2 << 16) | mtu);
2825 for (w = 0; w < NCCTRL_WIN; ++w) {
/* Increment scales with (mtu - 40) * alpha / avg_pkts, floored at
 * CC_MIN_INCR elsewhere in the max() below. */
2828 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2831 t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
2832 (w << 16) | (beta[w] << 13) | inc);
2838 * t3_read_hw_mtus - returns the values in the HW MTU table
2839 * @adap: the adapter
2840 * @mtus: where to store the HW MTU values
2842 * Reads the HW MTU table.
2844 void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS])
2848 for (i = 0; i < NMTUS; ++i) {
/* Writing 0xff000000 | i selects entry i for readback; the MTU value
 * occupies the low 14 bits of the readback. */
2851 t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
2852 val = t3_read_reg(adap, A_TP_MTU_TABLE);
2853 mtus[i] = val & 0x3fff;
2858 * t3_get_cong_cntl_tab - reads the congestion control table
2859 * @adap: the adapter
2860 * @incr: where to store the alpha values
2862 * Reads the additive increments programmed into the HW congestion
2865 void t3_get_cong_cntl_tab(struct adapter *adap,
2866 unsigned short incr[NMTUS][NCCTRL_WIN])
2868 unsigned int mtu, w;
/* Select each (mtu, window) entry for readback by writing its index
 * with the high half set; the increment is in the low bits of the
 * readback (the mask constant is elided from this excerpt). */
2870 for (mtu = 0; mtu < NMTUS; ++mtu)
2871 for (w = 0; w < NCCTRL_WIN; ++w) {
2872 t3_write_reg(adap, A_TP_CCTRL_TABLE,
2873 0xffff0000 | (mtu << 5) | w);
2874 incr[mtu][w] = t3_read_reg(adap, A_TP_CCTRL_TABLE) &
2880 * t3_tp_get_mib_stats - read TP's MIB counters
2881 * @adap: the adapter
2882 * @tps: holds the returned counter values
2884 * Returns the values of TP's MIB counters.
2886 void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
/* Bulk indirect read: the stats struct is treated as an array of u32s. */
2888 t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
2889 sizeof(*tps) / sizeof(u32), 0);
/* Program an ULP RX region's lower/upper limit registers from a running
 * start offset.  (The offset-advance line of ulp_region is elided from
 * this excerpt.) */
2892 #define ulp_region(adap, name, start, len) \
2893 t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
2894 t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
2895 (start) + (len) - 1); \
/* Same for an ULP TX region. */
2898 #define ulptx_region(adap, name, start, len) \
2899 t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
2900 t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
2901 (start) + (len) - 1)
/* Carve the per-channel Rx payload memory into the ULP regions
 * (iSCSI, TDDP, TPT, STAG, RQ, PBL). */
2903 static void ulp_config(struct adapter *adap, const struct tp_params *p)
2905 unsigned int m = p->chan_rx_size;
2907 ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
2908 ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
2909 ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
2910 ulp_region(adap, STAG, m, p->chan_rx_size / 4);
2911 ulp_region(adap, RQ, m, p->chan_rx_size / 4);
2912 ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
2913 ulp_region(adap, PBL, m, p->chan_rx_size / 4)
2914 t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
2918 * t3_set_proto_sram - set the contents of the protocol sram
2919 * @adapter: the adapter
2920 * @data: the protocol image
2922 * Write the contents of the protocol SRAM.
2924 int t3_set_proto_sram(struct adapter *adap, const u8 *data)
2927 const __be32 *buf = (const __be32 *)data;
/* Each SRAM line is five 32-bit words loaded into FIELD5..FIELD1, then
 * a write to FIELD0 with the line index (and bit 31 set) commits it. */
2929 for (i = 0; i < PROTO_SRAM_LINES; i++) {
2930 t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, be32_to_cpu(*buf++));
2931 t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, be32_to_cpu(*buf++));
2932 t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, be32_to_cpu(*buf++));
2933 t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, be32_to_cpu(*buf++));
2934 t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, be32_to_cpu(*buf++));
2936 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
2937 if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
/* Clear the command register once all lines are loaded. */
2940 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0);
/* Program a TP trace filter: pack the 5-tuple (plus VLAN/interface) and
 * its mask into four key/mask word pairs and write them through the TP
 * indirect window.  filter_index selects the Rx vs Tx trace filter. */
2945 void t3_config_trace_filter(struct adapter *adapter,
2946 const struct trace_params *tp, int filter_index,
2947 int invert, int enable)
2949 u32 addr, key[4], mask[4];
2951 key[0] = tp->sport | (tp->sip << 16);
2952 key[1] = (tp->sip >> 16) | (tp->dport << 16);
2954 key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
2956 mask[0] = tp->sport_mask | (tp->sip_mask << 16);
2957 mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
2958 mask[2] = tp->dip_mask;
2959 mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
/* Control bits live in key[3]: bit 29 and bit 28 are set from the
 * invert/enable arguments (the guarding conditions are elided from
 * this excerpt). */
2962 key[3] |= (1 << 29);
2964 key[3] |= (1 << 28);
2966 addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
2967 tp_wr_indirect(adapter, addr++, key[0]);
2968 tp_wr_indirect(adapter, addr++, mask[0]);
2969 tp_wr_indirect(adapter, addr++, key[1]);
2970 tp_wr_indirect(adapter, addr++, mask[1]);
2971 tp_wr_indirect(adapter, addr++, key[2]);
2972 tp_wr_indirect(adapter, addr++, mask[2]);
2973 tp_wr_indirect(adapter, addr++, key[3]);
2974 tp_wr_indirect(adapter, addr, mask[3]);
/* Read back to flush the posted indirect writes to the device. */
2975 t3_read_reg(adapter, A_TP_PIO_DATA);
2979 * t3_config_sched - configure a HW traffic scheduler
2980 * @adap: the adapter
2981 * @kbps: target rate in Kbps
2982 * @sched: the scheduler index
2984 * Configure a HW scheduler for the target rate
2986 int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
2988 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
2989 unsigned int clk = adap->params.vpd.cclk * 1000;
2990 unsigned int selected_cpt = 0, selected_bpt = 0;
/* Exhaustive search over clocks-per-tick (cpt) and bytes-per-tick (bpt)
 * for the pair whose achievable rate is closest to the target.
 * (The tps computation and best-pair bookkeeping lines are elided from
 * this excerpt.) */
2993 kbps *= 125; /* -> bytes */
2994 for (cpt = 1; cpt <= 255; cpt++) {
2996 bpt = (kbps + tps / 2) / tps;
2997 if (bpt > 0 && bpt <= 255) {
2999 delta = v >= kbps ? v - kbps : kbps - v;
3000 if (delta <= mindelta) {
3005 } else if (selected_cpt)
/* Two schedulers share each rate-limit register: even index in the low
 * half, odd index in the high half. */
3011 t3_write_reg(adap, A_TP_TM_PIO_ADDR,
3012 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
3013 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3015 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3017 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3018 t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
/* Initialize the TP block: base config, VLAN acceleration, and (for
 * offload-capable adapters) timers plus free-list init with a timeout. */
3022 static int tp_init(struct adapter *adap, const struct tp_params *p)
3027 t3_set_vlan_accel(adap, 3, 0);
3029 if (is_offload(adap)) {
3030 tp_set_timers(adap, adap->params.vpd.cclk * 1000);
3031 t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
3032 busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
3035 CH_ERR(adap, "TP initialization timed out\n");
/* Bring TP out of reset once init (if any) has completed. */
3039 t3_write_reg(adap, A_TP_RESET, F_TPRESET);
/* Set which ports are active in the MPS.  Rejects bits beyond the
 * adapter's port count (the error return is elided from this excerpt). */
3043 int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
3045 if (port_mask & ~((1 << adap->params.nports) - 1))
3047 t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
3048 port_mask << S_PORT0ACTIVE);
3053 * Perform the bits of HW initialization that are dependent on the number
3054 * of available ports.
3056 static void init_hw_for_avail_ports(struct adapter *adap, int nports)
/* Single-port setup: disable round-robin arbitration and activate only
 * port 0.  (The if/else structure lines are elided from this excerpt.) */
3061 t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
3062 t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
3063 t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_TPTXPORT0EN |
3064 F_PORT0ACTIVE | F_ENFORCEPKT);
3065 t3_write_reg(adap, A_PM1_TX_CFG, 0xffffffff);
/* Dual-port setup: round-robin both channels with equal DMA weights
 * and activate both ports. */
3067 t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
3068 t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
3069 t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
3070 V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
3071 t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
3072 F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
3074 t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
3075 t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
3076 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3077 V_TX_MOD_QUEUE_REQ_MAP(0xaa));
3078 for (i = 0; i < 16; i++)
3079 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
3080 (i << 16) | 0x1010);
/* Calibrate the XGMAC's pad impedance.  XAUI adapters retry the
 * hardware calibration up to 5 times; RGMII adapters use fixed
 * pull-up/pull-down values. */
3084 static int calibrate_xgm(struct adapter *adapter)
3086 if (uses_xaui(adapter)) {
3089 for (i = 0; i < 5; ++i) {
/* Kick calibration, flush, then poll for completion/fault. */
3090 t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
3091 t3_read_reg(adapter, A_XGM_XAUI_IMP);
3093 v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
3094 if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
3095 t3_write_reg(adapter, A_XGM_XAUI_IMP,
3096 V_XAUIIMP(G_CALIMP(v) >> 2));
3100 CH_ERR(adapter, "MAC calibration failed\n");
3103 t3_write_reg(adapter, A_XGM_RGMII_IMP,
3104 V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3105 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3106 F_XGM_IMPSETUPDATE);
/* T3B-specific RGMII impedance calibration: pulse CALRESET, then toggle
 * IMPSETUPDATE and CALUPDATE to latch the new values.  XAUI adapters
 * need no action here. */
3111 static void calibrate_xgm_t3b(struct adapter *adapter)
3113 if (!uses_xaui(adapter)) {
3114 t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
3115 F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3116 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
3117 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
3118 F_XGM_IMPSETUPDATE);
3119 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3121 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
3122 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
/* DRAM timing parameters for one MC7 memory-controller speed grade;
 * values are programmed into A_MC7_PARM in mc7_init(). */
3126 struct mc7_timing_params {
3127 unsigned char ActToPreDly;
3128 unsigned char ActToRdWrDly;
3129 unsigned char PreCyc;
3130 unsigned char RefCyc[5];   /* refresh cycles, indexed by density */
3131 unsigned char BkCyc;
3132 unsigned char WrToRdDly;
3133 unsigned char RdToWrDly;
3137 * Write a value to a register and check that the write completed. These
3138 * writes normally complete in a cycle or two, so one read should suffice.
3139 * The very first read exists to flush the posted write to the device.
3141 static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
3143 t3_write_reg(adapter, addr, val);
3144 t3_read_reg(adapter, addr); /* flush */
/* Busy clear on the second read means the write took effect; otherwise
 * report a timeout (success/error returns elided from this excerpt). */
3145 if (!(t3_read_reg(adapter, addr) & F_BUSY))
3147 CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
/* Initialize one MC7 memory controller: calibrate, program DRAM timing,
 * run the JEDEC-style mode-register init sequence, set the refresh
 * rate, then run BIST over the whole part before enabling access.
 * NOTE(review): several lines (delays, error paths, mode-register value
 * computation) are elided from this excerpt. */
3151 static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
/* Mode-register values per memory type. */
3153 static const unsigned int mc7_mode[] = {
3154 0x632, 0x642, 0x652, 0x432, 0x442
/* Timing tables, one row per memory type (indexed by mem_type). */
3156 static const struct mc7_timing_params mc7_timings[] = {
3157 {12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
3158 {12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
3159 {12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
3160 {9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
3161 {9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
3165 unsigned int width, density, slow, attempts;
3166 struct adapter *adapter = mc7->adapter;
3167 const struct mc7_timing_params *p = &mc7_timings[mem_type];
3172 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3173 slow = val & F_SLOW;
3174 width = G_WIDTH(val);
3175 density = G_DEN(val);
/* Enable the memory interface and flush. */
3177 t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
3178 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
/* Single calibration pass; any of busy/enable/fault still set after
 * the wait means calibration failed. */
3182 t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
3183 t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
3185 if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
3186 (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
3187 CH_ERR(adapter, "%s MC7 calibration timed out\n",
/* Program the DRAM timing parameters from the selected table row. */
3193 t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
3194 V_ACTTOPREDLY(p->ActToPreDly) |
3195 V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
3196 V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
3197 V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
3199 t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
3200 val | F_CLKEN | F_TERM150);
3201 t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
3204 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
/* DRAM init sequence: precharge, extended mode registers, DLL reset,
 * refreshes, then the final mode register values. */
3209 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3210 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
3211 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
3212 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3216 t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
3217 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
3221 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3222 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3223 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3224 wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
3225 mc7_mode[mem_type]) ||
3226 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
3227 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3230 /* clock value is in KHz */
3231 mc7_clock = mc7_clock * 7812 + mc7_clock / 2; /* ns */
3232 mc7_clock /= 1000000; /* KHz->MHz, ns->us */
3234 t3_write_reg(adapter, mc7->offset + A_MC7_REF,
3235 F_PERREFEN | V_PREREFDIV(mc7_clock));
3236 t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */
/* Enable ECC generation/checking and BIST over the full address range. */
3238 t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
3239 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
3240 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
3241 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
3242 (mc7->size << width) - 1);
3243 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
3244 t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */
/* Poll BIST until it finishes or the attempt budget runs out. */
3249 val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
3250 } while ((val & F_BUSY) && --attempts);
3252 CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
3256 /* Enable normal memory accesses. */
3257 t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
/* Tune PCIe ACK latency and replay-timer limits from the negotiated
 * link width (row) and max payload size (column), per the tables below,
 * then clear any stale PEX errors and enable link-down reset handling. */
3264 static void config_pcie(struct adapter *adap)
/* ACK latency values indexed by [log2(link width)][payload size code]. */
3266 static const u16 ack_lat[4][6] = {
3267 {237, 416, 559, 1071, 2095, 4143},
3268 {128, 217, 289, 545, 1057, 2081},
3269 {73, 118, 154, 282, 538, 1050},
3270 {67, 107, 86, 150, 278, 534}
/* Replay-timer limits, same indexing. */
3272 static const u16 rpl_tmr[4][6] = {
3273 {711, 1248, 1677, 3213, 6285, 12429},
3274 {384, 651, 867, 1635, 3171, 6243},
3275 {219, 354, 462, 846, 1614, 3150},
3276 {201, 321, 258, 450, 834, 1602}
3280 unsigned int log2_width, pldsize;
3281 unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
/* Payload size comes from Device Control, L0s state from Link Control. */
3283 pci_read_config_word(adap->pdev,
3284 adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
3286 pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
3287 pci_read_config_word(adap->pdev,
3288 adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
3291 fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
3292 fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
3293 G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
3294 log2_width = fls(adap->params.pci.width) - 1;
3295 acklat = ack_lat[log2_width][pldsize];
3296 if (val & 1) /* check LOsEnable */
3297 acklat += fst_trn_tx * 4;
3298 rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
/* Rev 0 silicon uses a different ACKLAT field layout. */
3300 if (adap->params.rev == 0)
3301 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
3302 V_T3A_ACKLAT(M_T3A_ACKLAT),
3303 V_T3A_ACKLAT(acklat));
3305 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
3308 t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
3309 V_REPLAYLMT(rpllmt));
3311 t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
3312 t3_set_reg_field(adap, A_PCIE_CFG, 0,
3313 F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
3314 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
3318 * Initialize and configure T3 HW modules. This performs the
3319 * initialization steps that need to be done once after a card is reset.
3320 * MAC and PHY initialization is handled separarely whenever a port is enabled.
3322 * fw_params are passed to FW and their value is platform dependent. Only the
3323 * top 8 bits are available for use, the rest must be 0.
3325 int t3_init_hw(struct adapter *adapter, u32 fw_params)
3327 int err = -EIO, attempts, i;
3328 const struct vpd_params *vpd = &adapter->params.vpd;
3330 if (adapter->params.rev > 0)
3331 calibrate_xgm_t3b(adapter);
3332 else if (calibrate_xgm(adapter))
3336 partition_mem(adapter, &adapter->params.tp);
3338 if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
3339 mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
3340 mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
3341 t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
3342 adapter->params.mc5.nfilters,
3343 adapter->params.mc5.nroutes))
3346 for (i = 0; i < 32; i++)
3347 if (clear_sge_ctxt(adapter, i, F_CQ))
3351 if (tp_init(adapter, &adapter->params.tp))
3354 t3_tp_set_coalescing_size(adapter,
3355 min(adapter->params.sge.max_pkt_size,
3356 MAX_RX_COALESCING_LEN), 1);
3357 t3_tp_set_max_rxsize(adapter,
3358 min(adapter->params.sge.max_pkt_size, 16384U));
3359 ulp_config(adapter, &adapter->params.tp);
3361 if (is_pcie(adapter))
3362 config_pcie(adapter);
3364 t3_set_reg_field(adapter, A_PCIX_CFG, 0,
3365 F_DMASTOPEN | F_CLIDECEN);
3367 if (adapter->params.rev == T3_REV_C)
3368 t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
3369 F_CFG_CQE_SOP_MASK);
3371 t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
3372 t3_write_reg(adapter, A_PM1_RX_MODE, 0);
3373 t3_write_reg(adapter, A_PM1_TX_MODE, 0);
3374 init_hw_for_avail_ports(adapter, adapter->params.nports);
3375 t3_sge_init(adapter, &adapter->params.sge);
3377 t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));
3379 t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
3380 t3_write_reg(adapter, A_CIM_BOOT_CFG,
3381 V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
3382 t3_read_reg(adapter, A_CIM_BOOT_CFG); /* flush */
3385 do { /* wait for uP to initialize */
3387 } while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
3389 CH_ERR(adapter, "uP initialization timed out\n");
3399 * get_pci_mode - determine a card's PCI mode
3400 * @adapter: the adapter
3401 * @p: where to store the PCI settings
3403 * Determines a card's PCI mode and associated parameters, such as speed
3406 static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
3408 static unsigned short speed_map[] = { 33, 66, 100, 133 };
3409 u32 pci_mode, pcie_cap;
3411 pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
3415 p->variant = PCI_VARIANT_PCIE;
3416 p->pcie_cap_addr = pcie_cap;
3417 pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
3419 p->width = (val >> 4) & 0x3f;
3423 pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
3424 p->speed = speed_map[G_PCLKRANGE(pci_mode)];
3425 p->width = (pci_mode & F_64BIT) ? 64 : 32;
3426 pci_mode = G_PCIXINITPAT(pci_mode);
3428 p->variant = PCI_VARIANT_PCI;
3429 else if (pci_mode < 4)
3430 p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
3431 else if (pci_mode < 8)
3432 p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
3434 p->variant = PCI_VARIANT_PCIX_266_MODE2;
3438 * init_link_config - initialize a link's SW state
3439 * @lc: structure holding the link state
3440 * @ai: information about the current card
3442 * Initializes the SW state maintained for each link, including the link's
3443 * capabilities and default speed/duplex/flow-control/autonegotiation
3446 static void init_link_config(struct link_config *lc, unsigned int caps)
3448 lc->supported = caps;
3449 lc->requested_speed = lc->speed = SPEED_INVALID;
3450 lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3451 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3452 if (lc->supported & SUPPORTED_Autoneg) {
3453 lc->advertising = lc->supported;
3454 lc->autoneg = AUTONEG_ENABLE;
3455 lc->requested_fc |= PAUSE_AUTONEG;
3457 lc->advertising = 0;
3458 lc->autoneg = AUTONEG_DISABLE;
3463 * mc7_calc_size - calculate MC7 memory size
3464 * @cfg: the MC7 configuration
3466 * Calculates the size of an MC7 memory in bytes from the value of its
3467 * configuration register.
3469 static unsigned int mc7_calc_size(u32 cfg)
3471 unsigned int width = G_WIDTH(cfg);
3472 unsigned int banks = !!(cfg & F_BKS) + 1;
3473 unsigned int org = !!(cfg & F_ORG) + 1;
3474 unsigned int density = G_DEN(cfg);
3475 unsigned int MBs = ((256 << density) * banks) / (org << width);
3480 static void mc7_prep(struct adapter *adapter, struct mc7 *mc7,
3481 unsigned int base_addr, const char *name)
3485 mc7->adapter = adapter;
3487 mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3488 cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3489 mc7->size = mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
3490 mc7->width = G_WIDTH(cfg);
3493 void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
3495 mac->adapter = adapter;
3496 mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
3499 if (adapter->params.rev == 0 && uses_xaui(adapter)) {
3500 t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
3501 is_10G(adapter) ? 0x2901c04 : 0x2301c04);
3502 t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
3507 void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
3509 u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);
3511 mi1_init(adapter, ai);
3512 t3_write_reg(adapter, A_I2C_CFG, /* set for 80KHz */
3513 V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
3514 t3_write_reg(adapter, A_T3DBG_GPIO_EN,
3515 ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
3516 t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
3517 t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));
3519 if (adapter->params.rev == 0 || !uses_xaui(adapter))
3522 /* Enable MAC clocks so we can access the registers */
3523 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3524 t3_read_reg(adapter, A_XGM_PORT_CFG);
3526 val |= F_CLKDIVRESET_;
3527 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3528 t3_read_reg(adapter, A_XGM_PORT_CFG);
3529 t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
3530 t3_read_reg(adapter, A_XGM_PORT_CFG);
3534 * Reset the adapter.
3535 * Older PCIe cards lose their config space during reset, PCI-X
3538 int t3_reset_adapter(struct adapter *adapter)
3540 int i, save_and_restore_pcie =
3541 adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
3544 if (save_and_restore_pcie)
3545 pci_save_state(adapter->pdev);
3546 t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
3549 * Delay. Give Some time to device to reset fully.
3550 * XXX The delay time should be modified.
3552 for (i = 0; i < 10; i++) {
3554 pci_read_config_word(adapter->pdev, 0x00, &devid);
3555 if (devid == 0x1425)
3559 if (devid != 0x1425)
3562 if (save_and_restore_pcie)
3563 pci_restore_state(adapter->pdev);
3567 static int init_parity(struct adapter *adap)
3571 if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
3574 for (err = i = 0; !err && i < 16; i++)
3575 err = clear_sge_ctxt(adap, i, F_EGRESS);
3576 for (i = 0xfff0; !err && i <= 0xffff; i++)
3577 err = clear_sge_ctxt(adap, i, F_EGRESS);
3578 for (i = 0; !err && i < SGE_QSETS; i++)
3579 err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
3583 t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
3584 for (i = 0; i < 4; i++)
3585 for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
3586 t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
3587 F_IBQDBGWR | V_IBQDBGQID(i) |
3588 V_IBQDBGADDR(addr));
3589 err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
3590 F_IBQDBGBUSY, 0, 2, 1);
3598 * Initialize adapter SW state for the various HW modules, set initial values
3599 * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
3602 int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
3606 unsigned int i, j = -1;
3608 get_pci_mode(adapter, &adapter->params.pci);
3610 adapter->params.info = ai;
3611 adapter->params.nports = ai->nports;
3612 adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
3613 adapter->params.linkpoll_period = 0;
3614 adapter->params.stats_update_period = is_10G(adapter) ?
3615 MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
3616 adapter->params.pci.vpd_cap_addr =
3617 pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
3618 ret = get_vpd_params(adapter, &adapter->params.vpd);
3622 if (reset && t3_reset_adapter(adapter))
3625 t3_sge_prep(adapter, &adapter->params.sge);
3627 if (adapter->params.vpd.mclk) {
3628 struct tp_params *p = &adapter->params.tp;
3630 mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
3631 mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
3632 mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
3634 p->nchan = ai->nports;
3635 p->pmrx_size = t3_mc7_size(&adapter->pmrx);
3636 p->pmtx_size = t3_mc7_size(&adapter->pmtx);
3637 p->cm_size = t3_mc7_size(&adapter->cm);
3638 p->chan_rx_size = p->pmrx_size / 2; /* only 1 Rx channel */
3639 p->chan_tx_size = p->pmtx_size / p->nchan;
3640 p->rx_pg_size = 64 * 1024;
3641 p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
3642 p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
3643 p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
3644 p->ntimer_qs = p->cm_size >= (128 << 20) ||
3645 adapter->params.rev > 0 ? 12 : 6;
3648 adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
3649 t3_mc7_size(&adapter->pmtx) &&
3650 t3_mc7_size(&adapter->cm);
3652 if (is_offload(adapter)) {
3653 adapter->params.mc5.nservers = DEFAULT_NSERVERS;
3654 adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
3655 DEFAULT_NFILTERS : 0;
3656 adapter->params.mc5.nroutes = 0;
3657 t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
3659 init_mtus(adapter->params.mtus);
3660 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3663 early_hw_init(adapter, ai);
3664 ret = init_parity(adapter);
3668 for_each_port(adapter, i) {
3670 const struct port_type_info *pti;
3671 struct port_info *p = adap2pinfo(adapter, i);
3673 while (!adapter->params.vpd.port_type[++j])
3676 pti = &port_types[adapter->params.vpd.port_type[j]];
3677 if (!pti->phy_prep) {
3678 CH_ALERT(adapter, "Invalid port type index %d\n",
3679 adapter->params.vpd.port_type[j]);
3683 ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
3687 mac_prep(&p->mac, adapter, j);
3690 * The VPD EEPROM stores the base Ethernet address for the
3691 * card. A port's address is derived from the base by adding
3692 * the port's index to the base's low octet.
3694 memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
3695 hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
3697 memcpy(adapter->port[i]->dev_addr, hw_addr,
3699 memcpy(adapter->port[i]->perm_addr, hw_addr,
3701 init_link_config(&p->link_config, p->phy.caps);
3702 p->phy.ops->power_down(&p->phy, 1);
3703 if (!(p->phy.caps & SUPPORTED_IRQ))
3704 adapter->params.linkpoll_period = 10;
3710 void t3_led_ready(struct adapter *adapter)
3712 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
3716 int t3_replay_prep_adapter(struct adapter *adapter)
3718 const struct adapter_info *ai = adapter->params.info;
3719 unsigned int i, j = -1;
3722 early_hw_init(adapter, ai);
3723 ret = init_parity(adapter);
3727 for_each_port(adapter, i) {
3728 const struct port_type_info *pti;
3729 struct port_info *p = adap2pinfo(adapter, i);
3731 while (!adapter->params.vpd.port_type[++j])
3734 pti = &port_types[adapter->params.vpd.port_type[j]];
3735 ret = pti->phy_prep(&p->phy, adapter, p->phy.addr, NULL);
3738 p->phy.ops->power_down(&p->phy, 1);