2 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35 #include "firmware_exports.h"
38 * t3_wait_op_done_val - wait until an operation is completed
39 * @adapter: the adapter performing the operation
40 * @reg: the register to check for completion
41 * @mask: a single-bit field within @reg that indicates completion
42 * @polarity: the value of the field when the operation is completed
43 * @attempts: number of check iterations
44 * @delay: delay in usecs between iterations
45 * @valp: where to store the value of the register at completion time
47 * Wait until an operation is completed by checking a bit in a register
48 * up to @attempts times. If @valp is not NULL the value of the register
49 * at the time it indicated completion is stored there. Returns 0 if the
50 * operation completes and -EAGAIN otherwise.
53 int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
54 int polarity, int attempts, int delay, u32 *valp)
57 u32 val = t3_read_reg(adapter, reg);
59 if (!!(val & mask) == polarity) {
72 * t3_write_regs - write a bunch of registers
73 * @adapter: the adapter to program
74 * @p: an array of register address/register value pairs
75 * @n: the number of address/value pairs
76 * @offset: register address offset
78 * Takes an array of register address/register value pairs and writes each
79 * value to the corresponding register. Register addresses are adjusted
80 * by the supplied offset.
82 void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
83 int n, unsigned int offset)
86 t3_write_reg(adapter, p->reg_addr + offset, p->val);
92 * t3_set_reg_field - set a register field to a value
93 * @adapter: the adapter to program
94 * @addr: the register address
95 * @mask: specifies the portion of the register to modify
96 * @val: the new value for the register field
98 * Sets a register field specified by the supplied mask to the
101 void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
104 u32 v = t3_read_reg(adapter, addr) & ~mask;
106 t3_write_reg(adapter, addr, v | val);
107 t3_read_reg(adapter, addr); /* flush */
111 * t3_read_indirect - read indirectly addressed registers
113 * @addr_reg: register holding the indirect address
114 * @data_reg: register holding the value of the indirect register
115 * @vals: where the read register values are stored
116 * @start_idx: index of first indirect register to read
117 * @nregs: how many indirect registers to read
119 * Reads registers that are accessed indirectly through an address/data
122 static void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
123 unsigned int data_reg, u32 *vals,
124 unsigned int nregs, unsigned int start_idx)
127 t3_write_reg(adap, addr_reg, start_idx);
128 *vals++ = t3_read_reg(adap, data_reg);
134 * t3_mc7_bd_read - read from MC7 through backdoor accesses
135 * @mc7: identifies MC7 to read from
136 * @start: index of first 64-bit word to read
137 * @n: number of 64-bit words to read
138 * @buf: where to store the read result
140 * Read n 64-bit words from MC7 starting at word start, using backdoor
143 int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
146 static const int shift[] = { 0, 0, 16, 24 };
147 static const int step[] = { 0, 32, 16, 8 };
149 unsigned int size64 = mc7->size / 8; /* # of 64-bit words */
150 struct adapter *adap = mc7->adapter;
152 if (start >= size64 || start + n > size64)
155 start *= (8 << mc7->width);
160 for (i = (1 << mc7->width) - 1; i >= 0; --i) {
164 t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
165 t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
166 val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
167 while ((val & F_BUSY) && attempts--)
168 val = t3_read_reg(adap,
169 mc7->offset + A_MC7_BD_OP);
173 val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
174 if (mc7->width == 0) {
175 val64 = t3_read_reg(adap,
178 val64 |= (u64) val << 32;
181 val >>= shift[mc7->width];
182 val64 |= (u64) val << (step[mc7->width] * i);
194 static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
196 u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
197 u32 val = F_PREEN | V_CLKDIV(clkdiv);
199 t3_write_reg(adap, A_MI1_CFG, val);
202 #define MDIO_ATTEMPTS 20
205 * MI1 read/write operations for clause 22 PHYs.
207 static int t3_mi1_read(struct adapter *adapter, int phy_addr, int mmd_addr,
208 int reg_addr, unsigned int *valp)
211 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
216 mutex_lock(&adapter->mdio_lock);
217 t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
218 t3_write_reg(adapter, A_MI1_ADDR, addr);
219 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
220 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
222 *valp = t3_read_reg(adapter, A_MI1_DATA);
223 mutex_unlock(&adapter->mdio_lock);
227 static int t3_mi1_write(struct adapter *adapter, int phy_addr, int mmd_addr,
228 int reg_addr, unsigned int val)
231 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
236 mutex_lock(&adapter->mdio_lock);
237 t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
238 t3_write_reg(adapter, A_MI1_ADDR, addr);
239 t3_write_reg(adapter, A_MI1_DATA, val);
240 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
241 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
242 mutex_unlock(&adapter->mdio_lock);
246 static const struct mdio_ops mi1_mdio_ops = {
252 * Performs the address cycle for clause 45 PHYs.
253 * Must be called with the MDIO_LOCK held.
255 static int mi1_wr_addr(struct adapter *adapter, int phy_addr, int mmd_addr,
258 u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
260 t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
261 t3_write_reg(adapter, A_MI1_ADDR, addr);
262 t3_write_reg(adapter, A_MI1_DATA, reg_addr);
263 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
264 return t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
269 * MI1 read/write operations for indirect-addressed PHYs.
271 static int mi1_ext_read(struct adapter *adapter, int phy_addr, int mmd_addr,
272 int reg_addr, unsigned int *valp)
276 mutex_lock(&adapter->mdio_lock);
277 ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
279 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
280 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
283 *valp = t3_read_reg(adapter, A_MI1_DATA);
285 mutex_unlock(&adapter->mdio_lock);
289 static int mi1_ext_write(struct adapter *adapter, int phy_addr, int mmd_addr,
290 int reg_addr, unsigned int val)
294 mutex_lock(&adapter->mdio_lock);
295 ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
297 t3_write_reg(adapter, A_MI1_DATA, val);
298 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
299 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
302 mutex_unlock(&adapter->mdio_lock);
306 static const struct mdio_ops mi1_mdio_ext_ops = {
/**
 *	t3_mdio_change_bits - modify the value of a PHY register
 *	@phy: the PHY to operate on
 *	@mmd: the device address
 *	@reg: the register address
 *	@clear: what part of the register value to mask off
 *	@set: what part of the register value to set
 *
 *	Changes the value of a PHY register by applying a mask to its current
 *	value and ORing the result with a new value.
 */
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
			unsigned int set)
{
	int ret;
	unsigned int val;

	ret = mdio_read(phy, mmd, reg, &val);
	if (!ret) {
		val &= ~clear;
		ret = mdio_write(phy, mmd, reg, val | set);
	}
	return ret;
}
337 * t3_phy_reset - reset a PHY block
338 * @phy: the PHY to operate on
339 * @mmd: the device address of the PHY block to reset
340 * @wait: how long to wait for the reset to complete in 1ms increments
342 * Resets a PHY block and optionally waits for the reset to complete.
343 * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
346 int t3_phy_reset(struct cphy *phy, int mmd, int wait)
351 err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
356 err = mdio_read(phy, mmd, MII_BMCR, &ctl);
362 } while (ctl && --wait);
368 * t3_phy_advertise - set the PHY advertisement registers for autoneg
369 * @phy: the PHY to operate on
370 * @advert: bitmap of capabilities the PHY should advertise
372 * Sets a 10/100/1000 PHY's advertisement registers to advertise the
373 * requested capabilities.
375 int t3_phy_advertise(struct cphy *phy, unsigned int advert)
378 unsigned int val = 0;
380 err = mdio_read(phy, 0, MII_CTRL1000, &val);
384 val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
385 if (advert & ADVERTISED_1000baseT_Half)
386 val |= ADVERTISE_1000HALF;
387 if (advert & ADVERTISED_1000baseT_Full)
388 val |= ADVERTISE_1000FULL;
390 err = mdio_write(phy, 0, MII_CTRL1000, val);
395 if (advert & ADVERTISED_10baseT_Half)
396 val |= ADVERTISE_10HALF;
397 if (advert & ADVERTISED_10baseT_Full)
398 val |= ADVERTISE_10FULL;
399 if (advert & ADVERTISED_100baseT_Half)
400 val |= ADVERTISE_100HALF;
401 if (advert & ADVERTISED_100baseT_Full)
402 val |= ADVERTISE_100FULL;
403 if (advert & ADVERTISED_Pause)
404 val |= ADVERTISE_PAUSE_CAP;
405 if (advert & ADVERTISED_Asym_Pause)
406 val |= ADVERTISE_PAUSE_ASYM;
407 return mdio_write(phy, 0, MII_ADVERTISE, val);
411 * t3_phy_advertise_fiber - set fiber PHY advertisement register
412 * @phy: the PHY to operate on
413 * @advert: bitmap of capabilities the PHY should advertise
415 * Sets a fiber PHY's advertisement register to advertise the
416 * requested capabilities.
418 int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert)
420 unsigned int val = 0;
422 if (advert & ADVERTISED_1000baseT_Half)
423 val |= ADVERTISE_1000XHALF;
424 if (advert & ADVERTISED_1000baseT_Full)
425 val |= ADVERTISE_1000XFULL;
426 if (advert & ADVERTISED_Pause)
427 val |= ADVERTISE_1000XPAUSE;
428 if (advert & ADVERTISED_Asym_Pause)
429 val |= ADVERTISE_1000XPSE_ASYM;
430 return mdio_write(phy, 0, MII_ADVERTISE, val);
434 * t3_set_phy_speed_duplex - force PHY speed and duplex
435 * @phy: the PHY to operate on
436 * @speed: requested PHY speed
437 * @duplex: requested PHY duplex
439 * Force a 10/100/1000 PHY's speed and duplex. This also disables
440 * auto-negotiation except for GigE, where auto-negotiation is mandatory.
442 int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
447 err = mdio_read(phy, 0, MII_BMCR, &ctl);
452 ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
453 if (speed == SPEED_100)
454 ctl |= BMCR_SPEED100;
455 else if (speed == SPEED_1000)
456 ctl |= BMCR_SPEED1000;
459 ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
460 if (duplex == DUPLEX_FULL)
461 ctl |= BMCR_FULLDPLX;
463 if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
464 ctl |= BMCR_ANENABLE;
465 return mdio_write(phy, 0, MII_BMCR, ctl);
468 int t3_phy_lasi_intr_enable(struct cphy *phy)
470 return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 1);
473 int t3_phy_lasi_intr_disable(struct cphy *phy)
475 return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 0);
478 int t3_phy_lasi_intr_clear(struct cphy *phy)
482 return mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &val);
485 int t3_phy_lasi_intr_handler(struct cphy *phy)
488 int err = mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &status);
492 return (status & 1) ? cphy_cause_link_change : 0;
/*
 * Per-board capability/configuration table for the supported T3 boards,
 * indexed by board id (see t3_get_adapter_info below).  Each entry carries
 * GPIO output-enable/output-value masks, per-port GPIO interrupt sources,
 * the supported-link-mode flags, the MDIO ops vector, and the board name.
 *
 * NOTE(review): this table has been garbled by extraction -- the leading
 * initializer fields of each entry (everything before the GPIO flag words)
 * are missing and every line still carries a stray numeric prefix.  Restore
 * from the pristine driver source before this can compile.
 */
495 static const struct adapter_info t3_adap_info[] = {
497  F_GPIO2_OEN | F_GPIO4_OEN |
498  F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
499  &mi1_mdio_ops, "Chelsio PE9000"},
501  F_GPIO2_OEN | F_GPIO4_OEN |
502  F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
503  &mi1_mdio_ops, "Chelsio T302"},
505  F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
506  F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
507  { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
508  &mi1_mdio_ext_ops, "Chelsio T310"},
510  F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
511  F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
512  F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
513  { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
514  &mi1_mdio_ext_ops, "Chelsio T320"},
518  F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
519  F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
520  { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
521  &mi1_mdio_ext_ops, "Chelsio T310" },
525 * Return the adapter_info structure with a given index. Out-of-range indices
528 const struct adapter_info *t3_get_adapter_info(unsigned int id)
530 return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
/* Per-port-type entry: constructor that binds a PHY driver to a port. */
struct port_type_info {
	int (*phy_prep)(struct cphy *phy, struct adapter *adapter,
			int phy_addr, const struct mdio_ops *ops);
};
538 static const struct port_type_info port_types[] = {
540 { t3_ael1002_phy_prep },
541 { t3_vsc8211_phy_prep },
543 { t3_xaui_direct_phy_prep },
544 { t3_ael2005_phy_prep },
545 { t3_qt2045_phy_prep },
546 { t3_ael1006_phy_prep },
/* Declare one VPD-R keyword entry: 2-byte keyword, length byte, data. */
#define VPD_ENTRY(name, len) \
	u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
554 * Partial EEPROM Vital Product Data structure. Includes only the ID and
563 VPD_ENTRY(pn, 16); /* part number */
564 VPD_ENTRY(ec, 16); /* EC level */
565 VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
566 VPD_ENTRY(na, 12); /* MAC address base */
567 VPD_ENTRY(cclk, 6); /* core clock */
568 VPD_ENTRY(mclk, 6); /* mem clock */
569 VPD_ENTRY(uclk, 6); /* uP clk */
570 VPD_ENTRY(mdc, 6); /* MDIO clk */
571 VPD_ENTRY(mt, 2); /* mem timing */
572 VPD_ENTRY(xaui0cfg, 6); /* XAUI0 config */
573 VPD_ENTRY(xaui1cfg, 6); /* XAUI1 config */
574 VPD_ENTRY(port0, 2); /* PHY0 complex */
575 VPD_ENTRY(port1, 2); /* PHY1 complex */
576 VPD_ENTRY(port2, 2); /* PHY2 complex */
577 VPD_ENTRY(port3, 2); /* PHY3 complex */
578 VPD_ENTRY(rv, 1); /* csum */
579 u32 pad; /* for multiple-of-4 sizing and alignment */
582 #define EEPROM_MAX_POLL 40
583 #define EEPROM_STAT_ADDR 0x4000
584 #define VPD_BASE 0xc00
587 * t3_seeprom_read - read a VPD EEPROM location
588 * @adapter: adapter to read
589 * @addr: EEPROM address
590 * @data: where to store the read data
592 * Read a 32-bit word from a location in VPD EEPROM using the card's PCI
593 * VPD ROM capability. A zero is written to the flag bit when the
594 * addres is written to the control register. The hardware device will
595 * set the flag to 1 when 4 bytes have been read into the data register.
597 int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data)
600 int attempts = EEPROM_MAX_POLL;
602 unsigned int base = adapter->params.pci.vpd_cap_addr;
604 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
607 pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
610 pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
611 } while (!(val & PCI_VPD_ADDR_F) && --attempts);
613 if (!(val & PCI_VPD_ADDR_F)) {
614 CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
617 pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, &v);
618 *data = cpu_to_le32(v);
623 * t3_seeprom_write - write a VPD EEPROM location
624 * @adapter: adapter to write
625 * @addr: EEPROM address
626 * @data: value to write
628 * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
629 * VPD ROM capability.
631 int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data)
634 int attempts = EEPROM_MAX_POLL;
635 unsigned int base = adapter->params.pci.vpd_cap_addr;
637 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
640 pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
642 pci_write_config_word(adapter->pdev,base + PCI_VPD_ADDR,
643 addr | PCI_VPD_ADDR_F);
646 pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
647 } while ((val & PCI_VPD_ADDR_F) && --attempts);
649 if (val & PCI_VPD_ADDR_F) {
650 CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
657 * t3_seeprom_wp - enable/disable EEPROM write protection
658 * @adapter: the adapter
659 * @enable: 1 to enable write protection, 0 to disable it
661 * Enables or disables write protection on the serial EEPROM.
663 int t3_seeprom_wp(struct adapter *adapter, int enable)
665 return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
/*
 * Convert a character holding a hex digit to a number.
 * Assumes @c is a valid hex digit ('0'-'9', 'a'-'f', 'A'-'F').
 */
static unsigned int hex2int(unsigned char c)
{
	return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
}
677 * get_vpd_params - read VPD parameters from VPD EEPROM
678 * @adapter: adapter to read
679 * @p: where to store the parameters
681 * Reads card parameters stored in VPD EEPROM.
683 static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
689 * Card information is normally at VPD_BASE but some early cards had
692 ret = t3_seeprom_read(adapter, VPD_BASE, (__le32 *)&vpd);
695 addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;
697 for (i = 0; i < sizeof(vpd); i += 4) {
698 ret = t3_seeprom_read(adapter, addr + i,
699 (__le32 *)((u8 *)&vpd + i));
704 p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
705 p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
706 p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
707 p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
708 p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
709 memcpy(p->sn, vpd.sn_data, SERNUM_LEN);
711 /* Old eeproms didn't have port information */
712 if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
713 p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
714 p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
716 p->port_type[0] = hex2int(vpd.port0_data[0]);
717 p->port_type[1] = hex2int(vpd.port1_data[0]);
718 p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
719 p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
722 for (i = 0; i < 6; i++)
723 p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
724 hex2int(vpd.na_data[2 * i + 1]);
/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 5,	/* max retries for SF1 operations */
	SF_SEC_SIZE = 64 * 1024,	/* serial flash sector size */
	SF_SIZE = SF_SEC_SIZE * 8,	/* serial flash size */

	/* flash command opcodes */
	SF_PROG_PAGE = 2,	/* program page */
	SF_WR_DISABLE = 4,	/* disable writes */
	SF_RD_STATUS = 5,	/* read status register */
	SF_WR_ENABLE = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_ERASE_SECTOR = 0xd8,	/* erase sector */

	FW_FLASH_BOOT_ADDR = 0x70000,	/* start address of FW in flash */
	FW_VERS_ADDR = 0x7fffc,	/* flash address holding FW version */
	FW_MIN_SIZE = 8		/* at least version and csum */
};
748 * sf1_read - read data from the serial flash
749 * @adapter: the adapter
750 * @byte_cnt: number of bytes to read
751 * @cont: whether another operation will be chained
752 * @valp: where to store the read data
754 * Reads up to 4 bytes of data from the serial flash. The location of
755 * the read needs to be specified prior to calling this by issuing the
756 * appropriate commands to the serial flash.
758 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
763 if (!byte_cnt || byte_cnt > 4)
765 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
767 t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
768 ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
770 *valp = t3_read_reg(adapter, A_SF_DATA);
775 * sf1_write - write data to the serial flash
776 * @adapter: the adapter
777 * @byte_cnt: number of bytes to write
778 * @cont: whether another operation will be chained
779 * @val: value to write
781 * Writes up to 4 bytes of data to the serial flash. The location of
782 * the write needs to be specified prior to calling this by issuing the
783 * appropriate commands to the serial flash.
785 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
788 if (!byte_cnt || byte_cnt > 4)
790 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
792 t3_write_reg(adapter, A_SF_DATA, val);
793 t3_write_reg(adapter, A_SF_OP,
794 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
795 return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
799 * flash_wait_op - wait for a flash operation to complete
800 * @adapter: the adapter
801 * @attempts: max number of polls of the status register
802 * @delay: delay between polls in ms
804 * Wait for a flash operation to complete by polling the status register.
806 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
812 if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
813 (ret = sf1_read(adapter, 1, 0, &status)) != 0)
825 * t3_read_flash - read words from serial flash
826 * @adapter: the adapter
827 * @addr: the start address for the read
828 * @nwords: how many 32-bit words to read
829 * @data: where to store the read data
830 * @byte_oriented: whether to store data as bytes or as words
832 * Read the specified number of 32-bit words from the serial flash.
833 * If @byte_oriented is set the read data is stored as a byte array
834 * (i.e., big-endian), otherwise as 32-bit words in the platform's
837 int t3_read_flash(struct adapter *adapter, unsigned int addr,
838 unsigned int nwords, u32 *data, int byte_oriented)
842 if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
845 addr = swab32(addr) | SF_RD_DATA_FAST;
847 if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
848 (ret = sf1_read(adapter, 1, 1, data)) != 0)
851 for (; nwords; nwords--, data++) {
852 ret = sf1_read(adapter, 4, nwords > 1, data);
856 *data = htonl(*data);
862 * t3_write_flash - write up to a page of data to the serial flash
863 * @adapter: the adapter
864 * @addr: the start address to write
865 * @n: length of data to write
866 * @data: the data to write
868 * Writes up to a page of data (256 bytes) to the serial flash starting
869 * at the given address.
871 static int t3_write_flash(struct adapter *adapter, unsigned int addr,
872 unsigned int n, const u8 *data)
876 unsigned int i, c, left, val, offset = addr & 0xff;
878 if (addr + n > SF_SIZE || offset + n > 256)
881 val = swab32(addr) | SF_PROG_PAGE;
883 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
884 (ret = sf1_write(adapter, 4, 1, val)) != 0)
887 for (left = n; left; left -= c) {
889 for (val = 0, i = 0; i < c; ++i)
890 val = (val << 8) + *data++;
892 ret = sf1_write(adapter, c, c != left, val);
896 if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
899 /* Read the page to verify the write succeeded */
900 ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
904 if (memcmp(data - n, (u8 *) buf + offset, n))
910 * t3_get_tp_version - read the tp sram version
911 * @adapter: the adapter
912 * @vers: where to place the version
914 * Reads the protocol sram version from sram.
916 int t3_get_tp_version(struct adapter *adapter, u32 *vers)
920 /* Get version loaded in SRAM */
921 t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
922 ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
927 *vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);
933 * t3_check_tpsram_version - read the tp sram version
934 * @adapter: the adapter
936 * Reads the protocol sram version from flash.
938 int t3_check_tpsram_version(struct adapter *adapter)
942 unsigned int major, minor;
944 if (adapter->params.rev == T3_REV_A)
948 ret = t3_get_tp_version(adapter, &vers);
952 major = G_TP_VERSION_MAJOR(vers);
953 minor = G_TP_VERSION_MINOR(vers);
955 if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
958 CH_ERR(adapter, "found wrong TP version (%u.%u), "
959 "driver compiled for version %d.%d\n", major, minor,
960 TP_VERSION_MAJOR, TP_VERSION_MINOR);
966 * t3_check_tpsram - check if provided protocol SRAM
967 * is compatible with this driver
968 * @adapter: the adapter
969 * @tp_sram: the firmware image to write
972 * Checks if an adapter's tp sram is compatible with the driver.
973 * Returns 0 if the versions are compatible, a negative error otherwise.
975 int t3_check_tpsram(struct adapter *adapter, const u8 *tp_sram,
980 const __be32 *p = (const __be32 *)tp_sram;
982 /* Verify checksum */
983 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
985 if (csum != 0xffffffff) {
986 CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
/* Firmware image type encoded in the flash version word. */
enum fw_version_type {
	FW_VERSION_N3,
	FW_VERSION_T3
};
1000 * t3_get_fw_version - read the firmware version
1001 * @adapter: the adapter
1002 * @vers: where to place the version
1004 * Reads the FW version from flash.
1006 int t3_get_fw_version(struct adapter *adapter, u32 *vers)
1008 return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
1012 * t3_check_fw_version - check if the FW is compatible with this driver
1013 * @adapter: the adapter
1015 * Checks if an adapter's FW is compatible with the driver. Returns 0
1016 * if the versions are compatible, a negative error otherwise.
1018 int t3_check_fw_version(struct adapter *adapter)
1022 unsigned int type, major, minor;
1024 ret = t3_get_fw_version(adapter, &vers);
1028 type = G_FW_VERSION_TYPE(vers);
1029 major = G_FW_VERSION_MAJOR(vers);
1030 minor = G_FW_VERSION_MINOR(vers);
1032 if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
1033 minor == FW_VERSION_MINOR)
1035 else if (major != FW_VERSION_MAJOR || minor < FW_VERSION_MINOR)
1036 CH_WARN(adapter, "found old FW minor version(%u.%u), "
1037 "driver compiled for version %u.%u\n", major, minor,
1038 FW_VERSION_MAJOR, FW_VERSION_MINOR);
1040 CH_WARN(adapter, "found newer FW version(%u.%u), "
1041 "driver compiled for version %u.%u\n", major, minor,
1042 FW_VERSION_MAJOR, FW_VERSION_MINOR);
1049 * t3_flash_erase_sectors - erase a range of flash sectors
1050 * @adapter: the adapter
1051 * @start: the first sector to erase
1052 * @end: the last sector to erase
1054 * Erases the sectors in the given range.
1056 static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
1058 while (start <= end) {
1061 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
1062 (ret = sf1_write(adapter, 4, 0,
1063 SF_ERASE_SECTOR | (start << 8))) != 0 ||
1064 (ret = flash_wait_op(adapter, 5, 500)) != 0)
1072 * t3_load_fw - download firmware
1073 * @adapter: the adapter
1074 * @fw_data: the firmware image to write
1077 * Write the supplied firmware image to the card's serial flash.
1078 * The FW image has the following sections: @size - 8 bytes of code and
1079 * data, followed by 4 bytes of FW version, followed by the 32-bit
1080 * 1's complement checksum of the whole image.
1082 int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
1086 const __be32 *p = (const __be32 *)fw_data;
1087 int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;
1089 if ((size & 3) || size < FW_MIN_SIZE)
1091 if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
1094 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1095 csum += ntohl(p[i]);
1096 if (csum != 0xffffffff) {
1097 CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
1102 ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
1106 size -= 8; /* trim off version and checksum */
1107 for (addr = FW_FLASH_BOOT_ADDR; size;) {
1108 unsigned int chunk_size = min(size, 256U);
1110 ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
1115 fw_data += chunk_size;
1119 ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
1122 CH_ERR(adapter, "firmware download failed, error %d\n", ret);
1126 #define CIM_CTL_BASE 0x2000
1129 * t3_cim_ctl_blk_read - read a block from CIM control region
1131 * @adap: the adapter
1132 * @addr: the start address within the CIM control region
1133 * @n: number of words to read
1134 * @valp: where to store the result
1136 * Reads a block of 4-byte words from the CIM control region.
1138 int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
1139 unsigned int n, unsigned int *valp)
1143 if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
1146 for ( ; !ret && n--; addr += 4) {
1147 t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
1148 ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
1151 *valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
1158 * t3_link_changed - handle interface link changes
1159 * @adapter: the adapter
1160 * @port_id: the port index that changed link state
1162 * Called when a port's link settings change to propagate the new values
1163 * to the associated PHY and MAC. After performing the common tasks it
1164 * invokes an OS-specific handler.
1166 void t3_link_changed(struct adapter *adapter, int port_id)
1168 int link_ok, speed, duplex, fc;
1169 struct port_info *pi = adap2pinfo(adapter, port_id);
1170 struct cphy *phy = &pi->phy;
1171 struct cmac *mac = &pi->mac;
1172 struct link_config *lc = &pi->link_config;
1174 phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
1176 if (lc->requested_fc & PAUSE_AUTONEG)
1177 fc &= lc->requested_fc;
1179 fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1181 if (link_ok == lc->link_ok && speed == lc->speed &&
1182 duplex == lc->duplex && fc == lc->fc)
1183 return; /* nothing changed */
1185 if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
1186 uses_xaui(adapter)) {
1189 t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
1190 link_ok ? F_TXACTENABLE | F_RXEN : 0);
1192 lc->link_ok = link_ok;
1193 lc->speed = speed < 0 ? SPEED_INVALID : speed;
1194 lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
1196 if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
1197 /* Set MAC speed, duplex, and flow control to match PHY. */
1198 t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
1202 t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
/**
 * t3_link_start - apply link configuration to MAC/PHY
 * @phy: the PHY to setup
 * @mac: the MAC to setup
 * @lc: the requested link configuration
 *
 * Set up a port's MAC and PHY according to a desired link configuration.
 * - If the PHY can auto-negotiate first decide what to advertise, then
 *   enable/disable auto-negotiation as desired, and reset.
 * - If the PHY does not auto-negotiate just reset it.
 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *   otherwise do it later based on the outcome of auto-negotiation.
 */
int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
	/* Only the RX/TX pause bits of the requested flow control apply. */
	unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	if (lc->supported & SUPPORTED_Autoneg) {
		/* Rebuild the pause advertisement from the requested FC bits. */
		lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
		lc->advertising |= ADVERTISED_Asym_Pause;
		lc->advertising |= ADVERTISED_Pause;
		phy->ops->advertise(phy, lc->advertising);
		if (lc->autoneg == AUTONEG_DISABLE) {
			/* Forced speed/duplex: program MAC and PHY directly. */
			lc->speed = lc->requested_speed;
			lc->duplex = lc->requested_duplex;
			lc->fc = (unsigned char)fc;
			t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
			/* Also disables autoneg */
			phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
			phy->ops->autoneg_enable(phy);
	/* PHY without autoneg: set MAC flow control now, then reset the PHY. */
	t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
	lc->fc = (unsigned char)fc;
	phy->ops->reset(phy, 0);
/**
 * t3_set_vlan_accel - control HW VLAN extraction
 * @adapter: the adapter
 * @ports: bitmap of adapter ports to operate on
 * @on: enable (1) or disable (0) HW VLAN extraction
 *
 * Enables or disables HW extraction of VLAN tags for the given port.
 */
void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
	/* Per-port enable bits live in TP_OUT_CONFIG starting at
	 * S_VLANEXTRACTIONENABLE; only the bits in @ports are touched. */
	t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
			 ports << S_VLANEXTRACTIONENABLE,
			 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
	/* One entry of an interrupt-action table consumed by
	 * t3_handle_intr_status(); the table is terminated by mask == 0. */
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal;	/* whether the condition reported is fatal */
/**
 * t3_handle_intr_status - table driven interrupt handler
 * @adapter: the adapter that generated the interrupt
 * @reg: the interrupt status register to process
 * @mask: a mask to apply to the interrupt status
 * @acts: table of interrupt actions
 * @stats: statistics counters tracking interrupt occurrences
 *
 * A table driven interrupt handler that applies a set of masks to an
 * interrupt status word and performs the corresponding actions if the
 * interrupts described by the mask have occurred. The actions include
 * optionally printing a warning or alert message, and optionally
 * incrementing a stat counter. The table is terminated by an entry
 * specifying mask 0. Returns the number of fatal interrupt conditions.
 */
static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
				 const struct intr_info *acts,
				 unsigned long *stats)
	unsigned int status = t3_read_reg(adapter, reg) & mask;
	for (; acts->mask; ++acts) {
		if (!(status & acts->mask))
		/* Fatal conditions are logged at alert level, the rest as
		 * warnings (only if a message is supplied). */
		CH_ALERT(adapter, "%s (0x%x)\n",
			 acts->msg, status & acts->mask);
		} else if (acts->msg)
			CH_WARN(adapter, "%s (0x%x)\n",
				acts->msg, status & acts->mask);
		if (acts->stat_idx >= 0)
			stats[acts->stat_idx]++;
	if (status)	/* clear processed interrupts */
		t3_write_reg(adapter, reg, status);
/* Per-module interrupt masks: the cause/enable bits each handler services.
 * These values are written to the corresponding *_INT_ENABLE registers in
 * t3_intr_enable(). */
#define SGE_INTR_MASK (F_RSPQDISABLED | \
		       F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
		       F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
		       F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
		       V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
		       F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
		       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
		       V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
		       F_TXFIFO_UNDERRUN | F_RXFIFO_OVERFLOW)
/* MSI-X table parity is deliberately excluded from both PCI masks (note the
 * commented-out V_MSIXPARERR terms). */
#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
			F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
			F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
			F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
			V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
			V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
			F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
			/* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
			F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
			F_TXPARERR | V_BISTERR(M_BISTERR))
#define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
			 F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
			 F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
#define ULPTX_INTR_MASK 0xfc
#define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
			 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
			 F_ZERO_SWITCH_ERROR)
#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
		       F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
		       F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
		       F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
		       F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
		       F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
		       F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
		       F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
/* ICSPI_FRM_ERR/OESPI_FRM_ERR and IESPI_FRM_ERR/OCSPI_FRM_ERR are defined
 * further down in this file; that is fine because these masks are only
 * expanded at their points of use. */
#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
			V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
			V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
			V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
			V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
		       V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
		       V_RXTPPARERRENB(M_RXTPPARERRENB) | \
		       V_MCAPARERRENB(M_MCAPARERRENB))
/* Top-level (PL) interrupt concentrator: one bit per source module. */
#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
		      F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
		      F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
		      F_MPS0 | F_CPL_SWITCH)
/*
 * Interrupt handler for the PCIX1 module.
 */
static void pci_intr_handler(struct adapter *adapter)
	/* Action table: everything is fatal except correctable ECC errors,
	 * which are only counted in STAT_PCI_CORR_ECC. */
	static const struct intr_info pcix1_intr_info[] = {
		{F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
		{F_SIGTARABT, "PCI signaled target abort", -1, 1},
		{F_RCVTARABT, "PCI received target abort", -1, 1},
		{F_RCVMSTABT, "PCI received master abort", -1, 1},
		{F_SIGSYSERR, "PCI signaled system error", -1, 1},
		{F_DETPARERR, "PCI detected parity error", -1, 1},
		{F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
		{F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
		{F_RCVSPLCMPERR, "PCI received split completion error", -1,
		{F_DETCORECCERR, "PCI correctable ECC error",
		 STAT_PCI_CORR_ECC, 0},
		{F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
		{F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
		{V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
		{V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
		{V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
		{V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
	if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
				  pcix1_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
/*
 * Interrupt handler for the PCIE module.
 */
static void pcie_intr_handler(struct adapter *adapter)
	static const struct intr_info pcie_intr_info[] = {
		{F_PEXERR, "PCI PEX error", -1, 1},
		 "PCI unexpected split completion DMA read error", -1, 1},
		 "PCI unexpected split completion DMA command error", -1, 1},
		{F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
		{F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
		{F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
		{F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
		{V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
		 "PCI MSI-X table/PBA parity error", -1, 1},
		{F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1},
		{F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1},
		{F_RXPARERR, "PCI Rx parity error", -1, 1},
		{F_TXPARERR, "PCI Tx parity error", -1, 1},
		{V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
	/* On a PEX error also log the detailed PEX error code register. */
	if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
		CH_ALERT(adapter, "PEX error code 0x%x\n",
			 t3_read_reg(adapter, A_PCIE_PEX_ERR));
	if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
				  pcie_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
1439 * TP interrupt handler.
1441 static void tp_intr_handler(struct adapter *adapter)
1443 static const struct intr_info tp_intr_info[] = {
1444 {0xffffff, "TP parity error", -1, 1},
1445 {0x1000000, "TP out of Rx pages", -1, 1},
1446 {0x2000000, "TP out of Tx pages", -1, 1},
1450 static struct intr_info tp_intr_info_t3c[] = {
1451 {0x1fffffff, "TP parity error", -1, 1},
1452 {F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1},
1453 {F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1},
1457 if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
1458 adapter->params.rev < T3_REV_C ?
1459 tp_intr_info : tp_intr_info_t3c, NULL))
1460 t3_fatal_err(adapter);
/*
 * CIM interrupt handler.
 */
static void cim_intr_handler(struct adapter *adapter)
	/* All listed CIM conditions are treated as fatal. */
	static const struct intr_info cim_intr_info[] = {
		{F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
		{F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
		{F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
		{F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
		{F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
		{F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
		{F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
		{F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
		{F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
		{F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
		{F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
		{F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
		{F_DRAMPARERR, "CIM DRAM parity error", -1, 1},
		{F_ICACHEPARERR, "CIM icache parity error", -1, 1},
		{F_DCACHEPARERR, "CIM dcache parity error", -1, 1},
		{F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1},
		{F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1},
		{F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1},
		{F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1},
		{F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1},
		{F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1},
		{F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1},
		{F_ITAGPARERR, "CIM itag parity error", -1, 1},
		{F_DTAGPARERR, "CIM dtag parity error", -1, 1},
	if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
				  cim_intr_info, NULL))
		t3_fatal_err(adapter);
/*
 * ULP RX interrupt handler.
 */
static void ulprx_intr_handler(struct adapter *adapter)
	static const struct intr_info ulprx_intr_info[] = {
		{F_PARERRDATA, "ULP RX data parity error", -1, 1},
		{F_PARERRPCMD, "ULP RX command parity error", -1, 1},
		{F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1},
		{F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1},
		{F_ARBFPERR, "ULP RX ArbF parity error", -1, 1},
		{F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1},
		{F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1},
		{F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1},
	if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
				  ulprx_intr_info, NULL))
		t3_fatal_err(adapter);
/*
 * ULP TX interrupt handler.
 */
static void ulptx_intr_handler(struct adapter *adapter)
	/* PBL out-of-bounds conditions are only counted (non-fatal);
	 * parity errors (mask 0xfc) are fatal. */
	static const struct intr_info ulptx_intr_info[] = {
		{F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
		 STAT_ULP_CH0_PBL_OOB, 0},
		{F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
		 STAT_ULP_CH1_PBL_OOB, 0},
		{0xfc, "ULP TX parity error", -1, 1},
	if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
				  ulptx_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
/* Aggregate framing-error bits for the PM TX SPI interfaces, used in
 * pmtx_intr_handler() and PMTX_INTR_MASK. */
#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
	F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
	F_ICSPI1_TX_FRAMING_ERROR)
#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
	F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
	F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
/*
 * PM TX interrupt handler.
 */
static void pmtx_intr_handler(struct adapter *adapter)
	static const struct intr_info pmtx_intr_info[] = {
		{F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
		{ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
		{OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
		{V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
		 "PMTX ispi parity error", -1, 1},
		{V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
		 "PMTX ospi parity error", -1, 1},
	if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
				  pmtx_intr_info, NULL))
		t3_fatal_err(adapter);
/* Aggregate framing-error bits for the PM RX SPI interfaces, used in
 * pmrx_intr_handler() and PMRX_INTR_MASK. */
#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
	F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
	F_IESPI1_TX_FRAMING_ERROR)
#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
	F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
	F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
/*
 * PM RX interrupt handler.
 */
static void pmrx_intr_handler(struct adapter *adapter)
	static const struct intr_info pmrx_intr_info[] = {
		{F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
		{IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
		{OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
		{V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
		 "PMRX ispi parity error", -1, 1},
		{V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
		 "PMRX ospi parity error", -1, 1},
	if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
				  pmrx_intr_info, NULL))
		t3_fatal_err(adapter);
/*
 * CPL switch interrupt handler.
 */
static void cplsw_intr_handler(struct adapter *adapter)
	static const struct intr_info cplsw_intr_info[] = {
		{F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1},
		{F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1},
		{F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
		{F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
		{F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
		{F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
	if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
				  cplsw_intr_info, NULL))
		t3_fatal_err(adapter);
/*
 * MPS interrupt handler.
 */
static void mps_intr_handler(struct adapter *adapter)
	/* Any of the 9 MPS parity-error bits is fatal. */
	static const struct intr_info mps_intr_info[] = {
		{0x1ff, "MPS parity error", -1, 1},
	if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
				  mps_intr_info, NULL))
		t3_fatal_err(adapter);
/* MC7 conditions that are fatal: uncorrectable, parity and address errors. */
#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)

/*
 * MC7 interrupt handler.
 */
static void mc7_intr_handler(struct mc7 *mc7)
	struct adapter *adapter = mc7->adapter;
	u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);
	/* Correctable ECC error: count it and warn with address/data. */
	mc7->stats.corr_err++;
	CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
		"data 0x%x 0x%x 0x%x\n", mc7->name,
		t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
		t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
		t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
		t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
	/* Uncorrectable error: count it and alert with address/data. */
	mc7->stats.uncorr_err++;
	CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
		 "data 0x%x 0x%x 0x%x\n", mc7->name,
		 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
		 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
		 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
		 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
	mc7->stats.parity_err++;
	CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
		 mc7->name, G_PE(cause));
	/* The MC7 error-address register only exists on rev > 0 parts. */
	if (adapter->params.rev > 0)
		addr = t3_read_reg(adapter,
				   mc7->offset + A_MC7_ERR_ADDR);
	mc7->stats.addr_err++;
	CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
	if (cause & MC7_INTR_FATAL)
		t3_fatal_err(adapter);
	/* Acknowledge the cause bits we just handled. */
	t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
/* XGMAC conditions that are fatal: TX/RX FIFO parity errors. */
#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
			V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
/*
 * XGMAC interrupt handler.
 */
static int mac_intr_handler(struct adapter *adap, unsigned int idx)
	struct cmac *mac = &adap2pinfo(adap, idx)->mac;
	u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset);
	if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
		mac->stats.tx_fifo_parity_err++;
		CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
	if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
		mac->stats.rx_fifo_parity_err++;
		CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
	/* The remaining conditions are only counted, not logged. */
	if (cause & F_TXFIFO_UNDERRUN)
		mac->stats.tx_fifo_urun++;
	if (cause & F_RXFIFO_OVERFLOW)
		mac->stats.rx_fifo_ovfl++;
	if (cause & V_SERDES_LOS(M_SERDES_LOS))
		mac->stats.serdes_signal_loss++;
	if (cause & F_XAUIPCSCTCERR)
		mac->stats.xaui_pcs_ctc_err++;
	if (cause & F_XAUIPCSALIGNCHANGE)
		mac->stats.xaui_pcs_align_change++;
	/* Acknowledge the cause bits, then escalate fatal conditions. */
	t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
	if (cause & XGM_INTR_FATAL)
/*
 * Interrupt handler for PHY events.
 */
int t3_phy_intr_handler(struct adapter *adapter)
	u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
	for_each_port(adapter, i) {
		struct port_info *p = adap2pinfo(adapter, i);
		/* Skip ports whose PHY cannot raise interrupts. */
		if (!(p->phy.caps & SUPPORTED_IRQ))
		/* Each port's PHY interrupt arrives on its own GPIO bit. */
		if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) {
			int phy_cause = p->phy.ops->intr_handler(&p->phy);
			if (phy_cause & cphy_cause_link_change)
				t3_link_changed(adapter, i);
			if (phy_cause & cphy_cause_fifo_error)
				p->phy.fifo_errors++;
			if (phy_cause & cphy_cause_module_change)
				t3_os_phymod_changed(adapter, i);
	/* Acknowledge the GPIO cause bits we just serviced. */
	t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
/*
 * T3 slow path (non-data) interrupt handler.
 */
int t3_slow_intr_handler(struct adapter *adapter)
	u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
	/* Only service sources we have actually enabled. */
	cause &= adapter->slow_intr_mask;
	if (cause & F_PCIM0) {
		/* Dispatch to the PCIe or PCI-X handler depending on bus type. */
		if (is_pcie(adapter))
			pcie_intr_handler(adapter);
		pci_intr_handler(adapter);
	t3_sge_err_intr_handler(adapter);
	if (cause & F_MC7_PMRX)
		mc7_intr_handler(&adapter->pmrx);
	if (cause & F_MC7_PMTX)
		mc7_intr_handler(&adapter->pmtx);
	if (cause & F_MC7_CM)
		mc7_intr_handler(&adapter->cm);
	cim_intr_handler(adapter);
	tp_intr_handler(adapter);
	if (cause & F_ULP2_RX)
		ulprx_intr_handler(adapter);
	if (cause & F_ULP2_TX)
		ulptx_intr_handler(adapter);
	if (cause & F_PM1_RX)
		pmrx_intr_handler(adapter);
	if (cause & F_PM1_TX)
		pmtx_intr_handler(adapter);
	if (cause & F_CPL_SWITCH)
		cplsw_intr_handler(adapter);
	mps_intr_handler(adapter);
	t3_mc5_intr_handler(&adapter->mc5);
	if (cause & F_XGMAC0_0)
		mac_intr_handler(adapter, 0);
	if (cause & F_XGMAC0_1)
		mac_intr_handler(adapter, 1);
	if (cause & F_T3DBG)
		t3_os_ext_intr_handler(adapter);

	/* Clear the interrupts just processed. */
	t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
	t3_read_reg(adapter, A_PL_INT_CAUSE0);	/* flush */
/* Build the GPIO interrupt-enable mask: one bit per port whose PHY supports
 * interrupts and has a GPIO interrupt line assigned. */
static unsigned int calc_gpio_intr(struct adapter *adap)
	unsigned int i, gpi_intr = 0;
	for_each_port(adap, i)
		if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) &&
		    adapter_info(adap)->gpio_intr[i])
			gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i];
/**
 * t3_intr_enable - enable interrupts
 * @adapter: the adapter whose interrupts should be enabled
 *
 * Enable interrupts by setting the interrupt enable registers of the
 * various HW modules and then enabling the top-level interrupt
 */
void t3_intr_enable(struct adapter *adapter)
	/* (register, value) pairs written in one batch below. */
	static const struct addr_val_pair intr_en_avp[] = {
		{A_SG_INT_ENABLE, SGE_INTR_MASK},
		{A_MC7_INT_ENABLE, MC7_INTR_MASK},
		{A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
		{A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
		{A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
		{A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
		{A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
		{A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
		{A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
		{A_MPS_INT_ENABLE, MPS_INTR_MASK},
	adapter->slow_intr_mask = PL_INTR_MASK;
	t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
	/* TP enable mask differs between T3C and earlier revisions. */
	t3_write_reg(adapter, A_TP_INT_ENABLE,
		     adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);
	/* rev > 0 parts support additional CPL/ULPTX conditions. */
	if (adapter->params.rev > 0) {
		t3_write_reg(adapter, A_CPL_INTR_ENABLE,
			     CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
		t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
			     ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
			     F_PBL_BOUND_ERR_CH1);
		t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
		t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
	t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter));
	if (is_pcie(adapter))
		t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
		t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
	/* Finally open the top-level concentrator. */
	t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
/**
 * t3_intr_disable - disable a card's interrupts
 * @adapter: the adapter whose interrupts should be disabled
 *
 * Disable interrupts. We only disable the top-level interrupt
 * concentrator and the SGE data interrupts.
 */
void t3_intr_disable(struct adapter *adapter)
	t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
	/* Remember that nothing is enabled so the slow handler ignores all. */
	adapter->slow_intr_mask = 0;
/**
 * t3_intr_clear - clear all interrupts
 * @adapter: the adapter whose interrupts should be cleared
 *
 * Clears all interrupts.
 */
void t3_intr_clear(struct adapter *adapter)
	/* Cause registers to wipe; each is written with all-ones below. */
	static const unsigned int cause_reg_addr[] = {
		A_SG_RSPQ_FL_STATUS,
		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
		A_CIM_HOST_INT_CAUSE,
	/* Clear PHY and MAC interrupts for each port. */
	for_each_port(adapter, i)
		t3_port_intr_clear(adapter, i);
	for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
		t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
	/* PCIe parts additionally latch PEX error details in a separate reg. */
	if (is_pcie(adapter))
		t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
	t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
	t3_read_reg(adapter, A_PL_INT_CAUSE0);	/* flush */
/**
 * t3_port_intr_enable - enable port-specific interrupts
 * @adapter: associated adapter
 * @idx: index of port whose interrupts should be enabled
 *
 * Enable port-specific (i.e., MAC and PHY) interrupts for the given
 */
void t3_port_intr_enable(struct adapter *adapter, int idx)
	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
	t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
	t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx));	/* flush */
	phy->ops->intr_enable(phy);
/**
 * t3_port_intr_disable - disable port-specific interrupts
 * @adapter: associated adapter
 * @idx: index of port whose interrupts should be disabled
 *
 * Disable port-specific (i.e., MAC and PHY) interrupts for the given
 */
void t3_port_intr_disable(struct adapter *adapter, int idx)
	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
	t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
	t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx));	/* flush */
	phy->ops->intr_disable(phy);
/**
 * t3_port_intr_clear - clear port-specific interrupts
 * @adapter: associated adapter
 * @idx: index of port whose interrupts to clear
 *
 * Clear port-specific (i.e., MAC and PHY) interrupts for the given
 */
void t3_port_intr_clear(struct adapter *adapter, int idx)
	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
	/* Writing all-ones acknowledges every pending MAC cause bit. */
	t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
	t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx));	/* flush */
	phy->ops->intr_clear(phy);
/* Poll iterations allowed for an SGE context command to complete. */
#define SG_CONTEXT_CMD_ATTEMPTS 100

/**
 * t3_sge_write_context - write an SGE context
 * @adapter: the adapter
 * @id: the context id
 * @type: the context type
 *
 * Program an SGE context with the values already loaded in the
 * CONTEXT_DATA? registers.
 */
static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
	/* All-ones masks: every bit of the DATA registers gets written. */
	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
	/* Wait for BUSY to deassert; returns 0 on success. */
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
/* Zero all four CONTEXT_DATA registers and commit them as context @id of
 * the given type, i.e. clear out that SGE context. */
static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
	t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
	t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
	t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
	t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
	return t3_sge_write_context(adap, id, type);
/**
 * t3_sge_init_ecntxt - initialize an SGE egress context
 * @adapter: the adapter to configure
 * @id: the context id
 * @gts_enable: whether to enable GTS for the context
 * @type: the egress context type
 * @respq: associated response queue
 * @base_addr: base address of queue
 * @size: number of queue entries
 * @gen: initial generation value for the context
 * @cidx: consumer pointer
 *
 * Initialize an SGE egress context and make it ready for use. If the
 * platform allows concurrent context operations, the caller is
 * responsible for appropriate locking.
 */
int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
		       enum sge_context_type type, int respq, u64 base_addr,
		       unsigned int size, unsigned int token, int gen,
	/* Offload queues start with no credits; others get FW_WR_NUM. */
	unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
	if (base_addr & 0xfff)	/* must be 4K aligned */
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
		     V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
		     V_EC_BASE_LO(base_addr & 0xffff));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
		     V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
		     V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
	return t3_sge_write_context(adapter, id, F_EGRESS);
/**
 * t3_sge_init_flcntxt - initialize an SGE free-buffer list context
 * @adapter: the adapter to configure
 * @id: the context id
 * @gts_enable: whether to enable GTS for the context
 * @base_addr: base address of queue
 * @size: number of queue entries
 * @bsize: size of each buffer for this queue
 * @cong_thres: threshold to signal congestion to upstream producers
 * @gen: initial generation value for the context
 * @cidx: consumer pointer
 *
 * Initialize an SGE free list context and make it ready for use. The
 * caller is responsible for ensuring only one context operation occurs
 */
int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
			int gts_enable, u64 base_addr, unsigned int size,
			unsigned int bsize, unsigned int cong_thres, int gen,
	if (base_addr & 0xfff)	/* must be 4K aligned */
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
		     V_FL_BASE_HI((u32) base_addr) |
		     V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
		     V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
		     V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
	/* Buffer size straddles DATA2/DATA3: high bits go in DATA3. */
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
		     V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
		     V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
	return t3_sge_write_context(adapter, id, F_FREELIST);
/**
 * t3_sge_init_rspcntxt - initialize an SGE response queue context
 * @adapter: the adapter to configure
 * @id: the context id
 * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
 * @base_addr: base address of queue
 * @size: number of queue entries
 * @fl_thres: threshold for selecting the normal or jumbo free list
 * @gen: initial generation value for the context
 * @cidx: consumer pointer
 *
 * Initialize an SGE response queue context and make it ready for use.
 * The caller is responsible for ensuring only one context operation
 */
int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
			 int irq_vec_idx, u64 base_addr, unsigned int size,
			 unsigned int fl_thres, int gen, unsigned int cidx)
	unsigned int intr = 0;
	if (base_addr & 0xfff)	/* must be 4K aligned */
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
	/* irq_vec_idx < 0 means this queue raises no interrupt. */
	if (irq_vec_idx >= 0)
		intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
		     V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
	return t3_sge_write_context(adapter, id, F_RESPONSEQ);
/**
 * t3_sge_init_cqcntxt - initialize an SGE completion queue context
 * @adapter: the adapter to configure
 * @id: the context id
 * @base_addr: base address of queue
 * @size: number of queue entries
 * @rspq: response queue for async notifications
 * @ovfl_mode: CQ overflow mode
 * @credits: completion queue credits
 * @credit_thres: the credit threshold
 *
 * Initialize an SGE completion queue context and make it ready for use.
 * The caller is responsible for ensuring only one context operation
 */
int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
			unsigned int size, int rspq, int ovfl_mode,
			unsigned int credits, unsigned int credit_thres)
	if (base_addr & 0xfff)	/* must be 4K aligned */
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
		     V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
		     V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
		     V_CQ_ERR(ovfl_mode));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
		     V_CQ_CREDIT_THRES(credit_thres));
	return t3_sge_write_context(adapter, id, F_CQ);
/**
 * t3_sge_enable_ecntxt - enable/disable an SGE egress context
 * @adapter: the adapter
 * @id: the egress context id
 * @enable: enable (1) or disable (0) the context
 *
 * Enable or disable an SGE egress context. The caller is responsible for
 * ensuring only one context operation occurs at a time.
 */
int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
	/* Masks select only the EC_VALID bit so nothing else is modified. */
	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
/**
 * t3_sge_disable_fl - disable an SGE free-buffer list
 * @adapter: the adapter
 * @id: the free list context id
 *
 * Disable an SGE free-buffer list. The caller is responsible for
 * ensuring only one context operation occurs at a time.
 */
int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
	/* Only the FL_SIZE field is written; setting it to 0 disables. */
	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2223 * t3_sge_disable_rspcntxt - disable an SGE response queue
2224 * @adapter: the adapter
2225 * @id: the response queue context id
2227 * Disable an SGE response queue. The caller is responsible for
2228 * ensuring only one context operation occurs at a time.
/* Same masked-write idiom as t3_sge_disable_fl: zero the queue's size
 * field (CQ_SIZE in MASK0/DATA0) to render the response queue inert. */
2230 int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
2232 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2235 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2236 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2237 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2238 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2239 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2240 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2241 V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2242 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2243 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2247 * t3_sge_disable_cqcntxt - disable an SGE completion queue
2248 * @adapter: the adapter
2249 * @id: the completion queue context id
2251 * Disable an SGE completion queue. The caller is responsible for
2252 * ensuring only one context operation occurs at a time.
/* Identical pattern to t3_sge_disable_rspcntxt, but targeting a CQ
 * context (F_CQ): zero the CQ_SIZE field via a masked context write. */
2254 int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
2256 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2259 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2260 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2261 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2262 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2263 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2264 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2265 V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2266 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2267 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2271 * t3_sge_cqcntxt_op - perform an operation on a completion queue context
2272 * @adapter: the adapter
2273 * @id: the context id
2274 * @op: the operation to perform
2276 * Perform the selected operation on an SGE completion queue context.
2277 * The caller is responsible for ensuring only one context operation
/*
 * The credit count rides in the upper half of DATA0.  For opcodes 2-6 the
 * function returns the CQ index: rev > 0 silicon reports it directly in
 * the command status word, while rev 0 requires an explicit context read
 * (opcode 0) and extraction from DATA0.
 */
2280 int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
2281 unsigned int credits)
2285 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2288 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2289 t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2290 V_CONTEXT(id) | F_CQ);
2291 if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2292 0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
2295 if (op >= 2 && op < 7) {
2296 if (adapter->params.rev > 0)
2297 return G_CQ_INDEX(val);
/* rev 0 workaround: read the context back to obtain the index */
2299 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2300 V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2301 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2302 F_CONTEXT_CMD_BUSY, 0,
2303 SG_CONTEXT_CMD_ATTEMPTS, 1))
2305 return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2311 * t3_sge_read_context - read an SGE context
2312 * @type: the context type
2313 * @adapter: the adapter
2314 * @id: the context id
2315 * @data: holds the retrieved context
2317 * Read an SGE egress context. The caller is responsible for ensuring
2318 * only one context operation occurs at a time.
/* Common helper behind the t3_sge_read_* wrappers below: issue a context
 * read (opcode 0) for the given type/id, wait for completion, then copy
 * the four DATA registers into @data. */
2320 static int t3_sge_read_context(unsigned int type, struct adapter *adapter,
2321 unsigned int id, u32 data[4])
2323 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2326 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2327 V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
2328 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
2329 SG_CONTEXT_CMD_ATTEMPTS, 1))
2331 data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
2332 data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
2333 data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
2334 data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
2339 * t3_sge_read_ecntxt - read an SGE egress context
2340 * @adapter: the adapter
2341 * @id: the context id
2342 * @data: holds the retrieved context
2344 * Read an SGE egress context. The caller is responsible for ensuring
2345 * only one context operation occurs at a time.
/* Thin wrapper: select the EGRESS context type.  An id range check exists
 * in the original source but its line is elided from this extract. */
2347 int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4])
2351 return t3_sge_read_context(F_EGRESS, adapter, id, data);
2355 * t3_sge_read_cq - read an SGE CQ context
2356 * @adapter: the adapter
2357 * @id: the context id
2358 * @data: holds the retrieved context
2360 * Read an SGE CQ context. The caller is responsible for ensuring
2361 * only one context operation occurs at a time.
/* Thin wrapper: select the completion-queue (F_CQ) context type. */
2363 int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4])
2367 return t3_sge_read_context(F_CQ, adapter, id, data);
2371 * t3_sge_read_fl - read an SGE free-list context
2372 * @adapter: the adapter
2373 * @id: the context id
2374 * @data: holds the retrieved context
2376 * Read an SGE free-list context. The caller is responsible for ensuring
2377 * only one context operation occurs at a time.
/* Two free lists per queue set, hence the SGE_QSETS * 2 bound. */
2379 int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4])
2381 if (id >= SGE_QSETS * 2)
2383 return t3_sge_read_context(F_FREELIST, adapter, id, data);
2387 * t3_sge_read_rspq - read an SGE response queue context
2388 * @adapter: the adapter
2389 * @id: the context id
2390 * @data: holds the retrieved context
2392 * Read an SGE response queue context. The caller is responsible for
2393 * ensuring only one context operation occurs at a time.
/* One response queue per queue set, hence the SGE_QSETS bound. */
2395 int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4])
2397 if (id >= SGE_QSETS)
2399 return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2403 * t3_config_rss - configure Rx packet steering
2404 * @adapter: the adapter
2405 * @rss_config: RSS settings (written to TP_RSS_CONFIG)
2406 * @cpus: values for the CPU lookup table (0xff terminated)
2407 * @rspq: values for the response queue lookup table (0xffff terminated)
2409 * Programs the receive packet steering logic. @cpus and @rspq provide
2410 * the values for the CPU and response queue lookup tables. If they
2411 * provide fewer values than the size of the tables the supplied values
2412 * are used repeatedly until the tables are fully populated.
2414 void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
2415 const u8 * cpus, const u16 *rspq)
2417 int i, j, cpu_idx = 0, q_idx = 0;
/* Each LKP table entry packs two 6-bit CPU values (one per byte). */
2420 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2423 for (j = 0; j < 2; ++j) {
2424 val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
/* hitting the 0xff sentinel wraps cpu_idx back to the start
 * (reset line elided in this extract) so short arrays repeat */
2425 if (cpus[cpu_idx] == 0xff)
2428 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
/* MAP table: entry index in the upper half, rspq value in the lower;
 * the 0xffff sentinel likewise wraps q_idx for short arrays. */
2432 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2433 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2434 (i << 16) | rspq[q_idx++]);
2435 if (rspq[q_idx] == 0xffff)
2439 t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
2443 * t3_read_rss - read the contents of the RSS tables
2444 * @adapter: the adapter
2445 * @lkup: holds the contents of the RSS lookup table
2446 * @map: holds the contents of the RSS map table
2448 * Reads the contents of the receive packet steering tables.
/*
 * For each table a read command is written to the table register and the
 * result read back; bit 31 of the readback acts as a valid flag — a clear
 * bit aborts with an error (the return lines are elided in this extract).
 * NOTE(review): the written command values are on elided lines; confirm
 * the read-command encoding against the upstream driver.
 */
2450 int t3_read_rss(struct adapter *adapter, u8 * lkup, u16 *map)
2456 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2457 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
2459 val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
2460 if (!(val & 0x80000000))
2463 *lkup++ = (val >> 8);
2467 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2468 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2470 val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
2471 if (!(val & 0x80000000))
2479 * t3_tp_set_offload_mode - put TP in NIC/offload mode
2480 * @adap: the adapter
2481 * @enable: 1 to select offload mode, 0 for regular NIC
2483 * Switches TP to NIC/offload mode.
/* NICMODE is the inverse of offload mode, hence V_NICMODE(!enable).
 * The guard skips the write when offload mode is requested on a
 * non-offload-capable adapter. */
2485 void t3_tp_set_offload_mode(struct adapter *adap, int enable)
2487 if (is_offload(adap) || !enable)
2488 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2489 V_NICMODE(!enable));
2493 * pm_num_pages - calculate the number of pages of the payload memory
2494 * @mem_size: the size of the payload memory
2495 * @pg_size: the size of each payload memory page
2497 * Calculate the number of pages, each of the given size, that fit in a
2498 * memory of the specified size, respecting the HW requirement that the
2499 * number of pages must be a multiple of 24.
/* The rounding-down-to-a-multiple-of-24 step is on lines elided from
 * this extract; only the raw division is visible here. */
2501 static inline unsigned int pm_num_pages(unsigned int mem_size,
2502 unsigned int pg_size)
2504 unsigned int n = mem_size / pg_size;
/* Helper for partition_mem(): program a region base register and advance
 * the running allocation cursor.  The macro's final statement (advancing
 * `start` by `size`) is on a line elided from this extract. */
2509 #define mem_region(adap, start, size, reg) \
2510 t3_write_reg((adap), A_ ## reg, (start)); \
2514 * partition_mem - partition memory and configure TP memory settings
2515 * @adap: the adapter
2516 * @p: the TP parameters
2518 * Partitions context and payload memory and configures TP's memory
/*
 * Lays out context memory (CM) as a sequence of regions via mem_region(),
 * which advances the running cursor `m`.  Order matters: TCBs first, then
 * SGE egress/CQ contexts, timers, pstructs and the free-list regions,
 * with the CIM SDRAM window taking whatever remains.  Any CM space left
 * after sizing the tid pool is donated to the MC5 server region.
 */
2521 static void partition_mem(struct adapter *adap, const struct tp_params *p)
2523 unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
2524 unsigned int timers = 0, timers_shift = 22;
/* rev > 0 silicon scales the timer region with the number of tids;
 * the timers/timers_shift assignments are on elided lines */
2526 if (adap->params.rev > 0) {
2527 if (tids <= 16 * 1024) {
2530 } else if (tids <= 64 * 1024) {
2533 } else if (tids <= 256 * 1024) {
/* per-channel Rx size in the low half, Tx size in the high half */
2539 t3_write_reg(adap, A_TP_PMM_SIZE,
2540 p->chan_rx_size | (p->chan_tx_size >> 16));
2542 t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
2543 t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
2544 t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
2545 t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
2546 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
2548 t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
2549 t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
2550 t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
2552 pstructs = p->rx_num_pgs + p->tx_num_pgs;
2553 /* Add a bit of headroom and make multiple of 24 */
2555 pstructs -= pstructs % 24;
2556 t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
/* start laying out CM regions after the TCB array */
2558 m = tids * TCB_SIZE;
2559 mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
2560 mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
2561 t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
2562 m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
2563 mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
2564 mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
2565 mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
2566 mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
/* CIM SDRAM window must be 4KB aligned; it gets the rest of CM */
2568 m = (m + 4095) & ~0xfff;
2569 t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
2570 t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
2572 tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
2573 m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
2574 adap->params.mc5.nfilters - adap->params.mc5.nroutes;
/* surplus tid capacity is handed to the MC5 server pool */
2576 adap->params.mc5.nservers += m - tids;
/* Write a TP register through the PIO indirect-access window:
 * address into A_TP_PIO_ADDR, value into A_TP_PIO_DATA. */
2579 static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
2582 t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2583 t3_write_reg(adap, A_TP_PIO_DATA, val);
/*
 * One-time TP (Transport Processor) configuration after reset: global
 * offload/checksum options, default TCP options, delayed-ACK behavior,
 * congestion settings and scheduler weights.  Several writes are
 * revision-gated (rev > 0, T3_REV_C) to match silicon errata/features.
 */
2586 static void tp_config(struct adapter *adap, const struct tp_params *p)
2588 t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
2589 F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
2590 F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
2591 t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
2592 F_MTUENABLE | V_WINDOWSCALEMODE(1) |
2593 V_TIMESTAMPSMODE(0) | V_SACKMODE(1) | V_SACKRX(1));
2594 t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
2595 V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
2596 V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
2597 F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
2598 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
2599 F_IPV6ENABLE | F_NICMODE);
2600 t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
2601 t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
2602 t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
2603 adap->params.rev > 0 ? F_ENABLEESND :
2606 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2608 F_ENABLEOCSPIFULL |F_TXDEFERENABLE | F_HEARBEATDACK |
2609 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
2610 t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
2611 F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
2612 F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
/* written twice back-to-back; presumably a required settle sequence —
 * TODO(review): confirm against hardware documentation */
2613 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
2614 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);
2616 if (adap->params.rev > 0) {
2617 tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
2618 t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
2620 t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
2621 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
/* rev 0 falls back to fixed pacing */
2623 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
2625 if (adap->params.rev == T3_REV_C)
2626 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2627 V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
2628 V_TABLELATENCYDELTA(4));
2630 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
2631 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
2632 t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
2633 t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
2636 /* Desired TP timer resolution in usec */
2637 #define TP_TMR_RES 50
2639 /* TCP timer values in ms */
2640 #define TP_DACK_TIMER 50
2641 #define TP_RTO_MIN 250
2644 * tp_set_timers - set TP timing parameters
2645 * @adap: the adapter to set
2646 * @core_clk: the core clock frequency in Hz
2648 * Set TP's timing parameters, such as the various timer resolutions and
2649 * the TCP timer values.
2651 static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
/* resolutions are log2 clock dividers: tre targets TP_TMR_RES us ticks,
 * dack_re targets 200us, tstamp_re at least 1ms */
2653 unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
2654 unsigned int dack_re = fls(core_clk / 5000) - 1; /* 200us */
2655 unsigned int tstamp_re = fls(core_clk / 1000); /* 1ms, at least */
2656 unsigned int tps = core_clk >> tre; /* timer ticks per second */
2658 t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
2659 V_DELAYEDACKRESOLUTION(dack_re) |
2660 V_TIMESTAMPRESOLUTION(tstamp_re));
2661 t3_write_reg(adap, A_TP_DACK_TIMER,
2662 (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
/* exponential backoff shift table, packed four entries per register */
2663 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
2664 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
2665 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
2666 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
2667 t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
2668 V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
2669 V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
/* "N SECONDS" expands to N * tps, i.e. N seconds in timer ticks */
2672 #define SECONDS * tps
2674 t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
2675 t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
2676 t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
2677 t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
2678 t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
2679 t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
2680 t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
2681 t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
2682 t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
2688 * t3_tp_set_coalescing_size - set receive coalescing size
2689 * @adap: the adapter
2690 * @size: the receive coalescing size
2691 * @psh: whether a set PSH bit should deliver coalesced data
2693 * Set the receive coalescing size and PSH bit handling.
/* A size of 0 disables coalescing entirely (both enable bits are
 * cleared up front and only re-set when size is non-zero). */
2695 int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh)
2699 if (size > MAX_RX_COALESCING_LEN)
2702 val = t3_read_reg(adap, A_TP_PARA_REG3);
2703 val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
2706 val |= F_RXCOALESCEENABLE;
2708 val |= F_RXCOALESCEPSHEN;
2709 size = min(MAX_RX_COALESCING_LEN, size);
2710 t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
2711 V_MAXRXDATA(MAX_RX_COALESCING_LEN));
2713 t3_write_reg(adap, A_TP_PARA_REG3, val);
2718 * t3_tp_set_max_rxsize - set the max receive size
2719 * @adap: the adapter
2720 * @size: the max receive size
2722 * Set TP's max receive size. This is the limit that applies when
2723 * receive coalescing is disabled.
/* The same limit is programmed for both PM transfer-length channels. */
2725 void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
2727 t3_write_reg(adap, A_TP_PARA_REG7,
2728 V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
/* Populate the default MTU table.  The actual table entries are on lines
 * elided from this extract; only the rationale comment survives here. */
2731 static void init_mtus(unsigned short mtus[])
2734 * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
2735 * it can accomodate max size TCP/IP headers when SACK and timestamps
2736 * are enabled and still have at least 8 bytes of payload.
2757 * Initial congestion control parameters.
/* Fill the default alpha (a[]) and beta (b[]) congestion-control tables.
 * Entries for a[9..] and b[9..12] are on lines elided from this extract. */
2759 static void init_cong_ctrl(unsigned short *a, unsigned short *b)
2761 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2786 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2789 b[13] = b[14] = b[15] = b[16] = 3;
2790 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2791 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2796 /* The minimum additive increment value for the congestion control table */
2797 #define CC_MIN_INCR 2U
2800 * t3_load_mtus - write the MTU and congestion control HW tables
2801 * @adap: the adapter
2802 * @mtus: the unrestricted values for the MTU table
2803 * @alphs: the values for the congestion control alpha parameter
2804 * @beta: the values for the congestion control beta parameter
2805 * @mtu_cap: the maximum permitted effective MTU
2807 * Write the MTU table with the supplied MTUs capping each at &mtu_cap.
2808 * Update the high-speed congestion control table with the supplied alpha,
2811 void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
2812 unsigned short alpha[NCCTRL_WIN],
2813 unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
/* average packets per congestion window, one entry per window size */
2815 static const unsigned int avg_pkts[NCCTRL_WIN] = {
2816 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2817 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2818 28672, 40960, 57344, 81920, 114688, 163840, 229376
2823 for (i = 0; i < NMTUS; ++i) {
2824 unsigned int mtu = min(mtus[i], mtu_cap);
2825 unsigned int log2 = fls(mtu);
/* round log2(mtu) to the nearest power of two rather than truncating */
2827 if (!(mtu & ((1 << log2) >> 2))) /* round */
2829 t3_write_reg(adap, A_TP_MTU_TABLE,
2830 (i << 24) | (log2 << 16) | mtu);
/* per-window additive increment, scaled by alpha and floored at
 * CC_MIN_INCR (the max() second operand is on an elided line) */
2832 for (w = 0; w < NCCTRL_WIN; ++w) {
2835 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2838 t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
2839 (w << 16) | (beta[w] << 13) | inc);
2845 * t3_read_hw_mtus - returns the values in the HW MTU table
2846 * @adap: the adapter
2847 * @mtus: where to store the HW MTU values
2849 * Reads the HW MTU table.
/* Writing 0xff000000 | i selects table entry i for readback; the MTU
 * itself occupies the low 14 bits of the returned word. */
2851 void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS])
2855 for (i = 0; i < NMTUS; ++i) {
2858 t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
2859 val = t3_read_reg(adap, A_TP_MTU_TABLE);
2860 mtus[i] = val & 0x3fff;
2865 * t3_get_cong_cntl_tab - reads the congestion control table
2866 * @adap: the adapter
2867 * @incr: where to store the alpha values
2869 * Reads the additive increments programmed into the HW congestion
/* Readback command: 0xffff0000 selects read mode, mtu index in bits
 * 9:5, window index in bits 4:0.  The mask applied to the returned
 * increment value is on a line elided from this extract. */
2872 void t3_get_cong_cntl_tab(struct adapter *adap,
2873 unsigned short incr[NMTUS][NCCTRL_WIN])
2875 unsigned int mtu, w;
2877 for (mtu = 0; mtu < NMTUS; ++mtu)
2878 for (w = 0; w < NCCTRL_WIN; ++w) {
2879 t3_write_reg(adap, A_TP_CCTRL_TABLE,
2880 0xffff0000 | (mtu << 5) | w);
2881 incr[mtu][w] = t3_read_reg(adap, A_TP_CCTRL_TABLE) &
2887 * t3_tp_get_mib_stats - read TP's MIB counters
2888 * @adap: the adapter
2889 * @tps: holds the returned counter values
2891 * Returns the values of TP's MIB counters.
/* Bulk-reads the whole tp_mib_stats structure word by word through the
 * TP MIB indirect-access register pair, starting at index 0. */
2893 void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
2895 t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
2896 sizeof(*tps) / sizeof(u32), 0);
/* Program a ULP RX region's lower/upper limit registers from a running
 * cursor.  ulp_region() additionally advances the cursor (that trailing
 * statement is on a line elided from this extract); ulptx_region() is
 * the ULP TX counterpart and leaves the cursor to the last expression. */
2899 #define ulp_region(adap, name, start, len) \
2900 t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
2901 t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
2902 (start) + (len) - 1); \
2905 #define ulptx_region(adap, name, start, len) \
2906 t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
2907 t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
2908 (start) + (len) - 1)
/*
 * Carve the per-channel Rx payload memory into ULP regions (iSCSI, TDDP,
 * TPT, STAG, RQ, PBL).  Note PBL is programmed twice on purpose: once for
 * the ULP TX block and once for ULP RX — they are distinct register sets.
 */
2910 static void ulp_config(struct adapter *adap, const struct tp_params *p)
2912 unsigned int m = p->chan_rx_size;
2914 ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
2915 ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
2916 ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
2917 ulp_region(adap, STAG, m, p->chan_rx_size / 4);
2918 ulp_region(adap, RQ, m, p->chan_rx_size / 4);
2919 ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
2920 ulp_region(adap, PBL, m, p->chan_rx_size / 4);
2921 t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
2925 * t3_set_proto_sram - set the contents of the protocol sram
2926 * @adapter: the adapter
2927 * @data: the protocol image
2929 * Write the contents of the protocol SRAM.
/*
 * The image is consumed as big-endian 32-bit words, five per SRAM line
 * (FIELD5 down to FIELD1), then the line is committed by writing the line
 * index plus the go bit to FIELD0 and polling bit 0 for completion.
 */
2931 int t3_set_proto_sram(struct adapter *adap, const u8 *data)
2934 const __be32 *buf = (const __be32 *)data;
2936 for (i = 0; i < PROTO_SRAM_LINES; i++) {
2937 t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, be32_to_cpu(*buf++));
2938 t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, be32_to_cpu(*buf++));
2939 t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, be32_to_cpu(*buf++));
2940 t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, be32_to_cpu(*buf++));
2941 t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, be32_to_cpu(*buf++));
/* line index in bits 30:1, bit 31 triggers the write */
2943 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
2944 if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
2947 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0);
/*
 * Program one of TP's packet trace filters.  The 5-tuple (plus VLAN and
 * interface) is packed into four key words with a parallel set of mask
 * words; bit 28 of key[3] is the enable flag and bit 29 the invert flag.
 * filter_index selects the Rx (non-zero) or Tx (zero) filter bank, and
 * the key/mask pairs are written interleaved through the TP PIO window.
 */
2952 void t3_config_trace_filter(struct adapter *adapter,
2953 const struct trace_params *tp, int filter_index,
2954 int invert, int enable)
2956 u32 addr, key[4], mask[4];
2958 key[0] = tp->sport | (tp->sip << 16);
2959 key[1] = (tp->sip >> 16) | (tp->dport << 16);
2961 key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
2963 mask[0] = tp->sport_mask | (tp->sip_mask << 16);
2964 mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
2965 mask[2] = tp->dip_mask;
2966 mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
2969 key[3] |= (1 << 29);
2971 key[3] |= (1 << 28);
2973 addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
2974 tp_wr_indirect(adapter, addr++, key[0]);
2975 tp_wr_indirect(adapter, addr++, mask[0]);
2976 tp_wr_indirect(adapter, addr++, key[1]);
2977 tp_wr_indirect(adapter, addr++, mask[1]);
2978 tp_wr_indirect(adapter, addr++, key[2]);
2979 tp_wr_indirect(adapter, addr++, mask[2]);
2980 tp_wr_indirect(adapter, addr++, key[3]);
2981 tp_wr_indirect(adapter, addr, mask[3]);
/* read back to flush the posted writes to the device */
2982 t3_read_reg(adapter, A_TP_PIO_DATA);
2986 * t3_config_sched - configure a HW traffic scheduler
2987 * @adap: the adapter
2988 * @kbps: target rate in Kbps
2989 * @sched: the scheduler index
2991 * Configure a HW scheduler for the target rate
/*
 * Exhaustively searches (cpt, bpt) pairs — clocks-per-tick and
 * bytes-per-tick, each 1..255 — for the combination whose achieved rate
 * is closest to the target, then programs the selected pair into the
 * half-word of the rate-limit register belonging to scheduler @sched
 * (two schedulers share each 32-bit register).
 */
2993 int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
2995 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
2996 unsigned int clk = adap->params.vpd.cclk * 1000;
2997 unsigned int selected_cpt = 0, selected_bpt = 0;
3000 kbps *= 125; /* -> bytes */
3001 for (cpt = 1; cpt <= 255; cpt++) {
3003 bpt = (kbps + tps / 2) / tps;
3004 if (bpt > 0 && bpt <= 255) {
3006 delta = v >= kbps ? v - kbps : kbps - v;
3007 if (delta <= mindelta) {
/* once a candidate exists, a cpt with no valid bpt ends the search */
3012 } else if (selected_cpt)
3018 t3_write_reg(adap, A_TP_TM_PIO_ADDR,
3019 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
3020 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
/* odd schedulers live in the high half-word, even in the low */
3022 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3024 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3025 t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
/*
 * Post-reset TP bring-up: enable VLAN acceleration, and on offload-capable
 * adapters program the timers and kick the free-list initialization,
 * polling for its completion (timeout reported as an error).  Finally
 * release TP from reset.
 */
3029 static int tp_init(struct adapter *adap, const struct tp_params *p)
3034 t3_set_vlan_accel(adap, 3, 0);
3036 if (is_offload(adap)) {
3037 tp_set_timers(adap, adap->params.vpd.cclk * 1000);
3038 t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
3039 busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
3042 CH_ERR(adap, "TP initialization timed out\n");
3046 t3_write_reg(adap, A_TP_RESET, F_TPRESET);
/* Activate/deactivate MPS ports.  Rejects bits beyond the adapter's
 * actual port count, then maps the mask onto the PORTxACTIVE field. */
3050 int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
3052 if (port_mask & ~((1 << adap->params.nports) - 1))
3054 t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
3055 port_mask << S_PORT0ACTIVE);
3060 * Perform the bits of HW initialization that are dependent on the number
3061 * of available ports.
/*
 * Single-port: disable round-robin arbitration and enable only port 0.
 * Dual-port: enable round-robin ULP RX/TX arbitration, equal DMA
 * weights, both ports in MPS, split the PM1 Tx buffer between ports,
 * and map Tx mod-queues across both channels.
 */
3063 static void init_hw_for_avail_ports(struct adapter *adap, int nports)
3068 t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
3069 t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
3070 t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_TPTXPORT0EN |
3071 F_PORT0ACTIVE | F_ENFORCEPKT);
3072 t3_write_reg(adap, A_PM1_TX_CFG, 0xffffffff);
3074 t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
3075 t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
3076 t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
3077 V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
3078 t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
3079 F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
3081 t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
3082 t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
3083 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3084 V_TX_MOD_QUEUE_REQ_MAP(0xaa));
3085 for (i = 0; i < 16; i++)
3086 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
3087 (i << 16) | 0x1010);
/*
 * Calibrate the XGMAC's I/O impedance.  For XAUI the calibration is
 * retried up to 5 times, checking the fault/busy flags and, on success,
 * programming the measured impedance value back.  For RGMII fixed
 * pull-up/pull-down values are written and latched with IMPSETUPDATE.
 */
3091 static int calibrate_xgm(struct adapter *adapter)
3093 if (uses_xaui(adapter)) {
3096 for (i = 0; i < 5; ++i) {
3097 t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
/* first read flushes the posted write before sampling */
3098 t3_read_reg(adapter, A_XGM_XAUI_IMP);
3100 v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
3101 if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
3102 t3_write_reg(adapter, A_XGM_XAUI_IMP,
3103 V_XAUIIMP(G_CALIMP(v) >> 2));
3107 CH_ERR(adapter, "MAC calibration failed\n");
3110 t3_write_reg(adapter, A_XGM_RGMII_IMP,
3111 V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3112 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3113 F_XGM_IMPSETUPDATE);
/* T3B (rev > 0) variant of the RGMII impedance calibration: pulse
 * CALRESET, IMPSETUPDATE and CALUPDATE through their set/clear sequence
 * to latch the fixed pull-up/pull-down values.  XAUI boards need no
 * calibration on this revision. */
3118 static void calibrate_xgm_t3b(struct adapter *adapter)
3120 if (!uses_xaui(adapter)) {
3121 t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
3122 F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3123 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
3124 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
3125 F_XGM_IMPSETUPDATE);
3126 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3128 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
3129 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
/* DRAM timing parameters for one MC7 memory-controller speed grade,
 * indexed by mem_type in mc7_init(); RefCyc is further indexed by the
 * memory density read from the MC7 config register. */
3133 struct mc7_timing_params {
3134 unsigned char ActToPreDly;
3135 unsigned char ActToRdWrDly;
3136 unsigned char PreCyc;
3137 unsigned char RefCyc[5];
3138 unsigned char BkCyc;
3139 unsigned char WrToRdDly;
3140 unsigned char RdToWrDly;
3144 * Write a value to a register and check that the write completed. These
3145 * writes normally complete in a cycle or two, so one read should suffice.
3146 * The very first read exists to flush the posted write to the device.
/* Returns 0 when BUSY has cleared after one readback; otherwise logs
 * and reports a timeout (the error-return line is elided here). */
3148 static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
3150 t3_write_reg(adapter, addr, val);
3151 t3_read_reg(adapter, addr); /* flush */
3152 if (!(t3_read_reg(adapter, addr) & F_BUSY))
3154 CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
/*
 * Bring up one MC7 external-memory controller: read the geometry from its
 * config register, run I/O calibration, program DRAM timing, step through
 * the JEDEC mode-register initialization sequence, set the refresh
 * interval from the clock, enable ECC, run a full-range BIST pass, and
 * finally set RDY to open the controller for normal accesses.
 * mem_type indexes both the mode-register and timing tables.
 */
3158 static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
3160 static const unsigned int mc7_mode[] = {
3161 0x632, 0x642, 0x652, 0x432, 0x442
3163 static const struct mc7_timing_params mc7_timings[] = {
3164 {12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
3165 {12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
3166 {12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
3167 {9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
3168 {9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
3172 unsigned int width, density, slow, attempts;
3173 struct adapter *adapter = mc7->adapter;
3174 const struct mc7_timing_params *p = &mc7_timings[mem_type];
/* geometry/speed are strapped into the CFG register */
3179 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3180 slow = val & F_SLOW;
3181 width = G_WIDTH(val);
3182 density = G_DEN(val);
3184 t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
3185 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
/* single-shot calibration; all of BUSY/EN/FAULT must clear */
3189 t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
3190 t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
3192 if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
3193 (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
3194 CH_ERR(adapter, "%s MC7 calibration timed out\n",
3200 t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
3201 V_ACTTOPREDLY(p->ActToPreDly) |
3202 V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
3203 V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
3204 V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
3206 t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
3207 val | F_CLKEN | F_TERM150);
3208 t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
3211 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
/* JEDEC init: precharge, extended mode registers, then mode register */
3216 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3217 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
3218 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
3219 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3223 t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
3224 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
3228 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3229 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3230 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3231 wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
3232 mc7_mode[mem_type]) ||
3233 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
3234 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3237 /* clock value is in KHz */
3238 mc7_clock = mc7_clock * 7812 + mc7_clock / 2; /* ns */
3239 mc7_clock /= 1000000; /* KHz->MHz, ns->us */
3241 t3_write_reg(adapter, mc7->offset + A_MC7_REF,
3242 F_PERREFEN | V_PREREFDIV(mc7_clock));
3243 t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */
3245 t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
3246 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
3247 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
3248 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
3249 (mc7->size << width) - 1);
3250 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
3251 t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */
/* poll BIST completion (attempts initialization is on an elided line) */
3256 val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
3257 } while ((val & F_BUSY) && --attempts);
3259 CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
3263 /* Enable normal memory accesses. */
3264 t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
/*
 * Tune PCIe ack latency and replay timer for the negotiated link.
 * The lookup tables are indexed by [log2(link width)][payload size];
 * both values are adjusted by the fast-training sequence counts, and
 * rev 0 silicon uses a different ACKLAT field layout.  Finishes by
 * clearing stale PEX errors and enabling link-down reset handling.
 */
3271 static void config_pcie(struct adapter *adap)
3273 static const u16 ack_lat[4][6] = {
3274 {237, 416, 559, 1071, 2095, 4143},
3275 {128, 217, 289, 545, 1057, 2081},
3276 {73, 118, 154, 282, 538, 1050},
3277 {67, 107, 86, 150, 278, 534}
3279 static const u16 rpl_tmr[4][6] = {
3280 {711, 1248, 1677, 3213, 6285, 12429},
3281 {384, 651, 867, 1635, 3171, 6243},
3282 {219, 354, 462, 846, 1614, 3150},
3283 {201, 321, 258, 450, 834, 1602}
3287 unsigned int log2_width, pldsize;
3288 unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
3290 pci_read_config_word(adap->pdev,
3291 adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
3293 pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
3294 pci_read_config_word(adap->pdev,
3295 adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
3298 fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
/* rev 0 has no separate Rx count; reuse the Tx value */
3299 fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
3300 G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
3301 log2_width = fls(adap->params.pci.width) - 1;
3302 acklat = ack_lat[log2_width][pldsize];
3303 if (val & 1) /* check LOsEnable */
3304 acklat += fst_trn_tx * 4;
3305 rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
3307 if (adap->params.rev == 0)
3308 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
3309 V_T3A_ACKLAT(M_T3A_ACKLAT),
3310 V_T3A_ACKLAT(acklat));
3312 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
3315 t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
3316 V_REPLAYLMT(rpllmt));
3318 t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
3319 t3_set_reg_field(adap, A_PCIE_CFG, 0,
3320 F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
3321 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
3325 * Initialize and configure T3 HW modules. This performs the
3326 * initialization steps that need to be done once after a card is reset.
3327 * MAC and PHY initialization is handled separately whenever a port is enabled.
3329 * fw_params are passed to FW and their value is platform dependent. Only the
3330 * top 8 bits are available for use, the rest must be 0.
3332 int t3_init_hw(struct adapter *adapter, u32 fw_params)
/*
 * NOTE(review): the extraction dropped several lines from this function
 * (opening brace, the "goto out_err" error paths, the uP polling loop
 * body and the final return).  Only comments are added below; every
 * surviving code line is untouched.
 */
3334 int err = -EIO, attempts, i;
3335 const struct vpd_params *vpd = &adapter->params.vpd;
/* XGMAC analog calibration: rev > 0 (T3B+) parts use a different routine */
3337 if (adapter->params.rev > 0)
3338 calibrate_xgm_t3b(adapter);
3339 else if (calibrate_xgm(adapter))
/* carve the on-card memory up between the HW modules */
3343 partition_mem(adapter, &adapter->params.tp);
/* bring up the three MC7 memories and the MC5 TCAM */
3345 if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
3346 mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
3347 mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
3348 t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
3349 adapter->params.mc5.nfilters,
3350 adapter->params.mc5.nroutes))
/* clear the 32 CQ SGE contexts */
3353 for (i = 0; i < 32; i++)
3354 if (clear_sge_ctxt(adapter, i, F_CQ))
/* TP engine initialization */
3358 if (tp_init(adapter, &adapter->params.tp))
/* cap Rx coalescing and max Rx size by the SGE's max packet size */
3361 t3_tp_set_coalescing_size(adapter,
3362 min(adapter->params.sge.max_pkt_size,
3363 MAX_RX_COALESCING_LEN), 1);
3364 t3_tp_set_max_rxsize(adapter,
3365 min(adapter->params.sge.max_pkt_size, 16384U));
3366 ulp_config(adapter, &adapter->params.tp);
/* bus-specific config; the PCI-X write presumably sat under a lost else */
3368 if (is_pcie(adapter))
3369 config_pcie(adapter);
3371 t3_set_reg_field(adapter, A_PCIX_CFG, 0,
3372 F_DMASTOPEN | F_CLIDECEN);
/* T3C needs CQE SOP masking enabled in ULP Tx */
3374 if (adapter->params.rev == T3_REV_C)
3375 t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
3376 F_CFG_CQE_SOP_MASK);
3378 t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
3379 t3_write_reg(adapter, A_PM1_RX_MODE, 0);
3380 t3_write_reg(adapter, A_PM1_TX_MODE, 0);
3381 init_hw_for_avail_ports(adapter, adapter->params.nports);
3382 t3_sge_init(adapter, &adapter->params.sge);
/* GPIO interrupt polarity depends on the attached PHYs */
3384 t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));
/* hand fw_params to the uP and kick off the firmware boot from flash */
3386 t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
3387 t3_write_reg(adapter, A_CIM_BOOT_CFG,
3388 V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
3389 t3_read_reg(adapter, A_CIM_BOOT_CFG); /* flush */
/* the uP clears A_CIM_HOST_ACC_DATA once it has initialized */
3392 do { /* wait for uP to initialize */
3394 } while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
3396 CH_ERR(adapter, "uP initialization timed out\n");
3406 * get_pci_mode - determine a card's PCI mode
3407 * @adapter: the adapter
3408 * @p: where to store the PCI settings
3410 * Determines a card's PCI mode and associated parameters, such as speed
3413 static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
3415 static unsigned short speed_map[] = { 33, 66, 100, 133 };
3416 u32 pci_mode, pcie_cap;
3418 pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
3422 p->variant = PCI_VARIANT_PCIE;
3423 p->pcie_cap_addr = pcie_cap;
3424 pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
3426 p->width = (val >> 4) & 0x3f;
3430 pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
3431 p->speed = speed_map[G_PCLKRANGE(pci_mode)];
3432 p->width = (pci_mode & F_64BIT) ? 64 : 32;
3433 pci_mode = G_PCIXINITPAT(pci_mode);
3435 p->variant = PCI_VARIANT_PCI;
3436 else if (pci_mode < 4)
3437 p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
3438 else if (pci_mode < 8)
3439 p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
3441 p->variant = PCI_VARIANT_PCIX_266_MODE2;
3445 * init_link_config - initialize a link's SW state
3446 * @lc: structure holding the link state
3447 * @ai: information about the current card
3449 * Initializes the SW state maintained for each link, including the link's
3450 * capabilities and default speed/duplex/flow-control/autonegotiation
3453 static void init_link_config(struct link_config *lc, unsigned int caps)
3455 lc->supported = caps;
3456 lc->requested_speed = lc->speed = SPEED_INVALID;
3457 lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3458 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3459 if (lc->supported & SUPPORTED_Autoneg) {
3460 lc->advertising = lc->supported;
3461 lc->autoneg = AUTONEG_ENABLE;
3462 lc->requested_fc |= PAUSE_AUTONEG;
3464 lc->advertising = 0;
3465 lc->autoneg = AUTONEG_DISABLE;
3470 * mc7_calc_size - calculate MC7 memory size
3471 * @cfg: the MC7 configuration
3473 * Calculates the size of an MC7 memory in bytes from the value of its
3474 * configuration register.
3476 static unsigned int mc7_calc_size(u32 cfg)
3478 unsigned int width = G_WIDTH(cfg);
3479 unsigned int banks = !!(cfg & F_BKS) + 1;
3480 unsigned int org = !!(cfg & F_ORG) + 1;
3481 unsigned int density = G_DEN(cfg);
3482 unsigned int MBs = ((256 << density) * banks) / (org << width);
3487 static void mc7_prep(struct adapter *adapter, struct mc7 *mc7,
3488 unsigned int base_addr, const char *name)
3492 mc7->adapter = adapter;
3494 mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3495 cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3496 mc7->size = mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
3497 mc7->width = G_WIDTH(cfg);
3500 void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
3502 mac->adapter = adapter;
3503 mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
3506 if (adapter->params.rev == 0 && uses_xaui(adapter)) {
3507 t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
3508 is_10G(adapter) ? 0x2901c04 : 0x2301c04);
3509 t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
3514 void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
3516 u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);
3518 mi1_init(adapter, ai);
3519 t3_write_reg(adapter, A_I2C_CFG, /* set for 80KHz */
3520 V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
3521 t3_write_reg(adapter, A_T3DBG_GPIO_EN,
3522 ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
3523 t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
3524 t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));
3526 if (adapter->params.rev == 0 || !uses_xaui(adapter))
3529 /* Enable MAC clocks so we can access the registers */
3530 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3531 t3_read_reg(adapter, A_XGM_PORT_CFG);
3533 val |= F_CLKDIVRESET_;
3534 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3535 t3_read_reg(adapter, A_XGM_PORT_CFG);
3536 t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
3537 t3_read_reg(adapter, A_XGM_PORT_CFG);
3541 * Reset the adapter.
3542 * Older PCIe cards lose their config space during reset, PCI-X
3545 int t3_reset_adapter(struct adapter *adapter)
3547 int i, save_and_restore_pcie =
3548 adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
3551 if (save_and_restore_pcie)
3552 pci_save_state(adapter->pdev);
3553 t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
3556 * Delay. Give Some time to device to reset fully.
3557 * XXX The delay time should be modified.
3559 for (i = 0; i < 10; i++) {
3561 pci_read_config_word(adapter->pdev, 0x00, &devid);
3562 if (devid == 0x1425)
3566 if (devid != 0x1425)
3569 if (save_and_restore_pcie)
3570 pci_restore_state(adapter->pdev);
3574 static int init_parity(struct adapter *adap)
3578 if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
3581 for (err = i = 0; !err && i < 16; i++)
3582 err = clear_sge_ctxt(adap, i, F_EGRESS);
3583 for (i = 0xfff0; !err && i <= 0xffff; i++)
3584 err = clear_sge_ctxt(adap, i, F_EGRESS);
3585 for (i = 0; !err && i < SGE_QSETS; i++)
3586 err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
3590 t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
3591 for (i = 0; i < 4; i++)
3592 for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
3593 t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
3594 F_IBQDBGWR | V_IBQDBGQID(i) |
3595 V_IBQDBGADDR(addr));
3596 err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
3597 F_IBQDBGBUSY, 0, 2, 1);
3605 * Initialize adapter SW state for the various HW modules, set initial values
3606 * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
3609 int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
/*
 * NOTE(review): extraction dropped lines throughout this function (the
 * tail of the parameter list, braces, and the early-return error
 * paths).  Only comments are added below; the surviving code lines are
 * byte-identical.
 */
3613 unsigned int i, j = -1;
/* determine PCI vs PCIe mode, link speed and width */
3615 get_pci_mode(adapter, &adapter->params.pci);
3617 adapter->params.info = ai;
3618 adapter->params.nports = ai->nports;
3619 adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
3620 adapter->params.linkpoll_period = 0;
/* 10G MACs accumulate stats faster, so update them more often */
3621 adapter->params.stats_update_period = is_10G(adapter) ?
3622 MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
3623 adapter->params.pci.vpd_cap_addr =
3624 pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
3625 ret = get_vpd_params(adapter, &adapter->params.vpd);
3629 if (reset && t3_reset_adapter(adapter))
3632 t3_sge_prep(adapter, &adapter->params.sge);
/* a nonzero mclk means external memory is present (offload-capable) */
3634 if (adapter->params.vpd.mclk) {
3635 struct tp_params *p = &adapter->params.tp;
3637 mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
3638 mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
3639 mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
/* divide the payload memories among the channels */
3641 p->nchan = ai->nports;
3642 p->pmrx_size = t3_mc7_size(&adapter->pmrx);
3643 p->pmtx_size = t3_mc7_size(&adapter->pmtx);
3644 p->cm_size = t3_mc7_size(&adapter->cm);
3645 p->chan_rx_size = p->pmrx_size / 2; /* only 1 Rx channel */
3646 p->chan_tx_size = p->pmtx_size / p->nchan;
3647 p->rx_pg_size = 64 * 1024;
3648 p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
3649 p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
3650 p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
/* more timer queues when CM is >= 128MB or the chip is newer than rev 0 */
3651 p->ntimer_qs = p->cm_size >= (128 << 20) ||
3652 adapter->params.rev > 0 ? 12 : 6;
/* offload requires all three external memories to be present */
3655 adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
3656 t3_mc7_size(&adapter->pmtx) &&
3657 t3_mc7_size(&adapter->cm);
3659 if (is_offload(adapter)) {
3660 adapter->params.mc5.nservers = DEFAULT_NSERVERS;
/* rev 0 chips get no filters */
3661 adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
3662 DEFAULT_NFILTERS : 0;
3663 adapter->params.mc5.nroutes = 0;
3664 t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
/* default MTU table and congestion-control windows */
3666 init_mtus(adapter->params.mtus);
3667 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3670 early_hw_init(adapter, ai);
3671 ret = init_parity(adapter);
/* per-port setup: PHY, MAC, addresses and link state */
3675 for_each_port(adapter, i) {
3677 const struct port_type_info *pti;
3678 struct port_info *p = adap2pinfo(adapter, i);
/* j advances over VPD slots, skipping those with no port configured */
3680 while (!adapter->params.vpd.port_type[++j])
3683 pti = &port_types[adapter->params.vpd.port_type[j]];
3684 if (!pti->phy_prep) {
3685 CH_ALERT(adapter, "Invalid port type index %d\n",
3686 adapter->params.vpd.port_type[j]);
3690 ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
3694 mac_prep(&p->mac, adapter, j);
3697 * The VPD EEPROM stores the base Ethernet address for the
3698 * card. A port's address is derived from the base by adding
3699 * the port's index to the base's low octet.
3701 memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
3702 hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
/* program both the current and the permanent HW address */
3704 memcpy(adapter->port[i]->dev_addr, hw_addr,
3706 memcpy(adapter->port[i]->perm_addr, hw_addr,
3708 init_link_config(&p->link_config, p->phy.caps);
3709 p->phy.ops->power_down(&p->phy, 1);
/* PHYs without interrupt support must be polled for link changes */
3710 if (!(p->phy.caps & SUPPORTED_IRQ))
3711 adapter->params.linkpoll_period = 10;
3717 void t3_led_ready(struct adapter *adapter)
3719 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
3723 int t3_replay_prep_adapter(struct adapter *adapter)
3725 const struct adapter_info *ai = adapter->params.info;
3726 unsigned int i, j = -1;
3729 early_hw_init(adapter, ai);
3730 ret = init_parity(adapter);
3734 for_each_port(adapter, i) {
3735 const struct port_type_info *pti;
3736 struct port_info *p = adap2pinfo(adapter, i);
3738 while (!adapter->params.vpd.port_type[++j])
3741 pti = &port_types[adapter->params.vpd.port_type[j]];
3742 ret = pti->phy_prep(&p->phy, adapter, p->phy.addr, NULL);
3745 p->phy.ops->power_down(&p->phy, 1);