2 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35 #include "firmware_exports.h"
38 * t3_wait_op_done_val - wait until an operation is completed
39 * @adapter: the adapter performing the operation
40 * @reg: the register to check for completion
41 * @mask: a single-bit field within @reg that indicates completion
42 * @polarity: the value of the field when the operation is completed
43 * @attempts: number of check iterations
44 * @delay: delay in usecs between iterations
45 * @valp: where to store the value of the register at completion time
47 * Wait until an operation is completed by checking a bit in a register
48 * up to @attempts times. If @valp is not NULL the value of the register
49 * at the time it indicated completion is stored there. Returns 0 if the
50 * operation completes and -EAGAIN otherwise.
53 int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
54 int polarity, int attempts, int delay, u32 *valp)
57 u32 val = t3_read_reg(adapter, reg);
59 if (!!(val & mask) == polarity) {
72 * t3_write_regs - write a bunch of registers
73 * @adapter: the adapter to program
74 * @p: an array of register address/register value pairs
75 * @n: the number of address/value pairs
76 * @offset: register address offset
78 * Takes an array of register address/register value pairs and writes each
79 * value to the corresponding register. Register addresses are adjusted
80 * by the supplied offset.
82 void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
83 int n, unsigned int offset)
86 t3_write_reg(adapter, p->reg_addr + offset, p->val);
92 * t3_set_reg_field - set a register field to a value
93 * @adapter: the adapter to program
94 * @addr: the register address
95 * @mask: specifies the portion of the register to modify
96 * @val: the new value for the register field
98 * Sets a register field specified by the supplied mask to the
101 void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
104 u32 v = t3_read_reg(adapter, addr) & ~mask;
106 t3_write_reg(adapter, addr, v | val);
107 t3_read_reg(adapter, addr); /* flush */
111 * t3_read_indirect - read indirectly addressed registers
113 * @addr_reg: register holding the indirect address
114 * @data_reg: register holding the value of the indirect register
115 * @vals: where the read register values are stored
116 * @start_idx: index of first indirect register to read
117 * @nregs: how many indirect registers to read
119 * Reads registers that are accessed indirectly through an address/data
122 static void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
123 unsigned int data_reg, u32 *vals,
124 unsigned int nregs, unsigned int start_idx)
127 t3_write_reg(adap, addr_reg, start_idx);
128 *vals++ = t3_read_reg(adap, data_reg);
134 * t3_mc7_bd_read - read from MC7 through backdoor accesses
135 * @mc7: identifies MC7 to read from
136 * @start: index of first 64-bit word to read
137 * @n: number of 64-bit words to read
138 * @buf: where to store the read result
140 * Read n 64-bit words from MC7 starting at word start, using backdoor
143 int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
146 static const int shift[] = { 0, 0, 16, 24 };
147 static const int step[] = { 0, 32, 16, 8 };
149 unsigned int size64 = mc7->size / 8; /* # of 64-bit words */
150 struct adapter *adap = mc7->adapter;
152 if (start >= size64 || start + n > size64)
155 start *= (8 << mc7->width);
160 for (i = (1 << mc7->width) - 1; i >= 0; --i) {
164 t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
165 t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
166 val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
167 while ((val & F_BUSY) && attempts--)
168 val = t3_read_reg(adap,
169 mc7->offset + A_MC7_BD_OP);
173 val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
174 if (mc7->width == 0) {
175 val64 = t3_read_reg(adap,
178 val64 |= (u64) val << 32;
181 val >>= shift[mc7->width];
182 val64 |= (u64) val << (step[mc7->width] * i);
194 static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
196 u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
197 u32 val = F_PREEN | V_CLKDIV(clkdiv);
199 t3_write_reg(adap, A_MI1_CFG, val);
202 #define MDIO_ATTEMPTS 20
205 * MI1 read/write operations for clause 22 PHYs.
207 static int t3_mi1_read(struct adapter *adapter, int phy_addr, int mmd_addr,
208 int reg_addr, unsigned int *valp)
211 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
216 mutex_lock(&adapter->mdio_lock);
217 t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
218 t3_write_reg(adapter, A_MI1_ADDR, addr);
219 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
220 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
222 *valp = t3_read_reg(adapter, A_MI1_DATA);
223 mutex_unlock(&adapter->mdio_lock);
227 static int t3_mi1_write(struct adapter *adapter, int phy_addr, int mmd_addr,
228 int reg_addr, unsigned int val)
231 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
236 mutex_lock(&adapter->mdio_lock);
237 t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
238 t3_write_reg(adapter, A_MI1_ADDR, addr);
239 t3_write_reg(adapter, A_MI1_DATA, val);
240 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
241 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
242 mutex_unlock(&adapter->mdio_lock);
246 static const struct mdio_ops mi1_mdio_ops = {
252 * Performs the address cycle for clause 45 PHYs.
253 * Must be called with the MDIO_LOCK held.
255 static int mi1_wr_addr(struct adapter *adapter, int phy_addr, int mmd_addr,
258 u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
260 t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
261 t3_write_reg(adapter, A_MI1_ADDR, addr);
262 t3_write_reg(adapter, A_MI1_DATA, reg_addr);
263 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
264 return t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
269 * MI1 read/write operations for indirect-addressed PHYs.
271 static int mi1_ext_read(struct adapter *adapter, int phy_addr, int mmd_addr,
272 int reg_addr, unsigned int *valp)
276 mutex_lock(&adapter->mdio_lock);
277 ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
279 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
280 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
283 *valp = t3_read_reg(adapter, A_MI1_DATA);
285 mutex_unlock(&adapter->mdio_lock);
289 static int mi1_ext_write(struct adapter *adapter, int phy_addr, int mmd_addr,
290 int reg_addr, unsigned int val)
294 mutex_lock(&adapter->mdio_lock);
295 ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
297 t3_write_reg(adapter, A_MI1_DATA, val);
298 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
299 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
302 mutex_unlock(&adapter->mdio_lock);
306 static const struct mdio_ops mi1_mdio_ext_ops = {
/**
 *	t3_mdio_change_bits - modify the value of a PHY register
 *	@phy: the PHY to operate on
 *	@mmd: the device address
 *	@reg: the register address
 *	@clear: what part of the register value to mask off
 *	@set: what part of the register value to set
 *
 *	Changes the value of a PHY register by applying a mask to its current
 *	value and ORing the result with a new value.
 */
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
			unsigned int set)
{
	int ret;
	unsigned int val;

	ret = mdio_read(phy, mmd, reg, &val);
	if (!ret) {
		val &= ~clear;
		ret = mdio_write(phy, mmd, reg, val | set);
	}
	return ret;
}
337 * t3_phy_reset - reset a PHY block
338 * @phy: the PHY to operate on
339 * @mmd: the device address of the PHY block to reset
340 * @wait: how long to wait for the reset to complete in 1ms increments
342 * Resets a PHY block and optionally waits for the reset to complete.
343 * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
346 int t3_phy_reset(struct cphy *phy, int mmd, int wait)
351 err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
356 err = mdio_read(phy, mmd, MII_BMCR, &ctl);
362 } while (ctl && --wait);
368 * t3_phy_advertise - set the PHY advertisement registers for autoneg
369 * @phy: the PHY to operate on
370 * @advert: bitmap of capabilities the PHY should advertise
372 * Sets a 10/100/1000 PHY's advertisement registers to advertise the
373 * requested capabilities.
375 int t3_phy_advertise(struct cphy *phy, unsigned int advert)
378 unsigned int val = 0;
380 err = mdio_read(phy, 0, MII_CTRL1000, &val);
384 val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
385 if (advert & ADVERTISED_1000baseT_Half)
386 val |= ADVERTISE_1000HALF;
387 if (advert & ADVERTISED_1000baseT_Full)
388 val |= ADVERTISE_1000FULL;
390 err = mdio_write(phy, 0, MII_CTRL1000, val);
395 if (advert & ADVERTISED_10baseT_Half)
396 val |= ADVERTISE_10HALF;
397 if (advert & ADVERTISED_10baseT_Full)
398 val |= ADVERTISE_10FULL;
399 if (advert & ADVERTISED_100baseT_Half)
400 val |= ADVERTISE_100HALF;
401 if (advert & ADVERTISED_100baseT_Full)
402 val |= ADVERTISE_100FULL;
403 if (advert & ADVERTISED_Pause)
404 val |= ADVERTISE_PAUSE_CAP;
405 if (advert & ADVERTISED_Asym_Pause)
406 val |= ADVERTISE_PAUSE_ASYM;
407 return mdio_write(phy, 0, MII_ADVERTISE, val);
411 * t3_phy_advertise_fiber - set fiber PHY advertisement register
412 * @phy: the PHY to operate on
413 * @advert: bitmap of capabilities the PHY should advertise
415 * Sets a fiber PHY's advertisement register to advertise the
416 * requested capabilities.
418 int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert)
420 unsigned int val = 0;
422 if (advert & ADVERTISED_1000baseT_Half)
423 val |= ADVERTISE_1000XHALF;
424 if (advert & ADVERTISED_1000baseT_Full)
425 val |= ADVERTISE_1000XFULL;
426 if (advert & ADVERTISED_Pause)
427 val |= ADVERTISE_1000XPAUSE;
428 if (advert & ADVERTISED_Asym_Pause)
429 val |= ADVERTISE_1000XPSE_ASYM;
430 return mdio_write(phy, 0, MII_ADVERTISE, val);
434 * t3_set_phy_speed_duplex - force PHY speed and duplex
435 * @phy: the PHY to operate on
436 * @speed: requested PHY speed
437 * @duplex: requested PHY duplex
439 * Force a 10/100/1000 PHY's speed and duplex. This also disables
440 * auto-negotiation except for GigE, where auto-negotiation is mandatory.
442 int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
447 err = mdio_read(phy, 0, MII_BMCR, &ctl);
452 ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
453 if (speed == SPEED_100)
454 ctl |= BMCR_SPEED100;
455 else if (speed == SPEED_1000)
456 ctl |= BMCR_SPEED1000;
459 ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
460 if (duplex == DUPLEX_FULL)
461 ctl |= BMCR_FULLDPLX;
463 if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
464 ctl |= BMCR_ANENABLE;
465 return mdio_write(phy, 0, MII_BMCR, ctl);
468 int t3_phy_lasi_intr_enable(struct cphy *phy)
470 return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 1);
473 int t3_phy_lasi_intr_disable(struct cphy *phy)
475 return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 0);
478 int t3_phy_lasi_intr_clear(struct cphy *phy)
482 return mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &val);
485 int t3_phy_lasi_intr_handler(struct cphy *phy)
488 int err = mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &status);
492 return (status & 1) ? cphy_cause_link_change : 0;
/*
 * Per-board adapter description table: GPIO output-enable/value masks,
 * external interrupt GPIOs, extra supported link modes, the MDIO ops to
 * use (clause 22 for copper boards, clause 45 ext ops for 10G boards),
 * and the product name.
 *
 * NOTE(review): this capture is missing the leading scalar fields of each
 * entry (and some whole entries); the visible initializers below are
 * fragments — reconstruct against the upstream table before building.
 */
495 static const struct adapter_info t3_adap_info[] = {
497 F_GPIO2_OEN | F_GPIO4_OEN |
498 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
499 &mi1_mdio_ops, "Chelsio PE9000"},
501 F_GPIO2_OEN | F_GPIO4_OEN |
502 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
503 &mi1_mdio_ops, "Chelsio T302"},
505 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
506 F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
507 { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
508 &mi1_mdio_ext_ops, "Chelsio T310"},
510 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
511 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
512 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
513 { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
514 &mi1_mdio_ext_ops, "Chelsio T320"},
518 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
519 F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
520 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
521 &mi1_mdio_ext_ops, "Chelsio T310" },
525 * Return the adapter_info structure with a given index. Out-of-range indices
528 const struct adapter_info *t3_get_adapter_info(unsigned int id)
530 return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
/* Per-port-type hook that prepares the PHY driver for one port. */
struct port_type_info {
	int (*phy_prep)(struct cphy *phy, struct adapter *adapter,
			int phy_addr, const struct mdio_ops *ops);
};
538 static const struct port_type_info port_types[] = {
540 { t3_ael1002_phy_prep },
541 { t3_vsc8211_phy_prep },
543 { t3_xaui_direct_phy_prep },
544 { t3_ael2005_phy_prep },
545 { t3_qt2045_phy_prep },
546 { t3_ael1006_phy_prep },
550 #define VPD_ENTRY(name, len) \
551 u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
554 * Partial EEPROM Vital Product Data structure. Includes only the ID and
563 VPD_ENTRY(pn, 16); /* part number */
564 VPD_ENTRY(ec, 16); /* EC level */
565 VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
566 VPD_ENTRY(na, 12); /* MAC address base */
567 VPD_ENTRY(cclk, 6); /* core clock */
568 VPD_ENTRY(mclk, 6); /* mem clock */
569 VPD_ENTRY(uclk, 6); /* uP clk */
570 VPD_ENTRY(mdc, 6); /* MDIO clk */
571 VPD_ENTRY(mt, 2); /* mem timing */
572 VPD_ENTRY(xaui0cfg, 6); /* XAUI0 config */
573 VPD_ENTRY(xaui1cfg, 6); /* XAUI1 config */
574 VPD_ENTRY(port0, 2); /* PHY0 complex */
575 VPD_ENTRY(port1, 2); /* PHY1 complex */
576 VPD_ENTRY(port2, 2); /* PHY2 complex */
577 VPD_ENTRY(port3, 2); /* PHY3 complex */
578 VPD_ENTRY(rv, 1); /* csum */
579 u32 pad; /* for multiple-of-4 sizing and alignment */
582 #define EEPROM_MAX_POLL 40
583 #define EEPROM_STAT_ADDR 0x4000
584 #define VPD_BASE 0xc00
587 * t3_seeprom_read - read a VPD EEPROM location
588 * @adapter: adapter to read
589 * @addr: EEPROM address
590 * @data: where to store the read data
592 * Read a 32-bit word from a location in VPD EEPROM using the card's PCI
593 * VPD ROM capability. A zero is written to the flag bit when the
594 * addres is written to the control register. The hardware device will
595 * set the flag to 1 when 4 bytes have been read into the data register.
597 int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data)
600 int attempts = EEPROM_MAX_POLL;
602 unsigned int base = adapter->params.pci.vpd_cap_addr;
604 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
607 pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
610 pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
611 } while (!(val & PCI_VPD_ADDR_F) && --attempts);
613 if (!(val & PCI_VPD_ADDR_F)) {
614 CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
617 pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, &v);
618 *data = cpu_to_le32(v);
623 * t3_seeprom_write - write a VPD EEPROM location
624 * @adapter: adapter to write
625 * @addr: EEPROM address
626 * @data: value to write
628 * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
629 * VPD ROM capability.
631 int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data)
634 int attempts = EEPROM_MAX_POLL;
635 unsigned int base = adapter->params.pci.vpd_cap_addr;
637 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
640 pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
642 pci_write_config_word(adapter->pdev,base + PCI_VPD_ADDR,
643 addr | PCI_VPD_ADDR_F);
646 pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
647 } while ((val & PCI_VPD_ADDR_F) && --attempts);
649 if (val & PCI_VPD_ADDR_F) {
650 CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
657 * t3_seeprom_wp - enable/disable EEPROM write protection
658 * @adapter: the adapter
659 * @enable: 1 to enable write protection, 0 to disable it
661 * Enables or disables write protection on the serial EEPROM.
663 int t3_seeprom_wp(struct adapter *adapter, int enable)
665 return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
/*
 * Convert a character holding a hex digit to a number.  The caller must
 * pass a valid hex digit ([0-9a-fA-F]); other characters yield garbage.
 */
static unsigned int hex2int(unsigned char c)
{
	return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
}
677 * get_vpd_params - read VPD parameters from VPD EEPROM
678 * @adapter: adapter to read
679 * @p: where to store the parameters
681 * Reads card parameters stored in VPD EEPROM.
683 static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
689 * Card information is normally at VPD_BASE but some early cards had
692 ret = t3_seeprom_read(adapter, VPD_BASE, (__le32 *)&vpd);
695 addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;
697 for (i = 0; i < sizeof(vpd); i += 4) {
698 ret = t3_seeprom_read(adapter, addr + i,
699 (__le32 *)((u8 *)&vpd + i));
704 p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
705 p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
706 p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
707 p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
708 p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
709 memcpy(p->sn, vpd.sn_data, SERNUM_LEN);
711 /* Old eeproms didn't have port information */
712 if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
713 p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
714 p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
716 p->port_type[0] = hex2int(vpd.port0_data[0]);
717 p->port_type[1] = hex2int(vpd.port1_data[0]);
718 p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
719 p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
722 for (i = 0; i < 6; i++)
723 p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
724 hex2int(vpd.na_data[2 * i + 1]);
/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 5,		/* max retries for SF1 operations */
	SF_SEC_SIZE = 64 * 1024,	/* serial flash sector size */
	SF_SIZE = SF_SEC_SIZE * 8,	/* serial flash size */

	/* flash command opcodes */
	SF_PROG_PAGE = 2,		/* program page */
	SF_WR_DISABLE = 4,		/* disable writes */
	SF_RD_STATUS = 5,		/* read status register */
	SF_WR_ENABLE = 6,		/* enable writes */
	SF_RD_DATA_FAST = 0xb,		/* read flash */
	SF_ERASE_SECTOR = 0xd8,		/* erase sector */

	FW_FLASH_BOOT_ADDR = 0x70000,	/* start address of FW in flash */
	FW_VERS_ADDR = 0x7fffc,		/* flash address holding FW version */
	FW_MIN_SIZE = 8			/* at least version and csum */
};
748 * sf1_read - read data from the serial flash
749 * @adapter: the adapter
750 * @byte_cnt: number of bytes to read
751 * @cont: whether another operation will be chained
752 * @valp: where to store the read data
754 * Reads up to 4 bytes of data from the serial flash. The location of
755 * the read needs to be specified prior to calling this by issuing the
756 * appropriate commands to the serial flash.
758 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
763 if (!byte_cnt || byte_cnt > 4)
765 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
767 t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
768 ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
770 *valp = t3_read_reg(adapter, A_SF_DATA);
775 * sf1_write - write data to the serial flash
776 * @adapter: the adapter
777 * @byte_cnt: number of bytes to write
778 * @cont: whether another operation will be chained
779 * @val: value to write
781 * Writes up to 4 bytes of data to the serial flash. The location of
782 * the write needs to be specified prior to calling this by issuing the
783 * appropriate commands to the serial flash.
785 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
788 if (!byte_cnt || byte_cnt > 4)
790 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
792 t3_write_reg(adapter, A_SF_DATA, val);
793 t3_write_reg(adapter, A_SF_OP,
794 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
795 return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
799 * flash_wait_op - wait for a flash operation to complete
800 * @adapter: the adapter
801 * @attempts: max number of polls of the status register
802 * @delay: delay between polls in ms
804 * Wait for a flash operation to complete by polling the status register.
806 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
812 if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
813 (ret = sf1_read(adapter, 1, 0, &status)) != 0)
825 * t3_read_flash - read words from serial flash
826 * @adapter: the adapter
827 * @addr: the start address for the read
828 * @nwords: how many 32-bit words to read
829 * @data: where to store the read data
830 * @byte_oriented: whether to store data as bytes or as words
832 * Read the specified number of 32-bit words from the serial flash.
833 * If @byte_oriented is set the read data is stored as a byte array
834 * (i.e., big-endian), otherwise as 32-bit words in the platform's
837 int t3_read_flash(struct adapter *adapter, unsigned int addr,
838 unsigned int nwords, u32 *data, int byte_oriented)
842 if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
845 addr = swab32(addr) | SF_RD_DATA_FAST;
847 if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
848 (ret = sf1_read(adapter, 1, 1, data)) != 0)
851 for (; nwords; nwords--, data++) {
852 ret = sf1_read(adapter, 4, nwords > 1, data);
856 *data = htonl(*data);
862 * t3_write_flash - write up to a page of data to the serial flash
863 * @adapter: the adapter
864 * @addr: the start address to write
865 * @n: length of data to write
866 * @data: the data to write
868 * Writes up to a page of data (256 bytes) to the serial flash starting
869 * at the given address.
871 static int t3_write_flash(struct adapter *adapter, unsigned int addr,
872 unsigned int n, const u8 *data)
876 unsigned int i, c, left, val, offset = addr & 0xff;
878 if (addr + n > SF_SIZE || offset + n > 256)
881 val = swab32(addr) | SF_PROG_PAGE;
883 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
884 (ret = sf1_write(adapter, 4, 1, val)) != 0)
887 for (left = n; left; left -= c) {
889 for (val = 0, i = 0; i < c; ++i)
890 val = (val << 8) + *data++;
892 ret = sf1_write(adapter, c, c != left, val);
896 if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
899 /* Read the page to verify the write succeeded */
900 ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
904 if (memcmp(data - n, (u8 *) buf + offset, n))
910 * t3_get_tp_version - read the tp sram version
911 * @adapter: the adapter
912 * @vers: where to place the version
914 * Reads the protocol sram version from sram.
916 int t3_get_tp_version(struct adapter *adapter, u32 *vers)
920 /* Get version loaded in SRAM */
921 t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
922 ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
927 *vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);
933 * t3_check_tpsram_version - read the tp sram version
934 * @adapter: the adapter
936 * Reads the protocol sram version from flash.
938 int t3_check_tpsram_version(struct adapter *adapter)
942 unsigned int major, minor;
944 if (adapter->params.rev == T3_REV_A)
948 ret = t3_get_tp_version(adapter, &vers);
952 major = G_TP_VERSION_MAJOR(vers);
953 minor = G_TP_VERSION_MINOR(vers);
955 if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
958 CH_ERR(adapter, "found wrong TP version (%u.%u), "
959 "driver compiled for version %d.%d\n", major, minor,
960 TP_VERSION_MAJOR, TP_VERSION_MINOR);
966 * t3_check_tpsram - check if provided protocol SRAM
967 * is compatible with this driver
968 * @adapter: the adapter
969 * @tp_sram: the firmware image to write
972 * Checks if an adapter's tp sram is compatible with the driver.
973 * Returns 0 if the versions are compatible, a negative error otherwise.
975 int t3_check_tpsram(struct adapter *adapter, const u8 *tp_sram,
980 const __be32 *p = (const __be32 *)tp_sram;
982 /* Verify checksum */
983 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
985 if (csum != 0xffffffff) {
986 CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
994 enum fw_version_type {
1000 * t3_get_fw_version - read the firmware version
1001 * @adapter: the adapter
1002 * @vers: where to place the version
1004 * Reads the FW version from flash.
1006 int t3_get_fw_version(struct adapter *adapter, u32 *vers)
1008 return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
1012 * t3_check_fw_version - check if the FW is compatible with this driver
1013 * @adapter: the adapter
1015 * Checks if an adapter's FW is compatible with the driver. Returns 0
1016 * if the versions are compatible, a negative error otherwise.
1018 int t3_check_fw_version(struct adapter *adapter)
1022 unsigned int type, major, minor;
1024 ret = t3_get_fw_version(adapter, &vers);
1028 type = G_FW_VERSION_TYPE(vers);
1029 major = G_FW_VERSION_MAJOR(vers);
1030 minor = G_FW_VERSION_MINOR(vers);
1032 if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
1033 minor == FW_VERSION_MINOR)
1035 else if (major != FW_VERSION_MAJOR || minor < FW_VERSION_MINOR)
1036 CH_WARN(adapter, "found old FW minor version(%u.%u), "
1037 "driver compiled for version %u.%u\n", major, minor,
1038 FW_VERSION_MAJOR, FW_VERSION_MINOR);
1040 CH_WARN(adapter, "found newer FW version(%u.%u), "
1041 "driver compiled for version %u.%u\n", major, minor,
1042 FW_VERSION_MAJOR, FW_VERSION_MINOR);
1049 * t3_flash_erase_sectors - erase a range of flash sectors
1050 * @adapter: the adapter
1051 * @start: the first sector to erase
1052 * @end: the last sector to erase
1054 * Erases the sectors in the given range.
1056 static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
1058 while (start <= end) {
1061 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
1062 (ret = sf1_write(adapter, 4, 0,
1063 SF_ERASE_SECTOR | (start << 8))) != 0 ||
1064 (ret = flash_wait_op(adapter, 5, 500)) != 0)
1072 * t3_load_fw - download firmware
1073 * @adapter: the adapter
1074 * @fw_data: the firmware image to write
1077 * Write the supplied firmware image to the card's serial flash.
1078 * The FW image has the following sections: @size - 8 bytes of code and
1079 * data, followed by 4 bytes of FW version, followed by the 32-bit
1080 * 1's complement checksum of the whole image.
1082 int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
1086 const __be32 *p = (const __be32 *)fw_data;
1087 int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;
1089 if ((size & 3) || size < FW_MIN_SIZE)
1091 if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
1094 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1095 csum += ntohl(p[i]);
1096 if (csum != 0xffffffff) {
1097 CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
1102 ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
1106 size -= 8; /* trim off version and checksum */
1107 for (addr = FW_FLASH_BOOT_ADDR; size;) {
1108 unsigned int chunk_size = min(size, 256U);
1110 ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
1115 fw_data += chunk_size;
1119 ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
1122 CH_ERR(adapter, "firmware download failed, error %d\n", ret);
1126 #define CIM_CTL_BASE 0x2000
1129 * t3_cim_ctl_blk_read - read a block from CIM control region
1131 * @adap: the adapter
1132 * @addr: the start address within the CIM control region
1133 * @n: number of words to read
1134 * @valp: where to store the result
1136 * Reads a block of 4-byte words from the CIM control region.
1138 int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
1139 unsigned int n, unsigned int *valp)
1143 if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
1146 for ( ; !ret && n--; addr += 4) {
1147 t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
1148 ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
1151 *valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
1156 static void t3_gate_rx_traffic(struct cmac *mac, u32 *rx_cfg,
1157 u32 *rx_hash_high, u32 *rx_hash_low)
1159 /* stop Rx unicast traffic */
1160 t3_mac_disable_exact_filters(mac);
1162 /* stop broadcast, multicast, promiscuous mode traffic */
1163 *rx_cfg = t3_read_reg(mac->adapter, A_XGM_RX_CFG);
1164 t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
1165 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
1168 *rx_hash_high = t3_read_reg(mac->adapter, A_XGM_RX_HASH_HIGH);
1169 t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, 0);
1171 *rx_hash_low = t3_read_reg(mac->adapter, A_XGM_RX_HASH_LOW);
1172 t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, 0);
1174 /* Leave time to drain max RX fifo */
1178 static void t3_open_rx_traffic(struct cmac *mac, u32 rx_cfg,
1179 u32 rx_hash_high, u32 rx_hash_low)
1181 t3_mac_enable_exact_filters(mac);
1182 t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
1183 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
1185 t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, rx_hash_high);
1186 t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, rx_hash_low);
1190 * t3_link_changed - handle interface link changes
1191 * @adapter: the adapter
1192 * @port_id: the port index that changed link state
1194 * Called when a port's link settings change to propagate the new values
1195 * to the associated PHY and MAC. After performing the common tasks it
1196 * invokes an OS-specific handler.
1198 void t3_link_changed(struct adapter *adapter, int port_id)
1200 int link_ok, speed, duplex, fc;
1201 struct port_info *pi = adap2pinfo(adapter, port_id);
1202 struct cphy *phy = &pi->phy;
1203 struct cmac *mac = &pi->mac;
1204 struct link_config *lc = &pi->link_config;
1206 phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
1208 if (!lc->link_ok && link_ok) {
1209 u32 rx_cfg, rx_hash_high, rx_hash_low;
1212 t3_xgm_intr_enable(adapter, port_id);
1213 t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);
1214 t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
1215 t3_mac_enable(mac, MAC_DIRECTION_RX);
1217 status = t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
1218 if (status & F_LINKFAULTCHANGE) {
1219 mac->stats.link_faults++;
1222 t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);
1225 if (lc->requested_fc & PAUSE_AUTONEG)
1226 fc &= lc->requested_fc;
1228 fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1230 if (link_ok == lc->link_ok && speed == lc->speed &&
1231 duplex == lc->duplex && fc == lc->fc)
1232 return; /* nothing changed */
1234 if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
1235 uses_xaui(adapter)) {
1238 t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
1239 link_ok ? F_TXACTENABLE | F_RXEN : 0);
1241 lc->link_ok = link_ok;
1242 lc->speed = speed < 0 ? SPEED_INVALID : speed;
1243 lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
1245 if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
1246 /* Set MAC speed, duplex, and flow control to match PHY. */
1247 t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
1251 t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
1254 void t3_link_fault(struct adapter *adapter, int port_id)
1256 struct port_info *pi = adap2pinfo(adapter, port_id);
1257 struct cmac *mac = &pi->mac;
1258 struct cphy *phy = &pi->phy;
1259 struct link_config *lc = &pi->link_config;
1260 int link_ok, speed, duplex, fc, link_fault;
1261 u32 rx_cfg, rx_hash_high, rx_hash_low;
1263 t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);
1265 if (adapter->params.rev > 0 && uses_xaui(adapter))
1266 t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset, 0);
1268 t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
1269 t3_mac_enable(mac, MAC_DIRECTION_RX);
1271 t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);
1273 link_fault = t3_read_reg(adapter,
1274 A_XGM_INT_STATUS + mac->offset);
1275 link_fault &= F_LINKFAULTCHANGE;
1277 phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
1281 lc->speed = SPEED_INVALID;
1282 lc->duplex = DUPLEX_INVALID;
1284 t3_os_link_fault(adapter, port_id, 0);
1286 /* Account link faults only when the phy reports a link up */
1288 mac->stats.link_faults++;
1291 t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
1292 F_TXACTENABLE | F_RXEN);
1295 lc->link_ok = (unsigned char)link_ok;
1296 lc->speed = speed < 0 ? SPEED_INVALID : speed;
1297 lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
1298 t3_os_link_fault(adapter, port_id, link_ok);
1303 * t3_link_start - apply link configuration to MAC/PHY
1304 * @phy: the PHY to setup
1305 * @mac: the MAC to setup
1306 * @lc: the requested link configuration
1308 * Set up a port's MAC and PHY according to a desired link configuration.
1309 * - If the PHY can auto-negotiate first decide what to advertise, then
1310 * enable/disable auto-negotiation as desired, and reset.
1311 * - If the PHY does not auto-negotiate just reset it.
1312 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1313 * otherwise do it later based on the outcome of auto-negotiation.
1315 int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
1317 unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1320 if (lc->supported & SUPPORTED_Autoneg) {
1321 lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
1323 lc->advertising |= ADVERTISED_Asym_Pause;
1325 lc->advertising |= ADVERTISED_Pause;
1327 phy->ops->advertise(phy, lc->advertising);
1329 if (lc->autoneg == AUTONEG_DISABLE) {
1330 lc->speed = lc->requested_speed;
1331 lc->duplex = lc->requested_duplex;
1332 lc->fc = (unsigned char)fc;
1333 t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
1335 /* Also disables autoneg */
1336 phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
1338 phy->ops->autoneg_enable(phy);
1340 t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
1341 lc->fc = (unsigned char)fc;
1342 phy->ops->reset(phy, 0);
1348 * t3_set_vlan_accel - control HW VLAN extraction
1349 * @adapter: the adapter
1350 * @ports: bitmap of adapter ports to operate on
1351 * @on: enable (1) or disable (0) HW VLAN extraction
1353 * Enables or disables HW extraction of VLAN tags for the given port.
1355 void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
1357 t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
1358 ports << S_VLANEXTRACTIONENABLE,
1359 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
1363 unsigned int mask; /* bits to check in interrupt status */
1364 const char *msg; /* message to print or NULL */
1365 short stat_idx; /* stat counter to increment or -1 */
1366 unsigned short fatal; /* whether the condition reported is fatal */
1370 * t3_handle_intr_status - table driven interrupt handler
1371 * @adapter: the adapter that generated the interrupt
1372 * @reg: the interrupt status register to process
1373 * @mask: a mask to apply to the interrupt status
1374 * @acts: table of interrupt actions
 * @stats: statistics counters tracking interrupt occurrences
1377 * A table driven interrupt handler that applies a set of masks to an
1378 * interrupt status word and performs the corresponding actions if the
 * interrupts described by the mask have occurred. The actions include
1380 * optionally printing a warning or alert message, and optionally
1381 * incrementing a stat counter. The table is terminated by an entry
1382 * specifying mask 0. Returns the number of fatal interrupt conditions.
1384 static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
1386 const struct intr_info *acts,
1387 unsigned long *stats)
1390 unsigned int status = t3_read_reg(adapter, reg) & mask;
1392 for (; acts->mask; ++acts) {
1393 if (!(status & acts->mask))
1397 CH_ALERT(adapter, "%s (0x%x)\n",
1398 acts->msg, status & acts->mask);
1399 } else if (acts->msg)
1400 CH_WARN(adapter, "%s (0x%x)\n",
1401 acts->msg, status & acts->mask);
1402 if (acts->stat_idx >= 0)
1403 stats[acts->stat_idx]++;
1405 if (status) /* clear processed interrupts */
1406 t3_write_reg(adapter, reg, status);
1410 #define SGE_INTR_MASK (F_RSPQDISABLED | \
1411 F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
1412 F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
1413 F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
1414 V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
1415 F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
1417 #define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
1418 F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
1420 #define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
1421 #define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1422 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
1424 #define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
1425 F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
1426 F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
1427 F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
1428 V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
1429 V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
1430 #define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
1431 F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
1432 /* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
1433 F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
1434 F_TXPARERR | V_BISTERR(M_BISTERR))
1435 #define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
1436 F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
1437 F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
1438 #define ULPTX_INTR_MASK 0xfc
1439 #define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
1440 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
1441 F_ZERO_SWITCH_ERROR)
1442 #define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
1443 F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
1444 F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
1445 F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
1446 F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
1447 F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
1448 F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
1449 F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
1450 #define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
1451 V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
1452 V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
1453 #define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
1454 V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
1455 V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
1456 #define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
1457 V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
1458 V_RXTPPARERRENB(M_RXTPPARERRENB) | \
1459 V_MCAPARERRENB(M_MCAPARERRENB))
1460 #define XGM_EXTRA_INTR_MASK (F_LINKFAULTCHANGE)
1461 #define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
1462 F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
1463 F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
1464 F_MPS0 | F_CPL_SWITCH)
1466 * Interrupt handler for the PCIX1 module.
1468 static void pci_intr_handler(struct adapter *adapter)
1470 static const struct intr_info pcix1_intr_info[] = {
1471 {F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
1472 {F_SIGTARABT, "PCI signaled target abort", -1, 1},
1473 {F_RCVTARABT, "PCI received target abort", -1, 1},
1474 {F_RCVMSTABT, "PCI received master abort", -1, 1},
1475 {F_SIGSYSERR, "PCI signaled system error", -1, 1},
1476 {F_DETPARERR, "PCI detected parity error", -1, 1},
1477 {F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
1478 {F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
1479 {F_RCVSPLCMPERR, "PCI received split completion error", -1,
1481 {F_DETCORECCERR, "PCI correctable ECC error",
1482 STAT_PCI_CORR_ECC, 0},
1483 {F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
1484 {F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
1485 {V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
1487 {V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
1489 {V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
1491 {V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
1496 if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
1497 pcix1_intr_info, adapter->irq_stats))
1498 t3_fatal_err(adapter);
1502 * Interrupt handler for the PCIE module.
1504 static void pcie_intr_handler(struct adapter *adapter)
1506 static const struct intr_info pcie_intr_info[] = {
1507 {F_PEXERR, "PCI PEX error", -1, 1},
1509 "PCI unexpected split completion DMA read error", -1, 1},
1511 "PCI unexpected split completion DMA command error", -1, 1},
1512 {F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
1513 {F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
1514 {F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
1515 {F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
1516 {V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
1517 "PCI MSI-X table/PBA parity error", -1, 1},
1518 {F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1},
1519 {F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1},
1520 {F_RXPARERR, "PCI Rx parity error", -1, 1},
1521 {F_TXPARERR, "PCI Tx parity error", -1, 1},
1522 {V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
1526 if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
1527 CH_ALERT(adapter, "PEX error code 0x%x\n",
1528 t3_read_reg(adapter, A_PCIE_PEX_ERR));
1530 if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
1531 pcie_intr_info, adapter->irq_stats))
1532 t3_fatal_err(adapter);
1536 * TP interrupt handler.
1538 static void tp_intr_handler(struct adapter *adapter)
1540 static const struct intr_info tp_intr_info[] = {
1541 {0xffffff, "TP parity error", -1, 1},
1542 {0x1000000, "TP out of Rx pages", -1, 1},
1543 {0x2000000, "TP out of Tx pages", -1, 1},
1547 static struct intr_info tp_intr_info_t3c[] = {
1548 {0x1fffffff, "TP parity error", -1, 1},
1549 {F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1},
1550 {F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1},
1554 if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
1555 adapter->params.rev < T3_REV_C ?
1556 tp_intr_info : tp_intr_info_t3c, NULL))
1557 t3_fatal_err(adapter);
1561 * CIM interrupt handler.
1563 static void cim_intr_handler(struct adapter *adapter)
1565 static const struct intr_info cim_intr_info[] = {
1566 {F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
1567 {F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
1568 {F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
1569 {F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
1570 {F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
1571 {F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
1572 {F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
1573 {F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
1574 {F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
1575 {F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
1576 {F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
1577 {F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
1578 {F_DRAMPARERR, "CIM DRAM parity error", -1, 1},
1579 {F_ICACHEPARERR, "CIM icache parity error", -1, 1},
1580 {F_DCACHEPARERR, "CIM dcache parity error", -1, 1},
1581 {F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1},
1582 {F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1},
1583 {F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1},
1584 {F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1},
1585 {F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1},
1586 {F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1},
1587 {F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1},
1588 {F_ITAGPARERR, "CIM itag parity error", -1, 1},
1589 {F_DTAGPARERR, "CIM dtag parity error", -1, 1},
1593 if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
1594 cim_intr_info, NULL))
1595 t3_fatal_err(adapter);
1599 * ULP RX interrupt handler.
1601 static void ulprx_intr_handler(struct adapter *adapter)
1603 static const struct intr_info ulprx_intr_info[] = {
1604 {F_PARERRDATA, "ULP RX data parity error", -1, 1},
1605 {F_PARERRPCMD, "ULP RX command parity error", -1, 1},
1606 {F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1},
1607 {F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1},
1608 {F_ARBFPERR, "ULP RX ArbF parity error", -1, 1},
1609 {F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1},
1610 {F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1},
1611 {F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1},
1615 if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
1616 ulprx_intr_info, NULL))
1617 t3_fatal_err(adapter);
1621 * ULP TX interrupt handler.
1623 static void ulptx_intr_handler(struct adapter *adapter)
1625 static const struct intr_info ulptx_intr_info[] = {
1626 {F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
1627 STAT_ULP_CH0_PBL_OOB, 0},
1628 {F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
1629 STAT_ULP_CH1_PBL_OOB, 0},
1630 {0xfc, "ULP TX parity error", -1, 1},
1634 if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
1635 ulptx_intr_info, adapter->irq_stats))
1636 t3_fatal_err(adapter);
1639 #define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
1640 F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
1641 F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
1642 F_ICSPI1_TX_FRAMING_ERROR)
1643 #define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
1644 F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
1645 F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
1646 F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
1649 * PM TX interrupt handler.
1651 static void pmtx_intr_handler(struct adapter *adapter)
1653 static const struct intr_info pmtx_intr_info[] = {
1654 {F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
1655 {ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
1656 {OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
1657 {V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
1658 "PMTX ispi parity error", -1, 1},
1659 {V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
1660 "PMTX ospi parity error", -1, 1},
1664 if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
1665 pmtx_intr_info, NULL))
1666 t3_fatal_err(adapter);
1669 #define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
1670 F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
1671 F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
1672 F_IESPI1_TX_FRAMING_ERROR)
1673 #define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
1674 F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
1675 F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
1676 F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
1679 * PM RX interrupt handler.
1681 static void pmrx_intr_handler(struct adapter *adapter)
1683 static const struct intr_info pmrx_intr_info[] = {
1684 {F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
1685 {IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
1686 {OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
1687 {V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
1688 "PMRX ispi parity error", -1, 1},
1689 {V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
1690 "PMRX ospi parity error", -1, 1},
1694 if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
1695 pmrx_intr_info, NULL))
1696 t3_fatal_err(adapter);
1700 * CPL switch interrupt handler.
1702 static void cplsw_intr_handler(struct adapter *adapter)
1704 static const struct intr_info cplsw_intr_info[] = {
1705 {F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1},
1706 {F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1},
1707 {F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
1708 {F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
1709 {F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
1710 {F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
1714 if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
1715 cplsw_intr_info, NULL))
1716 t3_fatal_err(adapter);
1720 * MPS interrupt handler.
1722 static void mps_intr_handler(struct adapter *adapter)
1724 static const struct intr_info mps_intr_info[] = {
1725 {0x1ff, "MPS parity error", -1, 1},
1729 if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
1730 mps_intr_info, NULL))
1731 t3_fatal_err(adapter);
1734 #define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)
1737 * MC7 interrupt handler.
1739 static void mc7_intr_handler(struct mc7 *mc7)
1741 struct adapter *adapter = mc7->adapter;
1742 u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);
1745 mc7->stats.corr_err++;
1746 CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
1747 "data 0x%x 0x%x 0x%x\n", mc7->name,
1748 t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
1749 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
1750 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
1751 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
1755 mc7->stats.uncorr_err++;
1756 CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
1757 "data 0x%x 0x%x 0x%x\n", mc7->name,
1758 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
1759 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
1760 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
1761 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
1765 mc7->stats.parity_err++;
1766 CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
1767 mc7->name, G_PE(cause));
1773 if (adapter->params.rev > 0)
1774 addr = t3_read_reg(adapter,
1775 mc7->offset + A_MC7_ERR_ADDR);
1776 mc7->stats.addr_err++;
1777 CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
1781 if (cause & MC7_INTR_FATAL)
1782 t3_fatal_err(adapter);
1784 t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
1787 #define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1788 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
1790 * XGMAC interrupt handler.
1792 static int mac_intr_handler(struct adapter *adap, unsigned int idx)
1794 struct cmac *mac = &adap2pinfo(adap, idx)->mac;
1796 * We mask out interrupt causes for which we're not taking interrupts.
1797 * This allows us to use polling logic to monitor some of the other
1798 * conditions when taking interrupts would impose too much load on the
1801 u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset) &
1804 if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
1805 mac->stats.tx_fifo_parity_err++;
1806 CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
1808 if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
1809 mac->stats.rx_fifo_parity_err++;
1810 CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
1812 if (cause & F_TXFIFO_UNDERRUN)
1813 mac->stats.tx_fifo_urun++;
1814 if (cause & F_RXFIFO_OVERFLOW)
1815 mac->stats.rx_fifo_ovfl++;
1816 if (cause & V_SERDES_LOS(M_SERDES_LOS))
1817 mac->stats.serdes_signal_loss++;
1818 if (cause & F_XAUIPCSCTCERR)
1819 mac->stats.xaui_pcs_ctc_err++;
1820 if (cause & F_XAUIPCSALIGNCHANGE)
1821 mac->stats.xaui_pcs_align_change++;
1822 if (cause & F_XGM_INT) {
1823 t3_set_reg_field(adap,
1824 A_XGM_INT_ENABLE + mac->offset,
1826 mac->stats.link_faults++;
1828 t3_os_link_fault_handler(adap, idx);
1831 t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
1833 if (cause & XGM_INTR_FATAL)
1840 * Interrupt handler for PHY events.
1842 int t3_phy_intr_handler(struct adapter *adapter)
1844 u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
1846 for_each_port(adapter, i) {
1847 struct port_info *p = adap2pinfo(adapter, i);
1849 if (!(p->phy.caps & SUPPORTED_IRQ))
1852 if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) {
1853 int phy_cause = p->phy.ops->intr_handler(&p->phy);
1855 if (phy_cause & cphy_cause_link_change)
1856 t3_link_changed(adapter, i);
1857 if (phy_cause & cphy_cause_fifo_error)
1858 p->phy.fifo_errors++;
1859 if (phy_cause & cphy_cause_module_change)
1860 t3_os_phymod_changed(adapter, i);
1864 t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
1869 * T3 slow path (non-data) interrupt handler.
1871 int t3_slow_intr_handler(struct adapter *adapter)
1873 u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
1875 cause &= adapter->slow_intr_mask;
1878 if (cause & F_PCIM0) {
1879 if (is_pcie(adapter))
1880 pcie_intr_handler(adapter);
1882 pci_intr_handler(adapter);
1885 t3_sge_err_intr_handler(adapter);
1886 if (cause & F_MC7_PMRX)
1887 mc7_intr_handler(&adapter->pmrx);
1888 if (cause & F_MC7_PMTX)
1889 mc7_intr_handler(&adapter->pmtx);
1890 if (cause & F_MC7_CM)
1891 mc7_intr_handler(&adapter->cm);
1893 cim_intr_handler(adapter);
1895 tp_intr_handler(adapter);
1896 if (cause & F_ULP2_RX)
1897 ulprx_intr_handler(adapter);
1898 if (cause & F_ULP2_TX)
1899 ulptx_intr_handler(adapter);
1900 if (cause & F_PM1_RX)
1901 pmrx_intr_handler(adapter);
1902 if (cause & F_PM1_TX)
1903 pmtx_intr_handler(adapter);
1904 if (cause & F_CPL_SWITCH)
1905 cplsw_intr_handler(adapter);
1907 mps_intr_handler(adapter);
1909 t3_mc5_intr_handler(&adapter->mc5);
1910 if (cause & F_XGMAC0_0)
1911 mac_intr_handler(adapter, 0);
1912 if (cause & F_XGMAC0_1)
1913 mac_intr_handler(adapter, 1);
1914 if (cause & F_T3DBG)
1915 t3_os_ext_intr_handler(adapter);
1917 /* Clear the interrupts just processed. */
1918 t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
1919 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1923 static unsigned int calc_gpio_intr(struct adapter *adap)
1925 unsigned int i, gpi_intr = 0;
1927 for_each_port(adap, i)
1928 if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) &&
1929 adapter_info(adap)->gpio_intr[i])
1930 gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i];
1935 * t3_intr_enable - enable interrupts
1936 * @adapter: the adapter whose interrupts should be enabled
1938 * Enable interrupts by setting the interrupt enable registers of the
1939 * various HW modules and then enabling the top-level interrupt
1942 void t3_intr_enable(struct adapter *adapter)
1944 static const struct addr_val_pair intr_en_avp[] = {
1945 {A_SG_INT_ENABLE, SGE_INTR_MASK},
1946 {A_MC7_INT_ENABLE, MC7_INTR_MASK},
1947 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1949 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1951 {A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
1952 {A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
1953 {A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
1954 {A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
1955 {A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
1956 {A_MPS_INT_ENABLE, MPS_INTR_MASK},
1959 adapter->slow_intr_mask = PL_INTR_MASK;
1961 t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
1962 t3_write_reg(adapter, A_TP_INT_ENABLE,
1963 adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);
1965 if (adapter->params.rev > 0) {
1966 t3_write_reg(adapter, A_CPL_INTR_ENABLE,
1967 CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
1968 t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
1969 ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
1970 F_PBL_BOUND_ERR_CH1);
1972 t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
1973 t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
1976 t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter));
1978 if (is_pcie(adapter))
1979 t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
1981 t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
1982 t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
1983 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1987 * t3_intr_disable - disable a card's interrupts
1988 * @adapter: the adapter whose interrupts should be disabled
1990 * Disable interrupts. We only disable the top-level interrupt
1991 * concentrator and the SGE data interrupts.
1993 void t3_intr_disable(struct adapter *adapter)
1995 t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
1996 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1997 adapter->slow_intr_mask = 0;
2001 * t3_intr_clear - clear all interrupts
2002 * @adapter: the adapter whose interrupts should be cleared
2004 * Clears all interrupts.
2006 void t3_intr_clear(struct adapter *adapter)
2008 static const unsigned int cause_reg_addr[] = {
2010 A_SG_RSPQ_FL_STATUS,
2013 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
2014 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
2015 A_CIM_HOST_INT_CAUSE,
2028 /* Clear PHY and MAC interrupts for each port. */
2029 for_each_port(adapter, i)
2030 t3_port_intr_clear(adapter, i);
2032 for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
2033 t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
2035 if (is_pcie(adapter))
2036 t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
2037 t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
2038 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
2041 void t3_xgm_intr_enable(struct adapter *adapter, int idx)
2043 struct port_info *pi = adap2pinfo(adapter, idx);
2045 t3_write_reg(adapter, A_XGM_XGM_INT_ENABLE + pi->mac.offset,
2046 XGM_EXTRA_INTR_MASK);
2049 void t3_xgm_intr_disable(struct adapter *adapter, int idx)
2051 struct port_info *pi = adap2pinfo(adapter, idx);
2053 t3_write_reg(adapter, A_XGM_XGM_INT_DISABLE + pi->mac.offset,
2058 * t3_port_intr_enable - enable port-specific interrupts
2059 * @adapter: associated adapter
2060 * @idx: index of port whose interrupts should be enabled
2062 * Enable port-specific (i.e., MAC and PHY) interrupts for the given
2065 void t3_port_intr_enable(struct adapter *adapter, int idx)
2067 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
2069 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
2070 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
2071 phy->ops->intr_enable(phy);
2075 * t3_port_intr_disable - disable port-specific interrupts
2076 * @adapter: associated adapter
2077 * @idx: index of port whose interrupts should be disabled
2079 * Disable port-specific (i.e., MAC and PHY) interrupts for the given
2082 void t3_port_intr_disable(struct adapter *adapter, int idx)
2084 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
2086 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
2087 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
2088 phy->ops->intr_disable(phy);
2092 * t3_port_intr_clear - clear port-specific interrupts
2093 * @adapter: associated adapter
2094 * @idx: index of port whose interrupts to clear
2096 * Clear port-specific (i.e., MAC and PHY) interrupts for the given
2099 void t3_port_intr_clear(struct adapter *adapter, int idx)
2101 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
2103 t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
2104 t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
2105 phy->ops->intr_clear(phy);
2108 #define SG_CONTEXT_CMD_ATTEMPTS 100
2111 * t3_sge_write_context - write an SGE context
2112 * @adapter: the adapter
2113 * @id: the context id
2114 * @type: the context type
2116 * Program an SGE context with the values already loaded in the
2117 * CONTEXT_DATA? registers.
2119 static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
2122 if (type == F_RESPONSEQ) {
2124 * Can't write the Response Queue Context bits for
2125 * Interrupt Armed or the Reserve bits after the chip
2126 * has been initialized out of reset. Writing to these
2127 * bits can confuse the hardware.
2129 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2130 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2131 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0x17ffffff);
2132 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
2134 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2135 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2136 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
2137 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
2139 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2140 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2141 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2142 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2146 * clear_sge_ctxt - completely clear an SGE context
2147 * @adapter: the adapter
2148 * @id: the context id
2149 * @type: the context type
2151 * Completely clear an SGE context. Used predominantly at post-reset
2152 * initialization. Note in particular that we don't skip writing to any
2153 * "sensitive bits" in the contexts the way that t3_sge_write_context()
2156 static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
2159 t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
2160 t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
2161 t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
2162 t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
2163 t3_write_reg(adap, A_SG_CONTEXT_MASK0, 0xffffffff);
2164 t3_write_reg(adap, A_SG_CONTEXT_MASK1, 0xffffffff);
2165 t3_write_reg(adap, A_SG_CONTEXT_MASK2, 0xffffffff);
2166 t3_write_reg(adap, A_SG_CONTEXT_MASK3, 0xffffffff);
2167 t3_write_reg(adap, A_SG_CONTEXT_CMD,
2168 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2169 return t3_wait_op_done(adap, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2170 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2174 * t3_sge_init_ecntxt - initialize an SGE egress context
2175 * @adapter: the adapter to configure
2176 * @id: the context id
2177 * @gts_enable: whether to enable GTS for the context
2178 * @type: the egress context type
2179 * @respq: associated response queue
2180 * @base_addr: base address of queue
2181 * @size: number of queue entries
2183 * @gen: initial generation value for the context
2184 * @cidx: consumer pointer
2186 * Initialize an SGE egress context and make it ready for use. If the
2187 * platform allows concurrent context operations, the caller is
2188 * responsible for appropriate locking.
2190 int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
2191 enum sge_context_type type, int respq, u64 base_addr,
2192 unsigned int size, unsigned int token, int gen,
2195 unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
2197 if (base_addr & 0xfff) /* must be 4K aligned */
2199 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2203 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
2204 V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
2205 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
2206 V_EC_BASE_LO(base_addr & 0xffff));
2208 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
2210 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2211 V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
2212 V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
2214 return t3_sge_write_context(adapter, id, F_EGRESS);
2218 * t3_sge_init_flcntxt - initialize an SGE free-buffer list context
2219 * @adapter: the adapter to configure
2220 * @id: the context id
2221 * @gts_enable: whether to enable GTS for the context
2222 * @base_addr: base address of queue
2223 * @size: number of queue entries
2224 * @bsize: size of each buffer for this queue
2225 * @cong_thres: threshold to signal congestion to upstream producers
2226 * @gen: initial generation value for the context
2227 * @cidx: consumer pointer
2229 * Initialize an SGE free list context and make it ready for use. The
2230 * caller is responsible for ensuring only one context operation occurs
/* Program the context words of an SGE free-buffer list and commit them
 * with a write-context command.
 */
2233 int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
2234 int gts_enable, u64 base_addr, unsigned int size,
2235 unsigned int bsize, unsigned int cong_thres, int gen,
2238 if (base_addr & 0xfff) /* must be 4K aligned */
/* a previous context operation must have completed first */
2240 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2244 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
/* cidx and bsize each straddle two context words (LO/HI halves) */
2246 t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
2247 V_FL_BASE_HI((u32) base_addr) |
2248 V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
2249 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
2250 V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
2251 V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
2252 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2253 V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
2254 V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
2255 return t3_sge_write_context(adapter, id, F_FREELIST);
2259 * t3_sge_init_rspcntxt - initialize an SGE response queue context
2260 * @adapter: the adapter to configure
2261 * @id: the context id
2262 * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
2263 * @base_addr: base address of queue
2264 * @size: number of queue entries
2265 * @fl_thres: threshold for selecting the normal or jumbo free list
2266 * @gen: initial generation value for the context
2267 * @cidx: consumer pointer
2269 * Initialize an SGE response queue context and make it ready for use.
2270 * The caller is responsible for ensuring only one context operation
/* Program the context words of an SGE response queue; interrupt delivery
 * is enabled only when a non-negative MSI-X vector index is supplied.
 */
2273 int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
2274 int irq_vec_idx, u64 base_addr, unsigned int size,
2275 unsigned int fl_thres, int gen, unsigned int cidx)
2277 unsigned int intr = 0;
2279 if (base_addr & 0xfff) /* must be 4K aligned */
2281 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2285 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
2287 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
/* irq_vec_idx < 0 means no interrupt: leave intr at 0 */
2289 if (irq_vec_idx >= 0)
2290 intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
2291 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2292 V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
2293 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
2294 return t3_sge_write_context(adapter, id, F_RESPONSEQ);
2298 * t3_sge_init_cqcntxt - initialize an SGE completion queue context
2299 * @adapter: the adapter to configure
2300 * @id: the context id
2301 * @base_addr: base address of queue
2302 * @size: number of queue entries
2303 * @rspq: response queue for async notifications
2304 * @ovfl_mode: CQ overflow mode
2305 * @credits: completion queue credits
2306 * @credit_thres: the credit threshold
2308 * Initialize an SGE completion queue context and make it ready for use.
2309 * The caller is responsible for ensuring only one context operation
/* Program the context words of an SGE completion queue.  ovfl_mode also
 * seeds the CQ error bit (V_CQ_ERR) as written below.
 */
2312 int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
2313 unsigned int size, int rspq, int ovfl_mode,
2314 unsigned int credits, unsigned int credit_thres)
2316 if (base_addr & 0xfff) /* must be 4K aligned */
2318 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2322 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
2323 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
/* generation starts at 1 for a freshly initialized CQ */
2325 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2326 V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
2327 V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
2328 V_CQ_ERR(ovfl_mode));
2329 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
2330 V_CQ_CREDIT_THRES(credit_thres));
2331 return t3_sge_write_context(adapter, id, F_CQ);
2335 * t3_sge_enable_ecntxt - enable/disable an SGE egress context
2336 * @adapter: the adapter
2337 * @id: the egress context id
2338 * @enable: enable (1) or disable (0) the context
2340 * Enable or disable an SGE egress context. The caller is responsible for
2341 * ensuring only one context operation occurs at a time.
/* Flip only the EC_VALID bit of an egress context: the MASK registers
 * select which context bits a partial write-context command updates, so
 * everything except F_EC_VALID is left untouched.
 */
2343 int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
2345 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2348 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2349 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2350 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2351 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
2352 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
/* opcode 1 = write context; then poll until the command completes */
2353 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2354 V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
2355 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2356 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2360 * t3_sge_disable_fl - disable an SGE free-buffer list
2361 * @adapter: the adapter
2362 * @id: the free list context id
2364 * Disable an SGE free-buffer list. The caller is responsible for
2365 * ensuring only one context operation occurs at a time.
/* Disable a free list by masking in only its FL_SIZE field and writing
 * it to 0 — a zero-sized free list is effectively off.
 */
2367 int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
2369 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2372 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2373 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2374 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
2375 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2376 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
2377 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2378 V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
2379 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2380 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2384 * t3_sge_disable_rspcntxt - disable an SGE response queue
2385 * @adapter: the adapter
2386 * @id: the response queue context id
2388 * Disable an SGE response queue. The caller is responsible for
2389 * ensuring only one context operation occurs at a time.
/* Disable a response queue by writing a zero CQ_SIZE through a masked
 * partial context update.
 */
2391 int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
2393 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2396 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2397 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2398 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2399 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2400 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2401 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2402 V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2403 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2404 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2408 * t3_sge_disable_cqcntxt - disable an SGE completion queue
2409 * @adapter: the adapter
2410 * @id: the completion queue context id
2412 * Disable an SGE completion queue. The caller is responsible for
2413 * ensuring only one context operation occurs at a time.
/* Disable a completion queue — identical masking scheme to
 * t3_sge_disable_rspcntxt() but targeting the CQ context type.
 */
2415 int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
2417 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2420 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2421 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2422 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2423 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2424 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2425 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2426 V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2427 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2428 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2432 * t3_sge_cqcntxt_op - perform an operation on a completion queue context
2433 * @adapter: the adapter
2434 * @id: the context id
2435 * @op: the operation to perform
2437 * Perform the selected operation on an SGE completion queue context.
2438 * The caller is responsible for ensuring only one context operation
/* Issue a CQ context command; for opcodes 2..6 the CQ consumer index is
 * returned — rev 0 parts must re-read it with an explicit read-context
 * (opcode 0) because the op itself does not return a usable index.
 */
2441 int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
2442 unsigned int credits)
2446 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* credits ride in the upper half of DATA0 for credit-return ops */
2449 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2450 t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2451 V_CONTEXT(id) | F_CQ);
2452 if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2453 0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
2456 if (op >= 2 && op < 7) {
2457 if (adapter->params.rev > 0)
2458 return G_CQ_INDEX(val);
/* rev 0 workaround: fetch the index via a separate read-context */
2460 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2461 V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2462 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2463 F_CONTEXT_CMD_BUSY, 0,
2464 SG_CONTEXT_CMD_ATTEMPTS, 1))
2466 return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2472 * t3_sge_read_context - read an SGE context
2473 * @type: the context type
2474 * @adapter: the adapter
2475 * @id: the context id
2476 * @data: holds the retrieved context
2478 * Read an SGE egress context. The caller is responsible for ensuring
2479 * only one context operation occurs at a time.
/* Common helper for the read wrappers below: issue a read-context
 * command (opcode 0) for the given context type/id, wait for it to
 * complete, then copy the four DATA registers into data[].
 */
2481 static int t3_sge_read_context(unsigned int type, struct adapter *adapter,
2482 unsigned int id, u32 data[4])
2484 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2487 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2488 V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
2489 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
2490 SG_CONTEXT_CMD_ATTEMPTS, 1))
2492 data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
2493 data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
2494 data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
2495 data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
2500 * t3_sge_read_ecntxt - read an SGE egress context
2501 * @adapter: the adapter
2502 * @id: the context id
2503 * @data: holds the retrieved context
2505 * Read an SGE egress context. The caller is responsible for ensuring
2506 * only one context operation occurs at a time.
/* Thin wrapper: read an egress context (id range check elided here). */
2508 int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4])
2512 return t3_sge_read_context(F_EGRESS, adapter, id, data);
2516 * t3_sge_read_cq - read an SGE CQ context
2517 * @adapter: the adapter
2518 * @id: the context id
2519 * @data: holds the retrieved context
2521 * Read an SGE CQ context. The caller is responsible for ensuring
2522 * only one context operation occurs at a time.
/* Thin wrapper: read a completion-queue context. */
2524 int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4])
2528 return t3_sge_read_context(F_CQ, adapter, id, data);
2532 * t3_sge_read_fl - read an SGE free-list context
2533 * @adapter: the adapter
2534 * @id: the context id
2535 * @data: holds the retrieved context
2537 * Read an SGE free-list context. The caller is responsible for ensuring
2538 * only one context operation occurs at a time.
/* Thin wrapper: read a free-list context (two free lists per qset). */
2540 int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4])
2542 if (id >= SGE_QSETS * 2)
2544 return t3_sge_read_context(F_FREELIST, adapter, id, data);
2548 * t3_sge_read_rspq - read an SGE response queue context
2549 * @adapter: the adapter
2550 * @id: the context id
2551 * @data: holds the retrieved context
2553 * Read an SGE response queue context. The caller is responsible for
2554 * ensuring only one context operation occurs at a time.
/* Thin wrapper: read a response-queue context (one per qset). */
2556 int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4])
2558 if (id >= SGE_QSETS)
2560 return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2564 * t3_config_rss - configure Rx packet steering
2565 * @adapter: the adapter
2566 * @rss_config: RSS settings (written to TP_RSS_CONFIG)
2567 * @cpus: values for the CPU lookup table (0xff terminated)
2568 * @rspq: values for the response queue lookup table (0xffff terminated)
2570 * Programs the receive packet steering logic. @cpus and @rspq provide
2571 * the values for the CPU and response queue lookup tables. If they
2572 * provide fewer values than the size of the tables the supplied values
2573 * are used repeatedly until the tables are fully populated.
/* Fill the RSS CPU and response-queue lookup tables, recycling the
 * supplied values when the terminator (0xff / 0xffff) is reached before
 * the table is full, then write the global RSS configuration.
 */
2575 void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
2576 const u8 * cpus, const u16 *rspq)
2578 int i, j, cpu_idx = 0, q_idx = 0;
2581 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
/* pack two 6-bit CPU values per lookup-table entry */
2584 for (j = 0; j < 2; ++j) {
2585 val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
2586 if (cpus[cpu_idx] == 0xff)
2589 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
2593 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
/* upper half of the write selects the map-table index */
2594 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2595 (i << 16) | rspq[q_idx++]);
2596 if (rspq[q_idx] == 0xffff)
2600 t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
2604 * t3_read_rss - read the contents of the RSS tables
2605 * @adapter: the adapter
2606 * @lkup: holds the contents of the RSS lookup table
2607 * @map: holds the contents of the RSS map table
2609 * Reads the contents of the receive packet steering tables.
/* Read back both RSS tables; bit 31 of each read-back acts as a valid
 * flag — its absence indicates a failed table access.
 */
2611 int t3_read_rss(struct adapter *adapter, u8 * lkup, u16 *map)
2617 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2618 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
2620 val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
2621 if (!(val & 0x80000000))
/* each entry holds two packed CPU values; the second is in bits 15:8 */
2624 *lkup++ = (val >> 8);
2628 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2629 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2631 val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
2632 if (!(val & 0x80000000))
2640 * t3_tp_set_offload_mode - put TP in NIC/offload mode
2641 * @adap: the adapter
2642 * @enable: 1 to select offload mode, 0 for regular NIC
2644 * Switches TP to NIC/offload mode.
/* Toggle TP NIC mode; the guard skips disabling NIC mode on adapters
 * without offload support (V_NICMODE(!enable): NIC mode is the inverse
 * of offload mode).
 */
2646 void t3_tp_set_offload_mode(struct adapter *adap, int enable)
2648 if (is_offload(adap) || !enable)
2649 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2650 V_NICMODE(!enable));
2654 * pm_num_pages - calculate the number of pages of the payload memory
2655 * @mem_size: the size of the payload memory
2656 * @pg_size: the size of each payload memory page
2658 * Calculate the number of pages, each of the given size, that fit in a
2659 * memory of the specified size, respecting the HW requirement that the
2660 * number of pages must be a multiple of 24.
/* Number of pg_size pages fitting in mem_size, rounded down to a
 * multiple of 24 as the HW requires (rounding lines elided in this
 * listing).
 */
2662 static inline unsigned int pm_num_pages(unsigned int mem_size,
2663 unsigned int pg_size)
2665 unsigned int n = mem_size / pg_size;
/* Writes a region base register and (in the elided continuation)
 * advances the caller's running offset by the region size.
 */
2670 #define mem_region(adap, start, size, reg) \
2671 t3_write_reg((adap), A_ ## reg, (start)); \
2675 * partition_mem - partition memory and configure TP memory settings
2676 * @adap: the adapter
2677 * @p: the TP parameters
2679 * Partitions context and payload memory and configures TP's memory
/* Carve up CM/PM memory between TCBs, SGE contexts, timers, pstructs and
 * free lists, program the corresponding base registers, and finally
 * shrink the MC5 server region if the available connection memory
 * supports fewer tids than the MC5 was sized for.
 */
2682 static void partition_mem(struct adapter *adap, const struct tp_params *p)
2684 unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
2685 unsigned int timers = 0, timers_shift = 22;
/* rev > 0: scale the timer region with the number of tids */
2687 if (adap->params.rev > 0) {
2688 if (tids <= 16 * 1024) {
2691 } else if (tids <= 64 * 1024) {
2694 } else if (tids <= 256 * 1024) {
2700 t3_write_reg(adap, A_TP_PMM_SIZE,
2701 p->chan_rx_size | (p->chan_tx_size >> 16));
2703 t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
2704 t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
2705 t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
2706 t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
2707 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
2709 t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
2710 t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
2711 t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
2713 pstructs = p->rx_num_pgs + p->tx_num_pgs;
2714 /* Add a bit of headroom and make multiple of 24 */
2716 pstructs -= pstructs % 24;
2717 t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
/* m tracks the running offset into CM memory as regions are placed */
2719 m = tids * TCB_SIZE;
2720 mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
2721 mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
2722 t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
2723 m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
2724 mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
2725 mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
2726 mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
2727 mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
/* the CIM region must start on a 4K boundary */
2729 m = (m + 4095) & ~0xfff;
2730 t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
2731 t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
/* how many tids the remaining CM space can actually support */
2733 tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
2734 m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
2735 adap->params.mc5.nfilters - adap->params.mc5.nroutes;
2737 adap->params.mc5.nservers += m - tids;
/* Write a TP register through the PIO indirect-access pair. */
2740 static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
2743 t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2744 t3_write_reg(adap, A_TP_PIO_DATA, val);
/* One-time TP (TCP protocol engine) configuration: checksum offload,
 * TCP options, delayed-ACK behavior, congestion settings, and several
 * chip-revision-specific pacing fixups.
 */
2747 static void tp_config(struct adapter *adap, const struct tp_params *p)
2749 t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
2750 F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
2751 F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
2752 t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
2753 F_MTUENABLE | V_WINDOWSCALEMODE(1) |
2754 V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
2755 t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
2756 V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
2757 V_BYTETHRESHOLD(26880) | V_MSSTHRESHOLD(2) |
2758 F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
2759 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
2760 F_IPV6ENABLE | F_NICMODE);
2761 t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
2762 t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
2763 t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
2764 adap->params.rev > 0 ? F_ENABLEESND :
2767 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2769 F_ENABLEOCSPIFULL |F_TXDEFERENABLE | F_HEARBEATDACK |
2770 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
2771 t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
2772 F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
2773 F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
/* two successive writes set on/off flow-control watermarks in turn */
2774 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
2775 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);
/* rev > 0 parts support automatic TX pacing; rev 0 uses fixed pacing */
2777 if (adap->params.rev > 0) {
2778 tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
2779 t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
2781 t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
2782 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
2784 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
2786 if (adap->params.rev == T3_REV_C)
2787 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2788 V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
2789 V_TABLELATENCYDELTA(4));
2791 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
2792 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
2793 t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
2794 t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
2797 /* Desired TP timer resolution in usec */
2798 #define TP_TMR_RES 50
2800 /* TCP timer values in ms */
2801 #define TP_DACK_TIMER 50
2802 #define TP_RTO_MIN 250
2805 * tp_set_timers - set TP timing parameters
2806 * @adap: the adapter to set
2807 * @core_clk: the core clock frequency in Hz
2809 * Set TP's timing parameters, such as the various timer resolutions and
2810 * the TCP timer values.
/* Derive timer resolutions from the core clock and program all TP TCP
 * timers.  tps = timer ticks per second at the chosen resolution; the
 * SECONDS macro converts whole seconds to ticks.
 */
2812 static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
2814 unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
2815 unsigned int dack_re = fls(core_clk / 5000) - 1; /* 200us */
2816 unsigned int tstamp_re = fls(core_clk / 1000); /* 1ms, at least */
2817 unsigned int tps = core_clk >> tre;
2819 t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
2820 V_DELAYEDACKRESOLUTION(dack_re) |
2821 V_TIMESTAMPRESOLUTION(tstamp_re));
2822 t3_write_reg(adap, A_TP_DACK_TIMER,
2823 (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
/* exponential-backoff shift table, packed four entries per register */
2824 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
2825 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
2826 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
2827 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
2828 t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
2829 V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
2830 V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
2833 #define SECONDS * tps
2835 t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
2836 t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
2837 t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
2838 t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
2839 t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
2840 t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
2841 t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
2842 t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
2843 t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
2849 * t3_tp_set_coalescing_size - set receive coalescing size
2850 * @adap: the adapter
2851 * @size: the receive coalescing size
2852 * @psh: whether a set PSH bit should deliver coalesced data
2854 * Set the receive coalescing size and PSH bit handling.
/* Enable/disable RX coalescing: size 0 disables it; otherwise the size
 * (capped at MAX_RX_COALESCING_LEN) and optional PSH handling are
 * programmed before the enable bits are committed.
 */
2856 int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh)
2860 if (size > MAX_RX_COALESCING_LEN)
2863 val = t3_read_reg(adap, A_TP_PARA_REG3);
2864 val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
2867 val |= F_RXCOALESCEENABLE;
2869 val |= F_RXCOALESCEPSHEN;
2870 size = min(MAX_RX_COALESCING_LEN, size);
2871 t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
2872 V_MAXRXDATA(MAX_RX_COALESCING_LEN));
2874 t3_write_reg(adap, A_TP_PARA_REG3, val);
2879 * t3_tp_set_max_rxsize - set the max receive size
2880 * @adap: the adapter
2881 * @size: the max receive size
2883 * Set TP's max receive size. This is the limit that applies when
2884 * receive coalescing is disabled.
/* Program the per-channel max PM transfer length used when RX
 * coalescing is off.
 */
2886 void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
2888 t3_write_reg(adap, A_TP_PARA_REG7,
2889 V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
/* Populate the default MTU table (table body elided in this listing). */
2892 static void init_mtus(unsigned short mtus[])
2895 * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
2896 * it can accommodate max size TCP/IP headers when SACK and timestamps
2897 * are enabled and still have at least 8 bytes of payload.
2918 * Initial congestion control parameters.
/* Default additive-increment (a[]) and beta (b[]) congestion-control
 * parameters, indexed by congestion window bucket (some assignments are
 * elided in this listing).
 */
2920 static void init_cong_ctrl(unsigned short *a, unsigned short *b)
2922 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2947 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2950 b[13] = b[14] = b[15] = b[16] = 3;
2951 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2952 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2957 /* The minimum additive increment value for the congestion control table */
2958 #define CC_MIN_INCR 2U
2961 * t3_load_mtus - write the MTU and congestion control HW tables
2962 * @adap: the adapter
2963 * @mtus: the unrestricted values for the MTU table
2964 * @alpha: the values for the congestion control alpha parameter
2965 * @beta: the values for the congestion control beta parameter
2966 * @mtu_cap: the maximum permitted effective MTU
2968 * Write the MTU table with the supplied MTUs capping each at &mtu_cap.
2969 * Update the high-speed congestion control table with the supplied alpha,
/* Write the HW MTU table (capping each entry at mtu_cap) and, per MTU,
 * compute and write the per-window congestion-control increments from
 * alpha/beta and the expected packets-per-window table.
 */
2972 void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
2973 unsigned short alpha[NCCTRL_WIN],
2974 unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
/* expected packet count per congestion window bucket */
2976 static const unsigned int avg_pkts[NCCTRL_WIN] = {
2977 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2978 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2979 28672, 40960, 57344, 81920, 114688, 163840, 229376
2984 for (i = 0; i < NMTUS; ++i) {
2985 unsigned int mtu = min(mtus[i], mtu_cap);
2986 unsigned int log2 = fls(mtu);
2988 if (!(mtu & ((1 << log2) >> 2))) /* round */
2990 t3_write_reg(adap, A_TP_MTU_TABLE,
2991 (i << 24) | (log2 << 16) | mtu);
2993 for (w = 0; w < NCCTRL_WIN; ++w) {
/* mtu - 40 = payload after standard TCP/IP headers */
2996 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2999 t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
3000 (w << 16) | (beta[w] << 13) | inc);
3006 * t3_read_hw_mtus - returns the values in the HW MTU table
3007 * @adap: the adapter
3008 * @mtus: where to store the HW MTU values
3010 * Reads the HW MTU table.
/* Read back the HW MTU table; writing 0xff000000 | i selects entry i
 * for read-back, and the MTU occupies the low 14 bits.
 */
3012 void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS])
3016 for (i = 0; i < NMTUS; ++i) {
3019 t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
3020 val = t3_read_reg(adap, A_TP_MTU_TABLE);
3021 mtus[i] = val & 0x3fff;
3026 * t3_get_cong_cntl_tab - reads the congestion control table
3027 * @adap: the adapter
3028 * @incr: where to store the alpha values
3030 * Reads the additive increments programmed into the HW congestion
/* Read the programmed additive increments for every (MTU, window)
 * entry; 0xffff0000 in the write selects read-back mode.
 */
3033 void t3_get_cong_cntl_tab(struct adapter *adap,
3034 unsigned short incr[NMTUS][NCCTRL_WIN])
3036 unsigned int mtu, w;
3038 for (mtu = 0; mtu < NMTUS; ++mtu)
3039 for (w = 0; w < NCCTRL_WIN; ++w) {
3040 t3_write_reg(adap, A_TP_CCTRL_TABLE,
3041 0xffff0000 | (mtu << 5) | w);
3042 incr[mtu][w] = t3_read_reg(adap, A_TP_CCTRL_TABLE) &
3048 * t3_tp_get_mib_stats - read TP's MIB counters
3049 * @adap: the adapter
3050 * @tps: holds the returned counter values
3052 * Returns the values of TP's MIB counters.
/* Bulk-read the TP MIB counters straight into *tps via the indirect
 * index/data register pair.
 */
3054 void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
3056 t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
3057 sizeof(*tps) / sizeof(u32), 0);
/* Program the lower/upper limit registers of a ULP RX (ulp_region) or
 * ULP TX (ulptx_region) memory region; ulp_region's elided continuation
 * presumably advances the caller's offset.  No comments are inserted
 * between the backslash-continued lines below — they would be absorbed
 * into the macro body.
 */
3060 #define ulp_region(adap, name, start, len) \
3061 t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
3062 t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
3063 (start) + (len) - 1); \
3066 #define ulptx_region(adap, name, start, len) \
3067 t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
3068 t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
3069 (start) + (len) - 1)
/* Lay out the ULP memory regions (iSCSI, TDDP, TPT, STAG, RQ, PBL) back
 * to back starting at the RX channel size; PBL appears twice because
 * ULP TX and ULP RX each have their own PBL limit registers.
 */
3071 static void ulp_config(struct adapter *adap, const struct tp_params *p)
3073 unsigned int m = p->chan_rx_size;
3075 ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
3076 ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
3077 ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
3078 ulp_region(adap, STAG, m, p->chan_rx_size / 4);
3079 ulp_region(adap, RQ, m, p->chan_rx_size / 4);
3080 ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
3081 ulp_region(adap, PBL, m, p->chan_rx_size / 4);
3082 t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
3086 * t3_set_proto_sram - set the contents of the protocol sram
3087 * @adapter: the adapter
3088 * @data: the protocol image
3090 * Write the contents of the protocol SRAM.
/* Load the protocol SRAM one line at a time: five 32-bit words go into
 * FIELD5..FIELD1, then writing FIELD0 with the line index and the go
 * bit (1 << 31) triggers the line write.
 */
3092 int t3_set_proto_sram(struct adapter *adap, const u8 *data)
3095 const __be32 *buf = (const __be32 *)data;
3097 for (i = 0; i < PROTO_SRAM_LINES; i++) {
3098 t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, be32_to_cpu(*buf++));
3099 t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, be32_to_cpu(*buf++));
3100 t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, be32_to_cpu(*buf++));
3101 t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, be32_to_cpu(*buf++));
3102 t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, be32_to_cpu(*buf++));
3104 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
3105 if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
3108 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0);
/* Build key/mask words from the 5-tuple trace parameters and program
 * them into the TX (filter 0) or RX (filter 1) trace filter via
 * indirect TP writes.
 */
3113 void t3_config_trace_filter(struct adapter *adapter,
3114 const struct trace_params *tp, int filter_index,
3115 int invert, int enable)
3117 u32 addr, key[4], mask[4];
/* pack sport/sip/dport/dip/proto/vlan/intf into four key words */
3119 key[0] = tp->sport | (tp->sip << 16);
3120 key[1] = (tp->sip >> 16) | (tp->dport << 16);
3122 key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
3124 mask[0] = tp->sport_mask | (tp->sip_mask << 16);
3125 mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
3126 mask[2] = tp->dip_mask;
3127 mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
/* control bits live in key[3]: bit 29 and bit 28 (set from the elided
 * enable/invert tests — confirm mapping against the full source) */
3130 key[3] |= (1 << 29);
3132 key[3] |= (1 << 28);
3134 addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
3135 tp_wr_indirect(adapter, addr++, key[0]);
3136 tp_wr_indirect(adapter, addr++, mask[0]);
3137 tp_wr_indirect(adapter, addr++, key[1]);
3138 tp_wr_indirect(adapter, addr++, mask[1]);
3139 tp_wr_indirect(adapter, addr++, key[2]);
3140 tp_wr_indirect(adapter, addr++, mask[2]);
3141 tp_wr_indirect(adapter, addr++, key[3]);
3142 tp_wr_indirect(adapter, addr, mask[3]);
/* read flushes the posted indirect writes to the device */
3143 t3_read_reg(adapter, A_TP_PIO_DATA);
3147 * t3_config_sched - configure a HW traffic scheduler
3148 * @adap: the adapter
3149 * @kbps: target rate in Kbps
3150 * @sched: the scheduler index
3152 * Configure a HW scheduler for the target rate
/* Search all (clocks-per-tick, bytes-per-tick) pairs for the one whose
 * achieved rate is closest to the requested kbps, then program it into
 * the scheduler's half of the shared rate-limit register.
 */
3154 int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
3156 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
3157 unsigned int clk = adap->params.vpd.cclk * 1000;
3158 unsigned int selected_cpt = 0, selected_bpt = 0;
3161 kbps *= 125; /* -> bytes */
3162 for (cpt = 1; cpt <= 255; cpt++) {
/* round to nearest: bytes per tick for this ticks-per-second */
3164 bpt = (kbps + tps / 2) / tps;
3165 if (bpt > 0 && bpt <= 255) {
3167 delta = v >= kbps ? v - kbps : kbps - v;
3168 if (delta <= mindelta) {
/* error only grows once a candidate exists; stop early */
3173 } else if (selected_cpt)
/* two schedulers share one register; keep the other half intact */
3179 t3_write_reg(adap, A_TP_TM_PIO_ADDR,
3180 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
3181 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
3183 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
3185 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
3186 t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
/* Final TP bring-up: VLAN acceleration, timers and free-list init for
 * offload-capable adapters (with a timeout check), then release TP
 * from reset.
 */
3190 static int tp_init(struct adapter *adap, const struct tp_params *p)
3195 t3_set_vlan_accel(adap, 3, 0);
3197 if (is_offload(adap)) {
3198 tp_set_timers(adap, adap->params.vpd.cclk * 1000);
3199 t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
3200 busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
3203 CH_ERR(adap, "TP initialization timed out\n");
3207 t3_write_reg(adap, A_TP_RESET, F_TPRESET);
/* Set the MPS active-port bits; reject masks naming ports the adapter
 * does not have.
 */
3211 int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
3213 if (port_mask & ~((1 << adap->params.nports) - 1))
3215 t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
3216 port_mask << S_PORT0ACTIVE);
3221 * Perform the bits of HW initialization that are dependent on the Tx
3222 * channels being used.
/* Configure ULP arbitration, MPS port enables and PM1 TX sizing based
 * on how many TX channels are in use (chan_map == 3 means both).
 */
3224 static void chan_init_hw(struct adapter *adap, unsigned int chan_map)
3228 if (chan_map != 3) { /* one channel */
/* no arbitration needed with a single channel */
3229 t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
3230 t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
3231 t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT |
3232 (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE :
3233 F_TPTXPORT1EN | F_PORT1ACTIVE));
3234 t3_write_reg(adap, A_PM1_TX_CFG,
3235 chan_map == 1 ? 0xffffffff : 0);
3236 } else { /* two channels */
3237 t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
3238 t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
3239 t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
3240 V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
3241 t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
3242 F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
3244 t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
3245 t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
3246 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3247 V_TX_MOD_QUEUE_REQ_MAP(0xaa));
/* map TOS values to modulation queues, alternating channels */
3248 for (i = 0; i < 16; i++)
3249 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
3250 (i << 16) | 0x1010);
/* Calibrate the MAC interface impedance: for XAUI, retry the
 * self-calibration up to 5 times and program the measured value; for
 * RGMII, write fixed pull-up/pull-down settings and latch them.
 */
3254 static int calibrate_xgm(struct adapter *adapter)
3256 if (uses_xaui(adapter)) {
3259 for (i = 0; i < 5; ++i) {
3260 t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
/* read back flushes the posted write before polling */
3261 t3_read_reg(adapter, A_XGM_XAUI_IMP);
3263 v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
3264 if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
3265 t3_write_reg(adapter, A_XGM_XAUI_IMP,
3266 V_XAUIIMP(G_CALIMP(v) >> 2));
3270 CH_ERR(adapter, "MAC calibration failed\n");
3273 t3_write_reg(adapter, A_XGM_RGMII_IMP,
3274 V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
/* pulse IMPSETUPDATE to latch the impedance settings */
3275 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3276 F_XGM_IMPSETUPDATE);
/* T3B RGMII impedance calibration: reset the calibrator, then pulse the
 * IMPSETUPDATE and CALUPDATE bits in the required sequence.
 */
3281 static void calibrate_xgm_t3b(struct adapter *adapter)
3283 if (!uses_xaui(adapter)) {
3284 t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
3285 F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3286 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
3287 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
3288 F_XGM_IMPSETUPDATE);
3289 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3291 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
3292 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
/* Per-speed-grade MC7 (external memory controller) timing parameters;
 * delays/cycles are in controller clock units, RefCyc is indexed by
 * memory density.
 */
3296 struct mc7_timing_params {
3297 unsigned char ActToPreDly;
3298 unsigned char ActToRdWrDly;
3299 unsigned char PreCyc;
3300 unsigned char RefCyc[5];
3301 unsigned char BkCyc;
3302 unsigned char WrToRdDly;
3303 unsigned char RdToWrDly;
3307 * Write a value to a register and check that the write completed. These
3308 * writes normally complete in a cycle or two, so one read should suffice.
3309 * The very first read exists to flush the posted write to the device.
 *
 * Returns 0 once F_BUSY clears; logs and reports an error on timeout.
 * NOTE(review): the opening brace, retry/return statements, and closing
 * brace are not visible in this copy -- source looks truncated.
3311 static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
3313 t3_write_reg(adapter, addr, val);
3314 t3_read_reg(adapter, addr); /* flush */
3315 if (!(t3_read_reg(adapter, addr) & F_BUSY))
3317 CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
/*
 * mc7_init - initialize an MC7 memory controller and its attached DRAM
 * @mc7: the MC7 controller to initialize
 * @mc7_clock: controller clock (in KHz, per the conversion comment below)
 * @mem_type: index selecting the DDR mode-register value and timing set
 *
 * Sequence visible below: read back width/density/slow-clock config,
 * enable the interface (F_IFEN), run single calibration (F_SGL_CAL_EN),
 * program DRAM timing parameters into A_MC7_PARM, issue the DDR init
 * command sequence (precharge, extended mode registers, mode register,
 * DLL reset, refreshes), enable periodic refresh and ECC, then run a
 * full-range BIST before enabling normal accesses (F_RDY).
 *
 * NOTE(review): many lines are missing from this copy (braces, 'val'/
 * 'attempts' setup, msleep/udelay calls, 'out_fail' label and error
 * returns) -- restore from upstream t3_hw.c before building.
 */
3321 static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
3323 static const unsigned int mc7_mode[] = {
3324 0x632, 0x642, 0x652, 0x432, 0x442
3326 static const struct mc7_timing_params mc7_timings[] = {
3327 {12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
3328 {12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
3329 {12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
3330 {9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
3331 {9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
3335 unsigned int width, density, slow, attempts;
3336 struct adapter *adapter = mc7->adapter;
3337 const struct mc7_timing_params *p = &mc7_timings[mem_type];
3342 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3343 slow = val & F_SLOW;
3344 width = G_WIDTH(val);
3345 density = G_DEN(val);
3347 t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
3348 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
/* Single-shot calibration; failure or still-busy below is fatal. */
3352 t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
3353 t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
3355 if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
3356 (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
3357 CH_ERR(adapter, "%s MC7 calibration timed out\n",
/* Program DRAM timing parameters for this memory type/density. */
3363 t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
3364 V_ACTTOPREDLY(p->ActToPreDly) |
3365 V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
3366 V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
3367 V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
3369 t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
3370 val | F_CLKEN | F_TERM150);
3371 t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
3374 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
/* DDR init command sequence: precharge, EMRS2/3, EMRS1. */
3379 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3380 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
3381 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
3382 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3386 t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
3387 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
3391 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3392 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3393 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3394 wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
3395 mc7_mode[mem_type]) ||
3396 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
3397 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3400 /* clock value is in KHz */
3401 mc7_clock = mc7_clock * 7812 + mc7_clock / 2; /* ns */
3402 mc7_clock /= 1000000; /* KHz->MHz, ns->us */
3404 t3_write_reg(adapter, mc7->offset + A_MC7_REF,
3405 F_PERREFEN | V_PREREFDIV(mc7_clock));
3406 t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */
3408 t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
/* BIST over the full address range to zero/verify the memory. */
3409 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
3410 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
3411 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
3412 (mc7->size << width) - 1);
3413 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
3414 t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */
3419 val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
3420 } while ((val & F_BUSY) && --attempts);
3422 CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
3426 /* Enable normal memory accesses. */
3427 t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
/*
 * config_pcie - tune PCIe ACK latency and replay timer.
 *
 * Looks up ACK latency and replay-timer limits from tables indexed by
 * [log2(link width)][max payload size], adds fast-training-sequence
 * overhead (and L0s overhead when enabled in LNKCTL), programs them
 * into PEX_CTRL0/1 (rev-0 parts use the T3A field layout), clears any
 * latched PEX errors, and enables link-down reset plus DMA-stop/
 * completion-with-error handling in A_PCIE_CFG.
 *
 * NOTE(review): lines missing in this copy ('u16 val' declaration,
 * braces, pldsize clamping) -- compare against upstream t3_hw.c.
 */
3434 static void config_pcie(struct adapter *adap)
3436 static const u16 ack_lat[4][6] = {
3437 {237, 416, 559, 1071, 2095, 4143},
3438 {128, 217, 289, 545, 1057, 2081},
3439 {73, 118, 154, 282, 538, 1050},
3440 {67, 107, 86, 150, 278, 534}
3442 static const u16 rpl_tmr[4][6] = {
3443 {711, 1248, 1677, 3213, 6285, 12429},
3444 {384, 651, 867, 1635, 3171, 6243},
3445 {219, 354, 462, 846, 1614, 3150},
3446 {201, 321, 258, 450, 834, 1602}
3450 unsigned int log2_width, pldsize;
3451 unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
3453 pci_read_config_word(adap->pdev,
3454 adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
3456 pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
3457 pci_read_config_word(adap->pdev,
3458 adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
3461 fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
3462 fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
3463 G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
3464 log2_width = fls(adap->params.pci.width) - 1;
3465 acklat = ack_lat[log2_width][pldsize];
3466 if (val & 1) /* check LOsEnable */
3467 acklat += fst_trn_tx * 4;
3468 rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
3470 if (adap->params.rev == 0)
3471 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
3472 V_T3A_ACKLAT(M_T3A_ACKLAT),
3473 V_T3A_ACKLAT(acklat));
3475 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
3478 t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
3479 V_REPLAYLMT(rpllmt));
3481 t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
3482 t3_set_reg_field(adap, A_PCIE_CFG, 0,
3483 F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
3484 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
3488 * Initialize and configure T3 HW modules. This performs the
3489 * initialization steps that need to be done once after a card is reset.
 * MAC and PHY initialization is handled separately whenever a port is enabled.
3492 * fw_params are passed to FW and their value is platform dependent. Only the
3493 * top 8 bits are available for use, the rest must be 0.
 *
 * Returns 0 on success, negative errno otherwise (err starts at -EIO).
 * NOTE(review): many lines are missing from this copy (opening brace,
 * 'goto out' error paths and labels, attempts setup, udelay in the uP
 * wait loop, final return) -- restore from upstream t3_hw.c.
3495 int t3_init_hw(struct adapter *adapter, u32 fw_params)
3497 int err = -EIO, attempts, i;
3498 const struct vpd_params *vpd = &adapter->params.vpd;
/* MAC impedance calibration: T3B+ parts use the simpler routine. */
3500 if (adapter->params.rev > 0)
3501 calibrate_xgm_t3b(adapter);
3502 else if (calibrate_xgm(adapter))
/* Memory controllers (PMRX/PMTX/CM) and the MC5 TCAM. */
3506 partition_mem(adapter, &adapter->params.tp);
3508 if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
3509 mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
3510 mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
3511 t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
3512 adapter->params.mc5.nfilters,
3513 adapter->params.mc5.nroutes))
3516 for (i = 0; i < 32; i++)
3517 if (clear_sge_ctxt(adapter, i, F_CQ))
/* TP engine, coalescing limits, ULP, and bus-specific knobs. */
3521 if (tp_init(adapter, &adapter->params.tp))
3524 t3_tp_set_coalescing_size(adapter,
3525 min(adapter->params.sge.max_pkt_size,
3526 MAX_RX_COALESCING_LEN), 1);
3527 t3_tp_set_max_rxsize(adapter,
3528 min(adapter->params.sge.max_pkt_size, 16384U));
3529 ulp_config(adapter, &adapter->params.tp);
3531 if (is_pcie(adapter))
3532 config_pcie(adapter);
3534 t3_set_reg_field(adapter, A_PCIX_CFG, 0,
3535 F_DMASTOPEN | F_CLIDECEN);
3537 if (adapter->params.rev == T3_REV_C)
3538 t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
3539 F_CFG_CQE_SOP_MASK);
3541 t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
3542 t3_write_reg(adapter, A_PM1_RX_MODE, 0);
3543 t3_write_reg(adapter, A_PM1_TX_MODE, 0);
3544 chan_init_hw(adapter, adapter->params.chan_map);
3545 t3_sge_init(adapter, &adapter->params.sge);
3547 t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));
/* Kick off the firmware boot from flash and wait for the uP. */
3549 t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
3550 t3_write_reg(adapter, A_CIM_BOOT_CFG,
3551 V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
3552 t3_read_reg(adapter, A_CIM_BOOT_CFG); /* flush */
3555 do { /* wait for uP to initialize */
3557 } while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
3559 CH_ERR(adapter, "uP initialization timed out\n");
3569 * get_pci_mode - determine a card's PCI mode
3570 * @adapter: the adapter
3571 * @p: where to store the PCI settings
3573 * Determines a card's PCI mode and associated parameters, such as speed
 * and width: PCIe cards report link width from LNKSTA; PCI/PCI-X cards
 * decode speed, bus width, and variant from A_PCIX_MODE.
 *
 * NOTE(review): lines missing in this copy ('u16 val' declaration,
 * the "if (pcie_cap)" / "return" around the PCIe branch, braces,
 * and the PCI-vs-PCI-X "if (pci_mode == 0)" test before "3598").
3576 static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
3578 static unsigned short speed_map[] = { 33, 66, 100, 133 };
3579 u32 pci_mode, pcie_cap;
3581 pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
3585 p->variant = PCI_VARIANT_PCIE;
3586 p->pcie_cap_addr = pcie_cap;
3587 pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
3589 p->width = (val >> 4) & 0x3f;
3593 pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
3594 p->speed = speed_map[G_PCLKRANGE(pci_mode)];
3595 p->width = (pci_mode & F_64BIT) ? 64 : 32;
3596 pci_mode = G_PCIXINITPAT(pci_mode);
3598 p->variant = PCI_VARIANT_PCI;
3599 else if (pci_mode < 4)
3600 p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
3601 else if (pci_mode < 8)
3602 p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
3604 p->variant = PCI_VARIANT_PCIX_266_MODE2;
3608 * init_link_config - initialize a link's SW state
3609 * @lc: structure holding the link state
 * @caps: supported link capabilities (SUPPORTED_* bitmap)
3612 * Initializes the SW state maintained for each link, including the link's
3613 * capabilities and default speed/duplex/flow-control/autonegotiation
 * state.  Autoneg is enabled (advertising everything supported, with
 * PAUSE_AUTONEG) when the PHY supports it, otherwise disabled.
 *
 * NOTE(review): opening brace, the "} else {" around "3627", and the
 * closing brace are not visible in this copy.
3616 static void init_link_config(struct link_config *lc, unsigned int caps)
3618 lc->supported = caps;
3619 lc->requested_speed = lc->speed = SPEED_INVALID;
3620 lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3621 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3622 if (lc->supported & SUPPORTED_Autoneg) {
3623 lc->advertising = lc->supported;
3624 lc->autoneg = AUTONEG_ENABLE;
3625 lc->requested_fc |= PAUSE_AUTONEG;
3627 lc->advertising = 0;
3628 lc->autoneg = AUTONEG_DISABLE;
3633 * mc7_calc_size - calculate MC7 memory size
3634 * @cfg: the MC7 configuration
3636 * Calculates the size of an MC7 memory in bytes from the value of its
3637 * configuration register.
 *
 * Size in MB = (256 << density) * banks / (org << width); the final
 * conversion to bytes (presumably "return MBs << 20;") is not visible
 * in this copy -- NOTE(review): confirm against upstream t3_hw.c.
3639 static unsigned int mc7_calc_size(u32 cfg)
3641 unsigned int width = G_WIDTH(cfg);
3642 unsigned int banks = !!(cfg & F_BKS) + 1;
3643 unsigned int org = !!(cfg & F_ORG) + 1;
3644 unsigned int density = G_DEN(cfg);
3645 unsigned int MBs = ((256 << density) * banks) / (org << width);
/*
 * mc7_prep - initialize the SW state of an MC7 memory controller
 * @adapter: the adapter owning this MC7 instance
 * @mc7: the MC7 SW state to initialize
 * @base_addr: register base address of this MC7 instance
 * @name: human-readable name of this MC7 ("PMRX"/"PMTX"/"CM"; the
 *        assignment of mc7->name is not visible in this copy)
 *
 * Derives the register offset, memory size, and data-bus width from
 * the MC7 configuration register.  A density field equal to M_DEN
 * means no memory is attached and the size is recorded as 0.
 */
3650 static void mc7_prep(struct adapter *adapter, struct mc7 *mc7,
3651 unsigned int base_addr, const char *name)
3655 mc7->adapter = adapter;
3657 mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3658 cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
/* Fixed: original line read "mc7->size = mc7->size = ..." -- a
 * redundant duplicated assignment; a single assignment suffices. */
3659 mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
3660 mc7->width = G_WIDTH(cfg);
/*
 * mac_prep - initialize the SW state of a T3 MAC (cmac)
 *
 * Records the adapter back-pointer and the per-MAC register offset.
 * On rev-0 XAUI adapters it additionally programs the SerDes control
 * register (different value for 10G vs 1G) and adjusts the port
 * configuration register.
 *
 * NOTE(review): truncated here -- no opening brace, the tail of the
 * t3_set_reg_field() at "3672" and the closing braces are missing.
 */
3663 void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
3665 mac->adapter = adapter;
3666 mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
3669 if (adapter->params.rev == 0 && uses_xaui(adapter)) {
3670 t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
3671 is_10G(adapter) ? 0x2901c04 : 0x2301c04);
3672 t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
/*
 * early_hw_init - HW initialization done at card detection time
 *
 * Initializes MDIO (mi1), programs the I2C clock divider for 80KHz,
 * sets up GPIO direction/output bits, clears the MC5 server index,
 * and seeds the SGE OCO base.  For rev-0 XAUI adapters it also
 * enables the MAC clocks and pulses F_CLKDIVRESET_ on both ports so
 * the XGMAC registers become accessible.
 *
 * NOTE(review): braces, an early "return", and delay calls appear to
 * be missing from this copy -- compare against upstream t3_hw.c.
 */
3677 void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
3679 u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);
3681 mi1_init(adapter, ai);
3682 t3_write_reg(adapter, A_I2C_CFG, /* set for 80KHz */
3683 V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
3684 t3_write_reg(adapter, A_T3DBG_GPIO_EN,
3685 ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
3686 t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
3687 t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));
3689 if (adapter->params.rev == 0 || !uses_xaui(adapter))
3692 /* Enable MAC clocks so we can access the registers */
3693 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3694 t3_read_reg(adapter, A_XGM_PORT_CFG);
3696 val |= F_CLKDIVRESET_;
3697 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3698 t3_read_reg(adapter, A_XGM_PORT_CFG);
3699 t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
3700 t3_read_reg(adapter, A_XGM_PORT_CFG);
3704 * Reset the adapter.
3705 * Older PCIe cards lose their config space during reset, PCI-X
 * ones don't, so on pre-B2 PCIe parts the PCI state is saved before
 * the warm reset (F_CRSTWRM) and restored afterwards.  The loop polls
 * the device ID (Chelsio vendor 0x1425) to detect when config space
 * is accessible again.
 *
 * NOTE(review): truncated -- 'u16 devid' declaration, delays inside
 * the poll loop, 'break', the '-1' error return, and final 'return 0'
 * are not visible in this copy.
3708 int t3_reset_adapter(struct adapter *adapter)
3710 int i, save_and_restore_pcie =
3711 adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
3714 if (save_and_restore_pcie)
3715 pci_save_state(adapter->pdev);
3716 t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
3719 * Delay. Give Some time to device to reset fully.
3720 * XXX The delay time should be modified.
3722 for (i = 0; i < 10; i++) {
3724 pci_read_config_word(adapter->pdev, 0x00, &devid);
3725 if (devid == 0x1425)
3729 if (devid != 0x1425)
3732 if (save_and_restore_pcie)
3733 pci_restore_state(adapter->pdev);
/*
 * init_parity - initialize parity-protected HW memories
 *
 * Clears SGE egress contexts (0-15 and 0xfff0-0xffff), response-queue
 * contexts for every qset, and writes zeros through the CIM IBQ debug
 * interface for all four IBQs so that parity state starts clean.
 * Fails with -EBUSY up front if a context command is already pending.
 *
 * NOTE(review): truncated -- opening brace, 'i', 'err', 'addr'
 * declarations, the '-EBUSY' return, the OBQ loop that follows, and
 * the final return are not visible in this copy.
 */
3737 static int init_parity(struct adapter *adap)
3741 if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
3744 for (err = i = 0; !err && i < 16; i++)
3745 err = clear_sge_ctxt(adap, i, F_EGRESS);
3746 for (i = 0xfff0; !err && i <= 0xffff; i++)
3747 err = clear_sge_ctxt(adap, i, F_EGRESS);
3748 for (i = 0; !err && i < SGE_QSETS; i++)
3749 err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
3753 t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
3754 for (i = 0; i < 4; i++)
3755 for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
3756 t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
3757 F_IBQDBGWR | V_IBQDBGQID(i) |
3758 V_IBQDBGADDR(addr));
3759 err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
3760 F_IBQDBGBUSY, 0, 2, 1);
3768 * Initialize adapter SW state for the various HW modules, set initial values
3769 * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
 * interface.  Runs once at probe time; 'reset' selects whether the chip
 * is warm-reset first.  Returns 0 on success or a negative errno.
 *
 * NOTE(review): heavily truncated in this copy -- opening brace,
 * 'int ret', 'u8 hw_addr[6]', error 'return ret' paths, 'continue'
 * in the port-type scan, ETH_ALEN arguments to the memcpy()s, and
 * the closing braces are all missing; restore from upstream t3_hw.c.
3772 int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
3776 unsigned int i, j = -1;
3778 get_pci_mode(adapter, &adapter->params.pci);
3780 adapter->params.info = ai;
3781 adapter->params.nports = ai->nports0 + ai->nports1;
3782 adapter->params.chan_map = !!ai->nports0 | (!!ai->nports1 << 1);
3783 adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
3785 * We used to only run the "adapter check task" once a second if
3786 * we had PHYs which didn't support interrupts (we would check
3787 * their link status once a second). Now we check other conditions
3788 * in that routine which could potentially impose a very high
3789 * interrupt load on the system. As such, we now always scan the
3790 * adapter state once a second ...
3792 adapter->params.linkpoll_period = 10;
3793 adapter->params.stats_update_period = is_10G(adapter) ?
3794 MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
3795 adapter->params.pci.vpd_cap_addr =
3796 pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
3797 ret = get_vpd_params(adapter, &adapter->params.vpd);
3801 if (reset && t3_reset_adapter(adapter))
3804 t3_sge_prep(adapter, &adapter->params.sge);
/* Size the payload memories and derive TP memory-manager parameters. */
3806 if (adapter->params.vpd.mclk) {
3807 struct tp_params *p = &adapter->params.tp;
3809 mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
3810 mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
3811 mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
3813 p->nchan = adapter->params.chan_map == 3 ? 2 : 1;
3814 p->pmrx_size = t3_mc7_size(&adapter->pmrx);
3815 p->pmtx_size = t3_mc7_size(&adapter->pmtx);
3816 p->cm_size = t3_mc7_size(&adapter->cm);
3817 p->chan_rx_size = p->pmrx_size / 2; /* only 1 Rx channel */
3818 p->chan_tx_size = p->pmtx_size / p->nchan;
3819 p->rx_pg_size = 64 * 1024;
3820 p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
3821 p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
3822 p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
3823 p->ntimer_qs = p->cm_size >= (128 << 20) ||
3824 adapter->params.rev > 0 ? 12 : 6;
/* Offload requires all three memories to be populated. */
3827 adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
3828 t3_mc7_size(&adapter->pmtx) &&
3829 t3_mc7_size(&adapter->cm);
3831 if (is_offload(adapter)) {
3832 adapter->params.mc5.nservers = DEFAULT_NSERVERS;
3833 adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
3834 DEFAULT_NFILTERS : 0;
3835 adapter->params.mc5.nroutes = 0;
3836 t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
3838 init_mtus(adapter->params.mtus);
3839 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3842 early_hw_init(adapter, ai);
3843 ret = init_parity(adapter);
/* Per-port setup: PHY, MAC, MAC address, and link SW state. */
3847 for_each_port(adapter, i) {
3849 const struct port_type_info *pti;
3850 struct port_info *p = adap2pinfo(adapter, i);
3852 while (!adapter->params.vpd.port_type[++j])
3855 pti = &port_types[adapter->params.vpd.port_type[j]];
3856 if (!pti->phy_prep) {
3857 CH_ALERT(adapter, "Invalid port type index %d\n",
3858 adapter->params.vpd.port_type[j]);
3862 ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
3866 mac_prep(&p->mac, adapter, j);
3869 * The VPD EEPROM stores the base Ethernet address for the
3870 * card. A port's address is derived from the base by adding
3871 * the port's index to the base's low octet.
3873 memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
3874 hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
3876 memcpy(adapter->port[i]->dev_addr, hw_addr,
3878 memcpy(adapter->port[i]->perm_addr, hw_addr,
3880 init_link_config(&p->link_config, p->phy.caps);
3881 p->phy.ops->power_down(&p->phy, 1);
3884 * If the PHY doesn't support interrupts for link status
3885 * changes, schedule a scan of the adapter links at least
3888 if (!(p->phy.caps & SUPPORTED_IRQ) &&
3889 adapter->params.linkpoll_period > 10)
3890 adapter->params.linkpoll_period = 10;
/*
 * t3_led_ready - signal driver-ready by changing the GPIO0 LED state.
 * NOTE(review): truncated -- the second t3_set_reg_field() argument
 * line (presumably "0);"), braces, and any surrounding lines are
 * missing from this copy.
 */
3896 void t3_led_ready(struct adapter *adapter)
3898 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
3902 int t3_replay_prep_adapter(struct adapter *adapter)
3904 const struct adapter_info *ai = adapter->params.info;
3905 unsigned int i, j = -1;
3908 early_hw_init(adapter, ai);
3909 ret = init_parity(adapter);
3913 for_each_port(adapter, i) {
3914 const struct port_type_info *pti;
3915 struct port_info *p = adap2pinfo(adapter, i);
3917 while (!adapter->params.vpd.port_type[++j])
3920 pti = &port_types[adapter->params.vpd.port_type[j]];
3921 ret = pti->phy_prep(&p->phy, adapter, p->phy.addr, NULL);
3924 p->phy.ops->power_down(&p->phy, 1);