/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "common.h"
#include "regs.h"
#include "sge_defs.h"
#include "firmware_exports.h"
/**
 * t3_wait_op_done_val - wait until an operation is completed
 * @adapter: the adapter performing the operation
 * @reg: the register to check for completion
 * @mask: a single-bit field within @reg that indicates completion
 * @polarity: the value of the field when the operation is completed
 * @attempts: number of check iterations
 * @delay: delay in usecs between iterations
 * @valp: where to store the value of the register at completion time
 *
 * Wait until an operation is completed by checking a bit in a register
 * up to @attempts times.  If @valp is not NULL the value of the register
 * at the time it indicated completion is stored there.  Returns 0 if the
 * operation completes and -EAGAIN otherwise.
 */
int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
                        int polarity, int attempts, int delay, u32 *valp)
{
        while (1) {
                u32 val = t3_read_reg(adapter, reg);

                if (!!(val & mask) == polarity) {
                        if (valp)
                                *valp = val;
                        return 0;
                }
                if (--attempts == 0)
                        return -EAGAIN;
                if (delay)
                        udelay(delay);
        }
}
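/*
 * Illustrative usage sketch (added for exposition, not part of the original
 * driver): a caller typically kicks off a hardware operation and then polls
 * its BUSY bit with t3_wait_op_done_val().  The example reuses the driver's
 * own serial-flash register names; example_poll_sf() itself is hypothetical.
 */
#if 0
static int example_poll_sf(struct adapter *adapter)
{
        u32 last;

        /* Poll A_SF_OP until F_BUSY reads 0 (polarity 0), up to 5 times
         * with 10 us between checks; the final register value lands in
         * @last. */
        return t3_wait_op_done_val(adapter, A_SF_OP, F_BUSY, 0, 5, 10, &last);
}
#endif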
/**
 * t3_write_regs - write a bunch of registers
 * @adapter: the adapter to program
 * @p: an array of register address/register value pairs
 * @n: the number of address/value pairs
 * @offset: register address offset
 *
 * Takes an array of register address/register value pairs and writes each
 * value to the corresponding register.  Register addresses are adjusted
 * by the supplied offset.
 */
void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
                   int n, unsigned int offset)
{
        while (n--) {
                t3_write_reg(adapter, p->reg_addr + offset, p->val);
                p++;
        }
}
/**
 * t3_set_reg_field - set a register field to a value
 * @adapter: the adapter to program
 * @addr: the register address
 * @mask: specifies the portion of the register to modify
 * @val: the new value for the register field
 *
 * Sets a register field specified by the supplied mask to the
 * given value.
 */
void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
                      u32 val)
{
        u32 v = t3_read_reg(adapter, addr) & ~mask;

        t3_write_reg(adapter, addr, v | val);
        t3_read_reg(adapter, addr);     /* flush */
}
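/*
 * Illustrative usage sketch (added for exposition, not part of the original
 * driver): t3_set_reg_field() is a read-modify-write, so a multi-bit field
 * can be updated without disturbing its neighbours.  This assumes the usual
 * V_CLKDIV()/M_CLKDIV field macro pair from regs.h; the wrapper function is
 * hypothetical.
 */
#if 0
static void example_set_mdio_clkdiv(struct adapter *adapter, unsigned int div)
{
        /* Clear the entire CLKDIV field of A_MI1_CFG, then set the new
         * divider value. */
        t3_set_reg_field(adapter, A_MI1_CFG, V_CLKDIV(M_CLKDIV),
                         V_CLKDIV(div));
}
#endif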
/**
 * t3_read_indirect - read indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect address
 * @data_reg: register holding the value of the indirect register
 * @vals: where the read register values are stored
 * @start_idx: index of first indirect register to read
 * @nregs: how many indirect registers to read
 *
 * Reads registers that are accessed indirectly through an address/data
 * register pair.
 */
static void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
                             unsigned int data_reg, u32 *vals,
                             unsigned int nregs, unsigned int start_idx)
{
        while (nregs--) {
                t3_write_reg(adap, addr_reg, start_idx);
                *vals++ = t3_read_reg(adap, data_reg);
                start_idx++;
        }
}
/**
 * t3_mc7_bd_read - read from MC7 through backdoor accesses
 * @mc7: identifies MC7 to read from
 * @start: index of first 64-bit word to read
 * @n: number of 64-bit words to read
 * @buf: where to store the read result
 *
 * Read n 64-bit words from MC7 starting at word start, using backdoor
 * accesses.
 */
int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
                   u64 *buf)
{
        static const int shift[] = { 0, 0, 16, 24 };
        static const int step[] = { 0, 32, 16, 8 };

        unsigned int size64 = mc7->size / 8;    /* # of 64-bit words */
        struct adapter *adap = mc7->adapter;

        if (start >= size64 || start + n > size64)
                return -EINVAL;

        start *= (8 << mc7->width);
        while (n--) {
                int i;
                u64 val64 = 0;

                for (i = (1 << mc7->width) - 1; i >= 0; --i) {
                        int attempts = 10;
                        u32 val;

                        t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
                        t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
                        val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
                        while ((val & F_BUSY) && attempts--)
                                val = t3_read_reg(adap,
                                                  mc7->offset + A_MC7_BD_OP);
                        if (val & F_BUSY)
                                return -EIO;

                        val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
                        if (mc7->width == 0) {
                                val64 = t3_read_reg(adap,
                                                    mc7->offset +
                                                    A_MC7_BD_DATA0);
                                val64 |= (u64) val << 32;
                        } else {
                                val >>= shift[mc7->width];
                                val64 |= (u64) val << (step[mc7->width] * i);
                        }
                        start += 8;
                }
                *buf++ = val64;
        }
        return 0;
}
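/*
 * Illustrative usage sketch (added for exposition, not part of the original
 * driver): reading a few 64-bit words from the CM memory controller through
 * the backdoor interface, e.g. while diagnosing a memory error.  The
 * function name is hypothetical; adapter->cm is the driver's CM MC7
 * instance.
 */
#if 0
static int example_dump_cm_words(struct adapter *adapter)
{
        u64 buf[4];

        /* Read 4 consecutive 64-bit words starting at word index 0. */
        return t3_mc7_bd_read(&adapter->cm, 0, ARRAY_SIZE(buf), buf);
}
#endif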
static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
{
        u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
        u32 val = F_PREEN | V_CLKDIV(clkdiv);

        t3_write_reg(adap, A_MI1_CFG, val);
}

#define MDIO_ATTEMPTS 20
/*
 * MI1 read/write operations for clause 22 PHYs.
 */
static int t3_mi1_read(struct net_device *dev, int phy_addr, int mmd_addr,
                       u16 reg_addr)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        int ret;
        u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

        mutex_lock(&adapter->mdio_lock);
        t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
        t3_write_reg(adapter, A_MI1_ADDR, addr);
        t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
        ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
        if (!ret)
                ret = t3_read_reg(adapter, A_MI1_DATA);
        mutex_unlock(&adapter->mdio_lock);
        return ret;
}
static int t3_mi1_write(struct net_device *dev, int phy_addr, int mmd_addr,
                        u16 reg_addr, u16 val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        int ret;
        u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

        mutex_lock(&adapter->mdio_lock);
        t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
        t3_write_reg(adapter, A_MI1_ADDR, addr);
        t3_write_reg(adapter, A_MI1_DATA, val);
        t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
        ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
        mutex_unlock(&adapter->mdio_lock);
        return ret;
}

static const struct mdio_ops mi1_mdio_ops = {
        .read = t3_mi1_read,
        .write = t3_mi1_write,
        .mode_support = MDIO_SUPPORTS_C22
};
/*
 * Performs the address cycle for clause 45 PHYs.
 * Must be called with the MDIO_LOCK held.
 */
static int mi1_wr_addr(struct adapter *adapter, int phy_addr, int mmd_addr,
                       int reg_addr)
{
        u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

        t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
        t3_write_reg(adapter, A_MI1_ADDR, addr);
        t3_write_reg(adapter, A_MI1_DATA, reg_addr);
        t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
        return t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
                               MDIO_ATTEMPTS, 10);
}
/*
 * MI1 read/write operations for indirect-addressed PHYs.
 */
static int mi1_ext_read(struct net_device *dev, int phy_addr, int mmd_addr,
                        u16 reg_addr)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        int ret;

        mutex_lock(&adapter->mdio_lock);
        ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
        if (!ret) {
                t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
                ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
                                      MDIO_ATTEMPTS, 10);
                if (!ret)
                        ret = t3_read_reg(adapter, A_MI1_DATA);
        }
        mutex_unlock(&adapter->mdio_lock);
        return ret;
}

static int mi1_ext_write(struct net_device *dev, int phy_addr, int mmd_addr,
                         u16 reg_addr, u16 val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        int ret;

        mutex_lock(&adapter->mdio_lock);
        ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
        if (!ret) {
                t3_write_reg(adapter, A_MI1_DATA, val);
                t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
                ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
                                      MDIO_ATTEMPTS, 10);
        }
        mutex_unlock(&adapter->mdio_lock);
        return ret;
}

static const struct mdio_ops mi1_mdio_ext_ops = {
        .read = mi1_ext_read,
        .write = mi1_ext_write,
        .mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22
};
/**
 * t3_mdio_change_bits - modify the value of a PHY register
 * @phy: the PHY to operate on
 * @mmd: the device address
 * @reg: the register address
 * @clear: what part of the register value to mask off
 * @set: what part of the register value to set
 *
 * Changes the value of a PHY register by applying a mask to its current
 * value and ORing the result with a new value.
 */
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
                        unsigned int set)
{
        int ret;
        unsigned int val;

        ret = t3_mdio_read(phy, mmd, reg, &val);
        if (!ret) {
                val &= ~clear;
                ret = t3_mdio_write(phy, mmd, reg, val | set);
        }
        return ret;
}
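/*
 * Illustrative usage sketch (added for exposition, not part of the original
 * driver): t3_phy_reset() below is the canonical caller, clearing the
 * low-power bit while setting reset in one read-modify-write.  The
 * stand-alone wrapper here is hypothetical.
 */
#if 0
static int example_phy_rmw(struct cphy *phy)
{
        /* In PMA/PMD CTRL1: clear MDIO_CTRL1_LPOWER, set MDIO_CTRL1_RESET. */
        return t3_mdio_change_bits(phy, MDIO_MMD_PMAPMD, MDIO_CTRL1,
                                   MDIO_CTRL1_LPOWER, MDIO_CTRL1_RESET);
}
#endif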
/**
 * t3_phy_reset - reset a PHY block
 * @phy: the PHY to operate on
 * @mmd: the device address of the PHY block to reset
 * @wait: how long to wait for the reset to complete in 1ms increments
 *
 * Resets a PHY block and optionally waits for the reset to complete.
 * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
 * for 10G PHYs.
 */
int t3_phy_reset(struct cphy *phy, int mmd, int wait)
{
        int err;
        unsigned int ctl;

        err = t3_mdio_change_bits(phy, mmd, MDIO_CTRL1, MDIO_CTRL1_LPOWER,
                                  MDIO_CTRL1_RESET);
        if (err || !wait)
                return err;

        do {
                err = t3_mdio_read(phy, mmd, MDIO_CTRL1, &ctl);
                if (err)
                        return err;
                ctl &= MDIO_CTRL1_RESET;
                if (ctl)
                        msleep(1);
        } while (ctl && --wait);

        return ctl ? -1 : 0;
}
/**
 * t3_phy_advertise - set the PHY advertisement registers for autoneg
 * @phy: the PHY to operate on
 * @advert: bitmap of capabilities the PHY should advertise
 *
 * Sets a 10/100/1000 PHY's advertisement registers to advertise the
 * requested capabilities.
 */
int t3_phy_advertise(struct cphy *phy, unsigned int advert)
{
        int err;
        unsigned int val = 0;

        err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_CTRL1000, &val);
        if (err)
                return err;

        val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
        if (advert & ADVERTISED_1000baseT_Half)
                val |= ADVERTISE_1000HALF;
        if (advert & ADVERTISED_1000baseT_Full)
                val |= ADVERTISE_1000FULL;

        err = t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_CTRL1000, val);
        if (err)
                return err;

        val = 1;
        if (advert & ADVERTISED_10baseT_Half)
                val |= ADVERTISE_10HALF;
        if (advert & ADVERTISED_10baseT_Full)
                val |= ADVERTISE_10FULL;
        if (advert & ADVERTISED_100baseT_Half)
                val |= ADVERTISE_100HALF;
        if (advert & ADVERTISED_100baseT_Full)
                val |= ADVERTISE_100FULL;
        if (advert & ADVERTISED_Pause)
                val |= ADVERTISE_PAUSE_CAP;
        if (advert & ADVERTISED_Asym_Pause)
                val |= ADVERTISE_PAUSE_ASYM;
        return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val);
}
/**
 * t3_phy_advertise_fiber - set fiber PHY advertisement register
 * @phy: the PHY to operate on
 * @advert: bitmap of capabilities the PHY should advertise
 *
 * Sets a fiber PHY's advertisement register to advertise the
 * requested capabilities.
 */
int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert)
{
        unsigned int val = 0;

        if (advert & ADVERTISED_1000baseT_Half)
                val |= ADVERTISE_1000XHALF;
        if (advert & ADVERTISED_1000baseT_Full)
                val |= ADVERTISE_1000XFULL;
        if (advert & ADVERTISED_Pause)
                val |= ADVERTISE_1000XPAUSE;
        if (advert & ADVERTISED_Asym_Pause)
                val |= ADVERTISE_1000XPSE_ASYM;
        return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val);
}
/**
 * t3_set_phy_speed_duplex - force PHY speed and duplex
 * @phy: the PHY to operate on
 * @speed: requested PHY speed
 * @duplex: requested PHY duplex
 *
 * Force a 10/100/1000 PHY's speed and duplex.  This also disables
 * auto-negotiation except for GigE, where auto-negotiation is mandatory.
 */
int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
{
        int err;
        unsigned int ctl;

        err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_BMCR, &ctl);
        if (err)
                return err;

        if (speed >= 0) {
                ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
                if (speed == SPEED_100)
                        ctl |= BMCR_SPEED100;
                else if (speed == SPEED_1000)
                        ctl |= BMCR_SPEED1000;
        }
        if (duplex >= 0) {
                ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
                if (duplex == DUPLEX_FULL)
                        ctl |= BMCR_FULLDPLX;
        }
        if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
                ctl |= BMCR_ANENABLE;
        return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_BMCR, ctl);
}
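/*
 * Illustrative usage sketch (added for exposition, not part of the original
 * driver): forcing 100 Mb/s full duplex.  Because the speed is below
 * 1000 Mb/s, the call also turns auto-negotiation off.  The wrapper is
 * hypothetical.
 */
#if 0
static int example_force_100_full(struct cphy *phy)
{
        return t3_set_phy_speed_duplex(phy, SPEED_100, DUPLEX_FULL);
}
#endif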
int t3_phy_lasi_intr_enable(struct cphy *phy)
{
        return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL,
                             MDIO_PMA_LASI_LSALARM);
}

int t3_phy_lasi_intr_disable(struct cphy *phy)
{
        return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0);
}

int t3_phy_lasi_intr_clear(struct cphy *phy)
{
        u32 val;

        return t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT, &val);
}

int t3_phy_lasi_intr_handler(struct cphy *phy)
{
        unsigned int status;
        int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT,
                               &status);

        if (err)
                return err;
        return (status & MDIO_PMA_LASI_LSALARM) ? cphy_cause_link_change : 0;
}
static const struct adapter_info t3_adap_info[] = {
        {1, 1, 0,
         F_GPIO2_OEN | F_GPIO4_OEN |
         F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
         &mi1_mdio_ops, "Chelsio PE9000"},
        {1, 1, 0,
         F_GPIO2_OEN | F_GPIO4_OEN |
         F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
         &mi1_mdio_ops, "Chelsio T302"},
        {1, 0, 0,
         F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
         F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
         { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
         &mi1_mdio_ext_ops, "Chelsio T310"},
        {1, 1, 0,
         F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
         F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
         F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
         { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
         &mi1_mdio_ext_ops, "Chelsio T320"},
        {},
        {},
        {1, 0, 0,
         F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
         F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
         { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
         &mi1_mdio_ext_ops, "Chelsio T310" },
        {1, 0, 0,
         F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
         F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL,
         { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
         &mi1_mdio_ext_ops, "Chelsio N320E-G2" },
};
/*
 * Return the adapter_info structure with a given index.  Out-of-range indices
 * return NULL.
 */
const struct adapter_info *t3_get_adapter_info(unsigned int id)
{
        return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
}
struct port_type_info {
        int (*phy_prep)(struct cphy *phy, struct adapter *adapter,
                        int phy_addr, const struct mdio_ops *ops);
};

static const struct port_type_info port_types[] = {
        { NULL },
        { t3_ael1002_phy_prep },
        { t3_vsc8211_phy_prep },
        { NULL },
        { t3_xaui_direct_phy_prep },
        { t3_ael2005_phy_prep },
        { t3_qt2045_phy_prep },
        { t3_ael1006_phy_prep },
        { NULL },
        { t3_aq100x_phy_prep },
        { t3_ael2020_phy_prep },
};
#define VPD_ENTRY(name, len) \
        u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]

/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R sections.
 */
struct t3_vpd {
        u8 id_tag;
        u8 id_len[2];
        u8 id_data[16];
        u8 vpdr_tag;
        u8 vpdr_len[2];
        VPD_ENTRY(pn, 16);              /* part number */
        VPD_ENTRY(ec, 16);              /* EC level */
        VPD_ENTRY(sn, SERNUM_LEN);      /* serial number */
        VPD_ENTRY(na, 12);              /* MAC address base */
        VPD_ENTRY(cclk, 6);             /* core clock */
        VPD_ENTRY(mclk, 6);             /* mem clock */
        VPD_ENTRY(uclk, 6);             /* uP clk */
        VPD_ENTRY(mdc, 6);              /* MDIO clk */
        VPD_ENTRY(mt, 2);               /* mem timing */
        VPD_ENTRY(xaui0cfg, 6);         /* XAUI0 config */
        VPD_ENTRY(xaui1cfg, 6);         /* XAUI1 config */
        VPD_ENTRY(port0, 2);            /* PHY0 complex */
        VPD_ENTRY(port1, 2);            /* PHY1 complex */
        VPD_ENTRY(port2, 2);            /* PHY2 complex */
        VPD_ENTRY(port3, 2);            /* PHY3 complex */
        VPD_ENTRY(rv, 1);               /* csum */
        u32 pad;                /* for multiple-of-4 sizing and alignment */
};
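/*
 * Expository note (added, not part of the original driver): each
 * VPD_ENTRY(name, len) above expands to the three fields of one VPD-R
 * keyword, e.g. VPD_ENTRY(sn, SERNUM_LEN) becomes
 *
 *      u8 sn_kword[2];            two-character keyword, e.g. "SN"
 *      u8 sn_len;                 length of the data that follows
 *      u8 sn_data[SERNUM_LEN];    the keyword's payload
 *
 * which is why get_vpd_params() below can refer to vpd.sn_data directly.
 */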
#define EEPROM_MAX_POLL   40
#define EEPROM_STAT_ADDR  0x4000
#define VPD_BASE          0xc00
/**
 * t3_seeprom_read - read a VPD EEPROM location
 * @adapter: adapter to read
 * @addr: EEPROM address
 * @data: where to store the read data
 *
 * Read a 32-bit word from a location in VPD EEPROM using the card's PCI
 * VPD ROM capability.  A zero is written to the flag bit when the
 * address is written to the control register.  The hardware device will
 * set the flag to 1 when 4 bytes have been read into the data register.
 */
int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data)
{
        u16 val;
        int attempts = EEPROM_MAX_POLL;
        u32 v;
        unsigned int base = adapter->params.pci.vpd_cap_addr;

        if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
                return -EINVAL;

        pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
        do {
                udelay(10);
                pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
        } while (!(val & PCI_VPD_ADDR_F) && --attempts);

        if (!(val & PCI_VPD_ADDR_F)) {
                CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
                return -EIO;
        }
        pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, &v);
        *data = cpu_to_le32(v);
        return 0;
}
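/*
 * Illustrative usage sketch (added for exposition, not part of the original
 * driver): the VPD capability transfers 4 bytes per operation, so a block
 * is read by stepping the address in units of 4.  The helper is
 * hypothetical; get_vpd_params() below uses the same pattern.
 */
#if 0
static int example_read_vpd_block(struct adapter *adapter, u32 addr,
                                  __le32 *buf, unsigned int nwords)
{
        unsigned int i;
        int ret;

        for (i = 0; i < nwords; i++) {
                ret = t3_seeprom_read(adapter, addr + 4 * i, &buf[i]);
                if (ret)
                        return ret;
        }
        return 0;
}
#endif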
/**
 * t3_seeprom_write - write a VPD EEPROM location
 * @adapter: adapter to write
 * @addr: EEPROM address
 * @data: value to write
 *
 * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
 * VPD ROM capability.
 */
int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data)
{
        u16 val;
        int attempts = EEPROM_MAX_POLL;
        unsigned int base = adapter->params.pci.vpd_cap_addr;

        if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
                return -EINVAL;

        pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
                               le32_to_cpu(data));
        pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR,
                              addr | PCI_VPD_ADDR_F);
        do {
                msleep(1);
                pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
        } while ((val & PCI_VPD_ADDR_F) && --attempts);

        if (val & PCI_VPD_ADDR_F) {
                CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
                return -EIO;
        }
        return 0;
}
/**
 * t3_seeprom_wp - enable/disable EEPROM write protection
 * @adapter: the adapter
 * @enable: 1 to enable write protection, 0 to disable it
 *
 * Enables or disables write protection on the serial EEPROM.
 */
int t3_seeprom_wp(struct adapter *adapter, int enable)
{
        return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
}
/*
 * Convert a character holding a hex digit to a number.
 */
static unsigned int hex2int(unsigned char c)
{
        return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
}
/**
 * get_vpd_params - read VPD parameters from VPD EEPROM
 * @adapter: adapter to read
 * @p: where to store the parameters
 *
 * Reads card parameters stored in VPD EEPROM.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
        int i, addr, ret;
        struct t3_vpd vpd;

        /*
         * Card information is normally at VPD_BASE but some early cards had
         * it at 0.
         */
        ret = t3_seeprom_read(adapter, VPD_BASE, (__le32 *)&vpd);
        if (ret)
                return ret;
        addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;

        for (i = 0; i < sizeof(vpd); i += 4) {
                ret = t3_seeprom_read(adapter, addr + i,
                                      (__le32 *)((u8 *)&vpd + i));
                if (ret)
                        return ret;
        }

        p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
        p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
        p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
        p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
        p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
        memcpy(p->sn, vpd.sn_data, SERNUM_LEN);

        /* Old eeproms didn't have port information */
        if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
                p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
                p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
        } else {
                p->port_type[0] = hex2int(vpd.port0_data[0]);
                p->port_type[1] = hex2int(vpd.port1_data[0]);
                p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
                p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
        }

        for (i = 0; i < 6; i++)
                p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
                                 hex2int(vpd.na_data[2 * i + 1]);
        return 0;
}
/* serial flash and firmware constants */
enum {
        SF_ATTEMPTS = 5,        /* max retries for SF1 operations */
        SF_SEC_SIZE = 64 * 1024,        /* serial flash sector size */
        SF_SIZE = SF_SEC_SIZE * 8,      /* serial flash size */

        /* flash command opcodes */
        SF_PROG_PAGE = 2,       /* program page */
        SF_WR_DISABLE = 4,      /* disable writes */
        SF_RD_STATUS = 5,       /* read status register */
        SF_WR_ENABLE = 6,       /* enable writes */
        SF_RD_DATA_FAST = 0xb,  /* read flash */
        SF_ERASE_SECTOR = 0xd8, /* erase sector */

        FW_FLASH_BOOT_ADDR = 0x70000,   /* start address of FW in flash */
        FW_VERS_ADDR = 0x7fffc,         /* flash address holding FW version */
        FW_MIN_SIZE = 8                 /* at least version and csum */
};
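/*
 * Expository note (added, not part of the original driver): with 64 KB
 * sectors, the firmware region starting at FW_FLASH_BOOT_ADDR (0x70000)
 * lies in sector 0x70000 >> 16 = 7, and FW_VERS_ADDR (0x7fffc) is the last
 * word of that same sector.  This is why t3_load_fw() below only needs to
 * erase a single sector before programming.
 */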
/**
 * sf1_read - read data from the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to read
 * @cont: whether another operation will be chained
 * @valp: where to store the read data
 *
 * Reads up to 4 bytes of data from the serial flash.  The location of
 * the read needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
                    u32 *valp)
{
        int ret;

        if (!byte_cnt || byte_cnt > 4)
                return -EINVAL;
        if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
                return -EBUSY;
        t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
        ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
        if (!ret)
                *valp = t3_read_reg(adapter, A_SF_DATA);
        return ret;
}
/**
 * sf1_write - write data to the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to write
 * @cont: whether another operation will be chained
 * @val: value to write
 *
 * Writes up to 4 bytes of data to the serial flash.  The location of
 * the write needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
                     u32 val)
{
        if (!byte_cnt || byte_cnt > 4)
                return -EINVAL;
        if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
                return -EBUSY;
        t3_write_reg(adapter, A_SF_DATA, val);
        t3_write_reg(adapter, A_SF_OP,
                     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
        return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
}
/**
 * flash_wait_op - wait for a flash operation to complete
 * @adapter: the adapter
 * @attempts: max number of polls of the status register
 * @delay: delay between polls in ms
 *
 * Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
        while (1) {
                u32 status;
                int ret;

                if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
                    (ret = sf1_read(adapter, 1, 0, &status)) != 0)
                        return ret;
                if (!(status & 1))
                        return 0;
                if (--attempts == 0)
                        return -EAGAIN;
                if (delay)
                        msleep(delay);
        }
}
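/*
 * Illustrative usage sketch (added for exposition, not part of the original
 * driver): sf1_write()/sf1_read() pairs implement SPI command framing.  A
 * status poll, as used by flash_wait_op() above, issues the one-byte
 * SF_RD_STATUS opcode with @cont set and then reads one status byte.  The
 * helper is hypothetical.
 */
#if 0
static int example_read_flash_status(struct adapter *adapter, u32 *status)
{
        int ret;

        ret = sf1_write(adapter, 1, 1, SF_RD_STATUS);   /* opcode, chained */
        if (!ret)
                ret = sf1_read(adapter, 1, 0, status);  /* status byte */
        return ret;
}
#endif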
/**
 * t3_read_flash - read words from serial flash
 * @adapter: the adapter
 * @addr: the start address for the read
 * @nwords: how many 32-bit words to read
 * @data: where to store the read data
 * @byte_oriented: whether to store data as bytes or as words
 *
 * Read the specified number of 32-bit words from the serial flash.
 * If @byte_oriented is set the read data is stored as a byte array
 * (i.e., big-endian), otherwise as 32-bit words in the platform's
 * byte order.
 */
int t3_read_flash(struct adapter *adapter, unsigned int addr,
                  unsigned int nwords, u32 *data, int byte_oriented)
{
        int ret;

        if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
                return -EINVAL;

        addr = swab32(addr) | SF_RD_DATA_FAST;

        if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
            (ret = sf1_read(adapter, 1, 1, data)) != 0)
                return ret;

        for (; nwords; nwords--, data++) {
                ret = sf1_read(adapter, 4, nwords > 1, data);
                if (ret)
                        return ret;
                if (byte_oriented)
                        *data = htonl(*data);
        }
        return 0;
}
/**
 * t3_write_flash - write up to a page of data to the serial flash
 * @adapter: the adapter
 * @addr: the start address to write
 * @n: length of data to write
 * @data: the data to write
 *
 * Writes up to a page of data (256 bytes) to the serial flash starting
 * at the given address.
 */
static int t3_write_flash(struct adapter *adapter, unsigned int addr,
                          unsigned int n, const u8 *data)
{
        int ret;
        u32 buf[64];
        unsigned int i, c, left, val, offset = addr & 0xff;

        if (addr + n > SF_SIZE || offset + n > 256)
                return -EINVAL;

        val = swab32(addr) | SF_PROG_PAGE;

        if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
            (ret = sf1_write(adapter, 4, 1, val)) != 0)
                return ret;

        for (left = n; left; left -= c) {
                c = min(left, 4U);
                for (val = 0, i = 0; i < c; ++i)
                        val = (val << 8) + *data++;

                ret = sf1_write(adapter, c, c != left, val);
                if (ret)
                        return ret;
        }
        if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
                return ret;

        /* Read the page to verify the write succeeded */
        ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
        if (ret)
                return ret;

        if (memcmp(data - n, (u8 *) buf + offset, n))
                return -EIO;
        return 0;
}
/**
 * t3_get_tp_version - read the tp sram version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the protocol sram version from sram.
 */
int t3_get_tp_version(struct adapter *adapter, u32 *vers)
{
        int ret;

        /* Get version loaded in SRAM */
        t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
        ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
                              1, 1, 5, 1);
        if (ret)
                return ret;

        *vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);

        return 0;
}
/**
 * t3_check_tpsram_version - read the tp sram version
 * @adapter: the adapter
 *
 * Reads the protocol sram version from flash.
 */
int t3_check_tpsram_version(struct adapter *adapter)
{
        int ret;
        u32 vers;
        unsigned int major, minor;

        if (adapter->params.rev == T3_REV_A)
                return 0;

        ret = t3_get_tp_version(adapter, &vers);
        if (ret)
                return ret;

        major = G_TP_VERSION_MAJOR(vers);
        minor = G_TP_VERSION_MINOR(vers);

        if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
                return 0;
        else {
                CH_ERR(adapter, "found wrong TP version (%u.%u), "
                       "driver compiled for version %d.%d\n", major, minor,
                       TP_VERSION_MAJOR, TP_VERSION_MINOR);
        }
        return -EINVAL;
}
/**
 * t3_check_tpsram - check if provided protocol SRAM
 *                   is compatible with this driver
 * @adapter: the adapter
 * @tp_sram: the firmware image to write
 * @size: image size
 *
 * Checks if an adapter's tp sram is compatible with the driver.
 * Returns 0 if the versions are compatible, a negative error otherwise.
 */
int t3_check_tpsram(struct adapter *adapter, const u8 *tp_sram,
                    unsigned int size)
{
        u32 csum;
        unsigned int i;
        const __be32 *p = (const __be32 *)tp_sram;

        /* Verify checksum */
        for (csum = 0, i = 0; i < size / sizeof(csum); i++)
                csum += ntohl(p[i]);
        if (csum != 0xffffffff) {
                CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
                       csum);
                return -EINVAL;
        }

        return 0;
}
enum fw_version_type {
        FW_VERSION_N3,
        FW_VERSION_T3
};
/**
 * t3_get_fw_version - read the firmware version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the FW version from flash.
 */
int t3_get_fw_version(struct adapter *adapter, u32 *vers)
{
        return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
}
/**
 * t3_check_fw_version - check if the FW is compatible with this driver
 * @adapter: the adapter
 *
 * Checks if an adapter's FW is compatible with the driver.  Returns 0
 * if the versions are compatible, a negative error otherwise.
 */
int t3_check_fw_version(struct adapter *adapter)
{
        int ret;
        u32 vers;
        unsigned int type, major, minor;

        ret = t3_get_fw_version(adapter, &vers);
        if (ret)
                return ret;

        type = G_FW_VERSION_TYPE(vers);
        major = G_FW_VERSION_MAJOR(vers);
        minor = G_FW_VERSION_MINOR(vers);

        if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
            minor == FW_VERSION_MINOR)
                return 0;
        else if (major != FW_VERSION_MAJOR || minor < FW_VERSION_MINOR)
                CH_WARN(adapter, "found old FW minor version (%u.%u), "
                        "driver compiled for version %u.%u\n", major, minor,
                        FW_VERSION_MAJOR, FW_VERSION_MINOR);
        else {
                CH_WARN(adapter, "found newer FW version (%u.%u), "
                        "driver compiled for version %u.%u\n", major, minor,
                        FW_VERSION_MAJOR, FW_VERSION_MINOR);
                return 0;
        }
        return -EINVAL;
}
/**
 * t3_flash_erase_sectors - erase a range of flash sectors
 * @adapter: the adapter
 * @start: the first sector to erase
 * @end: the last sector to erase
 *
 * Erases the sectors in the given range.
 */
static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
        while (start <= end) {
                int ret;

                if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
                    (ret = sf1_write(adapter, 4, 0,
                                     SF_ERASE_SECTOR | (start << 8))) != 0 ||
                    (ret = flash_wait_op(adapter, 5, 500)) != 0)
                        return ret;
                start++;
        }
        return 0;
}
/**
 * t3_load_fw - download firmware
 * @adapter: the adapter
 * @fw_data: the firmware image to write
 * @size: image size
 *
 * Write the supplied firmware image to the card's serial flash.
 * The FW image has the following sections: @size - 8 bytes of code and
 * data, followed by 4 bytes of FW version, followed by the 32-bit
 * 1's complement checksum of the whole image.
 */
int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
{
        u32 csum;
        unsigned int i;
        const __be32 *p = (const __be32 *)fw_data;
        int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;

        if ((size & 3) || size < FW_MIN_SIZE)
                return -EINVAL;
        if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
                return -EFBIG;

        for (csum = 0, i = 0; i < size / sizeof(csum); i++)
                csum += ntohl(p[i]);
        if (csum != 0xffffffff) {
                CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
                       csum);
                return -EINVAL;
        }

        ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
        if (ret)
                goto out;

        size -= 8;              /* trim off version and checksum */
        for (addr = FW_FLASH_BOOT_ADDR; size;) {
                unsigned int chunk_size = min(size, 256U);

                ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
                if (ret)
                        goto out;

                addr += chunk_size;
                fw_data += chunk_size;
                size -= chunk_size;
        }

        ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
out:
        if (ret)
                CH_ERR(adapter, "firmware download failed, error %d\n", ret);
        return ret;
}
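/*
 * Expository sketch (added, not part of the original driver): the image
 * format above implies that the 32-bit sum of every big-endian word,
 * including the trailing checksum word, must come to 0xffffffff.  A
 * hypothetical image builder would therefore choose the final word like
 * this:
 */
#if 0
static u32 example_fw_csum(const __be32 *words, unsigned int nwords)
{
        u32 sum = 0;
        unsigned int i;

        /* Sum every word except the final checksum slot... */
        for (i = 0; i < nwords - 1; i++)
                sum += ntohl(words[i]);
        /* ...and pick the last word so the total becomes 0xffffffff. */
        return 0xffffffff - sum;
}
#endif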
#define CIM_CTL_BASE 0x2000

/**
 * t3_cim_ctl_blk_read - read a block from CIM control region
 * @adap: the adapter
 * @addr: the start address within the CIM control region
 * @n: number of words to read
 * @valp: where to store the result
 *
 * Reads a block of 4-byte words from the CIM control region.
 */
int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
                        unsigned int n, unsigned int *valp)
{
        int ret = 0;

        if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
                return -EBUSY;

        for ( ; !ret && n--; addr += 4) {
                t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
                ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
                                      0, 5, 2);
                if (!ret)
                        *valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
        }
        return ret;
}
static void t3_gate_rx_traffic(struct cmac *mac, u32 *rx_cfg,
                               u32 *rx_hash_high, u32 *rx_hash_low)
{
        /* stop Rx unicast traffic */
        t3_mac_disable_exact_filters(mac);

        /* stop broadcast, multicast, promiscuous mode traffic */
        *rx_cfg = t3_read_reg(mac->adapter, A_XGM_RX_CFG);
        t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
                         F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
                         F_DISBCAST);

        *rx_hash_high = t3_read_reg(mac->adapter, A_XGM_RX_HASH_HIGH);
        t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, 0);

        *rx_hash_low = t3_read_reg(mac->adapter, A_XGM_RX_HASH_LOW);
        t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, 0);

        /* Leave time to drain max RX fifo */
        msleep(1);
}
static void t3_open_rx_traffic(struct cmac *mac, u32 rx_cfg,
                               u32 rx_hash_high, u32 rx_hash_low)
{
        t3_mac_enable_exact_filters(mac);
        t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
                         F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
                         rx_cfg);
        t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, rx_hash_high);
        t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, rx_hash_low);
}
/**
 * t3_link_changed - handle interface link changes
 * @adapter: the adapter
 * @port_id: the port index that changed link state
 *
 * Called when a port's link settings change to propagate the new values
 * to the associated PHY and MAC.  After performing the common tasks it
 * invokes an OS-specific handler.
 */
void t3_link_changed(struct adapter *adapter, int port_id)
{
        int link_ok, speed, duplex, fc;
        struct port_info *pi = adap2pinfo(adapter, port_id);
        struct cphy *phy = &pi->phy;
        struct cmac *mac = &pi->mac;
        struct link_config *lc = &pi->link_config;

        phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);

        if (!lc->link_ok && link_ok) {
                u32 rx_cfg, rx_hash_high, rx_hash_low;
                u32 status;

                t3_xgm_intr_enable(adapter, port_id);
                t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);
                t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
                t3_mac_enable(mac, MAC_DIRECTION_RX);

                status = t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
                if (status & F_LINKFAULTCHANGE) {
                        mac->stats.link_faults++;
                        pi->link_fault = 1;
                }
                t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);
        }

        if (lc->requested_fc & PAUSE_AUTONEG)
                fc &= lc->requested_fc;
        else
                fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

        if (link_ok == lc->link_ok && speed == lc->speed &&
            duplex == lc->duplex && fc == lc->fc)
                return;                            /* nothing changed */

        if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
            uses_xaui(adapter)) {
                if (link_ok)
                        t3b_pcs_reset(mac);
                t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
                             link_ok ? F_TXACTENABLE | F_RXEN : 0);
        }
        lc->link_ok = link_ok;
        lc->speed = speed < 0 ? SPEED_INVALID : speed;
        lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;

        if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
                /* Set MAC speed, duplex, and flow control to match PHY. */
                t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
                lc->fc = fc;
        }

        t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
}
void t3_link_fault(struct adapter *adapter, int port_id)
{
        struct port_info *pi = adap2pinfo(adapter, port_id);
        struct cmac *mac = &pi->mac;
        struct cphy *phy = &pi->phy;
        struct link_config *lc = &pi->link_config;
        int link_ok, speed, duplex, fc, link_fault;
        u32 rx_cfg, rx_hash_high, rx_hash_low;

        t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);

        if (adapter->params.rev > 0 && uses_xaui(adapter))
                t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset, 0);

        t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
        t3_mac_enable(mac, MAC_DIRECTION_RX);

        t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);

        link_fault = t3_read_reg(adapter,
                                 A_XGM_INT_STATUS + mac->offset);
        link_fault &= F_LINKFAULTCHANGE;

        link_ok = lc->link_ok;
        speed = lc->speed;
        duplex = lc->duplex;
        fc = lc->fc;

        phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);

        if (link_fault) {
                lc->link_ok = 0;
                lc->speed = SPEED_INVALID;
                lc->duplex = DUPLEX_INVALID;

                t3_os_link_fault(adapter, port_id, 0);

                /* Account link faults only when the phy reports a link up */
                if (link_ok)
                        mac->stats.link_faults++;
        } else {
                if (link_ok)
                        t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
                                     F_TXACTENABLE | F_RXEN);

                pi->link_fault = 0;
                lc->link_ok = (unsigned char)link_ok;
                lc->speed = speed < 0 ? SPEED_INVALID : speed;
                lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
                t3_os_link_fault(adapter, port_id, link_ok);
        }
}
/**
 * t3_link_start - apply link configuration to MAC/PHY
 * @phy: the PHY to setup
 * @mac: the MAC to setup
 * @lc: the requested link configuration
 *
 * Set up a port's MAC and PHY according to a desired link configuration.
 * - If the PHY can auto-negotiate first decide what to advertise, then
 *   enable/disable auto-negotiation as desired, and reset.
 * - If the PHY does not auto-negotiate just reset it.
 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *   otherwise do it later based on the outcome of auto-negotiation.
 */
int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
{
        unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

        lc->link_ok = 0;
        if (lc->supported & SUPPORTED_Autoneg) {
                lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
                if (fc) {
                        lc->advertising |= ADVERTISED_Asym_Pause;
                        if (fc & PAUSE_RX)
                                lc->advertising |= ADVERTISED_Pause;
                }
                phy->ops->advertise(phy, lc->advertising);

                if (lc->autoneg == AUTONEG_DISABLE) {
                        lc->speed = lc->requested_speed;
                        lc->duplex = lc->requested_duplex;
                        lc->fc = (unsigned char)fc;
                        t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
                                                   fc);
                        /* Also disables autoneg */
                        phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
                } else
                        phy->ops->autoneg_enable(phy);
        } else {
                t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
                lc->fc = (unsigned char)fc;
                phy->ops->reset(phy, 0);
        }
        return 0;
}
/**
 * t3_set_vlan_accel - control HW VLAN extraction
 * @adapter: the adapter
 * @ports: bitmap of adapter ports to operate on
 * @on: enable (1) or disable (0) HW VLAN extraction
 *
 * Enables or disables HW extraction of VLAN tags for the given ports.
 */
void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
{
        t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
                         ports << S_VLANEXTRACTIONENABLE,
                         on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
}
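/*
 * Illustrative usage sketch (added for exposition, not part of the original
 * driver): @ports is a bitmap, so enabling HW VLAN extraction on ports 0
 * and 1 looks like this.  The wrapper is hypothetical.
 */
#if 0
static void example_enable_vlan_extraction(struct adapter *adapter)
{
        t3_set_vlan_accel(adapter, (1 << 0) | (1 << 1), 1);
}
#endif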
struct intr_info {
        unsigned int mask;      /* bits to check in interrupt status */
        const char *msg;        /* message to print or NULL */
        short stat_idx;         /* stat counter to increment or -1 */
        unsigned short fatal;   /* whether the condition reported is fatal */
};
/**
 * t3_handle_intr_status - table driven interrupt handler
 * @adapter: the adapter that generated the interrupt
 * @reg: the interrupt status register to process
 * @mask: a mask to apply to the interrupt status
 * @acts: table of interrupt actions
 * @stats: statistics counters tracking interrupt occurrences
 *
 * A table driven interrupt handler that applies a set of masks to an
 * interrupt status word and performs the corresponding actions if the
 * interrupts described by the mask have occurred.  The actions include
 * optionally printing a warning or alert message, and optionally
 * incrementing a stat counter.  The table is terminated by an entry
 * specifying mask 0.  Returns the number of fatal interrupt conditions.
 */
static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
                                 unsigned int mask,
                                 const struct intr_info *acts,
                                 unsigned long *stats)
{
        int fatal = 0;
        unsigned int status = t3_read_reg(adapter, reg) & mask;

        for (; acts->mask; ++acts) {
                if (!(status & acts->mask))
                        continue;
                if (acts->fatal) {
                        fatal++;
                        CH_ALERT(adapter, "%s (0x%x)\n",
                                 acts->msg, status & acts->mask);
                } else if (acts->msg)
                        CH_WARN(adapter, "%s (0x%x)\n",
                                acts->msg, status & acts->mask);
                if (acts->stat_idx >= 0)
                        stats[acts->stat_idx]++;
        }
        if (status)             /* clear processed interrupts */
                t3_write_reg(adapter, reg, status);
        return fatal;
}
#define SGE_INTR_MASK (F_RSPQDISABLED | \
                       F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
                       F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
                       F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
                       V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
                       F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
                       F_HIRCQPARITYERROR)
#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
                       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
                       F_NFASRCHFAIL)
#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
                       V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
                       F_TXFIFO_UNDERRUN | F_RXFIFO_OVERFLOW)
#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
                        F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
                        F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
                        F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
                        V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
                        V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
                        F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
                        /* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
                        F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
                        F_TXPARERR | V_BISTERR(M_BISTERR))
#define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
                         F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
                         F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
#define ULPTX_INTR_MASK 0xfc
#define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
                         F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
                         F_ZERO_SWITCH_ERROR)
#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
                       F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
                       F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
                       F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
                       F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
                       F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
                       F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
                       F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
                        V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
                        V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
                        V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
                        V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
                       V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
                       V_RXTPPARERRENB(M_RXTPPARERRENB) | \
                       V_MCAPARERRENB(M_MCAPARERRENB))
#define XGM_EXTRA_INTR_MASK (F_LINKFAULTCHANGE)
#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
                      F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
                      F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
                      F_MPS0 | F_CPL_SWITCH)
/*
 * Interrupt handler for the PCIX1 module.
 */
static void pci_intr_handler(struct adapter *adapter)
{
        static const struct intr_info pcix1_intr_info[] = {
                {F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
                {F_SIGTARABT, "PCI signaled target abort", -1, 1},
                {F_RCVTARABT, "PCI received target abort", -1, 1},
                {F_RCVMSTABT, "PCI received master abort", -1, 1},
                {F_SIGSYSERR, "PCI signaled system error", -1, 1},
                {F_DETPARERR, "PCI detected parity error", -1, 1},
                {F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
                {F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
                {F_RCVSPLCMPERR, "PCI received split completion error", -1,
                 1},
                {F_DETCORECCERR, "PCI correctable ECC error",
                 STAT_PCI_CORR_ECC, 0},
                {F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
                {F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
                {V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
                 1},
                {V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
                 1},
                {V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
                 1},
                {V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
                 "error", -1, 1},
                {0}
        };

        if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
                                  pcix1_intr_info, adapter->irq_stats))
                t3_fatal_err(adapter);
}
/*
 * Interrupt handler for the PCIE module.
 */
static void pcie_intr_handler(struct adapter *adapter)
{
        static const struct intr_info pcie_intr_info[] = {
                {F_PEXERR, "PCI PEX error", -1, 1},
                {F_UNXSPLCPLERRR,
                 "PCI unexpected split completion DMA read error", -1, 1},
                {F_UNXSPLCPLERRC,
                 "PCI unexpected split completion DMA command error", -1, 1},
                {F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
                {F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
                {F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
                {F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
                {V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
                 "PCI MSI-X table/PBA parity error", -1, 1},
                {F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1},
                {F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1},
                {F_RXPARERR, "PCI Rx parity error", -1, 1},
                {F_TXPARERR, "PCI Tx parity error", -1, 1},
                {V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
                {0}
        };

        if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
                CH_ALERT(adapter, "PEX error code 0x%x\n",
                         t3_read_reg(adapter, A_PCIE_PEX_ERR));

        if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
                                  pcie_intr_info, adapter->irq_stats))
                t3_fatal_err(adapter);
}
/*
 * TP interrupt handler.
 */
static void tp_intr_handler(struct adapter *adapter)
{
        static const struct intr_info tp_intr_info[] = {
                {0xffffff, "TP parity error", -1, 1},
                {0x1000000, "TP out of Rx pages", -1, 1},
                {0x2000000, "TP out of Tx pages", -1, 1},
                {0}
        };

        static struct intr_info tp_intr_info_t3c[] = {
                {0x1fffffff, "TP parity error", -1, 1},
                {F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1},
                {F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1},
                {0}
        };

        if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
                                  adapter->params.rev < T3_REV_C ?
                                  tp_intr_info : tp_intr_info_t3c, NULL))
                t3_fatal_err(adapter);
}
/*
 * CIM interrupt handler.
 */
static void cim_intr_handler(struct adapter *adapter)
{
        static const struct intr_info cim_intr_info[] = {
                {F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
                {F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
                {F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
                {F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
                {F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
                {F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
                {F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
                {F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
                {F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
                {F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
                {F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
                {F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
                {F_DRAMPARERR, "CIM DRAM parity error", -1, 1},
                {F_ICACHEPARERR, "CIM icache parity error", -1, 1},
                {F_DCACHEPARERR, "CIM dcache parity error", -1, 1},
                {F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1},
                {F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1},
                {F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1},
                {F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1},
                {F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1},
                {F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1},
                {F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1},
                {F_ITAGPARERR, "CIM itag parity error", -1, 1},
                {F_DTAGPARERR, "CIM dtag parity error", -1, 1},
                {0}
        };

        if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
                                  cim_intr_info, NULL))
                t3_fatal_err(adapter);
}
/*
 * ULP RX interrupt handler.
 */
static void ulprx_intr_handler(struct adapter *adapter)
{
        static const struct intr_info ulprx_intr_info[] = {
                {F_PARERRDATA, "ULP RX data parity error", -1, 1},
                {F_PARERRPCMD, "ULP RX command parity error", -1, 1},
                {F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1},
                {F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1},
                {F_ARBFPERR, "ULP RX ArbF parity error", -1, 1},
                {F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1},
                {F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1},
                {F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1},
                {0}
        };

        if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
                                  ulprx_intr_info, NULL))
                t3_fatal_err(adapter);
}
/*
 * ULP TX interrupt handler.
 */
static void ulptx_intr_handler(struct adapter *adapter)
{
        static const struct intr_info ulptx_intr_info[] = {
                {F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
                 STAT_ULP_CH0_PBL_OOB, 0},
                {F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
                 STAT_ULP_CH1_PBL_OOB, 0},
                {0xfc, "ULP TX parity error", -1, 1},
                {0}
        };

        if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
                                  ulptx_intr_info, adapter->irq_stats))
                t3_fatal_err(adapter);
}
#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
        F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
        F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
        F_ICSPI1_TX_FRAMING_ERROR)
#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
        F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
        F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
        F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
/*
 * PM TX interrupt handler.
 */
static void pmtx_intr_handler(struct adapter *adapter)
{
        static const struct intr_info pmtx_intr_info[] = {
                {F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
                {ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
                {OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
                {V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
                 "PMTX ispi parity error", -1, 1},
                {V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
                 "PMTX ospi parity error", -1, 1},
                {0}
        };

        if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
                                  pmtx_intr_info, NULL))
                t3_fatal_err(adapter);
}
#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
        F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
        F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
        F_IESPI1_TX_FRAMING_ERROR)
#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
        F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
        F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
        F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
/*
 * PM RX interrupt handler.
 */
static void pmrx_intr_handler(struct adapter *adapter)
{
        static const struct intr_info pmrx_intr_info[] = {
                {F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
                {IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
                {OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
                {V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
                 "PMRX ispi parity error", -1, 1},
                {V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
                 "PMRX ospi parity error", -1, 1},
                {0}
        };

        if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
                                  pmrx_intr_info, NULL))
                t3_fatal_err(adapter);
}
/*
 * CPL switch interrupt handler.
 */
static void cplsw_intr_handler(struct adapter *adapter)
{
        static const struct intr_info cplsw_intr_info[] = {
                {F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1},
                {F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1},
                {F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
                {F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
                {F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
                {F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
                {0}
        };

        if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
                                  cplsw_intr_info, NULL))
                t3_fatal_err(adapter);
}
/*
 * MPS interrupt handler.
 */
static void mps_intr_handler(struct adapter *adapter)
{
        static const struct intr_info mps_intr_info[] = {
                {0x1ff, "MPS parity error", -1, 1},
                {0}
        };

        if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
                                  mps_intr_info, NULL))
                t3_fatal_err(adapter);
}
#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)

/*
 * MC7 interrupt handler.
 */
static void mc7_intr_handler(struct mc7 *mc7)
{
        struct adapter *adapter = mc7->adapter;
        u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);

        if (cause & F_CE) {
                mc7->stats.corr_err++;
                CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
                        "data 0x%x 0x%x 0x%x\n", mc7->name,
                        t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
                        t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
                        t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
                        t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
        }

        if (cause & F_UE) {
                mc7->stats.uncorr_err++;
                CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
                         "data 0x%x 0x%x 0x%x\n", mc7->name,
                         t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
                         t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
                         t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
                         t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
        }

        if (G_PE(cause)) {
                mc7->stats.parity_err++;
                CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
                         mc7->name, G_PE(cause));
        }

        if (cause & F_AE) {
                u32 addr = 0;

                if (adapter->params.rev > 0)
                        addr = t3_read_reg(adapter,
                                           mc7->offset + A_MC7_ERR_ADDR);
                mc7->stats.addr_err++;
                CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
                         mc7->name, addr);
        }

        if (cause & MC7_INTR_FATAL)
                t3_fatal_err(adapter);

        t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
}
#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
                        V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))

/*
 * XGMAC interrupt handler.
 */
static int mac_intr_handler(struct adapter *adap, unsigned int idx)
{
        struct cmac *mac = &adap2pinfo(adap, idx)->mac;
        /*
         * We mask out interrupt causes for which we're not taking interrupts.
         * This allows us to use polling logic to monitor some of the other
         * conditions when taking interrupts would impose too much load on the
         * system.
         */
        u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset) &
                    ~F_RXFIFO_OVERFLOW;

        if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
                mac->stats.tx_fifo_parity_err++;
                CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
        }
        if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
                mac->stats.rx_fifo_parity_err++;
                CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
        }
        if (cause & F_TXFIFO_UNDERRUN)
                mac->stats.tx_fifo_urun++;
        if (cause & F_RXFIFO_OVERFLOW)
                mac->stats.rx_fifo_ovfl++;
        if (cause & V_SERDES_LOS(M_SERDES_LOS))
                mac->stats.serdes_signal_loss++;
        if (cause & F_XAUIPCSCTCERR)
                mac->stats.xaui_pcs_ctc_err++;
        if (cause & F_XAUIPCSALIGNCHANGE)
                mac->stats.xaui_pcs_align_change++;
        if (cause & F_XGM_INT) {
                t3_set_reg_field(adap,
                                 A_XGM_INT_ENABLE + mac->offset,
                                 F_XGM_INT, 0);
                mac->stats.link_faults++;

                t3_os_link_fault_handler(adap, idx);
        }

        t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);

        if (cause & XGM_INTR_FATAL)
                t3_fatal_err(adap);

        return cause != 0;
}
/*
 * Interrupt handler for PHY events.
 */
int t3_phy_intr_handler(struct adapter *adapter)
{
        u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);

        for_each_port(adapter, i) {
                struct port_info *p = adap2pinfo(adapter, i);

                if (!(p->phy.caps & SUPPORTED_IRQ))
                        continue;

                if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) {
                        int phy_cause = p->phy.ops->intr_handler(&p->phy);

                        if (phy_cause & cphy_cause_link_change)
                                t3_link_changed(adapter, i);
                        if (phy_cause & cphy_cause_fifo_error)
                                p->phy.fifo_errors++;
                        if (phy_cause & cphy_cause_module_change)
                                t3_os_phymod_changed(adapter, i);
                }
        }

        t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
        return 0;
}
/*
 * T3 slow path (non-data) interrupt handler.
 */
int t3_slow_intr_handler(struct adapter *adapter)
{
        u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);

        cause &= adapter->slow_intr_mask;
        if (!cause)
                return 0;
        if (cause & F_PCIM0) {
                if (is_pcie(adapter))
                        pcie_intr_handler(adapter);
                else
                        pci_intr_handler(adapter);
        }
        if (cause & F_SGE3)
                t3_sge_err_intr_handler(adapter);
        if (cause & F_MC7_PMRX)
                mc7_intr_handler(&adapter->pmrx);
        if (cause & F_MC7_PMTX)
                mc7_intr_handler(&adapter->pmtx);
        if (cause & F_MC7_CM)
                mc7_intr_handler(&adapter->cm);
        if (cause & F_CIM)
                cim_intr_handler(adapter);
        if (cause & F_TP1)
                tp_intr_handler(adapter);
        if (cause & F_ULP2_RX)
                ulprx_intr_handler(adapter);
        if (cause & F_ULP2_TX)
                ulptx_intr_handler(adapter);
        if (cause & F_PM1_RX)
                pmrx_intr_handler(adapter);
        if (cause & F_PM1_TX)
                pmtx_intr_handler(adapter);
        if (cause & F_CPL_SWITCH)
                cplsw_intr_handler(adapter);
        if (cause & F_MPS0)
                mps_intr_handler(adapter);
        if (cause & F_MC5A)
                t3_mc5_intr_handler(&adapter->mc5);
        if (cause & F_XGMAC0_0)
                mac_intr_handler(adapter, 0);
        if (cause & F_XGMAC0_1)
                mac_intr_handler(adapter, 1);
        if (cause & F_T3DBG)
                t3_os_ext_intr_handler(adapter);

        /* Clear the interrupts just processed. */
        t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
        t3_read_reg(adapter, A_PL_INT_CAUSE0);  /* flush */
        return 1;
}
static unsigned int calc_gpio_intr(struct adapter *adap)
{
        unsigned int i, gpi_intr = 0;

        for_each_port(adap, i)
                if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) &&
                    adapter_info(adap)->gpio_intr[i])
                        gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i];

        return gpi_intr;
}
/**
 * t3_intr_enable - enable interrupts
 * @adapter: the adapter whose interrupts should be enabled
 *
 * Enable interrupts by setting the interrupt enable registers of the
 * various HW modules and then enabling the top-level interrupt
 * concentrator.
 */
void t3_intr_enable(struct adapter *adapter)
{
        static const struct addr_val_pair intr_en_avp[] = {
                {A_SG_INT_ENABLE, SGE_INTR_MASK},
                {A_MC7_INT_ENABLE, MC7_INTR_MASK},
                {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
                 MC7_INTR_MASK},
                {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
                 MC7_INTR_MASK},
                {A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
                {A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
                {A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
                {A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
                {A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
                {A_MPS_INT_ENABLE, MPS_INTR_MASK},
        };

        adapter->slow_intr_mask = PL_INTR_MASK;

        t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
        t3_write_reg(adapter, A_TP_INT_ENABLE,
                     adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);

        if (adapter->params.rev > 0) {
                t3_write_reg(adapter, A_CPL_INTR_ENABLE,
                             CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
                t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
                             ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
                             F_PBL_BOUND_ERR_CH1);
        } else {
                t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
                t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
        }

        t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter));

        if (is_pcie(adapter))
                t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
        else
                t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
        t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
        t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
}
/**
 * t3_intr_disable - disable a card's interrupts
 * @adapter: the adapter whose interrupts should be disabled
 *
 * Disable interrupts.  We only disable the top-level interrupt
 * concentrator and the SGE data interrupts.
 */
void t3_intr_disable(struct adapter *adapter)
{
	t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
	adapter->slow_intr_mask = 0;
}

/**
 * t3_intr_clear - clear all interrupts
 * @adapter: the adapter whose interrupts should be cleared
 *
 * Clears all interrupts.
 */
void t3_intr_clear(struct adapter *adapter)
{
	static const unsigned int cause_reg_addr[] = {
		A_SG_INT_CAUSE,
		A_SG_RSPQ_FL_STATUS,
		A_PCIX_INT_CAUSE,
		A_MC7_INT_CAUSE,
		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
		A_CIM_HOST_INT_CAUSE,
		A_TP_INT_CAUSE,
		A_MC5_DB_INT_CAUSE,
		A_ULPRX_INT_CAUSE,
		A_ULPTX_INT_CAUSE,
		A_CPL_INTR_CAUSE,
		A_PM1_TX_INT_CAUSE,
		A_PM1_RX_INT_CAUSE,
		A_MPS_INT_CAUSE,
		A_T3DBG_INT_CAUSE,
	};
	unsigned int i;

	/* Clear PHY and MAC interrupts for each port. */
	for_each_port(adapter, i)
		t3_port_intr_clear(adapter, i);

	for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
		t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);

	if (is_pcie(adapter))
		t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
	t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
	t3_read_reg(adapter, A_PL_INT_CAUSE0);	/* flush */
}

void t3_xgm_intr_enable(struct adapter *adapter, int idx)
{
	struct port_info *pi = adap2pinfo(adapter, idx);

	t3_write_reg(adapter, A_XGM_XGM_INT_ENABLE + pi->mac.offset,
		     XGM_EXTRA_INTR_MASK);
}

void t3_xgm_intr_disable(struct adapter *adapter, int idx)
{
	struct port_info *pi = adap2pinfo(adapter, idx);

	t3_write_reg(adapter, A_XGM_XGM_INT_DISABLE + pi->mac.offset,
		     0x7ff);
}

/**
 * t3_port_intr_enable - enable port-specific interrupts
 * @adapter: associated adapter
 * @idx: index of port whose interrupts should be enabled
 *
 * Enable port-specific (i.e., MAC and PHY) interrupts for the given
 * port.
 */
void t3_port_intr_enable(struct adapter *adapter, int idx)
{
	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;

	t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
	t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
	phy->ops->intr_enable(phy);
}

/**
 * t3_port_intr_disable - disable port-specific interrupts
 * @adapter: associated adapter
 * @idx: index of port whose interrupts should be disabled
 *
 * Disable port-specific (i.e., MAC and PHY) interrupts for the given
 * port.
 */
void t3_port_intr_disable(struct adapter *adapter, int idx)
{
	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;

	t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
	t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
	phy->ops->intr_disable(phy);
}

/**
 * t3_port_intr_clear - clear port-specific interrupts
 * @adapter: associated adapter
 * @idx: index of port whose interrupts to clear
 *
 * Clear port-specific (i.e., MAC and PHY) interrupts for the given
 * port.
 */
void t3_port_intr_clear(struct adapter *adapter, int idx)
{
	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;

	t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
	t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
	phy->ops->intr_clear(phy);
}

#define SG_CONTEXT_CMD_ATTEMPTS 100

/**
 * t3_sge_write_context - write an SGE context
 * @adapter: the adapter
 * @id: the context id
 * @type: the context type
 *
 * Program an SGE context with the values already loaded in the
 * CONTEXT_DATA? registers.
 */
static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
				unsigned int type)
{
	if (type == F_RESPONSEQ) {
		/*
		 * Can't write the Response Queue Context bits for
		 * Interrupt Armed or the Reserve bits after the chip
		 * has been initialized out of reset.  Writing to these
		 * bits can confuse the hardware.
		 */
		t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
		t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
		t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0x17ffffff);
		t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
	} else {
		t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
		t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
		t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
		t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
	}
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}

/**
 * clear_sge_ctxt - completely clear an SGE context
 * @adap: the adapter
 * @id: the context id
 * @type: the context type
 *
 * Completely clear an SGE context.  Used predominantly at post-reset
 * initialization.  Note in particular that we don't skip writing to any
 * "sensitive bits" in the contexts the way that t3_sge_write_context()
 * does.
 */
static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
			  unsigned int type)
{
	t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
	t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
	t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
	t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
	t3_write_reg(adap, A_SG_CONTEXT_MASK0, 0xffffffff);
	t3_write_reg(adap, A_SG_CONTEXT_MASK1, 0xffffffff);
	t3_write_reg(adap, A_SG_CONTEXT_MASK2, 0xffffffff);
	t3_write_reg(adap, A_SG_CONTEXT_MASK3, 0xffffffff);
	t3_write_reg(adap, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
	return t3_wait_op_done(adap, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}

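/*
 * Editorial note: the four CONTEXT_MASK registers act as per-bit write
 * enables for the matching CONTEXT_DATA registers; command opcode 1 writes
 * a context and opcode 0 reads one back.  A minimal sketch of updating a
 * single context bit (values illustrative only, mirroring the pattern used
 * by the enable/disable helpers further below):
 *
 *	t3_write_reg(adap, A_SG_CONTEXT_MASK3, F_EC_VALID);   // touch 1 bit
 *	t3_write_reg(adap, A_SG_CONTEXT_DATA3, V_EC_VALID(1));
 *	t3_write_reg(adap, A_SG_CONTEXT_CMD,
 *		     V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
 */
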
/**
 * t3_sge_init_ecntxt - initialize an SGE egress context
 * @adapter: the adapter to configure
 * @id: the context id
 * @gts_enable: whether to enable GTS for the context
 * @type: the egress context type
 * @respq: associated response queue
 * @base_addr: base address of queue
 * @size: number of queue entries
 * @token: uP token
 * @gen: initial generation value for the context
 * @cidx: consumer pointer
 *
 * Initialize an SGE egress context and make it ready for use.  If the
 * platform allows concurrent context operations, the caller is
 * responsible for appropriate locking.
 */
int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
		       enum sge_context_type type, int respq, u64 base_addr,
		       unsigned int size, unsigned int token, int gen,
		       unsigned int cidx)
{
	unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;

	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	base_addr >>= 12;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
		     V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
		     V_EC_BASE_LO(base_addr & 0xffff));
	base_addr >>= 16;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
	base_addr >>= 32;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
		     V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
		     V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
		     F_EC_VALID);
	return t3_sge_write_context(adapter, id, F_EGRESS);
}

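/*
 * Usage sketch (values illustrative, not taken from a real caller): setting
 * up a 1024-entry Ethernet egress queue whose descriptor ring starts at the
 * 4K-aligned bus address `ring_dma`, bound to response queue 0, with GTS
 * enabled and an initial generation of 1:
 *
 *	err = t3_sge_init_ecntxt(adap, qid, 1, SGE_CNTXT_ETH, 0,
 *				 ring_dma, 1024, token, 1, 0);
 */
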
/**
 * t3_sge_init_flcntxt - initialize an SGE free-buffer list context
 * @adapter: the adapter to configure
 * @id: the context id
 * @gts_enable: whether to enable GTS for the context
 * @base_addr: base address of queue
 * @size: number of queue entries
 * @bsize: size of each buffer for this queue
 * @cong_thres: threshold to signal congestion to upstream producers
 * @gen: initial generation value for the context
 * @cidx: consumer pointer
 *
 * Initialize an SGE free list context and make it ready for use.  The
 * caller is responsible for ensuring only one context operation occurs
 * at a time.
 */
int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
			int gts_enable, u64 base_addr, unsigned int size,
			unsigned int bsize, unsigned int cong_thres, int gen,
			unsigned int cidx)
{
	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	base_addr >>= 12;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
	base_addr >>= 32;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
		     V_FL_BASE_HI((u32) base_addr) |
		     V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
		     V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
		     V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
		     V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
		     V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
	return t3_sge_write_context(adapter, id, F_FREELIST);
}

/**
 * t3_sge_init_rspcntxt - initialize an SGE response queue context
 * @adapter: the adapter to configure
 * @id: the context id
 * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
 * @base_addr: base address of queue
 * @size: number of queue entries
 * @fl_thres: threshold for selecting the normal or jumbo free list
 * @gen: initial generation value for the context
 * @cidx: consumer pointer
 *
 * Initialize an SGE response queue context and make it ready for use.
 * The caller is responsible for ensuring only one context operation
 * occurs at a time.
 */
int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
			 int irq_vec_idx, u64 base_addr, unsigned int size,
			 unsigned int fl_thres, int gen, unsigned int cidx)
{
	unsigned int intr = 0;

	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	base_addr >>= 12;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
		     V_CQ_INDEX(cidx));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
	base_addr >>= 32;
	if (irq_vec_idx >= 0)
		intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
		     V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
	return t3_sge_write_context(adapter, id, F_RESPONSEQ);
}

/**
 * t3_sge_init_cqcntxt - initialize an SGE completion queue context
 * @adapter: the adapter to configure
 * @id: the context id
 * @base_addr: base address of queue
 * @size: number of queue entries
 * @rspq: response queue for async notifications
 * @ovfl_mode: CQ overflow mode
 * @credits: completion queue credits
 * @credit_thres: the credit threshold
 *
 * Initialize an SGE completion queue context and make it ready for use.
 * The caller is responsible for ensuring only one context operation
 * occurs at a time.
 */
int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
			unsigned int size, int rspq, int ovfl_mode,
			unsigned int credits, unsigned int credit_thres)
{
	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	base_addr >>= 12;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
	base_addr >>= 32;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
		     V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
		     V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
		     V_CQ_ERR(ovfl_mode));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
		     V_CQ_CREDIT_THRES(credit_thres));
	return t3_sge_write_context(adapter, id, F_CQ);
}

/**
 * t3_sge_enable_ecntxt - enable/disable an SGE egress context
 * @adapter: the adapter
 * @id: the egress context id
 * @enable: enable (1) or disable (0) the context
 *
 * Enable or disable an SGE egress context.  The caller is responsible for
 * ensuring only one context operation occurs at a time.
 */
int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}

/**
 * t3_sge_disable_fl - disable an SGE free-buffer list
 * @adapter: the adapter
 * @id: the free list context id
 *
 * Disable an SGE free-buffer list.  The caller is responsible for
 * ensuring only one context operation occurs at a time.
 */
int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}

/**
 * t3_sge_disable_rspcntxt - disable an SGE response queue
 * @adapter: the adapter
 * @id: the response queue context id
 *
 * Disable an SGE response queue.  The caller is responsible for
 * ensuring only one context operation occurs at a time.
 */
int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}

/**
 * t3_sge_disable_cqcntxt - disable an SGE completion queue
 * @adapter: the adapter
 * @id: the completion queue context id
 *
 * Disable an SGE completion queue.  The caller is responsible for
 * ensuring only one context operation occurs at a time.
 */
int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}

/**
 * t3_sge_cqcntxt_op - perform an operation on a completion queue context
 * @adapter: the adapter
 * @id: the context id
 * @op: the operation to perform
 * @credits: credits to return to the CQ
 *
 * Perform the selected operation on an SGE completion queue context.
 * The caller is responsible for ensuring only one context operation
 * occurs at a time.
 */
int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
		      unsigned int credits)
{
	u32 val;

	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
		     V_CONTEXT(id) | F_CQ);
	if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
				0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
		return -EIO;

	if (op >= 2 && op < 7) {
		if (adapter->params.rev > 0)
			return G_CQ_INDEX(val);

		t3_write_reg(adapter, A_SG_CONTEXT_CMD,
			     V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
		if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
				    F_CONTEXT_CMD_BUSY, 0,
				    SG_CONTEXT_CMD_ATTEMPTS, 1))
			return -EIO;
		return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
	}
	return 0;
}

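/*
 * Usage sketch (opcode value illustrative): for ops in the range 2-6 the
 * function returns the CQ's current index rather than 0, so a caller
 * polling a completion queue might do:
 *
 *	int cidx = t3_sge_cqcntxt_op(adap, cq_id, 2, 0);
 *	if (cidx < 0)
 *		return cidx;	// -EBUSY or -EIO
 */
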
/**
 * t3_sge_read_context - read an SGE context
 * @type: the context type
 * @adapter: the adapter
 * @id: the context id
 * @data: holds the retrieved context
 *
 * Read an SGE egress context.  The caller is responsible for ensuring
 * only one context operation occurs at a time.
 */
static int t3_sge_read_context(unsigned int type, struct adapter *adapter,
			       unsigned int id, u32 data[4])
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
	if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
			    SG_CONTEXT_CMD_ATTEMPTS, 1))
		return -EIO;
	data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
	data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
	data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
	data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
	return 0;
}

/**
 * t3_sge_read_ecntxt - read an SGE egress context
 * @adapter: the adapter
 * @id: the context id
 * @data: holds the retrieved context
 *
 * Read an SGE egress context.  The caller is responsible for ensuring
 * only one context operation occurs at a time.
 */
int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4])
{
	if (id >= 65536)
		return -EINVAL;
	return t3_sge_read_context(F_EGRESS, adapter, id, data);
}

/**
 * t3_sge_read_cq - read an SGE CQ context
 * @adapter: the adapter
 * @id: the context id
 * @data: holds the retrieved context
 *
 * Read an SGE CQ context.  The caller is responsible for ensuring
 * only one context operation occurs at a time.
 */
int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4])
{
	if (id >= 65536)
		return -EINVAL;
	return t3_sge_read_context(F_CQ, adapter, id, data);
}

/**
 * t3_sge_read_fl - read an SGE free-list context
 * @adapter: the adapter
 * @id: the context id
 * @data: holds the retrieved context
 *
 * Read an SGE free-list context.  The caller is responsible for ensuring
 * only one context operation occurs at a time.
 */
int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4])
{
	if (id >= SGE_QSETS * 2)
		return -EINVAL;
	return t3_sge_read_context(F_FREELIST, adapter, id, data);
}

/**
 * t3_sge_read_rspq - read an SGE response queue context
 * @adapter: the adapter
 * @id: the context id
 * @data: holds the retrieved context
 *
 * Read an SGE response queue context.  The caller is responsible for
 * ensuring only one context operation occurs at a time.
 */
int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4])
{
	if (id >= SGE_QSETS)
		return -EINVAL;
	return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
}

/**
 * t3_config_rss - configure Rx packet steering
 * @adapter: the adapter
 * @rss_config: RSS settings (written to TP_RSS_CONFIG)
 * @cpus: values for the CPU lookup table (0xff terminated)
 * @rspq: values for the response queue lookup table (0xffff terminated)
 *
 * Programs the receive packet steering logic.  @cpus and @rspq provide
 * the values for the CPU and response queue lookup tables.  If they
 * provide fewer values than the size of the tables the supplied values
 * are used repeatedly until the tables are fully populated.
 */
void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
		   const u8 *cpus, const u16 *rspq)
{
	int i, j, cpu_idx = 0, q_idx = 0;

	if (cpus)
		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
			u32 val = i << 16;

			for (j = 0; j < 2; ++j) {
				val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
				if (cpus[cpu_idx] == 0xff)
					cpu_idx = 0;
			}
			t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
		}

	if (rspq)
		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
			t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
				     (i << 16) | rspq[q_idx++]);
			if (rspq[q_idx] == 0xffff)
				q_idx = 0;
		}

	t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
}

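/*
 * Usage sketch (table contents and config flags illustrative): because
 * both lookup tables cycle through the supplied values until full, short
 * terminated arrays suffice, e.g. to spread traffic over four response
 * queues:
 *
 *	static const u8 cpus[] = { 0, 1, 2, 3, 0xff };	  // 0xff terminator
 *	static const u16 rspq[] = { 0, 1, 2, 3, 0xffff }; // 0xffff terminator
 *
 *	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN,
 *		      cpus, rspq);
 */
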
/**
 * t3_read_rss - read the contents of the RSS tables
 * @adapter: the adapter
 * @lkup: holds the contents of the RSS lookup table
 * @map: holds the contents of the RSS map table
 *
 * Reads the contents of the receive packet steering tables.
 */
int t3_read_rss(struct adapter *adapter, u8 *lkup, u16 *map)
{
	int i;
	u32 val;

	if (lkup)
		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
			t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
				     0xffff0000 | i);
			val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
			if (!(val & 0x80000000))
				return -EAGAIN;
			*lkup++ = val;
			*lkup++ = (val >> 8);
		}

	if (map)
		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
			t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
				     0xffff0000 | i);
			val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
			if (!(val & 0x80000000))
				return -EAGAIN;
			*map++ = val;
		}
	return 0;
}

/**
 * t3_tp_set_offload_mode - put TP in NIC/offload mode
 * @adap: the adapter
 * @enable: 1 to select offload mode, 0 for regular NIC
 *
 * Switches TP to NIC/offload mode.
 */
void t3_tp_set_offload_mode(struct adapter *adap, int enable)
{
	if (is_offload(adap) || !enable)
		t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
				 V_NICMODE(!enable));
}

/**
 * pm_num_pages - calculate the number of pages of the payload memory
 * @mem_size: the size of the payload memory
 * @pg_size: the size of each payload memory page
 *
 * Calculate the number of pages, each of the given size, that fit in a
 * memory of the specified size, respecting the HW requirement that the
 * number of pages must be a multiple of 24.
 */
static inline unsigned int pm_num_pages(unsigned int mem_size,
					unsigned int pg_size)
{
	unsigned int n = mem_size / pg_size;

	return n - n % 24;
}

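/*
 * Worked example (sizes illustrative): a 64 MB payload-memory channel with
 * 16 KB pages gives n = 4096; since 4096 % 24 = 16, the function returns
 * 4080 pages, the largest multiple of 24 that fits.
 */
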
#define mem_region(adap, start, size, reg) \
	t3_write_reg((adap), A_ ## reg, (start)); \
	start += size
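
/*
 * Note that mem_region() is a two-statement macro with a side effect: it
 * programs the base register and then advances the running offset, e.g.
 * (values illustrative):
 *
 *	m = 0;
 *	mem_region(adap, m, 1024, SG_EGR_CNTX_BADDR);	// m is now 1024
 *
 * Because it expands to two statements it must not be used as the body of
 * an unbraced if/else.
 */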
/**
 * partition_mem - partition memory and configure TP memory settings
 * @adap: the adapter
 * @p: the TP parameters
 *
 * Partitions context and payload memory and configures TP's memory
 * registers.
 */
static void partition_mem(struct adapter *adap, const struct tp_params *p)
{
	unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
	unsigned int timers = 0, timers_shift = 22;

	if (adap->params.rev > 0) {
		if (tids <= 16 * 1024) {
			timers = 1;
			timers_shift = 16;
		} else if (tids <= 64 * 1024) {
			timers = 2;
			timers_shift = 18;
		} else if (tids <= 256 * 1024) {
			timers = 3;
			timers_shift = 20;
		}
	}

	t3_write_reg(adap, A_TP_PMM_SIZE,
		     p->chan_rx_size | (p->chan_tx_size >> 16));

	t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
	t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
	t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
	t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
			 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));

	t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
	t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
	t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);

	pstructs = p->rx_num_pgs + p->tx_num_pgs;
	/* Add a bit of headroom and make multiple of 24 */
	pstructs += 48;
	pstructs -= pstructs % 24;
	t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);

	m = tids * TCB_SIZE;
	mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
	mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
	t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
	m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
	mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
	mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
	mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
	mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);

	m = (m + 4095) & ~0xfff;
	t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
	t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);

	tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
	m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	    adap->params.mc5.nfilters - adap->params.mc5.nroutes;
	if (tids < m)
		adap->params.mc5.nservers += m - tids;
}

static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
				  u32 val)
{
	t3_write_reg(adap, A_TP_PIO_ADDR, addr);
	t3_write_reg(adap, A_TP_PIO_DATA, val);
}

static void tp_config(struct adapter *adap, const struct tp_params *p)
{
	t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
		     F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
		     F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
	t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
		     F_MTUENABLE | V_WINDOWSCALEMODE(1) |
		     V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
	t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
		     V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
		     V_BYTETHRESHOLD(26880) | V_MSSTHRESHOLD(2) |
		     F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
	t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
			 F_IPV6ENABLE | F_NICMODE);
	t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
	t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
	t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
			 adap->params.rev > 0 ? F_ENABLEESND :
			 F_T3A_ENABLEESND);

	t3_set_reg_field(adap, A_TP_PC_CONFIG,
			 F_ENABLEEPCMDAFULL,
			 F_ENABLEOCSPIFULL | F_TXDEFERENABLE | F_HEARBEATDACK |
			 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
	t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
			 F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
			 F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);

	if (adap->params.rev > 0) {
		tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
		t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
				 F_TXPACEAUTO);
		t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
	} else
		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);

	if (adap->params.rev == T3_REV_C)
		t3_set_reg_field(adap, A_TP_PC_CONFIG,
				 V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
				 V_TABLELATENCYDELTA(4));

	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
	t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
	t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
}

/* Desired TP timer resolution in usec */
#define TP_TMR_RES 50

/* TCP timer values in ms */
#define TP_DACK_TIMER 50
#define TP_RTO_MIN  250

/**
 * tp_set_timers - set TP timing parameters
 * @adap: the adapter to set
 * @core_clk: the core clock frequency in Hz
 *
 * Set TP's timing parameters, such as the various timer resolutions and
 * the TCP timer values.
 */
static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
{
	unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
	unsigned int dack_re = fls(core_clk / 5000) - 1;	/* 200us */
	unsigned int tstamp_re = fls(core_clk / 1000);	/* 1ms, at least */
	unsigned int tps = core_clk >> tre;

	t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
		     V_DELAYEDACKRESOLUTION(dack_re) |
		     V_TIMESTAMPRESOLUTION(tstamp_re));
	t3_write_reg(adap, A_TP_DACK_TIMER,
		     (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
	t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
		     V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
		     V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
		     V_KEEPALIVEMAX(9));

#define SECONDS * tps

	t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
	t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
	t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
	t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
	t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
	t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
	t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
	t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
	t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);

#undef SECONDS
}

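/*
 * Worked example (hypothetical 200 MHz core clock): core_clk / (1000000 /
 * TP_TMR_RES) = 10000, so tre = fls(10000) - 1 = 13 and the timer ticks at
 * tps = 200000000 >> 13, roughly 24.4 kHz, i.e. a ~41 us resolution, close
 * to the desired TP_TMR_RES of 50 us.  "2 SECONDS" above then expands to
 * "2 * tps" timer ticks.
 */
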
/**
 * t3_tp_set_coalescing_size - set receive coalescing size
 * @adap: the adapter
 * @size: the receive coalescing size
 * @psh: whether a set PSH bit should deliver coalesced data
 *
 * Set the receive coalescing size and PSH bit handling.
 */
int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh)
{
	u32 val;

	if (size > MAX_RX_COALESCING_LEN)
		return -EINVAL;

	val = t3_read_reg(adap, A_TP_PARA_REG3);
	val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);

	if (size) {
		val |= F_RXCOALESCEENABLE;
		if (psh)
			val |= F_RXCOALESCEPSHEN;
		size = min(MAX_RX_COALESCING_LEN, size);
		t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
			     V_MAXRXDATA(MAX_RX_COALESCING_LEN));
	}
	t3_write_reg(adap, A_TP_PARA_REG3, val);
	return 0;
}

/**
 * t3_tp_set_max_rxsize - set the max receive size
 * @adap: the adapter
 * @size: the max receive size
 *
 * Set TP's max receive size.  This is the limit that applies when
 * receive coalescing is disabled.
 */
void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
{
	t3_write_reg(adap, A_TP_PARA_REG7,
		     V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
}

static void init_mtus(unsigned short mtus[])
{
	/*
	 * See draft-mathis-plpmtud-00.txt for the values.  The min is 88 so
	 * it can accommodate max size TCP/IP headers when SACK and timestamps
	 * are enabled and still have at least 8 bytes of payload.
	 */
	mtus[0] = 88;
	mtus[1] = 88;
	mtus[2] = 256;
	mtus[3] = 512;
	mtus[4] = 576;
	mtus[5] = 1024;
	mtus[6] = 1280;
	mtus[7] = 1492;
	mtus[8] = 1500;
	mtus[9] = 2002;
	mtus[10] = 2048;
	mtus[11] = 4096;
	mtus[12] = 4352;
	mtus[13] = 8192;
	mtus[14] = 9000;
	mtus[15] = 9600;
}

/*
 * Initial congestion control parameters.
 */
static void init_cong_ctrl(unsigned short *a, unsigned short *b)
{
	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
	a[9] = 2;
	a[10] = 3;
	a[11] = 4;
	a[12] = 5;
	a[13] = 6;
	a[14] = 7;
	a[15] = 8;
	a[16] = 9;
	a[17] = 10;
	a[18] = 14;
	a[19] = 17;
	a[20] = 21;
	a[21] = 25;
	a[22] = 30;
	a[23] = 35;
	a[24] = 45;
	a[25] = 60;
	a[26] = 80;
	a[27] = 100;
	a[28] = 200;
	a[29] = 300;
	a[30] = 400;
	a[31] = 500;

	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
	b[9] = b[10] = 1;
	b[11] = b[12] = 2;
	b[13] = b[14] = b[15] = b[16] = 3;
	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
	b[28] = b[29] = b[30] = b[31] = 6;
}

/* The minimum additive increment value for the congestion control table */
#define CC_MIN_INCR 2U

/**
 * t3_load_mtus - write the MTU and congestion control HW tables
 * @adap: the adapter
 * @mtus: the unrestricted values for the MTU table
 * @alpha: the values for the congestion control alpha parameter
 * @beta: the values for the congestion control beta parameter
 * @mtu_cap: the maximum permitted effective MTU
 *
 * Write the MTU table with the supplied MTUs capping each at &mtu_cap.
 * Update the high-speed congestion control table with the supplied alpha,
 * beta, and MTUs.
 */
void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
		  unsigned short alpha[NCCTRL_WIN],
		  unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
{
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
		28672, 40960, 57344, 81920, 114688, 163840, 229376
	};

	unsigned int i, w;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = min(mtus[i], mtu_cap);
		unsigned int log2 = fls(mtu);

		if (!(mtu & ((1 << log2) >> 2)))	/* round */
			log2--;
		t3_write_reg(adap, A_TP_MTU_TABLE,
			     (i << 24) | (log2 << 16) | mtu);

		for (w = 0; w < NCCTRL_WIN; ++w) {
			unsigned int inc;

			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
				  CC_MIN_INCR);

			t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
		}
	}
}

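/*
 * Worked example of the log2 rounding above: for mtu = 1500, fls(1500) =
 * 11 and the test bit is (1 << 11) >> 2 = 512; 1500 & 512 is non-zero, so
 * log2 stays 11 and the entry is written as (i << 24) | (11 << 16) | 1500.
 * For mtu = 576, fls = 10 and 576 & 256 = 0, so log2 is rounded down to 9.
 */
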
/**
 * t3_read_hw_mtus - returns the values in the HW MTU table
 * @adap: the adapter
 * @mtus: where to store the HW MTU values
 *
 * Reads the HW MTU table.
 */
void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS])
{
	int i;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int val;

		t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
		val = t3_read_reg(adap, A_TP_MTU_TABLE);
		mtus[i] = val & 0x3fff;
	}
}

/**
 * t3_get_cong_cntl_tab - reads the congestion control table
 * @adap: the adapter
 * @incr: where to store the alpha values
 *
 * Reads the additive increments programmed into the HW congestion
 * control table.
 */
void t3_get_cong_cntl_tab(struct adapter *adap,
			  unsigned short incr[NMTUS][NCCTRL_WIN])
{
	unsigned int mtu, w;

	for (mtu = 0; mtu < NMTUS; ++mtu)
		for (w = 0; w < NCCTRL_WIN; ++w) {
			t3_write_reg(adap, A_TP_CCTRL_TABLE,
				     0xffff0000 | (mtu << 5) | w);
			incr[mtu][w] = t3_read_reg(adap, A_TP_CCTRL_TABLE) &
				       0x1fff;
		}
}

/**
 * t3_tp_get_mib_stats - read TP's MIB counters
 * @adap: the adapter
 * @tps: holds the returned counter values
 *
 * Returns the values of TP's MIB counters.
 */
void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
{
	t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
			 sizeof(*tps) / sizeof(u32), 0);
}

#define ulp_region(adap, name, start, len) \
	t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
	t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
		     (start) + (len) - 1); \
	(start) += (len)

#define ulptx_region(adap, name, start, len) \
	t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
	t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
		     (start) + (len) - 1)

static void ulp_config(struct adapter *adap, const struct tp_params *p)
{
	unsigned int m = p->chan_rx_size;

	ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
	ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
	ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
	ulp_region(adap, STAG, m, p->chan_rx_size / 4);
	ulp_region(adap, RQ, m, p->chan_rx_size / 4);
	ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
	ulp_region(adap, PBL, m, p->chan_rx_size / 4);
	t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
}

/**
 * t3_set_proto_sram - set the contents of the protocol sram
 * @adap: the adapter
 * @data: the protocol image
 *
 * Write the contents of the protocol SRAM.
 */
int t3_set_proto_sram(struct adapter *adap, const u8 *data)
{
	int i;
	const __be32 *buf = (const __be32 *)data;

	for (i = 0; i < PROTO_SRAM_LINES; i++) {
		t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, be32_to_cpu(*buf++));
		t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, be32_to_cpu(*buf++));
		t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, be32_to_cpu(*buf++));
		t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, be32_to_cpu(*buf++));
		t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, be32_to_cpu(*buf++));

		t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
		if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
			return -EIO;
	}
	t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0);

	return 0;
}

void t3_config_trace_filter(struct adapter *adapter,
			    const struct trace_params *tp, int filter_index,
			    int invert, int enable)
{
	u32 addr, key[4], mask[4];

	key[0] = tp->sport | (tp->sip << 16);
	key[1] = (tp->sip >> 16) | (tp->dport << 16);
	key[2] = tp->dip;
	key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);

	mask[0] = tp->sport_mask | (tp->sip_mask << 16);
	mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
	mask[2] = tp->dip_mask;
	mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);

	if (invert)
		key[3] |= (1 << 29);
	if (enable)
		key[3] |= (1 << 28);

	addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
	tp_wr_indirect(adapter, addr++, key[0]);
	tp_wr_indirect(adapter, addr++, mask[0]);
	tp_wr_indirect(adapter, addr++, key[1]);
	tp_wr_indirect(adapter, addr++, mask[1]);
	tp_wr_indirect(adapter, addr++, key[2]);
	tp_wr_indirect(adapter, addr++, mask[2]);
	tp_wr_indirect(adapter, addr++, key[3]);
	tp_wr_indirect(adapter, addr, mask[3]);
	t3_read_reg(adapter, A_TP_PIO_DATA);
}

/**
 * t3_config_sched - configure a HW traffic scheduler
 * @adap: the adapter
 * @kbps: target rate in Kbps
 * @sched: the scheduler index
 *
 * Configure a HW scheduler for the target rate.
 */
int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
{
	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
	unsigned int clk = adap->params.vpd.cclk * 1000;
	unsigned int selected_cpt = 0, selected_bpt = 0;

	if (kbps > 0) {
		kbps *= 125;	/* -> bytes */
		for (cpt = 1; cpt <= 255; cpt++) {
			tps = clk / cpt;
			bpt = (kbps + tps / 2) / tps;
			if (bpt > 0 && bpt <= 255) {
				v = bpt * tps;
				delta = v >= kbps ? v - kbps : kbps - v;
				if (delta <= mindelta) {
					mindelta = delta;
					selected_cpt = cpt;
					selected_bpt = bpt;
				}
			} else if (selected_cpt)
				break;
		}
		if (!selected_cpt)
			return -EINVAL;
	}
	t3_write_reg(adap, A_TP_TM_PIO_ADDR,
		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
	else
		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
	t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
	return 0;
}

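/*
 * Usage sketch (rate and scheduler index illustrative): binding Tx
 * scheduler 2 to roughly 100 Mb/s, i.e. 100000 kbps:
 *
 *	err = t3_config_sched(adap, 100000, 2);
 *
 * The loop above searches all cycles-per-tick/bytes-per-tick pairs for the
 * combination whose byte rate is closest to the requested rate.
 */
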
static int tp_init(struct adapter *adap, const struct tp_params *p)
{
	int busy = 0;

	tp_config(adap, p);
	t3_set_vlan_accel(adap, 3, 0);

	if (is_offload(adap)) {
		tp_set_timers(adap, adap->params.vpd.cclk * 1000);
		t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
		busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
				       0, 1000, 5);
		if (busy)
			CH_ERR(adap, "TP initialization timed out\n");
	}

	if (!busy)
		t3_write_reg(adap, A_TP_RESET, F_TPRESET);
	return busy;
}

int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
{
	if (port_mask & ~((1 << adap->params.nports) - 1))
		return -EINVAL;
	t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
			 port_mask << S_PORT0ACTIVE);
	return 0;
}

/*
 * Perform the bits of HW initialization that are dependent on the Tx
 * channels being used.
 */
static void chan_init_hw(struct adapter *adap, unsigned int chan_map)
{
	int i;

	if (chan_map != 3) {	/* one channel */
		t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
		t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
		t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT |
			     (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE :
			      F_TPTXPORT1EN | F_PORT1ACTIVE));
		t3_write_reg(adap, A_PM1_TX_CFG,
			     chan_map == 1 ? 0xffffffff : 0);
	} else {		/* two channels */
		t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
		t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
		t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
			     V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
		t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
			     F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
			     F_ENFORCEPKT);
		t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
		t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
		t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
			     V_TX_MOD_QUEUE_REQ_MAP(0xaa));
		for (i = 0; i < 16; i++)
			t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
				     (i << 16) | 0x1010);
	}
}

static int calibrate_xgm(struct adapter *adapter)
{
	if (uses_xaui(adapter)) {
		unsigned int v, i;

		for (i = 0; i < 5; ++i) {
			t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
			t3_read_reg(adapter, A_XGM_XAUI_IMP);
			msleep(1);
			v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
			if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
				t3_write_reg(adapter, A_XGM_XAUI_IMP,
					     V_XAUIIMP(G_CALIMP(v) >> 2));
				return 0;
			}
		}
		CH_ERR(adapter, "MAC calibration failed\n");
		return -1;
	} else {
		t3_write_reg(adapter, A_XGM_RGMII_IMP,
			     V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
				 F_XGM_IMPSETUPDATE);
	}
	return 0;
}

static void calibrate_xgm_t3b(struct adapter *adapter)
{
	if (!uses_xaui(adapter)) {
		t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
			     F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
				 F_XGM_IMPSETUPDATE);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
				 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
	}
}

struct mc7_timing_params {
	unsigned char ActToPreDly;
	unsigned char ActToRdWrDly;
	unsigned char PreCyc;
	unsigned char RefCyc[5];
	unsigned char BkCyc;
	unsigned char WrToRdDly;
	unsigned char RdToWrDly;
};

/*
 * Write a value to a register and check that the write completed.  These
 * writes normally complete in a cycle or two, so one read should suffice.
 * The very first read exists to flush the posted write to the device.
 */
static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
{
	t3_write_reg(adapter, addr, val);
	t3_read_reg(adapter, addr);	/* flush */
	if (!(t3_read_reg(adapter, addr) & F_BUSY))
		return 0;
	CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
	return -EIO;
}

static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
{
	static const unsigned int mc7_mode[] = {
		0x632, 0x642, 0x652, 0x432, 0x442
	};
	static const struct mc7_timing_params mc7_timings[] = {
		{12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
		{12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
		{12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
		{9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
		{9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
	};

	u32 val;
	unsigned int width, density, slow, attempts;
	struct adapter *adapter = mc7->adapter;
	const struct mc7_timing_params *p = &mc7_timings[mem_type];

	if (!mc7->size)
		return 0;

	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
	slow = val & F_SLOW;
	width = G_WIDTH(val);
	density = G_DEN(val);

	t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);	/* flush */
	msleep(1);

	if (!slow) {
		t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
		t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
		msleep(1);
		if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
		    (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
			CH_ERR(adapter, "%s MC7 calibration timed out\n",
			       mc7->name);
			goto out_fail;
		}
	}

	t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
		     V_ACTTOPREDLY(p->ActToPreDly) |
		     V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
		     V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
		     V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));

	t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
		     val | F_CLKEN | F_TERM150);
	t3_read_reg(adapter, mc7->offset + A_MC7_CFG);	/* flush */

	if (!slow)
		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
				 F_DLLENB);
	udelay(1);

	val = slow ? 3 : 6;
	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
		goto out_fail;

	if (!slow) {
		t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
		udelay(5);
	}

	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
		       mc7_mode[mem_type]) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
		goto out_fail;

	/* clock value is in KHz */
	mc7_clock = mc7_clock * 7812 + mc7_clock / 2;	/* ns */
	mc7_clock /= 1000000;	/* KHz->MHz, ns->us */

	t3_write_reg(adapter, mc7->offset + A_MC7_REF,
		     F_PERREFEN | V_PREREFDIV(mc7_clock));
	t3_read_reg(adapter, mc7->offset + A_MC7_REF);	/* flush */

	t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
		     (mc7->size << width) - 1);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
	t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);	/* flush */

	attempts = 50;
	do {
		msleep(250);
		val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
	} while ((val & F_BUSY) && --attempts);
	if (val & F_BUSY) {
		CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
		goto out_fail;
	}

	/* Enable normal memory accesses. */
	t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
	return 0;

out_fail:
	return -1;
}

static void config_pcie(struct adapter *adap)
{
	static const u16 ack_lat[4][6] = {
		{237, 416, 559, 1071, 2095, 4143},
		{128, 217, 289, 545, 1057, 2081},
		{73, 118, 154, 282, 538, 1050},
		{67, 107, 86, 150, 278, 534}
	};
	static const u16 rpl_tmr[4][6] = {
		{711, 1248, 1677, 3213, 6285, 12429},
		{384, 651, 867, 1635, 3171, 6243},
		{219, 354, 462, 846, 1614, 3150},
		{201, 321, 258, 450, 834, 1602}
	};

	u16 val;
	unsigned int log2_width, pldsize;
	unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;

	pci_read_config_word(adap->pdev,
			     adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
			     &val);
	pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
	pci_read_config_word(adap->pdev,
			     adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
			     &val);

	fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
	fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
	    G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
	log2_width = fls(adap->params.pci.width) - 1;
	acklat = ack_lat[log2_width][pldsize];
	if (val & 1)		/* check LOsEnable */
		acklat += fst_trn_tx * 4;
	rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;

	if (adap->params.rev == 0)
		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
				 V_T3A_ACKLAT(M_T3A_ACKLAT),
				 V_T3A_ACKLAT(acklat));
	else
		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
				 V_ACKLAT(acklat));

	t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
			 V_REPLAYLMT(rpllmt));

	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
	t3_set_reg_field(adap, A_PCIE_CFG, 0,
			 F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
}

/*
 * Initialize and configure T3 HW modules.  This performs the
 * initialization steps that need to be done once after a card is reset.
 * MAC and PHY initialization is handled separately whenever a port is enabled.
 *
 * fw_params are passed to FW and their value is platform dependent.  Only the
 * top 8 bits are available for use, the rest must be 0.
 */
int t3_init_hw(struct adapter *adapter, u32 fw_params)
{
	int err = -EIO, attempts, i;
	const struct vpd_params *vpd = &adapter->params.vpd;

	if (adapter->params.rev > 0)
		calibrate_xgm_t3b(adapter);
	else if (calibrate_xgm(adapter))
		goto out_err;

	if (vpd->mclk) {
		partition_mem(adapter, &adapter->params.tp);

		if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
		    mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
		    mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
		    t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
				adapter->params.mc5.nfilters,
				adapter->params.mc5.nroutes))
			goto out_err;

		for (i = 0; i < 32; i++)
			if (clear_sge_ctxt(adapter, i, F_CQ))
				goto out_err;
	}

	if (tp_init(adapter, &adapter->params.tp))
		goto out_err;

	t3_tp_set_coalescing_size(adapter,
				  min(adapter->params.sge.max_pkt_size,
				      MAX_RX_COALESCING_LEN), 1);
	t3_tp_set_max_rxsize(adapter,
			     min(adapter->params.sge.max_pkt_size, 16384U));
	ulp_config(adapter, &adapter->params.tp);

	if (is_pcie(adapter))
		config_pcie(adapter);
	else
		t3_set_reg_field(adapter, A_PCIX_CFG, 0,
				 F_DMASTOPEN | F_CLIDECEN);

	if (adapter->params.rev == T3_REV_C)
		t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
				 F_CFG_CQE_SOP_MASK);

	t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
	t3_write_reg(adapter, A_PM1_RX_MODE, 0);
	t3_write_reg(adapter, A_PM1_TX_MODE, 0);
	chan_init_hw(adapter, adapter->params.chan_map);
	t3_sge_init(adapter, &adapter->params.sge);

	t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));

	t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
	t3_write_reg(adapter, A_CIM_BOOT_CFG,
		     V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
	t3_read_reg(adapter, A_CIM_BOOT_CFG);	/* flush */

	attempts = 100;
	do {			/* wait for uP to initialize */
		msleep(20);
	} while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
	if (!attempts) {
		CH_ERR(adapter, "uP initialization timed out\n");
		goto out_err;
	}

	err = 0;
out_err:
	return err;
}

/**
 * get_pci_mode - determine a card's PCI mode
 * @adapter: the adapter
 * @p: where to store the PCI settings
 *
 * Determines a card's PCI mode and associated parameters, such as speed
 * and width.
 */
static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
{
	static unsigned short speed_map[] = { 33, 66, 100, 133 };
	u32 pci_mode, pcie_cap;

	pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
	if (pcie_cap) {
		u16 val;

		p->variant = PCI_VARIANT_PCIE;
		p->pcie_cap_addr = pcie_cap;
		pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
				     &val);
		p->width = (val >> 4) & 0x3f;
		return;
	}

	pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
	p->speed = speed_map[G_PCLKRANGE(pci_mode)];
	p->width = (pci_mode & F_64BIT) ? 64 : 32;
	pci_mode = G_PCIXINITPAT(pci_mode);
	if (pci_mode == 0)
		p->variant = PCI_VARIANT_PCI;
	else if (pci_mode < 4)
		p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
	else if (pci_mode < 8)
		p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
	else
		p->variant = PCI_VARIANT_PCIX_266_MODE2;
}

/**
 * init_link_config - initialize a link's SW state
 * @lc: structure holding the link state
 * @caps: information about the current card
 *
 * Initializes the SW state maintained for each link, including the link's
 * capabilities and default speed/duplex/flow-control/autonegotiation
 * settings.
 */
static void init_link_config(struct link_config *lc, unsigned int caps)
{
	lc->supported = caps;
	lc->requested_speed = lc->speed = SPEED_INVALID;
	lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
	if (lc->supported & SUPPORTED_Autoneg) {
		lc->advertising = lc->supported;
		lc->autoneg = AUTONEG_ENABLE;
		lc->requested_fc |= PAUSE_AUTONEG;
	} else {
		lc->advertising = 0;
		lc->autoneg = AUTONEG_DISABLE;
	}
}

/**
 * mc7_calc_size - calculate MC7 memory size
 * @cfg: the MC7 configuration
 *
 * Calculates the size of an MC7 memory in bytes from the value of its
 * configuration register.
 */
static unsigned int mc7_calc_size(u32 cfg)
{
	unsigned int width = G_WIDTH(cfg);
	unsigned int banks = !!(cfg & F_BKS) + 1;
	unsigned int org = !!(cfg & F_ORG) + 1;
	unsigned int density = G_DEN(cfg);
	unsigned int MBs = ((256 << density) * banks) / (org << width);

	return MBs << 20;
}

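/*
 * Worked example (hypothetical CFG encoding): with density = 2, two banks,
 * org = 2 and width = 2 the expression gives ((256 << 2) * 2) / (2 << 2) =
 * 256, i.e. a 256 MB part; the function returns MBs << 20 bytes.
 */
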
static void mc7_prep(struct adapter *adapter, struct mc7 *mc7,
		     unsigned int base_addr, const char *name)
{
	u32 cfg;

	mc7->adapter = adapter;
	mc7->name = name;
	mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
	cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
	mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
	mc7->width = G_WIDTH(cfg);
}

void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
{
	mac->adapter = adapter;
	mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
	mac->nucast = 1;

	if (adapter->params.rev == 0 && uses_xaui(adapter)) {
		t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
			     is_10G(adapter) ? 0x2901c04 : 0x2301c04);
		t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
				 F_ENRGMII, 0);
	}
}

void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
{
	u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);

	mi1_init(adapter, ai);
	t3_write_reg(adapter, A_I2C_CFG,	/* set for 80KHz */
		     V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
	t3_write_reg(adapter, A_T3DBG_GPIO_EN,
		     ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
	t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
	t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));

	if (adapter->params.rev == 0 || !uses_xaui(adapter))
		val |= F_ENRGMII;

	/* Enable MAC clocks so we can access the registers */
	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);

	val |= F_CLKDIVRESET_;
	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);
	t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);
}

/*
 * Reset the adapter.
 * Older PCIe cards lose their config space during reset, PCI-X
 * ones don't.
 */
int t3_reset_adapter(struct adapter *adapter)
{
	int i, save_and_restore_pcie =
	    adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
	uint16_t devid = 0;

	if (save_and_restore_pcie)
		pci_save_state(adapter->pdev);
	t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);

	/*
	 * Delay to give the device some time to reset fully.
	 * XXX The delay time should be modified.
	 */
	for (i = 0; i < 10; i++) {
		msleep(50);
		pci_read_config_word(adapter->pdev, 0x00, &devid);
		if (devid == 0x1425)
			break;
	}

	if (devid != 0x1425)
		return -1;

	if (save_and_restore_pcie)
		pci_restore_state(adapter->pdev);
	return 0;
}

static int init_parity(struct adapter *adap)
{
	int i, err, addr;

	if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	for (err = i = 0; !err && i < 16; i++)
		err = clear_sge_ctxt(adap, i, F_EGRESS);
	for (i = 0xfff0; !err && i <= 0xffff; i++)
		err = clear_sge_ctxt(adap, i, F_EGRESS);
	for (i = 0; !err && i < SGE_QSETS; i++)
		err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
	if (err)
		return err;

	t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
	for (i = 0; i < 4; i++)
		for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
			t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
				     F_IBQDBGWR | V_IBQDBGQID(i) |
				     V_IBQDBGADDR(addr));
			err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
					      F_IBQDBGBUSY, 0, 2, 1);
			if (err)
				return err;
		}
	return 0;
}

/*
 * Initialize adapter SW state for the various HW modules, set initial values
 * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
 * interface.
 */
int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
		    int reset)
{
	int ret;
	unsigned int i, j = -1;

	get_pci_mode(adapter, &adapter->params.pci);

	adapter->params.info = ai;
	adapter->params.nports = ai->nports0 + ai->nports1;
	adapter->params.chan_map = (!!ai->nports0) | (!!ai->nports1 << 1);
	adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
	/*
	 * We used to only run the "adapter check task" once a second if
	 * we had PHYs which didn't support interrupts (we would check
	 * their link status once a second).  Now we check other conditions
	 * in that routine which could potentially impose a very high
	 * interrupt load on the system.  As such, we now always scan the
	 * adapter state once a second ...
	 */
	adapter->params.linkpoll_period = 10;
	adapter->params.stats_update_period = is_10G(adapter) ?
	    MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
	adapter->params.pci.vpd_cap_addr =
	    pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
	ret = get_vpd_params(adapter, &adapter->params.vpd);
	if (ret < 0)
		return ret;

	if (reset && t3_reset_adapter(adapter))
		return -1;

	t3_sge_prep(adapter, &adapter->params.sge);

	if (adapter->params.vpd.mclk) {
		struct tp_params *p = &adapter->params.tp;

		mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
		mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
		mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");

		p->nchan = adapter->params.chan_map == 3 ? 2 : 1;
		p->pmrx_size = t3_mc7_size(&adapter->pmrx);
		p->pmtx_size = t3_mc7_size(&adapter->pmtx);
		p->cm_size = t3_mc7_size(&adapter->cm);
		p->chan_rx_size = p->pmrx_size / 2;	/* only 1 Rx channel */
		p->chan_tx_size = p->pmtx_size / p->nchan;
		p->rx_pg_size = 64 * 1024;
		p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
		p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
		p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
		p->ntimer_qs = p->cm_size >= (128 << 20) ||
		    adapter->params.rev > 0 ? 12 : 6;
	}

	adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
				  t3_mc7_size(&adapter->pmtx) &&
				  t3_mc7_size(&adapter->cm);

	if (is_offload(adapter)) {
		adapter->params.mc5.nservers = DEFAULT_NSERVERS;
		adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
		    DEFAULT_NFILTERS : 0;
		adapter->params.mc5.nroutes = 0;
		t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);

		init_mtus(adapter->params.mtus);
		init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
	}

	early_hw_init(adapter, ai);
	ret = init_parity(adapter);
	if (ret)
		return ret;

	for_each_port(adapter, i) {
		u8 hw_addr[6];
		const struct port_type_info *pti;
		struct port_info *p = adap2pinfo(adapter, i);

		while (!adapter->params.vpd.port_type[++j])
			;

		pti = &port_types[adapter->params.vpd.port_type[j]];
		if (!pti->phy_prep) {
			CH_ALERT(adapter, "Invalid port type index %d\n",
				 adapter->params.vpd.port_type[j]);
			return -EINVAL;
		}

		p->phy.mdio.dev = adapter->port[i];
		ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
				    ai->mdio_ops);
		if (ret)
			return ret;
		mac_prep(&p->mac, adapter, j);

		/*
		 * The VPD EEPROM stores the base Ethernet address for the
		 * card.  A port's address is derived from the base by adding
		 * the port's index to the base's low octet.
		 */
		memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
		hw_addr[5] = adapter->params.vpd.eth_base[5] + i;

		memcpy(adapter->port[i]->dev_addr, hw_addr,
		       ETH_ALEN);
		memcpy(adapter->port[i]->perm_addr, hw_addr,
		       ETH_ALEN);
		init_link_config(&p->link_config, p->phy.caps);
		p->phy.ops->power_down(&p->phy, 1);

		/*
		 * If the PHY doesn't support interrupts for link status
		 * changes, schedule a scan of the adapter links at least
		 * once a second.
		 */
		if (!(p->phy.caps & SUPPORTED_IRQ) &&
		    adapter->params.linkpoll_period > 10)
			adapter->params.linkpoll_period = 10;
	}

	return 0;
}

void t3_led_ready(struct adapter *adapter)
{
	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
			 F_GPIO0_OUT_VAL);
}

int t3_replay_prep_adapter(struct adapter *adapter)
{
	const struct adapter_info *ai = adapter->params.info;
	unsigned int i, j = -1;
	int ret;

	early_hw_init(adapter, ai);
	ret = init_parity(adapter);
	if (ret)
		return ret;

	for_each_port(adapter, i) {
		const struct port_type_info *pti;
		struct port_info *p = adap2pinfo(adapter, i);

		while (!adapter->params.vpd.port_type[++j])
			;

		pti = &port_types[adapter->params.vpd.port_type[j]];
		ret = pti->phy_prep(&p->phy, adapter, p->phy.mdio.prtad, NULL);
		if (ret)
			return ret;
		p->phy.ops->power_down(&p->phy, 1);
	}

	return 0;
}