/*
 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
35 #include "firmware_exports.h"
38 * t3_wait_op_done_val - wait until an operation is completed
39 * @adapter: the adapter performing the operation
40 * @reg: the register to check for completion
41 * @mask: a single-bit field within @reg that indicates completion
42 * @polarity: the value of the field when the operation is completed
43 * @attempts: number of check iterations
44 * @delay: delay in usecs between iterations
45 * @valp: where to store the value of the register at completion time
47 * Wait until an operation is completed by checking a bit in a register
48 * up to @attempts times. If @valp is not NULL the value of the register
49 * at the time it indicated completion is stored there. Returns 0 if the
50 * operation completes and -EAGAIN otherwise.
53 int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
54 int polarity, int attempts, int delay, u32 *valp)
57 u32 val = t3_read_reg(adapter, reg);
59 if (!!(val & mask) == polarity) {
72 * t3_write_regs - write a bunch of registers
73 * @adapter: the adapter to program
74 * @p: an array of register address/register value pairs
75 * @n: the number of address/value pairs
76 * @offset: register address offset
78 * Takes an array of register address/register value pairs and writes each
79 * value to the corresponding register. Register addresses are adjusted
80 * by the supplied offset.
82 void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
83 int n, unsigned int offset)
86 t3_write_reg(adapter, p->reg_addr + offset, p->val);
92 * t3_set_reg_field - set a register field to a value
93 * @adapter: the adapter to program
94 * @addr: the register address
95 * @mask: specifies the portion of the register to modify
96 * @val: the new value for the register field
98 * Sets a register field specified by the supplied mask to the
101 void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
104 u32 v = t3_read_reg(adapter, addr) & ~mask;
106 t3_write_reg(adapter, addr, v | val);
107 t3_read_reg(adapter, addr); /* flush */
111 * t3_read_indirect - read indirectly addressed registers
113 * @addr_reg: register holding the indirect address
114 * @data_reg: register holding the value of the indirect register
115 * @vals: where the read register values are stored
116 * @start_idx: index of first indirect register to read
117 * @nregs: how many indirect registers to read
119 * Reads registers that are accessed indirectly through an address/data
122 static void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
123 unsigned int data_reg, u32 *vals,
124 unsigned int nregs, unsigned int start_idx)
127 t3_write_reg(adap, addr_reg, start_idx);
128 *vals++ = t3_read_reg(adap, data_reg);
134 * t3_mc7_bd_read - read from MC7 through backdoor accesses
135 * @mc7: identifies MC7 to read from
136 * @start: index of first 64-bit word to read
137 * @n: number of 64-bit words to read
138 * @buf: where to store the read result
140 * Read n 64-bit words from MC7 starting at word start, using backdoor
143 int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
146 static const int shift[] = { 0, 0, 16, 24 };
147 static const int step[] = { 0, 32, 16, 8 };
149 unsigned int size64 = mc7->size / 8; /* # of 64-bit words */
150 struct adapter *adap = mc7->adapter;
152 if (start >= size64 || start + n > size64)
155 start *= (8 << mc7->width);
160 for (i = (1 << mc7->width) - 1; i >= 0; --i) {
164 t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
165 t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
166 val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
167 while ((val & F_BUSY) && attempts--)
168 val = t3_read_reg(adap,
169 mc7->offset + A_MC7_BD_OP);
173 val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
174 if (mc7->width == 0) {
175 val64 = t3_read_reg(adap,
178 val64 |= (u64) val << 32;
181 val >>= shift[mc7->width];
182 val64 |= (u64) val << (step[mc7->width] * i);
194 static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
196 u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
197 u32 val = F_PREEN | V_MDIINV(ai->mdiinv) | V_MDIEN(ai->mdien) |
200 if (!(ai->caps & SUPPORTED_10000baseT_Full))
202 t3_write_reg(adap, A_MI1_CFG, val);
205 #define MDIO_ATTEMPTS 10
208 * MI1 read/write operations for direct-addressed PHYs.
210 static int mi1_read(struct adapter *adapter, int phy_addr, int mmd_addr,
211 int reg_addr, unsigned int *valp)
214 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
219 mutex_lock(&adapter->mdio_lock);
220 t3_write_reg(adapter, A_MI1_ADDR, addr);
221 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
222 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
224 *valp = t3_read_reg(adapter, A_MI1_DATA);
225 mutex_unlock(&adapter->mdio_lock);
229 static int mi1_write(struct adapter *adapter, int phy_addr, int mmd_addr,
230 int reg_addr, unsigned int val)
233 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
238 mutex_lock(&adapter->mdio_lock);
239 t3_write_reg(adapter, A_MI1_ADDR, addr);
240 t3_write_reg(adapter, A_MI1_DATA, val);
241 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
242 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
243 mutex_unlock(&adapter->mdio_lock);
247 static const struct mdio_ops mi1_mdio_ops = {
253 * MI1 read/write operations for indirect-addressed PHYs.
255 static int mi1_ext_read(struct adapter *adapter, int phy_addr, int mmd_addr,
256 int reg_addr, unsigned int *valp)
259 u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
261 mutex_lock(&adapter->mdio_lock);
262 t3_write_reg(adapter, A_MI1_ADDR, addr);
263 t3_write_reg(adapter, A_MI1_DATA, reg_addr);
264 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
265 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
267 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
268 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
271 *valp = t3_read_reg(adapter, A_MI1_DATA);
273 mutex_unlock(&adapter->mdio_lock);
277 static int mi1_ext_write(struct adapter *adapter, int phy_addr, int mmd_addr,
278 int reg_addr, unsigned int val)
281 u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
283 mutex_lock(&adapter->mdio_lock);
284 t3_write_reg(adapter, A_MI1_ADDR, addr);
285 t3_write_reg(adapter, A_MI1_DATA, reg_addr);
286 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
287 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
289 t3_write_reg(adapter, A_MI1_DATA, val);
290 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
291 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
294 mutex_unlock(&adapter->mdio_lock);
298 static const struct mdio_ops mi1_mdio_ext_ops = {
/**
 *	t3_mdio_change_bits - modify the value of a PHY register
 *	@phy: the PHY to operate on
 *	@mmd: the device address
 *	@reg: the register address
 *	@clear: what part of the register value to mask off
 *	@set: what part of the register value to set
 *
 *	Changes the value of a PHY register by applying a mask to its current
 *	value and ORing the result with a new value.
 */
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
			unsigned int set)
{
	int ret;
	unsigned int val;

	ret = mdio_read(phy, mmd, reg, &val);
	if (!ret) {
		val &= ~clear;
		ret = mdio_write(phy, mmd, reg, val | set);
	}
	return ret;
}
329 * t3_phy_reset - reset a PHY block
330 * @phy: the PHY to operate on
331 * @mmd: the device address of the PHY block to reset
332 * @wait: how long to wait for the reset to complete in 1ms increments
334 * Resets a PHY block and optionally waits for the reset to complete.
335 * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
338 int t3_phy_reset(struct cphy *phy, int mmd, int wait)
343 err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
348 err = mdio_read(phy, mmd, MII_BMCR, &ctl);
354 } while (ctl && --wait);
360 * t3_phy_advertise - set the PHY advertisement registers for autoneg
361 * @phy: the PHY to operate on
362 * @advert: bitmap of capabilities the PHY should advertise
364 * Sets a 10/100/1000 PHY's advertisement registers to advertise the
365 * requested capabilities.
367 int t3_phy_advertise(struct cphy *phy, unsigned int advert)
370 unsigned int val = 0;
372 err = mdio_read(phy, 0, MII_CTRL1000, &val);
376 val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
377 if (advert & ADVERTISED_1000baseT_Half)
378 val |= ADVERTISE_1000HALF;
379 if (advert & ADVERTISED_1000baseT_Full)
380 val |= ADVERTISE_1000FULL;
382 err = mdio_write(phy, 0, MII_CTRL1000, val);
387 if (advert & ADVERTISED_10baseT_Half)
388 val |= ADVERTISE_10HALF;
389 if (advert & ADVERTISED_10baseT_Full)
390 val |= ADVERTISE_10FULL;
391 if (advert & ADVERTISED_100baseT_Half)
392 val |= ADVERTISE_100HALF;
393 if (advert & ADVERTISED_100baseT_Full)
394 val |= ADVERTISE_100FULL;
395 if (advert & ADVERTISED_Pause)
396 val |= ADVERTISE_PAUSE_CAP;
397 if (advert & ADVERTISED_Asym_Pause)
398 val |= ADVERTISE_PAUSE_ASYM;
399 return mdio_write(phy, 0, MII_ADVERTISE, val);
403 * t3_set_phy_speed_duplex - force PHY speed and duplex
404 * @phy: the PHY to operate on
405 * @speed: requested PHY speed
406 * @duplex: requested PHY duplex
408 * Force a 10/100/1000 PHY's speed and duplex. This also disables
409 * auto-negotiation except for GigE, where auto-negotiation is mandatory.
411 int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
416 err = mdio_read(phy, 0, MII_BMCR, &ctl);
421 ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
422 if (speed == SPEED_100)
423 ctl |= BMCR_SPEED100;
424 else if (speed == SPEED_1000)
425 ctl |= BMCR_SPEED1000;
428 ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
429 if (duplex == DUPLEX_FULL)
430 ctl |= BMCR_FULLDPLX;
432 if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
433 ctl |= BMCR_ANENABLE;
434 return mdio_write(phy, 0, MII_BMCR, ctl);
437 static const struct adapter_info t3_adap_info[] = {
439 F_GPIO2_OEN | F_GPIO4_OEN |
440 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
442 &mi1_mdio_ops, "Chelsio PE9000"},
444 F_GPIO2_OEN | F_GPIO4_OEN |
445 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
447 &mi1_mdio_ops, "Chelsio T302"},
449 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
450 F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
451 0, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
452 &mi1_mdio_ext_ops, "Chelsio T310"},
454 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
455 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
456 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
457 SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
458 &mi1_mdio_ext_ops, "Chelsio T320"},
462 * Return the adapter_info structure with a given index. Out-of-range indices
465 const struct adapter_info *t3_get_adapter_info(unsigned int id)
467 return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
470 #define CAPS_1G (SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full | \
471 SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII)
472 #define CAPS_10G (SUPPORTED_10000baseT_Full | SUPPORTED_AUI)
474 static const struct port_type_info port_types[] = {
476 {t3_ael1002_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
478 {t3_vsc8211_phy_prep, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
479 "10/100/1000BASE-T"},
480 {NULL, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
481 "10/100/1000BASE-T"},
482 {t3_xaui_direct_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
483 {NULL, CAPS_10G, "10GBASE-KX4"},
484 {t3_qt2045_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
485 {t3_ael1006_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
487 {NULL, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
493 #define VPD_ENTRY(name, len) \
494 u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
497 * Partial EEPROM Vital Product Data structure. Includes only the ID and
506 VPD_ENTRY(pn, 16); /* part number */
507 VPD_ENTRY(ec, 16); /* EC level */
508 VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
509 VPD_ENTRY(na, 12); /* MAC address base */
510 VPD_ENTRY(cclk, 6); /* core clock */
511 VPD_ENTRY(mclk, 6); /* mem clock */
512 VPD_ENTRY(uclk, 6); /* uP clk */
513 VPD_ENTRY(mdc, 6); /* MDIO clk */
514 VPD_ENTRY(mt, 2); /* mem timing */
515 VPD_ENTRY(xaui0cfg, 6); /* XAUI0 config */
516 VPD_ENTRY(xaui1cfg, 6); /* XAUI1 config */
517 VPD_ENTRY(port0, 2); /* PHY0 complex */
518 VPD_ENTRY(port1, 2); /* PHY1 complex */
519 VPD_ENTRY(port2, 2); /* PHY2 complex */
520 VPD_ENTRY(port3, 2); /* PHY3 complex */
521 VPD_ENTRY(rv, 1); /* csum */
522 u32 pad; /* for multiple-of-4 sizing and alignment */
525 #define EEPROM_MAX_POLL 4
526 #define EEPROM_STAT_ADDR 0x4000
527 #define VPD_BASE 0xc00
530 * t3_seeprom_read - read a VPD EEPROM location
531 * @adapter: adapter to read
532 * @addr: EEPROM address
533 * @data: where to store the read data
535 * Read a 32-bit word from a location in VPD EEPROM using the card's PCI
536 * VPD ROM capability. A zero is written to the flag bit when the
537 * addres is written to the control register. The hardware device will
538 * set the flag to 1 when 4 bytes have been read into the data register.
540 int t3_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
543 int attempts = EEPROM_MAX_POLL;
544 unsigned int base = adapter->params.pci.vpd_cap_addr;
546 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
549 pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
552 pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
553 } while (!(val & PCI_VPD_ADDR_F) && --attempts);
555 if (!(val & PCI_VPD_ADDR_F)) {
556 CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
559 pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, data);
560 *data = le32_to_cpu(*data);
565 * t3_seeprom_write - write a VPD EEPROM location
566 * @adapter: adapter to write
567 * @addr: EEPROM address
568 * @data: value to write
570 * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
571 * VPD ROM capability.
573 int t3_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
576 int attempts = EEPROM_MAX_POLL;
577 unsigned int base = adapter->params.pci.vpd_cap_addr;
579 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
582 pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
584 pci_write_config_word(adapter->pdev,base + PCI_VPD_ADDR,
585 addr | PCI_VPD_ADDR_F);
588 pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
589 } while ((val & PCI_VPD_ADDR_F) && --attempts);
591 if (val & PCI_VPD_ADDR_F) {
592 CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
599 * t3_seeprom_wp - enable/disable EEPROM write protection
600 * @adapter: the adapter
601 * @enable: 1 to enable write protection, 0 to disable it
603 * Enables or disables write protection on the serial EEPROM.
605 int t3_seeprom_wp(struct adapter *adapter, int enable)
607 return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
/*
 * Convert a character holding a hex digit to a number.  The input must be
 * a valid hex digit ('0'-'9', 'a'-'f', 'A'-'F'); other characters produce
 * meaningless results.
 */
static unsigned int hex2int(unsigned char c)
{
	return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
}
619 * get_vpd_params - read VPD parameters from VPD EEPROM
620 * @adapter: adapter to read
621 * @p: where to store the parameters
623 * Reads card parameters stored in VPD EEPROM.
625 static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
631 * Card information is normally at VPD_BASE but some early cards had
634 ret = t3_seeprom_read(adapter, VPD_BASE, (u32 *)&vpd);
637 addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;
639 for (i = 0; i < sizeof(vpd); i += 4) {
640 ret = t3_seeprom_read(adapter, addr + i,
641 (u32 *)((u8 *)&vpd + i));
646 p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
647 p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
648 p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
649 p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
650 p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
651 memcpy(p->sn, vpd.sn_data, SERNUM_LEN);
653 /* Old eeproms didn't have port information */
654 if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
655 p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
656 p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
658 p->port_type[0] = hex2int(vpd.port0_data[0]);
659 p->port_type[1] = hex2int(vpd.port1_data[0]);
660 p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
661 p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
664 for (i = 0; i < 6; i++)
665 p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
666 hex2int(vpd.na_data[2 * i + 1]);
/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 5,	/* max retries for SF1 operations */
	SF_SEC_SIZE = 64 * 1024,	/* serial flash sector size */
	SF_SIZE = SF_SEC_SIZE * 8,	/* serial flash size */

	/* flash command opcodes */
	SF_PROG_PAGE = 2,	/* program page */
	SF_WR_DISABLE = 4,	/* disable writes */
	SF_RD_STATUS = 5,	/* read status register */
	SF_WR_ENABLE = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_ERASE_SECTOR = 0xd8,	/* erase sector */

	FW_FLASH_BOOT_ADDR = 0x70000,	/* start address of FW in flash */
	FW_VERS_ADDR = 0x77ffc,	/* flash address holding FW version */
	FW_MIN_SIZE = 8		/* at least version and csum */
};
690 * sf1_read - read data from the serial flash
691 * @adapter: the adapter
692 * @byte_cnt: number of bytes to read
693 * @cont: whether another operation will be chained
694 * @valp: where to store the read data
696 * Reads up to 4 bytes of data from the serial flash. The location of
697 * the read needs to be specified prior to calling this by issuing the
698 * appropriate commands to the serial flash.
700 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
705 if (!byte_cnt || byte_cnt > 4)
707 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
709 t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
710 ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
712 *valp = t3_read_reg(adapter, A_SF_DATA);
717 * sf1_write - write data to the serial flash
718 * @adapter: the adapter
719 * @byte_cnt: number of bytes to write
720 * @cont: whether another operation will be chained
721 * @val: value to write
723 * Writes up to 4 bytes of data to the serial flash. The location of
724 * the write needs to be specified prior to calling this by issuing the
725 * appropriate commands to the serial flash.
727 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
730 if (!byte_cnt || byte_cnt > 4)
732 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
734 t3_write_reg(adapter, A_SF_DATA, val);
735 t3_write_reg(adapter, A_SF_OP,
736 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
737 return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
741 * flash_wait_op - wait for a flash operation to complete
742 * @adapter: the adapter
743 * @attempts: max number of polls of the status register
744 * @delay: delay between polls in ms
746 * Wait for a flash operation to complete by polling the status register.
748 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
754 if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
755 (ret = sf1_read(adapter, 1, 0, &status)) != 0)
767 * t3_read_flash - read words from serial flash
768 * @adapter: the adapter
769 * @addr: the start address for the read
770 * @nwords: how many 32-bit words to read
771 * @data: where to store the read data
772 * @byte_oriented: whether to store data as bytes or as words
774 * Read the specified number of 32-bit words from the serial flash.
775 * If @byte_oriented is set the read data is stored as a byte array
776 * (i.e., big-endian), otherwise as 32-bit words in the platform's
779 int t3_read_flash(struct adapter *adapter, unsigned int addr,
780 unsigned int nwords, u32 *data, int byte_oriented)
784 if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
787 addr = swab32(addr) | SF_RD_DATA_FAST;
789 if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
790 (ret = sf1_read(adapter, 1, 1, data)) != 0)
793 for (; nwords; nwords--, data++) {
794 ret = sf1_read(adapter, 4, nwords > 1, data);
798 *data = htonl(*data);
804 * t3_write_flash - write up to a page of data to the serial flash
805 * @adapter: the adapter
806 * @addr: the start address to write
807 * @n: length of data to write
808 * @data: the data to write
810 * Writes up to a page of data (256 bytes) to the serial flash starting
811 * at the given address.
813 static int t3_write_flash(struct adapter *adapter, unsigned int addr,
814 unsigned int n, const u8 *data)
818 unsigned int i, c, left, val, offset = addr & 0xff;
820 if (addr + n > SF_SIZE || offset + n > 256)
823 val = swab32(addr) | SF_PROG_PAGE;
825 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
826 (ret = sf1_write(adapter, 4, 1, val)) != 0)
829 for (left = n; left; left -= c) {
831 for (val = 0, i = 0; i < c; ++i)
832 val = (val << 8) + *data++;
834 ret = sf1_write(adapter, c, c != left, val);
838 if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
841 /* Read the page to verify the write succeeded */
842 ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
846 if (memcmp(data - n, (u8 *) buf + offset, n))
852 * t3_get_tp_version - read the tp sram version
853 * @adapter: the adapter
854 * @vers: where to place the version
856 * Reads the protocol sram version from sram.
858 int t3_get_tp_version(struct adapter *adapter, u32 *vers)
862 /* Get version loaded in SRAM */
863 t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
864 ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
869 *vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);
875 * t3_check_tpsram_version - read the tp sram version
876 * @adapter: the adapter
877 * @must_load: set to 1 if loading a new microcode image is required
879 * Reads the protocol sram version from flash.
881 int t3_check_tpsram_version(struct adapter *adapter, int *must_load)
885 unsigned int major, minor;
887 if (adapter->params.rev == T3_REV_A)
892 ret = t3_get_tp_version(adapter, &vers);
896 major = G_TP_VERSION_MAJOR(vers);
897 minor = G_TP_VERSION_MINOR(vers);
899 if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
902 if (major != TP_VERSION_MAJOR)
903 CH_ERR(adapter, "found wrong TP version (%u.%u), "
904 "driver needs version %d.%d\n", major, minor,
905 TP_VERSION_MAJOR, TP_VERSION_MINOR);
908 CH_ERR(adapter, "found wrong TP version (%u.%u), "
909 "driver compiled for version %d.%d\n", major, minor,
910 TP_VERSION_MAJOR, TP_VERSION_MINOR);
916 * t3_check_tpsram - check if provided protocol SRAM
917 * is compatible with this driver
918 * @adapter: the adapter
919 * @tp_sram: the firmware image to write
922 * Checks if an adapter's tp sram is compatible with the driver.
923 * Returns 0 if the versions are compatible, a negative error otherwise.
925 int t3_check_tpsram(struct adapter *adapter, u8 *tp_sram, unsigned int size)
929 const u32 *p = (const u32 *)tp_sram;
931 /* Verify checksum */
932 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
934 if (csum != 0xffffffff) {
935 CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
/* Firmware image type encoded in the version word */
enum fw_version_type {
	FW_VERSION_N3,
	FW_VERSION_T3
};
949 * t3_get_fw_version - read the firmware version
950 * @adapter: the adapter
951 * @vers: where to place the version
953 * Reads the FW version from flash.
955 int t3_get_fw_version(struct adapter *adapter, u32 *vers)
957 return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
961 * t3_check_fw_version - check if the FW is compatible with this driver
962 * @adapter: the adapter
963 * @must_load: set to 1 if loading a new FW image is required
965 * Checks if an adapter's FW is compatible with the driver. Returns 0
966 * if the versions are compatible, a negative error otherwise.
968 int t3_check_fw_version(struct adapter *adapter, int *must_load)
972 unsigned int type, major, minor;
975 ret = t3_get_fw_version(adapter, &vers);
979 type = G_FW_VERSION_TYPE(vers);
980 major = G_FW_VERSION_MAJOR(vers);
981 minor = G_FW_VERSION_MINOR(vers);
983 if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
984 minor == FW_VERSION_MINOR)
987 if (major != FW_VERSION_MAJOR)
988 CH_ERR(adapter, "found wrong FW version(%u.%u), "
989 "driver needs version %u.%u\n", major, minor,
990 FW_VERSION_MAJOR, FW_VERSION_MINOR);
991 else if (minor < FW_VERSION_MINOR) {
993 CH_WARN(adapter, "found old FW minor version(%u.%u), "
994 "driver compiled for version %u.%u\n", major, minor,
995 FW_VERSION_MAJOR, FW_VERSION_MINOR);
997 CH_WARN(adapter, "found newer FW version(%u.%u), "
998 "driver compiled for version %u.%u\n", major, minor,
999 FW_VERSION_MAJOR, FW_VERSION_MINOR);
1006 * t3_flash_erase_sectors - erase a range of flash sectors
1007 * @adapter: the adapter
1008 * @start: the first sector to erase
1009 * @end: the last sector to erase
1011 * Erases the sectors in the given range.
1013 static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
1015 while (start <= end) {
1018 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
1019 (ret = sf1_write(adapter, 4, 0,
1020 SF_ERASE_SECTOR | (start << 8))) != 0 ||
1021 (ret = flash_wait_op(adapter, 5, 500)) != 0)
1029 * t3_load_fw - download firmware
1030 * @adapter: the adapter
1031 * @fw_data: the firmware image to write
1034 * Write the supplied firmware image to the card's serial flash.
1035 * The FW image has the following sections: @size - 8 bytes of code and
1036 * data, followed by 4 bytes of FW version, followed by the 32-bit
1037 * 1's complement checksum of the whole image.
1039 int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
1043 const u32 *p = (const u32 *)fw_data;
1044 int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;
1046 if ((size & 3) || size < FW_MIN_SIZE)
1048 if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
1051 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1052 csum += ntohl(p[i]);
1053 if (csum != 0xffffffff) {
1054 CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
1059 ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
1063 size -= 8; /* trim off version and checksum */
1064 for (addr = FW_FLASH_BOOT_ADDR; size;) {
1065 unsigned int chunk_size = min(size, 256U);
1067 ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
1072 fw_data += chunk_size;
1076 ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
1079 CH_ERR(adapter, "firmware download failed, error %d\n", ret);
1083 #define CIM_CTL_BASE 0x2000
1086 * t3_cim_ctl_blk_read - read a block from CIM control region
1088 * @adap: the adapter
1089 * @addr: the start address within the CIM control region
1090 * @n: number of words to read
1091 * @valp: where to store the result
1093 * Reads a block of 4-byte words from the CIM control region.
1095 int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
1096 unsigned int n, unsigned int *valp)
1100 if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
1103 for ( ; !ret && n--; addr += 4) {
1104 t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
1105 ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
1108 *valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
1115 * t3_link_changed - handle interface link changes
1116 * @adapter: the adapter
1117 * @port_id: the port index that changed link state
1119 * Called when a port's link settings change to propagate the new values
1120 * to the associated PHY and MAC. After performing the common tasks it
1121 * invokes an OS-specific handler.
1123 void t3_link_changed(struct adapter *adapter, int port_id)
1125 int link_ok, speed, duplex, fc;
1126 struct port_info *pi = adap2pinfo(adapter, port_id);
1127 struct cphy *phy = &pi->phy;
1128 struct cmac *mac = &pi->mac;
1129 struct link_config *lc = &pi->link_config;
1131 phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
1133 if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
1134 uses_xaui(adapter)) {
1137 t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
1138 link_ok ? F_TXACTENABLE | F_RXEN : 0);
1140 lc->link_ok = link_ok;
1141 lc->speed = speed < 0 ? SPEED_INVALID : speed;
1142 lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
1143 if (lc->requested_fc & PAUSE_AUTONEG)
1144 fc &= lc->requested_fc;
1146 fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1148 if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
1149 /* Set MAC speed, duplex, and flow control to match PHY. */
1150 t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
1154 t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
1158 * t3_link_start - apply link configuration to MAC/PHY
1159 * @phy: the PHY to setup
1160 * @mac: the MAC to setup
1161 * @lc: the requested link configuration
1163 * Set up a port's MAC and PHY according to a desired link configuration.
1164 * - If the PHY can auto-negotiate first decide what to advertise, then
1165 * enable/disable auto-negotiation as desired, and reset.
1166 * - If the PHY does not auto-negotiate just reset it.
1167 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1168 * otherwise do it later based on the outcome of auto-negotiation.
1170 int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
1172 unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1175 if (lc->supported & SUPPORTED_Autoneg) {
1176 lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
1178 lc->advertising |= ADVERTISED_Asym_Pause;
1180 lc->advertising |= ADVERTISED_Pause;
1182 phy->ops->advertise(phy, lc->advertising);
1184 if (lc->autoneg == AUTONEG_DISABLE) {
1185 lc->speed = lc->requested_speed;
1186 lc->duplex = lc->requested_duplex;
1187 lc->fc = (unsigned char)fc;
1188 t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
1190 /* Also disables autoneg */
1191 phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
1192 phy->ops->reset(phy, 0);
1194 phy->ops->autoneg_enable(phy);
1196 t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
1197 lc->fc = (unsigned char)fc;
1198 phy->ops->reset(phy, 0);
1204 * t3_set_vlan_accel - control HW VLAN extraction
1205 * @adapter: the adapter
1206 * @ports: bitmap of adapter ports to operate on
1207 * @on: enable (1) or disable (0) HW VLAN extraction
1209 * Enables or disables HW extraction of VLAN tags for the given port.
1211 void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
1213 t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
1214 ports << S_VLANEXTRACTIONENABLE,
1215 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
/* One entry in a table-driven interrupt handler's action table */
struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal:1;	/* whether the condition reported is fatal */
};
1226 * t3_handle_intr_status - table driven interrupt handler
1227 * @adapter: the adapter that generated the interrupt
1228 * @reg: the interrupt status register to process
1229 * @mask: a mask to apply to the interrupt status
1230 * @acts: table of interrupt actions
1231 * @stats: statistics counters tracking interrupt occurrences
1233 * A table driven interrupt handler that applies a set of masks to an
1234 * interrupt status word and performs the corresponding actions if the
1235 * interrupts described by the mask have occurred. The actions include
1236 * optionally printing a warning or alert message, and optionally
1237 * incrementing a stat counter. The table is terminated by an entry
1238 * specifying mask 0. Returns the number of fatal interrupt conditions.
1240 static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
1242 const struct intr_info *acts,
1243 unsigned long *stats)
1246 unsigned int status = t3_read_reg(adapter, reg) & mask;
1248 for (; acts->mask; ++acts) {
1249 if (!(status & acts->mask))
1253 CH_ALERT(adapter, "%s (0x%x)\n",
1254 acts->msg, status & acts->mask);
1255 } else if (acts->msg)
1256 CH_WARN(adapter, "%s (0x%x)\n",
1257 acts->msg, status & acts->mask);
1258 if (acts->stat_idx >= 0)
1259 stats[acts->stat_idx]++;
1261 if (status) /* clear processed interrupts */
1262 t3_write_reg(adapter, reg, status);
/* Interrupt-enable masks for the SGE and MC5 modules.  (Restores the final
 * continuation line of each macro, dropped by extraction.) */
#define SGE_INTR_MASK (F_RSPQDISABLED | \
		       F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
		       F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
		       F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
		       V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
		       F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
		       F_HIRCQPARITYERROR)
#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
		       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
		       F_NFASRCHFAIL)
1276 #define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
1277 #define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1278 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
1279 F_TXFIFO_UNDERRUN | F_RXFIFO_OVERFLOW)
1280 #define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
1281 F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
1282 F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
1283 F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
1284 V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
1285 V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
1286 #define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
1287 F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
1288 /* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
1289 F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
1290 F_TXPARERR | V_BISTERR(M_BISTERR))
1291 #define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
1292 F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
1293 F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
1294 #define ULPTX_INTR_MASK 0xfc
1295 #define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
1296 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
1297 F_ZERO_SWITCH_ERROR)
1298 #define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
1299 F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
1300 F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
1301 F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
1302 F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
1303 F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
1304 F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
1305 F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
1306 #define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
1307 V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
1308 V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
1309 #define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
1310 V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
1311 V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
1312 #define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
1313 V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
1314 V_RXTPPARERRENB(M_RXTPPARERRENB) | \
1315 V_MCAPARERRENB(M_MCAPARERRENB))
1316 #define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
1317 F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
1318 F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
1319 F_MPS0 | F_CPL_SWITCH)
1322 * Interrupt handler for the PCIX1 module.
/*
 * pci_intr_handler - service interrupts from the PCI-X1 module; escalates
 * to t3_fatal_err() when any fatal condition is reported.
 * NOTE(review): extraction dropped lines from the intr_info table below
 * (several entries' trailing "1}," continuations, the table terminator and
 * the function braces); tokens are kept byte-identical.
 */
1324 static void pci_intr_handler(struct adapter *adapter)
1326 static const struct intr_info pcix1_intr_info[] = {
1327 {F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
1328 {F_SIGTARABT, "PCI signaled target abort", -1, 1},
1329 {F_RCVTARABT, "PCI received target abort", -1, 1},
1330 {F_RCVMSTABT, "PCI received master abort", -1, 1},
1331 {F_SIGSYSERR, "PCI signaled system error", -1, 1},
1332 {F_DETPARERR, "PCI detected parity error", -1, 1},
1333 {F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
1334 {F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
1335 {F_RCVSPLCMPERR, "PCI received split completion error", -1,
1337 {F_DETCORECCERR, "PCI correctable ECC error",
1338 STAT_PCI_CORR_ECC, 0},
1339 {F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
1340 {F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
1341 {V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
1343 {V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
1345 {V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
1347 {V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
1352 if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
1353 pcix1_intr_info, adapter->irq_stats))
1354 t3_fatal_err(adapter);
1358 * Interrupt handler for the PCIE module.
/*
 * pcie_intr_handler - service interrupts from the PCIe module.  A PEX
 * error additionally dumps the PEX error code register before the
 * table-driven processing; any fatal condition escalates to t3_fatal_err().
 * NOTE(review): extraction dropped the function braces and two table-entry
 * lead-in lines (F_UNXSPLCPLERRR/F_UNXSPLCPLERRC); tokens kept byte-identical.
 */
1360 static void pcie_intr_handler(struct adapter *adapter)
1362 static const struct intr_info pcie_intr_info[] = {
1363 {F_PEXERR, "PCI PEX error", -1, 1},
1365 "PCI unexpected split completion DMA read error", -1, 1},
1367 "PCI unexpected split completion DMA command error", -1, 1},
1368 {F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
1369 {F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
1370 {F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
1371 {F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
1372 {V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
1373 "PCI MSI-X table/PBA parity error", -1, 1},
1374 {F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1},
1375 {F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1},
1376 {F_RXPARERR, "PCI Rx parity error", -1, 1},
1377 {F_TXPARERR, "PCI Tx parity error", -1, 1},
1378 {V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
/* Report the detailed PEX error code before clearing causes. */
1382 if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
1383 CH_ALERT(adapter, "PEX error code 0x%x\n",
1384 t3_read_reg(adapter, A_PCIE_PEX_ERR));
1386 if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
1387 pcie_intr_info, adapter->irq_stats))
1388 t3_fatal_err(adapter);
1392 * TP interrupt handler.
/*
 * tp_intr_handler - service TP (transport processor) interrupts, using a
 * different cause table for rev-C and later parts.  Any reported condition
 * escalates to t3_fatal_err().
 * NOTE(review): function braces and table terminators were dropped by
 * extraction; tokens kept byte-identical.
 */
1394 static void tp_intr_handler(struct adapter *adapter)
1396 static const struct intr_info tp_intr_info[] = {
1397 {0xffffff, "TP parity error", -1, 1},
1398 {0x1000000, "TP out of Rx pages", -1, 1},
1399 {0x2000000, "TP out of Tx pages", -1, 1},
/* Rev-C parts report parity over a wider field and use named flags. */
1403 static struct intr_info tp_intr_info_t3c[] = {
1404 {0x1fffffff, "TP parity error", -1, 1},
1405 {F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1},
1406 {F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1},
1410 if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
1411 adapter->params.rev < T3_REV_C ?
1412 tp_intr_info : tp_intr_info_t3c, NULL))
1413 t3_fatal_err(adapter);
1417 * CIM interrupt handler.
/*
 * cim_intr_handler - service CIM interrupts; every table entry is fatal,
 * so any reported condition escalates to t3_fatal_err().
 * NOTE(review): function braces and table terminator were dropped by
 * extraction; tokens kept byte-identical.
 */
1419 static void cim_intr_handler(struct adapter *adapter)
1421 static const struct intr_info cim_intr_info[] = {
1422 {F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
1423 {F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
1424 {F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
1425 {F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
1426 {F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
1427 {F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
1428 {F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
1429 {F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
1430 {F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
1431 {F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
1432 {F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
1433 {F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
1434 {F_DRAMPARERR, "CIM DRAM parity error", -1, 1},
1435 {F_ICACHEPARERR, "CIM icache parity error", -1, 1},
1436 {F_DCACHEPARERR, "CIM dcache parity error", -1, 1},
1437 {F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1},
1438 {F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1},
1439 {F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1},
1440 {F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1},
1441 {F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1},
1442 {F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1},
1443 {F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1},
1444 {F_ITAGPARERR, "CIM itag parity error", -1, 1},
1445 {F_DTAGPARERR, "CIM dtag parity error", -1, 1},
1449 if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
1450 cim_intr_info, NULL))
1451 t3_fatal_err(adapter);
1455 * ULP RX interrupt handler.
/*
 * ulprx_intr_handler - service ULP RX interrupts; all listed conditions
 * are fatal and escalate to t3_fatal_err().
 * NOTE(review): function braces and table terminator were dropped by
 * extraction; tokens kept byte-identical.
 */
1457 static void ulprx_intr_handler(struct adapter *adapter)
1459 static const struct intr_info ulprx_intr_info[] = {
1460 {F_PARERRDATA, "ULP RX data parity error", -1, 1},
1461 {F_PARERRPCMD, "ULP RX command parity error", -1, 1},
1462 {F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1},
1463 {F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1},
1464 {F_ARBFPERR, "ULP RX ArbF parity error", -1, 1},
1465 {F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1},
1466 {F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1},
1467 {F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1},
1471 if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
1472 ulprx_intr_info, NULL))
1473 t3_fatal_err(adapter);
1477 * ULP TX interrupt handler.
/*
 * ulptx_intr_handler - service ULP TX interrupts.  PBL out-of-bounds
 * conditions are counted (non-fatal); parity errors are fatal.
 * NOTE(review): function braces and table terminator were dropped by
 * extraction; tokens kept byte-identical.
 */
1479 static void ulptx_intr_handler(struct adapter *adapter)
1481 static const struct intr_info ulptx_intr_info[] = {
1482 {F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
1483 STAT_ULP_CH0_PBL_OOB, 0},
1484 {F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
1485 STAT_ULP_CH1_PBL_OOB, 0},
1486 {0xfc, "ULP TX parity error", -1, 1},
1490 if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
1491 ulptx_intr_info, adapter->irq_stats))
1492 t3_fatal_err(adapter);
1495 #define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
1496 F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
1497 F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
1498 F_ICSPI1_TX_FRAMING_ERROR)
1499 #define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
1500 F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
1501 F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
1502 F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
1505 * PM TX interrupt handler.
/*
 * pmtx_intr_handler - service PM TX interrupts (zero-length pcmds, SPI
 * framing and parity errors); all are fatal.
 * NOTE(review): function braces and table terminator were dropped by
 * extraction; tokens kept byte-identical.
 */
1507 static void pmtx_intr_handler(struct adapter *adapter)
1509 static const struct intr_info pmtx_intr_info[] = {
1510 {F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
1511 {ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
1512 {OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
1513 {V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
1514 "PMTX ispi parity error", -1, 1},
1515 {V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
1516 "PMTX ospi parity error", -1, 1},
1520 if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
1521 pmtx_intr_info, NULL))
1522 t3_fatal_err(adapter);
1525 #define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
1526 F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
1527 F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
1528 F_IESPI1_TX_FRAMING_ERROR)
1529 #define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
1530 F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
1531 F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
1532 F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
1535 * PM RX interrupt handler.
/*
 * pmrx_intr_handler - service PM RX interrupts; mirror image of the PM TX
 * handler (zero-length pcmds, SPI framing and parity errors), all fatal.
 * NOTE(review): function braces and table terminator were dropped by
 * extraction; tokens kept byte-identical.
 */
1537 static void pmrx_intr_handler(struct adapter *adapter)
1539 static const struct intr_info pmrx_intr_info[] = {
1540 {F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
1541 {IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
1542 {OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
1543 {V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
1544 "PMRX ispi parity error", -1, 1},
1545 {V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
1546 "PMRX ospi parity error", -1, 1},
1550 if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
1551 pmrx_intr_info, NULL))
1552 t3_fatal_err(adapter);
1556 * CPL switch interrupt handler.
/*
 * cplsw_intr_handler - service CPL switch interrupts; all listed
 * conditions are fatal.
 * NOTE(review): function braces and table terminator were dropped by
 * extraction; tokens kept byte-identical.
 */
1558 static void cplsw_intr_handler(struct adapter *adapter)
1560 static const struct intr_info cplsw_intr_info[] = {
1561 {F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1},
1562 {F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1},
1563 {F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
1564 {F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
1565 {F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
1566 {F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
1570 if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
1571 cplsw_intr_info, NULL))
1572 t3_fatal_err(adapter);
1576 * MPS interrupt handler.
1578 static void mps_intr_handler(struct adapter *adapter)
1580 static const struct intr_info mps_intr_info[] = {
1581 {0x1ff, "MPS parity error", -1, 1},
1585 if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
1586 mps_intr_info, NULL))
1587 t3_fatal_err(adapter);
1590 #define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)
1593 * MC7 interrupt handler.
/*
 * mc7_intr_handler - service interrupts from one MC7 memory controller:
 * counts and logs correctable, uncorrectable, parity and address errors,
 * escalating to t3_fatal_err() on any MC7_INTR_FATAL condition, then
 * clears the cause register.
 * NOTE(review): extraction dropped the guarding "if (cause & F_CE/F_UE)",
 * "if (G_PE(cause))" and "if (cause & F_AE)" lines plus braces and the
 * addr-printing argument line; tokens kept byte-identical.
 */
1595 static void mc7_intr_handler(struct mc7 *mc7)
1597 struct adapter *adapter = mc7->adapter;
1598 u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);
/* Correctable-error path: count and dump address/data registers. */
1601 mc7->stats.corr_err++;
1602 CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
1603 "data 0x%x 0x%x 0x%x\n", mc7->name,
1604 t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
1605 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
1606 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
1607 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
/* Uncorrectable-error path. */
1611 mc7->stats.uncorr_err++;
1612 CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
1613 "data 0x%x 0x%x 0x%x\n", mc7->name,
1614 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
1615 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
1616 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
1617 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
/* Parity-error path. */
1621 mc7->stats.parity_err++;
1622 CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
1623 mc7->name, G_PE(cause));
/* Address-error path: the error address register exists only on rev > 0. */
1629 if (adapter->params.rev > 0)
1630 addr = t3_read_reg(adapter,
1631 mc7->offset + A_MC7_ERR_ADDR);
1632 mc7->stats.addr_err++;
1633 CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
1637 if (cause & MC7_INTR_FATAL)
1638 t3_fatal_err(adapter);
1640 t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
1643 #define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1644 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
1646 * XGMAC interrupt handler.
/*
 * mac_intr_handler - service interrupts from port @idx's XGMAC: bumps the
 * relevant MAC statistics, clears the cause register, and checks for
 * fatal FIFO parity conditions.
 * NOTE(review): extraction dropped the function braces and the tail after
 * the final "if" (presumably the fatal-error escalation and return value —
 * confirm against the full source); tokens kept byte-identical.
 */
1648 static int mac_intr_handler(struct adapter *adap, unsigned int idx)
1650 struct cmac *mac = &adap2pinfo(adap, idx)->mac;
1651 u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset);
1653 if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
1654 mac->stats.tx_fifo_parity_err++;
1655 CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
1657 if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
1658 mac->stats.rx_fifo_parity_err++;
1659 CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
/* Remaining causes are counted only, not logged. */
1661 if (cause & F_TXFIFO_UNDERRUN)
1662 mac->stats.tx_fifo_urun++;
1663 if (cause & F_RXFIFO_OVERFLOW)
1664 mac->stats.rx_fifo_ovfl++;
1665 if (cause & V_SERDES_LOS(M_SERDES_LOS))
1666 mac->stats.serdes_signal_loss++;
1667 if (cause & F_XAUIPCSCTCERR)
1668 mac->stats.xaui_pcs_ctc_err++;
1669 if (cause & F_XAUIPCSALIGNCHANGE)
1670 mac->stats.xaui_pcs_align_change++;
1672 t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
1673 if (cause & XGM_INTR_FATAL)
1679 * Interrupt handler for PHY events.
/*
 * t3_phy_intr_handler - dispatch PHY (GPIO-signalled) interrupts to each
 * port's PHY driver; reports link changes and counts FIFO errors, then
 * clears the T3DBG cause register.
 * NOTE(review): extraction dropped the function braces and the lines that
 * consume @mask against @gpi/@cause; tokens kept byte-identical.
 */
1681 int t3_phy_intr_handler(struct adapter *adapter)
1683 u32 mask, gpi = adapter_info(adapter)->gpio_intr;
1684 u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
1686 for_each_port(adapter, i) {
1687 struct port_info *p = adap2pinfo(adapter, i);
/* Isolate the lowest set GPIO bit for this port. */
1689 mask = gpi - (gpi & (gpi - 1));
1692 if (!(p->port_type->caps & SUPPORTED_IRQ))
1696 int phy_cause = p->phy.ops->intr_handler(&p->phy);
1698 if (phy_cause & cphy_cause_link_change)
1699 t3_link_changed(adapter, i);
1700 if (phy_cause & cphy_cause_fifo_error)
1701 p->phy.fifo_errors++;
1705 t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
1710 * T3 slow path (non-data) interrupt handler.
/*
 * t3_slow_intr_handler - top-level slow-path (non-data) interrupt
 * dispatcher: reads PL_INT_CAUSE0, masks it with the enabled slow
 * interrupts, fans out to the per-module handlers, then clears and
 * flushes the cause register.
 * NOTE(review): extraction dropped the function braces, the early-return
 * when no cause bits are set, several "if (cause & ...)" guard lines and
 * the final return; tokens kept byte-identical.
 */
1712 int t3_slow_intr_handler(struct adapter *adapter)
1714 u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
1716 cause &= adapter->slow_intr_mask;
1719 if (cause & F_PCIM0) {
/* Bus-interface interrupts: PCIe vs. PCI-X depends on the card. */
1720 if (is_pcie(adapter))
1721 pcie_intr_handler(adapter);
1723 pci_intr_handler(adapter);
1726 t3_sge_err_intr_handler(adapter);
1727 if (cause & F_MC7_PMRX)
1728 mc7_intr_handler(&adapter->pmrx);
1729 if (cause & F_MC7_PMTX)
1730 mc7_intr_handler(&adapter->pmtx);
1731 if (cause & F_MC7_CM)
1732 mc7_intr_handler(&adapter->cm);
1734 cim_intr_handler(adapter);
1736 tp_intr_handler(adapter);
1738 ulprx_intr_handler(adapter);
1737 if (cause & F_ULP2_RX)
1739 if (cause & F_ULP2_TX)
1740 ulptx_intr_handler(adapter);
1741 if (cause & F_PM1_RX)
1742 pmrx_intr_handler(adapter);
1743 if (cause & F_PM1_TX)
1744 pmtx_intr_handler(adapter);
1745 if (cause & F_CPL_SWITCH)
1746 cplsw_intr_handler(adapter);
1748 mps_intr_handler(adapter);
1750 t3_mc5_intr_handler(&adapter->mc5);
1751 if (cause & F_XGMAC0_0)
1752 mac_intr_handler(adapter, 0);
1753 if (cause & F_XGMAC0_1)
1754 mac_intr_handler(adapter, 1);
1755 if (cause & F_T3DBG)
1756 t3_os_ext_intr_handler(adapter);
1758 /* Clear the interrupts just processed. */
1759 t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
1760 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1765 * t3_intr_enable - enable interrupts
1766 * @adapter: the adapter whose interrupts should be enabled
1768 * Enable interrupts by setting the interrupt enable registers of the
1769 * various HW modules and then enabling the top-level interrupt
/*
 * t3_intr_enable - enable interrupts across all HW modules, then unmask
 * the top-level PL concentrator and flush.  Rev-dependent masks are used
 * for TP, CPL switch and ULP TX; the bus-interface mask depends on
 * PCIe vs. PCI-X.
 * NOTE(review): extraction dropped the function braces and the else line
 * between the rev>0 and rev-0 CPL/ULPTX programming; tokens kept
 * byte-identical.
 */
1772 void t3_intr_enable(struct adapter *adapter)
1774 static const struct addr_val_pair intr_en_avp[] = {
1775 {A_SG_INT_ENABLE, SGE_INTR_MASK},
1776 {A_MC7_INT_ENABLE, MC7_INTR_MASK},
1777 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1779 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1781 {A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
1782 {A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
1783 {A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
1784 {A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
1785 {A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
1786 {A_MPS_INT_ENABLE, MPS_INTR_MASK},
1789 adapter->slow_intr_mask = PL_INTR_MASK;
1791 t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
1792 t3_write_reg(adapter, A_TP_INT_ENABLE,
1793 adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);
1795 if (adapter->params.rev > 0) {
1796 t3_write_reg(adapter, A_CPL_INTR_ENABLE,
1797 CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
1798 t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
1799 ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
1800 F_PBL_BOUND_ERR_CH1);
1802 t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
1803 t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
1806 t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW,
1807 adapter_info(adapter)->gpio_intr);
1808 t3_write_reg(adapter, A_T3DBG_INT_ENABLE,
1809 adapter_info(adapter)->gpio_intr);
1810 if (is_pcie(adapter))
1811 t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
1813 t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
1814 t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
1815 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1819 * t3_intr_disable - disable a card's interrupts
1820 * @adapter: the adapter whose interrupts should be disabled
1822 * Disable interrupts. We only disable the top-level interrupt
1823 * concentrator and the SGE data interrupts.
1825 void t3_intr_disable(struct adapter *adapter)
1827 t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
1828 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1829 adapter->slow_intr_mask = 0;
1833 * t3_intr_clear - clear all interrupts
1834 * @adapter: the adapter whose interrupts should be cleared
1836 * Clears all interrupts.
/*
 * t3_intr_clear - clear all pending interrupts: per-port PHY/MAC causes,
 * every per-module cause register in the table, the PEX error register on
 * PCIe cards, and finally the top-level PL cause (with a read to flush).
 * NOTE(review): extraction dropped several entries of cause_reg_addr[],
 * the loop variable declaration and the function braces; tokens kept
 * byte-identical.
 */
1838 void t3_intr_clear(struct adapter *adapter)
1840 static const unsigned int cause_reg_addr[] = {
1842 A_SG_RSPQ_FL_STATUS,
1845 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1846 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1847 A_CIM_HOST_INT_CAUSE,
1860 /* Clear PHY and MAC interrupts for each port. */
1861 for_each_port(adapter, i)
1862 t3_port_intr_clear(adapter, i);
1864 for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
1865 t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
1867 if (is_pcie(adapter))
1868 t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
1869 t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
1870 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1874 * t3_port_intr_enable - enable port-specific interrupts
1875 * @adapter: associated adapter
1876 * @idx: index of port whose interrupts should be enabled
1878 * Enable port-specific (i.e., MAC and PHY) interrupts for the given
1881 void t3_port_intr_enable(struct adapter *adapter, int idx)
1883 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1885 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
1886 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
1887 phy->ops->intr_enable(phy);
1891 * t3_port_intr_disable - disable port-specific interrupts
1892 * @adapter: associated adapter
1893 * @idx: index of port whose interrupts should be disabled
1895 * Disable port-specific (i.e., MAC and PHY) interrupts for the given
1898 void t3_port_intr_disable(struct adapter *adapter, int idx)
1900 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1902 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
1903 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
1904 phy->ops->intr_disable(phy);
1908 * t3_port_intr_clear - clear port-specific interrupts
1909 * @adapter: associated adapter
1910 * @idx: index of port whose interrupts to clear
1912 * Clear port-specific (i.e., MAC and PHY) interrupts for the given
1915 void t3_port_intr_clear(struct adapter *adapter, int idx)
1917 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1919 t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
1920 t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
1921 phy->ops->intr_clear(phy);
1924 #define SG_CONTEXT_CMD_ATTEMPTS 100
1927 * t3_sge_write_context - write an SGE context
1928 * @adapter: the adapter
1929 * @id: the context id
1930 * @type: the context type
1932 * Program an SGE context with the values already loaded in the
1933 * CONTEXT_DATA? registers.
1935 static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
1938 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
1939 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
1940 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
1941 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
1942 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1943 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
1944 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1945 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
1948 static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
1951 t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
1952 t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
1953 t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
1954 t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
1955 return t3_sge_write_context(adap, id, type);
1959 * t3_sge_init_ecntxt - initialize an SGE egress context
1960 * @adapter: the adapter to configure
1961 * @id: the context id
1962 * @gts_enable: whether to enable GTS for the context
1963 * @type: the egress context type
1964 * @respq: associated response queue
1965 * @base_addr: base address of queue
1966 * @size: number of queue entries
1968 * @gen: initial generation value for the context
1969 * @cidx: consumer pointer
1971 * Initialize an SGE egress context and make it ready for use. If the
1972 * platform allows concurrent context operations, the caller is
1973 * responsible for appropriate locking.
/*
 * t3_sge_init_ecntxt - program an SGE egress context from @base_addr and
 * the supplied queue parameters, then write it out via
 * t3_sge_write_context().
 * NOTE(review): extraction dropped the final parameter line (presumably
 * the cidx parameter — confirm), the early returns after the two checks,
 * the base_addr shift lines between DATA register writes, and braces;
 * tokens kept byte-identical.
 */
1975 int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
1976 enum sge_context_type type, int respq, u64 base_addr,
1977 unsigned int size, unsigned int token, int gen,
/* Offload queues start with 0 credits; others use the FW default. */
1980 unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
1982 if (base_addr & 0xfff) /* must be 4K aligned */
1984 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1988 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
1989 V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
1990 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
1991 V_EC_BASE_LO(base_addr & 0xffff));
1993 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
1995 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
1996 V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
1997 V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
1999 return t3_sge_write_context(adapter, id, F_EGRESS);
2003 * t3_sge_init_flcntxt - initialize an SGE free-buffer list context
2004 * @adapter: the adapter to configure
2005 * @id: the context id
2006 * @gts_enable: whether to enable GTS for the context
2007 * @base_addr: base address of queue
2008 * @size: number of queue entries
2009 * @bsize: size of each buffer for this queue
2010 * @cong_thres: threshold to signal congestion to upstream producers
2011 * @gen: initial generation value for the context
2012 * @cidx: consumer pointer
2014 * Initialize an SGE free list context and make it ready for use. The
2015 * caller is responsible for ensuring only one context operation occurs
/*
 * t3_sge_init_flcntxt - program an SGE free-buffer-list context from
 * @base_addr and the queue parameters, then write it out via
 * t3_sge_write_context().
 * NOTE(review): extraction dropped the final parameter line (presumably
 * the cidx parameter — confirm), the early returns after the two checks,
 * a base_addr shift between the DATA0/DATA1 writes, and braces; tokens
 * kept byte-identical.
 */
2018 int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
2019 int gts_enable, u64 base_addr, unsigned int size,
2020 unsigned int bsize, unsigned int cong_thres, int gen,
2023 if (base_addr & 0xfff) /* must be 4K aligned */
2025 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2029 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
2031 t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
2032 V_FL_BASE_HI((u32) base_addr) |
2033 V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
2034 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
2035 V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
2036 V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
2037 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2038 V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
2039 V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
2040 return t3_sge_write_context(adapter, id, F_FREELIST);
2044 * t3_sge_init_rspcntxt - initialize an SGE response queue context
2045 * @adapter: the adapter to configure
2046 * @id: the context id
2047 * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
2048 * @base_addr: base address of queue
2049 * @size: number of queue entries
2050 * @fl_thres: threshold for selecting the normal or jumbo free list
2051 * @gen: initial generation value for the context
2052 * @cidx: consumer pointer
2054 * Initialize an SGE response queue context and make it ready for use.
2055 * The caller is responsible for ensuring only one context operation
/*
 * t3_sge_init_rspcntxt - program an SGE response-queue context; interrupt
 * delivery is enabled only when @irq_vec_idx >= 0 (MSI-X vector index or
 * 0 for INTx/MSI).
 * NOTE(review): extraction dropped the function braces, the early returns
 * after the alignment/busy checks, the remainder of the DATA0 expression
 * and a base_addr shift; tokens kept byte-identical.
 */
2058 int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
2059 int irq_vec_idx, u64 base_addr, unsigned int size,
2060 unsigned int fl_thres, int gen, unsigned int cidx)
2062 unsigned int intr = 0;
2064 if (base_addr & 0xfff) /* must be 4K aligned */
2066 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2070 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
2072 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
2074 if (irq_vec_idx >= 0)
2075 intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
2076 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2077 V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
2078 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
2079 return t3_sge_write_context(adapter, id, F_RESPONSEQ);
2083 * t3_sge_init_cqcntxt - initialize an SGE completion queue context
2084 * @adapter: the adapter to configure
2085 * @id: the context id
2086 * @base_addr: base address of queue
2087 * @size: number of queue entries
2088 * @rspq: response queue for async notifications
2089 * @ovfl_mode: CQ overflow mode
2090 * @credits: completion queue credits
2091 * @credit_thres: the credit threshold
2093 * Initialize an SGE completion queue context and make it ready for use.
2094 * The caller is responsible for ensuring only one context operation
/*
 * t3_sge_init_cqcntxt - program an SGE completion-queue context with its
 * size, base address, response queue, overflow mode and credit settings,
 * then write it out via t3_sge_write_context().
 * NOTE(review): extraction dropped the function braces, the early returns
 * after the alignment/busy checks and a base_addr shift; tokens kept
 * byte-identical.
 */
2097 int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
2098 unsigned int size, int rspq, int ovfl_mode,
2099 unsigned int credits, unsigned int credit_thres)
2101 if (base_addr & 0xfff) /* must be 4K aligned */
2103 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2107 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
2108 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
2110 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2111 V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
2112 V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
2113 V_CQ_ERR(ovfl_mode));
2114 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
2115 V_CQ_CREDIT_THRES(credit_thres));
2116 return t3_sge_write_context(adapter, id, F_CQ);
2120 * t3_sge_enable_ecntxt - enable/disable an SGE egress context
2121 * @adapter: the adapter
2122 * @id: the egress context id
2123 * @enable: enable (1) or disable (0) the context
2125 * Enable or disable an SGE egress context. The caller is responsible for
2126 * ensuring only one context operation occurs at a time.
2128 int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
2130 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2133 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2134 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2135 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2136 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
2137 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
2138 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2139 V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
2140 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2141 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2145 * t3_sge_disable_fl - disable an SGE free-buffer list
2146 * @adapter: the adapter
2147 * @id: the free list context id
2149 * Disable an SGE free-buffer list. The caller is responsible for
2150 * ensuring only one context operation occurs at a time.
2152 int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
2154 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2157 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2158 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2159 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
2160 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2161 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
2162 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2163 V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
2164 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2165 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2169 * t3_sge_disable_rspcntxt - disable an SGE response queue
2170 * @adapter: the adapter
2171 * @id: the response queue context id
2173 * Disable an SGE response queue. The caller is responsible for
2174 * ensuring only one context operation occurs at a time.
2176 int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
2178 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Unmask only the CQ_SIZE field and write 0 to disable the queue. */
2181 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2182 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2183 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2184 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2185 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2186 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2187 V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2188 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2189 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2193 * t3_sge_disable_cqcntxt - disable an SGE completion queue
2194 * @adapter: the adapter
2195 * @id: the completion queue context id
2197 * Disable an SGE completion queue. The caller is responsible for
2198 * ensuring only one context operation occurs at a time.
2200 int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
2202 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Same pattern as the response-queue disable, but targets the F_CQ type. */
2205 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2206 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2207 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2208 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2209 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2210 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2211 V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2212 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2213 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2217 * t3_sge_cqcntxt_op - perform an operation on a completion queue context
2218 * @adapter: the adapter
2219 * @id: the context id
2220 * @op: the operation to perform
2222 * Perform the selected operation on an SGE completion queue context.
2223 * The caller is responsible for ensuring only one context operation
2226 int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
2227 unsigned int credits)
2231 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Credits are carried in the upper 16 bits of DATA0. */
2234 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2235 t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2236 V_CONTEXT(id) | F_CQ);
2237 if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2238 0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
/*
 * Opcodes 2..6 report the current CQ index.  Rev > 0 silicon returns it
 * in the command completion value; rev 0 needs an explicit read-context
 * command (opcode 0) followed by a DATA0 read to fetch it.
 */
2241 if (op >= 2 && op < 7) {
2242 if (adapter->params.rev > 0)
2243 return G_CQ_INDEX(val);
2245 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2246 V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2247 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2248 F_CONTEXT_CMD_BUSY, 0,
2249 SG_CONTEXT_CMD_ATTEMPTS, 1))
2251 return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2257 * t3_sge_read_context - read an SGE context
2258 * @type: the context type
2259 * @adapter: the adapter
2260 * @id: the context id
2261 * @data: holds the retrieved context
2263 * Read an SGE egress context. The caller is responsible for ensuring
2264 * only one context operation occurs at a time.
2266 static int t3_sge_read_context(unsigned int type, struct adapter *adapter,
2267 unsigned int id, u32 data[4])
2269 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Opcode 0 = read context; the four DATA registers hold the result. */
2272 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2273 V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
2274 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
2275 SG_CONTEXT_CMD_ATTEMPTS, 1))
2277 data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
2278 data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
2279 data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
2280 data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
2285 * t3_sge_read_ecntxt - read an SGE egress context
2286 * @adapter: the adapter
2287 * @id: the context id
2288 * @data: holds the retrieved context
2290 * Read an SGE egress context. The caller is responsible for ensuring
2291 * only one context operation occurs at a time.
/* Thin wrapper over t3_sge_read_context with the F_EGRESS type bit. */
2293 int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4])
2297 return t3_sge_read_context(F_EGRESS, adapter, id, data);
2301 * t3_sge_read_cq - read an SGE CQ context
2302 * @adapter: the adapter
2303 * @id: the context id
2304 * @data: holds the retrieved context
2306 * Read an SGE CQ context. The caller is responsible for ensuring
2307 * only one context operation occurs at a time.
/* Thin wrapper over t3_sge_read_context with the F_CQ type bit. */
2309 int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4])
2313 return t3_sge_read_context(F_CQ, adapter, id, data);
2317 * t3_sge_read_fl - read an SGE free-list context
2318 * @adapter: the adapter
2319 * @id: the context id
2320 * @data: holds the retrieved context
2322 * Read an SGE free-list context. The caller is responsible for ensuring
2323 * only one context operation occurs at a time.
2325 int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4])
/* Two free lists per queue set, hence the SGE_QSETS * 2 bound. */
2327 if (id >= SGE_QSETS * 2)
2329 return t3_sge_read_context(F_FREELIST, adapter, id, data);
2333 * t3_sge_read_rspq - read an SGE response queue context
2334 * @adapter: the adapter
2335 * @id: the context id
2336 * @data: holds the retrieved context
2338 * Read an SGE response queue context. The caller is responsible for
2339 * ensuring only one context operation occurs at a time.
2341 int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4])
/* One response queue per queue set. */
2343 if (id >= SGE_QSETS)
2345 return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2349 * t3_config_rss - configure Rx packet steering
2350 * @adapter: the adapter
2351 * @rss_config: RSS settings (written to TP_RSS_CONFIG)
2352 * @cpus: values for the CPU lookup table (0xff terminated)
2353 * @rspq: values for the response queue lookup table (0xffff terminated)
2355 * Programs the receive packet steering logic. @cpus and @rspq provide
2356 * the values for the CPU and response queue lookup tables. If they
2357 * provide fewer values than the size of the tables the supplied values
2358 * are used repeatedly until the tables are fully populated.
2360 void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
2361 const u8 * cpus, const u16 *rspq)
2363 int i, j, cpu_idx = 0, q_idx = 0;
/* Each lookup-table word packs two 6-bit CPU entries (one per byte lane). */
2366 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2369 for (j = 0; j < 2; ++j) {
2370 val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
/* 0xff terminator: wrap back to the start so values repeat. */
2371 if (cpus[cpu_idx] == 0xff)
2374 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
/* Map table: index in the high half, response queue in the low half. */
2378 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2379 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2380 (i << 16) | rspq[q_idx++]);
2381 if (rspq[q_idx] == 0xffff)
2385 t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
2389 * t3_read_rss - read the contents of the RSS tables
2390 * @adapter: the adapter
2391 * @lkup: holds the contents of the RSS lookup table
2392 * @map: holds the contents of the RSS map table
2394 * Reads the contents of the receive packet steering tables.
2396 int t3_read_rss(struct adapter *adapter, u8 * lkup, u16 *map)
2402 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2403 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
/* Bit 31 acknowledges a valid read-back; otherwise bail out. */
2405 val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
2406 if (!(val & 0x80000000))
2409 *lkup++ = (val >> 8);
2413 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2414 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2416 val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
2417 if (!(val & 0x80000000))
2425 * t3_tp_set_offload_mode - put TP in NIC/offload mode
2426 * @adap: the adapter
2427 * @enable: 1 to select offload mode, 0 for regular NIC
2429 * Switches TP to NIC/offload mode.
2431 void t3_tp_set_offload_mode(struct adapter *adap, int enable)
/*
 * Only flip NICMODE when the card actually supports offload, or when
 * switching back to plain NIC mode (always safe).
 */
2433 if (is_offload(adap) || !enable)
2434 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2435 V_NICMODE(!enable));
2439 * pm_num_pages - calculate the number of pages of the payload memory
2440 * @mem_size: the size of the payload memory
2441 * @pg_size: the size of each payload memory page
2443 * Calculate the number of pages, each of the given size, that fit in a
2444 * memory of the specified size, respecting the HW requirement that the
2445 * number of pages must be a multiple of 24.
2447 static inline unsigned int pm_num_pages(unsigned int mem_size,
2448 unsigned int pg_size)
2450 unsigned int n = mem_size / pg_size;
/*
 * mem_region - program a CM memory region base address and advance the
 * running offset.  Multi-statement macro; this extract elides its
 * continuation lines (presumably the size accumulation into a cursor).
 */
2455 #define mem_region(adap, start, size, reg) \
2456 t3_write_reg((adap), A_ ## reg, (start)); \
2460 * partition_mem - partition memory and configure TP memory settings
2461 * @adap: the adapter
2462 * @p: the TP parameters
2464 * Partitions context and payload memory and configures TP's memory
2467 static void partition_mem(struct adapter *adap, const struct tp_params *p)
2469 unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
2470 unsigned int timers = 0, timers_shift = 22;
/* Rev > 0 parts scale the timer region with the TID count. */
2472 if (adap->params.rev > 0) {
2473 if (tids <= 16 * 1024) {
2476 } else if (tids <= 64 * 1024) {
2479 } else if (tids <= 256 * 1024) {
/* Per-channel Rx/Tx payload memory sizes packed into one register. */
2485 t3_write_reg(adap, A_TP_PMM_SIZE,
2486 p->chan_rx_size | (p->chan_tx_size >> 16));
2488 t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
2489 t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
2490 t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
2491 t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
2492 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
2494 t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
2495 t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
2496 t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
2498 pstructs = p->rx_num_pgs + p->tx_num_pgs;
2499 /* Add a bit of headroom and make multiple of 24 */
2501 pstructs -= pstructs % 24;
2502 t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
/* Lay out CM memory: contexts first, then timers, then page structures. */
2504 m = tids * TCB_SIZE;
2505 mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
2506 mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
2507 t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
2508 m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
2509 mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
2510 mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
2511 mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
2512 mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
/* CIM SDRAM region starts at the next 4KB boundary; rest goes to it. */
2514 m = (m + 4095) & ~0xfff;
2515 t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
2516 t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
/* Recompute how many TIDs the leftover memory supports; give any
 * surplus TCAM entries to the server region. */
2518 tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
2519 m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
2520 adap->params.mc5.nfilters - adap->params.mc5.nroutes;
2522 adap->params.mc5.nservers += m - tids;
/* Write a TP register through the PIO indirect-access window. */
2525 static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
2528 t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2529 t3_write_reg(adap, A_TP_PIO_DATA, val);
/*
 * tp_config - one-time TP (Transport Processor) configuration after reset.
 * Programs global options, checksum offload, delayed-ACK behavior, and a
 * number of revision-specific tuning fields.
 */
2532 static void tp_config(struct adapter *adap, const struct tp_params *p)
2534 t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
2535 F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
2536 F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
2537 t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
2538 F_MTUENABLE | V_WINDOWSCALEMODE(1) |
2539 V_TIMESTAMPSMODE(0) | V_SACKMODE(1) | V_SACKRX(1));
2540 t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
2541 V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
2542 V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
2543 F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
2544 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
2545 F_IPV6ENABLE | F_NICMODE);
2546 t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
2547 t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
2548 t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
2549 adap->params.rev > 0 ? F_ENABLEESND :
2552 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2554 F_ENABLEOCSPIFULL |F_TXDEFERENABLE | F_HEARBEATDACK |
2555 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
2556 t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
2557 F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
2558 F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
/* Two writes in a row: looks like high/low flow-control watermarks are
 * set through the same register -- TODO confirm against the datasheet. */
2559 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
2560 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);
/* Rev-dependent pacing: rev > 0 uses strict auto pacing, rev 0 fixed. */
2562 if (adap->params.rev > 0) {
2563 tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
2564 t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
2566 t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
2567 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
2569 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
2571 if (adap->params.rev == T3_REV_C)
2572 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2573 V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
2574 V_TABLELATENCYDELTA(4));
2576 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
2577 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
2578 t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
2579 t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
2582 /* Desired TP timer resolution in usec */
2583 #define TP_TMR_RES 50
2585 /* TCP timer values in ms */
2586 #define TP_DACK_TIMER 50
2587 #define TP_RTO_MIN 250
2590 * tp_set_timers - set TP timing parameters
2591 * @adap: the adapter to set
2592 * @core_clk: the core clock frequency in Hz
2594 * Set TP's timing parameters, such as the various timer resolutions and
2595 * the TCP timer values.
2597 static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
/* Resolutions are expressed as log2 core-clock dividers (fls - 1). */
2599 unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
2600 unsigned int dack_re = fls(core_clk / 5000) - 1; /* 200us */
2601 unsigned int tstamp_re = fls(core_clk / 1000); /* 1ms, at least */
2602 unsigned int tps = core_clk >> tre;  /* timer ticks per second */
2604 t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
2605 V_DELAYEDACKRESOLUTION(dack_re) |
2606 V_TIMESTAMPRESOLUTION(tstamp_re));
2607 t3_write_reg(adap, A_TP_DACK_TIMER,
2608 (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
2609 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
2610 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
2611 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
2612 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
2613 t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
2614 V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
2615 V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
/* "n SECONDS" expands to "n * tps": seconds in timer ticks. */
2618 #define SECONDS * tps
2620 t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
2621 t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
2622 t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
2623 t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
2624 t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
2625 t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
2626 t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
2627 t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
2628 t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
2634 * t3_tp_set_coalescing_size - set receive coalescing size
2635 * @adap: the adapter
2636 * @size: the receive coalescing size
2637 * @psh: whether a set PSH bit should deliver coalesced data
2639 * Set the receive coalescing size and PSH bit handling.
2641 int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh)
2645 if (size > MAX_RX_COALESCING_LEN)
2648 val = t3_read_reg(adap, A_TP_PARA_REG3);
2649 val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
/* A non-zero size enables coalescing; extract elides the size check. */
2652 val |= F_RXCOALESCEENABLE;
2654 val |= F_RXCOALESCEPSHEN;
2655 size = min(MAX_RX_COALESCING_LEN, size);
2656 t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
2657 V_MAXRXDATA(MAX_RX_COALESCING_LEN));
2659 t3_write_reg(adap, A_TP_PARA_REG3, val);
2664 * t3_tp_set_max_rxsize - set the max receive size
2665 * @adap: the adapter
2666 * @size: the max receive size
2668 * Set TP's max receive size. This is the limit that applies when
2669 * receive coalescing is disabled.
2671 void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
/* Same limit applied to both PM transfer-length fields. */
2673 t3_write_reg(adap, A_TP_PARA_REG7,
2674 V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
/* Fill in the default MTU table.  NOTE(review): the table entries
 * themselves are elided in this extract. */
2677 static void __devinit init_mtus(unsigned short mtus[])
2680 * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
2681 * it can accommodate max size TCP/IP headers when SACK and timestamps
2682 * are enabled and still have at least 8 bytes of payload.
2703 * Initial congestion control parameters.
2705 static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
/* a[] = additive increase, b[] = multiplicative decrease exponents;
 * entries between the runs shown here are elided in this extract. */
2707 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2732 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2735 b[13] = b[14] = b[15] = b[16] = 3;
2736 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2737 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2742 /* The minimum additive increment value for the congestion control table */
2743 #define CC_MIN_INCR 2U
2746 * t3_load_mtus - write the MTU and congestion control HW tables
2747 * @adap: the adapter
2748 * @mtus: the unrestricted values for the MTU table
2749 * @alpha: the values for the congestion control alpha parameter
2750 * @beta: the values for the congestion control beta parameter
2751 * @mtu_cap: the maximum permitted effective MTU
2753 * Write the MTU table with the supplied MTUs capping each at &mtu_cap.
2754 * Update the high-speed congestion control table with the supplied alpha,
2757 void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
2758 unsigned short alpha[NCCTRL_WIN],
2759 unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
/* Expected packets-per-window for each congestion control window size. */
2761 static const unsigned int avg_pkts[NCCTRL_WIN] = {
2762 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2763 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2764 28672, 40960, 57344, 81920, 114688, 163840, 229376
2769 for (i = 0; i < NMTUS; ++i) {
2770 unsigned int mtu = min(mtus[i], mtu_cap);
2771 unsigned int log2 = fls(mtu);
2773 if (!(mtu & ((1 << log2) >> 2))) /* round */
2775 t3_write_reg(adap, A_TP_MTU_TABLE,
2776 (i << 24) | (log2 << 16) | mtu);
/* Per (MTU, window) additive increment, scaled by alpha and floored. */
2778 for (w = 0; w < NCCTRL_WIN; ++w) {
2781 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2784 t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
2785 (w << 16) | (beta[w] << 13) | inc);
2791 * t3_read_hw_mtus - returns the values in the HW MTU table
2792 * @adap: the adapter
2793 * @mtus: where to store the HW MTU values
2795 * Reads the HW MTU table.
2797 void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS])
2801 for (i = 0; i < NMTUS; ++i) {
/* Writing 0xff000000 | i selects entry i for read-back. */
2804 t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
2805 val = t3_read_reg(adap, A_TP_MTU_TABLE);
2806 mtus[i] = val & 0x3fff;
2811 * t3_get_cong_cntl_tab - reads the congestion control table
2812 * @adap: the adapter
2813 * @incr: where to store the alpha values
2815 * Reads the additive increments programmed into the HW congestion
2818 void t3_get_cong_cntl_tab(struct adapter *adap,
2819 unsigned short incr[NMTUS][NCCTRL_WIN])
2821 unsigned int mtu, w;
/* 0xffff0000 selects read mode; index = (mtu << 5) | window. */
2823 for (mtu = 0; mtu < NMTUS; ++mtu)
2824 for (w = 0; w < NCCTRL_WIN; ++w) {
2825 t3_write_reg(adap, A_TP_CCTRL_TABLE,
2826 0xffff0000 | (mtu << 5) | w);
2827 incr[mtu][w] = t3_read_reg(adap, A_TP_CCTRL_TABLE) &
2833 * t3_tp_get_mib_stats - read TP's MIB counters
2834 * @adap: the adapter
2835 * @tps: holds the returned counter values
2837 * Returns the values of TP's MIB counters.
2839 void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
/* Bulk indirect read: the stats struct is treated as an array of u32s. */
2841 t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
2842 sizeof(*tps) / sizeof(u32), 0);
/* Program a ULP RX/TX region's lower and upper limits and advance the
 * running cursor @start.  Multi-statement macros; the ulp_region
 * continuation is partially elided in this extract. */
2845 #define ulp_region(adap, name, start, len) \
2846 t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
2847 t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
2848 (start) + (len) - 1); \
2851 #define ulptx_region(adap, name, start, len) \
2852 t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
2853 t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
2854 (start) + (len) - 1)
/* Carve the Rx payload memory into the ULP protocol regions. */
2856 static void ulp_config(struct adapter *adap, const struct tp_params *p)
2858 unsigned int m = p->chan_rx_size;
2860 ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
2861 ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
2862 ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
2863 ulp_region(adap, STAG, m, p->chan_rx_size / 4);
2864 ulp_region(adap, RQ, m, p->chan_rx_size / 4);
2865 ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
2866 ulp_region(adap, PBL, m, p->chan_rx_size / 4);
2867 t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
2871 * t3_set_proto_sram - set the contents of the protocol sram
2872 * @adapter: the adapter
2873 * @data: the protocol image
2875 * Write the contents of the protocol SRAM.
2877 int t3_set_proto_sram(struct adapter *adap, u8 *data)
2880 u32 *buf = (u32 *)data;
/* Each SRAM line is five big-endian words pushed through FIELD5..FIELD1,
 * then committed by writing (line << 1) | write-bit to FIELD0. */
2882 for (i = 0; i < PROTO_SRAM_LINES; i++) {
2883 t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, cpu_to_be32(*buf++));
2884 t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, cpu_to_be32(*buf++));
2885 t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, cpu_to_be32(*buf++));
2886 t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, cpu_to_be32(*buf++));
2887 t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, cpu_to_be32(*buf++));
2889 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
2890 if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
2893 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0);
/*
 * t3_config_trace_filter - program a packet trace filter
 *
 * Packs the 5-tuple (plus VLAN and interface) from @tp into four
 * key/mask word pairs and writes them, interleaved, into the Tx or Rx
 * trace filter selected by @filter_index.  @invert and @enable are
 * encoded as flag bits in key[3].
 */
2898 void t3_config_trace_filter(struct adapter *adapter,
2899 const struct trace_params *tp, int filter_index,
2900 int invert, int enable)
2902 u32 addr, key[4], mask[4];
2904 key[0] = tp->sport | (tp->sip << 16);
2905 key[1] = (tp->sip >> 16) | (tp->dport << 16);
2907 key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
2909 mask[0] = tp->sport_mask | (tp->sip_mask << 16);
2910 mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
2911 mask[2] = tp->dip_mask;
2912 mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
2915 key[3] |= (1 << 29);
2917 key[3] |= (1 << 28);
/* filter 0 = Tx trace, filter 1 = Rx trace; 8 consecutive TP PIO regs. */
2919 addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
2920 tp_wr_indirect(adapter, addr++, key[0]);
2921 tp_wr_indirect(adapter, addr++, mask[0]);
2922 tp_wr_indirect(adapter, addr++, key[1]);
2923 tp_wr_indirect(adapter, addr++, mask[1]);
2924 tp_wr_indirect(adapter, addr++, key[2]);
2925 tp_wr_indirect(adapter, addr++, mask[2]);
2926 tp_wr_indirect(adapter, addr++, key[3]);
2927 tp_wr_indirect(adapter, addr, mask[3]);
/* Read back to flush the posted indirect writes. */
2928 t3_read_reg(adapter, A_TP_PIO_DATA);
2932 * t3_config_sched - configure a HW traffic scheduler
2933 * @adap: the adapter
2934 * @kbps: target rate in Kbps
2935 * @sched: the scheduler index
2937 * Configure a HW scheduler for the target rate
2939 int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
2941 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
2942 unsigned int clk = adap->params.vpd.cclk * 1000;
2943 unsigned int selected_cpt = 0, selected_bpt = 0;
/* Search all (clocks-per-tick, bytes-per-tick) pairs for the closest
 * achievable rate; kbps is converted to bytes/sec first. */
2946 kbps *= 125; /* -> bytes */
2947 for (cpt = 1; cpt <= 255; cpt++) {
2949 bpt = (kbps + tps / 2) / tps;
2950 if (bpt > 0 && bpt <= 255) {
2952 delta = v >= kbps ? v - kbps : kbps - v;
2953 if (delta <= mindelta) {
2958 } else if (selected_cpt)
/* Two schedulers share one rate-limit register: even index in the low
 * half, odd index in the high half. */
2964 t3_write_reg(adap, A_TP_TM_PIO_ADDR,
2965 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
2966 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
2968 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
2970 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
2971 t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
/*
 * tp_init - bring up the TP after reset; offload-capable cards also get
 * their timers programmed and the free-list initialization kicked off.
 */
2975 static int tp_init(struct adapter *adap, const struct tp_params *p)
2980 t3_set_vlan_accel(adap, 3, 0);
2982 if (is_offload(adap)) {
2983 tp_set_timers(adap, adap->params.vpd.cclk * 1000);
2984 t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
2985 busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
2988 CH_ERR(adap, "TP initialization timed out\n");
2992 t3_write_reg(adap, A_TP_RESET, F_TPRESET);
/* Set which ports are active in the MPS; rejects bits beyond nports. */
2996 int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
2998 if (port_mask & ~((1 << adap->params.nports) - 1))
3000 t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
3001 port_mask << S_PORT0ACTIVE);
3006 * Perform the bits of HW initialization that are dependent on the number
3007 * of available ports.
3009 static void init_hw_for_avail_ports(struct adapter *adap, int nports)
/* Single-port branch: disable round-robin arbitration, enable port 0. */
3014 t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
3015 t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
3016 t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_TPTXPORT0EN |
3017 F_PORT0ACTIVE | F_ENFORCEPKT);
3018 t3_write_reg(adap, A_PM1_TX_CFG, 0xffffffff);
/* Two-port branch: round-robin between ports with equal DMA weights. */
3020 t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
3021 t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
3022 t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
3023 V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
3024 t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
3025 F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
3027 t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
3028 t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
3029 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3030 V_TX_MOD_QUEUE_REQ_MAP(0xaa));
/* Alternate Tx modulation queues between the two channels. */
3031 for (i = 0; i < 16; i++)
3032 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
3033 (i << 16) | 0x1010);
/*
 * calibrate_xgm - calibrate the XGMAC's I/O impedance.
 * XAUI parts: retry the hardware auto-calibration up to 5 times and
 * latch the resulting impedance code.  RGMII parts: program fixed
 * pull-up/pull-down values instead.
 */
3037 static int calibrate_xgm(struct adapter *adapter)
3039 if (uses_xaui(adapter)) {
3042 for (i = 0; i < 5; ++i) {
3043 t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
3044 t3_read_reg(adapter, A_XGM_XAUI_IMP);
3046 v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
3047 if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
3048 t3_write_reg(adapter, A_XGM_XAUI_IMP,
3049 V_XAUIIMP(G_CALIMP(v) >> 2));
3053 CH_ERR(adapter, "MAC calibration failed\n");
3056 t3_write_reg(adapter, A_XGM_RGMII_IMP,
3057 V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3058 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3059 F_XGM_IMPSETUPDATE);
/*
 * calibrate_xgm_t3b - T3B variant of the RGMII impedance calibration.
 * The toggle sequence (reset, update pulse, calibration-update pulse)
 * is order-sensitive per the hardware's calibration state machine.
 */
3064 static void calibrate_xgm_t3b(struct adapter *adapter)
3066 if (!uses_xaui(adapter)) {
3067 t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
3068 F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3069 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
3070 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
3071 F_XGM_IMPSETUPDATE);
3072 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3074 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
3075 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
/* MC7 (DDR SDRAM controller) timing parameters, presumably in memory
 * clock cycles; one RefCyc entry per supported density code. */
3079 struct mc7_timing_params {
3080 unsigned char ActToPreDly;
3081 unsigned char ActToRdWrDly;
3082 unsigned char PreCyc;
3083 unsigned char RefCyc[5];
3084 unsigned char BkCyc;
3085 unsigned char WrToRdDly;
3086 unsigned char RdToWrDly;
3090 * Write a value to a register and check that the write completed. These
3091 * writes normally complete in a cycle or two, so one read should suffice.
3092 * The very first read exists to flush the posted write to the device.
3094 static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
3096 t3_write_reg(adapter, addr, val);
3097 t3_read_reg(adapter, addr); /* flush */
3098 if (!(t3_read_reg(adapter, addr) & F_BUSY))
3100 CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
/*
 * mc7_init - bring up one MC7 memory controller instance.
 * Follows the JEDEC-style DRAM init sequence: I/O calibration, timing
 * programming, clock enable, DLL enable, mode-register writes, refresh
 * setup, then a BIST pass over the whole array before enabling normal
 * accesses.  The register-write order is mandated by the hardware.
 */
3104 static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
3106 static const unsigned int mc7_mode[] = {
3107 0x632, 0x642, 0x652, 0x432, 0x442
3109 static const struct mc7_timing_params mc7_timings[] = {
3110 {12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
3111 {12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
3112 {12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
3113 {9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
3114 {9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
3118 unsigned int width, density, slow, attempts;
3119 struct adapter *adapter = mc7->adapter;
3120 const struct mc7_timing_params *p = &mc7_timings[mem_type];
3125 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3126 slow = val & F_SLOW;
3127 width = G_WIDTH(val);
3128 density = G_DEN(val);
3130 t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
3131 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
/* Single-pass I/O calibration; fault or still-busy means failure. */
3135 t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
3136 t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
3138 if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
3139 (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
3140 CH_ERR(adapter, "%s MC7 calibration timed out\n",
3146 t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
3147 V_ACTTOPREDLY(p->ActToPreDly) |
3148 V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
3149 V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
3150 V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
3152 t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
3153 val | F_CLKEN | F_TERM150);
3154 t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
3157 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
/* Precharge-all / extended-mode-register sequence; each write must
 * complete before the next. */
3162 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3163 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
3164 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
3165 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3169 t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
3170 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
/* Precharge, two refreshes, then program the real mode registers. */
3174 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3175 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3176 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3177 wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
3178 mc7_mode[mem_type]) ||
3179 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
3180 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3183 /* clock value is in KHz */
3184 mc7_clock = mc7_clock * 7812 + mc7_clock / 2; /* ns */
3185 mc7_clock /= 1000000; /* KHz->MHz, ns->us */
3187 t3_write_reg(adapter, mc7->offset + A_MC7_REF,
3188 F_PERREFEN | V_PREREFDIV(mc7_clock));
3189 t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */
/* BIST write pass over the full array to initialize ECC. */
3191 t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
3192 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
3193 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
3194 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
3195 (mc7->size << width) - 1);
3196 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
3197 t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */
3202 val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
3203 } while ((val & F_BUSY) && --attempts);
3205 CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
3209 /* Enable normal memory accesses. */
3210 t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
/*
 * config_pcie - tune PCIe ack latency and replay timer.
 * The lookup tables are indexed by [log2(link width)][payload size code];
 * values are presumably in symbol times per the PCIe spec formulas --
 * TODO confirm units against the PCI Express base specification.
 */
3217 static void config_pcie(struct adapter *adap)
3219 static const u16 ack_lat[4][6] = {
3220 {237, 416, 559, 1071, 2095, 4143},
3221 {128, 217, 289, 545, 1057, 2081},
3222 {73, 118, 154, 282, 538, 1050},
3223 {67, 107, 86, 150, 278, 534}
3225 static const u16 rpl_tmr[4][6] = {
3226 {711, 1248, 1677, 3213, 6285, 12429},
3227 {384, 651, 867, 1635, 3171, 6243},
3228 {219, 354, 462, 846, 1614, 3150},
3229 {201, 321, 258, 450, 834, 1602}
3233 unsigned int log2_width, pldsize;
3234 unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
3236 pci_read_config_word(adap->pdev,
3237 adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
3239 pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
3240 pci_read_config_word(adap->pdev,
3241 adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
3244 fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
/* Rev 0 lacks a separate Rx fast-training count; reuse the Tx value. */
3245 fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
3246 G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
3247 log2_width = fls(adap->params.pci.width) - 1;
3248 acklat = ack_lat[log2_width][pldsize];
3249 if (val & 1) /* check LOsEnable */
3250 acklat += fst_trn_tx * 4;
3251 rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
/* T3A (rev 0) uses a different ACKLAT field layout. */
3253 if (adap->params.rev == 0)
3254 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
3255 V_T3A_ACKLAT(M_T3A_ACKLAT),
3256 V_T3A_ACKLAT(acklat));
3258 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
3261 t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
3262 V_REPLAYLMT(rpllmt));
/* Clear any latched PCIe errors, then enable DMA stop / CPL decode. */
3264 t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
3265 t3_set_reg_field(adap, A_PCIE_CFG, 0,
3266 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
3270 * Initialize and configure T3 HW modules. This performs the
3271 * initialization steps that need to be done once after a card is reset.
3272 * MAC and PHY initialization is handled separately whenever a port is enabled.
3274 * fw_params are passed to FW and their value is platform dependent. Only the
3275 * top 8 bits are available for use, the rest must be 0.
3277 int t3_init_hw(struct adapter *adapter, u32 fw_params)
3279 int err = -EIO, attempts, i;
3280 const struct vpd_params *vpd = &adapter->params.vpd;
/* MAC impedance calibration first; method depends on silicon rev. */
3282 if (adapter->params.rev > 0)
3283 calibrate_xgm_t3b(adapter);
3284 else if (calibrate_xgm(adapter))
3288 partition_mem(adapter, &adapter->params.tp);
/* Bring up all three MC7 memories and the MC5 TCAM. */
3290 if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
3291 mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
3292 mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
3293 t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
3294 adapter->params.mc5.nfilters,
3295 adapter->params.mc5.nroutes))
/* Clear the first 32 CQ contexts to a known state. */
3298 for (i = 0; i < 32; i++)
3299 if (clear_sge_ctxt(adapter, i, F_CQ))
3303 if (tp_init(adapter, &adapter->params.tp))
3306 t3_tp_set_coalescing_size(adapter,
3307 min(adapter->params.sge.max_pkt_size,
3308 MAX_RX_COALESCING_LEN), 1);
3309 t3_tp_set_max_rxsize(adapter,
3310 min(adapter->params.sge.max_pkt_size, 16384U));
3311 ulp_config(adapter, &adapter->params.tp);
/* Bus-specific tuning: PCIe gets ack/replay setup, PCI-X gets flags. */
3313 if (is_pcie(adapter))
3314 config_pcie(adapter);
3316 t3_set_reg_field(adapter, A_PCIX_CFG, 0,
3317 F_DMASTOPEN | F_CLIDECEN);
3319 if (adapter->params.rev == T3_REV_C)
3320 t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
3321 F_CFG_CQE_SOP_MASK);
3323 t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
3324 t3_write_reg(adapter, A_PM1_RX_MODE, 0);
3325 t3_write_reg(adapter, A_PM1_TX_MODE, 0);
3326 init_hw_for_avail_ports(adapter, adapter->params.nports);
3327 t3_sge_init(adapter, &adapter->params.sge);
/* Hand fw_params to the uP and point it at the flash boot image. */
3329 t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
3330 t3_write_reg(adapter, A_CIM_BOOT_CFG,
3331 V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
3332 t3_read_reg(adapter, A_CIM_BOOT_CFG); /* flush */
3335 do { /* wait for uP to initialize */
3337 } while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
3339 CH_ERR(adapter, "uP initialization timed out\n");
3349 * get_pci_mode - determine a card's PCI mode
3350 * @adapter: the adapter
3351 * @p: where to store the PCI settings
3353 * Determines a card's PCI mode and associated parameters, such as speed
3356 static void __devinit get_pci_mode(struct adapter *adapter,
3357 struct pci_params *p)
3359 static unsigned short speed_map[] = { 33, 66, 100, 133 };
3360 u32 pci_mode, pcie_cap;
3362 pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
3366 p->variant = PCI_VARIANT_PCIE;
3367 p->pcie_cap_addr = pcie_cap;
3368 pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
3370 p->width = (val >> 4) & 0x3f;
3374 pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
3375 p->speed = speed_map[G_PCLKRANGE(pci_mode)];
3376 p->width = (pci_mode & F_64BIT) ? 64 : 32;
3377 pci_mode = G_PCIXINITPAT(pci_mode);
3379 p->variant = PCI_VARIANT_PCI;
3380 else if (pci_mode < 4)
3381 p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
3382 else if (pci_mode < 8)
3383 p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
3385 p->variant = PCI_VARIANT_PCIX_266_MODE2;
3389 * init_link_config - initialize a link's SW state
3390 * @lc: structure holding the link state
3391 * @ai: information about the current card
3393 * Initializes the SW state maintained for each link, including the link's
3394 * capabilities and default speed/duplex/flow-control/autonegotiation
3397 static void __devinit init_link_config(struct link_config *lc,
3400 lc->supported = caps;
3401 lc->requested_speed = lc->speed = SPEED_INVALID;
3402 lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3403 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3404 if (lc->supported & SUPPORTED_Autoneg) {
3405 lc->advertising = lc->supported;
3406 lc->autoneg = AUTONEG_ENABLE;
3407 lc->requested_fc |= PAUSE_AUTONEG;
3409 lc->advertising = 0;
3410 lc->autoneg = AUTONEG_DISABLE;
3415 * mc7_calc_size - calculate MC7 memory size
3416 * @cfg: the MC7 configuration
3418 * Calculates the size of an MC7 memory in bytes from the value of its
3419 * configuration register.
3421 static unsigned int __devinit mc7_calc_size(u32 cfg)
3423 unsigned int width = G_WIDTH(cfg);
3424 unsigned int banks = !!(cfg & F_BKS) + 1;
3425 unsigned int org = !!(cfg & F_ORG) + 1;
3426 unsigned int density = G_DEN(cfg);
3427 unsigned int MBs = ((256 << density) * banks) / (org << width);
3432 static void __devinit mc7_prep(struct adapter *adapter, struct mc7 *mc7,
3433 unsigned int base_addr, const char *name)
3437 mc7->adapter = adapter;
3439 mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3440 cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3441 mc7->size = mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
3442 mc7->width = G_WIDTH(cfg);
3445 void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
3447 mac->adapter = adapter;
3448 mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
3451 if (adapter->params.rev == 0 && uses_xaui(adapter)) {
3452 t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
3453 is_10G(adapter) ? 0x2901c04 : 0x2301c04);
3454 t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
3459 void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
3461 u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);
3463 mi1_init(adapter, ai);
3464 t3_write_reg(adapter, A_I2C_CFG, /* set for 80KHz */
3465 V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
3466 t3_write_reg(adapter, A_T3DBG_GPIO_EN,
3467 ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
3468 t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
3469 t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));
3471 if (adapter->params.rev == 0 || !uses_xaui(adapter))
3474 /* Enable MAC clocks so we can access the registers */
3475 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3476 t3_read_reg(adapter, A_XGM_PORT_CFG);
3478 val |= F_CLKDIVRESET_;
3479 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3480 t3_read_reg(adapter, A_XGM_PORT_CFG);
3481 t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
3482 t3_read_reg(adapter, A_XGM_PORT_CFG);
3486 * Reset the adapter.
3487 * Older PCIe cards lose their config space during reset, PCI-X
3490 static int t3_reset_adapter(struct adapter *adapter)
3492 int i, save_and_restore_pcie =
3493 adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
3496 if (save_and_restore_pcie)
3497 pci_save_state(adapter->pdev);
3498 t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
3501 * Delay. Give Some time to device to reset fully.
3502 * XXX The delay time should be modified.
3504 for (i = 0; i < 10; i++) {
3506 pci_read_config_word(adapter->pdev, 0x00, &devid);
3507 if (devid == 0x1425)
3511 if (devid != 0x1425)
3514 if (save_and_restore_pcie)
3515 pci_restore_state(adapter->pdev);
3519 static int __devinit init_parity(struct adapter *adap)
3523 if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
3526 for (err = i = 0; !err && i < 16; i++)
3527 err = clear_sge_ctxt(adap, i, F_EGRESS);
3528 for (i = 0xfff0; !err && i <= 0xffff; i++)
3529 err = clear_sge_ctxt(adap, i, F_EGRESS);
3530 for (i = 0; !err && i < SGE_QSETS; i++)
3531 err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
3535 t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
3536 for (i = 0; i < 4; i++)
3537 for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
3538 t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
3539 F_IBQDBGWR | V_IBQDBGQID(i) |
3540 V_IBQDBGADDR(addr));
3541 err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
3542 F_IBQDBGBUSY, 0, 2, 1);
3550 * Initialize adapter SW state for the various HW modules, set initial values
3551 * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
3554 int __devinit t3_prep_adapter(struct adapter *adapter,
3555 const struct adapter_info *ai, int reset)
3558 unsigned int i, j = 0;
3560 get_pci_mode(adapter, &adapter->params.pci);
3562 adapter->params.info = ai;
3563 adapter->params.nports = ai->nports;
3564 adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
3565 adapter->params.linkpoll_period = 0;
3566 adapter->params.stats_update_period = is_10G(adapter) ?
3567 MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
3568 adapter->params.pci.vpd_cap_addr =
3569 pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
3570 ret = get_vpd_params(adapter, &adapter->params.vpd);
3574 if (reset && t3_reset_adapter(adapter))
3577 t3_sge_prep(adapter, &adapter->params.sge);
3579 if (adapter->params.vpd.mclk) {
3580 struct tp_params *p = &adapter->params.tp;
3582 mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
3583 mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
3584 mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
3586 p->nchan = ai->nports;
3587 p->pmrx_size = t3_mc7_size(&adapter->pmrx);
3588 p->pmtx_size = t3_mc7_size(&adapter->pmtx);
3589 p->cm_size = t3_mc7_size(&adapter->cm);
3590 p->chan_rx_size = p->pmrx_size / 2; /* only 1 Rx channel */
3591 p->chan_tx_size = p->pmtx_size / p->nchan;
3592 p->rx_pg_size = 64 * 1024;
3593 p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
3594 p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
3595 p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
3596 p->ntimer_qs = p->cm_size >= (128 << 20) ||
3597 adapter->params.rev > 0 ? 12 : 6;
3600 adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
3601 t3_mc7_size(&adapter->pmtx) &&
3602 t3_mc7_size(&adapter->cm);
3604 if (is_offload(adapter)) {
3605 adapter->params.mc5.nservers = DEFAULT_NSERVERS;
3606 adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
3607 DEFAULT_NFILTERS : 0;
3608 adapter->params.mc5.nroutes = 0;
3609 t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
3611 init_mtus(adapter->params.mtus);
3612 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3615 early_hw_init(adapter, ai);
3616 ret = init_parity(adapter);
3620 for_each_port(adapter, i) {
3622 struct port_info *p = adap2pinfo(adapter, i);
3624 while (!adapter->params.vpd.port_type[j])
3627 p->port_type = &port_types[adapter->params.vpd.port_type[j]];
3628 p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
3630 mac_prep(&p->mac, adapter, j);
3634 * The VPD EEPROM stores the base Ethernet address for the
3635 * card. A port's address is derived from the base by adding
3636 * the port's index to the base's low octet.
3638 memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
3639 hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
3641 memcpy(adapter->port[i]->dev_addr, hw_addr,
3643 memcpy(adapter->port[i]->perm_addr, hw_addr,
3645 init_link_config(&p->link_config, p->port_type->caps);
3646 p->phy.ops->power_down(&p->phy, 1);
3647 if (!(p->port_type->caps & SUPPORTED_IRQ))
3648 adapter->params.linkpoll_period = 10;
3654 void t3_led_ready(struct adapter *adapter)
3656 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,