#include <linux/dma-mapping.h>
#include <net/checksum.h>
+#include <net/ip.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>
-#ifdef CONFIG_SPARC64
+#ifdef CONFIG_SPARC
#include <asm/idprom.h>
-#include <asm/oplib.h>
-#include <asm/pbm.h>
+#include <asm/prom.h>
#endif
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define DRV_MODULE_NAME "tg3"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "3.73"
-#define DRV_MODULE_RELDATE "February 12, 2007"
+#define DRV_MODULE_VERSION "3.86"
+#define DRV_MODULE_RELDATE "November 9, 2007"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
return (readl(tp->regs + off));
}
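+/* The APE (Application Processing Engine) is the management processor
+ * on newer chips; its registers live in their own mapping (tp->aperegs),
+ * separate from the regular MAC register window.
+ */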
+static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
+{
+ writel(val, tp->aperegs + off);
+}
+
+static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
+{
+ return (readl(tp->aperegs + off));
+}
+
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
unsigned long flags;
spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
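+/* The driver and the APE firmware arbitrate shared resources through a
+ * bank of eight hardware locks; each lock has a request and a grant
+ * register, spaced 4 bytes apart per lock.
+ */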
+static void tg3_ape_lock_init(struct tg3 *tp)
+{
+ int i;
+
+ /* Make sure the driver doesn't hold any stale locks. */
+ for (i = 0; i < 8; i++)
+ tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
+ APE_LOCK_GRANT_DRIVER);
+}
+
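+/* Acquire an APE hardware lock: post a driver request in the lock's
+ * request register, then poll the matching grant register for up to
+ * 1 ms. On timeout, revoke the request and return -EBUSY.
+ */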
+static int tg3_ape_lock(struct tg3 *tp, int locknum)
+{
+ int i, off;
+ int ret = 0;
+ u32 status;
+
+ if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
+ return 0;
+
+ switch (locknum) {
+ case TG3_APE_LOCK_MEM:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ off = 4 * locknum;
+
+ tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
+
+ /* Wait for up to 1 millisecond to acquire lock. */
+ for (i = 0; i < 100; i++) {
+ status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
+ if (status == APE_LOCK_GRANT_DRIVER)
+ break;
+ udelay(10);
+ }
+
+ if (status != APE_LOCK_GRANT_DRIVER) {
+ /* Revoke the lock request. */
+ tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
+ APE_LOCK_GRANT_DRIVER);
+
+ ret = -EBUSY;
+ }
+
+ return ret;
+}
+
+static void tg3_ape_unlock(struct tg3 *tp, int locknum)
+{
+ int off;
+
+ if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
+ return;
+
+ switch (locknum) {
+ case TG3_APE_LOCK_MEM:
+ break;
+ default:
+ return;
+ }
+
+ off = 4 * locknum;
+ tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
+}
+
static void tg3_disable_ints(struct tg3 *tp)
{
tw32(TG3PCI_MISC_HOST_CTRL,
static inline void tg3_netif_stop(struct tg3 *tp)
{
tp->dev->trans_start = jiffies; /* prevent tx timeout */
- netif_poll_disable(tp->dev);
+ napi_disable(&tp->napi);
netif_tx_disable(tp->dev);
}
* so long as all callers are assured to have free tx slots
* (such as after tg3_init_hw)
*/
- netif_poll_enable(tp->dev);
+ napi_enable(&tp->napi);
tp->hw_status->status |= SD_STATUS_UPDATED;
tg3_enable_ints(tp);
}
u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
u32 orig_clock_ctrl;
- if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
+ if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
+ (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
return;
orig_clock_ctrl = clock_ctrl;
return ret;
}
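+/* Enable or disable automatic MDI/MDI-X crossover on copper PHYs.
+ * On the 5906 the control bit sits behind the EPHY shadow-register
+ * window; other PHYs expose it through the AUX_CTRL misc shadow
+ * register, which requires the write-enable bit to take effect.
+ */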
+static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
+{
+ u32 phy;
+
+ if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
+ (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
+ return;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+ u32 ephy;
+
+ if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
+ tg3_writephy(tp, MII_TG3_EPHY_TEST,
+ ephy | MII_TG3_EPHY_SHADOW_EN);
+ if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
+ if (enable)
+ phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
+ else
+ phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
+ tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
+ }
+ tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
+ }
+ } else {
+ phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
+ MII_TG3_AUXCTL_SHDWSEL_MISC;
+ if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
+ !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
+ if (enable)
+ phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
+ else
+ phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
+ phy |= MII_TG3_AUXCTL_MISC_WREN;
+ tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
+ }
+ }
+}
+
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
u32 val;
if (err)
return err;
+ if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
+ u32 val;
+
+ val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
+ if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
+ CPMU_LSPD_1000MB_MACCLK_12_5) {
+ val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
+ udelay(40);
+ tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
+ }
+
+ /* Disable GPHY autopowerdown. */
+ tg3_writephy(tp, MII_TG3_MISC_SHDW,
+ MII_TG3_MISC_SHDW_WREN |
+ MII_TG3_MISC_SHDW_APD_SEL |
+ MII_TG3_MISC_SHDW_APD_WKTM_84MS);
+ }
+
out:
if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
}
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
- u32 phy_reg;
-
/* adjust output voltage */
tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
-
- if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phy_reg)) {
- u32 phy_reg2;
-
- tg3_writephy(tp, MII_TG3_EPHY_TEST,
- phy_reg | MII_TG3_EPHY_SHADOW_EN);
- /* Enable auto-MDIX */
- if (!tg3_readphy(tp, 0x10, &phy_reg2))
- tg3_writephy(tp, 0x10, phy_reg2 | 0x4000);
- tg3_writephy(tp, MII_TG3_EPHY_TEST, phy_reg);
- }
}
+ tg3_phy_toggle_automdix(tp, 1);
tg3_phy_set_wirespeed(tp);
return 0;
}
}
}
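+/* Return nonzero when MAC_MODE_LINK_POLARITY should be set on 5700:
+ * always in LED mode PHY_2, at all speeds except 10 Mbps on BCM5411
+ * PHYs, and only at 10 Mbps otherwise.
+ */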
+static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
+{
+ if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
+ return 1;
+ else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
+ if (speed != SPEED_10)
+ return 1;
+ } else if (speed == SPEED_10)
+ return 1;
+
+ return 0;
+}
+
static int tg3_setup_phy(struct tg3 *, int);
#define RESET_KIND_SHUTDOWN 0
static void tg3_power_down_phy(struct tg3 *tp)
{
+ u32 val;
+
if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
}
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
- u32 val;
-
tg3_bmcr_reset(tp);
val = tr32(GRC_MISC_CFG);
tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
(GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
return;
+
+ if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
+ val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
+ val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
+ val |= CPMU_LSPD_1000MB_MACCLK_12_5;
+ tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
+ }
+
tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
msleep(1);
}
}
- tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
- WOL_DRV_STATE_SHUTDOWN |
- WOL_DRV_WOL | WOL_SET_MAGIC_PKT);
+ if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
+ tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
+ WOL_DRV_STATE_SHUTDOWN |
+ WOL_DRV_WOL |
+ WOL_SET_MAGIC_PKT);
pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
else
mac_mode = MAC_MODE_PORT_MODE_MII;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
- !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
- mac_mode |= MAC_MODE_LINK_POLARITY;
+ mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
+ ASIC_REV_5700) {
+ u32 speed = (tp->tg3_flags &
+ TG3_FLAG_WOL_SPEED_100MB) ?
+ SPEED_100 : SPEED_10;
+ if (tg3_5700_link_polarity(tp, speed))
+ mac_mode |= MAC_MODE_LINK_POLARITY;
+ else
+ mac_mode &= ~MAC_MODE_LINK_POLARITY;
+ }
} else {
mac_mode = MAC_MODE_PORT_MODE_TBI;
}
tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
CLOCK_CTRL_PWRDOWN_PLL133, 40);
} else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
+ (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
(GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
/* do nothing */
} else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
}
if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
- !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
+ !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
+ !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
tg3_power_down_phy(tp);
tg3_frob_aux_power(tp);
if (tp->link_config.active_duplex == DUPLEX_HALF)
tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
- tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
- if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
- (current_link_up == 1 &&
- tp->link_config.active_speed == SPEED_10))
- tp->mac_mode |= MAC_MODE_LINK_POLARITY;
- } else {
- if (current_link_up == 1)
+ if (current_link_up == 1 &&
+ tg3_5700_link_polarity(tp, tp->link_config.active_speed))
tp->mac_mode |= MAC_MODE_LINK_POLARITY;
+ else
+ tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
}
/* ??? Without this setting Netgear GA302T PHY does not
{
int current_link_up = 0;
- if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
- tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
+ if (!(mac_status & MAC_STATUS_PCS_SYNCED))
goto out;
- }
if (tp->link_config.autoneg == AUTONEG_ENABLE) {
u32 flags;
tg3_setup_flow_control(tp, local_adv, remote_adv);
- tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
current_link_up = 1;
}
for (i = 0; i < 30; i++) {
} else {
/* Forcing 1000FD link up. */
current_link_up = 1;
- tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
udelay(40);
+
+ tw32_f(MAC_MODE, tp->mac_mode);
+ udelay(40);
}
out:
else
current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
- tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
- tw32_f(MAC_MODE, tp->mac_mode);
- udelay(40);
-
tp->hw_status->status =
(SD_STATUS_UPDATED |
(tp->hw_status->status & ~SD_STATUS_LINK_CHG));
err = tg3_setup_copper_phy(tp, force_reset);
}
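+ /* On 5784 A0/A1, re-derive the GRC_MISC_CFG clock prescaler from
+ * the MAC clock speed currently reported by the CPMU (62.5, 12.5,
+ * or 6.25 MHz).
+ */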
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
+ u32 val, scale;
+
+ val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
+ if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
+ scale = 65;
+ else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
+ scale = 6;
+ else
+ scale = 12;
+
+ val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
+ val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
+ tw32(GRC_MISC_CFG, val);
+ }
+
if (tp->link_config.active_speed == SPEED_1000 &&
tp->link_config.active_duplex == DUPLEX_HALF)
tw32(MAC_TX_LENGTHS,
}
}
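+ /* ASPM workaround: program the saved PCIe L1 entry threshold
+ * while the link is down, and raise the threshold to its maximum
+ * once the link is up.
+ */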
+ if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
+ u32 val = tr32(PCIE_PWR_MGMT_THRESH);
+ if (!netif_carrier_ok(tp->dev))
+ val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
+ tp->pwrmgmt_thresh;
+ else
+ val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
+ tw32(PCIE_PWR_MGMT_THRESH, val);
+ }
+
return err;
}
skb_reserve(copy_skb, 2);
skb_put(copy_skb, len);
pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
- memcpy(copy_skb->data, skb->data, len);
+ skb_copy_from_linear_data(skb, copy_skb->data, len);
pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
/* We'll reuse the original ring buffer. */
return received;
}
-static int tg3_poll(struct net_device *netdev, int *budget)
+static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
{
- struct tg3 *tp = netdev_priv(netdev);
struct tg3_hw_status *sblk = tp->hw_status;
- int done;
/* handle link change and other phy events */
if (!(tp->tg3_flags &
/* run TX completion thread */
if (sblk->idx[0].tx_consumer != tp->tx_cons) {
tg3_tx(tp);
- if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) {
- netif_rx_complete(netdev);
- schedule_work(&tp->reset_task);
- return 0;
- }
+ if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
+ return work_done;
}
/* run RX thread, within the bounds set by NAPI.
* All RX "locking" is done by ensuring outside
- * code synchronizes with dev->poll()
+ * code synchronizes with tg3->napi.poll()
*/
- if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
- int orig_budget = *budget;
- int work_done;
+ if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
+ work_done += tg3_rx(tp, budget - work_done);
- if (orig_budget > netdev->quota)
- orig_budget = netdev->quota;
+ return work_done;
+}
- work_done = tg3_rx(tp, orig_budget);
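+/* NAPI poll callback: run tg3_poll_work() until the budget is spent or
+ * no work remains, then complete the poll and let tg3_restart_ints()
+ * re-enable chip interrupts.
+ */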
+static int tg3_poll(struct napi_struct *napi, int budget)
+{
+ struct tg3 *tp = container_of(napi, struct tg3, napi);
+ int work_done = 0;
+ struct tg3_hw_status *sblk = tp->hw_status;
- *budget -= work_done;
- netdev->quota -= work_done;
- }
+ while (1) {
+ work_done = tg3_poll_work(tp, work_done, budget);
- if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
- tp->last_tag = sblk->status_tag;
- rmb();
- } else
- sblk->status &= ~SD_STATUS_UPDATED;
+ if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
+ goto tx_recovery;
+
+ if (unlikely(work_done >= budget))
+ break;
+
+ if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
+ /* tp->last_tag is used in tg3_restart_ints() below
+ * to tell the hw how much work has been processed,
+ * so we must read it before checking for more work.
+ */
+ tp->last_tag = sblk->status_tag;
+ rmb();
+ } else
+ sblk->status &= ~SD_STATUS_UPDATED;
- /* if no more work, tell net stack and NIC we're done */
- done = !tg3_has_work(tp);
- if (done) {
- netif_rx_complete(netdev);
- tg3_restart_ints(tp);
+ if (likely(!tg3_has_work(tp))) {
+ netif_rx_complete(tp->dev, napi);
+ tg3_restart_ints(tp);
+ break;
+ }
}
- return (done ? 0 : 1);
+ return work_done;
+
+tx_recovery:
+ /* work_done is guaranteed to be less than budget. */
+ netif_rx_complete(tp->dev, napi);
+ schedule_work(&tp->reset_task);
+ return work_done;
}
static void tg3_irq_quiesce(struct tg3 *tp)
*/
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
+ spin_lock_bh(&tp->lock);
if (irq_sync)
tg3_irq_quiesce(tp);
- spin_lock_bh(&tp->lock);
}
static inline void tg3_full_unlock(struct tg3 *tp)
prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
if (likely(!tg3_irq_sync(tp)))
- netif_rx_schedule(dev); /* schedule NAPI poll */
+ netif_rx_schedule(dev, &tp->napi);
return IRQ_HANDLED;
}
*/
tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
if (likely(!tg3_irq_sync(tp)))
- netif_rx_schedule(dev); /* schedule NAPI poll */
+ netif_rx_schedule(dev, &tp->napi);
return IRQ_RETVAL(1);
}
* Reading the PCI State register will confirm whether the
* interrupt is ours and will flush the status block.
*/
- if ((sblk->status & SD_STATUS_UPDATED) ||
- !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
- /*
- * Writing any value to intr-mbox-0 clears PCI INTA# and
- * chip-internal interrupt pending events.
- * Writing non-zero to intr-mbox-0 additional tells the
- * NIC to stop sending us irqs, engaging "in-intr-handler"
- * event coalescing.
- */
- tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
- 0x00000001);
- if (tg3_irq_sync(tp))
+ if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
+ if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
+ (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
+ handled = 0;
goto out;
- sblk->status &= ~SD_STATUS_UPDATED;
- if (likely(tg3_has_work(tp))) {
- prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
- netif_rx_schedule(dev); /* schedule NAPI poll */
- } else {
- /* No work, shared interrupt perhaps? re-enable
- * interrupts, and flush that PCI write
- */
- tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
- 0x00000000);
}
- } else { /* shared interrupt */
- handled = 0;
+ }
+
+ /*
+ * Writing any value to intr-mbox-0 clears PCI INTA# and
+ * chip-internal interrupt pending events.
+ * Writing non-zero to intr-mbox-0 additionally tells the
+ * NIC to stop sending us irqs, engaging "in-intr-handler"
+ * event coalescing.
+ *
+ * Flush the mailbox to de-assert the IRQ immediately to prevent
+ * spurious interrupts. The flush impacts performance but
+ * excessive spurious interrupts can be worse in some cases.
+ */
+ tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
+ if (tg3_irq_sync(tp))
+ goto out;
+ sblk->status &= ~SD_STATUS_UPDATED;
+ if (likely(tg3_has_work(tp))) {
+ prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
+ netif_rx_schedule(dev, &tp->napi);
+ } else {
+ /* No work, shared interrupt perhaps? re-enable
+ * interrupts, and flush that PCI write
+ */
+ tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+ 0x00000000);
}
out:
return IRQ_RETVAL(handled);
* Reading the PCI State register will confirm whether the
* interrupt is ours and will flush the status block.
*/
- if ((sblk->status_tag != tp->last_tag) ||
- !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
- /*
- * writing any value to intr-mbox-0 clears PCI INTA# and
- * chip-internal interrupt pending events.
- * writing non-zero to intr-mbox-0 additional tells the
- * NIC to stop sending us irqs, engaging "in-intr-handler"
- * event coalescing.
- */
- tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
- 0x00000001);
- if (tg3_irq_sync(tp))
+ if (unlikely(sblk->status_tag == tp->last_tag)) {
+ if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
+ (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
+ handled = 0;
goto out;
- if (netif_rx_schedule_prep(dev)) {
- prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
- /* Update last_tag to mark that this status has been
- * seen. Because interrupt may be shared, we may be
- * racing with tg3_poll(), so only update last_tag
- * if tg3_poll() is not scheduled.
- */
- tp->last_tag = sblk->status_tag;
- __netif_rx_schedule(dev);
}
- } else { /* shared interrupt */
- handled = 0;
+ }
+
+ /*
+ * writing any value to intr-mbox-0 clears PCI INTA# and
+ * chip-internal interrupt pending events.
+ * writing non-zero to intr-mbox-0 additionally tells the
+ * NIC to stop sending us irqs, engaging "in-intr-handler"
+ * event coalescing.
+ *
+ * Flush the mailbox to de-assert the IRQ immediately to prevent
+ * spurious interrupts. The flush impacts performance but
+ * excessive spurious interrupts can be worse in some cases.
+ */
+ tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
+ if (tg3_irq_sync(tp))
+ goto out;
+ if (netif_rx_schedule_prep(dev, &tp->napi)) {
+ prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
+ /* Update last_tag to mark that this status has been
+ * seen. Because interrupt may be shared, we may be
+ * racing with tg3_poll(), so only update last_tag
+ * if tg3_poll() is not scheduled.
+ */
+ tp->last_tag = sblk->status_tag;
+ __netif_rx_schedule(dev, &tp->napi);
}
out:
return IRQ_RETVAL(handled);
tg3_full_unlock(tp);
del_timer_sync(&tp->timer);
tp->irq_sync = 0;
- netif_poll_enable(tp->dev);
+ napi_enable(&tp->napi);
dev_close(tp->dev);
tg3_full_lock(tp, 0);
}
unsigned int restart_timer;
tg3_full_lock(tp, 0);
- tp->tg3_flags |= TG3_FLAG_IN_RESET_TASK;
if (!netif_running(tp->dev)) {
- tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
tg3_full_unlock(tp);
return;
}
mod_timer(&tp->timer, jiffies + 1);
out:
- tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;
-
tg3_full_unlock(tp);
}
len = skb_headlen(skb);
/* We are running in BH disabled context with netif_tx_lock
- * and TX reclaim runs via tp->poll inside of a software
+ * and TX reclaim runs via tp->napi.poll inside of a software
* interrupt. Furthermore, IRQ processing runs lockless so we have
* no IRQ context deadlocks to worry about either. Rejoice!
*/
entry = tp->tx_prod;
base_flags = 0;
mss = 0;
- if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
- (mss = skb_shinfo(skb)->gso_size) != 0) {
+ if ((mss = skb_shinfo(skb)->gso_size) != 0) {
int tcp_opt_len, ip_tcp_len;
if (skb_header_cloned(skb) &&
if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
else {
- tcp_opt_len = ((skb->h.th->doff - 5) * 4);
- ip_tcp_len = (skb->nh.iph->ihl * 4) +
- sizeof(struct tcphdr);
+ struct iphdr *iph = ip_hdr(skb);
- skb->nh.iph->check = 0;
- skb->nh.iph->tot_len = htons(mss + ip_tcp_len +
- tcp_opt_len);
+ tcp_opt_len = tcp_optlen(skb);
+ ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
+
+ iph->check = 0;
+ iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
mss |= (ip_tcp_len + tcp_opt_len) << 9;
}
base_flags |= (TXD_FLAG_CPU_PRE_DMA |
TXD_FLAG_CPU_POST_DMA);
- skb->h.th->check = 0;
+ tcp_hdr(skb)->check = 0;
}
else if (skb->ip_summed == CHECKSUM_PARTIAL)
/* Estimate the number of fragments in the worst case */
if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
netif_stop_queue(tp->dev);
- return NETDEV_TX_BUSY;
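+ /* Re-check after stopping the queue: the TX completion path
+ * may have freed descriptors in the meantime, in which case
+ * wake the queue back up instead of returning busy.
+ */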
+ if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
+ return NETDEV_TX_BUSY;
+
+ netif_wake_queue(tp->dev);
}
segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
len = skb_headlen(skb);
/* We are running in BH disabled context with netif_tx_lock
- * and TX reclaim runs via tp->poll inside of a software
+ * and TX reclaim runs via tp->napi.poll inside of a software
* interrupt. Furthermore, IRQ processing runs lockless so we have
* no IRQ context deadlocks to worry about either. Rejoice!
*/
if (skb->ip_summed == CHECKSUM_PARTIAL)
base_flags |= TXD_FLAG_TCPUDP_CSUM;
mss = 0;
- if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
- (mss = skb_shinfo(skb)->gso_size) != 0) {
+ if ((mss = skb_shinfo(skb)->gso_size) != 0) {
+ struct iphdr *iph;
int tcp_opt_len, ip_tcp_len, hdr_len;
if (skb_header_cloned(skb) &&
goto out_unlock;
}
- tcp_opt_len = ((skb->h.th->doff - 5) * 4);
- ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
+ tcp_opt_len = tcp_optlen(skb);
+ ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
hdr_len = ip_tcp_len + tcp_opt_len;
if (unlikely((ETH_HLEN + hdr_len) > 80) &&
- (tp->tg3_flags2 & TG3_FLG2_HW_TSO_1_BUG))
+ (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
return (tg3_tso_bug(tp, skb));
base_flags |= (TXD_FLAG_CPU_PRE_DMA |
TXD_FLAG_CPU_POST_DMA);
- skb->nh.iph->check = 0;
- skb->nh.iph->tot_len = htons(mss + hdr_len);
+ iph = ip_hdr(skb);
+ iph->check = 0;
+ iph->tot_len = htons(mss + hdr_len);
if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
- skb->h.th->check = 0;
+ tcp_hdr(skb)->check = 0;
base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
- }
- else {
- skb->h.th->check =
- ~csum_tcpudp_magic(skb->nh.iph->saddr,
- skb->nh.iph->daddr,
- 0, IPPROTO_TCP, 0);
- }
+ } else
+ tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+ iph->daddr, 0,
+ IPPROTO_TCP,
+ 0);
if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
(GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
- if (tcp_opt_len || skb->nh.iph->ihl > 5) {
+ if (tcp_opt_len || iph->ihl > 5) {
int tsflags;
- tsflags = ((skb->nh.iph->ihl - 5) +
- (tcp_opt_len >> 2));
+ tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
mss |= (tsflags << 11);
}
} else {
- if (tcp_opt_len || skb->nh.iph->ihl > 5) {
+ if (tcp_opt_len || iph->ihl > 5) {
int tsflags;
- tsflags = ((skb->nh.iph->ihl - 5) +
- (tcp_opt_len >> 2));
+ tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
base_flags |= tsflags << 12;
}
}
}
}
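+/* Post an event to the APE firmware: verify the shared-memory segment
+ * signature and firmware-ready status, set the event (with its pending
+ * bit) under the memory lock, then write TG3_APE_EVENT to notify the APE.
+ */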
+static void tg3_ape_send_event(struct tg3 *tp, u32 event)
+{
+ int i;
+ u32 apedata;
+
+ apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
+ if (apedata != APE_SEG_SIG_MAGIC)
+ return;
+
+ apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
+ if (apedata != APE_FW_STATUS_READY)
+ return;
+
+ /* Wait for up to 1 millisecond for APE to service previous event. */
+ for (i = 0; i < 10; i++) {
+ if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
+ return;
+
+ apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
+
+ if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
+ tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
+ event | APE_EVENT_STATUS_EVENT_PENDING);
+
+ tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
+
+ if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
+ break;
+
+ udelay(100);
+ }
+
+ if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
+ tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
+}
+
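+/* Tell the APE firmware about driver state transitions (init, unload,
+ * suspend) so it can track the host driver's status.
+ */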
+static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
+{
+ u32 event;
+ u32 apedata;
+
+ if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
+ return;
+
+ switch (kind) {
+ case RESET_KIND_INIT:
+ tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
+ APE_HOST_SEG_SIG_MAGIC);
+ tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
+ APE_HOST_SEG_LEN_MAGIC);
+ apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
+ tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
+ tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
+ APE_HOST_DRIVER_ID_MAGIC);
+ tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
+ APE_HOST_BEHAV_NO_PHYLOCK);
+
+ event = APE_EVENT_STATUS_STATE_START;
+ break;
+ case RESET_KIND_SHUTDOWN:
+ event = APE_EVENT_STATUS_STATE_UNLOAD;
+ break;
+ case RESET_KIND_SUSPEND:
+ event = APE_EVENT_STATUS_STATE_SUSPEND;
+ break;
+ default:
+ return;
+ }
+
+ event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
+
+ tg3_ape_send_event(tp, event);
+}
+
/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
break;
};
}
+
+ if (kind == RESET_KIND_INIT ||
+ kind == RESET_KIND_SUSPEND)
+ tg3_ape_driver_state_change(tp, kind);
}
/* tp->lock is held. */
break;
};
}
+
+ if (kind == RESET_KIND_SHUTDOWN)
+ tg3_ape_driver_state_change(tp, kind);
}
/* tp->lock is held. */
return 0;
}
+/* Save PCI command register before chip reset */
+static void tg3_save_pci_state(struct tg3 *tp)
+{
+ pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
+}
+
+/* Restore PCI state after chip reset */
+static void tg3_restore_pci_state(struct tg3 *tp)
+{
+ u32 val;
+
+ /* Re-enable indirect register accesses. */
+ pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
+ tp->misc_host_ctrl);
+
+ /* Set MAX PCI retry to zero. */
+ val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
+ (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
+ val |= PCISTATE_RETRY_SAME_DMA;
+ /* Allow reads and writes to the APE register and memory space. */
+ if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
+ val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
+ PCISTATE_ALLOW_APE_SHMEM_WR;
+ pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
+
+ pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
+
+ if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
+ pcie_set_readrq(tp->pdev, 4096);
+ else {
+ pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
+ tp->pci_cacheline_sz);
+ pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
+ tp->pci_lat_timer);
+ }
+
+ /* Make sure PCI-X relaxed ordering bit is clear. */
+ if (tp->pcix_cap) {
+ u16 pcix_cmd;
+
+ pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
+ &pcix_cmd);
+ pcix_cmd &= ~PCI_X_CMD_ERO;
+ pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
+ pcix_cmd);
+ }
+
+ if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
+
+ /* Chip reset on 5780 will reset the MSI enable bit,
+ * so we need to restore it.
+ */
+ if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
+ u16 ctrl;
+
+ pci_read_config_word(tp->pdev,
+ tp->msi_cap + PCI_MSI_FLAGS,
+ &ctrl);
+ pci_write_config_word(tp->pdev,
+ tp->msi_cap + PCI_MSI_FLAGS,
+ ctrl | PCI_MSI_FLAGS_ENABLE);
+ val = tr32(MSGINT_MODE);
+ tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
+ }
+ }
+}
+
static void tg3_stop_fw(struct tg3 *);
/* tp->lock is held. */
*/
tp->nvram_lock_cnt = 0;
+ /* GRC_MISC_CFG core clock reset will clear the memory
+ * enable bit in PCI register 4 and the MSI enable bit
+ * on some chips, so we save relevant registers here.
+ */
+ tg3_save_pci_state(tp);
+
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
tw32(GRC_FASTBOOT_PC, 0);
/*
if (write_op == tg3_write_flush_reg32)
tp->write32 = tg3_write32;
+ /* Prevent the irq handler from reading or writing PCI registers
+ * during chip reset when the memory enable bit in the PCI command
+ * register may be cleared. The chip does not generate interrupts
+ * at this time, but the irq handler may still be called due to irq
+ * sharing or irqpoll.
+ */
+ tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
+ if (tp->hw_status) {
+ tp->hw_status->status = 0;
+ tp->hw_status->status_tag = 0;
+ }
+ tp->last_tag = 0;
+ smp_mb();
+ synchronize_irq(tp->pdev->irq);
+
/* do the reset */
val = GRC_MISC_CFG_CORECLK_RESET;
pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
}
- /* Re-enable indirect register accesses. */
- pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
- tp->misc_host_ctrl);
-
- /* Set MAX PCI retry to zero. */
- val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
- if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
- (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
- val |= PCISTATE_RETRY_SAME_DMA;
- pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
-
- pci_restore_state(tp->pdev);
-
- /* Make sure PCI-X relaxed ordering bit is clear. */
- pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
- val &= ~PCIX_CAPS_RELAXED_ORDERING;
- pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
+ tg3_restore_pci_state(tp);
- if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
- u32 val;
-
- /* Chip reset on 5780 will reset MSI enable bit,
- * so need to restore it.
- */
- if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
- u16 ctrl;
-
- pci_read_config_word(tp->pdev,
- tp->msi_cap + PCI_MSI_FLAGS,
- &ctrl);
- pci_write_config_word(tp->pdev,
- tp->msi_cap + PCI_MSI_FLAGS,
- ctrl | PCI_MSI_FLAGS_ENABLE);
- val = tr32(MSGINT_MODE);
- tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
- }
+ tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
+ val = 0;
+ if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
val = tr32(MEMARB_MODE);
- tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
-
- } else
- tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
+ tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
tg3_stop_fw(tp);
tw32(GRC_MODE, tp->grc_mode);
if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
- u32 val = tr32(0xc4);
+ val = tr32(0xc4);
tw32(0xc4, val | (1 << 15));
}
if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
- u32 val = tr32(0x7c00);
+ val = tr32(0x7c00);
tw32(0x7c00, val | (1 << 25));
}
/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
- if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
+ if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
+ !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
u32 val;
int i;
/* tp->lock is held. */
-static void __tg3_set_mac_addr(struct tg3 *tp)
+static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
{
u32 addr_high, addr_low;
int i;
(tp->dev->dev_addr[4] << 8) |
(tp->dev->dev_addr[5] << 0));
for (i = 0; i < 4; i++) {
+ if (i == 1 && skip_mac_1)
+ continue;
tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
}
{
struct tg3 *tp = netdev_priv(dev);
struct sockaddr *addr = p;
- int err = 0;
+ int err = 0, skip_mac_1 = 0;
if (!is_valid_ether_addr(addr->sa_data))
return -EINVAL;
return 0;
if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
- /* Reset chip so that ASF can re-init any MAC addresses it
- * needs.
- */
- tg3_netif_stop(tp);
- tg3_full_lock(tp, 1);
+ u32 addr0_high, addr0_low, addr1_high, addr1_low;
- tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
- err = tg3_restart_hw(tp, 0);
- if (!err)
- tg3_netif_start(tp);
- tg3_full_unlock(tp);
- } else {
- spin_lock_bh(&tp->lock);
- __tg3_set_mac_addr(tp);
- spin_unlock_bh(&tp->lock);
+ addr0_high = tr32(MAC_ADDR_0_HIGH);
+ addr0_low = tr32(MAC_ADDR_0_LOW);
+ addr1_high = tr32(MAC_ADDR_1_HIGH);
+ addr1_low = tr32(MAC_ADDR_1_LOW);
+
+ /* Skip MAC addr 1 if ASF is using it. */
+ if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
+ !(addr1_high == 0 && addr1_low == 0))
+ skip_mac_1 = 1;
}
+ spin_lock_bh(&tp->lock);
+ __tg3_set_mac_addr(tp, skip_mac_1);
+ spin_unlock_bh(&tp->lock);
return err;
}
tg3_write_sig_legacy(tp, RESET_KIND_INIT);
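+ /* On 5784 A0/A1, take the CPMU out of link-aware and link-idle
+ * mode and select the 6.25 MHz MAC clock for the 10MB, link-aware
+ * and host-access clock controls.
+ */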
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_5784_A1) {
+ val = tr32(TG3_CPMU_CTRL);
+ val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
+ tw32(TG3_CPMU_CTRL, val);
+
+ val = tr32(TG3_CPMU_LSPD_10MB_CLK);
+ val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
+ val |= CPMU_LSPD_10MB_MACCLK_6_25;
+ tw32(TG3_CPMU_LSPD_10MB_CLK, val);
+
+ val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
+ val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
+ val |= CPMU_LNK_AWARE_MACCLK_6_25;
+ tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
+
+ val = tr32(TG3_CPMU_HST_ACC);
+ val &= ~CPMU_HST_ACC_MACCLK_MASK;
+ val |= CPMU_HST_ACC_MACCLK_6_25;
+ tw32(TG3_CPMU_HST_ACC, val);
+ }
+
/* This works around an issue with Athlon chipsets on
* B3 tigon3 silicon. This bit has no effect on any
* other revision. But do not set this on PCI Express
- * chips.
+ * chips and don't even touch the clocks if the CPMU is present.
*/
- if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
- tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
- tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
+ if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
+ if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
+ tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
+ tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
+ }
if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
tw32(TG3PCI_PCISTATE, val);
}
+ if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
+ /* Allow reads and writes to the
+ * APE register and memory space.
+ */
+ val = tr32(TG3PCI_PCISTATE);
+ val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
+ PCISTATE_ALLOW_APE_SHMEM_WR;
+ tw32(TG3PCI_PCISTATE, val);
+ }
+
if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
/* Enable some hw fixes. */
val = tr32(TG3PCI_MSI_DATA);
if (err)
return err;
- /* This value is determined during the probe time DMA
- * engine test, tg3_test_dma.
- */
- tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
+ /* This value is determined during the probe time DMA
+ * engine test, tg3_test_dma.
+ */
+ tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
+ }
tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
GRC_MODE_4X_NIC_SEND_RINGS |
tp->rx_jumbo_ptr);
/* Initialize MAC address and backoff seed. */
- __tg3_set_mac_addr(tp);
+ __tg3_set_mac_addr(tp, 0);
/* MTU + ethernet header + FCS + optional VLAN tag */
tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
RDMAC_MODE_LNGREAD_ENAB);
- if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
- rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
+ rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
+ RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
+ RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
/* If statement applies to 5705 and 5750 PCI devices only */
if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
(GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
- (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
!(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
+ if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
+ !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
+ tp->mac_mode |= MAC_MODE_LINK_POLARITY;
tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
udelay(40);
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
+ tp->grc_local_ctrl &= ~gpio_mask;
tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
/* GPIO1 must be driven high for eeprom write protect */
/* Enable host coalescing bug fix */
if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787))
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761))
val |= (1 << 29);
tw32_f(WDMAC_MODE, val);
udelay(40);
- if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
- val = tr32(TG3PCI_X_CAPS);
+ if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
+ u16 pcix_cmd;
+
+ pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
+ &pcix_cmd);
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
- val &= ~PCIX_CAPS_BURST_MASK;
- val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
+ pcix_cmd &= ~PCI_X_CMD_MAX_READ;
+ pcix_cmd |= PCI_X_CMD_READ_2K;
} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
- val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
- val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
- if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
- val |= (tp->split_mode_max_reqs <<
- PCIX_CAPS_SPLIT_SHIFT);
+ pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
+ pcix_cmd |= PCI_X_CMD_READ_2K;
}
- tw32(TG3PCI_X_CAPS, val);
+ pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
+ pcix_cmd);
}
tw32_f(RDMAC_MODE, rdmac_mode);
tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
- tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ tw32(SNDDATAC_MODE,
+ SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
+ else
+ tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
+
tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
udelay(100);
tp->rx_mode = RX_MODE_ENABLE;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
tw32_f(MAC_RX_MODE, tp->rx_mode);
break;
};
+ if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
+ /* Write our heartbeat update interval (disabled) to the APE. */
+ tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
+ APE_HOST_HEARTBEAT_INT_DISABLE);
+
tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
return 0;
if (err)
return err;
- if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
- (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) &&
- (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) &&
- !((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) &&
- (tp->pdev_peer == tp->pdev))) {
+ if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
/* All MSI supporting chips should support tagged
* status. Assert that this is the case.
*/
return err;
}
+ napi_enable(&tp->napi);
+
tg3_full_lock(tp, 0);
err = tg3_init_hw(tp, 1);
tg3_full_unlock(tp);
if (err) {
+ napi_disable(&tp->napi);
free_irq(tp->pdev->irq, dev);
if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
pci_disable_msi(tp->pdev);
tg3_full_unlock(tp);
+ napi_disable(&tp->napi);
+
return err;
}
{
struct tg3 *tp = netdev_priv(dev);
- /* Calling flush_scheduled_work() may deadlock because
- * linkwatch_event() may be on the workqueue and it will try to get
- * the rtnl_lock which we are holding.
- */
- while (tp->tg3_flags & TG3_FLAG_IN_RESET_TASK)
- msleep(1);
+ napi_disable(&tp->napi);
+ cancel_work_sync(&tp->reset_task);
netif_stop_queue(dev);
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
tg3_free_rings(tp);
- tp->tg3_flags &=
- ~(TG3_FLAG_INIT_COMPLETE |
- TG3_FLAG_GOT_SERDES_FLOWCTL);
+ tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
tg3_full_unlock(tp);
buf = data;
if (b_offset || odd_len) {
buf = kmalloc(len, GFP_KERNEL);
- if (buf == 0)
+ if (!buf)
return -ENOMEM;
if (b_offset)
memcpy(buf, &start, 4);
SUPPORTED_100baseT_Full |
SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
- SUPPORTED_MII);
+ SUPPORTED_TP);
cmd->port = PORT_TP;
} else {
cmd->supported |= SUPPORTED_FIBRE;
tp->link_config.autoneg = cmd->autoneg;
if (cmd->autoneg == AUTONEG_ENABLE) {
- tp->link_config.advertising = cmd->advertising;
+ tp->link_config.advertising = (cmd->advertising |
+ ADVERTISED_Autoneg);
tp->link_config.speed = SPEED_INVALID;
tp->link_config.duplex = DUPLEX_INVALID;
} else {
{
struct tg3 *tp = netdev_priv(dev);
- wol->supported = WAKE_MAGIC;
+ if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
+ wol->supported = WAKE_MAGIC;
+ else
+ wol->supported = 0;
wol->wolopts = 0;
if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
wol->wolopts = WAKE_MAGIC;
if (wol->wolopts & ~WAKE_MAGIC)
return -EINVAL;
if ((wol->wolopts & WAKE_MAGIC) &&
- tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
- !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
+ !(tp->tg3_flags & TG3_FLAG_WOL_CAP))
return -EINVAL;
spin_lock_bh(&tp->lock);
}
if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
(GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
- if (value)
+ if (value) {
dev->features |= NETIF_F_TSO6;
- else
- dev->features &= ~NETIF_F_TSO6;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ dev->features |= NETIF_F_TSO_ECN;
+ } else
+ dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
}
return ethtool_op_set_tso(dev, value);
}
(ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
(ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
(ering->tx_pending <= MAX_SKB_FRAGS) ||
- ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1_BUG) &&
+ ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
(ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
return -EINVAL;
}
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
- ethtool_op_set_tx_hw_csum(dev, data);
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ ethtool_op_set_tx_ipv6_csum(dev, data);
else
ethtool_op_set_tx_csum(dev, data);
return 0;
}
-static int tg3_get_stats_count (struct net_device *dev)
-{
- return TG3_NUM_STATS;
-}
-
-static int tg3_get_test_count (struct net_device *dev)
+static int tg3_get_sset_count (struct net_device *dev, int sset)
{
- return TG3_NUM_TEST;
+ switch (sset) {
+ case ETH_SS_TEST:
+ return TG3_NUM_TEST;
+ case ETH_SS_STATS:
+ return TG3_NUM_STATS;
+ default:
+ return -EOPNOTSUPP;
+ }
}
static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
}
#define NVRAM_TEST_SIZE 0x100
-#define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
+#define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
+#define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
+#define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
#define NVRAM_SELFBOOT_HW_SIZE 0x20
#define NVRAM_SELFBOOT_DATA_SIZE 0x1c
static int tg3_test_nvram(struct tg3 *tp)
{
u32 *buf, csum, magic;
- int i, j, err = 0, size;
+ int i, j, k, err = 0, size;
if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
return -EIO;
if (magic == TG3_EEPROM_MAGIC)
size = NVRAM_TEST_SIZE;
else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
- if ((magic & 0xe00000) == 0x200000)
- size = NVRAM_SELFBOOT_FORMAT1_SIZE;
- else
+ if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
+ TG3_EEPROM_SB_FORMAT_1) {
+ switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
+ case TG3_EEPROM_SB_REVISION_0:
+ size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
+ break;
+ case TG3_EEPROM_SB_REVISION_2:
+ size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
+ break;
+ case TG3_EEPROM_SB_REVISION_3:
+ size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
+ break;
+ default:
+ return 0;
+ }
+ } else
return 0;
} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
size = NVRAM_SELFBOOT_HW_SIZE;
TG3_EEPROM_MAGIC_FW) {
u8 *buf8 = (u8 *) buf, csum8 = 0;
- for (i = 0; i < size; i++)
- csum8 += buf8[i];
+ if ((cpu_to_be32(buf[0]) & TG3_EEPROM_SB_REVISION_MASK) ==
+ TG3_EEPROM_SB_REVISION_2) {
+ /* For rev 2, the csum doesn't include the MBA. */
+ for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
+ csum8 += buf8[i];
+ for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
+ csum8 += buf8[i];
+ } else {
+ for (i = 0; i < size; i++)
+ csum8 += buf8[i];
+ }
if (csum8 == 0) {
err = 0;
u8 data[NVRAM_SELFBOOT_DATA_SIZE];
u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
u8 *buf8 = (u8 *) buf;
- int j, k;
/* Separate the parity bits and the data bytes. */
for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
int i;
u32 j;
- for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
+ for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
for (j = 0; j < len; j += 4) {
u32 val;
if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
mem_tbl = mem_tbl_5755;
else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
mem_tbl = mem_tbl_5906;
return 0;
mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
- MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY;
+ MAC_MODE_PORT_INT_LPBACK;
+ if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
+ mac_mode |= MAC_MODE_LINK_POLARITY;
if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
mac_mode |= MAC_MODE_PORT_MODE_MII;
else
phytest | MII_TG3_EPHY_SHADOW_EN);
if (!tg3_readphy(tp, 0x1b, &phy))
tg3_writephy(tp, 0x1b, phy & ~0x20);
- if (!tg3_readphy(tp, 0x10, &phy))
- tg3_writephy(tp, 0x10, phy & ~0x4000);
tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
}
val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
} else
val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
+ tg3_phy_toggle_automdix(tp, 0);
+
tg3_writephy(tp, MII_BMCR, val);
udelay(40);
- mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
- MAC_MODE_LINK_POLARITY;
+ mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
mac_mode |= MAC_MODE_PORT_MODE_MII;
udelay(10);
tw32_f(MAC_RX_MODE, tp->rx_mode);
}
- if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
- mac_mode &= ~MAC_MODE_LINK_POLARITY;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
+ if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
+ mac_mode &= ~MAC_MODE_LINK_POLARITY;
+ else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
+ mac_mode |= MAC_MODE_LINK_POLARITY;
tg3_writephy(tp, MII_TG3_EXT_CTRL,
MII_TG3_EXT_CTRL_LNK3_LED_MODE);
}
static int tg3_test_loopback(struct tg3 *tp)
{
int err = 0;
+ u32 cpmuctrl = 0;
if (!netif_running(tp->dev))
return TG3_LOOPBACK_FAILED;
if (err)
return TG3_LOOPBACK_FAILED;
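+ if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
+ /* On 5761/5784 A0/A1, grab the CPMU hardware mutex and turn
+ * off link-speed power management so it cannot interfere
+ * with the loopback test.
+ */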
+ int i;
+ u32 status;
+
+ tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
+
+ /* Wait for up to 40 microseconds to acquire lock. */
+ for (i = 0; i < 4; i++) {
+ status = tr32(TG3_CPMU_MUTEX_GNT);
+ if (status == CPMU_MUTEX_GNT_DRIVER)
+ break;
+ udelay(10);
+ }
+
+ if (status != CPMU_MUTEX_GNT_DRIVER)
+ return TG3_LOOPBACK_FAILED;
+
+ /* Turn off power management based on link speed. */
+ cpmuctrl = tr32(TG3_CPMU_CTRL);
+ tw32(TG3_CPMU_CTRL,
+ cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
+ CPMU_CTRL_LINK_AWARE_MODE));
+ }
+
if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
err |= TG3_MAC_LOOPBACK_FAILED;
+
+ if (tp->tg3_flags3 & TG3_FLG3_5761_5784_AX_FIXES) {
+ tw32(TG3_CPMU_CTRL, cpmuctrl);
+
+ /* Release the mutex */
+ tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
+ }
+
if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
err |= TG3_PHY_LOOPBACK_FAILED;
/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
__tg3_set_rx_mode(dev);
- tg3_full_unlock(tp);
-
if (netif_running(dev))
tg3_netif_start(tp);
-}
-
-static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
-{
- struct tg3 *tp = netdev_priv(dev);
-
- if (netif_running(dev))
- tg3_netif_stop(tp);
- tg3_full_lock(tp, 0);
- if (tp->vlgrp)
- tp->vlgrp->vlan_devices[vid] = NULL;
tg3_full_unlock(tp);
-
- if (netif_running(dev))
- tg3_netif_start(tp);
}
#endif
.set_pauseparam = tg3_set_pauseparam,
.get_rx_csum = tg3_get_rx_csum,
.set_rx_csum = tg3_set_rx_csum,
- .get_tx_csum = ethtool_op_get_tx_csum,
.set_tx_csum = tg3_set_tx_csum,
- .get_sg = ethtool_op_get_sg,
.set_sg = ethtool_op_set_sg,
- .get_tso = ethtool_op_get_tso,
.set_tso = tg3_set_tso,
- .self_test_count = tg3_get_test_count,
.self_test = tg3_self_test,
.get_strings = tg3_get_strings,
.phys_id = tg3_phys_id,
- .get_stats_count = tg3_get_stats_count,
.get_ethtool_stats = tg3_get_ethtool_stats,
.get_coalesce = tg3_get_coalesce,
.set_coalesce = tg3_set_coalesce,
- .get_perm_addr = ethtool_op_get_perm_addr,
+ .get_sset_count = tg3_get_sset_count,
};
static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
return;
}
}
- tp->nvram_size = 0x20000;
+ tp->nvram_size = 0x80000;
}
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
- u32 nvcfg1;
+ u32 nvcfg1, protect = 0;
nvcfg1 = tr32(NVRAM_CFG1);
/* NVRAM protection for TPM */
- if (nvcfg1 & (1 << 27))
+ if (nvcfg1 & (1 << 27)) {
tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
+ protect = 1;
+ }
- switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
- case FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ:
- case FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ:
- tp->nvram_jedecnum = JEDEC_ATMEL;
- tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
- tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
-
- nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
- tw32(NVRAM_CFG1, nvcfg1);
- break;
- case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
+ nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
+ switch (nvcfg1) {
case FLASH_5755VENDOR_ATMEL_FLASH_1:
case FLASH_5755VENDOR_ATMEL_FLASH_2:
case FLASH_5755VENDOR_ATMEL_FLASH_3:
- case FLASH_5755VENDOR_ATMEL_FLASH_4:
+ case FLASH_5755VENDOR_ATMEL_FLASH_5:
tp->nvram_jedecnum = JEDEC_ATMEL;
tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
tp->tg3_flags2 |= TG3_FLG2_FLASH;
tp->nvram_pagesize = 264;
+ if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
+ nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
+ tp->nvram_size = (protect ? 0x3e200 : 0x80000);
+ else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
+ tp->nvram_size = (protect ? 0x1f200 : 0x40000);
+ else
+ tp->nvram_size = (protect ? 0x1f200 : 0x20000);
break;
case FLASH_5752VENDOR_ST_M45PE10:
case FLASH_5752VENDOR_ST_M45PE20:
tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
tp->tg3_flags2 |= TG3_FLG2_FLASH;
tp->nvram_pagesize = 256;
+ if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
+ tp->nvram_size = (protect ? 0x10000 : 0x20000);
+ else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
+ tp->nvram_size = (protect ? 0x10000 : 0x40000);
+ else
+ tp->nvram_size = (protect ? 0x20000 : 0x80000);
break;
}
}
}
}
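+/* Identify the NVRAM part on 5761 devices. When the TPM protection bit
+ * is set, the accessible size is read back from NVRAM_ADDR_LOCKOUT
+ * rather than derived from the flash part's nominal capacity.
+ */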
+static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
+{
+ u32 nvcfg1, protect = 0;
+
+ nvcfg1 = tr32(NVRAM_CFG1);
+
+ /* NVRAM protection for TPM */
+ if (nvcfg1 & (1 << 27)) {
+ tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
+ protect = 1;
+ }
+
+ nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
+ switch (nvcfg1) {
+ case FLASH_5761VENDOR_ATMEL_ADB021D:
+ case FLASH_5761VENDOR_ATMEL_ADB041D:
+ case FLASH_5761VENDOR_ATMEL_ADB081D:
+ case FLASH_5761VENDOR_ATMEL_ADB161D:
+ case FLASH_5761VENDOR_ATMEL_MDB021D:
+ case FLASH_5761VENDOR_ATMEL_MDB041D:
+ case FLASH_5761VENDOR_ATMEL_MDB081D:
+ case FLASH_5761VENDOR_ATMEL_MDB161D:
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+ tp->tg3_flags2 |= TG3_FLG2_FLASH;
+ tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
+ tp->nvram_pagesize = 256;
+ break;
+ case FLASH_5761VENDOR_ST_A_M45PE20:
+ case FLASH_5761VENDOR_ST_A_M45PE40:
+ case FLASH_5761VENDOR_ST_A_M45PE80:
+ case FLASH_5761VENDOR_ST_A_M45PE16:
+ case FLASH_5761VENDOR_ST_M_M45PE20:
+ case FLASH_5761VENDOR_ST_M_M45PE40:
+ case FLASH_5761VENDOR_ST_M_M45PE80:
+ case FLASH_5761VENDOR_ST_M_M45PE16:
+ tp->nvram_jedecnum = JEDEC_ST;
+ tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+ tp->tg3_flags2 |= TG3_FLG2_FLASH;
+ tp->nvram_pagesize = 256;
+ break;
+ }
+
+ if (protect) {
+ tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
+ } else {
+ switch (nvcfg1) {
+ case FLASH_5761VENDOR_ATMEL_ADB161D:
+ case FLASH_5761VENDOR_ATMEL_MDB161D:
+ case FLASH_5761VENDOR_ST_A_M45PE16:
+ case FLASH_5761VENDOR_ST_M_M45PE16:
+ tp->nvram_size = 0x100000;
+ break;
+ case FLASH_5761VENDOR_ATMEL_ADB081D:
+ case FLASH_5761VENDOR_ATMEL_MDB081D:
+ case FLASH_5761VENDOR_ST_A_M45PE80:
+ case FLASH_5761VENDOR_ST_M_M45PE80:
+ tp->nvram_size = 0x80000;
+ break;
+ case FLASH_5761VENDOR_ATMEL_ADB041D:
+ case FLASH_5761VENDOR_ATMEL_MDB041D:
+ case FLASH_5761VENDOR_ST_A_M45PE40:
+ case FLASH_5761VENDOR_ST_M_M45PE40:
+ tp->nvram_size = 0x40000;
+ break;
+ case FLASH_5761VENDOR_ATMEL_ADB021D:
+ case FLASH_5761VENDOR_ATMEL_MDB021D:
+ case FLASH_5761VENDOR_ST_A_M45PE20:
+ case FLASH_5761VENDOR_ST_M_M45PE20:
+ tp->nvram_size = 0x20000;
+ break;
+ }
+ }
+}
+
static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
{
tp->nvram_jedecnum = JEDEC_ATMEL;
}
tg3_enable_nvram_access(tp);
+ tp->nvram_size = 0;
+
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
tg3_get_5752_nvram_info(tp);
else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
tg3_get_5755_nvram_info(tp);
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784)
tg3_get_5787_nvram_info(tp);
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ tg3_get_5761_nvram_info(tp);
else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
tg3_get_5906_nvram_info(tp);
else
tg3_get_nvram_info(tp);
- tg3_get_nvram_size(tp);
+ if (tp->nvram_size == 0)
+ tg3_get_nvram_size(tp);
tg3_disable_nvram_access(tp);
tg3_nvram_unlock(tp);
if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
(tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
(tp->tg3_flags2 & TG3_FLG2_FLASH) &&
+ !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
(tp->nvram_jedecnum == JEDEC_ATMEL))
addr = ((addr / tp->nvram_pagesize) <<
if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
(tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
(tp->tg3_flags2 & TG3_FLG2_FLASH) &&
+ !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
(tp->nvram_jedecnum == JEDEC_ATMEL))
addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
(GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
(GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
+ (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
+ (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
(tp->nvram_jedecnum == JEDEC_ST) &&
(nvram_cmd & NVRAM_CMD_FIRST)) {
tp->phy_id = PHY_ID_INVALID;
tp->led_ctrl = LED_CTRL_MODE_PHY_1;
- /* Assume an onboard device by default. */
- tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
+ /* Assume an onboard device and WOL capable by default. */
+ tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
}
+ val = tr32(VCPU_CFGSHDW);
+ if (val & VCPU_CFGSHDW_ASPM_DBNC)
+ tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
+ if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
+ (val & VCPU_CFGSHDW_WOL_MAGPKT))
+ tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
return;
}
tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
tp->led_ctrl = LED_CTRL_MODE_PHY_2;
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_5784_A1)
+ tp->led_ctrl = LED_CTRL_MODE_MAC;
+
if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
if ((tp->pdev->subsystem_vendor ==
if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
}
- if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
- tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
+ if (nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE)
+ tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
+ if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
+ !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
+ tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
+
+ if (tp->tg3_flags & TG3_FLAG_WOL_CAP &&
+ nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)
+ tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
if (cfg2 & (1 << 17))
tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
/* bootcode if bit 18 is set */
if (cfg2 & (1 << 18))
tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
+
+ if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
+ u32 cfg3;
+
+ tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
+ if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
+ tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
+ }
}
}
* firmware access to the PHY hardware.
*/
err = 0;
- if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
+ if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
+ (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
} else {
/* Now read the physical PHY_ID from the chip and verify
}
if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
+ !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
u32 bmsr, adv_reg, tg3_ctrl, mask;
strcpy(tp->board_part_number, "none");
}
+static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
+{
+ u32 val;
+
+ if (tg3_nvram_read_swab(tp, offset, &val) ||
+ (val & 0xfc000000) != 0x0c000000 ||
+ tg3_nvram_read_swab(tp, offset + 4, &val) ||
+ val != 0)
+ return 0;
+
+ return 1;
+}
+
static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
u32 val, offset, start;
+ u32 ver_offset;
+ int i, bcnt;
if (tg3_nvram_read_swab(tp, 0, &val))
return;
return;
offset = tg3_nvram_logical_addr(tp, offset);
- if (tg3_nvram_read_swab(tp, offset, &val))
+
+ if (!tg3_fw_img_is_valid(tp, offset) ||
+ tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
return;
- if ((val & 0xfc000000) == 0x0c000000) {
- u32 ver_offset, addr;
- int i;
+ offset += ver_offset - start;
+ for (i = 0; i < 16; i += 4) {
+ if (tg3_nvram_read(tp, offset + i, &val))
+ return;
+
+ val = le32_to_cpu(val);
+ memcpy(tp->fw_ver + i, &val, 4);
+ }
+
+ if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
+ (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
+ return;
- if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
- tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
+ for (offset = TG3_NVM_DIR_START;
+ offset < TG3_NVM_DIR_END;
+ offset += TG3_NVM_DIRENT_SIZE) {
+ if (tg3_nvram_read_swab(tp, offset, &val))
return;
- if (val != 0)
+ if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
+ break;
+ }
+
+ if (offset == TG3_NVM_DIR_END)
+ return;
+
+ if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
+ start = 0x08000000;
+ else if (tg3_nvram_read_swab(tp, offset - 4, &start))
+ return;
+
+ if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
+ !tg3_fw_img_is_valid(tp, offset) ||
+ tg3_nvram_read_swab(tp, offset + 8, &val))
+ return;
+
+ offset += val - start;
+
+ bcnt = strlen(tp->fw_ver);
+
+ tp->fw_ver[bcnt++] = ',';
+ tp->fw_ver[bcnt++] = ' ';
+
+ for (i = 0; i < 4; i++) {
+ if (tg3_nvram_read(tp, offset, &val))
return;
- addr = offset + ver_offset - start;
- for (i = 0; i < 16; i += 4) {
- if (tg3_nvram_read(tp, addr + i, &val))
- return;
+ val = le32_to_cpu(val);
+ offset += sizeof(val);
- val = cpu_to_le32(val);
- memcpy(tp->fw_ver + i, &val, 4);
+ if (bcnt > TG3_VER_SIZE - sizeof(val)) {
+ memcpy(&tp->fw_ver[bcnt], &val, TG3_VER_SIZE - bcnt);
+ break;
}
+
+ memcpy(&tp->fw_ver[bcnt], &val, sizeof(val));
+ bcnt += sizeof(val);
}
+
+ tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
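The version-string assembly above never overruns tp->fw_ver: the final word is truncated to TG3_VER_SIZE - bcnt bytes and the last byte is forced to NUL afterwards. The same bounded-append pattern in isolation, with hypothetical names (buf, len, append_word are illustrative, not driver symbols):

/* Hedged sketch: append a 4-byte word to a fixed-size string buffer,
 * truncating the final copy and reserving the terminating NUL.
 * Returns the new length.
 */
static int append_word(char *buf, int len, u32 word, int size)
{
	int room = size - 1 - len;	/* keep a byte for the NUL */
	int n = room < (int) sizeof(word) ? room : (int) sizeof(word);

	if (n > 0) {
		memcpy(buf + len, &word, n);
		len += n;
	}
	buf[len] = 0;
	return len;
}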
+static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
+
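tg3_find_peer() is only forward-declared here; its definition lives near the end of the file. Conceptually (a sketch, not the verbatim body) it scans the other PCI functions of the same slot for the second port of a dual-port 5704/5714 and falls back to the device itself:

/* Sketch of the peer lookup.  pci_dev_put(NULL) is a no-op, and the
 * reference on a found peer is dropped immediately because the two
 * halves of the device cannot be hot-unplugged independently.
 */
static struct pci_dev *tg3_find_peer_sketch(struct tg3 *tp)
{
	unsigned int func, devnr = tp->pdev->devfn & ~7;
	struct pci_dev *peer;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev) {
			pci_dev_put(peer);
			return peer;
		}
		pci_dev_put(peer);
	}
	return tp->pdev;	/* single-port configuration */
}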
static int __devinit tg3_get_invariants(struct tg3 *tp)
{
static struct pci_device_id write_reorder_chipsets[] = {
tp->pci_chip_rev_id = (misc_ctrl_reg >>
MISC_HOST_CTRL_CHIPREV_SHIFT);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
+ u32 prod_id_asic_rev;
+
+ pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
+ &prod_id_asic_rev);
+ tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
+ }
/* Wrong chip ID in 5752 A0. This code can be removed later
* as A0 is not in production.
continue;
}
if (pci_id->rev != PCI_ANY_ID) {
- u8 rev;
-
- pci_read_config_byte(bridge, PCI_REVISION_ID,
- &rev);
- if (rev > pci_id->rev)
+ if (bridge->revision > pci_id->rev)
continue;
}
if (bridge->subordinate &&
tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
+ if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
+ tp->pdev_peer = tg3_find_peer(tp);
+
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
+ tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
+ if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
+ GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
+ tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
+ tp->pdev_peer == tp->pdev))
+ tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
+
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
} else {
- tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 |
- TG3_FLG2_HW_TSO_1_BUG;
+ tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
ASIC_REV_5750 &&
tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
- tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_1_BUG;
+ tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
}
}
GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
if (pcie_cap != 0) {
tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
+
+ pcie_set_readrq(tp->pdev, 4096);
+
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
u16 lnkctl;
cacheline_sz_reg);
}
+ if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
+ (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
+ tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
+ if (!tp->pcix_cap) {
+ printk(KERN_ERR PFX "Cannot find PCI-X "
+ "capability, aborting.\n");
+ return -EIO;
+ }
+ }
+
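/* Editorial note: with tp->pcix_cap cached, PCI-X registers can be
 * addressed relative to the capability offset instead of hard-coded
 * config addresses.  Illustration only (PCI_X_CMD comes from
 * <linux/pci_regs.h>; the surrounding use is hypothetical):
 *
 *	u16 pcix_cmd;
 *
 *	pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
 *			     &pcix_cmd);
 */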
pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
&pci_state_reg);
- if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
+ if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
/* If this is a 5700 BX chipset, and we are in PCI-X
*/
if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
u32 pm_reg;
- u16 pci_cmd;
tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
* space registers clobbered due to this bug.
* So explicitly force the chip into D0 here.
*/
- pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
+ pci_read_config_dword(tp->pdev,
+ tp->pm_cap + PCI_PM_CTRL,
&pm_reg);
pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
- pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
+ pci_write_config_dword(tp->pdev,
+ tp->pm_cap + PCI_PM_CTRL,
pm_reg);
/* Also, force SERR#/PERR# in PCI command. */
if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
- /* Back to back register writes can cause problems on this chip,
- * the workaround is to read back all reg writes except those to
- * mailbox regs. See tg3_write_indirect_reg32().
- *
- * PCI Express 5750_A0 rev chips need this workaround too.
- */
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
- ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
- tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
- tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
-
if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
/* Various workaround register access methods */
if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
tp->write32 = tg3_write_indirect_reg32;
- else if (tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG)
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
+ ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
+ tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
+ /*
+ * Back to back register writes can cause problems on these
+ * chips, the workaround is to read back all reg writes
+ * except those to mailbox regs.
+ *
+ * See tg3_write_indirect_reg32().
+ */
tp->write32 = tg3_write_flush_reg32;
+ }
+
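/* Editorial note: tg3_write_flush_reg32(), selected above, is defined
 * near the top of the file; in essence it follows every MMIO write
 * with a read of the same register so the posted write is flushed to
 * the chip before the next access:
 *
 *	writel(val, tp->regs + off);
 *	readl(tp->regs + off);
 */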
if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
*/
tg3_get_eeprom_hw_cfg(tp);
+ if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
+ /* Allow reads and writes to the
+ * APE register and memory space.
+ */
+ pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
+ PCISTATE_ALLOW_APE_SHMEM_WR;
+ pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
+ pci_state_reg);
+ }
+
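/* Editorial sketch: once APE access is enabled here, shared-memory
 * traffic is expected to be bracketed by the APE lock helpers, e.g.
 * (TG3_APE_SEG_SIG is just an illustrative shmem register):
 *
 *	if (!tg3_ape_lock(tp, TG3_APE_LOCK_MEM)) {
 *		val = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
 *		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *	}
 */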
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
+ tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
+
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5784_A0 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_5784_A1 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_5761_A0 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_5761_A1)
+ tp->tg3_flags3 |= TG3_FLG3_5761_5784_AX_FIXES;
+ }
+
/* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
* GPIO1 driven high will bring 5700's external PHY out of reset.
* It is also used as eeprom write protect on LOMs.
if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
grc_misc_cfg = tr32(GRC_MISC_CFG);
grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
- /* Broadcom's driver says that CIOBE multisplit has a bug */
-#if 0
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
- grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
- tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
- tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
- }
-#endif
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
(grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
* upon subsystem IDs.
*/
if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
TG3_FLAG_USE_LINKCHG_REG);
*/
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
tp->dev->hard_start_xmit = tg3_start_xmit;
else
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
tp->rx_std_max_post = 8;
- /* By default, disable wake-on-lan. User can change this
- * using ETHTOOL_SWOL.
- */
- tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
+ if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
+ tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
+ PCIE_PWR_MGMT_L1_THRESH_MSK;
return err;
}
-#ifdef CONFIG_SPARC64
+#ifdef CONFIG_SPARC
static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
{
struct net_device *dev = tp->dev;
struct pci_dev *pdev = tp->pdev;
- struct pcidev_cookie *pcp = pdev->sysdata;
-
- if (pcp != NULL) {
- unsigned char *addr;
- int len;
-
- addr = of_get_property(pcp->prom_node, "local-mac-address",
- &len);
- if (addr && len == 6) {
- memcpy(dev->dev_addr, addr, 6);
- memcpy(dev->perm_addr, dev->dev_addr, 6);
- return 0;
- }
+ struct device_node *dp = pci_device_to_OF_node(pdev);
+ const unsigned char *addr;
+ int len;
+
+ addr = of_get_property(dp, "local-mac-address", &len);
+ if (addr && len == 6) {
+ memcpy(dev->dev_addr, addr, 6);
+ memcpy(dev->perm_addr, dev->dev_addr, 6);
+ return 0;
}
return -ENODEV;
}
u32 hi, lo, mac_offset;
int addr_ok = 0;
-#ifdef CONFIG_SPARC64
+#ifdef CONFIG_SPARC
if (!tg3_get_macaddr_sparc(tp))
return 0;
#endif
case PHY_ID_BCM5780: return "5780";
case PHY_ID_BCM5755: return "5755";
case PHY_ID_BCM5787: return "5787";
+ case PHY_ID_BCM5784: return "5784";
case PHY_ID_BCM5756: return "5722/5756";
case PHY_ID_BCM5906: return "5906";
+ case PHY_ID_BCM5761: return "5761";
case PHY_ID_BCM8002: return "8002/serdes";
case 0: return "serdes";
default: return "unknown";
goto err_out_free_res;
}
- SET_MODULE_OWNER(dev);
SET_NETDEV_DEV(dev, &pdev->dev);
#if TG3_VLAN_TAG_USED
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
dev->vlan_rx_register = tg3_vlan_rx_register;
- dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
#endif
tp = netdev_priv(dev);
INIT_WORK(&tp->reset_task, tg3_reset_task);
tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
- if (tp->regs == 0UL) {
+ if (!tp->regs) {
printk(KERN_ERR PFX "Cannot map device registers, "
"aborting.\n");
err = -ENOMEM;
dev->set_mac_address = tg3_set_mac_addr;
dev->do_ioctl = tg3_ioctl;
dev->tx_timeout = tg3_tx_timeout;
- dev->poll = tg3_poll;
+ netif_napi_add(dev, &tp->napi, tg3_poll, 64);
dev->ethtool_ops = &tg3_ethtool_ops;
- dev->weight = 64;
dev->watchdog_timeo = TG3_TX_TIMEOUT;
dev->change_mtu = tg3_change_mtu;
dev->irq = pdev->irq;
(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
} else {
- tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
+ tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
}
/* TSO is on by default on chips that support hardware TSO.
if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
(GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
dev->features |= NETIF_F_TSO6;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ dev->features |= NETIF_F_TSO_ECN;
}
tp->rx_pending = 63;
}
- if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
- tp->pdev_peer = tg3_find_peer(tp);
-
err = tg3_get_device_address(tp);
if (err) {
printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
goto err_out_iounmap;
}
+ if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
+ if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
+ printk(KERN_ERR PFX "Cannot find proper PCI device "
+ "base address for APE, aborting.\n");
+ err = -ENODEV;
+ goto err_out_iounmap;
+ }
+
+ tg3reg_base = pci_resource_start(pdev, 2);
+ tg3reg_len = pci_resource_len(pdev, 2);
+
+ tp->aperegs = ioremap_nocache(tg3reg_base, tg3reg_len);
+ if (!tp->aperegs) {
+ printk(KERN_ERR PFX "Cannot map APE registers, "
+ "aborting.\n");
+ err = -ENOMEM;
+ goto err_out_iounmap;
+ }
+
+ tg3_ape_lock_init(tp);
+ }
+
/*
* Reset chip in case UNDI or EFI driver did not shutdown
* DMA self test will enable WDMAC and we'll see (spurious)
*/
if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
- pci_save_state(tp->pdev);
tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
}
err = tg3_test_dma(tp);
if (err) {
printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
- goto err_out_iounmap;
+ goto err_out_apeunmap;
}
- /* Tigon3 can do ipv4 only... and some chips have buggy
- * checksumming.
+ /* Tigon3 can do IPv4 checksum offload (and, on the newer chips
+ * below, IPv6)... and some chips have buggy checksumming.
*/
if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
+ dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
- dev->features |= NETIF_F_HW_CSUM;
- else
- dev->features |= NETIF_F_IP_CSUM;
- dev->features |= NETIF_F_SG;
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+ dev->features |= NETIF_F_IPV6_CSUM;
+
tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
} else
tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
tg3_init_coal(tp);
- /* Now that we have fully setup the chip, save away a snapshot
- * of the PCI config space. We need to restore this after
- * GRC_MISC_CFG core clock resets and some resume events.
- */
- pci_save_state(tp->pdev);
-
pci_set_drvdata(pdev, dev);
err = register_netdev(dev);
if (err) {
printk(KERN_ERR PFX "Cannot register net device, "
"aborting.\n");
- goto err_out_iounmap;
+ goto err_out_apeunmap;
}
printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %s Ethernet ",
i == 5 ? '\n' : ':');
printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
- "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
- "TSOcap[%d] \n",
+ "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
dev->name,
(tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
(tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
(tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
- (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
(tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
return 0;
+err_out_apeunmap:
+ if (tp->aperegs) {
+ iounmap(tp->aperegs);
+ tp->aperegs = NULL;
+ }
+
err_out_iounmap:
if (tp->regs) {
iounmap(tp->regs);
flush_scheduled_work();
unregister_netdev(dev);
+ if (tp->aperegs) {
+ iounmap(tp->aperegs);
+ tp->aperegs = NULL;
+ }
if (tp->regs) {
iounmap(tp->regs);
tp->regs = NULL;
struct tg3 *tp = netdev_priv(dev);
int err;
+ /* PCI register 4 needs to be saved whether netif_running() or not.
+ * MSI address and data need to be saved if using MSI and
+ * netif_running().
+ */
+ pci_save_state(pdev);
+
if (!netif_running(dev))
return 0;
tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
tg3_full_unlock(tp);
- /* Save MSI address and data for resume. */
- pci_save_state(pdev);
-
err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
if (err) {
tg3_full_lock(tp, 0);
struct tg3 *tp = netdev_priv(dev);
int err;
+ pci_restore_state(tp->pdev);
+
if (!netif_running(dev))
return 0;
- pci_restore_state(tp->pdev);
-
err = tg3_set_power_state(tp, PCI_D0);
if (err)
return err;