 static int
 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
+__releases(&bp->phy_lock)
+__acquires(&bp->phy_lock)
 {
 	u32 speed_arg = 0, pause_adv;
 static int
 bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
+__releases(&bp->phy_lock)
+__acquires(&bp->phy_lock)
 {
 	u32 adv, bmcr;
 	u32 new_adv = 0;
 static int
 bnx2_setup_copper_phy(struct bnx2 *bp)
+__releases(&bp->phy_lock)
+__acquires(&bp->phy_lock)
 {
 	u32 bmcr;
 	u32 new_bmcr;
 static int
 bnx2_setup_phy(struct bnx2 *bp, u8 port)
+__releases(&bp->phy_lock)
+__acquires(&bp->phy_lock)
 {
 	if (bp->loopback == MAC_LOOPBACK)
 		return 0;
 static int
 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
+__releases(&bp->phy_lock)
+__acquires(&bp->phy_lock)
 {
 	u32 val;
 	int rc = 0;
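
Each of these five functions temporarily drops bp->phy_lock in its body (for example around a firmware handshake that can sleep), which sparse otherwise reports as a context imbalance. With __CHECKER__ defined, __releases()/__acquires() from <linux/compiler.h> expand to __attribute__((context(...))) hints describing the lock state across the call; in a normal build they expand to nothing. A minimal sketch of the pattern being annotated, with example_sleepable_work() as a hypothetical stand-in for the real unlocked section:

static int example_setup(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	/* Caller enters with phy_lock held. */
	spin_unlock_bh(&bp->phy_lock);
	example_sleepable_work(bp);	/* hypothetical helper */
	/* Re-take the lock before returning, as the annotations promise. */
	spin_lock_bh(&bp->phy_lock);
	return 0;
}
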
 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
 		return IRQ_HANDLED;
-	netif_rx_schedule(&bnapi->napi);
+	napi_schedule(&bnapi->napi);
 	return IRQ_HANDLED;
 }
 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
 		return IRQ_HANDLED;
-	netif_rx_schedule(&bnapi->napi);
+	napi_schedule(&bnapi->napi);
 	return IRQ_HANDLED;
 }
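
Both of these MSI handlers change only in the name of the scheduling call: the redundant netif_rx_schedule() wrapper was removed in favor of napi_schedule(), which behaves identically (napi_schedule_prep() to claim the poll, then __napi_schedule() to queue it). A sketch of the call site in a hypothetical handler:

static irqreturn_t example_msi(int irq, void *dev_instance)
{
	struct example_napi *n = dev_instance;	/* hypothetical */

	/* Formerly netif_rx_schedule(&n->napi); same semantics. */
	napi_schedule(&n->napi);
	return IRQ_HANDLED;
}
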
 	if (unlikely(atomic_read(&bp->intr_sem) != 0))
 		return IRQ_HANDLED;
-	if (netif_rx_schedule_prep(&bnapi->napi)) {
+	if (napi_schedule_prep(&bnapi->napi)) {
 		bnapi->last_status_idx = sblk->status_idx;
-		__netif_rx_schedule(&bnapi->napi);
+		__napi_schedule(&bnapi->napi);
 	}
 	return IRQ_HANDLED;
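
This handler keeps the open-coded two-step form because the latched status index must be recorded only after the handler wins the scheduled bit; napi_schedule_prep() performs that claim atomically, so the snapshot cannot race with a poll that is already queued. The shape of the pattern, using the same hypothetical names as above:

	if (napi_schedule_prep(&n->napi)) {
		/* We now own the poll: snapshot hardware state first, */
		n->last_status_idx = n->sblk->status_idx;
		/* then queue the poll; only valid after a successful prep. */
		__napi_schedule(&n->napi);
	}
	/* If prep failed, a poll is already pending and will see the work. */
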
 	rmb();
 	if (likely(!bnx2_has_fast_work(bnapi))) {
-		netif_rx_complete(napi);
+		napi_complete(napi);
 		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
 		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
 		       bnapi->last_status_idx);
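
On the poll side the rename is netif_rx_complete() to napi_complete(), again with unchanged behavior: it takes the context out of the scheduled state so the interrupt can be re-armed. The rmb() plus has-work re-check above it is the usual guard against a status-block update landing between the last poll pass and completion. A poll routine sketch with hypothetical helpers:

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_napi *n = container_of(napi, struct example_napi, napi);
	int work_done = example_rx(n, budget);	/* hypothetical RX loop */

	if (work_done < budget) {
		napi_complete(napi);	/* formerly netif_rx_complete() */
		example_unmask_irq(n);	/* hypothetical interrupt re-arm */
	}
	return work_done;
}
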
 	rmb();
 	if (likely(!bnx2_has_work(bnapi))) {
-		netif_rx_complete(napi);
+		napi_complete(napi);
 		if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
 			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
 			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |