 	spin_lock_irqsave(&cp->lock, flags);
 	cp->vlgrp = grp;
-	cp->cpcmd |= RxVlanOn;
-	cpw16(CpCmd, cp->cpcmd);
-	spin_unlock_irqrestore(&cp->lock, flags);
-}
-
-static void cp_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
-{
-	struct cp_private *cp = netdev_priv(dev);
-	unsigned long flags;
+	if (grp)
+		cp->cpcmd |= RxVlanOn;
+	else
+		cp->cpcmd &= ~RxVlanOn;
-	spin_lock_irqsave(&cp->lock, flags);
-	cp->cpcmd &= ~RxVlanOn;
 	cpw16(CpCmd, cp->cpcmd);
-	if (cp->vlgrp)
-		cp->vlgrp->vlan_devices[vid] = NULL;
 	spin_unlock_irqrestore(&cp->lock, flags);
 }
 #endif /* CP_VLAN_TAG_USED */
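For reference, the hunk above folds the disable path into cp_vlan_rx_register() and deletes cp_vlan_rx_kill_vid() outright (its probe-time hook assignment goes away in the final hunk below; the 8021q core clears its own per-vid state, so the driver has nothing left to do on kill_vid). Assuming the driver's existing cp_vlan_rx_register(dev, grp) prototype, the merged function comes out as:

static void cp_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&cp->lock, flags);
	cp->vlgrp = grp;
	if (grp)
		cp->cpcmd |= RxVlanOn;	/* registering a group: enable rx detagging */
	else
		cp->cpcmd &= ~RxVlanOn;	/* grp == NULL: unregistering, disable it */
	cpw16(CpCmd, cp->cpcmd);
	spin_unlock_irqrestore(&cp->lock, flags);
}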
 		}
 		skb_reserve(new_skb, RX_OFFSET);
-		new_skb->dev = dev;
 		pci_unmap_single(cp->pdev, mapping,
 				 buflen, PCI_DMA_FROMDEVICE);
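Dropping the new_skb->dev assignment is safe because the replacement skb only sits in the rx ring until it completes, at which point the driver's rx path already runs it through eth_type_trans(), which sets skb->dev itself. Roughly, from the completion side of cp_rx_poll():

	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, dev);	/* eth_type_trans() assigns skb->dev = dev */
	netif_receive_skb(skb);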
 	 * this round of polling
 	 */
 	if (rx_work) {
+		unsigned long flags;
+
 		if (cpr16(IntrStatus) & cp_rx_intr_mask)
 			goto rx_status_loop;
-		local_irq_disable();
+		local_irq_save(flags);
 		cpw16_f(IntrMask, cp_intr_mask);
 		__netif_rx_complete(dev);
-		local_irq_enable();
+		local_irq_restore(flags);
 		return 0;	/* done */
 	}
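The switch from local_irq_disable()/local_irq_enable() to local_irq_save()/local_irq_restore() guards against the poll routine being entered with interrupts already off (netpoll can do this): an unconditional local_irq_enable() would re-enable interrupts behind the caller's back. The pattern in isolation:

	unsigned long flags;

	local_irq_save(flags);		/* disable IRQs and remember the previous state */
	/* ... work that must not race with the interrupt handler ... */
	local_irq_restore(flags);	/* put the previous state back; IRQs stay off
					 * if the caller already had them disabled */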
 	struct cp_private *cp = netdev_priv(dev);
 	unsigned entry;
 	u32 eor, flags;
+	unsigned long intr_flags;
 #if CP_VLAN_TAG_USED
 	u32 vlan_tag = 0;
 #endif
 	int mss = 0;
-	spin_lock_irq(&cp->lock);
+	spin_lock_irqsave(&cp->lock, intr_flags);
 	/* This is a hard error, log it. */
 	if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
 		netif_stop_queue(dev);
-		spin_unlock_irq(&cp->lock);
+		spin_unlock_irqrestore(&cp->lock, intr_flags);
 		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
 		       dev->name);
 		return 1;
 		if (mss)
 			flags |= LargeSend | ((mss & MSSMask) << MSSShift);
 		else if (skb->ip_summed == CHECKSUM_PARTIAL) {
-			const struct iphdr *ip = skb->nh.iph;
+			const struct iphdr *ip = ip_hdr(skb);
 			if (ip->protocol == IPPROTO_TCP)
 				flags |= IPCS | TCPCS;
 			else if (ip->protocol == IPPROTO_UDP)
 		u32 first_len, first_eor;
 		dma_addr_t first_mapping;
 		int frag, first_entry = entry;
-		const struct iphdr *ip = skb->nh.iph;
+		const struct iphdr *ip = ip_hdr(skb);
 		/* We must give this initial chunk to the device last.
 		 * Otherwise we could race with the device.
 	if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
 		netif_stop_queue(dev);
-	spin_unlock_irq(&cp->lock);
+	spin_unlock_irqrestore(&cp->lock, intr_flags);
 	cpw8(TxPoll, NormalTxPoll);
 	dev->trans_start = jiffies;
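Two independent fixes ride in this hunk. The spin_lock_irq() to spin_lock_irqsave() conversion is the same hardening as in the poll path above: cp_start_xmit() must not unconditionally re-enable interrupts on unlock. The skb->nh.iph to ip_hdr(skb) conversion tracks the sk_buff layout change that replaced the nh/h/mac header unions with offsets; the accessor is equivalent to the old field and, simplified from the kernel headers of this era, amounts to:

static inline struct iphdr *ip_hdr(const struct sk_buff *skb)
{
	/* the network header is now an offset into the skb,
	 * not a pointer stored in the old skb->nh union */
	return (struct iphdr *)skb_network_header(skb);
}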
 		if (!skb)
 			goto err_out;
-		skb->dev = cp->dev;
 		skb_reserve(skb, RX_OFFSET);
 		mapping = pci_map_single(cp->pdev, skb->data, cp->rx_buf_sz,
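Same redundancy as in the rx-ring hunk above: cp_refill_rx() only parks the freshly allocated skb in the ring, and skb->dev is set by eth_type_trans() when the buffer eventually completes, so initializing it at refill time did nothing useful.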
 #if CP_VLAN_TAG_USED
 	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
 	dev->vlan_rx_register = cp_vlan_rx_register;
-	dev->vlan_rx_kill_vid = cp_vlan_rx_kill_vid;
 #endif
 	if (pci_using_dac)