1 /*******************************************************************************
2
3   
4   Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
5   
6   This program is free software; you can redistribute it and/or modify it 
7   under the terms of the GNU General Public License as published by the Free 
8   Software Foundation; either version 2 of the License, or (at your option) 
9   any later version.
10   
11   This program is distributed in the hope that it will be useful, but WITHOUT 
12   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 
13   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for 
14   more details.
15   
16   You should have received a copy of the GNU General Public License along with
17   this program; if not, write to the Free Software Foundation, Inc., 59 
18   Temple Place - Suite 330, Boston, MA  02111-1307, USA.
19   
20   The full GNU General Public License is included in this distribution in the
21   file called LICENSE.
22   
23   Contact Information:
24   Linux NICS <linux.nics@intel.com>
25   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27 *******************************************************************************/
28
29 #include "e1000.h"
30
31 /* Change Log
32  * 6.0.58       4/20/05
33  *   o Accepted ethtool cleanup patch from Stephen Hemminger 
34  * 6.0.44+      2/15/05
35  *   o Applied Anton's patch to resolve tx hang in hardware
36  *   o Applied Andrew Morton's patch - e1000 stops working after resume
37  */
38
39 char e1000_driver_name[] = "e1000";
40 char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
41 #ifndef CONFIG_E1000_NAPI
42 #define DRIVERNAPI
43 #else
44 #define DRIVERNAPI "-NAPI"
45 #endif
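/* Adjacent string literals concatenate at compile time, so DRV_VERSION below
 * becomes "6.0.60-k2" or "6.0.60-k2-NAPI" depending on CONFIG_E1000_NAPI. */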
46 #define DRV_VERSION             "6.0.60-k2"DRIVERNAPI
47 char e1000_driver_version[] = DRV_VERSION;
48 char e1000_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
49
50 /* e1000_pci_tbl - PCI Device ID Table
51  *
52  * Last entry must be all 0s
53  *
54  * Macro expands to...
55  *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
56  */
57 static struct pci_device_id e1000_pci_tbl[] = {
58         INTEL_E1000_ETHERNET_DEVICE(0x1000),
59         INTEL_E1000_ETHERNET_DEVICE(0x1001),
60         INTEL_E1000_ETHERNET_DEVICE(0x1004),
61         INTEL_E1000_ETHERNET_DEVICE(0x1008),
62         INTEL_E1000_ETHERNET_DEVICE(0x1009),
63         INTEL_E1000_ETHERNET_DEVICE(0x100C),
64         INTEL_E1000_ETHERNET_DEVICE(0x100D),
65         INTEL_E1000_ETHERNET_DEVICE(0x100E),
66         INTEL_E1000_ETHERNET_DEVICE(0x100F),
67         INTEL_E1000_ETHERNET_DEVICE(0x1010),
68         INTEL_E1000_ETHERNET_DEVICE(0x1011),
69         INTEL_E1000_ETHERNET_DEVICE(0x1012),
70         INTEL_E1000_ETHERNET_DEVICE(0x1013),
71         INTEL_E1000_ETHERNET_DEVICE(0x1014),
72         INTEL_E1000_ETHERNET_DEVICE(0x1015),
73         INTEL_E1000_ETHERNET_DEVICE(0x1016),
74         INTEL_E1000_ETHERNET_DEVICE(0x1017),
75         INTEL_E1000_ETHERNET_DEVICE(0x1018),
76         INTEL_E1000_ETHERNET_DEVICE(0x1019),
77         INTEL_E1000_ETHERNET_DEVICE(0x101A),
78         INTEL_E1000_ETHERNET_DEVICE(0x101D),
79         INTEL_E1000_ETHERNET_DEVICE(0x101E),
80         INTEL_E1000_ETHERNET_DEVICE(0x1026),
81         INTEL_E1000_ETHERNET_DEVICE(0x1027),
82         INTEL_E1000_ETHERNET_DEVICE(0x1028),
83         INTEL_E1000_ETHERNET_DEVICE(0x1075),
84         INTEL_E1000_ETHERNET_DEVICE(0x1076),
85         INTEL_E1000_ETHERNET_DEVICE(0x1077),
86         INTEL_E1000_ETHERNET_DEVICE(0x1078),
87         INTEL_E1000_ETHERNET_DEVICE(0x1079),
88         INTEL_E1000_ETHERNET_DEVICE(0x107A),
89         INTEL_E1000_ETHERNET_DEVICE(0x107B),
90         INTEL_E1000_ETHERNET_DEVICE(0x107C),
91         INTEL_E1000_ETHERNET_DEVICE(0x108A),
92         INTEL_E1000_ETHERNET_DEVICE(0x108B),
93         INTEL_E1000_ETHERNET_DEVICE(0x108C),
94         INTEL_E1000_ETHERNET_DEVICE(0x1099),
95         /* required last entry */
96         {0,}
97 };
98
99 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
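/* For reference, a minimal sketch of the table-entry helper used above; the
 * real definition is assumed to live in e1000.h and, per the comment on the
 * table, expands to a PCI_DEVICE() initializer:
 *
 *   #define INTEL_E1000_ETHERNET_DEVICE(device_id) \
 *           {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
 */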
100
101 int e1000_up(struct e1000_adapter *adapter);
102 void e1000_down(struct e1000_adapter *adapter);
103 void e1000_reset(struct e1000_adapter *adapter);
104 int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx);
105 int e1000_setup_tx_resources(struct e1000_adapter *adapter);
106 int e1000_setup_rx_resources(struct e1000_adapter *adapter);
107 void e1000_free_tx_resources(struct e1000_adapter *adapter);
108 void e1000_free_rx_resources(struct e1000_adapter *adapter);
109 void e1000_update_stats(struct e1000_adapter *adapter);
110
111 /* Local Function Prototypes */
112
113 static int e1000_init_module(void);
114 static void e1000_exit_module(void);
115 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
116 static void __devexit e1000_remove(struct pci_dev *pdev);
117 static int e1000_sw_init(struct e1000_adapter *adapter);
118 static int e1000_open(struct net_device *netdev);
119 static int e1000_close(struct net_device *netdev);
120 static void e1000_configure_tx(struct e1000_adapter *adapter);
121 static void e1000_configure_rx(struct e1000_adapter *adapter);
122 static void e1000_setup_rctl(struct e1000_adapter *adapter);
123 static void e1000_clean_tx_ring(struct e1000_adapter *adapter);
124 static void e1000_clean_rx_ring(struct e1000_adapter *adapter);
125 static void e1000_set_multi(struct net_device *netdev);
126 static void e1000_update_phy_info(unsigned long data);
127 static void e1000_watchdog(unsigned long data);
128 static void e1000_watchdog_task(struct e1000_adapter *adapter);
129 static void e1000_82547_tx_fifo_stall(unsigned long data);
130 static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
131 static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
132 static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
133 static int e1000_set_mac(struct net_device *netdev, void *p);
134 static irqreturn_t e1000_intr(int irq, void *data, struct pt_regs *regs);
135 static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter);
136 #ifdef CONFIG_E1000_NAPI
137 static int e1000_clean(struct net_device *netdev, int *budget);
138 static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter,
139                                     int *work_done, int work_to_do);
140 static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
141                                        int *work_done, int work_to_do);
142 #else
143 static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter);
144 static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter);
145 #endif
146 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter);
147 static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter);
148 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
149 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
150                            int cmd);
151 void e1000_set_ethtool_ops(struct net_device *netdev);
152 static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
153 static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
154 static void e1000_tx_timeout(struct net_device *dev);
155 static void e1000_tx_timeout_task(struct net_device *dev);
156 static void e1000_smartspeed(struct e1000_adapter *adapter);
157 static inline int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
158                                               struct sk_buff *skb);
159
160 static void e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp);
161 static void e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
162 static void e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
163 static void e1000_restore_vlan(struct e1000_adapter *adapter);
164
165 static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
166 #ifdef CONFIG_PM
167 static int e1000_resume(struct pci_dev *pdev);
168 #endif
169
170 #ifdef CONFIG_NET_POLL_CONTROLLER
171 /* for netdump / net console */
172 static void e1000_netpoll (struct net_device *netdev);
173 #endif
174
175 /* Exported from other modules */
176
177 extern void e1000_check_options(struct e1000_adapter *adapter);
178
179 static struct pci_driver e1000_driver = {
180         .name     = e1000_driver_name,
181         .id_table = e1000_pci_tbl,
182         .probe    = e1000_probe,
183         .remove   = __devexit_p(e1000_remove),
184         /* Power Management Hooks */
185 #ifdef CONFIG_PM
186         .suspend  = e1000_suspend,
187         .resume   = e1000_resume
188 #endif
189 };
190
191 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
192 MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
193 MODULE_LICENSE("GPL");
194 MODULE_VERSION(DRV_VERSION);
195
196 static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
197 module_param(debug, int, 0);
198 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
199
200 /**
201  * e1000_init_module - Driver Registration Routine
202  *
203  * e1000_init_module is the first routine called when the driver is
204  * loaded. All it does is register with the PCI subsystem.
205  **/
206
207 static int __init
208 e1000_init_module(void)
209 {
210         int ret;
211         printk(KERN_INFO "%s - version %s\n",
212                e1000_driver_string, e1000_driver_version);
213
214         printk(KERN_INFO "%s\n", e1000_copyright);
215
216         ret = pci_module_init(&e1000_driver);
217
218         return ret;
219 }
220
221 module_init(e1000_init_module);
222
223 /**
224  * e1000_exit_module - Driver Exit Cleanup Routine
225  *
226  * e1000_exit_module is called just before the driver is removed
227  * from memory.
228  **/
229
230 static void __exit
231 e1000_exit_module(void)
232 {
233         pci_unregister_driver(&e1000_driver);
234 }
235
236 module_exit(e1000_exit_module);
237
238 /**
239  * e1000_irq_disable - Mask off interrupt generation on the NIC
240  * @adapter: board private structure
241  **/
242
243 static inline void
244 e1000_irq_disable(struct e1000_adapter *adapter)
245 {
246         atomic_inc(&adapter->irq_sem);
247         E1000_WRITE_REG(&adapter->hw, IMC, ~0);
248         E1000_WRITE_FLUSH(&adapter->hw);
249         synchronize_irq(adapter->pdev->irq);
250 }
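/* irq_sem is a nesting count: every e1000_irq_disable() bumps it and masks
 * all interrupt causes, and e1000_irq_enable() below only unmasks them again
 * once the count has dropped back to zero. */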
251
252 /**
253  * e1000_irq_enable - Enable default interrupt generation settings
254  * @adapter: board private structure
255  **/
256
257 static inline void
258 e1000_irq_enable(struct e1000_adapter *adapter)
259 {
260         if(likely(atomic_dec_and_test(&adapter->irq_sem))) {
261                 E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK);
262                 E1000_WRITE_FLUSH(&adapter->hw);
263         }
264 }
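/**
 * e1000_update_mng_vlan - keep the management VLAN registered with the stack
 * @adapter: board private structure
 *
 * If the VLAN id carried in the hardware's management cookie is not yet known
 * to the stack, register it and track it in mng_vlan_id; the previously
 * tracked management VLAN is released once it is no longer referenced.
 **/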
265 void
266 e1000_update_mng_vlan(struct e1000_adapter *adapter)
267 {
268         struct net_device *netdev = adapter->netdev;
269         uint16_t vid = adapter->hw.mng_cookie.vlan_id;
270         uint16_t old_vid = adapter->mng_vlan_id;
271         if(adapter->vlgrp) {
272                 if(!adapter->vlgrp->vlan_devices[vid]) {
273                         if(adapter->hw.mng_cookie.status &
274                                 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
275                                 e1000_vlan_rx_add_vid(netdev, vid);
276                                 adapter->mng_vlan_id = vid;
277                         } else
278                                 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
279                                 
280                         if((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) &&
281                                         (vid != old_vid) && 
282                                         !adapter->vlgrp->vlan_devices[old_vid])
283                                 e1000_vlan_rx_kill_vid(netdev, old_vid);
284                 }
285         }
286 }
287         
288 int
289 e1000_up(struct e1000_adapter *adapter)
290 {
291         struct net_device *netdev = adapter->netdev;
292         int err;
293
294         /* hardware has been reset, we need to reload some things */
295
296         /* Reset the PHY if it was previously powered down */
297         if(adapter->hw.media_type == e1000_media_type_copper) {
298                 uint16_t mii_reg;
299                 e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
300                 if(mii_reg & MII_CR_POWER_DOWN)
301                         e1000_phy_reset(&adapter->hw);
302         }
303
304         e1000_set_multi(netdev);
305
306         e1000_restore_vlan(adapter);
307
308         e1000_configure_tx(adapter);
309         e1000_setup_rctl(adapter);
310         e1000_configure_rx(adapter);
311         adapter->alloc_rx_buf(adapter);
312
313 #ifdef CONFIG_PCI_MSI
314         if(adapter->hw.mac_type > e1000_82547_rev_2) {
315                 adapter->have_msi = TRUE;
316                 if((err = pci_enable_msi(adapter->pdev))) {
317                         DPRINTK(PROBE, ERR,
318                          "Unable to allocate MSI interrupt Error: %d\n", err);
319                         adapter->have_msi = FALSE;
320                 }
321         }
322 #endif
323         if((err = request_irq(adapter->pdev->irq, &e1000_intr,
324                               SA_SHIRQ | SA_SAMPLE_RANDOM,
325                               netdev->name, netdev))) {
326                 DPRINTK(PROBE, ERR,
327                     "Unable to allocate interrupt Error: %d\n", err);
328                 return err;
329         }
330
331         mod_timer(&adapter->watchdog_timer, jiffies);
332
333 #ifdef CONFIG_E1000_NAPI
334         netif_poll_enable(netdev);
335 #endif
336         e1000_irq_enable(adapter);
337
338         return 0;
339 }
340
341 void
342 e1000_down(struct e1000_adapter *adapter)
343 {
344         struct net_device *netdev = adapter->netdev;
345
346         e1000_irq_disable(adapter);
347         free_irq(adapter->pdev->irq, netdev);
348 #ifdef CONFIG_PCI_MSI
349         if(adapter->hw.mac_type > e1000_82547_rev_2 &&
350            adapter->have_msi == TRUE)
351                 pci_disable_msi(adapter->pdev);
352 #endif
353         del_timer_sync(&adapter->tx_fifo_stall_timer);
354         del_timer_sync(&adapter->watchdog_timer);
355         del_timer_sync(&adapter->phy_info_timer);
356
357 #ifdef CONFIG_E1000_NAPI
358         netif_poll_disable(netdev);
359 #endif
360         adapter->link_speed = 0;
361         adapter->link_duplex = 0;
362         netif_carrier_off(netdev);
363         netif_stop_queue(netdev);
364
365         e1000_reset(adapter);
366         e1000_clean_tx_ring(adapter);
367         e1000_clean_rx_ring(adapter);
368
369         /* If WoL is not enabled and the management mode is not IAMT,
370          * power down the PHY so no link is implied when the interface
371          * is down */
372         if(!adapter->wol && adapter->hw.mac_type >= e1000_82540 &&
373            adapter->hw.media_type == e1000_media_type_copper &&
374            !e1000_check_mng_mode(&adapter->hw) &&
375            !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN)) {
376                 uint16_t mii_reg;
377                 e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
378                 mii_reg |= MII_CR_POWER_DOWN;
379                 e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg);
380                 mdelay(1);
381         }
382 }
383
384 void
385 e1000_reset(struct e1000_adapter *adapter)
386 {
387         struct net_device *netdev = adapter->netdev;
388         uint32_t pba, manc;
389         uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF;
390         uint16_t fc_low_water_mark = E1000_FC_LOW_DIFF;
391
392         /* Repartition the packet buffer (PBA) for an MTU greater than 9k.
393          * CTRL.RST is required for the new partitioning to take effect.
394          */
395
396         switch (adapter->hw.mac_type) {
397         case e1000_82547:
398         case e1000_82547_rev_2:
399                 pba = E1000_PBA_30K;
400                 break;
401         case e1000_82573:
402                 pba = E1000_PBA_12K;
403                 break;
404         default:
405                 pba = E1000_PBA_48K;
406                 break;
407         }
408
409         if((adapter->hw.mac_type != e1000_82573) &&
410            (adapter->rx_buffer_len > E1000_RXBUFFER_8192)) {
411                 pba -= 8; /* allocate more FIFO for Tx */
412                 /* send an XOFF when there is enough space in the
413                  * Rx FIFO to hold one extra full size Rx packet 
414                 */
415                 fc_high_water_mark = netdev->mtu + ENET_HEADER_SIZE + 
416                                         ETHERNET_FCS_SIZE + 1;
417                 fc_low_water_mark = fc_high_water_mark + 8;
418         }
419
420
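        /* On the 82547 the Tx FIFO is the part of the E1000_PBA_40K region
         * that is not assigned to Rx (pba); E1000_PBA_BYTES_SHIFT converts
         * that remainder from KB units into bytes. */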
421         if(adapter->hw.mac_type == e1000_82547) {
422                 adapter->tx_fifo_head = 0;
423                 adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
424                 adapter->tx_fifo_size =
425                         (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
426                 atomic_set(&adapter->tx_fifo_stall, 0);
427         }
428
429         E1000_WRITE_REG(&adapter->hw, PBA, pba);
430
431         /* flow control settings */
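        /* pba is expressed in KB, so shift it to bytes before subtracting the
         * per-frame margins computed above; the water marks are therefore
         * offsets measured down from the top of the Rx packet buffer. */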
432         adapter->hw.fc_high_water = (pba << E1000_PBA_BYTES_SHIFT) -
433                                     fc_high_water_mark;
434         adapter->hw.fc_low_water = (pba << E1000_PBA_BYTES_SHIFT) -
435                                    fc_low_water_mark;
436         adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME;
437         adapter->hw.fc_send_xon = 1;
438         adapter->hw.fc = adapter->hw.original_fc;
439
440         /* Allow time for pending master requests to run */
441         e1000_reset_hw(&adapter->hw);
442         if(adapter->hw.mac_type >= e1000_82544)
443                 E1000_WRITE_REG(&adapter->hw, WUC, 0);
444         if(e1000_init_hw(&adapter->hw))
445                 DPRINTK(PROBE, ERR, "Hardware Error\n");
446         e1000_update_mng_vlan(adapter);
447         /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
448         E1000_WRITE_REG(&adapter->hw, VET, ETHERNET_IEEE_VLAN_TYPE);
449
450         e1000_reset_adaptive(&adapter->hw);
451         e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
452         if (adapter->en_mng_pt) {
453                 manc = E1000_READ_REG(&adapter->hw, MANC);
454                 manc |= (E1000_MANC_ARP_EN | E1000_MANC_EN_MNG2HOST);
455                 E1000_WRITE_REG(&adapter->hw, MANC, manc);
456         }
457 }
458
459 /**
460  * e1000_probe - Device Initialization Routine
461  * @pdev: PCI device information struct
462  * @ent: entry in e1000_pci_tbl
463  *
464  * Returns 0 on success, negative on failure
465  *
466  * e1000_probe initializes an adapter identified by a pci_dev structure.
467  * The OS initialization, configuring of the adapter private structure,
468  * and a hardware reset occur.
469  **/
470
471 static int __devinit
472 e1000_probe(struct pci_dev *pdev,
473             const struct pci_device_id *ent)
474 {
475         struct net_device *netdev;
476         struct e1000_adapter *adapter;
477         unsigned long mmio_start, mmio_len;
478         uint32_t swsm;
479
480         static int cards_found = 0;
481         int i, err, pci_using_dac;
482         uint16_t eeprom_data;
483         uint16_t eeprom_apme_mask = E1000_EEPROM_APME;
484         if((err = pci_enable_device(pdev)))
485                 return err;
486
487         if(!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
488                 pci_using_dac = 1;
489         } else {
490                 if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
491                         E1000_ERR("No usable DMA configuration, aborting\n");
492                         return err;
493                 }
494                 pci_using_dac = 0;
495         }
496
497         if((err = pci_request_regions(pdev, e1000_driver_name)))
498                 return err;
499
500         pci_set_master(pdev);
501
502         netdev = alloc_etherdev(sizeof(struct e1000_adapter));
503         if(!netdev) {
504                 err = -ENOMEM;
505                 goto err_alloc_etherdev;
506         }
507
508         SET_MODULE_OWNER(netdev);
509         SET_NETDEV_DEV(netdev, &pdev->dev);
510
511         pci_set_drvdata(pdev, netdev);
512         adapter = netdev_priv(netdev);
513         adapter->netdev = netdev;
514         adapter->pdev = pdev;
515         adapter->hw.back = adapter;
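        /* map the module's numeric debug level onto a netif_msg_* bitmask:
         * level n enables the n lowest message categories */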
516         adapter->msg_enable = (1 << debug) - 1;
517
518         mmio_start = pci_resource_start(pdev, BAR_0);
519         mmio_len = pci_resource_len(pdev, BAR_0);
520
521         adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
522         if(!adapter->hw.hw_addr) {
523                 err = -EIO;
524                 goto err_ioremap;
525         }
526
527         for(i = BAR_1; i <= BAR_5; i++) {
528                 if(pci_resource_len(pdev, i) == 0)
529                         continue;
530                 if(pci_resource_flags(pdev, i) & IORESOURCE_IO) {
531                         adapter->hw.io_base = pci_resource_start(pdev, i);
532                         break;
533                 }
534         }
535
536         netdev->open = &e1000_open;
537         netdev->stop = &e1000_close;
538         netdev->hard_start_xmit = &e1000_xmit_frame;
539         netdev->get_stats = &e1000_get_stats;
540         netdev->set_multicast_list = &e1000_set_multi;
541         netdev->set_mac_address = &e1000_set_mac;
542         netdev->change_mtu = &e1000_change_mtu;
543         netdev->do_ioctl = &e1000_ioctl;
544         e1000_set_ethtool_ops(netdev);
545         netdev->tx_timeout = &e1000_tx_timeout;
546         netdev->watchdog_timeo = 5 * HZ;
547 #ifdef CONFIG_E1000_NAPI
548         netdev->poll = &e1000_clean;
549         netdev->weight = 64;
550 #endif
551         netdev->vlan_rx_register = e1000_vlan_rx_register;
552         netdev->vlan_rx_add_vid = e1000_vlan_rx_add_vid;
553         netdev->vlan_rx_kill_vid = e1000_vlan_rx_kill_vid;
554 #ifdef CONFIG_NET_POLL_CONTROLLER
555         netdev->poll_controller = e1000_netpoll;
556 #endif
557         strcpy(netdev->name, pci_name(pdev));
558
559         netdev->mem_start = mmio_start;
560         netdev->mem_end = mmio_start + mmio_len;
561         netdev->base_addr = adapter->hw.io_base;
562
563         adapter->bd_number = cards_found;
564
565         /* setup the private structure */
566
567         if((err = e1000_sw_init(adapter)))
568                 goto err_sw_init;
569
570         if((err = e1000_check_phy_reset_block(&adapter->hw)))
571                 DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");
572
573         if(adapter->hw.mac_type >= e1000_82543) {
574                 netdev->features = NETIF_F_SG |
575                                    NETIF_F_HW_CSUM |
576                                    NETIF_F_HW_VLAN_TX |
577                                    NETIF_F_HW_VLAN_RX |
578                                    NETIF_F_HW_VLAN_FILTER;
579         }
580
581 #ifdef NETIF_F_TSO
582         if((adapter->hw.mac_type >= e1000_82544) &&
583            (adapter->hw.mac_type != e1000_82547))
584                 netdev->features |= NETIF_F_TSO;
585
586 #ifdef NETIF_F_TSO_IPV6
587         if(adapter->hw.mac_type > e1000_82547_rev_2)
588                 netdev->features |= NETIF_F_TSO_IPV6;
589 #endif
590 #endif
591         if(pci_using_dac)
592                 netdev->features |= NETIF_F_HIGHDMA;
593
594         /* hard_start_xmit is safe against parallel locking */
595         netdev->features |= NETIF_F_LLTX; 
596  
597         adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw);
598
599         /* before reading the EEPROM, reset the controller to 
600          * put the device in a known good starting state */
601         
602         e1000_reset_hw(&adapter->hw);
603
604         /* make sure the EEPROM is good */
605
606         if(e1000_validate_eeprom_checksum(&adapter->hw) < 0) {
607                 DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
608                 err = -EIO;
609                 goto err_eeprom;
610         }
611
612         /* copy the MAC address out of the EEPROM */
613
614         if(e1000_read_mac_addr(&adapter->hw))
615                 DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
616         memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
617
618         if(!is_valid_ether_addr(netdev->dev_addr)) {
619                 DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
620                 err = -EIO;
621                 goto err_eeprom;
622         }
623
624         e1000_read_part_num(&adapter->hw, &(adapter->part_num));
625
626         e1000_get_bus_info(&adapter->hw);
627
628         init_timer(&adapter->tx_fifo_stall_timer);
629         adapter->tx_fifo_stall_timer.function = &e1000_82547_tx_fifo_stall;
630         adapter->tx_fifo_stall_timer.data = (unsigned long) adapter;
631
632         init_timer(&adapter->watchdog_timer);
633         adapter->watchdog_timer.function = &e1000_watchdog;
634         adapter->watchdog_timer.data = (unsigned long) adapter;
635
636         INIT_WORK(&adapter->watchdog_task,
637                 (void (*)(void *))e1000_watchdog_task, adapter);
638
639         init_timer(&adapter->phy_info_timer);
640         adapter->phy_info_timer.function = &e1000_update_phy_info;
641         adapter->phy_info_timer.data = (unsigned long) adapter;
642
643         INIT_WORK(&adapter->tx_timeout_task,
644                 (void (*)(void *))e1000_tx_timeout_task, netdev);
645
646         /* we're going to reset, so assume we have no link for now */
647
648         netif_carrier_off(netdev);
649         netif_stop_queue(netdev);
650
651         e1000_check_options(adapter);
652
653         /* Initial Wake on LAN setting
654          * If APM wake is enabled in the EEPROM,
655          * enable the ACPI Magic Packet filter
656          */
657
658         switch(adapter->hw.mac_type) {
659         case e1000_82542_rev2_0:
660         case e1000_82542_rev2_1:
661         case e1000_82543:
662                 break;
663         case e1000_82544:
664                 e1000_read_eeprom(&adapter->hw,
665                         EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
666                 eeprom_apme_mask = E1000_EEPROM_82544_APM;
667                 break;
668         case e1000_82546:
669         case e1000_82546_rev_3:
670                 if((E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1)
671                    && (adapter->hw.media_type == e1000_media_type_copper)) {
672                         e1000_read_eeprom(&adapter->hw,
673                                 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
674                         break;
675                 }
676                 /* Fall Through */
677         default:
678                 e1000_read_eeprom(&adapter->hw,
679                         EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
680                 break;
681         }
682         if(eeprom_data & eeprom_apme_mask)
683                 adapter->wol |= E1000_WUFC_MAG;
684
685         /* reset the hardware with the new settings */
686         e1000_reset(adapter);
687
688         /* Let firmware know the driver has taken over */
689         switch(adapter->hw.mac_type) {
690         case e1000_82573:
691                 swsm = E1000_READ_REG(&adapter->hw, SWSM);
692                 E1000_WRITE_REG(&adapter->hw, SWSM,
693                                 swsm | E1000_SWSM_DRV_LOAD);
694                 break;
695         default:
696                 break;
697         }
698
699         strcpy(netdev->name, "eth%d");
700         if((err = register_netdev(netdev)))
701                 goto err_register;
702
703         DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");
704
705         cards_found++;
706         return 0;
707
708 err_register:
709 err_sw_init:
710 err_eeprom:
711         iounmap(adapter->hw.hw_addr);
712 err_ioremap:
713         free_netdev(netdev);
714 err_alloc_etherdev:
715         pci_release_regions(pdev);
716         return err;
717 }
718
719 /**
720  * e1000_remove - Device Removal Routine
721  * @pdev: PCI device information struct
722  *
723  * e1000_remove is called by the PCI subsystem to alert the driver
724  * that it should release a PCI device.  This could be caused by a
725  * Hot-Plug event, or because the driver is going to be removed from
726  * memory.
727  **/
728
729 static void __devexit
730 e1000_remove(struct pci_dev *pdev)
731 {
732         struct net_device *netdev = pci_get_drvdata(pdev);
733         struct e1000_adapter *adapter = netdev_priv(netdev);
734         uint32_t manc, swsm;
735
736         flush_scheduled_work();
737
738         if(adapter->hw.mac_type >= e1000_82540 &&
739            adapter->hw.media_type == e1000_media_type_copper) {
740                 manc = E1000_READ_REG(&adapter->hw, MANC);
741                 if(manc & E1000_MANC_SMBUS_EN) {
742                         manc |= E1000_MANC_ARP_EN;
743                         E1000_WRITE_REG(&adapter->hw, MANC, manc);
744                 }
745         }
746
747         switch(adapter->hw.mac_type) {
748         case e1000_82573:
749                 swsm = E1000_READ_REG(&adapter->hw, SWSM);
750                 E1000_WRITE_REG(&adapter->hw, SWSM,
751                                 swsm & ~E1000_SWSM_DRV_LOAD);
752                 break;
753
754         default:
755                 break;
756         }
757
758         unregister_netdev(netdev);
759
760         if(!e1000_check_phy_reset_block(&adapter->hw))
761                 e1000_phy_hw_reset(&adapter->hw);
762
763         iounmap(adapter->hw.hw_addr);
764         pci_release_regions(pdev);
765
766         free_netdev(netdev);
767
768         pci_disable_device(pdev);
769 }
770
771 /**
772  * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
773  * @adapter: board private structure to initialize
774  *
775  * e1000_sw_init initializes the Adapter private data structure.
776  * Fields are initialized based on PCI device information and
777  * OS network device settings (MTU size).
778  **/
779
780 static int __devinit
781 e1000_sw_init(struct e1000_adapter *adapter)
782 {
783         struct e1000_hw *hw = &adapter->hw;
784         struct net_device *netdev = adapter->netdev;
785         struct pci_dev *pdev = adapter->pdev;
786
787         /* PCI config space info */
788
789         hw->vendor_id = pdev->vendor;
790         hw->device_id = pdev->device;
791         hw->subsystem_vendor_id = pdev->subsystem_vendor;
792         hw->subsystem_id = pdev->subsystem_device;
793
794         pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
795
796         pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
797
798         adapter->rx_buffer_len = E1000_RXBUFFER_2048;
799         adapter->rx_ps_bsize0 = E1000_RXBUFFER_256;
800         hw->max_frame_size = netdev->mtu +
801                              ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
802         hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
803
804         /* identify the MAC */
805
806         if(e1000_set_mac_type(hw)) {
807                 DPRINTK(PROBE, ERR, "Unknown MAC Type\n");
808                 return -EIO;
809         }
810
811         /* initialize eeprom parameters */
812
813         if(e1000_init_eeprom_params(hw)) {
814                 E1000_ERR("EEPROM initialization failed\n");
815                 return -EIO;
816         }
817
818         switch(hw->mac_type) {
819         default:
820                 break;
821         case e1000_82541:
822         case e1000_82547:
823         case e1000_82541_rev_2:
824         case e1000_82547_rev_2:
825                 hw->phy_init_script = 1;
826                 break;
827         }
828
829         e1000_set_media_type(hw);
830
831         hw->wait_autoneg_complete = FALSE;
832         hw->tbi_compatibility_en = TRUE;
833         hw->adaptive_ifs = TRUE;
834
835         /* Copper options */
836
837         if(hw->media_type == e1000_media_type_copper) {
838                 hw->mdix = AUTO_ALL_MODES;
839                 hw->disable_polarity_correction = FALSE;
840                 hw->master_slave = E1000_MASTER_SLAVE;
841         }
842
843         atomic_set(&adapter->irq_sem, 1);
844         spin_lock_init(&adapter->stats_lock);
845         spin_lock_init(&adapter->tx_lock);
846
847         return 0;
848 }
849
850 /**
851  * e1000_open - Called when a network interface is made active
852  * @netdev: network interface device structure
853  *
854  * Returns 0 on success, negative value on failure
855  *
856  * The open entry point is called when a network interface is made
857  * active by the system (IFF_UP).  At this point all resources needed
858  * for transmit and receive operations are allocated, the interrupt
859  * handler is registered with the OS, the watchdog timer is started,
860  * and the stack is notified that the interface is ready.
861  **/
862
863 static int
864 e1000_open(struct net_device *netdev)
865 {
866         struct e1000_adapter *adapter = netdev_priv(netdev);
867         int err;
868
869         /* allocate transmit descriptors */
870
871         if((err = e1000_setup_tx_resources(adapter)))
872                 goto err_setup_tx;
873
874         /* allocate receive descriptors */
875
876         if((err = e1000_setup_rx_resources(adapter)))
877                 goto err_setup_rx;
878
879         if((err = e1000_up(adapter)))
880                 goto err_up;
881         adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
882         if((adapter->hw.mng_cookie.status &
883                           E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
884                 e1000_update_mng_vlan(adapter);
885         }
886
887         return E1000_SUCCESS;
888
889 err_up:
890         e1000_free_rx_resources(adapter);
891 err_setup_rx:
892         e1000_free_tx_resources(adapter);
893 err_setup_tx:
894         e1000_reset(adapter);
895
896         return err;
897 }
898
899 /**
900  * e1000_close - Disables a network interface
901  * @netdev: network interface device structure
902  *
903  * Returns 0, this is not allowed to fail
904  *
905  * The close entry point is called when an interface is de-activated
906  * by the OS.  The hardware is still under the drivers control, but
907  * needs to be disabled.  A global MAC reset is issued to stop the
908  * hardware, and all transmit and receive resources are freed.
909  **/
910
911 static int
912 e1000_close(struct net_device *netdev)
913 {
914         struct e1000_adapter *adapter = netdev_priv(netdev);
915
916         e1000_down(adapter);
917
918         e1000_free_tx_resources(adapter);
919         e1000_free_rx_resources(adapter);
920
921         if((adapter->hw.mng_cookie.status &
922                           E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
923                 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
924         }
925         return 0;
926 }
927
928 /**
929  * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
930  * @adapter: address of board private structure
931  * @start: address of beginning of memory
932  * @len: length of memory
933  **/
934 static inline boolean_t
935 e1000_check_64k_bound(struct e1000_adapter *adapter,
936                       void *start, unsigned long len)
937 {
938         unsigned long begin = (unsigned long) start;
939         unsigned long end = begin + len;
940
941         /* First revisions of the 82545 and 82546 must not let any memory
942          * write location cross a 64k boundary, due to errata 23 */
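        /* begin and end - 1 fall in the same 64 KB region exactly when their
         * address bits above bit 15 agree, so a non-zero value of
         * (begin ^ (end - 1)) >> 16 means the buffer straddles a boundary */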
943         if (adapter->hw.mac_type == e1000_82545 ||
944             adapter->hw.mac_type == e1000_82546) {
945                 return ((begin ^ (end - 1)) >> 16) != 0 ? FALSE : TRUE;
946         }
947
948         return TRUE;
949 }
950
951 /**
952  * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
953  * @adapter: board private structure
954  *
955  * Return 0 on success, negative on failure
956  **/
957
958 int
959 e1000_setup_tx_resources(struct e1000_adapter *adapter)
960 {
961         struct e1000_desc_ring *txdr = &adapter->tx_ring;
962         struct pci_dev *pdev = adapter->pdev;
963         int size;
964
965         size = sizeof(struct e1000_buffer) * txdr->count;
966         txdr->buffer_info = vmalloc(size);
967         if(!txdr->buffer_info) {
968                 DPRINTK(PROBE, ERR,
969                 "Unable to allocate memory for the transmit descriptor ring\n");
970                 return -ENOMEM;
971         }
972         memset(txdr->buffer_info, 0, size);
973
974         /* round up to nearest 4K */
975
976         txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
977         E1000_ROUNDUP(txdr->size, 4096);
978
979         txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
980         if(!txdr->desc) {
981 setup_tx_desc_die:
982                 vfree(txdr->buffer_info);
983                 DPRINTK(PROBE, ERR,
984                 "Unable to allocate memory for the transmit descriptor ring\n");
985                 return -ENOMEM;
986         }
987
988         /* Fix for errata 23, can't cross 64kB boundary */
989         if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
990                 void *olddesc = txdr->desc;
991                 dma_addr_t olddma = txdr->dma;
992                 DPRINTK(TX_ERR, ERR, "txdr align check failed: %u bytes "
993                                      "at %p\n", txdr->size, txdr->desc);
994                 /* Try again, without freeing the previous */
995                 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
996                 if(!txdr->desc) {
997                 /* Failed allocation, critical failure */
998                         pci_free_consistent(pdev, txdr->size, olddesc, olddma);
999                         goto setup_tx_desc_die;
1000                 }
1001
1002                 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1003                         /* give up */
1004                         pci_free_consistent(pdev, txdr->size, txdr->desc,
1005                                             txdr->dma);
1006                         pci_free_consistent(pdev, txdr->size, olddesc, olddma);
1007                         DPRINTK(PROBE, ERR,
1008                                 "Unable to allocate aligned memory "
1009                                 "for the transmit descriptor ring\n");
1010                         vfree(txdr->buffer_info);
1011                         return -ENOMEM;
1012                 } else {
1013                         /* Free old allocation, new allocation was successful */
1014                         pci_free_consistent(pdev, txdr->size, olddesc, olddma);
1015                 }
1016         }
1017         memset(txdr->desc, 0, txdr->size);
1018
1019         txdr->next_to_use = 0;
1020         txdr->next_to_clean = 0;
1021
1022         return 0;
1023 }
1024
1025 /**
1026  * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
1027  * @adapter: board private structure
1028  *
1029  * Configure the Tx unit of the MAC after a reset.
1030  **/
1031
1032 static void
1033 e1000_configure_tx(struct e1000_adapter *adapter)
1034 {
1035         uint64_t tdba = adapter->tx_ring.dma;
1036         uint32_t tdlen = adapter->tx_ring.count * sizeof(struct e1000_tx_desc);
1037         uint32_t tctl, tipg;
1038
1039         E1000_WRITE_REG(&adapter->hw, TDBAL, (tdba & 0x00000000ffffffffULL));
1040         E1000_WRITE_REG(&adapter->hw, TDBAH, (tdba >> 32));
1041
1042         E1000_WRITE_REG(&adapter->hw, TDLEN, tdlen);
1043
1044         /* Setup the HW Tx Head and Tail descriptor pointers */
1045
1046         E1000_WRITE_REG(&adapter->hw, TDH, 0);
1047         E1000_WRITE_REG(&adapter->hw, TDT, 0);
1048
1049         /* Set the default values for the Tx Inter Packet Gap timer */
1050
1051         switch (adapter->hw.mac_type) {
1052         case e1000_82542_rev2_0:
1053         case e1000_82542_rev2_1:
1054                 tipg = DEFAULT_82542_TIPG_IPGT;
1055                 tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
1056                 tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
1057                 break;
1058         default:
1059                 if(adapter->hw.media_type == e1000_media_type_fiber ||
1060                    adapter->hw.media_type == e1000_media_type_internal_serdes)
1061                         tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1062                 else
1063                         tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1064                 tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
1065                 tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
1066         }
1067         E1000_WRITE_REG(&adapter->hw, TIPG, tipg);
1068
1069         /* Set the Tx Interrupt Delay register */
1070
1071         E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay);
1072         if(adapter->hw.mac_type >= e1000_82540)
1073                 E1000_WRITE_REG(&adapter->hw, TADV, adapter->tx_abs_int_delay);
1074
1075         /* Program the Transmit Control Register */
1076
1077         tctl = E1000_READ_REG(&adapter->hw, TCTL);
1078
1079         tctl &= ~E1000_TCTL_CT;
1080         tctl |= E1000_TCTL_EN | E1000_TCTL_PSP |
1081                 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1082
1083         E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
1084
1085         e1000_config_collision_dist(&adapter->hw);
1086
1087         /* Setup Transmit Descriptor Settings for eop descriptor */
1088         adapter->txd_cmd = E1000_TXD_CMD_IDE | E1000_TXD_CMD_EOP |
1089                 E1000_TXD_CMD_IFCS;
1090
1091         if(adapter->hw.mac_type < e1000_82543)
1092                 adapter->txd_cmd |= E1000_TXD_CMD_RPS;
1093         else
1094                 adapter->txd_cmd |= E1000_TXD_CMD_RS;
1095
1096         /* Cache if we're 82544 running in PCI-X because we'll
1097          * need this to apply a workaround later in the send path. */
1098         if(adapter->hw.mac_type == e1000_82544 &&
1099            adapter->hw.bus_type == e1000_bus_type_pcix)
1100                 adapter->pcix_82544 = 1;
1101 }
1102
1103 /**
1104  * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
1105  * @adapter: board private structure
1106  *
1107  * Returns 0 on success, negative on failure
1108  **/
1109
1110 int
1111 e1000_setup_rx_resources(struct e1000_adapter *adapter)
1112 {
1113         struct e1000_desc_ring *rxdr = &adapter->rx_ring;
1114         struct pci_dev *pdev = adapter->pdev;
1115         int size, desc_len;
1116
1117         size = sizeof(struct e1000_buffer) * rxdr->count;
1118         rxdr->buffer_info = vmalloc(size);
1119         if(!rxdr->buffer_info) {
1120                 DPRINTK(PROBE, ERR,
1121                 "Unable to allocate memory for the receive descriptor ring\n");
1122                 return -ENOMEM;
1123         }
1124         memset(rxdr->buffer_info, 0, size);
1125
1126         size = sizeof(struct e1000_ps_page) * rxdr->count;
1127         rxdr->ps_page = kmalloc(size, GFP_KERNEL);
1128         if(!rxdr->ps_page) {
1129                 vfree(rxdr->buffer_info);
1130                 DPRINTK(PROBE, ERR,
1131                 "Unable to allocate memory for the receive descriptor ring\n");
1132                 return -ENOMEM;
1133         }
1134         memset(rxdr->ps_page, 0, size);
1135
1136         size = sizeof(struct e1000_ps_page_dma) * rxdr->count;
1137         rxdr->ps_page_dma = kmalloc(size, GFP_KERNEL);
1138         if(!rxdr->ps_page_dma) {
1139                 vfree(rxdr->buffer_info);
1140                 kfree(rxdr->ps_page);
1141                 DPRINTK(PROBE, ERR,
1142                 "Unable to allocate memory for the receive descriptor ring\n");
1143                 return -ENOMEM;
1144         }
1145         memset(rxdr->ps_page_dma, 0, size);
1146
1147         if(adapter->hw.mac_type <= e1000_82547_rev_2)
1148                 desc_len = sizeof(struct e1000_rx_desc);
1149         else
1150                 desc_len = sizeof(union e1000_rx_desc_packet_split);
1151
1152         /* Round up to nearest 4K */
1153
1154         rxdr->size = rxdr->count * desc_len;
1155         E1000_ROUNDUP(rxdr->size, 4096);
1156
1157         rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
1158
1159         if(!rxdr->desc) {
1160 setup_rx_desc_die:
1161                 vfree(rxdr->buffer_info);
1162                 kfree(rxdr->ps_page);
1163                 kfree(rxdr->ps_page_dma);
1164                 DPRINTK(PROBE, ERR,
1165                 "Unable to allocate memory for the receive descriptor ring\n");
1166                 return -ENOMEM;
1167         }
1168
1169         /* Fix for errata 23, can't cross 64kB boundary */
1170         if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1171                 void *olddesc = rxdr->desc;
1172                 dma_addr_t olddma = rxdr->dma;
1173                 DPRINTK(RX_ERR, ERR, "rxdr align check failed: %u bytes "
1174                                      "at %p\n", rxdr->size, rxdr->desc);
1175                 /* Try again, without freeing the previous */
1176                 rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
1177                 if(!rxdr->desc) {
1178                 /* Failed allocation, critical failure */
1179                         pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
1180                         goto setup_rx_desc_die;
1181                 }
1182
1183                 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1184                         /* give up */
1185                         pci_free_consistent(pdev, rxdr->size, rxdr->desc,
1186                                             rxdr->dma);
1187                         pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
1188                         DPRINTK(PROBE, ERR,
1189                                 "Unable to allocate aligned memory "
1190                                 "for the receive descriptor ring\n");
1191                         vfree(rxdr->buffer_info);
1192                         kfree(rxdr->ps_page);
1193                         kfree(rxdr->ps_page_dma);
1194                         return -ENOMEM;
1195                 } else {
1196                         /* Free old allocation, new allocation was successful */
1197                         pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
1198                 }
1199         }
1200         memset(rxdr->desc, 0, rxdr->size);
1201
1202         rxdr->next_to_clean = 0;
1203         rxdr->next_to_use = 0;
1204
1205         return 0;
1206 }
1207
1208 /**
1209  * e1000_setup_rctl - configure the receive control registers
1210  * @adapter: Board private structure
1211  **/
1212
1213 static void
1214 e1000_setup_rctl(struct e1000_adapter *adapter)
1215 {
1216         uint32_t rctl, rfctl;
1217         uint32_t psrctl = 0;
1218
1219         rctl = E1000_READ_REG(&adapter->hw, RCTL);
1220
1221         rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1222
1223         rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
1224                 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
1225                 (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
1226
1227         if(adapter->hw.tbi_compatibility_on == 1)
1228                 rctl |= E1000_RCTL_SBP;
1229         else
1230                 rctl &= ~E1000_RCTL_SBP;
1231
1232         if (adapter->netdev->mtu <= ETH_DATA_LEN)
1233                 rctl &= ~E1000_RCTL_LPE;
1234         else
1235                 rctl |= E1000_RCTL_LPE;
1236
1237         /* Setup buffer sizes */
1238         if(adapter->hw.mac_type == e1000_82573) {
1239                 /* We can now specify buffers in 1K increments.
1240                  * BSIZE and BSEX are ignored in this case. */
1241                 rctl |= adapter->rx_buffer_len << 0x11;
1242         } else {
1243                 rctl &= ~E1000_RCTL_SZ_4096;
1244                 rctl |= E1000_RCTL_BSEX; 
1245                 switch (adapter->rx_buffer_len) {
1246                 case E1000_RXBUFFER_2048:
1247                 default:
1248                         rctl |= E1000_RCTL_SZ_2048;
1249                         rctl &= ~E1000_RCTL_BSEX;
1250                         break;
1251                 case E1000_RXBUFFER_4096:
1252                         rctl |= E1000_RCTL_SZ_4096;
1253                         break;
1254                 case E1000_RXBUFFER_8192:
1255                         rctl |= E1000_RCTL_SZ_8192;
1256                         break;
1257                 case E1000_RXBUFFER_16384:
1258                         rctl |= E1000_RCTL_SZ_16384;
1259                         break;
1260                 }
1261         }
1262
1263 #ifdef CONFIG_E1000_PACKET_SPLIT
1264         /* 82571 and greater support packet-split where the protocol
1265          * header is placed in skb->data and the packet data is
1266          * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
1267          * In the case of a non-split, skb->data is linearly filled,
1268          * followed by the page buffers.  Therefore, skb->data is
1269          * sized to hold the largest protocol header.
1270          */
1271         adapter->rx_ps = (adapter->hw.mac_type > e1000_82547_rev_2) 
1272                           && (adapter->netdev->mtu 
1273                               < ((3 * PAGE_SIZE) + adapter->rx_ps_bsize0));
1274 #endif
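        /* the check above enables packet split only while the MTU fits in one
         * rx_ps_bsize0 header buffer plus three pages of payload; larger MTUs
         * fall back to the non-split receive path */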
1275         if(adapter->rx_ps) {
1276                 /* Configure extra packet-split registers */
1277                 rfctl = E1000_READ_REG(&adapter->hw, RFCTL);
1278                 rfctl |= E1000_RFCTL_EXTEN;
1279                 /* disable IPv6 packet split support */
1280                 rfctl |= E1000_RFCTL_IPV6_DIS;
1281                 E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl);
1282
1283                 rctl |= E1000_RCTL_DTYP_PS | E1000_RCTL_SECRC;
1284                 
1285                 psrctl |= adapter->rx_ps_bsize0 >>
1286                         E1000_PSRCTL_BSIZE0_SHIFT;
1287                 psrctl |= PAGE_SIZE >>
1288                         E1000_PSRCTL_BSIZE1_SHIFT;
1289                 psrctl |= PAGE_SIZE <<
1290                         E1000_PSRCTL_BSIZE2_SHIFT;
1291                 psrctl |= PAGE_SIZE <<
1292                         E1000_PSRCTL_BSIZE3_SHIFT;
1293
1294                 E1000_WRITE_REG(&adapter->hw, PSRCTL, psrctl);
1295         }
1296
1297         E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
1298 }
1299
1300 /**
1301  * e1000_configure_rx - Configure 8254x Receive Unit after Reset
1302  * @adapter: board private structure
1303  *
1304  * Configure the Rx unit of the MAC after a reset.
1305  **/
1306
1307 static void
1308 e1000_configure_rx(struct e1000_adapter *adapter)
1309 {
1310         uint64_t rdba = adapter->rx_ring.dma;
1311         uint32_t rdlen, rctl, rxcsum;
1312
1313         if(adapter->rx_ps) {
1314                 rdlen = adapter->rx_ring.count *
1315                         sizeof(union e1000_rx_desc_packet_split);
1316                 adapter->clean_rx = e1000_clean_rx_irq_ps;
1317                 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
1318         } else {
1319                 rdlen = adapter->rx_ring.count * sizeof(struct e1000_rx_desc);
1320                 adapter->clean_rx = e1000_clean_rx_irq;
1321                 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
1322         }
1323
1324         /* disable receives while setting up the descriptors */
1325         rctl = E1000_READ_REG(&adapter->hw, RCTL);
1326         E1000_WRITE_REG(&adapter->hw, RCTL, rctl & ~E1000_RCTL_EN);
1327
1328         /* set the Receive Delay Timer Register */
1329         E1000_WRITE_REG(&adapter->hw, RDTR, adapter->rx_int_delay);
1330
1331         if(adapter->hw.mac_type >= e1000_82540) {
1332                 E1000_WRITE_REG(&adapter->hw, RADV, adapter->rx_abs_int_delay);
1333                 if(adapter->itr > 1)
1334                         E1000_WRITE_REG(&adapter->hw, ITR,
1335                                 1000000000 / (adapter->itr * 256));
1336         }
1337
1338         /* Setup the Base and Length of the Rx Descriptor Ring */
1339         E1000_WRITE_REG(&adapter->hw, RDBAL, (rdba & 0x00000000ffffffffULL));
1340         E1000_WRITE_REG(&adapter->hw, RDBAH, (rdba >> 32));
1341
1342         E1000_WRITE_REG(&adapter->hw, RDLEN, rdlen);
1343
1344         /* Setup the HW Rx Head and Tail Descriptor Pointers */
1345         E1000_WRITE_REG(&adapter->hw, RDH, 0);
1346         E1000_WRITE_REG(&adapter->hw, RDT, 0);
1347
1348         /* Enable 82543 Receive Checksum Offload for TCP and UDP */
1349         if(adapter->hw.mac_type >= e1000_82543) {
1350                 rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM);
1351                 if(adapter->rx_csum == TRUE) {
1352                         rxcsum |= E1000_RXCSUM_TUOFL;
1353
1354                         /* Enable 82573 IPv4 payload checksum for UDP fragments
1355                          * Must be used in conjunction with packet-split. */
1356                         if((adapter->hw.mac_type > e1000_82547_rev_2) && 
1357                            (adapter->rx_ps)) {
1358                                 rxcsum |= E1000_RXCSUM_IPPCSE;
1359                         }
1360                 } else {
1361                         rxcsum &= ~E1000_RXCSUM_TUOFL;
1362                         /* don't need to clear IPPCSE as it defaults to 0 */
1363                 }
1364                 E1000_WRITE_REG(&adapter->hw, RXCSUM, rxcsum);
1365         }
1366
1367         if (adapter->hw.mac_type == e1000_82573)
1368                 E1000_WRITE_REG(&adapter->hw, ERT, 0x0100);
1369
1370         /* Enable Receives */
1371         E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
1372 }
1373
1374 /**
1375  * e1000_free_tx_resources - Free Tx Resources
1376  * @adapter: board private structure
1377  *
1378  * Free all transmit software resources
1379  **/
1380
1381 void
1382 e1000_free_tx_resources(struct e1000_adapter *adapter)
1383 {
1384         struct pci_dev *pdev = adapter->pdev;
1385
1386         e1000_clean_tx_ring(adapter);
1387
1388         vfree(adapter->tx_ring.buffer_info);
1389         adapter->tx_ring.buffer_info = NULL;
1390
1391         pci_free_consistent(pdev, adapter->tx_ring.size,
1392                             adapter->tx_ring.desc, adapter->tx_ring.dma);
1393
1394         adapter->tx_ring.desc = NULL;
1395 }
1396
1397 static inline void
1398 e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1399                         struct e1000_buffer *buffer_info)
1400 {
1401         if(buffer_info->dma) {
1402                 pci_unmap_page(adapter->pdev,
1403                                 buffer_info->dma,
1404                                 buffer_info->length,
1405                                 PCI_DMA_TODEVICE);
1406                 buffer_info->dma = 0;
1407         }
1408         if(buffer_info->skb) {
1409                 dev_kfree_skb_any(buffer_info->skb);
1410                 buffer_info->skb = NULL;
1411         }
1412 }
1413
1414 /**
1415  * e1000_clean_tx_ring - Free Tx Buffers
1416  * @adapter: board private structure
1417  **/
1418
1419 static void
1420 e1000_clean_tx_ring(struct e1000_adapter *adapter)
1421 {
1422         struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
1423         struct e1000_buffer *buffer_info;
1424         unsigned long size;
1425         unsigned int i;
1426
1427         /* Free all the Tx ring sk_buffs */
1428
1429         if (likely(adapter->previous_buffer_info.skb != NULL)) {
1430                 e1000_unmap_and_free_tx_resource(adapter,
1431                                 &adapter->previous_buffer_info);
1432         }
1433
1434         for(i = 0; i < tx_ring->count; i++) {
1435                 buffer_info = &tx_ring->buffer_info[i];
1436                 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
1437         }
1438
1439         size = sizeof(struct e1000_buffer) * tx_ring->count;
1440         memset(tx_ring->buffer_info, 0, size);
1441
1442         /* Zero out the descriptor ring */
1443
1444         memset(tx_ring->desc, 0, tx_ring->size);
1445
1446         tx_ring->next_to_use = 0;
1447         tx_ring->next_to_clean = 0;
1448
1449         E1000_WRITE_REG(&adapter->hw, TDH, 0);
1450         E1000_WRITE_REG(&adapter->hw, TDT, 0);
1451 }
1452
1453 /**
1454  * e1000_free_rx_resources - Free Rx Resources
1455  * @adapter: board private structure
1456  *
1457  * Free all receive software resources
1458  **/
1459
1460 void
1461 e1000_free_rx_resources(struct e1000_adapter *adapter)
1462 {
1463         struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
1464         struct pci_dev *pdev = adapter->pdev;
1465
1466         e1000_clean_rx_ring(adapter);
1467
1468         vfree(rx_ring->buffer_info);
1469         rx_ring->buffer_info = NULL;
1470         kfree(rx_ring->ps_page);
1471         rx_ring->ps_page = NULL;
1472         kfree(rx_ring->ps_page_dma);
1473         rx_ring->ps_page_dma = NULL;
1474
1475         pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
1476
1477         rx_ring->desc = NULL;
1478 }
1479
1480 /**
1481  * e1000_clean_rx_ring - Free Rx Buffers
1482  * @adapter: board private structure
1483  **/
1484
1485 static void
1486 e1000_clean_rx_ring(struct e1000_adapter *adapter)
1487 {
1488         struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
1489         struct e1000_buffer *buffer_info;
1490         struct e1000_ps_page *ps_page;
1491         struct e1000_ps_page_dma *ps_page_dma;
1492         struct pci_dev *pdev = adapter->pdev;
1493         unsigned long size;
1494         unsigned int i, j;
1495
1496         /* Free all the Rx ring sk_buffs */
1497
1498         for(i = 0; i < rx_ring->count; i++) {
1499                 buffer_info = &rx_ring->buffer_info[i];
1500                 if(buffer_info->skb) {
1501                         ps_page = &rx_ring->ps_page[i];
1502                         ps_page_dma = &rx_ring->ps_page_dma[i];
1503                         pci_unmap_single(pdev,
1504                                          buffer_info->dma,
1505                                          buffer_info->length,
1506                                          PCI_DMA_FROMDEVICE);
1507
1508                         dev_kfree_skb(buffer_info->skb);
1509                         buffer_info->skb = NULL;
1510
1511                         for(j = 0; j < PS_PAGE_BUFFERS; j++) {
1512                                 if(!ps_page->ps_page[j]) break;
1513                                 pci_unmap_single(pdev,
1514                                                  ps_page_dma->ps_page_dma[j],
1515                                                  PAGE_SIZE, PCI_DMA_FROMDEVICE);
1516                                 ps_page_dma->ps_page_dma[j] = 0;
1517                                 put_page(ps_page->ps_page[j]);
1518                                 ps_page->ps_page[j] = NULL;
1519                         }
1520                 }
1521         }
1522
1523         size = sizeof(struct e1000_buffer) * rx_ring->count;
1524         memset(rx_ring->buffer_info, 0, size);
1525         size = sizeof(struct e1000_ps_page) * rx_ring->count;
1526         memset(rx_ring->ps_page, 0, size);
1527         size = sizeof(struct e1000_ps_page_dma) * rx_ring->count;
1528         memset(rx_ring->ps_page_dma, 0, size);
1529
1530         /* Zero out the descriptor ring */
1531
1532         memset(rx_ring->desc, 0, rx_ring->size);
1533
1534         rx_ring->next_to_clean = 0;
1535         rx_ring->next_to_use = 0;
1536
1537         E1000_WRITE_REG(&adapter->hw, RDH, 0);
1538         E1000_WRITE_REG(&adapter->hw, RDT, 0);
1539 }
1540
1541 /* The 82542 2.0 (revision 2) needs to have the receive unit in reset
1542  * and memory write and invalidate disabled for certain operations
1543  */
1544 static void
1545 e1000_enter_82542_rst(struct e1000_adapter *adapter)
1546 {
1547         struct net_device *netdev = adapter->netdev;
1548         uint32_t rctl;
1549
1550         e1000_pci_clear_mwi(&adapter->hw);
1551
1552         rctl = E1000_READ_REG(&adapter->hw, RCTL);
1553         rctl |= E1000_RCTL_RST;
1554         E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
1555         E1000_WRITE_FLUSH(&adapter->hw);
1556         mdelay(5);
1557
1558         if(netif_running(netdev))
1559                 e1000_clean_rx_ring(adapter);
1560 }
1561
1562 static void
1563 e1000_leave_82542_rst(struct e1000_adapter *adapter)
1564 {
1565         struct net_device *netdev = adapter->netdev;
1566         uint32_t rctl;
1567
1568         rctl = E1000_READ_REG(&adapter->hw, RCTL);
1569         rctl &= ~E1000_RCTL_RST;
1570         E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
1571         E1000_WRITE_FLUSH(&adapter->hw);
1572         mdelay(5);
1573
1574         if(adapter->hw.pci_cmd_word & PCI_COMMAND_INVALIDATE)
1575                 e1000_pci_set_mwi(&adapter->hw);
1576
1577         if(netif_running(netdev)) {
1578                 e1000_configure_rx(adapter);
1579                 e1000_alloc_rx_buffers(adapter);
1580         }
1581 }
1582
1583 /**
1584  * e1000_set_mac - Change the Ethernet Address of the NIC
1585  * @netdev: network interface device structure
1586  * @p: pointer to an address structure
1587  *
1588  * Returns 0 on success, negative on failure
1589  **/
1590
1591 static int
1592 e1000_set_mac(struct net_device *netdev, void *p)
1593 {
1594         struct e1000_adapter *adapter = netdev_priv(netdev);
1595         struct sockaddr *addr = p;
1596
1597         if(!is_valid_ether_addr(addr->sa_data))
1598                 return -EADDRNOTAVAIL;
1599
1600         /* 82542 2.0 needs to be in reset to write receive address registers */
1601
1602         if(adapter->hw.mac_type == e1000_82542_rev2_0)
1603                 e1000_enter_82542_rst(adapter);
1604
1605         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1606         memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
1607
1608         e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);
1609
1610         if(adapter->hw.mac_type == e1000_82542_rev2_0)
1611                 e1000_leave_82542_rst(adapter);
1612
1613         return 0;
1614 }
1615
1616 /**
1617  * e1000_set_multi - Multicast and Promiscuous mode set
1618  * @netdev: network interface device structure
1619  *
1620  * The set_multi entry point is called whenever the multicast address
1621  * list or the network interface flags are updated.  This routine is
1622  * responsible for configuring the hardware for proper multicast,
1623  * promiscuous mode, and all-multi behavior.
1624  **/
1625
1626 static void
1627 e1000_set_multi(struct net_device *netdev)
1628 {
1629         struct e1000_adapter *adapter = netdev_priv(netdev);
1630         struct e1000_hw *hw = &adapter->hw;
1631         struct dev_mc_list *mc_ptr;
1632         unsigned long flags;
1633         uint32_t rctl;
1634         uint32_t hash_value;
1635         int i;
1636
1637         spin_lock_irqsave(&adapter->tx_lock, flags);
1638
1639         /* Check for Promiscuous and All Multicast modes */
1640
1641         rctl = E1000_READ_REG(hw, RCTL);
1642
1643         if(netdev->flags & IFF_PROMISC) {
1644                 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
1645         } else if(netdev->flags & IFF_ALLMULTI) {
1646                 rctl |= E1000_RCTL_MPE;
1647                 rctl &= ~E1000_RCTL_UPE;
1648         } else {
1649                 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
1650         }
1651
1652         E1000_WRITE_REG(hw, RCTL, rctl);
1653
1654         /* 82542 2.0 needs to be in reset to write receive address registers */
1655
1656         if(hw->mac_type == e1000_82542_rev2_0)
1657                 e1000_enter_82542_rst(adapter);
1658
1659         /* load the first 14 multicast addresses into the exact filters 1-14;
1660          * RAR 0 is used for the station MAC address.
1661          * If there are fewer than 14 addresses, clear the remaining filters.
1662          */
1663         mc_ptr = netdev->mc_list;
1664
1665         for(i = 1; i < E1000_RAR_ENTRIES; i++) {
1666                 if(mc_ptr) {
1667                         e1000_rar_set(hw, mc_ptr->dmi_addr, i);
1668                         mc_ptr = mc_ptr->next;
1669                 } else {
1670                         E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
1671                         E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
1672                 }
1673         }
1674
1675         /* clear the old settings from the multicast hash table */
1676
1677         for(i = 0; i < E1000_NUM_MTA_REGISTERS; i++)
1678                 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
1679
1680         /* load any remaining addresses into the hash table */
1681
1682         for(; mc_ptr; mc_ptr = mc_ptr->next) {
1683                 hash_value = e1000_hash_mc_addr(hw, mc_ptr->dmi_addr);
1684                 e1000_mta_set(hw, hash_value);
1685         }
1686
1687         if(hw->mac_type == e1000_82542_rev2_0)
1688                 e1000_leave_82542_rst(adapter);
1689
1690         spin_unlock_irqrestore(&adapter->tx_lock, flags);
1691 }
1692
1693 /* Need to wait a few seconds after link up to get diagnostic information from
1694  * the phy */
1695
1696 static void
1697 e1000_update_phy_info(unsigned long data)
1698 {
1699         struct e1000_adapter *adapter = (struct e1000_adapter *) data;
1700         e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
1701 }
1702
1703 /**
1704  * e1000_82547_tx_fifo_stall - Timer Call-back
1705  * @data: pointer to adapter cast into an unsigned long
1706  **/
1707
1708 static void
1709 e1000_82547_tx_fifo_stall(unsigned long data)
1710 {
1711         struct e1000_adapter *adapter = (struct e1000_adapter *) data;
1712         struct net_device *netdev = adapter->netdev;
1713         uint32_t tctl;
1714
1715         if(atomic_read(&adapter->tx_fifo_stall)) {
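                /* The FIFO has drained only when the descriptor ring head/tail
                 * and the on-chip Tx FIFO head/tail registers (including the
                 * saved copies) all match; only then is it safe to disable
                 * transmits, rewind the FIFO pointers, and wake the queue. */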
1716                 if((E1000_READ_REG(&adapter->hw, TDT) ==
1717                     E1000_READ_REG(&adapter->hw, TDH)) &&
1718                    (E1000_READ_REG(&adapter->hw, TDFT) ==
1719                     E1000_READ_REG(&adapter->hw, TDFH)) &&
1720                    (E1000_READ_REG(&adapter->hw, TDFTS) ==
1721                     E1000_READ_REG(&adapter->hw, TDFHS))) {
1722                         tctl = E1000_READ_REG(&adapter->hw, TCTL);
1723                         E1000_WRITE_REG(&adapter->hw, TCTL,
1724                                         tctl & ~E1000_TCTL_EN);
1725                         E1000_WRITE_REG(&adapter->hw, TDFT,
1726                                         adapter->tx_head_addr);
1727                         E1000_WRITE_REG(&adapter->hw, TDFH,
1728                                         adapter->tx_head_addr);
1729                         E1000_WRITE_REG(&adapter->hw, TDFTS,
1730                                         adapter->tx_head_addr);
1731                         E1000_WRITE_REG(&adapter->hw, TDFHS,
1732                                         adapter->tx_head_addr);
1733                         E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
1734                         E1000_WRITE_FLUSH(&adapter->hw);
1735
1736                         adapter->tx_fifo_head = 0;
1737                         atomic_set(&adapter->tx_fifo_stall, 0);
1738                         netif_wake_queue(netdev);
1739                 } else {
1740                         mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
1741                 }
1742         }
1743 }
1744
1745 /**
1746  * e1000_watchdog - Timer Call-back
1747  * @data: pointer to adapter cast into an unsigned long
1748  **/
1749 static void
1750 e1000_watchdog(unsigned long data)
1751 {
1752         struct e1000_adapter *adapter = (struct e1000_adapter *) data;
1753
1754         /* Do the rest outside of interrupt context */
1755         schedule_work(&adapter->watchdog_task);
1756 }
1757
1758 static void
1759 e1000_watchdog_task(struct e1000_adapter *adapter)
1760 {
1761         struct net_device *netdev = adapter->netdev;
1762         struct e1000_desc_ring *txdr = &adapter->tx_ring;
1763         uint32_t link;
1764
1765         e1000_check_for_link(&adapter->hw);
1766         if (adapter->hw.mac_type == e1000_82573) {
1767                 e1000_enable_tx_pkt_filtering(&adapter->hw);
1768                 if(adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)
1769                         e1000_update_mng_vlan(adapter);
1770         }       
1771
1772         if((adapter->hw.media_type == e1000_media_type_internal_serdes) &&
1773            !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE))
1774                 link = !adapter->hw.serdes_link_down;
1775         else
1776                 link = E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU;
1777
1778         if(link) {
1779                 if(!netif_carrier_ok(netdev)) {
1780                         e1000_get_speed_and_duplex(&adapter->hw,
1781                                                    &adapter->link_speed,
1782                                                    &adapter->link_duplex);
1783
1784                         DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s\n",
1785                                adapter->link_speed,
1786                                adapter->link_duplex == FULL_DUPLEX ?
1787                                "Full Duplex" : "Half Duplex");
1788
1789                         netif_carrier_on(netdev);
1790                         netif_wake_queue(netdev);
1791                         mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ);
1792                         adapter->smartspeed = 0;
1793                 }
1794         } else {
1795                 if(netif_carrier_ok(netdev)) {
1796                         adapter->link_speed = 0;
1797                         adapter->link_duplex = 0;
1798                         DPRINTK(LINK, INFO, "NIC Link is Down\n");
1799                         netif_carrier_off(netdev);
1800                         netif_stop_queue(netdev);
1801                         mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ);
1802                 }
1803
1804                 e1000_smartspeed(adapter);
1805         }
1806
1807         e1000_update_stats(adapter);
1808
1809         adapter->hw.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
1810         adapter->tpt_old = adapter->stats.tpt;
1811         adapter->hw.collision_delta = adapter->stats.colc - adapter->colc_old;
1812         adapter->colc_old = adapter->stats.colc;
1813
1814         adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
1815         adapter->gorcl_old = adapter->stats.gorcl;
1816         adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
1817         adapter->gotcl_old = adapter->stats.gotcl;
1818
1819         e1000_update_adaptive(&adapter->hw);
1820
1821         if(!netif_carrier_ok(netdev)) {
1822                 if(E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
1823                         /* We've lost link, so the controller stops DMA,
1824                          * but we've got queued Tx work that's never going
1825                          * to get done, so reset controller to flush Tx.
1826                          * (Do the reset outside of interrupt context). */
1827                         schedule_work(&adapter->tx_timeout_task);
1828                 }
1829         }
1830
1831         /* Dynamic mode for Interrupt Throttle Rate (ITR) */
1832         if(adapter->hw.mac_type >= e1000_82540 && adapter->itr == 1) {
1833                 /* Symmetric Tx/Rx gets a reduced ITR=2000; Total
1834                  * asymmetrical Tx or Rx gets ITR=8000; everyone
1835                  * else is between 2000-8000. */
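                /* For example, perfectly symmetric traffic (gotcl == gorcl)
                 * gives dif == 0 and itr == 2000, while traffic flowing in
                 * only one direction gives dif == goc and itr == 8000.  The
                 * write below converts interrupts/sec into the ITR register's
                 * 256ns units: 10^9 / (itr * 256). */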
1836                 uint32_t goc = (adapter->gotcl + adapter->gorcl) / 10000;
1837                 uint32_t dif = (adapter->gotcl > adapter->gorcl ? 
1838                         adapter->gotcl - adapter->gorcl :
1839                         adapter->gorcl - adapter->gotcl) / 10000;
1840                 uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
1841                 E1000_WRITE_REG(&adapter->hw, ITR, 1000000000 / (itr * 256));
1842         }
1843
1844         /* Cause software interrupt to ensure rx ring is cleaned */
1845         E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0);
1846
1847         /* Force detection of hung controller every watchdog period */
1848         adapter->detect_tx_hung = TRUE;
1849
1850         /* Reset the timer */
1851         mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
1852 }
1853
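/* Tx flags passed from e1000_xmit_frame() down to e1000_tx_queue() */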
1854 #define E1000_TX_FLAGS_CSUM             0x00000001
1855 #define E1000_TX_FLAGS_VLAN             0x00000002
1856 #define E1000_TX_FLAGS_TSO              0x00000004
1857 #define E1000_TX_FLAGS_IPV4             0x00000008
1858 #define E1000_TX_FLAGS_VLAN_MASK        0xffff0000
1859 #define E1000_TX_FLAGS_VLAN_SHIFT       16
1860
1861 static inline int
1862 e1000_tso(struct e1000_adapter *adapter, struct sk_buff *skb)
1863 {
1864 #ifdef NETIF_F_TSO
1865         struct e1000_context_desc *context_desc;
1866         unsigned int i;
1867         uint32_t cmd_length = 0;
1868         uint16_t ipcse = 0, tucse, mss;
1869         uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
1870         int err;
1871
1872         if(skb_shinfo(skb)->tso_size) {
1873                 if (skb_header_cloned(skb)) {
1874                         err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1875                         if (err)
1876                                 return err;
1877                 }
1878
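                /* Total header length: bytes from the start of the frame up
                 * to the TCP header, plus the TCP header itself (doff is in
                 * 32-bit words). */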
1879                 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
1880                 mss = skb_shinfo(skb)->tso_size;
1881                 if(skb->protocol == ntohs(ETH_P_IP)) {
1882                         skb->nh.iph->tot_len = 0;
1883                         skb->nh.iph->check = 0;
1884                         skb->h.th->check =
1885                                 ~csum_tcpudp_magic(skb->nh.iph->saddr,
1886                                                    skb->nh.iph->daddr,
1887                                                    0,
1888                                                    IPPROTO_TCP,
1889                                                    0);
1890                         cmd_length = E1000_TXD_CMD_IP;
1891                         ipcse = skb->h.raw - skb->data - 1;
1892 #ifdef NETIF_F_TSO_IPV6
1893                 } else if(skb->protocol == ntohs(ETH_P_IPV6)) {
1894                         skb->nh.ipv6h->payload_len = 0;
1895                         skb->h.th->check =
1896                                 ~csum_ipv6_magic(&skb->nh.ipv6h->saddr,
1897                                                  &skb->nh.ipv6h->daddr,
1898                                                  0,
1899                                                  IPPROTO_TCP,
1900                                                  0);
1901                         ipcse = 0;
1902 #endif
1903                 }
1904                 ipcss = skb->nh.raw - skb->data;
1905                 ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data;
1906                 tucss = skb->h.raw - skb->data;
1907                 tucso = (void *)&(skb->h.th->check) - (void *)skb->data;
1908                 tucse = 0;
1909
1910                 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
1911                                E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
1912
1913                 i = adapter->tx_ring.next_to_use;
1914                 context_desc = E1000_CONTEXT_DESC(adapter->tx_ring, i);
1915
1916                 context_desc->lower_setup.ip_fields.ipcss  = ipcss;
1917                 context_desc->lower_setup.ip_fields.ipcso  = ipcso;
1918                 context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
1919                 context_desc->upper_setup.tcp_fields.tucss = tucss;
1920                 context_desc->upper_setup.tcp_fields.tucso = tucso;
1921                 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
1922                 context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
1923                 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
1924                 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
1925
1926                 if(++i == adapter->tx_ring.count) i = 0;
1927                 adapter->tx_ring.next_to_use = i;
1928
1929                 return 1;
1930         }
1931 #endif
1932
1933         return 0;
1934 }
1935
1936 static inline boolean_t
1937 e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
1938 {
1939         struct e1000_context_desc *context_desc;
1940         unsigned int i;
1941         uint8_t css;
1942
1943         if(likely(skb->ip_summed == CHECKSUM_HW)) {
1944                 css = skb->h.raw - skb->data;
1945
1946                 i = adapter->tx_ring.next_to_use;
1947                 context_desc = E1000_CONTEXT_DESC(adapter->tx_ring, i);
1948
1949                 context_desc->upper_setup.tcp_fields.tucss = css;
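                /* For CHECKSUM_HW, skb->csum holds the offset of the checksum
                 * field within the transport header, so css + skb->csum below
                 * points the hardware at the field to fill in. */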
1950                 context_desc->upper_setup.tcp_fields.tucso = css + skb->csum;
1951                 context_desc->upper_setup.tcp_fields.tucse = 0;
1952                 context_desc->tcp_seg_setup.data = 0;
1953                 context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
1954
1955                 if(unlikely(++i == adapter->tx_ring.count)) i = 0;
1956                 adapter->tx_ring.next_to_use = i;
1957
1958                 return TRUE;
1959         }
1960
1961         return FALSE;
1962 }
1963
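/* Each Tx data descriptor can carry at most 2^12 = 4096 bytes */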
1964 #define E1000_MAX_TXD_PWR       12
1965 #define E1000_MAX_DATA_PER_TXD  (1<<E1000_MAX_TXD_PWR)
1966
1967 static inline int
1968 e1000_tx_map(struct e1000_adapter *adapter, struct sk_buff *skb,
1969         unsigned int first, unsigned int max_per_txd,
1970         unsigned int nr_frags, unsigned int mss)
1971 {
1972         struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
1973         struct e1000_buffer *buffer_info;
1974         unsigned int len = skb->len;
1975         unsigned int offset = 0, size, count = 0, i;
1976         unsigned int f;
1977         len -= skb->data_len;
1978
1979         i = tx_ring->next_to_use;
1980
1981         while(len) {
1982                 buffer_info = &tx_ring->buffer_info[i];
1983                 size = min(len, max_per_txd);
1984 #ifdef NETIF_F_TSO
1985                 /* Workaround for premature desc write-backs
1986                  * in TSO mode.  Append 4-byte sentinel desc */
1987                 if(unlikely(mss && !nr_frags && size == len && size > 8))
1988                         size -= 4;
1989 #endif
1990                 /* Work-around for errata 10, which applies to all
1991                  * controllers in PCI-X mode.  The fix is to make sure
1992                  * that the first descriptor of a packet is smaller than
1993                  * 2048 - 16 - 16 (or 2016) bytes.
1994                  */
1995                 if(unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) &&
1996                                 (size > 2015) && count == 0))
1997                         size = 2015;
1998                                                                                 
1999                 /* Workaround for potential 82544 hang in PCI-X.  Avoid
2000                  * terminating buffers within evenly-aligned dwords. */
2001                 if(unlikely(adapter->pcix_82544 &&
2002                    !((unsigned long)(skb->data + offset + size - 1) & 4) &&
2003                    size > 4))
2004                         size -= 4;
2005
2006                 buffer_info->length = size;
2007                 buffer_info->dma =
2008                         pci_map_single(adapter->pdev,
2009                                 skb->data + offset,
2010                                 size,
2011                                 PCI_DMA_TODEVICE);
2012                 buffer_info->time_stamp = jiffies;
2013
2014                 len -= size;
2015                 offset += size;
2016                 count++;
2017                 if(unlikely(++i == tx_ring->count)) i = 0;
2018         }
2019
2020         for(f = 0; f < nr_frags; f++) {
2021                 struct skb_frag_struct *frag;
2022
2023                 frag = &skb_shinfo(skb)->frags[f];
2024                 len = frag->size;
2025                 offset = frag->page_offset;
2026
2027                 while(len) {
2028                         buffer_info = &tx_ring->buffer_info[i];
2029                         size = min(len, max_per_txd);
2030 #ifdef NETIF_F_TSO
2031                         /* Workaround for premature desc write-backs
2032                          * in TSO mode.  Append 4-byte sentinel desc */
2033                         if(unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
2034                                 size -= 4;
2035 #endif
2036                         /* Workaround for potential 82544 hang in PCI-X.
2037                          * Avoid terminating buffers within evenly-aligned
2038                          * dwords. */
2039                         if(unlikely(adapter->pcix_82544 &&
2040                            !((unsigned long)(frag->page+offset+size-1) & 4) &&
2041                            size > 4))
2042                                 size -= 4;
2043
2044                         buffer_info->length = size;
2045                         buffer_info->dma =
2046                                 pci_map_page(adapter->pdev,
2047                                         frag->page,
2048                                         offset,
2049                                         size,
2050                                         PCI_DMA_TODEVICE);
2051                         buffer_info->time_stamp = jiffies;
2052
2053                         len -= size;
2054                         offset += size;
2055                         count++;
2056                         if(unlikely(++i == tx_ring->count)) i = 0;
2057                 }
2058         }
2059
2060         i = (i == 0) ? tx_ring->count - 1 : i - 1;
2061         tx_ring->buffer_info[i].skb = skb;
2062         tx_ring->buffer_info[first].next_to_watch = i;
2063
2064         return count;
2065 }
2066
2067 static inline void
2068 e1000_tx_queue(struct e1000_adapter *adapter, int count, int tx_flags)
2069 {
2070         struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
2071         struct e1000_tx_desc *tx_desc = NULL;
2072         struct e1000_buffer *buffer_info;
2073         uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
2074         unsigned int i;
2075
2076         if(likely(tx_flags & E1000_TX_FLAGS_TSO)) {
2077                 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
2078                              E1000_TXD_CMD_TSE;
2079                 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2080
2081                 if(likely(tx_flags & E1000_TX_FLAGS_IPV4))
2082                         txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2083         }
2084
2085         if(likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
2086                 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2087                 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2088         }
2089
2090         if(unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
2091                 txd_lower |= E1000_TXD_CMD_VLE;
2092                 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
2093         }
2094
2095         i = tx_ring->next_to_use;
2096
2097         while(count--) {
2098                 buffer_info = &tx_ring->buffer_info[i];
2099                 tx_desc = E1000_TX_DESC(*tx_ring, i);
2100                 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
2101                 tx_desc->lower.data =
2102                         cpu_to_le32(txd_lower | buffer_info->length);
2103                 tx_desc->upper.data = cpu_to_le32(txd_upper);
2104                 if(unlikely(++i == tx_ring->count)) i = 0;
2105         }
2106
2107         tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
2108
2109         /* Force memory writes to complete before letting h/w
2110          * know there are new descriptors to fetch.  (Only
2111          * applicable for weak-ordered memory model archs,
2112          * such as IA-64). */
2113         wmb();
2114
2115         tx_ring->next_to_use = i;
2116         E1000_WRITE_REG(&adapter->hw, TDT, i);
2117 }
2118
2119 /**
2120  * 82547 workaround to avoid controller hang in half-duplex environment.
2121  * The workaround is to avoid queuing a large packet that would span
2122  * the internal Tx FIFO ring boundary by notifying the stack to resend
2123  * the packet at a later time.  This gives the Tx FIFO an opportunity to
2124  * flush all packets.  When that occurs, we reset the Tx FIFO pointers
2125  * to the beginning of the Tx FIFO.
2126  **/
2127
2128 #define E1000_FIFO_HDR                  0x10
2129 #define E1000_82547_PAD_LEN             0x3E0
2130
2131 static inline int
2132 e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb)
2133 {
2134         uint32_t fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
2135         uint32_t skb_fifo_len = skb->len + E1000_FIFO_HDR;
2136
2137         E1000_ROUNDUP(skb_fifo_len, E1000_FIFO_HDR);
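        /* skb_fifo_len is now the frame length plus the 16-byte FIFO header,
         * rounded up to the FIFO's 16-byte granularity */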
2138
2139         if(adapter->link_duplex != HALF_DUPLEX)
2140                 goto no_fifo_stall_required;
2141
2142         if(atomic_read(&adapter->tx_fifo_stall))
2143                 return 1;
2144
2145         if(skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
2146                 atomic_set(&adapter->tx_fifo_stall, 1);
2147                 return 1;
2148         }
2149
2150 no_fifo_stall_required:
2151         adapter->tx_fifo_head += skb_fifo_len;
2152         if(adapter->tx_fifo_head >= adapter->tx_fifo_size)
2153                 adapter->tx_fifo_head -= adapter->tx_fifo_size;
2154         return 0;
2155 }
2156
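/* 282 bytes covers the Ethernet, IP, and UDP headers plus the fixed BOOTP
 * fields and magic cookie of a DHCP message (14 + 20 + 8 + 240) */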
2157 #define MINIMUM_DHCP_PACKET_SIZE 282
2158 static inline int
2159 e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb)
2160 {
2161         struct e1000_hw *hw =  &adapter->hw;
2162         uint16_t length, offset;
2163         if(vlan_tx_tag_present(skb)) {
2164                 if(!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
2165                         ( adapter->hw.mng_cookie.status &
2166                           E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) )
2167                         return 0;
2168         }
2169         if(htons(ETH_P_IP) == skb->protocol) {
2170                 const struct iphdr *ip = skb->nh.iph;
2171                 if(IPPROTO_UDP == ip->protocol) {
2172                         struct udphdr *udp = (struct udphdr *)(skb->h.uh);
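                        /* UDP destination port 67: BOOTP/DHCP server */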
2173                         if(ntohs(udp->dest) == 67) {
2174                                 offset = (uint8_t *)udp + 8 - skb->data;
2175                                 length = skb->len - offset;
2176
2177                                 return e1000_mng_write_dhcp_info(hw,
2178                                                 (uint8_t *)udp + 8, length);
2179                         }
2180                 }
2181         } else if((skb->len > MINIMUM_DHCP_PACKET_SIZE) && (!skb->protocol)) {
2182                 struct ethhdr *eth = (struct ethhdr *) skb->data;
2183                 if((htons(ETH_P_IP) == eth->h_proto)) {
2184                         const struct iphdr *ip = 
2185                                 (struct iphdr *)((uint8_t *)skb->data+14);
2186                         if(IPPROTO_UDP == ip->protocol) {
2187                                 struct udphdr *udp = 
2188                                         (struct udphdr *)((uint8_t *)ip + 
2189                                                 (ip->ihl << 2));
2190                                 if(ntohs(udp->dest) == 67) {
2191                                         offset = (uint8_t *)udp + 8 - skb->data;
2192                                         length = skb->len - offset;
2193
2194                                         return e1000_mng_write_dhcp_info(hw,
2195                                                         (uint8_t *)udp + 8, 
2196                                                         length);
2197                                 }
2198                         }
2199                 }
2200         }
2201         return 0;
2202 }
2203
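/* Worst-case number of descriptors needed to map S bytes when each
 * descriptor covers at most 2^X bytes */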
2204 #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
2205 static int
2206 e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2207 {
2208         struct e1000_adapter *adapter = netdev_priv(netdev);
2209         unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
2210         unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
2211         unsigned int tx_flags = 0;
2212         unsigned int len = skb->len;
2213         unsigned long flags;
2214         unsigned int nr_frags = 0;
2215         unsigned int mss = 0;
2216         int count = 0;
2217         int tso;
2218         unsigned int f;
2219         len -= skb->data_len;
2220
2221         if(unlikely(skb->len <= 0)) {
2222                 dev_kfree_skb_any(skb);
2223                 return NETDEV_TX_OK;
2224         }
2225
2226 #ifdef NETIF_F_TSO
2227         mss = skb_shinfo(skb)->tso_size;
2228         /* The controller does a simple calculation to
2229          * make sure there is enough room in the FIFO before
2230          * initiating the DMA for each buffer.  It assumes at most
2231          * four segments per buffer (4 = ceil(buffer len/mss)), so to
2232          * make sure we don't overrun the FIFO, cap the max buffer
2233          * length at four MSS-sized segments if mss drops. */
2234         if(mss) {
2235                 max_per_txd = min(mss << 2, max_per_txd);
2236                 max_txd_pwr = fls(max_per_txd) - 1;
2237         }
2238
2239         if((mss) || (skb->ip_summed == CHECKSUM_HW))
2240                 count++;
2241         count++;
2242 #else
2243         if(skb->ip_summed == CHECKSUM_HW)
2244                 count++;
2245 #endif
2246         count += TXD_USE_COUNT(len, max_txd_pwr);
2247
2248         if(adapter->pcix_82544)
2249                 count++;
2250
2251         /* Work-around for errata 10, which applies to all controllers
2252          * in PCI-X mode, so add one more descriptor to the count.
2253          */
2254         if(unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) &&
2255                         (len > 2015)))
2256                 count++;
2257
2258         nr_frags = skb_shinfo(skb)->nr_frags;
2259         for(f = 0; f < nr_frags; f++)
2260                 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
2261                                        max_txd_pwr);
2262         if(adapter->pcix_82544)
2263                 count += nr_frags;
2264
2265         local_irq_save(flags); 
2266         if (!spin_trylock(&adapter->tx_lock)) { 
2267                 /* Collision - tell upper layer to requeue */ 
2268                 local_irq_restore(flags); 
2269                 return NETDEV_TX_LOCKED; 
2270         } 
2271         if(adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573) )
2272                 e1000_transfer_dhcp_info(adapter, skb);
2273
2274
2275         /* need: count + 2 desc gap to keep tail from touching
2276          * head, otherwise try next time */
2277         if(unlikely(E1000_DESC_UNUSED(&adapter->tx_ring) < count + 2)) {
2278                 netif_stop_queue(netdev);
2279                 spin_unlock_irqrestore(&adapter->tx_lock, flags);
2280                 return NETDEV_TX_BUSY;
2281         }
2282
2283         if(unlikely(adapter->hw.mac_type == e1000_82547)) {
2284                 if(unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
2285                         netif_stop_queue(netdev);
2286                         mod_timer(&adapter->tx_fifo_stall_timer, jiffies);
2287                         spin_unlock_irqrestore(&adapter->tx_lock, flags);
2288                         return NETDEV_TX_BUSY;
2289                 }
2290         }
2291
2292         if(unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) {
2293                 tx_flags |= E1000_TX_FLAGS_VLAN;
2294                 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
2295         }
2296
2297         first = adapter->tx_ring.next_to_use;
2298         
2299         tso = e1000_tso(adapter, skb);
2300         if (tso < 0) {
2301                 dev_kfree_skb_any(skb);
2302                 spin_unlock_irqrestore(&adapter->tx_lock, flags);
2303                 return NETDEV_TX_OK;
2304         }
2305
2306         if (likely(tso))
2307                 tx_flags |= E1000_TX_FLAGS_TSO;
2308         else if(likely(e1000_tx_csum(adapter, skb)))
2309                 tx_flags |= E1000_TX_FLAGS_CSUM;
2310
2311         /* The old method was to assume an IPv4 packet by default if TSO
2312          * was enabled.  82573 hardware supports TSO for IPv6 as well, so
2313          * we can no longer assume; we must check the protocol. */
2314         if(likely(skb->protocol == ntohs(ETH_P_IP)))
2315                 tx_flags |= E1000_TX_FLAGS_IPV4;
2316
2317         e1000_tx_queue(adapter,
2318                 e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss),
2319                 tx_flags);
2320
2321         netdev->trans_start = jiffies;
2322
2323         /* Make sure there is space in the ring for the next send. */
2324         if(unlikely(E1000_DESC_UNUSED(&adapter->tx_ring) < MAX_SKB_FRAGS + 2))
2325                 netif_stop_queue(netdev);
2326
2327         spin_unlock_irqrestore(&adapter->tx_lock, flags);
2328         return NETDEV_TX_OK;
2329 }
2330
2331 /**
2332  * e1000_tx_timeout - Respond to a Tx Hang
2333  * @netdev: network interface device structure
2334  **/
2335
2336 static void
2337 e1000_tx_timeout(struct net_device *netdev)
2338 {
2339         struct e1000_adapter *adapter = netdev_priv(netdev);
2340
2341         /* Do the reset outside of interrupt context */
2342         schedule_work(&adapter->tx_timeout_task);
2343 }
2344
2345 static void
2346 e1000_tx_timeout_task(struct net_device *netdev)
2347 {
2348         struct e1000_adapter *adapter = netdev_priv(netdev);
2349
2350         e1000_down(adapter);
2351         e1000_up(adapter);
2352 }
2353
2354 /**
2355  * e1000_get_stats - Get System Network Statistics
2356  * @netdev: network interface device structure
2357  *
2358  * Returns the address of the device statistics structure.
2359  * The statistics are actually updated from the timer callback.
2360  **/
2361
2362 static struct net_device_stats *
2363 e1000_get_stats(struct net_device *netdev)
2364 {
2365         struct e1000_adapter *adapter = netdev_priv(netdev);
2366
2367         e1000_update_stats(adapter);
2368         return &adapter->net_stats;
2369 }
2370
2371 /**
2372  * e1000_change_mtu - Change the Maximum Transfer Unit
2373  * @netdev: network interface device structure
2374  * @new_mtu: new value for maximum frame size
2375  *
2376  * Returns 0 on success, negative on failure
2377  **/
2378
2379 static int
2380 e1000_change_mtu(struct net_device *netdev, int new_mtu)
2381 {
2382         struct e1000_adapter *adapter = netdev_priv(netdev);
2383         int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
2384
2385         if((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
2386                 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
2387                         DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
2388                         return -EINVAL;
2389         }
2390
2391 #define MAX_STD_JUMBO_FRAME_SIZE 9216
2392         /* might want this to be a broader mac_type (enum) check... */
2393         if (adapter->hw.mac_type == e1000_82573 &&
2394             max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
2395                 DPRINTK(PROBE, ERR, "Jumbo Frames not supported "
2396                                     "on 82573\n");
2397                 return -EINVAL;
2398         }
2399
2400         if(adapter->hw.mac_type > e1000_82547_rev_2) {
2401                 adapter->rx_buffer_len = max_frame;
2402                 E1000_ROUNDUP(adapter->rx_buffer_len, 1024);
2403         } else {
2404                 if(unlikely((adapter->hw.mac_type < e1000_82543) &&
2405                    (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE))) {
2406                         DPRINTK(PROBE, ERR, "Jumbo Frames not supported "
2407                                             "on 82542\n");
2408                         return -EINVAL;
2409
2410                 } else {
2411                         if(max_frame <= E1000_RXBUFFER_2048) {
2412                                 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
2413                         } else if(max_frame <= E1000_RXBUFFER_4096) {
2414                                 adapter->rx_buffer_len = E1000_RXBUFFER_4096;
2415                         } else if(max_frame <= E1000_RXBUFFER_8192) {
2416                                 adapter->rx_buffer_len = E1000_RXBUFFER_8192;
2417                         } else if(max_frame <= E1000_RXBUFFER_16384) {
2418                                 adapter->rx_buffer_len = E1000_RXBUFFER_16384;
2419                         }
2420                 }
2421         }
2422
2423         netdev->mtu = new_mtu;
2424
2425         if(netif_running(netdev)) {
2426                 e1000_down(adapter);
2427                 e1000_up(adapter);
2428         }
2429
2430         adapter->hw.max_frame_size = max_frame;
2431
2432         return 0;
2433 }
2434
2435 /**
2436  * e1000_update_stats - Update the board statistics counters
2437  * @adapter: board private structure
2438  **/
2439
2440 void
2441 e1000_update_stats(struct e1000_adapter *adapter)
2442 {
2443         struct e1000_hw *hw = &adapter->hw;
2444         unsigned long flags;
2445         uint16_t phy_tmp;
2446
2447 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
2448
2449         spin_lock_irqsave(&adapter->stats_lock, flags);
2450
2451         /* these counters are modified from e1000_adjust_tbi_stats,
2452          * called from the interrupt context, so they must only
2453          * be written while holding adapter->stats_lock
2454          */
2455
2456         adapter->stats.crcerrs += E1000_READ_REG(hw, CRCERRS);
2457         adapter->stats.gprc += E1000_READ_REG(hw, GPRC);
2458         adapter->stats.gorcl += E1000_READ_REG(hw, GORCL);
2459         adapter->stats.gorch += E1000_READ_REG(hw, GORCH);
2460         adapter->stats.bprc += E1000_READ_REG(hw, BPRC);
2461         adapter->stats.mprc += E1000_READ_REG(hw, MPRC);
2462         adapter->stats.roc += E1000_READ_REG(hw, ROC);
2463         adapter->stats.prc64 += E1000_READ_REG(hw, PRC64);
2464         adapter->stats.prc127 += E1000_READ_REG(hw, PRC127);
2465         adapter->stats.prc255 += E1000_READ_REG(hw, PRC255);
2466         adapter->stats.prc511 += E1000_READ_REG(hw, PRC511);
2467         adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023);
2468         adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522);
2469
2470         adapter->stats.symerrs += E1000_READ_REG(hw, SYMERRS);
2471         adapter->stats.mpc += E1000_READ_REG(hw, MPC);
2472         adapter->stats.scc += E1000_READ_REG(hw, SCC);
2473         adapter->stats.ecol += E1000_READ_REG(hw, ECOL);
2474         adapter->stats.mcc += E1000_READ_REG(hw, MCC);
2475         adapter->stats.latecol += E1000_READ_REG(hw, LATECOL);
2476         adapter->stats.dc += E1000_READ_REG(hw, DC);
2477         adapter->stats.sec += E1000_READ_REG(hw, SEC);
2478         adapter->stats.rlec += E1000_READ_REG(hw, RLEC);
2479         adapter->stats.xonrxc += E1000_READ_REG(hw, XONRXC);
2480         adapter->stats.xontxc += E1000_READ_REG(hw, XONTXC);
2481         adapter->stats.xoffrxc += E1000_READ_REG(hw, XOFFRXC);
2482         adapter->stats.xofftxc += E1000_READ_REG(hw, XOFFTXC);
2483         adapter->stats.fcruc += E1000_READ_REG(hw, FCRUC);
2484         adapter->stats.gptc += E1000_READ_REG(hw, GPTC);
2485         adapter->stats.gotcl += E1000_READ_REG(hw, GOTCL);
2486         adapter->stats.gotch += E1000_READ_REG(hw, GOTCH);
2487         adapter->stats.rnbc += E1000_READ_REG(hw, RNBC);
2488         adapter->stats.ruc += E1000_READ_REG(hw, RUC);
2489         adapter->stats.rfc += E1000_READ_REG(hw, RFC);
2490         adapter->stats.rjc += E1000_READ_REG(hw, RJC);
2491         adapter->stats.torl += E1000_READ_REG(hw, TORL);
2492         adapter->stats.torh += E1000_READ_REG(hw, TORH);
2493         adapter->stats.totl += E1000_READ_REG(hw, TOTL);
2494         adapter->stats.toth += E1000_READ_REG(hw, TOTH);
2495         adapter->stats.tpr += E1000_READ_REG(hw, TPR);
2496         adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64);
2497         adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127);
2498         adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255);
2499         adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511);
2500         adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023);
2501         adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522);
2502         adapter->stats.mptc += E1000_READ_REG(hw, MPTC);
2503         adapter->stats.bptc += E1000_READ_REG(hw, BPTC);
2504
2505         /* used for adaptive IFS */
2506
2507         hw->tx_packet_delta = E1000_READ_REG(hw, TPT);
2508         adapter->stats.tpt += hw->tx_packet_delta;
2509         hw->collision_delta = E1000_READ_REG(hw, COLC);
2510         adapter->stats.colc += hw->collision_delta;
2511
2512         if(hw->mac_type >= e1000_82543) {
2513                 adapter->stats.algnerrc += E1000_READ_REG(hw, ALGNERRC);
2514                 adapter->stats.rxerrc += E1000_READ_REG(hw, RXERRC);
2515                 adapter->stats.tncrs += E1000_READ_REG(hw, TNCRS);
2516                 adapter->stats.cexterr += E1000_READ_REG(hw, CEXTERR);
2517                 adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC);
2518                 adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC);
2519         }
2520         if(hw->mac_type > e1000_82547_rev_2) {
2521                 adapter->stats.iac += E1000_READ_REG(hw, IAC);
2522                 adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC);
2523                 adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC);
2524                 adapter->stats.icrxatc += E1000_READ_REG(hw, ICRXATC);
2525                 adapter->stats.ictxptc += E1000_READ_REG(hw, ICTXPTC);
2526                 adapter->stats.ictxatc += E1000_READ_REG(hw, ICTXATC);
2527                 adapter->stats.ictxqec += E1000_READ_REG(hw, ICTXQEC);
2528                 adapter->stats.ictxqmtc += E1000_READ_REG(hw, ICTXQMTC);
2529                 adapter->stats.icrxdmtc += E1000_READ_REG(hw, ICRXDMTC);
2530         }
2531
2532         /* Fill out the OS statistics structure */
2533
2534         adapter->net_stats.rx_packets = adapter->stats.gprc;
2535         adapter->net_stats.tx_packets = adapter->stats.gptc;
2536         adapter->net_stats.rx_bytes = adapter->stats.gorcl;
2537         adapter->net_stats.tx_bytes = adapter->stats.gotcl;
2538         adapter->net_stats.multicast = adapter->stats.mprc;
2539         adapter->net_stats.collisions = adapter->stats.colc;
2540
2541         /* Rx Errors */
2542
2543         adapter->net_stats.rx_errors = adapter->stats.rxerrc +
2544                 adapter->stats.crcerrs + adapter->stats.algnerrc +
2545                 adapter->stats.rlec + adapter->stats.mpc + 
2546                 adapter->stats.cexterr;
2547         adapter->net_stats.rx_dropped = adapter->stats.mpc;
2548         adapter->net_stats.rx_length_errors = adapter->stats.rlec;
2549         adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
2550         adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
2551         adapter->net_stats.rx_fifo_errors = adapter->stats.mpc;
2552         adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
2553
2554         /* Tx Errors */
2555
2556         adapter->net_stats.tx_errors = adapter->stats.ecol +
2557                                        adapter->stats.latecol;
2558         adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
2559         adapter->net_stats.tx_window_errors = adapter->stats.latecol;
2560         adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;
2561
2562         /* Tx Dropped needs to be maintained elsewhere */
2563
2564         /* Phy Stats */
2565
2566         if(hw->media_type == e1000_media_type_copper) {
2567                 if((adapter->link_speed == SPEED_1000) &&
2568                    (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
2569                         phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
2570                         adapter->phy_stats.idle_errors += phy_tmp;
2571                 }
2572
2573                 if((hw->mac_type <= e1000_82546) &&
2574                    (hw->phy_type == e1000_phy_m88) &&
2575                    !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
2576                         adapter->phy_stats.receive_errors += phy_tmp;
2577         }
2578
2579         spin_unlock_irqrestore(&adapter->stats_lock, flags);
2580 }
2581
2582 /**
2583  * e1000_intr - Interrupt Handler
2584  * @irq: interrupt number
2585  * @data: pointer to a network interface device structure
2586  * @regs: CPU registers structure
2587  **/
2588
2589 static irqreturn_t
2590 e1000_intr(int irq, void *data, struct pt_regs *regs)
2591 {
2592         struct net_device *netdev = data;
2593         struct e1000_adapter *adapter = netdev_priv(netdev);
2594         struct e1000_hw *hw = &adapter->hw;
2595         uint32_t icr = E1000_READ_REG(hw, ICR);
2596 #ifndef CONFIG_E1000_NAPI
2597         unsigned int i;
2598 #endif
2599
2600         if(unlikely(!icr))
2601                 return IRQ_NONE;  /* Not our interrupt */
2602
2603         if(unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
2604                 hw->get_link_status = 1;
2605                 mod_timer(&adapter->watchdog_timer, jiffies);
2606         }
2607
2608 #ifdef CONFIG_E1000_NAPI
2609         if(likely(netif_rx_schedule_prep(netdev))) {
2610
2611                 /* Disable interrupts and register for poll.  The flush
2612                  * of the posted write is intentionally left out.
2613                  */
2614
2615                 atomic_inc(&adapter->irq_sem);
2616                 E1000_WRITE_REG(hw, IMC, ~0);
2617                 __netif_rx_schedule(netdev);
2618         }
2619 #else
2620         /* Writing IMC and IMS is needed for 82547.
2621            When the Hub Link bus is occupied, an interrupt
2622            de-assertion message cannot be sent.
2623            When an interrupt assertion message is generated later,
2624            the two messages are re-ordered and sent out.
2625            That causes the APIC to think the 82547 is in the
2626            de-assertion state while it is actually in the assertion
2627            state, resulting in a deadlock.  Writing IMC forces the
2628            82547 into the de-assertion state.
2629         */
2630         if(hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2){
2631                 atomic_inc(&adapter->irq_sem);
2632                 E1000_WRITE_REG(hw, IMC, ~0);
2633         }
2634
2635         for(i = 0; i < E1000_MAX_INTR; i++)
2636                 if(unlikely(!adapter->clean_rx(adapter) &
2637                    !e1000_clean_tx_irq(adapter)))
2638                         break;
2639
2640         if(hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
2641                 e1000_irq_enable(adapter);
2642 #endif
2643
2644         return IRQ_HANDLED;
2645 }
2646
2647 #ifdef CONFIG_E1000_NAPI
2648 /**
2649  * e1000_clean - NAPI Rx polling callback
2650  * @netdev: network interface device structure
2651  **/
2652
2653 static int
2654 e1000_clean(struct net_device *netdev, int *budget)
2655 {
2656         struct e1000_adapter *adapter = netdev_priv(netdev);
2657         int work_to_do = min(*budget, netdev->quota);
2658         int tx_cleaned;
2659         int work_done = 0;
2660
2661         tx_cleaned = e1000_clean_tx_irq(adapter);
2662         adapter->clean_rx(adapter, &work_done, work_to_do);
2663
2664         *budget -= work_done;
2665         netdev->quota -= work_done;
2666         
2667         if ((!tx_cleaned && (work_done == 0)) || !netif_running(netdev)) {
2668                 /* If no Tx and not enough Rx work done, exit the polling mode */
2669                 netif_rx_complete(netdev);
2670                 e1000_irq_enable(adapter);
2671                 return 0;
2672         }
2673
2674         return 1;
2675 }
2676
2677 #endif
2678 /**
2679  * e1000_clean_tx_irq - Reclaim resources after transmit completes
2680  * @adapter: board private structure
2681  **/
2682
2683 static boolean_t
2684 e1000_clean_tx_irq(struct e1000_adapter *adapter)
2685 {
2686         struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
2687         struct net_device *netdev = adapter->netdev;
2688         struct e1000_tx_desc *tx_desc, *eop_desc;
2689         struct e1000_buffer *buffer_info;
2690         unsigned int i, eop;
2691         boolean_t cleaned = FALSE;
2692
2693         i = tx_ring->next_to_clean;
2694         eop = tx_ring->buffer_info[i].next_to_watch;
2695         eop_desc = E1000_TX_DESC(*tx_ring, eop);
2696
2697         while(eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
2698                 /* The premature-writeback workaround defers freeing the last
2699                  * buffer of the previous packet; unmap and free it now */
2700                 if (likely(adapter->previous_buffer_info.skb != NULL)) {
2701                         e1000_unmap_and_free_tx_resource(adapter,
2702                                         &adapter->previous_buffer_info);
2703                 }
2704
2705                 for(cleaned = FALSE; !cleaned; ) {
2706                         tx_desc = E1000_TX_DESC(*tx_ring, i);
2707                         buffer_info = &tx_ring->buffer_info[i];
2708                         cleaned = (i == eop);
2709
2710 #ifdef NETIF_F_TSO
2711                         if (!(netdev->features & NETIF_F_TSO)) {
2712 #endif
2713                                 e1000_unmap_and_free_tx_resource(adapter,
2714                                                                  buffer_info);
2715 #ifdef NETIF_F_TSO
2716                         } else {
2717                                 if (cleaned) {
2718                                         memcpy(&adapter->previous_buffer_info,
2719                                                buffer_info,
2720                                                sizeof(struct e1000_buffer));
2721                                         memset(buffer_info, 0,
2722                                                sizeof(struct e1000_buffer));
2723                                 } else {
2724                                         e1000_unmap_and_free_tx_resource(
2725                                             adapter, buffer_info);
2726                                 }
2727                         }
2728 #endif
2729
2730                         tx_desc->buffer_addr = 0;
2731                         tx_desc->lower.data = 0;
2732                         tx_desc->upper.data = 0;
2733
2734                         if(unlikely(++i == tx_ring->count)) i = 0;
2735                 }
2736                 
2737                 eop = tx_ring->buffer_info[i].next_to_watch;
2738                 eop_desc = E1000_TX_DESC(*tx_ring, eop);
2739         }
2740
2741         tx_ring->next_to_clean = i;
2742
2743         spin_lock(&adapter->tx_lock);
2744
2745         if(unlikely(cleaned && netif_queue_stopped(netdev) &&
2746                     netif_carrier_ok(netdev)))
2747                 netif_wake_queue(netdev);
2748
2749         spin_unlock(&adapter->tx_lock);
2750         if(adapter->detect_tx_hung) {
2751
2752                 /* Detect a transmit hang in hardware; this serializes the
2753                  * check with the clearing of time_stamp and the movement of i */
2754                 adapter->detect_tx_hung = FALSE;
2755                 if (tx_ring->buffer_info[i].dma &&
2756                     time_after(jiffies, tx_ring->buffer_info[i].time_stamp + HZ)
2757                     && !(E1000_READ_REG(&adapter->hw, STATUS) &
2758                         E1000_STATUS_TXOFF)) {
2759
2760                         /* detected Tx unit hang */
2761                         i = tx_ring->next_to_clean;
2762                         eop = tx_ring->buffer_info[i].next_to_watch;
2763                         eop_desc = E1000_TX_DESC(*tx_ring, eop);
2764                         DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
2765                                         "  TDH                  <%x>\n"
2766                                         "  TDT                  <%x>\n"
2767                                         "  next_to_use          <%x>\n"
2768                                         "  next_to_clean        <%x>\n"
2769                                         "buffer_info[next_to_clean]\n"
2770                                         "  dma                  <%llx>\n"
2771                                         "  time_stamp           <%lx>\n"
2772                                         "  next_to_watch        <%x>\n"
2773                                         "  jiffies              <%lx>\n"
2774                                         "  next_to_watch.status <%x>\n",
2775                                 E1000_READ_REG(&adapter->hw, TDH),
2776                                 E1000_READ_REG(&adapter->hw, TDT),
2777                                 tx_ring->next_to_use,
2778                                 i,
2779                                 (unsigned long long)tx_ring->buffer_info[i].dma,
2780                                 tx_ring->buffer_info[i].time_stamp,
2781                                 eop,
2782                                 jiffies,
2783                                 eop_desc->upper.fields.status);
2784                         netif_stop_queue(netdev);
2785                 }
2786         }
2787 #ifdef NETIF_F_TSO
2788
2789         if( unlikely(!(eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
2790             time_after(jiffies, adapter->previous_buffer_info.time_stamp + HZ)))
2791                 e1000_unmap_and_free_tx_resource(
2792                     adapter, &adapter->previous_buffer_info);
2793
2794 #endif
2795         return cleaned;
2796 }
2797
2798 /**
2799  * e1000_rx_checksum - Receive Checksum Offload for 82543
2800  * @adapter:     board private structure
2801  * @status_err:  receive descriptor status and error fields
2802  * @csum:        receive descriptor csum field
2803  * @sk_buff:     socket buffer with received data
2804  **/
2805
2806 static inline void
2807 e1000_rx_checksum(struct e1000_adapter *adapter,
2808                   uint32_t status_err, uint32_t csum,
2809                   struct sk_buff *skb)
2810 {
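             /* status_err carries the descriptor's status field in its low byte
              * and the error flags in bits 31:24 (see how e1000_clean_rx_irq
              * builds it), so split the two back apart here. */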
2811         uint16_t status = (uint16_t)status_err;
2812         uint8_t errors = (uint8_t)(status_err >> 24);
2813         skb->ip_summed = CHECKSUM_NONE;
2814
2815         /* 82543 or newer only */
2816         if(unlikely(adapter->hw.mac_type < e1000_82543)) return;
2817         /* if the Ignore Checksum bit is set, skip the offload result */
2818         if(unlikely(status & E1000_RXD_STAT_IXSM)) return;
2819         /* TCP/UDP checksum error bit is set */
2820         if(unlikely(errors & E1000_RXD_ERR_TCPE)) {
2821                 /* let the stack verify checksum errors */
2822                 adapter->hw_csum_err++;
2823                 return;
2824         }
2825         /* TCP/UDP Checksum has not been calculated */
2826         if(adapter->hw.mac_type <= e1000_82547_rev_2) {
2827                 if(!(status & E1000_RXD_STAT_TCPCS))
2828                         return;
2829         } else {
2830                 if(!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
2831                         return;
2832         }
2833         /* It must be a TCP or UDP packet with a valid checksum */
2834         if (likely(status & E1000_RXD_STAT_TCPCS)) {
2835                 /* TCP checksum is good */
2836                 skb->ip_summed = CHECKSUM_UNNECESSARY;
2837         } else if (adapter->hw.mac_type > e1000_82547_rev_2) {
2838                 /* IP fragment with UDP payload */
2839                 /* Hardware complements the payload checksum, so we undo it
2840                  * and then put the value in host order for further stack use.
2841                  */
2842                 csum = ntohl(csum ^ 0xFFFF);
2843                 skb->csum = csum;
2844                 skb->ip_summed = CHECKSUM_HW;
2845         }
2846         adapter->hw_csum_good++;
2847 }
2848
2849 /**
2850  * e1000_clean_rx_irq - Send received data up the network stack; legacy
2851  * @adapter: board private structure
2852  **/
2853
2854 static boolean_t
2855 #ifdef CONFIG_E1000_NAPI
2856 e1000_clean_rx_irq(struct e1000_adapter *adapter, int *work_done,
2857                    int work_to_do)
2858 #else
2859 e1000_clean_rx_irq(struct e1000_adapter *adapter)
2860 #endif
2861 {
2862         struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
2863         struct net_device *netdev = adapter->netdev;
2864         struct pci_dev *pdev = adapter->pdev;
2865         struct e1000_rx_desc *rx_desc;
2866         struct e1000_buffer *buffer_info;
2867         struct sk_buff *skb;
2868         unsigned long flags;
2869         uint32_t length;
2870         uint8_t last_byte;
2871         unsigned int i;
2872         boolean_t cleaned = FALSE;
2873
2874         i = rx_ring->next_to_clean;
2875         rx_desc = E1000_RX_DESC(*rx_ring, i);
2876
2877         while(rx_desc->status & E1000_RXD_STAT_DD) {
2878                 buffer_info = &rx_ring->buffer_info[i];
2879 #ifdef CONFIG_E1000_NAPI
2880                 if(*work_done >= work_to_do)
2881                         break;
2882                 (*work_done)++;
2883 #endif
2884                 cleaned = TRUE;
2885
2886                 pci_unmap_single(pdev,
2887                                  buffer_info->dma,
2888                                  buffer_info->length,
2889                                  PCI_DMA_FROMDEVICE);
2890
2891                 skb = buffer_info->skb;
2892                 length = le16_to_cpu(rx_desc->length);
2893
2894                 if(unlikely(!(rx_desc->status & E1000_RXD_STAT_EOP))) {
2895                         /* All receives must fit into a single buffer */
2896                         E1000_DBG("%s: Receive packet consumed multiple"
2897                                   " buffers\n", netdev->name);
2898                         dev_kfree_skb_irq(skb);
2899                         goto next_desc;
2900                 }
2901
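                     /* Frames flagged with errors may still be usable on TBI
                      * (fiber) parts, where a carrier-extension symbol can mark
                      * an otherwise good frame bad; TBI_ACCEPT() decides, and
                      * accepted frames have their stats adjusted and the final
                      * byte dropped. */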
2902                 if(unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
2903                         last_byte = *(skb->data + length - 1);
2904                         if(TBI_ACCEPT(&adapter->hw, rx_desc->status,
2905                                       rx_desc->errors, length, last_byte)) {
2906                                 spin_lock_irqsave(&adapter->stats_lock, flags);
2907                                 e1000_tbi_adjust_stats(&adapter->hw,
2908                                                        &adapter->stats,
2909                                                        length, skb->data);
2910                                 spin_unlock_irqrestore(&adapter->stats_lock,
2911                                                        flags);
2912                                 length--;
2913                         } else {
2914                                 dev_kfree_skb_irq(skb);
2915                                 goto next_desc;
2916                         }
2917                 }
2918
2919                 /* Good Receive */
2920                 skb_put(skb, length - ETHERNET_FCS_SIZE);
2921
2922                 /* Receive Checksum Offload */
2923                 e1000_rx_checksum(adapter,
2924                                   (uint32_t)(rx_desc->status) |
2925                                   ((uint32_t)(rx_desc->errors) << 24),
2926                                   rx_desc->csum, skb);
2927                 skb->protocol = eth_type_trans(skb, netdev);
2928 #ifdef CONFIG_E1000_NAPI
2929                 if(unlikely(adapter->vlgrp &&
2930                             (rx_desc->status & E1000_RXD_STAT_VP))) {
2931                         vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
2932                                                  le16_to_cpu(rx_desc->special) &
2933                                                  E1000_RXD_SPC_VLAN_MASK);
2934                 } else {
2935                         netif_receive_skb(skb);
2936                 }
2937 #else /* CONFIG_E1000_NAPI */
2938                 if(unlikely(adapter->vlgrp &&
2939                             (rx_desc->status & E1000_RXD_STAT_VP))) {
2940                         vlan_hwaccel_rx(skb, adapter->vlgrp,
2941                                         le16_to_cpu(rx_desc->special) &
2942                                         E1000_RXD_SPC_VLAN_MASK);
2943                 } else {
2944                         netif_rx(skb);
2945                 }
2946 #endif /* CONFIG_E1000_NAPI */
2947                 netdev->last_rx = jiffies;
2948
2949 next_desc:
2950                 rx_desc->status = 0;
2951                 buffer_info->skb = NULL;
2952                 if(unlikely(++i == rx_ring->count)) i = 0;
2953
2954                 rx_desc = E1000_RX_DESC(*rx_ring, i);
2955         }
2956         rx_ring->next_to_clean = i;
2957         adapter->alloc_rx_buf(adapter);
2958
2959         return cleaned;
2960 }
2961
2962 /**
2963  * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
2964  * @adapter: board private structure
2965  **/
2966
2967 static boolean_t
2968 #ifdef CONFIG_E1000_NAPI
2969 e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, int *work_done,
2970                       int work_to_do)
2971 #else
2972 e1000_clean_rx_irq_ps(struct e1000_adapter *adapter)
2973 #endif
2974 {
2975         struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
2976         union e1000_rx_desc_packet_split *rx_desc;
2977         struct net_device *netdev = adapter->netdev;
2978         struct pci_dev *pdev = adapter->pdev;
2979         struct e1000_buffer *buffer_info;
2980         struct e1000_ps_page *ps_page;
2981         struct e1000_ps_page_dma *ps_page_dma;
2982         struct sk_buff *skb;
2983         unsigned int i, j;
2984         uint32_t length, staterr;
2985         boolean_t cleaned = FALSE;
2986
2987         i = rx_ring->next_to_clean;
2988         rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
2989         staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
2990
2991         while(staterr & E1000_RXD_STAT_DD) {
2992                 buffer_info = &rx_ring->buffer_info[i];
2993                 ps_page = &rx_ring->ps_page[i];
2994                 ps_page_dma = &rx_ring->ps_page_dma[i];
2995 #ifdef CONFIG_E1000_NAPI
2996                 if(unlikely(*work_done >= work_to_do))
2997                         break;
2998                 (*work_done)++;
2999 #endif
3000                 cleaned = TRUE;
3001                 pci_unmap_single(pdev, buffer_info->dma,
3002                                  buffer_info->length,
3003                                  PCI_DMA_FROMDEVICE);
3004
3005                 skb = buffer_info->skb;
3006
3007                 if(unlikely(!(staterr & E1000_RXD_STAT_EOP))) {
3008                         E1000_DBG("%s: Packet Split buffers didn't pick up"
3009                                   " the full packet\n", netdev->name);
3010                         dev_kfree_skb_irq(skb);
3011                         goto next_desc;
3012                 }
3013
3014                 if(unlikely(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
3015                         dev_kfree_skb_irq(skb);
3016                         goto next_desc;
3017                 }
3018
3019                 length = le16_to_cpu(rx_desc->wb.middle.length0);
3020
3021                 if(unlikely(!length)) {
3022                         E1000_DBG("%s: Last part of the packet spanning"
3023                                   " multiple descriptors\n", netdev->name);
3024                         dev_kfree_skb_irq(skb);
3025                         goto next_desc;
3026                 }
3027
3028                 /* Good Receive */
3029                 skb_put(skb, length);
3030
3031                 for(j = 0; j < PS_PAGE_BUFFERS; j++) {
3032                         if(!(length = le16_to_cpu(rx_desc->wb.upper.length[j])))
3033                                 break;
3034
3035                         pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j],
3036                                         PAGE_SIZE, PCI_DMA_FROMDEVICE);
3037                         ps_page_dma->ps_page_dma[j] = 0;
3038                         skb_shinfo(skb)->frags[j].page =
3039                                 ps_page->ps_page[j];
3040                         ps_page->ps_page[j] = NULL;
3041                         skb_shinfo(skb)->frags[j].page_offset = 0;
3042                         skb_shinfo(skb)->frags[j].size = length;
3043                         skb_shinfo(skb)->nr_frags++;
3044                         skb->len += length;
3045                         skb->data_len += length;
3046                 }
3047
3048                 e1000_rx_checksum(adapter, staterr,
3049                                   rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
3050                 skb->protocol = eth_type_trans(skb, netdev);
3051
3052 #ifdef HAVE_RX_ZERO_COPY
3053                 if(likely(rx_desc->wb.upper.header_status &
3054                           E1000_RXDPS_HDRSTAT_HDRSP))
3055                         skb_shinfo(skb)->zero_copy = TRUE;
3056 #endif
3057 #ifdef CONFIG_E1000_NAPI
3058                 if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
3059                         vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
3060                                 le16_to_cpu(rx_desc->wb.middle.vlan) &
3061                                 E1000_RXD_SPC_VLAN_MASK);
3062                 } else {
3063                         netif_receive_skb(skb);
3064                 }
3065 #else /* CONFIG_E1000_NAPI */
3066                 if(unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
3067                         vlan_hwaccel_rx(skb, adapter->vlgrp,
3068                                 le16_to_cpu(rx_desc->wb.middle.vlan) &
3069                                 E1000_RXD_SPC_VLAN_MASK);
3070                 } else {
3071                         netif_rx(skb);
3072                 }
3073 #endif /* CONFIG_E1000_NAPI */
3074                 netdev->last_rx = jiffies;
3075
3076 next_desc:
3077                 rx_desc->wb.middle.status_error &= ~0xFF;
3078                 buffer_info->skb = NULL;
3079                 if(unlikely(++i == rx_ring->count)) i = 0;
3080
3081                 rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
3082                 staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
3083         }
3084         rx_ring->next_to_clean = i;
3085         adapter->alloc_rx_buf(adapter);
3086
3087         return cleaned;
3088 }
3089
3090 /**
3091  * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
3092  * @adapter: address of board private structure
3093  **/
3094
3095 static void
3096 e1000_alloc_rx_buffers(struct e1000_adapter *adapter)
3097 {
3098         struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
3099         struct net_device *netdev = adapter->netdev;
3100         struct pci_dev *pdev = adapter->pdev;
3101         struct e1000_rx_desc *rx_desc;
3102         struct e1000_buffer *buffer_info;
3103         struct sk_buff *skb;
3104         unsigned int i;
3105         unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
3106
3107         i = rx_ring->next_to_use;
3108         buffer_info = &rx_ring->buffer_info[i];
3109
3110         while(!buffer_info->skb) {
3111                 skb = dev_alloc_skb(bufsz);
3112
3113                 if(unlikely(!skb)) {
3114                         /* Better luck next round */
3115                         break;
3116                 }
3117
3118                 /* Fix for errata 23, can't cross 64kB boundary */
3119                 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
3120                         struct sk_buff *oldskb = skb;
3121                         DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes "
3122                                              "at %p\n", bufsz, skb->data);
3123                         /* Try again, without freeing the previous */
3124                         skb = dev_alloc_skb(bufsz);
3125                         /* Failed allocation, critical failure */
3126                         if (!skb) {
3127                                 dev_kfree_skb(oldskb);
3128                                 break;
3129                         }
3130
3131                         if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
3132                                 /* give up */
3133                                 dev_kfree_skb(skb);
3134                                 dev_kfree_skb(oldskb);
3135                                 break; /* while !buffer_info->skb */
3136                         } else {
3137                                 /* Use new allocation */
3138                                 dev_kfree_skb(oldskb);
3139                         }
3140                 }
3141                 /* Make buffer alignment 2 beyond a 16 byte boundary;
3142                  * this will result in a 16 byte aligned IP header after
3143                  * the 14 byte MAC header is removed
3144                  */
3145                 skb_reserve(skb, NET_IP_ALIGN);
3146
3147                 skb->dev = netdev;
3148
3149                 buffer_info->skb = skb;
3150                 buffer_info->length = adapter->rx_buffer_len;
3151                 buffer_info->dma = pci_map_single(pdev,
3152                                                   skb->data,
3153                                                   adapter->rx_buffer_len,
3154                                                   PCI_DMA_FROMDEVICE);
3155
3156                 /* Fix for errata 23, can't cross 64kB boundary */
3157                 if (!e1000_check_64k_bound(adapter,
3158                                         (void *)(unsigned long)buffer_info->dma,
3159                                         adapter->rx_buffer_len)) {
3160                         DPRINTK(RX_ERR, ERR,
3161                                 "dma align check failed: %u bytes at %p\n",
3162                                 adapter->rx_buffer_len,
3163                                 (void *)(unsigned long)buffer_info->dma);
3164                         dev_kfree_skb(skb);
3165                         buffer_info->skb = NULL;
3166
3167                         pci_unmap_single(pdev, buffer_info->dma,
3168                                          adapter->rx_buffer_len,
3169                                          PCI_DMA_FROMDEVICE);
3170
3171                         break; /* while !buffer_info->skb */
3172                 }
3173                 rx_desc = E1000_RX_DESC(*rx_ring, i);
3174                 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
3175
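                     /* (i & ~(E1000_RX_BUFFER_WRITE - 1)) == i holds only when i
                      * is a multiple of E1000_RX_BUFFER_WRITE (assumed to be a
                      * power of two), so the tail register is bumped in batches
                      * rather than once per descriptor. */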
3176                 if(unlikely((i & ~(E1000_RX_BUFFER_WRITE - 1)) == i)) {
3177                         /* Force memory writes to complete before letting h/w
3178                          * know there are new descriptors to fetch.  (Only
3179                          * applicable for weak-ordered memory model archs,
3180                          * such as IA-64). */
3181                         wmb();
3182                         E1000_WRITE_REG(&adapter->hw, RDT, i);
3183                 }
3184
3185                 if(unlikely(++i == rx_ring->count)) i = 0;
3186                 buffer_info = &rx_ring->buffer_info[i];
3187         }
3188
3189         rx_ring->next_to_use = i;
3190 }
3191
3192 /**
3193  * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
3194  * @adapter: address of board private structure
3195  **/
3196
3197 static void
3198 e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter)
3199 {
3200         struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
3201         struct net_device *netdev = adapter->netdev;
3202         struct pci_dev *pdev = adapter->pdev;
3203         union e1000_rx_desc_packet_split *rx_desc;
3204         struct e1000_buffer *buffer_info;
3205         struct e1000_ps_page *ps_page;
3206         struct e1000_ps_page_dma *ps_page_dma;
3207         struct sk_buff *skb;
3208         unsigned int i, j;
3209
3210         i = rx_ring->next_to_use;
3211         buffer_info = &rx_ring->buffer_info[i];
3212         ps_page = &rx_ring->ps_page[i];
3213         ps_page_dma = &rx_ring->ps_page_dma[i];
3214
3215         while(!buffer_info->skb) {
3216                 rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
3217
3218                 for(j = 0; j < PS_PAGE_BUFFERS; j++) {
3219                         if(unlikely(!ps_page->ps_page[j])) {
3220                                 ps_page->ps_page[j] =
3221                                         alloc_page(GFP_ATOMIC);
3222                                 if(unlikely(!ps_page->ps_page[j]))
3223                                         goto no_buffers;
3224                                 ps_page_dma->ps_page_dma[j] =
3225                                         pci_map_page(pdev,
3226                                                      ps_page->ps_page[j],
3227                                                      0, PAGE_SIZE,
3228                                                      PCI_DMA_FROMDEVICE);
3229                         }
3230                         /* Refresh the desc even if buffer_addrs didn't
3231                          * change because each write-back erases this info.
3232                          */
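                             /* buffer_addr[0] is the header buffer (set below),
                              * so page j goes in buffer_addr[j+1]. */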
3233                         rx_desc->read.buffer_addr[j+1] =
3234                                 cpu_to_le64(ps_page_dma->ps_page_dma[j]);
3235                 }
3236
3237                 skb = dev_alloc_skb(adapter->rx_ps_bsize0 + NET_IP_ALIGN);
3238
3239                 if(unlikely(!skb))
3240                         break;
3241
3242                 /* Make buffer alignment 2 beyond a 16 byte boundary;
3243                  * this will result in a 16 byte aligned IP header after
3244                  * the 14 byte MAC header is removed.
3245                  */
3246                 skb_reserve(skb, NET_IP_ALIGN);
3247
3248                 skb->dev = netdev;
3249
3250                 buffer_info->skb = skb;
3251                 buffer_info->length = adapter->rx_ps_bsize0;
3252                 buffer_info->dma = pci_map_single(pdev, skb->data,
3253                                                   adapter->rx_ps_bsize0,
3254                                                   PCI_DMA_FROMDEVICE);
3255
3256                 rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
3257
3258                 if(unlikely((i & ~(E1000_RX_BUFFER_WRITE - 1)) == i)) {
3259                         /* Force memory writes to complete before letting h/w
3260                          * know there are new descriptors to fetch.  (Only
3261                          * applicable for weak-ordered memory model archs,
3262                          * such as IA-64). */
3263                         wmb();
3264                         /* Hardware increments by 16 bytes, but packet split
3265                          * descriptors are 32 bytes...so we increment tail
3266                          * twice as much.
3267                          */
3268                         E1000_WRITE_REG(&adapter->hw, RDT, i<<1);
3269                 }
3270
3271                 if(unlikely(++i == rx_ring->count)) i = 0;
3272                 buffer_info = &rx_ring->buffer_info[i];
3273                 ps_page = &rx_ring->ps_page[i];
3274                 ps_page_dma = &rx_ring->ps_page_dma[i];
3275         }
3276
3277 no_buffers:
3278         rx_ring->next_to_use = i;
3279 }
3280
3281 /**
3282  * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
3283  * @adapter: board private structure
3284  **/
3285
3286 static void
3287 e1000_smartspeed(struct e1000_adapter *adapter)
3288 {
3289         uint16_t phy_status;
3290         uint16_t phy_ctrl;
3291
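             /* Rough sequence, derived from the code below: on repeated
              * master/slave configuration faults first clear the forced
              * master/slave setting and restart autoneg; if there is still no
              * link after E1000_SMARTSPEED_DOWNSHIFT passes, force it back on;
              * the counter resets after E1000_SMARTSPEED_MAX passes. */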
3292         if((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg ||
3293            !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
3294                 return;
3295
3296         if(adapter->smartspeed == 0) {
3297                 /* If the Master/Slave config fault is asserted twice in a
3298                  * row, assume the faults are back-to-back */
3299                 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
3300                 if(!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
3301                 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
3302                 if(!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
3303                 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
3304                 if(phy_ctrl & CR_1000T_MS_ENABLE) {
3305                         phy_ctrl &= ~CR_1000T_MS_ENABLE;
3306                         e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL,
3307                                             phy_ctrl);
3308                         adapter->smartspeed++;
3309                         if(!e1000_phy_setup_autoneg(&adapter->hw) &&
3310                            !e1000_read_phy_reg(&adapter->hw, PHY_CTRL,
3311                                                &phy_ctrl)) {
3312                                 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
3313                                              MII_CR_RESTART_AUTO_NEG);
3314                                 e1000_write_phy_reg(&adapter->hw, PHY_CTRL,
3315                                                     phy_ctrl);
3316                         }
3317                 }
3318                 return;
3319         } else if(adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
3320                 /* If still no link, perhaps using 2/3 pair cable */
3321                 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
3322                 phy_ctrl |= CR_1000T_MS_ENABLE;
3323                 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_ctrl);
3324                 if(!e1000_phy_setup_autoneg(&adapter->hw) &&
3325                    !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_ctrl)) {
3326                         phy_ctrl |= (MII_CR_AUTO_NEG_EN |
3327                                      MII_CR_RESTART_AUTO_NEG);
3328                         e1000_write_phy_reg(&adapter->hw, PHY_CTRL, phy_ctrl);
3329                 }
3330         }
3331         /* Restart process after E1000_SMARTSPEED_MAX iterations */
3332         if(adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
3333                 adapter->smartspeed = 0;
3334 }
3335
3336 /**
3337  * e1000_ioctl - entry point for device ioctl calls
3338  * @netdev: network interface device structure
3339  * @ifr: pointer to the interface request structure
3340  * @cmd: ioctl command number
3341  **/
3342
3343 static int
3344 e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
3345 {
3346         switch (cmd) {
3347         case SIOCGMIIPHY:
3348         case SIOCGMIIREG:
3349         case SIOCSMIIREG:
3350                 return e1000_mii_ioctl(netdev, ifr, cmd);
3351         default:
3352                 return -EOPNOTSUPP;
3353         }
3354 }
3355
3356 /**
3357  * e1000_mii_ioctl - MII register access for the SIOC{G,S}MII ioctls
3358  * @netdev: network interface device structure
3359  * @ifr: pointer to the interface request structure
3360  * @cmd: ioctl command (SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG)
3361  **/
3362
3363 static int
3364 e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
3365 {
3366         struct e1000_adapter *adapter = netdev_priv(netdev);
3367         struct mii_ioctl_data *data = if_mii(ifr);
3368         int retval;
3369         uint16_t mii_reg;
3370         uint16_t spddplx;
3371         unsigned long flags;
3372
3373         if(adapter->hw.media_type != e1000_media_type_copper)
3374                 return -EOPNOTSUPP;
3375
3376         switch (cmd) {
3377         case SIOCGMIIPHY:
3378                 data->phy_id = adapter->hw.phy_addr;
3379                 break;
3380         case SIOCGMIIREG:
3381                 if(!capable(CAP_NET_ADMIN))
3382                         return -EPERM;
3383                 spin_lock_irqsave(&adapter->stats_lock, flags);
3384                 if(e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
3385                                    &data->val_out)) {
3386                         spin_unlock_irqrestore(&adapter->stats_lock, flags);
3387                         return -EIO;
3388                 }
3389                 spin_unlock_irqrestore(&adapter->stats_lock, flags);
3390                 break;
3391         case SIOCSMIIREG:
3392                 if(!capable(CAP_NET_ADMIN))
3393                         return -EPERM;
3394                 if(data->reg_num & ~(0x1F))
3395                         return -EFAULT;
3396                 mii_reg = data->val_in;
3397                 spin_lock_irqsave(&adapter->stats_lock, flags);
3398                 if(e1000_write_phy_reg(&adapter->hw, data->reg_num,
3399                                         mii_reg)) {
3400                         spin_unlock_irqrestore(&adapter->stats_lock, flags);
3401                         return -EIO;
3402                 }
3403                 if(adapter->hw.phy_type == e1000_phy_m88) {
3404                         switch (data->reg_num) {
3405                         case PHY_CTRL:
3406                                 if(mii_reg & MII_CR_POWER_DOWN)
3407                                         break;
3408                                 if(mii_reg & MII_CR_AUTO_NEG_EN) {
3409                                         adapter->hw.autoneg = 1;
3410                                         adapter->hw.autoneg_advertised = 0x2F;
3411                                 } else {
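                                                 /* These masks appear to be the
                                                  * standard MII control register
                                                  * bits: 0x40 speed MSB (1000),
                                                  * 0x2000 speed LSB (100),
                                                  * 0x100 full duplex. */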
3412                                         if (mii_reg & 0x40)
3413                                                 spddplx = SPEED_1000;
3414                                         else if (mii_reg & 0x2000)
3415                                                 spddplx = SPEED_100;
3416                                         else
3417                                                 spddplx = SPEED_10;
3418                                         spddplx += (mii_reg & 0x100)
3419                                                    ? FULL_DUPLEX :
3420                                                    HALF_DUPLEX;
3421                                         retval = e1000_set_spd_dplx(adapter,
3422                                                                     spddplx);
3423                                         if(retval) {
3424                                                 spin_unlock_irqrestore(
3425                                                         &adapter->stats_lock, 
3426                                                         flags);
3427                                                 return retval;
3428                                         }
3429                                 }
3430                                 if(netif_running(adapter->netdev)) {
3431                                         e1000_down(adapter);
3432                                         e1000_up(adapter);
3433                                 } else
3434                                         e1000_reset(adapter);
3435                                 break;
3436                         case M88E1000_PHY_SPEC_CTRL:
3437                         case M88E1000_EXT_PHY_SPEC_CTRL:
3438                                 if(e1000_phy_reset(&adapter->hw)) {
3439                                         spin_unlock_irqrestore(
3440                                                 &adapter->stats_lock, flags);
3441                                         return -EIO;
3442                                 }
3443                                 break;
3444                         }
3445                 } else {
3446                         switch (data->reg_num) {
3447                         case PHY_CTRL:
3448                                 if(mii_reg & MII_CR_POWER_DOWN)
3449                                         break;
3450                                 if(netif_running(adapter->netdev)) {
3451                                         e1000_down(adapter);
3452                                         e1000_up(adapter);
3453                                 } else
3454                                         e1000_reset(adapter);
3455                                 break;
3456                         }
3457                 }
3458                 spin_unlock_irqrestore(&adapter->stats_lock, flags);
3459                 break;
3460         default:
3461                 return -EOPNOTSUPP;
3462         }
3463         return E1000_SUCCESS;
3464 }
3465
3466 void
3467 e1000_pci_set_mwi(struct e1000_hw *hw)
3468 {
3469         struct e1000_adapter *adapter = hw->back;
3470         int ret_val = pci_set_mwi(adapter->pdev);
3471
3472         if(ret_val)
3473                 DPRINTK(PROBE, ERR, "Error in setting MWI\n");
3474 }
3475
3476 void
3477 e1000_pci_clear_mwi(struct e1000_hw *hw)
3478 {
3479         struct e1000_adapter *adapter = hw->back;
3480
3481         pci_clear_mwi(adapter->pdev);
3482 }
3483
3484 void
3485 e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
3486 {
3487         struct e1000_adapter *adapter = hw->back;
3488
3489         pci_read_config_word(adapter->pdev, reg, value);
3490 }
3491
3492 void
3493 e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
3494 {
3495         struct e1000_adapter *adapter = hw->back;
3496
3497         pci_write_config_word(adapter->pdev, reg, *value);
3498 }
3499
3500 uint32_t
3501 e1000_io_read(struct e1000_hw *hw, unsigned long port)
3502 {
3503         return inl(port);
3504 }
3505
3506 void
3507 e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value)
3508 {
3509         outl(value, port);
3510 }
3511
3512 static void
3513 e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
3514 {
3515         struct e1000_adapter *adapter = netdev_priv(netdev);
3516         uint32_t ctrl, rctl;
3517
3518         e1000_irq_disable(adapter);
3519         adapter->vlgrp = grp;
3520
3521         if(grp) {
3522                 /* enable VLAN tag insert/strip */
3523                 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
3524                 ctrl |= E1000_CTRL_VME;
3525                 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
3526
3527                 /* enable VLAN receive filtering */
3528                 rctl = E1000_READ_REG(&adapter->hw, RCTL);
3529                 rctl |= E1000_RCTL_VFE;
3530                 rctl &= ~E1000_RCTL_CFIEN;
3531                 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
3532                 e1000_update_mng_vlan(adapter);
3533         } else {
3534                 /* disable VLAN tag insert/strip */
3535                 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
3536                 ctrl &= ~E1000_CTRL_VME;
3537                 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
3538
3539                 /* disable VLAN filtering */
3540                 rctl = E1000_READ_REG(&adapter->hw, RCTL);
3541                 rctl &= ~E1000_RCTL_VFE;
3542                 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
3543                 if(adapter->mng_vlan_id != (uint16_t)E1000_MNG_VLAN_NONE) {
3544                         e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
3545                         adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
3546                 }
3547         }
3548
3549         e1000_irq_enable(adapter);
3550 }
3551
3552 static void
3553 e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
3554 {
3555         struct e1000_adapter *adapter = netdev_priv(netdev);
3556         uint32_t vfta, index;
3557         if((adapter->hw.mng_cookie.status &
3558                 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
3559                 (vid == adapter->mng_vlan_id))
3560                 return;
3561         /* add VID to filter table */
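             /* VFTA is an array of 128 32-bit registers covering all 4096 VLAN
              * IDs: bits 11:5 of the VID pick the register, bits 4:0 the bit. */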
3562         index = (vid >> 5) & 0x7F;
3563         vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
3564         vfta |= (1 << (vid & 0x1F));
3565         e1000_write_vfta(&adapter->hw, index, vfta);
3566 }
3567
3568 static void
3569 e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
3570 {
3571         struct e1000_adapter *adapter = netdev_priv(netdev);
3572         uint32_t vfta, index;
3573
3574         e1000_irq_disable(adapter);
3575
3576         if(adapter->vlgrp)
3577                 adapter->vlgrp->vlan_devices[vid] = NULL;
3578
3579         e1000_irq_enable(adapter);
3580
3581         if((adapter->hw.mng_cookie.status &
3582                 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
3583                 (vid == adapter->mng_vlan_id))
3584                 return;
3585         /* remove VID from filter table */
3586         index = (vid >> 5) & 0x7F;
3587         vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
3588         vfta &= ~(1 << (vid & 0x1F));
3589         e1000_write_vfta(&adapter->hw, index, vfta);
3590 }
3591
3592 static void
3593 e1000_restore_vlan(struct e1000_adapter *adapter)
3594 {
3595         e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);
3596
3597         if(adapter->vlgrp) {
3598                 uint16_t vid;
3599                 for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
3600                         if(!adapter->vlgrp->vlan_devices[vid])
3601                                 continue;
3602                         e1000_vlan_rx_add_vid(adapter->netdev, vid);
3603                 }
3604         }
3605 }
3606
3607 int
3608 e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx)
3609 {
3610         adapter->hw.autoneg = 0;
3611
3612         /* Fiber NICs only allow 1000 Mbps full duplex */
3613         if((adapter->hw.media_type == e1000_media_type_fiber) &&
3614                 spddplx != (SPEED_1000 + DUPLEX_FULL)) {
3615                 DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
3616                 return -EINVAL;
3617         }
3618
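             /* spddplx is the arithmetic sum of a speed value and a duplex value
              * (see how e1000_mii_ioctl builds it), so each case below matches
              * one speed/duplex combination. */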
3619         switch(spddplx) {
3620         case SPEED_10 + DUPLEX_HALF:
3621                 adapter->hw.forced_speed_duplex = e1000_10_half;
3622                 break;
3623         case SPEED_10 + DUPLEX_FULL:
3624                 adapter->hw.forced_speed_duplex = e1000_10_full;
3625                 break;
3626         case SPEED_100 + DUPLEX_HALF:
3627                 adapter->hw.forced_speed_duplex = e1000_100_half;
3628                 break;
3629         case SPEED_100 + DUPLEX_FULL:
3630                 adapter->hw.forced_speed_duplex = e1000_100_full;
3631                 break;
3632         case SPEED_1000 + DUPLEX_FULL:
3633                 adapter->hw.autoneg = 1;
3634                 adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
3635                 break;
3636         case SPEED_1000 + DUPLEX_HALF: /* not supported */
3637         default:
3638                 DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
3639                 return -EINVAL;
3640         }
3641         return 0;
3642 }
3643
3644 static int
3645 e1000_suspend(struct pci_dev *pdev, pm_message_t state)
3646 {
3647         struct net_device *netdev = pci_get_drvdata(pdev);
3648         struct e1000_adapter *adapter = netdev_priv(netdev);
3649         uint32_t ctrl, ctrl_ext, rctl, manc, status, swsm;
3650         uint32_t wufc = adapter->wol;
3651
3652         netif_device_detach(netdev);
3653
3654         if(netif_running(netdev))
3655                 e1000_down(adapter);
3656
3657         status = E1000_READ_REG(&adapter->hw, STATUS);
3658         if(status & E1000_STATUS_LU)
3659                 wufc &= ~E1000_WUFC_LNKC;
3660
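             /* If any wake-up filters were requested, leave the receiver
              * configured and arm the Wake Up Filter/Control registers so the
              * MAC can generate a PME from D3; otherwise clear them. */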
3661         if(wufc) {
3662                 e1000_setup_rctl(adapter);
3663                 e1000_set_multi(netdev);
3664
3665                 /* turn on all-multi mode if wake on multicast is enabled */
3666                 if(adapter->wol & E1000_WUFC_MC) {
3667                         rctl = E1000_READ_REG(&adapter->hw, RCTL);
3668                         rctl |= E1000_RCTL_MPE;
3669                         E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
3670                 }
3671
3672                 if(adapter->hw.mac_type >= e1000_82540) {
3673                         ctrl = E1000_READ_REG(&adapter->hw, CTRL);
3674                         /* advertise wake from D3Cold */
3675                         #define E1000_CTRL_ADVD3WUC 0x00100000
3676                         /* phy power management enable */
3677                         #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
3678                         ctrl |= E1000_CTRL_ADVD3WUC |
3679                                 E1000_CTRL_EN_PHY_PWR_MGMT;
3680                         E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
3681                 }
3682
3683                 if(adapter->hw.media_type == e1000_media_type_fiber ||
3684                    adapter->hw.media_type == e1000_media_type_internal_serdes) {
3685                         /* keep the laser running in D3 */
3686                         ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
3687                         ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
3688                         E1000_WRITE_REG(&adapter->hw, CTRL_EXT, ctrl_ext);
3689                 }
3690
3691                 /* Allow time for pending master requests to run */
3692                 e1000_disable_pciex_master(&adapter->hw);
3693
3694                 E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN);
3695                 E1000_WRITE_REG(&adapter->hw, WUFC, wufc);
3696                 pci_enable_wake(pdev, 3, 1);
3697                 pci_enable_wake(pdev, 4, 1); /* 4 == D3 cold */
3698         } else {
3699                 E1000_WRITE_REG(&adapter->hw, WUC, 0);
3700                 E1000_WRITE_REG(&adapter->hw, WUFC, 0);
3701                 pci_enable_wake(pdev, 3, 0);
3702                 pci_enable_wake(pdev, 4, 0); /* 4 == D3 cold */
3703         }
3704
3705         pci_save_state(pdev);
3706
3707         if(adapter->hw.mac_type >= e1000_82540 &&
3708            adapter->hw.media_type == e1000_media_type_copper) {
3709                 manc = E1000_READ_REG(&adapter->hw, MANC);
3710                 if(manc & E1000_MANC_SMBUS_EN) {
3711                         manc |= E1000_MANC_ARP_EN;
3712                         E1000_WRITE_REG(&adapter->hw, MANC, manc);
3713                         pci_enable_wake(pdev, 3, 1);
3714                         pci_enable_wake(pdev, 4, 1); /* 4 == D3 cold */
3715                 }
3716         }
3717
3718         switch(adapter->hw.mac_type) {
3719         case e1000_82573:
3720                 swsm = E1000_READ_REG(&adapter->hw, SWSM);
3721                 E1000_WRITE_REG(&adapter->hw, SWSM,
3722                                 swsm & ~E1000_SWSM_DRV_LOAD);
3723                 break;
3724         default:
3725                 break;
3726         }
3727
3728         pci_disable_device(pdev);
3729         pci_set_power_state(pdev, pci_choose_state(pdev, state));
3730
3731         return 0;
3732 }
3733
3734 #ifdef CONFIG_PM
3735 static int
3736 e1000_resume(struct pci_dev *pdev)
3737 {
3738         struct net_device *netdev = pci_get_drvdata(pdev);
3739         struct e1000_adapter *adapter = netdev_priv(netdev);
3740         uint32_t manc, ret_val, swsm;
3741
3742         pci_set_power_state(pdev, PCI_D0);
3743         pci_restore_state(pdev);
3744         ret_val = pci_enable_device(pdev);
3745         pci_set_master(pdev);
3746
3747         pci_enable_wake(pdev, PCI_D3hot, 0);
3748         pci_enable_wake(pdev, PCI_D3cold, 0);
3749
3750         e1000_reset(adapter);
3751         E1000_WRITE_REG(&adapter->hw, WUS, ~0);
3752
3753         if(netif_running(netdev))
3754                 e1000_up(adapter);
3755
3756         netif_device_attach(netdev);
3757
3758         if(adapter->hw.mac_type >= e1000_82540 &&
3759            adapter->hw.media_type == e1000_media_type_copper) {
3760                 manc = E1000_READ_REG(&adapter->hw, MANC);
3761                 manc &= ~(E1000_MANC_ARP_EN);
3762                 E1000_WRITE_REG(&adapter->hw, MANC, manc);
3763         }
3764
3765         switch(adapter->hw.mac_type) {
3766         case e1000_82573:
3767                 swsm = E1000_READ_REG(&adapter->hw, SWSM);
3768                 E1000_WRITE_REG(&adapter->hw, SWSM,
3769                                 swsm | E1000_SWSM_DRV_LOAD);
3770                 break;
3771         default:
3772                 break;
3773         }
3774
3775         return 0;
3776 }
3777 #endif
3778 #ifdef CONFIG_NET_POLL_CONTROLLER
3779 /*
3780  * Polling 'interrupt' - used by things like netconsole to send skbs
3781  * without having to re-enable interrupts. It's not called while
3782  * the interrupt routine is executing.
3783  */
3784 static void
3785 e1000_netpoll(struct net_device *netdev)
3786 {
3787         struct e1000_adapter *adapter = netdev_priv(netdev);
3788         disable_irq(adapter->pdev->irq);
3789         e1000_intr(adapter->pdev->irq, netdev, NULL);
3790         e1000_clean_tx_irq(adapter);
3791         enable_irq(adapter->pdev->irq);
3792 }
3793 #endif
3794
3795 /* e1000_main.c */