drivers/net/igb/igb_ethtool.c
/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

/* ethtool support for igb */

#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/ethtool.h>

#include "igb.h"

struct igb_stats {
        char stat_string[ETH_GSTRING_LEN];
        int sizeof_stat;
        int stat_offset;
};

#define IGB_STAT(m) sizeof(((struct igb_adapter *)0)->m), \
                      offsetof(struct igb_adapter, m)
static const struct igb_stats igb_gstrings_stats[] = {
        { "rx_packets", IGB_STAT(stats.gprc) },
        { "tx_packets", IGB_STAT(stats.gptc) },
        { "rx_bytes", IGB_STAT(stats.gorc) },
        { "tx_bytes", IGB_STAT(stats.gotc) },
        { "rx_broadcast", IGB_STAT(stats.bprc) },
        { "tx_broadcast", IGB_STAT(stats.bptc) },
        { "rx_multicast", IGB_STAT(stats.mprc) },
        { "tx_multicast", IGB_STAT(stats.mptc) },
        { "rx_errors", IGB_STAT(net_stats.rx_errors) },
        { "tx_errors", IGB_STAT(net_stats.tx_errors) },
        { "tx_dropped", IGB_STAT(net_stats.tx_dropped) },
        { "multicast", IGB_STAT(stats.mprc) },
        { "collisions", IGB_STAT(stats.colc) },
        { "rx_length_errors", IGB_STAT(net_stats.rx_length_errors) },
        { "rx_over_errors", IGB_STAT(net_stats.rx_over_errors) },
        { "rx_crc_errors", IGB_STAT(stats.crcerrs) },
        { "rx_frame_errors", IGB_STAT(net_stats.rx_frame_errors) },
        { "rx_no_buffer_count", IGB_STAT(stats.rnbc) },
        { "rx_missed_errors", IGB_STAT(stats.mpc) },
        { "tx_aborted_errors", IGB_STAT(stats.ecol) },
        { "tx_carrier_errors", IGB_STAT(stats.tncrs) },
        { "tx_fifo_errors", IGB_STAT(net_stats.tx_fifo_errors) },
        { "tx_heartbeat_errors", IGB_STAT(net_stats.tx_heartbeat_errors) },
        { "tx_window_errors", IGB_STAT(stats.latecol) },
        { "tx_abort_late_coll", IGB_STAT(stats.latecol) },
        { "tx_deferred_ok", IGB_STAT(stats.dc) },
        { "tx_single_coll_ok", IGB_STAT(stats.scc) },
        { "tx_multi_coll_ok", IGB_STAT(stats.mcc) },
        { "tx_timeout_count", IGB_STAT(tx_timeout_count) },
        { "tx_restart_queue", IGB_STAT(restart_queue) },
        { "rx_long_length_errors", IGB_STAT(stats.roc) },
        { "rx_short_length_errors", IGB_STAT(stats.ruc) },
        { "rx_align_errors", IGB_STAT(stats.algnerrc) },
        { "tx_tcp_seg_good", IGB_STAT(stats.tsctc) },
        { "tx_tcp_seg_failed", IGB_STAT(stats.tsctfc) },
        { "rx_flow_control_xon", IGB_STAT(stats.xonrxc) },
        { "rx_flow_control_xoff", IGB_STAT(stats.xoffrxc) },
        { "tx_flow_control_xon", IGB_STAT(stats.xontxc) },
        { "tx_flow_control_xoff", IGB_STAT(stats.xofftxc) },
        { "rx_long_byte_count", IGB_STAT(stats.gorc) },
        { "rx_csum_offload_good", IGB_STAT(hw_csum_good) },
        { "rx_csum_offload_errors", IGB_STAT(hw_csum_err) },
        { "rx_header_split", IGB_STAT(rx_hdr_split) },
        { "alloc_rx_buff_failed", IGB_STAT(alloc_rx_buff_failed) },
        { "tx_smbus", IGB_STAT(stats.mgptc) },
        { "rx_smbus", IGB_STAT(stats.mgprc) },
        { "dropped_smbus", IGB_STAT(stats.mgpdc) },
};

#define IGB_QUEUE_STATS_LEN \
        ((((((struct igb_adapter *)netdev->priv)->num_rx_queues > 1) ? \
          ((struct igb_adapter *)netdev->priv)->num_rx_queues : 0) + \
         (((((struct igb_adapter *)netdev->priv)->num_tx_queues > 1) ? \
          ((struct igb_adapter *)netdev->priv)->num_tx_queues : 0))) * \
        (sizeof(struct igb_queue_stats) / sizeof(u64)))
#define IGB_GLOBAL_STATS_LEN    \
        sizeof(igb_gstrings_stats) / sizeof(struct igb_stats)
#define IGB_STATS_LEN (IGB_GLOBAL_STATS_LEN + IGB_QUEUE_STATS_LEN)
static const char igb_gstrings_test[][ETH_GSTRING_LEN] = {
        "Register test  (offline)", "Eeprom test    (offline)",
        "Interrupt test (offline)", "Loopback test  (offline)",
        "Link test   (on/offline)"
};
#define IGB_TEST_LEN sizeof(igb_gstrings_test) / ETH_GSTRING_LEN

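/* Report supported/advertised link modes and the current link state
 * for the ethtool get_settings ioctl. */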
static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;

        if (hw->phy.media_type == e1000_media_type_copper) {

                ecmd->supported = (SUPPORTED_10baseT_Half |
                                   SUPPORTED_10baseT_Full |
                                   SUPPORTED_100baseT_Half |
                                   SUPPORTED_100baseT_Full |
                                   SUPPORTED_1000baseT_Full |
                                   SUPPORTED_Autoneg |
                                   SUPPORTED_TP);
                ecmd->advertising = ADVERTISED_TP;

                if (hw->mac.autoneg == 1) {
                        ecmd->advertising |= ADVERTISED_Autoneg;
                        /* the e1000 autoneg seems to match ethtool nicely */
                        ecmd->advertising |= hw->phy.autoneg_advertised;
                }

                ecmd->port = PORT_TP;
                ecmd->phy_address = hw->phy.addr;
        } else {
                ecmd->supported   = (SUPPORTED_1000baseT_Full |
                                     SUPPORTED_FIBRE |
                                     SUPPORTED_Autoneg);

                ecmd->advertising = (ADVERTISED_1000baseT_Full |
                                     ADVERTISED_FIBRE |
                                     ADVERTISED_Autoneg);

                ecmd->port = PORT_FIBRE;
        }

        ecmd->transceiver = XCVR_INTERNAL;

        if (rd32(E1000_STATUS) & E1000_STATUS_LU) {

                adapter->hw.mac.ops.get_speed_and_duplex(hw,
                                        &adapter->link_speed,
                                        &adapter->link_duplex);
                ecmd->speed = adapter->link_speed;

                /* unfortunately FULL_DUPLEX != DUPLEX_FULL
                 * and HALF_DUPLEX != DUPLEX_HALF */

                if (adapter->link_duplex == FULL_DUPLEX)
                        ecmd->duplex = DUPLEX_FULL;
                else
                        ecmd->duplex = DUPLEX_HALF;
        } else {
                ecmd->speed = -1;
                ecmd->duplex = -1;
        }

        ecmd->autoneg = ((hw->phy.media_type == e1000_media_type_fiber) ||
                         hw->mac.autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
        return 0;
}

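/* Apply user-requested autoneg/speed/duplex, then bounce the link so
 * the new settings take effect.  Refused while SoL/IDER is active. */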
static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;

        /* When SoL/IDER sessions are active, autoneg/speed/duplex
         * cannot be changed */
        if (igb_check_reset_block(hw)) {
                dev_err(&adapter->pdev->dev, "Cannot change link "
                        "characteristics when SoL/IDER is active.\n");
                return -EINVAL;
        }

        while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
                msleep(1);

        if (ecmd->autoneg == AUTONEG_ENABLE) {
                hw->mac.autoneg = 1;
                if (hw->phy.media_type == e1000_media_type_fiber)
                        hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full |
                                                     ADVERTISED_FIBRE |
                                                     ADVERTISED_Autoneg;
                else
                        hw->phy.autoneg_advertised = ecmd->advertising |
                                                     ADVERTISED_TP |
                                                     ADVERTISED_Autoneg;
                ecmd->advertising = hw->phy.autoneg_advertised;
        } else
                if (igb_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) {
                        clear_bit(__IGB_RESETTING, &adapter->state);
                        return -EINVAL;
                }

        /* reset the link */

        if (netif_running(adapter->netdev)) {
                igb_down(adapter);
                igb_up(adapter);
        } else
                igb_reset(adapter);

        clear_bit(__IGB_RESETTING, &adapter->state);
        return 0;
}

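/* Report the current flow control configuration. */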
static void igb_get_pauseparam(struct net_device *netdev,
                               struct ethtool_pauseparam *pause)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;

        pause->autoneg =
                (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);

        if (hw->fc.type == e1000_fc_rx_pause)
                pause->rx_pause = 1;
        else if (hw->fc.type == e1000_fc_tx_pause)
                pause->tx_pause = 1;
        else if (hw->fc.type == e1000_fc_full) {
                pause->rx_pause = 1;
                pause->tx_pause = 1;
        }
}

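/* Apply a new flow control configuration, either by renegotiating the
 * link or by forcing the MAC settings, depending on pause->autoneg. */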
static int igb_set_pauseparam(struct net_device *netdev,
                              struct ethtool_pauseparam *pause)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        int retval = 0;

        adapter->fc_autoneg = pause->autoneg;

        while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
                msleep(1);

        if (pause->rx_pause && pause->tx_pause)
                hw->fc.type = e1000_fc_full;
        else if (pause->rx_pause && !pause->tx_pause)
                hw->fc.type = e1000_fc_rx_pause;
        else if (!pause->rx_pause && pause->tx_pause)
                hw->fc.type = e1000_fc_tx_pause;
        else if (!pause->rx_pause && !pause->tx_pause)
                hw->fc.type = e1000_fc_none;

        hw->fc.original_type = hw->fc.type;

        if (adapter->fc_autoneg == AUTONEG_ENABLE) {
                if (netif_running(adapter->netdev)) {
                        igb_down(adapter);
                        igb_up(adapter);
                } else
                        igb_reset(adapter);
        } else
                retval = ((hw->phy.media_type == e1000_media_type_fiber) ?
                          igb_setup_link(hw) : igb_force_mac_fc(hw));

        clear_bit(__IGB_RESETTING, &adapter->state);
        return retval;
}

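/* Simple accessors for the rx/tx checksum offload flags. */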
static u32 igb_get_rx_csum(struct net_device *netdev)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        return adapter->rx_csum;
}

static int igb_set_rx_csum(struct net_device *netdev, u32 data)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        adapter->rx_csum = data;

        return 0;
}

static u32 igb_get_tx_csum(struct net_device *netdev)
{
        return (netdev->features & NETIF_F_HW_CSUM) != 0;
}

static int igb_set_tx_csum(struct net_device *netdev, u32 data)
{
        if (data)
                netdev->features |= NETIF_F_HW_CSUM;
        else
                netdev->features &= ~NETIF_F_HW_CSUM;

        return 0;
}

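/* Enable or disable TSO for IPv4 and IPv6 together. */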
static int igb_set_tso(struct net_device *netdev, u32 data)
{
        struct igb_adapter *adapter = netdev_priv(netdev);

        if (data) {
                netdev->features |= NETIF_F_TSO;
                netdev->features |= NETIF_F_TSO6;
        } else {
                netdev->features &= ~NETIF_F_TSO;
                netdev->features &= ~NETIF_F_TSO6;
        }

        dev_info(&adapter->pdev->dev, "TSO is %s\n",
                 data ? "Enabled" : "Disabled");
        return 0;
}

static u32 igb_get_msglevel(struct net_device *netdev)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        return adapter->msg_enable;
}

static void igb_set_msglevel(struct net_device *netdev, u32 data)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        adapter->msg_enable = data;
}

static int igb_get_regs_len(struct net_device *netdev)
{
#define IGB_REGS_LEN 551
        return IGB_REGS_LEN * sizeof(u32);
}

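/* Dump the device registers (plus a software copy of the statistics
 * counters) into the buffer supplied by ethtool -d. */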
static void igb_get_regs(struct net_device *netdev,
                         struct ethtool_regs *regs, void *p)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        u32 *regs_buff = p;
        u8 i;

        memset(p, 0, IGB_REGS_LEN * sizeof(u32));

        regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;

        /* General Registers */
        regs_buff[0] = rd32(E1000_CTRL);
        regs_buff[1] = rd32(E1000_STATUS);
        regs_buff[2] = rd32(E1000_CTRL_EXT);
        regs_buff[3] = rd32(E1000_MDIC);
        regs_buff[4] = rd32(E1000_SCTL);
        regs_buff[5] = rd32(E1000_CONNSW);
        regs_buff[6] = rd32(E1000_VET);
        regs_buff[7] = rd32(E1000_LEDCTL);
        regs_buff[8] = rd32(E1000_PBA);
        regs_buff[9] = rd32(E1000_PBS);
        regs_buff[10] = rd32(E1000_FRTIMER);
        regs_buff[11] = rd32(E1000_TCPTIMER);

        /* NVM Register */
        regs_buff[12] = rd32(E1000_EECD);

        /* Interrupt */
        regs_buff[13] = rd32(E1000_EICR);
        regs_buff[14] = rd32(E1000_EICS);
        regs_buff[15] = rd32(E1000_EIMS);
        regs_buff[16] = rd32(E1000_EIMC);
        regs_buff[17] = rd32(E1000_EIAC);
        regs_buff[18] = rd32(E1000_EIAM);
        regs_buff[19] = rd32(E1000_ICR);
        regs_buff[20] = rd32(E1000_ICS);
        regs_buff[21] = rd32(E1000_IMS);
        regs_buff[22] = rd32(E1000_IMC);
        regs_buff[23] = rd32(E1000_IAC);
        regs_buff[24] = rd32(E1000_IAM);
        regs_buff[25] = rd32(E1000_IMIRVP);

        /* Flow Control */
        regs_buff[26] = rd32(E1000_FCAL);
        regs_buff[27] = rd32(E1000_FCAH);
        regs_buff[28] = rd32(E1000_FCTTV);
        regs_buff[29] = rd32(E1000_FCRTL);
        regs_buff[30] = rd32(E1000_FCRTH);
        regs_buff[31] = rd32(E1000_FCRTV);

        /* Receive */
        regs_buff[32] = rd32(E1000_RCTL);
        regs_buff[33] = rd32(E1000_RXCSUM);
        regs_buff[34] = rd32(E1000_RLPML);
        regs_buff[35] = rd32(E1000_RFCTL);
        regs_buff[36] = rd32(E1000_MRQC);
        regs_buff[37] = rd32(E1000_VMD_CTL);

        /* Transmit */
        regs_buff[38] = rd32(E1000_TCTL);
        regs_buff[39] = rd32(E1000_TCTL_EXT);
        regs_buff[40] = rd32(E1000_TIPG);
        regs_buff[41] = rd32(E1000_DTXCTL);

        /* Wake Up */
        regs_buff[42] = rd32(E1000_WUC);
        regs_buff[43] = rd32(E1000_WUFC);
        regs_buff[44] = rd32(E1000_WUS);
        regs_buff[45] = rd32(E1000_IPAV);
        regs_buff[46] = rd32(E1000_WUPL);

        /* MAC */
        regs_buff[47] = rd32(E1000_PCS_CFG0);
        regs_buff[48] = rd32(E1000_PCS_LCTL);
        regs_buff[49] = rd32(E1000_PCS_LSTAT);
        regs_buff[50] = rd32(E1000_PCS_ANADV);
        regs_buff[51] = rd32(E1000_PCS_LPAB);
        regs_buff[52] = rd32(E1000_PCS_NPTX);
        regs_buff[53] = rd32(E1000_PCS_LPABNP);

        /* Statistics */
        regs_buff[54] = adapter->stats.crcerrs;
        regs_buff[55] = adapter->stats.algnerrc;
        regs_buff[56] = adapter->stats.symerrs;
        regs_buff[57] = adapter->stats.rxerrc;
        regs_buff[58] = adapter->stats.mpc;
        regs_buff[59] = adapter->stats.scc;
        regs_buff[60] = adapter->stats.ecol;
        regs_buff[61] = adapter->stats.mcc;
        regs_buff[62] = adapter->stats.latecol;
        regs_buff[63] = adapter->stats.colc;
        regs_buff[64] = adapter->stats.dc;
        regs_buff[65] = adapter->stats.tncrs;
        regs_buff[66] = adapter->stats.sec;
        regs_buff[67] = adapter->stats.htdpmc;
        regs_buff[68] = adapter->stats.rlec;
        regs_buff[69] = adapter->stats.xonrxc;
        regs_buff[70] = adapter->stats.xontxc;
        regs_buff[71] = adapter->stats.xoffrxc;
        regs_buff[72] = adapter->stats.xofftxc;
        regs_buff[73] = adapter->stats.fcruc;
        regs_buff[74] = adapter->stats.prc64;
        regs_buff[75] = adapter->stats.prc127;
        regs_buff[76] = adapter->stats.prc255;
        regs_buff[77] = adapter->stats.prc511;
        regs_buff[78] = adapter->stats.prc1023;
        regs_buff[79] = adapter->stats.prc1522;
        regs_buff[80] = adapter->stats.gprc;
        regs_buff[81] = adapter->stats.bprc;
        regs_buff[82] = adapter->stats.mprc;
        regs_buff[83] = adapter->stats.gptc;
        regs_buff[84] = adapter->stats.gorc;
        regs_buff[86] = adapter->stats.gotc;
        regs_buff[88] = adapter->stats.rnbc;
        regs_buff[89] = adapter->stats.ruc;
        regs_buff[90] = adapter->stats.rfc;
        regs_buff[91] = adapter->stats.roc;
        regs_buff[92] = adapter->stats.rjc;
        regs_buff[93] = adapter->stats.mgprc;
        regs_buff[94] = adapter->stats.mgpdc;
        regs_buff[95] = adapter->stats.mgptc;
        regs_buff[96] = adapter->stats.tor;
        regs_buff[98] = adapter->stats.tot;
        regs_buff[100] = adapter->stats.tpr;
        regs_buff[101] = adapter->stats.tpt;
        regs_buff[102] = adapter->stats.ptc64;
        regs_buff[103] = adapter->stats.ptc127;
        regs_buff[104] = adapter->stats.ptc255;
        regs_buff[105] = adapter->stats.ptc511;
        regs_buff[106] = adapter->stats.ptc1023;
        regs_buff[107] = adapter->stats.ptc1522;
        regs_buff[108] = adapter->stats.mptc;
        regs_buff[109] = adapter->stats.bptc;
        regs_buff[110] = adapter->stats.tsctc;
        regs_buff[111] = adapter->stats.iac;
        regs_buff[112] = adapter->stats.rpthc;
        regs_buff[113] = adapter->stats.hgptc;
        regs_buff[114] = adapter->stats.hgorc;
        regs_buff[116] = adapter->stats.hgotc;
        regs_buff[118] = adapter->stats.lenerrs;
        regs_buff[119] = adapter->stats.scvpc;
        regs_buff[120] = adapter->stats.hrmpc;

        /* These should probably be added to e1000_regs.h instead */
        #define E1000_PSRTYPE_REG(_i) (0x05480 + ((_i) * 4))
        #define E1000_RAL(_i)         (0x05400 + ((_i) * 8))
        #define E1000_RAH(_i)         (0x05404 + ((_i) * 8))
        #define E1000_IP4AT_REG(_i)   (0x05840 + ((_i) * 8))
        #define E1000_IP6AT_REG(_i)   (0x05880 + ((_i) * 4))
        #define E1000_WUPM_REG(_i)    (0x05A00 + ((_i) * 4))
        #define E1000_FFMT_REG(_i)    (0x09000 + ((_i) * 8))
        #define E1000_FFVT_REG(_i)    (0x09800 + ((_i) * 8))
        #define E1000_FFLT_REG(_i)    (0x05F00 + ((_i) * 8))

        for (i = 0; i < 4; i++)
                regs_buff[121 + i] = rd32(E1000_SRRCTL(i));
        for (i = 0; i < 4; i++)
                regs_buff[125 + i] = rd32(E1000_PSRTYPE_REG(i));
        for (i = 0; i < 4; i++)
                regs_buff[129 + i] = rd32(E1000_RDBAL(i));
        for (i = 0; i < 4; i++)
                regs_buff[133 + i] = rd32(E1000_RDBAH(i));
        for (i = 0; i < 4; i++)
                regs_buff[137 + i] = rd32(E1000_RDLEN(i));
        for (i = 0; i < 4; i++)
                regs_buff[141 + i] = rd32(E1000_RDH(i));
        for (i = 0; i < 4; i++)
                regs_buff[145 + i] = rd32(E1000_RDT(i));
        for (i = 0; i < 4; i++)
                regs_buff[149 + i] = rd32(E1000_RXDCTL(i));

        for (i = 0; i < 10; i++)
                regs_buff[153 + i] = rd32(E1000_EITR(i));
        for (i = 0; i < 8; i++)
                regs_buff[163 + i] = rd32(E1000_IMIR(i));
        for (i = 0; i < 8; i++)
                regs_buff[171 + i] = rd32(E1000_IMIREXT(i));
        for (i = 0; i < 16; i++)
                regs_buff[179 + i] = rd32(E1000_RAL(i));
        for (i = 0; i < 16; i++)
                regs_buff[195 + i] = rd32(E1000_RAH(i));

        for (i = 0; i < 4; i++)
                regs_buff[211 + i] = rd32(E1000_TDBAL(i));
        for (i = 0; i < 4; i++)
                regs_buff[215 + i] = rd32(E1000_TDBAH(i));
        for (i = 0; i < 4; i++)
                regs_buff[219 + i] = rd32(E1000_TDLEN(i));
        for (i = 0; i < 4; i++)
                regs_buff[223 + i] = rd32(E1000_TDH(i));
        for (i = 0; i < 4; i++)
                regs_buff[227 + i] = rd32(E1000_TDT(i));
        for (i = 0; i < 4; i++)
                regs_buff[231 + i] = rd32(E1000_TXDCTL(i));
        for (i = 0; i < 4; i++)
                regs_buff[235 + i] = rd32(E1000_TDWBAL(i));
        for (i = 0; i < 4; i++)
                regs_buff[239 + i] = rd32(E1000_TDWBAH(i));
        for (i = 0; i < 4; i++)
                regs_buff[243 + i] = rd32(E1000_DCA_TXCTRL(i));

        for (i = 0; i < 4; i++)
                regs_buff[247 + i] = rd32(E1000_IP4AT_REG(i));
        for (i = 0; i < 4; i++)
                regs_buff[251 + i] = rd32(E1000_IP6AT_REG(i));
        for (i = 0; i < 32; i++)
                regs_buff[255 + i] = rd32(E1000_WUPM_REG(i));
        for (i = 0; i < 128; i++)
                regs_buff[287 + i] = rd32(E1000_FFMT_REG(i));
        for (i = 0; i < 128; i++)
                regs_buff[415 + i] = rd32(E1000_FFVT_REG(i));
        for (i = 0; i < 4; i++)
                regs_buff[543 + i] = rd32(E1000_FFLT_REG(i));

        regs_buff[547] = rd32(E1000_TDFH);
        regs_buff[548] = rd32(E1000_TDFT);
        regs_buff[549] = rd32(E1000_TDFHS);
        regs_buff[550] = rd32(E1000_TDFPC);
}

static int igb_get_eeprom_len(struct net_device *netdev)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        return adapter->hw.nvm.word_size * 2;
}

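/* Read an arbitrary byte range out of the NVM; reads happen a word at
 * a time and odd byte offsets are adjusted afterwards. */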
static int igb_get_eeprom(struct net_device *netdev,
                          struct ethtool_eeprom *eeprom, u8 *bytes)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        u16 *eeprom_buff;
        int first_word, last_word;
        int ret_val = 0;
        u16 i;

        if (eeprom->len == 0)
                return -EINVAL;

        eeprom->magic = hw->vendor_id | (hw->device_id << 16);

        first_word = eeprom->offset >> 1;
        last_word = (eeprom->offset + eeprom->len - 1) >> 1;

        eeprom_buff = kmalloc(sizeof(u16) *
                        (last_word - first_word + 1), GFP_KERNEL);
        if (!eeprom_buff)
                return -ENOMEM;

        if (hw->nvm.type == e1000_nvm_eeprom_spi)
                ret_val = hw->nvm.ops.read_nvm(hw, first_word,
                                            last_word - first_word + 1,
                                            eeprom_buff);
        else {
                for (i = 0; i < last_word - first_word + 1; i++) {
                        ret_val = hw->nvm.ops.read_nvm(hw, first_word + i, 1,
                                                    &eeprom_buff[i]);
                        if (ret_val)
                                break;
                }
        }

        /* Device's eeprom is always little-endian, word addressable */
        for (i = 0; i < last_word - first_word + 1; i++)
                le16_to_cpus(&eeprom_buff[i]);

        memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1),
                        eeprom->len);
        kfree(eeprom_buff);

        return ret_val;
}

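/* Write an arbitrary byte range into the NVM, using read/modify/write
 * for partial words at either end, then refresh the NVM checksum. */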
static int igb_set_eeprom(struct net_device *netdev,
                          struct ethtool_eeprom *eeprom, u8 *bytes)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        u16 *eeprom_buff;
        void *ptr;
        int max_len, first_word, last_word, ret_val = 0;
        u16 i;

        if (eeprom->len == 0)
                return -EOPNOTSUPP;

        if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
                return -EFAULT;

        max_len = hw->nvm.word_size * 2;

        first_word = eeprom->offset >> 1;
        last_word = (eeprom->offset + eeprom->len - 1) >> 1;
        eeprom_buff = kmalloc(max_len, GFP_KERNEL);
        if (!eeprom_buff)
                return -ENOMEM;

        ptr = (void *)eeprom_buff;

        if (eeprom->offset & 1) {
                /* need read/modify/write of first changed EEPROM word;
                 * only the second byte of the word is being modified */
                ret_val = hw->nvm.ops.read_nvm(hw, first_word, 1,
                                            &eeprom_buff[0]);
                ptr++;
        }
        if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
                /* need read/modify/write of last changed EEPROM word;
                 * only the first byte of the word is being modified */
                ret_val = hw->nvm.ops.read_nvm(hw, last_word, 1,
                                   &eeprom_buff[last_word - first_word]);
        }

        /* Device's eeprom is always little-endian, word addressable */
        for (i = 0; i < last_word - first_word + 1; i++)
                le16_to_cpus(&eeprom_buff[i]);

        memcpy(ptr, bytes, eeprom->len);

        for (i = 0; i < last_word - first_word + 1; i++)
                eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);

        ret_val = hw->nvm.ops.write_nvm(hw, first_word,
                                     last_word - first_word + 1, eeprom_buff);

        /* Update the checksum over the first part of the EEPROM if needed */
        if ((ret_val == 0) && (first_word <= NVM_CHECKSUM_REG))
                igb_update_nvm_checksum(hw);

        kfree(eeprom_buff);
        return ret_val;
}

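/* Report driver identification; the EEPROM image version stands in
 * for the firmware version on these parts. */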
static void igb_get_drvinfo(struct net_device *netdev,
                            struct ethtool_drvinfo *drvinfo)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        char firmware_version[32];
        u16 eeprom_data;

        strncpy(drvinfo->driver,  igb_driver_name, 32);
        strncpy(drvinfo->version, igb_driver_version, 32);

        /* EEPROM image version # is reported as firmware version # for
         * 82575 controllers */
        adapter->hw.nvm.ops.read_nvm(&adapter->hw, 5, 1, &eeprom_data);
        sprintf(firmware_version, "%d.%d-%d",
                (eeprom_data & 0xF000) >> 12,
                (eeprom_data & 0x0FF0) >> 4,
                eeprom_data & 0x000F);

        strncpy(drvinfo->fw_version, firmware_version, 32);
        strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
        drvinfo->n_stats = IGB_STATS_LEN;
        drvinfo->testinfo_len = IGB_TEST_LEN;
        drvinfo->regdump_len = igb_get_regs_len(netdev);
        drvinfo->eedump_len = igb_get_eeprom_len(netdev);
}

static void igb_get_ringparam(struct net_device *netdev,
                              struct ethtool_ringparam *ring)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct igb_ring *tx_ring = adapter->tx_ring;
        struct igb_ring *rx_ring = adapter->rx_ring;

        ring->rx_max_pending = IGB_MAX_RXD;
        ring->tx_max_pending = IGB_MAX_TXD;
        ring->rx_mini_max_pending = 0;
        ring->rx_jumbo_max_pending = 0;
        ring->rx_pending = rx_ring->count;
        ring->tx_pending = tx_ring->count;
        ring->rx_mini_pending = 0;
        ring->rx_jumbo_pending = 0;
}

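/* Resize the descriptor rings.  Rings are reallocated in place rather
 * than freed and rebuilt, because the MSI-X handlers hold pointers to
 * the ring structs. */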
static int igb_set_ringparam(struct net_device *netdev,
                             struct ethtool_ringparam *ring)
{
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct igb_buffer *old_buf;
        struct igb_buffer *old_rx_buf;
        void *old_desc;
        int i, err;
        u32 new_rx_count, new_tx_count, old_size;
        dma_addr_t old_dma;

        if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
                return -EINVAL;

        new_rx_count = max(ring->rx_pending, (u32)IGB_MIN_RXD);
        new_rx_count = min(new_rx_count, (u32)IGB_MAX_RXD);
        new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);

        new_tx_count = max(ring->tx_pending, (u32)IGB_MIN_TXD);
        new_tx_count = min(new_tx_count, (u32)IGB_MAX_TXD);
        new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);

        if ((new_tx_count == adapter->tx_ring->count) &&
            (new_rx_count == adapter->rx_ring->count)) {
                /* nothing to do */
                return 0;
        }

        while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
                msleep(1);

        if (netif_running(adapter->netdev))
                igb_down(adapter);

        /*
         * We can't just free everything and then setup again,
         * because the ISRs in MSI-X mode get passed pointers
         * to the tx and rx ring structs.
         */
        if (new_tx_count != adapter->tx_ring->count) {
                for (i = 0; i < adapter->num_tx_queues; i++) {
                        /* Save the existing descriptor ring */
                        old_buf = adapter->tx_ring[i].buffer_info;
                        old_desc = adapter->tx_ring[i].desc;
                        old_size = adapter->tx_ring[i].size;
                        old_dma = adapter->tx_ring[i].dma;
                        /* Try to allocate a new one */
                        adapter->tx_ring[i].buffer_info = NULL;
                        adapter->tx_ring[i].desc = NULL;
                        adapter->tx_ring[i].count = new_tx_count;
                        err = igb_setup_tx_resources(adapter,
                                                &adapter->tx_ring[i]);
                        if (err) {
                                /* Restore the old one so at least
                                 * the adapter still works, even if
                                 * we failed the request */
                                adapter->tx_ring[i].buffer_info = old_buf;
                                adapter->tx_ring[i].desc = old_desc;
                                adapter->tx_ring[i].size = old_size;
                                adapter->tx_ring[i].dma = old_dma;
                                goto err_setup;
                        }
                        /* Free the old buffer manually */
                        vfree(old_buf);
                        pci_free_consistent(adapter->pdev, old_size,
                                            old_desc, old_dma);
                }
        }

        if (new_rx_count != adapter->rx_ring->count) {
                for (i = 0; i < adapter->num_rx_queues; i++) {

                        old_rx_buf = adapter->rx_ring[i].buffer_info;
                        old_desc = adapter->rx_ring[i].desc;
                        old_size = adapter->rx_ring[i].size;
                        old_dma = adapter->rx_ring[i].dma;

                        adapter->rx_ring[i].buffer_info = NULL;
                        adapter->rx_ring[i].desc = NULL;
                        adapter->rx_ring[i].dma = 0;
                        adapter->rx_ring[i].count = new_rx_count;
                        err = igb_setup_rx_resources(adapter,
                                                     &adapter->rx_ring[i]);
                        if (err) {
                                adapter->rx_ring[i].buffer_info = old_rx_buf;
                                adapter->rx_ring[i].desc = old_desc;
                                adapter->rx_ring[i].size = old_size;
                                adapter->rx_ring[i].dma = old_dma;
                                goto err_setup;
                        }

                        vfree(old_rx_buf);
                        pci_free_consistent(adapter->pdev, old_size, old_desc,
                                            old_dma);
                }
        }

        err = 0;
err_setup:
        if (netif_running(adapter->netdev))
                igb_up(adapter);

        clear_bit(__IGB_RESETTING, &adapter->state);
        return err;
}

/* ethtool register test data */
struct igb_reg_test {
        u16 reg;
        u8  array_len;
        u8  test_type;
        u32 mask;
        u32 write;
};

/* In the hardware, registers are laid out either singly, in arrays
 * spaced 0x100 bytes apart, or in contiguous tables.  We assume
 * most tests take place on arrays or single registers (handled
 * as a single-element array) and special-case the tables.
 * Table tests are always pattern tests.
 *
 * We also make provision for some required setup steps by specifying
 * registers to be written without any read-back testing.
 */

#define PATTERN_TEST    1
#define SET_READ_TEST   2
#define WRITE_NO_TEST   3
#define TABLE32_TEST    4
#define TABLE64_TEST_LO 5
#define TABLE64_TEST_HI 6

/* default register test */
static struct igb_reg_test reg_test_82575[] = {
        { E1000_FCAL, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
        { E1000_FCAH, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
        { E1000_FCT, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
        { E1000_VET, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
        { E1000_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
        { E1000_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
        { E1000_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
        /* Enable all four RX queues before testing. */
        { E1000_RXDCTL(0), 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
        /* RDH is read-only for 82575, only test RDT. */
        { E1000_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
        { E1000_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
        { E1000_FCRTH, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
        { E1000_FCTTV, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
        { E1000_TIPG, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
        { E1000_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
        { E1000_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
        { E1000_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
        { E1000_RCTL, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
        { E1000_RCTL, 1, SET_READ_TEST, 0x04CFB3FE, 0x003FFFFB },
        { E1000_RCTL, 1, SET_READ_TEST, 0x04CFB3FE, 0xFFFFFFFF },
        { E1000_TCTL, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
        { E1000_TXCW, 1, PATTERN_TEST, 0xC000FFFF, 0x0000FFFF },
        { E1000_RA, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
        { E1000_RA, 16, TABLE64_TEST_HI, 0x800FFFFF, 0xFFFFFFFF },
        { E1000_MTA, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
        { 0, 0, 0, 0 }
};

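/* Write each test pattern to the register, read it back, and check
 * the masked result; records the failing register offset in *data. */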
static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
                             int reg, u32 mask, u32 write)
{
        u32 pat, val;
        u32 _test[] =
                {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
        for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {
                writel((_test[pat] & write), (adapter->hw.hw_addr + reg));
                val = readl(adapter->hw.hw_addr + reg);
                if (val != (_test[pat] & write & mask)) {
                        dev_err(&adapter->pdev->dev, "pattern test reg %04X "
                                "failed: got 0x%08X expected 0x%08X\n",
                                reg, val, (_test[pat] & write & mask));
                        *data = reg;
                        return 1;
                }
        }
        return 0;
}

static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
                              int reg, u32 mask, u32 write)
{
        u32 val;
        writel((write & mask), (adapter->hw.hw_addr + reg));
        val = readl(adapter->hw.hw_addr + reg);
        if ((write & mask) != (val & mask)) {
                dev_err(&adapter->pdev->dev, "set/check reg %04X test failed:"
                        " got 0x%08X expected 0x%08X\n", reg,
                        (val & mask), (write & mask));
                *data = reg;
                return 1;
        }
        return 0;
}

#define REG_PATTERN_TEST(reg, mask, write) \
        do { \
                if (reg_pattern_test(adapter, data, reg, mask, write)) \
                        return 1; \
        } while (0)

#define REG_SET_AND_CHECK(reg, mask, write) \
        do { \
                if (reg_set_and_check(adapter, data, reg, mask, write)) \
                        return 1; \
        } while (0)

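/* Walk the register test table.  The STATUS register is handled
 * separately first, since some of its bits are read-only or toggle. */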
static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
{
        struct e1000_hw *hw = &adapter->hw;
        struct igb_reg_test *test;
        u32 value, before, after;
        u32 i, toggle;

        toggle = 0x7FFFF3FF;
        test = reg_test_82575;

        /* Because the status register is such a special case,
         * we handle it separately from the rest of the register
         * tests.  Some bits are read-only, some toggle, and some
         * are writable on newer MACs.
         */
        before = rd32(E1000_STATUS);
        value = (rd32(E1000_STATUS) & toggle);
        wr32(E1000_STATUS, toggle);
        after = rd32(E1000_STATUS) & toggle;
        if (value != after) {
                dev_err(&adapter->pdev->dev, "failed STATUS register test "
                        "got: 0x%08X expected: 0x%08X\n", after, value);
                *data = 1;
                return 1;
        }
        /* restore previous status */
        wr32(E1000_STATUS, before);

        /* Perform the remainder of the register test, looping through
         * the test table until we either fail or reach the null entry.
         */
        while (test->reg) {
                for (i = 0; i < test->array_len; i++) {
                        switch (test->test_type) {
                        case PATTERN_TEST:
                                REG_PATTERN_TEST(test->reg + (i * 0x100),
                                                test->mask,
                                                test->write);
                                break;
                        case SET_READ_TEST:
                                REG_SET_AND_CHECK(test->reg + (i * 0x100),
                                                test->mask,
                                                test->write);
                                break;
                        case WRITE_NO_TEST:
                                writel(test->write,
                                    (adapter->hw.hw_addr + test->reg)
                                        + (i * 0x100));
                                break;
                        case TABLE32_TEST:
                                REG_PATTERN_TEST(test->reg + (i * 4),
                                                test->mask,
                                                test->write);
                                break;
                        case TABLE64_TEST_LO:
                                REG_PATTERN_TEST(test->reg + (i * 8),
                                                test->mask,
                                                test->write);
                                break;
                        case TABLE64_TEST_HI:
                                REG_PATTERN_TEST((test->reg + 4) + (i * 8),
                                                test->mask,
                                                test->write);
                                break;
                        }
                }
                test++;
        }

        *data = 0;
        return 0;
}

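/* Sum the NVM words up to and including the checksum word and verify
 * the total equals NVM_SUM. */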
static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data)
{
        u16 temp;
        u16 checksum = 0;
        u16 i;

        *data = 0;
        /* Read and add up the contents of the EEPROM */
        for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
                if ((adapter->hw.nvm.ops.read_nvm(&adapter->hw, i, 1, &temp))
                    < 0) {
                        *data = 1;
                        break;
                }
                checksum += temp;
        }

        /* If the checksum is not correct, return an error; otherwise
         * the test passed */
        if ((checksum != (u16) NVM_SUM) && !(*data))
                *data = 2;

        return *data;
}

static irqreturn_t igb_test_intr(int irq, void *data)
{
        struct net_device *netdev = (struct net_device *) data;
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;

        adapter->test_icr |= rd32(E1000_ICR);

        return IRQ_HANDLED;
}

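/* Fire each of the low ten interrupt causes with the cause masked and
 * then unmasked, verifying interrupts post only when they should.
 * MSI-X configurations are skipped (not tested yet). */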
static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
{
        struct e1000_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
        u32 mask, i = 0, shared_int = true;
        u32 irq = adapter->pdev->irq;

        *data = 0;

        /* Hook up test interrupt handler just for this test */
        if (adapter->msix_entries) {
                /* NOTE: we don't test MSI-X interrupts here, yet */
                return 0;
        } else if (adapter->msi_enabled) {
                shared_int = false;
                if (request_irq(irq, &igb_test_intr, 0, netdev->name, netdev)) {
                        *data = 1;
                        return -1;
                }
        } else if (!request_irq(irq, &igb_test_intr, IRQF_PROBE_SHARED,
                                netdev->name, netdev)) {
                shared_int = false;
        } else if (request_irq(irq, &igb_test_intr, IRQF_SHARED,
                 netdev->name, netdev)) {
                *data = 1;
                return -1;
        }
        dev_info(&adapter->pdev->dev, "testing %s interrupt\n",
                (shared_int ? "shared" : "unshared"));

        /* Disable all the interrupts */
        wr32(E1000_IMC, 0xFFFFFFFF);
        msleep(10);

        /* Test each interrupt */
        for (; i < 10; i++) {
                /* Interrupt to test */
                mask = 1 << i;

                if (!shared_int) {
                        /* Disable the interrupt to be reported in
                         * the cause register and then force the same
                         * interrupt and see if one gets posted.  If
                         * an interrupt was posted to the bus, the
                         * test failed.
                         */
                        adapter->test_icr = 0;
                        wr32(E1000_IMC, ~mask & 0x00007FFF);
                        wr32(E1000_ICS, ~mask & 0x00007FFF);
                        msleep(10);

                        if (adapter->test_icr & mask) {
                                *data = 3;
                                break;
                        }
                }

                /* Enable the interrupt to be reported in
                 * the cause register and then force the same
                 * interrupt and see if one gets posted.  If
                 * an interrupt was not posted to the bus, the
                 * test failed.
                 */
                adapter->test_icr = 0;
                wr32(E1000_IMS, mask);
                wr32(E1000_ICS, mask);
                msleep(10);

                if (!(adapter->test_icr & mask)) {
                        *data = 4;
                        break;
                }

                if (!shared_int) {
                        /* Disable the other interrupts to be reported in
                         * the cause register and then force the other
                         * interrupts and see if any get posted.  If
                         * an interrupt was posted to the bus, the
                         * test failed.
                         */
                        adapter->test_icr = 0;
                        wr32(E1000_IMC, ~mask & 0x00007FFF);
                        wr32(E1000_ICS, ~mask & 0x00007FFF);
                        msleep(10);

                        if (adapter->test_icr) {
                                *data = 5;
                                break;
                        }
                }
        }

        /* Disable all the interrupts */
        wr32(E1000_IMC, 0xFFFFFFFF);
        msleep(10);

        /* Unhook test interrupt handler */
        free_irq(irq, netdev);

        return *data;
}

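/* Unmap, free, and clear the dedicated loopback-test rings. */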
static void igb_free_desc_rings(struct igb_adapter *adapter)
{
        struct igb_ring *tx_ring = &adapter->test_tx_ring;
        struct igb_ring *rx_ring = &adapter->test_rx_ring;
        struct pci_dev *pdev = adapter->pdev;
        int i;

        if (tx_ring->desc && tx_ring->buffer_info) {
                for (i = 0; i < tx_ring->count; i++) {
                        struct igb_buffer *buf = &(tx_ring->buffer_info[i]);
                        if (buf->dma)
                                pci_unmap_single(pdev, buf->dma, buf->length,
                                                 PCI_DMA_TODEVICE);
                        if (buf->skb)
                                dev_kfree_skb(buf->skb);
                }
        }

        if (rx_ring->desc && rx_ring->buffer_info) {
                for (i = 0; i < rx_ring->count; i++) {
                        struct igb_buffer *buf = &(rx_ring->buffer_info[i]);
                        if (buf->dma)
                                pci_unmap_single(pdev, buf->dma,
                                                 IGB_RXBUFFER_2048,
                                                 PCI_DMA_FROMDEVICE);
                        if (buf->skb)
                                dev_kfree_skb(buf->skb);
                }
        }

        if (tx_ring->desc) {
                pci_free_consistent(pdev, tx_ring->size, tx_ring->desc,
                                    tx_ring->dma);
                tx_ring->desc = NULL;
        }
        if (rx_ring->desc) {
                pci_free_consistent(pdev, rx_ring->size, rx_ring->desc,
                                    rx_ring->dma);
                rx_ring->desc = NULL;
        }

        kfree(tx_ring->buffer_info);
        tx_ring->buffer_info = NULL;
        kfree(rx_ring->buffer_info);
        rx_ring->buffer_info = NULL;
}

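/* Build minimal tx/rx rings on queue 0 for the loopback test,
 * programming the hardware directly rather than going through the
 * normal ring setup paths. */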
1193 static int igb_setup_desc_rings(struct igb_adapter *adapter)
1194 {
1195         struct e1000_hw *hw = &adapter->hw;
1196         struct igb_ring *tx_ring = &adapter->test_tx_ring;
1197         struct igb_ring *rx_ring = &adapter->test_rx_ring;
1198         struct pci_dev *pdev = adapter->pdev;
1199         u32 rctl;
1200         int i, ret_val;
1201
1202         /* Setup Tx descriptor ring and Tx buffers */
1203
1204         if (!tx_ring->count)
1205                 tx_ring->count = IGB_DEFAULT_TXD;
1206
1207         tx_ring->buffer_info = kcalloc(tx_ring->count,
1208                                        sizeof(struct igb_buffer),
1209                                        GFP_KERNEL);
1210         if (!tx_ring->buffer_info) {
1211                 ret_val = 1;
1212                 goto err_nomem;
1213         }
1214
1215         tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
1216         tx_ring->size = ALIGN(tx_ring->size, 4096);
1217         tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
1218                                              &tx_ring->dma);
1219         if (!tx_ring->desc) {
1220                 ret_val = 2;
1221                 goto err_nomem;
1222         }
1223         tx_ring->next_to_use = tx_ring->next_to_clean = 0;
1224
1225         wr32(E1000_TDBAL(0),
1226                         ((u64) tx_ring->dma & 0x00000000FFFFFFFF));
1227         wr32(E1000_TDBAH(0), ((u64) tx_ring->dma >> 32));
1228         wr32(E1000_TDLEN(0),
1229                         tx_ring->count * sizeof(struct e1000_tx_desc));
1230         wr32(E1000_TDH(0), 0);
1231         wr32(E1000_TDT(0), 0);
1232         wr32(E1000_TCTL,
1233                         E1000_TCTL_PSP | E1000_TCTL_EN |
1234                         E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
1235                         E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT);
1236
1237         for (i = 0; i < tx_ring->count; i++) {
1238                 struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
1239                 struct sk_buff *skb;
1240                 unsigned int size = 1024;
1241
1242                 skb = alloc_skb(size, GFP_KERNEL);
1243                 if (!skb) {
1244                         ret_val = 3;
1245                         goto err_nomem;
1246                 }
1247                 skb_put(skb, size);
1248                 tx_ring->buffer_info[i].skb = skb;
1249                 tx_ring->buffer_info[i].length = skb->len;
1250                 tx_ring->buffer_info[i].dma =
1251                         pci_map_single(pdev, skb->data, skb->len,
1252                                        PCI_DMA_TODEVICE);
1253                 tx_desc->buffer_addr = cpu_to_le64(tx_ring->buffer_info[i].dma);
1254                 tx_desc->lower.data = cpu_to_le32(skb->len);
1255                 tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP |
1256                                                    E1000_TXD_CMD_IFCS |
1257                                                    E1000_TXD_CMD_RS);
1258                 tx_desc->upper.data = 0;
1259         }
1260
1261         /* Setup Rx descriptor ring and Rx buffers */
1262
1263         if (!rx_ring->count)
1264                 rx_ring->count = IGB_DEFAULT_RXD;
1265
1266         rx_ring->buffer_info = kcalloc(rx_ring->count,
1267                                        sizeof(struct igb_buffer),
1268                                        GFP_KERNEL);
1269         if (!rx_ring->buffer_info) {
1270                 ret_val = 4;
1271                 goto err_nomem;
1272         }
1273
1274         rx_ring->size = rx_ring->count * sizeof(struct e1000_rx_desc);
1275         rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
1276                                              &rx_ring->dma);
1277         if (!rx_ring->desc) {
1278                 ret_val = 5;
1279                 goto err_nomem;
1280         }
1281         rx_ring->next_to_use = rx_ring->next_to_clean = 0;
1282
1283         rctl = rd32(E1000_RCTL);
1284         wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
1285         wr32(E1000_RDBAL(0),
1286                         ((u64) rx_ring->dma & 0xFFFFFFFF));
1287         wr32(E1000_RDBAH(0),
1288                         ((u64) rx_ring->dma >> 32));
1289         wr32(E1000_RDLEN(0), rx_ring->size);
1290         wr32(E1000_RDH(0), 0);
1291         wr32(E1000_RDT(0), 0);
1292         rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 |
1293                 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
1294                 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
1295         wr32(E1000_RCTL, rctl);
1296         wr32(E1000_SRRCTL(0), 0);
1297
1298         for (i = 0; i < rx_ring->count; i++) {
1299                 struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
1300                 struct sk_buff *skb;
1301
1302                 skb = alloc_skb(IGB_RXBUFFER_2048 + NET_IP_ALIGN,
1303                                 GFP_KERNEL);
1304                 if (!skb) {
1305                         ret_val = 6;
1306                         goto err_nomem;
1307                 }
1308                 skb_reserve(skb, NET_IP_ALIGN);
1309                 rx_ring->buffer_info[i].skb = skb;
1310                 rx_ring->buffer_info[i].dma =
1311                         pci_map_single(pdev, skb->data, IGB_RXBUFFER_2048,
1312                                        PCI_DMA_FROMDEVICE);
1313                 rx_desc->buffer_addr = cpu_to_le64(rx_ring->buffer_info[i].dma);
1314                 memset(skb->data, 0x00, skb->len);
1315         }
1316
1317         return 0;
1318
1319 err_nomem:
1320         igb_free_desc_rings(adapter);
1321         return ret_val;
1322 }
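
/* Note: the nonzero codes used by the loopback path (3-6 in the ring
 * setup above, 7 from igb_setup_loopback_test() and 13/14 from
 * igb_run_loopback_test()) are surfaced directly in ethtool's self-test
 * result array, so each failure site reports a distinct value.
 */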
1323
1324 static void igb_phy_disable_receiver(struct igb_adapter *adapter)
1325 {
1326         struct e1000_hw *hw = &adapter->hw;
1327
1328         /* Write out to PHY registers 29 and 30 to disable the Receiver. */
1329         hw->phy.ops.write_phy_reg(hw, 29, 0x001F);
1330         hw->phy.ops.write_phy_reg(hw, 30, 0x8FFC);
1331         hw->phy.ops.write_phy_reg(hw, 29, 0x001A);
1332         hw->phy.ops.write_phy_reg(hw, 30, 0x8FF0);
1333 }
1334
1335 static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
1336 {
1337         struct e1000_hw *hw = &adapter->hw;
1338         u32 ctrl_reg = 0;
1339         u32 stat_reg = 0;
1340
1341         hw->mac.autoneg = false;
1342
1343         if (hw->phy.type == e1000_phy_m88) {
1344                 /* Auto-MDI/MDIX Off */
1345                 hw->phy.ops.write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
1346                 /* reset to update Auto-MDI/MDIX */
1347                 hw->phy.ops.write_phy_reg(hw, PHY_CONTROL, 0x9140);
1348                 /* autoneg off */
1349                 hw->phy.ops.write_phy_reg(hw, PHY_CONTROL, 0x8140);
1350         }
1351
1354         /* force 1000, set loopback */
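        /* (0x4140 in the standard MII control-register layout decodes to
         *  LOOPBACK (bit 14) | SPEED_1000 (bit 6) | FULL_DUPLEX (bit 8)) */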
1355         hw->phy.ops.write_phy_reg(hw, PHY_CONTROL, 0x4140);
1356
1357         /* Now set up the MAC to the same speed/duplex as the PHY. */
1358         ctrl_reg = rd32(E1000_CTRL);
1359         ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
1360         ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
1361                      E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
1362                      E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
1363                      E1000_CTRL_FD);     /* Force Duplex to FULL */
1364
1365         if (hw->phy.media_type == e1000_media_type_copper &&
1366             hw->phy.type == e1000_phy_m88)
1367                 ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
1368         else {
1369                 /* Set the ILOS bit on the fiber NIC if half-duplex
1370                  * link is detected. */
1371                 stat_reg = rd32(E1000_STATUS);
1372                 if ((stat_reg & E1000_STATUS_FD) == 0)
1373                         ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
1374         }
1375
1376         wr32(E1000_CTRL, ctrl_reg);
1377
1378         /* Disable the receiver on the PHY so that the PHY does not
1379          * begin to autonegotiate when a cable is reconnected to the NIC.
1380          */
1381         if (hw->phy.type == e1000_phy_m88)
1382                 igb_phy_disable_receiver(adapter);
1383
1384         udelay(500);
1385
1386         return 0;
1387 }
1388
1389 static int igb_set_phy_loopback(struct igb_adapter *adapter)
1390 {
1391         return igb_integrated_phy_loopback(adapter);
1392 }
1393
1394 static int igb_setup_loopback_test(struct igb_adapter *adapter)
1395 {
1396         struct e1000_hw *hw = &adapter->hw;
1397         u32 rctl;
1398
1399         if (hw->phy.media_type == e1000_media_type_fiber ||
1400             hw->phy.media_type == e1000_media_type_internal_serdes) {
1401                 rctl = rd32(E1000_RCTL);
1402                 rctl |= E1000_RCTL_LBM_TCVR;
1403                 wr32(E1000_RCTL, rctl);
1404                 return 0;
1405         } else if (hw->phy.media_type == e1000_media_type_copper) {
1406                 return igb_set_phy_loopback(adapter);
1407         }
1408
1409         return 7;
1410 }
1411
1412 static void igb_loopback_cleanup(struct igb_adapter *adapter)
1413 {
1414         struct e1000_hw *hw = &adapter->hw;
1415         u32 rctl;
1416         u16 phy_reg;
1417
1418         rctl = rd32(E1000_RCTL);
1419         rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
1420         wr32(E1000_RCTL, rctl);
1421
1422         hw->mac.autoneg = true;
1423         hw->phy.ops.read_phy_reg(hw, PHY_CONTROL, &phy_reg);
1424         if (phy_reg & MII_CR_LOOPBACK) {
1425                 phy_reg &= ~MII_CR_LOOPBACK;
1426                 hw->phy.ops.write_phy_reg(hw, PHY_CONTROL, phy_reg);
1427                 igb_phy_sw_reset(hw);
1428         }
1429 }
1430
1431 static void igb_create_lbtest_frame(struct sk_buff *skb,
1432                                     unsigned int frame_size)
1433 {
1434         memset(skb->data, 0xFF, frame_size);
1435         frame_size &= ~1;
1436         memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
1437         memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
1438         memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
1439 }
1440
1441 static int igb_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
1442 {
1443         frame_size &= ~1;
1444         if (*(skb->data + 3) == 0xFF)
1445                 if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
1446                     (*(skb->data + frame_size / 2 + 12) == 0xAF))
1447                         return 0;
1448         return 13;
1449 }
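
/* With the 1024-byte frames used by igb_run_loopback_test() below,
 * igb_create_lbtest_frame() first fills bytes 0-1023 with 0xFF, then
 * overwrites bytes 512-1022 with 0xAA and plants marker bytes at
 * offsets 522 (0xBE) and 524 (0xAF); igb_check_lbtest_frame() accepts
 * a frame when byte 3 is still 0xFF and both markers survived.
 */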
1450
1451 static int igb_run_loopback_test(struct igb_adapter *adapter)
1452 {
1453         struct e1000_hw *hw = &adapter->hw;
1454         struct igb_ring *tx_ring = &adapter->test_tx_ring;
1455         struct igb_ring *rx_ring = &adapter->test_rx_ring;
1456         struct pci_dev *pdev = adapter->pdev;
1457         int i, j, k, l, lc, good_cnt;
1458         int ret_val = 0;
1459         unsigned long time;
1460
1461         wr32(E1000_RDT(0), rx_ring->count - 1);
1462
1463         /* Calculate the loop count based on the largest descriptor ring.
1464          * The idea is to wrap the largest ring a number of times, using
1465          * 64 send/receive pairs during each pass.
1466          */
1467
1468         if (rx_ring->count <= tx_ring->count)
1469                 lc = ((tx_ring->count / 64) * 2) + 1;
1470         else
1471                 lc = ((rx_ring->count / 64) * 2) + 1;
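        /* For example, with 256-entry rings this gives
         * lc = (256 / 64) * 2 + 1 = 9 passes of 64 frames each. */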
1472
1473         k = l = 0;
1474         for (j = 0; j <= lc; j++) { /* loop count loop */
1475                 for (i = 0; i < 64; i++) { /* send the packets */
1476                         igb_create_lbtest_frame(tx_ring->buffer_info[k].skb,
1477                                                 1024);
1478                         pci_dma_sync_single_for_device(pdev,
1479                                 tx_ring->buffer_info[k].dma,
1480                                 tx_ring->buffer_info[k].length,
1481                                 PCI_DMA_TODEVICE);
1482                         k++;
1483                         if (k == tx_ring->count)
1484                                 k = 0;
1485                 }
1486                 wr32(E1000_TDT(0), k);
1487                 msleep(200);
1488                 time = jiffies; /* set the start time for the receive */
1489                 good_cnt = 0;
1490                 do { /* receive the sent packets */
1491                         pci_dma_sync_single_for_cpu(pdev,
1492                                         rx_ring->buffer_info[l].dma,
1493                                         IGB_RXBUFFER_2048,
1494                                         PCI_DMA_FROMDEVICE);
1495
1496                         ret_val = igb_check_lbtest_frame(
1497                                              rx_ring->buffer_info[l].skb, 1024);
1498                         if (!ret_val)
1499                                 good_cnt++;
1500                         l++;
1501                         if (l == rx_ring->count)
1502                                 l = 0;
1503                         /* a 20-jiffy budget (20 msecs at HZ=1000) is more
1504                          * than enough time to complete the receives; if
1505                          * it is exceeded, break out and report an error
1506                          */
1507                 } while (good_cnt < 64 && time_before(jiffies, time + 20));
1508                 if (good_cnt != 64) {
1509                         ret_val = 13; /* same code returned on mis-compare */
1510                         break;
1511                 }
1512                 if (time_after_eq(jiffies, time + 20)) {
1513                         ret_val = 14; /* error code for timeout */
1514                         break;
1515                 }
1516         } /* end loop count loop */
1517         return ret_val;
1518 }
1519
1520 static int igb_loopback_test(struct igb_adapter *adapter, u64 *data)
1521 {
1522         /* PHY loopback cannot be performed if SoL/IDER
1523          * sessions are active */
1524         if (igb_check_reset_block(&adapter->hw)) {
1525                 dev_err(&adapter->pdev->dev,
1526                         "Cannot do PHY loopback test "
1527                         "when SoL/IDER is active.\n");
1528                 *data = 0;
1529                 goto out;
1530         }
1531         *data = igb_setup_desc_rings(adapter);
1532         if (*data)
1533                 goto out;
1534         *data = igb_setup_loopback_test(adapter);
1535         if (*data)
1536                 goto err_loopback;
1537         *data = igb_run_loopback_test(adapter);
1538         igb_loopback_cleanup(adapter);
1539
1540 err_loopback:
1541         igb_free_desc_rings(adapter);
1542 out:
1543         return *data;
1544 }
1545
1546 static int igb_link_test(struct igb_adapter *adapter, u64 *data)
1547 {
1548         struct e1000_hw *hw = &adapter->hw;
1549         *data = 0;
1550         if (hw->phy.media_type == e1000_media_type_internal_serdes) {
1551                 int i = 0;
1552                 hw->mac.serdes_has_link = false;
1553
1554                 /* On some blade server designs, link establishment can
1555                  * be very slow; poll for up to 75 seconds (3750 * 20 ms) */
1556                 do {
1557                         hw->mac.ops.check_for_link(&adapter->hw);
1558                         if (hw->mac.serdes_has_link)
1559                                 return *data;
1560                         msleep(20);
1561                 } while (i++ < 3750);
1562
1563                 *data = 1;
1564         } else {
1565                 hw->mac.ops.check_for_link(&adapter->hw);
1566                 if (hw->mac.autoneg)
1567                         msleep(4000);
1568
1569                 if (!(rd32(E1000_STATUS) &
1570                       E1000_STATUS_LU))
1571                         *data = 1;
1572         }
1573         return *data;
1574 }
1575
1576 static void igb_diag_test(struct net_device *netdev,
1577                           struct ethtool_test *eth_test, u64 *data)
1578 {
1579         struct igb_adapter *adapter = netdev_priv(netdev);
1580         u16 autoneg_advertised;
1581         u8 forced_speed_duplex, autoneg;
1582         bool if_running = netif_running(netdev);
1583
1584         set_bit(__IGB_TESTING, &adapter->state);
1585         if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
1586                 /* Offline tests */
1587
1588                 /* save speed, duplex, autoneg settings */
1589                 autoneg_advertised = adapter->hw.phy.autoneg_advertised;
1590                 forced_speed_duplex = adapter->hw.mac.forced_speed_duplex;
1591                 autoneg = adapter->hw.mac.autoneg;
1592
1593                 dev_info(&adapter->pdev->dev, "offline testing starting\n");
1594
1595                 /* Link test performed before hardware reset so autoneg doesn't
1596                  * interfere with test result */
1597                 if (igb_link_test(adapter, &data[4]))
1598                         eth_test->flags |= ETH_TEST_FL_FAILED;
1599
1600                 if (if_running)
1601                         /* the interface must be down during offline tests */
1602                         dev_close(netdev);
1603                 else
1604                         igb_reset(adapter);
1605
1606                 if (igb_reg_test(adapter, &data[0]))
1607                         eth_test->flags |= ETH_TEST_FL_FAILED;
1608
1609                 igb_reset(adapter);
1610                 if (igb_eeprom_test(adapter, &data[1]))
1611                         eth_test->flags |= ETH_TEST_FL_FAILED;
1612
1613                 igb_reset(adapter);
1614                 if (igb_intr_test(adapter, &data[2]))
1615                         eth_test->flags |= ETH_TEST_FL_FAILED;
1616
1617                 igb_reset(adapter);
1618                 if (igb_loopback_test(adapter, &data[3]))
1619                         eth_test->flags |= ETH_TEST_FL_FAILED;
1620
1621                 /* restore speed, duplex, autoneg settings */
1622                 adapter->hw.phy.autoneg_advertised = autoneg_advertised;
1623                 adapter->hw.mac.forced_speed_duplex = forced_speed_duplex;
1624                 adapter->hw.mac.autoneg = autoneg;
1625
1626                 /* force this routine to wait until autoneg complete/timeout */
1627                 adapter->hw.phy.autoneg_wait_to_complete = true;
1628                 igb_reset(adapter);
1629                 adapter->hw.phy.autoneg_wait_to_complete = false;
1630
1631                 clear_bit(__IGB_TESTING, &adapter->state);
1632                 if (if_running)
1633                         dev_open(netdev);
1634         } else {
1635                 dev_info(&adapter->pdev->dev, "online testing starting\n");
1636                 /* Online tests */
1637                 if (igb_link_test(adapter, &data[4]))
1638                         eth_test->flags |= ETH_TEST_FL_FAILED;
1639
1640                 /* Online tests aren't run; pass by default */
1641                 data[0] = 0;
1642                 data[1] = 0;
1643                 data[2] = 0;
1644                 data[3] = 0;
1645
1646                 clear_bit(__IGB_TESTING, &adapter->state);
1647         }
1648         msleep_interruptible(4 * 1000);
1649 }
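
/* The five result slots filled above map, in order, to the register,
 * EEPROM, interrupt, loopback and link tests.  Usage sketch from
 * userspace (assuming the interface is named eth0):
 *
 *      ethtool -t eth0 offline         runs all five tests
 *      ethtool -t eth0 online          runs only the link test
 */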
1650
1651 static int igb_wol_exclusion(struct igb_adapter *adapter,
1652                              struct ethtool_wolinfo *wol)
1653 {
1654         struct e1000_hw *hw = &adapter->hw;
1655         int retval = 1; /* fail by default */
1656
1657         switch (hw->device_id) {
1658         case E1000_DEV_ID_82575GB_QUAD_COPPER:
1659                 /* WoL not supported */
1660                 wol->supported = 0;
1661                 break;
1662         case E1000_DEV_ID_82575EB_FIBER_SERDES:
1663                 /* Wake events not supported on port B */
1664                 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1) {
1665                         wol->supported = 0;
1666                         break;
1667                 }
1668                 /* return success for non-excluded adapter ports */
1669                 retval = 0;
1670                 break;
1671         default:
1672                 /* dual-port cards only support WoL on port A unless WoL
1673                  * was enabled in the EEPROM for port B, so exclude
1674                  * FUNC_1 ports from having WoL enabled */
1675                 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1 &&
1676                     !adapter->eeprom_wol) {
1677                         wol->supported = 0;
1678                         break;
1679                 }
1680
1681                 retval = 0;
1682         }
1683
1684         return retval;
1685 }
1686
1687 static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1688 {
1689         struct igb_adapter *adapter = netdev_priv(netdev);
1690
1691         wol->supported = WAKE_UCAST | WAKE_MCAST |
1692                          WAKE_BCAST | WAKE_MAGIC;
1693         wol->wolopts = 0;
1694
1695         /* this function will set ->supported = 0 and return 1 if wol is not
1696          * supported by this hardware */
1697         if (igb_wol_exclusion(adapter, wol))
1698                 return;
1699
1700         /* apply any specific unsupported masks here */
1701         switch (adapter->hw.device_id) {
1702         default:
1703                 break;
1704         }
1705
1706         if (adapter->wol & E1000_WUFC_EX)
1707                 wol->wolopts |= WAKE_UCAST;
1708         if (adapter->wol & E1000_WUFC_MC)
1709                 wol->wolopts |= WAKE_MCAST;
1710         if (adapter->wol & E1000_WUFC_BC)
1711                 wol->wolopts |= WAKE_BCAST;
1712         if (adapter->wol & E1000_WUFC_MAG)
1713                 wol->wolopts |= WAKE_MAGIC;
1716 }
1717
1718 static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1719 {
1720         struct igb_adapter *adapter = netdev_priv(netdev);
1721         struct e1000_hw *hw = &adapter->hw;
1722
1723         if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
1724                 return -EOPNOTSUPP;
1725
1726         if (igb_wol_exclusion(adapter, wol))
1727                 return wol->wolopts ? -EOPNOTSUPP : 0;
1728
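        /* apply any device-specific unsupported masks here */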
1729         switch (hw->device_id) {
1730         default:
1731                 break;
1732         }
1733
1734         /* these settings will always override what we currently have */
1735         adapter->wol = 0;
1736
1737         if (wol->wolopts & WAKE_UCAST)
1738                 adapter->wol |= E1000_WUFC_EX;
1739         if (wol->wolopts & WAKE_MCAST)
1740                 adapter->wol |= E1000_WUFC_MC;
1741         if (wol->wolopts & WAKE_BCAST)
1742                 adapter->wol |= E1000_WUFC_BC;
1743         if (wol->wolopts & WAKE_MAGIC)
1744                 adapter->wol |= E1000_WUFC_MAG;
1745
1746         return 0;
1747 }
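
/* Usage sketch (hypothetical interface name): "ethtool -s eth0 wol g"
 * reaches this handler with WAKE_MAGIC set, which the code above
 * translates into the E1000_WUFC_MAG wakeup filter bit.
 */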
1748
1749 /* toggle LED 4 times per second = 2 "blinks" per second */
1750 #define IGB_ID_INTERVAL         (HZ/4)
1751
1752 /* bit defines for adapter->led_status */
1753 #define IGB_LED_ON              0
1754
1755 static int igb_phys_id(struct net_device *netdev, u32 data)
1756 {
1757         struct igb_adapter *adapter = netdev_priv(netdev);
1758         struct e1000_hw *hw = &adapter->hw;
1759
1760         if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
1761                 data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
1762
1763         igb_blink_led(hw);
1764         msleep_interruptible(data * 1000);
1765
1766         igb_led_off(hw);
1767         clear_bit(IGB_LED_ON, &adapter->led_status);
1768         igb_cleanup_led(hw);
1769
1770         return 0;
1771 }
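
/* Usage sketch (hypothetical interface name): "ethtool -p eth0 5"
 * blinks the identify LED for 5 seconds; a duration of 0, or one
 * larger than MAX_SCHEDULE_TIMEOUT/HZ, is clamped to that maximum.
 */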
1772
1773 static int igb_set_coalesce(struct net_device *netdev,
1774                             struct ethtool_coalesce *ec)
1775 {
1776         struct igb_adapter *adapter = netdev_priv(netdev);
1777
1778         if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
1779             ((ec->rx_coalesce_usecs > 3) &&
1780              (ec->rx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
1781             (ec->rx_coalesce_usecs == 2))
1782                 return -EINVAL;
1783
1784         /* convert to an interrupt rate; values <= 3 are special modes */
1785         if (ec->rx_coalesce_usecs <= 3)
1786                 adapter->itr_setting = ec->rx_coalesce_usecs;
1787         else
1788                 adapter->itr_setting = (1000000 / ec->rx_coalesce_usecs);
1789
1790         if (netif_running(netdev))
1791                 igb_reinit_locked(adapter);
1792
1793         return 0;
1794 }
1795
1796 static int igb_get_coalesce(struct net_device *netdev,
1797                             struct ethtool_coalesce *ec)
1798 {
1799         struct igb_adapter *adapter = netdev_priv(netdev);
1800
1801         if (adapter->itr_setting <= 3)
1802                 ec->rx_coalesce_usecs = adapter->itr_setting;
1803         else
1804                 ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting;
1805
1806         return 0;
1807 }
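
/* Worked example: "ethtool -C eth0 rx-usecs 125" (hypothetical
 * interface name) stores 1000000 / 125 = 8000 interrupts/sec in
 * itr_setting, and igb_get_coalesce() reports back 1000000 / 8000 =
 * 125, so the round trip is exact whenever the division is.
 */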
1808
1810 static int igb_nway_reset(struct net_device *netdev)
1811 {
1812         struct igb_adapter *adapter = netdev_priv(netdev);
1813         /* reinitializing the interface cycles the link, which restarts
1814          * autonegotiation */
1815         if (netif_running(netdev))
1816                 igb_reinit_locked(adapter);
1815         return 0;
1816 }
1817
1818 static int igb_get_sset_count(struct net_device *netdev, int sset)
1819 {
1820         switch (sset) {
1821         case ETH_SS_STATS:
1822                 return IGB_STATS_LEN;
1823         case ETH_SS_TEST:
1824                 return IGB_TEST_LEN;
1825         default:
1826                 return -EOPNOTSUPP;
1827         }
1828 }
1829
1830 static void igb_get_ethtool_stats(struct net_device *netdev,
1831                                   struct ethtool_stats *stats, u64 *data)
1832 {
1833         struct igb_adapter *adapter = netdev_priv(netdev);
1834         u64 *queue_stat;
1835         int stat_count = sizeof(struct igb_queue_stats) / sizeof(u64);
1836         int j;
1837         int i;
1838
1839         igb_update_stats(adapter);
1840         for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
1841                 char *p = (char *)adapter + igb_gstrings_stats[i].stat_offset;
1842                 data[i] = (igb_gstrings_stats[i].sizeof_stat ==
1843                         sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1844         }
1845         /* igb_get_strings() emits tx-queue strings before the rx-queue
1846          * strings, but no per-tx-queue counters are kept here; zero the
1847          * tx slots so the rx values stay aligned with their strings
1848          * instead of being reported under the tx labels */
1849         for (j = 0; j < adapter->num_tx_queues; j++) {
1850                 int k;
1851                 for (k = 0; k < stat_count; k++)
1852                         data[i + k] = 0;
1853                 i += k;
1854         }
1855         for (j = 0; j < adapter->num_rx_queues; j++) {
1856                 int k;
1857                 queue_stat = (u64 *)&adapter->rx_ring[j].rx_stats;
1858                 for (k = 0; k < stat_count; k++)
1859                         data[i + k] = queue_stat[k];
1860                 i += k;
1861         }
1852 }
1853
1854 static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
1855 {
1856         struct igb_adapter *adapter = netdev_priv(netdev);
1857         u8 *p = data;
1858         int i;
1859
1860         switch (stringset) {
1861         case ETH_SS_TEST:
1862                 memcpy(data, *igb_gstrings_test,
1863                        IGB_TEST_LEN * ETH_GSTRING_LEN);
1864                 break;
1865         case ETH_SS_STATS:
1866                 for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
1867                         memcpy(p, igb_gstrings_stats[i].stat_string,
1868                                ETH_GSTRING_LEN);
1869                         p += ETH_GSTRING_LEN;
1870                 }
1871                 for (i = 0; i < adapter->num_tx_queues; i++) {
1872                         sprintf(p, "tx_queue_%u_packets", i);
1873                         p += ETH_GSTRING_LEN;
1874                         sprintf(p, "tx_queue_%u_bytes", i);
1875                         p += ETH_GSTRING_LEN;
1876                 }
1877                 for (i = 0; i < adapter->num_rx_queues; i++) {
1878                         sprintf(p, "rx_queue_%u_packets", i);
1879                         p += ETH_GSTRING_LEN;
1880                         sprintf(p, "rx_queue_%u_bytes", i);
1881                         p += ETH_GSTRING_LEN;
1882                 }
1883 /*              BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */
1884                 break;
1885         }
1886 }
1887
1888 static const struct ethtool_ops igb_ethtool_ops = {
1889         .get_settings           = igb_get_settings,
1890         .set_settings           = igb_set_settings,
1891         .get_drvinfo            = igb_get_drvinfo,
1892         .get_regs_len           = igb_get_regs_len,
1893         .get_regs               = igb_get_regs,
1894         .get_wol                = igb_get_wol,
1895         .set_wol                = igb_set_wol,
1896         .get_msglevel           = igb_get_msglevel,
1897         .set_msglevel           = igb_set_msglevel,
1898         .nway_reset             = igb_nway_reset,
1899         .get_link               = ethtool_op_get_link,
1900         .get_eeprom_len         = igb_get_eeprom_len,
1901         .get_eeprom             = igb_get_eeprom,
1902         .set_eeprom             = igb_set_eeprom,
1903         .get_ringparam          = igb_get_ringparam,
1904         .set_ringparam          = igb_set_ringparam,
1905         .get_pauseparam         = igb_get_pauseparam,
1906         .set_pauseparam         = igb_set_pauseparam,
1907         .get_rx_csum            = igb_get_rx_csum,
1908         .set_rx_csum            = igb_set_rx_csum,
1909         .get_tx_csum            = igb_get_tx_csum,
1910         .set_tx_csum            = igb_set_tx_csum,
1911         .get_sg                 = ethtool_op_get_sg,
1912         .set_sg                 = ethtool_op_set_sg,
1913         .get_tso                = ethtool_op_get_tso,
1914         .set_tso                = igb_set_tso,
1915         .self_test              = igb_diag_test,
1916         .get_strings            = igb_get_strings,
1917         .phys_id                = igb_phys_id,
1918         .get_sset_count         = igb_get_sset_count,
1919         .get_ethtool_stats      = igb_get_ethtool_stats,
1920         .get_coalesce           = igb_get_coalesce,
1921         .set_coalesce           = igb_set_coalesce,
1922 };
1923
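/* Called from the driver's PCI probe path before register_netdev(), so
 * the ethtool hooks above are in place by the time the interface is
 * visible to userspace.
 */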
1924 void igb_set_ethtool_ops(struct net_device *netdev)
1925 {
1926         SET_ETHTOOL_OPS(netdev, &igb_ethtool_ops);
1927 }