drivers/net/chelsio/cxgb2.c
1 /*****************************************************************************
2  *                                                                           *
3  * File: cxgb2.c                                                             *
4  * $Revision: 1.25 $                                                         *
5  * $Date: 2005/06/22 00:43:25 $                                              *
6  * Description:                                                              *
7  *  Chelsio 10Gb Ethernet Driver.                                            *
8  *                                                                           *
9  * This program is free software; you can redistribute it and/or modify      *
10  * it under the terms of the GNU General Public License, version 2, as       *
11  * published by the Free Software Foundation.                                *
12  *                                                                           *
13  * You should have received a copy of the GNU General Public License along   *
14  * with this program; if not, write to the Free Software Foundation, Inc.,   *
15  * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
16  *                                                                           *
17  * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
18  * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
19  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
20  *                                                                           *
21  * http://www.chelsio.com                                                    *
22  *                                                                           *
23  * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
24  * All rights reserved.                                                      *
25  *                                                                           *
26  * Maintainers: maintainers@chelsio.com                                      *
27  *                                                                           *
28  * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
29  *          Tina Yang               <tainay@chelsio.com>                     *
30  *          Felix Marti             <felix@chelsio.com>                      *
31  *          Scott Bardone           <sbardone@chelsio.com>                   *
32  *          Kurt Ottaway            <kottaway@chelsio.com>                   *
33  *          Frank DiMambro          <frank@chelsio.com>                      *
34  *                                                                           *
35  * History:                                                                  *
36  *                                                                           *
37  ****************************************************************************/
38
39 #include "common.h"
40 #include <linux/module.h>
41 #include <linux/init.h>
42 #include <linux/pci.h>
43 #include <linux/netdevice.h>
44 #include <linux/etherdevice.h>
45 #include <linux/if_vlan.h>
46 #include <linux/mii.h>
47 #include <linux/sockios.h>
48 #include <linux/dma-mapping.h>
49 #include <asm/uaccess.h>
50
51 #include "cpl5_cmd.h"
52 #include "regs.h"
53 #include "gmac.h"
54 #include "cphy.h"
55 #include "sge.h"
56 #include "tp.h"
57 #include "espi.h"
58 #include "elmer0.h"
59
60 #include <linux/workqueue.h>
61
62 static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
63 {
64         schedule_delayed_work(&ap->stats_update_task, secs * HZ);
65 }
66
67 static inline void cancel_mac_stats_update(struct adapter *ap)
68 {
69         cancel_delayed_work(&ap->stats_update_task);
70 }
71
72 #define MAX_CMDQ_ENTRIES        16384
73 #define MAX_CMDQ1_ENTRIES       1024
74 #define MAX_RX_BUFFERS          16384
75 #define MAX_RX_JUMBO_BUFFERS    16384
76 #define MAX_TX_BUFFERS_HIGH     16384U
77 #define MAX_TX_BUFFERS_LOW      1536U
78 #define MAX_TX_BUFFERS          1460U
79 #define MIN_FL_ENTRIES          32
80
81 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
82                          NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
83                          NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
84
85 /*
86  * The EEPROM is actually bigger but only the first few bytes are used so we
87  * only report those.
88  */
89 #define EEPROM_SIZE 32
90
91 MODULE_DESCRIPTION(DRV_DESCRIPTION);
92 MODULE_AUTHOR("Chelsio Communications");
93 MODULE_LICENSE("GPL");
94
95 static int dflt_msg_enable = DFLT_MSG_ENABLE;
96
97 module_param(dflt_msg_enable, int, 0);
98 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 default message enable bitmap");
99
100 #define HCLOCK 0x0
101 #define LCLOCK 0x1
102
103 /* T1 cards' powersave mode */
104 static int t1_clock(struct adapter *adapter, int mode);
105 static int t1powersave = 1;     /* HW default is powersave mode. */
106
107 module_param(t1powersave, int, 0);
108 MODULE_PARM_DESC(t1powersave, "Enable/Disable T1 powersaving mode");
109
110 static int disable_msi = 0;
111 module_param(disable_msi, int, 0);
112 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
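/*
 * Usage sketch (illustrative, not taken from the original sources): the
 * three parameters above are ordinary module parameters, so assuming the
 * driver is built as a module named after DRV_NAME (cxgb in mainline
 * trees) they can be set at load time, e.g.
 *
 *   modprobe cxgb dflt_msg_enable=0x7 t1powersave=0 disable_msi=1
 *
 * where the message-enable value is any combination of NETIF_MSG_* flags.
 */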
113
114 static const char pci_speed[][4] = {
115         "33", "66", "100", "133"
116 };
117
118 /*
119  * Setup MAC to receive the types of packets we want.
120  */
121 static void t1_set_rxmode(struct net_device *dev)
122 {
123         struct adapter *adapter = dev->priv;
124         struct cmac *mac = adapter->port[dev->if_port].mac;
125         struct t1_rx_mode rm;
126
127         rm.dev = dev;
128         rm.idx = 0;
129         rm.list = dev->mc_list;
130         mac->ops->set_rx_mode(mac, &rm);
131 }
132
133 static void link_report(struct port_info *p)
134 {
135         if (!netif_carrier_ok(p->dev))
136                 printk(KERN_INFO "%s: link down\n", p->dev->name);
137         else {
138                 const char *s = "10Mbps";
139
140                 switch (p->link_config.speed) {
141                         case SPEED_10000: s = "10Gbps"; break;
142                         case SPEED_1000:  s = "1000Mbps"; break;
143                         case SPEED_100:   s = "100Mbps"; break;
144                 }
145
146                 printk(KERN_INFO "%s: link up, %s, %s-duplex\n",
147                        p->dev->name, s,
148                        p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
149         }
150 }
151
152 void t1_link_negotiated(struct adapter *adapter, int port_id, int link_stat,
153                         int speed, int duplex, int pause)
154 {
155         struct port_info *p = &adapter->port[port_id];
156
157         if (link_stat != netif_carrier_ok(p->dev)) {
158                 if (link_stat)
159                         netif_carrier_on(p->dev);
160                 else
161                         netif_carrier_off(p->dev);
162                 link_report(p);
163
164                 /* multi-port cards: inform the TOE of the new link speed */
165                 if ((speed > 0) && (adapter->params.nports > 1)) {
166                         unsigned int sched_speed = 10;
167                         switch (speed) {
168                         case SPEED_1000:
169                                 sched_speed = 1000;
170                                 break;
171                         case SPEED_100:
172                                 sched_speed = 100;
173                                 break;
174                         case SPEED_10:
175                                 sched_speed = 10;
176                                 break;
177                         }
178                         t1_sched_update_parms(adapter->sge, port_id, 0, sched_speed);
179                 }
180         }
181 }
182
183 static void link_start(struct port_info *p)
184 {
185         struct cmac *mac = p->mac;
186
187         mac->ops->reset(mac);
188         if (mac->ops->macaddress_set)
189                 mac->ops->macaddress_set(mac, p->dev->dev_addr);
190         t1_set_rxmode(p->dev);
191         t1_link_start(p->phy, mac, &p->link_config);
192         mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
193 }
194
195 static void enable_hw_csum(struct adapter *adapter)
196 {
197         if (adapter->flags & TSO_CAPABLE)
198                 t1_tp_set_ip_checksum_offload(adapter->tp, 1);  /* for TSO only */
199         if (adapter->flags & UDP_CSUM_CAPABLE)
200                 t1_tp_set_udp_checksum_offload(adapter->tp, 1);
201         t1_tp_set_tcp_checksum_offload(adapter->tp, 1);
202 }
203
204 /*
205  * Things to do upon first use of a card.
206  * This must run with the rtnl lock held.
207  */
208 static int cxgb_up(struct adapter *adapter)
209 {
210         int err = 0;
211
212         if (!(adapter->flags & FULL_INIT_DONE)) {
213                 err = t1_init_hw_modules(adapter);
214                 if (err)
215                         goto out_err;
216
217                 enable_hw_csum(adapter);
218                 adapter->flags |= FULL_INIT_DONE;
219         }
220
221         t1_interrupts_clear(adapter);
222
223         adapter->params.has_msi = !disable_msi && !pci_enable_msi(adapter->pdev);
224         err = request_irq(adapter->pdev->irq, t1_interrupt,
225                           adapter->params.has_msi ? 0 : IRQF_SHARED,
226                           adapter->name, adapter);
227         if (err) {
228                 if (adapter->params.has_msi)
229                         pci_disable_msi(adapter->pdev);
230
231                 goto out_err;
232         }
233
234         t1_sge_start(adapter->sge);
235         t1_interrupts_enable(adapter);
236 out_err:
237         return err;
238 }
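/*
 * Note: IRQF_SHARED is requested only when MSI could not be enabled; an
 * MSI vector is exclusive to this device, whereas a legacy INTx line may
 * be shared with other devices.  cxgb_down() undoes both the request_irq()
 * and the pci_enable_msi() performed here.
 */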
239
240 /*
241  * Release resources when all the ports have been stopped.
242  */
243 static void cxgb_down(struct adapter *adapter)
244 {
245         t1_sge_stop(adapter->sge);
246         t1_interrupts_disable(adapter);
247         free_irq(adapter->pdev->irq, adapter);
248         if (adapter->params.has_msi)
249                 pci_disable_msi(adapter->pdev);
250 }
251
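/*
 * open_device_map keeps one bit per open port.  The first port to open
 * brings the whole adapter up via cxgb_up(); the last port to close lets
 * cxgb_close() call cxgb_down() and release the shared resources.
 */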
252 static int cxgb_open(struct net_device *dev)
253 {
254         int err;
255         struct adapter *adapter = dev->priv;
256         int other_ports = adapter->open_device_map & PORT_MASK;
257
258         if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
259                 return err;
260
261         __set_bit(dev->if_port, &adapter->open_device_map);
262         link_start(&adapter->port[dev->if_port]);
263         netif_start_queue(dev);
264         if (!other_ports && adapter->params.stats_update_period)
265                 schedule_mac_stats_update(adapter,
266                                           adapter->params.stats_update_period);
267         return 0;
268 }
269
270 static int cxgb_close(struct net_device *dev)
271 {
272         struct adapter *adapter = dev->priv;
273         struct port_info *p = &adapter->port[dev->if_port];
274         struct cmac *mac = p->mac;
275
276         netif_stop_queue(dev);
277         mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
278         netif_carrier_off(dev);
279
280         clear_bit(dev->if_port, &adapter->open_device_map);
281         if (adapter->params.stats_update_period &&
282             !(adapter->open_device_map & PORT_MASK)) {
283                 /* Stop statistics accumulation. */
284                 smp_mb__after_clear_bit();
285                 spin_lock(&adapter->work_lock);   /* sync with update task */
286                 spin_unlock(&adapter->work_lock);
287                 cancel_mac_stats_update(adapter);
288         }
289
290         if (!adapter->open_device_map)
291                 cxgb_down(adapter);
292         return 0;
293 }
294
295 static struct net_device_stats *t1_get_stats(struct net_device *dev)
296 {
297         struct adapter *adapter = dev->priv;
298         struct port_info *p = &adapter->port[dev->if_port];
299         struct net_device_stats *ns = &p->netstats;
300         const struct cmac_statistics *pstats;
301
302         /* Do a full update of the MAC stats */
303         pstats = p->mac->ops->statistics_update(p->mac,
304                                                 MAC_STATS_UPDATE_FULL);
305
306         ns->tx_packets = pstats->TxUnicastFramesOK +
307                 pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK;
308
309         ns->rx_packets = pstats->RxUnicastFramesOK +
310                 pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK;
311
312         ns->tx_bytes = pstats->TxOctetsOK;
313         ns->rx_bytes = pstats->RxOctetsOK;
314
315         ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors +
316                 pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions;
317         ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors +
318                 pstats->RxFCSErrors + pstats->RxAlignErrors +
319                 pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors +
320                 pstats->RxSymbolErrors + pstats->RxRuntErrors;
321
322         ns->multicast  = pstats->RxMulticastFramesOK;
323         ns->collisions = pstats->TxTotalCollisions;
324
325         /* detailed rx_errors */
326         ns->rx_length_errors = pstats->RxFrameTooLongErrors +
327                 pstats->RxJabberErrors;
328         ns->rx_over_errors   = 0;
329         ns->rx_crc_errors    = pstats->RxFCSErrors;
330         ns->rx_frame_errors  = pstats->RxAlignErrors;
331         ns->rx_fifo_errors   = 0;
332         ns->rx_missed_errors = 0;
333
334         /* detailed tx_errors */
335         ns->tx_aborted_errors   = pstats->TxFramesAbortedDueToXSCollisions;
336         ns->tx_carrier_errors   = 0;
337         ns->tx_fifo_errors      = pstats->TxUnderrun;
338         ns->tx_heartbeat_errors = 0;
339         ns->tx_window_errors    = pstats->TxLateCollisions;
340         return ns;
341 }
342
343 static u32 get_msglevel(struct net_device *dev)
344 {
345         struct adapter *adapter = dev->priv;
346
347         return adapter->msg_enable;
348 }
349
350 static void set_msglevel(struct net_device *dev, u32 val)
351 {
352         struct adapter *adapter = dev->priv;
353
354         adapter->msg_enable = val;
355 }
356
357 static char stats_strings[][ETH_GSTRING_LEN] = {
358         "TxOctetsOK",
359         "TxOctetsBad",
360         "TxUnicastFramesOK",
361         "TxMulticastFramesOK",
362         "TxBroadcastFramesOK",
363         "TxPauseFrames",
364         "TxFramesWithDeferredXmissions",
365         "TxLateCollisions",
366         "TxTotalCollisions",
367         "TxFramesAbortedDueToXSCollisions",
368         "TxUnderrun",
369         "TxLengthErrors",
370         "TxInternalMACXmitError",
371         "TxFramesWithExcessiveDeferral",
372         "TxFCSErrors",
373
374         "RxOctetsOK",
375         "RxOctetsBad",
376         "RxUnicastFramesOK",
377         "RxMulticastFramesOK",
378         "RxBroadcastFramesOK",
379         "RxPauseFrames",
380         "RxFCSErrors",
381         "RxAlignErrors",
382         "RxSymbolErrors",
383         "RxDataErrors",
384         "RxSequenceErrors",
385         "RxRuntErrors",
386         "RxJabberErrors",
387         "RxInternalMACRcvError",
388         "RxInRangeLengthErrors",
389         "RxOutOfRangeLengthField",
390         "RxFrameTooLongErrors",
391
392         /* Port stats */
393         "RxPackets",
394         "RxCsumGood",
395         "TxPackets",
396         "TxCsumOffload",
397         "TxTso",
398         "RxVlan",
399         "TxVlan",
400
401         /* Interrupt stats */
402         "rx drops",
403         "pure_rsps",
404         "unhandled irqs",
405         "respQ_empty",
406         "respQ_overflow",
407         "freelistQ_empty",
408         "pkt_too_big",
409         "pkt_mismatch",
410         "cmdQ_full0",
411         "cmdQ_full1",
412
413         "espi_DIP2ParityErr",
414         "espi_DIP4Err",
415         "espi_RxDrops",
416         "espi_TxDrops",
417         "espi_RxOvfl",
418         "espi_ParityErr"
419 };
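/*
 * The order of the strings above is significant: get_stats() below bulk
 * copies the MAC counters (TxOctetsOK..TxFCSErrors, then
 * RxOctetsOK..RxFrameTooLongErrors) and struct sge_port_stats with
 * memcpy(), so these names must stay in the same order as the underlying
 * structure fields.
 */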
420
421 #define T2_REGMAP_SIZE (3 * 1024)
422
423 static int get_regs_len(struct net_device *dev)
424 {
425         return T2_REGMAP_SIZE;
426 }
427
428 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
429 {
430         struct adapter *adapter = dev->priv;
431
432         strcpy(info->driver, DRV_NAME);
433         strcpy(info->version, DRV_VERSION);
434         strcpy(info->fw_version, "N/A");
435         strcpy(info->bus_info, pci_name(adapter->pdev));
436 }
437
438 static int get_stats_count(struct net_device *dev)
439 {
440         return ARRAY_SIZE(stats_strings);
441 }
442
443 static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
444 {
445         if (stringset == ETH_SS_STATS)
446                 memcpy(data, stats_strings, sizeof(stats_strings));
447 }
448
449 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
450                       u64 *data)
451 {
452         struct adapter *adapter = dev->priv;
453         struct cmac *mac = adapter->port[dev->if_port].mac;
454         const struct cmac_statistics *s;
455         const struct sge_intr_counts *t;
456         struct sge_port_stats ss;
457         unsigned int len;
458
459         s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
460
461         len = sizeof(u64)*(&s->TxFCSErrors + 1 - &s->TxOctetsOK);
462         memcpy(data, &s->TxOctetsOK, len);
463         data += len;
464
465         len = sizeof(u64)*(&s->RxFrameTooLongErrors + 1 - &s->RxOctetsOK);
466         memcpy(data, &s->RxOctetsOK, len);
467         data += len;
468
469         t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss);
470         memcpy(data, &ss, sizeof(ss));
471         data += sizeof(ss);
472
473         t = t1_sge_get_intr_counts(adapter->sge);
474         *data++ = t->rx_drops;
475         *data++ = t->pure_rsps;
476         *data++ = t->unhandled_irqs;
477         *data++ = t->respQ_empty;
478         *data++ = t->respQ_overflow;
479         *data++ = t->freelistQ_empty;
480         *data++ = t->pkt_too_big;
481         *data++ = t->pkt_mismatch;
482         *data++ = t->cmdQ_full[0];
483         *data++ = t->cmdQ_full[1];
484
485         if (adapter->espi) {
486                 const struct espi_intr_counts *e;
487
488                 e = t1_espi_get_intr_counts(adapter->espi);
489                 *data++ = e->DIP2_parity_err;
490                 *data++ = e->DIP4_err;
491                 *data++ = e->rx_drops;
492                 *data++ = e->tx_drops;
493                 *data++ = e->rx_ovflw;
494                 *data++ = e->parity_err;
495         }
496 }
497
498 static inline void reg_block_dump(struct adapter *ap, void *buf,
499                                   unsigned int start, unsigned int end)
500 {
501         u32 *p = buf + start;
502
503         for ( ; start <= end; start += sizeof(u32))
504                 *p++ = readl(ap->regs + start);
505 }
506
507 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
508                      void *buf)
509 {
510         struct adapter *ap = dev->priv;
511
512         /*
513          * Version scheme: bits 0..9: chip version, bits 10..15: chip revision
514          */
515         regs->version = 2;
516
517         memset(buf, 0, T2_REGMAP_SIZE);
518         reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER);
519         reg_block_dump(ap, buf, A_MC3_CFG, A_MC4_INT_CAUSE);
520         reg_block_dump(ap, buf, A_TPI_ADDR, A_TPI_PAR);
521         reg_block_dump(ap, buf, A_TP_IN_CONFIG, A_TP_TX_DROP_COUNT);
522         reg_block_dump(ap, buf, A_RAT_ROUTE_CONTROL, A_RAT_INTR_CAUSE);
523         reg_block_dump(ap, buf, A_CSPI_RX_AE_WM, A_CSPI_INTR_ENABLE);
524         reg_block_dump(ap, buf, A_ESPI_SCH_TOKEN0, A_ESPI_GOSTAT);
525         reg_block_dump(ap, buf, A_ULP_ULIMIT, A_ULP_PIO_CTRL);
526         reg_block_dump(ap, buf, A_PL_ENABLE, A_PL_CAUSE);
527         reg_block_dump(ap, buf, A_MC5_CONFIG, A_MC5_MASK_WRITE_CMD);
528 }
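/*
 * Illustration (assuming a registered interface named eth0): the blocks
 * gathered above are returned through the standard ethtool register dump,
 * e.g.
 *
 *   ethtool -d eth0 raw on > t1_regs.bin
 *
 * which yields the T2_REGMAP_SIZE bytes filled in above.
 */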
529
530 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
531 {
532         struct adapter *adapter = dev->priv;
533         struct port_info *p = &adapter->port[dev->if_port];
534
535         cmd->supported = p->link_config.supported;
536         cmd->advertising = p->link_config.advertising;
537
538         if (netif_carrier_ok(dev)) {
539                 cmd->speed = p->link_config.speed;
540                 cmd->duplex = p->link_config.duplex;
541         } else {
542                 cmd->speed = -1;
543                 cmd->duplex = -1;
544         }
545
546         cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
547         cmd->phy_address = p->phy->addr;
548         cmd->transceiver = XCVR_EXTERNAL;
549         cmd->autoneg = p->link_config.autoneg;
550         cmd->maxtxpkt = 0;
551         cmd->maxrxpkt = 0;
552         return 0;
553 }
554
555 static int speed_duplex_to_caps(int speed, int duplex)
556 {
557         int cap = 0;
558
559         switch (speed) {
560         case SPEED_10:
561                 if (duplex == DUPLEX_FULL)
562                         cap = SUPPORTED_10baseT_Full;
563                 else
564                         cap = SUPPORTED_10baseT_Half;
565                 break;
566         case SPEED_100:
567                 if (duplex == DUPLEX_FULL)
568                         cap = SUPPORTED_100baseT_Full;
569                 else
570                         cap = SUPPORTED_100baseT_Half;
571                 break;
572         case SPEED_1000:
573                 if (duplex == DUPLEX_FULL)
574                         cap = SUPPORTED_1000baseT_Full;
575                 else
576                         cap = SUPPORTED_1000baseT_Half;
577                 break;
578         case SPEED_10000:
579                 if (duplex == DUPLEX_FULL)
580                         cap = SUPPORTED_10000baseT_Full;
581         }
582         return cap;
583 }
584
585 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
586                       ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
587                       ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
588                       ADVERTISED_10000baseT_Full)
589
590 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
591 {
592         struct adapter *adapter = dev->priv;
593         struct port_info *p = &adapter->port[dev->if_port];
594         struct link_config *lc = &p->link_config;
595
596         if (!(lc->supported & SUPPORTED_Autoneg))
597                 return -EOPNOTSUPP;             /* can't change speed/duplex */
598
599         if (cmd->autoneg == AUTONEG_DISABLE) {
600                 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
601
602                 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
603                         return -EINVAL;
604                 lc->requested_speed = cmd->speed;
605                 lc->requested_duplex = cmd->duplex;
606                 lc->advertising = 0;
607         } else {
608                 cmd->advertising &= ADVERTISED_MASK;
609                 if (cmd->advertising & (cmd->advertising - 1))
610                         cmd->advertising = lc->supported;
611                 cmd->advertising &= lc->supported;
612                 if (!cmd->advertising)
613                         return -EINVAL;
614                 lc->requested_speed = SPEED_INVALID;
615                 lc->requested_duplex = DUPLEX_INVALID;
616                 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
617         }
618         lc->autoneg = cmd->autoneg;
619         if (netif_running(dev))
620                 t1_link_start(p->phy, p->mac, lc);
621         return 0;
622 }
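/*
 * Illustration (assuming an interface named eth0): this hook backs the
 * usual ethtool speed/duplex controls, e.g.
 *
 *   ethtool -s eth0 autoneg off speed 100 duplex full
 *   ethtool -s eth0 autoneg on
 *
 * Forced modes must be part of lc->supported, and forcing 1Gb/s is
 * rejected, as checked above.
 */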
623
624 static void get_pauseparam(struct net_device *dev,
625                            struct ethtool_pauseparam *epause)
626 {
627         struct adapter *adapter = dev->priv;
628         struct port_info *p = &adapter->port[dev->if_port];
629
630         epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
631         epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
632         epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
633 }
634
635 static int set_pauseparam(struct net_device *dev,
636                           struct ethtool_pauseparam *epause)
637 {
638         struct adapter *adapter = dev->priv;
639         struct port_info *p = &adapter->port[dev->if_port];
640         struct link_config *lc = &p->link_config;
641
642         if (epause->autoneg == AUTONEG_DISABLE)
643                 lc->requested_fc = 0;
644         else if (lc->supported & SUPPORTED_Autoneg)
645                 lc->requested_fc = PAUSE_AUTONEG;
646         else
647                 return -EINVAL;
648
649         if (epause->rx_pause)
650                 lc->requested_fc |= PAUSE_RX;
651         if (epause->tx_pause)
652                 lc->requested_fc |= PAUSE_TX;
653         if (lc->autoneg == AUTONEG_ENABLE) {
654                 if (netif_running(dev))
655                         t1_link_start(p->phy, p->mac, lc);
656         } else {
657                 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
658                 if (netif_running(dev))
659                         p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1,
660                                                          lc->fc);
661         }
662         return 0;
663 }
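/*
 * Illustration (assuming an interface named eth0): pause settings map to
 * the standard ethtool interface, e.g.
 *
 *   ethtool -A eth0 autoneg off rx on tx off
 *
 * When link autonegotiation is disabled the MAC is reprogrammed
 * immediately via set_speed_duplex_fc(); otherwise the new setting is
 * applied by restarting negotiation.
 */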
664
665 static u32 get_rx_csum(struct net_device *dev)
666 {
667         struct adapter *adapter = dev->priv;
668
669         return (adapter->flags & RX_CSUM_ENABLED) != 0;
670 }
671
672 static int set_rx_csum(struct net_device *dev, u32 data)
673 {
674         struct adapter *adapter = dev->priv;
675
676         if (data)
677                 adapter->flags |= RX_CSUM_ENABLED;
678         else
679                 adapter->flags &= ~RX_CSUM_ENABLED;
680         return 0;
681 }
682
683 static int set_tso(struct net_device *dev, u32 value)
684 {
685         struct adapter *adapter = dev->priv;
686
687         if (!(adapter->flags & TSO_CAPABLE))
688                 return value ? -EOPNOTSUPP : 0;
689         return ethtool_op_set_tso(dev, value);
690 }
691
692 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
693 {
694         struct adapter *adapter = dev->priv;
695         int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
696
697         e->rx_max_pending = MAX_RX_BUFFERS;
698         e->rx_mini_max_pending = 0;
699         e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
700         e->tx_max_pending = MAX_CMDQ_ENTRIES;
701
702         e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl];
703         e->rx_mini_pending = 0;
704         e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl];
705         e->tx_pending = adapter->params.sge.cmdQ_size[0];
706 }
707
708 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
709 {
710         struct adapter *adapter = dev->priv;
711         int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
712
713         if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending ||
714             e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
715             e->tx_pending > MAX_CMDQ_ENTRIES ||
716             e->rx_pending < MIN_FL_ENTRIES ||
717             e->rx_jumbo_pending < MIN_FL_ENTRIES ||
718             e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1))
719                 return -EINVAL;
720
721         if (adapter->flags & FULL_INIT_DONE)
722                 return -EBUSY;
723
724         adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
725         adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
726         adapter->params.sge.cmdQ_size[0] = e->tx_pending;
727         adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ?
728                 MAX_CMDQ1_ENTRIES : e->tx_pending;
729         return 0;
730 }
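/*
 * Illustration (assuming an interface named eth0): ring sizes can only be
 * changed before the rings are first brought up; once FULL_INIT_DONE is
 * set the request fails with -EBUSY.
 *
 *   ethtool -G eth0 rx 2048 rx-jumbo 2048 tx 1024
 *   ethtool -g eth0
 */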
731
732 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
733 {
734         struct adapter *adapter = dev->priv;
735
736         adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
737         adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
738         adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
739         t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
740         return 0;
741 }
742
743 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
744 {
745         struct adapter *adapter = dev->priv;
746
747         c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs;
748         c->rate_sample_interval = adapter->params.sge.sample_interval_usecs;
749         c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable;
750         return 0;
751 }
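/*
 * Illustration (assuming an interface named eth0): only the three values
 * read back above are honoured by set_coalesce(), e.g.
 *
 *   ethtool -C eth0 rx-usecs 50 adaptive-rx on sample-interval 2
 */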
752
753 static int get_eeprom_len(struct net_device *dev)
754 {
755         struct adapter *adapter = dev->priv;
756
757         return t1_is_asic(adapter) ? EEPROM_SIZE : 0;
758 }
759
760 #define EEPROM_MAGIC(ap) \
761         (PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16))
762
763 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
764                       u8 *data)
765 {
766         int i;
767         u8 buf[EEPROM_SIZE] __attribute__((aligned(4)));
768         struct adapter *adapter = dev->priv;
769
770         e->magic = EEPROM_MAGIC(adapter);
771         for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32))
772                 t1_seeprom_read(adapter, i, (u32 *)&buf[i]);
773         memcpy(data, buf + e->offset, e->len);
774         return 0;
775 }
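/*
 * Illustration (assuming an interface named eth0): the 32-byte window
 * exposed above can be read with
 *
 *   ethtool -e eth0 offset 0 length 32
 *
 * and the magic value combines the Chelsio PCI vendor ID with the chip
 * version, as defined by EEPROM_MAGIC() above.
 */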
776
777 static const struct ethtool_ops t1_ethtool_ops = {
778         .get_settings      = get_settings,
779         .set_settings      = set_settings,
780         .get_drvinfo       = get_drvinfo,
781         .get_msglevel      = get_msglevel,
782         .set_msglevel      = set_msglevel,
783         .get_ringparam     = get_sge_param,
784         .set_ringparam     = set_sge_param,
785         .get_coalesce      = get_coalesce,
786         .set_coalesce      = set_coalesce,
787         .get_eeprom_len    = get_eeprom_len,
788         .get_eeprom        = get_eeprom,
789         .get_pauseparam    = get_pauseparam,
790         .set_pauseparam    = set_pauseparam,
791         .get_rx_csum       = get_rx_csum,
792         .set_rx_csum       = set_rx_csum,
793         .get_tx_csum       = ethtool_op_get_tx_csum,
794         .set_tx_csum       = ethtool_op_set_tx_csum,
795         .get_sg            = ethtool_op_get_sg,
796         .set_sg            = ethtool_op_set_sg,
797         .get_link          = ethtool_op_get_link,
798         .get_strings       = get_strings,
799         .get_stats_count   = get_stats_count,
800         .get_ethtool_stats = get_stats,
801         .get_regs_len      = get_regs_len,
802         .get_regs          = get_regs,
803         .get_tso           = ethtool_op_get_tso,
804         .set_tso           = set_tso,
805 };
806
807 static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
808 {
809         struct adapter *adapter = dev->priv;
810         struct mii_ioctl_data *data = if_mii(req);
811
812         switch (cmd) {
813         case SIOCGMIIPHY:
814                 data->phy_id = adapter->port[dev->if_port].phy->addr;
815                 /* FALLTHRU */
816         case SIOCGMIIREG: {
817                 struct cphy *phy = adapter->port[dev->if_port].phy;
818                 u32 val;
819
820                 if (!phy->mdio_read)
821                         return -EOPNOTSUPP;
822                 phy->mdio_read(adapter, data->phy_id, 0, data->reg_num & 0x1f,
823                                &val);
824                 data->val_out = val;
825                 break;
826         }
827         case SIOCSMIIREG: {
828                 struct cphy *phy = adapter->port[dev->if_port].phy;
829
830                 if (!capable(CAP_NET_ADMIN))
831                         return -EPERM;
832                 if (!phy->mdio_write)
833                         return -EOPNOTSUPP;
834                 phy->mdio_write(adapter, data->phy_id, 0, data->reg_num & 0x1f,
835                                 data->val_in);
836                 break;
837         }
838
839         default:
840                 return -EOPNOTSUPP;
841         }
842         return 0;
843 }
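/*
 * These are the clause-22 MII ioctls used by tools such as mii-tool.
 * Reads go through the PHY's mdio_read hook; writes additionally require
 * CAP_NET_ADMIN, following the usual kernel convention for SIOCSMIIREG.
 */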
844
845 static int t1_change_mtu(struct net_device *dev, int new_mtu)
846 {
847         int ret;
848         struct adapter *adapter = dev->priv;
849         struct cmac *mac = adapter->port[dev->if_port].mac;
850
851         if (!mac->ops->set_mtu)
852                 return -EOPNOTSUPP;
853         if (new_mtu < 68)
854                 return -EINVAL;
855         if ((ret = mac->ops->set_mtu(mac, new_mtu)))
856                 return ret;
857         dev->mtu = new_mtu;
858         return 0;
859 }
860
861 static int t1_set_mac_addr(struct net_device *dev, void *p)
862 {
863         struct adapter *adapter = dev->priv;
864         struct cmac *mac = adapter->port[dev->if_port].mac;
865         struct sockaddr *addr = p;
866
867         if (!mac->ops->macaddress_set)
868                 return -EOPNOTSUPP;
869
870         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
871         mac->ops->macaddress_set(mac, dev->dev_addr);
872         return 0;
873 }
874
875 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
876 static void vlan_rx_register(struct net_device *dev,
877                                    struct vlan_group *grp)
878 {
879         struct adapter *adapter = dev->priv;
880
881         spin_lock_irq(&adapter->async_lock);
882         adapter->vlan_grp = grp;
883         t1_set_vlan_accel(adapter, grp != NULL);
884         spin_unlock_irq(&adapter->async_lock);
885 }
886
887 static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
888 {
889         struct adapter *adapter = dev->priv;
890
891         spin_lock_irq(&adapter->async_lock);
892         if (adapter->vlan_grp)
893                 adapter->vlan_grp->vlan_devices[vid] = NULL;
894         spin_unlock_irq(&adapter->async_lock);
895 }
896 #endif
897
898 #ifdef CONFIG_NET_POLL_CONTROLLER
899 static void t1_netpoll(struct net_device *dev)
900 {
901         unsigned long flags;
902         struct adapter *adapter = dev->priv;
903
904         local_irq_save(flags);
905         t1_interrupt(adapter->pdev->irq, adapter);
906         local_irq_restore(flags);
907 }
908 #endif
909
910 /*
911  * Periodic accumulation of MAC statistics.  This is used only if the MAC
912  * does not have any other way to prevent stats counter overflow.
913  */
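/*
 * Rescheduling happens under work_lock and only while a port bit is still
 * set in open_device_map.  cxgb_close() clears its bit, briefly takes the
 * same lock so that a running update can finish, and then cancels the
 * delayed work, so the task cannot rearm itself after the last close.
 */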
914 static void mac_stats_task(struct work_struct *work)
915 {
916         int i;
917         struct adapter *adapter =
918                 container_of(work, struct adapter, stats_update_task.work);
919
920         for_each_port(adapter, i) {
921                 struct port_info *p = &adapter->port[i];
922
923                 if (netif_running(p->dev))
924                         p->mac->ops->statistics_update(p->mac,
925                                                        MAC_STATS_UPDATE_FAST);
926         }
927
928         /* Schedule the next statistics update if any port is active. */
929         spin_lock(&adapter->work_lock);
930         if (adapter->open_device_map & PORT_MASK)
931                 schedule_mac_stats_update(adapter,
932                                           adapter->params.stats_update_period);
933         spin_unlock(&adapter->work_lock);
934 }
935
936 /*
937  * Processes elmer0 external interrupts in process context.
938  */
939 static void ext_intr_task(struct work_struct *work)
940 {
941         struct adapter *adapter =
942                 container_of(work, struct adapter, ext_intr_handler_task);
943
944         t1_elmer0_ext_intr_handler(adapter);
945
946         /* Now reenable external interrupts */
947         spin_lock_irq(&adapter->async_lock);
948         adapter->slow_intr_mask |= F_PL_INTR_EXT;
949         writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
950         writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
951                    adapter->regs + A_PL_ENABLE);
952         spin_unlock_irq(&adapter->async_lock);
953 }
954
955 /*
956  * Interrupt-context handler for elmer0 external interrupts.
957  */
958 void t1_elmer0_ext_intr(struct adapter *adapter)
959 {
960         /*
961          * Schedule a task to handle external interrupts as we require
962          * a process context.  We disable EXT interrupts in the interim
963          * and let the task reenable them when it's done.
964          */
965         adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
966         writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
967                    adapter->regs + A_PL_ENABLE);
968         schedule_work(&adapter->ext_intr_handler_task);
969 }
970
971 void t1_fatal_err(struct adapter *adapter)
972 {
973         if (adapter->flags & FULL_INIT_DONE) {
974                 t1_sge_stop(adapter->sge);
975                 t1_interrupts_disable(adapter);
976         }
977         CH_ALERT("%s: encountered fatal error, operation suspended\n",
978                  adapter->name);
979 }
980
981 static int __devinit init_one(struct pci_dev *pdev,
982                               const struct pci_device_id *ent)
983 {
984         static int version_printed;
985
986         int i, err, pci_using_dac = 0;
987         unsigned long mmio_start, mmio_len;
988         const struct board_info *bi;
989         struct adapter *adapter = NULL;
990         struct port_info *pi;
991
992         if (!version_printed) {
993                 printk(KERN_INFO "%s - version %s\n", DRV_DESCRIPTION,
994                        DRV_VERSION);
995                 ++version_printed;
996         }
997
998         err = pci_enable_device(pdev);
999         if (err)
1000                 return err;
1001
1002         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1003                 CH_ERR("%s: cannot find PCI device memory base address\n",
1004                        pci_name(pdev));
1005                 err = -ENODEV;
1006                 goto out_disable_pdev;
1007         }
1008
1009         if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
1010                 pci_using_dac = 1;
1011
1012                 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
1013                         CH_ERR("%s: unable to obtain 64-bit DMA for "
1014                                "consistent allocations\n", pci_name(pdev));
1015                         err = -ENODEV;
1016                         goto out_disable_pdev;
1017                 }
1018
1019         } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
1020                 CH_ERR("%s: no usable DMA configuration\n", pci_name(pdev));
1021                 goto out_disable_pdev;
1022         }
1023
1024         err = pci_request_regions(pdev, DRV_NAME);
1025         if (err) {
1026                 CH_ERR("%s: cannot obtain PCI resources\n", pci_name(pdev));
1027                 goto out_disable_pdev;
1028         }
1029
1030         pci_set_master(pdev);
1031
1032         mmio_start = pci_resource_start(pdev, 0);
1033         mmio_len = pci_resource_len(pdev, 0);
1034         bi = t1_get_board_info(ent->driver_data);
1035
1036         for (i = 0; i < bi->port_number; ++i) {
1037                 struct net_device *netdev;
1038
1039                 netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter));
1040                 if (!netdev) {
1041                         err = -ENOMEM;
1042                         goto out_free_dev;
1043                 }
1044
1045                 SET_MODULE_OWNER(netdev);
1046                 SET_NETDEV_DEV(netdev, &pdev->dev);
1047
1048                 if (!adapter) {
1049                         adapter = netdev->priv;
1050                         adapter->pdev = pdev;
1051                         adapter->port[0].dev = netdev;  /* so we don't leak it */
1052
1053                         adapter->regs = ioremap(mmio_start, mmio_len);
1054                         if (!adapter->regs) {
1055                                 CH_ERR("%s: cannot map device registers\n",
1056                                        pci_name(pdev));
1057                                 err = -ENOMEM;
1058                                 goto out_free_dev;
1059                         }
1060
1061                         if (t1_get_board_rev(adapter, bi, &adapter->params)) {
1062                                 err = -ENODEV;    /* Can't handle this chip rev */
1063                                 goto out_free_dev;
1064                         }
1065
1066                         adapter->name = pci_name(pdev);
1067                         adapter->msg_enable = dflt_msg_enable;
1068                         adapter->mmio_len = mmio_len;
1069
1070                         spin_lock_init(&adapter->tpi_lock);
1071                         spin_lock_init(&adapter->work_lock);
1072                         spin_lock_init(&adapter->async_lock);
1073                         spin_lock_init(&adapter->mac_lock);
1074
1075                         INIT_WORK(&adapter->ext_intr_handler_task,
1076                                   ext_intr_task);
1077                         INIT_DELAYED_WORK(&adapter->stats_update_task,
1078                                           mac_stats_task);
1079
1080                         pci_set_drvdata(pdev, netdev);
1081                 }
1082
1083                 pi = &adapter->port[i];
1084                 pi->dev = netdev;
1085                 netif_carrier_off(netdev);
1086                 netdev->irq = pdev->irq;
1087                 netdev->if_port = i;
1088                 netdev->mem_start = mmio_start;
1089                 netdev->mem_end = mmio_start + mmio_len - 1;
1090                 netdev->priv = adapter;
1091                 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
1092                 netdev->features |= NETIF_F_LLTX;
1093
1094                 adapter->flags |= RX_CSUM_ENABLED | TCP_CSUM_CAPABLE;
1095                 if (pci_using_dac)
1096                         netdev->features |= NETIF_F_HIGHDMA;
1097                 if (vlan_tso_capable(adapter)) {
1098 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
1099                         adapter->flags |= VLAN_ACCEL_CAPABLE;
1100                         netdev->features |=
1101                                 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1102                         netdev->vlan_rx_register = vlan_rx_register;
1103                         netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;
1104 #endif
1105
1106                         /* T204: disable TSO */
1107                         if (!(is_T2(adapter)) || bi->port_number != 4) {
1108                                 adapter->flags |= TSO_CAPABLE;
1109                                 netdev->features |= NETIF_F_TSO;
1110                         }
1111                 }
1112
1113                 netdev->open = cxgb_open;
1114                 netdev->stop = cxgb_close;
1115                 netdev->hard_start_xmit = t1_start_xmit;
1116                 netdev->hard_header_len += (adapter->flags & TSO_CAPABLE) ?
1117                         sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt);
1118                 netdev->get_stats = t1_get_stats;
1119                 netdev->set_multicast_list = t1_set_rxmode;
1120                 netdev->do_ioctl = t1_ioctl;
1121                 netdev->change_mtu = t1_change_mtu;
1122                 netdev->set_mac_address = t1_set_mac_addr;
1123 #ifdef CONFIG_NET_POLL_CONTROLLER
1124                 netdev->poll_controller = t1_netpoll;
1125 #endif
1126 #ifdef CONFIG_CHELSIO_T1_NAPI
1127                 netdev->weight = 64;
1128                 netdev->poll = t1_poll;
1129 #endif
1130
1131                 SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
1132         }
1133
1134         if (t1_init_sw_modules(adapter, bi) < 0) {
1135                 err = -ENODEV;
1136                 goto out_free_dev;
1137         }
1138
1139         /*
1140          * The card is now ready to go.  If any errors occur during device
1141          * registration, we do not fail the whole card but rather proceed only
1142          * with the ports we manage to register successfully.  However, we must
1143          * register at least one net device.
1144          */
1145         for (i = 0; i < bi->port_number; ++i) {
1146                 err = register_netdev(adapter->port[i].dev);
1147                 if (err)
1148                         CH_WARN("%s: cannot register net device %s, skipping\n",
1149                                 pci_name(pdev), adapter->port[i].dev->name);
1150                 else {
1151                         /*
1152                          * Change the name we use for messages to the name of
1153                          * the first successfully registered interface.
1154                          */
1155                         if (!adapter->registered_device_map)
1156                                 adapter->name = adapter->port[i].dev->name;
1157
1158                         __set_bit(i, &adapter->registered_device_map);
1159                 }
1160         }
1161         if (!adapter->registered_device_map) {
1162                 CH_ERR("%s: could not register any net devices\n",
1163                        pci_name(pdev));
1164                 goto out_release_adapter_res;
1165         }
1166
1167         printk(KERN_INFO "%s: %s (rev %d), %s %dMHz/%d-bit\n", adapter->name,
1168                bi->desc, adapter->params.chip_revision,
1169                adapter->params.pci.is_pcix ? "PCIX" : "PCI",
1170                adapter->params.pci.speed, adapter->params.pci.width);
1171
1172         /*
1173          * Set the T1B ASIC and memory clocks.
1174          */
1175         if (t1powersave)
1176                 adapter->t1powersave = LCLOCK;  /* HW default is powersave mode. */
1177         else
1178                 adapter->t1powersave = HCLOCK;
1179         if (t1_is_T1B(adapter))
1180                 t1_clock(adapter, t1powersave);
1181
1182         return 0;
1183
1184 out_release_adapter_res:
1185         t1_free_sw_modules(adapter);
1186 out_free_dev:
1187         if (adapter) {
1188                 if (adapter->regs)
1189                         iounmap(adapter->regs);
1190                 for (i = bi->port_number - 1; i >= 0; --i)
1191                         if (adapter->port[i].dev)
1192                                 free_netdev(adapter->port[i].dev);
1193         }
1194         pci_release_regions(pdev);
1195 out_disable_pdev:
1196         pci_disable_device(pdev);
1197         pci_set_drvdata(pdev, NULL);
1198         return err;
1199 }
1200
1201 static void bit_bang(struct adapter *adapter, int bitdata, int nbits)
1202 {
1203         int data;
1204         int i;
1205         u32 val;
1206
1207         enum {
1208                 S_CLOCK = 1 << 3,
1209                 S_DATA = 1 << 4
1210         };
1211
1212         for (i = (nbits - 1); i > -1; i--) {
1213
1214                 udelay(50);
1215
1216                 data = ((bitdata >> i) & 0x1);
1217                 __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1218
1219                 if (data)
1220                         val |= S_DATA;
1221                 else
1222                         val &= ~S_DATA;
1223
1224                 udelay(50);
1225
1226                 /* Set SCLOCK low */
1227                 val &= ~S_CLOCK;
1228                 __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1229
1230                 udelay(50);
1231
1232                 /* Write SCLOCK high */
1233                 val |= S_CLOCK;
1234                 __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1235
1236         }
1237 }
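/*
 * bit_bang() shifts 'bitdata' out MSB first on the ELMER0 GPO pins: each
 * data bit is set up while S_CLOCK is driven low and is latched on the
 * following rising edge.  t1_clock() below strings the T, N and M fields
 * of each clock synthesizer together this way.
 */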
1238
1239 static int t1_clock(struct adapter *adapter, int mode)
1240 {
1241         u32 val;
1242         int M_CORE_VAL;
1243         int M_MEM_VAL;
1244
1245         enum {
1246                 M_CORE_BITS     = 9,
1247                 T_CORE_VAL      = 0,
1248                 T_CORE_BITS     = 2,
1249                 N_CORE_VAL      = 0,
1250                 N_CORE_BITS     = 2,
1251                 M_MEM_BITS      = 9,
1252                 T_MEM_VAL       = 0,
1253                 T_MEM_BITS      = 2,
1254                 N_MEM_VAL       = 0,
1255                 N_MEM_BITS      = 2,
1256                 NP_LOAD         = 1 << 17,
1257                 S_LOAD_MEM      = 1 << 5,
1258                 S_LOAD_CORE     = 1 << 6,
1259                 S_CLOCK         = 1 << 3
1260         };
1261
1262         if (!t1_is_T1B(adapter))
1263                 return -ENODEV; /* Can't re-clock this chip. */
1264
1265         if (mode & 2)
1266                 return 0;       /* show current mode. */
1267
1268         if ((adapter->t1powersave & 1) == (mode & 1))
1269                 return -EALREADY;       /* ASIC already running in mode. */
1270
1271         if ((mode & 1) == HCLOCK) {
1272                 M_CORE_VAL = 0x14;
1273                 M_MEM_VAL = 0x18;
1274                 adapter->t1powersave = HCLOCK;  /* overclock */
1275         } else {
1276                 M_CORE_VAL = 0xe;
1277                 M_MEM_VAL = 0x10;
1278                 adapter->t1powersave = LCLOCK;  /* underclock */
1279         }
1280
1281         /* Don't interrupt this serial stream! */
1282         spin_lock(&adapter->tpi_lock);
1283
1284         /* Initialize for ASIC core */
1285         __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1286         val |= NP_LOAD;
1287         udelay(50);
1288         __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1289         udelay(50);
1290         __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1291         val &= ~S_LOAD_CORE;
1292         val &= ~S_CLOCK;
1293         __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1294         udelay(50);
1295
1296         /* Serial program the ASIC clock synthesizer */
1297         bit_bang(adapter, T_CORE_VAL, T_CORE_BITS);
1298         bit_bang(adapter, N_CORE_VAL, N_CORE_BITS);
1299         bit_bang(adapter, M_CORE_VAL, M_CORE_BITS);
1300         udelay(50);
1301
1302         /* Finish ASIC core */
1303         __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1304         val |= S_LOAD_CORE;
1305         udelay(50);
1306         __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1307         udelay(50);
1308         __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1309         val &= ~S_LOAD_CORE;
1310         udelay(50);
1311         __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1312         udelay(50);
1313
1314         /* Initialize for memory */
1315         __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1316         val |= NP_LOAD;
1317         udelay(50);
1318         __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1319         udelay(50);
1320         __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1321         val &= ~S_LOAD_MEM;
1322         val &= ~S_CLOCK;
1323         udelay(50);
1324         __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1325         udelay(50);
1326
1327         /* Serial program the memory clock synthesizer */
1328         bit_bang(adapter, T_MEM_VAL, T_MEM_BITS);
1329         bit_bang(adapter, N_MEM_VAL, N_MEM_BITS);
1330         bit_bang(adapter, M_MEM_VAL, M_MEM_BITS);
1331         udelay(50);
1332
1333         /* Finish memory */
1334         __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1335         val |= S_LOAD_MEM;
1336         udelay(50);
1337         __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1338         udelay(50);
1339         __t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1340         val &= ~S_LOAD_MEM;
1341         udelay(50);
1342         __t1_tpi_write(adapter, A_ELMER0_GPO, val);
1343
1344         spin_unlock(&adapter->tpi_lock);
1345
1346         return 0;
1347 }
1348
1349 static inline void t1_sw_reset(struct pci_dev *pdev)
1350 {
1351         pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
1352         pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
1353 }
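/*
 * Writing 3 and then 0 to the PCI power-management control/status register
 * cycles the function through power state D3hot and back to D0, which the
 * driver uses as a software reset of the T1 ASIC on removal.
 */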
1354
1355 static void __devexit remove_one(struct pci_dev *pdev)
1356 {
1357         struct net_device *dev = pci_get_drvdata(pdev);
1358         struct adapter *adapter = dev->priv;
1359         int i;
1360
1361         for_each_port(adapter, i) {
1362                 if (test_bit(i, &adapter->registered_device_map))
1363                         unregister_netdev(adapter->port[i].dev);
1364         }
1365
1366         t1_free_sw_modules(adapter);
1367         iounmap(adapter->regs);
1368
1369         while (--i >= 0) {
1370                 if (adapter->port[i].dev)
1371                         free_netdev(adapter->port[i].dev);
1372         }
1373
1374         pci_release_regions(pdev);
1375         pci_disable_device(pdev);
1376         pci_set_drvdata(pdev, NULL);
1377         t1_sw_reset(pdev);
1378 }
1379
1380 static struct pci_driver driver = {
1381         .name     = DRV_NAME,
1382         .id_table = t1_pci_tbl,
1383         .probe    = init_one,
1384         .remove   = __devexit_p(remove_one),
1385 };
1386
1387 static int __init t1_init_module(void)
1388 {
1389         return pci_register_driver(&driver);
1390 }
1391
1392 static void __exit t1_cleanup_module(void)
1393 {
1394         pci_unregister_driver(&driver);
1395 }
1396
1397 module_init(t1_init_module);
1398 module_exit(t1_cleanup_module);