1 /*****************************************************************************
5 * $Date: 2005/06/22 00:43:25 $ *
7 * Chelsio 10Gb Ethernet Driver. *
9 * This program is free software; you can redistribute it and/or modify *
10 * it under the terms of the GNU General Public License, version 2, as *
11 * published by the Free Software Foundation. *
13 * You should have received a copy of the GNU General Public License along *
14 * with this program; if not, write to the Free Software Foundation, Inc., *
15 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
21 * http://www.chelsio.com *
23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
24 * All rights reserved. *
26 * Maintainers: maintainers@chelsio.com *
28 * Authors: Dimitrios Michailidis <dm@chelsio.com> *
29 * Tina Yang <tainay@chelsio.com> *
30 * Felix Marti <felix@chelsio.com> *
31 * Scott Bardone <sbardone@chelsio.com> *
32 * Kurt Ottaway <kottaway@chelsio.com> *
33 * Frank DiMambro <frank@chelsio.com> *
37 ****************************************************************************/
40 #include <linux/module.h>
41 #include <linux/init.h>
42 #include <linux/pci.h>
43 #include <linux/netdevice.h>
44 #include <linux/etherdevice.h>
45 #include <linux/if_vlan.h>
46 #include <linux/mii.h>
47 #include <linux/sockios.h>
48 #include <linux/proc_fs.h>
49 #include <linux/dma-mapping.h>
50 #include <asm/uaccess.h>
60 #include <linux/tqueue.h>
/* 2.4 compatibility shim: map the 2.6 workqueue API onto 2.4 task queues. */
61 #define INIT_WORK INIT_TQUEUE
62 #define schedule_work schedule_task
63 #define flush_scheduled_work flush_scheduled_tasks
/* (2.4 variant) Re-arm the MAC statistics timer to fire in 'secs' seconds. */
65 static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
67 mod_timer(&ap->stats_update_timer, jiffies + secs * HZ);
/* (2.4 variant) Stop the stats timer and wait out any already-queued task. */
70 static inline void cancel_mac_stats_update(struct adapter *ap)
72 del_timer_sync(&ap->stats_update_timer);
73 flush_scheduled_tasks();
77 * Stats update timer for 2.4. It schedules a task to do the actual update as
78 * we need to access MAC statistics in process context.
/* Timer callback: defer the real stats work to process context via a task. */
80 static void mac_stats_timer(unsigned long data)
82 struct adapter *ap = (struct adapter *)data;
84 schedule_task(&ap->stats_update_task);
87 #include <linux/workqueue.h>
/* (2.6 variant) Queue the delayed MAC-stats work to run in 'secs' seconds. */
89 static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
91 schedule_delayed_work(&ap->stats_update_task, secs * HZ);
/* (2.6 variant) Cancel any pending delayed MAC-stats work. */
94 static inline void cancel_mac_stats_update(struct adapter *ap)
96 cancel_delayed_work(&ap->stats_update_task);
/* SGE ring-size limits; used to validate ethtool ringparam requests below. */
100 #define MAX_CMDQ_ENTRIES 16384
101 #define MAX_CMDQ1_ENTRIES 1024
102 #define MAX_RX_BUFFERS 16384
103 #define MAX_RX_JUMBO_BUFFERS 16384
104 #define MAX_TX_BUFFERS_HIGH 16384U
105 #define MAX_TX_BUFFERS_LOW 1536U
106 #define MIN_FL_ENTRIES 32
/* Bitmask covering every possible port on the adapter. */
108 #define PORT_MASK ((1 << MAX_NPORTS) - 1)
/* Default netif message-enable bitmap (overridable via dflt_msg_enable). */
110 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
111 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
112 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
115 * The EEPROM is actually bigger but only the first few bytes are used so we
/* Only the first 32 bytes of the EEPROM are exposed through ethtool. */
118 #define EEPROM_SIZE 32
120 MODULE_DESCRIPTION(DRV_DESCRIPTION);
121 MODULE_AUTHOR("Chelsio Communications");
122 MODULE_LICENSE("GPL");
/* Module parameter: initial netif message level for every probed adapter. */
124 static int dflt_msg_enable = DFLT_MSG_ENABLE;
126 module_param(dflt_msg_enable, int, 0);
127 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 message enable bitmap");
/* PCI bus speed strings, indexed by the hardware-reported speed code. */
130 static const char pci_speed[][4] = {
131 "33", "66", "100", "133"
135 * Setup MAC to receive the types of packets we want.
/* net_device set_multicast_list hook: push the rx filter state to the MAC. */
137 static void t1_set_rxmode(struct net_device *dev)
139 struct adapter *adapter = dev->priv;
140 struct cmac *mac = adapter->port[dev->if_port].mac;
141 struct t1_rx_mode rm;
/* NOTE(review): initialization of rm's other fields is not visible in this
 * excerpt — confirm against the full source before relying on rm's state. */
145 rm.list = dev->mc_list;
146 mac->ops->set_rx_mode(mac, &rm);
/* Log the port's link state; on link-up include negotiated speed and duplex. */
149 static void link_report(struct port_info *p)
151 if (!netif_carrier_ok(p->dev))
152 printk(KERN_INFO "%s: link down\n", p->dev->name);
/* Default speed string; overridden by the known faster speeds below. */
154 const char *s = "10Mbps";
156 switch (p->link_config.speed) {
157 case SPEED_10000: s = "10Gbps"; break;
158 case SPEED_1000: s = "1000Mbps"; break;
159 case SPEED_100: s = "100Mbps"; break;
162 printk(KERN_INFO "%s: link up, %s, %s-duplex\n",
164 p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
/* Called when the PHY reports a link change: sync netif carrier state with
 * the new link status for the affected port. */
168 void t1_link_changed(struct adapter *adapter, int port_id, int link_stat,
169 int speed, int duplex, int pause)
171 struct port_info *p = &adapter->port[port_id];
173 if (link_stat != netif_carrier_ok(p->dev)) {
175 netif_carrier_on(p->dev);
177 netif_carrier_off(p->dev);
/* Bring a port's datapath up: reset the MAC, program its address and rx
 * mode, start PHY autonegotiation/link, then enable MAC TX and RX. */
183 static void link_start(struct port_info *p)
185 struct cmac *mac = p->mac;
187 mac->ops->reset(mac);
188 if (mac->ops->macaddress_set)
189 mac->ops->macaddress_set(mac, p->dev->dev_addr);
190 t1_set_rxmode(p->dev);
191 t1_link_start(p->phy, mac, &p->link_config);
192 mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
/* Enable TCP checksum offload in the TP; IP checksum offload too on
 * TSO-capable parts (needed only for TSO, per the inline comment). */
195 static void enable_hw_csum(struct adapter *adapter)
197 if (adapter->flags & TSO_CAPABLE)
198 t1_tp_set_ip_checksum_offload(adapter, 1); /* for TSO only */
199 t1_tp_set_tcp_checksum_offload(adapter, 1);
203 * Things to do upon first use of a card.
204 * This must run with the rtnl lock held.
/* One-time HW init on first open, then clear/enable interrupts, claim the
 * (shared) IRQ and start the SGE. Returns 0 or a negative errno. */
206 static int cxgb_up(struct adapter *adapter)
210 if (!(adapter->flags & FULL_INIT_DONE)) {
211 err = t1_init_hw_modules(adapter);
215 enable_hw_csum(adapter);
216 adapter->flags |= FULL_INIT_DONE;
219 t1_interrupts_clear(adapter);
/* t1_select_intr_handler() picks the ISR variant for this adapter. */
220 if ((err = request_irq(adapter->pdev->irq,
221 t1_select_intr_handler(adapter), IRQF_SHARED,
222 adapter->name, adapter))) {
225 t1_sge_start(adapter->sge);
226 t1_interrupts_enable(adapter);
232 * Release resources when all the ports have been stopped.
/* Reverse of cxgb_up(): stop the SGE, mask interrupts, release the IRQ. */
234 static void cxgb_down(struct adapter *adapter)
236 t1_sge_stop(adapter->sge);
237 t1_interrupts_disable(adapter);
238 free_irq(adapter->pdev->irq, adapter);
/* net_device open: the first open of any port brings the whole adapter up
 * (cxgb_up), then this port's link is started and its TX queue enabled. */
241 static int cxgb_open(struct net_device *dev)
244 struct adapter *adapter = dev->priv;
245 int other_ports = adapter->open_device_map & PORT_MASK;
247 if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
250 __set_bit(dev->if_port, &adapter->open_device_map);
251 link_start(&adapter->port[dev->if_port]);
252 netif_start_queue(dev);
/* First open: start periodic MAC statistics accumulation if configured. */
253 if (!other_ports && adapter->params.stats_update_period)
254 schedule_mac_stats_update(adapter,
255 adapter->params.stats_update_period);
/* net_device stop: quiesce this port (queue, MAC, carrier); when the last
 * port closes, stop the periodic stats update and tear the adapter down. */
259 static int cxgb_close(struct net_device *dev)
261 struct adapter *adapter = dev->priv;
262 struct port_info *p = &adapter->port[dev->if_port];
263 struct cmac *mac = p->mac;
265 netif_stop_queue(dev);
266 mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
267 netif_carrier_off(dev);
269 clear_bit(dev->if_port, &adapter->open_device_map);
270 if (adapter->params.stats_update_period &&
271 !(adapter->open_device_map & PORT_MASK)) {
272 /* Stop statistics accumulation. */
273 smp_mb__after_clear_bit();
/* Empty lock/unlock pair: waits for a concurrently running stats task to
 * finish before cancelling further updates. */
274 spin_lock(&adapter->work_lock); /* sync with update task */
275 spin_unlock(&adapter->work_lock);
276 cancel_mac_stats_update(adapter);
279 if (!adapter->open_device_map)
/* net_device get_stats hook: fold the MAC's hardware counters (full update)
 * into the per-port struct net_device_stats and return it. */
284 static struct net_device_stats *t1_get_stats(struct net_device *dev)
286 struct adapter *adapter = dev->priv;
287 struct port_info *p = &adapter->port[dev->if_port];
288 struct net_device_stats *ns = &p->netstats;
289 const struct cmac_statistics *pstats;
291 /* Do a full update of the MAC stats */
292 pstats = p->mac->ops->statistics_update(p->mac,
293 MAC_STATS_UPDATE_FULL);
295 ns->tx_packets = pstats->TxUnicastFramesOK +
296 pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK;
298 ns->rx_packets = pstats->RxUnicastFramesOK +
299 pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK;
301 ns->tx_bytes = pstats->TxOctetsOK;
302 ns->rx_bytes = pstats->RxOctetsOK;
304 ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors +
305 pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions;
306 ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors +
307 pstats->RxFCSErrors + pstats->RxAlignErrors +
308 pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors +
309 pstats->RxSymbolErrors + pstats->RxRuntErrors;
311 ns->multicast = pstats->RxMulticastFramesOK;
312 ns->collisions = pstats->TxTotalCollisions;
314 /* detailed rx_errors */
315 ns->rx_length_errors = pstats->RxFrameTooLongErrors +
316 pstats->RxJabberErrors;
317 ns->rx_over_errors = 0;
318 ns->rx_crc_errors = pstats->RxFCSErrors;
319 ns->rx_frame_errors = pstats->RxAlignErrors;
320 ns->rx_fifo_errors = 0;
321 ns->rx_missed_errors = 0;
323 /* detailed tx_errors */
324 ns->tx_aborted_errors = pstats->TxFramesAbortedDueToXSCollisions;
325 ns->tx_carrier_errors = 0;
326 ns->tx_fifo_errors = pstats->TxUnderrun;
327 ns->tx_heartbeat_errors = 0;
328 ns->tx_window_errors = pstats->TxLateCollisions;
/* ethtool get_msglevel: return the adapter's message-enable bitmap. */
332 static u32 get_msglevel(struct net_device *dev)
334 struct adapter *adapter = dev->priv;
336 return adapter->msg_enable;
/* ethtool set_msglevel: set the adapter's message-enable bitmap. */
339 static void set_msglevel(struct net_device *dev, u32 val)
341 struct adapter *adapter = dev->priv;
343 adapter->msg_enable = val;
/* ethtool statistics labels; entry order must match the *data++ sequence
 * emitted by get_stats() below.
 * NOTE(review): several entries appear to be missing from this excerpt —
 * verify the full table against get_stats() in the complete source. */
346 static char stats_strings[][ETH_GSTRING_LEN] = {
350 "TxMulticastFramesOK",
351 "TxBroadcastFramesOK",
353 "TxFramesWithDeferredXmissions",
356 "TxFramesAbortedDueToXSCollisions",
359 "TxInternalMACXmitError",
360 "TxFramesWithExcessiveDeferral",
366 "RxMulticastFramesOK",
367 "RxBroadcastFramesOK",
376 "RxInternalMACRcvError",
377 "RxInRangeLengthErrors",
378 "RxOutOfRangeLengthField",
379 "RxFrameTooLongErrors",
400 "espi_DIP2ParityErr",
/* Size in bytes of the register dump produced by get_regs(). */
408 #define T2_REGMAP_SIZE (3 * 1024)
/* ethtool get_regs_len: report the register-dump buffer size. */
410 static int get_regs_len(struct net_device *dev)
412 return T2_REGMAP_SIZE;
/* ethtool get_drvinfo: fill in driver name/version and PCI bus location;
 * this hardware exposes no firmware version ("N/A"). */
415 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
417 struct adapter *adapter = dev->priv;
419 strcpy(info->driver, DRV_NAME);
420 strcpy(info->version, DRV_VERSION);
421 strcpy(info->fw_version, "N/A");
422 strcpy(info->bus_info, pci_name(adapter->pdev));
/* ethtool get_stats_count: number of statistics entries we report. */
425 static int get_stats_count(struct net_device *dev)
427 return ARRAY_SIZE(stats_strings);
/* ethtool get_strings: copy out the statistics label table. */
430 static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
432 if (stringset == ETH_SS_STATS)
433 memcpy(data, stats_strings, sizeof(stats_strings));
/* ethtool get_ethtool_stats: emit MAC counters, per-port SGE stats and
 * SGE interrupt counters, in the order declared by stats_strings. */
436 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
439 struct adapter *adapter = dev->priv;
440 struct cmac *mac = adapter->port[dev->if_port].mac;
441 const struct cmac_statistics *s;
442 const struct sge_port_stats *ss;
443 const struct sge_intr_counts *t;
445 s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
446 ss = t1_sge_get_port_stats(adapter->sge, dev->if_port);
447 t = t1_sge_get_intr_counts(adapter->sge);
/* MAC transmit counters. */
449 *data++ = s->TxOctetsOK;
450 *data++ = s->TxOctetsBad;
451 *data++ = s->TxUnicastFramesOK;
452 *data++ = s->TxMulticastFramesOK;
453 *data++ = s->TxBroadcastFramesOK;
454 *data++ = s->TxPauseFrames;
455 *data++ = s->TxFramesWithDeferredXmissions;
456 *data++ = s->TxLateCollisions;
457 *data++ = s->TxTotalCollisions;
458 *data++ = s->TxFramesAbortedDueToXSCollisions;
459 *data++ = s->TxUnderrun;
460 *data++ = s->TxLengthErrors;
461 *data++ = s->TxInternalMACXmitError;
462 *data++ = s->TxFramesWithExcessiveDeferral;
463 *data++ = s->TxFCSErrors;
/* MAC receive counters. */
465 *data++ = s->RxOctetsOK;
466 *data++ = s->RxOctetsBad;
467 *data++ = s->RxUnicastFramesOK;
468 *data++ = s->RxMulticastFramesOK;
469 *data++ = s->RxBroadcastFramesOK;
470 *data++ = s->RxPauseFrames;
471 *data++ = s->RxFCSErrors;
472 *data++ = s->RxAlignErrors;
473 *data++ = s->RxSymbolErrors;
474 *data++ = s->RxDataErrors;
475 *data++ = s->RxSequenceErrors;
476 *data++ = s->RxRuntErrors;
477 *data++ = s->RxJabberErrors;
478 *data++ = s->RxInternalMACRcvError;
479 *data++ = s->RxInRangeLengthErrors;
480 *data++ = s->RxOutOfRangeLengthField;
481 *data++ = s->RxFrameTooLongErrors;
/* Per-port SGE statistics. */
484 *data++ = ss->vlan_xtract;
485 *data++ = ss->vlan_insert;
486 *data++ = ss->rx_cso_good;
487 *data++ = ss->tx_cso;
488 *data++ = ss->rx_drops;
/* SGE interrupt counters (widened to u64 to match the data array). */
490 *data++ = (u64)t->respQ_empty;
491 *data++ = (u64)t->respQ_overflow;
492 *data++ = (u64)t->freelistQ_empty;
493 *data++ = (u64)t->pkt_too_big;
494 *data++ = (u64)t->pkt_mismatch;
495 *data++ = (u64)t->cmdQ_full[0];
496 *data++ = (u64)t->cmdQ_full[1];
497 *data++ = (u64)t->tx_ipfrags;
498 *data++ = (u64)t->tx_reg_pkts;
499 *data++ = (u64)t->tx_lso_pkts;
500 *data++ = (u64)t->tx_do_cksum;
/* Read registers [start, end] (inclusive) from MMIO into buf at offset
 * 'start'. Note: arithmetic on the void* buf is a GCC extension. */
503 static inline void reg_block_dump(struct adapter *ap, void *buf,
504 unsigned int start, unsigned int end)
506 u32 *p = buf + start;
508 for ( ; start <= end; start += sizeof(u32))
509 *p++ = readl(ap->regs + start);
/* ethtool get_regs: zero the dump buffer then capture the SG register
 * block up to A_SG_RESPACCUTIMER. */
512 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
515 struct adapter *ap = dev->priv;
518 * Version scheme: bits 0..9: chip version, bits 10..15: chip revision
522 memset(buf, 0, T2_REGMAP_SIZE);
523 reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER);
/* ethtool get_settings: report supported/advertised link modes; actual
 * speed/duplex only when the carrier is up. */
526 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
528 struct adapter *adapter = dev->priv;
529 struct port_info *p = &adapter->port[dev->if_port];
531 cmd->supported = p->link_config.supported;
532 cmd->advertising = p->link_config.advertising;
534 if (netif_carrier_ok(dev)) {
535 cmd->speed = p->link_config.speed;
536 cmd->duplex = p->link_config.duplex;
542 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
543 cmd->phy_address = p->phy->addr;
544 cmd->transceiver = XCVR_EXTERNAL;
545 cmd->autoneg = p->link_config.autoneg;
/* Map a (speed, duplex) pair to the matching SUPPORTED_* capability bit.
 * NOTE(review): the switch/case framing over 'speed' is not visible in this
 * excerpt — the groups below correspond to 10/100/1000/10000 Mbps. */
551 static int speed_duplex_to_caps(int speed, int duplex)
557 if (duplex == DUPLEX_FULL)
558 cap = SUPPORTED_10baseT_Full;
560 cap = SUPPORTED_10baseT_Half;
563 if (duplex == DUPLEX_FULL)
564 cap = SUPPORTED_100baseT_Full;
566 cap = SUPPORTED_100baseT_Half;
569 if (duplex == DUPLEX_FULL)
570 cap = SUPPORTED_1000baseT_Full;
572 cap = SUPPORTED_1000baseT_Half;
575 if (duplex == DUPLEX_FULL)
576 cap = SUPPORTED_10000baseT_Full;
/* Union of every speed/duplex advertising bit this driver understands. */
581 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
582 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
583 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
584 ADVERTISED_10000baseT_Full)
/* ethtool set_settings: with autoneg disabled, force a supported
 * speed/duplex; with autoneg enabled, sanitize the advertising mask.
 * Restarts the link immediately if the interface is running. */
586 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
588 struct adapter *adapter = dev->priv;
589 struct port_info *p = &adapter->port[dev->if_port];
590 struct link_config *lc = &p->link_config;
592 if (!(lc->supported & SUPPORTED_Autoneg))
593 return -EOPNOTSUPP; /* can't change speed/duplex */
595 if (cmd->autoneg == AUTONEG_DISABLE) {
596 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
/* Forcing 1Gbps is rejected here (along with unsupported modes). */
598 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
600 lc->requested_speed = cmd->speed;
601 lc->requested_duplex = cmd->duplex;
604 cmd->advertising &= ADVERTISED_MASK;
/* More than one bit set means "advertise everything we support". */
605 if (cmd->advertising & (cmd->advertising - 1))
606 cmd->advertising = lc->supported;
607 cmd->advertising &= lc->supported;
608 if (!cmd->advertising)
610 lc->requested_speed = SPEED_INVALID;
611 lc->requested_duplex = DUPLEX_INVALID;
612 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
614 lc->autoneg = cmd->autoneg;
615 if (netif_running(dev))
616 t1_link_start(p->phy, p->mac, lc);
/* ethtool get_pauseparam: report pause autoneg and RX/TX pause state. */
620 static void get_pauseparam(struct net_device *dev,
621 struct ethtool_pauseparam *epause)
623 struct adapter *adapter = dev->priv;
624 struct port_info *p = &adapter->port[dev->if_port];
626 epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
627 epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
628 epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
/* ethtool set_pauseparam: record the requested flow-control mode; apply it
 * via link restart (autoneg) or directly on the MAC (forced). */
631 static int set_pauseparam(struct net_device *dev,
632 struct ethtool_pauseparam *epause)
634 struct adapter *adapter = dev->priv;
635 struct port_info *p = &adapter->port[dev->if_port];
636 struct link_config *lc = &p->link_config;
638 if (epause->autoneg == AUTONEG_DISABLE)
639 lc->requested_fc = 0;
640 else if (lc->supported & SUPPORTED_Autoneg)
641 lc->requested_fc = PAUSE_AUTONEG;
645 if (epause->rx_pause)
646 lc->requested_fc |= PAUSE_RX;
647 if (epause->tx_pause)
648 lc->requested_fc |= PAUSE_TX;
649 if (lc->autoneg == AUTONEG_ENABLE) {
650 if (netif_running(dev))
651 t1_link_start(p->phy, p->mac, lc);
653 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
654 if (netif_running(dev))
/* -1 speed/duplex means "leave unchanged"; only fc is updated. */
655 p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1,
/* ethtool get_rx_csum: report whether RX checksum offload is enabled. */
661 static u32 get_rx_csum(struct net_device *dev)
663 struct adapter *adapter = dev->priv;
665 return (adapter->flags & RX_CSUM_ENABLED) != 0;
/* ethtool set_rx_csum: toggle the RX_CSUM_ENABLED adapter flag. */
668 static int set_rx_csum(struct net_device *dev, u32 data)
670 struct adapter *adapter = dev->priv;
673 adapter->flags |= RX_CSUM_ENABLED;
675 adapter->flags &= ~RX_CSUM_ENABLED;
/* ethtool set_tso: allow enabling TSO only on TSO-capable adapters;
 * disabling is always a no-op success on incapable hardware. */
679 static int set_tso(struct net_device *dev, u32 value)
681 struct adapter *adapter = dev->priv;
683 if (!(adapter->flags & TSO_CAPABLE))
684 return value ? -EOPNOTSUPP : 0;
685 return ethtool_op_set_tso(dev, value);
/* ethtool get_ringparam: report SGE free-list and command-queue sizes.
 * T1B uses free list 1 for jumbo frames, other chips free list 0. */
688 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
690 struct adapter *adapter = dev->priv;
691 int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
693 e->rx_max_pending = MAX_RX_BUFFERS;
694 e->rx_mini_max_pending = 0;
695 e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
696 e->tx_max_pending = MAX_CMDQ_ENTRIES;
698 e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl];
699 e->rx_mini_pending = 0;
700 e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl];
701 e->tx_pending = adapter->params.sge.cmdQ_size[0];
/* ethtool set_ringparam: validate and store new SGE ring sizes. Rejected
 * once the hardware is fully initialized (FULL_INIT_DONE) since the rings
 * can no longer be resized. */
704 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
706 struct adapter *adapter = dev->priv;
707 int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
709 if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending ||
710 e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
711 e->tx_pending > MAX_CMDQ_ENTRIES ||
712 e->rx_pending < MIN_FL_ENTRIES ||
713 e->rx_jumbo_pending < MIN_FL_ENTRIES ||
/* TX ring must hold at least one max-fragmented packet per port (+1). */
714 e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1))
717 if (adapter->flags & FULL_INIT_DONE)
720 adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
721 adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
722 adapter->params.sge.cmdQ_size[0] = e->tx_pending;
723 adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ?
724 MAX_CMDQ1_ENTRIES : e->tx_pending;
/* ethtool set_coalesce: configure SGE interrupt coalescing; the polling
 * (NAPI) vs. interrupt choice may only change while no port is open. */
728 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
730 struct adapter *adapter = dev->priv;
733 * If RX coalescing is requested we use NAPI, otherwise interrupts.
734 * This choice can be made only when all ports and the TOE are off.
736 if (adapter->open_device_map == 0)
737 adapter->params.sge.polling = c->use_adaptive_rx_coalesce;
739 if (adapter->params.sge.polling) {
740 adapter->params.sge.rx_coalesce_usecs = 0;
742 adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
744 adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
745 adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
746 t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
/* ethtool get_coalesce: report the current SGE coalescing parameters. */
750 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
752 struct adapter *adapter = dev->priv;
754 c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs;
755 c->rate_sample_interval = adapter->params.sge.sample_interval_usecs;
756 c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable;
/* ethtool get_eeprom_len; NOTE(review): the return statement (presumably
 * EEPROM_SIZE) is not visible in this excerpt — confirm in full source. */
760 static int get_eeprom_len(struct net_device *dev)
/* Magic value identifying EEPROM dumps: Chelsio vendor ID + chip version. */
765 #define EEPROM_MAGIC(ap) \
766 (PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16))
/* ethtool get_eeprom: read the requested range word-by-word (4-byte
 * aligned) into a bounce buffer, then copy out exactly [offset, offset+len). */
768 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
772 u8 buf[EEPROM_SIZE] __attribute__((aligned(4)));
773 struct adapter *adapter = dev->priv;
775 e->magic = EEPROM_MAGIC(adapter);
776 for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32))
777 t1_seeprom_read(adapter, i, (u32 *)&buf[i]);
778 memcpy(data, buf + e->offset, e->len);
/* ethtool operations table; mixes driver-specific handlers with generic
 * ethtool_op_* helpers for TX csum, SG, link and TSO queries. */
782 static struct ethtool_ops t1_ethtool_ops = {
783 .get_settings = get_settings,
784 .set_settings = set_settings,
785 .get_drvinfo = get_drvinfo,
786 .get_msglevel = get_msglevel,
787 .set_msglevel = set_msglevel,
788 .get_ringparam = get_sge_param,
789 .set_ringparam = set_sge_param,
790 .get_coalesce = get_coalesce,
791 .set_coalesce = set_coalesce,
792 .get_eeprom_len = get_eeprom_len,
793 .get_eeprom = get_eeprom,
794 .get_pauseparam = get_pauseparam,
795 .set_pauseparam = set_pauseparam,
796 .get_rx_csum = get_rx_csum,
797 .set_rx_csum = set_rx_csum,
798 .get_tx_csum = ethtool_op_get_tx_csum,
799 .set_tx_csum = ethtool_op_set_tx_csum,
800 .get_sg = ethtool_op_get_sg,
801 .set_sg = ethtool_op_set_sg,
802 .get_link = ethtool_op_get_link,
803 .get_strings = get_strings,
804 .get_stats_count = get_stats_count,
805 .get_ethtool_stats = get_stats,
806 .get_regs_len = get_regs_len,
807 .get_regs = get_regs,
808 .get_tso = ethtool_op_get_tso,
/* Remove this adapter's entry from the given /proc directory. */
812 static void cxgb_proc_cleanup(struct adapter *adapter,
813 struct proc_dir_entry *dir)
816 name = adapter->name;
817 remove_proc_entry(name, dir);
/* TOE-related hooks stubbed out to no-ops in this (non-TOE) build. */
819 //#define chtoe_setup_toedev(adapter) NULL
820 #define update_mtu_tab(adapter)
821 #define write_smt_entry(adapter, idx)
/* MII ioctl handler (reads/writes PHY registers through the port's cphy).
 * NOTE(review): the switch over 'cmd' (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG
 * style cases) is not visible in this excerpt — confirm in full source. */
823 static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
825 struct adapter *adapter = dev->priv;
826 struct mii_ioctl_data *data = if_mii(req);
830 data->phy_id = adapter->port[dev->if_port].phy->addr;
833 struct cphy *phy = adapter->port[dev->if_port].phy;
838 phy->mdio_read(adapter, data->phy_id, 0, data->reg_num & 0x1f,
/* Writes require CAP_NET_ADMIN and a PHY that supports mdio_write. */
844 struct cphy *phy = adapter->port[dev->if_port].phy;
846 if (!capable(CAP_NET_ADMIN))
848 if (!phy->mdio_write)
850 phy->mdio_write(adapter, data->phy_id, 0, data->reg_num & 0x1f,
/* net_device change_mtu: delegate to the MAC's set_mtu op; fails if the
 * MAC does not support MTU changes. */
861 static int t1_change_mtu(struct net_device *dev, int new_mtu)
864 struct adapter *adapter = dev->priv;
865 struct cmac *mac = adapter->port[dev->if_port].mac;
867 if (!mac->ops->set_mtu)
871 if ((ret = mac->ops->set_mtu(mac, new_mtu)))
/* net_device set_mac_address: copy the new address into dev_addr and
 * program it into the MAC (if the MAC supports address changes). */
877 static int t1_set_mac_addr(struct net_device *dev, void *p)
879 struct adapter *adapter = dev->priv;
880 struct cmac *mac = adapter->port[dev->if_port].mac;
881 struct sockaddr *addr = p;
883 if (!mac->ops->macaddress_set)
886 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
887 mac->ops->macaddress_set(mac, dev->dev_addr);
891 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
/* Install/replace the VLAN group and toggle HW VLAN acceleration to match;
 * async_lock serializes against the slow interrupt path. */
892 static void vlan_rx_register(struct net_device *dev,
893 struct vlan_group *grp)
895 struct adapter *adapter = dev->priv;
897 spin_lock_irq(&adapter->async_lock);
898 adapter->vlan_grp = grp;
899 t1_set_vlan_accel(adapter, grp != NULL);
900 spin_unlock_irq(&adapter->async_lock);
/* Drop the per-VID device pointer when a VLAN id is unregistered. */
903 static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
905 struct adapter *adapter = dev->priv;
907 spin_lock_irq(&adapter->async_lock);
908 if (adapter->vlan_grp)
909 adapter->vlan_grp->vlan_devices[vid] = NULL;
910 spin_unlock_irq(&adapter->async_lock);
914 #ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: invoke the selected interrupt handler directly with local
 * interrupts disabled (for netconsole and similar users). */
915 static void t1_netpoll(struct net_device *dev)
918 struct adapter *adapter = dev->priv;
920 local_irq_save(flags);
921 t1_select_intr_handler(adapter)(adapter->pdev->irq, adapter, NULL);
922 local_irq_restore(flags);
927 * Periodic accumulation of MAC statistics. This is used only if the MAC
928 * does not have any other way to prevent stats counter overflow.
/* Work-queue body: fast-update stats for every running port, then re-arm
 * itself under work_lock if any port is still open. */
930 static void mac_stats_task(void *data)
933 struct adapter *adapter = data;
935 for_each_port(adapter, i) {
936 struct port_info *p = &adapter->port[i];
938 if (netif_running(p->dev))
939 p->mac->ops->statistics_update(p->mac,
940 MAC_STATS_UPDATE_FAST);
943 /* Schedule the next statistics update if any port is active. */
944 spin_lock(&adapter->work_lock);
945 if (adapter->open_device_map & PORT_MASK)
946 schedule_mac_stats_update(adapter,
947 adapter->params.stats_update_period);
948 spin_unlock(&adapter->work_lock);
952 * Processes elmer0 external interrupts in process context.
/* Work-queue body: service the external interrupt, then ack and re-enable
 * the EXT interrupt source that t1_elmer0_ext_intr() masked off. */
954 static void ext_intr_task(void *data)
956 struct adapter *adapter = data;
958 elmer0_ext_intr_handler(adapter);
960 /* Now reenable external interrupts */
961 spin_lock_irq(&adapter->async_lock);
962 adapter->slow_intr_mask |= F_PL_INTR_EXT;
963 writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
964 writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
965 adapter->regs + A_PL_ENABLE);
966 spin_unlock_irq(&adapter->async_lock);
970 * Interrupt-context handler for elmer0 external interrupts.
/* Mask EXT interrupts and hand off to ext_intr_task, which needs process
 * context and will re-enable them when done. */
972 void t1_elmer0_ext_intr(struct adapter *adapter)
975 * Schedule a task to handle external interrupts as we require
976 * a process context. We disable EXT interrupts in the interim
977 * and let the task reenable them when it's done.
979 adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
980 writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
981 adapter->regs + A_PL_ENABLE);
982 schedule_work(&adapter->ext_intr_handler_task);
/* Fatal-error path: stop the SGE and mask all interrupts (if the HW was
 * initialized), then log an alert. The adapter is left suspended. */
985 void t1_fatal_err(struct adapter *adapter)
987 if (adapter->flags & FULL_INIT_DONE) {
988 t1_sge_stop(adapter->sge);
989 t1_interrupts_disable(adapter);
991 CH_ALERT("%s: encountered fatal error, operation suspended\n",
/* PCI probe: enable the device, pick a DMA mask, map BAR0, allocate one
 * net_device per port (the first one also embeds the adapter), wire up all
 * netdev/ethtool hooks, init software modules, and register the netdevs.
 * Partial registration is tolerated as long as at least one port works. */
995 static int __devinit init_one(struct pci_dev *pdev,
996 const struct pci_device_id *ent)
998 static int version_printed;
1000 int i, err, pci_using_dac = 0;
1001 unsigned long mmio_start, mmio_len;
1002 const struct board_info *bi;
1003 struct adapter *adapter = NULL;
1004 struct port_info *pi;
1006 if (!version_printed) {
1007 printk(KERN_INFO "%s - version %s\n", DRV_DESCRIPTION,
1012 err = pci_enable_device(pdev);
1016 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1017 CH_ERR("%s: cannot find PCI device memory base address\n",
1020 goto out_disable_pdev;
/* Prefer 64-bit DMA; fall back to 32-bit, or bail if neither works. */
1023 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
1026 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
/* NOTE(review): adjacent string literals below concatenate without a
 * space ("...DMA forconsistent...") — message bug in the original. */
1027 CH_ERR("%s: unable to obtain 64-bit DMA for"
1028 "consistent allocations\n", pci_name(pdev));
1030 goto out_disable_pdev;
1033 } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
1034 CH_ERR("%s: no usable DMA configuration\n", pci_name(pdev));
1035 goto out_disable_pdev;
1038 err = pci_request_regions(pdev, DRV_NAME);
1040 CH_ERR("%s: cannot obtain PCI resources\n", pci_name(pdev));
1041 goto out_disable_pdev;
1044 pci_set_master(pdev);
1046 mmio_start = pci_resource_start(pdev, 0);
1047 mmio_len = pci_resource_len(pdev, 0);
1048 bi = t1_get_board_info(ent->driver_data);
/* One net_device per port; the adapter struct rides along with port 0. */
1050 for (i = 0; i < bi->port_number; ++i) {
1051 struct net_device *netdev;
1053 netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter));
1059 SET_MODULE_OWNER(netdev);
1060 SET_NETDEV_DEV(netdev, &pdev->dev);
1063 adapter = netdev->priv;
1064 adapter->pdev = pdev;
1065 adapter->port[0].dev = netdev; /* so we don't leak it */
1067 adapter->regs = ioremap(mmio_start, mmio_len);
1068 if (!adapter->regs) {
1069 CH_ERR("%s: cannot map device registers\n",
1075 if (t1_get_board_rev(adapter, bi, &adapter->params)) {
1076 err = -ENODEV; /* Can't handle this chip rev */
1080 adapter->name = pci_name(pdev);
1081 adapter->msg_enable = dflt_msg_enable;
1082 adapter->mmio_len = mmio_len;
1084 init_MUTEX(&adapter->mib_mutex);
1085 spin_lock_init(&adapter->tpi_lock);
1086 spin_lock_init(&adapter->work_lock);
1087 spin_lock_init(&adapter->async_lock);
1089 INIT_WORK(&adapter->ext_intr_handler_task,
1090 ext_intr_task, adapter);
1091 INIT_WORK(&adapter->stats_update_task, mac_stats_task,
1094 init_timer(&adapter->stats_update_timer);
1095 adapter->stats_update_timer.function = mac_stats_timer;
1096 adapter->stats_update_timer.data =
1097 (unsigned long)adapter;
1100 pci_set_drvdata(pdev, netdev);
1103 pi = &adapter->port[i];
/* Per-port netdev setup: hooks, features, and offload capabilities. */
1105 netif_carrier_off(netdev);
1106 netdev->irq = pdev->irq;
1107 netdev->if_port = i;
1108 netdev->mem_start = mmio_start;
1109 netdev->mem_end = mmio_start + mmio_len - 1;
1110 netdev->priv = adapter;
1111 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
1112 netdev->features |= NETIF_F_LLTX;
1114 adapter->flags |= RX_CSUM_ENABLED | TCP_CSUM_CAPABLE;
1116 netdev->features |= NETIF_F_HIGHDMA;
1117 if (vlan_tso_capable(adapter)) {
1118 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
1119 adapter->flags |= VLAN_ACCEL_CAPABLE;
1121 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1122 netdev->vlan_rx_register = vlan_rx_register;
1123 netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;
1125 adapter->flags |= TSO_CAPABLE;
1126 netdev->features |= NETIF_F_TSO;
1129 netdev->open = cxgb_open;
1130 netdev->stop = cxgb_close;
1131 netdev->hard_start_xmit = t1_start_xmit;
/* Reserve headroom for the CPL header prepended to each TX packet. */
1132 netdev->hard_header_len += (adapter->flags & TSO_CAPABLE) ?
1133 sizeof(struct cpl_tx_pkt_lso) :
1134 sizeof(struct cpl_tx_pkt);
1135 netdev->get_stats = t1_get_stats;
1136 netdev->set_multicast_list = t1_set_rxmode;
1137 netdev->do_ioctl = t1_ioctl;
1138 netdev->change_mtu = t1_change_mtu;
1139 netdev->set_mac_address = t1_set_mac_addr;
1140 #ifdef CONFIG_NET_POLL_CONTROLLER
1141 netdev->poll_controller = t1_netpoll;
1143 netdev->weight = 64;
1145 SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
1148 if (t1_init_sw_modules(adapter, bi) < 0) {
1154 * The card is now ready to go. If any errors occur during device
1155 * registration we do not fail the whole card but rather proceed only
1156 * with the ports we manage to register successfully. However we must
1157 * register at least one net device.
1159 for (i = 0; i < bi->port_number; ++i) {
1160 err = register_netdev(adapter->port[i].dev);
1162 CH_WARN("%s: cannot register net device %s, skipping\n",
1163 pci_name(pdev), adapter->port[i].dev->name);
1166 * Change the name we use for messages to the name of
1167 * the first successfully registered interface.
1169 if (!adapter->registered_device_map)
1170 adapter->name = adapter->port[i].dev->name;
1172 __set_bit(i, &adapter->registered_device_map);
1175 if (!adapter->registered_device_map) {
1176 CH_ERR("%s: could not register any net devices\n",
1178 goto out_release_adapter_res;
1181 printk(KERN_INFO "%s: %s (rev %d), %s %dMHz/%d-bit\n", adapter->name,
1182 bi->desc, adapter->params.chip_revision,
1183 adapter->params.pci.is_pcix ? "PCIX" : "PCI",
1184 adapter->params.pci.speed, adapter->params.pci.width);
/* Error unwind: free in reverse order of acquisition. */
1187 out_release_adapter_res:
1188 t1_free_sw_modules(adapter);
1191 if (adapter->regs) iounmap(adapter->regs);
1192 for (i = bi->port_number - 1; i >= 0; --i)
1193 if (adapter->port[i].dev) {
1194 cxgb_proc_cleanup(adapter, proc_root_driver);
1195 kfree(adapter->port[i].dev);
1198 pci_release_regions(pdev);
1200 pci_disable_device(pdev);
1201 pci_set_drvdata(pdev, NULL);
/* Soft-reset the chip by writing 3 then 0 to the PCI PM control/status
 * register (bounce through the low-power state and back). */
1205 static inline void t1_sw_reset(struct pci_dev *pdev)
1207 pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
1208 pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
/* PCI remove: unregister every successfully registered port, tear down
 * software modules, unmap registers, free the netdevs and release PCI
 * resources — the reverse of init_one(). */
1211 static void __devexit remove_one(struct pci_dev *pdev)
1213 struct net_device *dev = pci_get_drvdata(pdev);
1217 struct adapter *adapter = dev->priv;
1219 for_each_port(adapter, i)
1220 if (test_bit(i, &adapter->registered_device_map))
1221 unregister_netdev(adapter->port[i].dev);
1223 t1_free_sw_modules(adapter);
1224 iounmap(adapter->regs);
1226 if (adapter->port[i].dev) {
1227 cxgb_proc_cleanup(adapter, proc_root_driver);
1228 kfree(adapter->port[i].dev);
1230 pci_release_regions(pdev);
1231 pci_disable_device(pdev);
1232 pci_set_drvdata(pdev, NULL);
/* PCI driver registration table.
 * NOTE(review): .name and .probe initializers are not visible in this
 * excerpt — confirm against full source. */
1237 static struct pci_driver driver = {
1239 .id_table = t1_pci_tbl,
1241 .remove = __devexit_p(remove_one),
/* Module entry point: register the PCI driver. */
1244 static int __init t1_init_module(void)
1246 return pci_register_driver(&driver);
/* Module exit point: unregister the PCI driver. */
1249 static void __exit t1_cleanup_module(void)
1251 pci_unregister_driver(&driver);
1254 module_init(t1_init_module);
1255 module_exit(t1_cleanup_module);