2 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 #include <linux/module.h>
33 #include <linux/moduleparam.h>
34 #include <linux/init.h>
35 #include <linux/pci.h>
36 #include <linux/dma-mapping.h>
37 #include <linux/netdevice.h>
38 #include <linux/etherdevice.h>
39 #include <linux/if_vlan.h>
40 #include <linux/mdio.h>
41 #include <linux/sockios.h>
42 #include <linux/workqueue.h>
43 #include <linux/proc_fs.h>
44 #include <linux/rtnetlink.h>
45 #include <linux/firmware.h>
46 #include <linux/log2.h>
47 #include <asm/uaccess.h>
50 #include "cxgb3_ioctl.h"
52 #include "cxgb3_offload.h"
55 #include "cxgb3_ctl_defs.h"
57 #include "firmware_exports.h"
60 MAX_TXQ_ENTRIES = 16384,
61 MAX_CTRL_TXQ_ENTRIES = 1024,
62 MAX_RSPQ_ENTRIES = 16384,
63 MAX_RX_BUFFERS = 16384,
64 MAX_RX_JUMBO_BUFFERS = 16384,
66 MIN_CTRL_TXQ_ENTRIES = 4,
67 MIN_RSPQ_ENTRIES = 32,
71 #define PORT_MASK ((1 << MAX_NPORTS) - 1)
73 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
74 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
75 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
77 #define EEPROM_MAGIC 0x38E2F10C
79 #define CH_DEVICE(devid, idx) \
80 { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
82 static const struct pci_device_id cxgb3_pci_tbl[] = {
83 CH_DEVICE(0x20, 0), /* PE9000 */
84 CH_DEVICE(0x21, 1), /* T302E */
85 CH_DEVICE(0x22, 2), /* T310E */
86 CH_DEVICE(0x23, 3), /* T320X */
87 CH_DEVICE(0x24, 1), /* T302X */
88 CH_DEVICE(0x25, 3), /* T320E */
89 CH_DEVICE(0x26, 2), /* T310X */
90 CH_DEVICE(0x30, 2), /* T3B10 */
91 CH_DEVICE(0x31, 3), /* T3B20 */
92 CH_DEVICE(0x32, 1), /* T3B02 */
93 CH_DEVICE(0x35, 6), /* T3C20-derived T3C10 */
94 CH_DEVICE(0x36, 3), /* S320E-CR */
95 CH_DEVICE(0x37, 7), /* N320E-G2 */
99 MODULE_DESCRIPTION(DRV_DESC);
100 MODULE_AUTHOR("Chelsio Communications");
101 MODULE_LICENSE("Dual BSD/GPL");
102 MODULE_VERSION(DRV_VERSION);
103 MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
105 static int dflt_msg_enable = DFLT_MSG_ENABLE;
107 module_param(dflt_msg_enable, int, 0644);
108 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
111 * The driver uses the best interrupt scheme available on a platform in the
112 * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
113 * of these schemes the driver may consider as follows:
115 * msi = 2: choose from among all three options
116 * msi = 1: only consider MSI and pin interrupts
117 * msi = 0: force pin interrupts
121 module_param(msi, int, 0644);
122 MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
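/*
 * Illustrative usage (assumed module-load syntax, not from this file):
 * loading with "modprobe cxgb3 msi=1" restricts the driver to MSI or legacy
 * pin interrupts, while "msi=0" forces pin interrupts only.
 */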
125 * The driver enables offload as a default.
126 * To disable it, use ofld_disable = 1.
129 static int ofld_disable = 0;
131 module_param(ofld_disable, int, 0644);
132 MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
135 * We have work elements that we need to cancel when an interface is taken
136 * down. Normally the work elements would be executed by keventd but that
137 * can deadlock because of linkwatch. If our close method takes the rtnl
138 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
139 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
140 * for our work to complete. Get our own work queue to solve this.
142 static struct workqueue_struct *cxgb3_wq;
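/*
 * Sketch of how the private queue is used (illustrative; the actual work
 * items live elsewhere in this driver, and "some_task" below is only a
 * placeholder name): instead of schedule_work(), callers do
 *
 *	queue_work(cxgb3_wq, &adapter->some_task);
 *
 * so a task that needs the rtnl lock is never stuck behind linkwatch on
 * keventd while our close path holds that lock.
 */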
145 * link_report - show link status and link speed/duplex
146 * @dev: the port whose settings are to be reported
148 * Shows the link status, speed, and duplex of a port.
150 static void link_report(struct net_device *dev)
152 if (!netif_carrier_ok(dev))
153 printk(KERN_INFO "%s: link down\n", dev->name);
155 const char *s = "10Mbps";
156 const struct port_info *p = netdev_priv(dev);
158 switch (p->link_config.speed) {
170 printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
171 p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
175 void t3_os_link_fault(struct adapter *adap, int port_id, int state)
177 struct net_device *dev = adap->port[port_id];
178 struct port_info *pi = netdev_priv(dev);
180 if (state == netif_carrier_ok(dev))
184 struct cmac *mac = &pi->mac;
186 netif_carrier_on(dev);
188 /* Clear local faults */
189 t3_xgm_intr_disable(adap, pi->port_id);
190 t3_read_reg(adap, A_XGM_INT_STATUS +
193 A_XGM_INT_CAUSE + pi->mac.offset,
196 t3_set_reg_field(adap,
199 F_XGM_INT, F_XGM_INT);
200 t3_xgm_intr_enable(adap, pi->port_id);
202 t3_mac_enable(mac, MAC_DIRECTION_TX);
204 netif_carrier_off(dev);
210 * t3_os_link_changed - handle link status changes
211 * @adapter: the adapter associated with the link change
212 * @port_id: the port index whose link status has changed
213 * @link_stat: the new status of the link
214 * @speed: the new speed setting
215 * @duplex: the new duplex setting
216 * @pause: the new flow-control setting
218 * This is the OS-dependent handler for link status changes. The OS
219 * neutral handler takes care of most of the processing for these events,
220 * then calls this handler for any OS-specific processing.
222 void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
223 int speed, int duplex, int pause)
225 struct net_device *dev = adapter->port[port_id];
226 struct port_info *pi = netdev_priv(dev);
227 struct cmac *mac = &pi->mac;
229 /* Skip changes from disabled ports. */
230 if (!netif_running(dev))
233 if (link_stat != netif_carrier_ok(dev)) {
235 t3_mac_enable(mac, MAC_DIRECTION_RX);
237 /* Clear local faults */
238 t3_xgm_intr_disable(adapter, pi->port_id);
239 t3_read_reg(adapter, A_XGM_INT_STATUS +
241 t3_write_reg(adapter,
242 A_XGM_INT_CAUSE + pi->mac.offset,
245 t3_set_reg_field(adapter,
246 A_XGM_INT_ENABLE + pi->mac.offset,
247 F_XGM_INT, F_XGM_INT);
248 t3_xgm_intr_enable(adapter, pi->port_id);
250 netif_carrier_on(dev);
252 netif_carrier_off(dev);
254 t3_xgm_intr_disable(adapter, pi->port_id);
255 t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
256 t3_set_reg_field(adapter,
257 A_XGM_INT_ENABLE + pi->mac.offset,
261 pi->phy.ops->power_down(&pi->phy, 1);
263 t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
264 t3_mac_disable(mac, MAC_DIRECTION_RX);
265 t3_link_start(&pi->phy, mac, &pi->link_config);
273 * t3_os_phymod_changed - handle PHY module changes
274 * @adap: the adapter the PHY belongs to
275 * @port_id: the port index whose PHY module changed
277 * This is the OS-dependent handler for PHY module changes. It is
278 * invoked when a PHY module is removed or inserted for any OS-specific processing.
281 void t3_os_phymod_changed(struct adapter *adap, int port_id)
283 static const char *mod_str[] = {
284 NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
287 const struct net_device *dev = adap->port[port_id];
288 const struct port_info *pi = netdev_priv(dev);
290 if (pi->phy.modtype == phy_modtype_none)
291 printk(KERN_INFO "%s: PHY module unplugged\n", dev->name);
293 printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name,
294 mod_str[pi->phy.modtype]);
297 static void cxgb_set_rxmode(struct net_device *dev)
299 struct t3_rx_mode rm;
300 struct port_info *pi = netdev_priv(dev);
302 init_rx_mode(&rm, dev, dev->mc_list);
303 t3_mac_set_rx_mode(&pi->mac, &rm);
307 * link_start - enable a port
308 * @dev: the device to enable
310 * Performs the MAC and PHY actions needed to enable a port.
312 static void link_start(struct net_device *dev)
314 struct t3_rx_mode rm;
315 struct port_info *pi = netdev_priv(dev);
316 struct cmac *mac = &pi->mac;
318 init_rx_mode(&rm, dev, dev->mc_list);
320 t3_mac_set_mtu(mac, dev->mtu);
321 t3_mac_set_address(mac, 0, dev->dev_addr);
322 t3_mac_set_rx_mode(mac, &rm);
323 t3_link_start(&pi->phy, mac, &pi->link_config);
324 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
327 static inline void cxgb_disable_msi(struct adapter *adapter)
329 if (adapter->flags & USING_MSIX) {
330 pci_disable_msix(adapter->pdev);
331 adapter->flags &= ~USING_MSIX;
332 } else if (adapter->flags & USING_MSI) {
333 pci_disable_msi(adapter->pdev);
334 adapter->flags &= ~USING_MSI;
339 * Interrupt handler for asynchronous events used with MSI-X.
341 static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
343 t3_slow_intr_handler(cookie);
348 * Name the MSI-X interrupts.
350 static void name_msix_vecs(struct adapter *adap)
352 int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
354 snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
355 adap->msix_info[0].desc[n] = 0;
357 for_each_port(adap, j) {
358 struct net_device *d = adap->port[j];
359 const struct port_info *pi = netdev_priv(d);
361 for (i = 0; i < pi->nqsets; i++, msi_idx++) {
362 snprintf(adap->msix_info[msi_idx].desc, n,
363 "%s-%d", d->name, pi->first_qset + i);
364 adap->msix_info[msi_idx].desc[n] = 0;
369 static int request_msix_data_irqs(struct adapter *adap)
371 int i, j, err, qidx = 0;
373 for_each_port(adap, i) {
374 int nqsets = adap2pinfo(adap, i)->nqsets;
376 for (j = 0; j < nqsets; ++j) {
377 err = request_irq(adap->msix_info[qidx + 1].vec,
378 t3_intr_handler(adap,
381 adap->msix_info[qidx + 1].desc,
382 &adap->sge.qs[qidx]);
385 free_irq(adap->msix_info[qidx + 1].vec,
386 &adap->sge.qs[qidx]);
395 static void free_irq_resources(struct adapter *adapter)
397 if (adapter->flags & USING_MSIX) {
400 free_irq(adapter->msix_info[0].vec, adapter);
401 for_each_port(adapter, i)
402 n += adap2pinfo(adapter, i)->nqsets;
404 for (i = 0; i < n; ++i)
405 free_irq(adapter->msix_info[i + 1].vec,
406 &adapter->sge.qs[i]);
408 free_irq(adapter->pdev->irq, adapter);
411 static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
416 while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
424 static int init_tp_parity(struct adapter *adap)
428 struct cpl_set_tcb_field *greq;
429 unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;
431 t3_tp_set_offload_mode(adap, 1);
433 for (i = 0; i < 16; i++) {
434 struct cpl_smt_write_req *req;
436 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
437 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
438 memset(req, 0, sizeof(*req));
439 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
440 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
442 t3_mgmt_tx(adap, skb);
445 for (i = 0; i < 2048; i++) {
446 struct cpl_l2t_write_req *req;
448 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
449 req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
450 memset(req, 0, sizeof(*req));
451 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
452 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
453 req->params = htonl(V_L2T_W_IDX(i));
454 t3_mgmt_tx(adap, skb);
457 for (i = 0; i < 2048; i++) {
458 struct cpl_rte_write_req *req;
460 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
461 req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
462 memset(req, 0, sizeof(*req));
463 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
464 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
465 req->l2t_idx = htonl(V_L2T_W_IDX(i));
466 t3_mgmt_tx(adap, skb);
469 skb = alloc_skb(sizeof(*greq), GFP_KERNEL | __GFP_NOFAIL);
470 greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
471 memset(greq, 0, sizeof(*greq));
472 greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
473 OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
474 greq->mask = cpu_to_be64(1);
475 t3_mgmt_tx(adap, skb);
477 i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
478 t3_tp_set_offload_mode(adap, 0);
483 * setup_rss - configure RSS
486 * Sets up RSS to distribute packets to multiple receive queues. We
487 * configure the RSS CPU lookup table to distribute to the number of HW
488 * receive queues, and the response queue lookup table to narrow that
489 * down to the response queues actually configured for each port.
490 * We always configure the RSS mapping for two ports since the mapping
491 * table has plenty of entries.
493 static void setup_rss(struct adapter *adap)
496 unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
497 unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
498 u8 cpus[SGE_QSETS + 1];
499 u16 rspq_map[RSS_TABLE_SIZE];
501 for (i = 0; i < SGE_QSETS; ++i)
503 cpus[SGE_QSETS] = 0xff; /* terminator */
505 for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
506 rspq_map[i] = i % nq0;
507 rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
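/*
 * Worked example (illustrative): with nq0 = 2 and nq1 = 2 the loop above
 * fills the first half of rspq_map with 0,1,0,1,... for port 0 and the
 * second half with 2,3,2,3,... for port 1, so hash buckets spread evenly
 * over each port's own queue sets.
 */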
510 t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
511 F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
512 V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
515 static void init_napi(struct adapter *adap)
519 for (i = 0; i < SGE_QSETS; i++) {
520 struct sge_qset *qs = &adap->sge.qs[i];
523 netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
528 * netif_napi_add() can be called only once per napi_struct because it
529 * adds each new napi_struct to a list. Be careful not to call it a
530 * second time, e.g., during EEH recovery, by making a note of it.
532 adap->flags |= NAPI_INIT;
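/*
 * Illustrative guard (used by cxgb_up() further down): re-initialization is
 * skipped when the flag is already set, i.e.
 *
 *	if (!(adap->flags & NAPI_INIT))
 *		init_napi(adap);
 */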
536 * Wait until all NAPI handlers are descheduled. This includes the handlers of
537 * both netdevices representing interfaces and the dummy ones for the extra queue sets.
540 static void quiesce_rx(struct adapter *adap)
544 for (i = 0; i < SGE_QSETS; i++)
545 if (adap->sge.qs[i].adap)
546 napi_disable(&adap->sge.qs[i].napi);
549 static void enable_all_napi(struct adapter *adap)
552 for (i = 0; i < SGE_QSETS; i++)
553 if (adap->sge.qs[i].adap)
554 napi_enable(&adap->sge.qs[i].napi);
558 * set_qset_lro - Turn a queue set's LRO capability on and off
559 * @dev: the device the qset is attached to
560 * @qset_idx: the queue set index
561 * @val: the LRO switch
563 * Sets LRO on or off for a particular queue set.
564 * The device's features flag is updated to reflect the LRO
565 * capability when all queues belonging to the device are in the same state.
568 static void set_qset_lro(struct net_device *dev, int qset_idx, int val)
570 struct port_info *pi = netdev_priv(dev);
571 struct adapter *adapter = pi->adapter;
573 adapter->params.sge.qset[qset_idx].lro = !!val;
574 adapter->sge.qs[qset_idx].lro_enabled = !!val;
578 * setup_sge_qsets - configure SGE Tx/Rx/response queues
581 * Determines how many sets of SGE queues to use and initializes them.
582 * We support multiple queue sets per port if we have MSI-X, otherwise
583 * just one queue set per port.
585 static int setup_sge_qsets(struct adapter *adap)
587 int i, j, err, irq_idx = 0, qset_idx = 0;
588 unsigned int ntxq = SGE_TXQ_PER_SET;
590 if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
593 for_each_port(adap, i) {
594 struct net_device *dev = adap->port[i];
595 struct port_info *pi = netdev_priv(dev);
597 pi->qs = &adap->sge.qs[pi->first_qset];
598 for (j = pi->first_qset; j < pi->first_qset + pi->nqsets;
600 set_qset_lro(dev, qset_idx, pi->rx_offload & T3_LRO);
601 err = t3_sge_alloc_qset(adap, qset_idx, 1,
602 (adap->flags & USING_MSIX) ? qset_idx + 1 :
604 &adap->params.sge.qset[qset_idx], ntxq, dev,
605 netdev_get_tx_queue(dev, j));
607 t3_free_sge_resources(adap);
616 static ssize_t attr_show(struct device *d, char *buf,
617 ssize_t(*format) (struct net_device *, char *))
621 /* Synchronize with ioctls that may shut down the device */
623 len = (*format) (to_net_dev(d), buf);
628 static ssize_t attr_store(struct device *d,
629 const char *buf, size_t len,
630 ssize_t(*set) (struct net_device *, unsigned int),
631 unsigned int min_val, unsigned int max_val)
637 if (!capable(CAP_NET_ADMIN))
640 val = simple_strtoul(buf, &endp, 0);
641 if (endp == buf || val < min_val || val > max_val)
645 ret = (*set) (to_net_dev(d), val);
652 #define CXGB3_SHOW(name, val_expr) \
653 static ssize_t format_##name(struct net_device *dev, char *buf) \
655 struct port_info *pi = netdev_priv(dev); \
656 struct adapter *adap = pi->adapter; \
657 return sprintf(buf, "%u\n", val_expr); \
659 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
662 return attr_show(d, buf, format_##name); \
665 static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
667 struct port_info *pi = netdev_priv(dev);
668 struct adapter *adap = pi->adapter;
669 int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;
671 if (adap->flags & FULL_INIT_DONE)
673 if (val && adap->params.rev == 0)
675 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
678 adap->params.mc5.nfilters = val;
682 static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
683 const char *buf, size_t len)
685 return attr_store(d, buf, len, set_nfilters, 0, ~0);
688 static ssize_t set_nservers(struct net_device *dev, unsigned int val)
690 struct port_info *pi = netdev_priv(dev);
691 struct adapter *adap = pi->adapter;
693 if (adap->flags & FULL_INIT_DONE)
695 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
698 adap->params.mc5.nservers = val;
702 static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
703 const char *buf, size_t len)
705 return attr_store(d, buf, len, set_nservers, 0, ~0);
708 #define CXGB3_ATTR_R(name, val_expr) \
709 CXGB3_SHOW(name, val_expr) \
710 static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
712 #define CXGB3_ATTR_RW(name, val_expr, store_method) \
713 CXGB3_SHOW(name, val_expr) \
714 static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
716 CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
717 CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
718 CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
720 static struct attribute *cxgb3_attrs[] = {
721 &dev_attr_cam_size.attr,
722 &dev_attr_nfilters.attr,
723 &dev_attr_nservers.attr,
727 static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
729 static ssize_t tm_attr_show(struct device *d,
730 char *buf, int sched)
732 struct port_info *pi = netdev_priv(to_net_dev(d));
733 struct adapter *adap = pi->adapter;
734 unsigned int v, addr, bpt, cpt;
737 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
739 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
740 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
743 bpt = (v >> 8) & 0xff;
746 len = sprintf(buf, "disabled\n");
748 v = (adap->params.vpd.cclk * 1000) / cpt;
749 len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
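/*
 * Arithmetic note (assuming cclk is the core clock in kHz and bpt/cpt are
 * the scheduler's bytes-per-tick and clocks-per-tick fields): cclk * 1000 /
 * cpt is ticks per second, multiplying by bpt gives bytes/s, and dividing
 * by 125 (= 1000 / 8) converts bytes/s to Kbps.
 */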
755 static ssize_t tm_attr_store(struct device *d,
756 const char *buf, size_t len, int sched)
758 struct port_info *pi = netdev_priv(to_net_dev(d));
759 struct adapter *adap = pi->adapter;
764 if (!capable(CAP_NET_ADMIN))
767 val = simple_strtoul(buf, &endp, 0);
768 if (endp == buf || val > 10000000)
772 ret = t3_config_sched(adap, val, sched);
779 #define TM_ATTR(name, sched) \
780 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
783 return tm_attr_show(d, buf, sched); \
785 static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
786 const char *buf, size_t len) \
788 return tm_attr_store(d, buf, len, sched); \
790 static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
801 static struct attribute *offload_attrs[] = {
802 &dev_attr_sched0.attr,
803 &dev_attr_sched1.attr,
804 &dev_attr_sched2.attr,
805 &dev_attr_sched3.attr,
806 &dev_attr_sched4.attr,
807 &dev_attr_sched5.attr,
808 &dev_attr_sched6.attr,
809 &dev_attr_sched7.attr,
813 static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
816 * Sends an sk_buff to an offload queue driver
817 * after dealing with any active network taps.
819 static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
824 ret = t3_offload_tx(tdev, skb);
829 static int write_smt_entry(struct adapter *adapter, int idx)
831 struct cpl_smt_write_req *req;
832 struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
837 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
838 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
839 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
840 req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */
842 memset(req->src_mac1, 0, sizeof(req->src_mac1));
843 memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
845 offload_tx(&adapter->tdev, skb);
849 static int init_smt(struct adapter *adapter)
853 for_each_port(adapter, i)
854 write_smt_entry(adapter, i);
858 static void init_port_mtus(struct adapter *adapter)
860 unsigned int mtus = adapter->port[0]->mtu;
862 if (adapter->port[1])
863 mtus |= adapter->port[1]->mtu << 16;
864 t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
867 static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
871 struct mngt_pktsched_wr *req;
874 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
875 req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
876 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
877 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
883 ret = t3_mgmt_tx(adap, skb);
888 static int bind_qsets(struct adapter *adap)
892 for_each_port(adap, i) {
893 const struct port_info *pi = adap2pinfo(adap, i);
895 for (j = 0; j < pi->nqsets; ++j) {
896 int ret = send_pktsched_cmd(adap, 1,
897 pi->first_qset + j, -1,
907 #define FW_FNAME "cxgb3/t3fw-%d.%d.%d.bin"
908 #define TPSRAM_NAME "cxgb3/t3%c_psram-%d.%d.%d.bin"
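/*
 * Example (hypothetical version numbers): with an FW_VERSION of 7.12.0 the
 * FW_FNAME template above expands to "cxgb3/t3fw-7.12.0.bin", the file
 * requested from userspace via request_firmware() in upgrade_fw() below.
 */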
910 static int upgrade_fw(struct adapter *adap)
914 const struct firmware *fw;
915 struct device *dev = &adap->pdev->dev;
917 snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
918 FW_VERSION_MINOR, FW_VERSION_MICRO);
919 ret = request_firmware(&fw, buf, dev);
921 dev_err(dev, "could not upgrade firmware: unable to load %s\n",
925 ret = t3_load_fw(adap, fw->data, fw->size);
926 release_firmware(fw);
929 dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
930 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
932 dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
933 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
938 static inline char t3rev2char(struct adapter *adapter)
942 switch (adapter->params.rev) {
954 static int update_tpsram(struct adapter *adap)
956 const struct firmware *tpsram;
958 struct device *dev = &adap->pdev->dev;
962 rev = t3rev2char(adap);
966 snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
967 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
969 ret = request_firmware(&tpsram, buf, dev);
971 dev_err(dev, "could not load TP SRAM: unable to load %s\n",
976 ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
980 ret = t3_set_proto_sram(adap, tpsram->data);
983 "successful update of protocol engine "
985 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
987 dev_err(dev, "failed to update protocol engine %d.%d.%d\n",
988 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
990 dev_err(dev, "loading protocol SRAM failed\n");
993 release_firmware(tpsram);
999 * cxgb_up - enable the adapter
1000 * @adapter: adapter being enabled
1002 * Called when the first port is enabled, this function performs the
1003 * actions necessary to make an adapter operational, such as completing
1004 * the initialization of HW modules, and enabling interrupts.
1006 * Must be called with the rtnl lock held.
1008 static int cxgb_up(struct adapter *adap)
1012 if (!(adap->flags & FULL_INIT_DONE)) {
1013 err = t3_check_fw_version(adap);
1014 if (err == -EINVAL) {
1015 err = upgrade_fw(adap);
1016 CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
1017 FW_VERSION_MAJOR, FW_VERSION_MINOR,
1018 FW_VERSION_MICRO, err ? "failed" : "succeeded");
1021 err = t3_check_tpsram_version(adap);
1022 if (err == -EINVAL) {
1023 err = update_tpsram(adap);
1024 CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
1025 TP_VERSION_MAJOR, TP_VERSION_MINOR,
1026 TP_VERSION_MICRO, err ? "failed" : "succeeded");
1030 * Clear interrupts now to catch errors if t3_init_hw fails.
1031 * We clear them again later as initialization may trigger
1032 * conditions that can interrupt.
1034 t3_intr_clear(adap);
1036 err = t3_init_hw(adap, 0);
1040 t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
1041 t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
1043 err = setup_sge_qsets(adap);
1048 if (!(adap->flags & NAPI_INIT))
1051 t3_start_sge_timers(adap);
1052 adap->flags |= FULL_INIT_DONE;
1055 t3_intr_clear(adap);
1057 if (adap->flags & USING_MSIX) {
1058 name_msix_vecs(adap);
1059 err = request_irq(adap->msix_info[0].vec,
1060 t3_async_intr_handler, 0,
1061 adap->msix_info[0].desc, adap);
1065 err = request_msix_data_irqs(adap);
1067 free_irq(adap->msix_info[0].vec, adap);
1070 } else if ((err = request_irq(adap->pdev->irq,
1071 t3_intr_handler(adap,
1072 adap->sge.qs[0].rspq.
1074 (adap->flags & USING_MSI) ?
1079 enable_all_napi(adap);
1081 t3_intr_enable(adap);
1083 if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
1084 is_offload(adap) && init_tp_parity(adap) == 0)
1085 adap->flags |= TP_PARITY_INIT;
1087 if (adap->flags & TP_PARITY_INIT) {
1088 t3_write_reg(adap, A_TP_INT_CAUSE,
1089 F_CMCACHEPERR | F_ARPLUTPERR);
1090 t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
1093 if (!(adap->flags & QUEUES_BOUND)) {
1094 err = bind_qsets(adap);
1096 CH_ERR(adap, "failed to bind qsets, err %d\n", err);
1097 t3_intr_disable(adap);
1098 free_irq_resources(adap);
1101 adap->flags |= QUEUES_BOUND;
1107 CH_ERR(adap, "request_irq failed, err %d\n", err);
1112 * Release resources when all the ports and offloading have been stopped.
1114 static void cxgb_down(struct adapter *adapter)
1116 t3_sge_stop(adapter);
1117 spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */
1118 t3_intr_disable(adapter);
1119 spin_unlock_irq(&adapter->work_lock);
1121 free_irq_resources(adapter);
1122 quiesce_rx(adapter);
1123 flush_workqueue(cxgb3_wq); /* wait for external IRQ handler */
1126 static void schedule_chk_task(struct adapter *adap)
1130 timeo = adap->params.linkpoll_period ?
1131 (HZ * adap->params.linkpoll_period) / 10 :
1132 adap->params.stats_update_period * HZ;
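/*
 * Timing note (assumed units): linkpoll_period is in tenths of a second, so
 * (HZ * linkpoll_period) / 10 converts it to jiffies; stats_update_period is
 * in seconds, hence the plain multiplication by HZ.
 */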
1134 queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
1137 static int offload_open(struct net_device *dev)
1139 struct port_info *pi = netdev_priv(dev);
1140 struct adapter *adapter = pi->adapter;
1141 struct t3cdev *tdev = dev2t3cdev(dev);
1142 int adap_up = adapter->open_device_map & PORT_MASK;
1145 if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1148 if (!adap_up && (err = cxgb_up(adapter)) < 0)
1151 t3_tp_set_offload_mode(adapter, 1);
1152 tdev->lldev = adapter->port[0];
1153 err = cxgb3_offload_activate(adapter);
1157 init_port_mtus(adapter);
1158 t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
1159 adapter->params.b_wnd,
1160 adapter->params.rev == 0 ?
1161 adapter->port[0]->mtu : 0xffff);
1164 if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
1165 dev_dbg(&dev->dev, "cannot create sysfs group\n");
1167 /* Call back all registered clients */
1168 cxgb3_add_clients(tdev);
1171 /* restore them in case the offload module has changed them */
1173 t3_tp_set_offload_mode(adapter, 0);
1174 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1175 cxgb3_set_dummy_ops(tdev);
1180 static int offload_close(struct t3cdev *tdev)
1182 struct adapter *adapter = tdev2adap(tdev);
1184 if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1187 /* Call back all registered clients */
1188 cxgb3_remove_clients(tdev);
1190 sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
1192 /* Flush work scheduled while releasing TIDs */
1193 flush_scheduled_work();
1196 cxgb3_set_dummy_ops(tdev);
1197 t3_tp_set_offload_mode(adapter, 0);
1198 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1200 if (!adapter->open_device_map)
1203 cxgb3_offload_deactivate(adapter);
1207 static int cxgb_open(struct net_device *dev)
1209 struct port_info *pi = netdev_priv(dev);
1210 struct adapter *adapter = pi->adapter;
1211 int other_ports = adapter->open_device_map & PORT_MASK;
1214 if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
1217 set_bit(pi->port_id, &adapter->open_device_map);
1218 if (is_offload(adapter) && !ofld_disable) {
1219 err = offload_open(dev);
1222 "Could not initialize offload capabilities\n");
1225 dev->real_num_tx_queues = pi->nqsets;
1227 t3_port_intr_enable(adapter, pi->port_id);
1228 netif_tx_start_all_queues(dev);
1230 schedule_chk_task(adapter);
1235 static int cxgb_close(struct net_device *dev)
1237 struct port_info *pi = netdev_priv(dev);
1238 struct adapter *adapter = pi->adapter;
1241 if (!adapter->open_device_map)
1244 /* Stop link fault interrupts */
1245 t3_xgm_intr_disable(adapter, pi->port_id);
1246 t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
1248 t3_port_intr_disable(adapter, pi->port_id);
1249 netif_tx_stop_all_queues(dev);
1250 pi->phy.ops->power_down(&pi->phy, 1);
1251 netif_carrier_off(dev);
1252 t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
1254 spin_lock_irq(&adapter->work_lock); /* sync with update task */
1255 clear_bit(pi->port_id, &adapter->open_device_map);
1256 spin_unlock_irq(&adapter->work_lock);
1258 if (!(adapter->open_device_map & PORT_MASK))
1259 cancel_delayed_work_sync(&adapter->adap_check_task);
1261 if (!adapter->open_device_map)
1267 static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
1269 struct port_info *pi = netdev_priv(dev);
1270 struct adapter *adapter = pi->adapter;
1271 struct net_device_stats *ns = &pi->netstats;
1272 const struct mac_stats *pstats;
1274 spin_lock(&adapter->stats_lock);
1275 pstats = t3_mac_update_stats(&pi->mac);
1276 spin_unlock(&adapter->stats_lock);
1278 ns->tx_bytes = pstats->tx_octets;
1279 ns->tx_packets = pstats->tx_frames;
1280 ns->rx_bytes = pstats->rx_octets;
1281 ns->rx_packets = pstats->rx_frames;
1282 ns->multicast = pstats->rx_mcast_frames;
1284 ns->tx_errors = pstats->tx_underrun;
1285 ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
1286 pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
1287 pstats->rx_fifo_ovfl;
1289 /* detailed rx_errors */
1290 ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
1291 ns->rx_over_errors = 0;
1292 ns->rx_crc_errors = pstats->rx_fcs_errs;
1293 ns->rx_frame_errors = pstats->rx_symbol_errs;
1294 ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
1295 ns->rx_missed_errors = pstats->rx_cong_drops;
1297 /* detailed tx_errors */
1298 ns->tx_aborted_errors = 0;
1299 ns->tx_carrier_errors = 0;
1300 ns->tx_fifo_errors = pstats->tx_underrun;
1301 ns->tx_heartbeat_errors = 0;
1302 ns->tx_window_errors = 0;
1306 static u32 get_msglevel(struct net_device *dev)
1308 struct port_info *pi = netdev_priv(dev);
1309 struct adapter *adapter = pi->adapter;
1311 return adapter->msg_enable;
1314 static void set_msglevel(struct net_device *dev, u32 val)
1316 struct port_info *pi = netdev_priv(dev);
1317 struct adapter *adapter = pi->adapter;
1319 adapter->msg_enable = val;
1322 static char stats_strings[][ETH_GSTRING_LEN] = {
1325 "TxMulticastFramesOK",
1326 "TxBroadcastFramesOK",
1333 "TxFrames128To255 ",
1334 "TxFrames256To511 ",
1335 "TxFrames512To1023 ",
1336 "TxFrames1024To1518 ",
1337 "TxFrames1519ToMax ",
1341 "RxMulticastFramesOK",
1342 "RxBroadcastFramesOK",
1353 "RxFrames128To255 ",
1354 "RxFrames256To511 ",
1355 "RxFrames512To1023 ",
1356 "RxFrames1024To1518 ",
1357 "RxFrames1519ToMax ",
1370 "CheckTXEnToggled ",
1376 static int get_sset_count(struct net_device *dev, int sset)
1380 return ARRAY_SIZE(stats_strings);
1386 #define T3_REGMAP_SIZE (3 * 1024)
1388 static int get_regs_len(struct net_device *dev)
1390 return T3_REGMAP_SIZE;
1393 static int get_eeprom_len(struct net_device *dev)
1398 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1400 struct port_info *pi = netdev_priv(dev);
1401 struct adapter *adapter = pi->adapter;
1405 spin_lock(&adapter->stats_lock);
1406 t3_get_fw_version(adapter, &fw_vers);
1407 t3_get_tp_version(adapter, &tp_vers);
1408 spin_unlock(&adapter->stats_lock);
1410 strcpy(info->driver, DRV_NAME);
1411 strcpy(info->version, DRV_VERSION);
1412 strcpy(info->bus_info, pci_name(adapter->pdev));
1414 strcpy(info->fw_version, "N/A");
1416 snprintf(info->fw_version, sizeof(info->fw_version),
1417 "%s %u.%u.%u TP %u.%u.%u",
1418 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1419 G_FW_VERSION_MAJOR(fw_vers),
1420 G_FW_VERSION_MINOR(fw_vers),
1421 G_FW_VERSION_MICRO(fw_vers),
1422 G_TP_VERSION_MAJOR(tp_vers),
1423 G_TP_VERSION_MINOR(tp_vers),
1424 G_TP_VERSION_MICRO(tp_vers));
1428 static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
1430 if (stringset == ETH_SS_STATS)
1431 memcpy(data, stats_strings, sizeof(stats_strings));
1434 static unsigned long collect_sge_port_stats(struct adapter *adapter,
1435 struct port_info *p, int idx)
1438 unsigned long tot = 0;
1440 for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
1441 tot += adapter->sge.qs[i].port_stats[idx];
1445 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1448 struct port_info *pi = netdev_priv(dev);
1449 struct adapter *adapter = pi->adapter;
1450 const struct mac_stats *s;
1452 spin_lock(&adapter->stats_lock);
1453 s = t3_mac_update_stats(&pi->mac);
1454 spin_unlock(&adapter->stats_lock);
1456 *data++ = s->tx_octets;
1457 *data++ = s->tx_frames;
1458 *data++ = s->tx_mcast_frames;
1459 *data++ = s->tx_bcast_frames;
1460 *data++ = s->tx_pause;
1461 *data++ = s->tx_underrun;
1462 *data++ = s->tx_fifo_urun;
1464 *data++ = s->tx_frames_64;
1465 *data++ = s->tx_frames_65_127;
1466 *data++ = s->tx_frames_128_255;
1467 *data++ = s->tx_frames_256_511;
1468 *data++ = s->tx_frames_512_1023;
1469 *data++ = s->tx_frames_1024_1518;
1470 *data++ = s->tx_frames_1519_max;
1472 *data++ = s->rx_octets;
1473 *data++ = s->rx_frames;
1474 *data++ = s->rx_mcast_frames;
1475 *data++ = s->rx_bcast_frames;
1476 *data++ = s->rx_pause;
1477 *data++ = s->rx_fcs_errs;
1478 *data++ = s->rx_symbol_errs;
1479 *data++ = s->rx_short;
1480 *data++ = s->rx_jabber;
1481 *data++ = s->rx_too_long;
1482 *data++ = s->rx_fifo_ovfl;
1484 *data++ = s->rx_frames_64;
1485 *data++ = s->rx_frames_65_127;
1486 *data++ = s->rx_frames_128_255;
1487 *data++ = s->rx_frames_256_511;
1488 *data++ = s->rx_frames_512_1023;
1489 *data++ = s->rx_frames_1024_1518;
1490 *data++ = s->rx_frames_1519_max;
1492 *data++ = pi->phy.fifo_errors;
1494 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1495 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1496 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1497 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1498 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1502 *data++ = s->rx_cong_drops;
1504 *data++ = s->num_toggled;
1505 *data++ = s->num_resets;
1507 *data++ = s->link_faults;
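/*
 * Note: the order of the values written above must match the order of the
 * names in stats_strings[], since ethtool pairs them by index.
 */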
1510 static inline void reg_block_dump(struct adapter *ap, void *buf,
1511 unsigned int start, unsigned int end)
1513 u32 *p = buf + start;
1515 for (; start <= end; start += sizeof(u32))
1516 *p++ = t3_read_reg(ap, start);
1519 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1522 struct port_info *pi = netdev_priv(dev);
1523 struct adapter *ap = pi->adapter;
1527 * bits 0..9: chip version
1528 * bits 10..15: chip revision
1529 * bit 31: set for PCIe cards
1531 regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1534 * We skip the MAC statistics registers because they are clear-on-read.
1535 * Also reading multi-register stats would need to synchronize with the
1536 * periodic mac stats accumulation. Hard to justify the complexity.
1538 memset(buf, 0, T3_REGMAP_SIZE);
1539 reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1540 reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1541 reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1542 reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1543 reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1544 reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1545 XGM_REG(A_XGM_SERDES_STAT3, 1));
1546 reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1547 XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1550 static int restart_autoneg(struct net_device *dev)
1552 struct port_info *p = netdev_priv(dev);
1554 if (!netif_running(dev))
1556 if (p->link_config.autoneg != AUTONEG_ENABLE)
1558 p->phy.ops->autoneg_restart(&p->phy);
1562 static int cxgb3_phys_id(struct net_device *dev, u32 data)
1564 struct port_info *pi = netdev_priv(dev);
1565 struct adapter *adapter = pi->adapter;
1571 for (i = 0; i < data * 2; i++) {
1572 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1573 (i & 1) ? F_GPIO0_OUT_VAL : 0);
1574 if (msleep_interruptible(500))
1577 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1582 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1584 struct port_info *p = netdev_priv(dev);
1586 cmd->supported = p->link_config.supported;
1587 cmd->advertising = p->link_config.advertising;
1589 if (netif_carrier_ok(dev)) {
1590 cmd->speed = p->link_config.speed;
1591 cmd->duplex = p->link_config.duplex;
1597 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1598 cmd->phy_address = p->phy.mdio.prtad;
1599 cmd->transceiver = XCVR_EXTERNAL;
1600 cmd->autoneg = p->link_config.autoneg;
1606 static int speed_duplex_to_caps(int speed, int duplex)
1612 if (duplex == DUPLEX_FULL)
1613 cap = SUPPORTED_10baseT_Full;
1615 cap = SUPPORTED_10baseT_Half;
1618 if (duplex == DUPLEX_FULL)
1619 cap = SUPPORTED_100baseT_Full;
1621 cap = SUPPORTED_100baseT_Half;
1624 if (duplex == DUPLEX_FULL)
1625 cap = SUPPORTED_1000baseT_Full;
1627 cap = SUPPORTED_1000baseT_Half;
1630 if (duplex == DUPLEX_FULL)
1631 cap = SUPPORTED_10000baseT_Full;
1636 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1637 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1638 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1639 ADVERTISED_10000baseT_Full)
1641 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1643 struct port_info *p = netdev_priv(dev);
1644 struct link_config *lc = &p->link_config;
1646 if (!(lc->supported & SUPPORTED_Autoneg)) {
1648 * PHY offers a single speed/duplex. See if that's what's
1651 if (cmd->autoneg == AUTONEG_DISABLE) {
1652 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1653 if (lc->supported & cap)
1659 if (cmd->autoneg == AUTONEG_DISABLE) {
1660 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1662 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
1664 lc->requested_speed = cmd->speed;
1665 lc->requested_duplex = cmd->duplex;
1666 lc->advertising = 0;
1668 cmd->advertising &= ADVERTISED_MASK;
1669 cmd->advertising &= lc->supported;
1670 if (!cmd->advertising)
1672 lc->requested_speed = SPEED_INVALID;
1673 lc->requested_duplex = DUPLEX_INVALID;
1674 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1676 lc->autoneg = cmd->autoneg;
1677 if (netif_running(dev))
1678 t3_link_start(&p->phy, &p->mac, lc);
1682 static void get_pauseparam(struct net_device *dev,
1683 struct ethtool_pauseparam *epause)
1685 struct port_info *p = netdev_priv(dev);
1687 epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1688 epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1689 epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1692 static int set_pauseparam(struct net_device *dev,
1693 struct ethtool_pauseparam *epause)
1695 struct port_info *p = netdev_priv(dev);
1696 struct link_config *lc = &p->link_config;
1698 if (epause->autoneg == AUTONEG_DISABLE)
1699 lc->requested_fc = 0;
1700 else if (lc->supported & SUPPORTED_Autoneg)
1701 lc->requested_fc = PAUSE_AUTONEG;
1705 if (epause->rx_pause)
1706 lc->requested_fc |= PAUSE_RX;
1707 if (epause->tx_pause)
1708 lc->requested_fc |= PAUSE_TX;
1709 if (lc->autoneg == AUTONEG_ENABLE) {
1710 if (netif_running(dev))
1711 t3_link_start(&p->phy, &p->mac, lc);
1713 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1714 if (netif_running(dev))
1715 t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1720 static u32 get_rx_csum(struct net_device *dev)
1722 struct port_info *p = netdev_priv(dev);
1724 return p->rx_offload & T3_RX_CSUM;
1727 static int set_rx_csum(struct net_device *dev, u32 data)
1729 struct port_info *p = netdev_priv(dev);
1732 p->rx_offload |= T3_RX_CSUM;
1736 p->rx_offload &= ~(T3_RX_CSUM | T3_LRO);
1737 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
1738 set_qset_lro(dev, i, 0);
1743 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1745 struct port_info *pi = netdev_priv(dev);
1746 struct adapter *adapter = pi->adapter;
1747 const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1749 e->rx_max_pending = MAX_RX_BUFFERS;
1750 e->rx_mini_max_pending = 0;
1751 e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1752 e->tx_max_pending = MAX_TXQ_ENTRIES;
1754 e->rx_pending = q->fl_size;
1755 e->rx_mini_pending = q->rspq_size;
1756 e->rx_jumbo_pending = q->jumbo_size;
1757 e->tx_pending = q->txq_size[0];
1760 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1762 struct port_info *pi = netdev_priv(dev);
1763 struct adapter *adapter = pi->adapter;
1764 struct qset_params *q;
1767 if (e->rx_pending > MAX_RX_BUFFERS ||
1768 e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1769 e->tx_pending > MAX_TXQ_ENTRIES ||
1770 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1771 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1772 e->rx_pending < MIN_FL_ENTRIES ||
1773 e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1774 e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1777 if (adapter->flags & FULL_INIT_DONE)
1780 q = &adapter->params.sge.qset[pi->first_qset];
1781 for (i = 0; i < pi->nqsets; ++i, ++q) {
1782 q->rspq_size = e->rx_mini_pending;
1783 q->fl_size = e->rx_pending;
1784 q->jumbo_size = e->rx_jumbo_pending;
1785 q->txq_size[0] = e->tx_pending;
1786 q->txq_size[1] = e->tx_pending;
1787 q->txq_size[2] = e->tx_pending;
1792 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1794 struct port_info *pi = netdev_priv(dev);
1795 struct adapter *adapter = pi->adapter;
1796 struct qset_params *qsp = &adapter->params.sge.qset[0];
1797 struct sge_qset *qs = &adapter->sge.qs[0];
1799 if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1802 qsp->coalesce_usecs = c->rx_coalesce_usecs;
1803 t3_update_qset_coalesce(qs, qsp);
1807 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1809 struct port_info *pi = netdev_priv(dev);
1810 struct adapter *adapter = pi->adapter;
1811 struct qset_params *q = adapter->params.sge.qset;
1813 c->rx_coalesce_usecs = q->coalesce_usecs;
1817 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1820 struct port_info *pi = netdev_priv(dev);
1821 struct adapter *adapter = pi->adapter;
1824 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1828 e->magic = EEPROM_MAGIC;
1829 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1830 err = t3_seeprom_read(adapter, i, (__le32 *) & buf[i]);
1833 memcpy(data, buf + e->offset, e->len);
1838 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1841 struct port_info *pi = netdev_priv(dev);
1842 struct adapter *adapter = pi->adapter;
1843 u32 aligned_offset, aligned_len;
1848 if (eeprom->magic != EEPROM_MAGIC)
1851 aligned_offset = eeprom->offset & ~3;
1852 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
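/*
 * Worked example: offset = 5, len = 6 gives aligned_offset = 4 and
 * aligned_len = (6 + 1 + 3) & ~3 = 8, i.e. the two 32-bit words covering
 * bytes 4..11, which enclose the requested bytes 5..10.
 */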
1854 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1855 buf = kmalloc(aligned_len, GFP_KERNEL);
1858 err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
1859 if (!err && aligned_len > 4)
1860 err = t3_seeprom_read(adapter,
1861 aligned_offset + aligned_len - 4,
1862 (__le32 *) & buf[aligned_len - 4]);
1865 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1869 err = t3_seeprom_wp(adapter, 0);
1873 for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
1874 err = t3_seeprom_write(adapter, aligned_offset, *p);
1875 aligned_offset += 4;
1879 err = t3_seeprom_wp(adapter, 1);
1886 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1890 memset(&wol->sopass, 0, sizeof(wol->sopass));
1893 static const struct ethtool_ops cxgb_ethtool_ops = {
1894 .get_settings = get_settings,
1895 .set_settings = set_settings,
1896 .get_drvinfo = get_drvinfo,
1897 .get_msglevel = get_msglevel,
1898 .set_msglevel = set_msglevel,
1899 .get_ringparam = get_sge_param,
1900 .set_ringparam = set_sge_param,
1901 .get_coalesce = get_coalesce,
1902 .set_coalesce = set_coalesce,
1903 .get_eeprom_len = get_eeprom_len,
1904 .get_eeprom = get_eeprom,
1905 .set_eeprom = set_eeprom,
1906 .get_pauseparam = get_pauseparam,
1907 .set_pauseparam = set_pauseparam,
1908 .get_rx_csum = get_rx_csum,
1909 .set_rx_csum = set_rx_csum,
1910 .set_tx_csum = ethtool_op_set_tx_csum,
1911 .set_sg = ethtool_op_set_sg,
1912 .get_link = ethtool_op_get_link,
1913 .get_strings = get_strings,
1914 .phys_id = cxgb3_phys_id,
1915 .nway_reset = restart_autoneg,
1916 .get_sset_count = get_sset_count,
1917 .get_ethtool_stats = get_stats,
1918 .get_regs_len = get_regs_len,
1919 .get_regs = get_regs,
1921 .set_tso = ethtool_op_set_tso,
1924 static int in_range(int val, int lo, int hi)
1926 return val < 0 || (val <= hi && val >= lo);
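/*
 * Note: negative values deliberately pass the range check; the extension
 * ioctl below uses -1 to mean "leave this parameter unchanged" and only
 * applies fields that are >= 0.
 */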
1929 static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1931 struct port_info *pi = netdev_priv(dev);
1932 struct adapter *adapter = pi->adapter;
1936 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1940 case CHELSIO_SET_QSET_PARAMS:{
1942 struct qset_params *q;
1943 struct ch_qset_params t;
1944 int q1 = pi->first_qset;
1945 int nqsets = pi->nqsets;
1947 if (!capable(CAP_NET_ADMIN))
1949 if (copy_from_user(&t, useraddr, sizeof(t)))
1951 if (t.qset_idx >= SGE_QSETS)
1953 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
1954 !in_range(t.cong_thres, 0, 255) ||
1955 !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
1957 !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
1959 !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
1960 MAX_CTRL_TXQ_ENTRIES) ||
1961 !in_range(t.fl_size[0], MIN_FL_ENTRIES,
1963 || !in_range(t.fl_size[1], MIN_FL_ENTRIES,
1964 MAX_RX_JUMBO_BUFFERS)
1965 || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
1969 if ((adapter->flags & FULL_INIT_DONE) && t.lro > 0)
1970 for_each_port(adapter, i) {
1971 pi = adap2pinfo(adapter, i);
1972 if (t.qset_idx >= pi->first_qset &&
1973 t.qset_idx < pi->first_qset + pi->nqsets &&
1974 !(pi->rx_offload & T3_RX_CSUM))
1978 if ((adapter->flags & FULL_INIT_DONE) &&
1979 (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
1980 t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
1981 t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
1982 t.polling >= 0 || t.cong_thres >= 0))
1985 /* Allow setting of any available qset when offload enabled */
1986 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
1988 for_each_port(adapter, i) {
1989 pi = adap2pinfo(adapter, i);
1990 nqsets += pi->first_qset + pi->nqsets;
1994 if (t.qset_idx < q1)
1996 if (t.qset_idx > q1 + nqsets - 1)
1999 q = &adapter->params.sge.qset[t.qset_idx];
2001 if (t.rspq_size >= 0)
2002 q->rspq_size = t.rspq_size;
2003 if (t.fl_size[0] >= 0)
2004 q->fl_size = t.fl_size[0];
2005 if (t.fl_size[1] >= 0)
2006 q->jumbo_size = t.fl_size[1];
2007 if (t.txq_size[0] >= 0)
2008 q->txq_size[0] = t.txq_size[0];
2009 if (t.txq_size[1] >= 0)
2010 q->txq_size[1] = t.txq_size[1];
2011 if (t.txq_size[2] >= 0)
2012 q->txq_size[2] = t.txq_size[2];
2013 if (t.cong_thres >= 0)
2014 q->cong_thres = t.cong_thres;
2015 if (t.intr_lat >= 0) {
2016 struct sge_qset *qs =
2017 &adapter->sge.qs[t.qset_idx];
2019 q->coalesce_usecs = t.intr_lat;
2020 t3_update_qset_coalesce(qs, q);
2022 if (t.polling >= 0) {
2023 if (adapter->flags & USING_MSIX)
2024 q->polling = t.polling;
2026 /* No polling with INTx for T3A */
2027 if (adapter->params.rev == 0 &&
2028 !(adapter->flags & USING_MSI))
2031 for (i = 0; i < SGE_QSETS; i++) {
2032 q = &adapter->params.sge.
2034 q->polling = t.polling;
2039 set_qset_lro(dev, t.qset_idx, t.lro);
2043 case CHELSIO_GET_QSET_PARAMS:{
2044 struct qset_params *q;
2045 struct ch_qset_params t;
2046 int q1 = pi->first_qset;
2047 int nqsets = pi->nqsets;
2050 if (copy_from_user(&t, useraddr, sizeof(t)))
2053 /* Display qsets for all ports when offload enabled */
2054 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2056 for_each_port(adapter, i) {
2057 pi = adap2pinfo(adapter, i);
2058 nqsets = pi->first_qset + pi->nqsets;
2062 if (t.qset_idx >= nqsets)
2065 q = &adapter->params.sge.qset[q1 + t.qset_idx];
2066 t.rspq_size = q->rspq_size;
2067 t.txq_size[0] = q->txq_size[0];
2068 t.txq_size[1] = q->txq_size[1];
2069 t.txq_size[2] = q->txq_size[2];
2070 t.fl_size[0] = q->fl_size;
2071 t.fl_size[1] = q->jumbo_size;
2072 t.polling = q->polling;
2074 t.intr_lat = q->coalesce_usecs;
2075 t.cong_thres = q->cong_thres;
2078 if (adapter->flags & USING_MSIX)
2079 t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
2081 t.vector = adapter->pdev->irq;
2083 if (copy_to_user(useraddr, &t, sizeof(t)))
2087 case CHELSIO_SET_QSET_NUM:{
2088 struct ch_reg edata;
2089 unsigned int i, first_qset = 0, other_qsets = 0;
2091 if (!capable(CAP_NET_ADMIN))
2093 if (adapter->flags & FULL_INIT_DONE)
2095 if (copy_from_user(&edata, useraddr, sizeof(edata)))
2097 if (edata.val < 1 ||
2098 (edata.val > 1 && !(adapter->flags & USING_MSIX)))
2101 for_each_port(adapter, i)
2102 if (adapter->port[i] && adapter->port[i] != dev)
2103 other_qsets += adap2pinfo(adapter, i)->nqsets;
2105 if (edata.val + other_qsets > SGE_QSETS)
2108 pi->nqsets = edata.val;
2110 for_each_port(adapter, i)
2111 if (adapter->port[i]) {
2112 pi = adap2pinfo(adapter, i);
2113 pi->first_qset = first_qset;
2114 first_qset += pi->nqsets;
2118 case CHELSIO_GET_QSET_NUM:{
2119 struct ch_reg edata;
2121 edata.cmd = CHELSIO_GET_QSET_NUM;
2122 edata.val = pi->nqsets;
2123 if (copy_to_user(useraddr, &edata, sizeof(edata)))
2127 case CHELSIO_LOAD_FW:{
2129 struct ch_mem_range t;
2131 if (!capable(CAP_SYS_RAWIO))
2133 if (copy_from_user(&t, useraddr, sizeof(t)))
2135 /* Check t.len sanity ? */
2136 fw_data = kmalloc(t.len, GFP_KERNEL);
2141 (fw_data, useraddr + sizeof(t), t.len)) {
2146 ret = t3_load_fw(adapter, fw_data, t.len);
2152 case CHELSIO_SETMTUTAB:{
2156 if (!is_offload(adapter))
2158 if (!capable(CAP_NET_ADMIN))
2160 if (offload_running(adapter))
2162 if (copy_from_user(&m, useraddr, sizeof(m)))
2164 if (m.nmtus != NMTUS)
2166 if (m.mtus[0] < 81) /* accommodate SACK */
2169 /* MTUs must be in ascending order */
2170 for (i = 1; i < NMTUS; ++i)
2171 if (m.mtus[i] < m.mtus[i - 1])
2174 memcpy(adapter->params.mtus, m.mtus,
2175 sizeof(adapter->params.mtus));
2178 case CHELSIO_GET_PM:{
2179 struct tp_params *p = &adapter->params.tp;
2180 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
2182 if (!is_offload(adapter))
2184 m.tx_pg_sz = p->tx_pg_size;
2185 m.tx_num_pg = p->tx_num_pgs;
2186 m.rx_pg_sz = p->rx_pg_size;
2187 m.rx_num_pg = p->rx_num_pgs;
2188 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
2189 if (copy_to_user(useraddr, &m, sizeof(m)))
2193 case CHELSIO_SET_PM:{
2195 struct tp_params *p = &adapter->params.tp;
2197 if (!is_offload(adapter))
2199 if (!capable(CAP_NET_ADMIN))
2201 if (adapter->flags & FULL_INIT_DONE)
2203 if (copy_from_user(&m, useraddr, sizeof(m)))
2205 if (!is_power_of_2(m.rx_pg_sz) ||
2206 !is_power_of_2(m.tx_pg_sz))
2207 return -EINVAL; /* not power of 2 */
2208 if (!(m.rx_pg_sz & 0x14000))
2209 return -EINVAL; /* not 16KB or 64KB */
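/*
 * Since the page size is already known to be a power of two, ANDing it
 * against a mask of the permitted sizes (0x14000 = 16KB | 64KB) is a cheap
 * membership test; the tx mask below works the same way for the allowed
 * sizes 16KB, 64KB, 256KB, 1MB, 4MB and 16MB.
 */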
2210 if (!(m.tx_pg_sz & 0x1554000))
2212 if (m.tx_num_pg == -1)
2213 m.tx_num_pg = p->tx_num_pgs;
2214 if (m.rx_num_pg == -1)
2215 m.rx_num_pg = p->rx_num_pgs;
2216 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
2218 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
2219 m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
2221 p->rx_pg_size = m.rx_pg_sz;
2222 p->tx_pg_size = m.tx_pg_sz;
2223 p->rx_num_pgs = m.rx_num_pg;
2224 p->tx_num_pgs = m.tx_num_pg;
2227 case CHELSIO_GET_MEM:{
2228 struct ch_mem_range t;
2232 if (!is_offload(adapter))
2234 if (!(adapter->flags & FULL_INIT_DONE))
2235 return -EIO; /* need the memory controllers */
2236 if (copy_from_user(&t, useraddr, sizeof(t)))
2238 if ((t.addr & 7) || (t.len & 7))
2240 if (t.mem_id == MEM_CM)
2242 else if (t.mem_id == MEM_PMRX)
2243 mem = &adapter->pmrx;
2244 else if (t.mem_id == MEM_PMTX)
2245 mem = &adapter->pmtx;
2251 * bits 0..9: chip version
2252 * bits 10..15: chip revision
2254 t.version = 3 | (adapter->params.rev << 10);
2255 if (copy_to_user(useraddr, &t, sizeof(t)))
2259 * Read 256 bytes at a time as len can be large and we don't
2260 * want to use huge intermediate buffers.
2262 useraddr += sizeof(t); /* advance to start of buffer */
2264 unsigned int chunk =
2265 min_t(unsigned int, t.len, sizeof(buf));
2268 t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
2272 if (copy_to_user(useraddr, buf, chunk))
2280 case CHELSIO_SET_TRACE_FILTER:{
2282 const struct trace_params *tp;
2284 if (!capable(CAP_NET_ADMIN))
2286 if (!offload_running(adapter))
2288 if (copy_from_user(&t, useraddr, sizeof(t)))
2291 tp = (const struct trace_params *)&t.sip;
2293 t3_config_trace_filter(adapter, tp, 0,
2297 t3_config_trace_filter(adapter, tp, 1,
2308 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2310 struct mii_ioctl_data *data = if_mii(req);
2311 struct port_info *pi = netdev_priv(dev);
2312 struct adapter *adapter = pi->adapter;
2317 /* Convert phy_id from older PRTAD/DEVAD format */
2318 if (is_10G(adapter) &&
2319 !mdio_phy_id_is_c45(data->phy_id) &&
2320 (data->phy_id & 0x1f00) &&
2321 !(data->phy_id & 0xe0e0))
2322 data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
2323 data->phy_id & 0x1f);
2326 return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
2328 return cxgb_extension_ioctl(dev, req->ifr_data);
2334 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2336 struct port_info *pi = netdev_priv(dev);
2337 struct adapter *adapter = pi->adapter;
2340 if (new_mtu < 81) /* accommodate SACK */
2342 if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2345 init_port_mtus(adapter);
2346 if (adapter->params.rev == 0 && offload_running(adapter))
2347 t3_load_mtus(adapter, adapter->params.mtus,
2348 adapter->params.a_wnd, adapter->params.b_wnd,
2349 adapter->port[0]->mtu);
2353 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2355 struct port_info *pi = netdev_priv(dev);
2356 struct adapter *adapter = pi->adapter;
2357 struct sockaddr *addr = p;
2359 if (!is_valid_ether_addr(addr->sa_data))
2362 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2363 t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2364 if (offload_running(adapter))
2365 write_smt_entry(adapter, pi->port_id);
2370 * t3_synchronize_rx - wait for current Rx processing on a port to complete
2371 * @adap: the adapter
2374 * Ensures that current Rx processing on any of the queues associated with
2375 * the given port completes before returning. We do this by acquiring and
2376 * releasing the locks of the response queues associated with the port.
2378 static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2382 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
2383 struct sge_rspq *q = &adap->sge.qs[i].rspq;
2385 spin_lock_irq(&q->lock);
2386 spin_unlock_irq(&q->lock);
2390 static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2392 struct port_info *pi = netdev_priv(dev);
2393 struct adapter *adapter = pi->adapter;
2395 pi->vlan_grp = grp;
2396 if (adapter->params.rev > 0)
2397 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2398 else {
2399 /* single control for all ports */
2400 unsigned int i, have_vlans = 0;
2401 for_each_port(adapter, i)
2402 have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2404 t3_set_vlan_accel(adapter, 1, have_vlans);
2405 }
2406 t3_synchronize_rx(adapter, pi);
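/*
 * Note: revision > 0 parts have a per-port VLAN extraction enable, hence
 * the 1 << pi->port_id mask above; rev 0 parts have a single global
 * control, which stays on while any port still has a vlan_group
 * registered (the have_vlans scan).  t3_synchronize_rx() then ensures no
 * Rx path is still running with the old acceleration setting before the
 * caller proceeds.
 */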
2409 #ifdef CONFIG_NET_POLL_CONTROLLER
2410 static void cxgb_netpoll(struct net_device *dev)
2412 struct port_info *pi = netdev_priv(dev);
2413 struct adapter *adapter = pi->adapter;
2416 for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2417 struct sge_qset *qs = &adapter->sge.qs[qidx];
2418 void *source;
2420 if (adapter->flags & USING_MSIX)
2421 source = qs;
2422 else
2423 source = adapter;
2425 t3_intr_handler(adapter, qs->rspq.polling) (0, source);
2431 * Periodic accumulation of MAC statistics.
2433 static void mac_stats_update(struct adapter *adapter)
2437 for_each_port(adapter, i) {
2438 struct net_device *dev = adapter->port[i];
2439 struct port_info *p = netdev_priv(dev);
2441 if (netif_running(dev)) {
2442 spin_lock(&adapter->stats_lock);
2443 t3_mac_update_stats(&p->mac);
2444 spin_unlock(&adapter->stats_lock);
2449 static void check_link_status(struct adapter *adapter)
2453 for_each_port(adapter, i) {
2454 struct net_device *dev = adapter->port[i];
2455 struct port_info *p = netdev_priv(dev);
2458 spin_lock_irq(&adapter->work_lock);
2459 link_fault = p->link_fault;
2460 spin_unlock_irq(&adapter->work_lock);
2462 if (link_fault) {
2463 t3_link_fault(adapter, i);
2464 continue;
2465 }
2467 if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
2468 t3_xgm_intr_disable(adapter, i);
2469 t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2471 t3_link_changed(adapter, i);
2472 t3_xgm_intr_enable(adapter, i);
2477 static void check_t3b2_mac(struct adapter *adapter)
2481 if (!rtnl_trylock()) /* synchronize with ifdown */
2484 for_each_port(adapter, i) {
2485 struct net_device *dev = adapter->port[i];
2486 struct port_info *p = netdev_priv(dev);
2489 if (!netif_running(dev))
2490 continue;
2492 status = 0;
2493 if (netif_running(dev) && netif_carrier_ok(dev))
2494 status = t3b2_mac_watchdog_task(&p->mac);
2495 if (status == 1)
2496 p->mac.stats.num_toggled++;
2497 else if (status == 2) {
2498 struct cmac *mac = &p->mac;
2500 t3_mac_set_mtu(mac, dev->mtu);
2501 t3_mac_set_address(mac, 0, dev->dev_addr);
2502 cxgb_set_rxmode(dev);
2503 t3_link_start(&p->phy, mac, &p->link_config);
2504 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2505 t3_port_intr_enable(adapter, p->port_id);
2506 p->mac.stats.num_resets++;
2513 static void t3_adap_check_task(struct work_struct *work)
2515 struct adapter *adapter = container_of(work, struct adapter,
2516 adap_check_task.work);
2517 const struct adapter_params *p = &adapter->params;
2519 unsigned int v, status, reset;
2521 adapter->check_task_cnt++;
2523 check_link_status(adapter);
2525 /* Accumulate MAC stats if needed */
2526 if (!p->linkpoll_period ||
2527 (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2528 p->stats_update_period) {
2529 mac_stats_update(adapter);
2530 adapter->check_task_cnt = 0;
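/*
 * Note on the threshold above: the task runs every linkpoll_period/10
 * seconds, so (check_task_cnt * linkpoll_period) / 10 is roughly the time
 * elapsed since the last stats pull.  Worked example with assumed
 * parameters: linkpoll_period = 10 (one run per second) and
 * stats_update_period = 60 means the condition first holds on the 60th
 * run, i.e. MAC stats are accumulated about once a minute and the counter
 * restarts from zero.
 */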
2533 if (p->rev == T3_REV_B2)
2534 check_t3b2_mac(adapter);
2537 * Scan the XGMAC's to check for various conditions which we want to
2538 * monitor in a periodic polling manner rather than via an interrupt
2539 * condition. This is used for conditions which would otherwise flood
2540 * the system with interrupts and we only really need to know that the
2541 * conditions are "happening" ... For each condition we count the
2542 * detection of the condition and reset it for the next polling loop.
2544 for_each_port(adapter, port) {
2545 struct cmac *mac = &adap2pinfo(adapter, port)->mac;
2548 cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
2550 if (cause & F_RXFIFO_OVERFLOW) {
2551 mac->stats.rx_fifo_ovfl++;
2552 reset |= F_RXFIFO_OVERFLOW;
2555 t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
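/*
 * Note: the cause register is read, the bits of interest are counted into
 * the MAC stats, and only those observed bits are written back to clear
 * them, so the condition is accounted for without ever enabling an
 * interrupt that could fire at line rate.  The same read/count/clear shape
 * is repeated below for the SGE free-list-empty status.
 */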
2559 * We do the same as above for FL_EMPTY interrupts.
2561 status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2564 if (status & F_FLEMPTY) {
2565 struct sge_qset *qs = &adapter->sge.qs[0];
2570 v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
2574 qs->fl[i].empty += (v & 1);
2582 t3_write_reg(adapter, A_SG_INT_CAUSE, reset);
2584 /* Schedule the next check update if any port is active. */
2585 spin_lock_irq(&adapter->work_lock);
2586 if (adapter->open_device_map & PORT_MASK)
2587 schedule_chk_task(adapter);
2588 spin_unlock_irq(&adapter->work_lock);
2592 * Processes external (PHY) interrupts in process context.
2594 static void ext_intr_task(struct work_struct *work)
2596 struct adapter *adapter = container_of(work, struct adapter,
2597 ext_intr_handler_task);
2600 /* Disable link fault interrupts */
2601 for_each_port(adapter, i) {
2602 struct net_device *dev = adapter->port[i];
2603 struct port_info *p = netdev_priv(dev);
2605 t3_xgm_intr_disable(adapter, i);
2606 t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2609 /* Re-enable link fault interrupts */
2610 t3_phy_intr_handler(adapter);
2612 for_each_port(adapter, i)
2613 t3_xgm_intr_enable(adapter, i);
2615 /* Now reenable external interrupts */
2616 spin_lock_irq(&adapter->work_lock);
2617 if (adapter->slow_intr_mask) {
2618 adapter->slow_intr_mask |= F_T3DBG;
2619 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2620 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2621 adapter->slow_intr_mask);
2623 spin_unlock_irq(&adapter->work_lock);
2627 * Interrupt-context handler for external (PHY) interrupts.
2629 void t3_os_ext_intr_handler(struct adapter *adapter)
2632 * Schedule a task to handle external interrupts as they may be slow
2633 * and we use a mutex to protect MDIO registers. We disable PHY
2634 * interrupts in the meantime and let the task reenable them when
2637 spin_lock(&adapter->work_lock);
2638 if (adapter->slow_intr_mask) {
2639 adapter->slow_intr_mask &= ~F_T3DBG;
2640 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2641 adapter->slow_intr_mask);
2642 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2644 spin_unlock(&adapter->work_lock);
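/*
 * Note: the hard-irq handler above only masks F_T3DBG and queues
 * ext_intr_task(); the task may then take the MDIO mutex and sleep while
 * talking to the PHY, and finally acks and unmasks F_T3DBG.  Sketch of the
 * split (mirrors the two functions above):
 *
 *	irq context:	mask F_T3DBG in PL_INT_ENABLE0, queue_work()
 *	work context:	t3_phy_intr_handler(), re-enable XGM interrupts,
 *			ack F_T3DBG in PL_INT_CAUSE0, restore PL_INT_ENABLE0
 */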
2647 void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
2649 struct net_device *netdev = adapter->port[port_id];
2650 struct port_info *pi = netdev_priv(netdev);
2652 spin_lock(&adapter->work_lock);
2653 pi->link_fault = 1;
2654 spin_unlock(&adapter->work_lock);
2657 static int t3_adapter_error(struct adapter *adapter, int reset)
2661 if (is_offload(adapter) &&
2662 test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2663 cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
2664 offload_close(&adapter->tdev);
2667 /* Stop all ports */
2668 for_each_port(adapter, i) {
2669 struct net_device *netdev = adapter->port[i];
2671 if (netif_running(netdev))
2672 cxgb_close(netdev);
2675 /* Stop SGE timers */
2676 t3_stop_sge_timers(adapter);
2678 adapter->flags &= ~FULL_INIT_DONE;
2680 if (reset)
2681 ret = t3_reset_adapter(adapter);
2683 pci_disable_device(adapter->pdev);
2688 static int t3_reenable_adapter(struct adapter *adapter)
2690 if (pci_enable_device(adapter->pdev)) {
2691 dev_err(&adapter->pdev->dev,
2692 "Cannot re-enable PCI device after reset.\n");
2695 pci_set_master(adapter->pdev);
2696 pci_restore_state(adapter->pdev);
2698 /* Free sge resources */
2699 t3_free_sge_resources(adapter);
2701 if (t3_replay_prep_adapter(adapter))
2709 static void t3_resume_ports(struct adapter *adapter)
2713 /* Restart the ports */
2714 for_each_port(adapter, i) {
2715 struct net_device *netdev = adapter->port[i];
2717 if (netif_running(netdev)) {
2718 if (cxgb_open(netdev)) {
2719 dev_err(&adapter->pdev->dev,
2720 "can't bring device back up"
2721 " after reset\n");
2727 if (is_offload(adapter) && !ofld_disable)
2728 cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
2732 * processes a fatal error.
2733 * Bring the ports down, reset the chip, bring the ports back up.
2735 static void fatal_error_task(struct work_struct *work)
2737 struct adapter *adapter = container_of(work, struct adapter,
2738 fatal_error_handler_task);
2742 err = t3_adapter_error(adapter, 1);
2743 if (!err)
2744 err = t3_reenable_adapter(adapter);
2745 if (!err)
2746 t3_resume_ports(adapter);
2748 CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
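/*
 * Note: recovery uses the same three helpers whether it is driven by
 * t3_fatal_err() (via this work item) or by PCI AER; the AER callbacks
 * below simply split the sequence across the core's recovery steps:
 *
 *	t3_io_error_detected()  ->  t3_adapter_error(adapter, 0)
 *	t3_io_slot_reset()      ->  t3_reenable_adapter(adapter)
 *	t3_io_resume()          ->  t3_resume_ports(adapter)
 */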
2752 void t3_fatal_err(struct adapter *adapter)
2754 unsigned int fw_status[4];
2756 if (adapter->flags & FULL_INIT_DONE) {
2757 t3_sge_stop(adapter);
2758 t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
2759 t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
2760 t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
2761 t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
2763 spin_lock(&adapter->work_lock);
2764 t3_intr_disable(adapter);
2765 queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
2766 spin_unlock(&adapter->work_lock);
2768 CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2769 if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2770 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2771 fw_status[0], fw_status[1],
2772 fw_status[2], fw_status[3]);
2776 * t3_io_error_detected - called when PCI error is detected
2777 * @pdev: Pointer to PCI device
2778 * @state: The current pci connection state
2780 * This function is called after a PCI bus error affecting
2781 * this device has been detected.
2783 static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
2784 pci_channel_state_t state)
2786 struct adapter *adapter = pci_get_drvdata(pdev);
2789 if (state == pci_channel_io_perm_failure)
2790 return PCI_ERS_RESULT_DISCONNECT;
2792 ret = t3_adapter_error(adapter, 0);
2794 /* Request a slot reset. */
2795 return PCI_ERS_RESULT_NEED_RESET;
2799 * t3_io_slot_reset - called after the pci bus has been reset.
2800 * @pdev: Pointer to PCI device
2802 * Restart the card from scratch, as if from a cold-boot.
2804 static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
2806 struct adapter *adapter = pci_get_drvdata(pdev);
2808 if (!t3_reenable_adapter(adapter))
2809 return PCI_ERS_RESULT_RECOVERED;
2811 return PCI_ERS_RESULT_DISCONNECT;
2815 * t3_io_resume - called when traffic can start flowing again.
2816 * @pdev: Pointer to PCI device
2818 * This callback is called when the error recovery driver tells us that
2819 * it's OK to resume normal operation.
2821 static void t3_io_resume(struct pci_dev *pdev)
2823 struct adapter *adapter = pci_get_drvdata(pdev);
2825 CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
2826 t3_read_reg(adapter, A_PCIE_PEX_ERR));
2828 t3_resume_ports(adapter);
2831 static struct pci_error_handlers t3_err_handler = {
2832 .error_detected = t3_io_error_detected,
2833 .slot_reset = t3_io_slot_reset,
2834 .resume = t3_io_resume,
2838 * Set the number of qsets based on the number of CPUs and the number of ports,
2839 * not to exceed the number of available qsets, assuming there are enough qsets
2842 static void set_nqsets(struct adapter *adap)
2845 int num_cpus = num_online_cpus();
2846 int hwports = adap->params.nports;
2847 int nqsets = adap->msix_nvectors - 1;
2849 if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
2851 (hwports * nqsets > SGE_QSETS ||
2852 num_cpus >= nqsets / hwports))
2854 if (nqsets > num_cpus)
2855 nqsets = num_cpus;
2856 if (nqsets < 1 || hwports == 4)
2857 nqsets = 1;
2861 for_each_port(adap, i) {
2862 struct port_info *pi = adap2pinfo(adap, i);
2864 pi->first_qset = j;
2865 pi->nqsets = nqsets;
2866 j = pi->first_qset + nqsets;
2868 dev_info(&adap->pdev->dev,
2869 "Port %d using %d queue sets.\n", i, nqsets);
2873 static int __devinit cxgb_enable_msix(struct adapter *adap)
2875 struct msix_entry entries[SGE_QSETS + 1];
2879 vectors = ARRAY_SIZE(entries);
2880 for (i = 0; i < vectors; ++i)
2881 entries[i].entry = i;
2883 while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0)
2884 vectors = err;
2886 if (err < 0)
2887 pci_disable_msix(adap->pdev);
2889 if (!err && vectors < (adap->params.nports + 1)) {
2890 pci_disable_msix(adap->pdev);
2895 for (i = 0; i < vectors; ++i)
2896 adap->msix_info[i].vec = entries[i].vector;
2897 adap->msix_nvectors = vectors;
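/*
 * Note: with this era's pci_enable_msix() a positive return value means
 * "request denied, but this many vectors are available", so the loop above
 * retries with the reported count until it succeeds (0) or hard-fails
 * (< 0); the result is then rejected if it cannot cover one vector per
 * port plus the slow-path vector.  Minimal sketch of the negotiation
 * (names are illustrative):
 *
 *	while ((err = pci_enable_msix(pdev, entries, want)) > 0)
 *		want = err;
 *	if (err < 0)
 *		pci_disable_msix(pdev);
 */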
2903 static void __devinit print_port_info(struct adapter *adap,
2904 const struct adapter_info *ai)
2906 static const char *pci_variant[] = {
2907 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
2914 snprintf(buf, sizeof(buf), "%s x%d",
2915 pci_variant[adap->params.pci.variant],
2916 adap->params.pci.width);
2918 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2919 pci_variant[adap->params.pci.variant],
2920 adap->params.pci.speed, adap->params.pci.width);
2922 for_each_port(adap, i) {
2923 struct net_device *dev = adap->port[i];
2924 const struct port_info *pi = netdev_priv(dev);
2926 if (!test_bit(i, &adap->registered_device_map))
2928 printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
2929 dev->name, ai->desc, pi->phy.desc,
2930 is_offload(adap) ? "R" : "", adap->params.rev, buf,
2931 (adap->flags & USING_MSIX) ? " MSI-X" :
2932 (adap->flags & USING_MSI) ? " MSI" : "");
2933 if (adap->name == dev->name && adap->params.vpd.mclk)
2934 printk(KERN_INFO
2935 "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
2936 adap->name, t3_mc7_size(&adap->cm) >> 20,
2937 t3_mc7_size(&adap->pmtx) >> 20,
2938 t3_mc7_size(&adap->pmrx) >> 20,
2939 adap->params.vpd.sn);
2943 static const struct net_device_ops cxgb_netdev_ops = {
2944 .ndo_open = cxgb_open,
2945 .ndo_stop = cxgb_close,
2946 .ndo_start_xmit = t3_eth_xmit,
2947 .ndo_get_stats = cxgb_get_stats,
2948 .ndo_validate_addr = eth_validate_addr,
2949 .ndo_set_multicast_list = cxgb_set_rxmode,
2950 .ndo_do_ioctl = cxgb_ioctl,
2951 .ndo_change_mtu = cxgb_change_mtu,
2952 .ndo_set_mac_address = cxgb_set_mac_addr,
2953 .ndo_vlan_rx_register = vlan_rx_register,
2954 #ifdef CONFIG_NET_POLL_CONTROLLER
2955 .ndo_poll_controller = cxgb_netpoll,
2959 static int __devinit init_one(struct pci_dev *pdev,
2960 const struct pci_device_id *ent)
2962 static int version_printed;
2964 int i, err, pci_using_dac = 0;
2965 resource_size_t mmio_start, mmio_len;
2966 const struct adapter_info *ai;
2967 struct adapter *adapter = NULL;
2968 struct port_info *pi;
2970 if (!version_printed) {
2971 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
2976 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
2978 printk(KERN_ERR DRV_NAME
2979 ": cannot initialize work queue\n");
2984 err = pci_request_regions(pdev, DRV_NAME);
2986 /* Just info, some other driver may have claimed the device. */
2987 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
2991 err = pci_enable_device(pdev);
2993 dev_err(&pdev->dev, "cannot enable PCI device\n");
2994 goto out_release_regions;
2997 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
2998 pci_using_dac = 1;
2999 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3000 if (err) {
3001 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
3002 "coherent allocations\n");
3003 goto out_disable_device;
3004 }
3005 } else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
3006 dev_err(&pdev->dev, "no usable DMA configuration\n");
3007 goto out_disable_device;
3010 pci_set_master(pdev);
3011 pci_save_state(pdev);
3013 mmio_start = pci_resource_start(pdev, 0);
3014 mmio_len = pci_resource_len(pdev, 0);
3015 ai = t3_get_adapter_info(ent->driver_data);
3017 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3018 if (!adapter) {
3019 err = -ENOMEM;
3020 goto out_disable_device;
3021 }
3023 adapter->regs = ioremap_nocache(mmio_start, mmio_len);
3024 if (!adapter->regs) {
3025 dev_err(&pdev->dev, "cannot map device registers\n");
3026 err = -ENOMEM;
3027 goto out_free_adapter;
3028 }
3030 adapter->pdev = pdev;
3031 adapter->name = pci_name(pdev);
3032 adapter->msg_enable = dflt_msg_enable;
3033 adapter->mmio_len = mmio_len;
3035 mutex_init(&adapter->mdio_lock);
3036 spin_lock_init(&adapter->work_lock);
3037 spin_lock_init(&adapter->stats_lock);
3039 INIT_LIST_HEAD(&adapter->adapter_list);
3040 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
3041 INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
3042 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
3044 for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
3045 struct net_device *netdev;
3047 netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
3053 SET_NETDEV_DEV(netdev, &pdev->dev);
3055 adapter->port[i] = netdev;
3056 pi = netdev_priv(netdev);
3057 pi->adapter = adapter;
3058 pi->rx_offload = T3_RX_CSUM | T3_LRO;
3060 netif_carrier_off(netdev);
3061 netif_tx_stop_all_queues(netdev);
3062 netdev->irq = pdev->irq;
3063 netdev->mem_start = mmio_start;
3064 netdev->mem_end = mmio_start + mmio_len - 1;
3065 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
3066 netdev->features |= NETIF_F_GRO;
3067 if (pci_using_dac)
3068 netdev->features |= NETIF_F_HIGHDMA;
3070 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
3071 netdev->netdev_ops = &cxgb_netdev_ops;
3072 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
3075 pci_set_drvdata(pdev, adapter);
3076 if (t3_prep_adapter(adapter, ai, 1) < 0) {
3082 * The card is now ready to go. If any errors occur during device
3083 * registration we do not fail the whole card but rather proceed only
3084 * with the ports we manage to register successfully. However we must
3085 * register at least one net device.
3087 for_each_port(adapter, i) {
3088 err = register_netdev(adapter->port[i]);
3089 if (err)
3090 dev_warn(&pdev->dev,
3091 "cannot register net device %s, skipping\n",
3092 adapter->port[i]->name);
3095 * Change the name we use for messages to the name of
3096 * the first successfully registered interface.
3098 if (!adapter->registered_device_map)
3099 adapter->name = adapter->port[i]->name;
3101 __set_bit(i, &adapter->registered_device_map);
3104 if (!adapter->registered_device_map) {
3105 dev_err(&pdev->dev, "could not register any net devices\n");
3109 /* Driver's ready. Reflect it on LEDs */
3110 t3_led_ready(adapter);
3112 if (is_offload(adapter)) {
3113 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
3114 cxgb3_adapter_ofld(adapter);
3117 /* See what interrupts we'll be using */
3118 if (msi > 1 && cxgb_enable_msix(adapter) == 0)
3119 adapter->flags |= USING_MSIX;
3120 else if (msi > 0 && pci_enable_msi(pdev) == 0)
3121 adapter->flags |= USING_MSI;
3123 set_nqsets(adapter);
3125 err = sysfs_create_group(&adapter->port[0]->dev.kobj,
3126 &cxgb3_attr_group);
3128 print_port_info(adapter, ai);
3132 iounmap(adapter->regs);
3133 for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
3134 if (adapter->port[i])
3135 free_netdev(adapter->port[i]);
3141 pci_disable_device(pdev);
3142 out_release_regions:
3143 pci_release_regions(pdev);
3144 pci_set_drvdata(pdev, NULL);
3148 static void __devexit remove_one(struct pci_dev *pdev)
3150 struct adapter *adapter = pci_get_drvdata(pdev);
3155 t3_sge_stop(adapter);
3156 sysfs_remove_group(&adapter->port[0]->dev.kobj,
3157 &cxgb3_attr_group);
3159 if (is_offload(adapter)) {
3160 cxgb3_adapter_unofld(adapter);
3161 if (test_bit(OFFLOAD_DEVMAP_BIT,
3162 &adapter->open_device_map))
3163 offload_close(&adapter->tdev);
3166 for_each_port(adapter, i)
3167 if (test_bit(i, &adapter->registered_device_map))
3168 unregister_netdev(adapter->port[i]);
3170 t3_stop_sge_timers(adapter);
3171 t3_free_sge_resources(adapter);
3172 cxgb_disable_msi(adapter);
3174 for_each_port(adapter, i)
3175 if (adapter->port[i])
3176 free_netdev(adapter->port[i]);
3178 iounmap(adapter->regs);
3180 pci_release_regions(pdev);
3181 pci_disable_device(pdev);
3182 pci_set_drvdata(pdev, NULL);
3186 static struct pci_driver driver = {
3187 .name = DRV_NAME,
3188 .id_table = cxgb3_pci_tbl,
3189 .probe = init_one,
3190 .remove = __devexit_p(remove_one),
3191 .err_handler = &t3_err_handler,
3192 };
3194 static int __init cxgb3_init_module(void)
3195 {
3196 int ret;
3198 cxgb3_offload_init();
3200 ret = pci_register_driver(&driver);
3201 return ret;
3202 }
3204 static void __exit cxgb3_cleanup_module(void)
3205 {
3206 pci_unregister_driver(&driver);
3207 if (cxgb3_wq)
3208 destroy_workqueue(cxgb3_wq);
3209 }
3211 module_init(cxgb3_init_module);
3212 module_exit(cxgb3_cleanup_module);