2 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 #include <linux/module.h>
33 #include <linux/moduleparam.h>
34 #include <linux/init.h>
35 #include <linux/pci.h>
36 #include <linux/dma-mapping.h>
37 #include <linux/netdevice.h>
38 #include <linux/etherdevice.h>
39 #include <linux/if_vlan.h>
40 #include <linux/mii.h>
41 #include <linux/sockios.h>
42 #include <linux/workqueue.h>
43 #include <linux/proc_fs.h>
44 #include <linux/rtnetlink.h>
45 #include <linux/firmware.h>
46 #include <linux/log2.h>
47 #include <asm/uaccess.h>
50 #include "cxgb3_ioctl.h"
52 #include "cxgb3_offload.h"
55 #include "cxgb3_ctl_defs.h"
57 #include "firmware_exports.h"
60 MAX_TXQ_ENTRIES = 16384,
61 MAX_CTRL_TXQ_ENTRIES = 1024,
62 MAX_RSPQ_ENTRIES = 16384,
63 MAX_RX_BUFFERS = 16384,
64 MAX_RX_JUMBO_BUFFERS = 16384,
66 MIN_CTRL_TXQ_ENTRIES = 4,
67 MIN_RSPQ_ENTRIES = 32,
71 #define PORT_MASK ((1 << MAX_NPORTS) - 1)
73 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
74 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
75 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
77 #define EEPROM_MAGIC 0x38E2F10C
79 #define CH_DEVICE(devid, idx) \
80 { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
82 static const struct pci_device_id cxgb3_pci_tbl[] = {
83 CH_DEVICE(0x20, 0), /* PE9000 */
84 CH_DEVICE(0x21, 1), /* T302E */
85 CH_DEVICE(0x22, 2), /* T310E */
86 CH_DEVICE(0x23, 3), /* T320X */
87 CH_DEVICE(0x24, 1), /* T302X */
88 CH_DEVICE(0x25, 3), /* T320E */
89 CH_DEVICE(0x26, 2), /* T310X */
90 CH_DEVICE(0x30, 2), /* T3B10 */
91 CH_DEVICE(0x31, 3), /* T3B20 */
92 CH_DEVICE(0x32, 1), /* T3B02 */
93 CH_DEVICE(0x35, 6), /* T3C20-derived T3C10 */
97 MODULE_DESCRIPTION(DRV_DESC);
98 MODULE_AUTHOR("Chelsio Communications");
99 MODULE_LICENSE("Dual BSD/GPL");
100 MODULE_VERSION(DRV_VERSION);
101 MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
103 static int dflt_msg_enable = DFLT_MSG_ENABLE;
105 module_param(dflt_msg_enable, int, 0644);
106 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
109 * The driver uses the best interrupt scheme available on a platform in the
110 * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
111 * of these schemes the driver may consider as follows:
113 * msi = 2: choose from among all three options
114 * msi = 1: only consider MSI and pin interrupts
115 * msi = 0: force pin interrupts
119 module_param(msi, int, 0644);
120 MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
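/*
 * Illustrative sketch (not the driver's actual probe code) of how the "msi"
 * parameter above might gate interrupt-scheme selection; cxgb_enable_msix()
 * is a stand-in name for whatever helper sets up the MSI-X vector table:
 *
 *	if (msi > 1 && cxgb_enable_msix(adapter) == 0)
 *		adapter->flags |= USING_MSIX;
 *	else if (msi > 0 && pci_enable_msi(adapter->pdev) == 0)
 *		adapter->flags |= USING_MSI;
 *	(otherwise the driver falls back to legacy pin interrupts)
 */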
123 * The driver enables offload by default.
124 * To disable it, use ofld_disable = 1.
127 static int ofld_disable = 0;
129 module_param(ofld_disable, int, 0644);
130 MODULE_PARM_DESC(ofld_disable, "whether to disable offload at init time");
133 * We have work elements that we need to cancel when an interface is taken
134 * down. Normally the work elements would be executed by keventd but that
135 * can deadlock because of linkwatch. If our close method takes the rtnl
136 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
137 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
138 * for our work to complete. Get our own work queue to solve this.
140 static struct workqueue_struct *cxgb3_wq;
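/*
 * Minimal sketch, assuming typical workqueue usage, of how a private queue
 * like cxgb3_wq is created at module init and used instead of keventd; the
 * work-item name below is illustrative only:
 *
 *	cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
 *	if (!cxgb3_wq)
 *		return -ENOMEM;
 *	...
 *	queue_work(cxgb3_wq, &adapter->some_task);	(rather than schedule_work())
 *	...
 *	destroy_workqueue(cxgb3_wq);			(on module exit)
 */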
143 * link_report - show link status and link speed/duplex
144 * @dev: the net device whose port link settings are to be reported
146 * Shows the link status, speed, and duplex of a port.
148 static void link_report(struct net_device *dev)
150 if (!netif_carrier_ok(dev))
151 printk(KERN_INFO "%s: link down\n", dev->name);
153 const char *s = "10Mbps";
154 const struct port_info *p = netdev_priv(dev);
156 switch (p->link_config.speed) {
168 printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
169 p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
173 void t3_os_link_fault(struct adapter *adap, int port_id, int state)
175 struct net_device *dev = adap->port[port_id];
176 struct port_info *pi = netdev_priv(dev);
178 if (state == netif_carrier_ok(dev))
182 struct cmac *mac = &pi->mac;
184 netif_carrier_on(dev);
186 /* Clear local faults */
187 t3_xgm_intr_disable(adap, pi->port_id);
188 t3_read_reg(adap, A_XGM_INT_STATUS +
191 A_XGM_INT_CAUSE + pi->mac.offset,
194 t3_set_reg_field(adap,
197 F_XGM_INT, F_XGM_INT);
198 t3_xgm_intr_enable(adap, pi->port_id);
200 t3_mac_enable(mac, MAC_DIRECTION_TX);
202 netif_carrier_off(dev);
208 * t3_os_link_changed - handle link status changes
209 * @adapter: the adapter associated with the link change
210 * @port_id: the port index whose link status has changed
211 * @link_stat: the new status of the link
212 * @speed: the new speed setting
213 * @duplex: the new duplex setting
214 * @pause: the new flow-control setting
216 * This is the OS-dependent handler for link status changes. The OS
217 * neutral handler takes care of most of the processing for these events,
218 * then calls this handler for any OS-specific processing.
220 void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
221 int speed, int duplex, int pause)
223 struct net_device *dev = adapter->port[port_id];
224 struct port_info *pi = netdev_priv(dev);
225 struct cmac *mac = &pi->mac;
227 /* Skip changes from disabled ports. */
228 if (!netif_running(dev))
231 if (link_stat != netif_carrier_ok(dev)) {
233 t3_mac_enable(mac, MAC_DIRECTION_RX);
235 /* Clear local faults */
236 t3_xgm_intr_disable(adapter, pi->port_id);
237 t3_read_reg(adapter, A_XGM_INT_STATUS +
239 t3_write_reg(adapter,
240 A_XGM_INT_CAUSE + pi->mac.offset,
243 t3_set_reg_field(adapter,
244 A_XGM_INT_ENABLE + pi->mac.offset,
245 F_XGM_INT, F_XGM_INT);
246 t3_xgm_intr_enable(adapter, pi->port_id);
248 netif_carrier_on(dev);
250 netif_carrier_off(dev);
252 t3_xgm_intr_disable(adapter, pi->port_id);
253 t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
254 t3_set_reg_field(adapter,
255 A_XGM_INT_ENABLE + pi->mac.offset,
259 pi->phy.ops->power_down(&pi->phy, 1);
261 t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
262 t3_mac_disable(mac, MAC_DIRECTION_RX);
263 t3_link_start(&pi->phy, mac, &pi->link_config);
271 * t3_os_phymod_changed - handle PHY module changes
272 * @adap: the adapter whose PHY module changed
273 * @port_id: the port index of the changed PHY module
275 * This is the OS-dependent handler for PHY module changes. It is
276 * invoked when a PHY module is removed or inserted and performs any OS-specific processing.
279 void t3_os_phymod_changed(struct adapter *adap, int port_id)
281 static const char *mod_str[] = {
282 NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
285 const struct net_device *dev = adap->port[port_id];
286 const struct port_info *pi = netdev_priv(dev);
288 if (pi->phy.modtype == phy_modtype_none)
289 printk(KERN_INFO "%s: PHY module unplugged\n", dev->name);
291 printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name,
292 mod_str[pi->phy.modtype]);
295 static void cxgb_set_rxmode(struct net_device *dev)
297 struct t3_rx_mode rm;
298 struct port_info *pi = netdev_priv(dev);
300 init_rx_mode(&rm, dev, dev->mc_list);
301 t3_mac_set_rx_mode(&pi->mac, &rm);
305 * link_start - enable a port
306 * @dev: the device to enable
308 * Performs the MAC and PHY actions needed to enable a port.
310 static void link_start(struct net_device *dev)
312 struct t3_rx_mode rm;
313 struct port_info *pi = netdev_priv(dev);
314 struct cmac *mac = &pi->mac;
316 init_rx_mode(&rm, dev, dev->mc_list);
318 t3_mac_set_mtu(mac, dev->mtu);
319 t3_mac_set_address(mac, 0, dev->dev_addr);
320 t3_mac_set_rx_mode(mac, &rm);
321 t3_link_start(&pi->phy, mac, &pi->link_config);
322 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
325 static inline void cxgb_disable_msi(struct adapter *adapter)
327 if (adapter->flags & USING_MSIX) {
328 pci_disable_msix(adapter->pdev);
329 adapter->flags &= ~USING_MSIX;
330 } else if (adapter->flags & USING_MSI) {
331 pci_disable_msi(adapter->pdev);
332 adapter->flags &= ~USING_MSI;
337 * Interrupt handler for asynchronous events used with MSI-X.
339 static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
341 t3_slow_intr_handler(cookie);
346 * Name the MSI-X interrupts.
348 static void name_msix_vecs(struct adapter *adap)
350 int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
352 snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
353 adap->msix_info[0].desc[n] = 0;
355 for_each_port(adap, j) {
356 struct net_device *d = adap->port[j];
357 const struct port_info *pi = netdev_priv(d);
359 for (i = 0; i < pi->nqsets; i++, msi_idx++) {
360 snprintf(adap->msix_info[msi_idx].desc, n,
361 "%s-%d", d->name, pi->first_qset + i);
362 adap->msix_info[msi_idx].desc[n] = 0;
367 static int request_msix_data_irqs(struct adapter *adap)
369 int i, j, err, qidx = 0;
371 for_each_port(adap, i) {
372 int nqsets = adap2pinfo(adap, i)->nqsets;
374 for (j = 0; j < nqsets; ++j) {
375 err = request_irq(adap->msix_info[qidx + 1].vec,
376 t3_intr_handler(adap,
379 adap->msix_info[qidx + 1].desc,
380 &adap->sge.qs[qidx]);
383 free_irq(adap->msix_info[qidx + 1].vec,
384 &adap->sge.qs[qidx]);
393 static void free_irq_resources(struct adapter *adapter)
395 if (adapter->flags & USING_MSIX) {
398 free_irq(adapter->msix_info[0].vec, adapter);
399 for_each_port(adapter, i)
400 n += adap2pinfo(adapter, i)->nqsets;
402 for (i = 0; i < n; ++i)
403 free_irq(adapter->msix_info[i + 1].vec,
404 &adapter->sge.qs[i]);
406 free_irq(adapter->pdev->irq, adapter);
409 static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
414 while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
422 static int init_tp_parity(struct adapter *adap)
426 struct cpl_set_tcb_field *greq;
427 unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;
429 t3_tp_set_offload_mode(adap, 1);
431 for (i = 0; i < 16; i++) {
432 struct cpl_smt_write_req *req;
434 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
435 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
436 memset(req, 0, sizeof(*req));
437 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
438 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
440 t3_mgmt_tx(adap, skb);
443 for (i = 0; i < 2048; i++) {
444 struct cpl_l2t_write_req *req;
446 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
447 req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
448 memset(req, 0, sizeof(*req));
449 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
450 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
451 req->params = htonl(V_L2T_W_IDX(i));
452 t3_mgmt_tx(adap, skb);
455 for (i = 0; i < 2048; i++) {
456 struct cpl_rte_write_req *req;
458 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
459 req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
460 memset(req, 0, sizeof(*req));
461 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
462 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
463 req->l2t_idx = htonl(V_L2T_W_IDX(i));
464 t3_mgmt_tx(adap, skb);
467 skb = alloc_skb(sizeof(*greq), GFP_KERNEL | __GFP_NOFAIL);
468 greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
469 memset(greq, 0, sizeof(*greq));
470 greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
471 OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
472 greq->mask = cpu_to_be64(1);
473 t3_mgmt_tx(adap, skb);
475 i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
476 t3_tp_set_offload_mode(adap, 0);
481 * setup_rss - configure RSS
484 * Sets up RSS to distribute packets to multiple receive queues. We
485 * configure the RSS CPU lookup table to distribute to the number of HW
486 * receive queues, and the response queue lookup table to narrow that
487 * down to the response queues actually configured for each port.
488 * We always configure the RSS mapping for two ports since the mapping
489 * table has plenty of entries.
491 static void setup_rss(struct adapter *adap)
494 unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
495 unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
496 u8 cpus[SGE_QSETS + 1];
497 u16 rspq_map[RSS_TABLE_SIZE];
499 for (i = 0; i < SGE_QSETS; ++i)
501 cpus[SGE_QSETS] = 0xff; /* terminator */
503 for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
504 rspq_map[i] = i % nq0;
505 rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
508 t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
509 F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
510 V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
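/*
 * Worked example of the rspq_map layout built above (illustrative): with
 * nq0 = 4 queue sets on port 0 and nq1 = 2 on port 1, the first half of the
 * table cycles 0,1,2,3,0,1,... and the second half cycles 4,5,4,5,..., so
 * each port's RSS hash results land only on that port's own response queues.
 */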
513 static void init_napi(struct adapter *adap)
517 for (i = 0; i < SGE_QSETS; i++) {
518 struct sge_qset *qs = &adap->sge.qs[i];
521 netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
526 * netif_napi_add() can be called only once per napi_struct because it
527 * adds each new napi_struct to a list. Be careful not to call it a
528 * second time, e.g., during EEH recovery, by making a note of it.
530 adap->flags |= NAPI_INIT;
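/*
 * The guard described above, as it is used later in this file when bringing
 * the adapter up (e.g. after EEH recovery):
 *
 *	if (!(adap->flags & NAPI_INIT))
 *		init_napi(adap);
 */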
534 * Wait until all NAPI handlers are descheduled. This includes the handlers of
535 * both netdevices representing interfaces and the dummy ones for the extra queues.
538 static void quiesce_rx(struct adapter *adap)
542 for (i = 0; i < SGE_QSETS; i++)
543 if (adap->sge.qs[i].adap)
544 napi_disable(&adap->sge.qs[i].napi);
547 static void enable_all_napi(struct adapter *adap)
550 for (i = 0; i < SGE_QSETS; i++)
551 if (adap->sge.qs[i].adap)
552 napi_enable(&adap->sge.qs[i].napi);
556 * set_qset_lro - Turn a queue set's LRO capability on and off
557 * @dev: the device the qset is attached to
558 * @qset_idx: the queue set index
559 * @val: the LRO switch
561 * Sets LRO on or off for a particular queue set.
562 * The device's features flag is updated to reflect the LRO
563 * capability when all queues belonging to the device are configured with the same value.
566 static void set_qset_lro(struct net_device *dev, int qset_idx, int val)
568 struct port_info *pi = netdev_priv(dev);
569 struct adapter *adapter = pi->adapter;
571 adapter->params.sge.qset[qset_idx].lro = !!val;
572 adapter->sge.qs[qset_idx].lro_enabled = !!val;
576 * setup_sge_qsets - configure SGE Tx/Rx/response queues
579 * Determines how many sets of SGE queues to use and initializes them.
580 * We support multiple queue sets per port if we have MSI-X, otherwise
581 * just one queue set per port.
583 static int setup_sge_qsets(struct adapter *adap)
585 int i, j, err, irq_idx = 0, qset_idx = 0;
586 unsigned int ntxq = SGE_TXQ_PER_SET;
588 if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
591 for_each_port(adap, i) {
592 struct net_device *dev = adap->port[i];
593 struct port_info *pi = netdev_priv(dev);
595 pi->qs = &adap->sge.qs[pi->first_qset];
596 for (j = pi->first_qset; j < pi->first_qset + pi->nqsets;
598 set_qset_lro(dev, qset_idx, pi->rx_offload & T3_LRO);
599 err = t3_sge_alloc_qset(adap, qset_idx, 1,
600 (adap->flags & USING_MSIX) ? qset_idx + 1 :
602 &adap->params.sge.qset[qset_idx], ntxq, dev,
603 netdev_get_tx_queue(dev, j));
605 t3_free_sge_resources(adap);
614 static ssize_t attr_show(struct device *d, char *buf,
615 ssize_t(*format) (struct net_device *, char *))
619 /* Synchronize with ioctls that may shut down the device */
621 len = (*format) (to_net_dev(d), buf);
626 static ssize_t attr_store(struct device *d,
627 const char *buf, size_t len,
628 ssize_t(*set) (struct net_device *, unsigned int),
629 unsigned int min_val, unsigned int max_val)
635 if (!capable(CAP_NET_ADMIN))
638 val = simple_strtoul(buf, &endp, 0);
639 if (endp == buf || val < min_val || val > max_val)
643 ret = (*set) (to_net_dev(d), val);
650 #define CXGB3_SHOW(name, val_expr) \
651 static ssize_t format_##name(struct net_device *dev, char *buf) \
653 struct port_info *pi = netdev_priv(dev); \
654 struct adapter *adap = pi->adapter; \
655 return sprintf(buf, "%u\n", val_expr); \
657 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
660 return attr_show(d, buf, format_##name); \
663 static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
665 struct port_info *pi = netdev_priv(dev);
666 struct adapter *adap = pi->adapter;
667 int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;
669 if (adap->flags & FULL_INIT_DONE)
671 if (val && adap->params.rev == 0)
673 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
676 adap->params.mc5.nfilters = val;
680 static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
681 const char *buf, size_t len)
683 return attr_store(d, buf, len, set_nfilters, 0, ~0);
686 static ssize_t set_nservers(struct net_device *dev, unsigned int val)
688 struct port_info *pi = netdev_priv(dev);
689 struct adapter *adap = pi->adapter;
691 if (adap->flags & FULL_INIT_DONE)
693 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
696 adap->params.mc5.nservers = val;
700 static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
701 const char *buf, size_t len)
703 return attr_store(d, buf, len, set_nservers, 0, ~0);
706 #define CXGB3_ATTR_R(name, val_expr) \
707 CXGB3_SHOW(name, val_expr) \
708 static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
710 #define CXGB3_ATTR_RW(name, val_expr, store_method) \
711 CXGB3_SHOW(name, val_expr) \
712 static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
714 CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
715 CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
716 CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
718 static struct attribute *cxgb3_attrs[] = {
719 &dev_attr_cam_size.attr,
720 &dev_attr_nfilters.attr,
721 &dev_attr_nservers.attr,
725 static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
727 static ssize_t tm_attr_show(struct device *d,
728 char *buf, int sched)
730 struct port_info *pi = netdev_priv(to_net_dev(d));
731 struct adapter *adap = pi->adapter;
732 unsigned int v, addr, bpt, cpt;
735 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
737 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
738 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
741 bpt = (v >> 8) & 0xff;
744 len = sprintf(buf, "disabled\n");
746 v = (adap->params.vpd.cclk * 1000) / cpt;
747 len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
753 static ssize_t tm_attr_store(struct device *d,
754 const char *buf, size_t len, int sched)
756 struct port_info *pi = netdev_priv(to_net_dev(d));
757 struct adapter *adap = pi->adapter;
762 if (!capable(CAP_NET_ADMIN))
765 val = simple_strtoul(buf, &endp, 0);
766 if (endp == buf || val > 10000000)
770 ret = t3_config_sched(adap, val, sched);
777 #define TM_ATTR(name, sched) \
778 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
781 return tm_attr_show(d, buf, sched); \
783 static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
784 const char *buf, size_t len) \
786 return tm_attr_store(d, buf, len, sched); \
788 static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
799 static struct attribute *offload_attrs[] = {
800 &dev_attr_sched0.attr,
801 &dev_attr_sched1.attr,
802 &dev_attr_sched2.attr,
803 &dev_attr_sched3.attr,
804 &dev_attr_sched4.attr,
805 &dev_attr_sched5.attr,
806 &dev_attr_sched6.attr,
807 &dev_attr_sched7.attr,
811 static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
814 * Sends an sk_buff to an offload queue driver
815 * after dealing with any active network taps.
817 static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
822 ret = t3_offload_tx(tdev, skb);
827 static int write_smt_entry(struct adapter *adapter, int idx)
829 struct cpl_smt_write_req *req;
830 struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
835 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
836 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
837 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
838 req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */
840 memset(req->src_mac1, 0, sizeof(req->src_mac1));
841 memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
843 offload_tx(&adapter->tdev, skb);
847 static int init_smt(struct adapter *adapter)
851 for_each_port(adapter, i)
852 write_smt_entry(adapter, i);
856 static void init_port_mtus(struct adapter *adapter)
858 unsigned int mtus = adapter->port[0]->mtu;
860 if (adapter->port[1])
861 mtus |= adapter->port[1]->mtu << 16;
862 t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
865 static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
869 struct mngt_pktsched_wr *req;
872 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
873 req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
874 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
875 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
881 ret = t3_mgmt_tx(adap, skb);
886 static int bind_qsets(struct adapter *adap)
890 for_each_port(adap, i) {
891 const struct port_info *pi = adap2pinfo(adap, i);
893 for (j = 0; j < pi->nqsets; ++j) {
894 int ret = send_pktsched_cmd(adap, 1,
895 pi->first_qset + j, -1,
905 #define FW_FNAME "cxgb3/t3fw-%d.%d.%d.bin"
906 #define TPSRAM_NAME "cxgb3/t3%c_psram-%d.%d.%d.bin"
908 static int upgrade_fw(struct adapter *adap)
912 const struct firmware *fw;
913 struct device *dev = &adap->pdev->dev;
915 snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
916 FW_VERSION_MINOR, FW_VERSION_MICRO);
917 ret = request_firmware(&fw, buf, dev);
919 dev_err(dev, "could not upgrade firmware: unable to load %s\n",
923 ret = t3_load_fw(adap, fw->data, fw->size);
924 release_firmware(fw);
927 dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
928 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
930 dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
931 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
936 static inline char t3rev2char(struct adapter *adapter)
940 switch(adapter->params.rev) {
952 static int update_tpsram(struct adapter *adap)
954 const struct firmware *tpsram;
956 struct device *dev = &adap->pdev->dev;
960 rev = t3rev2char(adap);
964 snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
965 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
967 ret = request_firmware(&tpsram, buf, dev);
969 dev_err(dev, "could not load TP SRAM: unable to load %s\n",
974 ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
978 ret = t3_set_proto_sram(adap, tpsram->data);
981 "successful update of protocol engine "
983 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
985 dev_err(dev, "failed to update of protocol engine %d.%d.%d\n",
986 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
988 dev_err(dev, "loading protocol SRAM failed\n");
991 release_firmware(tpsram);
997 * cxgb_up - enable the adapter
998 * @adapter: adapter being enabled
1000 * Called when the first port is enabled, this function performs the
1001 * actions necessary to make an adapter operational, such as completing
1002 * the initialization of HW modules, and enabling interrupts.
1004 * Must be called with the rtnl lock held.
1006 static int cxgb_up(struct adapter *adap)
1010 if (!(adap->flags & FULL_INIT_DONE)) {
1011 err = t3_check_fw_version(adap);
1012 if (err == -EINVAL) {
1013 err = upgrade_fw(adap);
1014 CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
1015 FW_VERSION_MAJOR, FW_VERSION_MINOR,
1016 FW_VERSION_MICRO, err ? "failed" : "succeeded");
1019 err = t3_check_tpsram_version(adap);
1020 if (err == -EINVAL) {
1021 err = update_tpsram(adap);
1022 CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
1023 TP_VERSION_MAJOR, TP_VERSION_MINOR,
1024 TP_VERSION_MICRO, err ? "failed" : "succeeded");
1028 * Clear interrupts now to catch errors if t3_init_hw fails.
1029 * We clear them again later as initialization may trigger
1030 * conditions that can interrupt.
1032 t3_intr_clear(adap);
1034 err = t3_init_hw(adap, 0);
1038 t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
1039 t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
1041 err = setup_sge_qsets(adap);
1046 if (!(adap->flags & NAPI_INIT))
1049 t3_start_sge_timers(adap);
1050 adap->flags |= FULL_INIT_DONE;
1053 t3_intr_clear(adap);
1055 if (adap->flags & USING_MSIX) {
1056 name_msix_vecs(adap);
1057 err = request_irq(adap->msix_info[0].vec,
1058 t3_async_intr_handler, 0,
1059 adap->msix_info[0].desc, adap);
1063 err = request_msix_data_irqs(adap);
1065 free_irq(adap->msix_info[0].vec, adap);
1068 } else if ((err = request_irq(adap->pdev->irq,
1069 t3_intr_handler(adap,
1070 adap->sge.qs[0].rspq.
1072 (adap->flags & USING_MSI) ?
1077 enable_all_napi(adap);
1079 t3_intr_enable(adap);
1081 if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
1082 is_offload(adap) && init_tp_parity(adap) == 0)
1083 adap->flags |= TP_PARITY_INIT;
1085 if (adap->flags & TP_PARITY_INIT) {
1086 t3_write_reg(adap, A_TP_INT_CAUSE,
1087 F_CMCACHEPERR | F_ARPLUTPERR);
1088 t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
1091 if (!(adap->flags & QUEUES_BOUND)) {
1092 err = bind_qsets(adap);
1094 CH_ERR(adap, "failed to bind qsets, err %d\n", err);
1095 t3_intr_disable(adap);
1096 free_irq_resources(adap);
1099 adap->flags |= QUEUES_BOUND;
1105 CH_ERR(adap, "request_irq failed, err %d\n", err);
1110 * Release resources when all the ports and offloading have been stopped.
1112 static void cxgb_down(struct adapter *adapter)
1114 t3_sge_stop(adapter);
1115 spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */
1116 t3_intr_disable(adapter);
1117 spin_unlock_irq(&adapter->work_lock);
1119 free_irq_resources(adapter);
1120 flush_workqueue(cxgb3_wq); /* wait for external IRQ handler */
1121 quiesce_rx(adapter);
1124 static void schedule_chk_task(struct adapter *adap)
1128 timeo = adap->params.linkpoll_period ?
1129 (HZ * adap->params.linkpoll_period) / 10 :
1130 adap->params.stats_update_period * HZ;
1132 queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
1135 static int offload_open(struct net_device *dev)
1137 struct port_info *pi = netdev_priv(dev);
1138 struct adapter *adapter = pi->adapter;
1139 struct t3cdev *tdev = dev2t3cdev(dev);
1140 int adap_up = adapter->open_device_map & PORT_MASK;
1143 if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1146 if (!adap_up && (err = cxgb_up(adapter)) < 0)
1149 t3_tp_set_offload_mode(adapter, 1);
1150 tdev->lldev = adapter->port[0];
1151 err = cxgb3_offload_activate(adapter);
1155 init_port_mtus(adapter);
1156 t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
1157 adapter->params.b_wnd,
1158 adapter->params.rev == 0 ?
1159 adapter->port[0]->mtu : 0xffff);
1162 if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
1163 dev_dbg(&dev->dev, "cannot create sysfs group\n");
1165 /* Call back all registered clients */
1166 cxgb3_add_clients(tdev);
1169 /* restore them in case the offload module has changed them */
1171 t3_tp_set_offload_mode(adapter, 0);
1172 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1173 cxgb3_set_dummy_ops(tdev);
1178 static int offload_close(struct t3cdev *tdev)
1180 struct adapter *adapter = tdev2adap(tdev);
1182 if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1185 /* Call back all registered clients */
1186 cxgb3_remove_clients(tdev);
1188 sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
1191 cxgb3_set_dummy_ops(tdev);
1192 t3_tp_set_offload_mode(adapter, 0);
1193 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1195 if (!adapter->open_device_map)
1198 cxgb3_offload_deactivate(adapter);
1202 static int cxgb_open(struct net_device *dev)
1204 struct port_info *pi = netdev_priv(dev);
1205 struct adapter *adapter = pi->adapter;
1206 int other_ports = adapter->open_device_map & PORT_MASK;
1209 if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
1212 set_bit(pi->port_id, &adapter->open_device_map);
1213 if (is_offload(adapter) && !ofld_disable) {
1214 err = offload_open(dev);
1217 "Could not initialize offload capabilities\n");
1220 dev->real_num_tx_queues = pi->nqsets;
1222 t3_port_intr_enable(adapter, pi->port_id);
1223 netif_tx_start_all_queues(dev);
1225 schedule_chk_task(adapter);
1230 static int cxgb_close(struct net_device *dev)
1232 struct port_info *pi = netdev_priv(dev);
1233 struct adapter *adapter = pi->adapter;
1235 /* Stop link fault interrupts */
1236 t3_xgm_intr_disable(adapter, pi->port_id);
1237 t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
1239 t3_port_intr_disable(adapter, pi->port_id);
1240 netif_tx_stop_all_queues(dev);
1241 pi->phy.ops->power_down(&pi->phy, 1);
1242 netif_carrier_off(dev);
1243 t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
1245 spin_lock_irq(&adapter->work_lock); /* sync with update task */
1246 clear_bit(pi->port_id, &adapter->open_device_map);
1247 spin_unlock_irq(&adapter->work_lock);
1249 if (!(adapter->open_device_map & PORT_MASK))
1250 cancel_rearming_delayed_workqueue(cxgb3_wq,
1251 &adapter->adap_check_task);
1253 if (!adapter->open_device_map)
1259 static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
1261 struct port_info *pi = netdev_priv(dev);
1262 struct adapter *adapter = pi->adapter;
1263 struct net_device_stats *ns = &pi->netstats;
1264 const struct mac_stats *pstats;
1266 spin_lock(&adapter->stats_lock);
1267 pstats = t3_mac_update_stats(&pi->mac);
1268 spin_unlock(&adapter->stats_lock);
1270 ns->tx_bytes = pstats->tx_octets;
1271 ns->tx_packets = pstats->tx_frames;
1272 ns->rx_bytes = pstats->rx_octets;
1273 ns->rx_packets = pstats->rx_frames;
1274 ns->multicast = pstats->rx_mcast_frames;
1276 ns->tx_errors = pstats->tx_underrun;
1277 ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
1278 pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
1279 pstats->rx_fifo_ovfl;
1281 /* detailed rx_errors */
1282 ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
1283 ns->rx_over_errors = 0;
1284 ns->rx_crc_errors = pstats->rx_fcs_errs;
1285 ns->rx_frame_errors = pstats->rx_symbol_errs;
1286 ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
1287 ns->rx_missed_errors = pstats->rx_cong_drops;
1289 /* detailed tx_errors */
1290 ns->tx_aborted_errors = 0;
1291 ns->tx_carrier_errors = 0;
1292 ns->tx_fifo_errors = pstats->tx_underrun;
1293 ns->tx_heartbeat_errors = 0;
1294 ns->tx_window_errors = 0;
1298 static u32 get_msglevel(struct net_device *dev)
1300 struct port_info *pi = netdev_priv(dev);
1301 struct adapter *adapter = pi->adapter;
1303 return adapter->msg_enable;
1306 static void set_msglevel(struct net_device *dev, u32 val)
1308 struct port_info *pi = netdev_priv(dev);
1309 struct adapter *adapter = pi->adapter;
1311 adapter->msg_enable = val;
1314 static char stats_strings[][ETH_GSTRING_LEN] = {
1317 "TxMulticastFramesOK",
1318 "TxBroadcastFramesOK",
1325 "TxFrames128To255 ",
1326 "TxFrames256To511 ",
1327 "TxFrames512To1023 ",
1328 "TxFrames1024To1518 ",
1329 "TxFrames1519ToMax ",
1333 "RxMulticastFramesOK",
1334 "RxBroadcastFramesOK",
1345 "RxFrames128To255 ",
1346 "RxFrames256To511 ",
1347 "RxFrames512To1023 ",
1348 "RxFrames1024To1518 ",
1349 "RxFrames1519ToMax ",
1362 "CheckTXEnToggled ",
1368 static int get_sset_count(struct net_device *dev, int sset)
1372 return ARRAY_SIZE(stats_strings);
1378 #define T3_REGMAP_SIZE (3 * 1024)
1380 static int get_regs_len(struct net_device *dev)
1382 return T3_REGMAP_SIZE;
1385 static int get_eeprom_len(struct net_device *dev)
1390 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1392 struct port_info *pi = netdev_priv(dev);
1393 struct adapter *adapter = pi->adapter;
1397 spin_lock(&adapter->stats_lock);
1398 t3_get_fw_version(adapter, &fw_vers);
1399 t3_get_tp_version(adapter, &tp_vers);
1400 spin_unlock(&adapter->stats_lock);
1402 strcpy(info->driver, DRV_NAME);
1403 strcpy(info->version, DRV_VERSION);
1404 strcpy(info->bus_info, pci_name(adapter->pdev));
1406 strcpy(info->fw_version, "N/A");
1408 snprintf(info->fw_version, sizeof(info->fw_version),
1409 "%s %u.%u.%u TP %u.%u.%u",
1410 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1411 G_FW_VERSION_MAJOR(fw_vers),
1412 G_FW_VERSION_MINOR(fw_vers),
1413 G_FW_VERSION_MICRO(fw_vers),
1414 G_TP_VERSION_MAJOR(tp_vers),
1415 G_TP_VERSION_MINOR(tp_vers),
1416 G_TP_VERSION_MICRO(tp_vers));
1420 static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
1422 if (stringset == ETH_SS_STATS)
1423 memcpy(data, stats_strings, sizeof(stats_strings));
1426 static unsigned long collect_sge_port_stats(struct adapter *adapter,
1427 struct port_info *p, int idx)
1430 unsigned long tot = 0;
1432 for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
1433 tot += adapter->sge.qs[i].port_stats[idx];
1437 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1440 struct port_info *pi = netdev_priv(dev);
1441 struct adapter *adapter = pi->adapter;
1442 const struct mac_stats *s;
1444 spin_lock(&adapter->stats_lock);
1445 s = t3_mac_update_stats(&pi->mac);
1446 spin_unlock(&adapter->stats_lock);
1448 *data++ = s->tx_octets;
1449 *data++ = s->tx_frames;
1450 *data++ = s->tx_mcast_frames;
1451 *data++ = s->tx_bcast_frames;
1452 *data++ = s->tx_pause;
1453 *data++ = s->tx_underrun;
1454 *data++ = s->tx_fifo_urun;
1456 *data++ = s->tx_frames_64;
1457 *data++ = s->tx_frames_65_127;
1458 *data++ = s->tx_frames_128_255;
1459 *data++ = s->tx_frames_256_511;
1460 *data++ = s->tx_frames_512_1023;
1461 *data++ = s->tx_frames_1024_1518;
1462 *data++ = s->tx_frames_1519_max;
1464 *data++ = s->rx_octets;
1465 *data++ = s->rx_frames;
1466 *data++ = s->rx_mcast_frames;
1467 *data++ = s->rx_bcast_frames;
1468 *data++ = s->rx_pause;
1469 *data++ = s->rx_fcs_errs;
1470 *data++ = s->rx_symbol_errs;
1471 *data++ = s->rx_short;
1472 *data++ = s->rx_jabber;
1473 *data++ = s->rx_too_long;
1474 *data++ = s->rx_fifo_ovfl;
1476 *data++ = s->rx_frames_64;
1477 *data++ = s->rx_frames_65_127;
1478 *data++ = s->rx_frames_128_255;
1479 *data++ = s->rx_frames_256_511;
1480 *data++ = s->rx_frames_512_1023;
1481 *data++ = s->rx_frames_1024_1518;
1482 *data++ = s->rx_frames_1519_max;
1484 *data++ = pi->phy.fifo_errors;
1486 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1487 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1488 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1489 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1490 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1494 *data++ = s->rx_cong_drops;
1496 *data++ = s->num_toggled;
1497 *data++ = s->num_resets;
1499 *data++ = s->link_faults;
1502 static inline void reg_block_dump(struct adapter *ap, void *buf,
1503 unsigned int start, unsigned int end)
1505 u32 *p = buf + start;
1507 for (; start <= end; start += sizeof(u32))
1508 *p++ = t3_read_reg(ap, start);
1511 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1514 struct port_info *pi = netdev_priv(dev);
1515 struct adapter *ap = pi->adapter;
1519 * bits 0..9: chip version
1520 * bits 10..15: chip revision
1521 * bit 31: set for PCIe cards
1523 regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
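/*
 * Illustrative decode of the version word built above (sketch only):
 *
 *	chip_version  = regs->version & 0x3ff;		bits 0..9
 *	chip_revision = (regs->version >> 10) & 0x3f;	bits 10..15
 *	is_pcie_card  = regs->version >> 31;		bit 31
 */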
1526 * We skip the MAC statistics registers because they are clear-on-read.
1527 * Also reading multi-register stats would need to synchronize with the
1528 * periodic mac stats accumulation. Hard to justify the complexity.
1530 memset(buf, 0, T3_REGMAP_SIZE);
1531 reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1532 reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1533 reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1534 reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1535 reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1536 reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1537 XGM_REG(A_XGM_SERDES_STAT3, 1));
1538 reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1539 XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1542 static int restart_autoneg(struct net_device *dev)
1544 struct port_info *p = netdev_priv(dev);
1546 if (!netif_running(dev))
1548 if (p->link_config.autoneg != AUTONEG_ENABLE)
1550 p->phy.ops->autoneg_restart(&p->phy);
1554 static int cxgb3_phys_id(struct net_device *dev, u32 data)
1556 struct port_info *pi = netdev_priv(dev);
1557 struct adapter *adapter = pi->adapter;
1563 for (i = 0; i < data * 2; i++) {
1564 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1565 (i & 1) ? F_GPIO0_OUT_VAL : 0);
1566 if (msleep_interruptible(500))
1569 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1574 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1576 struct port_info *p = netdev_priv(dev);
1578 cmd->supported = p->link_config.supported;
1579 cmd->advertising = p->link_config.advertising;
1581 if (netif_carrier_ok(dev)) {
1582 cmd->speed = p->link_config.speed;
1583 cmd->duplex = p->link_config.duplex;
1589 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1590 cmd->phy_address = p->phy.addr;
1591 cmd->transceiver = XCVR_EXTERNAL;
1592 cmd->autoneg = p->link_config.autoneg;
1598 static int speed_duplex_to_caps(int speed, int duplex)
1604 if (duplex == DUPLEX_FULL)
1605 cap = SUPPORTED_10baseT_Full;
1607 cap = SUPPORTED_10baseT_Half;
1610 if (duplex == DUPLEX_FULL)
1611 cap = SUPPORTED_100baseT_Full;
1613 cap = SUPPORTED_100baseT_Half;
1616 if (duplex == DUPLEX_FULL)
1617 cap = SUPPORTED_1000baseT_Full;
1619 cap = SUPPORTED_1000baseT_Half;
1622 if (duplex == DUPLEX_FULL)
1623 cap = SUPPORTED_10000baseT_Full;
1628 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1629 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1630 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1631 ADVERTISED_10000baseT_Full)
1633 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1635 struct port_info *p = netdev_priv(dev);
1636 struct link_config *lc = &p->link_config;
1638 if (!(lc->supported & SUPPORTED_Autoneg)) {
1640 /* PHY offers a single speed/duplex. See if that's what's being requested. */
1643 if (cmd->autoneg == AUTONEG_DISABLE) {
1644 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1645 if (lc->supported & cap)
1651 if (cmd->autoneg == AUTONEG_DISABLE) {
1652 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1654 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
1656 lc->requested_speed = cmd->speed;
1657 lc->requested_duplex = cmd->duplex;
1658 lc->advertising = 0;
1660 cmd->advertising &= ADVERTISED_MASK;
1661 cmd->advertising &= lc->supported;
1662 if (!cmd->advertising)
1664 lc->requested_speed = SPEED_INVALID;
1665 lc->requested_duplex = DUPLEX_INVALID;
1666 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1668 lc->autoneg = cmd->autoneg;
1669 if (netif_running(dev))
1670 t3_link_start(&p->phy, &p->mac, lc);
1674 static void get_pauseparam(struct net_device *dev,
1675 struct ethtool_pauseparam *epause)
1677 struct port_info *p = netdev_priv(dev);
1679 epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1680 epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1681 epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1684 static int set_pauseparam(struct net_device *dev,
1685 struct ethtool_pauseparam *epause)
1687 struct port_info *p = netdev_priv(dev);
1688 struct link_config *lc = &p->link_config;
1690 if (epause->autoneg == AUTONEG_DISABLE)
1691 lc->requested_fc = 0;
1692 else if (lc->supported & SUPPORTED_Autoneg)
1693 lc->requested_fc = PAUSE_AUTONEG;
1697 if (epause->rx_pause)
1698 lc->requested_fc |= PAUSE_RX;
1699 if (epause->tx_pause)
1700 lc->requested_fc |= PAUSE_TX;
1701 if (lc->autoneg == AUTONEG_ENABLE) {
1702 if (netif_running(dev))
1703 t3_link_start(&p->phy, &p->mac, lc);
1705 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1706 if (netif_running(dev))
1707 t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1712 static u32 get_rx_csum(struct net_device *dev)
1714 struct port_info *p = netdev_priv(dev);
1716 return p->rx_offload & T3_RX_CSUM;
1719 static int set_rx_csum(struct net_device *dev, u32 data)
1721 struct port_info *p = netdev_priv(dev);
1724 p->rx_offload |= T3_RX_CSUM;
1728 p->rx_offload &= ~(T3_RX_CSUM | T3_LRO);
1729 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
1730 set_qset_lro(dev, i, 0);
1735 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1737 struct port_info *pi = netdev_priv(dev);
1738 struct adapter *adapter = pi->adapter;
1739 const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1741 e->rx_max_pending = MAX_RX_BUFFERS;
1742 e->rx_mini_max_pending = 0;
1743 e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1744 e->tx_max_pending = MAX_TXQ_ENTRIES;
1746 e->rx_pending = q->fl_size;
1747 e->rx_mini_pending = q->rspq_size;
1748 e->rx_jumbo_pending = q->jumbo_size;
1749 e->tx_pending = q->txq_size[0];
1752 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1754 struct port_info *pi = netdev_priv(dev);
1755 struct adapter *adapter = pi->adapter;
1756 struct qset_params *q;
1759 if (e->rx_pending > MAX_RX_BUFFERS ||
1760 e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1761 e->tx_pending > MAX_TXQ_ENTRIES ||
1762 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1763 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1764 e->rx_pending < MIN_FL_ENTRIES ||
1765 e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1766 e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1769 if (adapter->flags & FULL_INIT_DONE)
1772 q = &adapter->params.sge.qset[pi->first_qset];
1773 for (i = 0; i < pi->nqsets; ++i, ++q) {
1774 q->rspq_size = e->rx_mini_pending;
1775 q->fl_size = e->rx_pending;
1776 q->jumbo_size = e->rx_jumbo_pending;
1777 q->txq_size[0] = e->tx_pending;
1778 q->txq_size[1] = e->tx_pending;
1779 q->txq_size[2] = e->tx_pending;
1784 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1786 struct port_info *pi = netdev_priv(dev);
1787 struct adapter *adapter = pi->adapter;
1788 struct qset_params *qsp = &adapter->params.sge.qset[0];
1789 struct sge_qset *qs = &adapter->sge.qs[0];
1791 if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1794 qsp->coalesce_usecs = c->rx_coalesce_usecs;
1795 t3_update_qset_coalesce(qs, qsp);
1799 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1801 struct port_info *pi = netdev_priv(dev);
1802 struct adapter *adapter = pi->adapter;
1803 struct qset_params *q = adapter->params.sge.qset;
1805 c->rx_coalesce_usecs = q->coalesce_usecs;
1809 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1812 struct port_info *pi = netdev_priv(dev);
1813 struct adapter *adapter = pi->adapter;
1816 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1820 e->magic = EEPROM_MAGIC;
1821 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1822 err = t3_seeprom_read(adapter, i, (__le32 *) & buf[i]);
1825 memcpy(data, buf + e->offset, e->len);
1830 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1833 struct port_info *pi = netdev_priv(dev);
1834 struct adapter *adapter = pi->adapter;
1835 u32 aligned_offset, aligned_len;
1840 if (eeprom->magic != EEPROM_MAGIC)
1843 aligned_offset = eeprom->offset & ~3;
1844 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1846 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1847 buf = kmalloc(aligned_len, GFP_KERNEL);
1850 err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
1851 if (!err && aligned_len > 4)
1852 err = t3_seeprom_read(adapter,
1853 aligned_offset + aligned_len - 4,
1854 (__le32 *) & buf[aligned_len - 4]);
1857 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1861 err = t3_seeprom_wp(adapter, 0);
1865 for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
1866 err = t3_seeprom_write(adapter, aligned_offset, *p);
1867 aligned_offset += 4;
1871 err = t3_seeprom_wp(adapter, 1);
1878 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1882 memset(&wol->sopass, 0, sizeof(wol->sopass));
1885 static const struct ethtool_ops cxgb_ethtool_ops = {
1886 .get_settings = get_settings,
1887 .set_settings = set_settings,
1888 .get_drvinfo = get_drvinfo,
1889 .get_msglevel = get_msglevel,
1890 .set_msglevel = set_msglevel,
1891 .get_ringparam = get_sge_param,
1892 .set_ringparam = set_sge_param,
1893 .get_coalesce = get_coalesce,
1894 .set_coalesce = set_coalesce,
1895 .get_eeprom_len = get_eeprom_len,
1896 .get_eeprom = get_eeprom,
1897 .set_eeprom = set_eeprom,
1898 .get_pauseparam = get_pauseparam,
1899 .set_pauseparam = set_pauseparam,
1900 .get_rx_csum = get_rx_csum,
1901 .set_rx_csum = set_rx_csum,
1902 .set_tx_csum = ethtool_op_set_tx_csum,
1903 .set_sg = ethtool_op_set_sg,
1904 .get_link = ethtool_op_get_link,
1905 .get_strings = get_strings,
1906 .phys_id = cxgb3_phys_id,
1907 .nway_reset = restart_autoneg,
1908 .get_sset_count = get_sset_count,
1909 .get_ethtool_stats = get_stats,
1910 .get_regs_len = get_regs_len,
1911 .get_regs = get_regs,
1913 .set_tso = ethtool_op_set_tso,
1916 static int in_range(int val, int lo, int hi)
1918 return val < 0 || (val <= hi && val >= lo);
1921 static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1923 struct port_info *pi = netdev_priv(dev);
1924 struct adapter *adapter = pi->adapter;
1928 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1932 case CHELSIO_SET_QSET_PARAMS:{
1934 struct qset_params *q;
1935 struct ch_qset_params t;
1936 int q1 = pi->first_qset;
1937 int nqsets = pi->nqsets;
1939 if (!capable(CAP_NET_ADMIN))
1941 if (copy_from_user(&t, useraddr, sizeof(t)))
1943 if (t.qset_idx >= SGE_QSETS)
1945 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
1946 !in_range(t.cong_thres, 0, 255) ||
1947 !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
1949 !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
1951 !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
1952 MAX_CTRL_TXQ_ENTRIES) ||
1953 !in_range(t.fl_size[0], MIN_FL_ENTRIES,
1955 || !in_range(t.fl_size[1], MIN_FL_ENTRIES,
1956 MAX_RX_JUMBO_BUFFERS)
1957 || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
1961 if ((adapter->flags & FULL_INIT_DONE) && t.lro > 0)
1962 for_each_port(adapter, i) {
1963 pi = adap2pinfo(adapter, i);
1964 if (t.qset_idx >= pi->first_qset &&
1965 t.qset_idx < pi->first_qset + pi->nqsets &&
1966 !(pi->rx_offload & T3_RX_CSUM))
1970 if ((adapter->flags & FULL_INIT_DONE) &&
1971 (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
1972 t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
1973 t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
1974 t.polling >= 0 || t.cong_thres >= 0))
1977 /* Allow setting of any available qset when offload enabled */
1978 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
1980 for_each_port(adapter, i) {
1981 pi = adap2pinfo(adapter, i);
1982 nqsets += pi->first_qset + pi->nqsets;
1986 if (t.qset_idx < q1)
1988 if (t.qset_idx > q1 + nqsets - 1)
1991 q = &adapter->params.sge.qset[t.qset_idx];
1993 if (t.rspq_size >= 0)
1994 q->rspq_size = t.rspq_size;
1995 if (t.fl_size[0] >= 0)
1996 q->fl_size = t.fl_size[0];
1997 if (t.fl_size[1] >= 0)
1998 q->jumbo_size = t.fl_size[1];
1999 if (t.txq_size[0] >= 0)
2000 q->txq_size[0] = t.txq_size[0];
2001 if (t.txq_size[1] >= 0)
2002 q->txq_size[1] = t.txq_size[1];
2003 if (t.txq_size[2] >= 0)
2004 q->txq_size[2] = t.txq_size[2];
2005 if (t.cong_thres >= 0)
2006 q->cong_thres = t.cong_thres;
2007 if (t.intr_lat >= 0) {
2008 struct sge_qset *qs =
2009 &adapter->sge.qs[t.qset_idx];
2011 q->coalesce_usecs = t.intr_lat;
2012 t3_update_qset_coalesce(qs, q);
2014 if (t.polling >= 0) {
2015 if (adapter->flags & USING_MSIX)
2016 q->polling = t.polling;
2018 /* No polling with INTx for T3A */
2019 if (adapter->params.rev == 0 &&
2020 !(adapter->flags & USING_MSI))
2023 for (i = 0; i < SGE_QSETS; i++) {
2024 q = &adapter->params.sge.
2026 q->polling = t.polling;
2031 set_qset_lro(dev, t.qset_idx, t.lro);
2035 case CHELSIO_GET_QSET_PARAMS:{
2036 struct qset_params *q;
2037 struct ch_qset_params t;
2038 int q1 = pi->first_qset;
2039 int nqsets = pi->nqsets;
2042 if (copy_from_user(&t, useraddr, sizeof(t)))
2045 /* Display qsets for all ports when offload enabled */
2046 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2048 for_each_port(adapter, i) {
2049 pi = adap2pinfo(adapter, i);
2050 nqsets = pi->first_qset + pi->nqsets;
2054 if (t.qset_idx >= nqsets)
2057 q = &adapter->params.sge.qset[q1 + t.qset_idx];
2058 t.rspq_size = q->rspq_size;
2059 t.txq_size[0] = q->txq_size[0];
2060 t.txq_size[1] = q->txq_size[1];
2061 t.txq_size[2] = q->txq_size[2];
2062 t.fl_size[0] = q->fl_size;
2063 t.fl_size[1] = q->jumbo_size;
2064 t.polling = q->polling;
2066 t.intr_lat = q->coalesce_usecs;
2067 t.cong_thres = q->cong_thres;
2070 if (adapter->flags & USING_MSIX)
2071 t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
2073 t.vector = adapter->pdev->irq;
2075 if (copy_to_user(useraddr, &t, sizeof(t)))
2079 case CHELSIO_SET_QSET_NUM:{
2080 struct ch_reg edata;
2081 unsigned int i, first_qset = 0, other_qsets = 0;
2083 if (!capable(CAP_NET_ADMIN))
2085 if (adapter->flags & FULL_INIT_DONE)
2087 if (copy_from_user(&edata, useraddr, sizeof(edata)))
2089 if (edata.val < 1 ||
2090 (edata.val > 1 && !(adapter->flags & USING_MSIX)))
2093 for_each_port(adapter, i)
2094 if (adapter->port[i] && adapter->port[i] != dev)
2095 other_qsets += adap2pinfo(adapter, i)->nqsets;
2097 if (edata.val + other_qsets > SGE_QSETS)
2100 pi->nqsets = edata.val;
2102 for_each_port(adapter, i)
2103 if (adapter->port[i]) {
2104 pi = adap2pinfo(adapter, i);
2105 pi->first_qset = first_qset;
2106 first_qset += pi->nqsets;
2110 case CHELSIO_GET_QSET_NUM:{
2111 struct ch_reg edata;
2113 edata.cmd = CHELSIO_GET_QSET_NUM;
2114 edata.val = pi->nqsets;
2115 if (copy_to_user(useraddr, &edata, sizeof(edata)))
2119 case CHELSIO_LOAD_FW:{
2121 struct ch_mem_range t;
2123 if (!capable(CAP_SYS_RAWIO))
2125 if (copy_from_user(&t, useraddr, sizeof(t)))
2127 /* Check t.len sanity ? */
2128 fw_data = kmalloc(t.len, GFP_KERNEL);
2133 (fw_data, useraddr + sizeof(t), t.len)) {
2138 ret = t3_load_fw(adapter, fw_data, t.len);
2144 case CHELSIO_SETMTUTAB:{
2148 if (!is_offload(adapter))
2150 if (!capable(CAP_NET_ADMIN))
2152 if (offload_running(adapter))
2154 if (copy_from_user(&m, useraddr, sizeof(m)))
2156 if (m.nmtus != NMTUS)
2158 if (m.mtus[0] < 81) /* accommodate SACK */
2161 /* MTUs must be in ascending order */
2162 for (i = 1; i < NMTUS; ++i)
2163 if (m.mtus[i] < m.mtus[i - 1])
2166 memcpy(adapter->params.mtus, m.mtus,
2167 sizeof(adapter->params.mtus));
2170 case CHELSIO_GET_PM:{
2171 struct tp_params *p = &adapter->params.tp;
2172 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
2174 if (!is_offload(adapter))
2176 m.tx_pg_sz = p->tx_pg_size;
2177 m.tx_num_pg = p->tx_num_pgs;
2178 m.rx_pg_sz = p->rx_pg_size;
2179 m.rx_num_pg = p->rx_num_pgs;
2180 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
2181 if (copy_to_user(useraddr, &m, sizeof(m)))
2185 case CHELSIO_SET_PM:{
2187 struct tp_params *p = &adapter->params.tp;
2189 if (!is_offload(adapter))
2191 if (!capable(CAP_NET_ADMIN))
2193 if (adapter->flags & FULL_INIT_DONE)
2195 if (copy_from_user(&m, useraddr, sizeof(m)))
2197 if (!is_power_of_2(m.rx_pg_sz) ||
2198 !is_power_of_2(m.tx_pg_sz))
2199 return -EINVAL; /* not power of 2 */
2200 if (!(m.rx_pg_sz & 0x14000))
2201 return -EINVAL; /* not 16KB or 64KB */
2202 if (!(m.tx_pg_sz & 0x1554000))
2204 if (m.tx_num_pg == -1)
2205 m.tx_num_pg = p->tx_num_pgs;
2206 if (m.rx_num_pg == -1)
2207 m.rx_num_pg = p->rx_num_pgs;
2208 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
2210 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
2211 m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
2213 p->rx_pg_size = m.rx_pg_sz;
2214 p->tx_pg_size = m.tx_pg_sz;
2215 p->rx_num_pgs = m.rx_num_pg;
2216 p->tx_num_pgs = m.tx_num_pg;
2219 case CHELSIO_GET_MEM:{
2220 struct ch_mem_range t;
2224 if (!is_offload(adapter))
2226 if (!(adapter->flags & FULL_INIT_DONE))
2227 return -EIO; /* need the memory controllers */
2228 if (copy_from_user(&t, useraddr, sizeof(t)))
2230 if ((t.addr & 7) || (t.len & 7))
2232 if (t.mem_id == MEM_CM)
2234 else if (t.mem_id == MEM_PMRX)
2235 mem = &adapter->pmrx;
2236 else if (t.mem_id == MEM_PMTX)
2237 mem = &adapter->pmtx;
2243 * bits 0..9: chip version
2244 * bits 10..15: chip revision
2246 t.version = 3 | (adapter->params.rev << 10);
2247 if (copy_to_user(useraddr, &t, sizeof(t)))
2251 * Read 256 bytes at a time as len can be large and we don't
2252 * want to use huge intermediate buffers.
2254 useraddr += sizeof(t); /* advance to start of buffer */
2256 unsigned int chunk =
2257 min_t(unsigned int, t.len, sizeof(buf));
2260 t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
2264 if (copy_to_user(useraddr, buf, chunk))
2272 case CHELSIO_SET_TRACE_FILTER:{
2274 const struct trace_params *tp;
2276 if (!capable(CAP_NET_ADMIN))
2278 if (!offload_running(adapter))
2280 if (copy_from_user(&t, useraddr, sizeof(t)))
2283 tp = (const struct trace_params *)&t.sip;
2285 t3_config_trace_filter(adapter, tp, 0,
2289 t3_config_trace_filter(adapter, tp, 1,
2300 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2302 struct mii_ioctl_data *data = if_mii(req);
2303 struct port_info *pi = netdev_priv(dev);
2304 struct adapter *adapter = pi->adapter;
2309 data->phy_id = pi->phy.addr;
2313 struct cphy *phy = &pi->phy;
2315 if (!phy->mdio_read)
2317 if (is_10G(adapter)) {
2318 mmd = data->phy_id >> 8;
2321 else if (mmd > MDIO_DEV_VEND2)
2325 phy->mdio_read(adapter, data->phy_id & 0x1f,
2326 mmd, data->reg_num, &val);
2329 phy->mdio_read(adapter, data->phy_id & 0x1f,
2330 0, data->reg_num & 0x1f,
2333 data->val_out = val;
2337 struct cphy *phy = &pi->phy;
2339 if (!capable(CAP_NET_ADMIN))
2341 if (!phy->mdio_write)
2343 if (is_10G(adapter)) {
2344 mmd = data->phy_id >> 8;
2347 else if (mmd > MDIO_DEV_VEND2)
2351 phy->mdio_write(adapter,
2352 data->phy_id & 0x1f, mmd,
2357 phy->mdio_write(adapter,
2358 data->phy_id & 0x1f, 0,
2359 data->reg_num & 0x1f,
2364 return cxgb_extension_ioctl(dev, req->ifr_data);
2371 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2373 struct port_info *pi = netdev_priv(dev);
2374 struct adapter *adapter = pi->adapter;
2377 if (new_mtu < 81) /* accommodate SACK */
2379 if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2382 init_port_mtus(adapter);
2383 if (adapter->params.rev == 0 && offload_running(adapter))
2384 t3_load_mtus(adapter, adapter->params.mtus,
2385 adapter->params.a_wnd, adapter->params.b_wnd,
2386 adapter->port[0]->mtu);
2390 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2392 struct port_info *pi = netdev_priv(dev);
2393 struct adapter *adapter = pi->adapter;
2394 struct sockaddr *addr = p;
2396 if (!is_valid_ether_addr(addr->sa_data))
2399 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2400 t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2401 if (offload_running(adapter))
2402 write_smt_entry(adapter, pi->port_id);
2407 * t3_synchronize_rx - wait for current Rx processing on a port to complete
2408 * @adap: the adapter
2411 * Ensures that current Rx processing on any of the queues associated with
2412 * the given port completes before returning. We do this by acquiring and
2413 * releasing the locks of the response queues associated with the port.
2415 static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2419 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
2420 struct sge_rspq *q = &adap->sge.qs[i].rspq;
2422 spin_lock_irq(&q->lock);
2423 spin_unlock_irq(&q->lock);
2427 static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2429 struct port_info *pi = netdev_priv(dev);
2430 struct adapter *adapter = pi->adapter;
2433 if (adapter->params.rev > 0)
2434 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2436 /* single control for all ports */
2437 unsigned int i, have_vlans = 0;
2438 for_each_port(adapter, i)
2439 have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2441 t3_set_vlan_accel(adapter, 1, have_vlans);
2443 t3_synchronize_rx(adapter, pi);
2446 #ifdef CONFIG_NET_POLL_CONTROLLER
2447 static void cxgb_netpoll(struct net_device *dev)
2449 struct port_info *pi = netdev_priv(dev);
2450 struct adapter *adapter = pi->adapter;
2453 for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2454 struct sge_qset *qs = &adapter->sge.qs[qidx];
2457 if (adapter->flags & USING_MSIX)
2462 t3_intr_handler(adapter, qs->rspq.polling) (0, source);
2468 * Periodic accumulation of MAC statistics.
2470 static void mac_stats_update(struct adapter *adapter)
2474 for_each_port(adapter, i) {
2475 struct net_device *dev = adapter->port[i];
2476 struct port_info *p = netdev_priv(dev);
2478 if (netif_running(dev)) {
2479 spin_lock(&adapter->stats_lock);
2480 t3_mac_update_stats(&p->mac);
2481 spin_unlock(&adapter->stats_lock);
2486 static void check_link_status(struct adapter *adapter)
2490 for_each_port(adapter, i) {
2491 struct net_device *dev = adapter->port[i];
2492 struct port_info *p = netdev_priv(dev);
2494 spin_lock_irq(&adapter->work_lock);
2495 if (p->link_fault) {
2496 spin_unlock_irq(&adapter->work_lock);
2499 spin_unlock_irq(&adapter->work_lock);
2501 if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
2502 t3_xgm_intr_disable(adapter, i);
2503 t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2505 t3_link_changed(adapter, i);
2506 t3_xgm_intr_enable(adapter, i);
2511 static void check_t3b2_mac(struct adapter *adapter)
2515 if (!rtnl_trylock()) /* synchronize with ifdown */
2518 for_each_port(adapter, i) {
2519 struct net_device *dev = adapter->port[i];
2520 struct port_info *p = netdev_priv(dev);
2523 if (!netif_running(dev))
2527 if (netif_running(dev) && netif_carrier_ok(dev))
2528 status = t3b2_mac_watchdog_task(&p->mac);
2530 p->mac.stats.num_toggled++;
2531 else if (status == 2) {
2532 struct cmac *mac = &p->mac;
2534 t3_mac_set_mtu(mac, dev->mtu);
2535 t3_mac_set_address(mac, 0, dev->dev_addr);
2536 cxgb_set_rxmode(dev);
2537 t3_link_start(&p->phy, mac, &p->link_config);
2538 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2539 t3_port_intr_enable(adapter, p->port_id);
2540 p->mac.stats.num_resets++;
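/*
 * Note (derived from the handling above): a watchdog status of 2 is the
 * heavy case and triggers a full MAC re-initialisation (MTU, address,
 * Rx mode, link restart, MAC enable and port interrupt enable), counted
 * in num_resets, while the lighter case only bumps num_toggled.
 */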
2547 static void t3_adap_check_task(struct work_struct *work)
2549 struct adapter *adapter = container_of(work, struct adapter,
2550 adap_check_task.work);
2551 const struct adapter_params *p = &adapter->params;
2553 unsigned int v, status, reset;
2555 adapter->check_task_cnt++;
2557 /* Check link status for PHYs without interrupts */
2558 if (p->linkpoll_period)
2559 check_link_status(adapter);
2561 /* Accumulate MAC stats if needed */
2562 if (!p->linkpoll_period ||
2563 (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2564 p->stats_update_period) {
2565 mac_stats_update(adapter);
2566 adapter->check_task_cnt = 0;
2569 if (p->rev == T3_REV_B2)
2570 check_t3b2_mac(adapter);
2573 * Scan the XGMACs for conditions that we want to monitor by periodic
2574 * polling rather than via an interrupt, because they would otherwise
2575 * flood the system with interrupts and we only really need to know
2576 * that they are occurring. For each condition we count every detection
2577 * and then clear its raw status so that the next polling pass starts
2578 * fresh.
2580 for_each_port(adapter, port) {
2581 struct cmac *mac = &adap2pinfo(adapter, port)->mac;
2584 cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
2586 if (cause & F_RXFIFO_OVERFLOW) {
2587 mac->stats.rx_fifo_ovfl++;
2588 reset |= F_RXFIFO_OVERFLOW;
2591 t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
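/*
 * Writing the accumulated 'reset' bits back to the interrupt-cause
 * register is assumed here to be write-one-to-clear, so each polling
 * pass acknowledges exactly the conditions it just counted and the next
 * pass starts from a clean slate.
 */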
2595 * We do the same as above for FL_EMPTY interrupts.
2597 status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2600 if (status & F_FLEMPTY) {
2601 struct sge_qset *qs = &adapter->sge.qs[0];
2606 v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
2610 qs->fl[i].empty += (v & 1);
2618 t3_write_reg(adapter, A_SG_INT_CAUSE, reset);
2620 /* Schedule the next check update if any port is active. */
2621 spin_lock_irq(&adapter->work_lock);
2622 if (adapter->open_device_map & PORT_MASK)
2623 schedule_chk_task(adapter);
2624 spin_unlock_irq(&adapter->work_lock);
2628 * Processes external (PHY) interrupts in process context.
2630 static void ext_intr_task(struct work_struct *work)
2632 struct adapter *adapter = container_of(work, struct adapter,
2633 ext_intr_handler_task);
2636 /* Disable link fault interrupts */
2637 for_each_port(adapter, i) {
2638 struct net_device *dev = adapter->port[i];
2639 struct port_info *p = netdev_priv(dev);
2641 t3_xgm_intr_disable(adapter, i);
2642 t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2645 /* Re-enable link fault interrupts */
2646 t3_phy_intr_handler(adapter);
2648 for_each_port(adapter, i)
2649 t3_xgm_intr_enable(adapter, i);
2651 /* Now reenable external interrupts */
2652 spin_lock_irq(&adapter->work_lock);
2653 if (adapter->slow_intr_mask) {
2654 adapter->slow_intr_mask |= F_T3DBG;
2655 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2656 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2657 adapter->slow_intr_mask);
2659 spin_unlock_irq(&adapter->work_lock);
2663 * Interrupt-context handler for external (PHY) interrupts.
2665 void t3_os_ext_intr_handler(struct adapter *adapter)
2668 * Schedule a task to handle external interrupts as they may be slow
2669 * and we use a mutex to protect MDIO registers. We disable PHY
2670 * interrupts in the meantime and let the task reenable them when it's done.
2673 spin_lock(&adapter->work_lock);
2674 if (adapter->slow_intr_mask) {
2675 adapter->slow_intr_mask &= ~F_T3DBG;
2676 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2677 adapter->slow_intr_mask);
2678 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2680 spin_unlock(&adapter->work_lock);
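/*
 * Summary of the handshake between this handler and ext_intr_task()
 * above (restating the visible code): the interrupt handler removes
 * F_T3DBG from slow_intr_mask and rewrites A_PL_INT_ENABLE0 so further
 * PHY interrupts stay masked, then queues the task.  The task services
 * the PHY via t3_phy_intr_handler() in process context (MDIO access may
 * sleep), re-adds F_T3DBG, acknowledges the pending cause bit and
 * restores the enable mask.
 */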
2683 static void link_fault_task(struct work_struct *work)
2685 struct adapter *adapter = container_of(work, struct adapter,
2686 link_fault_handler_task);
2689 for_each_port(adapter, i) {
2690 struct net_device *netdev = adapter->port[i];
2691 struct port_info *pi = netdev_priv(netdev);
2694 t3_link_fault(adapter, i);
2698 void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
2700 struct net_device *netdev = adapter->port[port_id];
2701 struct port_info *pi = netdev_priv(netdev);
2703 spin_lock(&adapter->work_lock);
2705 queue_work(cxgb3_wq, &adapter->link_fault_handler_task);
2706 spin_unlock(&adapter->work_lock);
2709 static int t3_adapter_error(struct adapter *adapter, int reset)
2713 if (is_offload(adapter) &&
2714 test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2715 cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
2716 offload_close(&adapter->tdev);
2719 /* Stop all ports */
2720 for_each_port(adapter, i) {
2721 struct net_device *netdev = adapter->port[i];
2723 if (netif_running(netdev))
2727 /* Stop SGE timers */
2728 t3_stop_sge_timers(adapter);
2730 adapter->flags &= ~FULL_INIT_DONE;
2733 ret = t3_reset_adapter(adapter);
2735 pci_disable_device(adapter->pdev);
2740 static int t3_reenable_adapter(struct adapter *adapter)
2742 if (pci_enable_device(adapter->pdev)) {
2743 dev_err(&adapter->pdev->dev,
2744 "Cannot re-enable PCI device after reset.\n");
2747 pci_set_master(adapter->pdev);
2748 pci_restore_state(adapter->pdev);
2750 /* Free sge resources */
2751 t3_free_sge_resources(adapter);
2753 if (t3_replay_prep_adapter(adapter))
2761 static void t3_resume_ports(struct adapter *adapter)
2765 /* Restart the ports */
2766 for_each_port(adapter, i) {
2767 struct net_device *netdev = adapter->port[i];
2769 if (netif_running(netdev)) {
2770 if (cxgb_open(netdev)) {
2771 dev_err(&adapter->pdev->dev,
2772 "can't bring device back up"
2779 if (is_offload(adapter) && !ofld_disable)
2780 cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
2784 * Processes a fatal error.
2785 * Bring the ports down, reset the chip, bring the ports back up.
2787 static void fatal_error_task(struct work_struct *work)
2789 struct adapter *adapter = container_of(work, struct adapter,
2790 fatal_error_handler_task);
2794 err = t3_adapter_error(adapter, 1);
2796 err = t3_reenable_adapter(adapter);
2798 t3_resume_ports(adapter);
2800 CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
2804 void t3_fatal_err(struct adapter *adapter)
2806 unsigned int fw_status[4];
2808 if (adapter->flags & FULL_INIT_DONE) {
2809 t3_sge_stop(adapter);
2810 t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
2811 t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
2812 t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
2813 t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
2815 spin_lock(&adapter->work_lock);
2816 t3_intr_disable(adapter);
2817 queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
2818 spin_unlock(&adapter->work_lock);
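/*
 * The actual recovery (port shutdown, chip reset, port resume) is
 * deferred to fatal_error_handler_task, i.e. fatal_error_task() above,
 * which runs in process context; this function only quiesces the SGE
 * and MACs and masks interrupts.
 */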
2820 CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2821 if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2822 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2823 fw_status[0], fw_status[1],
2824 fw_status[2], fw_status[3]);
2828 * t3_io_error_detected - called when PCI error is detected
2829 * @pdev: Pointer to PCI device
2830 * @state: The current pci connection state
2832 * This function is called after a PCI bus error affecting
2833 * this device has been detected.
2835 static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
2836 pci_channel_state_t state)
2838 struct adapter *adapter = pci_get_drvdata(pdev);
2841 ret = t3_adapter_error(adapter, 0);
2843 /* Request a slot reset. */
2844 return PCI_ERS_RESULT_NEED_RESET;
2848 * t3_io_slot_reset - called after the pci bus has been reset.
2849 * @pdev: Pointer to PCI device
2851 * Restart the card from scratch, as if from a cold-boot.
2853 static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
2855 struct adapter *adapter = pci_get_drvdata(pdev);
2857 if (!t3_reenable_adapter(adapter))
2858 return PCI_ERS_RESULT_RECOVERED;
2860 return PCI_ERS_RESULT_DISCONNECT;
2864 * t3_io_resume - called when traffic can start flowing again.
2865 * @pdev: Pointer to PCI device
2867 * This callback is called when the error recovery driver tells us that
2868 * it's OK to resume normal operation.
2870 static void t3_io_resume(struct pci_dev *pdev)
2872 struct adapter *adapter = pci_get_drvdata(pdev);
2874 CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
2875 t3_read_reg(adapter, A_PCIE_PEX_ERR));
2877 t3_resume_ports(adapter);
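/*
 * The error handlers registered below implement the standard PCI
 * error-recovery sequence: t3_io_error_detected() quiesces the adapter
 * and requests a slot reset, t3_io_slot_reset() re-enables and
 * re-initialises the device after the bus has been reset, and
 * t3_io_resume() restarts the ports once the PCI core says traffic may
 * flow again.
 */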
2880 static struct pci_error_handlers t3_err_handler = {
2881 .error_detected = t3_io_error_detected,
2882 .slot_reset = t3_io_slot_reset,
2883 .resume = t3_io_resume,
2887 * Set the number of qsets based on the number of CPUs and the number of ports,
2888 * not to exceed the number of available qsets, assuming there are enough qsets per port.
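/*
 * Illustrative arithmetic (assumed values, not taken from this file):
 * with 2 ports, SGE_QSETS == 16 and 17 MSI-X vectors, nqsets starts at
 * msix_nvectors - 1 == 16; since 2 * 16 exceeds SGE_QSETS the count is
 * presumably split across the ports (the reducing statement is elided
 * here), giving 8 per port, which is then capped at num_online_cpus()
 * and forced to 1 for 4-port adapters or when nothing is left.
 */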
2891 static void set_nqsets(struct adapter *adap)
2894 int num_cpus = num_online_cpus();
2895 int hwports = adap->params.nports;
2896 int nqsets = adap->msix_nvectors - 1;
2898 if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
2900 (hwports * nqsets > SGE_QSETS ||
2901 num_cpus >= nqsets / hwports))
2903 if (nqsets > num_cpus)
2905 if (nqsets < 1 || hwports == 4)
2910 for_each_port(adap, i) {
2911 struct port_info *pi = adap2pinfo(adap, i);
2914 pi->nqsets = nqsets;
2915 j = pi->first_qset + nqsets;
2917 dev_info(&adap->pdev->dev,
2918 "Port %d using %d queue sets.\n", i, nqsets);
2922 static int __devinit cxgb_enable_msix(struct adapter *adap)
2924 struct msix_entry entries[SGE_QSETS + 1];
2928 vectors = ARRAY_SIZE(entries);
2929 for (i = 0; i < vectors; ++i)
2930 entries[i].entry = i;
2932 while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0)
2935 if (!err && vectors < (adap->params.nports + 1))
2939 for (i = 0; i < vectors; ++i)
2940 adap->msix_info[i].vec = entries[i].vector;
2941 adap->msix_nvectors = vectors;
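/*
 * Note on the retry loop above: with this older pci_enable_msix() API a
 * positive return value is the number of vectors the system could
 * actually provide, so the request is retried with that smaller count
 * (the elided loop body shrinks 'vectors'); zero means success and a
 * negative value is a hard error.  If fewer than nports + 1 vectors are
 * obtained, the check above rejects the MSI-X configuration.
 */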
2947 static void __devinit print_port_info(struct adapter *adap,
2948 const struct adapter_info *ai)
2950 static const char *pci_variant[] = {
2951 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
2958 snprintf(buf, sizeof(buf), "%s x%d",
2959 pci_variant[adap->params.pci.variant],
2960 adap->params.pci.width);
2962 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2963 pci_variant[adap->params.pci.variant],
2964 adap->params.pci.speed, adap->params.pci.width);
2966 for_each_port(adap, i) {
2967 struct net_device *dev = adap->port[i];
2968 const struct port_info *pi = netdev_priv(dev);
2970 if (!test_bit(i, &adap->registered_device_map))
2972 printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
2973 dev->name, ai->desc, pi->phy.desc,
2974 is_offload(adap) ? "R" : "", adap->params.rev, buf,
2975 (adap->flags & USING_MSIX) ? " MSI-X" :
2976 (adap->flags & USING_MSI) ? " MSI" : "");
2977 if (adap->name == dev->name && adap->params.vpd.mclk)
2979 "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
2980 adap->name, t3_mc7_size(&adap->cm) >> 20,
2981 t3_mc7_size(&adap->pmtx) >> 20,
2982 t3_mc7_size(&adap->pmrx) >> 20,
2983 adap->params.vpd.sn);
2987 static const struct net_device_ops cxgb_netdev_ops = {
2988 .ndo_open = cxgb_open,
2989 .ndo_stop = cxgb_close,
2990 .ndo_start_xmit = t3_eth_xmit,
2991 .ndo_get_stats = cxgb_get_stats,
2992 .ndo_validate_addr = eth_validate_addr,
2993 .ndo_set_multicast_list = cxgb_set_rxmode,
2994 .ndo_do_ioctl = cxgb_ioctl,
2995 .ndo_change_mtu = cxgb_change_mtu,
2996 .ndo_set_mac_address = cxgb_set_mac_addr,
2997 .ndo_vlan_rx_register = vlan_rx_register,
2998 #ifdef CONFIG_NET_POLL_CONTROLLER
2999 .ndo_poll_controller = cxgb_netpoll,
3003 static int __devinit init_one(struct pci_dev *pdev,
3004 const struct pci_device_id *ent)
3006 static int version_printed;
3008 int i, err, pci_using_dac = 0;
3009 resource_size_t mmio_start, mmio_len;
3010 const struct adapter_info *ai;
3011 struct adapter *adapter = NULL;
3012 struct port_info *pi;
3014 if (!version_printed) {
3015 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
3020 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
3022 printk(KERN_ERR DRV_NAME
3023 ": cannot initialize work queue\n");
3028 err = pci_request_regions(pdev, DRV_NAME);
3030 /* Just info, some other driver may have claimed the device. */
3031 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
3035 err = pci_enable_device(pdev);
3037 dev_err(&pdev->dev, "cannot enable PCI device\n");
3038 goto out_release_regions;
3041 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
3043 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3045 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
3046 "coherent allocations\n");
3047 goto out_disable_device;
3049 } else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
3050 dev_err(&pdev->dev, "no usable DMA configuration\n");
3051 goto out_disable_device;
3054 pci_set_master(pdev);
3055 pci_save_state(pdev);
3057 mmio_start = pci_resource_start(pdev, 0);
3058 mmio_len = pci_resource_len(pdev, 0);
3059 ai = t3_get_adapter_info(ent->driver_data);
3061 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3064 goto out_disable_device;
3067 adapter->regs = ioremap_nocache(mmio_start, mmio_len);
3068 if (!adapter->regs) {
3069 dev_err(&pdev->dev, "cannot map device registers\n");
3071 goto out_free_adapter;
3074 adapter->pdev = pdev;
3075 adapter->name = pci_name(pdev);
3076 adapter->msg_enable = dflt_msg_enable;
3077 adapter->mmio_len = mmio_len;
3079 mutex_init(&adapter->mdio_lock);
3080 spin_lock_init(&adapter->work_lock);
3081 spin_lock_init(&adapter->stats_lock);
3083 INIT_LIST_HEAD(&adapter->adapter_list);
3084 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
3085 INIT_WORK(&adapter->link_fault_handler_task, link_fault_task);
3086 INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
3087 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
3089 for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
3090 struct net_device *netdev;
3092 netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
3098 SET_NETDEV_DEV(netdev, &pdev->dev);
3100 adapter->port[i] = netdev;
3101 pi = netdev_priv(netdev);
3102 pi->adapter = adapter;
3103 pi->rx_offload = T3_RX_CSUM | T3_LRO;
3105 netif_carrier_off(netdev);
3106 netif_tx_stop_all_queues(netdev);
3107 netdev->irq = pdev->irq;
3108 netdev->mem_start = mmio_start;
3109 netdev->mem_end = mmio_start + mmio_len - 1;
3110 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
3111 netdev->features |= NETIF_F_LLTX;
3112 netdev->features |= NETIF_F_GRO;
3114 netdev->features |= NETIF_F_HIGHDMA;
3116 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
3117 netdev->netdev_ops = &cxgb_netdev_ops;
3118 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
3121 pci_set_drvdata(pdev, adapter);
3122 if (t3_prep_adapter(adapter, ai, 1) < 0) {
3128 * The card is now ready to go. If any errors occur during device
3129 * registration, we do not fail the whole card but rather proceed only
3130 * with the ports we manage to register successfully. However, we must
3131 * register at least one net device.
3133 for_each_port(adapter, i) {
3134 err = register_netdev(adapter->port[i]);
3136 dev_warn(&pdev->dev,
3137 "cannot register net device %s, skipping\n",
3138 adapter->port[i]->name);
3141 * Change the name we use for messages to the name of
3142 * the first successfully registered interface.
3144 if (!adapter->registered_device_map)
3145 adapter->name = adapter->port[i]->name;
3147 __set_bit(i, &adapter->registered_device_map);
3150 if (!adapter->registered_device_map) {
3151 dev_err(&pdev->dev, "could not register any net devices\n");
3155 /* Driver's ready. Reflect it on LEDs */
3156 t3_led_ready(adapter);
3158 if (is_offload(adapter)) {
3159 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
3160 cxgb3_adapter_ofld(adapter);
3163 /* See what interrupts we'll be using */
3164 if (msi > 1 && cxgb_enable_msix(adapter) == 0)
3165 adapter->flags |= USING_MSIX;
3166 else if (msi > 0 && pci_enable_msi(pdev) == 0)
3167 adapter->flags |= USING_MSI;
3169 set_nqsets(adapter);
3171 err = sysfs_create_group(&adapter->port[0]->dev.kobj,
3174 print_port_info(adapter, ai);
3178 iounmap(adapter->regs);
3179 for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
3180 if (adapter->port[i])
3181 free_netdev(adapter->port[i]);
3187 pci_disable_device(pdev);
3188 out_release_regions:
3189 pci_release_regions(pdev);
3190 pci_set_drvdata(pdev, NULL);
3194 static void __devexit remove_one(struct pci_dev *pdev)
3196 struct adapter *adapter = pci_get_drvdata(pdev);
3201 t3_sge_stop(adapter);
3202 sysfs_remove_group(&adapter->port[0]->dev.kobj,
3205 if (is_offload(adapter)) {
3206 cxgb3_adapter_unofld(adapter);
3207 if (test_bit(OFFLOAD_DEVMAP_BIT,
3208 &adapter->open_device_map))
3209 offload_close(&adapter->tdev);
3212 for_each_port(adapter, i)
3213 if (test_bit(i, &adapter->registered_device_map))
3214 unregister_netdev(adapter->port[i]);
3216 t3_stop_sge_timers(adapter);
3217 t3_free_sge_resources(adapter);
3218 cxgb_disable_msi(adapter);
3220 for_each_port(adapter, i)
3221 if (adapter->port[i])
3222 free_netdev(adapter->port[i]);
3224 iounmap(adapter->regs);
3226 pci_release_regions(pdev);
3227 pci_disable_device(pdev);
3228 pci_set_drvdata(pdev, NULL);
3232 static struct pci_driver driver = {
3234 .id_table = cxgb3_pci_tbl,
3236 .remove = __devexit_p(remove_one),
3237 .err_handler = &t3_err_handler,
3240 static int __init cxgb3_init_module(void)
3244 cxgb3_offload_init();
3246 ret = pci_register_driver(&driver);
3250 static void __exit cxgb3_cleanup_module(void)
3252 pci_unregister_driver(&driver);
3254 destroy_workqueue(cxgb3_wq);
3257 module_init(cxgb3_init_module);
3258 module_exit(cxgb3_cleanup_module);