2 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 #include <linux/module.h>
33 #include <linux/moduleparam.h>
34 #include <linux/init.h>
35 #include <linux/pci.h>
36 #include <linux/dma-mapping.h>
37 #include <linux/netdevice.h>
38 #include <linux/etherdevice.h>
39 #include <linux/if_vlan.h>
40 #include <linux/mdio.h>
41 #include <linux/sockios.h>
42 #include <linux/workqueue.h>
43 #include <linux/proc_fs.h>
44 #include <linux/rtnetlink.h>
45 #include <linux/firmware.h>
46 #include <linux/log2.h>
47 #include <asm/uaccess.h>
50 #include "cxgb3_ioctl.h"
52 #include "cxgb3_offload.h"
55 #include "cxgb3_ctl_defs.h"
57 #include "firmware_exports.h"
60 MAX_TXQ_ENTRIES = 16384,
61 MAX_CTRL_TXQ_ENTRIES = 1024,
62 MAX_RSPQ_ENTRIES = 16384,
63 MAX_RX_BUFFERS = 16384,
64 MAX_RX_JUMBO_BUFFERS = 16384,
66 MIN_CTRL_TXQ_ENTRIES = 4,
67 MIN_RSPQ_ENTRIES = 32,
71 #define PORT_MASK ((1 << MAX_NPORTS) - 1)
73 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
74 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
75 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
77 #define EEPROM_MAGIC 0x38E2F10C
79 #define CH_DEVICE(devid, idx) \
80 { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
82 static const struct pci_device_id cxgb3_pci_tbl[] = {
83 CH_DEVICE(0x20, 0), /* PE9000 */
84 CH_DEVICE(0x21, 1), /* T302E */
85 CH_DEVICE(0x22, 2), /* T310E */
86 CH_DEVICE(0x23, 3), /* T320X */
87 CH_DEVICE(0x24, 1), /* T302X */
88 CH_DEVICE(0x25, 3), /* T320E */
89 CH_DEVICE(0x26, 2), /* T310X */
90 CH_DEVICE(0x30, 2), /* T3B10 */
91 CH_DEVICE(0x31, 3), /* T3B20 */
92 CH_DEVICE(0x32, 1), /* T3B02 */
93 CH_DEVICE(0x35, 6), /* T3C20-derived T3C10 */
94 CH_DEVICE(0x36, 3), /* S320E-CR */
95 CH_DEVICE(0x37, 7), /* N320E-G2 */
99 MODULE_DESCRIPTION(DRV_DESC);
100 MODULE_AUTHOR("Chelsio Communications");
101 MODULE_LICENSE("Dual BSD/GPL");
102 MODULE_VERSION(DRV_VERSION);
103 MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
105 static int dflt_msg_enable = DFLT_MSG_ENABLE;
107 module_param(dflt_msg_enable, int, 0644);
108 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
111 * The driver uses the best interrupt scheme available on a platform in the
112 * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
113 * of these schemes the driver may consider as follows:
115 * msi = 2: choose from among all three options
116 * msi = 1: only consider MSI and pin interrupts
117 * msi = 0: force pin interrupts
121 module_param(msi, int, 0644);
122 MODULE_PARM_DESC(msi, "interrupt scheme: 0 = INTx only, 1 = MSI or INTx, 2 = MSI-X, MSI or INTx");
125 * The driver enables offload by default.
126 * To disable it, set ofld_disable = 1.
129 static int ofld_disable = 0;
131 module_param(ofld_disable, int, 0644);
132 MODULE_PARM_DESC(ofld_disable, "set to 1 to disable offload at init time");
135 * We have work elements that we need to cancel when an interface is taken
136 * down. Normally the work elements would be executed by keventd but that
137 * can deadlock because of linkwatch. If our close method takes the rtnl
138 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
139 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
140 * for our work to complete. Get our own work queue to solve this.
142 static struct workqueue_struct *cxgb3_wq;
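/*
 * Illustrative sketch (not part of the driver logic itself): deferred work
 * is queued on the private workqueue rather than the shared kernel one, e.g.
 *
 *	queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
 *
 * instead of schedule_delayed_work(&adap->adap_check_task, timeo), so a
 * linkwatch event blocked on the rtnl lock cannot stall it.  adap_check_task
 * is the periodic check task used later in this file.
 */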
145 * link_report - show link status and link speed/duplex
146 * @dev: the device whose port settings are to be reported
148 * Shows the link status, speed, and duplex of a port.
150 static void link_report(struct net_device *dev)
152 if (!netif_carrier_ok(dev))
153 printk(KERN_INFO "%s: link down\n", dev->name);
155 const char *s = "10Mbps";
156 const struct port_info *p = netdev_priv(dev);
158 switch (p->link_config.speed) {
170 printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
171 p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
175 void t3_os_link_fault(struct adapter *adap, int port_id, int state)
177 struct net_device *dev = adap->port[port_id];
178 struct port_info *pi = netdev_priv(dev);
180 if (state == netif_carrier_ok(dev))
184 struct cmac *mac = &pi->mac;
186 netif_carrier_on(dev);
188 /* Clear local faults */
189 t3_xgm_intr_disable(adap, pi->port_id);
190 t3_read_reg(adap, A_XGM_INT_STATUS +
193 A_XGM_INT_CAUSE + pi->mac.offset,
196 t3_set_reg_field(adap,
199 F_XGM_INT, F_XGM_INT);
200 t3_xgm_intr_enable(adap, pi->port_id);
202 t3_mac_enable(mac, MAC_DIRECTION_TX);
204 netif_carrier_off(dev);
210 * t3_os_link_changed - handle link status changes
211 * @adapter: the adapter associated with the link change
212 * @port_id: the port index whose link status has changed
213 * @link_stat: the new status of the link
214 * @speed: the new speed setting
215 * @duplex: the new duplex setting
216 * @pause: the new flow-control setting
218 * This is the OS-dependent handler for link status changes. The OS
219 * neutral handler takes care of most of the processing for these events,
220 * then calls this handler for any OS-specific processing.
222 void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
223 int speed, int duplex, int pause)
225 struct net_device *dev = adapter->port[port_id];
226 struct port_info *pi = netdev_priv(dev);
227 struct cmac *mac = &pi->mac;
229 /* Skip changes from disabled ports. */
230 if (!netif_running(dev))
233 if (link_stat != netif_carrier_ok(dev)) {
235 t3_mac_enable(mac, MAC_DIRECTION_RX);
237 /* Clear local faults */
238 t3_xgm_intr_disable(adapter, pi->port_id);
239 t3_read_reg(adapter, A_XGM_INT_STATUS +
241 t3_write_reg(adapter,
242 A_XGM_INT_CAUSE + pi->mac.offset,
245 t3_set_reg_field(adapter,
246 A_XGM_INT_ENABLE + pi->mac.offset,
247 F_XGM_INT, F_XGM_INT);
248 t3_xgm_intr_enable(adapter, pi->port_id);
250 netif_carrier_on(dev);
252 netif_carrier_off(dev);
254 t3_xgm_intr_disable(adapter, pi->port_id);
255 t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
256 t3_set_reg_field(adapter,
257 A_XGM_INT_ENABLE + pi->mac.offset,
261 pi->phy.ops->power_down(&pi->phy, 1);
263 t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
264 t3_mac_disable(mac, MAC_DIRECTION_RX);
265 t3_link_start(&pi->phy, mac, &pi->link_config);
273 * t3_os_phymod_changed - handle PHY module changes
274 * @adap: the adapter associated with the PHY module change
275 * @port_id: the port index whose PHY module changed
277 * This is the OS-dependent handler for PHY module changes. It is
278 * invoked when a PHY module is removed or inserted, for any OS-specific processing.
281 void t3_os_phymod_changed(struct adapter *adap, int port_id)
283 static const char *mod_str[] = {
284 NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
287 const struct net_device *dev = adap->port[port_id];
288 const struct port_info *pi = netdev_priv(dev);
290 if (pi->phy.modtype == phy_modtype_none)
291 printk(KERN_INFO "%s: PHY module unplugged\n", dev->name);
293 printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name,
294 mod_str[pi->phy.modtype]);
297 static void cxgb_set_rxmode(struct net_device *dev)
299 struct t3_rx_mode rm;
300 struct port_info *pi = netdev_priv(dev);
302 init_rx_mode(&rm, dev, dev->mc_list);
303 t3_mac_set_rx_mode(&pi->mac, &rm);
307 * link_start - enable a port
308 * @dev: the device to enable
310 * Performs the MAC and PHY actions needed to enable a port.
312 static void link_start(struct net_device *dev)
314 struct t3_rx_mode rm;
315 struct port_info *pi = netdev_priv(dev);
316 struct cmac *mac = &pi->mac;
318 init_rx_mode(&rm, dev, dev->mc_list);
320 t3_mac_set_mtu(mac, dev->mtu);
321 t3_mac_set_address(mac, 0, dev->dev_addr);
322 t3_mac_set_rx_mode(mac, &rm);
323 t3_link_start(&pi->phy, mac, &pi->link_config);
324 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
327 static inline void cxgb_disable_msi(struct adapter *adapter)
329 if (adapter->flags & USING_MSIX) {
330 pci_disable_msix(adapter->pdev);
331 adapter->flags &= ~USING_MSIX;
332 } else if (adapter->flags & USING_MSI) {
333 pci_disable_msi(adapter->pdev);
334 adapter->flags &= ~USING_MSI;
339 * Interrupt handler for asynchronous events used with MSI-X.
341 static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
343 t3_slow_intr_handler(cookie);
348 * Name the MSI-X interrupts.
350 static void name_msix_vecs(struct adapter *adap)
352 int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
354 snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
355 adap->msix_info[0].desc[n] = 0;
357 for_each_port(adap, j) {
358 struct net_device *d = adap->port[j];
359 const struct port_info *pi = netdev_priv(d);
361 for (i = 0; i < pi->nqsets; i++, msi_idx++) {
362 snprintf(adap->msix_info[msi_idx].desc, n,
363 "%s-%d", d->name, pi->first_qset + i);
364 adap->msix_info[msi_idx].desc[n] = 0;
369 static int request_msix_data_irqs(struct adapter *adap)
371 int i, j, err, qidx = 0;
373 for_each_port(adap, i) {
374 int nqsets = adap2pinfo(adap, i)->nqsets;
376 for (j = 0; j < nqsets; ++j) {
377 err = request_irq(adap->msix_info[qidx + 1].vec,
378 t3_intr_handler(adap,
381 adap->msix_info[qidx + 1].desc,
382 &adap->sge.qs[qidx]);
385 free_irq(adap->msix_info[qidx + 1].vec,
386 &adap->sge.qs[qidx]);
395 static void free_irq_resources(struct adapter *adapter)
397 if (adapter->flags & USING_MSIX) {
400 free_irq(adapter->msix_info[0].vec, adapter);
401 for_each_port(adapter, i)
402 n += adap2pinfo(adapter, i)->nqsets;
404 for (i = 0; i < n; ++i)
405 free_irq(adapter->msix_info[i + 1].vec,
406 &adapter->sge.qs[i]);
408 free_irq(adapter->pdev->irq, adapter);
411 static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
416 while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
424 static int init_tp_parity(struct adapter *adap)
428 struct cpl_set_tcb_field *greq;
429 unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;
431 t3_tp_set_offload_mode(adap, 1);
433 for (i = 0; i < 16; i++) {
434 struct cpl_smt_write_req *req;
436 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
438 skb = adap->nofail_skb;
442 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
443 memset(req, 0, sizeof(*req));
444 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
445 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
447 t3_mgmt_tx(adap, skb);
448 if (skb == adap->nofail_skb) {
449 await_mgmt_replies(adap, cnt, i + 1);
450 adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
451 if (!adap->nofail_skb)
456 for (i = 0; i < 2048; i++) {
457 struct cpl_l2t_write_req *req;
459 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
461 skb = adap->nofail_skb;
465 req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
466 memset(req, 0, sizeof(*req));
467 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
468 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
469 req->params = htonl(V_L2T_W_IDX(i));
470 t3_mgmt_tx(adap, skb);
471 if (skb == adap->nofail_skb) {
472 await_mgmt_replies(adap, cnt, 16 + i + 1);
473 adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
474 if (!adap->nofail_skb)
479 for (i = 0; i < 2048; i++) {
480 struct cpl_rte_write_req *req;
482 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
484 skb = adap->nofail_skb;
488 req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
489 memset(req, 0, sizeof(*req));
490 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
491 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
492 req->l2t_idx = htonl(V_L2T_W_IDX(i));
493 t3_mgmt_tx(adap, skb);
494 if (skb == adap->nofail_skb) {
495 await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
496 adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
497 if (!adap->nofail_skb)
502 skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
504 skb = adap->nofail_skb;
508 greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
509 memset(greq, 0, sizeof(*greq));
510 greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
511 OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
512 greq->mask = cpu_to_be64(1);
513 t3_mgmt_tx(adap, skb);
515 i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
516 if (skb == adap->nofail_skb) {
517 i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
518 adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
521 t3_tp_set_offload_mode(adap, 0);
525 t3_tp_set_offload_mode(adap, 0);
530 * setup_rss - configure RSS
533 * Sets up RSS to distribute packets to multiple receive queues. We
534 * configure the RSS CPU lookup table to distribute to the number of HW
535 * receive queues, and the response queue lookup table to narrow that
536 * down to the response queues actually configured for each port.
537 * We always configure the RSS mapping for two ports since the mapping
538 * table has plenty of entries.
540 static void setup_rss(struct adapter *adap)
543 unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
544 unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
545 u8 cpus[SGE_QSETS + 1];
546 u16 rspq_map[RSS_TABLE_SIZE];
548 for (i = 0; i < SGE_QSETS; ++i)
550 cpus[SGE_QSETS] = 0xff; /* terminator */
552 for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
553 rspq_map[i] = i % nq0;
554 rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
557 t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
558 F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
559 V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
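/*
 * Worked example (illustrative): with nq0 = 4 queue sets on port 0 and
 * nq1 = 2 on port 1, the first half of rspq_map cycles 0,1,2,3,0,1,2,3,...
 * while the second half cycles 4,5,4,5,... ((i % nq1) + nq0), so RSS hashes
 * for each port land only on that port's own queue sets.
 */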
562 static void init_napi(struct adapter *adap)
566 for (i = 0; i < SGE_QSETS; i++) {
567 struct sge_qset *qs = &adap->sge.qs[i];
570 netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
575 * netif_napi_add() can be called only once per napi_struct because it
576 * adds each new napi_struct to a list. Avoid calling it a second time,
577 * e.g., during EEH recovery, by recording that it has been done (NAPI_INIT).
579 adap->flags |= NAPI_INIT;
583 * Wait until all NAPI handlers are descheduled. This includes the handlers of
584 * both netdevices representing interfaces and the dummy ones for the extra queues.
587 static void quiesce_rx(struct adapter *adap)
591 for (i = 0; i < SGE_QSETS; i++)
592 if (adap->sge.qs[i].adap)
593 napi_disable(&adap->sge.qs[i].napi);
596 static void enable_all_napi(struct adapter *adap)
599 for (i = 0; i < SGE_QSETS; i++)
600 if (adap->sge.qs[i].adap)
601 napi_enable(&adap->sge.qs[i].napi);
605 * set_qset_lro - Turn a queue set's LRO capability on and off
606 * @dev: the device the qset is attached to
607 * @qset_idx: the queue set index
608 * @val: the LRO switch
610 * Sets LRO on or off for a particular queue set.
611 * The device's features flag is updated to reflect the LRO
612 * capability when all queues belonging to the device are in LRO mode.
615 static void set_qset_lro(struct net_device *dev, int qset_idx, int val)
617 struct port_info *pi = netdev_priv(dev);
618 struct adapter *adapter = pi->adapter;
620 adapter->params.sge.qset[qset_idx].lro = !!val;
621 adapter->sge.qs[qset_idx].lro_enabled = !!val;
625 * setup_sge_qsets - configure SGE Tx/Rx/response queues
628 * Determines how many sets of SGE queues to use and initializes them.
629 * We support multiple queue sets per port if we have MSI-X, otherwise
630 * just one queue set per port.
632 static int setup_sge_qsets(struct adapter *adap)
634 int i, j, err, irq_idx = 0, qset_idx = 0;
635 unsigned int ntxq = SGE_TXQ_PER_SET;
637 if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
640 for_each_port(adap, i) {
641 struct net_device *dev = adap->port[i];
642 struct port_info *pi = netdev_priv(dev);
644 pi->qs = &adap->sge.qs[pi->first_qset];
645 for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
646 set_qset_lro(dev, qset_idx, pi->rx_offload & T3_LRO);
647 err = t3_sge_alloc_qset(adap, qset_idx, 1,
648 (adap->flags & USING_MSIX) ? qset_idx + 1 :
650 &adap->params.sge.qset[qset_idx], ntxq, dev,
651 netdev_get_tx_queue(dev, j));
653 t3_free_sge_resources(adap);
662 static ssize_t attr_show(struct device *d, char *buf,
663 ssize_t(*format) (struct net_device *, char *))
667 /* Synchronize with ioctls that may shut down the device */
669 len = (*format) (to_net_dev(d), buf);
674 static ssize_t attr_store(struct device *d,
675 const char *buf, size_t len,
676 ssize_t(*set) (struct net_device *, unsigned int),
677 unsigned int min_val, unsigned int max_val)
683 if (!capable(CAP_NET_ADMIN))
686 val = simple_strtoul(buf, &endp, 0);
687 if (endp == buf || val < min_val || val > max_val)
691 ret = (*set) (to_net_dev(d), val);
698 #define CXGB3_SHOW(name, val_expr) \
699 static ssize_t format_##name(struct net_device *dev, char *buf) \
701 struct port_info *pi = netdev_priv(dev); \
702 struct adapter *adap = pi->adapter; \
703 return sprintf(buf, "%u\n", val_expr); \
705 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
708 return attr_show(d, buf, format_##name); \
711 static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
713 struct port_info *pi = netdev_priv(dev);
714 struct adapter *adap = pi->adapter;
715 int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;
717 if (adap->flags & FULL_INIT_DONE)
719 if (val && adap->params.rev == 0)
721 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
724 adap->params.mc5.nfilters = val;
728 static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
729 const char *buf, size_t len)
731 return attr_store(d, buf, len, set_nfilters, 0, ~0);
734 static ssize_t set_nservers(struct net_device *dev, unsigned int val)
736 struct port_info *pi = netdev_priv(dev);
737 struct adapter *adap = pi->adapter;
739 if (adap->flags & FULL_INIT_DONE)
741 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
744 adap->params.mc5.nservers = val;
748 static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
749 const char *buf, size_t len)
751 return attr_store(d, buf, len, set_nservers, 0, ~0);
754 #define CXGB3_ATTR_R(name, val_expr) \
755 CXGB3_SHOW(name, val_expr) \
756 static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
758 #define CXGB3_ATTR_RW(name, val_expr, store_method) \
759 CXGB3_SHOW(name, val_expr) \
760 static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
762 CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
763 CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
764 CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
766 static struct attribute *cxgb3_attrs[] = {
767 &dev_attr_cam_size.attr,
768 &dev_attr_nfilters.attr,
769 &dev_attr_nservers.attr,
773 static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
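/*
 * For reference, CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5)) above
 * expands (roughly) to:
 *
 *	static ssize_t format_cam_size(struct net_device *dev, char *buf)
 *	{
 *		struct port_info *pi = netdev_priv(dev);
 *		struct adapter *adap = pi->adapter;
 *		return sprintf(buf, "%u\n", t3_mc5_size(&adap->mc5));
 *	}
 *	static ssize_t show_cam_size(struct device *d,
 *				     struct device_attribute *attr, char *buf)
 *	{
 *		return attr_show(d, buf, format_cam_size);
 *	}
 *	static DEVICE_ATTR(cam_size, S_IRUGO, show_cam_size, NULL);
 */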
775 static ssize_t tm_attr_show(struct device *d,
776 char *buf, int sched)
778 struct port_info *pi = netdev_priv(to_net_dev(d));
779 struct adapter *adap = pi->adapter;
780 unsigned int v, addr, bpt, cpt;
783 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
785 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
786 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
789 bpt = (v >> 8) & 0xff;
792 len = sprintf(buf, "disabled\n");
794 v = (adap->params.vpd.cclk * 1000) / cpt;
795 len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
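/*
 * Unit sketch (assuming vpd.cclk is the core clock in kHz): cclk * 1000 is
 * clocks/sec, dividing by cpt (clocks per scheduler tick) gives ticks/sec,
 * multiplying by bpt (bytes per tick) gives bytes/sec, and dividing by 125
 * converts bytes/sec to kbit/s.
 */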
801 static ssize_t tm_attr_store(struct device *d,
802 const char *buf, size_t len, int sched)
804 struct port_info *pi = netdev_priv(to_net_dev(d));
805 struct adapter *adap = pi->adapter;
810 if (!capable(CAP_NET_ADMIN))
813 val = simple_strtoul(buf, &endp, 0);
814 if (endp == buf || val > 10000000)
818 ret = t3_config_sched(adap, val, sched);
825 #define TM_ATTR(name, sched) \
826 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
829 return tm_attr_show(d, buf, sched); \
831 static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
832 const char *buf, size_t len) \
834 return tm_attr_store(d, buf, len, sched); \
836 static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
847 static struct attribute *offload_attrs[] = {
848 &dev_attr_sched0.attr,
849 &dev_attr_sched1.attr,
850 &dev_attr_sched2.attr,
851 &dev_attr_sched3.attr,
852 &dev_attr_sched4.attr,
853 &dev_attr_sched5.attr,
854 &dev_attr_sched6.attr,
855 &dev_attr_sched7.attr,
859 static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
862 * Sends an sk_buff to an offload queue driver
863 * after dealing with any active network taps.
865 static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
870 ret = t3_offload_tx(tdev, skb);
875 static int write_smt_entry(struct adapter *adapter, int idx)
877 struct cpl_smt_write_req *req;
878 struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
883 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
884 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
885 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
886 req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */
888 memset(req->src_mac1, 0, sizeof(req->src_mac1));
889 memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
891 offload_tx(&adapter->tdev, skb);
895 static int init_smt(struct adapter *adapter)
899 for_each_port(adapter, i)
900 write_smt_entry(adapter, i);
904 static void init_port_mtus(struct adapter *adapter)
906 unsigned int mtus = adapter->port[0]->mtu;
908 if (adapter->port[1])
909 mtus |= adapter->port[1]->mtu << 16;
910 t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
913 static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
917 struct mngt_pktsched_wr *req;
920 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
922 skb = adap->nofail_skb;
926 req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
927 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
928 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
934 ret = t3_mgmt_tx(adap, skb);
935 if (skb == adap->nofail_skb) {
936 adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
938 if (!adap->nofail_skb)
945 static int bind_qsets(struct adapter *adap)
949 for_each_port(adap, i) {
950 const struct port_info *pi = adap2pinfo(adap, i);
952 for (j = 0; j < pi->nqsets; ++j) {
953 int ret = send_pktsched_cmd(adap, 1,
954 pi->first_qset + j, -1,
964 #define FW_FNAME "cxgb3/t3fw-%d.%d.%d.bin"
965 #define TPSRAM_NAME "cxgb3/t3%c_psram-%d.%d.%d.bin"
967 static int upgrade_fw(struct adapter *adap)
971 const struct firmware *fw;
972 struct device *dev = &adap->pdev->dev;
974 snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
975 FW_VERSION_MINOR, FW_VERSION_MICRO);
976 ret = request_firmware(&fw, buf, dev);
978 dev_err(dev, "could not upgrade firmware: unable to load %s\n",
982 ret = t3_load_fw(adap, fw->data, fw->size);
983 release_firmware(fw);
986 dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
987 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
989 dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
990 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
995 static inline char t3rev2char(struct adapter *adapter)
999 switch(adapter->params.rev) {
1011 static int update_tpsram(struct adapter *adap)
1013 const struct firmware *tpsram;
1015 struct device *dev = &adap->pdev->dev;
1019 rev = t3rev2char(adap);
1023 snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
1024 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
1026 ret = request_firmware(&tpsram, buf, dev);
1028 dev_err(dev, "could not load TP SRAM: unable to load %s\n",
1033 ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
1035 goto release_tpsram;
1037 ret = t3_set_proto_sram(adap, tpsram->data);
1040 "successful update of protocol engine "
1042 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
1044 dev_err(dev, "failed to update protocol engine to %d.%d.%d\n",
1045 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
1047 dev_err(dev, "loading protocol SRAM failed\n");
1050 release_firmware(tpsram);
1056 * cxgb_up - enable the adapter
1057 * @adapter: adapter being enabled
1059 * Called when the first port is enabled, this function performs the
1060 * actions necessary to make an adapter operational, such as completing
1061 * the initialization of HW modules, and enabling interrupts.
1063 * Must be called with the rtnl lock held.
1065 static int cxgb_up(struct adapter *adap)
1069 if (!(adap->flags & FULL_INIT_DONE)) {
1070 err = t3_check_fw_version(adap);
1071 if (err == -EINVAL) {
1072 err = upgrade_fw(adap);
1073 CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
1074 FW_VERSION_MAJOR, FW_VERSION_MINOR,
1075 FW_VERSION_MICRO, err ? "failed" : "succeeded");
1078 err = t3_check_tpsram_version(adap);
1079 if (err == -EINVAL) {
1080 err = update_tpsram(adap);
1081 CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
1082 TP_VERSION_MAJOR, TP_VERSION_MINOR,
1083 TP_VERSION_MICRO, err ? "failed" : "succeeded");
1087 * Clear interrupts now to catch errors if t3_init_hw fails.
1088 * We clear them again later as initialization may trigger
1089 * conditions that can interrupt.
1091 t3_intr_clear(adap);
1093 err = t3_init_hw(adap, 0);
1097 t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
1098 t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
1100 err = setup_sge_qsets(adap);
1105 if (!(adap->flags & NAPI_INIT))
1108 t3_start_sge_timers(adap);
1109 adap->flags |= FULL_INIT_DONE;
1112 t3_intr_clear(adap);
1114 if (adap->flags & USING_MSIX) {
1115 name_msix_vecs(adap);
1116 err = request_irq(adap->msix_info[0].vec,
1117 t3_async_intr_handler, 0,
1118 adap->msix_info[0].desc, adap);
1122 err = request_msix_data_irqs(adap);
1124 free_irq(adap->msix_info[0].vec, adap);
1127 } else if ((err = request_irq(adap->pdev->irq,
1128 t3_intr_handler(adap,
1129 adap->sge.qs[0].rspq.
1131 (adap->flags & USING_MSI) ?
1136 enable_all_napi(adap);
1138 t3_intr_enable(adap);
1140 if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
1141 is_offload(adap) && init_tp_parity(adap) == 0)
1142 adap->flags |= TP_PARITY_INIT;
1144 if (adap->flags & TP_PARITY_INIT) {
1145 t3_write_reg(adap, A_TP_INT_CAUSE,
1146 F_CMCACHEPERR | F_ARPLUTPERR);
1147 t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
1150 if (!(adap->flags & QUEUES_BOUND)) {
1151 err = bind_qsets(adap);
1153 CH_ERR(adap, "failed to bind qsets, err %d\n", err);
1154 t3_intr_disable(adap);
1155 free_irq_resources(adap);
1158 adap->flags |= QUEUES_BOUND;
1164 CH_ERR(adap, "request_irq failed, err %d\n", err);
1169 * Release resources when all the ports and offloading have been stopped.
1171 static void cxgb_down(struct adapter *adapter)
1173 t3_sge_stop(adapter);
1174 spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */
1175 t3_intr_disable(adapter);
1176 spin_unlock_irq(&adapter->work_lock);
1178 free_irq_resources(adapter);
1179 quiesce_rx(adapter);
1180 flush_workqueue(cxgb3_wq); /* wait for external IRQ handler */
1183 static void schedule_chk_task(struct adapter *adap)
1187 timeo = adap->params.linkpoll_period ?
1188 (HZ * adap->params.linkpoll_period) / 10 :
1189 adap->params.stats_update_period * HZ;
1191 queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
1194 static int offload_open(struct net_device *dev)
1196 struct port_info *pi = netdev_priv(dev);
1197 struct adapter *adapter = pi->adapter;
1198 struct t3cdev *tdev = dev2t3cdev(dev);
1199 int adap_up = adapter->open_device_map & PORT_MASK;
1202 if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1205 if (!adap_up && (err = cxgb_up(adapter)) < 0)
1208 t3_tp_set_offload_mode(adapter, 1);
1209 tdev->lldev = adapter->port[0];
1210 err = cxgb3_offload_activate(adapter);
1214 init_port_mtus(adapter);
1215 t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
1216 adapter->params.b_wnd,
1217 adapter->params.rev == 0 ?
1218 adapter->port[0]->mtu : 0xffff);
1221 if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
1222 dev_dbg(&dev->dev, "cannot create sysfs group\n");
1224 /* Call back all registered clients */
1225 cxgb3_add_clients(tdev);
1228 /* restore them in case the offload module has changed them */
1230 t3_tp_set_offload_mode(adapter, 0);
1231 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1232 cxgb3_set_dummy_ops(tdev);
1237 static int offload_close(struct t3cdev *tdev)
1239 struct adapter *adapter = tdev2adap(tdev);
1241 if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1244 /* Call back all registered clients */
1245 cxgb3_remove_clients(tdev);
1247 sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
1249 /* Flush work scheduled while releasing TIDs */
1250 flush_scheduled_work();
1253 cxgb3_set_dummy_ops(tdev);
1254 t3_tp_set_offload_mode(adapter, 0);
1255 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1257 if (!adapter->open_device_map)
1260 cxgb3_offload_deactivate(adapter);
1264 static int cxgb_open(struct net_device *dev)
1266 struct port_info *pi = netdev_priv(dev);
1267 struct adapter *adapter = pi->adapter;
1268 int other_ports = adapter->open_device_map & PORT_MASK;
1271 if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
1274 set_bit(pi->port_id, &adapter->open_device_map);
1275 if (is_offload(adapter) && !ofld_disable) {
1276 err = offload_open(dev);
1279 "Could not initialize offload capabilities\n");
1282 dev->real_num_tx_queues = pi->nqsets;
1284 t3_port_intr_enable(adapter, pi->port_id);
1285 netif_tx_start_all_queues(dev);
1287 schedule_chk_task(adapter);
1292 static int cxgb_close(struct net_device *dev)
1294 struct port_info *pi = netdev_priv(dev);
1295 struct adapter *adapter = pi->adapter;
1298 if (!adapter->open_device_map)
1301 /* Stop link fault interrupts */
1302 t3_xgm_intr_disable(adapter, pi->port_id);
1303 t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
1305 t3_port_intr_disable(adapter, pi->port_id);
1306 netif_tx_stop_all_queues(dev);
1307 pi->phy.ops->power_down(&pi->phy, 1);
1308 netif_carrier_off(dev);
1309 t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
1311 spin_lock_irq(&adapter->work_lock); /* sync with update task */
1312 clear_bit(pi->port_id, &adapter->open_device_map);
1313 spin_unlock_irq(&adapter->work_lock);
1315 if (!(adapter->open_device_map & PORT_MASK))
1316 cancel_delayed_work_sync(&adapter->adap_check_task);
1318 if (!adapter->open_device_map)
1324 static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
1326 struct port_info *pi = netdev_priv(dev);
1327 struct adapter *adapter = pi->adapter;
1328 struct net_device_stats *ns = &pi->netstats;
1329 const struct mac_stats *pstats;
1331 spin_lock(&adapter->stats_lock);
1332 pstats = t3_mac_update_stats(&pi->mac);
1333 spin_unlock(&adapter->stats_lock);
1335 ns->tx_bytes = pstats->tx_octets;
1336 ns->tx_packets = pstats->tx_frames;
1337 ns->rx_bytes = pstats->rx_octets;
1338 ns->rx_packets = pstats->rx_frames;
1339 ns->multicast = pstats->rx_mcast_frames;
1341 ns->tx_errors = pstats->tx_underrun;
1342 ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
1343 pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
1344 pstats->rx_fifo_ovfl;
1346 /* detailed rx_errors */
1347 ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
1348 ns->rx_over_errors = 0;
1349 ns->rx_crc_errors = pstats->rx_fcs_errs;
1350 ns->rx_frame_errors = pstats->rx_symbol_errs;
1351 ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
1352 ns->rx_missed_errors = pstats->rx_cong_drops;
1354 /* detailed tx_errors */
1355 ns->tx_aborted_errors = 0;
1356 ns->tx_carrier_errors = 0;
1357 ns->tx_fifo_errors = pstats->tx_underrun;
1358 ns->tx_heartbeat_errors = 0;
1359 ns->tx_window_errors = 0;
1363 static u32 get_msglevel(struct net_device *dev)
1365 struct port_info *pi = netdev_priv(dev);
1366 struct adapter *adapter = pi->adapter;
1368 return adapter->msg_enable;
1371 static void set_msglevel(struct net_device *dev, u32 val)
1373 struct port_info *pi = netdev_priv(dev);
1374 struct adapter *adapter = pi->adapter;
1376 adapter->msg_enable = val;
1379 static char stats_strings[][ETH_GSTRING_LEN] = {
1382 "TxMulticastFramesOK",
1383 "TxBroadcastFramesOK",
1390 "TxFrames128To255 ",
1391 "TxFrames256To511 ",
1392 "TxFrames512To1023 ",
1393 "TxFrames1024To1518 ",
1394 "TxFrames1519ToMax ",
1398 "RxMulticastFramesOK",
1399 "RxBroadcastFramesOK",
1410 "RxFrames128To255 ",
1411 "RxFrames256To511 ",
1412 "RxFrames512To1023 ",
1413 "RxFrames1024To1518 ",
1414 "RxFrames1519ToMax ",
1427 "CheckTXEnToggled ",
1433 static int get_sset_count(struct net_device *dev, int sset)
1437 return ARRAY_SIZE(stats_strings);
1443 #define T3_REGMAP_SIZE (3 * 1024)
1445 static int get_regs_len(struct net_device *dev)
1447 return T3_REGMAP_SIZE;
1450 static int get_eeprom_len(struct net_device *dev)
1455 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1457 struct port_info *pi = netdev_priv(dev);
1458 struct adapter *adapter = pi->adapter;
1462 spin_lock(&adapter->stats_lock);
1463 t3_get_fw_version(adapter, &fw_vers);
1464 t3_get_tp_version(adapter, &tp_vers);
1465 spin_unlock(&adapter->stats_lock);
1467 strcpy(info->driver, DRV_NAME);
1468 strcpy(info->version, DRV_VERSION);
1469 strcpy(info->bus_info, pci_name(adapter->pdev));
1471 strcpy(info->fw_version, "N/A");
1473 snprintf(info->fw_version, sizeof(info->fw_version),
1474 "%s %u.%u.%u TP %u.%u.%u",
1475 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1476 G_FW_VERSION_MAJOR(fw_vers),
1477 G_FW_VERSION_MINOR(fw_vers),
1478 G_FW_VERSION_MICRO(fw_vers),
1479 G_TP_VERSION_MAJOR(tp_vers),
1480 G_TP_VERSION_MINOR(tp_vers),
1481 G_TP_VERSION_MICRO(tp_vers));
1485 static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
1487 if (stringset == ETH_SS_STATS)
1488 memcpy(data, stats_strings, sizeof(stats_strings));
1491 static unsigned long collect_sge_port_stats(struct adapter *adapter,
1492 struct port_info *p, int idx)
1495 unsigned long tot = 0;
1497 for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
1498 tot += adapter->sge.qs[i].port_stats[idx];
1502 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1505 struct port_info *pi = netdev_priv(dev);
1506 struct adapter *adapter = pi->adapter;
1507 const struct mac_stats *s;
1509 spin_lock(&adapter->stats_lock);
1510 s = t3_mac_update_stats(&pi->mac);
1511 spin_unlock(&adapter->stats_lock);
1513 *data++ = s->tx_octets;
1514 *data++ = s->tx_frames;
1515 *data++ = s->tx_mcast_frames;
1516 *data++ = s->tx_bcast_frames;
1517 *data++ = s->tx_pause;
1518 *data++ = s->tx_underrun;
1519 *data++ = s->tx_fifo_urun;
1521 *data++ = s->tx_frames_64;
1522 *data++ = s->tx_frames_65_127;
1523 *data++ = s->tx_frames_128_255;
1524 *data++ = s->tx_frames_256_511;
1525 *data++ = s->tx_frames_512_1023;
1526 *data++ = s->tx_frames_1024_1518;
1527 *data++ = s->tx_frames_1519_max;
1529 *data++ = s->rx_octets;
1530 *data++ = s->rx_frames;
1531 *data++ = s->rx_mcast_frames;
1532 *data++ = s->rx_bcast_frames;
1533 *data++ = s->rx_pause;
1534 *data++ = s->rx_fcs_errs;
1535 *data++ = s->rx_symbol_errs;
1536 *data++ = s->rx_short;
1537 *data++ = s->rx_jabber;
1538 *data++ = s->rx_too_long;
1539 *data++ = s->rx_fifo_ovfl;
1541 *data++ = s->rx_frames_64;
1542 *data++ = s->rx_frames_65_127;
1543 *data++ = s->rx_frames_128_255;
1544 *data++ = s->rx_frames_256_511;
1545 *data++ = s->rx_frames_512_1023;
1546 *data++ = s->rx_frames_1024_1518;
1547 *data++ = s->rx_frames_1519_max;
1549 *data++ = pi->phy.fifo_errors;
1551 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1552 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1553 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1554 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1555 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1559 *data++ = s->rx_cong_drops;
1561 *data++ = s->num_toggled;
1562 *data++ = s->num_resets;
1564 *data++ = s->link_faults;
1567 static inline void reg_block_dump(struct adapter *ap, void *buf,
1568 unsigned int start, unsigned int end)
1570 u32 *p = buf + start;
1572 for (; start <= end; start += sizeof(u32))
1573 *p++ = t3_read_reg(ap, start);
1576 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1579 struct port_info *pi = netdev_priv(dev);
1580 struct adapter *ap = pi->adapter;
1584 * bits 0..9: chip version
1585 * bits 10..15: chip revision
1586 * bit 31: set for PCIe cards
1588 regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
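	/*
	 * Decoding example: (regs->version >> 10) & 0x3f recovers the chip
	 * revision from a saved register dump, and bit 31 indicates PCIe.
	 */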
1591 * We skip the MAC statistics registers because they are clear-on-read.
1592 * Also reading multi-register stats would need to synchronize with the
1593 * periodic mac stats accumulation. Hard to justify the complexity.
1595 memset(buf, 0, T3_REGMAP_SIZE);
1596 reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1597 reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1598 reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1599 reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1600 reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1601 reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1602 XGM_REG(A_XGM_SERDES_STAT3, 1));
1603 reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1604 XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1607 static int restart_autoneg(struct net_device *dev)
1609 struct port_info *p = netdev_priv(dev);
1611 if (!netif_running(dev))
1613 if (p->link_config.autoneg != AUTONEG_ENABLE)
1615 p->phy.ops->autoneg_restart(&p->phy);
1619 static int cxgb3_phys_id(struct net_device *dev, u32 data)
1621 struct port_info *pi = netdev_priv(dev);
1622 struct adapter *adapter = pi->adapter;
1628 for (i = 0; i < data * 2; i++) {
1629 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1630 (i & 1) ? F_GPIO0_OUT_VAL : 0);
1631 if (msleep_interruptible(500))
1634 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1639 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1641 struct port_info *p = netdev_priv(dev);
1643 cmd->supported = p->link_config.supported;
1644 cmd->advertising = p->link_config.advertising;
1646 if (netif_carrier_ok(dev)) {
1647 cmd->speed = p->link_config.speed;
1648 cmd->duplex = p->link_config.duplex;
1654 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1655 cmd->phy_address = p->phy.mdio.prtad;
1656 cmd->transceiver = XCVR_EXTERNAL;
1657 cmd->autoneg = p->link_config.autoneg;
1663 static int speed_duplex_to_caps(int speed, int duplex)
1669 if (duplex == DUPLEX_FULL)
1670 cap = SUPPORTED_10baseT_Full;
1672 cap = SUPPORTED_10baseT_Half;
1675 if (duplex == DUPLEX_FULL)
1676 cap = SUPPORTED_100baseT_Full;
1678 cap = SUPPORTED_100baseT_Half;
1681 if (duplex == DUPLEX_FULL)
1682 cap = SUPPORTED_1000baseT_Full;
1684 cap = SUPPORTED_1000baseT_Half;
1687 if (duplex == DUPLEX_FULL)
1688 cap = SUPPORTED_10000baseT_Full;
1693 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1694 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1695 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1696 ADVERTISED_10000baseT_Full)
1698 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1700 struct port_info *p = netdev_priv(dev);
1701 struct link_config *lc = &p->link_config;
1703 if (!(lc->supported & SUPPORTED_Autoneg)) {
1705 * PHY offers a single speed/duplex. See if that's what's
1708 if (cmd->autoneg == AUTONEG_DISABLE) {
1709 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1710 if (lc->supported & cap)
1716 if (cmd->autoneg == AUTONEG_DISABLE) {
1717 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1719 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
1721 lc->requested_speed = cmd->speed;
1722 lc->requested_duplex = cmd->duplex;
1723 lc->advertising = 0;
1725 cmd->advertising &= ADVERTISED_MASK;
1726 cmd->advertising &= lc->supported;
1727 if (!cmd->advertising)
1729 lc->requested_speed = SPEED_INVALID;
1730 lc->requested_duplex = DUPLEX_INVALID;
1731 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1733 lc->autoneg = cmd->autoneg;
1734 if (netif_running(dev))
1735 t3_link_start(&p->phy, &p->mac, lc);
1739 static void get_pauseparam(struct net_device *dev,
1740 struct ethtool_pauseparam *epause)
1742 struct port_info *p = netdev_priv(dev);
1744 epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1745 epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1746 epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1749 static int set_pauseparam(struct net_device *dev,
1750 struct ethtool_pauseparam *epause)
1752 struct port_info *p = netdev_priv(dev);
1753 struct link_config *lc = &p->link_config;
1755 if (epause->autoneg == AUTONEG_DISABLE)
1756 lc->requested_fc = 0;
1757 else if (lc->supported & SUPPORTED_Autoneg)
1758 lc->requested_fc = PAUSE_AUTONEG;
1762 if (epause->rx_pause)
1763 lc->requested_fc |= PAUSE_RX;
1764 if (epause->tx_pause)
1765 lc->requested_fc |= PAUSE_TX;
1766 if (lc->autoneg == AUTONEG_ENABLE) {
1767 if (netif_running(dev))
1768 t3_link_start(&p->phy, &p->mac, lc);
1770 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1771 if (netif_running(dev))
1772 t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1777 static u32 get_rx_csum(struct net_device *dev)
1779 struct port_info *p = netdev_priv(dev);
1781 return p->rx_offload & T3_RX_CSUM;
1784 static int set_rx_csum(struct net_device *dev, u32 data)
1786 struct port_info *p = netdev_priv(dev);
1789 p->rx_offload |= T3_RX_CSUM;
1793 p->rx_offload &= ~(T3_RX_CSUM | T3_LRO);
1794 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
1795 set_qset_lro(dev, i, 0);
1800 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1802 struct port_info *pi = netdev_priv(dev);
1803 struct adapter *adapter = pi->adapter;
1804 const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1806 e->rx_max_pending = MAX_RX_BUFFERS;
1807 e->rx_mini_max_pending = 0;
1808 e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1809 e->tx_max_pending = MAX_TXQ_ENTRIES;
1811 e->rx_pending = q->fl_size;
1812 e->rx_mini_pending = q->rspq_size;
1813 e->rx_jumbo_pending = q->jumbo_size;
1814 e->tx_pending = q->txq_size[0];
1817 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1819 struct port_info *pi = netdev_priv(dev);
1820 struct adapter *adapter = pi->adapter;
1821 struct qset_params *q;
1824 if (e->rx_pending > MAX_RX_BUFFERS ||
1825 e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1826 e->tx_pending > MAX_TXQ_ENTRIES ||
1827 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1828 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1829 e->rx_pending < MIN_FL_ENTRIES ||
1830 e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1831 e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1834 if (adapter->flags & FULL_INIT_DONE)
1837 q = &adapter->params.sge.qset[pi->first_qset];
1838 for (i = 0; i < pi->nqsets; ++i, ++q) {
1839 q->rspq_size = e->rx_mini_pending;
1840 q->fl_size = e->rx_pending;
1841 q->jumbo_size = e->rx_jumbo_pending;
1842 q->txq_size[0] = e->tx_pending;
1843 q->txq_size[1] = e->tx_pending;
1844 q->txq_size[2] = e->tx_pending;
1849 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1851 struct port_info *pi = netdev_priv(dev);
1852 struct adapter *adapter = pi->adapter;
1853 struct qset_params *qsp = &adapter->params.sge.qset[0];
1854 struct sge_qset *qs = &adapter->sge.qs[0];
1856 if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1859 qsp->coalesce_usecs = c->rx_coalesce_usecs;
1860 t3_update_qset_coalesce(qs, qsp);
1864 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1866 struct port_info *pi = netdev_priv(dev);
1867 struct adapter *adapter = pi->adapter;
1868 struct qset_params *q = adapter->params.sge.qset;
1870 c->rx_coalesce_usecs = q->coalesce_usecs;
1874 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1877 struct port_info *pi = netdev_priv(dev);
1878 struct adapter *adapter = pi->adapter;
1881 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1885 e->magic = EEPROM_MAGIC;
1886 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1887 err = t3_seeprom_read(adapter, i, (__le32 *) & buf[i]);
1890 memcpy(data, buf + e->offset, e->len);
1895 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1898 struct port_info *pi = netdev_priv(dev);
1899 struct adapter *adapter = pi->adapter;
1900 u32 aligned_offset, aligned_len;
1905 if (eeprom->magic != EEPROM_MAGIC)
1908 aligned_offset = eeprom->offset & ~3;
1909 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1911 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1912 buf = kmalloc(aligned_len, GFP_KERNEL);
1915 err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
1916 if (!err && aligned_len > 4)
1917 err = t3_seeprom_read(adapter,
1918 aligned_offset + aligned_len - 4,
1919 (__le32 *) & buf[aligned_len - 4]);
1922 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1926 err = t3_seeprom_wp(adapter, 0);
1930 for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
1931 err = t3_seeprom_write(adapter, aligned_offset, *p);
1932 aligned_offset += 4;
1936 err = t3_seeprom_wp(adapter, 1);
1943 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1947 memset(&wol->sopass, 0, sizeof(wol->sopass));
1950 static const struct ethtool_ops cxgb_ethtool_ops = {
1951 .get_settings = get_settings,
1952 .set_settings = set_settings,
1953 .get_drvinfo = get_drvinfo,
1954 .get_msglevel = get_msglevel,
1955 .set_msglevel = set_msglevel,
1956 .get_ringparam = get_sge_param,
1957 .set_ringparam = set_sge_param,
1958 .get_coalesce = get_coalesce,
1959 .set_coalesce = set_coalesce,
1960 .get_eeprom_len = get_eeprom_len,
1961 .get_eeprom = get_eeprom,
1962 .set_eeprom = set_eeprom,
1963 .get_pauseparam = get_pauseparam,
1964 .set_pauseparam = set_pauseparam,
1965 .get_rx_csum = get_rx_csum,
1966 .set_rx_csum = set_rx_csum,
1967 .set_tx_csum = ethtool_op_set_tx_csum,
1968 .set_sg = ethtool_op_set_sg,
1969 .get_link = ethtool_op_get_link,
1970 .get_strings = get_strings,
1971 .phys_id = cxgb3_phys_id,
1972 .nway_reset = restart_autoneg,
1973 .get_sset_count = get_sset_count,
1974 .get_ethtool_stats = get_stats,
1975 .get_regs_len = get_regs_len,
1976 .get_regs = get_regs,
1978 .set_tso = ethtool_op_set_tso,
1981 static int in_range(int val, int lo, int hi)
1983 return val < 0 || (val <= hi && val >= lo);
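/*
 * Note: negative values deliberately pass in_range(); the extension ioctls
 * below use -1 to mean "leave this parameter unchanged" and only apply
 * fields that are >= 0.
 */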
1986 static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1988 struct port_info *pi = netdev_priv(dev);
1989 struct adapter *adapter = pi->adapter;
1993 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1997 case CHELSIO_SET_QSET_PARAMS:{
1999 struct qset_params *q;
2000 struct ch_qset_params t;
2001 int q1 = pi->first_qset;
2002 int nqsets = pi->nqsets;
2004 if (!capable(CAP_NET_ADMIN))
2006 if (copy_from_user(&t, useraddr, sizeof(t)))
2008 if (t.qset_idx >= SGE_QSETS)
2010 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
2011 !in_range(t.cong_thres, 0, 255) ||
2012 !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
2014 !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
2016 !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
2017 MAX_CTRL_TXQ_ENTRIES) ||
2018 !in_range(t.fl_size[0], MIN_FL_ENTRIES,
2020 || !in_range(t.fl_size[1], MIN_FL_ENTRIES,
2021 MAX_RX_JUMBO_BUFFERS)
2022 || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
2026 if ((adapter->flags & FULL_INIT_DONE) && t.lro > 0)
2027 for_each_port(adapter, i) {
2028 pi = adap2pinfo(adapter, i);
2029 if (t.qset_idx >= pi->first_qset &&
2030 t.qset_idx < pi->first_qset + pi->nqsets &&
2031 !(pi->rx_offload & T3_RX_CSUM))
2035 if ((adapter->flags & FULL_INIT_DONE) &&
2036 (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
2037 t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
2038 t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
2039 t.polling >= 0 || t.cong_thres >= 0))
2042 /* Allow setting of any available qset when offload enabled */
2043 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2045 for_each_port(adapter, i) {
2046 pi = adap2pinfo(adapter, i);
2047 nqsets += pi->first_qset + pi->nqsets;
2051 if (t.qset_idx < q1)
2053 if (t.qset_idx > q1 + nqsets - 1)
2056 q = &adapter->params.sge.qset[t.qset_idx];
2058 if (t.rspq_size >= 0)
2059 q->rspq_size = t.rspq_size;
2060 if (t.fl_size[0] >= 0)
2061 q->fl_size = t.fl_size[0];
2062 if (t.fl_size[1] >= 0)
2063 q->jumbo_size = t.fl_size[1];
2064 if (t.txq_size[0] >= 0)
2065 q->txq_size[0] = t.txq_size[0];
2066 if (t.txq_size[1] >= 0)
2067 q->txq_size[1] = t.txq_size[1];
2068 if (t.txq_size[2] >= 0)
2069 q->txq_size[2] = t.txq_size[2];
2070 if (t.cong_thres >= 0)
2071 q->cong_thres = t.cong_thres;
2072 if (t.intr_lat >= 0) {
2073 struct sge_qset *qs =
2074 &adapter->sge.qs[t.qset_idx];
2076 q->coalesce_usecs = t.intr_lat;
2077 t3_update_qset_coalesce(qs, q);
2079 if (t.polling >= 0) {
2080 if (adapter->flags & USING_MSIX)
2081 q->polling = t.polling;
2083 /* No polling with INTx for T3A */
2084 if (adapter->params.rev == 0 &&
2085 !(adapter->flags & USING_MSI))
2088 for (i = 0; i < SGE_QSETS; i++) {
2089 q = &adapter->params.sge.
2091 q->polling = t.polling;
2096 set_qset_lro(dev, t.qset_idx, t.lro);
2100 case CHELSIO_GET_QSET_PARAMS:{
2101 struct qset_params *q;
2102 struct ch_qset_params t;
2103 int q1 = pi->first_qset;
2104 int nqsets = pi->nqsets;
2107 if (copy_from_user(&t, useraddr, sizeof(t)))
2110 /* Display qsets for all ports when offload enabled */
2111 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2113 for_each_port(adapter, i) {
2114 pi = adap2pinfo(adapter, i);
2115 nqsets = pi->first_qset + pi->nqsets;
2119 if (t.qset_idx >= nqsets)
2122 q = &adapter->params.sge.qset[q1 + t.qset_idx];
2123 t.rspq_size = q->rspq_size;
2124 t.txq_size[0] = q->txq_size[0];
2125 t.txq_size[1] = q->txq_size[1];
2126 t.txq_size[2] = q->txq_size[2];
2127 t.fl_size[0] = q->fl_size;
2128 t.fl_size[1] = q->jumbo_size;
2129 t.polling = q->polling;
2131 t.intr_lat = q->coalesce_usecs;
2132 t.cong_thres = q->cong_thres;
2135 if (adapter->flags & USING_MSIX)
2136 t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
2138 t.vector = adapter->pdev->irq;
2140 if (copy_to_user(useraddr, &t, sizeof(t)))
2144 case CHELSIO_SET_QSET_NUM:{
2145 struct ch_reg edata;
2146 unsigned int i, first_qset = 0, other_qsets = 0;
2148 if (!capable(CAP_NET_ADMIN))
2150 if (adapter->flags & FULL_INIT_DONE)
2152 if (copy_from_user(&edata, useraddr, sizeof(edata)))
2154 if (edata.val < 1 ||
2155 (edata.val > 1 && !(adapter->flags & USING_MSIX)))
2158 for_each_port(adapter, i)
2159 if (adapter->port[i] && adapter->port[i] != dev)
2160 other_qsets += adap2pinfo(adapter, i)->nqsets;
2162 if (edata.val + other_qsets > SGE_QSETS)
2165 pi->nqsets = edata.val;
2167 for_each_port(adapter, i)
2168 if (adapter->port[i]) {
2169 pi = adap2pinfo(adapter, i);
2170 pi->first_qset = first_qset;
2171 first_qset += pi->nqsets;
2175 case CHELSIO_GET_QSET_NUM:{
2176 struct ch_reg edata;
2178 edata.cmd = CHELSIO_GET_QSET_NUM;
2179 edata.val = pi->nqsets;
2180 if (copy_to_user(useraddr, &edata, sizeof(edata)))
2184 case CHELSIO_LOAD_FW:{
2186 struct ch_mem_range t;
2188 if (!capable(CAP_SYS_RAWIO))
2190 if (copy_from_user(&t, useraddr, sizeof(t)))
2192 /* Check t.len sanity ? */
2193 fw_data = kmalloc(t.len, GFP_KERNEL);
2198 (fw_data, useraddr + sizeof(t), t.len)) {
2203 ret = t3_load_fw(adapter, fw_data, t.len);
2209 case CHELSIO_SETMTUTAB:{
2213 if (!is_offload(adapter))
2215 if (!capable(CAP_NET_ADMIN))
2217 if (offload_running(adapter))
2219 if (copy_from_user(&m, useraddr, sizeof(m)))
2221 if (m.nmtus != NMTUS)
2223 if (m.mtus[0] < 81) /* accommodate SACK */
2226 /* MTUs must be in ascending order */
2227 for (i = 1; i < NMTUS; ++i)
2228 if (m.mtus[i] < m.mtus[i - 1])
2231 memcpy(adapter->params.mtus, m.mtus,
2232 sizeof(adapter->params.mtus));
2235 case CHELSIO_GET_PM:{
2236 struct tp_params *p = &adapter->params.tp;
2237 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
2239 if (!is_offload(adapter))
2241 m.tx_pg_sz = p->tx_pg_size;
2242 m.tx_num_pg = p->tx_num_pgs;
2243 m.rx_pg_sz = p->rx_pg_size;
2244 m.rx_num_pg = p->rx_num_pgs;
2245 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
2246 if (copy_to_user(useraddr, &m, sizeof(m)))
2250 case CHELSIO_SET_PM:{
2252 struct tp_params *p = &adapter->params.tp;
2254 if (!is_offload(adapter))
2256 if (!capable(CAP_NET_ADMIN))
2258 if (adapter->flags & FULL_INIT_DONE)
2260 if (copy_from_user(&m, useraddr, sizeof(m)))
2262 if (!is_power_of_2(m.rx_pg_sz) ||
2263 !is_power_of_2(m.tx_pg_sz))
2264 return -EINVAL; /* not power of 2 */
2265 if (!(m.rx_pg_sz & 0x14000))
2266 return -EINVAL; /* not 16KB or 64KB */
2267 if (!(m.tx_pg_sz & 0x1554000))
2269 if (m.tx_num_pg == -1)
2270 m.tx_num_pg = p->tx_num_pgs;
2271 if (m.rx_num_pg == -1)
2272 m.rx_num_pg = p->rx_num_pgs;
2273 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
2275 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
2276 m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
2278 p->rx_pg_size = m.rx_pg_sz;
2279 p->tx_pg_size = m.tx_pg_sz;
2280 p->rx_num_pgs = m.rx_num_pg;
2281 p->tx_num_pgs = m.tx_num_pg;
2284 case CHELSIO_GET_MEM:{
2285 struct ch_mem_range t;
2289 if (!is_offload(adapter))
2291 if (!(adapter->flags & FULL_INIT_DONE))
2292 return -EIO; /* need the memory controllers */
2293 if (copy_from_user(&t, useraddr, sizeof(t)))
2295 if ((t.addr & 7) || (t.len & 7))
2297 if (t.mem_id == MEM_CM)
2299 else if (t.mem_id == MEM_PMRX)
2300 mem = &adapter->pmrx;
2301 else if (t.mem_id == MEM_PMTX)
2302 mem = &adapter->pmtx;
2308 * bits 0..9: chip version
2309 * bits 10..15: chip revision
2311 t.version = 3 | (adapter->params.rev << 10);
2312 if (copy_to_user(useraddr, &t, sizeof(t)))
2316 * Read 256 bytes at a time as len can be large and we don't
2317 * want to use huge intermediate buffers.
2319 useraddr += sizeof(t); /* advance to start of buffer */
2321 unsigned int chunk =
2322 min_t(unsigned int, t.len, sizeof(buf));
2325 t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
2329 if (copy_to_user(useraddr, buf, chunk))
2337 case CHELSIO_SET_TRACE_FILTER:{
2339 const struct trace_params *tp;
2341 if (!capable(CAP_NET_ADMIN))
2343 if (!offload_running(adapter))
2345 if (copy_from_user(&t, useraddr, sizeof(t)))
2348 tp = (const struct trace_params *)&t.sip;
2350 t3_config_trace_filter(adapter, tp, 0,
2354 t3_config_trace_filter(adapter, tp, 1,
2365 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2367 struct mii_ioctl_data *data = if_mii(req);
2368 struct port_info *pi = netdev_priv(dev);
2369 struct adapter *adapter = pi->adapter;
2374 /* Convert phy_id from older PRTAD/DEVAD format */
2375 if (is_10G(adapter) &&
2376 !mdio_phy_id_is_c45(data->phy_id) &&
2377 (data->phy_id & 0x1f00) &&
2378 !(data->phy_id & 0xe0e0))
2379 data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
2380 data->phy_id & 0x1f);
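		/*
		 * Example (illustrative): a legacy phy_id of 0x0801 (PRTAD 8
		 * in bits 15:8, DEVAD 1 in bits 4:0) becomes
		 * mdio_phy_id_c45(8, 1), the clause-45 encoding that
		 * mdio_mii_ioctl() expects.
		 */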
2383 return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
2385 return cxgb_extension_ioctl(dev, req->ifr_data);
2391 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2393 struct port_info *pi = netdev_priv(dev);
2394 struct adapter *adapter = pi->adapter;
2397 if (new_mtu < 81) /* accommodate SACK */
2399 if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2402 init_port_mtus(adapter);
2403 if (adapter->params.rev == 0 && offload_running(adapter))
2404 t3_load_mtus(adapter, adapter->params.mtus,
2405 adapter->params.a_wnd, adapter->params.b_wnd,
2406 adapter->port[0]->mtu);
2410 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2412 struct port_info *pi = netdev_priv(dev);
2413 struct adapter *adapter = pi->adapter;
2414 struct sockaddr *addr = p;
2416 if (!is_valid_ether_addr(addr->sa_data))
2419 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2420 t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2421 if (offload_running(adapter))
2422 write_smt_entry(adapter, pi->port_id);
2427 * t3_synchronize_rx - wait for current Rx processing on a port to complete
2428 * @adap: the adapter
2431 * Ensures that current Rx processing on any of the queues associated with
2432 * the given port completes before returning. We do this by acquiring and
2433 * releasing the locks of the response queues associated with the port.
2435 static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2439 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
2440 struct sge_rspq *q = &adap->sge.qs[i].rspq;
2442 spin_lock_irq(&q->lock);
2443 spin_unlock_irq(&q->lock);
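/*
 * Enable/disable VLAN acceleration for a port.  Rev > 0 adapters have a
 * per-port control; rev 0 has a single control that stays enabled as long
 * as any port still has a VLAN group.  Rx processing on the port is then
 * drained so no packet is handled with a stale VLAN setting.
 */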
2447 static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2449 struct port_info *pi = netdev_priv(dev);
2450 struct adapter *adapter = pi->adapter;
2453 if (adapter->params.rev > 0)
2454 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2456 /* single control for all ports */
2457 unsigned int i, have_vlans = 0;
2458 for_each_port(adapter, i)
2459 have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2461 t3_set_vlan_accel(adapter, 1, have_vlans);
2463 t3_synchronize_rx(adapter, pi);
2466 #ifdef CONFIG_NET_POLL_CONTROLLER
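/*
 * Netpoll handler: run the Rx interrupt handler directly for each of the
 * port's queue sets, using the per-queue-set source under MSI-X and the
 * adapter otherwise.
 */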
2467 static void cxgb_netpoll(struct net_device *dev)
2469 struct port_info *pi = netdev_priv(dev);
2470 struct adapter *adapter = pi->adapter;
2473 for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2474 struct sge_qset *qs = &adapter->sge.qs[qidx];
2477 if (adapter->flags & USING_MSIX)
2482 t3_intr_handler(adapter, qs->rspq.polling) (0, source);
2488 * Periodic accumulation of MAC statistics.
2490 static void mac_stats_update(struct adapter *adapter)
2494 for_each_port(adapter, i) {
2495 struct net_device *dev = adapter->port[i];
2496 struct port_info *p = netdev_priv(dev);
2498 if (netif_running(dev)) {
2499 spin_lock(&adapter->stats_lock);
2500 t3_mac_update_stats(&p->mac);
2501 spin_unlock(&adapter->stats_lock);
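/*
 * Poll link state for all ports: handle any link fault flagged by the
 * interrupt path, and for PHYs without a usable interrupt pin check for
 * link changes with the XGMAC interrupt briefly masked.
 */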
2506 static void check_link_status(struct adapter *adapter)
2510 for_each_port(adapter, i) {
2511 struct net_device *dev = adapter->port[i];
2512 struct port_info *p = netdev_priv(dev);
2515 spin_lock_irq(&adapter->work_lock);
2516 link_fault = p->link_fault;
2517 spin_unlock_irq(&adapter->work_lock);
2520 t3_link_fault(adapter, i);
2524 if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
2525 t3_xgm_intr_disable(adapter, i);
2526 t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2528 t3_link_changed(adapter, i);
2529 t3_xgm_intr_enable(adapter, i);
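/*
 * T3B2 MAC watchdog.  For every running port with link up, run the MAC
 * watchdog task; depending on the returned status it either counts a Tx
 * path toggle or reinitializes the MAC (MTU, address, Rx mode, link) and
 * re-enables the port interrupt.
 */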
2534 static void check_t3b2_mac(struct adapter *adapter)
2538 if (!rtnl_trylock()) /* synchronize with ifdown */
2541 for_each_port(adapter, i) {
2542 struct net_device *dev = adapter->port[i];
2543 struct port_info *p = netdev_priv(dev);
2546 if (!netif_running(dev))
2550 if (netif_running(dev) && netif_carrier_ok(dev))
2551 status = t3b2_mac_watchdog_task(&p->mac);
2553 p->mac.stats.num_toggled++;
2554 else if (status == 2) {
2555 struct cmac *mac = &p->mac;
2557 t3_mac_set_mtu(mac, dev->mtu);
2558 t3_mac_set_address(mac, 0, dev->dev_addr);
2559 cxgb_set_rxmode(dev);
2560 t3_link_start(&p->phy, mac, &p->link_config);
2561 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2562 t3_port_intr_enable(adapter, p->port_id);
2563 p->mac.stats.num_resets++;
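/*
 * Periodic per-adapter housekeeping: check link state, accumulate MAC
 * statistics at the configured interval, run the T3B2 MAC watchdog, count
 * conditions that are polled rather than interrupt driven (Rx FIFO
 * overflow, empty free lists), and reschedule itself while any port is up.
 */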
2570 static void t3_adap_check_task(struct work_struct *work)
2572 struct adapter *adapter = container_of(work, struct adapter,
2573 adap_check_task.work);
2574 const struct adapter_params *p = &adapter->params;
2576 unsigned int v, status, reset;
2578 adapter->check_task_cnt++;
2580 check_link_status(adapter);
2582 /* Accumulate MAC stats if needed */
2583 if (!p->linkpoll_period ||
2584 (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2585 p->stats_update_period) {
2586 mac_stats_update(adapter);
2587 adapter->check_task_cnt = 0;
2590 if (p->rev == T3_REV_B2)
2591 check_t3b2_mac(adapter);
2594 * Scan the XGMACs to check for various conditions which we want to
2595 * monitor in a periodic polling manner rather than via an interrupt
2596 * condition. This is used for conditions which would otherwise flood
2597 * the system with interrupts and we only really need to know that the
2598 * conditions are "happening" ... For each condition we count the
2599 * detection of the condition and reset it for the next polling loop.
2601 for_each_port(adapter, port) {
2602 struct cmac *mac = &adap2pinfo(adapter, port)->mac;
2605 cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
2607 if (cause & F_RXFIFO_OVERFLOW) {
2608 mac->stats.rx_fifo_ovfl++;
2609 reset |= F_RXFIFO_OVERFLOW;
2612 t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
2616 * We do the same as above for FL_EMPTY interrupts.
2618 status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2621 if (status & F_FLEMPTY) {
2622 struct sge_qset *qs = &adapter->sge.qs[0];
2627 v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
2631 qs->fl[i].empty += (v & 1);
2639 t3_write_reg(adapter, A_SG_INT_CAUSE, reset);
2641 /* Schedule the next check update if any port is active. */
2642 spin_lock_irq(&adapter->work_lock);
2643 if (adapter->open_device_map & PORT_MASK)
2644 schedule_chk_task(adapter);
2645 spin_unlock_irq(&adapter->work_lock);
2649 * Processes external (PHY) interrupts in process context.
2651 static void ext_intr_task(struct work_struct *work)
2653 struct adapter *adapter = container_of(work, struct adapter,
2654 ext_intr_handler_task);
2657 /* Disable link fault interrupts */
2658 for_each_port(adapter, i) {
2659 struct net_device *dev = adapter->port[i];
2660 struct port_info *p = netdev_priv(dev);
2662 t3_xgm_intr_disable(adapter, i);
2663 t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2666 /* Re-enable link fault interrupts */
2667 t3_phy_intr_handler(adapter);
2669 for_each_port(adapter, i)
2670 t3_xgm_intr_enable(adapter, i);
2672 /* Now reenable external interrupts */
2673 spin_lock_irq(&adapter->work_lock);
2674 if (adapter->slow_intr_mask) {
2675 adapter->slow_intr_mask |= F_T3DBG;
2676 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2677 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2678 adapter->slow_intr_mask);
2680 spin_unlock_irq(&adapter->work_lock);
2684 * Interrupt-context handler for external (PHY) interrupts.
2686 void t3_os_ext_intr_handler(struct adapter *adapter)
2689 * Schedule a task to handle external interrupts as they may be slow
2690 * and we use a mutex to protect MDIO registers. We disable PHY
2691 * interrupts in the meantime and let the task re-enable them when it is done.
2694 spin_lock(&adapter->work_lock);
2695 if (adapter->slow_intr_mask) {
2696 adapter->slow_intr_mask &= ~F_T3DBG;
2697 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2698 adapter->slow_intr_mask);
2699 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2701 spin_unlock(&adapter->work_lock);
2704 void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
2706 struct net_device *netdev = adapter->port[port_id];
2707 struct port_info *pi = netdev_priv(netdev);
2709 spin_lock(&adapter->work_lock);
2711 spin_unlock(&adapter->work_lock);
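/*
 * Quiesce the adapter after a fatal or PCI error: notify and close the
 * offload device if it is in use, stop all running ports and the SGE
 * timers, clear FULL_INIT_DONE, optionally reset the chip, and disable
 * the PCI device.
 */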
2714 static int t3_adapter_error(struct adapter *adapter, int reset)
2718 if (is_offload(adapter) &&
2719 test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2720 cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
2721 offload_close(&adapter->tdev);
2724 /* Stop all ports */
2725 for_each_port(adapter, i) {
2726 struct net_device *netdev = adapter->port[i];
2728 if (netif_running(netdev))
2732 /* Stop SGE timers */
2733 t3_stop_sge_timers(adapter);
2735 adapter->flags &= ~FULL_INIT_DONE;
2738 ret = t3_reset_adapter(adapter);
2740 pci_disable_device(adapter->pdev);
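/*
 * Bring the PCI device back after a reset: re-enable it, restore bus
 * mastering and the saved config space, free the old SGE resources, and
 * replay the adapter preparation step.
 */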
2745 static int t3_reenable_adapter(struct adapter *adapter)
2747 if (pci_enable_device(adapter->pdev)) {
2748 dev_err(&adapter->pdev->dev,
2749 "Cannot re-enable PCI device after reset.\n");
2752 pci_set_master(adapter->pdev);
2753 pci_restore_state(adapter->pdev);
2755 /* Free sge resources */
2756 t3_free_sge_resources(adapter);
2758 if (t3_replay_prep_adapter(adapter))
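/*
 * Reopen every port that was running before the error, logging any port
 * that cannot be brought back, then tell the offload subsystem the
 * adapter is up again.
 */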
2766 static void t3_resume_ports(struct adapter *adapter)
2770 /* Restart the ports */
2771 for_each_port(adapter, i) {
2772 struct net_device *netdev = adapter->port[i];
2774 if (netif_running(netdev)) {
2775 if (cxgb_open(netdev)) {
2776 dev_err(&adapter->pdev->dev,
2777 "can't bring device back up"
2784 if (is_offload(adapter) && !ofld_disable)
2785 cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
2789 * processes a fatal error.
2790 * Bring the ports down, reset the chip, bring the ports back up.
2792 static void fatal_error_task(struct work_struct *work)
2794 struct adapter *adapter = container_of(work, struct adapter,
2795 fatal_error_handler_task);
2799 err = t3_adapter_error(adapter, 1);
2801 err = t3_reenable_adapter(adapter);
2803 t3_resume_ports(adapter);
2805 CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
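/*
 * Fatal error entry point: stop the SGE and both XGMAC Tx/Rx directions if
 * the adapter was fully initialized, disable interrupts, schedule the
 * fatal-error worker, and log the firmware status words for post-mortem
 * analysis.
 */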
2809 void t3_fatal_err(struct adapter *adapter)
2811 unsigned int fw_status[4];
2813 if (adapter->flags & FULL_INIT_DONE) {
2814 t3_sge_stop(adapter);
2815 t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
2816 t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
2817 t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
2818 t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
2820 spin_lock(&adapter->work_lock);
2821 t3_intr_disable(adapter);
2822 queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
2823 spin_unlock(&adapter->work_lock);
2825 CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2826 if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2827 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2828 fw_status[0], fw_status[1],
2829 fw_status[2], fw_status[3]);
2833 * t3_io_error_detected - called when PCI error is detected
2834 * @pdev: Pointer to PCI device
2835 * @state: The current pci connection state
2837 * This function is called after a PCI bus error affecting
2838 * this device has been detected.
2840 static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
2841 pci_channel_state_t state)
2843 struct adapter *adapter = pci_get_drvdata(pdev);
2846 if (state == pci_channel_io_perm_failure)
2847 return PCI_ERS_RESULT_DISCONNECT;
2849 ret = t3_adapter_error(adapter, 0);
2851 /* Request a slot reset. */
2852 return PCI_ERS_RESULT_NEED_RESET;
2856 * t3_io_slot_reset - called after the PCI bus has been reset.
2857 * @pdev: Pointer to PCI device
2859 * Restart the card from scratch, as if from a cold-boot.
2861 static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
2863 struct adapter *adapter = pci_get_drvdata(pdev);
2865 if (!t3_reenable_adapter(adapter))
2866 return PCI_ERS_RESULT_RECOVERED;
2868 return PCI_ERS_RESULT_DISCONNECT;
2872 * t3_io_resume - called when traffic can start flowing again.
2873 * @pdev: Pointer to PCI device
2875 * This callback is called when the error recovery driver tells us that
2876 * it's OK to resume normal operation.
2878 static void t3_io_resume(struct pci_dev *pdev)
2880 struct adapter *adapter = pci_get_drvdata(pdev);
2882 CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
2883 t3_read_reg(adapter, A_PCIE_PEX_ERR));
2885 t3_resume_ports(adapter);
2888 static struct pci_error_handlers t3_err_handler = {
2889 .error_detected = t3_io_error_detected,
2890 .slot_reset = t3_io_slot_reset,
2891 .resume = t3_io_resume,
2895 * Set the number of qsets based on the number of CPUs and the number of ports,
2896 * not to exceed the number of available qsets, assuming there are enough qsets per port in hardware.
2899 static void set_nqsets(struct adapter *adap)
2902 int num_cpus = num_online_cpus();
2903 int hwports = adap->params.nports;
2904 int nqsets = adap->msix_nvectors - 1;
2906 if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
2908 (hwports * nqsets > SGE_QSETS ||
2909 num_cpus >= nqsets / hwports))
2911 if (nqsets > num_cpus)
2913 if (nqsets < 1 || hwports == 4)
2918 for_each_port(adap, i) {
2919 struct port_info *pi = adap2pinfo(adap, i);
2922 pi->nqsets = nqsets;
2923 j = pi->first_qset + nqsets;
2925 dev_info(&adap->pdev->dev,
2926 "Port %d using %d queue sets.\n", i, nqsets);
2930 static int __devinit cxgb_enable_msix(struct adapter *adap)
2932 struct msix_entry entries[SGE_QSETS + 1];
2936 vectors = ARRAY_SIZE(entries);
2937 for (i = 0; i < vectors; ++i)
2938 entries[i].entry = i;
2940 while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0)
2944 pci_disable_msix(adap->pdev);
2946 if (!err && vectors < (adap->params.nports + 1)) {
2947 pci_disable_msix(adap->pdev);
2952 for (i = 0; i < vectors; ++i)
2953 adap->msix_info[i].vec = entries[i].vector;
2954 adap->msix_nvectors = vectors;
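/*
 * Log a one-line summary per registered port (adapter type, PHY, bus and
 * interrupt mode) plus a one-time line with memory sizes and the serial
 * number.
 */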
2960 static void __devinit print_port_info(struct adapter *adap,
2961 const struct adapter_info *ai)
2963 static const char *pci_variant[] = {
2964 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
2971 snprintf(buf, sizeof(buf), "%s x%d",
2972 pci_variant[adap->params.pci.variant],
2973 adap->params.pci.width);
2975 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2976 pci_variant[adap->params.pci.variant],
2977 adap->params.pci.speed, adap->params.pci.width);
2979 for_each_port(adap, i) {
2980 struct net_device *dev = adap->port[i];
2981 const struct port_info *pi = netdev_priv(dev);
2983 if (!test_bit(i, &adap->registered_device_map))
2985 printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
2986 dev->name, ai->desc, pi->phy.desc,
2987 is_offload(adap) ? "R" : "", adap->params.rev, buf,
2988 (adap->flags & USING_MSIX) ? " MSI-X" :
2989 (adap->flags & USING_MSI) ? " MSI" : "");
2990 if (adap->name == dev->name && adap->params.vpd.mclk)
2992 "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
2993 adap->name, t3_mc7_size(&adap->cm) >> 20,
2994 t3_mc7_size(&adap->pmtx) >> 20,
2995 t3_mc7_size(&adap->pmrx) >> 20,
2996 adap->params.vpd.sn);
3000 static const struct net_device_ops cxgb_netdev_ops = {
3001 .ndo_open = cxgb_open,
3002 .ndo_stop = cxgb_close,
3003 .ndo_start_xmit = t3_eth_xmit,
3004 .ndo_get_stats = cxgb_get_stats,
3005 .ndo_validate_addr = eth_validate_addr,
3006 .ndo_set_multicast_list = cxgb_set_rxmode,
3007 .ndo_do_ioctl = cxgb_ioctl,
3008 .ndo_change_mtu = cxgb_change_mtu,
3009 .ndo_set_mac_address = cxgb_set_mac_addr,
3010 .ndo_vlan_rx_register = vlan_rx_register,
3011 #ifdef CONFIG_NET_POLL_CONTROLLER
3012 .ndo_poll_controller = cxgb_netpoll,
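/*
 * PCI probe: create the driver workqueue if it does not yet exist, map
 * BAR 0, set up the DMA masks, allocate the adapter and one net device per
 * port, register the net devices, then choose MSI-X/MSI/INTx and size the
 * queue sets.
 */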
3016 static int __devinit init_one(struct pci_dev *pdev,
3017 const struct pci_device_id *ent)
3019 static int version_printed;
3021 int i, err, pci_using_dac = 0;
3022 resource_size_t mmio_start, mmio_len;
3023 const struct adapter_info *ai;
3024 struct adapter *adapter = NULL;
3025 struct port_info *pi;
3027 if (!version_printed) {
3028 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
3033 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
3035 printk(KERN_ERR DRV_NAME
3036 ": cannot initialize work queue\n");
3041 err = pci_request_regions(pdev, DRV_NAME);
3043 /* Just info, some other driver may have claimed the device. */
3044 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
3048 err = pci_enable_device(pdev);
3050 dev_err(&pdev->dev, "cannot enable PCI device\n");
3051 goto out_release_regions;
3054 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
3056 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3058 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
3059 "coherent allocations\n");
3060 goto out_disable_device;
3062 } else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
3063 dev_err(&pdev->dev, "no usable DMA configuration\n");
3064 goto out_disable_device;
3067 pci_set_master(pdev);
3068 pci_save_state(pdev);
3070 mmio_start = pci_resource_start(pdev, 0);
3071 mmio_len = pci_resource_len(pdev, 0);
3072 ai = t3_get_adapter_info(ent->driver_data);
3074 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3077 goto out_disable_device;
3080 adapter->nofail_skb =
3081 alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
3082 if (!adapter->nofail_skb) {
3083 dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
3085 goto out_free_adapter;
3088 adapter->regs = ioremap_nocache(mmio_start, mmio_len);
3089 if (!adapter->regs) {
3090 dev_err(&pdev->dev, "cannot map device registers\n");
3092 goto out_free_adapter;
3095 adapter->pdev = pdev;
3096 adapter->name = pci_name(pdev);
3097 adapter->msg_enable = dflt_msg_enable;
3098 adapter->mmio_len = mmio_len;
3100 mutex_init(&adapter->mdio_lock);
3101 spin_lock_init(&adapter->work_lock);
3102 spin_lock_init(&adapter->stats_lock);
3104 INIT_LIST_HEAD(&adapter->adapter_list);
3105 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
3106 INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
3107 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
3109 for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
3110 struct net_device *netdev;
3112 netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
3118 SET_NETDEV_DEV(netdev, &pdev->dev);
3120 adapter->port[i] = netdev;
3121 pi = netdev_priv(netdev);
3122 pi->adapter = adapter;
3123 pi->rx_offload = T3_RX_CSUM | T3_LRO;
3125 netif_carrier_off(netdev);
3126 netif_tx_stop_all_queues(netdev);
3127 netdev->irq = pdev->irq;
3128 netdev->mem_start = mmio_start;
3129 netdev->mem_end = mmio_start + mmio_len - 1;
3130 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
3131 netdev->features |= NETIF_F_GRO;
3133 netdev->features |= NETIF_F_HIGHDMA;
3135 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
3136 netdev->netdev_ops = &cxgb_netdev_ops;
3137 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
3140 pci_set_drvdata(pdev, adapter);
3141 if (t3_prep_adapter(adapter, ai, 1) < 0) {
3147 * The card is now ready to go. If any errors occur during device
3148 * registration we do not fail the whole card but rather proceed only
3149 * with the ports we manage to register successfully. However we must
3150 * register at least one net device.
3152 for_each_port(adapter, i) {
3153 err = register_netdev(adapter->port[i]);
3155 dev_warn(&pdev->dev,
3156 "cannot register net device %s, skipping\n",
3157 adapter->port[i]->name);
3160 * Change the name we use for messages to the name of
3161 * the first successfully registered interface.
3163 if (!adapter->registered_device_map)
3164 adapter->name = adapter->port[i]->name;
3166 __set_bit(i, &adapter->registered_device_map);
3169 if (!adapter->registered_device_map) {
3170 dev_err(&pdev->dev, "could not register any net devices\n");
3174 /* Driver's ready. Reflect it on LEDs */
3175 t3_led_ready(adapter);
3177 if (is_offload(adapter)) {
3178 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
3179 cxgb3_adapter_ofld(adapter);
3182 /* See what interrupts we'll be using */
3183 if (msi > 1 && cxgb_enable_msix(adapter) == 0)
3184 adapter->flags |= USING_MSIX;
3185 else if (msi > 0 && pci_enable_msi(pdev) == 0)
3186 adapter->flags |= USING_MSI;
3188 set_nqsets(adapter);
3190 err = sysfs_create_group(&adapter->port[0]->dev.kobj,
3193 print_port_info(adapter, ai);
3197 iounmap(adapter->regs);
3198 for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
3199 if (adapter->port[i])
3200 free_netdev(adapter->port[i]);
3206 pci_disable_device(pdev);
3207 out_release_regions:
3208 pci_release_regions(pdev);
3209 pci_set_drvdata(pdev, NULL);
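/*
 * PCI remove: stop the SGE, detach offload, unregister the net devices,
 * release SGE resources and interrupt vectors, free the net devices, and
 * unmap/release the PCI resources.
 */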
3213 static void __devexit remove_one(struct pci_dev *pdev)
3215 struct adapter *adapter = pci_get_drvdata(pdev);
3220 t3_sge_stop(adapter);
3221 sysfs_remove_group(&adapter->port[0]->dev.kobj,
3224 if (is_offload(adapter)) {
3225 cxgb3_adapter_unofld(adapter);
3226 if (test_bit(OFFLOAD_DEVMAP_BIT,
3227 &adapter->open_device_map))
3228 offload_close(&adapter->tdev);
3231 for_each_port(adapter, i)
3232 if (test_bit(i, &adapter->registered_device_map))
3233 unregister_netdev(adapter->port[i]);
3235 t3_stop_sge_timers(adapter);
3236 t3_free_sge_resources(adapter);
3237 cxgb_disable_msi(adapter);
3239 for_each_port(adapter, i)
3240 if (adapter->port[i])
3241 free_netdev(adapter->port[i]);
3243 iounmap(adapter->regs);
3244 if (adapter->nofail_skb)
3245 kfree_skb(adapter->nofail_skb);
3247 pci_release_regions(pdev);
3248 pci_disable_device(pdev);
3249 pci_set_drvdata(pdev, NULL);
3253 static struct pci_driver driver = {
3255 .id_table = cxgb3_pci_tbl,
3257 .remove = __devexit_p(remove_one),
3258 .err_handler = &t3_err_handler,
3261 static int __init cxgb3_init_module(void)
3265 cxgb3_offload_init();
3267 ret = pci_register_driver(&driver);
3271 static void __exit cxgb3_cleanup_module(void)
3273 pci_unregister_driver(&driver);
3275 destroy_workqueue(cxgb3_wq);
3278 module_init(cxgb3_init_module);
3279 module_exit(cxgb3_cleanup_module);