2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 #include <linux/module.h>
33 #include <linux/moduleparam.h>
34 #include <linux/init.h>
35 #include <linux/pci.h>
36 #include <linux/dma-mapping.h>
37 #include <linux/netdevice.h>
38 #include <linux/etherdevice.h>
39 #include <linux/if_vlan.h>
40 #include <linux/mii.h>
41 #include <linux/sockios.h>
42 #include <linux/workqueue.h>
43 #include <linux/proc_fs.h>
44 #include <linux/rtnetlink.h>
45 #include <linux/firmware.h>
46 #include <linux/log2.h>
47 #include <asm/uaccess.h>
50 #include "cxgb3_ioctl.h"
52 #include "cxgb3_offload.h"
55 #include "cxgb3_ctl_defs.h"
57 #include "firmware_exports.h"
/* SGE queue sizing limits (enum framing lines not visible in this chunk). */
60 MAX_TXQ_ENTRIES = 16384,
61 MAX_CTRL_TXQ_ENTRIES = 1024,
62 MAX_RSPQ_ENTRIES = 16384,
63 MAX_RX_BUFFERS = 16384,
64 MAX_RX_JUMBO_BUFFERS = 16384,
66 MIN_CTRL_TXQ_ENTRIES = 4,
67 MIN_RSPQ_ENTRIES = 32,
/* Mask of all per-port bits within adapter->open_device_map. */
71 #define PORT_MASK ((1 << MAX_NPORTS) - 1)
/* Default netif message-enable bitmap for newly created interfaces. */
73 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
74 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
75 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
77 #define EEPROM_MAGIC 0x38E2F10C
/* Build one pci_device_id table entry for a Chelsio device id. */
79 #define CH_DEVICE(devid, idx) \
80 { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
/*
 * PCI device IDs handled by this driver; the last CH_DEVICE field is an
 * adapter-type index (shared between electrically equivalent variants).
 * NOTE(review): the table's closing brace/terminator is not visible here.
 */
82 static const struct pci_device_id cxgb3_pci_tbl[] = {
83 CH_DEVICE(0x20, 0), /* PE9000 */
84 CH_DEVICE(0x21, 1), /* T302E */
85 CH_DEVICE(0x22, 2), /* T310E */
86 CH_DEVICE(0x23, 3), /* T320X */
87 CH_DEVICE(0x24, 1), /* T302X */
88 CH_DEVICE(0x25, 3), /* T320E */
89 CH_DEVICE(0x26, 2), /* T310X */
90 CH_DEVICE(0x30, 2), /* T3B10 */
91 CH_DEVICE(0x31, 3), /* T3B20 */
92 CH_DEVICE(0x32, 1), /* T3B02 */
/* Standard module metadata. */
96 MODULE_DESCRIPTION(DRV_DESC);
97 MODULE_AUTHOR("Chelsio Communications");
98 MODULE_LICENSE("Dual BSD/GPL");
99 MODULE_VERSION(DRV_VERSION);
100 MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
/* Default message-enable level, overridable at module load time. */
102 static int dflt_msg_enable = DFLT_MSG_ENABLE;
104 module_param(dflt_msg_enable, int, 0644);
105 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
108 * The driver uses the best interrupt scheme available on a platform in the
109 * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
110 * of these schemes the driver may consider as follows:
112 * msi = 2: choose from among all three options
113 * msi = 1: only consider MSI and pin interrupts
114 * msi = 0: force pin interrupts
118 module_param(msi, int, 0644);
119 MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
122 * The driver enables offload as a default.
123 * To disable it, use ofld_disable = 1.
/* Non-zero disables TCP offload support at interface-open time. */
126 static int ofld_disable = 0;
128 module_param(ofld_disable, int, 0644);
129 MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
132 * We have work elements that we need to cancel when an interface is taken
133 * down. Normally the work elements would be executed by keventd but that
134 * can deadlock because of linkwatch. If our close method takes the rtnl
135 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
136 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
137 * for our work to complete. Get our own work queue to solve this.
/* Driver-private workqueue; see deadlock rationale above. */
139 static struct workqueue_struct *cxgb3_wq;
142 * link_report - show link status and link speed/duplex
143 * @p: the port whose settings are to be reported
145 * Shows the link status, speed, and duplex of a port.
147 static void link_report(struct net_device *dev)
149 if (!netif_carrier_ok(dev))
150 printk(KERN_INFO "%s: link down\n", dev->name);
/* Carrier is up: report speed and duplex from the port's link_config. */
152 const char *s = "10Mbps";
153 const struct port_info *p = netdev_priv(dev);
/* NOTE(review): the speed-switch cases are not visible in this chunk. */
155 switch (p->link_config.speed) {
167 printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
168 p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
173 * t3_os_link_changed - handle link status changes
174 * @adapter: the adapter associated with the link change
175 * @port_id: the port index whose limk status has changed
176 * @link_stat: the new status of the link
177 * @speed: the new speed setting
178 * @duplex: the new duplex setting
179 * @pause: the new flow-control setting
181 * This is the OS-dependent handler for link status changes. The OS
182 * neutral handler takes care of most of the processing for these events,
183 * then calls this handler for any OS-specific processing.
185 void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
186 int speed, int duplex, int pause)
188 struct net_device *dev = adapter->port[port_id];
189 struct port_info *pi = netdev_priv(dev);
190 struct cmac *mac = &pi->mac;
192 /* Skip changes from disabled ports. */
193 if (!netif_running(dev))
/* Only act on an actual carrier transition. */
196 if (link_stat != netif_carrier_ok(dev)) {
/* Link came up: re-enable the MAC receive path and set carrier. */
198 t3_mac_enable(mac, MAC_DIRECTION_RX);
199 netif_carrier_on(dev);
/* Link went down: drop carrier, power down the PHY, stop MAC RX
 * and restart link negotiation. */
201 netif_carrier_off(dev);
202 pi->phy.ops->power_down(&pi->phy, 1);
203 t3_mac_disable(mac, MAC_DIRECTION_RX);
204 t3_link_start(&pi->phy, mac, &pi->link_config);
/* Push the net_device's current RX filter settings down to the MAC. */
211 static void cxgb_set_rxmode(struct net_device *dev)
213 struct t3_rx_mode rm;
214 struct port_info *pi = netdev_priv(dev);
216 init_rx_mode(&rm, dev, dev->mc_list);
217 t3_mac_set_rx_mode(&pi->mac, &rm);
221 * link_start - enable a port
222 * @dev: the device to enable
224 * Performs the MAC and PHY actions needed to enable a port.
226 static void link_start(struct net_device *dev)
228 struct t3_rx_mode rm;
229 struct port_info *pi = netdev_priv(dev);
230 struct cmac *mac = &pi->mac;
232 init_rx_mode(&rm, dev, dev->mc_list);
/* Program MTU, unicast address and RX mode, then bring up the link
 * and enable the MAC in both directions. */
234 t3_mac_set_mtu(mac, dev->mtu);
235 t3_mac_set_address(mac, 0, dev->dev_addr);
236 t3_mac_set_rx_mode(mac, &rm);
237 t3_link_start(&pi->phy, mac, &pi->link_config);
238 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
/* Disable whichever of MSI-X/MSI is active and clear the matching flag. */
241 static inline void cxgb_disable_msi(struct adapter *adapter)
243 if (adapter->flags & USING_MSIX) {
244 pci_disable_msix(adapter->pdev);
245 adapter->flags &= ~USING_MSIX;
246 } else if (adapter->flags & USING_MSI) {
247 pci_disable_msi(adapter->pdev);
248 adapter->flags &= ~USING_MSI;
253 * Interrupt handler for asynchronous events used with MSI-X.
255 static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
257 t3_slow_intr_handler(cookie);
262 * Name the MSI-X interrupts.
264 static void name_msix_vecs(struct adapter *adap)
266 int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
/* Vector 0 carries the adapter name; per-queue vectors get "(queue N)".
 * Each desc is explicitly NUL-terminated since snprintf is bounded to n. */
268 snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
269 adap->msix_info[0].desc[n] = 0;
271 for_each_port(adap, j) {
272 struct net_device *d = adap->port[j];
273 const struct port_info *pi = netdev_priv(d);
275 for (i = 0; i < pi->nqsets; i++, msi_idx++) {
276 snprintf(adap->msix_info[msi_idx].desc, n,
277 "%s (queue %d)", d->name, i);
278 adap->msix_info[msi_idx].desc[n] = 0;
/* Request one data IRQ per queue set; msix_info[0] is the async vector,
 * so data vectors start at index 1. */
283 static int request_msix_data_irqs(struct adapter *adap)
285 int i, j, err, qidx = 0;
287 for_each_port(adap, i) {
288 int nqsets = adap2pinfo(adap, i)->nqsets;
290 for (j = 0; j < nqsets; ++j) {
291 err = request_irq(adap->msix_info[qidx + 1].vec,
292 t3_intr_handler(adap,
295 adap->msix_info[qidx + 1].desc,
296 &adap->sge.qs[qidx]);
/* NOTE(review): unwind path — free already-requested vectors. */
299 free_irq(adap->msix_info[qidx + 1].vec,
300 &adap->sge.qs[qidx]);
/* Poll until qset 0 has seen init_cnt + n offload packets (management
 * replies are counted there). NOTE(review): timeout/body lines missing. */
309 static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
314 while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
/* Initialize TP parity by writing every SMT, L2T and routing-table entry,
 * then issuing one CPL_SET_TCB_FIELD, and waiting for all replies. */
322 static int init_tp_parity(struct adapter *adap)
326 struct cpl_set_tcb_field *greq;
327 unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;
329 t3_tp_set_offload_mode(adap, 1);
/* 16 SMT entries. */
331 for (i = 0; i < 16; i++) {
332 struct cpl_smt_write_req *req;
334 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
335 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
336 memset(req, 0, sizeof(*req));
337 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
338 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
340 t3_mgmt_tx(adap, skb);
/* 2048 L2T entries. */
343 for (i = 0; i < 2048; i++) {
344 struct cpl_l2t_write_req *req;
346 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
347 req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
348 memset(req, 0, sizeof(*req));
349 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
350 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
351 req->params = htonl(V_L2T_W_IDX(i));
352 t3_mgmt_tx(adap, skb);
/* 2048 routing-table entries. */
355 for (i = 0; i < 2048; i++) {
356 struct cpl_rte_write_req *req;
358 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
359 req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
360 memset(req, 0, sizeof(*req));
361 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
362 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
363 req->l2t_idx = htonl(V_L2T_W_IDX(i));
364 t3_mgmt_tx(adap, skb);
/* One final TCB-field write, then wait for 16 + 2048 + 2048 + 1 replies
 * before turning offload mode back off. */
367 skb = alloc_skb(sizeof(*greq), GFP_KERNEL | __GFP_NOFAIL);
368 greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
369 memset(greq, 0, sizeof(*greq));
370 greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
371 OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
372 greq->mask = cpu_to_be64(1);
373 t3_mgmt_tx(adap, skb);
375 i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
376 t3_tp_set_offload_mode(adap, 0);
381 * setup_rss - configure RSS
384 * Sets up RSS to distribute packets to multiple receive queues. We
385 * configure the RSS CPU lookup table to distribute to the number of HW
386 * receive queues, and the response queue lookup table to narrow that
387 * down to the response queues actually configured for each port.
388 * We always configure the RSS mapping for two ports since the mapping
389 * table has plenty of entries.
391 static void setup_rss(struct adapter *adap)
394 unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
395 unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
396 u8 cpus[SGE_QSETS + 1];
397 u16 rspq_map[RSS_TABLE_SIZE];
399 for (i = 0; i < SGE_QSETS; ++i)
401 cpus[SGE_QSETS] = 0xff; /* terminator */
/* First half of the table maps to port 0's qsets, second half to
 * port 1's (offset by nq0). */
403 for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
404 rspq_map[i] = i % nq0;
405 rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
408 t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
409 F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
410 V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
/* Register a NAPI context for every SGE queue set. */
413 static void init_napi(struct adapter *adap)
417 for (i = 0; i < SGE_QSETS; i++) {
418 struct sge_qset *qs = &adap->sge.qs[i];
421 netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
427 * Wait until all NAPI handlers are descheduled. This includes the handlers of
428 * both netdevices representing interfaces and the dummy ones for the extra
/* Disable NAPI on every initialized queue set (qs->adap set == in use). */
431 static void quiesce_rx(struct adapter *adap)
435 for (i = 0; i < SGE_QSETS; i++)
436 if (adap->sge.qs[i].adap)
437 napi_disable(&adap->sge.qs[i].napi);
/* Re-enable NAPI on every initialized queue set. */
440 static void enable_all_napi(struct adapter *adap)
443 for (i = 0; i < SGE_QSETS; i++)
444 if (adap->sge.qs[i].adap)
445 napi_enable(&adap->sge.qs[i].napi);
449 * setup_sge_qsets - configure SGE Tx/Rx/response queues
452 * Determines how many sets of SGE queues to use and initializes them.
453 * We support multiple queue sets per port if we have MSI-X, otherwise
454 * just one queue set per port.
456 static int setup_sge_qsets(struct adapter *adap)
458 int i, j, err, irq_idx = 0, qset_idx = 0;
459 unsigned int ntxq = SGE_TXQ_PER_SET;
461 if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
464 for_each_port(adap, i) {
465 struct net_device *dev = adap->port[i];
466 struct port_info *pi = netdev_priv(dev);
468 pi->qs = &adap->sge.qs[pi->first_qset];
/* With MSI-X each qset gets its own vector (offset by 1 past the
 * async vector); otherwise a shared irq index is used. */
469 for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
470 err = t3_sge_alloc_qset(adap, qset_idx, 1,
471 (adap->flags & USING_MSIX) ? qset_idx + 1 :
473 &adap->params.sge.qset[qset_idx], ntxq, dev);
/* On failure, release everything allocated so far. */
475 t3_free_sge_resources(adap);
/* Generic sysfs show helper: format the net_device's value into buf. */
484 static ssize_t attr_show(struct device *d, char *buf,
485 ssize_t(*format) (struct net_device *, char *))
489 /* Synchronize with ioctls that may shut down the device */
491 len = (*format) (to_net_dev(d), buf);
/* Generic sysfs store helper: parse an unsigned value, range-check it
 * against [min_val, max_val], and apply it via the set callback.
 * Requires CAP_NET_ADMIN. */
496 static ssize_t attr_store(struct device *d,
497 const char *buf, size_t len,
498 ssize_t(*set) (struct net_device *, unsigned int),
499 unsigned int min_val, unsigned int max_val)
505 if (!capable(CAP_NET_ADMIN))
508 val = simple_strtoul(buf, &endp, 0);
509 if (endp == buf || val < min_val || val > max_val)
513 ret = (*set) (to_net_dev(d), val);
/* Generate a format_<name>/show_<name> pair evaluating val_expr for the
 * port's adapter. */
520 #define CXGB3_SHOW(name, val_expr) \
521 static ssize_t format_##name(struct net_device *dev, char *buf) \
523 struct port_info *pi = netdev_priv(dev); \
524 struct adapter *adap = pi->adapter; \
525 return sprintf(buf, "%u\n", val_expr); \
527 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
530 return attr_show(d, buf, format_##name); \
/* Set the number of MC5 filter TIDs; rejected after full init, on rev-0
 * hardware, or when it would not leave room for servers + min TIDs. */
533 static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
535 struct port_info *pi = netdev_priv(dev);
536 struct adapter *adap = pi->adapter;
537 int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;
539 if (adap->flags & FULL_INIT_DONE)
541 if (val && adap->params.rev == 0)
543 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
546 adap->params.mc5.nfilters = val;
550 static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
551 const char *buf, size_t len)
553 return attr_store(d, buf, len, set_nfilters, 0, ~0);
/* Set the number of MC5 server TIDs, bounded by the remaining CAM space. */
556 static ssize_t set_nservers(struct net_device *dev, unsigned int val)
558 struct port_info *pi = netdev_priv(dev);
559 struct adapter *adap = pi->adapter;
561 if (adap->flags & FULL_INIT_DONE)
563 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
566 adap->params.mc5.nservers = val;
570 static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
571 const char *buf, size_t len)
573 return attr_store(d, buf, len, set_nservers, 0, ~0);
/* Read-only and read-write device-attribute generators built on CXGB3_SHOW. */
576 #define CXGB3_ATTR_R(name, val_expr) \
577 CXGB3_SHOW(name, val_expr) \
578 static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
580 #define CXGB3_ATTR_RW(name, val_expr, store_method) \
581 CXGB3_SHOW(name, val_expr) \
582 static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
584 CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
585 CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
586 CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
/* sysfs attribute group exposing the MC5 CAM sizing knobs. */
588 static struct attribute *cxgb3_attrs[] = {
589 &dev_attr_cam_size.attr,
590 &dev_attr_nfilters.attr,
591 &dev_attr_nservers.attr,
595 static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
/* Show one TX traffic-scheduler rate: read the TP_TM PIO register for the
 * scheduler and report "disabled" or the rate in Kbps derived from the
 * byte (bpt) and clock (cpt) fields and the core clock. */
597 static ssize_t tm_attr_show(struct device *d,
598 char *buf, int sched)
600 struct port_info *pi = netdev_priv(to_net_dev(d));
601 struct adapter *adap = pi->adapter;
602 unsigned int v, addr, bpt, cpt;
605 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
607 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
608 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
611 bpt = (v >> 8) & 0xff;
614 len = sprintf(buf, "disabled\n");
/* rate (Kbps) = bytes-per-tick * core-clock-ticks / 125 */
616 v = (adap->params.vpd.cclk * 1000) / cpt;
617 len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
/* Store a TX scheduler rate (<= 10 Mbps value range); CAP_NET_ADMIN only. */
623 static ssize_t tm_attr_store(struct device *d,
624 const char *buf, size_t len, int sched)
626 struct port_info *pi = netdev_priv(to_net_dev(d));
627 struct adapter *adap = pi->adapter;
632 if (!capable(CAP_NET_ADMIN))
635 val = simple_strtoul(buf, &endp, 0);
636 if (endp == buf || val > 10000000)
640 ret = t3_config_sched(adap, val, sched);
/* Generate show/store/DEVICE_ATTR for one TX scheduler attribute. */
647 #define TM_ATTR(name, sched) \
648 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
651 return tm_attr_show(d, buf, sched); \
653 static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
654 const char *buf, size_t len) \
656 return tm_attr_store(d, buf, len, sched); \
658 static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
/* sysfs group for the eight TX scheduler rate attributes. */
669 static struct attribute *offload_attrs[] = {
670 &dev_attr_sched0.attr,
671 &dev_attr_sched1.attr,
672 &dev_attr_sched2.attr,
673 &dev_attr_sched3.attr,
674 &dev_attr_sched4.attr,
675 &dev_attr_sched5.attr,
676 &dev_attr_sched6.attr,
677 &dev_attr_sched7.attr,
681 static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
684 * Sends an sk_buff to an offload queue driver
685 * after dealing with any active network taps.
687 static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
692 ret = t3_offload_tx(tdev, skb);
/* Program SMT entry idx with port idx's MAC address via a CPL message.
 * Uses plain GFP_KERNEL allocation (failure path not visible here). */
697 static int write_smt_entry(struct adapter *adapter, int idx)
699 struct cpl_smt_write_req *req;
700 struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
705 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
706 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
707 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
708 req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */
710 memset(req->src_mac1, 0, sizeof(req->src_mac1));
711 memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
713 offload_tx(&adapter->tdev, skb);
/* Write an SMT entry for every port. */
717 static int init_smt(struct adapter *adapter)
721 for_each_port(adapter, i)
722 write_smt_entry(adapter, i);
/* Pack both ports' MTUs into the TP MTU port-table register
 * (port 0 in the low half, port 1 in the high half). */
726 static void init_port_mtus(struct adapter *adapter)
728 unsigned int mtus = adapter->port[0]->mtu;
730 if (adapter->port[1])
731 mtus |= adapter->port[1]->mtu << 16;
732 t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
/* Send a management packet-scheduler configuration work request. */
735 static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
739 struct mngt_pktsched_wr *req;
741 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
742 req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
743 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
744 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
750 t3_mgmt_tx(adap, skb);
/* Bind each port's queue sets to the packet scheduler. */
753 static void bind_qsets(struct adapter *adap)
757 for_each_port(adap, i) {
758 const struct port_info *pi = adap2pinfo(adap, i);
760 for (j = 0; j < pi->nqsets; ++j)
761 send_pktsched_cmd(adap, 1, pi->first_qset + j, -1,
/* Firmware / protocol-SRAM image filename templates. */
766 #define FW_FNAME "t3fw-%d.%d.%d.bin"
767 #define TPSRAM_NAME "t3%c_protocol_sram-%d.%d.%d.bin"
/* Load the firmware image matching the driver's expected version via
 * request_firmware() and flash it into the adapter. */
769 static int upgrade_fw(struct adapter *adap)
773 const struct firmware *fw;
774 struct device *dev = &adap->pdev->dev;
776 snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
777 FW_VERSION_MINOR, FW_VERSION_MICRO);
778 ret = request_firmware(&fw, buf, dev);
780 dev_err(dev, "could not upgrade firmware: unable to load %s\n",
784 ret = t3_load_fw(adap, fw->data, fw->size);
785 release_firmware(fw);
788 dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
789 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
791 dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
792 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
/* Map the chip revision to the character used in TPSRAM_NAME. */
797 static inline char t3rev2char(struct adapter *adapter)
801 switch(adapter->params.rev) {
/* Load and validate the protocol-SRAM image for this chip revision,
 * then program it into the adapter. */
813 static int update_tpsram(struct adapter *adap)
815 const struct firmware *tpsram;
817 struct device *dev = &adap->pdev->dev;
821 rev = t3rev2char(adap);
825 snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
826 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
828 ret = request_firmware(&tpsram, buf, dev);
830 dev_err(dev, "could not load TP SRAM: unable to load %s\n",
/* Validate the image before programming it. */
835 ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
839 ret = t3_set_proto_sram(adap, tpsram->data);
842 "successful update of protocol engine "
844 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
846 dev_err(dev, "failed to update of protocol engine %d.%d.%d\n",
847 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
849 dev_err(dev, "loading protocol SRAM failed\n");
852 release_firmware(tpsram);
858 * cxgb_up - enable the adapter
859 * @adapter: adapter being enabled
861 * Called when the first port is enabled, this function performs the
862 * actions necessary to make an adapter operational, such as completing
863 * the initialization of HW modules, and enabling interrupts.
865 * Must be called with the rtnl lock held.
867 static int cxgb_up(struct adapter *adap)
/* One-time path: verify/upgrade firmware and TP SRAM, init HW and
 * SGE queue sets, then mark FULL_INIT_DONE. */
872 if (!(adap->flags & FULL_INIT_DONE)) {
873 err = t3_check_fw_version(adap, &must_load);
874 if (err == -EINVAL) {
875 err = upgrade_fw(adap);
876 if (err && must_load)
880 err = t3_check_tpsram_version(adap, &must_load);
881 if (err == -EINVAL) {
882 err = update_tpsram(adap);
883 if (err && must_load)
887 err = t3_init_hw(adap, 0);
891 t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
892 t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
894 err = setup_sge_qsets(adap);
900 adap->flags |= FULL_INIT_DONE;
/* Request interrupts: MSI-X (async vector + per-qset data vectors),
 * otherwise a single MSI/legacy IRQ. */
905 if (adap->flags & USING_MSIX) {
906 name_msix_vecs(adap);
907 err = request_irq(adap->msix_info[0].vec,
908 t3_async_intr_handler, 0,
909 adap->msix_info[0].desc, adap);
913 err = request_msix_data_irqs(adap);
915 free_irq(adap->msix_info[0].vec, adap);
918 } else if ((err = request_irq(adap->pdev->irq,
919 t3_intr_handler(adap,
920 adap->sge.qs[0].rspq.
922 (adap->flags & USING_MSI) ?
927 enable_all_napi(adap);
929 t3_intr_enable(adap);
/* T3C parts need TP parity initialization once per load. */
931 if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
932 is_offload(adap) && init_tp_parity(adap) == 0)
933 adap->flags |= TP_PARITY_INIT;
935 if (adap->flags & TP_PARITY_INIT) {
936 t3_write_reg(adap, A_TP_INT_CAUSE,
937 F_CMCACHEPERR | F_ARPLUTPERR);
938 t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
/* Bind queue sets to the scheduler only once per MSI-X setup. */
941 if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
943 adap->flags |= QUEUES_BOUND;
948 CH_ERR(adap, "request_irq failed, err %d\n", err);
953 * Release resources when all the ports and offloading have been stopped.
955 static void cxgb_down(struct adapter *adapter)
957 t3_sge_stop(adapter);
958 spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */
959 t3_intr_disable(adapter);
960 spin_unlock_irq(&adapter->work_lock);
/* Free the async vector plus one data vector per queue set. */
962 if (adapter->flags & USING_MSIX) {
965 free_irq(adapter->msix_info[0].vec, adapter);
966 for_each_port(adapter, i)
967 n += adap2pinfo(adapter, i)->nqsets;
969 for (i = 0; i < n; ++i)
970 free_irq(adapter->msix_info[i + 1].vec,
971 &adapter->sge.qs[i]);
973 free_irq(adapter->pdev->irq, adapter);
975 flush_workqueue(cxgb3_wq); /* wait for external IRQ handler */
/* Schedule the periodic adapter-check task; the interval comes from the
 * link-poll period when set, otherwise the stats-update period. */
979 static void schedule_chk_task(struct adapter *adap)
983 timeo = adap->params.linkpoll_period ?
984 (HZ * adap->params.linkpoll_period) / 10 :
985 adap->params.stats_update_period * HZ;
987 queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
/* Bring up offload support: mark OFFLOAD_DEVMAP_BIT, ensure the adapter is
 * up, activate the offload layer, program MTUs, expose the scheduler sysfs
 * group and notify registered offload clients. */
990 static int offload_open(struct net_device *dev)
992 struct port_info *pi = netdev_priv(dev);
993 struct adapter *adapter = pi->adapter;
994 struct t3cdev *tdev = dev2t3cdev(dev);
995 int adap_up = adapter->open_device_map & PORT_MASK;
/* Already open: nothing to do. */
998 if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1001 if (!adap_up && (err = cxgb_up(adapter)) < 0)
1004 t3_tp_set_offload_mode(adapter, 1);
1005 tdev->lldev = adapter->port[0];
1006 err = cxgb3_offload_activate(adapter);
1010 init_port_mtus(adapter);
1011 t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
1012 adapter->params.b_wnd,
1013 adapter->params.rev == 0 ?
1014 adapter->port[0]->mtu : 0xffff);
1017 /* Never mind if the next step fails */
1018 sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group);
1020 /* Call back all registered clients */
1021 cxgb3_add_clients(tdev);
1024 /* restore them in case the offload module has changed them */
/* Error path: undo offload mode and devmap bit. */
1026 t3_tp_set_offload_mode(adapter, 0);
1027 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1028 cxgb3_set_dummy_ops(tdev);
/* Tear down offload support: notify clients, remove the sysfs group,
 * disable offload mode, and deactivate if nothing else is open. */
1033 static int offload_close(struct t3cdev *tdev)
1035 struct adapter *adapter = tdev2adap(tdev);
1037 if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1040 /* Call back all registered clients */
1041 cxgb3_remove_clients(tdev);
1043 sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
1046 cxgb3_set_dummy_ops(tdev);
1047 t3_tp_set_offload_mode(adapter, 0);
1048 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1050 if (!adapter->open_device_map)
1053 cxgb3_offload_deactivate(adapter);
/* net_device open: bring the adapter up on first open, mark the port open,
 * optionally start offload, enable port interrupts and the TX queue. */
1057 static int cxgb_open(struct net_device *dev)
1059 struct port_info *pi = netdev_priv(dev);
1060 struct adapter *adapter = pi->adapter;
1061 int other_ports = adapter->open_device_map & PORT_MASK;
1064 if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) {
1065 quiesce_rx(adapter);
1069 set_bit(pi->port_id, &adapter->open_device_map);
/* Offload failure is reported but does not fail the open. */
1070 if (is_offload(adapter) && !ofld_disable) {
1071 err = offload_open(dev);
1074 "Could not initialize offload capabilities\n");
1078 t3_port_intr_enable(adapter, pi->port_id);
1079 netif_start_queue(dev);
1081 schedule_chk_task(adapter);
/* net_device stop: disable the port, power down the PHY, and when the last
 * port closes, cancel the periodic check task. */
1086 static int cxgb_close(struct net_device *dev)
1088 struct port_info *pi = netdev_priv(dev);
1089 struct adapter *adapter = pi->adapter;
1091 t3_port_intr_disable(adapter, pi->port_id);
1092 netif_stop_queue(dev);
1093 pi->phy.ops->power_down(&pi->phy, 1);
1094 netif_carrier_off(dev);
1095 t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
1097 spin_lock(&adapter->work_lock); /* sync with update task */
1098 clear_bit(pi->port_id, &adapter->open_device_map);
1099 spin_unlock(&adapter->work_lock);
1101 if (!(adapter->open_device_map & PORT_MASK))
1102 cancel_rearming_delayed_workqueue(cxgb3_wq,
1103 &adapter->adap_check_task);
1105 if (!adapter->open_device_map)
/* Fill the port's net_device_stats from the accumulated MAC statistics.
 * stats_lock serializes against the periodic stats-update task. */
1111 static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
1113 struct port_info *pi = netdev_priv(dev);
1114 struct adapter *adapter = pi->adapter;
1115 struct net_device_stats *ns = &pi->netstats;
1116 const struct mac_stats *pstats;
1118 spin_lock(&adapter->stats_lock);
1119 pstats = t3_mac_update_stats(&pi->mac);
1120 spin_unlock(&adapter->stats_lock);
1122 ns->tx_bytes = pstats->tx_octets;
1123 ns->tx_packets = pstats->tx_frames;
1124 ns->rx_bytes = pstats->rx_octets;
1125 ns->rx_packets = pstats->rx_frames;
1126 ns->multicast = pstats->rx_mcast_frames;
1128 ns->tx_errors = pstats->tx_underrun;
1129 ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
1130 pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
1131 pstats->rx_fifo_ovfl;
1133 /* detailed rx_errors */
1134 ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
1135 ns->rx_over_errors = 0;
1136 ns->rx_crc_errors = pstats->rx_fcs_errs;
1137 ns->rx_frame_errors = pstats->rx_symbol_errs;
1138 ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
1139 ns->rx_missed_errors = pstats->rx_cong_drops;
1141 /* detailed tx_errors */
1142 ns->tx_aborted_errors = 0;
1143 ns->tx_carrier_errors = 0;
1144 ns->tx_fifo_errors = pstats->tx_underrun;
1145 ns->tx_heartbeat_errors = 0;
1146 ns->tx_window_errors = 0;
/* ethtool msglevel accessors: the level is adapter-wide. */
1150 static u32 get_msglevel(struct net_device *dev)
1152 struct port_info *pi = netdev_priv(dev);
1153 struct adapter *adapter = pi->adapter;
1155 return adapter->msg_enable;
1158 static void set_msglevel(struct net_device *dev, u32 val)
1160 struct port_info *pi = netdev_priv(dev);
1161 struct adapter *adapter = pi->adapter;
1163 adapter->msg_enable = val;
/* ethtool statistics labels; order must match the *data++ sequence in
 * get_stats(). NOTE(review): several entries are not visible in this chunk. */
1166 static char stats_strings[][ETH_GSTRING_LEN] = {
1169 "TxMulticastFramesOK",
1170 "TxBroadcastFramesOK",
1177 "TxFrames128To255 ",
1178 "TxFrames256To511 ",
1179 "TxFrames512To1023 ",
1180 "TxFrames1024To1518 ",
1181 "TxFrames1519ToMax ",
1185 "RxMulticastFramesOK",
1186 "RxBroadcastFramesOK",
1197 "RxFrames128To255 ",
1198 "RxFrames256To511 ",
1199 "RxFrames512To1023 ",
1200 "RxFrames1024To1518 ",
1201 "RxFrames1519ToMax ",
1211 "CheckTXEnToggled ",
/* Number of ethtool stats exposed. */
1216 static int get_sset_count(struct net_device *dev, int sset)
1220 return ARRAY_SIZE(stats_strings);
/* Size of the register dump returned by get_regs(). */
1226 #define T3_REGMAP_SIZE (3 * 1024)
1228 static int get_regs_len(struct net_device *dev)
1230 return T3_REGMAP_SIZE;
1233 static int get_eeprom_len(struct net_device *dev)
/* ethtool get_drvinfo: report driver name/version, bus info, and a combined
 * firmware + TP microcode version string. */
1238 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1240 struct port_info *pi = netdev_priv(dev);
1241 struct adapter *adapter = pi->adapter;
1245 t3_get_fw_version(adapter, &fw_vers);
1246 t3_get_tp_version(adapter, &tp_vers);
1248 strcpy(info->driver, DRV_NAME);
1249 strcpy(info->version, DRV_VERSION);
1250 strcpy(info->bus_info, pci_name(adapter->pdev));
1252 strcpy(info->fw_version, "N/A");
1254 snprintf(info->fw_version, sizeof(info->fw_version),
1255 "%s %u.%u.%u TP %u.%u.%u",
1256 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1257 G_FW_VERSION_MAJOR(fw_vers),
1258 G_FW_VERSION_MINOR(fw_vers),
1259 G_FW_VERSION_MICRO(fw_vers),
1260 G_TP_VERSION_MAJOR(tp_vers),
1261 G_TP_VERSION_MINOR(tp_vers),
1262 G_TP_VERSION_MICRO(tp_vers));
/* ethtool get_strings: copy out the statistics labels. */
1266 static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
1268 if (stringset == ETH_SS_STATS)
1269 memcpy(data, stats_strings, sizeof(stats_strings));
/* Sum one per-queue SGE statistic (idx) across the port's queue sets. */
1272 static unsigned long collect_sge_port_stats(struct adapter *adapter,
1273 struct port_info *p, int idx)
1276 unsigned long tot = 0;
1278 for (i = 0; i < p->nqsets; ++i)
1279 tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
/* ethtool get_ethtool_stats: emit MAC + SGE counters in the exact order of
 * stats_strings[]. stats_lock serializes the MAC stats snapshot. */
1283 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1286 struct port_info *pi = netdev_priv(dev);
1287 struct adapter *adapter = pi->adapter;
1288 const struct mac_stats *s;
1290 spin_lock(&adapter->stats_lock);
1291 s = t3_mac_update_stats(&pi->mac);
1292 spin_unlock(&adapter->stats_lock);
/* TX counters. */
1294 *data++ = s->tx_octets;
1295 *data++ = s->tx_frames;
1296 *data++ = s->tx_mcast_frames;
1297 *data++ = s->tx_bcast_frames;
1298 *data++ = s->tx_pause;
1299 *data++ = s->tx_underrun;
1300 *data++ = s->tx_fifo_urun;
1302 *data++ = s->tx_frames_64;
1303 *data++ = s->tx_frames_65_127;
1304 *data++ = s->tx_frames_128_255;
1305 *data++ = s->tx_frames_256_511;
1306 *data++ = s->tx_frames_512_1023;
1307 *data++ = s->tx_frames_1024_1518;
1308 *data++ = s->tx_frames_1519_max;
/* RX counters. */
1310 *data++ = s->rx_octets;
1311 *data++ = s->rx_frames;
1312 *data++ = s->rx_mcast_frames;
1313 *data++ = s->rx_bcast_frames;
1314 *data++ = s->rx_pause;
1315 *data++ = s->rx_fcs_errs;
1316 *data++ = s->rx_symbol_errs;
1317 *data++ = s->rx_short;
1318 *data++ = s->rx_jabber;
1319 *data++ = s->rx_too_long;
1320 *data++ = s->rx_fifo_ovfl;
1322 *data++ = s->rx_frames_64;
1323 *data++ = s->rx_frames_65_127;
1324 *data++ = s->rx_frames_128_255;
1325 *data++ = s->rx_frames_256_511;
1326 *data++ = s->rx_frames_512_1023;
1327 *data++ = s->rx_frames_1024_1518;
1328 *data++ = s->rx_frames_1519_max;
1330 *data++ = pi->phy.fifo_errors;
/* Per-port SGE software counters, summed across queue sets. */
1332 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1333 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1334 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1335 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1336 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1337 *data++ = s->rx_cong_drops;
1339 *data++ = s->num_toggled;
1340 *data++ = s->num_resets;
/* Copy registers [start, end] into buf at offset start, one u32 at a time. */
1343 static inline void reg_block_dump(struct adapter *ap, void *buf,
1344 unsigned int start, unsigned int end)
1346 u32 *p = buf + start;
1348 for (; start <= end; start += sizeof(u32))
1349 *p++ = t3_read_reg(ap, start);
/* ethtool get_regs: dump selected register blocks into a zeroed buffer. */
1352 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1355 struct port_info *pi = netdev_priv(dev);
1356 struct adapter *ap = pi->adapter;
1360 * bits 0..9: chip version
1361 * bits 10..15: chip revision
1362 * bit 31: set for PCIe cards
1364 regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1367 * We skip the MAC statistics registers because they are clear-on-read.
1368 * Also reading multi-register stats would need to synchronize with the
1369 * periodic mac stats accumulation. Hard to justify the complexity.
1371 memset(buf, 0, T3_REGMAP_SIZE);
1372 reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1373 reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1374 reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1375 reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1376 reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1377 reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1378 XGM_REG(A_XGM_SERDES_STAT3, 1));
1379 reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1380 XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
/*
 * ethtool nway_reset handler: restart autonegotiation on the port's PHY.
 * Only valid while the interface is up and autoneg is enabled.
 */
1383 static int restart_autoneg(struct net_device *dev)
1385 struct port_info *p = netdev_priv(dev);
1387 if (!netif_running(dev))
1389 if (p->link_config.autoneg != AUTONEG_ENABLE)
1391 p->phy.ops->autoneg_restart(&p->phy);
/*
 * ethtool phys_id handler: blink the port-identification LED by toggling
 * GPIO0 once per second for 'data' seconds (2 half-cycles of 500 ms each).
 */
1395 static int cxgb3_phys_id(struct net_device *dev, u32 data)
1397 struct port_info *pi = netdev_priv(dev);
1398 struct adapter *adapter = pi->adapter;
1404 for (i = 0; i < data * 2; i++) {
1405 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1406 (i & 1) ? F_GPIO0_OUT_VAL : 0)/* odd cycles: LED on; even: off */;
/* msleep_interruptible() returns non-zero if a signal cut the sleep short */
1407 if (msleep_interruptible(500))
/* restore the LED to its steady state before returning */
1410 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
/*
 * ethtool get_settings handler: report the port's supported/advertised
 * link modes and, when the carrier is up, the negotiated speed/duplex.
 */
1415 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1417 struct port_info *p = netdev_priv(dev);
1419 cmd->supported = p->link_config.supported;
1420 cmd->advertising = p->link_config.advertising;
1422 if (netif_carrier_ok(dev)) {
1423 cmd->speed = p->link_config.speed;
1424 cmd->duplex = p->link_config.duplex;
1430 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1431 cmd->phy_address = p->phy.addr;
1432 cmd->transceiver = XCVR_EXTERNAL;
1433 cmd->autoneg = p->link_config.autoneg;
/*
 * Map a (speed, duplex) pair onto the corresponding SUPPORTED_* capability
 * bit (10/100/1000/10000 Mb/s; 10G is full-duplex only).
 */
1439 static int speed_duplex_to_caps(int speed, int duplex)
1445 if (duplex == DUPLEX_FULL)
1446 cap = SUPPORTED_10baseT_Full;
1448 cap = SUPPORTED_10baseT_Half;
1451 if (duplex == DUPLEX_FULL)
1452 cap = SUPPORTED_100baseT_Full;
1454 cap = SUPPORTED_100baseT_Half;
1457 if (duplex == DUPLEX_FULL)
1458 cap = SUPPORTED_1000baseT_Full;
1460 cap = SUPPORTED_1000baseT_Half;
1463 if (duplex == DUPLEX_FULL)
1464 cap = SUPPORTED_10000baseT_Full;
/* All ADVERTISED_* speed/duplex bits this driver understands. */
1469 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1470 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1471 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1472 ADVERTISED_10000baseT_Full)
/*
 * ethtool set_settings handler: force a fixed speed/duplex (autoneg off)
 * or restrict the advertised mode set (autoneg on), then restart the link
 * if the interface is running.
 */
1474 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1476 struct port_info *p = netdev_priv(dev);
1477 struct link_config *lc = &p->link_config;
1479 if (!(lc->supported & SUPPORTED_Autoneg))
1480 return -EOPNOTSUPP; /* can't change speed/duplex */
1482 if (cmd->autoneg == AUTONEG_DISABLE) {
1483 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
/* 1G cannot be forced here — presumably it requires autoneg on this HW */
1485 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
1487 lc->requested_speed = cmd->speed;
1488 lc->requested_duplex = cmd->duplex;
1489 lc->advertising = 0;
/* autoneg: clamp the requested advertisement to what the port supports */
1491 cmd->advertising &= ADVERTISED_MASK;
1492 cmd->advertising &= lc->supported;
1493 if (!cmd->advertising)
1495 lc->requested_speed = SPEED_INVALID;
1496 lc->requested_duplex = DUPLEX_INVALID;
1497 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1499 lc->autoneg = cmd->autoneg;
1500 if (netif_running(dev))
1501 t3_link_start(&p->phy, &p->mac, lc);
/* ethtool get_pauseparam handler: report the current flow-control state. */
1505 static void get_pauseparam(struct net_device *dev,
1506 struct ethtool_pauseparam *epause)
1508 struct port_info *p = netdev_priv(dev);
1510 epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1511 epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1512 epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
/*
 * ethtool set_pauseparam handler: update the requested flow-control
 * configuration and apply it — via link restart when autonegotiating,
 * or directly on the MAC otherwise.
 */
1515 static int set_pauseparam(struct net_device *dev,
1516 struct ethtool_pauseparam *epause)
1518 struct port_info *p = netdev_priv(dev);
1519 struct link_config *lc = &p->link_config;
1521 if (epause->autoneg == AUTONEG_DISABLE)
1522 lc->requested_fc = 0;
1523 else if (lc->supported & SUPPORTED_Autoneg)
1524 lc->requested_fc = PAUSE_AUTONEG;
1528 if (epause->rx_pause)
1529 lc->requested_fc |= PAUSE_RX;
1530 if (epause->tx_pause)
1531 lc->requested_fc |= PAUSE_TX;
1532 if (lc->autoneg == AUTONEG_ENABLE) {
/* let autoneg renegotiate pause settings */
1533 if (netif_running(dev))
1534 t3_link_start(&p->phy, &p->mac, lc);
/* forced mode: program the MAC directly (-1 keeps speed/duplex as-is) */
1536 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1537 if (netif_running(dev))
1538 t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
/* ethtool get_rx_csum handler: report the per-port Rx checksum-offload flag. */
1543 static u32 get_rx_csum(struct net_device *dev)
1545 struct port_info *p = netdev_priv(dev);
1547 return p->rx_csum_offload;
/* ethtool set_rx_csum handler: record the new Rx checksum-offload setting. */
1550 static int set_rx_csum(struct net_device *dev, u32 data)
1552 struct port_info *p = netdev_priv(dev);
1554 p->rx_csum_offload = data;
/*
 * ethtool get_ringparam handler: report ring-size limits and the current
 * sizes of the port's first queue set.  Note: the rx_mini fields are reused
 * to carry the response-queue size (there is no true mini ring).
 */
1558 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1560 struct port_info *pi = netdev_priv(dev);
1561 struct adapter *adapter = pi->adapter;
1562 const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1564 e->rx_max_pending = MAX_RX_BUFFERS;
1565 e->rx_mini_max_pending = 0;
1566 e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1567 e->tx_max_pending = MAX_TXQ_ENTRIES;
1569 e->rx_pending = q->fl_size;
1570 e->rx_mini_pending = q->rspq_size;
1571 e->rx_jumbo_pending = q->jumbo_size;
1572 e->tx_pending = q->txq_size[0];
/*
 * ethtool set_ringparam handler: validate the requested ring sizes and
 * apply them to every queue set owned by this port.  Rejected once the
 * adapter is fully initialized (rings already allocated).
 */
1575 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1577 struct port_info *pi = netdev_priv(dev);
1578 struct adapter *adapter = pi->adapter;
1579 struct qset_params *q;
1582 if (e->rx_pending > MAX_RX_BUFFERS ||
1583 e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1584 e->tx_pending > MAX_TXQ_ENTRIES ||
1585 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1586 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1587 e->rx_pending < MIN_FL_ENTRIES ||
1588 e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1589 e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1592 if (adapter->flags & FULL_INIT_DONE)
1595 q = &adapter->params.sge.qset[pi->first_qset];
1596 for (i = 0; i < pi->nqsets; ++i, ++q) {
1597 q->rspq_size = e->rx_mini_pending;
1598 q->fl_size = e->rx_pending;
1599 q->jumbo_size = e->rx_jumbo_pending;
/* same size for all three Tx queues (eth, offload, control) */
1600 q->txq_size[0] = e->tx_pending;
1601 q->txq_size[1] = e->tx_pending;
1602 q->txq_size[2] = e->tx_pending;
/*
 * ethtool set_coalesce handler: set the Rx interrupt holdoff time.
 * NOTE(review): only qset 0 is updated here — apparently the holdoff is
 * treated as adapter-global; confirm against the rest of the driver.
 */
1607 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1609 struct port_info *pi = netdev_priv(dev);
1610 struct adapter *adapter = pi->adapter;
1611 struct qset_params *qsp = &adapter->params.sge.qset[0];
1612 struct sge_qset *qs = &adapter->sge.qs[0];
/* hardware timer granularity check: value is programmed in core-clock ticks */
1614 if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1617 qsp->coalesce_usecs = c->rx_coalesce_usecs;
1618 t3_update_qset_coalesce(qs, qsp);
/* ethtool get_coalesce handler: report the Rx interrupt holdoff of qset 0. */
1622 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1624 struct port_info *pi = netdev_priv(dev);
1625 struct adapter *adapter = pi->adapter;
1626 struct qset_params *q = adapter->params.sge.qset;
1628 c->rx_coalesce_usecs = q->coalesce_usecs;
/*
 * ethtool get_eeprom handler: read the requested window of the serial
 * EEPROM.  Reads are done in aligned 4-byte units into a scratch buffer,
 * then the exact [offset, offset+len) slice is copied out.
 */
1632 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1635 struct port_info *pi = netdev_priv(dev);
1636 struct adapter *adapter = pi->adapter;
1639 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1643 e->magic = EEPROM_MAGIC;
1644 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1645 err = t3_seeprom_read(adapter, i, (u32 *) & buf[i]);
1648 memcpy(data, buf + e->offset, e->len);
/*
 * ethtool set_eeprom handler: write a window of the serial EEPROM.
 * Unaligned requests are handled read-modify-write: the first and last
 * 4-byte words are read back, the user data is merged in, and the whole
 * aligned span is written.  Write protection is lifted for the duration.
 */
1653 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1656 struct port_info *pi = netdev_priv(dev);
1657 struct adapter *adapter = pi->adapter;
1658 u32 aligned_offset, aligned_len, *p;
1662 if (eeprom->magic != EEPROM_MAGIC)
/* round the window out to 4-byte alignment */
1665 aligned_offset = eeprom->offset & ~3;
1666 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1668 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1669 buf = kmalloc(aligned_len, GFP_KERNEL);
/* pre-read the boundary words so unwritten bytes are preserved */
1672 err = t3_seeprom_read(adapter, aligned_offset, (u32 *) buf);
1673 if (!err && aligned_len > 4)
1674 err = t3_seeprom_read(adapter,
1675 aligned_offset + aligned_len - 4,
1676 (u32 *) & buf[aligned_len - 4]);
1679 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
/* disable write protection (0 = unprotected) */
1683 err = t3_seeprom_wp(adapter, 0);
1687 for (p = (u32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
1688 err = t3_seeprom_write(adapter, aligned_offset, *p);
1689 aligned_offset += 4;
/* re-enable write protection regardless of earlier errors */
1693 err = t3_seeprom_wp(adapter, 1);
/* ethtool get_wol handler: Wake-on-LAN is not supported; report nothing. */
1700 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1704 memset(&wol->sopass, 0, sizeof(wol->sopass));
/* ethtool operations table wired into every cxgb3 net device. */
1707 static const struct ethtool_ops cxgb_ethtool_ops = {
1708 .get_settings = get_settings,
1709 .set_settings = set_settings,
1710 .get_drvinfo = get_drvinfo,
1711 .get_msglevel = get_msglevel,
1712 .set_msglevel = set_msglevel,
1713 .get_ringparam = get_sge_param,
1714 .set_ringparam = set_sge_param,
1715 .get_coalesce = get_coalesce,
1716 .set_coalesce = set_coalesce,
1717 .get_eeprom_len = get_eeprom_len,
1718 .get_eeprom = get_eeprom,
1719 .set_eeprom = set_eeprom,
1720 .get_pauseparam = get_pauseparam,
1721 .set_pauseparam = set_pauseparam,
1722 .get_rx_csum = get_rx_csum,
1723 .set_rx_csum = set_rx_csum,
1724 .set_tx_csum = ethtool_op_set_tx_csum,
1725 .set_sg = ethtool_op_set_sg,
1726 .get_link = ethtool_op_get_link,
1727 .get_strings = get_strings,
1728 .phys_id = cxgb3_phys_id,
1729 .nway_reset = restart_autoneg,
1730 .get_sset_count = get_sset_count,
1731 .get_ethtool_stats = get_stats,
1732 .get_regs_len = get_regs_len,
1733 .get_regs = get_regs,
1735 .set_tso = ethtool_op_set_tso,
/*
 * Range check with a "leave unchanged" escape: a negative val means the
 * caller did not supply this parameter, which is always acceptable.
 */
1738 static int in_range(int val, int lo, int hi)
1740 return val < 0 || (val <= hi && val >= lo);
/*
 * Handler for the Chelsio-private SIOCCHIOCTL extension ioctl.  useraddr
 * points at a command header followed by a command-specific structure;
 * each case validates privileges and arguments before touching hardware
 * or adapter state.
 */
1743 static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1745 struct port_info *pi = netdev_priv(dev);
1746 struct adapter *adapter = pi->adapter;
1750 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
/* Update SGE queue-set parameters; negative fields mean "keep current". */
1754 case CHELSIO_SET_QSET_PARAMS:{
1756 struct qset_params *q;
1757 struct ch_qset_params t;
1759 if (!capable(CAP_NET_ADMIN))
1761 if (copy_from_user(&t, useraddr, sizeof(t)))
1763 if (t.qset_idx >= SGE_QSETS)
/* validate every supplied parameter against its hardware limits */
1765 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
1766 !in_range(t.cong_thres, 0, 255) ||
1767 !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
1769 !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
1771 !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
1772 MAX_CTRL_TXQ_ENTRIES) ||
1773 !in_range(t.fl_size[0], MIN_FL_ENTRIES,
1775 || !in_range(t.fl_size[1], MIN_FL_ENTRIES,
1776 MAX_RX_JUMBO_BUFFERS)
1777 || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
/* once rings are allocated, sizes/polling mode can no longer change */
1780 if ((adapter->flags & FULL_INIT_DONE) &&
1781 (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
1782 t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
1783 t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
1784 t.polling >= 0 || t.cong_thres >= 0))
1787 q = &adapter->params.sge.qset[t.qset_idx];
1789 if (t.rspq_size >= 0)
1790 q->rspq_size = t.rspq_size;
1791 if (t.fl_size[0] >= 0)
1792 q->fl_size = t.fl_size[0];
1793 if (t.fl_size[1] >= 0)
1794 q->jumbo_size = t.fl_size[1];
1795 if (t.txq_size[0] >= 0)
1796 q->txq_size[0] = t.txq_size[0];
1797 if (t.txq_size[1] >= 0)
1798 q->txq_size[1] = t.txq_size[1];
1799 if (t.txq_size[2] >= 0)
1800 q->txq_size[2] = t.txq_size[2];
1801 if (t.cong_thres >= 0)
1802 q->cong_thres = t.cong_thres;
/* interrupt latency can be changed live */
1803 if (t.intr_lat >= 0) {
1804 struct sge_qset *qs =
1805 &adapter->sge.qs[t.qset_idx];
1807 q->coalesce_usecs = t.intr_lat;
1808 t3_update_qset_coalesce(qs, q);
1810 if (t.polling >= 0) {
1811 if (adapter->flags & USING_MSIX)
1812 q->polling = t.polling;
1814 /* No polling with INTx for T3A */
1815 if (adapter->params.rev == 0 &&
1816 !(adapter->flags & USING_MSI))
/* with shared (non-MSI-X) interrupts the mode applies to all qsets */
1819 for (i = 0; i < SGE_QSETS; i++) {
1820 q = &adapter->params.sge.
1822 q->polling = t.polling;
/* Return the current parameters of one queue set. */
1828 case CHELSIO_GET_QSET_PARAMS:{
1829 struct qset_params *q;
1830 struct ch_qset_params t;
1832 if (copy_from_user(&t, useraddr, sizeof(t)))
1834 if (t.qset_idx >= SGE_QSETS)
1837 q = &adapter->params.sge.qset[t.qset_idx];
1838 t.rspq_size = q->rspq_size;
1839 t.txq_size[0] = q->txq_size[0];
1840 t.txq_size[1] = q->txq_size[1];
1841 t.txq_size[2] = q->txq_size[2];
1842 t.fl_size[0] = q->fl_size;
1843 t.fl_size[1] = q->jumbo_size;
1844 t.polling = q->polling;
1845 t.intr_lat = q->coalesce_usecs;
1846 t.cong_thres = q->cong_thres;
1848 if (copy_to_user(useraddr, &t, sizeof(t)))
/* Change the number of queue sets owned by this port, then recompute
 * every port's first_qset so the assignment stays contiguous. */
1852 case CHELSIO_SET_QSET_NUM:{
1853 struct ch_reg edata;
1854 unsigned int i, first_qset = 0, other_qsets = 0;
1856 if (!capable(CAP_NET_ADMIN))
1858 if (adapter->flags & FULL_INIT_DONE)
1860 if (copy_from_user(&edata, useraddr, sizeof(edata)))
/* multiple qsets per port require MSI-X */
1862 if (edata.val < 1 ||
1863 (edata.val > 1 && !(adapter->flags & USING_MSIX)))
1866 for_each_port(adapter, i)
1867 if (adapter->port[i] && adapter->port[i] != dev)
1868 other_qsets += adap2pinfo(adapter, i)->nqsets;
1870 if (edata.val + other_qsets > SGE_QSETS)
1873 pi->nqsets = edata.val;
1875 for_each_port(adapter, i)
1876 if (adapter->port[i]) {
1877 pi = adap2pinfo(adapter, i);
1878 pi->first_qset = first_qset;
1879 first_qset += pi->nqsets;
/* Report how many queue sets this port owns. */
1883 case CHELSIO_GET_QSET_NUM:{
1884 struct ch_reg edata;
1886 edata.cmd = CHELSIO_GET_QSET_NUM;
1887 edata.val = pi->nqsets;
1888 if (copy_to_user(useraddr, &edata, sizeof(edata)))
/* Load a firmware image supplied after the command header. */
1892 case CHELSIO_LOAD_FW:{
1894 struct ch_mem_range t;
1896 if (!capable(CAP_NET_ADMIN))
1898 if (copy_from_user(&t, useraddr, sizeof(t)))
1901 fw_data = kmalloc(t.len, GFP_KERNEL);
1906 (fw_data, useraddr + sizeof(t), t.len)) {
1911 ret = t3_load_fw(adapter, fw_data, t.len);
/* Replace the offload MTU table (offload must be stopped). */
1917 case CHELSIO_SETMTUTAB:{
1921 if (!is_offload(adapter))
1923 if (!capable(CAP_NET_ADMIN))
1925 if (offload_running(adapter))
1927 if (copy_from_user(&m, useraddr, sizeof(m)))
1929 if (m.nmtus != NMTUS)
1931 if (m.mtus[0] < 81) /* accommodate SACK */
1934 /* MTUs must be in ascending order */
1935 for (i = 1; i < NMTUS; ++i)
1936 if (m.mtus[i] < m.mtus[i - 1])
1939 memcpy(adapter->params.mtus, m.mtus,
1940 sizeof(adapter->params.mtus));
/* Report the payload-memory (PM) layout. */
1943 case CHELSIO_GET_PM:{
1944 struct tp_params *p = &adapter->params.tp;
1945 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
1947 if (!is_offload(adapter))
1949 m.tx_pg_sz = p->tx_pg_size;
1950 m.tx_num_pg = p->tx_num_pgs;
1951 m.rx_pg_sz = p->rx_pg_size;
1952 m.rx_num_pg = p->rx_num_pgs;
1953 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
1954 if (copy_to_user(useraddr, &m, sizeof(m)))
/* Configure the payload-memory layout (before full initialization). */
1958 case CHELSIO_SET_PM:{
1960 struct tp_params *p = &adapter->params.tp;
1962 if (!is_offload(adapter))
1964 if (!capable(CAP_NET_ADMIN))
1966 if (adapter->flags & FULL_INIT_DONE)
1968 if (copy_from_user(&m, useraddr, sizeof(m)))
1970 if (!is_power_of_2(m.rx_pg_sz) ||
1971 !is_power_of_2(m.tx_pg_sz))
1972 return -EINVAL; /* not power of 2 */
/* masks below encode the allowed power-of-2 page sizes per direction */
1973 if (!(m.rx_pg_sz & 0x14000))
1974 return -EINVAL; /* not 16KB or 64KB */
1975 if (!(m.tx_pg_sz & 0x1554000))
1977 if (m.tx_num_pg == -1)
1978 m.tx_num_pg = p->tx_num_pgs;
1979 if (m.rx_num_pg == -1)
1980 m.rx_num_pg = p->rx_num_pgs;
/* page counts must divide evenly across the 24 PM regions */
1981 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
1983 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
1984 m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
1986 p->rx_pg_size = m.rx_pg_sz;
1987 p->tx_pg_size = m.tx_pg_sz;
1988 p->rx_num_pgs = m.rx_num_pg;
1989 p->tx_num_pgs = m.tx_num_pg;
/* Dump a range of one of the MC7 memories (CM/PMRX/PMTX) to userspace. */
1992 case CHELSIO_GET_MEM:{
1993 struct ch_mem_range t;
1997 if (!is_offload(adapter))
1999 if (!(adapter->flags & FULL_INIT_DONE))
2000 return -EIO; /* need the memory controllers */
2001 if (copy_from_user(&t, useraddr, sizeof(t)))
/* MC7 access is in 8-byte (backdoor) units */
2003 if ((t.addr & 7) || (t.len & 7))
2005 if (t.mem_id == MEM_CM)
2007 else if (t.mem_id == MEM_PMRX)
2008 mem = &adapter->pmrx;
2009 else if (t.mem_id == MEM_PMTX)
2010 mem = &adapter->pmtx;
/* Version encoding returned alongside the data: */
2016 * bits 0..9: chip version
2017 * bits 10..15: chip revision
2019 t.version = 3 | (adapter->params.rev << 10);
2020 if (copy_to_user(useraddr, &t, sizeof(t)))
2024 * Read 256 bytes at a time as len can be large and we don't
2025 * want to use huge intermediate buffers.
2027 useraddr += sizeof(t); /* advance to start of buffer */
2029 unsigned int chunk =
2030 min_t(unsigned int, t.len, sizeof(buf));
2033 t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
2037 if (copy_to_user(useraddr, buf, chunk))
/* Program the hardware packet-trace filters (both directions). */
2045 case CHELSIO_SET_TRACE_FILTER:{
2047 const struct trace_params *tp;
2049 if (!capable(CAP_NET_ADMIN))
2051 if (!offload_running(adapter))
2053 if (copy_from_user(&t, useraddr, sizeof(t)))
/* the trace_params fields start at t.sip within the user structure */
2056 tp = (const struct trace_params *)&t.sip;
2058 t3_config_trace_filter(adapter, tp, 0,
2062 t3_config_trace_filter(adapter, tp, 1,
/*
 * Net-device ioctl handler: services the standard MII register ioctls
 * (phy id / register read / register write) and forwards the Chelsio
 * extension ioctl to cxgb_extension_ioctl().  For 10G PHYs the MMD is
 * carried in the upper bits of phy_id (clause-45 style addressing).
 */
2073 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2075 struct mii_ioctl_data *data = if_mii(req);
2076 struct port_info *pi = netdev_priv(dev);
2077 struct adapter *adapter = pi->adapter;
/* SIOCGMIIPHY: report the PHY address */
2082 data->phy_id = pi->phy.addr;
/* SIOCGMIIREG: read a PHY register */
2086 struct cphy *phy = &pi->phy;
2088 if (!phy->mdio_read)
2090 if (is_10G(adapter)) {
/* clause-45: MMD (device) number rides in phy_id bits 8+ */
2091 mmd = data->phy_id >> 8;
2094 else if (mmd > MDIO_DEV_XGXS)
2098 phy->mdio_read(adapter, data->phy_id & 0x1f,
2099 mmd, data->reg_num, &val);
/* clause-22 path: 5-bit register number, no MMD */
2102 phy->mdio_read(adapter, data->phy_id & 0x1f,
2103 0, data->reg_num & 0x1f,
2106 data->val_out = val;
/* SIOCSMIIREG: write a PHY register (privileged) */
2110 struct cphy *phy = &pi->phy;
2112 if (!capable(CAP_NET_ADMIN))
2114 if (!phy->mdio_write)
2116 if (is_10G(adapter)) {
2117 mmd = data->phy_id >> 8;
2120 else if (mmd > MDIO_DEV_XGXS)
2124 phy->mdio_write(adapter,
2125 data->phy_id & 0x1f, mmd,
2130 phy->mdio_write(adapter,
2131 data->phy_id & 0x1f, 0,
2132 data->reg_num & 0x1f,
/* SIOCCHIOCTL: Chelsio-private extension commands */
2137 return cxgb_extension_ioctl(dev, req->ifr_data);
/*
 * Net-device change_mtu handler: program the MAC with the new MTU and,
 * on T3A with offload active, reload the hardware MTU table (which
 * depends on the port MTUs).
 */
2144 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2146 struct port_info *pi = netdev_priv(dev);
2147 struct adapter *adapter = pi->adapter;
2150 if (new_mtu < 81) /* accommodate SACK */
2152 if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2155 init_port_mtus(adapter);
2156 if (adapter->params.rev == 0 && offload_running(adapter))
2157 t3_load_mtus(adapter, adapter->params.mtus,
2158 adapter->params.a_wnd, adapter->params.b_wnd,
2159 adapter->port[0]->mtu);
/*
 * Net-device set_mac_address handler: validate and install the new MAC
 * address in the device and the hardware, and refresh the offload SMT
 * entry when the offload stack is active.
 */
2163 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2165 struct port_info *pi = netdev_priv(dev);
2166 struct adapter *adapter = pi->adapter;
2167 struct sockaddr *addr = p;
2169 if (!is_valid_ether_addr(addr->sa_data))
2172 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2173 t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2174 if (offload_running(adapter))
2175 write_smt_entry(adapter, pi->port_id);
2180 * t3_synchronize_rx - wait for current Rx processing on a port to complete
2181 * @adap: the adapter
 * @p: the port whose response queues to synchronize with
2184 * Ensures that current Rx processing on any of the queues associated with
2185 * the given port completes before returning. We do this by acquiring and
2186 * releasing the locks of the response queues associated with the port.
2188 static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2192 for (i = 0; i < p->nqsets; i++) {
2193 struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;
/* empty critical section: returning from it proves no handler is inside */
2195 spin_lock_irq(&q->lock);
2196 spin_unlock_irq(&q->lock);
/*
 * VLAN acceleration registration: on rev > 0 chips each port has its own
 * VLAN-extraction control; on rev 0 a single control covers all ports, so
 * it stays enabled while any port has a vlan_group.  Afterwards wait for
 * in-flight Rx to finish so no packet uses a stale group pointer.
 */
2200 static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2202 struct port_info *pi = netdev_priv(dev);
2203 struct adapter *adapter = pi->adapter;
2206 if (adapter->params.rev > 0)
2207 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2209 /* single control for all ports */
2210 unsigned int i, have_vlans = 0;
2211 for_each_port(adapter, i)
2212 have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2214 t3_set_vlan_accel(adapter, 1, have_vlans);
2216 t3_synchronize_rx(adapter, pi);
2219 #ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * netpoll hook: run the appropriate interrupt handler by hand for each of
 * the port's queue sets so Rx makes progress with interrupts disabled.
 */
2220 static void cxgb_netpoll(struct net_device *dev)
2222 struct port_info *pi = netdev_priv(dev);
2223 struct adapter *adapter = pi->adapter;
2226 for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2227 struct sge_qset *qs = &adapter->sge.qs[qidx];
2230 if (adapter->flags & USING_MSIX)
/* call the handler t3_intr_handler() selects for this interrupt mode */
2235 t3_intr_handler(adapter, qs->rspq.polling) (0, source);
2241 * Periodic accumulation of MAC statistics.
2243 static void mac_stats_update(struct adapter *adapter)
2247 for_each_port(adapter, i) {
2248 struct net_device *dev = adapter->port[i];
2249 struct port_info *p = netdev_priv(dev);
2251 if (netif_running(dev)) {
/* stats_lock also guards readers in the ethtool stats path */
2252 spin_lock(&adapter->stats_lock);
2253 t3_mac_update_stats(&p->mac);
2254 spin_unlock(&adapter->stats_lock);
/*
 * Poll link state for ports whose PHYs cannot raise a link-change
 * interrupt (no SUPPORTED_IRQ capability).
 */
2259 static void check_link_status(struct adapter *adapter)
2263 for_each_port(adapter, i) {
2264 struct net_device *dev = adapter->port[i];
2265 struct port_info *p = netdev_priv(dev);
2267 if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
2268 t3_link_changed(adapter, i);
/*
 * T3B2 MAC watchdog: run the per-MAC health check on every running port
 * and, when the check asks for it, fully reinitialize the MAC (MTU,
 * address, Rx mode, link, interrupts).  Runs under the RTNL to stay
 * serialized against interface down.
 */
2272 static void check_t3b2_mac(struct adapter *adapter)
2276 if (!rtnl_trylock()) /* synchronize with ifdown */
2279 for_each_port(adapter, i) {
2280 struct net_device *dev = adapter->port[i];
2281 struct port_info *p = netdev_priv(dev);
2284 if (!netif_running(dev))
2288 if (netif_running(dev) && netif_carrier_ok(dev))
2289 status = t3b2_mac_watchdog_task(&p->mac);
/* status 1: the watchdog toggled the MAC; just count it */
2291 p->mac.stats.num_toggled++;
/* status 2: MAC needs a full reset and reprogramming */
2292 else if (status == 2) {
2293 struct cmac *mac = &p->mac;
2295 t3_mac_set_mtu(mac, dev->mtu);
2296 t3_mac_set_address(mac, 0, dev->dev_addr);
2297 cxgb_set_rxmode(dev);
2298 t3_link_start(&p->phy, mac, &p->link_config);
2299 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2300 t3_port_intr_enable(adapter, p->port_id);
2301 p->mac.stats.num_resets++;
/*
 * Periodic maintenance work item: poll links, accumulate MAC statistics
 * on the configured cadence, run the T3B2 MAC watchdog, and re-arm itself
 * while any port remains open.
 */
2308 static void t3_adap_check_task(struct work_struct *work)
2310 struct adapter *adapter = container_of(work, struct adapter,
2311 adap_check_task.work);
2312 const struct adapter_params *p = &adapter->params;
2314 adapter->check_task_cnt++;
2316 /* Check link status for PHYs without interrupts */
2317 if (p->linkpoll_period)
2318 check_link_status(adapter);
2320 /* Accumulate MAC stats if needed */
2321 if (!p->linkpoll_period ||
2322 (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2323 p->stats_update_period) {
2324 mac_stats_update(adapter);
2325 adapter->check_task_cnt = 0;
2328 if (p->rev == T3_REV_B2)
2329 check_t3b2_mac(adapter);
2331 /* Schedule the next check update if any port is active. */
2332 spin_lock(&adapter->work_lock);
2333 if (adapter->open_device_map & PORT_MASK)
2334 schedule_chk_task(adapter);
2335 spin_unlock(&adapter->work_lock);
2339 * Processes external (PHY) interrupts in process context.
2341 static void ext_intr_task(struct work_struct *work)
2343 struct adapter *adapter = container_of(work, struct adapter,
2344 ext_intr_handler_task);
2346 t3_phy_intr_handler(adapter);
2348 /* Now reenable external interrupts */
2349 spin_lock_irq(&adapter->work_lock);
/* a zero mask means interrupts were shut down entirely; leave them off */
2350 if (adapter->slow_intr_mask) {
2351 adapter->slow_intr_mask |= F_T3DBG;
/* clear any pending T3DBG cause before unmasking it */
2352 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2353 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2354 adapter->slow_intr_mask);
2356 spin_unlock_irq(&adapter->work_lock);
2360 * Interrupt-context handler for external (PHY) interrupts.
2362 void t3_os_ext_intr_handler(struct adapter *adapter)
2365 * Schedule a task to handle external interrupts as they may be slow
2366 * and we use a mutex to protect MDIO registers. We disable PHY
2367 * interrupts in the meantime and let the task reenable them when
/* mask the T3DBG (PHY) source, then defer the real work to ext_intr_task */
2370 spin_lock(&adapter->work_lock);
2371 if (adapter->slow_intr_mask) {
2372 adapter->slow_intr_mask &= ~F_T3DBG;
2373 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2374 adapter->slow_intr_mask);
2375 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2377 spin_unlock(&adapter->work_lock);
/*
 * Fatal-error handler: quiesce the SGE and both XGMACs, disable all
 * interrupts, then log the failure together with the firmware's status
 * words (read from CIM control block offset 0xa0) to aid diagnosis.
 */
2380 void t3_fatal_err(struct adapter *adapter)
2382 unsigned int fw_status[4];
2384 if (adapter->flags & FULL_INIT_DONE) {
2385 t3_sge_stop(adapter);
2386 t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
2387 t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
2388 t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
2389 t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
2390 t3_intr_disable(adapter);
2392 CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2393 if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2394 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2395 fw_status[0], fw_status[1],
2396 fw_status[2], fw_status[3]);
2401 * t3_io_error_detected - called when PCI error is detected
2402 * @pdev: Pointer to PCI device
2403 * @state: The current pci connection state
2405 * This function is called after a PCI bus error affecting
2406 * this device has been detected.
2408 static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
2409 pci_channel_state_t state)
2411 struct adapter *adapter = pci_get_drvdata(pdev);
2414 /* Stop all ports */
2415 for_each_port(adapter, i) {
2416 struct net_device *netdev = adapter->port[i];
2418 if (netif_running(netdev))
2422 if (is_offload(adapter) &&
2423 test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
2424 offload_close(&adapter->tdev);
2426 /* Free sge resources */
2427 t3_free_sge_resources(adapter);
/* force a full re-init on recovery */
2429 adapter->flags &= ~FULL_INIT_DONE;
2431 pci_disable_device(pdev);
2433 /* Request a slot reset. */
2434 return PCI_ERS_RESULT_NEED_RESET;
2438 * t3_io_slot_reset - called after the pci bus has been reset.
2439 * @pdev: Pointer to PCI device
2441 * Restart the card from scratch, as if from a cold-boot.
2443 static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
2445 struct adapter *adapter = pci_get_drvdata(pdev);
2447 if (pci_enable_device(pdev)) {
2449 "Cannot re-enable PCI device after reset.\n");
2450 return PCI_ERS_RESULT_DISCONNECT;
2452 pci_set_master(pdev);
/* re-run the hardware bring-up with the adapter's original parameters */
2454 t3_prep_adapter(adapter, adapter->params.info, 1);
2456 return PCI_ERS_RESULT_RECOVERED;
2460 * t3_io_resume - called when traffic can start flowing again.
2461 * @pdev: Pointer to PCI device
2463 * This callback is called when the error recovery driver tells us that
2464 * it's OK to resume normal operation.
2466 static void t3_io_resume(struct pci_dev *pdev)
2468 struct adapter *adapter = pci_get_drvdata(pdev);
2471 /* Restart the ports */
2472 for_each_port(adapter, i) {
2473 struct net_device *netdev = adapter->port[i];
2475 if (netif_running(netdev)) {
2476 if (cxgb_open(netdev)) {
2478 "can't bring device back up"
2482 netif_device_attach(netdev);
2486 if (is_offload(adapter)) {
2487 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
2488 if (offload_open(adapter->port[0]))
2490 "Could not bring back offload capabilities\n");
/* PCI error-recovery callbacks registered with the PCI core. */
2494 static struct pci_error_handlers t3_err_handler = {
2495 .error_detected = t3_io_error_detected,
2496 .slot_reset = t3_io_slot_reset,
2497 .resume = t3_io_resume,
/*
 * Try to enable MSI-X with one vector per queue set plus one for slow-path
 * interrupts.  On success the vector numbers are recorded in msix_info;
 * on partial availability the attempt is abandoned with a log message.
 */
2500 static int __devinit cxgb_enable_msix(struct adapter *adap)
2502 struct msix_entry entries[SGE_QSETS + 1];
2505 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2506 entries[i].entry = i;
2508 err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
2510 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2511 adap->msix_info[i].vec = entries[i].vector;
/* positive err = number of vectors actually available (insufficient) */
2513 dev_info(&adap->pdev->dev,
2514 "only %d MSI-X vectors left, not using MSI-X\n", err);
/*
 * Log a one-line summary for each registered port (adapter type, port
 * type, offload capability, PCI bus mode, interrupt mode) and, once per
 * adapter, the sizes of the CM/PMTX/PMRX memories and the serial number.
 */
2518 static void __devinit print_port_info(struct adapter *adap,
2519 const struct adapter_info *ai)
2521 static const char *pci_variant[] = {
2522 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
/* PCIe reports lane count; parallel PCI reports clock and bus width */
2529 snprintf(buf, sizeof(buf), "%s x%d",
2530 pci_variant[adap->params.pci.variant],
2531 adap->params.pci.width);
2533 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2534 pci_variant[adap->params.pci.variant],
2535 adap->params.pci.speed, adap->params.pci.width);
2537 for_each_port(adap, i) {
2538 struct net_device *dev = adap->port[i];
2539 const struct port_info *pi = netdev_priv(dev);
2541 if (!test_bit(i, &adap->registered_device_map))
2543 printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
2544 dev->name, ai->desc, pi->port_type->desc,
2545 is_offload(adap) ? "R" : "", adap->params.rev, buf,
2546 (adap->flags & USING_MSIX) ? " MSI-X" :
2547 (adap->flags & USING_MSI) ? " MSI" : "");
/* memory summary printed once, for the interface the adapter is named after */
2548 if (adap->name == dev->name && adap->params.vpd.mclk)
2550 "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
2551 adap->name, t3_mc7_size(&adap->cm) >> 20,
2552 t3_mc7_size(&adap->pmtx) >> 20,
2553 t3_mc7_size(&adap->pmrx) >> 20,
2554 adap->params.vpd.sn);
/*
 * PCI probe: claim and enable the device, configure DMA masks, map BAR0,
 * allocate the adapter and its per-port net devices, initialize locks and
 * work items, prepare the hardware, register the net devices, bring up the
 * offload layer, and pick the interrupt mode.  Uses goto-based unwind for
 * error paths (out_free_adapter / out_disable_device / out_release_regions).
 */
2558 static int __devinit init_one(struct pci_dev *pdev,
2559 const struct pci_device_id *ent)
2561 static int version_printed;
2563 int i, err, pci_using_dac = 0;
2564 unsigned long mmio_start, mmio_len;
2565 const struct adapter_info *ai;
2566 struct adapter *adapter = NULL;
2567 struct port_info *pi;
/* print the driver banner only on the first probed device */
2569 if (!version_printed) {
2570 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
/* driver-global workqueue shared by all adapters */
2575 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
2577 printk(KERN_ERR DRV_NAME
2578 ": cannot initialize work queue\n");
2583 err = pci_request_regions(pdev, DRV_NAME);
2585 /* Just info, some other driver may have claimed the device. */
2586 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
2590 err = pci_enable_device(pdev);
2592 dev_err(&pdev->dev, "cannot enable PCI device\n");
2593 goto out_release_regions;
/* prefer 64-bit DMA; fall back to 32-bit when unavailable */
2596 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2598 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2600 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
2601 "coherent allocations\n");
2602 goto out_disable_device;
2604 } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
2605 dev_err(&pdev->dev, "no usable DMA configuration\n");
2606 goto out_disable_device;
2609 pci_set_master(pdev);
2611 mmio_start = pci_resource_start(pdev, 0);
2612 mmio_len = pci_resource_len(pdev, 0);
2613 ai = t3_get_adapter_info(ent->driver_data);
2615 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2618 goto out_disable_device;
2621 adapter->regs = ioremap_nocache(mmio_start, mmio_len);
2622 if (!adapter->regs) {
2623 dev_err(&pdev->dev, "cannot map device registers\n");
2625 goto out_free_adapter;
2628 adapter->pdev = pdev;
2629 adapter->name = pci_name(pdev);
2630 adapter->msg_enable = dflt_msg_enable;
2631 adapter->mmio_len = mmio_len;
2633 mutex_init(&adapter->mdio_lock);
2634 spin_lock_init(&adapter->work_lock);
2635 spin_lock_init(&adapter->stats_lock);
2637 INIT_LIST_HEAD(&adapter->adapter_list);
2638 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
2639 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
/* allocate and wire up one net device per physical port */
2641 for (i = 0; i < ai->nports; ++i) {
2642 struct net_device *netdev;
2644 netdev = alloc_etherdev(sizeof(struct port_info));
2650 SET_NETDEV_DEV(netdev, &pdev->dev);
2652 adapter->port[i] = netdev;
2653 pi = netdev_priv(netdev);
2654 pi->adapter = adapter;
2655 pi->rx_csum_offload = 1;
2660 netif_carrier_off(netdev);
2661 netdev->irq = pdev->irq;
2662 netdev->mem_start = mmio_start;
2663 netdev->mem_end = mmio_start + mmio_len - 1;
2664 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
2665 netdev->features |= NETIF_F_LLTX;
2667 netdev->features |= NETIF_F_HIGHDMA;
2669 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2670 netdev->vlan_rx_register = vlan_rx_register;
2672 netdev->open = cxgb_open;
2673 netdev->stop = cxgb_close;
2674 netdev->hard_start_xmit = t3_eth_xmit;
2675 netdev->get_stats = cxgb_get_stats;
2676 netdev->set_multicast_list = cxgb_set_rxmode;
2677 netdev->do_ioctl = cxgb_ioctl;
2678 netdev->change_mtu = cxgb_change_mtu;
2679 netdev->set_mac_address = cxgb_set_mac_addr;
2680 #ifdef CONFIG_NET_POLL_CONTROLLER
2681 netdev->poll_controller = cxgb_netpoll;
2684 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
2687 pci_set_drvdata(pdev, adapter);
2688 if (t3_prep_adapter(adapter, ai, 1) < 0) {
2694 * The card is now ready to go. If any errors occur during device
2695 * registration we do not fail the whole card but rather proceed only
2696 * with the ports we manage to register successfully. However we must
2697 * register at least one net device.
2699 for_each_port(adapter, i) {
2700 err = register_netdev(adapter->port[i]);
2702 dev_warn(&pdev->dev,
2703 "cannot register net device %s, skipping\n",
2704 adapter->port[i]->name);
2707 * Change the name we use for messages to the name of
2708 * the first successfully registered interface.
2710 if (!adapter->registered_device_map)
2711 adapter->name = adapter->port[i]->name;
2713 __set_bit(i, &adapter->registered_device_map);
2716 if (!adapter->registered_device_map) {
2717 dev_err(&pdev->dev, "could not register any net devices\n");
2721 /* Driver's ready. Reflect it on LEDs */
2722 t3_led_ready(adapter);
2724 if (is_offload(adapter)) {
2725 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
2726 cxgb3_adapter_ofld(adapter);
2729 /* See what interrupts we'll be using */
2730 if (msi > 1 && cxgb_enable_msix(adapter) == 0)
2731 adapter->flags |= USING_MSIX;
2732 else if (msi > 0 && pci_enable_msi(pdev) == 0)
2733 adapter->flags |= USING_MSI;
2735 err = sysfs_create_group(&adapter->port[0]->dev.kobj,
2738 print_port_info(adapter, ai);
/* error unwind: undo allocations in reverse order of acquisition */
2742 iounmap(adapter->regs);
2743 for (i = ai->nports - 1; i >= 0; --i)
2744 if (adapter->port[i])
2745 free_netdev(adapter->port[i]);
2751 pci_disable_device(pdev);
2752 out_release_regions:
2753 pci_release_regions(pdev);
2754 pci_set_drvdata(pdev, NULL);
/*
 * remove_one - PCI device removal callback.
 *
 * Tears down everything init_one() set up, in reverse order: stop DMA,
 * detach sysfs/offload, unregister netdevs, free SGE and IRQ resources,
 * then release PCI resources.  (Some lines, e.g. braces and the NULL
 * drvdata guard, are elided in this excerpt.)
 */
2758 static void __devexit remove_one(struct pci_dev *pdev)
2760 struct adapter *adapter = pci_get_drvdata(pdev);
/* Quiesce the SGE before anything else so no DMA is in flight. */
2765 t3_sge_stop(adapter);
2766 sysfs_remove_group(&adapter->port[0]->dev.kobj,
/* Detach from the offload layer; close the TOE device if it was opened. */
2769 if (is_offload(adapter)) {
2770 cxgb3_adapter_unofld(adapter);
2771 if (test_bit(OFFLOAD_DEVMAP_BIT,
2772 &adapter->open_device_map))
2773 offload_close(&adapter->tdev);
/* Only unregister ports that init_one() successfully registered. */
2776 for_each_port(adapter, i)
2777 if (test_bit(i, &adapter->registered_device_map))
2778 unregister_netdev(adapter->port[i]);
2780 t3_free_sge_resources(adapter);
2781 cxgb_disable_msi(adapter);
/* free_netdev() must come after unregister_netdev() above. */
2783 for_each_port(adapter, i)
2784 if (adapter->port[i])
2785 free_netdev(adapter->port[i]);
2787 iounmap(adapter->regs);
2789 pci_release_regions(pdev);
2790 pci_disable_device(pdev);
2791 pci_set_drvdata(pdev, NULL);
/*
 * PCI driver descriptor binding the cxgb3 device-ID table to the
 * probe/remove/error callbacks.  (The .name and .probe initializers
 * are elided in this excerpt.)
 */
2795 static struct pci_driver driver = {
2797 .id_table = cxgb3_pci_tbl,
2799 .remove = __devexit_p(remove_one),
2800 .err_handler = &t3_err_handler,
/*
 * Module entry point: initialize the offload layer, then register the
 * PCI driver.  (Workqueue creation and the return path are elided in
 * this excerpt.)
 */
2803 static int __init cxgb3_init_module(void)
2807 cxgb3_offload_init();
2809 ret = pci_register_driver(&driver);
/*
 * Module exit point: unregister the PCI driver (which invokes
 * remove_one() for each bound device), then destroy the driver's
 * workqueue once no work can be queued anymore.
 */
2813 static void __exit cxgb3_cleanup_module(void)
2815 pci_unregister_driver(&driver);
2817 destroy_workqueue(cxgb3_wq);
/* Register the module load/unload entry points with the kernel. */
2820 module_init(cxgb3_init_module);
2821 module_exit(cxgb3_cleanup_module);