2 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 #include <linux/module.h>
33 #include <linux/moduleparam.h>
34 #include <linux/init.h>
35 #include <linux/pci.h>
36 #include <linux/dma-mapping.h>
37 #include <linux/netdevice.h>
38 #include <linux/etherdevice.h>
39 #include <linux/if_vlan.h>
40 #include <linux/mii.h>
41 #include <linux/sockios.h>
42 #include <linux/workqueue.h>
43 #include <linux/proc_fs.h>
44 #include <linux/rtnetlink.h>
45 #include <linux/firmware.h>
46 #include <linux/log2.h>
47 #include <asm/uaccess.h>
50 #include "cxgb3_ioctl.h"
52 #include "cxgb3_offload.h"
55 #include "cxgb3_ctl_defs.h"
57 #include "firmware_exports.h"
/* Min/max SGE queue sizes (fragmentary view: the enum's opening/closing
 * lines and some MIN_* entries are not visible here). */
60 MAX_TXQ_ENTRIES = 16384,
61 MAX_CTRL_TXQ_ENTRIES = 1024,
62 MAX_RSPQ_ENTRIES = 16384,
63 MAX_RX_BUFFERS = 16384,
64 MAX_RX_JUMBO_BUFFERS = 16384,
66 MIN_CTRL_TXQ_ENTRIES = 4,
67 MIN_RSPQ_ENTRIES = 32,
/* Bitmask covering all physical port bits in adapter->open_device_map. */
71 #define PORT_MASK ((1 << MAX_NPORTS) - 1)
/* Default netif message-enable bitmap used for new interfaces. */
73 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
74 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
75 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
77 #define EEPROM_MAGIC 0x38E2F10C
/* Helper to build a pci_device_id entry; idx selects the board info slot. */
79 #define CH_DEVICE(devid, idx) \
80 { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
/* PCI IDs of supported T3 boards. Note several device IDs share one board
 * index (e.g. 0x21/0x24/0x32 -> 1). Fragmentary view: the table's
 * terminating entry and closing brace are not visible. */
82 static const struct pci_device_id cxgb3_pci_tbl[] = {
83 CH_DEVICE(0x20, 0), /* PE9000 */
84 CH_DEVICE(0x21, 1), /* T302E */
85 CH_DEVICE(0x22, 2), /* T310E */
86 CH_DEVICE(0x23, 3), /* T320X */
87 CH_DEVICE(0x24, 1), /* T302X */
88 CH_DEVICE(0x25, 3), /* T320E */
89 CH_DEVICE(0x26, 2), /* T310X */
90 CH_DEVICE(0x30, 2), /* T3B10 */
91 CH_DEVICE(0x31, 3), /* T3B20 */
92 CH_DEVICE(0x32, 1), /* T3B02 */
/* Standard module metadata and the default message-enable parameter. */
96 MODULE_DESCRIPTION(DRV_DESC);
97 MODULE_AUTHOR("Chelsio Communications");
98 MODULE_LICENSE("Dual BSD/GPL");
99 MODULE_VERSION(DRV_VERSION);
100 MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
102 static int dflt_msg_enable = DFLT_MSG_ENABLE;
104 module_param(dflt_msg_enable, int, 0644);
105 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
108 * The driver uses the best interrupt scheme available on a platform in the
109 * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
110 * of these schemes the driver may consider as follows:
112 * msi = 2: choose from among all three options
113 * msi = 1: only consider MSI and pin interrupts
114 * msi = 0: force pin interrupts
/* NOTE(review): the `static int msi = ...;` definition itself is not visible
 * in this view; only the module_param registration is. */
118 module_param(msi, int, 0644);
119 MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
122 * The driver enables offload as a default.
123 * To disable it, use ofld_disable = 1.
126 static int ofld_disable = 0;
128 module_param(ofld_disable, int, 0644);
129 MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
132 * We have work elements that we need to cancel when an interface is taken
133 * down. Normally the work elements would be executed by keventd but that
134 * can deadlock because of linkwatch. If our close method takes the rtnl
135 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
136 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
137 * for our work to complete. Get our own work queue to solve this.
/* Private workqueue used by this driver; created elsewhere (not visible here). */
139 static struct workqueue_struct *cxgb3_wq;
142 * link_report - show link status and link speed/duplex
143 * @p: the port whose settings are to be reported
145 * Shows the link status, speed, and duplex of a port.
147 static void link_report(struct net_device *dev)
/* NOTE(review): fragmentary view — the function's opening brace, the
 * switch cases that set `s` for speeds above 10Mbps, and the closing
 * braces are missing from this excerpt. */
149 if (!netif_carrier_ok(dev))
150 printk(KERN_INFO "%s: link down\n", dev->name);
152 const char *s = "10Mbps";
153 const struct port_info *p = netdev_priv(dev);
155 switch (p->link_config.speed) {
167 printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
168 p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
173 * t3_os_link_changed - handle link status changes
174 * @adapter: the adapter associated with the link change
175 * @port_id: the port index whose limk status has changed
176 * @link_stat: the new status of the link
177 * @speed: the new speed setting
178 * @duplex: the new duplex setting
179 * @pause: the new flow-control setting
181 * This is the OS-dependent handler for link status changes. The OS
182 * neutral handler takes care of most of the processing for these events,
183 * then calls this handler for any OS-specific processing.
185 void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
186 int speed, int duplex, int pause)
188 struct net_device *dev = adapter->port[port_id];
189 struct port_info *pi = netdev_priv(dev);
190 struct cmac *mac = &pi->mac;
192 /* Skip changes from disabled ports. */
193 if (!netif_running(dev))
/* If the carrier state actually changed: on link-up enable MAC RX and set
 * carrier; on link-down drop carrier, power down the PHY, disable MAC RX
 * and restart autonegotiation via t3_link_start().
 * NOTE(review): the if/else braces and the final link_report() call (if
 * any) are not visible in this fragmentary excerpt. */
196 if (link_stat != netif_carrier_ok(dev)) {
198 t3_mac_enable(mac, MAC_DIRECTION_RX);
199 netif_carrier_on(dev);
201 netif_carrier_off(dev);
202 pi->phy.ops->power_down(&pi->phy, 1);
203 t3_mac_disable(mac, MAC_DIRECTION_RX);
204 t3_link_start(&pi->phy, mac, &pi->link_config);
212 * t3_os_phymod_changed - handle PHY module changes
213 * @phy: the PHY reporting the module change
214 * @mod_type: new module type
216 * This is the OS-dependent handler for PHY module changes. It is
217 * invoked when a PHY module is removed or inserted for any OS-specific
220 void t3_os_phymod_changed(struct adapter *adap, int port_id)
222 static const char *mod_str[] = {
223 NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
/* Log unplug vs. insert; mod_str is indexed by pi->phy.modtype.
 * NOTE(review): the `else` keyword between the two printks is not visible
 * in this fragmentary excerpt. */
226 const struct net_device *dev = adap->port[port_id];
227 const struct port_info *pi = netdev_priv(dev);
229 if (pi->phy.modtype == phy_modtype_none)
230 printk(KERN_INFO "%s: PHY module unplugged\n", dev->name);
232 printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name,
233 mod_str[pi->phy.modtype]);
/* Push the net_device's current RX filter/multicast state to the MAC. */
236 static void cxgb_set_rxmode(struct net_device *dev)
238 struct t3_rx_mode rm;
239 struct port_info *pi = netdev_priv(dev);
241 init_rx_mode(&rm, dev, dev->mc_list);
242 t3_mac_set_rx_mode(&pi->mac, &rm);
246 * link_start - enable a port
247 * @dev: the device to enable
249 * Performs the MAC and PHY actions needed to enable a port.
251 static void link_start(struct net_device *dev)
253 struct t3_rx_mode rm;
254 struct port_info *pi = netdev_priv(dev);
255 struct cmac *mac = &pi->mac;
257 init_rx_mode(&rm, dev, dev->mc_list);
/* Program MTU, unicast address and RX mode, kick the PHY/link, then
 * enable the MAC in both directions. Order matters: MAC config happens
 * before the MAC is enabled. */
259 t3_mac_set_mtu(mac, dev->mtu);
260 t3_mac_set_address(mac, 0, dev->dev_addr);
261 t3_mac_set_rx_mode(mac, &rm);
262 t3_link_start(&pi->phy, mac, &pi->link_config);
263 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
/* Disable whichever of MSI-X/MSI is active and clear the matching flag. */
266 static inline void cxgb_disable_msi(struct adapter *adapter)
268 if (adapter->flags & USING_MSIX) {
269 pci_disable_msix(adapter->pdev);
270 adapter->flags &= ~USING_MSIX;
271 } else if (adapter->flags & USING_MSI) {
272 pci_disable_msi(adapter->pdev);
273 adapter->flags &= ~USING_MSI;
278 * Interrupt handler for asynchronous events used with MSI-X.
/* cookie is the adapter pointer registered via request_irq(). The return
 * statement (presumably IRQ_HANDLED) is not visible in this excerpt. */
280 static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
282 t3_slow_intr_handler(cookie);
287 * Name the MSI-X interrupts.
289 static void name_msix_vecs(struct adapter *adap)
/* Vector 0 carries slow-path/async events and is named after the adapter;
 * vectors 1..N are named "<ifname>-<qset>" per port queue set. desc[n] is
 * forced to 0 because snprintf's bound n is one short of the buffer. */
291 int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
293 snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
294 adap->msix_info[0].desc[n] = 0;
296 for_each_port(adap, j) {
297 struct net_device *d = adap->port[j];
298 const struct port_info *pi = netdev_priv(d);
300 for (i = 0; i < pi->nqsets; i++, msi_idx++) {
301 snprintf(adap->msix_info[msi_idx].desc, n,
302 "%s-%d", d->name, pi->first_qset + i);
303 adap->msix_info[msi_idx].desc[n] = 0;
/* Request one MSI-X data interrupt per queue set across all ports.
 * On a request_irq() failure the vectors already acquired are freed
 * (the unwind loop header is missing from this fragmentary view). */
308 static int request_msix_data_irqs(struct adapter *adap)
310 int i, j, err, qidx = 0;
312 for_each_port(adap, i) {
313 int nqsets = adap2pinfo(adap, i)->nqsets;
315 for (j = 0; j < nqsets; ++j) {
316 err = request_irq(adap->msix_info[qidx + 1].vec,
317 t3_intr_handler(adap,
320 adap->msix_info[qidx + 1].desc,
321 &adap->sge.qs[qidx]);
324 free_irq(adap->msix_info[qidx + 1].vec,
325 &adap->sge.qs[qidx]);
/* Release IRQs: for MSI-X, free the async vector plus one vector per
 * queue set; otherwise free the single legacy/MSI interrupt line. */
334 static void free_irq_resources(struct adapter *adapter)
336 if (adapter->flags & USING_MSIX) {
339 free_irq(adapter->msix_info[0].vec, adapter);
340 for_each_port(adapter, i)
341 n += adap2pinfo(adapter, i)->nqsets;
343 for (i = 0; i < n; ++i)
344 free_irq(adapter->msix_info[i + 1].vec,
345 &adapter->sge.qs[i]);
347 free_irq(adapter->pdev->irq, adapter);
/* Poll until qset 0 has seen `n` more offload packets than `init_cnt`
 * (i.e. all expected mgmt replies arrived). The wait/timeout body of the
 * loop is not visible in this fragmentary excerpt. */
350 static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
355 while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
/* Initialize TP parity state by writing every SMT (16), L2T (2048) and
 * routing-table (2048) entry plus one SET_TCB_FIELD, then waiting for all
 * 16+2048+2048+1 replies. Offload mode is enabled for the duration and
 * restored to 0 before returning. Allocations use __GFP_NOFAIL so the
 * alloc_skb() results are not checked. */
363 static int init_tp_parity(struct adapter *adap)
367 struct cpl_set_tcb_field *greq;
368 unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;
370 t3_tp_set_offload_mode(adap, 1);
372 for (i = 0; i < 16; i++) {
373 struct cpl_smt_write_req *req;
375 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
376 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
377 memset(req, 0, sizeof(*req));
378 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
379 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
381 t3_mgmt_tx(adap, skb);
384 for (i = 0; i < 2048; i++) {
385 struct cpl_l2t_write_req *req;
387 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
388 req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
389 memset(req, 0, sizeof(*req));
390 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
391 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
392 req->params = htonl(V_L2T_W_IDX(i));
393 t3_mgmt_tx(adap, skb);
396 for (i = 0; i < 2048; i++) {
397 struct cpl_rte_write_req *req;
399 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
400 req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
401 memset(req, 0, sizeof(*req));
402 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
403 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
404 req->l2t_idx = htonl(V_L2T_W_IDX(i));
405 t3_mgmt_tx(adap, skb);
408 skb = alloc_skb(sizeof(*greq), GFP_KERNEL | __GFP_NOFAIL);
409 greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
410 memset(greq, 0, sizeof(*greq));
411 greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
412 OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
413 greq->mask = cpu_to_be64(1);
414 t3_mgmt_tx(adap, skb);
416 i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
417 t3_tp_set_offload_mode(adap, 0);
422 * setup_rss - configure RSS
425 * Sets up RSS to distribute packets to multiple receive queues. We
426 * configure the RSS CPU lookup table to distribute to the number of HW
427 * receive queues, and the response queue lookup table to narrow that
428 * down to the response queues actually configured for each port.
429 * We always configure the RSS mapping for two ports since the mapping
430 * table has plenty of entries.
432 static void setup_rss(struct adapter *adap)
435 unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
436 unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
437 u8 cpus[SGE_QSETS + 1];
438 u16 rspq_map[RSS_TABLE_SIZE];
/* The body of the first loop (filling cpus[i]) is missing in this view. */
440 for (i = 0; i < SGE_QSETS; ++i)
442 cpus[SGE_QSETS] = 0xff; /* terminator */
/* First half of the table maps to port 0's qsets, second half to
 * port 1's (offset by nq0). */
444 for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
445 rspq_map[i] = i % nq0;
446 rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
449 t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
450 F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
451 V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
/* Register a NAPI instance for each queue set that has a netdev attached
 * (the guard condition before netif_napi_add is missing from this view). */
454 static void init_napi(struct adapter *adap)
458 for (i = 0; i < SGE_QSETS; i++) {
459 struct sge_qset *qs = &adap->sge.qs[i];
462 netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
467 * netif_napi_add() can be called only once per napi_struct because it
468 * adds each new napi_struct to a list. Be careful not to call it a
469 * second time, e.g., during EEH recovery, by making a note of it.
471 adap->flags |= NAPI_INIT;
475 * Wait until all NAPI handlers are descheduled. This includes the handlers of
476 * both netdevices representing interfaces and the dummy ones for the extra
479 static void quiesce_rx(struct adapter *adap)
/* Only disable NAPI on qsets that were actually initialized (qs->adap set). */
483 for (i = 0; i < SGE_QSETS; i++)
484 if (adap->sge.qs[i].adap)
485 napi_disable(&adap->sge.qs[i].napi);
/* Re-enable NAPI on every initialized queue set (mirror of quiesce_rx). */
488 static void enable_all_napi(struct adapter *adap)
491 for (i = 0; i < SGE_QSETS; i++)
492 if (adap->sge.qs[i].adap)
493 napi_enable(&adap->sge.qs[i].napi);
497 * set_qset_lro - Turn a queue set's LRO capability on and off
498 * @dev: the device the qset is attached to
499 * @qset_idx: the queue set index
500 * @val: the LRO switch
502 * Sets LRO on or off for a particular queue set.
503 * the device's features flag is updated to reflect the LRO
504 * capability when all queues belonging to the device are
507 static void set_qset_lro(struct net_device *dev, int qset_idx, int val)
509 struct port_info *pi = netdev_priv(dev);
510 struct adapter *adapter = pi->adapter;
/* NOTE(review): declaration/initialization of lro_on is missing from this
 * view; presumably it starts at 1 and is ANDed down below. */
513 adapter->params.sge.qset[qset_idx].lro = !!val;
514 adapter->sge.qs[qset_idx].lro_enabled = !!val;
516 /* let ethtool report LRO on only if all queues are LRO enabled */
517 for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; ++i)
518 lro_on &= adapter->params.sge.qset[i].lro;
521 dev->features |= NETIF_F_LRO;
523 dev->features &= ~NETIF_F_LRO;
527 * setup_sge_qsets - configure SGE Tx/Rx/response queues
530 * Determines how many sets of SGE queues to use and initializes them.
531 * We support multiple queue sets per port if we have MSI-X, otherwise
532 * just one queue set per port.
534 static int setup_sge_qsets(struct adapter *adap)
536 int i, j, err, irq_idx = 0, qset_idx = 0;
537 unsigned int ntxq = SGE_TXQ_PER_SET;
/* The statement guarded by this rev/MSI check is missing from this view. */
539 if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
542 for_each_port(adap, i) {
543 struct net_device *dev = adap->port[i];
544 struct port_info *pi = netdev_priv(dev);
546 pi->qs = &adap->sge.qs[pi->first_qset];
547 for (j = pi->first_qset; j < pi->first_qset + pi->nqsets;
/* Allocate each qset; with MSI-X the IRQ index is qset_idx + 1 because
 * vector 0 is reserved for async events. On failure, stop SGE timers
 * and free already-allocated SGE resources (error return not visible). */
549 set_qset_lro(dev, qset_idx, pi->rx_csum_offload);
550 err = t3_sge_alloc_qset(adap, qset_idx, 1,
551 (adap->flags & USING_MSIX) ? qset_idx + 1 :
553 &adap->params.sge.qset[qset_idx], ntxq, dev,
554 netdev_get_tx_queue(dev, j));
556 t3_stop_sge_timers(adap);
557 t3_free_sge_resources(adap);
/* Generic sysfs show helper: formats the net_device's attribute via the
 * supplied callback. Locking lines (the "Synchronize" comment's subject)
 * are missing from this view. */
566 static ssize_t attr_show(struct device *d, char *buf,
567 ssize_t(*format) (struct net_device *, char *))
571 /* Synchronize with ioctls that may shut down the device */
573 len = (*format) (to_net_dev(d), buf);
/* Generic sysfs store helper: requires CAP_NET_ADMIN, parses an unsigned
 * value, range-checks it against [min_val, max_val], then applies it via
 * the supplied setter. Error-return statements are missing from view. */
578 static ssize_t attr_store(struct device *d,
579 const char *buf, size_t len,
580 ssize_t(*set) (struct net_device *, unsigned int),
581 unsigned int min_val, unsigned int max_val)
587 if (!capable(CAP_NET_ADMIN))
590 val = simple_strtoul(buf, &endp, 0);
591 if (endp == buf || val < min_val || val > max_val)
595 ret = (*set) (to_net_dev(d), val);
/* Generates a format_<name>() that sprintf's val_expr and a show_<name>()
 * sysfs wrapper delegating to attr_show(). */
602 #define CXGB3_SHOW(name, val_expr) \
603 static ssize_t format_##name(struct net_device *dev, char *buf) \
605 struct port_info *pi = netdev_priv(dev); \
606 struct adapter *adap = pi->adapter; \
607 return sprintf(buf, "%u\n", val_expr); \
609 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
612 return attr_show(d, buf, format_##name); \
/* Set the number of MC5 filter TIDs. Rejected after full init, on rev-0
 * hardware (for nonzero val), or when the request would not leave room for
 * servers and min_tids in the MC5 CAM (error returns missing from view). */
615 static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
617 struct port_info *pi = netdev_priv(dev);
618 struct adapter *adap = pi->adapter;
619 int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;
621 if (adap->flags & FULL_INIT_DONE)
623 if (val && adap->params.rev == 0)
625 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
628 adap->params.mc5.nfilters = val;
/* sysfs store wrapper: any unsigned value, validated by set_nfilters. */
632 static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
633 const char *buf, size_t len)
635 return attr_store(d, buf, len, set_nfilters, 0, ~0);
/* Set the number of MC5 server TIDs; same CAM-capacity validation pattern
 * as set_nfilters (error returns missing from this view). */
638 static ssize_t set_nservers(struct net_device *dev, unsigned int val)
640 struct port_info *pi = netdev_priv(dev);
641 struct adapter *adap = pi->adapter;
643 if (adap->flags & FULL_INIT_DONE)
645 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
648 adap->params.mc5.nservers = val;
/* sysfs store wrapper for nservers. */
652 static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
653 const char *buf, size_t len)
655 return attr_store(d, buf, len, set_nservers, 0, ~0);
/* Read-only / read-write device attribute generators built on CXGB3_SHOW. */
658 #define CXGB3_ATTR_R(name, val_expr) \
659 CXGB3_SHOW(name, val_expr) \
660 static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
662 #define CXGB3_ATTR_RW(name, val_expr, store_method) \
663 CXGB3_SHOW(name, val_expr) \
664 static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
666 CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
667 CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
668 CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
/* Attribute group exposed on the netdev's sysfs directory (terminating
 * NULL entry of the array is missing from this view). */
670 static struct attribute *cxgb3_attrs[] = {
671 &dev_attr_cam_size.attr,
672 &dev_attr_nfilters.attr,
673 &dev_attr_nservers.attr,
677 static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
/* Show one Tx traffic-scheduler's rate: reads the per-scheduler rate-limit
 * register via the TM PIO window, decodes bytes-per-tick (bpt) and
 * clocks-per-tick (cpt), and prints "disabled" or the rate in Kbps.
 * NOTE(review): the cpt extraction and the shift applied when sched is odd
 * are missing from this fragmentary view. */
679 static ssize_t tm_attr_show(struct device *d,
680 char *buf, int sched)
682 struct port_info *pi = netdev_priv(to_net_dev(d));
683 struct adapter *adap = pi->adapter;
684 unsigned int v, addr, bpt, cpt;
687 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
689 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
690 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
693 bpt = (v >> 8) & 0xff;
696 len = sprintf(buf, "disabled\n");
/* rate (Kbps) = cclk_kHz / cpt * bpt * 8 / 1000 == (v * bpt) / 125 */
698 v = (adap->params.vpd.cclk * 1000) / cpt;
699 len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
/* Store one Tx scheduler's rate: CAP_NET_ADMIN only, value parsed as an
 * unsigned integer capped at 10 Gbps (10000000 Kbps), then applied via
 * t3_config_sched(). Error returns are missing from this view. */
705 static ssize_t tm_attr_store(struct device *d,
706 const char *buf, size_t len, int sched)
708 struct port_info *pi = netdev_priv(to_net_dev(d));
709 struct adapter *adap = pi->adapter;
714 if (!capable(CAP_NET_ADMIN))
717 val = simple_strtoul(buf, &endp, 0);
718 if (endp == buf || val > 10000000)
722 ret = t3_config_sched(adap, val, sched);
/* Generates show_/store_ sysfs wrappers bound to one scheduler index. */
729 #define TM_ATTR(name, sched) \
730 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
733 return tm_attr_show(d, buf, sched); \
735 static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
736 const char *buf, size_t len) \
738 return tm_attr_store(d, buf, len, sched); \
740 static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
/* sysfs attributes for the 8 Tx schedulers, exposed while offload is
 * active (terminating NULL of the array is missing from this view). */
751 static struct attribute *offload_attrs[] = {
752 &dev_attr_sched0.attr,
753 &dev_attr_sched1.attr,
754 &dev_attr_sched2.attr,
755 &dev_attr_sched3.attr,
756 &dev_attr_sched4.attr,
757 &dev_attr_sched5.attr,
758 &dev_attr_sched6.attr,
759 &dev_attr_sched7.attr,
763 static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
766 * Sends an sk_buff to an offload queue driver
767 * after dealing with any active network taps.
769 static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
/* The tap-handling lines and the return statement are missing from view. */
774 ret = t3_offload_tx(tdev, skb);
/* Write one source-MAC-table entry for port `idx` using the port's current
 * MAC address. The alloc_skb() NULL check and final return are missing
 * from this fragmentary view. */
779 static int write_smt_entry(struct adapter *adapter, int idx)
781 struct cpl_smt_write_req *req;
782 struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
787 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
788 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
789 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
790 req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */
792 memset(req->src_mac1, 0, sizeof(req->src_mac1));
793 memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
795 offload_tx(&adapter->tdev, skb);
/* Populate the SMT with one entry per port (return value missing in view). */
799 static int init_smt(struct adapter *adapter)
803 for_each_port(adapter, i)
804 write_smt_entry(adapter, i);
/* Pack port 0's MTU into the low 16 bits and port 1's (if present) into
 * the high 16 bits, then program the TP per-port MTU table register. */
808 static void init_port_mtus(struct adapter *adapter)
810 unsigned int mtus = adapter->port[0]->mtu;
812 if (adapter->port[1])
813 mtus |= adapter->port[1]->mtu << 16;
814 t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
/* Send a firmware PKTSCHED_SET management command binding qidx to a
 * scheduler. The sched/qidx/lo payload assignments and the return are
 * missing from this fragmentary view; __GFP_NOFAIL makes the alloc
 * unconditional. */
817 static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
821 struct mngt_pktsched_wr *req;
824 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
825 req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
826 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
827 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
833 ret = t3_mgmt_tx(adap, skb);
/* Bind every queue set of every port to the firmware packet scheduler via
 * send_pktsched_cmd (error aggregation and return missing from view). */
838 static int bind_qsets(struct adapter *adap)
842 for_each_port(adap, i) {
843 const struct port_info *pi = adap2pinfo(adap, i);
845 for (j = 0; j < pi->nqsets; ++j) {
846 int ret = send_pktsched_cmd(adap, 1,
847 pi->first_qset + j, -1,
/* Firmware / protocol-SRAM image name templates for request_firmware(). */
857 #define FW_FNAME "cxgb3/t3fw-%d.%d.%d.bin"
858 #define TPSRAM_NAME "cxgb3/t3%c_psram-%d.%d.%d.bin"
/* Load the firmware image matching the driver's expected FW version from
 * userspace and flash it with t3_load_fw(); logs success or failure. */
860 static int upgrade_fw(struct adapter *adap)
864 const struct firmware *fw;
865 struct device *dev = &adap->pdev->dev;
867 snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
868 FW_VERSION_MINOR, FW_VERSION_MICRO);
869 ret = request_firmware(&fw, buf, dev);
871 dev_err(dev, "could not upgrade firmware: unable to load %s\n",
875 ret = t3_load_fw(adap, fw->data, fw->size);
876 release_firmware(fw);
879 dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
880 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
882 dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
883 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
/* Map adapter->params.rev to the character used in TPSRAM image names
 * (switch cases and return are missing from this fragmentary view). */
888 static inline char t3rev2char(struct adapter *adapter)
892 switch(adapter->params.rev) {
/* Load the protocol-engine (TP SRAM) image matching the driver's expected
 * TP version: pick the file by chip revision, validate it with
 * t3_check_tpsram(), program it with t3_set_proto_sram(), and log the
 * outcome. The firmware is released on all paths via the tail. */
904 static int update_tpsram(struct adapter *adap)
906 const struct firmware *tpsram;
908 struct device *dev = &adap->pdev->dev;
912 rev = t3rev2char(adap);
916 snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
917 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
919 ret = request_firmware(&tpsram, buf, dev);
921 dev_err(dev, "could not load TP SRAM: unable to load %s\n",
926 ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
930 ret = t3_set_proto_sram(adap, tpsram->data);
933 "successful update of protocol engine "
935 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
937 dev_err(dev, "failed to update of protocol engine %d.%d.%d\n",
938 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
940 dev_err(dev, "loading protocol SRAM failed\n");
943 release_firmware(tpsram);
949 * cxgb_up - enable the adapter
950 * @adapter: adapter being enabled
952 * Called when the first port is enabled, this function performs the
953 * actions necessary to make an adapter operational, such as completing
954 * the initialization of HW modules, and enabling interrupts.
956 * Must be called with the rtnl lock held.
958 static int cxgb_up(struct adapter *adap)
/* One-time bring-up: upgrade FW/TPSRAM if version checks fail with
 * -EINVAL, init the HW, configure DDP/ULP registers, allocate SGE qsets
 * and NAPI, then mark FULL_INIT_DONE. */
962 if (!(adap->flags & FULL_INIT_DONE)) {
963 err = t3_check_fw_version(adap);
964 if (err == -EINVAL) {
965 err = upgrade_fw(adap);
966 CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
967 FW_VERSION_MAJOR, FW_VERSION_MINOR,
968 FW_VERSION_MICRO, err ? "failed" : "succeeded");
971 err = t3_check_tpsram_version(adap);
972 if (err == -EINVAL) {
973 err = update_tpsram(adap);
974 CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
975 TP_VERSION_MAJOR, TP_VERSION_MINOR,
976 TP_VERSION_MICRO, err ? "failed" : "succeeded");
980 * Clear interrupts now to catch errors if t3_init_hw fails.
981 * We clear them again later as initialization may trigger
982 * conditions that can interrupt.
986 err = t3_init_hw(adap, 0);
990 t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
991 t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
993 err = setup_sge_qsets(adap);
998 if (!(adap->flags & NAPI_INIT))
1000 adap->flags |= FULL_INIT_DONE;
1003 t3_intr_clear(adap);
/* IRQ setup: with MSI-X, request the async vector (index 0) and then the
 * per-qset data vectors; otherwise fall back to a single MSI/legacy IRQ. */
1005 if (adap->flags & USING_MSIX) {
1006 name_msix_vecs(adap);
1007 err = request_irq(adap->msix_info[0].vec,
1008 t3_async_intr_handler, 0,
1009 adap->msix_info[0].desc, adap);
1013 err = request_msix_data_irqs(adap);
1015 free_irq(adap->msix_info[0].vec, adap);
1018 } else if ((err = request_irq(adap->pdev->irq,
1019 t3_intr_handler(adap,
1020 adap->sge.qs[0].rspq.
1022 (adap->flags & USING_MSI) ?
1027 enable_all_napi(adap);
1029 t3_intr_enable(adap);
/* T3C parity workaround: run init_tp_parity() once, then keep TP parity
 * interrupts enabled on every subsequent up. */
1031 if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
1032 is_offload(adap) && init_tp_parity(adap) == 0)
1033 adap->flags |= TP_PARITY_INIT;
1035 if (adap->flags & TP_PARITY_INIT) {
1036 t3_write_reg(adap, A_TP_INT_CAUSE,
1037 F_CMCACHEPERR | F_ARPLUTPERR);
1038 t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
/* Bind qsets to the firmware scheduler exactly once per adapter life. */
1041 if (!(adap->flags & QUEUES_BOUND)) {
1042 err = bind_qsets(adap);
1044 CH_ERR(adap, "failed to bind qsets, err %d\n", err);
1045 t3_intr_disable(adap);
1046 free_irq_resources(adap);
1049 adap->flags |= QUEUES_BOUND;
/* Shared error path for request_irq failures (labels not visible here). */
1055 CH_ERR(adap, "request_irq failed, err %d\n", err);
1060 * Release resources when all the ports and offloading have been stopped.
1062 static void cxgb_down(struct adapter *adapter)
1064 t3_sge_stop(adapter);
/* work_lock serializes interrupt disable against the PHY interrupt task. */
1065 spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */
1066 t3_intr_disable(adapter);
1067 spin_unlock_irq(&adapter->work_lock);
1069 free_irq_resources(adapter);
1070 flush_workqueue(cxgb3_wq); /* wait for external IRQ handler */
1071 quiesce_rx(adapter);
/* (Re)arm the periodic adapter check task: link-poll period (in tenths of
 * a second) if configured, otherwise the stats update period in seconds. */
1074 static void schedule_chk_task(struct adapter *adap)
1078 timeo = adap->params.linkpoll_period ?
1079 (HZ * adap->params.linkpoll_period) / 10 :
1080 adap->params.stats_update_period * HZ;
1082 queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
/* Bring up offload support on first use: claim OFFLOAD_DEVMAP_BIT, bring
 * the adapter up if no port already did, enable TP offload mode, activate
 * the offload layer, program MTU tables, create the scheduler sysfs group,
 * and notify registered ULP clients. On activation failure the tail
 * (partially missing from this view) rolls the state back. */
1085 static int offload_open(struct net_device *dev)
1087 struct port_info *pi = netdev_priv(dev);
1088 struct adapter *adapter = pi->adapter;
1089 struct t3cdev *tdev = dev2t3cdev(dev);
1090 int adap_up = adapter->open_device_map & PORT_MASK;
1093 if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1096 if (!adap_up && (err = cxgb_up(adapter)) < 0)
1099 t3_tp_set_offload_mode(adapter, 1);
1100 tdev->lldev = adapter->port[0];
1101 err = cxgb3_offload_activate(adapter);
1105 init_port_mtus(adapter);
1106 t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
1107 adapter->params.b_wnd,
1108 adapter->params.rev == 0 ?
1109 adapter->port[0]->mtu : 0xffff);
1112 if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
1113 dev_dbg(&dev->dev, "cannot create sysfs group\n");
1115 /* Call back all registered clients */
1116 cxgb3_add_clients(tdev);
1119 /* restore them in case the offload module has changed them */
/* Error unwind: disable offload mode, release the devmap bit, restore
 * dummy t3cdev ops. */
1121 t3_tp_set_offload_mode(adapter, 0);
1122 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1123 cxgb3_set_dummy_ops(tdev);
/* Tear down offload support: notify clients, remove the scheduler sysfs
 * group, restore dummy ops, disable TP offload mode, release the devmap
 * bit, and deactivate the offload layer (a cxgb_down() call when no ports
 * remain open is implied but not visible in this fragmentary view). */
1128 static int offload_close(struct t3cdev *tdev)
1130 struct adapter *adapter = tdev2adap(tdev);
1132 if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1135 /* Call back all registered clients */
1136 cxgb3_remove_clients(tdev);
1138 sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
1141 cxgb3_set_dummy_ops(tdev);
1142 t3_tp_set_offload_mode(adapter, 0);
1143 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1145 if (!adapter->open_device_map)
1148 cxgb3_offload_deactivate(adapter);
/* ndo_open: bring the adapter up on first port open, mark the port open,
 * optionally enable offload, size the TX queues, enable port interrupts,
 * start the TX queues, and arm the periodic check task when this is the
 * first open port. Intermediate error returns are missing from view. */
1152 static int cxgb_open(struct net_device *dev)
1154 struct port_info *pi = netdev_priv(dev);
1155 struct adapter *adapter = pi->adapter;
1156 int other_ports = adapter->open_device_map & PORT_MASK;
1159 if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
1162 set_bit(pi->port_id, &adapter->open_device_map);
1163 if (is_offload(adapter) && !ofld_disable) {
1164 err = offload_open(dev);
1167 "Could not initialize offload capabilities\n");
1170 dev->real_num_tx_queues = pi->nqsets;
1172 t3_port_intr_enable(adapter, pi->port_id);
1173 netif_tx_start_all_queues(dev);
1175 schedule_chk_task(adapter);
/* ndo_close: quiesce the port (interrupts, TX queues, PHY power-down,
 * carrier, MAC), clear its open bit under work_lock, cancel the periodic
 * check task when the last port closes, and (final cxgb_down() call is
 * missing from this fragmentary view) tear down the adapter when nothing
 * remains open. */
1180 static int cxgb_close(struct net_device *dev)
1182 struct port_info *pi = netdev_priv(dev);
1183 struct adapter *adapter = pi->adapter;
1185 t3_port_intr_disable(adapter, pi->port_id);
1186 netif_tx_stop_all_queues(dev);
1187 pi->phy.ops->power_down(&pi->phy, 1);
1188 netif_carrier_off(dev);
1189 t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
1191 spin_lock_irq(&adapter->work_lock); /* sync with update task */
1192 clear_bit(pi->port_id, &adapter->open_device_map);
1193 spin_unlock_irq(&adapter->work_lock);
1195 if (!(adapter->open_device_map & PORT_MASK))
1196 cancel_rearming_delayed_workqueue(cxgb3_wq,
1197 &adapter->adap_check_task);
1199 if (!adapter->open_device_map)
/* ndo_get_stats: refresh MAC statistics under stats_lock and translate
 * them into the generic net_device_stats counters. The final
 * `return ns;` is missing from this fragmentary view. */
1205 static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
1207 struct port_info *pi = netdev_priv(dev);
1208 struct adapter *adapter = pi->adapter;
1209 struct net_device_stats *ns = &pi->netstats;
1210 const struct mac_stats *pstats;
1212 spin_lock(&adapter->stats_lock);
1213 pstats = t3_mac_update_stats(&pi->mac);
1214 spin_unlock(&adapter->stats_lock);
1216 ns->tx_bytes = pstats->tx_octets;
1217 ns->tx_packets = pstats->tx_frames;
1218 ns->rx_bytes = pstats->rx_octets;
1219 ns->rx_packets = pstats->rx_frames;
1220 ns->multicast = pstats->rx_mcast_frames;
1222 ns->tx_errors = pstats->tx_underrun;
1223 ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
1224 pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
1225 pstats->rx_fifo_ovfl;
1227 /* detailed rx_errors */
1228 ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
1229 ns->rx_over_errors = 0;
1230 ns->rx_crc_errors = pstats->rx_fcs_errs;
1231 ns->rx_frame_errors = pstats->rx_symbol_errs;
1232 ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
1233 ns->rx_missed_errors = pstats->rx_cong_drops;
1235 /* detailed tx_errors */
1236 ns->tx_aborted_errors = 0;
1237 ns->tx_carrier_errors = 0;
1238 ns->tx_fifo_errors = pstats->tx_underrun;
1239 ns->tx_heartbeat_errors = 0;
1240 ns->tx_window_errors = 0;
/* ethtool msglevel get/set: the level is stored per-adapter, not per-port. */
1244 static u32 get_msglevel(struct net_device *dev)
1246 struct port_info *pi = netdev_priv(dev);
1247 struct adapter *adapter = pi->adapter;
1249 return adapter->msg_enable;
1252 static void set_msglevel(struct net_device *dev, u32 val)
1254 struct port_info *pi = netdev_priv(dev);
1255 struct adapter *adapter = pi->adapter;
1257 adapter->msg_enable = val;
/* ethtool statistics labels; order must match the *data++ sequence in
 * get_stats(). Many entries of the original table are missing from this
 * fragmentary view. */
1260 static char stats_strings[][ETH_GSTRING_LEN] = {
1263 "TxMulticastFramesOK",
1264 "TxBroadcastFramesOK",
1271 "TxFrames128To255 ",
1272 "TxFrames256To511 ",
1273 "TxFrames512To1023 ",
1274 "TxFrames1024To1518 ",
1275 "TxFrames1519ToMax ",
1279 "RxMulticastFramesOK",
1280 "RxBroadcastFramesOK",
1291 "RxFrames128To255 ",
1292 "RxFrames256To511 ",
1293 "RxFrames512To1023 ",
1294 "RxFrames1024To1518 ",
1295 "RxFrames1519ToMax ",
1308 "CheckTXEnToggled ",
/* ethtool sizing callbacks: stats count, register-dump length, EEPROM
 * length (the EEPROM return value is missing from this view). */
1313 static int get_sset_count(struct net_device *dev, int sset)
1317 return ARRAY_SIZE(stats_strings);
1323 #define T3_REGMAP_SIZE (3 * 1024)
1325 static int get_regs_len(struct net_device *dev)
1327 return T3_REGMAP_SIZE;
1330 static int get_eeprom_len(struct net_device *dev)
/* ethtool get_drvinfo: driver/version/bus strings plus a combined
 * "T|N maj.min.micro TP maj.min.micro" firmware string. FW/TP versions
 * are read under stats_lock; the condition that chooses "N/A" over the
 * formatted string is missing from this fragmentary view. */
1335 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1337 struct port_info *pi = netdev_priv(dev);
1338 struct adapter *adapter = pi->adapter;
1342 spin_lock(&adapter->stats_lock);
1343 t3_get_fw_version(adapter, &fw_vers);
1344 t3_get_tp_version(adapter, &tp_vers);
1345 spin_unlock(&adapter->stats_lock);
1347 strcpy(info->driver, DRV_NAME);
1348 strcpy(info->version, DRV_VERSION);
1349 strcpy(info->bus_info, pci_name(adapter->pdev));
1351 strcpy(info->fw_version, "N/A");
1353 snprintf(info->fw_version, sizeof(info->fw_version),
1354 "%s %u.%u.%u TP %u.%u.%u",
1355 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1356 G_FW_VERSION_MAJOR(fw_vers),
1357 G_FW_VERSION_MINOR(fw_vers),
1358 G_FW_VERSION_MICRO(fw_vers),
1359 G_TP_VERSION_MAJOR(tp_vers),
1360 G_TP_VERSION_MINOR(tp_vers),
1361 G_TP_VERSION_MICRO(tp_vers));
1365 static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
1367 if (stringset == ETH_SS_STATS)
1368 memcpy(data, stats_strings, sizeof(stats_strings));
/*
 * Sum one SGE software counter (index @idx into each queue set's
 * port_stats[]) across all queue sets owned by port @p.
 * NOTE(review): the loop-index declaration and the 'return tot;' are in
 * lines elided from this excerpt.
 */
static unsigned long collect_sge_port_stats(struct adapter *adapter,
                                            struct port_info *p, int idx)
    unsigned long tot = 0;

    for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
        tot += adapter->sge.qs[i].port_stats[idx];
/*
 * ethtool get_ethtool_stats: write the counters named by stats_strings[]
 * into @data in the same order.  MAC hardware counters are snapshotted
 * under stats_lock (they are clear-on-read and accumulated elsewhere);
 * the SGE software counters are summed over the port's queue sets.
 * NOTE(review): the 'u64 *data' parameter line is elided from this
 * excerpt.
 */
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
    struct port_info *pi = netdev_priv(dev);
    struct adapter *adapter = pi->adapter;
    const struct mac_stats *s;

    spin_lock(&adapter->stats_lock);
    s = t3_mac_update_stats(&pi->mac);
    spin_unlock(&adapter->stats_lock);

    /* Tx MAC counters */
    *data++ = s->tx_octets;
    *data++ = s->tx_frames;
    *data++ = s->tx_mcast_frames;
    *data++ = s->tx_bcast_frames;
    *data++ = s->tx_pause;
    *data++ = s->tx_underrun;
    *data++ = s->tx_fifo_urun;

    /* Tx frame-size histogram */
    *data++ = s->tx_frames_64;
    *data++ = s->tx_frames_65_127;
    *data++ = s->tx_frames_128_255;
    *data++ = s->tx_frames_256_511;
    *data++ = s->tx_frames_512_1023;
    *data++ = s->tx_frames_1024_1518;
    *data++ = s->tx_frames_1519_max;

    /* Rx MAC counters */
    *data++ = s->rx_octets;
    *data++ = s->rx_frames;
    *data++ = s->rx_mcast_frames;
    *data++ = s->rx_bcast_frames;
    *data++ = s->rx_pause;
    *data++ = s->rx_fcs_errs;
    *data++ = s->rx_symbol_errs;
    *data++ = s->rx_short;
    *data++ = s->rx_jabber;
    *data++ = s->rx_too_long;
    *data++ = s->rx_fifo_ovfl;

    /* Rx frame-size histogram */
    *data++ = s->rx_frames_64;
    *data++ = s->rx_frames_65_127;
    *data++ = s->rx_frames_128_255;
    *data++ = s->rx_frames_256_511;
    *data++ = s->rx_frames_512_1023;
    *data++ = s->rx_frames_1024_1518;
    *data++ = s->rx_frames_1519_max;

    *data++ = pi->phy.fifo_errors;

    /* SGE software counters, summed over this port's queue sets */
    *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
    *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
    *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
    *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
    *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
    *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_AGGR);
    *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_FLUSHED);
    *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_NO_DESC);
    *data++ = s->rx_cong_drops;

    /* T3B2 MAC watchdog statistics */
    *data++ = s->num_toggled;
    *data++ = s->num_resets;
1445 static inline void reg_block_dump(struct adapter *ap, void *buf,
1446 unsigned int start, unsigned int end)
1448 u32 *p = buf + start;
1450 for (; start <= end; start += sizeof(u32))
1451 *p++ = t3_read_reg(ap, start);
/*
 * ethtool get_regs: dump the interesting chip register blocks into the
 * user buffer.  Ranges are chosen to avoid clear-on-read MAC statistics
 * registers (see comment below).
 * NOTE(review): the 'void *buf' parameter line is elided from this
 * excerpt.
 */
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
    struct port_info *pi = netdev_priv(dev);
    struct adapter *ap = pi->adapter;

    /*
     * Version scheme of the dump:
     * bits 0..9: chip version
     * bits 10..15: chip revision
     * bit 31: set for PCIe cards
     */
    regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);

    /*
     * We skip the MAC statistics registers because they are clear-on-read.
     * Also reading multi-register stats would need to synchronize with the
     * periodic mac stats accumulation. Hard to justify the complexity.
     */
    memset(buf, 0, T3_REGMAP_SIZE);
    reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
    reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
    reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
    reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
    reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
    /* second MAC instance uses the XGM_REG() offset macro */
    reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
                   XGM_REG(A_XGM_SERDES_STAT3, 1));
    reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
                   XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
/* ethtool nway_reset: restart autonegotiation on the port's PHY.
 * Only valid while the interface is up and autoneg is enabled. */
static int restart_autoneg(struct net_device *dev)
    struct port_info *p = netdev_priv(dev);

    if (!netif_running(dev)) /* error return elided in this excerpt */
    if (p->link_config.autoneg != AUTONEG_ENABLE) /* error return elided */
    p->phy.ops->autoneg_restart(&p->phy);
/*
 * ethtool phys_id: identify the adapter by blinking the GPIO0-driven
 * LED, toggling it every 500ms for @data seconds, then restore the LED.
 * NOTE(review): the argument validation, loop-index declaration and
 * final restore value are in lines elided from this excerpt.
 */
static int cxgb3_phys_id(struct net_device *dev, u32 data)
    struct port_info *pi = netdev_priv(dev);
    struct adapter *adapter = pi->adapter;

    for (i = 0; i < data * 2; i++) {
        t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
                         (i & 1) ? F_GPIO0_OUT_VAL : 0);
        /* bail out early if the sleep is interrupted by a signal */
        if (msleep_interruptible(500))
    t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
/*
 * ethtool get_settings: report supported/advertised link modes and, if
 * the link is up, the negotiated speed/duplex.
 * NOTE(review): the no-carrier branch (speed/duplex set to unknown) and
 * the return statement are in lines elided from this excerpt.
 */
static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
    struct port_info *p = netdev_priv(dev);

    cmd->supported = p->link_config.supported;
    cmd->advertising = p->link_config.advertising;

    if (netif_carrier_ok(dev)) {
        cmd->speed = p->link_config.speed;
        cmd->duplex = p->link_config.duplex;
    /* port type is derived from the supported-media mask */
    cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
    cmd->phy_address = p->phy.addr;
    cmd->transceiver = XCVR_EXTERNAL;
    cmd->autoneg = p->link_config.autoneg;
/*
 * Map a (speed, duplex) pair onto the corresponding ethtool
 * SUPPORTED_* capability bit; the value is used to test against a
 * link_config's supported mask.
 * NOTE(review): the 'cap' declaration, the switch/case skeleton and the
 * final 'return cap;' are in lines elided from this excerpt.
 */
static int speed_duplex_to_caps(int speed, int duplex)
        if (duplex == DUPLEX_FULL)
            cap = SUPPORTED_10baseT_Full;
            cap = SUPPORTED_10baseT_Half;
        if (duplex == DUPLEX_FULL)
            cap = SUPPORTED_100baseT_Full;
            cap = SUPPORTED_100baseT_Half;
        if (duplex == DUPLEX_FULL)
            cap = SUPPORTED_1000baseT_Full;
            cap = SUPPORTED_1000baseT_Half;
        /* 10G is full duplex only */
        if (duplex == DUPLEX_FULL)
            cap = SUPPORTED_10000baseT_Full;
1571 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1572 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1573 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1574 ADVERTISED_10000baseT_Full)
/*
 * ethtool set_settings: apply a new link configuration.  For PHYs that
 * cannot autonegotiate, only the one supported fixed mode is accepted.
 * With autoneg disabled a single valid speed/duplex is required (1G
 * forced mode is not supported); with autoneg enabled the advertised
 * mask is intersected with what the PHY supports.
 * NOTE(review): the error-return statements and the 'int cap;'
 * declaration are in lines elided from this excerpt.
 */
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
    struct port_info *p = netdev_priv(dev);
    struct link_config *lc = &p->link_config;

    if (!(lc->supported & SUPPORTED_Autoneg)) {
        /*
         * PHY offers a single speed/duplex. See if that's what's
         * being requested (error return elided otherwise).
         */
        if (cmd->autoneg == AUTONEG_DISABLE) {
            cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
            if (lc->supported & cap)
    if (cmd->autoneg == AUTONEG_DISABLE) {
        int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

        /* forced 1000 Mb/s operation is not supported */
        if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
        lc->requested_speed = cmd->speed;
        lc->requested_duplex = cmd->duplex;
        lc->advertising = 0;
        cmd->advertising &= ADVERTISED_MASK;
        cmd->advertising &= lc->supported;
        if (!cmd->advertising)
        lc->requested_speed = SPEED_INVALID;
        lc->requested_duplex = DUPLEX_INVALID;
        lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
    lc->autoneg = cmd->autoneg;
    /* apply immediately only if the interface is up */
    if (netif_running(dev))
        t3_link_start(&p->phy, &p->mac, lc);
1618 static void get_pauseparam(struct net_device *dev,
1619 struct ethtool_pauseparam *epause)
1621 struct port_info *p = netdev_priv(dev);
1623 epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1624 epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1625 epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
/*
 * ethtool set_pauseparam: set the requested flow-control mode.  With
 * autoneg enabled the new mode is renegotiated; otherwise it is
 * programmed into the MAC directly.
 * NOTE(review): an error return for unsupported autoneg and the final
 * return statement are in lines elided from this excerpt.
 */
static int set_pauseparam(struct net_device *dev,
                          struct ethtool_pauseparam *epause)
    struct port_info *p = netdev_priv(dev);
    struct link_config *lc = &p->link_config;

    if (epause->autoneg == AUTONEG_DISABLE)
        lc->requested_fc = 0;
    else if (lc->supported & SUPPORTED_Autoneg)
        lc->requested_fc = PAUSE_AUTONEG;
    if (epause->rx_pause)
        lc->requested_fc |= PAUSE_RX;
    if (epause->tx_pause)
        lc->requested_fc |= PAUSE_TX;
    if (lc->autoneg == AUTONEG_ENABLE) {
        /* renegotiate so the partner learns the new setting */
        if (netif_running(dev))
            t3_link_start(&p->phy, &p->mac, lc);
        lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
        /* -1,-1 keeps current speed/duplex, changes only fc */
        if (netif_running(dev))
            t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1656 static u32 get_rx_csum(struct net_device *dev)
1658 struct port_info *p = netdev_priv(dev);
1660 return p->rx_csum_offload;
/*
 * ethtool set_rx_csum: enable/disable Rx checksum offload.  Disabling
 * it also disables LRO on all of the port's queue sets, since LRO
 * requires checksum validation.
 * NOTE(review): the disable-branch condition, loop-index declaration
 * and return statement are in lines elided from this excerpt.
 */
static int set_rx_csum(struct net_device *dev, u32 data)
    struct port_info *p = netdev_priv(dev);

    p->rx_csum_offload = data;
    for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
        set_qset_lro(dev, i, 0);
1677 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1679 struct port_info *pi = netdev_priv(dev);
1680 struct adapter *adapter = pi->adapter;
1681 const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1683 e->rx_max_pending = MAX_RX_BUFFERS;
1684 e->rx_mini_max_pending = 0;
1685 e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1686 e->tx_max_pending = MAX_TXQ_ENTRIES;
1688 e->rx_pending = q->fl_size;
1689 e->rx_mini_pending = q->rspq_size;
1690 e->rx_jumbo_pending = q->jumbo_size;
1691 e->tx_pending = q->txq_size[0];
/*
 * ethtool set_ringparam: validate the requested ring sizes and apply
 * them to every queue set of the port.  Rejected once the adapter is
 * fully initialized, since rings cannot be resized live.
 * NOTE(review): the error returns (-EINVAL/-EBUSY), the loop-index
 * declaration and the final return are in lines elided from this
 * excerpt.
 */
static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
    struct port_info *pi = netdev_priv(dev);
    struct adapter *adapter = pi->adapter;
    struct qset_params *q;

    if (e->rx_pending > MAX_RX_BUFFERS ||
        e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
        e->tx_pending > MAX_TXQ_ENTRIES ||
        e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
        e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
        e->rx_pending < MIN_FL_ENTRIES ||
        e->rx_jumbo_pending < MIN_FL_ENTRIES ||
        e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
    if (adapter->flags & FULL_INIT_DONE)
    /* one Tx size drives all three Tx queues of each set */
    q = &adapter->params.sge.qset[pi->first_qset];
    for (i = 0; i < pi->nqsets; ++i, ++q) {
        q->rspq_size = e->rx_mini_pending;
        q->fl_size = e->rx_pending;
        q->jumbo_size = e->rx_jumbo_pending;
        q->txq_size[0] = e->tx_pending;
        q->txq_size[1] = e->tx_pending;
        q->txq_size[2] = e->tx_pending;
/*
 * ethtool set_coalesce: program the Rx interrupt holdoff timer of queue
 * set 0 (the only per-adapter coalescing knob exposed here).  The timer
 * operates in 100ns units, hence the *10 range check against the
 * hardware field width.
 * NOTE(review): the -EINVAL and final return statements are in lines
 * elided from this excerpt.
 */
static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
    struct port_info *pi = netdev_priv(dev);
    struct adapter *adapter = pi->adapter;
    struct qset_params *qsp = &adapter->params.sge.qset[0];
    struct sge_qset *qs = &adapter->sge.qs[0];

    if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
    qsp->coalesce_usecs = c->rx_coalesce_usecs;
    t3_update_qset_coalesce(qs, qsp);
/* ethtool get_coalesce: report the Rx interrupt holdoff of queue set 0
 * (the same single knob set_coalesce() programs).
 * NOTE(review): the return statement is in a line elided from this
 * excerpt. */
static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
    struct port_info *pi = netdev_priv(dev);
    struct adapter *adapter = pi->adapter;
    struct qset_params *q = adapter->params.sge.qset;

    c->rx_coalesce_usecs = q->coalesce_usecs;
/*
 * ethtool get_eeprom: read a byte range from the serial EEPROM.  The
 * device reads 32-bit words, so the whole EEPROM is read into a bounce
 * buffer 4 bytes at a time and the requested sub-range copied out.
 * NOTE(review): the allocation-failure check, the data/err parameter
 * and local declarations, kfree and return are in lines elided from
 * this excerpt.
 */
static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
    struct port_info *pi = netdev_priv(dev);
    struct adapter *adapter = pi->adapter;
    u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);

    e->magic = EEPROM_MAGIC;
    /* start at the 4-byte-aligned word containing e->offset */
    for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
        err = t3_seeprom_read(adapter, i, (__le32 *) & buf[i]);
    memcpy(data, buf + e->offset, e->len);
/*
 * ethtool set_eeprom: write a byte range to the serial EEPROM.  Writes
 * are word (4-byte) based, so an unaligned request first reads the
 * partial words at both ends into a scratch buffer, merges the user
 * data, then writes full words back with write-protect lifted for the
 * duration.
 * NOTE(review): the data parameter, local declarations, several error
 * returns and the kfree/return epilogue are in lines elided from this
 * excerpt.
 */
static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
    struct port_info *pi = netdev_priv(dev);
    struct adapter *adapter = pi->adapter;
    u32 aligned_offset, aligned_len;

    if (eeprom->magic != EEPROM_MAGIC)
    aligned_offset = eeprom->offset & ~3;
    aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

    if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
        buf = kmalloc(aligned_len, GFP_KERNEL);
        /* preserve the bytes around the unaligned edges */
        err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
        if (!err && aligned_len > 4)
            err = t3_seeprom_read(adapter,
                                  aligned_offset + aligned_len - 4,
                                  (__le32 *) & buf[aligned_len - 4]);
        memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
    /* lift EEPROM write protection */
    err = t3_seeprom_wp(adapter, 0);
    for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
        err = t3_seeprom_write(adapter, aligned_offset, *p);
        aligned_offset += 4;
    /* re-enable write protection */
    err = t3_seeprom_wp(adapter, 1);
/* ethtool get_wol: Wake-on-LAN is not supported; clear the SecureOn
 * password (the supported/wolopts zeroing is in lines elided from this
 * excerpt). */
static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
    memset(&wol->sopass, 0, sizeof(wol->sopass));
/*
 * ethtool set_flags: toggle LRO on all of the port's queue sets.
 * Enabling LRO requires Rx checksum offload to be on.
 * NOTE(review): the loop-index declaration, error returns and final
 * return are in lines elided from this excerpt.
 */
static int cxgb3_set_flags(struct net_device *dev, u32 data)
    struct port_info *pi = netdev_priv(dev);

    if (data & ETH_FLAG_LRO) {
        /* LRO depends on Rx checksum validation */
        if (!pi->rx_csum_offload)
        for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++)
            set_qset_lro(dev, i, 1);
        for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++)
            set_qset_lro(dev, i, 0);
/*
 * ethtool operations table wiring the handlers above into the net
 * device.  Tx checksum/SG/TSO/link use the generic ethtool_op_*
 * helpers.
 * NOTE(review): the .get_wol entry and closing brace are in lines
 * elided from this excerpt.
 */
static const struct ethtool_ops cxgb_ethtool_ops = {
    .get_settings = get_settings,
    .set_settings = set_settings,
    .get_drvinfo = get_drvinfo,
    .get_msglevel = get_msglevel,
    .set_msglevel = set_msglevel,
    .get_ringparam = get_sge_param,
    .set_ringparam = set_sge_param,
    .get_coalesce = get_coalesce,
    .set_coalesce = set_coalesce,
    .get_eeprom_len = get_eeprom_len,
    .get_eeprom = get_eeprom,
    .set_eeprom = set_eeprom,
    .get_pauseparam = get_pauseparam,
    .set_pauseparam = set_pauseparam,
    .get_rx_csum = get_rx_csum,
    .set_rx_csum = set_rx_csum,
    .set_tx_csum = ethtool_op_set_tx_csum,
    .set_sg = ethtool_op_set_sg,
    .get_link = ethtool_op_get_link,
    .get_strings = get_strings,
    .phys_id = cxgb3_phys_id,
    .nway_reset = restart_autoneg,
    .get_sset_count = get_sset_count,
    .get_ethtool_stats = get_stats,
    .get_regs_len = get_regs_len,
    .get_regs = get_regs,
    .set_tso = ethtool_op_set_tso,
    .get_flags = ethtool_op_get_flags,
    .set_flags = cxgb3_set_flags,
/*
 * Range check used by the extension ioctl: negative values mean
 * "leave unchanged" and are always accepted; otherwise @val must lie in
 * [lo, hi].  Returns 1 if acceptable, 0 otherwise.
 */
static int in_range(int val, int lo, int hi)
{
    if (val < 0)
        return 1;
    return lo <= val && val <= hi;
}
/*
 * Handler for the driver-private extension ioctl.  The first word of
 * the user buffer is the sub-command id; each case copies in its own
 * argument struct, validates it (with capability checks for privileged
 * operations) and acts on the adapter.  Negative fields in the request
 * structures conventionally mean "leave unchanged" (see in_range()).
 * NOTE(review): many brace, declaration, error-return and
 * copy_to_user/return lines of the original are elided from this
 * excerpt; the visible statements are preserved verbatim.
 */
static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
    struct port_info *pi = netdev_priv(dev);
    struct adapter *adapter = pi->adapter;

    if (copy_from_user(&cmd, useraddr, sizeof(cmd)))

    /* Configure one SGE queue set: ring sizes, interrupt latency,
     * congestion threshold, polling mode and LRO. */
    case CHELSIO_SET_QSET_PARAMS:{
            struct qset_params *q;
            struct ch_qset_params t;
            int q1 = pi->first_qset;
            int nqsets = pi->nqsets;

            if (!capable(CAP_NET_ADMIN))
            if (copy_from_user(&t, useraddr, sizeof(t)))
            if (t.qset_idx >= SGE_QSETS)
            if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
                !in_range(t.cong_thres, 0, 255) ||
                !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
                !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
                !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
                          MAX_CTRL_TXQ_ENTRIES) ||
                !in_range(t.fl_size[0], MIN_FL_ENTRIES,
                || !in_range(t.fl_size[1], MIN_FL_ENTRIES,
                             MAX_RX_JUMBO_BUFFERS)
                || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
            /* LRO can only be enabled on qsets whose port has Rx
             * checksum offload on */
            if ((adapter->flags & FULL_INIT_DONE) && t.lro > 0)
                for_each_port(adapter, i) {
                    pi = adap2pinfo(adapter, i);
                    if (t.qset_idx >= pi->first_qset &&
                        t.qset_idx < pi->first_qset + pi->nqsets &&
                        !pi->rx_csum_offload)
            /* ring sizes cannot change after full initialization */
            if ((adapter->flags & FULL_INIT_DONE) &&
                (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
                 t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
                 t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
                 t.polling >= 0 || t.cong_thres >= 0))
            /* Allow setting of any available qset when offload enabled */
            if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
                for_each_port(adapter, i) {
                    pi = adap2pinfo(adapter, i);
                    nqsets += pi->first_qset + pi->nqsets;
            if (t.qset_idx < q1)
            if (t.qset_idx > q1 + nqsets - 1)
            q = &adapter->params.sge.qset[t.qset_idx];

            /* apply only the fields the caller set (>= 0) */
            if (t.rspq_size >= 0)
                q->rspq_size = t.rspq_size;
            if (t.fl_size[0] >= 0)
                q->fl_size = t.fl_size[0];
            if (t.fl_size[1] >= 0)
                q->jumbo_size = t.fl_size[1];
            if (t.txq_size[0] >= 0)
                q->txq_size[0] = t.txq_size[0];
            if (t.txq_size[1] >= 0)
                q->txq_size[1] = t.txq_size[1];
            if (t.txq_size[2] >= 0)
                q->txq_size[2] = t.txq_size[2];
            if (t.cong_thres >= 0)
                q->cong_thres = t.cong_thres;
            if (t.intr_lat >= 0) {
                struct sge_qset *qs =
                    &adapter->sge.qs[t.qset_idx];

                q->coalesce_usecs = t.intr_lat;
                t3_update_qset_coalesce(qs, q);
            if (t.polling >= 0) {
                if (adapter->flags & USING_MSIX)
                    q->polling = t.polling;
                    /* No polling with INTx for T3A */
                    if (adapter->params.rev == 0 &&
                        !(adapter->flags & USING_MSI))
                    /* with INTx, all qsets share the polling mode */
                    for (i = 0; i < SGE_QSETS; i++) {
                        q = &adapter->params.sge.
                        q->polling = t.polling;
                set_qset_lro(dev, t.qset_idx, t.lro);

    /* Read back the configuration of one SGE queue set. */
    case CHELSIO_GET_QSET_PARAMS:{
            struct qset_params *q;
            struct ch_qset_params t;
            int q1 = pi->first_qset;
            int nqsets = pi->nqsets;

            if (copy_from_user(&t, useraddr, sizeof(t)))

            /* Display qsets for all ports when offload enabled */
            if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
                for_each_port(adapter, i) {
                    pi = adap2pinfo(adapter, i);
                    nqsets = pi->first_qset + pi->nqsets;
            if (t.qset_idx >= nqsets)

            q = &adapter->params.sge.qset[q1 + t.qset_idx];
            t.rspq_size = q->rspq_size;
            t.txq_size[0] = q->txq_size[0];
            t.txq_size[1] = q->txq_size[1];
            t.txq_size[2] = q->txq_size[2];
            t.fl_size[0] = q->fl_size;
            t.fl_size[1] = q->jumbo_size;
            t.polling = q->polling;
            t.intr_lat = q->coalesce_usecs;
            t.cong_thres = q->cong_thres;

            /* MSI-X vector 0 is the async/slow-path interrupt, hence +1 */
            if (adapter->flags & USING_MSIX)
                t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
                t.vector = adapter->pdev->irq;

            if (copy_to_user(useraddr, &t, sizeof(t)))

    /* Change how many queue sets this port owns and re-pack the
     * first_qset assignments of all ports. */
    case CHELSIO_SET_QSET_NUM:{
            struct ch_reg edata;
            unsigned int i, first_qset = 0, other_qsets = 0;

            if (!capable(CAP_NET_ADMIN))
            if (adapter->flags & FULL_INIT_DONE)
            if (copy_from_user(&edata, useraddr, sizeof(edata)))
            /* multiple qsets per port require MSI-X */
            if (edata.val < 1 ||
                (edata.val > 1 && !(adapter->flags & USING_MSIX)))

            for_each_port(adapter, i)
                if (adapter->port[i] && adapter->port[i] != dev)
                    other_qsets += adap2pinfo(adapter, i)->nqsets;

            if (edata.val + other_qsets > SGE_QSETS)

            pi->nqsets = edata.val;

            for_each_port(adapter, i)
                if (adapter->port[i]) {
                    pi = adap2pinfo(adapter, i);
                    pi->first_qset = first_qset;
                    first_qset += pi->nqsets;

    /* Report how many queue sets this port owns. */
    case CHELSIO_GET_QSET_NUM:{
            struct ch_reg edata;

            edata.cmd = CHELSIO_GET_QSET_NUM;
            edata.val = pi->nqsets;
            if (copy_to_user(useraddr, &edata, sizeof(edata)))

    /* Load a firmware image supplied by user space. */
    case CHELSIO_LOAD_FW:{
            struct ch_mem_range t;

            if (!capable(CAP_SYS_RAWIO))
            if (copy_from_user(&t, useraddr, sizeof(t)))
            /* Check t.len sanity ? */
            fw_data = kmalloc(t.len, GFP_KERNEL);
                (fw_data, useraddr + sizeof(t), t.len)) {
            ret = t3_load_fw(adapter, fw_data, t.len);

    /* Replace the TCP MTU table used by the offload engine. */
    case CHELSIO_SETMTUTAB:{
            if (!is_offload(adapter))
            if (!capable(CAP_NET_ADMIN))
            if (offload_running(adapter))
            if (copy_from_user(&m, useraddr, sizeof(m)))
            if (m.nmtus != NMTUS)
            if (m.mtus[0] < 81) /* accommodate SACK */
            /* MTUs must be in ascending order */
            for (i = 1; i < NMTUS; ++i)
                if (m.mtus[i] < m.mtus[i - 1])
            memcpy(adapter->params.mtus, m.mtus,
                   sizeof(adapter->params.mtus));

    /* Report payload-memory (PM) page configuration. */
    case CHELSIO_GET_PM:{
            struct tp_params *p = &adapter->params.tp;
            struct ch_pm m = {.cmd = CHELSIO_GET_PM };

            if (!is_offload(adapter))
            m.tx_pg_sz = p->tx_pg_size;
            m.tx_num_pg = p->tx_num_pgs;
            m.rx_pg_sz = p->rx_pg_size;
            m.rx_num_pg = p->rx_num_pgs;
            m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
            if (copy_to_user(useraddr, &m, sizeof(m)))

    /* Configure payload-memory page sizes/counts (pre-init only). */
    case CHELSIO_SET_PM:{
            struct tp_params *p = &adapter->params.tp;

            if (!is_offload(adapter))
            if (!capable(CAP_NET_ADMIN))
            if (adapter->flags & FULL_INIT_DONE)
            if (copy_from_user(&m, useraddr, sizeof(m)))
            if (!is_power_of_2(m.rx_pg_sz) ||
                !is_power_of_2(m.tx_pg_sz))
                return -EINVAL; /* not power of 2 */
            if (!(m.rx_pg_sz & 0x14000))
                return -EINVAL; /* not 16KB or 64KB */
            if (!(m.tx_pg_sz & 0x1554000))
            /* -1 means keep the current value */
            if (m.tx_num_pg == -1)
                m.tx_num_pg = p->tx_num_pgs;
            if (m.rx_num_pg == -1)
                m.rx_num_pg = p->rx_num_pgs;
            if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
            if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
                m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
            p->rx_pg_size = m.rx_pg_sz;
            p->tx_pg_size = m.tx_pg_sz;
            p->rx_num_pgs = m.rx_num_pg;
            p->tx_num_pgs = m.tx_num_pg;

    /* Read a range of adapter memory (CM/PMRX/PMTX) to user space. */
    case CHELSIO_GET_MEM:{
            struct ch_mem_range t;

            if (!is_offload(adapter))
            if (!(adapter->flags & FULL_INIT_DONE))
                return -EIO; /* need the memory controllers */
            if (copy_from_user(&t, useraddr, sizeof(t)))
            /* memory reads are 8-byte aligned */
            if ((t.addr & 7) || (t.len & 7))
            if (t.mem_id == MEM_CM)
            else if (t.mem_id == MEM_PMRX)
                mem = &adapter->pmrx;
            else if (t.mem_id == MEM_PMTX)
                mem = &adapter->pmtx;

            /*
             * Version scheme:
             * bits 0..9: chip version
             * bits 10..15: chip revision
             */
            t.version = 3 | (adapter->params.rev << 10);
            if (copy_to_user(useraddr, &t, sizeof(t)))

            /*
             * Read 256 bytes at a time as len can be large and we don't
             * want to use huge intermediate buffers.
             */
            useraddr += sizeof(t); /* advance to start of buffer */
                unsigned int chunk =
                    min_t(unsigned int, t.len, sizeof(buf));

                t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
                if (copy_to_user(useraddr, buf, chunk))

    /* Install Tx/Rx packet trace filters. */
    case CHELSIO_SET_TRACE_FILTER:{
            const struct trace_params *tp;

            if (!capable(CAP_NET_ADMIN))
            if (!offload_running(adapter))
            if (copy_from_user(&t, useraddr, sizeof(t)))

            tp = (const struct trace_params *)&t.sip;
            t3_config_trace_filter(adapter, tp, 0,
            t3_config_trace_filter(adapter, tp, 1,
/*
 * Standard SIOCxMIIxxx ioctl handler plus dispatch of the private
 * extension ioctl.  For 10G PHYs the MMD (device) number is carried in
 * the upper bits of phy_id and clamped to a valid range; 1G PHYs use
 * clause-22 addressing.
 * NOTE(review): the switch/case skeleton, local declarations (mmd, val,
 * ret) and several return statements are in lines elided from this
 * excerpt.
 */
static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
    struct mii_ioctl_data *data = if_mii(req);
    struct port_info *pi = netdev_priv(dev);
    struct adapter *adapter = pi->adapter;

        data->phy_id = pi->phy.addr;
        struct cphy *phy = &pi->phy;

        if (!phy->mdio_read)
        if (is_10G(adapter)) {
            /* clause-45: device (MMD) number in bits 8+ of phy_id */
            mmd = data->phy_id >> 8;
            else if (mmd > MDIO_DEV_VEND2)

            phy->mdio_read(adapter, data->phy_id & 0x1f,
                           mmd, data->reg_num, &val);
            phy->mdio_read(adapter, data->phy_id & 0x1f,
                           0, data->reg_num & 0x1f,
        data->val_out = val;
        struct cphy *phy = &pi->phy;

        if (!capable(CAP_NET_ADMIN))
        if (!phy->mdio_write)
        if (is_10G(adapter)) {
            mmd = data->phy_id >> 8;
            else if (mmd > MDIO_DEV_VEND2)

            phy->mdio_write(adapter,
                            data->phy_id & 0x1f, mmd,
            phy->mdio_write(adapter,
                            data->phy_id & 0x1f, 0,
                            data->reg_num & 0x1f,
        /* driver-private commands */
        return cxgb_extension_ioctl(dev, req->ifr_data);
/*
 * net_device change_mtu: validate and program the new MTU into the MAC,
 * then refresh the per-port MTU table.  On rev-0 parts with offload
 * active the TP MTU table must also be reloaded.
 * NOTE(review): the -EINVAL return, dev->mtu assignment and final
 * return are in lines elided from this excerpt.
 */
static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
    struct port_info *pi = netdev_priv(dev);
    struct adapter *adapter = pi->adapter;

    if (new_mtu < 81) /* accommodate SACK */
    if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
    init_port_mtus(adapter);
    if (adapter->params.rev == 0 && offload_running(adapter))
        t3_load_mtus(adapter, adapter->params.mtus,
                     adapter->params.a_wnd, adapter->params.b_wnd,
                     adapter->port[0]->mtu);
/*
 * net_device set_mac_address: validate and install a new MAC address in
 * both the netdev and the hardware; with offload running the SMT entry
 * is rewritten as well.
 * NOTE(review): the invalid-address error return and final return are
 * in lines elided from this excerpt.
 */
static int cxgb_set_mac_addr(struct net_device *dev, void *p)
    struct port_info *pi = netdev_priv(dev);
    struct adapter *adapter = pi->adapter;
    struct sockaddr *addr = p;

    if (!is_valid_ether_addr(addr->sa_data))
    memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
    t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
    if (offload_running(adapter))
        write_smt_entry(adapter, pi->port_id);
/*
 * t3_synchronize_rx - wait for current Rx processing on a port to complete
 * @adap: the adapter
 * @p: the port whose queue sets to drain
 *
 * Ensures that current Rx processing on any of the queues associated with
 * the given port completes before returning. We do this by acquiring and
 * releasing the locks of the response queues associated with the port.
 */
static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
    for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
        struct sge_rspq *q = &adap->sge.qs[i].rspq;

        /* lock/unlock pair acts as a barrier: any handler holding the
         * lock has finished by the time we get it */
        spin_lock_irq(&q->lock);
        spin_unlock_irq(&q->lock);
/*
 * VLAN acceleration registration.  Rev > 0 chips support per-port VLAN
 * extraction; rev 0 has a single global control which is enabled while
 * any port has a vlan_group.  Rx is drained afterwards so no packet is
 * processed with stale VLAN state.
 * NOTE(review): the pi->vlan_grp assignment and the if/else structure
 * around the rev-0 path are in lines elided from this excerpt.
 */
static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
    struct port_info *pi = netdev_priv(dev);
    struct adapter *adapter = pi->adapter;

    if (adapter->params.rev > 0)
        t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
        /* single control for all ports */
        unsigned int i, have_vlans = 0;
        for_each_port(adapter, i)
            have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;

        t3_set_vlan_accel(adapter, 1, have_vlans);
    t3_synchronize_rx(adapter, pi);
2409 #ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * netpoll hook: service the port's queue sets with interrupts disabled
 * by invoking the appropriate interrupt handler directly.
 * NOTE(review): the qidx declaration, the source selection for the
 * MSI-X vs. shared-interrupt case and the closing braces are in lines
 * elided from this excerpt.
 */
static void cxgb_netpoll(struct net_device *dev)
    struct port_info *pi = netdev_priv(dev);
    struct adapter *adapter = pi->adapter;

    for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
        struct sge_qset *qs = &adapter->sge.qs[qidx];

        if (adapter->flags & USING_MSIX)
        /* call the handler that matches the current polling mode */
        t3_intr_handler(adapter, qs->rspq.polling) (0, source);
/*
 * Periodic accumulation of MAC statistics.  The hardware counters are
 * clear-on-read, so each running port's counters are folded into the
 * software totals under stats_lock.
 * NOTE(review): the loop-index declaration is in a line elided from
 * this excerpt.
 */
static void mac_stats_update(struct adapter *adapter)
    for_each_port(adapter, i) {
        struct net_device *dev = adapter->port[i];
        struct port_info *p = netdev_priv(dev);

        if (netif_running(dev)) {
            spin_lock(&adapter->stats_lock);
            t3_mac_update_stats(&p->mac);
            spin_unlock(&adapter->stats_lock);
/*
 * Poll link state for PHYs that do not generate interrupts.
 * NOTE(review): the loop-index declaration is in a line elided from
 * this excerpt.
 */
static void check_link_status(struct adapter *adapter)
    for_each_port(adapter, i) {
        struct net_device *dev = adapter->port[i];
        struct port_info *p = netdev_priv(dev);

        /* only poll running ports whose PHY lacks IRQ support */
        if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev))
            t3_link_changed(adapter, i);
/*
 * T3B2 MAC watchdog: periodically check each running port's MAC and,
 * when the watchdog reports a wedged MAC (status 2), reinitialize it
 * completely.  Runs under the RTNL to synchronize with interface
 * down-path; uses trylock so the workqueue never blocks on it.
 * NOTE(review): the status declaration, continue statement and
 * rtnl_unlock are in lines elided from this excerpt.
 */
static void check_t3b2_mac(struct adapter *adapter)
    if (!rtnl_trylock()) /* synchronize with ifdown */
    for_each_port(adapter, i) {
        struct net_device *dev = adapter->port[i];
        struct port_info *p = netdev_priv(dev);

        if (!netif_running(dev))
        if (netif_running(dev) && netif_carrier_ok(dev))
            status = t3b2_mac_watchdog_task(&p->mac);
            p->mac.stats.num_toggled++;
        else if (status == 2) {
            /* MAC is wedged: full reinit of address, mode, link */
            struct cmac *mac = &p->mac;

            t3_mac_set_mtu(mac, dev->mtu);
            t3_mac_set_address(mac, 0, dev->dev_addr);
            cxgb_set_rxmode(dev);
            t3_link_start(&p->phy, mac, &p->link_config);
            t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
            t3_port_intr_enable(adapter, p->port_id);
            p->mac.stats.num_resets++;
/*
 * Periodic maintenance work: poll link state (for interrupt-less PHYs),
 * accumulate MAC statistics often enough that the clear-on-read
 * counters cannot wrap, and run the T3B2 MAC watchdog.  Reschedules
 * itself while any port is up.
 */
static void t3_adap_check_task(struct work_struct *work)
    struct adapter *adapter = container_of(work, struct adapter,
                                           adap_check_task.work);
    const struct adapter_params *p = &adapter->params;

    adapter->check_task_cnt++;

    /* Check link status for PHYs without interrupts */
    if (p->linkpoll_period)
        check_link_status(adapter);

    /* Accumulate MAC stats if needed */
    if (!p->linkpoll_period ||
        (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
        p->stats_update_period) {
        mac_stats_update(adapter);
        adapter->check_task_cnt = 0;

    if (p->rev == T3_REV_B2)
        check_t3b2_mac(adapter);

    /* Schedule the next check update if any port is active. */
    spin_lock_irq(&adapter->work_lock);
    if (adapter->open_device_map & PORT_MASK)
        schedule_chk_task(adapter);
    spin_unlock_irq(&adapter->work_lock);
/*
 * Processes external (PHY) interrupts in process context.  PHY
 * interrupts were masked by t3_os_ext_intr_handler(); once handled,
 * the F_T3DBG bit is restored into the slow interrupt mask and the
 * latched cause cleared, but only if slow interrupts are still enabled
 * (slow_intr_mask != 0, i.e. the adapter was not shut down meanwhile).
 */
static void ext_intr_task(struct work_struct *work)
    struct adapter *adapter = container_of(work, struct adapter,
                                           ext_intr_handler_task);

    t3_phy_intr_handler(adapter);

    /* Now reenable external interrupts */
    spin_lock_irq(&adapter->work_lock);
    if (adapter->slow_intr_mask) {
        adapter->slow_intr_mask |= F_T3DBG;
        /* clear any cause latched while masked, then unmask */
        t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
        t3_write_reg(adapter, A_PL_INT_ENABLE0,
                     adapter->slow_intr_mask);
    spin_unlock_irq(&adapter->work_lock);
/*
 * Interrupt-context handler for external (PHY) interrupts.
 *
 * Schedule a task to handle external interrupts as they may be slow
 * and we use a mutex to protect MDIO registers. We disable PHY
 * interrupts in the meantime and let the task reenable them when
 * it is done.
 */
void t3_os_ext_intr_handler(struct adapter *adapter)
    spin_lock(&adapter->work_lock);
    if (adapter->slow_intr_mask) {
        /* mask PHY interrupts until ext_intr_task() has run */
        adapter->slow_intr_mask &= ~F_T3DBG;
        t3_write_reg(adapter, A_PL_INT_ENABLE0,
                     adapter->slow_intr_mask);
        queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
    spin_unlock(&adapter->work_lock);
/*
 * Quiesce the adapter after a fatal/PCI error: bring down all running
 * ports, close the offload device if active, stop the SGE timers and,
 * when @reset is set, reset the chip (disabling the PCI device on
 * reset failure).
 * NOTE(review): the declarations of i/ret, the per-port close call, the
 * reset guard and the final return are in lines elided from this
 * excerpt.
 */
static int t3_adapter_error(struct adapter *adapter, int reset)
    /* Stop all ports */
    for_each_port(adapter, i) {
        struct net_device *netdev = adapter->port[i];

        if (netif_running(netdev))

    if (is_offload(adapter) &&
        test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
        offload_close(&adapter->tdev);

    /* Stop SGE timers */
    t3_stop_sge_timers(adapter);

    /* require a fresh full init before the adapter is used again */
    adapter->flags &= ~FULL_INIT_DONE;

    ret = t3_reset_adapter(adapter);
    pci_disable_device(adapter->pdev);
/*
 * Re-enable the PCI device after a reset, restore PCI state, free the
 * old SGE resources and re-prepare the adapter for use.
 * NOTE(review): the error returns and success return are in lines
 * elided from this excerpt.
 */
static int t3_reenable_adapter(struct adapter *adapter)
    if (pci_enable_device(adapter->pdev)) {
        dev_err(&adapter->pdev->dev,
                "Cannot re-enable PCI device after reset.\n");
    pci_set_master(adapter->pdev);
    pci_restore_state(adapter->pdev);

    /* Free sge resources */
    t3_free_sge_resources(adapter);

    if (t3_replay_prep_adapter(adapter))
/*
 * Reopen every port that was running before the error, logging (and
 * giving up on) ports that fail to come back up.
 * NOTE(review): the loop-index declaration and the remainder of the
 * error message/braces are in lines elided from this excerpt.
 */
static void t3_resume_ports(struct adapter *adapter)
    /* Restart the ports */
    for_each_port(adapter, i) {
        struct net_device *netdev = adapter->port[i];

        if (netif_running(netdev)) {
            if (cxgb_open(netdev)) {
                dev_err(&adapter->pdev->dev,
                        "can't bring device back up"
/*
 * processes a fatal error.
 * Bring the ports down, reset the chip, bring the ports back up.
 * (Runs from the workqueue, scheduled by t3_fatal_err().)
 */
static void fatal_error_task(struct work_struct *work)
    struct adapter *adapter = container_of(work, struct adapter,
                                           fatal_error_handler_task);

    /* quiesce with chip reset, then reinit and restart the ports */
    err = t3_adapter_error(adapter, 1);
    err = t3_reenable_adapter(adapter);
    t3_resume_ports(adapter);

    CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
/*
 * Immediate response to a fatal hardware error: stop the SGE and both
 * MACs, disable interrupts and hand recovery off to fatal_error_task().
 * The firmware status words are dumped to aid debugging.
 */
void t3_fatal_err(struct adapter *adapter)
    unsigned int fw_status[4];

    if (adapter->flags & FULL_INIT_DONE) {
        t3_sge_stop(adapter);
        /* kill Tx/Rx on both MAC instances */
        t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
        t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
        t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
        t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);

        spin_lock(&adapter->work_lock);
        t3_intr_disable(adapter);
        queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
        spin_unlock(&adapter->work_lock);
    CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
    if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
        CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
                 fw_status[0], fw_status[1],
                 fw_status[2], fw_status[3]);
/*
 * t3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
                                             pci_channel_state_t state)
    struct adapter *adapter = pci_get_drvdata(pdev);

    /* quiesce without resetting; the slot reset does that */
    ret = t3_adapter_error(adapter, 0);

    /* Request a slot reset. */
    return PCI_ERS_RESULT_NEED_RESET;
/*
 * t3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
    struct adapter *adapter = pci_get_drvdata(pdev);

    if (!t3_reenable_adapter(adapter))
        return PCI_ERS_RESULT_RECOVERED;

    return PCI_ERS_RESULT_DISCONNECT;
/*
 * t3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.
 */
static void t3_io_resume(struct pci_dev *pdev)
    struct adapter *adapter = pci_get_drvdata(pdev);

    t3_resume_ports(adapter);
/*
 * PCI Error Recovery callbacks wired into the pci_driver below; the
 * core invokes these in order: error_detected -> slot_reset -> resume.
 */
2734 static struct pci_error_handlers t3_err_handler = {
2735 .error_detected = t3_io_error_detected,
2736 .slot_reset = t3_io_slot_reset,
2737 .resume = t3_io_resume,
2741 * Set the number of qsets based on the number of CPUs and the number of ports,
2742 * not to exceed the number of available qsets, assuming there are enough qsets
2745 static void set_nqsets(struct adapter *adap)
2748 int num_cpus = num_online_cpus();
2749 int hwports = adap->params.nports;
2750 int nqsets = SGE_QSETS;
/*
 * Multi-queue only makes sense on rev > 0 silicon with MSI-X;
 * otherwise every port gets a single qset.
 *
 * NOTE(review): several assignment lines are elided in this extract
 * (line numbers jump 2752 -> 2754, 2755 -> 2757, etc.) — the visible
 * conditions are only fragments of the full clamping logic; verify
 * against the complete source.
 */
2752 if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
2754 (hwports * nqsets > SGE_QSETS ||
2755 num_cpus >= nqsets / hwports))
2757 if (nqsets > num_cpus)
2759 if (nqsets < 1 || hwports == 4)
/* Distribute the chosen qset count across the ports and log it */
2764 for_each_port(adap, i) {
2765 struct port_info *pi = adap2pinfo(adap, i);
2768 pi->nqsets = nqsets;
2769 j = pi->first_qset + nqsets;
2771 dev_info(&adap->pdev->dev,
2772 "Port %d using %d queue sets.\n", i, nqsets);
/*
 * cxgb_enable_msix - try to switch the adapter to MSI-X
 * @adap: the adapter
 *
 * Requests one MSI-X vector per qset plus one extra (SGE_QSETS + 1).
 * On success the allocated vectors are copied into adap->msix_info[];
 * on partial availability a message with the remaining vector count
 * is logged and MSI-X is not used.  Returns pci_enable_msix()'s result.
 */
2776 static int __devinit cxgb_enable_msix(struct adapter *adap)
2778 struct msix_entry entries[SGE_QSETS + 1];
2781 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2782 entries[i].entry = i;
2784 err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
/* Success path: record the vector numbers the PCI core handed back */
2786 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2787 adap->msix_info[i].vec = entries[i].vector;
/* err > 0 from pci_enable_msix() means "only this many available" */
2789 dev_info(&adap->pdev->dev,
2790 "only %d MSI-X vectors left, not using MSI-X\n", err);
/*
 * print_port_info - log a one-line summary for each registered port
 * @adap: the adapter
 * @ai:   static description of this adapter model
 *
 * Formats the bus type (PCI/PCI-X variants print speed/width in MHz
 * and bits, PCI Express prints lane count as "xN") and prints, per
 * registered port: device name, adapter description, PHY description,
 * NIC vs. RNIC ("R" prefix when offload-capable), silicon revision,
 * and the interrupt mode in use.  For the port whose name the adapter
 * adopted, also prints memory sizes (CM/PMTX/PMRX, in MB) and serial
 * number when VPD reports a memory clock.
 */
2794 static void __devinit print_port_info(struct adapter *adap,
2795 const struct adapter_info *ai)
2797 static const char *pci_variant[] = {
2798 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
/* PCIe branch: width is the negotiated lane count */
2805 snprintf(buf, sizeof(buf), "%s x%d",
2806 pci_variant[adap->params.pci.variant],
2807 adap->params.pci.width);
/* Parallel-PCI branch: report bus speed and data-path width */
2809 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2810 pci_variant[adap->params.pci.variant],
2811 adap->params.pci.speed, adap->params.pci.width);
2813 for_each_port(adap, i) {
2814 struct net_device *dev = adap->port[i];
2815 const struct port_info *pi = netdev_priv(dev);
/* Only describe ports that actually registered a net device */
2817 if (!test_bit(i, &adap->registered_device_map))
2819 printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
2820 dev->name, ai->desc, pi->phy.desc,
2821 is_offload(adap) ? "R" : "", adap->params.rev, buf,
2822 (adap->flags & USING_MSIX) ? " MSI-X" :
2823 (adap->flags & USING_MSI) ? " MSI" : "");
/* Print adapter-wide details once, on the name-giving port */
2824 if (adap->name == dev->name && adap->params.vpd.mclk)
2826 "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
2827 adap->name, t3_mc7_size(&adap->cm) >> 20,
2828 t3_mc7_size(&adap->pmtx) >> 20,
2829 t3_mc7_size(&adap->pmrx) >> 20,
2830 adap->params.vpd.sn);
/*
 * net_device_ops shared by every port's net device; installed in
 * init_one().  Netpoll support is conditional on the kernel config.
 */
2834 static const struct net_device_ops cxgb_netdev_ops = {
2835 .ndo_open = cxgb_open,
2836 .ndo_stop = cxgb_close,
2837 .ndo_start_xmit = t3_eth_xmit,
2838 .ndo_get_stats = cxgb_get_stats,
2839 .ndo_validate_addr = eth_validate_addr,
2840 .ndo_set_multicast_list = cxgb_set_rxmode,
2841 .ndo_do_ioctl = cxgb_ioctl,
2842 .ndo_change_mtu = cxgb_change_mtu,
2843 .ndo_set_mac_address = cxgb_set_mac_addr,
2844 .ndo_vlan_rx_register = vlan_rx_register,
2845 #ifdef CONFIG_NET_POLL_CONTROLLER
2846 .ndo_poll_controller = cxgb_netpoll,
/*
 * init_one - PCI probe routine for a T3 adapter
 * @pdev: the PCI device
 * @ent:  matching entry in cxgb3_pci_tbl; driver_data selects the
 *        adapter_info table entry
 *
 * Claims PCI resources, configures DMA masks (64-bit with a 64-bit
 * coherent mask when available, else 32-bit), maps BAR0, allocates
 * and initializes the adapter structure, creates one multiqueue net
 * device per port, prepares the hardware, registers the net devices,
 * sets up offload, interrupts (MSI-X preferred, then MSI, per the
 * `msi` module parameter), qset distribution, sysfs attributes, and
 * logs the port summary.  Errors unwind through the labelled cleanup
 * tail at the bottom.
 *
 * NOTE(review): this extract elides many lines (error checks such as
 * `if (err)` / `if (!netdev)`, closing braces, `goto` targets, and a
 * `return` path) — original line numbers jump throughout; verify
 * against the complete source before building.
 */
2850 static int __devinit init_one(struct pci_dev *pdev,
2851 const struct pci_device_id *ent)
2853 static int version_printed;
2855 int i, err, pci_using_dac = 0;
2856 unsigned long mmio_start, mmio_len;
2857 const struct adapter_info *ai;
2858 struct adapter *adapter = NULL;
2859 struct port_info *pi;
/* Print the driver banner once, on the first probe */
2861 if (!version_printed) {
2862 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
/* Single-threaded workqueue shared by all adapters for deferred work */
2867 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
2869 printk(KERN_ERR DRV_NAME
2870 ": cannot initialize work queue\n");
2875 err = pci_request_regions(pdev, DRV_NAME);
2877 /* Just info, some other driver may have claimed the device. */
2878 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
2882 err = pci_enable_device(pdev);
2884 dev_err(&pdev->dev, "cannot enable PCI device\n");
2885 goto out_release_regions;
/* Prefer 64-bit DMA; its coherent mask must also be settable */
2888 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2890 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2892 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
2893 "coherent allocations\n");
2894 goto out_disable_device;
2896 } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
2897 dev_err(&pdev->dev, "no usable DMA configuration\n");
2898 goto out_disable_device;
2901 pci_set_master(pdev);
/* Saved state is restored by the reset/recovery paths */
2902 pci_save_state(pdev);
2904 mmio_start = pci_resource_start(pdev, 0);
2905 mmio_len = pci_resource_len(pdev, 0);
2906 ai = t3_get_adapter_info(ent->driver_data);
2908 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2911 goto out_disable_device;
2914 adapter->regs = ioremap_nocache(mmio_start, mmio_len);
2915 if (!adapter->regs) {
2916 dev_err(&pdev->dev, "cannot map device registers\n");
2918 goto out_free_adapter;
2921 adapter->pdev = pdev;
2922 adapter->name = pci_name(pdev);
2923 adapter->msg_enable = dflt_msg_enable;
2924 adapter->mmio_len = mmio_len;
2926 mutex_init(&adapter->mdio_lock);
2927 spin_lock_init(&adapter->work_lock);
2928 spin_lock_init(&adapter->stats_lock);
2930 INIT_LIST_HEAD(&adapter->adapter_list);
2931 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
2932 INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
2933 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
/* One multiqueue net device per physical port */
2935 for (i = 0; i < ai->nports; ++i) {
2936 struct net_device *netdev;
2938 netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
2944 SET_NETDEV_DEV(netdev, &pdev->dev);
2946 adapter->port[i] = netdev;
2947 pi = netdev_priv(netdev);
2948 pi->adapter = adapter;
2949 pi->rx_csum_offload = 1;
/* Keep the link/queues down until cxgb_open() runs */
2951 netif_carrier_off(netdev);
2952 netif_tx_stop_all_queues(netdev);
2953 netdev->irq = pdev->irq;
2954 netdev->mem_start = mmio_start;
2955 netdev->mem_end = mmio_start + mmio_len - 1;
2956 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
2957 netdev->features |= NETIF_F_LLTX;
2959 netdev->features |= NETIF_F_HIGHDMA;
2961 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2962 netdev->netdev_ops = &cxgb_netdev_ops;
2963 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
2966 pci_set_drvdata(pdev, adapter);
2967 if (t3_prep_adapter(adapter, ai, 1) < 0) {
2973 * The card is now ready to go. If any errors occur during device
2974 * registration we do not fail the whole card but rather proceed only
2975 * with the ports we manage to register successfully. However we must
2976 * register at least one net device.
2978 for_each_port(adapter, i) {
2979 err = register_netdev(adapter->port[i]);
2981 dev_warn(&pdev->dev,
2982 "cannot register net device %s, skipping\n",
2983 adapter->port[i]->name);
2986 * Change the name we use for messages to the name of
2987 * the first successfully registered interface.
2989 if (!adapter->registered_device_map)
2990 adapter->name = adapter->port[i]->name;
2992 __set_bit(i, &adapter->registered_device_map);
2995 if (!adapter->registered_device_map) {
2996 dev_err(&pdev->dev, "could not register any net devices\n");
3000 /* Driver's ready. Reflect it on LEDs */
3001 t3_led_ready(adapter);
3003 if (is_offload(adapter)) {
3004 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
3005 cxgb3_adapter_ofld(adapter);
3008 /* See what interrupts we'll be using */
3009 if (msi > 1 && cxgb_enable_msix(adapter) == 0)
3010 adapter->flags |= USING_MSIX;
3011 else if (msi > 0 && pci_enable_msi(pdev) == 0)
3012 adapter->flags |= USING_MSI;
3014 set_nqsets(adapter);
3016 err = sysfs_create_group(&adapter->port[0]->dev.kobj,
3019 print_port_info(adapter, ai);
/* Error unwind: release resources in reverse order of acquisition */
3023 iounmap(adapter->regs);
3024 for (i = ai->nports - 1; i >= 0; --i)
3025 if (adapter->port[i])
3026 free_netdev(adapter->port[i]);
3032 pci_disable_device(pdev);
3033 out_release_regions:
3034 pci_release_regions(pdev);
3035 pci_set_drvdata(pdev, NULL);
/*
 * remove_one - PCI remove routine; tear down everything init_one built
 * @pdev: the PCI device being removed
 *
 * Stops the SGE, removes the sysfs attribute group, detaches offload
 * (closing the offload device if it was opened), unregisters every
 * registered net device, stops SGE timers, frees SGE resources,
 * disables MSI/MSI-X, frees the net devices, unmaps BAR0 and releases
 * the PCI resources.  Ordering mirrors the reverse of probe.
 *
 * NOTE(review): some lines are elided in this extract (e.g. the
 * attribute-group argument at 3047 and closing braces); verify
 * against the complete source.
 */
3039 static void __devexit remove_one(struct pci_dev *pdev)
3041 struct adapter *adapter = pci_get_drvdata(pdev);
3046 t3_sge_stop(adapter);
3047 sysfs_remove_group(&adapter->port[0]->dev.kobj,
3050 if (is_offload(adapter)) {
3051 cxgb3_adapter_unofld(adapter);
3052 if (test_bit(OFFLOAD_DEVMAP_BIT,
3053 &adapter->open_device_map))
3054 offload_close(&adapter->tdev);
3057 for_each_port(adapter, i)
3058 if (test_bit(i, &adapter->registered_device_map))
3059 unregister_netdev(adapter->port[i]);
3061 t3_stop_sge_timers(adapter);
3062 t3_free_sge_resources(adapter);
3063 cxgb_disable_msi(adapter);
/* free_netdev() also frees the embedded port_info */
3065 for_each_port(adapter, i)
3066 if (adapter->port[i])
3067 free_netdev(adapter->port[i]);
3069 iounmap(adapter->regs);
3071 pci_release_regions(pdev);
3072 pci_disable_device(pdev);
3073 pci_set_drvdata(pdev, NULL);
/*
 * pci_driver registration: binds probe/remove and the PCI error
 * recovery handlers to the device IDs in cxgb3_pci_tbl.
 */
3077 static struct pci_driver driver = {
3079 .id_table = cxgb3_pci_tbl,
3081 .remove = __devexit_p(remove_one),
3082 .err_handler = &t3_err_handler,
/*
 * Module entry point: initialize the offload layer, then register
 * the PCI driver (which triggers init_one() per matching device).
 */
3085 static int __init cxgb3_init_module(void)
3089 cxgb3_offload_init();
3091 ret = pci_register_driver(&driver);
/*
 * Module exit point: unregister the PCI driver (running remove_one()
 * for each bound device), then destroy the shared workqueue.
 */
3095 static void __exit cxgb3_cleanup_module(void)
3097 pci_unregister_driver(&driver);
3099 destroy_workqueue(cxgb3_wq);
/* Hook the init/exit routines into the module load/unload machinery */
3102 module_init(cxgb3_init_module);
3103 module_exit(cxgb3_cleanup_module);