2 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 #include <linux/module.h>
33 #include <linux/moduleparam.h>
34 #include <linux/init.h>
35 #include <linux/pci.h>
36 #include <linux/dma-mapping.h>
37 #include <linux/netdevice.h>
38 #include <linux/etherdevice.h>
39 #include <linux/if_vlan.h>
40 #include <linux/mii.h>
41 #include <linux/sockios.h>
42 #include <linux/workqueue.h>
43 #include <linux/proc_fs.h>
44 #include <linux/rtnetlink.h>
45 #include <linux/firmware.h>
46 #include <linux/log2.h>
47 #include <asm/uaccess.h>
50 #include "cxgb3_ioctl.h"
52 #include "cxgb3_offload.h"
55 #include "cxgb3_ctl_defs.h"
57 #include "firmware_exports.h"
60 MAX_TXQ_ENTRIES = 16384,
61 MAX_CTRL_TXQ_ENTRIES = 1024,
62 MAX_RSPQ_ENTRIES = 16384,
63 MAX_RX_BUFFERS = 16384,
64 MAX_RX_JUMBO_BUFFERS = 16384,
66 MIN_CTRL_TXQ_ENTRIES = 4,
67 MIN_RSPQ_ENTRIES = 32,
71 #define PORT_MASK ((1 << MAX_NPORTS) - 1)
73 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
74 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
75 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
77 #define EEPROM_MAGIC 0x38E2F10C
79 #define CH_DEVICE(devid, idx) \
80 { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
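/*
 * The fields above map onto struct pci_device_id in order: vendor, device,
 * subvendor, subdevice, class, class_mask, driver_data.  The trailing idx is
 * carried in driver_data and presumably selects the per-board adapter-info
 * entry at probe time.
 */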
82 static const struct pci_device_id cxgb3_pci_tbl[] = {
83 CH_DEVICE(0x20, 0), /* PE9000 */
84 CH_DEVICE(0x21, 1), /* T302E */
85 CH_DEVICE(0x22, 2), /* T310E */
86 CH_DEVICE(0x23, 3), /* T320X */
87 CH_DEVICE(0x24, 1), /* T302X */
88 CH_DEVICE(0x25, 3), /* T320E */
89 CH_DEVICE(0x26, 2), /* T310X */
90 CH_DEVICE(0x30, 2), /* T3B10 */
91 CH_DEVICE(0x31, 3), /* T3B20 */
92 CH_DEVICE(0x32, 1), /* T3B02 */
93 CH_DEVICE(0x35, 6), /* T3C20-derived T3C10 */
97 MODULE_DESCRIPTION(DRV_DESC);
98 MODULE_AUTHOR("Chelsio Communications");
99 MODULE_LICENSE("Dual BSD/GPL");
100 MODULE_VERSION(DRV_VERSION);
101 MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
103 static int dflt_msg_enable = DFLT_MSG_ENABLE;
105 module_param(dflt_msg_enable, int, 0644);
106 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
109 * The driver uses the best interrupt scheme available on a platform in the
110 * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
111 * of these schemes the driver may consider as follows:
113 * msi = 2: choose from among all three options
114 * msi = 1: only consider MSI and pin interrupts
115 * msi = 0: force pin interrupts
119 module_param(msi, int, 0644);
120 MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
123 * The driver enables offload by default.
124 * To disable it, set ofld_disable = 1.
127 static int ofld_disable = 0;
129 module_param(ofld_disable, int, 0644);
130 MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
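/*
 * A hypothetical load-time example combining the parameters above (parameter
 * names are as defined in this file, the module is assumed to be named cxgb3,
 * and the values are purely illustrative):
 *
 *	modprobe cxgb3 msi=1 ofld_disable=1 dflt_msg_enable=0x00ff
 */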
133 * We have work elements that we need to cancel when an interface is taken
134 * down. Normally the work elements would be executed by keventd but that
135 * can deadlock because of linkwatch. If our close method takes the rtnl
136 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
137 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
138 * for our work to complete. Get our own work queue to solve this.
140 static struct workqueue_struct *cxgb3_wq;
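/*
 * A minimal sketch of the intended pattern; the actual allocation happens in
 * the module init path, which is not part of this excerpt, presumably along
 * these lines:
 *
 *	cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
 *	if (!cxgb3_wq)
 *		return -ENOMEM;
 *	...
 *	queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
 */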
143 * link_report - show link status and link speed/duplex
144 * @dev: the net device of the port whose settings are to be reported
146 * Shows the link status, speed, and duplex of a port.
148 static void link_report(struct net_device *dev)
150 if (!netif_carrier_ok(dev))
151 printk(KERN_INFO "%s: link down\n", dev->name);
153 const char *s = "10Mbps";
154 const struct port_info *p = netdev_priv(dev);
156 switch (p->link_config.speed) {
168 printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
169 p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
174 * t3_os_link_changed - handle link status changes
175 * @adapter: the adapter associated with the link change
176 * @port_id: the port index whose link status has changed
177 * @link_stat: the new status of the link
178 * @speed: the new speed setting
179 * @duplex: the new duplex setting
180 * @pause: the new flow-control setting
182 * This is the OS-dependent handler for link status changes. The OS
183 * neutral handler takes care of most of the processing for these events,
184 * then calls this handler for any OS-specific processing.
186 void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
187 int speed, int duplex, int pause)
189 struct net_device *dev = adapter->port[port_id];
190 struct port_info *pi = netdev_priv(dev);
191 struct cmac *mac = &pi->mac;
193 /* Skip changes from disabled ports. */
194 if (!netif_running(dev))
197 if (link_stat != netif_carrier_ok(dev)) {
199 t3_mac_enable(mac, MAC_DIRECTION_RX);
200 netif_carrier_on(dev);
202 netif_carrier_off(dev);
203 pi->phy.ops->power_down(&pi->phy, 1);
204 t3_mac_disable(mac, MAC_DIRECTION_RX);
205 t3_link_start(&pi->phy, mac, &pi->link_config);
213 * t3_os_phymod_changed - handle PHY module changes
214 * @adap: the adapter associated with the module change
215 * @port_id: the port index whose PHY module changed
217 * This is the OS-dependent handler for PHY module changes. It is
218 * invoked when a PHY module is removed or inserted for any OS-specific processing.
221 void t3_os_phymod_changed(struct adapter *adap, int port_id)
223 static const char *mod_str[] = {
224 NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
227 const struct net_device *dev = adap->port[port_id];
228 const struct port_info *pi = netdev_priv(dev);
230 if (pi->phy.modtype == phy_modtype_none)
231 printk(KERN_INFO "%s: PHY module unplugged\n", dev->name);
233 printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name,
234 mod_str[pi->phy.modtype]);
237 static void cxgb_set_rxmode(struct net_device *dev)
239 struct t3_rx_mode rm;
240 struct port_info *pi = netdev_priv(dev);
242 init_rx_mode(&rm, dev, dev->mc_list);
243 t3_mac_set_rx_mode(&pi->mac, &rm);
247 * link_start - enable a port
248 * @dev: the device to enable
250 * Performs the MAC and PHY actions needed to enable a port.
252 static void link_start(struct net_device *dev)
254 struct t3_rx_mode rm;
255 struct port_info *pi = netdev_priv(dev);
256 struct cmac *mac = &pi->mac;
258 init_rx_mode(&rm, dev, dev->mc_list);
260 t3_mac_set_mtu(mac, dev->mtu);
261 t3_mac_set_address(mac, 0, dev->dev_addr);
262 t3_mac_set_rx_mode(mac, &rm);
263 t3_link_start(&pi->phy, mac, &pi->link_config);
264 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
267 static inline void cxgb_disable_msi(struct adapter *adapter)
269 if (adapter->flags & USING_MSIX) {
270 pci_disable_msix(adapter->pdev);
271 adapter->flags &= ~USING_MSIX;
272 } else if (adapter->flags & USING_MSI) {
273 pci_disable_msi(adapter->pdev);
274 adapter->flags &= ~USING_MSI;
279 * Interrupt handler for asynchronous events used with MSI-X.
281 static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
283 t3_slow_intr_handler(cookie);
288 * Name the MSI-X interrupts.
290 static void name_msix_vecs(struct adapter *adap)
292 int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
294 snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
295 adap->msix_info[0].desc[n] = 0;
297 for_each_port(adap, j) {
298 struct net_device *d = adap->port[j];
299 const struct port_info *pi = netdev_priv(d);
301 for (i = 0; i < pi->nqsets; i++, msi_idx++) {
302 snprintf(adap->msix_info[msi_idx].desc, n,
303 "%s-%d", d->name, pi->first_qset + i);
304 adap->msix_info[msi_idx].desc[n] = 0;
309 static int request_msix_data_irqs(struct adapter *adap)
311 int i, j, err, qidx = 0;
313 for_each_port(adap, i) {
314 int nqsets = adap2pinfo(adap, i)->nqsets;
316 for (j = 0; j < nqsets; ++j) {
317 err = request_irq(adap->msix_info[qidx + 1].vec,
318 t3_intr_handler(adap,
321 adap->msix_info[qidx + 1].desc,
322 &adap->sge.qs[qidx]);
325 free_irq(adap->msix_info[qidx + 1].vec,
326 &adap->sge.qs[qidx]);
335 static void free_irq_resources(struct adapter *adapter)
337 if (adapter->flags & USING_MSIX) {
340 free_irq(adapter->msix_info[0].vec, adapter);
341 for_each_port(adapter, i)
342 n += adap2pinfo(adapter, i)->nqsets;
344 for (i = 0; i < n; ++i)
345 free_irq(adapter->msix_info[i + 1].vec,
346 &adapter->sge.qs[i]);
348 free_irq(adapter->pdev->irq, adapter);
351 static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
356 while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
364 static int init_tp_parity(struct adapter *adap)
368 struct cpl_set_tcb_field *greq;
369 unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;
371 t3_tp_set_offload_mode(adap, 1);
373 for (i = 0; i < 16; i++) {
374 struct cpl_smt_write_req *req;
376 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
377 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
378 memset(req, 0, sizeof(*req));
379 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
380 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
382 t3_mgmt_tx(adap, skb);
385 for (i = 0; i < 2048; i++) {
386 struct cpl_l2t_write_req *req;
388 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
389 req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
390 memset(req, 0, sizeof(*req));
391 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
392 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
393 req->params = htonl(V_L2T_W_IDX(i));
394 t3_mgmt_tx(adap, skb);
397 for (i = 0; i < 2048; i++) {
398 struct cpl_rte_write_req *req;
400 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
401 req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
402 memset(req, 0, sizeof(*req));
403 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
404 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
405 req->l2t_idx = htonl(V_L2T_W_IDX(i));
406 t3_mgmt_tx(adap, skb);
409 skb = alloc_skb(sizeof(*greq), GFP_KERNEL | __GFP_NOFAIL);
410 greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
411 memset(greq, 0, sizeof(*greq));
412 greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
413 OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
414 greq->mask = cpu_to_be64(1);
415 t3_mgmt_tx(adap, skb);
417 i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
418 t3_tp_set_offload_mode(adap, 0);
423 * setup_rss - configure RSS
426 * Sets up RSS to distribute packets to multiple receive queues. We
427 * configure the RSS CPU lookup table to distribute to the number of HW
428 * receive queues, and the response queue lookup table to narrow that
429 * down to the response queues actually configured for each port.
430 * We always configure the RSS mapping for two ports since the mapping
431 * table has plenty of entries.
433 static void setup_rss(struct adapter *adap)
436 unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
437 unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
438 u8 cpus[SGE_QSETS + 1];
439 u16 rspq_map[RSS_TABLE_SIZE];
441 for (i = 0; i < SGE_QSETS; ++i)
443 cpus[SGE_QSETS] = 0xff; /* terminator */
445 for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
446 rspq_map[i] = i % nq0;
447 rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
450 t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
451 F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
452 V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
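	/*
	 * Worked example of the table built above: with nq0 = nq1 = 2, the
	 * first half of rspq_map reads 0,1,0,1,... (port 0's queue sets) and
	 * the second half reads 2,3,2,3,... (port 1's queue sets, offset by
	 * nq0).
	 */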
455 static void init_napi(struct adapter *adap)
459 for (i = 0; i < SGE_QSETS; i++) {
460 struct sge_qset *qs = &adap->sge.qs[i];
463 netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
468 * netif_napi_add() can be called only once per napi_struct because it
469 * adds each new napi_struct to a list. Be careful not to call it a
470 * second time, e.g., during EEH recovery, by making a note of it.
472 adap->flags |= NAPI_INIT;
476 * Wait until all NAPI handlers are descheduled. This includes the handlers of
477 * both netdevices representing interfaces and the dummy ones for the extra queues.
480 static void quiesce_rx(struct adapter *adap)
484 for (i = 0; i < SGE_QSETS; i++)
485 if (adap->sge.qs[i].adap)
486 napi_disable(&adap->sge.qs[i].napi);
489 static void enable_all_napi(struct adapter *adap)
492 for (i = 0; i < SGE_QSETS; i++)
493 if (adap->sge.qs[i].adap)
494 napi_enable(&adap->sge.qs[i].napi);
498 * set_qset_lro - Turn a queue set's LRO capability on and off
499 * @dev: the device the qset is attached to
500 * @qset_idx: the queue set index
501 * @val: the LRO switch
503 * Sets LRO on or off for a particular queue set.
504 * The device's features flag is updated to reflect the LRO
505 * capability when all queues belonging to the device are in LRO mode.
508 static void set_qset_lro(struct net_device *dev, int qset_idx, int val)
510 struct port_info *pi = netdev_priv(dev);
511 struct adapter *adapter = pi->adapter;
514 adapter->params.sge.qset[qset_idx].lro = !!val;
515 adapter->sge.qs[qset_idx].lro_enabled = !!val;
517 /* let ethtool report LRO on only if all queues are LRO enabled */
518 for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; ++i)
519 lro_on &= adapter->params.sge.qset[i].lro;
522 dev->features |= NETIF_F_LRO;
524 dev->features &= ~NETIF_F_LRO;
528 * setup_sge_qsets - configure SGE Tx/Rx/response queues
531 * Determines how many sets of SGE queues to use and initializes them.
532 * We support multiple queue sets per port if we have MSI-X, otherwise
533 * just one queue set per port.
535 static int setup_sge_qsets(struct adapter *adap)
537 int i, j, err, irq_idx = 0, qset_idx = 0;
538 unsigned int ntxq = SGE_TXQ_PER_SET;
540 if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
543 for_each_port(adap, i) {
544 struct net_device *dev = adap->port[i];
545 struct port_info *pi = netdev_priv(dev);
547 pi->qs = &adap->sge.qs[pi->first_qset];
548 for (j = pi->first_qset; j < pi->first_qset + pi->nqsets;
550 set_qset_lro(dev, qset_idx, pi->rx_offload & T3_LRO);
551 err = t3_sge_alloc_qset(adap, qset_idx, 1,
552 (adap->flags & USING_MSIX) ? qset_idx + 1 :
554 &adap->params.sge.qset[qset_idx], ntxq, dev,
555 netdev_get_tx_queue(dev, j));
557 t3_stop_sge_timers(adap);
558 t3_free_sge_resources(adap);
567 static ssize_t attr_show(struct device *d, char *buf,
568 ssize_t(*format) (struct net_device *, char *))
572 /* Synchronize with ioctls that may shut down the device */
574 len = (*format) (to_net_dev(d), buf);
579 static ssize_t attr_store(struct device *d,
580 const char *buf, size_t len,
581 ssize_t(*set) (struct net_device *, unsigned int),
582 unsigned int min_val, unsigned int max_val)
588 if (!capable(CAP_NET_ADMIN))
591 val = simple_strtoul(buf, &endp, 0);
592 if (endp == buf || val < min_val || val > max_val)
596 ret = (*set) (to_net_dev(d), val);
603 #define CXGB3_SHOW(name, val_expr) \
604 static ssize_t format_##name(struct net_device *dev, char *buf) \
606 struct port_info *pi = netdev_priv(dev); \
607 struct adapter *adap = pi->adapter; \
608 return sprintf(buf, "%u\n", val_expr); \
610 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
613 return attr_show(d, buf, format_##name); \
616 static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
618 struct port_info *pi = netdev_priv(dev);
619 struct adapter *adap = pi->adapter;
620 int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;
622 if (adap->flags & FULL_INIT_DONE)
624 if (val && adap->params.rev == 0)
626 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
629 adap->params.mc5.nfilters = val;
633 static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
634 const char *buf, size_t len)
636 return attr_store(d, buf, len, set_nfilters, 0, ~0);
639 static ssize_t set_nservers(struct net_device *dev, unsigned int val)
641 struct port_info *pi = netdev_priv(dev);
642 struct adapter *adap = pi->adapter;
644 if (adap->flags & FULL_INIT_DONE)
646 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
649 adap->params.mc5.nservers = val;
653 static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
654 const char *buf, size_t len)
656 return attr_store(d, buf, len, set_nservers, 0, ~0);
659 #define CXGB3_ATTR_R(name, val_expr) \
660 CXGB3_SHOW(name, val_expr) \
661 static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
663 #define CXGB3_ATTR_RW(name, val_expr, store_method) \
664 CXGB3_SHOW(name, val_expr) \
665 static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
667 CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
668 CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
669 CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
671 static struct attribute *cxgb3_attrs[] = {
672 &dev_attr_cam_size.attr,
673 &dev_attr_nfilters.attr,
674 &dev_attr_nservers.attr,
678 static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
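/*
 * These attributes surface through sysfs.  Assuming the group is registered on
 * a port's net_device kobject elsewhere in the driver, reading and writing
 * would look roughly like:
 *
 *	cat /sys/class/net/ethX/cam_size
 *	echo 8192 > /sys/class/net/ethX/nfilters
 */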
680 static ssize_t tm_attr_show(struct device *d,
681 char *buf, int sched)
683 struct port_info *pi = netdev_priv(to_net_dev(d));
684 struct adapter *adap = pi->adapter;
685 unsigned int v, addr, bpt, cpt;
688 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
690 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
691 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
694 bpt = (v >> 8) & 0xff;
697 len = sprintf(buf, "disabled\n");
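		/*
		 * Rate reconstruction for the enabled case below: cclk (the VPD
		 * core-clock value, apparently in kHz given the factor of 1000)
		 * * 1000 / cpt gives scheduler ticks per second, multiplying by
		 * bpt (bytes per tick) gives bytes/sec, and dividing by 125
		 * converts bytes/sec to Kbps (125 bytes == 1000 bits).
		 */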
699 v = (adap->params.vpd.cclk * 1000) / cpt;
700 len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
706 static ssize_t tm_attr_store(struct device *d,
707 const char *buf, size_t len, int sched)
709 struct port_info *pi = netdev_priv(to_net_dev(d));
710 struct adapter *adap = pi->adapter;
715 if (!capable(CAP_NET_ADMIN))
718 val = simple_strtoul(buf, &endp, 0);
719 if (endp == buf || val > 10000000)
723 ret = t3_config_sched(adap, val, sched);
730 #define TM_ATTR(name, sched) \
731 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
734 return tm_attr_show(d, buf, sched); \
736 static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
737 const char *buf, size_t len) \
739 return tm_attr_store(d, buf, len, sched); \
741 static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
752 static struct attribute *offload_attrs[] = {
753 &dev_attr_sched0.attr,
754 &dev_attr_sched1.attr,
755 &dev_attr_sched2.attr,
756 &dev_attr_sched3.attr,
757 &dev_attr_sched4.attr,
758 &dev_attr_sched5.attr,
759 &dev_attr_sched6.attr,
760 &dev_attr_sched7.attr,
764 static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
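/*
 * The sched0..sched7 attributes expose the Tx traffic schedulers via
 * tm_attr_store(), which hands the value to t3_config_sched().  Going by the
 * Kbps formatting on the show side, a write such as
 *
 *	echo 100000 > sched0
 *
 * would request roughly 100 Mbps, assuming the group is registered on a
 * port's net_device kobject as with the group above.
 */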
767 * Sends an sk_buff to an offload queue driver
768 * after dealing with any active network taps.
770 static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
775 ret = t3_offload_tx(tdev, skb);
780 static int write_smt_entry(struct adapter *adapter, int idx)
782 struct cpl_smt_write_req *req;
783 struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
788 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
789 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
790 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
791 req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */
793 memset(req->src_mac1, 0, sizeof(req->src_mac1));
794 memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
796 offload_tx(&adapter->tdev, skb);
800 static int init_smt(struct adapter *adapter)
804 for_each_port(adapter, i)
805 write_smt_entry(adapter, i);
809 static void init_port_mtus(struct adapter *adapter)
811 unsigned int mtus = adapter->port[0]->mtu;
813 if (adapter->port[1])
814 mtus |= adapter->port[1]->mtu << 16;
815 t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
818 static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
822 struct mngt_pktsched_wr *req;
825 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
826 req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
827 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
828 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
834 ret = t3_mgmt_tx(adap, skb);
839 static int bind_qsets(struct adapter *adap)
843 for_each_port(adap, i) {
844 const struct port_info *pi = adap2pinfo(adap, i);
846 for (j = 0; j < pi->nqsets; ++j) {
847 int ret = send_pktsched_cmd(adap, 1,
848 pi->first_qset + j, -1,
858 #define FW_FNAME "cxgb3/t3fw-%d.%d.%d.bin"
859 #define TPSRAM_NAME "cxgb3/t3%c_psram-%d.%d.%d.bin"
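/*
 * These expand via snprintf() below to names like
 * "cxgb3/t3fw-<major>.<minor>.<micro>.bin" and
 * "cxgb3/t3<rev>_psram-<major>.<minor>.<micro>.bin", which are then passed to
 * request_firmware().
 */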
861 static int upgrade_fw(struct adapter *adap)
865 const struct firmware *fw;
866 struct device *dev = &adap->pdev->dev;
868 snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
869 FW_VERSION_MINOR, FW_VERSION_MICRO);
870 ret = request_firmware(&fw, buf, dev);
872 dev_err(dev, "could not upgrade firmware: unable to load %s\n",
876 ret = t3_load_fw(adap, fw->data, fw->size);
877 release_firmware(fw);
880 dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
881 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
883 dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
884 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
889 static inline char t3rev2char(struct adapter *adapter)
893 switch(adapter->params.rev) {
905 static int update_tpsram(struct adapter *adap)
907 const struct firmware *tpsram;
909 struct device *dev = &adap->pdev->dev;
913 rev = t3rev2char(adap);
917 snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
918 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
920 ret = request_firmware(&tpsram, buf, dev);
922 dev_err(dev, "could not load TP SRAM: unable to load %s\n",
927 ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
931 ret = t3_set_proto_sram(adap, tpsram->data);
934 "successful update of protocol engine "
936 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
938 dev_err(dev, "failed to update protocol engine %d.%d.%d\n",
939 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
941 dev_err(dev, "loading protocol SRAM failed\n");
944 release_firmware(tpsram);
950 * cxgb_up - enable the adapter
951 * @adapter: adapter being enabled
953 * Called when the first port is enabled, this function performs the
954 * actions necessary to make an adapter operational, such as completing
955 * the initialization of HW modules, and enabling interrupts.
957 * Must be called with the rtnl lock held.
959 static int cxgb_up(struct adapter *adap)
963 if (!(adap->flags & FULL_INIT_DONE)) {
964 err = t3_check_fw_version(adap);
965 if (err == -EINVAL) {
966 err = upgrade_fw(adap);
967 CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
968 FW_VERSION_MAJOR, FW_VERSION_MINOR,
969 FW_VERSION_MICRO, err ? "failed" : "succeeded");
972 err = t3_check_tpsram_version(adap);
973 if (err == -EINVAL) {
974 err = update_tpsram(adap);
975 CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
976 TP_VERSION_MAJOR, TP_VERSION_MINOR,
977 TP_VERSION_MICRO, err ? "failed" : "succeeded");
981 * Clear interrupts now to catch errors if t3_init_hw fails.
982 * We clear them again later as initialization may trigger
983 * conditions that can interrupt.
987 err = t3_init_hw(adap, 0);
991 t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
992 t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
994 err = setup_sge_qsets(adap);
999 if (!(adap->flags & NAPI_INIT))
1001 adap->flags |= FULL_INIT_DONE;
1004 t3_intr_clear(adap);
1006 if (adap->flags & USING_MSIX) {
1007 name_msix_vecs(adap);
1008 err = request_irq(adap->msix_info[0].vec,
1009 t3_async_intr_handler, 0,
1010 adap->msix_info[0].desc, adap);
1014 err = request_msix_data_irqs(adap);
1016 free_irq(adap->msix_info[0].vec, adap);
1019 } else if ((err = request_irq(adap->pdev->irq,
1020 t3_intr_handler(adap,
1021 adap->sge.qs[0].rspq.
1023 (adap->flags & USING_MSI) ?
1028 enable_all_napi(adap);
1030 t3_intr_enable(adap);
1032 if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
1033 is_offload(adap) && init_tp_parity(adap) == 0)
1034 adap->flags |= TP_PARITY_INIT;
1036 if (adap->flags & TP_PARITY_INIT) {
1037 t3_write_reg(adap, A_TP_INT_CAUSE,
1038 F_CMCACHEPERR | F_ARPLUTPERR);
1039 t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
1042 if (!(adap->flags & QUEUES_BOUND)) {
1043 err = bind_qsets(adap);
1045 CH_ERR(adap, "failed to bind qsets, err %d\n", err);
1046 t3_intr_disable(adap);
1047 free_irq_resources(adap);
1050 adap->flags |= QUEUES_BOUND;
1056 CH_ERR(adap, "request_irq failed, err %d\n", err);
1061 * Release resources when all the ports and offloading have been stopped.
1063 static void cxgb_down(struct adapter *adapter)
1065 t3_sge_stop(adapter);
1066 spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */
1067 t3_intr_disable(adapter);
1068 spin_unlock_irq(&adapter->work_lock);
1070 free_irq_resources(adapter);
1071 flush_workqueue(cxgb3_wq); /* wait for external IRQ handler */
1072 quiesce_rx(adapter);
1075 static void schedule_chk_task(struct adapter *adap)
1079 timeo = adap->params.linkpoll_period ?
1080 (HZ * adap->params.linkpoll_period) / 10 :
1081 adap->params.stats_update_period * HZ;
1083 queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
1086 static int offload_open(struct net_device *dev)
1088 struct port_info *pi = netdev_priv(dev);
1089 struct adapter *adapter = pi->adapter;
1090 struct t3cdev *tdev = dev2t3cdev(dev);
1091 int adap_up = adapter->open_device_map & PORT_MASK;
1094 if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1097 if (!adap_up && (err = cxgb_up(adapter)) < 0)
1100 t3_tp_set_offload_mode(adapter, 1);
1101 tdev->lldev = adapter->port[0];
1102 err = cxgb3_offload_activate(adapter);
1106 init_port_mtus(adapter);
1107 t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
1108 adapter->params.b_wnd,
1109 adapter->params.rev == 0 ?
1110 adapter->port[0]->mtu : 0xffff);
1113 if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
1114 dev_dbg(&dev->dev, "cannot create sysfs group\n");
1116 /* Call back all registered clients */
1117 cxgb3_add_clients(tdev);
1120 /* restore them in case the offload module has changed them */
1122 t3_tp_set_offload_mode(adapter, 0);
1123 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1124 cxgb3_set_dummy_ops(tdev);
1129 static int offload_close(struct t3cdev *tdev)
1131 struct adapter *adapter = tdev2adap(tdev);
1133 if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1136 /* Call back all registered clients */
1137 cxgb3_remove_clients(tdev);
1139 sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
1142 cxgb3_set_dummy_ops(tdev);
1143 t3_tp_set_offload_mode(adapter, 0);
1144 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1146 if (!adapter->open_device_map)
1149 cxgb3_offload_deactivate(adapter);
1153 static int cxgb_open(struct net_device *dev)
1155 struct port_info *pi = netdev_priv(dev);
1156 struct adapter *adapter = pi->adapter;
1157 int other_ports = adapter->open_device_map & PORT_MASK;
1160 if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
1163 set_bit(pi->port_id, &adapter->open_device_map);
1164 if (is_offload(adapter) && !ofld_disable) {
1165 err = offload_open(dev);
1168 "Could not initialize offload capabilities\n");
1171 dev->real_num_tx_queues = pi->nqsets;
1173 t3_port_intr_enable(adapter, pi->port_id);
1174 netif_tx_start_all_queues(dev);
1176 schedule_chk_task(adapter);
1181 static int cxgb_close(struct net_device *dev)
1183 struct port_info *pi = netdev_priv(dev);
1184 struct adapter *adapter = pi->adapter;
1186 t3_port_intr_disable(adapter, pi->port_id);
1187 netif_tx_stop_all_queues(dev);
1188 pi->phy.ops->power_down(&pi->phy, 1);
1189 netif_carrier_off(dev);
1190 t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
1192 spin_lock_irq(&adapter->work_lock); /* sync with update task */
1193 clear_bit(pi->port_id, &adapter->open_device_map);
1194 spin_unlock_irq(&adapter->work_lock);
1196 if (!(adapter->open_device_map & PORT_MASK))
1197 cancel_rearming_delayed_workqueue(cxgb3_wq,
1198 &adapter->adap_check_task);
1200 if (!adapter->open_device_map)
1206 static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
1208 struct port_info *pi = netdev_priv(dev);
1209 struct adapter *adapter = pi->adapter;
1210 struct net_device_stats *ns = &pi->netstats;
1211 const struct mac_stats *pstats;
1213 spin_lock(&adapter->stats_lock);
1214 pstats = t3_mac_update_stats(&pi->mac);
1215 spin_unlock(&adapter->stats_lock);
1217 ns->tx_bytes = pstats->tx_octets;
1218 ns->tx_packets = pstats->tx_frames;
1219 ns->rx_bytes = pstats->rx_octets;
1220 ns->rx_packets = pstats->rx_frames;
1221 ns->multicast = pstats->rx_mcast_frames;
1223 ns->tx_errors = pstats->tx_underrun;
1224 ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
1225 pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
1226 pstats->rx_fifo_ovfl;
1228 /* detailed rx_errors */
1229 ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
1230 ns->rx_over_errors = 0;
1231 ns->rx_crc_errors = pstats->rx_fcs_errs;
1232 ns->rx_frame_errors = pstats->rx_symbol_errs;
1233 ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
1234 ns->rx_missed_errors = pstats->rx_cong_drops;
1236 /* detailed tx_errors */
1237 ns->tx_aborted_errors = 0;
1238 ns->tx_carrier_errors = 0;
1239 ns->tx_fifo_errors = pstats->tx_underrun;
1240 ns->tx_heartbeat_errors = 0;
1241 ns->tx_window_errors = 0;
1245 static u32 get_msglevel(struct net_device *dev)
1247 struct port_info *pi = netdev_priv(dev);
1248 struct adapter *adapter = pi->adapter;
1250 return adapter->msg_enable;
1253 static void set_msglevel(struct net_device *dev, u32 val)
1255 struct port_info *pi = netdev_priv(dev);
1256 struct adapter *adapter = pi->adapter;
1258 adapter->msg_enable = val;
1261 static char stats_strings[][ETH_GSTRING_LEN] = {
1264 "TxMulticastFramesOK",
1265 "TxBroadcastFramesOK",
1272 "TxFrames128To255 ",
1273 "TxFrames256To511 ",
1274 "TxFrames512To1023 ",
1275 "TxFrames1024To1518 ",
1276 "TxFrames1519ToMax ",
1280 "RxMulticastFramesOK",
1281 "RxBroadcastFramesOK",
1292 "RxFrames128To255 ",
1293 "RxFrames256To511 ",
1294 "RxFrames512To1023 ",
1295 "RxFrames1024To1518 ",
1296 "RxFrames1519ToMax ",
1309 "CheckTXEnToggled ",
1314 static int get_sset_count(struct net_device *dev, int sset)
1318 return ARRAY_SIZE(stats_strings);
1324 #define T3_REGMAP_SIZE (3 * 1024)
1326 static int get_regs_len(struct net_device *dev)
1328 return T3_REGMAP_SIZE;
1331 static int get_eeprom_len(struct net_device *dev)
1336 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1338 struct port_info *pi = netdev_priv(dev);
1339 struct adapter *adapter = pi->adapter;
1343 spin_lock(&adapter->stats_lock);
1344 t3_get_fw_version(adapter, &fw_vers);
1345 t3_get_tp_version(adapter, &tp_vers);
1346 spin_unlock(&adapter->stats_lock);
1348 strcpy(info->driver, DRV_NAME);
1349 strcpy(info->version, DRV_VERSION);
1350 strcpy(info->bus_info, pci_name(adapter->pdev));
1352 strcpy(info->fw_version, "N/A");
1354 snprintf(info->fw_version, sizeof(info->fw_version),
1355 "%s %u.%u.%u TP %u.%u.%u",
1356 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1357 G_FW_VERSION_MAJOR(fw_vers),
1358 G_FW_VERSION_MINOR(fw_vers),
1359 G_FW_VERSION_MICRO(fw_vers),
1360 G_TP_VERSION_MAJOR(tp_vers),
1361 G_TP_VERSION_MINOR(tp_vers),
1362 G_TP_VERSION_MICRO(tp_vers));
1366 static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
1368 if (stringset == ETH_SS_STATS)
1369 memcpy(data, stats_strings, sizeof(stats_strings));
1372 static unsigned long collect_sge_port_stats(struct adapter *adapter,
1373 struct port_info *p, int idx)
1376 unsigned long tot = 0;
1378 for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
1379 tot += adapter->sge.qs[i].port_stats[idx];
1383 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1386 struct port_info *pi = netdev_priv(dev);
1387 struct adapter *adapter = pi->adapter;
1388 const struct mac_stats *s;
1390 spin_lock(&adapter->stats_lock);
1391 s = t3_mac_update_stats(&pi->mac);
1392 spin_unlock(&adapter->stats_lock);
1394 *data++ = s->tx_octets;
1395 *data++ = s->tx_frames;
1396 *data++ = s->tx_mcast_frames;
1397 *data++ = s->tx_bcast_frames;
1398 *data++ = s->tx_pause;
1399 *data++ = s->tx_underrun;
1400 *data++ = s->tx_fifo_urun;
1402 *data++ = s->tx_frames_64;
1403 *data++ = s->tx_frames_65_127;
1404 *data++ = s->tx_frames_128_255;
1405 *data++ = s->tx_frames_256_511;
1406 *data++ = s->tx_frames_512_1023;
1407 *data++ = s->tx_frames_1024_1518;
1408 *data++ = s->tx_frames_1519_max;
1410 *data++ = s->rx_octets;
1411 *data++ = s->rx_frames;
1412 *data++ = s->rx_mcast_frames;
1413 *data++ = s->rx_bcast_frames;
1414 *data++ = s->rx_pause;
1415 *data++ = s->rx_fcs_errs;
1416 *data++ = s->rx_symbol_errs;
1417 *data++ = s->rx_short;
1418 *data++ = s->rx_jabber;
1419 *data++ = s->rx_too_long;
1420 *data++ = s->rx_fifo_ovfl;
1422 *data++ = s->rx_frames_64;
1423 *data++ = s->rx_frames_65_127;
1424 *data++ = s->rx_frames_128_255;
1425 *data++ = s->rx_frames_256_511;
1426 *data++ = s->rx_frames_512_1023;
1427 *data++ = s->rx_frames_1024_1518;
1428 *data++ = s->rx_frames_1519_max;
1430 *data++ = pi->phy.fifo_errors;
1432 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1433 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1434 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1435 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1436 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1437 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_AGGR);
1438 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_FLUSHED);
1439 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_NO_DESC);
1440 *data++ = s->rx_cong_drops;
1442 *data++ = s->num_toggled;
1443 *data++ = s->num_resets;
1446 static inline void reg_block_dump(struct adapter *ap, void *buf,
1447 unsigned int start, unsigned int end)
1449 u32 *p = buf + start;
1451 for (; start <= end; start += sizeof(u32))
1452 *p++ = t3_read_reg(ap, start);
1455 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1458 struct port_info *pi = netdev_priv(dev);
1459 struct adapter *ap = pi->adapter;
1463 * bits 0..9: chip version
1464 * bits 10..15: chip revision
1465 * bit 31: set for PCIe cards
1467 regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1470 * We skip the MAC statistics registers because they are clear-on-read.
1471 * Also reading multi-register stats would need to synchronize with the
1472 * periodic mac stats accumulation. Hard to justify the complexity.
1474 memset(buf, 0, T3_REGMAP_SIZE);
1475 reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1476 reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1477 reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1478 reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1479 reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1480 reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1481 XGM_REG(A_XGM_SERDES_STAT3, 1));
1482 reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1483 XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1486 static int restart_autoneg(struct net_device *dev)
1488 struct port_info *p = netdev_priv(dev);
1490 if (!netif_running(dev))
1492 if (p->link_config.autoneg != AUTONEG_ENABLE)
1494 p->phy.ops->autoneg_restart(&p->phy);
1498 static int cxgb3_phys_id(struct net_device *dev, u32 data)
1500 struct port_info *pi = netdev_priv(dev);
1501 struct adapter *adapter = pi->adapter;
1507 for (i = 0; i < data * 2; i++) {
1508 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1509 (i & 1) ? F_GPIO0_OUT_VAL : 0);
1510 if (msleep_interruptible(500))
1513 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1518 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1520 struct port_info *p = netdev_priv(dev);
1522 cmd->supported = p->link_config.supported;
1523 cmd->advertising = p->link_config.advertising;
1525 if (netif_carrier_ok(dev)) {
1526 cmd->speed = p->link_config.speed;
1527 cmd->duplex = p->link_config.duplex;
1533 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1534 cmd->phy_address = p->phy.addr;
1535 cmd->transceiver = XCVR_EXTERNAL;
1536 cmd->autoneg = p->link_config.autoneg;
1542 static int speed_duplex_to_caps(int speed, int duplex)
1548 if (duplex == DUPLEX_FULL)
1549 cap = SUPPORTED_10baseT_Full;
1551 cap = SUPPORTED_10baseT_Half;
1554 if (duplex == DUPLEX_FULL)
1555 cap = SUPPORTED_100baseT_Full;
1557 cap = SUPPORTED_100baseT_Half;
1560 if (duplex == DUPLEX_FULL)
1561 cap = SUPPORTED_1000baseT_Full;
1563 cap = SUPPORTED_1000baseT_Half;
1566 if (duplex == DUPLEX_FULL)
1567 cap = SUPPORTED_10000baseT_Full;
1572 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1573 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1574 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1575 ADVERTISED_10000baseT_Full)
1577 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1580 struct port_info *p = netdev_priv(dev);
1581 struct link_config *lc = &p->link_config;
1583 if (!(lc->supported & SUPPORTED_Autoneg)) {
1585 * PHY offers a single speed/duplex. See if that's what's being requested.
1588 if (cmd->autoneg == AUTONEG_DISABLE) {
1589 cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1590 if (lc->supported & cap)
1596 if (cmd->autoneg == AUTONEG_DISABLE) {
1597 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1599 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
1601 lc->requested_speed = cmd->speed;
1602 lc->requested_duplex = cmd->duplex;
1603 lc->advertising = 0;
1605 cmd->advertising &= ADVERTISED_MASK;
1606 cmd->advertising &= lc->supported;
1607 if (!cmd->advertising)
1609 lc->requested_speed = SPEED_INVALID;
1610 lc->requested_duplex = DUPLEX_INVALID;
1611 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1613 lc->autoneg = cmd->autoneg;
1614 if (netif_running(dev))
1615 t3_link_start(&p->phy, &p->mac, lc);
1619 static void get_pauseparam(struct net_device *dev,
1620 struct ethtool_pauseparam *epause)
1622 struct port_info *p = netdev_priv(dev);
1624 epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1625 epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1626 epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1629 static int set_pauseparam(struct net_device *dev,
1630 struct ethtool_pauseparam *epause)
1632 struct port_info *p = netdev_priv(dev);
1633 struct link_config *lc = &p->link_config;
1635 if (epause->autoneg == AUTONEG_DISABLE)
1636 lc->requested_fc = 0;
1637 else if (lc->supported & SUPPORTED_Autoneg)
1638 lc->requested_fc = PAUSE_AUTONEG;
1642 if (epause->rx_pause)
1643 lc->requested_fc |= PAUSE_RX;
1644 if (epause->tx_pause)
1645 lc->requested_fc |= PAUSE_TX;
1646 if (lc->autoneg == AUTONEG_ENABLE) {
1647 if (netif_running(dev))
1648 t3_link_start(&p->phy, &p->mac, lc);
1650 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1651 if (netif_running(dev))
1652 t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1657 static u32 get_rx_csum(struct net_device *dev)
1659 struct port_info *p = netdev_priv(dev);
1661 return p->rx_offload & T3_RX_CSUM;
1664 static int set_rx_csum(struct net_device *dev, u32 data)
1666 struct port_info *p = netdev_priv(dev);
1669 p->rx_offload |= T3_RX_CSUM;
1673 p->rx_offload &= ~(T3_RX_CSUM | T3_LRO);
1674 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
1675 set_qset_lro(dev, i, 0);
1680 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1682 struct port_info *pi = netdev_priv(dev);
1683 struct adapter *adapter = pi->adapter;
1684 const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1686 e->rx_max_pending = MAX_RX_BUFFERS;
1687 e->rx_mini_max_pending = 0;
1688 e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1689 e->tx_max_pending = MAX_TXQ_ENTRIES;
1691 e->rx_pending = q->fl_size;
1692 e->rx_mini_pending = q->rspq_size;
1693 e->rx_jumbo_pending = q->jumbo_size;
1694 e->tx_pending = q->txq_size[0];
1697 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1699 struct port_info *pi = netdev_priv(dev);
1700 struct adapter *adapter = pi->adapter;
1701 struct qset_params *q;
1704 if (e->rx_pending > MAX_RX_BUFFERS ||
1705 e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1706 e->tx_pending > MAX_TXQ_ENTRIES ||
1707 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1708 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1709 e->rx_pending < MIN_FL_ENTRIES ||
1710 e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1711 e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1714 if (adapter->flags & FULL_INIT_DONE)
1717 q = &adapter->params.sge.qset[pi->first_qset];
1718 for (i = 0; i < pi->nqsets; ++i, ++q) {
1719 q->rspq_size = e->rx_mini_pending;
1720 q->fl_size = e->rx_pending;
1721 q->jumbo_size = e->rx_jumbo_pending;
1722 q->txq_size[0] = e->tx_pending;
1723 q->txq_size[1] = e->tx_pending;
1724 q->txq_size[2] = e->tx_pending;
1729 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1731 struct port_info *pi = netdev_priv(dev);
1732 struct adapter *adapter = pi->adapter;
1733 struct qset_params *qsp = &adapter->params.sge.qset[0];
1734 struct sge_qset *qs = &adapter->sge.qs[0];
1736 if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1739 qsp->coalesce_usecs = c->rx_coalesce_usecs;
1740 t3_update_qset_coalesce(qs, qsp);
1744 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1746 struct port_info *pi = netdev_priv(dev);
1747 struct adapter *adapter = pi->adapter;
1748 struct qset_params *q = adapter->params.sge.qset;
1750 c->rx_coalesce_usecs = q->coalesce_usecs;
1754 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1757 struct port_info *pi = netdev_priv(dev);
1758 struct adapter *adapter = pi->adapter;
1761 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1765 e->magic = EEPROM_MAGIC;
1766 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1767 err = t3_seeprom_read(adapter, i, (__le32 *)&buf[i]);
1770 memcpy(data, buf + e->offset, e->len);
1775 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1778 struct port_info *pi = netdev_priv(dev);
1779 struct adapter *adapter = pi->adapter;
1780 u32 aligned_offset, aligned_len;
1785 if (eeprom->magic != EEPROM_MAGIC)
1788 aligned_offset = eeprom->offset & ~3;
1789 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1791 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1792 buf = kmalloc(aligned_len, GFP_KERNEL);
1795 err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
1796 if (!err && aligned_len > 4)
1797 err = t3_seeprom_read(adapter,
1798 aligned_offset + aligned_len - 4,
1799 (__le32 *) & buf[aligned_len - 4]);
1802 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1806 err = t3_seeprom_wp(adapter, 0);
1810 for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
1811 err = t3_seeprom_write(adapter, aligned_offset, *p);
1812 aligned_offset += 4;
1816 err = t3_seeprom_wp(adapter, 1);
1823 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1827 memset(&wol->sopass, 0, sizeof(wol->sopass));
1830 static int cxgb3_set_flags(struct net_device *dev, u32 data)
1832 struct port_info *pi = netdev_priv(dev);
1835 if (data & ETH_FLAG_LRO) {
1836 if (!(pi->rx_offload & T3_RX_CSUM))
1839 pi->rx_offload |= T3_LRO;
1840 for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++)
1841 set_qset_lro(dev, i, 1);
1844 pi->rx_offload &= ~T3_LRO;
1845 for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++)
1846 set_qset_lro(dev, i, 0);
1852 static const struct ethtool_ops cxgb_ethtool_ops = {
1853 .get_settings = get_settings,
1854 .set_settings = set_settings,
1855 .get_drvinfo = get_drvinfo,
1856 .get_msglevel = get_msglevel,
1857 .set_msglevel = set_msglevel,
1858 .get_ringparam = get_sge_param,
1859 .set_ringparam = set_sge_param,
1860 .get_coalesce = get_coalesce,
1861 .set_coalesce = set_coalesce,
1862 .get_eeprom_len = get_eeprom_len,
1863 .get_eeprom = get_eeprom,
1864 .set_eeprom = set_eeprom,
1865 .get_pauseparam = get_pauseparam,
1866 .set_pauseparam = set_pauseparam,
1867 .get_rx_csum = get_rx_csum,
1868 .set_rx_csum = set_rx_csum,
1869 .set_tx_csum = ethtool_op_set_tx_csum,
1870 .set_sg = ethtool_op_set_sg,
1871 .get_link = ethtool_op_get_link,
1872 .get_strings = get_strings,
1873 .phys_id = cxgb3_phys_id,
1874 .nway_reset = restart_autoneg,
1875 .get_sset_count = get_sset_count,
1876 .get_ethtool_stats = get_stats,
1877 .get_regs_len = get_regs_len,
1878 .get_regs = get_regs,
1880 .set_tso = ethtool_op_set_tso,
1881 .get_flags = ethtool_op_get_flags,
1882 .set_flags = cxgb3_set_flags,
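/*
 * Helper for the extension ioctl below: fields the caller left unset are
 * passed as negative values, so in_range() deliberately accepts anything
 * negative as "in range".
 */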
1885 static int in_range(int val, int lo, int hi)
1887 return val < 0 || (val <= hi && val >= lo);
1890 static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1892 struct port_info *pi = netdev_priv(dev);
1893 struct adapter *adapter = pi->adapter;
1897 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1901 case CHELSIO_SET_QSET_PARAMS:{
1903 struct qset_params *q;
1904 struct ch_qset_params t;
1905 int q1 = pi->first_qset;
1906 int nqsets = pi->nqsets;
1908 if (!capable(CAP_NET_ADMIN))
1910 if (copy_from_user(&t, useraddr, sizeof(t)))
1912 if (t.qset_idx >= SGE_QSETS)
1914 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
1915 !in_range(t.cong_thres, 0, 255) ||
1916 !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
1918 !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
1920 !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
1921 MAX_CTRL_TXQ_ENTRIES) ||
1922 !in_range(t.fl_size[0], MIN_FL_ENTRIES,
1924 || !in_range(t.fl_size[1], MIN_FL_ENTRIES,
1925 MAX_RX_JUMBO_BUFFERS)
1926 || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
1930 if ((adapter->flags & FULL_INIT_DONE) && t.lro > 0)
1931 for_each_port(adapter, i) {
1932 pi = adap2pinfo(adapter, i);
1933 if (t.qset_idx >= pi->first_qset &&
1934 t.qset_idx < pi->first_qset + pi->nqsets &&
1935 !(pi->rx_offload & T3_RX_CSUM))
1939 if ((adapter->flags & FULL_INIT_DONE) &&
1940 (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
1941 t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
1942 t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
1943 t.polling >= 0 || t.cong_thres >= 0))
1946 /* Allow setting of any available qset when offload enabled */
1947 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
1949 for_each_port(adapter, i) {
1950 pi = adap2pinfo(adapter, i);
1951 nqsets += pi->first_qset + pi->nqsets;
1955 if (t.qset_idx < q1)
1957 if (t.qset_idx > q1 + nqsets - 1)
1960 q = &adapter->params.sge.qset[t.qset_idx];
1962 if (t.rspq_size >= 0)
1963 q->rspq_size = t.rspq_size;
1964 if (t.fl_size[0] >= 0)
1965 q->fl_size = t.fl_size[0];
1966 if (t.fl_size[1] >= 0)
1967 q->jumbo_size = t.fl_size[1];
1968 if (t.txq_size[0] >= 0)
1969 q->txq_size[0] = t.txq_size[0];
1970 if (t.txq_size[1] >= 0)
1971 q->txq_size[1] = t.txq_size[1];
1972 if (t.txq_size[2] >= 0)
1973 q->txq_size[2] = t.txq_size[2];
1974 if (t.cong_thres >= 0)
1975 q->cong_thres = t.cong_thres;
1976 if (t.intr_lat >= 0) {
1977 struct sge_qset *qs =
1978 &adapter->sge.qs[t.qset_idx];
1980 q->coalesce_usecs = t.intr_lat;
1981 t3_update_qset_coalesce(qs, q);
1983 if (t.polling >= 0) {
1984 if (adapter->flags & USING_MSIX)
1985 q->polling = t.polling;
1987 /* No polling with INTx for T3A */
1988 if (adapter->params.rev == 0 &&
1989 !(adapter->flags & USING_MSI))
1992 for (i = 0; i < SGE_QSETS; i++) {
1993 q = &adapter->params.sge.
1995 q->polling = t.polling;
2000 set_qset_lro(dev, t.qset_idx, t.lro);
2004 case CHELSIO_GET_QSET_PARAMS:{
2005 struct qset_params *q;
2006 struct ch_qset_params t;
2007 int q1 = pi->first_qset;
2008 int nqsets = pi->nqsets;
2011 if (copy_from_user(&t, useraddr, sizeof(t)))
2014 /* Display qsets for all ports when offload enabled */
2015 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2017 for_each_port(adapter, i) {
2018 pi = adap2pinfo(adapter, i);
2019 nqsets = pi->first_qset + pi->nqsets;
2023 if (t.qset_idx >= nqsets)
2026 q = &adapter->params.sge.qset[q1 + t.qset_idx];
2027 t.rspq_size = q->rspq_size;
2028 t.txq_size[0] = q->txq_size[0];
2029 t.txq_size[1] = q->txq_size[1];
2030 t.txq_size[2] = q->txq_size[2];
2031 t.fl_size[0] = q->fl_size;
2032 t.fl_size[1] = q->jumbo_size;
2033 t.polling = q->polling;
2035 t.intr_lat = q->coalesce_usecs;
2036 t.cong_thres = q->cong_thres;
2039 if (adapter->flags & USING_MSIX)
2040 t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
2042 t.vector = adapter->pdev->irq;
2044 if (copy_to_user(useraddr, &t, sizeof(t)))
2048 case CHELSIO_SET_QSET_NUM:{
2049 struct ch_reg edata;
2050 unsigned int i, first_qset = 0, other_qsets = 0;
2052 if (!capable(CAP_NET_ADMIN))
2054 if (adapter->flags & FULL_INIT_DONE)
2056 if (copy_from_user(&edata, useraddr, sizeof(edata)))
2058 if (edata.val < 1 ||
2059 (edata.val > 1 && !(adapter->flags & USING_MSIX)))
2062 for_each_port(adapter, i)
2063 if (adapter->port[i] && adapter->port[i] != dev)
2064 other_qsets += adap2pinfo(adapter, i)->nqsets;
2066 if (edata.val + other_qsets > SGE_QSETS)
2069 pi->nqsets = edata.val;
2071 for_each_port(adapter, i)
2072 if (adapter->port[i]) {
2073 pi = adap2pinfo(adapter, i);
2074 pi->first_qset = first_qset;
2075 first_qset += pi->nqsets;
2079 case CHELSIO_GET_QSET_NUM:{
2080 struct ch_reg edata;
2082 edata.cmd = CHELSIO_GET_QSET_NUM;
2083 edata.val = pi->nqsets;
2084 if (copy_to_user(useraddr, &edata, sizeof(edata)))
2088 case CHELSIO_LOAD_FW:{
2090 struct ch_mem_range t;
2092 if (!capable(CAP_SYS_RAWIO))
2094 if (copy_from_user(&t, useraddr, sizeof(t)))
2096 /* Check t.len sanity ? */
2097 fw_data = kmalloc(t.len, GFP_KERNEL);
2102 (fw_data, useraddr + sizeof(t), t.len)) {
2107 ret = t3_load_fw(adapter, fw_data, t.len);
2113 case CHELSIO_SETMTUTAB:{
2117 if (!is_offload(adapter))
2119 if (!capable(CAP_NET_ADMIN))
2121 if (offload_running(adapter))
2123 if (copy_from_user(&m, useraddr, sizeof(m)))
2125 if (m.nmtus != NMTUS)
2127 if (m.mtus[0] < 81) /* accommodate SACK */
2130 /* MTUs must be in ascending order */
2131 for (i = 1; i < NMTUS; ++i)
2132 if (m.mtus[i] < m.mtus[i - 1])
2135 memcpy(adapter->params.mtus, m.mtus,
2136 sizeof(adapter->params.mtus));
2139 case CHELSIO_GET_PM:{
2140 struct tp_params *p = &adapter->params.tp;
2141 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
2143 if (!is_offload(adapter))
2145 m.tx_pg_sz = p->tx_pg_size;
2146 m.tx_num_pg = p->tx_num_pgs;
2147 m.rx_pg_sz = p->rx_pg_size;
2148 m.rx_num_pg = p->rx_num_pgs;
2149 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
2150 if (copy_to_user(useraddr, &m, sizeof(m)))
2154 case CHELSIO_SET_PM:{
2156 struct tp_params *p = &adapter->params.tp;
2158 if (!is_offload(adapter))
2160 if (!capable(CAP_NET_ADMIN))
2162 if (adapter->flags & FULL_INIT_DONE)
2164 if (copy_from_user(&m, useraddr, sizeof(m)))
2166 if (!is_power_of_2(m.rx_pg_sz) ||
2167 !is_power_of_2(m.tx_pg_sz))
2168 return -EINVAL; /* not power of 2 */
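			/*
			 * The bit masks below encode the allowed power-of-2
			 * page sizes: 0x14000 is 16KB|64KB for Rx, and
			 * 0x1554000 is 16KB|64KB|256KB|1MB|4MB|16MB for Tx.
			 */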
2169 if (!(m.rx_pg_sz & 0x14000))
2170 return -EINVAL; /* not 16KB or 64KB */
2171 if (!(m.tx_pg_sz & 0x1554000))
2173 if (m.tx_num_pg == -1)
2174 m.tx_num_pg = p->tx_num_pgs;
2175 if (m.rx_num_pg == -1)
2176 m.rx_num_pg = p->rx_num_pgs;
2177 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
2179 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
2180 m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
2182 p->rx_pg_size = m.rx_pg_sz;
2183 p->tx_pg_size = m.tx_pg_sz;
2184 p->rx_num_pgs = m.rx_num_pg;
2185 p->tx_num_pgs = m.tx_num_pg;
2188 case CHELSIO_GET_MEM:{
2189 struct ch_mem_range t;
2193 if (!is_offload(adapter))
2195 if (!(adapter->flags & FULL_INIT_DONE))
2196 return -EIO; /* need the memory controllers */
2197 if (copy_from_user(&t, useraddr, sizeof(t)))
2199 if ((t.addr & 7) || (t.len & 7))
2201 if (t.mem_id == MEM_CM)
2203 else if (t.mem_id == MEM_PMRX)
2204 mem = &adapter->pmrx;
2205 else if (t.mem_id == MEM_PMTX)
2206 mem = &adapter->pmtx;
2212 * bits 0..9: chip version
2213 * bits 10..15: chip revision
2215 t.version = 3 | (adapter->params.rev << 10);
2216 if (copy_to_user(useraddr, &t, sizeof(t)))
2220 * Read 256 bytes at a time as len can be large and we don't
2221 * want to use huge intermediate buffers.
2223 useraddr += sizeof(t); /* advance to start of buffer */
2225 unsigned int chunk =
2226 min_t(unsigned int, t.len, sizeof(buf));
2229 t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
2233 if (copy_to_user(useraddr, buf, chunk))
2241 case CHELSIO_SET_TRACE_FILTER:{
2243 const struct trace_params *tp;
2245 if (!capable(CAP_NET_ADMIN))
2247 if (!offload_running(adapter))
2249 if (copy_from_user(&t, useraddr, sizeof(t)))
2252 tp = (const struct trace_params *)&t.sip;
2254 t3_config_trace_filter(adapter, tp, 0,
2258 t3_config_trace_filter(adapter, tp, 1,
2269 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2271 struct mii_ioctl_data *data = if_mii(req);
2272 struct port_info *pi = netdev_priv(dev);
2273 struct adapter *adapter = pi->adapter;
2278 data->phy_id = pi->phy.addr;
2282 struct cphy *phy = &pi->phy;
2284 if (!phy->mdio_read)
2286 if (is_10G(adapter)) {
2287 mmd = data->phy_id >> 8;
2290 else if (mmd > MDIO_DEV_VEND2)
2294 phy->mdio_read(adapter, data->phy_id & 0x1f,
2295 mmd, data->reg_num, &val);
2298 phy->mdio_read(adapter, data->phy_id & 0x1f,
2299 0, data->reg_num & 0x1f,
2302 data->val_out = val;
2306 struct cphy *phy = &pi->phy;
2308 if (!capable(CAP_NET_ADMIN))
2310 if (!phy->mdio_write)
2312 if (is_10G(adapter)) {
2313 mmd = data->phy_id >> 8;
2316 else if (mmd > MDIO_DEV_VEND2)
2320 phy->mdio_write(adapter,
2321 data->phy_id & 0x1f, mmd,
2326 phy->mdio_write(adapter,
2327 data->phy_id & 0x1f, 0,
2328 data->reg_num & 0x1f,
2333 return cxgb_extension_ioctl(dev, req->ifr_data);
2340 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2342 struct port_info *pi = netdev_priv(dev);
2343 struct adapter *adapter = pi->adapter;
2346 if (new_mtu < 81) /* accommodate SACK */
2348 if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2351 init_port_mtus(adapter);
2352 if (adapter->params.rev == 0 && offload_running(adapter))
2353 t3_load_mtus(adapter, adapter->params.mtus,
2354 adapter->params.a_wnd, adapter->params.b_wnd,
2355 adapter->port[0]->mtu);
2359 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2361 struct port_info *pi = netdev_priv(dev);
2362 struct adapter *adapter = pi->adapter;
2363 struct sockaddr *addr = p;
2365 if (!is_valid_ether_addr(addr->sa_data))
2368 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2369 t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2370 if (offload_running(adapter))
2371 write_smt_entry(adapter, pi->port_id);
2376 * t3_synchronize_rx - wait for current Rx processing on a port to complete
2377 * @adap: the adapter
2380 * Ensures that current Rx processing on any of the queues associated with
2381 * the given port completes before returning. We do this by acquiring and
2382 * releasing the locks of the response queues associated with the port.
2384 static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2388 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
2389 struct sge_rspq *q = &adap->sge.qs[i].rspq;
2391 spin_lock_irq(&q->lock);
2392 spin_unlock_irq(&q->lock);
2396 static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2398 struct port_info *pi = netdev_priv(dev);
2399 struct adapter *adapter = pi->adapter;
2402 if (adapter->params.rev > 0)
2403 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2405 /* single control for all ports */
2406 unsigned int i, have_vlans = 0;
2407 for_each_port(adapter, i)
2408 have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2410 t3_set_vlan_accel(adapter, 1, have_vlans);
2412 t3_synchronize_rx(adapter, pi);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int qidx;

	for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
		struct sge_qset *qs = &adapter->sge.qs[qidx];
		void *source = (adapter->flags & USING_MSIX) ?
			       (void *)qs : (void *)adapter;

		t3_intr_handler(adapter, qs->rspq.polling) (0, source);
	}
}
#endif
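/*
 * Netpoll note: cxgb_netpoll() runs with interrupts disabled (netconsole and
 * similar users), so it calls the SGE interrupt handler directly.  Under
 * MSI-X each queue set has its own vector, hence the per-qset cookie; with
 * MSI or legacy interrupts the single handler takes the adapter itself.
 */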
/*
 * Periodic accumulation of MAC statistics.
 */
static void mac_stats_update(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		if (netif_running(dev)) {
			spin_lock(&adapter->stats_lock);
			t3_mac_update_stats(&p->mac);
			spin_unlock(&adapter->stats_lock);
		}
	}
}
static void check_link_status(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev))
			t3_link_changed(adapter, i);
	}
}
static void check_t3b2_mac(struct adapter *adapter)
{
	int i;

	if (!rtnl_trylock())	/* synchronize with ifdown */
		return;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);
		int status;

		if (!netif_running(dev))
			continue;

		status = 0;
		if (netif_running(dev) && netif_carrier_ok(dev))
			status = t3b2_mac_watchdog_task(&p->mac);
		if (status == 1)
			p->mac.stats.num_toggled++;
		else if (status == 2) {
			struct cmac *mac = &p->mac;

			t3_mac_set_mtu(mac, dev->mtu);
			t3_mac_set_address(mac, 0, dev->dev_addr);
			cxgb_set_rxmode(dev);
			t3_link_start(&p->phy, mac, &p->link_config);
			t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
			t3_port_intr_enable(adapter, p->port_id);
			p->mac.stats.num_resets++;
		}
	}
	rtnl_unlock();
}
static void t3_adap_check_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       adap_check_task.work);
	const struct adapter_params *p = &adapter->params;

	adapter->check_task_cnt++;

	/* Check link status for PHYs without interrupts */
	if (p->linkpoll_period)
		check_link_status(adapter);

	/* Accumulate MAC stats if needed */
	if (!p->linkpoll_period ||
	    (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
	    p->stats_update_period) {
		mac_stats_update(adapter);
		adapter->check_task_cnt = 0;
	}

	if (p->rev == T3_REV_B2)
		check_t3b2_mac(adapter);

	/* Schedule the next check update if any port is active. */
	spin_lock_irq(&adapter->work_lock);
	if (adapter->open_device_map & PORT_MASK)
		schedule_chk_task(adapter);
	spin_unlock_irq(&adapter->work_lock);
}
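/*
 * Reading the stats condition above: the task itself runs on the link-poll
 * period and MAC statistics are folded in once enough runs have accumulated
 * to cover stats_update_period.  Assuming linkpoll_period is in tenths of a
 * second (the /10 scaling) and stats_update_period in seconds, e.g.
 * linkpoll_period = 10 and stats_update_period = 60 give:
 *
 *	check_task_cnt * 10 / 10 >= 60  =>  stats update on every 60th run
 *
 * With link polling disabled (linkpoll_period == 0) the stats are updated on
 * every run instead.
 */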
/*
 * Processes external (PHY) interrupts in process context.
 */
static void ext_intr_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       ext_intr_handler_task);

	t3_phy_intr_handler(adapter);

	/* Now reenable external interrupts */
	spin_lock_irq(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask |= F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
	}
	spin_unlock_irq(&adapter->work_lock);
}
/*
 * Interrupt-context handler for external (PHY) interrupts.
 */
void t3_os_ext_intr_handler(struct adapter *adapter)
{
	/*
	 * Schedule a task to handle external interrupts as they may be slow
	 * and we use a mutex to protect MDIO registers.  We disable PHY
	 * interrupts in the meantime and let the task reenable them when
	 * it's done.
	 */
	spin_lock(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask &= ~F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
		queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
	}
	spin_unlock(&adapter->work_lock);
}
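/*
 * The two routines above form a small handshake: the interrupt-context
 * handler clears F_T3DBG from slow_intr_mask, writes the reduced mask to
 * A_PL_INT_ENABLE0 so no further PHY interrupts fire, and queues
 * ext_intr_task.  The task then services the PHY under the MDIO mutex, sets
 * F_T3DBG again, acknowledges the pending cause bit and restores the enable
 * mask.  Both sides test slow_intr_mask first, presumably so that nothing is
 * re-armed once the mask has been cleared for shutdown.
 */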
static int t3_adapter_error(struct adapter *adapter, int reset)
{
	int i, ret = 0;

	/* Stop all ports */
	for_each_port(adapter, i) {
		struct net_device *netdev = adapter->port[i];

		if (netif_running(netdev))
			cxgb_close(netdev);
	}

	if (is_offload(adapter) &&
	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		offload_close(&adapter->tdev);

	/* Stop SGE timers */
	t3_stop_sge_timers(adapter);

	adapter->flags &= ~FULL_INIT_DONE;

	if (reset)
		ret = t3_reset_adapter(adapter);
	pci_disable_device(adapter->pdev);
	return ret;
}
static int t3_reenable_adapter(struct adapter *adapter)
{
	if (pci_enable_device(adapter->pdev)) {
		dev_err(&adapter->pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto err;
	}
	pci_set_master(adapter->pdev);
	pci_restore_state(adapter->pdev);

	/* Free sge resources */
	t3_free_sge_resources(adapter);
	if (t3_replay_prep_adapter(adapter))
		goto err;
	return 0;
err:
	return -1;
}
static void t3_resume_ports(struct adapter *adapter)
{
	int i;

	/* Restart the ports */
	for_each_port(adapter, i) {
		struct net_device *netdev = adapter->port[i];

		if (netif_running(netdev)) {
			if (cxgb_open(netdev))
				dev_err(&adapter->pdev->dev,
					"can't bring device back up"
					" after reset\n");
		}
	}
}
/*
 * Processes a fatal error.
 * Bring the ports down, reset the chip, bring the ports back up.
 */
static void fatal_error_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       fatal_error_handler_task);
	int err = 0;

	rtnl_lock();
	err = t3_adapter_error(adapter, 1);
	if (!err)
		err = t3_reenable_adapter(adapter);
	if (!err)
		t3_resume_ports(adapter);
	CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
	rtnl_unlock();
}
void t3_fatal_err(struct adapter *adapter)
{
	unsigned int fw_status[4];

	if (adapter->flags & FULL_INIT_DONE) {
		t3_sge_stop(adapter);
		t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
		t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
		t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
		t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);

		spin_lock(&adapter->work_lock);
		t3_intr_disable(adapter);
		queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
		spin_unlock(&adapter->work_lock);
	}
	CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
	if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
		CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
			 fw_status[0], fw_status[1],
			 fw_status[2], fw_status[3]);
}
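/*
 * Fatal-error recovery thus proceeds in two stages: t3_fatal_err() above runs
 * in interrupt context, quiesces the SGE and MACs, masks interrupts and
 * queues fatal_error_handler_task; the worker then performs
 * t3_adapter_error(adapter, 1) -> t3_reenable_adapter() -> t3_resume_ports()
 * and reports the outcome via CH_ALERT.
 */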
/**
 * t3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
					     pci_channel_state_t state)
{
	struct adapter *adapter = pci_get_drvdata(pdev);
	int ret;

	ret = t3_adapter_error(adapter, 0);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * t3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (!t3_reenable_adapter(adapter))
		return PCI_ERS_RESULT_RECOVERED;

	return PCI_ERS_RESULT_DISCONNECT;
}
/**
 * t3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void t3_io_resume(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	t3_resume_ports(adapter);
}
static struct pci_error_handlers t3_err_handler = {
	.error_detected = t3_io_error_detected,
	.slot_reset = t3_io_slot_reset,
	.resume = t3_io_resume,
};
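/*
 * These callbacks plug into the standard PCI error-recovery sequence: the
 * core calls .error_detected() when a bus error is reported (the driver
 * quiesces and asks for a slot reset), .slot_reset() after the link/slot has
 * been reset (the driver re-enables the device and reinitializes), and
 * finally .resume() once traffic may flow again.  From this driver's point
 * of view the order of events is roughly:
 *
 *	t3_io_error_detected() -> t3_adapter_error(adap, 0)
 *	t3_io_slot_reset()     -> t3_reenable_adapter(adap)
 *	t3_io_resume()         -> t3_resume_ports(adap)
 */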
2747 * Set the number of qsets based on the number of CPUs and the number of ports,
2748 * not to exceed the number of available qsets, assuming there are enough qsets
2751 static void set_nqsets(struct adapter *adap)
2754 int num_cpus = num_online_cpus();
2755 int hwports = adap->params.nports;
2756 int nqsets = SGE_QSETS;
2758 if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
2760 (hwports * nqsets > SGE_QSETS ||
2761 num_cpus >= nqsets / hwports))
2763 if (nqsets > num_cpus)
2765 if (nqsets < 1 || hwports == 4)
2770 for_each_port(adap, i) {
2771 struct port_info *pi = adap2pinfo(adap, i);
2774 pi->nqsets = nqsets;
2775 j = pi->first_qset + nqsets;
2777 dev_info(&adap->pdev->dev,
2778 "Port %d using %d queue sets.\n", i, nqsets);
static int __devinit cxgb_enable_msix(struct adapter *adap)
{
	struct msix_entry entries[SGE_QSETS + 1];
	int i, err;

	for (i = 0; i < ARRAY_SIZE(entries); ++i)
		entries[i].entry = i;

	err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
	if (!err) {
		for (i = 0; i < ARRAY_SIZE(entries); ++i)
			adap->msix_info[i].vec = entries[i].vector;
	} else if (err > 0)
		dev_info(&adap->pdev->dev,
			 "only %d MSI-X vectors left, not using MSI-X\n", err);

	return err;
}
static void __devinit print_port_info(struct adapter *adap,
				      const struct adapter_info *ai)
{
	static const char *pci_variant[] = {
		"PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
	};

	int i;
	char buf[80];

	if (is_pcie(adap))
		snprintf(buf, sizeof(buf), "%s x%d",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.width);
	else
		snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.speed, adap->params.pci.width);

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		const struct port_info *pi = netdev_priv(dev);

		if (!test_bit(i, &adap->registered_device_map))
			continue;
		printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
		       dev->name, ai->desc, pi->phy.desc,
		       is_offload(adap) ? "R" : "", adap->params.rev, buf,
		       (adap->flags & USING_MSIX) ? " MSI-X" :
		       (adap->flags & USING_MSI) ? " MSI" : "");
		if (adap->name == dev->name && adap->params.vpd.mclk)
			printk(KERN_INFO
			       "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
			       adap->name, t3_mc7_size(&adap->cm) >> 20,
			       t3_mc7_size(&adap->pmtx) >> 20,
			       t3_mc7_size(&adap->pmrx) >> 20,
			       adap->params.vpd.sn);
	}
}
static const struct net_device_ops cxgb_netdev_ops = {
	.ndo_open = cxgb_open,
	.ndo_stop = cxgb_close,
	.ndo_start_xmit = t3_eth_xmit,
	.ndo_get_stats = cxgb_get_stats,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_multicast_list = cxgb_set_rxmode,
	.ndo_do_ioctl = cxgb_ioctl,
	.ndo_change_mtu = cxgb_change_mtu,
	.ndo_set_mac_address = cxgb_set_mac_addr,
	.ndo_vlan_rx_register = vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cxgb_netpoll,
#endif
};
2856 static int __devinit init_one(struct pci_dev *pdev,
2857 const struct pci_device_id *ent)
2859 static int version_printed;
2861 int i, err, pci_using_dac = 0;
2862 unsigned long mmio_start, mmio_len;
2863 const struct adapter_info *ai;
2864 struct adapter *adapter = NULL;
2865 struct port_info *pi;
2867 if (!version_printed) {
2868 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
2873 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
2875 printk(KERN_ERR DRV_NAME
2876 ": cannot initialize work queue\n");
2881 err = pci_request_regions(pdev, DRV_NAME);
2883 /* Just info, some other driver may have claimed the device. */
2884 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
2888 err = pci_enable_device(pdev);
2890 dev_err(&pdev->dev, "cannot enable PCI device\n");
2891 goto out_release_regions;
2894 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2896 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2898 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
2899 "coherent allocations\n");
2900 goto out_disable_device;
2902 } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
2903 dev_err(&pdev->dev, "no usable DMA configuration\n");
2904 goto out_disable_device;
2907 pci_set_master(pdev);
2908 pci_save_state(pdev);
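	/*
	 * The saved config space is what t3_reenable_adapter() restores via
	 * pci_restore_state() when recovering from an adapter reset or a PCI
	 * slot reset, so it has to be captured before any further setup.
	 */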
2910 mmio_start = pci_resource_start(pdev, 0);
2911 mmio_len = pci_resource_len(pdev, 0);
2912 ai = t3_get_adapter_info(ent->driver_data);
2914 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2917 goto out_disable_device;
2920 adapter->regs = ioremap_nocache(mmio_start, mmio_len);
2921 if (!adapter->regs) {
2922 dev_err(&pdev->dev, "cannot map device registers\n");
2924 goto out_free_adapter;
2927 adapter->pdev = pdev;
2928 adapter->name = pci_name(pdev);
2929 adapter->msg_enable = dflt_msg_enable;
2930 adapter->mmio_len = mmio_len;
2932 mutex_init(&adapter->mdio_lock);
2933 spin_lock_init(&adapter->work_lock);
2934 spin_lock_init(&adapter->stats_lock);
2936 INIT_LIST_HEAD(&adapter->adapter_list);
2937 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
2938 INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
2939 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
2941 for (i = 0; i < ai->nports; ++i) {
2942 struct net_device *netdev;
2944 netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
2950 SET_NETDEV_DEV(netdev, &pdev->dev);
2952 adapter->port[i] = netdev;
2953 pi = netdev_priv(netdev);
2954 pi->adapter = adapter;
2955 pi->rx_offload = T3_RX_CSUM | T3_LRO;
2957 netif_carrier_off(netdev);
2958 netif_tx_stop_all_queues(netdev);
2959 netdev->irq = pdev->irq;
2960 netdev->mem_start = mmio_start;
2961 netdev->mem_end = mmio_start + mmio_len - 1;
2962 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
2963 netdev->features |= NETIF_F_LLTX;
2964 netdev->features |= NETIF_F_LRO;
2966 netdev->features |= NETIF_F_HIGHDMA;
2968 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2969 netdev->netdev_ops = &cxgb_netdev_ops;
2970 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
2973 pci_set_drvdata(pdev, adapter);
2974 if (t3_prep_adapter(adapter, ai, 1) < 0) {
2980 * The card is now ready to go. If any errors occur during device
2981 * registration we do not fail the whole card but rather proceed only
2982 * with the ports we manage to register successfully. However we must
2983 * register at least one net device.
2985 for_each_port(adapter, i) {
2986 err = register_netdev(adapter->port[i]);
2988 dev_warn(&pdev->dev,
2989 "cannot register net device %s, skipping\n",
2990 adapter->port[i]->name);
2993 * Change the name we use for messages to the name of
2994 * the first successfully registered interface.
2996 if (!adapter->registered_device_map)
2997 adapter->name = adapter->port[i]->name;
2999 __set_bit(i, &adapter->registered_device_map);
3002 if (!adapter->registered_device_map) {
3003 dev_err(&pdev->dev, "could not register any net devices\n");
3007 /* Driver's ready. Reflect it on LEDs */
3008 t3_led_ready(adapter);
3010 if (is_offload(adapter)) {
3011 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
3012 cxgb3_adapter_ofld(adapter);
3015 /* See what interrupts we'll be using */
3016 if (msi > 1 && cxgb_enable_msix(adapter) == 0)
3017 adapter->flags |= USING_MSIX;
3018 else if (msi > 0 && pci_enable_msi(pdev) == 0)
3019 adapter->flags |= USING_MSI;
3021 set_nqsets(adapter);
3023 err = sysfs_create_group(&adapter->port[0]->dev.kobj,
3026 print_port_info(adapter, ai);
3030 iounmap(adapter->regs);
3031 for (i = ai->nports - 1; i >= 0; --i)
3032 if (adapter->port[i])
3033 free_netdev(adapter->port[i]);
3039 pci_disable_device(pdev);
3040 out_release_regions:
3041 pci_release_regions(pdev);
3042 pci_set_drvdata(pdev, NULL);
static void __devexit remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (adapter) {
		int i;

		t3_sge_stop(adapter);
		sysfs_remove_group(&adapter->port[0]->dev.kobj,
				   &cxgb3_attr_group);

		if (is_offload(adapter)) {
			cxgb3_adapter_unofld(adapter);
			if (test_bit(OFFLOAD_DEVMAP_BIT,
				     &adapter->open_device_map))
				offload_close(&adapter->tdev);
		}

		for_each_port(adapter, i)
			if (test_bit(i, &adapter->registered_device_map))
				unregister_netdev(adapter->port[i]);

		t3_stop_sge_timers(adapter);
		t3_free_sge_resources(adapter);
		cxgb_disable_msi(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i])
				free_netdev(adapter->port[i]);

		iounmap(adapter->regs);
		kfree(adapter);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
static struct pci_driver driver = {
	.name = DRV_NAME,
	.id_table = cxgb3_pci_tbl,
	.probe = init_one,
	.remove = __devexit_p(remove_one),
	.err_handler = &t3_err_handler,
};
static int __init cxgb3_init_module(void)
{
	int ret;

	/* Register the offload framework before any adapter can probe. */
	cxgb3_offload_init();
	ret = pci_register_driver(&driver);
	return ret;
}

static void __exit cxgb3_cleanup_module(void)
{
	pci_unregister_driver(&driver);
	if (cxgb3_wq)		/* created on the first probe in init_one() */
		destroy_workqueue(cxgb3_wq);
}

module_init(cxgb3_init_module);
module_exit(cxgb3_cleanup_module);