/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <asm/uaccess.h>
#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"
#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"
enum {
	MAX_TXQ_ENTRIES = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES = 16384,
	MAX_RX_BUFFERS = 16384,
	MAX_RX_JUMBO_BUFFERS = 16384,
	MIN_TXQ_ENTRIES = 4,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES = 32,
	MIN_FL_ENTRIES = 32
};
#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
static const struct pci_device_id cxgb3_pci_tbl[] = {
	CH_DEVICE(0x20, 0),	/* PE9000 */
	CH_DEVICE(0x21, 1),	/* T302E */
	CH_DEVICE(0x22, 2),	/* T310E */
	CH_DEVICE(0x23, 3),	/* T320X */
	CH_DEVICE(0x24, 1),	/* T302X */
	CH_DEVICE(0x25, 3),	/* T320E */
	CH_DEVICE(0x26, 2),	/* T310X */
	CH_DEVICE(0x30, 2),	/* T3B10 */
	CH_DEVICE(0x31, 3),	/* T3B20 */
	CH_DEVICE(0x32, 1),	/* T3B02 */
	{0,}
};
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */
static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
static struct workqueue_struct *cxgb3_wq;
/**
 *	link_report - show link status and link speed/duplex
 *	@dev: the port whose settings are to be reported
 *
 *	Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		printk(KERN_INFO "%s: link down\n", dev->name);
	else {
		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_config.speed) {
		case SPEED_10000: s = "10Gbps"; break;
		case SPEED_1000:  s = "1000Mbps"; break;
		case SPEED_100:   s = "100Mbps"; break;
		}

		printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
		       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
	}
}
/**
 *	t3_os_link_changed - handle link status changes
 *	@adapter: the adapter associated with the link change
 *	@port_id: the port index whose link status has changed
 *	@link_stat: the new status of the link
 *	@speed: the new speed setting
 *	@duplex: the new duplex setting
 *	@pause: the new flow-control setting
 *
 *	This is the OS-dependent handler for link status changes.  The OS
 *	neutral handler takes care of most of the processing for these events,
 *	then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
{
	struct net_device *dev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	/* Skip changes from disabled ports. */
	if (!netif_running(dev))
		return;

	if (link_stat != netif_carrier_ok(dev)) {
		if (link_stat) {
			t3_mac_enable(mac, MAC_DIRECTION_RX);
			netif_carrier_on(dev);
		} else {
			netif_carrier_off(dev);
			pi->phy.ops->power_down(&pi->phy, 1);
			t3_mac_disable(mac, MAC_DIRECTION_RX);
			t3_link_start(&pi->phy, mac, &pi->link_config);
		}

		link_report(dev);
	}
}
/**
 *	t3_os_phymod_changed - handle PHY module changes
 *	@adap: the adapter associated with the change
 *	@port_id: the port index whose PHY module has changed
 *
 *	This is the OS-dependent handler for PHY module changes.  It is
 *	invoked when a PHY module is removed or inserted and performs any
 *	OS-specific processing.
 */
void t3_os_phymod_changed(struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->phy.modtype == phy_modtype_none)
		printk(KERN_INFO "%s: PHY module unplugged\n", dev->name);
	else
		printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name,
		       mod_str[pi->phy.modtype]);
}
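
/*
 * Program a port's MAC Rx mode (promiscuity and multicast filters) from the
 * current state of its net_device.
 */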
static void cxgb_set_rxmode(struct net_device *dev)
{
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);

	init_rx_mode(&rm, dev, dev->mc_list);
	t3_mac_set_rx_mode(&pi->mac, &rm);
}
/**
 *	link_start - enable a port
 *	@dev: the device to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	init_rx_mode(&rm, dev, dev->mc_list);

	t3_mac_set_mtu(mac, dev->mtu);
	t3_mac_set_address(mac, 0, dev->dev_addr);
	t3_mac_set_rx_mode(mac, &rm);
	t3_link_start(&pi->phy, mac, &pi->link_config);
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}
static inline void cxgb_disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}
/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
	t3_slow_intr_handler(cookie);
	return IRQ_HANDLED;
}
287 * Name the MSI-X interrupts.
289 static void name_msix_vecs(struct adapter *adap)
291 int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
293 snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
294 adap->msix_info[0].desc[n] = 0;
296 for_each_port(adap, j) {
297 struct net_device *d = adap->port[j];
298 const struct port_info *pi = netdev_priv(d);
300 for (i = 0; i < pi->nqsets; i++, msi_idx++) {
301 snprintf(adap->msix_info[msi_idx].desc, n,
302 "%s-%d", d->name, pi->first_qset + i);
303 adap->msix_info[msi_idx].desc[n] = 0;
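
/*
 * Request the MSI-X vectors for the SGE data queues, one per queue set,
 * using the names prepared by name_msix_vecs().  On failure, release the
 * vectors acquired so far.
 */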
308 static int request_msix_data_irqs(struct adapter *adap)
310 int i, j, err, qidx = 0;
312 for_each_port(adap, i) {
313 int nqsets = adap2pinfo(adap, i)->nqsets;
315 for (j = 0; j < nqsets; ++j) {
316 err = request_irq(adap->msix_info[qidx + 1].vec,
317 t3_intr_handler(adap,
320 adap->msix_info[qidx + 1].desc,
321 &adap->sge.qs[qidx]);
324 free_irq(adap->msix_info[qidx + 1].vec,
325 &adap->sge.qs[qidx]);
334 static void free_irq_resources(struct adapter *adapter)
336 if (adapter->flags & USING_MSIX) {
339 free_irq(adapter->msix_info[0].vec, adapter);
340 for_each_port(adapter, i)
341 n += adap2pinfo(adapter, i)->nqsets;
343 for (i = 0; i < n; ++i)
344 free_irq(adapter->msix_info[i + 1].vec,
345 &adapter->sge.qs[i]);
347 free_irq(adapter->pdev->irq, adapter);
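
/*
 * Wait for the response queue of qset 0 to accumulate @n offload (management)
 * replies beyond @init_cnt, giving up after a bounded number of attempts.
 */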
350 static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
355 while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
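
/*
 * Work around TP parity-error sensitivity on T3 rev C and later: write every
 * SMT, L2T, and routing-table entry (plus one TCB field) with known contents
 * so their parity is initialized, then wait for all the management replies.
 */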
363 static int init_tp_parity(struct adapter *adap)
367 struct cpl_set_tcb_field *greq;
368 unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;
370 t3_tp_set_offload_mode(adap, 1);
372 for (i = 0; i < 16; i++) {
373 struct cpl_smt_write_req *req;
375 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
376 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
377 memset(req, 0, sizeof(*req));
378 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
379 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
381 t3_mgmt_tx(adap, skb);
384 for (i = 0; i < 2048; i++) {
385 struct cpl_l2t_write_req *req;
387 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
388 req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
389 memset(req, 0, sizeof(*req));
390 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
391 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
392 req->params = htonl(V_L2T_W_IDX(i));
393 t3_mgmt_tx(adap, skb);
396 for (i = 0; i < 2048; i++) {
397 struct cpl_rte_write_req *req;
399 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
400 req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
401 memset(req, 0, sizeof(*req));
402 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
403 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
404 req->l2t_idx = htonl(V_L2T_W_IDX(i));
405 t3_mgmt_tx(adap, skb);
408 skb = alloc_skb(sizeof(*greq), GFP_KERNEL | __GFP_NOFAIL);
409 greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
410 memset(greq, 0, sizeof(*greq));
411 greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
412 OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
413 greq->mask = cpu_to_be64(1);
414 t3_mgmt_tx(adap, skb);
416 i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
417 t3_tp_set_offload_mode(adap, 0);
422 * setup_rss - configure RSS
425 * Sets up RSS to distribute packets to multiple receive queues. We
426 * configure the RSS CPU lookup table to distribute to the number of HW
427 * receive queues, and the response queue lookup table to narrow that
428 * down to the response queues actually configured for each port.
429 * We always configure the RSS mapping for two ports since the mapping
430 * table has plenty of entries.
432 static void setup_rss(struct adapter *adap)
435 unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
436 unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
437 u8 cpus[SGE_QSETS + 1];
438 u16 rspq_map[RSS_TABLE_SIZE];
440 for (i = 0; i < SGE_QSETS; ++i)
442 cpus[SGE_QSETS] = 0xff; /* terminator */
444 for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
445 rspq_map[i] = i % nq0;
446 rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
449 t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
450 F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
451 V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
454 static void init_napi(struct adapter *adap)
458 for (i = 0; i < SGE_QSETS; i++) {
459 struct sge_qset *qs = &adap->sge.qs[i];
462 netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
467 * netif_napi_add() can be called only once per napi_struct because it
468 * adds each new napi_struct to a list. Be careful not to call it a
469 * second time, e.g., during EEH recovery, by making a note of it.
471 adap->flags |= NAPI_INIT;
/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
479 static void quiesce_rx(struct adapter *adap)
483 for (i = 0; i < SGE_QSETS; i++)
484 if (adap->sge.qs[i].adap)
485 napi_disable(&adap->sge.qs[i].napi);
488 static void enable_all_napi(struct adapter *adap)
491 for (i = 0; i < SGE_QSETS; i++)
492 if (adap->sge.qs[i].adap)
493 napi_enable(&adap->sge.qs[i].napi);
/**
 *	set_qset_lro - Turn a queue set's LRO capability on and off
 *	@dev: the device the qset is attached to
 *	@qset_idx: the queue set index
 *	@val: the LRO switch
 *
 *	Sets LRO on or off for a particular queue set.
 *	The device's features flag is updated to reflect the LRO
 *	capability only when all queues belonging to the device are
 *	in LRO mode.
 */
507 static void set_qset_lro(struct net_device *dev, int qset_idx, int val)
509 struct port_info *pi = netdev_priv(dev);
510 struct adapter *adapter = pi->adapter;
513 adapter->params.sge.qset[qset_idx].lro = !!val;
514 adapter->sge.qs[qset_idx].lro_enabled = !!val;
516 /* let ethtool report LRO on only if all queues are LRO enabled */
517 for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; ++i)
518 lro_on &= adapter->params.sge.qset[i].lro;
521 dev->features |= NETIF_F_LRO;
523 dev->features &= ~NETIF_F_LRO;
527 * setup_sge_qsets - configure SGE Tx/Rx/response queues
530 * Determines how many sets of SGE queues to use and initializes them.
531 * We support multiple queue sets per port if we have MSI-X, otherwise
532 * just one queue set per port.
534 static int setup_sge_qsets(struct adapter *adap)
536 int i, j, err, irq_idx = 0, qset_idx = 0;
537 unsigned int ntxq = SGE_TXQ_PER_SET;
539 if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
542 for_each_port(adap, i) {
543 struct net_device *dev = adap->port[i];
544 struct port_info *pi = netdev_priv(dev);
546 pi->qs = &adap->sge.qs[pi->first_qset];
547 for (j = pi->first_qset; j < pi->first_qset + pi->nqsets;
549 set_qset_lro(dev, qset_idx, pi->rx_offload & T3_LRO);
550 err = t3_sge_alloc_qset(adap, qset_idx, 1,
551 (adap->flags & USING_MSIX) ? qset_idx + 1 :
553 &adap->params.sge.qset[qset_idx], ntxq, dev,
554 netdev_get_tx_queue(dev, j));
556 t3_stop_sge_timers(adap);
557 t3_free_sge_resources(adap);
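
/*
 * sysfs attribute helpers: show/store a value for a net_device attribute
 * while holding the RTNL lock to synchronize with device shutdown.
 */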
566 static ssize_t attr_show(struct device *d, char *buf,
567 ssize_t(*format) (struct net_device *, char *))
571 /* Synchronize with ioctls that may shut down the device */
573 len = (*format) (to_net_dev(d), buf);
578 static ssize_t attr_store(struct device *d,
579 const char *buf, size_t len,
580 ssize_t(*set) (struct net_device *, unsigned int),
581 unsigned int min_val, unsigned int max_val)
587 if (!capable(CAP_NET_ADMIN))
590 val = simple_strtoul(buf, &endp, 0);
591 if (endp == buf || val < min_val || val > max_val)
595 ret = (*set) (to_net_dev(d), val);
602 #define CXGB3_SHOW(name, val_expr) \
603 static ssize_t format_##name(struct net_device *dev, char *buf) \
605 struct port_info *pi = netdev_priv(dev); \
606 struct adapter *adap = pi->adapter; \
607 return sprintf(buf, "%u\n", val_expr); \
609 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
612 return attr_show(d, buf, format_##name); \
615 static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
617 struct port_info *pi = netdev_priv(dev);
618 struct adapter *adap = pi->adapter;
619 int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;
621 if (adap->flags & FULL_INIT_DONE)
623 if (val && adap->params.rev == 0)
625 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
628 adap->params.mc5.nfilters = val;
632 static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
633 const char *buf, size_t len)
635 return attr_store(d, buf, len, set_nfilters, 0, ~0);
638 static ssize_t set_nservers(struct net_device *dev, unsigned int val)
640 struct port_info *pi = netdev_priv(dev);
641 struct adapter *adap = pi->adapter;
643 if (adap->flags & FULL_INIT_DONE)
645 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
648 adap->params.mc5.nservers = val;
652 static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
653 const char *buf, size_t len)
655 return attr_store(d, buf, len, set_nservers, 0, ~0);
658 #define CXGB3_ATTR_R(name, val_expr) \
659 CXGB3_SHOW(name, val_expr) \
660 static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
662 #define CXGB3_ATTR_RW(name, val_expr, store_method) \
663 CXGB3_SHOW(name, val_expr) \
664 static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
666 CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
667 CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
668 CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
670 static struct attribute *cxgb3_attrs[] = {
671 &dev_attr_cam_size.attr,
672 &dev_attr_nfilters.attr,
673 &dev_attr_nservers.attr,
677 static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
679 static ssize_t tm_attr_show(struct device *d,
680 char *buf, int sched)
682 struct port_info *pi = netdev_priv(to_net_dev(d));
683 struct adapter *adap = pi->adapter;
684 unsigned int v, addr, bpt, cpt;
687 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
689 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
690 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
693 bpt = (v >> 8) & 0xff;
696 len = sprintf(buf, "disabled\n");
698 v = (adap->params.vpd.cclk * 1000) / cpt;
699 len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
705 static ssize_t tm_attr_store(struct device *d,
706 const char *buf, size_t len, int sched)
708 struct port_info *pi = netdev_priv(to_net_dev(d));
709 struct adapter *adap = pi->adapter;
714 if (!capable(CAP_NET_ADMIN))
717 val = simple_strtoul(buf, &endp, 0);
718 if (endp == buf || val > 10000000)
722 ret = t3_config_sched(adap, val, sched);
729 #define TM_ATTR(name, sched) \
730 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
733 return tm_attr_show(d, buf, sched); \
735 static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
736 const char *buf, size_t len) \
738 return tm_attr_store(d, buf, len, sched); \
740 static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
751 static struct attribute *offload_attrs[] = {
752 &dev_attr_sched0.attr,
753 &dev_attr_sched1.attr,
754 &dev_attr_sched2.attr,
755 &dev_attr_sched3.attr,
756 &dev_attr_sched4.attr,
757 &dev_attr_sched5.attr,
758 &dev_attr_sched6.attr,
759 &dev_attr_sched7.attr,
763 static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
766 * Sends an sk_buff to an offload queue driver
767 * after dealing with any active network taps.
769 static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
774 ret = t3_offload_tx(tdev, skb);
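
/* Write a port's source MAC address into the hardware SMT entry for that port. */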
779 static int write_smt_entry(struct adapter *adapter, int idx)
781 struct cpl_smt_write_req *req;
782 struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
787 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
788 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
789 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
790 req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */
792 memset(req->src_mac1, 0, sizeof(req->src_mac1));
793 memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
795 offload_tx(&adapter->tdev, skb);
799 static int init_smt(struct adapter *adapter)
803 for_each_port(adapter, i)
804 write_smt_entry(adapter, i);
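
/* Program the per-port MTUs into the TP MTU port table register. */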
808 static void init_port_mtus(struct adapter *adapter)
810 unsigned int mtus = adapter->port[0]->mtu;
812 if (adapter->port[1])
813 mtus |= adapter->port[1]->mtu << 16;
814 t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
817 static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
821 struct mngt_pktsched_wr *req;
824 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
825 req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
826 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
827 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
833 ret = t3_mgmt_tx(adap, skb);
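
/*
 * Bind each port's queue sets to its port scheduler by sending a packet
 * scheduler management command per queue set.
 */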
838 static int bind_qsets(struct adapter *adap)
842 for_each_port(adap, i) {
843 const struct port_info *pi = adap2pinfo(adap, i);
845 for (j = 0; j < pi->nqsets; ++j) {
846 int ret = send_pktsched_cmd(adap, 1,
847 pi->first_qset + j, -1,
857 #define FW_FNAME "cxgb3/t3fw-%d.%d.%d.bin"
858 #define TPSRAM_NAME "cxgb3/t3%c_psram-%d.%d.%d.bin"
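
/*
 * Load the firmware image matching the driver's expected version via
 * request_firmware() and write it to the adapter.
 */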
860 static int upgrade_fw(struct adapter *adap)
864 const struct firmware *fw;
865 struct device *dev = &adap->pdev->dev;
867 snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
868 FW_VERSION_MINOR, FW_VERSION_MICRO);
869 ret = request_firmware(&fw, buf, dev);
871 dev_err(dev, "could not upgrade firmware: unable to load %s\n",
875 ret = t3_load_fw(adap, fw->data, fw->size);
876 release_firmware(fw);
879 dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
880 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
882 dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
883 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
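
/* Map the adapter's chip revision to the character used in TP SRAM image names. */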
888 static inline char t3rev2char(struct adapter *adapter)
892 switch(adapter->params.rev) {
904 static int update_tpsram(struct adapter *adap)
906 const struct firmware *tpsram;
908 struct device *dev = &adap->pdev->dev;
912 rev = t3rev2char(adap);
916 snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
917 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
919 ret = request_firmware(&tpsram, buf, dev);
921 dev_err(dev, "could not load TP SRAM: unable to load %s\n",
926 ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
930 ret = t3_set_proto_sram(adap, tpsram->data);
933 "successful update of protocol engine "
935 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
		dev_err(dev, "failed to update protocol engine %d.%d.%d\n",
938 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
940 dev_err(dev, "loading protocol SRAM failed\n");
943 release_firmware(tpsram);
949 * cxgb_up - enable the adapter
950 * @adapter: adapter being enabled
952 * Called when the first port is enabled, this function performs the
953 * actions necessary to make an adapter operational, such as completing
954 * the initialization of HW modules, and enabling interrupts.
956 * Must be called with the rtnl lock held.
958 static int cxgb_up(struct adapter *adap)
962 if (!(adap->flags & FULL_INIT_DONE)) {
963 err = t3_check_fw_version(adap);
964 if (err == -EINVAL) {
965 err = upgrade_fw(adap);
966 CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
967 FW_VERSION_MAJOR, FW_VERSION_MINOR,
968 FW_VERSION_MICRO, err ? "failed" : "succeeded");
971 err = t3_check_tpsram_version(adap);
972 if (err == -EINVAL) {
973 err = update_tpsram(adap);
974 CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
975 TP_VERSION_MAJOR, TP_VERSION_MINOR,
976 TP_VERSION_MICRO, err ? "failed" : "succeeded");
980 * Clear interrupts now to catch errors if t3_init_hw fails.
981 * We clear them again later as initialization may trigger
982 * conditions that can interrupt.
986 err = t3_init_hw(adap, 0);
990 t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
991 t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
993 err = setup_sge_qsets(adap);
998 if (!(adap->flags & NAPI_INIT))
1000 adap->flags |= FULL_INIT_DONE;
1003 t3_intr_clear(adap);
1005 if (adap->flags & USING_MSIX) {
1006 name_msix_vecs(adap);
1007 err = request_irq(adap->msix_info[0].vec,
1008 t3_async_intr_handler, 0,
1009 adap->msix_info[0].desc, adap);
1013 err = request_msix_data_irqs(adap);
1015 free_irq(adap->msix_info[0].vec, adap);
1018 } else if ((err = request_irq(adap->pdev->irq,
1019 t3_intr_handler(adap,
1020 adap->sge.qs[0].rspq.
1022 (adap->flags & USING_MSI) ?
1027 enable_all_napi(adap);
1029 t3_intr_enable(adap);
1031 if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
1032 is_offload(adap) && init_tp_parity(adap) == 0)
1033 adap->flags |= TP_PARITY_INIT;
1035 if (adap->flags & TP_PARITY_INIT) {
1036 t3_write_reg(adap, A_TP_INT_CAUSE,
1037 F_CMCACHEPERR | F_ARPLUTPERR);
1038 t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
1041 if (!(adap->flags & QUEUES_BOUND)) {
1042 err = bind_qsets(adap);
1044 CH_ERR(adap, "failed to bind qsets, err %d\n", err);
1045 t3_intr_disable(adap);
1046 free_irq_resources(adap);
1049 adap->flags |= QUEUES_BOUND;
1055 CH_ERR(adap, "request_irq failed, err %d\n", err);
1060 * Release resources when all the ports and offloading have been stopped.
1062 static void cxgb_down(struct adapter *adapter)
1064 t3_sge_stop(adapter);
1065 spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */
1066 t3_intr_disable(adapter);
1067 spin_unlock_irq(&adapter->work_lock);
1069 free_irq_resources(adapter);
1070 flush_workqueue(cxgb3_wq); /* wait for external IRQ handler */
1071 quiesce_rx(adapter);
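
/*
 * Schedule the periodic adapter check task, using the link polling period
 * when links are polled and the stats update period otherwise.
 */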
1074 static void schedule_chk_task(struct adapter *adap)
1078 timeo = adap->params.linkpoll_period ?
1079 (HZ * adap->params.linkpoll_period) / 10 :
1080 adap->params.stats_update_period * HZ;
1082 queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
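
/*
 * Bring up the offload functionality: enable TP offload mode, activate the
 * offload module, load the MTU table, and notify registered ULP clients.
 */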
1085 static int offload_open(struct net_device *dev)
1087 struct port_info *pi = netdev_priv(dev);
1088 struct adapter *adapter = pi->adapter;
1089 struct t3cdev *tdev = dev2t3cdev(dev);
1090 int adap_up = adapter->open_device_map & PORT_MASK;
1093 if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1096 if (!adap_up && (err = cxgb_up(adapter)) < 0)
1099 t3_tp_set_offload_mode(adapter, 1);
1100 tdev->lldev = adapter->port[0];
1101 err = cxgb3_offload_activate(adapter);
1105 init_port_mtus(adapter);
1106 t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
1107 adapter->params.b_wnd,
1108 adapter->params.rev == 0 ?
1109 adapter->port[0]->mtu : 0xffff);
1112 if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
1113 dev_dbg(&dev->dev, "cannot create sysfs group\n");
1115 /* Call back all registered clients */
1116 cxgb3_add_clients(tdev);
1119 /* restore them in case the offload module has changed them */
1121 t3_tp_set_offload_mode(adapter, 0);
1122 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1123 cxgb3_set_dummy_ops(tdev);
1128 static int offload_close(struct t3cdev *tdev)
1130 struct adapter *adapter = tdev2adap(tdev);
1132 if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1135 /* Call back all registered clients */
1136 cxgb3_remove_clients(tdev);
1138 sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
1141 cxgb3_set_dummy_ops(tdev);
1142 t3_tp_set_offload_mode(adapter, 0);
1143 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1145 if (!adapter->open_device_map)
1148 cxgb3_offload_deactivate(adapter);
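
/* net_device open handler: brings up the adapter on first open and enables the port. */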
1152 static int cxgb_open(struct net_device *dev)
1154 struct port_info *pi = netdev_priv(dev);
1155 struct adapter *adapter = pi->adapter;
1156 int other_ports = adapter->open_device_map & PORT_MASK;
1159 if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
1162 set_bit(pi->port_id, &adapter->open_device_map);
1163 if (is_offload(adapter) && !ofld_disable) {
1164 err = offload_open(dev);
1167 "Could not initialize offload capabilities\n");
1170 dev->real_num_tx_queues = pi->nqsets;
1172 t3_port_intr_enable(adapter, pi->port_id);
1173 netif_tx_start_all_queues(dev);
1175 schedule_chk_task(adapter);
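
/* net_device stop handler: quiesces the port and releases adapter resources on last close. */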
1180 static int cxgb_close(struct net_device *dev)
1182 struct port_info *pi = netdev_priv(dev);
1183 struct adapter *adapter = pi->adapter;
1185 t3_port_intr_disable(adapter, pi->port_id);
1186 netif_tx_stop_all_queues(dev);
1187 pi->phy.ops->power_down(&pi->phy, 1);
1188 netif_carrier_off(dev);
1189 t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
1191 spin_lock_irq(&adapter->work_lock); /* sync with update task */
1192 clear_bit(pi->port_id, &adapter->open_device_map);
1193 spin_unlock_irq(&adapter->work_lock);
1195 if (!(adapter->open_device_map & PORT_MASK))
1196 cancel_rearming_delayed_workqueue(cxgb3_wq,
1197 &adapter->adap_check_task);
1199 if (!adapter->open_device_map)
1205 static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
1207 struct port_info *pi = netdev_priv(dev);
1208 struct adapter *adapter = pi->adapter;
1209 struct net_device_stats *ns = &pi->netstats;
1210 const struct mac_stats *pstats;
1212 spin_lock(&adapter->stats_lock);
1213 pstats = t3_mac_update_stats(&pi->mac);
1214 spin_unlock(&adapter->stats_lock);
1216 ns->tx_bytes = pstats->tx_octets;
1217 ns->tx_packets = pstats->tx_frames;
1218 ns->rx_bytes = pstats->rx_octets;
1219 ns->rx_packets = pstats->rx_frames;
1220 ns->multicast = pstats->rx_mcast_frames;
1222 ns->tx_errors = pstats->tx_underrun;
1223 ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
1224 pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
1225 pstats->rx_fifo_ovfl;
1227 /* detailed rx_errors */
1228 ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
1229 ns->rx_over_errors = 0;
1230 ns->rx_crc_errors = pstats->rx_fcs_errs;
1231 ns->rx_frame_errors = pstats->rx_symbol_errs;
1232 ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
1233 ns->rx_missed_errors = pstats->rx_cong_drops;
1235 /* detailed tx_errors */
1236 ns->tx_aborted_errors = 0;
1237 ns->tx_carrier_errors = 0;
1238 ns->tx_fifo_errors = pstats->tx_underrun;
1239 ns->tx_heartbeat_errors = 0;
1240 ns->tx_window_errors = 0;
1244 static u32 get_msglevel(struct net_device *dev)
1246 struct port_info *pi = netdev_priv(dev);
1247 struct adapter *adapter = pi->adapter;
1249 return adapter->msg_enable;
1252 static void set_msglevel(struct net_device *dev, u32 val)
1254 struct port_info *pi = netdev_priv(dev);
1255 struct adapter *adapter = pi->adapter;
1257 adapter->msg_enable = val;
1260 static char stats_strings[][ETH_GSTRING_LEN] = {
1263 "TxMulticastFramesOK",
1264 "TxBroadcastFramesOK",
1271 "TxFrames128To255 ",
1272 "TxFrames256To511 ",
1273 "TxFrames512To1023 ",
1274 "TxFrames1024To1518 ",
1275 "TxFrames1519ToMax ",
1279 "RxMulticastFramesOK",
1280 "RxBroadcastFramesOK",
1291 "RxFrames128To255 ",
1292 "RxFrames256To511 ",
1293 "RxFrames512To1023 ",
1294 "RxFrames1024To1518 ",
1295 "RxFrames1519ToMax ",
1308 "CheckTXEnToggled ",
1313 static int get_sset_count(struct net_device *dev, int sset)
1317 return ARRAY_SIZE(stats_strings);
1323 #define T3_REGMAP_SIZE (3 * 1024)
1325 static int get_regs_len(struct net_device *dev)
1327 return T3_REGMAP_SIZE;
1330 static int get_eeprom_len(struct net_device *dev)
1335 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1337 struct port_info *pi = netdev_priv(dev);
1338 struct adapter *adapter = pi->adapter;
1342 spin_lock(&adapter->stats_lock);
1343 t3_get_fw_version(adapter, &fw_vers);
1344 t3_get_tp_version(adapter, &tp_vers);
1345 spin_unlock(&adapter->stats_lock);
1347 strcpy(info->driver, DRV_NAME);
1348 strcpy(info->version, DRV_VERSION);
1349 strcpy(info->bus_info, pci_name(adapter->pdev));
1351 strcpy(info->fw_version, "N/A");
1353 snprintf(info->fw_version, sizeof(info->fw_version),
1354 "%s %u.%u.%u TP %u.%u.%u",
1355 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1356 G_FW_VERSION_MAJOR(fw_vers),
1357 G_FW_VERSION_MINOR(fw_vers),
1358 G_FW_VERSION_MICRO(fw_vers),
1359 G_TP_VERSION_MAJOR(tp_vers),
1360 G_TP_VERSION_MINOR(tp_vers),
1361 G_TP_VERSION_MICRO(tp_vers));
1365 static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
1367 if (stringset == ETH_SS_STATS)
1368 memcpy(data, stats_strings, sizeof(stats_strings));
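
/* Sum a per-queue SGE statistic over all the queue sets owned by a port. */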
1371 static unsigned long collect_sge_port_stats(struct adapter *adapter,
1372 struct port_info *p, int idx)
1375 unsigned long tot = 0;
1377 for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
1378 tot += adapter->sge.qs[i].port_stats[idx];
1382 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1385 struct port_info *pi = netdev_priv(dev);
1386 struct adapter *adapter = pi->adapter;
1387 const struct mac_stats *s;
1389 spin_lock(&adapter->stats_lock);
1390 s = t3_mac_update_stats(&pi->mac);
1391 spin_unlock(&adapter->stats_lock);
1393 *data++ = s->tx_octets;
1394 *data++ = s->tx_frames;
1395 *data++ = s->tx_mcast_frames;
1396 *data++ = s->tx_bcast_frames;
1397 *data++ = s->tx_pause;
1398 *data++ = s->tx_underrun;
1399 *data++ = s->tx_fifo_urun;
1401 *data++ = s->tx_frames_64;
1402 *data++ = s->tx_frames_65_127;
1403 *data++ = s->tx_frames_128_255;
1404 *data++ = s->tx_frames_256_511;
1405 *data++ = s->tx_frames_512_1023;
1406 *data++ = s->tx_frames_1024_1518;
1407 *data++ = s->tx_frames_1519_max;
1409 *data++ = s->rx_octets;
1410 *data++ = s->rx_frames;
1411 *data++ = s->rx_mcast_frames;
1412 *data++ = s->rx_bcast_frames;
1413 *data++ = s->rx_pause;
1414 *data++ = s->rx_fcs_errs;
1415 *data++ = s->rx_symbol_errs;
1416 *data++ = s->rx_short;
1417 *data++ = s->rx_jabber;
1418 *data++ = s->rx_too_long;
1419 *data++ = s->rx_fifo_ovfl;
1421 *data++ = s->rx_frames_64;
1422 *data++ = s->rx_frames_65_127;
1423 *data++ = s->rx_frames_128_255;
1424 *data++ = s->rx_frames_256_511;
1425 *data++ = s->rx_frames_512_1023;
1426 *data++ = s->rx_frames_1024_1518;
1427 *data++ = s->rx_frames_1519_max;
1429 *data++ = pi->phy.fifo_errors;
1431 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1432 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1433 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1434 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1435 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1436 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_AGGR);
1437 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_FLUSHED);
1438 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_NO_DESC);
1439 *data++ = s->rx_cong_drops;
1441 *data++ = s->num_toggled;
1442 *data++ = s->num_resets;
1445 static inline void reg_block_dump(struct adapter *ap, void *buf,
1446 unsigned int start, unsigned int end)
1448 u32 *p = buf + start;
1450 for (; start <= end; start += sizeof(u32))
1451 *p++ = t3_read_reg(ap, start);
1454 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1457 struct port_info *pi = netdev_priv(dev);
1458 struct adapter *ap = pi->adapter;
1462 * bits 0..9: chip version
1463 * bits 10..15: chip revision
1464 * bit 31: set for PCIe cards
1466 regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1469 * We skip the MAC statistics registers because they are clear-on-read.
1470 * Also reading multi-register stats would need to synchronize with the
1471 * periodic mac stats accumulation. Hard to justify the complexity.
1473 memset(buf, 0, T3_REGMAP_SIZE);
1474 reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1475 reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1476 reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1477 reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1478 reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1479 reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1480 XGM_REG(A_XGM_SERDES_STAT3, 1));
1481 reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1482 XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1485 static int restart_autoneg(struct net_device *dev)
1487 struct port_info *p = netdev_priv(dev);
1489 if (!netif_running(dev))
1491 if (p->link_config.autoneg != AUTONEG_ENABLE)
1493 p->phy.ops->autoneg_restart(&p->phy);
1497 static int cxgb3_phys_id(struct net_device *dev, u32 data)
1499 struct port_info *pi = netdev_priv(dev);
1500 struct adapter *adapter = pi->adapter;
1506 for (i = 0; i < data * 2; i++) {
1507 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1508 (i & 1) ? F_GPIO0_OUT_VAL : 0);
1509 if (msleep_interruptible(500))
1512 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1517 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1519 struct port_info *p = netdev_priv(dev);
1521 cmd->supported = p->link_config.supported;
1522 cmd->advertising = p->link_config.advertising;
1524 if (netif_carrier_ok(dev)) {
1525 cmd->speed = p->link_config.speed;
1526 cmd->duplex = p->link_config.duplex;
1532 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1533 cmd->phy_address = p->phy.addr;
1534 cmd->transceiver = XCVR_EXTERNAL;
1535 cmd->autoneg = p->link_config.autoneg;
1541 static int speed_duplex_to_caps(int speed, int duplex)
1547 if (duplex == DUPLEX_FULL)
1548 cap = SUPPORTED_10baseT_Full;
1550 cap = SUPPORTED_10baseT_Half;
1553 if (duplex == DUPLEX_FULL)
1554 cap = SUPPORTED_100baseT_Full;
1556 cap = SUPPORTED_100baseT_Half;
1559 if (duplex == DUPLEX_FULL)
1560 cap = SUPPORTED_1000baseT_Full;
1562 cap = SUPPORTED_1000baseT_Half;
1565 if (duplex == DUPLEX_FULL)
1566 cap = SUPPORTED_10000baseT_Full;
1571 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1572 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1573 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1574 ADVERTISED_10000baseT_Full)
1576 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1579 struct port_info *p = netdev_priv(dev);
1580 struct link_config *lc = &p->link_config;
1582 if (!(lc->supported & SUPPORTED_Autoneg)) {
1584 * PHY offers a single speed/duplex. See if that's what's
1587 if (cmd->autoneg == AUTONEG_DISABLE) {
1588 cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1589 if (lc->supported & cap)
1595 if (cmd->autoneg == AUTONEG_DISABLE) {
1596 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1598 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
1600 lc->requested_speed = cmd->speed;
1601 lc->requested_duplex = cmd->duplex;
1602 lc->advertising = 0;
1604 cmd->advertising &= ADVERTISED_MASK;
1605 cmd->advertising &= lc->supported;
1606 if (!cmd->advertising)
1608 lc->requested_speed = SPEED_INVALID;
1609 lc->requested_duplex = DUPLEX_INVALID;
1610 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1612 lc->autoneg = cmd->autoneg;
1613 if (netif_running(dev))
1614 t3_link_start(&p->phy, &p->mac, lc);
1618 static void get_pauseparam(struct net_device *dev,
1619 struct ethtool_pauseparam *epause)
1621 struct port_info *p = netdev_priv(dev);
1623 epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1624 epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1625 epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1628 static int set_pauseparam(struct net_device *dev,
1629 struct ethtool_pauseparam *epause)
1631 struct port_info *p = netdev_priv(dev);
1632 struct link_config *lc = &p->link_config;
1634 if (epause->autoneg == AUTONEG_DISABLE)
1635 lc->requested_fc = 0;
1636 else if (lc->supported & SUPPORTED_Autoneg)
1637 lc->requested_fc = PAUSE_AUTONEG;
1641 if (epause->rx_pause)
1642 lc->requested_fc |= PAUSE_RX;
1643 if (epause->tx_pause)
1644 lc->requested_fc |= PAUSE_TX;
1645 if (lc->autoneg == AUTONEG_ENABLE) {
1646 if (netif_running(dev))
1647 t3_link_start(&p->phy, &p->mac, lc);
1649 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1650 if (netif_running(dev))
1651 t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1656 static u32 get_rx_csum(struct net_device *dev)
1658 struct port_info *p = netdev_priv(dev);
1660 return p->rx_offload & T3_RX_CSUM;
1663 static int set_rx_csum(struct net_device *dev, u32 data)
1665 struct port_info *p = netdev_priv(dev);
1668 p->rx_offload |= T3_RX_CSUM;
1672 p->rx_offload &= ~(T3_RX_CSUM | T3_LRO);
1673 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
1674 set_qset_lro(dev, i, 0);
1679 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1681 struct port_info *pi = netdev_priv(dev);
1682 struct adapter *adapter = pi->adapter;
1683 const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1685 e->rx_max_pending = MAX_RX_BUFFERS;
1686 e->rx_mini_max_pending = 0;
1687 e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1688 e->tx_max_pending = MAX_TXQ_ENTRIES;
1690 e->rx_pending = q->fl_size;
1691 e->rx_mini_pending = q->rspq_size;
1692 e->rx_jumbo_pending = q->jumbo_size;
1693 e->tx_pending = q->txq_size[0];
1696 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1698 struct port_info *pi = netdev_priv(dev);
1699 struct adapter *adapter = pi->adapter;
1700 struct qset_params *q;
1703 if (e->rx_pending > MAX_RX_BUFFERS ||
1704 e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1705 e->tx_pending > MAX_TXQ_ENTRIES ||
1706 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1707 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1708 e->rx_pending < MIN_FL_ENTRIES ||
1709 e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1710 e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1713 if (adapter->flags & FULL_INIT_DONE)
1716 q = &adapter->params.sge.qset[pi->first_qset];
1717 for (i = 0; i < pi->nqsets; ++i, ++q) {
1718 q->rspq_size = e->rx_mini_pending;
1719 q->fl_size = e->rx_pending;
1720 q->jumbo_size = e->rx_jumbo_pending;
1721 q->txq_size[0] = e->tx_pending;
1722 q->txq_size[1] = e->tx_pending;
1723 q->txq_size[2] = e->tx_pending;
1728 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1730 struct port_info *pi = netdev_priv(dev);
1731 struct adapter *adapter = pi->adapter;
1732 struct qset_params *qsp = &adapter->params.sge.qset[0];
1733 struct sge_qset *qs = &adapter->sge.qs[0];
1735 if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1738 qsp->coalesce_usecs = c->rx_coalesce_usecs;
1739 t3_update_qset_coalesce(qs, qsp);
1743 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1745 struct port_info *pi = netdev_priv(dev);
1746 struct adapter *adapter = pi->adapter;
1747 struct qset_params *q = adapter->params.sge.qset;
1749 c->rx_coalesce_usecs = q->coalesce_usecs;
1753 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1756 struct port_info *pi = netdev_priv(dev);
1757 struct adapter *adapter = pi->adapter;
1760 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1764 e->magic = EEPROM_MAGIC;
1765 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1766 err = t3_seeprom_read(adapter, i, (__le32 *) & buf[i]);
1769 memcpy(data, buf + e->offset, e->len);
1774 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1777 struct port_info *pi = netdev_priv(dev);
1778 struct adapter *adapter = pi->adapter;
1779 u32 aligned_offset, aligned_len;
1784 if (eeprom->magic != EEPROM_MAGIC)
1787 aligned_offset = eeprom->offset & ~3;
1788 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1790 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1791 buf = kmalloc(aligned_len, GFP_KERNEL);
1794 err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
1795 if (!err && aligned_len > 4)
1796 err = t3_seeprom_read(adapter,
1797 aligned_offset + aligned_len - 4,
1798 (__le32 *) & buf[aligned_len - 4]);
1801 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1805 err = t3_seeprom_wp(adapter, 0);
1809 for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
1810 err = t3_seeprom_write(adapter, aligned_offset, *p);
1811 aligned_offset += 4;
1815 err = t3_seeprom_wp(adapter, 1);
1822 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1826 memset(&wol->sopass, 0, sizeof(wol->sopass));
1829 static int cxgb3_set_flags(struct net_device *dev, u32 data)
1831 struct port_info *pi = netdev_priv(dev);
1834 if (data & ETH_FLAG_LRO) {
1835 if (!(pi->rx_offload & T3_RX_CSUM))
1838 pi->rx_offload |= T3_LRO;
1839 for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++)
1840 set_qset_lro(dev, i, 1);
1843 pi->rx_offload &= ~T3_LRO;
1844 for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++)
1845 set_qset_lro(dev, i, 0);
1851 static const struct ethtool_ops cxgb_ethtool_ops = {
1852 .get_settings = get_settings,
1853 .set_settings = set_settings,
1854 .get_drvinfo = get_drvinfo,
1855 .get_msglevel = get_msglevel,
1856 .set_msglevel = set_msglevel,
1857 .get_ringparam = get_sge_param,
1858 .set_ringparam = set_sge_param,
1859 .get_coalesce = get_coalesce,
1860 .set_coalesce = set_coalesce,
1861 .get_eeprom_len = get_eeprom_len,
1862 .get_eeprom = get_eeprom,
1863 .set_eeprom = set_eeprom,
1864 .get_pauseparam = get_pauseparam,
1865 .set_pauseparam = set_pauseparam,
1866 .get_rx_csum = get_rx_csum,
1867 .set_rx_csum = set_rx_csum,
1868 .set_tx_csum = ethtool_op_set_tx_csum,
1869 .set_sg = ethtool_op_set_sg,
1870 .get_link = ethtool_op_get_link,
1871 .get_strings = get_strings,
1872 .phys_id = cxgb3_phys_id,
1873 .nway_reset = restart_autoneg,
1874 .get_sset_count = get_sset_count,
1875 .get_ethtool_stats = get_stats,
1876 .get_regs_len = get_regs_len,
1877 .get_regs = get_regs,
1879 .set_tso = ethtool_op_set_tso,
1880 .get_flags = ethtool_op_get_flags,
1881 .set_flags = cxgb3_set_flags,
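
/* A negative value means "leave unchanged"; otherwise require lo <= val <= hi. */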
1884 static int in_range(int val, int lo, int hi)
1886 return val < 0 || (val <= hi && val >= lo);
1889 static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1891 struct port_info *pi = netdev_priv(dev);
1892 struct adapter *adapter = pi->adapter;
1896 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1900 case CHELSIO_SET_QSET_PARAMS:{
1902 struct qset_params *q;
1903 struct ch_qset_params t;
1904 int q1 = pi->first_qset;
1905 int nqsets = pi->nqsets;
1907 if (!capable(CAP_NET_ADMIN))
1909 if (copy_from_user(&t, useraddr, sizeof(t)))
1911 if (t.qset_idx >= SGE_QSETS)
1913 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
1914 !in_range(t.cong_thres, 0, 255) ||
1915 !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
1917 !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
1919 !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
1920 MAX_CTRL_TXQ_ENTRIES) ||
1921 !in_range(t.fl_size[0], MIN_FL_ENTRIES,
1923 || !in_range(t.fl_size[1], MIN_FL_ENTRIES,
1924 MAX_RX_JUMBO_BUFFERS)
1925 || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
1929 if ((adapter->flags & FULL_INIT_DONE) && t.lro > 0)
1930 for_each_port(adapter, i) {
1931 pi = adap2pinfo(adapter, i);
1932 if (t.qset_idx >= pi->first_qset &&
1933 t.qset_idx < pi->first_qset + pi->nqsets &&
1934 !(pi->rx_offload & T3_RX_CSUM))
1938 if ((adapter->flags & FULL_INIT_DONE) &&
1939 (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
1940 t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
1941 t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
1942 t.polling >= 0 || t.cong_thres >= 0))
1945 /* Allow setting of any available qset when offload enabled */
1946 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
1948 for_each_port(adapter, i) {
1949 pi = adap2pinfo(adapter, i);
1950 nqsets += pi->first_qset + pi->nqsets;
1954 if (t.qset_idx < q1)
1956 if (t.qset_idx > q1 + nqsets - 1)
1959 q = &adapter->params.sge.qset[t.qset_idx];
1961 if (t.rspq_size >= 0)
1962 q->rspq_size = t.rspq_size;
1963 if (t.fl_size[0] >= 0)
1964 q->fl_size = t.fl_size[0];
1965 if (t.fl_size[1] >= 0)
1966 q->jumbo_size = t.fl_size[1];
1967 if (t.txq_size[0] >= 0)
1968 q->txq_size[0] = t.txq_size[0];
1969 if (t.txq_size[1] >= 0)
1970 q->txq_size[1] = t.txq_size[1];
1971 if (t.txq_size[2] >= 0)
1972 q->txq_size[2] = t.txq_size[2];
1973 if (t.cong_thres >= 0)
1974 q->cong_thres = t.cong_thres;
1975 if (t.intr_lat >= 0) {
1976 struct sge_qset *qs =
1977 &adapter->sge.qs[t.qset_idx];
1979 q->coalesce_usecs = t.intr_lat;
1980 t3_update_qset_coalesce(qs, q);
1982 if (t.polling >= 0) {
1983 if (adapter->flags & USING_MSIX)
1984 q->polling = t.polling;
1986 /* No polling with INTx for T3A */
1987 if (adapter->params.rev == 0 &&
1988 !(adapter->flags & USING_MSI))
1991 for (i = 0; i < SGE_QSETS; i++) {
1992 q = &adapter->params.sge.
1994 q->polling = t.polling;
1999 set_qset_lro(dev, t.qset_idx, t.lro);
2003 case CHELSIO_GET_QSET_PARAMS:{
2004 struct qset_params *q;
2005 struct ch_qset_params t;
2006 int q1 = pi->first_qset;
2007 int nqsets = pi->nqsets;
2010 if (copy_from_user(&t, useraddr, sizeof(t)))
2013 /* Display qsets for all ports when offload enabled */
2014 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2016 for_each_port(adapter, i) {
2017 pi = adap2pinfo(adapter, i);
2018 nqsets = pi->first_qset + pi->nqsets;
2022 if (t.qset_idx >= nqsets)
2025 q = &adapter->params.sge.qset[q1 + t.qset_idx];
2026 t.rspq_size = q->rspq_size;
2027 t.txq_size[0] = q->txq_size[0];
2028 t.txq_size[1] = q->txq_size[1];
2029 t.txq_size[2] = q->txq_size[2];
2030 t.fl_size[0] = q->fl_size;
2031 t.fl_size[1] = q->jumbo_size;
2032 t.polling = q->polling;
2034 t.intr_lat = q->coalesce_usecs;
2035 t.cong_thres = q->cong_thres;
2038 if (adapter->flags & USING_MSIX)
2039 t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
2041 t.vector = adapter->pdev->irq;
2043 if (copy_to_user(useraddr, &t, sizeof(t)))
2047 case CHELSIO_SET_QSET_NUM:{
2048 struct ch_reg edata;
2049 unsigned int i, first_qset = 0, other_qsets = 0;
2051 if (!capable(CAP_NET_ADMIN))
2053 if (adapter->flags & FULL_INIT_DONE)
2055 if (copy_from_user(&edata, useraddr, sizeof(edata)))
2057 if (edata.val < 1 ||
2058 (edata.val > 1 && !(adapter->flags & USING_MSIX)))
2061 for_each_port(adapter, i)
2062 if (adapter->port[i] && adapter->port[i] != dev)
2063 other_qsets += adap2pinfo(adapter, i)->nqsets;
2065 if (edata.val + other_qsets > SGE_QSETS)
2068 pi->nqsets = edata.val;
2070 for_each_port(adapter, i)
2071 if (adapter->port[i]) {
2072 pi = adap2pinfo(adapter, i);
2073 pi->first_qset = first_qset;
2074 first_qset += pi->nqsets;
2078 case CHELSIO_GET_QSET_NUM:{
2079 struct ch_reg edata;
2081 edata.cmd = CHELSIO_GET_QSET_NUM;
2082 edata.val = pi->nqsets;
2083 if (copy_to_user(useraddr, &edata, sizeof(edata)))
2087 case CHELSIO_LOAD_FW:{
2089 struct ch_mem_range t;
2091 if (!capable(CAP_SYS_RAWIO))
2093 if (copy_from_user(&t, useraddr, sizeof(t)))
2095 /* Check t.len sanity ? */
2096 fw_data = kmalloc(t.len, GFP_KERNEL);
2101 (fw_data, useraddr + sizeof(t), t.len)) {
2106 ret = t3_load_fw(adapter, fw_data, t.len);
2112 case CHELSIO_SETMTUTAB:{
2116 if (!is_offload(adapter))
2118 if (!capable(CAP_NET_ADMIN))
2120 if (offload_running(adapter))
2122 if (copy_from_user(&m, useraddr, sizeof(m)))
2124 if (m.nmtus != NMTUS)
2126 if (m.mtus[0] < 81) /* accommodate SACK */
2129 /* MTUs must be in ascending order */
2130 for (i = 1; i < NMTUS; ++i)
2131 if (m.mtus[i] < m.mtus[i - 1])
2134 memcpy(adapter->params.mtus, m.mtus,
2135 sizeof(adapter->params.mtus));
2138 case CHELSIO_GET_PM:{
2139 struct tp_params *p = &adapter->params.tp;
2140 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
2142 if (!is_offload(adapter))
2144 m.tx_pg_sz = p->tx_pg_size;
2145 m.tx_num_pg = p->tx_num_pgs;
2146 m.rx_pg_sz = p->rx_pg_size;
2147 m.rx_num_pg = p->rx_num_pgs;
2148 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
2149 if (copy_to_user(useraddr, &m, sizeof(m)))
2153 case CHELSIO_SET_PM:{
2155 struct tp_params *p = &adapter->params.tp;
2157 if (!is_offload(adapter))
2159 if (!capable(CAP_NET_ADMIN))
2161 if (adapter->flags & FULL_INIT_DONE)
2163 if (copy_from_user(&m, useraddr, sizeof(m)))
2165 if (!is_power_of_2(m.rx_pg_sz) ||
2166 !is_power_of_2(m.tx_pg_sz))
2167 return -EINVAL; /* not power of 2 */
2168 if (!(m.rx_pg_sz & 0x14000))
2169 return -EINVAL; /* not 16KB or 64KB */
2170 if (!(m.tx_pg_sz & 0x1554000))
2172 if (m.tx_num_pg == -1)
2173 m.tx_num_pg = p->tx_num_pgs;
2174 if (m.rx_num_pg == -1)
2175 m.rx_num_pg = p->rx_num_pgs;
2176 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
2178 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
2179 m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
2181 p->rx_pg_size = m.rx_pg_sz;
2182 p->tx_pg_size = m.tx_pg_sz;
2183 p->rx_num_pgs = m.rx_num_pg;
2184 p->tx_num_pgs = m.tx_num_pg;
2187 case CHELSIO_GET_MEM:{
2188 struct ch_mem_range t;
2192 if (!is_offload(adapter))
2194 if (!(adapter->flags & FULL_INIT_DONE))
2195 return -EIO; /* need the memory controllers */
2196 if (copy_from_user(&t, useraddr, sizeof(t)))
2198 if ((t.addr & 7) || (t.len & 7))
2200 if (t.mem_id == MEM_CM)
2202 else if (t.mem_id == MEM_PMRX)
2203 mem = &adapter->pmrx;
2204 else if (t.mem_id == MEM_PMTX)
2205 mem = &adapter->pmtx;
2211 * bits 0..9: chip version
2212 * bits 10..15: chip revision
2214 t.version = 3 | (adapter->params.rev << 10);
2215 if (copy_to_user(useraddr, &t, sizeof(t)))
2219 * Read 256 bytes at a time as len can be large and we don't
2220 * want to use huge intermediate buffers.
2222 useraddr += sizeof(t); /* advance to start of buffer */
2224 unsigned int chunk =
2225 min_t(unsigned int, t.len, sizeof(buf));
2228 t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
2232 if (copy_to_user(useraddr, buf, chunk))
2240 case CHELSIO_SET_TRACE_FILTER:{
2242 const struct trace_params *tp;
2244 if (!capable(CAP_NET_ADMIN))
2246 if (!offload_running(adapter))
2248 if (copy_from_user(&t, useraddr, sizeof(t)))
2251 tp = (const struct trace_params *)&t.sip;
2253 t3_config_trace_filter(adapter, tp, 0,
2257 t3_config_trace_filter(adapter, tp, 1,
2268 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2270 struct mii_ioctl_data *data = if_mii(req);
2271 struct port_info *pi = netdev_priv(dev);
2272 struct adapter *adapter = pi->adapter;
2277 data->phy_id = pi->phy.addr;
2281 struct cphy *phy = &pi->phy;
2283 if (!phy->mdio_read)
2285 if (is_10G(adapter)) {
2286 mmd = data->phy_id >> 8;
2289 else if (mmd > MDIO_DEV_VEND2)
2293 phy->mdio_read(adapter, data->phy_id & 0x1f,
2294 mmd, data->reg_num, &val);
2297 phy->mdio_read(adapter, data->phy_id & 0x1f,
2298 0, data->reg_num & 0x1f,
2301 data->val_out = val;
2305 struct cphy *phy = &pi->phy;
2307 if (!capable(CAP_NET_ADMIN))
2309 if (!phy->mdio_write)
2311 if (is_10G(adapter)) {
2312 mmd = data->phy_id >> 8;
2315 else if (mmd > MDIO_DEV_VEND2)
2319 phy->mdio_write(adapter,
2320 data->phy_id & 0x1f, mmd,
2325 phy->mdio_write(adapter,
2326 data->phy_id & 0x1f, 0,
2327 data->reg_num & 0x1f,
2332 return cxgb_extension_ioctl(dev, req->ifr_data);
2339 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2341 struct port_info *pi = netdev_priv(dev);
2342 struct adapter *adapter = pi->adapter;
2345 if (new_mtu < 81) /* accommodate SACK */
2347 if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2350 init_port_mtus(adapter);
2351 if (adapter->params.rev == 0 && offload_running(adapter))
2352 t3_load_mtus(adapter, adapter->params.mtus,
2353 adapter->params.a_wnd, adapter->params.b_wnd,
2354 adapter->port[0]->mtu);
2358 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2360 struct port_info *pi = netdev_priv(dev);
2361 struct adapter *adapter = pi->adapter;
2362 struct sockaddr *addr = p;
2364 if (!is_valid_ether_addr(addr->sa_data))
2367 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2368 t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2369 if (offload_running(adapter))
2370 write_smt_entry(adapter, pi->port_id);
2375 * t3_synchronize_rx - wait for current Rx processing on a port to complete
2376 * @adap: the adapter
2379 * Ensures that current Rx processing on any of the queues associated with
2380 * the given port completes before returning. We do this by acquiring and
2381 * releasing the locks of the response queues associated with the port.
2383 static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2387 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
2388 struct sge_rspq *q = &adap->sge.qs[i].rspq;
2390 spin_lock_irq(&q->lock);
2391 spin_unlock_irq(&q->lock);
2395 static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2397 struct port_info *pi = netdev_priv(dev);
2398 struct adapter *adapter = pi->adapter;
2401 if (adapter->params.rev > 0)
2402 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2404 /* single control for all ports */
2405 unsigned int i, have_vlans = 0;
2406 for_each_port(adapter, i)
2407 have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2409 t3_set_vlan_accel(adapter, 1, have_vlans);
2411 t3_synchronize_rx(adapter, pi);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int qidx;

	for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
		struct sge_qset *qs = &adapter->sge.qs[qidx];
		void *source;

		if (adapter->flags & USING_MSIX)
			source = qs;
		else
			source = adapter;

		t3_intr_handler(adapter, qs->rspq.polling) (0, source);
	}
}
#endif

/*
 * Periodic accumulation of MAC statistics.
 */
static void mac_stats_update(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		if (netif_running(dev)) {
			spin_lock(&adapter->stats_lock);
			t3_mac_update_stats(&p->mac);
			spin_unlock(&adapter->stats_lock);
		}
	}
}

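/* Poll link state for PHYs that cannot generate interrupts. */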
static void check_link_status(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev))
			t3_link_changed(adapter, i);
	}
}

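/*
 * T3B2 MAC watchdog.  A return of 1 from t3b2_mac_watchdog_task() means the
 * MAC was toggled to unstick it; 2 means it had to be reset, in which case
 * the MTU, address, Rx mode and link are reprogrammed from scratch.
 */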
static void check_t3b2_mac(struct adapter *adapter)
{
	int i;

	if (!rtnl_trylock())	/* synchronize with ifdown */
		return;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);
		int status;

		if (!netif_running(dev))
			continue;

		status = 0;
		if (netif_running(dev) && netif_carrier_ok(dev))
			status = t3b2_mac_watchdog_task(&p->mac);
		if (status == 1)
			p->mac.stats.num_toggled++;
		else if (status == 2) {
			struct cmac *mac = &p->mac;

			t3_mac_set_mtu(mac, dev->mtu);
			t3_mac_set_address(mac, 0, dev->dev_addr);
			cxgb_set_rxmode(dev);
			t3_link_start(&p->phy, mac, &p->link_config);
			t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
			t3_port_intr_enable(adapter, p->port_id);
			p->mac.stats.num_resets++;
		}
	}
	rtnl_unlock();
}

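/*
 * Periodic housekeeping: poll link state where needed, accumulate MAC
 * statistics, run the T3B2 MAC watchdog, and reschedule itself while any
 * port is still up.
 */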
static void t3_adap_check_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       adap_check_task.work);
	const struct adapter_params *p = &adapter->params;

	adapter->check_task_cnt++;

	/* Check link status for PHYs without interrupts */
	if (p->linkpoll_period)
		check_link_status(adapter);

	/* Accumulate MAC stats if needed */
	if (!p->linkpoll_period ||
	    (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
	    p->stats_update_period) {
		mac_stats_update(adapter);
		adapter->check_task_cnt = 0;
	}

	if (p->rev == T3_REV_B2)
		check_t3b2_mac(adapter);

	/* Schedule the next check update if any port is active. */
	spin_lock_irq(&adapter->work_lock);
	if (adapter->open_device_map & PORT_MASK)
		schedule_chk_task(adapter);
	spin_unlock_irq(&adapter->work_lock);
}

/*
 * Processes external (PHY) interrupts in process context.
 */
static void ext_intr_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       ext_intr_handler_task);

	t3_phy_intr_handler(adapter);

	/* Now reenable external interrupts */
	spin_lock_irq(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask |= F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
	}
	spin_unlock_irq(&adapter->work_lock);
}

/*
 * Interrupt-context handler for external (PHY) interrupts.
 */
void t3_os_ext_intr_handler(struct adapter *adapter)
{
	/*
	 * Schedule a task to handle external interrupts as they may be slow
	 * and we use a mutex to protect MDIO registers.  We disable PHY
	 * interrupts in the meantime and let the task reenable them when
	 * it's done.
	 */
	spin_lock(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask &= ~F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
		queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
	}
	spin_unlock(&adapter->work_lock);
}

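/*
 * Quiesce the adapter after a fatal or PCI error: close all running ports,
 * shut down the offload side, stop the SGE timers and, if requested, reset
 * the chip before disabling the PCI function.
 */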
static int t3_adapter_error(struct adapter *adapter, int reset)
{
	int i, ret = 0;

	/* Stop all ports */
	for_each_port(adapter, i) {
		struct net_device *netdev = adapter->port[i];

		if (netif_running(netdev))
			cxgb_close(netdev);
	}

	if (is_offload(adapter) &&
	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		offload_close(&adapter->tdev);

	/* Stop SGE timers */
	t3_stop_sge_timers(adapter);

	adapter->flags &= ~FULL_INIT_DONE;

	if (reset)
		ret = t3_reset_adapter(adapter);

	pci_disable_device(adapter->pdev);
	return ret;
}

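/*
 * Undo t3_adapter_error(): re-enable and restore the PCI function, release
 * stale SGE resources and rerun the adapter prep/replay sequence.
 */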
static int t3_reenable_adapter(struct adapter *adapter)
{
	if (pci_enable_device(adapter->pdev)) {
		dev_err(&adapter->pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto err;
	}
	pci_set_master(adapter->pdev);
	pci_restore_state(adapter->pdev);

	/* Free sge resources */
	t3_free_sge_resources(adapter);

	if (t3_replay_prep_adapter(adapter))
		goto err;

	return 0;
err:
	return -1;
}

static void t3_resume_ports(struct adapter *adapter)
{
	int i;

	/* Restart the ports */
	for_each_port(adapter, i) {
		struct net_device *netdev = adapter->port[i];

		if (netif_running(netdev)) {
			if (cxgb_open(netdev)) {
				dev_err(&adapter->pdev->dev,
					"can't bring device back up"
					" after reset\n");
				continue;
			}
		}
	}
}

/*
 * Processes a fatal error.
 * Bring the ports down, reset the chip, bring the ports back up.
 */
static void fatal_error_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       fatal_error_handler_task);
	int err = 0;

	rtnl_lock();
	err = t3_adapter_error(adapter, 1);
	if (!err)
		err = t3_reenable_adapter(adapter);
	if (!err)
		t3_resume_ports(adapter);

	CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
	rtnl_unlock();
}

void t3_fatal_err(struct adapter *adapter)
{
	unsigned int fw_status[4];

	if (adapter->flags & FULL_INIT_DONE) {
		t3_sge_stop(adapter);
		t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
		t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
		t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
		t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);

		spin_lock(&adapter->work_lock);
		t3_intr_disable(adapter);
		queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
		spin_unlock(&adapter->work_lock);
	}
	CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
	if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
		CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
			 fw_status[0], fw_status[1],
			 fw_status[2], fw_status[3]);
}

/**
 * t3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
					     pci_channel_state_t state)
{
	struct adapter *adapter = pci_get_drvdata(pdev);
	int ret;

	ret = t3_adapter_error(adapter, 0);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * t3_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (!t3_reenable_adapter(adapter))
		return PCI_ERS_RESULT_RECOVERED;

	return PCI_ERS_RESULT_DISCONNECT;
}

/**
 * t3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void t3_io_resume(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	t3_resume_ports(adapter);
}

static struct pci_error_handlers t3_err_handler = {
	.error_detected = t3_io_error_detected,
	.slot_reset = t3_io_slot_reset,
	.resume = t3_io_resume,
};

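/*
 * PCI AER recovery proceeds error_detected -> slot_reset -> resume; the
 * handlers above map that onto t3_adapter_error(), t3_reenable_adapter()
 * and t3_resume_ports() respectively.
 */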
/*
 * Set the number of qsets based on the number of CPUs and the number of ports,
 * not to exceed the number of available qsets, assuming there are enough qsets
 * per port in HW.
 */
static void set_nqsets(struct adapter *adap)
{
	int i, j = 0;
	int num_cpus = num_online_cpus();
	int hwports = adap->params.nports;
	int nqsets = SGE_QSETS;

	if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
		if (hwports == 2 &&
		    (hwports * nqsets > SGE_QSETS ||
		     num_cpus >= nqsets / hwports))
			nqsets /= hwports;
		if (nqsets > num_cpus)
			nqsets = num_cpus;
		if (nqsets < 1 || hwports == 4)
			nqsets = 1;
	} else
		nqsets = 1;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = j;
		pi->nqsets = nqsets;
		j = pi->first_qset + nqsets;

		dev_info(&adap->pdev->dev,
			 "Port %d using %d queue sets.\n", i, nqsets);
	}
}

static int __devinit cxgb_enable_msix(struct adapter *adap)
{
	struct msix_entry entries[SGE_QSETS + 1];
	int i, err;

	for (i = 0; i < ARRAY_SIZE(entries); ++i)
		entries[i].entry = i;

	err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
	if (!err) {
		for (i = 0; i < ARRAY_SIZE(entries); ++i)
			adap->msix_info[i].vec = entries[i].vector;
	} else if (err > 0)
		dev_info(&adap->pdev->dev,
			 "only %d MSI-X vectors left, not using MSI-X\n", err);
	return err;
}

static void __devinit print_port_info(struct adapter *adap,
				      const struct adapter_info *ai)
{
	static const char *pci_variant[] = {
		"PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
	};

	int i;
	char buf[80];

	if (is_pcie(adap))
		snprintf(buf, sizeof(buf), "%s x%d",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.width);
	else
		snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.speed, adap->params.pci.width);

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		const struct port_info *pi = netdev_priv(dev);

		if (!test_bit(i, &adap->registered_device_map))
			continue;
		printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
		       dev->name, ai->desc, pi->phy.desc,
		       is_offload(adap) ? "R" : "", adap->params.rev, buf,
		       (adap->flags & USING_MSIX) ? " MSI-X" :
		       (adap->flags & USING_MSI) ? " MSI" : "");
		if (adap->name == dev->name && adap->params.vpd.mclk)
			printk(KERN_INFO
			       "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
			       adap->name, t3_mc7_size(&adap->cm) >> 20,
			       t3_mc7_size(&adap->pmtx) >> 20,
			       t3_mc7_size(&adap->pmrx) >> 20,
			       adap->params.vpd.sn);
	}
}

static const struct net_device_ops cxgb_netdev_ops = {
	.ndo_open		= cxgb_open,
	.ndo_stop		= cxgb_close,
	.ndo_start_xmit		= t3_eth_xmit,
	.ndo_get_stats		= cxgb_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= cxgb_set_rxmode,
	.ndo_do_ioctl		= cxgb_ioctl,
	.ndo_change_mtu		= cxgb_change_mtu,
	.ndo_set_mac_address	= cxgb_set_mac_addr,
	.ndo_vlan_rx_register	= vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cxgb_netpoll,
#endif
};

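/*
 * PCI probe routine: map the device, allocate the adapter and its net
 * devices, register them, and pick the interrupt scheme (MSI-X, MSI or INTx)
 * according to the "msi" module parameter.
 */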
static int __devinit init_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	static int version_printed;

	int i, err, pci_using_dac = 0;
	unsigned long mmio_start, mmio_len;
	const struct adapter_info *ai;
	struct adapter *adapter = NULL;
	struct port_info *pi;

	if (!version_printed) {
		printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
		++version_printed;
	}

	if (!cxgb3_wq) {
		cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
		if (!cxgb3_wq) {
			printk(KERN_ERR DRV_NAME
			       ": cannot initialize work queue\n");
			return -ENOMEM;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_disable_device;
		}
	} else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		goto out_disable_device;
	}

	pci_set_master(pdev);
	pci_save_state(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	ai = t3_get_adapter_info(ent->driver_data);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_disable_device;
	}

	adapter->regs = ioremap_nocache(mmio_start, mmio_len);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->pdev = pdev;
	adapter->name = pci_name(pdev);
	adapter->msg_enable = dflt_msg_enable;
	adapter->mmio_len = mmio_len;

	mutex_init(&adapter->mdio_lock);
	spin_lock_init(&adapter->work_lock);
	spin_lock_init(&adapter->stats_lock);

	INIT_LIST_HEAD(&adapter->adapter_list);
	INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
	INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
	INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);

	for (i = 0; i < ai->nports; ++i) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->rx_offload = T3_RX_CSUM | T3_LRO;
		pi->port_id = i;
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		netdev->irq = pdev->irq;
		netdev->mem_start = mmio_start;
		netdev->mem_end = mmio_start + mmio_len - 1;
		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
		netdev->features |= NETIF_F_LLTX;
		netdev->features |= NETIF_F_LRO;
		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;

		netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
		netdev->netdev_ops = &cxgb_netdev_ops;
		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
	}

	pci_set_drvdata(pdev, adapter);
	if (t3_prep_adapter(adapter, ai, 1) < 0) {
		err = -ENODEV;
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		err = register_netdev(adapter->port[i]);
		if (err)
			dev_warn(&pdev->dev,
				 "cannot register net device %s, skipping\n",
				 adapter->port[i]->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i]->name;

			__set_bit(i, &adapter->registered_device_map);
		}
	}
	if (!adapter->registered_device_map) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}

	/* Driver's ready. Reflect it on LEDs */
	t3_led_ready(adapter);

	if (is_offload(adapter)) {
		__set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
		cxgb3_adapter_ofld(adapter);
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && cxgb_enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	set_nqsets(adapter);

	err = sysfs_create_group(&adapter->port[0]->dev.kobj,
				 &cxgb3_attr_group);

	print_port_info(adapter, ai);
	return 0;

out_free_dev:
	iounmap(adapter->regs);
	for (i = ai->nports - 1; i >= 0; --i)
		if (adapter->port[i])
			free_netdev(adapter->port[i]);

out_free_adapter:
	kfree(adapter);

out_disable_device:
	pci_disable_device(pdev);
out_release_regions:
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void __devexit remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (adapter) {
		int i;

		t3_sge_stop(adapter);
		sysfs_remove_group(&adapter->port[0]->dev.kobj,
				   &cxgb3_attr_group);

		if (is_offload(adapter)) {
			cxgb3_adapter_unofld(adapter);
			if (test_bit(OFFLOAD_DEVMAP_BIT,
				     &adapter->open_device_map))
				offload_close(&adapter->tdev);
		}

		for_each_port(adapter, i)
			if (test_bit(i, &adapter->registered_device_map))
				unregister_netdev(adapter->port[i]);

		t3_stop_sge_timers(adapter);
		t3_free_sge_resources(adapter);
		cxgb_disable_msi(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i])
				free_netdev(adapter->port[i]);

		iounmap(adapter->regs);
		kfree(adapter);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

static struct pci_driver driver = {
	.name = DRV_NAME,
	.id_table = cxgb3_pci_tbl,
	.probe = init_one,
	.remove = __devexit_p(remove_one),
	.err_handler = &t3_err_handler,
};

static int __init cxgb3_init_module(void)
{
	int ret;

	cxgb3_offload_init();

	ret = pci_register_driver(&driver);
	return ret;
}

static void __exit cxgb3_cleanup_module(void)
{
	pci_unregister_driver(&driver);
	if (cxgb3_wq)
		destroy_workqueue(cxgb3_wq);
}

module_init(cxgb3_init_module);
module_exit(cxgb3_cleanup_module);