/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
14 * - Redistributions of source code must retain the above
 * copyright notice, this list of conditions and the following
 * disclaimer.
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
32 #include <linux/module.h>
33 #include <linux/moduleparam.h>
34 #include <linux/init.h>
35 #include <linux/pci.h>
36 #include <linux/dma-mapping.h>
37 #include <linux/netdevice.h>
38 #include <linux/etherdevice.h>
39 #include <linux/if_vlan.h>
40 #include <linux/mii.h>
41 #include <linux/sockios.h>
42 #include <linux/workqueue.h>
43 #include <linux/proc_fs.h>
44 #include <linux/rtnetlink.h>
45 #include <linux/firmware.h>
46 #include <linux/log2.h>
47 #include <asm/uaccess.h>
50 #include "cxgb3_ioctl.h"
52 #include "cxgb3_offload.h"
55 #include "cxgb3_ctl_defs.h"
57 #include "firmware_exports.h"
enum {
	MAX_TXQ_ENTRIES = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES = 16384,
	MAX_RX_BUFFERS = 16384,
	MAX_RX_JUMBO_BUFFERS = 16384,
	MIN_TXQ_ENTRIES = 4,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES = 32,
	MIN_FL_ENTRIES = 32
};
71 #define PORT_MASK ((1 << MAX_NPORTS) - 1)
73 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
74 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
75 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
77 #define EEPROM_MAGIC 0x38E2F10C
79 #define CH_DEVICE(devid, idx) \
80 { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
82 static const struct pci_device_id cxgb3_pci_tbl[] = {
83 CH_DEVICE(0x20, 0), /* PE9000 */
84 CH_DEVICE(0x21, 1), /* T302E */
85 CH_DEVICE(0x22, 2), /* T310E */
86 CH_DEVICE(0x23, 3), /* T320X */
87 CH_DEVICE(0x24, 1), /* T302X */
88 CH_DEVICE(0x25, 3), /* T320E */
89 CH_DEVICE(0x26, 2), /* T310X */
90 CH_DEVICE(0x30, 2), /* T3B10 */
91 CH_DEVICE(0x31, 3), /* T3B20 */
92 CH_DEVICE(0x32, 1), /* T3B02 */
96 MODULE_DESCRIPTION(DRV_DESC);
97 MODULE_AUTHOR("Chelsio Communications");
98 MODULE_LICENSE("Dual BSD/GPL");
99 MODULE_VERSION(DRV_VERSION);
100 MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
102 static int dflt_msg_enable = DFLT_MSG_ENABLE;
104 module_param(dflt_msg_enable, int, 0644);
105 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
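/*
 * dflt_msg_enable sets the initial value of the adapter's msg_enable
 * bitmap; it can later be changed per adapter with ethtool's msglvl
 * option, which maps to the get_msglevel()/set_msglevel() handlers below.
 */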
108 * The driver uses the best interrupt scheme available on a platform in the
109 * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
110 * of these schemes the driver may consider as follows:
112 * msi = 2: choose from among all three options
113 * msi = 1: only consider MSI and pin interrupts
114 * msi = 0: force pin interrupts
static int msi = 2;

module_param(msi, int, 0644);
119 MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
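/*
 * As with the other parameters, msi can be given at load time, e.g.
 * "modprobe cxgb3 msi=0" to force legacy pin interrupts.  It is also
 * visible under /sys/module/cxgb3/parameters/ (mode 0644), but it is
 * only consulted when an adapter is probed.
 */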
122 * The driver enables offload as a default.
123 * To disable it, use ofld_disable = 1.
126 static int ofld_disable = 0;
128 module_param(ofld_disable, int, 0644);
129 MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
132 * We have work elements that we need to cancel when an interface is taken
133 * down. Normally the work elements would be executed by keventd but that
134 * can deadlock because of linkwatch. If our close method takes the rtnl
135 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
136 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
137 * for our work to complete. Get our own work queue to solve this.
139 static struct workqueue_struct *cxgb3_wq;
142 * link_report - show link status and link speed/duplex
 * @dev: the net device whose link settings are to be reported
145 * Shows the link status, speed, and duplex of a port.
147 static void link_report(struct net_device *dev)
149 if (!netif_carrier_ok(dev))
150 printk(KERN_INFO "%s: link down\n", dev->name);
152 const char *s = "10Mbps";
153 const struct port_info *p = netdev_priv(dev);
155 switch (p->link_config.speed) {
167 printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
168 p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
173 * t3_os_link_changed - handle link status changes
174 * @adapter: the adapter associated with the link change
 * @port_id: the port index whose link status has changed
176 * @link_stat: the new status of the link
177 * @speed: the new speed setting
178 * @duplex: the new duplex setting
179 * @pause: the new flow-control setting
181 * This is the OS-dependent handler for link status changes. The OS
182 * neutral handler takes care of most of the processing for these events,
183 * then calls this handler for any OS-specific processing.
185 void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
186 int speed, int duplex, int pause)
188 struct net_device *dev = adapter->port[port_id];
189 struct port_info *pi = netdev_priv(dev);
190 struct cmac *mac = &pi->mac;
192 /* Skip changes from disabled ports. */
193 if (!netif_running(dev))
196 if (link_stat != netif_carrier_ok(dev)) {
198 t3_mac_enable(mac, MAC_DIRECTION_RX);
199 netif_carrier_on(dev);
201 netif_carrier_off(dev);
202 pi->phy.ops->power_down(&pi->phy, 1);
203 t3_mac_disable(mac, MAC_DIRECTION_RX);
204 t3_link_start(&pi->phy, mac, &pi->link_config);
/**
 * t3_os_phymod_changed - handle PHY module changes
 * @adap: the adapter whose PHY module changed
 * @port_id: the port index of the PHY that reported the module change
 *
 * This is the OS-dependent handler for PHY module changes.  It is
 * invoked when a PHY module is removed or inserted, for any OS-specific
 * processing.
 */
220 void t3_os_phymod_changed(struct adapter *adap, int port_id)
222 static const char *mod_str[] = {
223 NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
226 const struct net_device *dev = adap->port[port_id];
227 const struct port_info *pi = netdev_priv(dev);
229 if (pi->phy.modtype == phy_modtype_none)
230 printk(KERN_INFO "%s: PHY module unplugged\n", dev->name);
232 printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name,
233 mod_str[pi->phy.modtype]);
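/*
 * Push the net device's current Rx mode (promiscuity and multicast
 * filters) down to the port's MAC.
 */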
236 static void cxgb_set_rxmode(struct net_device *dev)
238 struct t3_rx_mode rm;
239 struct port_info *pi = netdev_priv(dev);
241 init_rx_mode(&rm, dev, dev->mc_list);
242 t3_mac_set_rx_mode(&pi->mac, &rm);
246 * link_start - enable a port
247 * @dev: the device to enable
249 * Performs the MAC and PHY actions needed to enable a port.
251 static void link_start(struct net_device *dev)
253 struct t3_rx_mode rm;
254 struct port_info *pi = netdev_priv(dev);
255 struct cmac *mac = &pi->mac;
257 init_rx_mode(&rm, dev, dev->mc_list);
259 t3_mac_set_mtu(mac, dev->mtu);
260 t3_mac_set_address(mac, 0, dev->dev_addr);
261 t3_mac_set_rx_mode(mac, &rm);
262 t3_link_start(&pi->phy, mac, &pi->link_config);
263 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
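/*
 * Release whichever MSI/MSI-X capability the adapter currently holds and
 * clear the corresponding USING_* flag.
 */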
266 static inline void cxgb_disable_msi(struct adapter *adapter)
268 if (adapter->flags & USING_MSIX) {
269 pci_disable_msix(adapter->pdev);
270 adapter->flags &= ~USING_MSIX;
271 } else if (adapter->flags & USING_MSI) {
272 pci_disable_msi(adapter->pdev);
273 adapter->flags &= ~USING_MSI;
278 * Interrupt handler for asynchronous events used with MSI-X.
280 static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
282 t3_slow_intr_handler(cookie);
287 * Name the MSI-X interrupts.
289 static void name_msix_vecs(struct adapter *adap)
291 int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
293 snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
294 adap->msix_info[0].desc[n] = 0;
296 for_each_port(adap, j) {
297 struct net_device *d = adap->port[j];
298 const struct port_info *pi = netdev_priv(d);
300 for (i = 0; i < pi->nqsets; i++, msi_idx++) {
301 snprintf(adap->msix_info[msi_idx].desc, n,
302 "%s-%d", d->name, pi->first_qset + i);
303 adap->msix_info[msi_idx].desc[n] = 0;
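/*
 * Request one MSI-X vector per SGE queue set.  Vector 0 is reserved for
 * slow-path/async events (see t3_async_intr_handler() above), so the data
 * queues start at msix_info[1].
 */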
308 static int request_msix_data_irqs(struct adapter *adap)
310 int i, j, err, qidx = 0;
312 for_each_port(adap, i) {
313 int nqsets = adap2pinfo(adap, i)->nqsets;
315 for (j = 0; j < nqsets; ++j) {
316 err = request_irq(adap->msix_info[qidx + 1].vec,
317 t3_intr_handler(adap,
320 adap->msix_info[qidx + 1].desc,
321 &adap->sge.qs[qidx]);
324 free_irq(adap->msix_info[qidx + 1].vec,
325 &adap->sge.qs[qidx]);
334 static void free_irq_resources(struct adapter *adapter)
336 if (adapter->flags & USING_MSIX) {
339 free_irq(adapter->msix_info[0].vec, adapter);
340 for_each_port(adapter, i)
341 n += adap2pinfo(adapter, i)->nqsets;
343 for (i = 0; i < n; ++i)
344 free_irq(adapter->msix_info[i + 1].vec,
345 &adapter->sge.qs[i]);
347 free_irq(adapter->pdev->irq, adapter);
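/*
 * Poll response queue 0 until it has seen 'n' more offload packets than
 * 'init_cnt'; init_tp_parity() uses this to wait for the replies to its
 * management commands.
 */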
350 static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
355 while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
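/*
 * init_tp_parity() appears intended to initialize the TP's table parity by
 * writing a benign entry to every SMT, L2T and routing-table slot;
 * cxgb_up() invokes it on rev-C and later parts before enabling offload.
 */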
363 static int init_tp_parity(struct adapter *adap)
367 struct cpl_set_tcb_field *greq;
368 unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;
370 t3_tp_set_offload_mode(adap, 1);
372 for (i = 0; i < 16; i++) {
373 struct cpl_smt_write_req *req;
375 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
376 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
377 memset(req, 0, sizeof(*req));
378 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
379 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
381 t3_mgmt_tx(adap, skb);
384 for (i = 0; i < 2048; i++) {
385 struct cpl_l2t_write_req *req;
387 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
388 req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
389 memset(req, 0, sizeof(*req));
390 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
391 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
392 req->params = htonl(V_L2T_W_IDX(i));
393 t3_mgmt_tx(adap, skb);
396 for (i = 0; i < 2048; i++) {
397 struct cpl_rte_write_req *req;
399 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
400 req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
401 memset(req, 0, sizeof(*req));
402 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
403 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
404 req->l2t_idx = htonl(V_L2T_W_IDX(i));
405 t3_mgmt_tx(adap, skb);
408 skb = alloc_skb(sizeof(*greq), GFP_KERNEL | __GFP_NOFAIL);
409 greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
410 memset(greq, 0, sizeof(*greq));
411 greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
412 OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
413 greq->mask = cpu_to_be64(1);
414 t3_mgmt_tx(adap, skb);
416 i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
417 t3_tp_set_offload_mode(adap, 0);
422 * setup_rss - configure RSS
425 * Sets up RSS to distribute packets to multiple receive queues. We
426 * configure the RSS CPU lookup table to distribute to the number of HW
427 * receive queues, and the response queue lookup table to narrow that
428 * down to the response queues actually configured for each port.
429 * We always configure the RSS mapping for two ports since the mapping
430 * table has plenty of entries.
432 static void setup_rss(struct adapter *adap)
435 unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
436 unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
437 u8 cpus[SGE_QSETS + 1];
438 u16 rspq_map[RSS_TABLE_SIZE];
440 for (i = 0; i < SGE_QSETS; ++i)
442 cpus[SGE_QSETS] = 0xff; /* terminator */
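/*
 * The lower half of the lookup table maps to port 0's queue sets
 * (0..nq0-1) and the upper half to port 1's (nq0..nq0+nq1-1), so each
 * port's traffic is spread only across its own queues.
 */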
444 for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
445 rspq_map[i] = i % nq0;
446 rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
449 t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
450 F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
451 V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
454 static void init_napi(struct adapter *adap)
458 for (i = 0; i < SGE_QSETS; i++) {
459 struct sge_qset *qs = &adap->sge.qs[i];
462 netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
467 * netif_napi_add() can be called only once per napi_struct because it
 * adds each new napi_struct to a list.  We make a note of it (NAPI_INIT)
 * so it is not called a second time, e.g., during EEH recovery.
 */
471 adap->flags |= NAPI_INIT;
475 * Wait until all NAPI handlers are descheduled. This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
479 static void quiesce_rx(struct adapter *adap)
483 for (i = 0; i < SGE_QSETS; i++)
484 if (adap->sge.qs[i].adap)
485 napi_disable(&adap->sge.qs[i].napi);
488 static void enable_all_napi(struct adapter *adap)
491 for (i = 0; i < SGE_QSETS; i++)
492 if (adap->sge.qs[i].adap)
493 napi_enable(&adap->sge.qs[i].napi);
497 * set_qset_lro - Turn a queue set's LRO capability on and off
498 * @dev: the device the qset is attached to
499 * @qset_idx: the queue set index
500 * @val: the LRO switch
502 * Sets LRO on or off for a particular queue set.
 * The device's feature flags are updated to reflect the LRO
 * capability when all queues belonging to the device are
 * configured the same way.
 */
507 static void set_qset_lro(struct net_device *dev, int qset_idx, int val)
509 struct port_info *pi = netdev_priv(dev);
510 struct adapter *adapter = pi->adapter;
512 adapter->params.sge.qset[qset_idx].lro = !!val;
513 adapter->sge.qs[qset_idx].lro_enabled = !!val;
517 * setup_sge_qsets - configure SGE Tx/Rx/response queues
520 * Determines how many sets of SGE queues to use and initializes them.
521 * We support multiple queue sets per port if we have MSI-X, otherwise
522 * just one queue set per port.
524 static int setup_sge_qsets(struct adapter *adap)
526 int i, j, err, irq_idx = 0, qset_idx = 0;
527 unsigned int ntxq = SGE_TXQ_PER_SET;
529 if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
532 for_each_port(adap, i) {
533 struct net_device *dev = adap->port[i];
534 struct port_info *pi = netdev_priv(dev);
536 pi->qs = &adap->sge.qs[pi->first_qset];
537 for (j = pi->first_qset; j < pi->first_qset + pi->nqsets;
539 set_qset_lro(dev, qset_idx, pi->rx_offload & T3_LRO);
540 err = t3_sge_alloc_qset(adap, qset_idx, 1,
541 (adap->flags & USING_MSIX) ? qset_idx + 1 :
543 &adap->params.sge.qset[qset_idx], ntxq, dev,
544 netdev_get_tx_queue(dev, j));
546 t3_stop_sge_timers(adap);
547 t3_free_sge_resources(adap);
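/*
 * Generic sysfs helpers: attr_show() formats a value for user space and
 * attr_store() parses and range-checks a value written by a CAP_NET_ADMIN
 * user.  The cam_size/nfilters/nservers attributes below are built on top
 * of them via the CXGB3_SHOW and CXGB3_ATTR_* macros.
 */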
556 static ssize_t attr_show(struct device *d, char *buf,
557 ssize_t(*format) (struct net_device *, char *))
561 /* Synchronize with ioctls that may shut down the device */
563 len = (*format) (to_net_dev(d), buf);
568 static ssize_t attr_store(struct device *d,
569 const char *buf, size_t len,
570 ssize_t(*set) (struct net_device *, unsigned int),
571 unsigned int min_val, unsigned int max_val)
577 if (!capable(CAP_NET_ADMIN))
580 val = simple_strtoul(buf, &endp, 0);
581 if (endp == buf || val < min_val || val > max_val)
585 ret = (*set) (to_net_dev(d), val);
592 #define CXGB3_SHOW(name, val_expr) \
593 static ssize_t format_##name(struct net_device *dev, char *buf) \
595 struct port_info *pi = netdev_priv(dev); \
596 struct adapter *adap = pi->adapter; \
597 return sprintf(buf, "%u\n", val_expr); \
599 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
602 return attr_show(d, buf, format_##name); \
605 static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
607 struct port_info *pi = netdev_priv(dev);
608 struct adapter *adap = pi->adapter;
609 int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;
611 if (adap->flags & FULL_INIT_DONE)
613 if (val && adap->params.rev == 0)
615 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
618 adap->params.mc5.nfilters = val;
622 static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
623 const char *buf, size_t len)
625 return attr_store(d, buf, len, set_nfilters, 0, ~0);
628 static ssize_t set_nservers(struct net_device *dev, unsigned int val)
630 struct port_info *pi = netdev_priv(dev);
631 struct adapter *adap = pi->adapter;
633 if (adap->flags & FULL_INIT_DONE)
635 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
638 adap->params.mc5.nservers = val;
642 static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
643 const char *buf, size_t len)
645 return attr_store(d, buf, len, set_nservers, 0, ~0);
648 #define CXGB3_ATTR_R(name, val_expr) \
649 CXGB3_SHOW(name, val_expr) \
650 static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
652 #define CXGB3_ATTR_RW(name, val_expr, store_method) \
653 CXGB3_SHOW(name, val_expr) \
654 static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
656 CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
657 CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
658 CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
660 static struct attribute *cxgb3_attrs[] = {
661 &dev_attr_cam_size.attr,
662 &dev_attr_nfilters.attr,
663 &dev_attr_nservers.attr,
667 static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
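/*
 * These attributes are registered against a port's net device, so (path
 * assumed) they show up under /sys/class/net/<iface>/ as cam_size,
 * nfilters and nservers; the latter two can only be changed before the
 * adapter is fully initialized (FULL_INIT_DONE).
 */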
669 static ssize_t tm_attr_show(struct device *d,
670 char *buf, int sched)
672 struct port_info *pi = netdev_priv(to_net_dev(d));
673 struct adapter *adap = pi->adapter;
674 unsigned int v, addr, bpt, cpt;
677 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
679 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
680 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
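/*
 * Each rate-limit word covers two schedulers: in the selected half the low
 * byte is core-clock ticks per token (cpt) and the next byte is bytes per
 * token (bpt).  With cclk in kHz, the rate is (cclk * 1000 / cpt) * bpt
 * bytes/sec, which divided by 125 gives Kbps.
 */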
683 bpt = (v >> 8) & 0xff;
686 len = sprintf(buf, "disabled\n");
688 v = (adap->params.vpd.cclk * 1000) / cpt;
689 len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
695 static ssize_t tm_attr_store(struct device *d,
696 const char *buf, size_t len, int sched)
698 struct port_info *pi = netdev_priv(to_net_dev(d));
699 struct adapter *adap = pi->adapter;
704 if (!capable(CAP_NET_ADMIN))
707 val = simple_strtoul(buf, &endp, 0);
708 if (endp == buf || val > 10000000)
712 ret = t3_config_sched(adap, val, sched);
719 #define TM_ATTR(name, sched) \
720 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
723 return tm_attr_show(d, buf, sched); \
725 static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
726 const char *buf, size_t len) \
728 return tm_attr_store(d, buf, len, sched); \
730 static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
741 static struct attribute *offload_attrs[] = {
742 &dev_attr_sched0.attr,
743 &dev_attr_sched1.attr,
744 &dev_attr_sched2.attr,
745 &dev_attr_sched3.attr,
746 &dev_attr_sched4.attr,
747 &dev_attr_sched5.attr,
748 &dev_attr_sched6.attr,
749 &dev_attr_sched7.attr,
753 static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
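/*
 * sched0..sched7 expose the eight hardware Tx schedulers on the offload
 * net device's sysfs directory; writing a rate in Kbps (up to 10000000)
 * reconfigures the corresponding scheduler via t3_config_sched().  The
 * group is created in offload_open() and removed in offload_close().
 */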
756 * Sends an sk_buff to an offload queue driver
757 * after dealing with any active network taps.
759 static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
764 ret = t3_offload_tx(tdev, skb);
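/*
 * write_smt_entry() programs source-MAC-table entry 'idx' with the
 * corresponding port's current MAC address (src_mac1 is cleared).
 */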
769 static int write_smt_entry(struct adapter *adapter, int idx)
771 struct cpl_smt_write_req *req;
772 struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
777 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
778 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
779 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
780 req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */
782 memset(req->src_mac1, 0, sizeof(req->src_mac1));
783 memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
785 offload_tx(&adapter->tdev, skb);
789 static int init_smt(struct adapter *adapter)
793 for_each_port(adapter, i)
794 write_smt_entry(adapter, i);
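/*
 * Pack the per-port MTUs into the A_TP_MTU_PORT_TABLE register: port 0 in
 * the low 16 bits and port 1, if present, in the high 16 bits.
 */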
798 static void init_port_mtus(struct adapter *adapter)
800 unsigned int mtus = adapter->port[0]->mtu;
802 if (adapter->port[1])
803 mtus |= adapter->port[1]->mtu << 16;
804 t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
807 static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
811 struct mngt_pktsched_wr *req;
814 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
815 req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
816 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
817 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
823 ret = t3_mgmt_tx(adap, skb);
828 static int bind_qsets(struct adapter *adap)
832 for_each_port(adap, i) {
833 const struct port_info *pi = adap2pinfo(adap, i);
835 for (j = 0; j < pi->nqsets; ++j) {
836 int ret = send_pktsched_cmd(adap, 1,
837 pi->first_qset + j, -1,
847 #define FW_FNAME "cxgb3/t3fw-%d.%d.%d.bin"
848 #define TPSRAM_NAME "cxgb3/t3%c_psram-%d.%d.%d.bin"
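/*
 * Firmware and protocol-SRAM images are fetched with request_firmware(),
 * so they must be visible to the firmware loader (typically under
 * /lib/firmware, path assumed), e.g. "cxgb3/t3fw-<major>.<minor>.<micro>.bin".
 */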
850 static int upgrade_fw(struct adapter *adap)
854 const struct firmware *fw;
855 struct device *dev = &adap->pdev->dev;
857 snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
858 FW_VERSION_MINOR, FW_VERSION_MICRO);
859 ret = request_firmware(&fw, buf, dev);
861 dev_err(dev, "could not upgrade firmware: unable to load %s\n",
865 ret = t3_load_fw(adap, fw->data, fw->size);
866 release_firmware(fw);
869 dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
870 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
872 dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
873 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
878 static inline char t3rev2char(struct adapter *adapter)
882 switch(adapter->params.rev) {
894 static int update_tpsram(struct adapter *adap)
896 const struct firmware *tpsram;
898 struct device *dev = &adap->pdev->dev;
902 rev = t3rev2char(adap);
906 snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
907 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
909 ret = request_firmware(&tpsram, buf, dev);
911 dev_err(dev, "could not load TP SRAM: unable to load %s\n",
916 ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
920 ret = t3_set_proto_sram(adap, tpsram->data);
923 "successful update of protocol engine "
925 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
dev_err(dev, "failed to update protocol engine %d.%d.%d\n",
928 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
930 dev_err(dev, "loading protocol SRAM failed\n");
933 release_firmware(tpsram);
939 * cxgb_up - enable the adapter
940 * @adapter: adapter being enabled
942 * Called when the first port is enabled, this function performs the
943 * actions necessary to make an adapter operational, such as completing
944 * the initialization of HW modules, and enabling interrupts.
946 * Must be called with the rtnl lock held.
948 static int cxgb_up(struct adapter *adap)
952 if (!(adap->flags & FULL_INIT_DONE)) {
953 err = t3_check_fw_version(adap);
954 if (err == -EINVAL) {
955 err = upgrade_fw(adap);
956 CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
957 FW_VERSION_MAJOR, FW_VERSION_MINOR,
958 FW_VERSION_MICRO, err ? "failed" : "succeeded");
961 err = t3_check_tpsram_version(adap);
962 if (err == -EINVAL) {
963 err = update_tpsram(adap);
964 CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
965 TP_VERSION_MAJOR, TP_VERSION_MINOR,
966 TP_VERSION_MICRO, err ? "failed" : "succeeded");
970 * Clear interrupts now to catch errors if t3_init_hw fails.
971 * We clear them again later as initialization may trigger
972 * conditions that can interrupt.
976 err = t3_init_hw(adap, 0);
980 t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
981 t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
983 err = setup_sge_qsets(adap);
988 if (!(adap->flags & NAPI_INIT))
990 adap->flags |= FULL_INIT_DONE;
995 if (adap->flags & USING_MSIX) {
996 name_msix_vecs(adap);
997 err = request_irq(adap->msix_info[0].vec,
998 t3_async_intr_handler, 0,
999 adap->msix_info[0].desc, adap);
1003 err = request_msix_data_irqs(adap);
1005 free_irq(adap->msix_info[0].vec, adap);
1008 } else if ((err = request_irq(adap->pdev->irq,
1009 t3_intr_handler(adap,
1010 adap->sge.qs[0].rspq.
1012 (adap->flags & USING_MSI) ?
1017 enable_all_napi(adap);
1019 t3_intr_enable(adap);
1021 if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
1022 is_offload(adap) && init_tp_parity(adap) == 0)
1023 adap->flags |= TP_PARITY_INIT;
1025 if (adap->flags & TP_PARITY_INIT) {
1026 t3_write_reg(adap, A_TP_INT_CAUSE,
1027 F_CMCACHEPERR | F_ARPLUTPERR);
1028 t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
1031 if (!(adap->flags & QUEUES_BOUND)) {
1032 err = bind_qsets(adap);
1034 CH_ERR(adap, "failed to bind qsets, err %d\n", err);
1035 t3_intr_disable(adap);
1036 free_irq_resources(adap);
1039 adap->flags |= QUEUES_BOUND;
1045 CH_ERR(adap, "request_irq failed, err %d\n", err);
1050 * Release resources when all the ports and offloading have been stopped.
1052 static void cxgb_down(struct adapter *adapter)
1054 t3_sge_stop(adapter);
1055 spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */
1056 t3_intr_disable(adapter);
1057 spin_unlock_irq(&adapter->work_lock);
1059 free_irq_resources(adapter);
1060 flush_workqueue(cxgb3_wq); /* wait for external IRQ handler */
1061 quiesce_rx(adapter);
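/*
 * Arm the periodic adapter check.  linkpoll_period is in tenths of a
 * second, hence the linkpoll_period * HZ / 10 conversion to jiffies; when
 * link polling is not needed the MAC statistics update period is used
 * instead.
 */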
1064 static void schedule_chk_task(struct adapter *adap)
1068 timeo = adap->params.linkpoll_period ?
1069 (HZ * adap->params.linkpoll_period) / 10 :
1070 adap->params.stats_update_period * HZ;
1072 queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
1075 static int offload_open(struct net_device *dev)
1077 struct port_info *pi = netdev_priv(dev);
1078 struct adapter *adapter = pi->adapter;
1079 struct t3cdev *tdev = dev2t3cdev(dev);
1080 int adap_up = adapter->open_device_map & PORT_MASK;
1083 if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1086 if (!adap_up && (err = cxgb_up(adapter)) < 0)
1089 t3_tp_set_offload_mode(adapter, 1);
1090 tdev->lldev = adapter->port[0];
1091 err = cxgb3_offload_activate(adapter);
1095 init_port_mtus(adapter);
1096 t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
1097 adapter->params.b_wnd,
1098 adapter->params.rev == 0 ?
1099 adapter->port[0]->mtu : 0xffff);
1102 if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
1103 dev_dbg(&dev->dev, "cannot create sysfs group\n");
1105 /* Call back all registered clients */
1106 cxgb3_add_clients(tdev);
1109 /* restore them in case the offload module has changed them */
1111 t3_tp_set_offload_mode(adapter, 0);
1112 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1113 cxgb3_set_dummy_ops(tdev);
1118 static int offload_close(struct t3cdev *tdev)
1120 struct adapter *adapter = tdev2adap(tdev);
1122 if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1125 /* Call back all registered clients */
1126 cxgb3_remove_clients(tdev);
1128 sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
1131 cxgb3_set_dummy_ops(tdev);
1132 t3_tp_set_offload_mode(adapter, 0);
1133 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1135 if (!adapter->open_device_map)
1138 cxgb3_offload_deactivate(adapter);
1142 static int cxgb_open(struct net_device *dev)
1144 struct port_info *pi = netdev_priv(dev);
1145 struct adapter *adapter = pi->adapter;
1146 int other_ports = adapter->open_device_map & PORT_MASK;
1149 if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
1152 set_bit(pi->port_id, &adapter->open_device_map);
1153 if (is_offload(adapter) && !ofld_disable) {
1154 err = offload_open(dev);
1157 "Could not initialize offload capabilities\n");
1160 dev->real_num_tx_queues = pi->nqsets;
1162 t3_port_intr_enable(adapter, pi->port_id);
1163 netif_tx_start_all_queues(dev);
1165 schedule_chk_task(adapter);
1170 static int cxgb_close(struct net_device *dev)
1172 struct port_info *pi = netdev_priv(dev);
1173 struct adapter *adapter = pi->adapter;
1175 t3_port_intr_disable(adapter, pi->port_id);
1176 netif_tx_stop_all_queues(dev);
1177 pi->phy.ops->power_down(&pi->phy, 1);
1178 netif_carrier_off(dev);
1179 t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
1181 spin_lock_irq(&adapter->work_lock); /* sync with update task */
1182 clear_bit(pi->port_id, &adapter->open_device_map);
1183 spin_unlock_irq(&adapter->work_lock);
1185 if (!(adapter->open_device_map & PORT_MASK))
1186 cancel_rearming_delayed_workqueue(cxgb3_wq,
1187 &adapter->adap_check_task);
1189 if (!adapter->open_device_map)
1195 static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
1197 struct port_info *pi = netdev_priv(dev);
1198 struct adapter *adapter = pi->adapter;
1199 struct net_device_stats *ns = &pi->netstats;
1200 const struct mac_stats *pstats;
1202 spin_lock(&adapter->stats_lock);
1203 pstats = t3_mac_update_stats(&pi->mac);
1204 spin_unlock(&adapter->stats_lock);
1206 ns->tx_bytes = pstats->tx_octets;
1207 ns->tx_packets = pstats->tx_frames;
1208 ns->rx_bytes = pstats->rx_octets;
1209 ns->rx_packets = pstats->rx_frames;
1210 ns->multicast = pstats->rx_mcast_frames;
1212 ns->tx_errors = pstats->tx_underrun;
1213 ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
1214 pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
1215 pstats->rx_fifo_ovfl;
1217 /* detailed rx_errors */
1218 ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
1219 ns->rx_over_errors = 0;
1220 ns->rx_crc_errors = pstats->rx_fcs_errs;
1221 ns->rx_frame_errors = pstats->rx_symbol_errs;
1222 ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
1223 ns->rx_missed_errors = pstats->rx_cong_drops;
1225 /* detailed tx_errors */
1226 ns->tx_aborted_errors = 0;
1227 ns->tx_carrier_errors = 0;
1228 ns->tx_fifo_errors = pstats->tx_underrun;
1229 ns->tx_heartbeat_errors = 0;
1230 ns->tx_window_errors = 0;
1234 static u32 get_msglevel(struct net_device *dev)
1236 struct port_info *pi = netdev_priv(dev);
1237 struct adapter *adapter = pi->adapter;
1239 return adapter->msg_enable;
1242 static void set_msglevel(struct net_device *dev, u32 val)
1244 struct port_info *pi = netdev_priv(dev);
1245 struct adapter *adapter = pi->adapter;
1247 adapter->msg_enable = val;
1250 static char stats_strings[][ETH_GSTRING_LEN] = {
1253 "TxMulticastFramesOK",
1254 "TxBroadcastFramesOK",
1261 "TxFrames128To255 ",
1262 "TxFrames256To511 ",
1263 "TxFrames512To1023 ",
1264 "TxFrames1024To1518 ",
1265 "TxFrames1519ToMax ",
1269 "RxMulticastFramesOK",
1270 "RxBroadcastFramesOK",
1281 "RxFrames128To255 ",
1282 "RxFrames256To511 ",
1283 "RxFrames512To1023 ",
1284 "RxFrames1024To1518 ",
1285 "RxFrames1519ToMax ",
1298 "CheckTXEnToggled ",
1303 static int get_sset_count(struct net_device *dev, int sset)
1307 return ARRAY_SIZE(stats_strings);
1313 #define T3_REGMAP_SIZE (3 * 1024)
1315 static int get_regs_len(struct net_device *dev)
1317 return T3_REGMAP_SIZE;
1320 static int get_eeprom_len(struct net_device *dev)
1325 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1327 struct port_info *pi = netdev_priv(dev);
1328 struct adapter *adapter = pi->adapter;
1332 spin_lock(&adapter->stats_lock);
1333 t3_get_fw_version(adapter, &fw_vers);
1334 t3_get_tp_version(adapter, &tp_vers);
1335 spin_unlock(&adapter->stats_lock);
1337 strcpy(info->driver, DRV_NAME);
1338 strcpy(info->version, DRV_VERSION);
1339 strcpy(info->bus_info, pci_name(adapter->pdev));
1341 strcpy(info->fw_version, "N/A");
1343 snprintf(info->fw_version, sizeof(info->fw_version),
1344 "%s %u.%u.%u TP %u.%u.%u",
1345 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1346 G_FW_VERSION_MAJOR(fw_vers),
1347 G_FW_VERSION_MINOR(fw_vers),
1348 G_FW_VERSION_MICRO(fw_vers),
1349 G_TP_VERSION_MAJOR(tp_vers),
1350 G_TP_VERSION_MINOR(tp_vers),
1351 G_TP_VERSION_MICRO(tp_vers));
1355 static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
1357 if (stringset == ETH_SS_STATS)
1358 memcpy(data, stats_strings, sizeof(stats_strings));
1361 static unsigned long collect_sge_port_stats(struct adapter *adapter,
1362 struct port_info *p, int idx)
1365 unsigned long tot = 0;
1367 for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
1368 tot += adapter->sge.qs[i].port_stats[idx];
1372 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1375 struct port_info *pi = netdev_priv(dev);
1376 struct adapter *adapter = pi->adapter;
1377 const struct mac_stats *s;
1379 spin_lock(&adapter->stats_lock);
1380 s = t3_mac_update_stats(&pi->mac);
1381 spin_unlock(&adapter->stats_lock);
1383 *data++ = s->tx_octets;
1384 *data++ = s->tx_frames;
1385 *data++ = s->tx_mcast_frames;
1386 *data++ = s->tx_bcast_frames;
1387 *data++ = s->tx_pause;
1388 *data++ = s->tx_underrun;
1389 *data++ = s->tx_fifo_urun;
1391 *data++ = s->tx_frames_64;
1392 *data++ = s->tx_frames_65_127;
1393 *data++ = s->tx_frames_128_255;
1394 *data++ = s->tx_frames_256_511;
1395 *data++ = s->tx_frames_512_1023;
1396 *data++ = s->tx_frames_1024_1518;
1397 *data++ = s->tx_frames_1519_max;
1399 *data++ = s->rx_octets;
1400 *data++ = s->rx_frames;
1401 *data++ = s->rx_mcast_frames;
1402 *data++ = s->rx_bcast_frames;
1403 *data++ = s->rx_pause;
1404 *data++ = s->rx_fcs_errs;
1405 *data++ = s->rx_symbol_errs;
1406 *data++ = s->rx_short;
1407 *data++ = s->rx_jabber;
1408 *data++ = s->rx_too_long;
1409 *data++ = s->rx_fifo_ovfl;
1411 *data++ = s->rx_frames_64;
1412 *data++ = s->rx_frames_65_127;
1413 *data++ = s->rx_frames_128_255;
1414 *data++ = s->rx_frames_256_511;
1415 *data++ = s->rx_frames_512_1023;
1416 *data++ = s->rx_frames_1024_1518;
1417 *data++ = s->rx_frames_1519_max;
1419 *data++ = pi->phy.fifo_errors;
1421 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1422 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1423 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1424 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1425 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1429 *data++ = s->rx_cong_drops;
1431 *data++ = s->num_toggled;
1432 *data++ = s->num_resets;
1435 static inline void reg_block_dump(struct adapter *ap, void *buf,
1436 unsigned int start, unsigned int end)
1438 u32 *p = buf + start;
1440 for (; start <= end; start += sizeof(u32))
1441 *p++ = t3_read_reg(ap, start);
1444 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1447 struct port_info *pi = netdev_priv(dev);
1448 struct adapter *ap = pi->adapter;
1452 * bits 0..9: chip version
1453 * bits 10..15: chip revision
1454 * bit 31: set for PCIe cards
1456 regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1459 * We skip the MAC statistics registers because they are clear-on-read.
1460 * Also reading multi-register stats would need to synchronize with the
1461 * periodic mac stats accumulation. Hard to justify the complexity.
1463 memset(buf, 0, T3_REGMAP_SIZE);
1464 reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1465 reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1466 reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1467 reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1468 reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1469 reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1470 XGM_REG(A_XGM_SERDES_STAT3, 1));
1471 reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1472 XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1475 static int restart_autoneg(struct net_device *dev)
1477 struct port_info *p = netdev_priv(dev);
1479 if (!netif_running(dev))
1481 if (p->link_config.autoneg != AUTONEG_ENABLE)
1483 p->phy.ops->autoneg_restart(&p->phy);
1487 static int cxgb3_phys_id(struct net_device *dev, u32 data)
1489 struct port_info *pi = netdev_priv(dev);
1490 struct adapter *adapter = pi->adapter;
1496 for (i = 0; i < data * 2; i++) {
1497 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1498 (i & 1) ? F_GPIO0_OUT_VAL : 0);
1499 if (msleep_interruptible(500))
1502 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1507 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1509 struct port_info *p = netdev_priv(dev);
1511 cmd->supported = p->link_config.supported;
1512 cmd->advertising = p->link_config.advertising;
1514 if (netif_carrier_ok(dev)) {
1515 cmd->speed = p->link_config.speed;
1516 cmd->duplex = p->link_config.duplex;
1522 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1523 cmd->phy_address = p->phy.addr;
1524 cmd->transceiver = XCVR_EXTERNAL;
1525 cmd->autoneg = p->link_config.autoneg;
1531 static int speed_duplex_to_caps(int speed, int duplex)
1537 if (duplex == DUPLEX_FULL)
1538 cap = SUPPORTED_10baseT_Full;
1540 cap = SUPPORTED_10baseT_Half;
1543 if (duplex == DUPLEX_FULL)
1544 cap = SUPPORTED_100baseT_Full;
1546 cap = SUPPORTED_100baseT_Half;
1549 if (duplex == DUPLEX_FULL)
1550 cap = SUPPORTED_1000baseT_Full;
1552 cap = SUPPORTED_1000baseT_Half;
1555 if (duplex == DUPLEX_FULL)
1556 cap = SUPPORTED_10000baseT_Full;
1561 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1562 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1563 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1564 ADVERTISED_10000baseT_Full)
1566 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1568 struct port_info *p = netdev_priv(dev);
1569 struct link_config *lc = &p->link_config;
1571 if (!(lc->supported & SUPPORTED_Autoneg)) {
1573 * PHY offers a single speed/duplex. See if that's what's
1576 if (cmd->autoneg == AUTONEG_DISABLE) {
1577 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1578 if (lc->supported & cap)
1584 if (cmd->autoneg == AUTONEG_DISABLE) {
1585 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1587 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
1589 lc->requested_speed = cmd->speed;
1590 lc->requested_duplex = cmd->duplex;
1591 lc->advertising = 0;
1593 cmd->advertising &= ADVERTISED_MASK;
1594 cmd->advertising &= lc->supported;
1595 if (!cmd->advertising)
1597 lc->requested_speed = SPEED_INVALID;
1598 lc->requested_duplex = DUPLEX_INVALID;
1599 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1601 lc->autoneg = cmd->autoneg;
1602 if (netif_running(dev))
1603 t3_link_start(&p->phy, &p->mac, lc);
1607 static void get_pauseparam(struct net_device *dev,
1608 struct ethtool_pauseparam *epause)
1610 struct port_info *p = netdev_priv(dev);
1612 epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1613 epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1614 epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1617 static int set_pauseparam(struct net_device *dev,
1618 struct ethtool_pauseparam *epause)
1620 struct port_info *p = netdev_priv(dev);
1621 struct link_config *lc = &p->link_config;
1623 if (epause->autoneg == AUTONEG_DISABLE)
1624 lc->requested_fc = 0;
1625 else if (lc->supported & SUPPORTED_Autoneg)
1626 lc->requested_fc = PAUSE_AUTONEG;
1630 if (epause->rx_pause)
1631 lc->requested_fc |= PAUSE_RX;
1632 if (epause->tx_pause)
1633 lc->requested_fc |= PAUSE_TX;
1634 if (lc->autoneg == AUTONEG_ENABLE) {
1635 if (netif_running(dev))
1636 t3_link_start(&p->phy, &p->mac, lc);
1638 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1639 if (netif_running(dev))
1640 t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1645 static u32 get_rx_csum(struct net_device *dev)
1647 struct port_info *p = netdev_priv(dev);
1649 return p->rx_offload & T3_RX_CSUM;
1652 static int set_rx_csum(struct net_device *dev, u32 data)
1654 struct port_info *p = netdev_priv(dev);
1657 p->rx_offload |= T3_RX_CSUM;
1661 p->rx_offload &= ~(T3_RX_CSUM | T3_LRO);
1662 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
1663 set_qset_lro(dev, i, 0);
1668 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1670 struct port_info *pi = netdev_priv(dev);
1671 struct adapter *adapter = pi->adapter;
1672 const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1674 e->rx_max_pending = MAX_RX_BUFFERS;
1675 e->rx_mini_max_pending = 0;
1676 e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1677 e->tx_max_pending = MAX_TXQ_ENTRIES;
1679 e->rx_pending = q->fl_size;
1680 e->rx_mini_pending = q->rspq_size;
1681 e->rx_jumbo_pending = q->jumbo_size;
1682 e->tx_pending = q->txq_size[0];
1685 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1687 struct port_info *pi = netdev_priv(dev);
1688 struct adapter *adapter = pi->adapter;
1689 struct qset_params *q;
1692 if (e->rx_pending > MAX_RX_BUFFERS ||
1693 e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1694 e->tx_pending > MAX_TXQ_ENTRIES ||
1695 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1696 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1697 e->rx_pending < MIN_FL_ENTRIES ||
1698 e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1699 e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1702 if (adapter->flags & FULL_INIT_DONE)
1705 q = &adapter->params.sge.qset[pi->first_qset];
1706 for (i = 0; i < pi->nqsets; ++i, ++q) {
1707 q->rspq_size = e->rx_mini_pending;
1708 q->fl_size = e->rx_pending;
1709 q->jumbo_size = e->rx_jumbo_pending;
1710 q->txq_size[0] = e->tx_pending;
1711 q->txq_size[1] = e->tx_pending;
1712 q->txq_size[2] = e->tx_pending;
1717 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1719 struct port_info *pi = netdev_priv(dev);
1720 struct adapter *adapter = pi->adapter;
1721 struct qset_params *qsp = &adapter->params.sge.qset[0];
1722 struct sge_qset *qs = &adapter->sge.qs[0];
1724 if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1727 qsp->coalesce_usecs = c->rx_coalesce_usecs;
1728 t3_update_qset_coalesce(qs, qsp);
1732 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1734 struct port_info *pi = netdev_priv(dev);
1735 struct adapter *adapter = pi->adapter;
1736 struct qset_params *q = adapter->params.sge.qset;
1738 c->rx_coalesce_usecs = q->coalesce_usecs;
1742 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1745 struct port_info *pi = netdev_priv(dev);
1746 struct adapter *adapter = pi->adapter;
1749 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1753 e->magic = EEPROM_MAGIC;
1754 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1755 err = t3_seeprom_read(adapter, i, (__le32 *) & buf[i]);
1758 memcpy(data, buf + e->offset, e->len);
1763 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1766 struct port_info *pi = netdev_priv(dev);
1767 struct adapter *adapter = pi->adapter;
1768 u32 aligned_offset, aligned_len;
1773 if (eeprom->magic != EEPROM_MAGIC)
1776 aligned_offset = eeprom->offset & ~3;
1777 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
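/*
 * The EEPROM is written a 32-bit word at a time.  For requests that are
 * not word aligned, read the first and last overlapping words first so
 * that the bytes outside the requested range are written back unchanged
 * (read-modify-write).
 */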
1779 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1780 buf = kmalloc(aligned_len, GFP_KERNEL);
1783 err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
1784 if (!err && aligned_len > 4)
1785 err = t3_seeprom_read(adapter,
1786 aligned_offset + aligned_len - 4,
1787 (__le32 *) & buf[aligned_len - 4]);
1790 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1794 err = t3_seeprom_wp(adapter, 0);
1798 for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
1799 err = t3_seeprom_write(adapter, aligned_offset, *p);
1800 aligned_offset += 4;
1804 err = t3_seeprom_wp(adapter, 1);
1811 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1815 memset(&wol->sopass, 0, sizeof(wol->sopass));
1818 static const struct ethtool_ops cxgb_ethtool_ops = {
1819 .get_settings = get_settings,
1820 .set_settings = set_settings,
1821 .get_drvinfo = get_drvinfo,
1822 .get_msglevel = get_msglevel,
1823 .set_msglevel = set_msglevel,
1824 .get_ringparam = get_sge_param,
1825 .set_ringparam = set_sge_param,
1826 .get_coalesce = get_coalesce,
1827 .set_coalesce = set_coalesce,
1828 .get_eeprom_len = get_eeprom_len,
1829 .get_eeprom = get_eeprom,
1830 .set_eeprom = set_eeprom,
1831 .get_pauseparam = get_pauseparam,
1832 .set_pauseparam = set_pauseparam,
1833 .get_rx_csum = get_rx_csum,
1834 .set_rx_csum = set_rx_csum,
1835 .set_tx_csum = ethtool_op_set_tx_csum,
1836 .set_sg = ethtool_op_set_sg,
1837 .get_link = ethtool_op_get_link,
1838 .get_strings = get_strings,
1839 .phys_id = cxgb3_phys_id,
1840 .nway_reset = restart_autoneg,
1841 .get_sset_count = get_sset_count,
1842 .get_ethtool_stats = get_stats,
1843 .get_regs_len = get_regs_len,
1844 .get_regs = get_regs,
.set_tso = ethtool_op_set_tso,
};
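/*
 * For the extension ioctls below a negative field means "leave this
 * parameter unchanged", so in_range() deliberately accepts negative
 * values in addition to [lo, hi].
 */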
1849 static int in_range(int val, int lo, int hi)
1851 return val < 0 || (val <= hi && val >= lo);
1854 static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1856 struct port_info *pi = netdev_priv(dev);
1857 struct adapter *adapter = pi->adapter;
1861 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1865 case CHELSIO_SET_QSET_PARAMS:{
1867 struct qset_params *q;
1868 struct ch_qset_params t;
1869 int q1 = pi->first_qset;
1870 int nqsets = pi->nqsets;
1872 if (!capable(CAP_NET_ADMIN))
1874 if (copy_from_user(&t, useraddr, sizeof(t)))
1876 if (t.qset_idx >= SGE_QSETS)
1878 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
1879 !in_range(t.cong_thres, 0, 255) ||
1880 !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
1882 !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
1884 !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
1885 MAX_CTRL_TXQ_ENTRIES) ||
1886 !in_range(t.fl_size[0], MIN_FL_ENTRIES,
1888 || !in_range(t.fl_size[1], MIN_FL_ENTRIES,
1889 MAX_RX_JUMBO_BUFFERS)
1890 || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
1894 if ((adapter->flags & FULL_INIT_DONE) && t.lro > 0)
1895 for_each_port(adapter, i) {
1896 pi = adap2pinfo(adapter, i);
1897 if (t.qset_idx >= pi->first_qset &&
1898 t.qset_idx < pi->first_qset + pi->nqsets &&
1899 !(pi->rx_offload & T3_RX_CSUM))
1903 if ((adapter->flags & FULL_INIT_DONE) &&
1904 (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
1905 t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
1906 t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
1907 t.polling >= 0 || t.cong_thres >= 0))
1910 /* Allow setting of any available qset when offload enabled */
1911 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
1913 for_each_port(adapter, i) {
1914 pi = adap2pinfo(adapter, i);
1915 nqsets += pi->first_qset + pi->nqsets;
1919 if (t.qset_idx < q1)
1921 if (t.qset_idx > q1 + nqsets - 1)
1924 q = &adapter->params.sge.qset[t.qset_idx];
1926 if (t.rspq_size >= 0)
1927 q->rspq_size = t.rspq_size;
1928 if (t.fl_size[0] >= 0)
1929 q->fl_size = t.fl_size[0];
1930 if (t.fl_size[1] >= 0)
1931 q->jumbo_size = t.fl_size[1];
1932 if (t.txq_size[0] >= 0)
1933 q->txq_size[0] = t.txq_size[0];
1934 if (t.txq_size[1] >= 0)
1935 q->txq_size[1] = t.txq_size[1];
1936 if (t.txq_size[2] >= 0)
1937 q->txq_size[2] = t.txq_size[2];
1938 if (t.cong_thres >= 0)
1939 q->cong_thres = t.cong_thres;
1940 if (t.intr_lat >= 0) {
1941 struct sge_qset *qs =
1942 &adapter->sge.qs[t.qset_idx];
1944 q->coalesce_usecs = t.intr_lat;
1945 t3_update_qset_coalesce(qs, q);
1947 if (t.polling >= 0) {
1948 if (adapter->flags & USING_MSIX)
1949 q->polling = t.polling;
1951 /* No polling with INTx for T3A */
1952 if (adapter->params.rev == 0 &&
1953 !(adapter->flags & USING_MSI))
1956 for (i = 0; i < SGE_QSETS; i++) {
1957 q = &adapter->params.sge.
1959 q->polling = t.polling;
1964 set_qset_lro(dev, t.qset_idx, t.lro);
1968 case CHELSIO_GET_QSET_PARAMS:{
1969 struct qset_params *q;
1970 struct ch_qset_params t;
1971 int q1 = pi->first_qset;
1972 int nqsets = pi->nqsets;
1975 if (copy_from_user(&t, useraddr, sizeof(t)))
1978 /* Display qsets for all ports when offload enabled */
1979 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
1981 for_each_port(adapter, i) {
1982 pi = adap2pinfo(adapter, i);
1983 nqsets = pi->first_qset + pi->nqsets;
1987 if (t.qset_idx >= nqsets)
1990 q = &adapter->params.sge.qset[q1 + t.qset_idx];
1991 t.rspq_size = q->rspq_size;
1992 t.txq_size[0] = q->txq_size[0];
1993 t.txq_size[1] = q->txq_size[1];
1994 t.txq_size[2] = q->txq_size[2];
1995 t.fl_size[0] = q->fl_size;
1996 t.fl_size[1] = q->jumbo_size;
1997 t.polling = q->polling;
1999 t.intr_lat = q->coalesce_usecs;
2000 t.cong_thres = q->cong_thres;
2003 if (adapter->flags & USING_MSIX)
2004 t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
2006 t.vector = adapter->pdev->irq;
2008 if (copy_to_user(useraddr, &t, sizeof(t)))
2012 case CHELSIO_SET_QSET_NUM:{
2013 struct ch_reg edata;
2014 unsigned int i, first_qset = 0, other_qsets = 0;
2016 if (!capable(CAP_NET_ADMIN))
2018 if (adapter->flags & FULL_INIT_DONE)
2020 if (copy_from_user(&edata, useraddr, sizeof(edata)))
2022 if (edata.val < 1 ||
2023 (edata.val > 1 && !(adapter->flags & USING_MSIX)))
2026 for_each_port(adapter, i)
2027 if (adapter->port[i] && adapter->port[i] != dev)
2028 other_qsets += adap2pinfo(adapter, i)->nqsets;
2030 if (edata.val + other_qsets > SGE_QSETS)
2033 pi->nqsets = edata.val;
2035 for_each_port(adapter, i)
2036 if (adapter->port[i]) {
2037 pi = adap2pinfo(adapter, i);
2038 pi->first_qset = first_qset;
2039 first_qset += pi->nqsets;
2043 case CHELSIO_GET_QSET_NUM:{
2044 struct ch_reg edata;
2046 edata.cmd = CHELSIO_GET_QSET_NUM;
2047 edata.val = pi->nqsets;
2048 if (copy_to_user(useraddr, &edata, sizeof(edata)))
2052 case CHELSIO_LOAD_FW:{
2054 struct ch_mem_range t;
2056 if (!capable(CAP_SYS_RAWIO))
2058 if (copy_from_user(&t, useraddr, sizeof(t)))
2060 /* Check t.len sanity ? */
2061 fw_data = kmalloc(t.len, GFP_KERNEL);
2066 (fw_data, useraddr + sizeof(t), t.len)) {
2071 ret = t3_load_fw(adapter, fw_data, t.len);
2077 case CHELSIO_SETMTUTAB:{
2081 if (!is_offload(adapter))
2083 if (!capable(CAP_NET_ADMIN))
2085 if (offload_running(adapter))
2087 if (copy_from_user(&m, useraddr, sizeof(m)))
2089 if (m.nmtus != NMTUS)
2091 if (m.mtus[0] < 81) /* accommodate SACK */
2094 /* MTUs must be in ascending order */
2095 for (i = 1; i < NMTUS; ++i)
2096 if (m.mtus[i] < m.mtus[i - 1])
2099 memcpy(adapter->params.mtus, m.mtus,
2100 sizeof(adapter->params.mtus));
2103 case CHELSIO_GET_PM:{
2104 struct tp_params *p = &adapter->params.tp;
2105 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
2107 if (!is_offload(adapter))
2109 m.tx_pg_sz = p->tx_pg_size;
2110 m.tx_num_pg = p->tx_num_pgs;
2111 m.rx_pg_sz = p->rx_pg_size;
2112 m.rx_num_pg = p->rx_num_pgs;
2113 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
2114 if (copy_to_user(useraddr, &m, sizeof(m)))
2118 case CHELSIO_SET_PM:{
2120 struct tp_params *p = &adapter->params.tp;
2122 if (!is_offload(adapter))
2124 if (!capable(CAP_NET_ADMIN))
2126 if (adapter->flags & FULL_INIT_DONE)
2128 if (copy_from_user(&m, useraddr, sizeof(m)))
2130 if (!is_power_of_2(m.rx_pg_sz) ||
2131 !is_power_of_2(m.tx_pg_sz))
2132 return -EINVAL; /* not power of 2 */
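/*
 * Beyond being powers of 2, the page sizes must match what the HW
 * supports: Rx pages of 16KB or 64KB (mask 0x14000) and Tx pages that
 * are a power of 4 from 16KB up to 16MB (mask 0x1554000).
 */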
2133 if (!(m.rx_pg_sz & 0x14000))
2134 return -EINVAL; /* not 16KB or 64KB */
2135 if (!(m.tx_pg_sz & 0x1554000))
2137 if (m.tx_num_pg == -1)
2138 m.tx_num_pg = p->tx_num_pgs;
2139 if (m.rx_num_pg == -1)
2140 m.rx_num_pg = p->rx_num_pgs;
2141 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
2143 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
2144 m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
2146 p->rx_pg_size = m.rx_pg_sz;
2147 p->tx_pg_size = m.tx_pg_sz;
2148 p->rx_num_pgs = m.rx_num_pg;
2149 p->tx_num_pgs = m.tx_num_pg;
2152 case CHELSIO_GET_MEM:{
2153 struct ch_mem_range t;
2157 if (!is_offload(adapter))
2159 if (!(adapter->flags & FULL_INIT_DONE))
2160 return -EIO; /* need the memory controllers */
2161 if (copy_from_user(&t, useraddr, sizeof(t)))
2163 if ((t.addr & 7) || (t.len & 7))
2165 if (t.mem_id == MEM_CM)
2167 else if (t.mem_id == MEM_PMRX)
2168 mem = &adapter->pmrx;
2169 else if (t.mem_id == MEM_PMTX)
2170 mem = &adapter->pmtx;
2176 * bits 0..9: chip version
2177 * bits 10..15: chip revision
2179 t.version = 3 | (adapter->params.rev << 10);
2180 if (copy_to_user(useraddr, &t, sizeof(t)))
2184 * Read 256 bytes at a time as len can be large and we don't
2185 * want to use huge intermediate buffers.
2187 useraddr += sizeof(t); /* advance to start of buffer */
2189 unsigned int chunk =
2190 min_t(unsigned int, t.len, sizeof(buf));
2193 t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
2197 if (copy_to_user(useraddr, buf, chunk))
2205 case CHELSIO_SET_TRACE_FILTER:{
2207 const struct trace_params *tp;
2209 if (!capable(CAP_NET_ADMIN))
2211 if (!offload_running(adapter))
2213 if (copy_from_user(&t, useraddr, sizeof(t)))
2216 tp = (const struct trace_params *)&t.sip;
2218 t3_config_trace_filter(adapter, tp, 0,
2222 t3_config_trace_filter(adapter, tp, 1,
2233 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2235 struct mii_ioctl_data *data = if_mii(req);
2236 struct port_info *pi = netdev_priv(dev);
2237 struct adapter *adapter = pi->adapter;
2242 data->phy_id = pi->phy.addr;
2246 struct cphy *phy = &pi->phy;
2248 if (!phy->mdio_read)
2250 if (is_10G(adapter)) {
2251 mmd = data->phy_id >> 8;
2254 else if (mmd > MDIO_DEV_VEND2)
2258 phy->mdio_read(adapter, data->phy_id & 0x1f,
2259 mmd, data->reg_num, &val);
2262 phy->mdio_read(adapter, data->phy_id & 0x1f,
2263 0, data->reg_num & 0x1f,
2266 data->val_out = val;
2270 struct cphy *phy = &pi->phy;
2272 if (!capable(CAP_NET_ADMIN))
2274 if (!phy->mdio_write)
2276 if (is_10G(adapter)) {
2277 mmd = data->phy_id >> 8;
2280 else if (mmd > MDIO_DEV_VEND2)
2284 phy->mdio_write(adapter,
2285 data->phy_id & 0x1f, mmd,
2290 phy->mdio_write(adapter,
2291 data->phy_id & 0x1f, 0,
2292 data->reg_num & 0x1f,
2297 return cxgb_extension_ioctl(dev, req->ifr_data);
2304 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2306 struct port_info *pi = netdev_priv(dev);
2307 struct adapter *adapter = pi->adapter;
2310 if (new_mtu < 81) /* accommodate SACK */
2312 if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2315 init_port_mtus(adapter);
2316 if (adapter->params.rev == 0 && offload_running(adapter))
2317 t3_load_mtus(adapter, adapter->params.mtus,
2318 adapter->params.a_wnd, adapter->params.b_wnd,
2319 adapter->port[0]->mtu);
2323 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2325 struct port_info *pi = netdev_priv(dev);
2326 struct adapter *adapter = pi->adapter;
2327 struct sockaddr *addr = p;
2329 if (!is_valid_ether_addr(addr->sa_data))
2332 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2333 t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2334 if (offload_running(adapter))
2335 write_smt_entry(adapter, pi->port_id);
2340 * t3_synchronize_rx - wait for current Rx processing on a port to complete
2341 * @adap: the adapter
2344 * Ensures that current Rx processing on any of the queues associated with
2345 * the given port completes before returning. We do this by acquiring and
2346 * releasing the locks of the response queues associated with the port.
2348 static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2352 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
2353 struct sge_rspq *q = &adap->sge.qs[i].rspq;
2355 spin_lock_irq(&q->lock);
2356 spin_unlock_irq(&q->lock);
2360 static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2362 struct port_info *pi = netdev_priv(dev);
2363 struct adapter *adapter = pi->adapter;
2366 if (adapter->params.rev > 0)
2367 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2369 /* single control for all ports */
2370 unsigned int i, have_vlans = 0;
2371 for_each_port(adapter, i)
2372 have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2374 t3_set_vlan_accel(adapter, 1, have_vlans);
2376 t3_synchronize_rx(adapter, pi);
2379 #ifdef CONFIG_NET_POLL_CONTROLLER
2380 static void cxgb_netpoll(struct net_device *dev)
2382 struct port_info *pi = netdev_priv(dev);
2383 struct adapter *adapter = pi->adapter;
2386 for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2387 struct sge_qset *qs = &adapter->sge.qs[qidx];
2390 if (adapter->flags & USING_MSIX)
2395 t3_intr_handler(adapter, qs->rspq.polling) (0, source);
2401 * Periodic accumulation of MAC statistics.
2403 static void mac_stats_update(struct adapter *adapter)
2407 for_each_port(adapter, i) {
2408 struct net_device *dev = adapter->port[i];
2409 struct port_info *p = netdev_priv(dev);
2411 if (netif_running(dev)) {
2412 spin_lock(&adapter->stats_lock);
2413 t3_mac_update_stats(&p->mac);
2414 spin_unlock(&adapter->stats_lock);
2419 static void check_link_status(struct adapter *adapter)
2423 for_each_port(adapter, i) {
2424 struct net_device *dev = adapter->port[i];
2425 struct port_info *p = netdev_priv(dev);
2427 if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev))
2428 t3_link_changed(adapter, i);
2432 static void check_t3b2_mac(struct adapter *adapter)
2436 if (!rtnl_trylock()) /* synchronize with ifdown */
2439 for_each_port(adapter, i) {
2440 struct net_device *dev = adapter->port[i];
2441 struct port_info *p = netdev_priv(dev);
2444 if (!netif_running(dev))
2448 if (netif_running(dev) && netif_carrier_ok(dev))
2449 status = t3b2_mac_watchdog_task(&p->mac);
2451 p->mac.stats.num_toggled++;
2452 else if (status == 2) {
2453 struct cmac *mac = &p->mac;
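/* Status 2 means the watchdog reset the MAC: reprogram its MTU, address and Rx mode, restart the link, and re-enable the MAC and port interrupts. */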
2455 t3_mac_set_mtu(mac, dev->mtu);
2456 t3_mac_set_address(mac, 0, dev->dev_addr);
2457 cxgb_set_rxmode(dev);
2458 t3_link_start(&p->phy, mac, &p->link_config);
2459 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2460 t3_port_intr_enable(adapter, p->port_id);
2461 p->mac.stats.num_resets++;
2468 static void t3_adap_check_task(struct work_struct *work)
2470 struct adapter *adapter = container_of(work, struct adapter,
2471 adap_check_task.work);
2472 const struct adapter_params *p = &adapter->params;
2474 adapter->check_task_cnt++;
2476 /* Check link status for PHYs without interrupts */
2477 if (p->linkpoll_period)
2478 check_link_status(adapter);
2480 /* Accumulate MAC stats if needed */
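/* linkpoll_period is in tenths of a second: once the accumulated polling time covers stats_update_period, refresh the stats and restart the count. */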
2481 if (!p->linkpoll_period ||
2482 (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2483 p->stats_update_period) {
2484 mac_stats_update(adapter);
2485 adapter->check_task_cnt = 0;
2488 if (p->rev == T3_REV_B2)
2489 check_t3b2_mac(adapter);
2491 /* Schedule the next check update if any port is active. */
2492 spin_lock_irq(&adapter->work_lock);
2493 if (adapter->open_device_map & PORT_MASK)
2494 schedule_chk_task(adapter);
2495 spin_unlock_irq(&adapter->work_lock);
2499 * Processes external (PHY) interrupts in process context.
2501 static void ext_intr_task(struct work_struct *work)
2503 struct adapter *adapter = container_of(work, struct adapter,
2504 ext_intr_handler_task);
2506 t3_phy_intr_handler(adapter);
2508 /* Now reenable external interrupts */
2509 spin_lock_irq(&adapter->work_lock);
2510 if (adapter->slow_intr_mask) {
2511 adapter->slow_intr_mask |= F_T3DBG;
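/* Clear any external interrupt latched while F_T3DBG was masked, then rewrite the enable register with the bit restored. */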
2512 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2513 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2514 adapter->slow_intr_mask);
2516 spin_unlock_irq(&adapter->work_lock);
2520 * Interrupt-context handler for external (PHY) interrupts.
2522 void t3_os_ext_intr_handler(struct adapter *adapter)
2525 * Schedule a task to handle external interrupts as they may be slow
2526 * and we use a mutex to protect MDIO registers. We disable PHY
2527 * interrupts in the meantime and let the task reenable them when
2528 * it's done.
2530 spin_lock(&adapter->work_lock);
2531 if (adapter->slow_intr_mask) {
2532 adapter->slow_intr_mask &= ~F_T3DBG;
2533 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2534 adapter->slow_intr_mask);
2535 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2537 spin_unlock(&adapter->work_lock);
2540 static int t3_adapter_error(struct adapter *adapter, int reset)
2544 if (is_offload(adapter) &&
2545 test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2546 cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
2547 offload_close(&adapter->tdev);
2550 /* Stop all ports */
2551 for_each_port(adapter, i) {
2552 struct net_device *netdev = adapter->port[i];
2554 if (netif_running(netdev))
2558 /* Stop SGE timers */
2559 t3_stop_sge_timers(adapter);
2561 adapter->flags &= ~FULL_INIT_DONE;
2564 ret = t3_reset_adapter(adapter);
2566 pci_disable_device(adapter->pdev);
2571 static int t3_reenable_adapter(struct adapter *adapter)
2573 if (pci_enable_device(adapter->pdev)) {
2574 dev_err(&adapter->pdev->dev,
2575 "Cannot re-enable PCI device after reset.\n");
2578 pci_set_master(adapter->pdev);
2579 pci_restore_state(adapter->pdev);
2581 /* Free sge resources */
2582 t3_free_sge_resources(adapter);
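/* Re-run the adapter preparation against the restored PCI state so the device comes back in a usable configuration. */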
2584 if (t3_replay_prep_adapter(adapter))
2592 static void t3_resume_ports(struct adapter *adapter)
2596 /* Restart the ports */
2597 for_each_port(adapter, i) {
2598 struct net_device *netdev = adapter->port[i];
2600 if (netif_running(netdev)) {
2601 if (cxgb_open(netdev)) {
2602 dev_err(&adapter->pdev->dev,
2603 "can't bring device back up"
2610 if (is_offload(adapter) && !ofld_disable)
2611 cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
2615 * processes a fatal error.
2616 * Bring the ports down, reset the chip, bring the ports back up.
2618 static void fatal_error_task(struct work_struct *work)
2620 struct adapter *adapter = container_of(work, struct adapter,
2621 fatal_error_handler_task);
2625 err = t3_adapter_error(adapter, 1);
2627 err = t3_reenable_adapter(adapter);
2629 t3_resume_ports(adapter);
2631 CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
2635 void t3_fatal_err(struct adapter *adapter)
2637 unsigned int fw_status[4];
2639 if (adapter->flags & FULL_INIT_DONE) {
2640 t3_sge_stop(adapter);
2641 t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
2642 t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
2643 t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
2644 t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
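/* The writes above stop the Tx and Rx paths of both MACs; XGM_REG(reg, 1) addresses the second MAC's copy of the register. */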
2646 spin_lock(&adapter->work_lock);
2647 t3_intr_disable(adapter);
2648 queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
2649 spin_unlock(&adapter->work_lock);
2651 CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2652 if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2653 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2654 fw_status[0], fw_status[1],
2655 fw_status[2], fw_status[3]);
2660 * t3_io_error_detected - called when PCI error is detected
2661 * @pdev: Pointer to PCI device
2662 * @state: The current pci connection state
2664 * This function is called after a PCI bus error affecting
2665 * this device has been detected.
2667 static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
2668 pci_channel_state_t state)
2670 struct adapter *adapter = pci_get_drvdata(pdev);
2673 ret = t3_adapter_error(adapter, 0);
2675 /* Request a slot reset. */
2676 return PCI_ERS_RESULT_NEED_RESET;
2680 * t3_io_slot_reset - called after the pci bus has been reset.
2681 * @pdev: Pointer to PCI device
2683 * Restart the card from scratch, as if from a cold-boot.
2685 static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
2687 struct adapter *adapter = pci_get_drvdata(pdev);
2689 if (!t3_reenable_adapter(adapter))
2690 return PCI_ERS_RESULT_RECOVERED;
2692 return PCI_ERS_RESULT_DISCONNECT;
2696 * t3_io_resume - called when traffic can start flowing again.
2697 * @pdev: Pointer to PCI device
2699 * This callback is called when the error recovery driver tells us that
2700 * it's OK to resume normal operation.
2702 static void t3_io_resume(struct pci_dev *pdev)
2704 struct adapter *adapter = pci_get_drvdata(pdev);
2706 t3_resume_ports(adapter);
2709 static struct pci_error_handlers t3_err_handler = {
2710 .error_detected = t3_io_error_detected,
2711 .slot_reset = t3_io_slot_reset,
2712 .resume = t3_io_resume,
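/* The EEH callbacks reuse the driver's own recovery path: error_detected quiesces via t3_adapter_error(), slot_reset reinitializes through t3_reenable_adapter(), and resume restarts the ports. */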
2716 * Set the number of qsets based on the number of CPUs and the number of ports,
2717 * not to exceed the number of available qsets, assuming there are enough qsets
2718 * per port in HW.
2720 static void set_nqsets(struct adapter *adap)
2723 int num_cpus = num_online_cpus();
2724 int hwports = adap->params.nports;
2725 int nqsets = adap->msix_nvectors - 1;
2727 if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
2729 (hwports * nqsets > SGE_QSETS ||
2730 num_cpus >= nqsets / hwports))
2732 if (nqsets > num_cpus)
2734 if (nqsets < 1 || hwports == 4)
2739 for_each_port(adap, i) {
2740 struct port_info *pi = adap2pinfo(adap, i);
2743 pi->nqsets = nqsets;
2744 j = pi->first_qset + nqsets;
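/* j advances to the first unassigned qset index, giving each port a contiguous block of queue sets. */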
2746 dev_info(&adap->pdev->dev,
2747 "Port %d using %d queue sets.\n", i, nqsets);
2751 static int __devinit cxgb_enable_msix(struct adapter *adap)
2753 struct msix_entry entries[SGE_QSETS + 1];
2757 vectors = ARRAY_SIZE(entries);
2758 for (i = 0; i < vectors; ++i)
2759 entries[i].entry = i;
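/* A positive return from pci_enable_msix() is the number of vectors that could be allocated, so retry with that smaller count; at least one vector per port plus one for the slow-path interrupt is required. */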
2761 while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0)
2764 if (!err && vectors < (adap->params.nports + 1))
2768 for (i = 0; i < vectors; ++i)
2769 adap->msix_info[i].vec = entries[i].vector;
2770 adap->msix_nvectors = vectors;
2776 static void __devinit print_port_info(struct adapter *adap,
2777 const struct adapter_info *ai)
2779 static const char *pci_variant[] = {
2780 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
2787 snprintf(buf, sizeof(buf), "%s x%d",
2788 pci_variant[adap->params.pci.variant],
2789 adap->params.pci.width);
2791 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2792 pci_variant[adap->params.pci.variant],
2793 adap->params.pci.speed, adap->params.pci.width);
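/* PCI Express devices report only the link width (lanes); PCI/PCI-X parts report bus speed and width as well. */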
2795 for_each_port(adap, i) {
2796 struct net_device *dev = adap->port[i];
2797 const struct port_info *pi = netdev_priv(dev);
2799 if (!test_bit(i, &adap->registered_device_map))
2801 printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
2802 dev->name, ai->desc, pi->phy.desc,
2803 is_offload(adap) ? "R" : "", adap->params.rev, buf,
2804 (adap->flags & USING_MSIX) ? " MSI-X" :
2805 (adap->flags & USING_MSI) ? " MSI" : "");
2806 if (adap->name == dev->name && adap->params.vpd.mclk)
2808 "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
2809 adap->name, t3_mc7_size(&adap->cm) >> 20,
2810 t3_mc7_size(&adap->pmtx) >> 20,
2811 t3_mc7_size(&adap->pmrx) >> 20,
2812 adap->params.vpd.sn);
2816 static const struct net_device_ops cxgb_netdev_ops = {
2817 .ndo_open = cxgb_open,
2818 .ndo_stop = cxgb_close,
2819 .ndo_start_xmit = t3_eth_xmit,
2820 .ndo_get_stats = cxgb_get_stats,
2821 .ndo_validate_addr = eth_validate_addr,
2822 .ndo_set_multicast_list = cxgb_set_rxmode,
2823 .ndo_do_ioctl = cxgb_ioctl,
2824 .ndo_change_mtu = cxgb_change_mtu,
2825 .ndo_set_mac_address = cxgb_set_mac_addr,
2826 .ndo_vlan_rx_register = vlan_rx_register,
2827 #ifdef CONFIG_NET_POLL_CONTROLLER
2828 .ndo_poll_controller = cxgb_netpoll,
2832 static int __devinit init_one(struct pci_dev *pdev,
2833 const struct pci_device_id *ent)
2835 static int version_printed;
2837 int i, err, pci_using_dac = 0;
2838 unsigned long mmio_start, mmio_len;
2839 const struct adapter_info *ai;
2840 struct adapter *adapter = NULL;
2841 struct port_info *pi;
2843 if (!version_printed) {
2844 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
2849 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
2851 printk(KERN_ERR DRV_NAME
2852 ": cannot initialize work queue\n");
2857 err = pci_request_regions(pdev, DRV_NAME);
2859 /* Just info, some other driver may have claimed the device. */
2860 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
2864 err = pci_enable_device(pdev);
2866 dev_err(&pdev->dev, "cannot enable PCI device\n");
2867 goto out_release_regions;
2870 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2872 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2874 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
2875 "coherent allocations\n");
2876 goto out_disable_device;
2878 } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
2879 dev_err(&pdev->dev, "no usable DMA configuration\n");
2880 goto out_disable_device;
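/* pci_using_dac records whether the 64-bit mask was accepted so that NETIF_F_HIGHDMA can be advertised on the ports later. */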
2883 pci_set_master(pdev);
2884 pci_save_state(pdev);
2886 mmio_start = pci_resource_start(pdev, 0);
2887 mmio_len = pci_resource_len(pdev, 0);
2888 ai = t3_get_adapter_info(ent->driver_data);
2890 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2893 goto out_disable_device;
2896 adapter->regs = ioremap_nocache(mmio_start, mmio_len);
2897 if (!adapter->regs) {
2898 dev_err(&pdev->dev, "cannot map device registers\n");
2900 goto out_free_adapter;
2903 adapter->pdev = pdev;
2904 adapter->name = pci_name(pdev);
2905 adapter->msg_enable = dflt_msg_enable;
2906 adapter->mmio_len = mmio_len;
2908 mutex_init(&adapter->mdio_lock);
2909 spin_lock_init(&adapter->work_lock);
2910 spin_lock_init(&adapter->stats_lock);
2912 INIT_LIST_HEAD(&adapter->adapter_list);
2913 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
2914 INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
2915 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
2917 for (i = 0; i < ai->nports; ++i) {
2918 struct net_device *netdev;
2920 netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
2926 SET_NETDEV_DEV(netdev, &pdev->dev);
2928 adapter->port[i] = netdev;
2929 pi = netdev_priv(netdev);
2930 pi->adapter = adapter;
2931 pi->rx_offload = T3_RX_CSUM | T3_LRO;
2933 netif_carrier_off(netdev);
2934 netif_tx_stop_all_queues(netdev);
2935 netdev->irq = pdev->irq;
2936 netdev->mem_start = mmio_start;
2937 netdev->mem_end = mmio_start + mmio_len - 1;
2938 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
2939 netdev->features |= NETIF_F_LLTX;
2940 netdev->features |= NETIF_F_GRO;
2942 netdev->features |= NETIF_F_HIGHDMA;
2944 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2945 netdev->netdev_ops = &cxgb_netdev_ops;
2946 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
2949 pci_set_drvdata(pdev, adapter);
2950 if (t3_prep_adapter(adapter, ai, 1) < 0) {
2956 * The card is now ready to go. If any errors occur during device
2957 * registration we do not fail the whole card but rather proceed only
2958 * with the ports we manage to register successfully. However, we must
2959 * register at least one net device.
2961 for_each_port(adapter, i) {
2962 err = register_netdev(adapter->port[i]);
2964 dev_warn(&pdev->dev,
2965 "cannot register net device %s, skipping\n",
2966 adapter->port[i]->name);
2969 * Change the name we use for messages to the name of
2970 * the first successfully registered interface.
2972 if (!adapter->registered_device_map)
2973 adapter->name = adapter->port[i]->name;
2975 __set_bit(i, &adapter->registered_device_map);
2978 if (!adapter->registered_device_map) {
2979 dev_err(&pdev->dev, "could not register any net devices\n");
2983 /* Driver's ready. Reflect it on LEDs */
2984 t3_led_ready(adapter);
2986 if (is_offload(adapter)) {
2987 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
2988 cxgb3_adapter_ofld(adapter);
2991 /* See what interrupts we'll be using */
2992 if (msi > 1 && cxgb_enable_msix(adapter) == 0)
2993 adapter->flags |= USING_MSIX;
2994 else if (msi > 0 && pci_enable_msi(pdev) == 0)
2995 adapter->flags |= USING_MSI;
2997 set_nqsets(adapter);
2999 err = sysfs_create_group(&adapter->port[0]->dev.kobj,
3002 print_port_info(adapter, ai);
3006 iounmap(adapter->regs);
3007 for (i = ai->nports - 1; i >= 0; --i)
3008 if (adapter->port[i])
3009 free_netdev(adapter->port[i]);
3015 pci_disable_device(pdev);
3016 out_release_regions:
3017 pci_release_regions(pdev);
3018 pci_set_drvdata(pdev, NULL);
3022 static void __devexit remove_one(struct pci_dev *pdev)
3024 struct adapter *adapter = pci_get_drvdata(pdev);
3029 t3_sge_stop(adapter);
3030 sysfs_remove_group(&adapter->port[0]->dev.kobj,
3033 if (is_offload(adapter)) {
3034 cxgb3_adapter_unofld(adapter);
3035 if (test_bit(OFFLOAD_DEVMAP_BIT,
3036 &adapter->open_device_map))
3037 offload_close(&adapter->tdev);
3040 for_each_port(adapter, i)
3041 if (test_bit(i, &adapter->registered_device_map))
3042 unregister_netdev(adapter->port[i]);
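/* With all net devices unregistered, the SGE timers and resources can be torn down without racing against traffic. */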
3044 t3_stop_sge_timers(adapter);
3045 t3_free_sge_resources(adapter);
3046 cxgb_disable_msi(adapter);
3048 for_each_port(adapter, i)
3049 if (adapter->port[i])
3050 free_netdev(adapter->port[i]);
3052 iounmap(adapter->regs);
3054 pci_release_regions(pdev);
3055 pci_disable_device(pdev);
3056 pci_set_drvdata(pdev, NULL);
3060 static struct pci_driver driver = {
3062 .id_table = cxgb3_pci_tbl,
3064 .remove = __devexit_p(remove_one),
3065 .err_handler = &t3_err_handler,
3068 static int __init cxgb3_init_module(void)
3072 cxgb3_offload_init();
3074 ret = pci_register_driver(&driver);
3078 static void __exit cxgb3_cleanup_module(void)
3080 pci_unregister_driver(&driver);
3082 destroy_workqueue(cxgb3_wq);
3085 module_init(cxgb3_init_module);
3086 module_exit(cxgb3_cleanup_module);