2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 #include <linux/module.h>
33 #include <linux/moduleparam.h>
34 #include <linux/init.h>
35 #include <linux/pci.h>
36 #include <linux/dma-mapping.h>
37 #include <linux/netdevice.h>
38 #include <linux/etherdevice.h>
39 #include <linux/if_vlan.h>
40 #include <linux/mii.h>
41 #include <linux/sockios.h>
42 #include <linux/workqueue.h>
43 #include <linux/proc_fs.h>
44 #include <linux/rtnetlink.h>
45 #include <linux/firmware.h>
46 #include <linux/log2.h>
47 #include <asm/uaccess.h>
50 #include "cxgb3_ioctl.h"
52 #include "cxgb3_offload.h"
55 #include "cxgb3_ctl_defs.h"
57 #include "firmware_exports.h"
60 MAX_TXQ_ENTRIES = 16384,
61 MAX_CTRL_TXQ_ENTRIES = 1024,
62 MAX_RSPQ_ENTRIES = 16384,
63 MAX_RX_BUFFERS = 16384,
64 MAX_RX_JUMBO_BUFFERS = 16384,
66 MIN_CTRL_TXQ_ENTRIES = 4,
67 MIN_RSPQ_ENTRIES = 32,
71 #define PORT_MASK ((1 << MAX_NPORTS) - 1)
73 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
74 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
75 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
77 #define EEPROM_MAGIC 0x38E2F10C
79 #define CH_DEVICE(devid, idx) \
80 { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
82 static const struct pci_device_id cxgb3_pci_tbl[] = {
83 CH_DEVICE(0x20, 0), /* PE9000 */
84 CH_DEVICE(0x21, 1), /* T302E */
85 CH_DEVICE(0x22, 2), /* T310E */
86 CH_DEVICE(0x23, 3), /* T320X */
87 CH_DEVICE(0x24, 1), /* T302X */
88 CH_DEVICE(0x25, 3), /* T320E */
89 CH_DEVICE(0x26, 2), /* T310X */
90 CH_DEVICE(0x30, 2), /* T3B10 */
91 CH_DEVICE(0x31, 3), /* T3B20 */
92 CH_DEVICE(0x32, 1), /* T3B02 */
96 MODULE_DESCRIPTION(DRV_DESC);
97 MODULE_AUTHOR("Chelsio Communications");
98 MODULE_LICENSE("Dual BSD/GPL");
99 MODULE_VERSION(DRV_VERSION);
100 MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
102 static int dflt_msg_enable = DFLT_MSG_ENABLE;
104 module_param(dflt_msg_enable, int, 0644);
105 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
108 * The driver uses the best interrupt scheme available on a platform in the
109 * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
110 * of these schemes the driver may consider as follows:
112 * msi = 2: choose from among all three options
113 * msi = 1: only consider MSI and pin interrupts
114 * msi = 0: force pin interrupts
118 module_param(msi, int, 0644);
119 MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
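/*
 * Illustrative usage: loading the module with "modprobe cxgb3 msi=1"
 * limits the driver to MSI and legacy pin interrupts, while msi=0 forces
 * pin interrupts only.
 */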
122 * The driver enables offload by default.
123 * To disable it, set ofld_disable = 1.
126 static int ofld_disable = 0;
128 module_param(ofld_disable, int, 0644);
129 MODULE_PARM_DESC(ofld_disable, "whether to disable offload at init time (1 = disable)");
132 * We have work elements that we need to cancel when an interface is taken
133 * down. Normally the work elements would be executed by keventd but that
134 * can deadlock because of linkwatch. If our close method takes the rtnl
135 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
136 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
137 * for our work to complete. Get our own work queue to solve this.
139 static struct workqueue_struct *cxgb3_wq;
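/*
 * cxgb3_wq is allocated in the module init path (e.g. with
 * create_singlethread_workqueue(DRV_NAME)) and destroyed on module exit.
 */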
142 * link_report - show port link status and speed/duplex
143 * @dev: the port whose settings are to be reported
145 * Shows the link status, speed, and duplex of a port.
147 static void link_report(struct net_device *dev)
149 if (!netif_carrier_ok(dev))
150 printk(KERN_INFO "%s: link down\n", dev->name);
152 const char *s = "10Mbps";
153 const struct port_info *p = netdev_priv(dev);
155 switch (p->link_config.speed) {
167 printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
168 p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
173 * t3_os_link_changed - handle link status changes
174 * @adapter: the adapter associated with the link change
175 * @port_id: the port index whose link status has changed
176 * @link_stat: the new status of the link
177 * @speed: the new speed setting
178 * @duplex: the new duplex setting
179 * @pause: the new flow-control setting
181 * This is the OS-dependent handler for link status changes. The OS
182 * neutral handler takes care of most of the processing for these events,
183 * then calls this handler for any OS-specific processing.
185 void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
186 int speed, int duplex, int pause)
188 struct net_device *dev = adapter->port[port_id];
189 struct port_info *pi = netdev_priv(dev);
190 struct cmac *mac = &pi->mac;
192 /* Skip changes from disabled ports. */
193 if (!netif_running(dev))
196 if (link_stat != netif_carrier_ok(dev)) {
198 t3_mac_enable(mac, MAC_DIRECTION_RX);
199 netif_carrier_on(dev);
201 netif_carrier_off(dev);
202 pi->phy.ops->power_down(&pi->phy, 1);
203 t3_mac_disable(mac, MAC_DIRECTION_RX);
204 t3_link_start(&pi->phy, mac, &pi->link_config);
211 static void cxgb_set_rxmode(struct net_device *dev)
213 struct t3_rx_mode rm;
214 struct port_info *pi = netdev_priv(dev);
216 init_rx_mode(&rm, dev, dev->mc_list);
217 t3_mac_set_rx_mode(&pi->mac, &rm);
221 * link_start - enable a port
222 * @dev: the device to enable
224 * Performs the MAC and PHY actions needed to enable a port.
226 static void link_start(struct net_device *dev)
228 struct t3_rx_mode rm;
229 struct port_info *pi = netdev_priv(dev);
230 struct cmac *mac = &pi->mac;
232 init_rx_mode(&rm, dev, dev->mc_list);
234 t3_mac_set_mtu(mac, dev->mtu);
235 t3_mac_set_address(mac, 0, dev->dev_addr);
236 t3_mac_set_rx_mode(mac, &rm);
237 t3_link_start(&pi->phy, mac, &pi->link_config);
238 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
241 static inline void cxgb_disable_msi(struct adapter *adapter)
243 if (adapter->flags & USING_MSIX) {
244 pci_disable_msix(adapter->pdev);
245 adapter->flags &= ~USING_MSIX;
246 } else if (adapter->flags & USING_MSI) {
247 pci_disable_msi(adapter->pdev);
248 adapter->flags &= ~USING_MSI;
253 * Interrupt handler for asynchronous events used with MSI-X.
255 static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
257 t3_slow_intr_handler(cookie);
262 * Name the MSI-X interrupts.
264 static void name_msix_vecs(struct adapter *adap)
266 int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
268 snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
269 adap->msix_info[0].desc[n] = 0;
271 for_each_port(adap, j) {
272 struct net_device *d = adap->port[j];
273 const struct port_info *pi = netdev_priv(d);
275 for (i = 0; i < pi->nqsets; i++, msi_idx++) {
276 snprintf(adap->msix_info[msi_idx].desc, n,
277 "%s (queue %d)", d->name, i);
278 adap->msix_info[msi_idx].desc[n] = 0;
283 static int request_msix_data_irqs(struct adapter *adap)
285 int i, j, err, qidx = 0;
287 for_each_port(adap, i) {
288 int nqsets = adap2pinfo(adap, i)->nqsets;
290 for (j = 0; j < nqsets; ++j) {
291 err = request_irq(adap->msix_info[qidx + 1].vec,
292 t3_intr_handler(adap,
295 adap->msix_info[qidx + 1].desc,
296 &adap->sge.qs[qidx]);
299 free_irq(adap->msix_info[qidx + 1].vec,
300 &adap->sge.qs[qidx]);
309 static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
314 while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
322 static int init_tp_parity(struct adapter *adap)
326 struct cpl_set_tcb_field *greq;
327 unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;
329 t3_tp_set_offload_mode(adap, 1);
331 for (i = 0; i < 16; i++) {
332 struct cpl_smt_write_req *req;
334 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
335 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
336 memset(req, 0, sizeof(*req));
337 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
338 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
340 t3_mgmt_tx(adap, skb);
343 for (i = 0; i < 2048; i++) {
344 struct cpl_l2t_write_req *req;
346 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
347 req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
348 memset(req, 0, sizeof(*req));
349 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
350 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
351 req->params = htonl(V_L2T_W_IDX(i));
352 t3_mgmt_tx(adap, skb);
355 for (i = 0; i < 2048; i++) {
356 struct cpl_rte_write_req *req;
358 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
359 req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
360 memset(req, 0, sizeof(*req));
361 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
362 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
363 req->l2t_idx = htonl(V_L2T_W_IDX(i));
364 t3_mgmt_tx(adap, skb);
367 skb = alloc_skb(sizeof(*greq), GFP_KERNEL | __GFP_NOFAIL);
368 greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
369 memset(greq, 0, sizeof(*greq));
370 greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
371 OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
372 greq->mask = cpu_to_be64(1);
373 t3_mgmt_tx(adap, skb);
375 i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
376 t3_tp_set_offload_mode(adap, 0);
381 * setup_rss - configure RSS
384 * Sets up RSS to distribute packets to multiple receive queues. We
385 * configure the RSS CPU lookup table to distribute to the number of HW
386 * receive queues, and the response queue lookup table to narrow that
387 * down to the response queues actually configured for each port.
388 * We always configure the RSS mapping for two ports since the mapping
389 * table has plenty of entries.
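 *
 * For example, with nq0 = 2 and nq1 = 2 the first half of the response
 * queue lookup table alternates between qsets 0 and 1 and the second half
 * between qsets 2 and 3.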
391 static void setup_rss(struct adapter *adap)
394 unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
395 unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
396 u8 cpus[SGE_QSETS + 1];
397 u16 rspq_map[RSS_TABLE_SIZE];
399 for (i = 0; i < SGE_QSETS; ++i)
401 cpus[SGE_QSETS] = 0xff; /* terminator */
403 for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
404 rspq_map[i] = i % nq0;
405 rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
408 t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
409 F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
410 V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
413 static void init_napi(struct adapter *adap)
417 for (i = 0; i < SGE_QSETS; i++) {
418 struct sge_qset *qs = &adap->sge.qs[i];
421 netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
426 * netif_napi_add() can be called only once per napi_struct because it
427 * adds each new napi_struct to a list. Be careful not to call it a
428 * second time, e.g., during EEH recovery, by making a note of it.
430 adap->flags |= NAPI_INIT;
434 * Wait until all NAPI handlers are descheduled. This includes the handlers of
435 * both netdevices representing interfaces and the dummy ones for the extra queues.
438 static void quiesce_rx(struct adapter *adap)
442 for (i = 0; i < SGE_QSETS; i++)
443 if (adap->sge.qs[i].adap)
444 napi_disable(&adap->sge.qs[i].napi);
447 static void enable_all_napi(struct adapter *adap)
450 for (i = 0; i < SGE_QSETS; i++)
451 if (adap->sge.qs[i].adap)
452 napi_enable(&adap->sge.qs[i].napi);
456 * setup_sge_qsets - configure SGE Tx/Rx/response queues
459 * Determines how many sets of SGE queues to use and initializes them.
460 * We support multiple queue sets per port if we have MSI-X, otherwise
461 * just one queue set per port.
463 static int setup_sge_qsets(struct adapter *adap)
465 int i, j, err, irq_idx = 0, qset_idx = 0;
466 unsigned int ntxq = SGE_TXQ_PER_SET;
468 if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
471 for_each_port(adap, i) {
472 struct net_device *dev = adap->port[i];
473 struct port_info *pi = netdev_priv(dev);
475 pi->qs = &adap->sge.qs[pi->first_qset];
476 for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
477 err = t3_sge_alloc_qset(adap, qset_idx, 1,
478 (adap->flags & USING_MSIX) ? qset_idx + 1 :
480 &adap->params.sge.qset[qset_idx], ntxq, dev);
482 t3_free_sge_resources(adap);
491 static ssize_t attr_show(struct device *d, char *buf,
492 ssize_t(*format) (struct net_device *, char *))
496 /* Synchronize with ioctls that may shut down the device */
498 len = (*format) (to_net_dev(d), buf);
503 static ssize_t attr_store(struct device *d,
504 const char *buf, size_t len,
505 ssize_t(*set) (struct net_device *, unsigned int),
506 unsigned int min_val, unsigned int max_val)
512 if (!capable(CAP_NET_ADMIN))
515 val = simple_strtoul(buf, &endp, 0);
516 if (endp == buf || val < min_val || val > max_val)
520 ret = (*set) (to_net_dev(d), val);
527 #define CXGB3_SHOW(name, val_expr) \
528 static ssize_t format_##name(struct net_device *dev, char *buf) \
530 struct port_info *pi = netdev_priv(dev); \
531 struct adapter *adap = pi->adapter; \
532 return sprintf(buf, "%u\n", val_expr); \
534 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
537 return attr_show(d, buf, format_##name); \
540 static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
542 struct port_info *pi = netdev_priv(dev);
543 struct adapter *adap = pi->adapter;
544 int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;
546 if (adap->flags & FULL_INIT_DONE)
548 if (val && adap->params.rev == 0)
550 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
553 adap->params.mc5.nfilters = val;
557 static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
558 const char *buf, size_t len)
560 return attr_store(d, buf, len, set_nfilters, 0, ~0);
563 static ssize_t set_nservers(struct net_device *dev, unsigned int val)
565 struct port_info *pi = netdev_priv(dev);
566 struct adapter *adap = pi->adapter;
568 if (adap->flags & FULL_INIT_DONE)
570 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
573 adap->params.mc5.nservers = val;
577 static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
578 const char *buf, size_t len)
580 return attr_store(d, buf, len, set_nservers, 0, ~0);
583 #define CXGB3_ATTR_R(name, val_expr) \
584 CXGB3_SHOW(name, val_expr) \
585 static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
587 #define CXGB3_ATTR_RW(name, val_expr, store_method) \
588 CXGB3_SHOW(name, val_expr) \
589 static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
591 CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
592 CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
593 CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
595 static struct attribute *cxgb3_attrs[] = {
596 &dev_attr_cam_size.attr,
597 &dev_attr_nfilters.attr,
598 &dev_attr_nservers.attr,
602 static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
604 static ssize_t tm_attr_show(struct device *d,
605 char *buf, int sched)
607 struct port_info *pi = netdev_priv(to_net_dev(d));
608 struct adapter *adap = pi->adapter;
609 unsigned int v, addr, bpt, cpt;
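/* Each rate-limit register holds the parameters of two schedulers, hence sched / 2. */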
612 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
614 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
615 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
618 bpt = (v >> 8) & 0xff;
621 len = sprintf(buf, "disabled\n");
623 v = (adap->params.vpd.cclk * 1000) / cpt;
624 len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
630 static ssize_t tm_attr_store(struct device *d,
631 const char *buf, size_t len, int sched)
633 struct port_info *pi = netdev_priv(to_net_dev(d));
634 struct adapter *adap = pi->adapter;
639 if (!capable(CAP_NET_ADMIN))
642 val = simple_strtoul(buf, &endp, 0);
643 if (endp == buf || val > 10000000)
647 ret = t3_config_sched(adap, val, sched);
654 #define TM_ATTR(name, sched) \
655 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
658 return tm_attr_show(d, buf, sched); \
660 static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
661 const char *buf, size_t len) \
663 return tm_attr_store(d, buf, len, sched); \
665 static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
676 static struct attribute *offload_attrs[] = {
677 &dev_attr_sched0.attr,
678 &dev_attr_sched1.attr,
679 &dev_attr_sched2.attr,
680 &dev_attr_sched3.attr,
681 &dev_attr_sched4.attr,
682 &dev_attr_sched5.attr,
683 &dev_attr_sched6.attr,
684 &dev_attr_sched7.attr,
688 static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
691 * Sends an sk_buff to an offload queue driver
692 * after dealing with any active network taps.
694 static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
699 ret = t3_offload_tx(tdev, skb);
704 static int write_smt_entry(struct adapter *adapter, int idx)
706 struct cpl_smt_write_req *req;
707 struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
712 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
713 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
714 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
715 req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */
717 memset(req->src_mac1, 0, sizeof(req->src_mac1));
718 memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
720 offload_tx(&adapter->tdev, skb);
724 static int init_smt(struct adapter *adapter)
728 for_each_port(adapter, i)
729 write_smt_entry(adapter, i);
733 static void init_port_mtus(struct adapter *adapter)
735 unsigned int mtus = adapter->port[0]->mtu;
737 if (adapter->port[1])
738 mtus |= adapter->port[1]->mtu << 16;
739 t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
742 static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
746 struct mngt_pktsched_wr *req;
748 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
749 req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
750 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
751 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
757 t3_mgmt_tx(adap, skb);
760 static void bind_qsets(struct adapter *adap)
764 for_each_port(adap, i) {
765 const struct port_info *pi = adap2pinfo(adap, i);
767 for (j = 0; j < pi->nqsets; ++j)
768 send_pktsched_cmd(adap, 1, pi->first_qset + j, -1,
773 #define FW_FNAME "t3fw-%d.%d.%d.bin"
774 #define TPSRAM_NAME "t3%c_protocol_sram-%d.%d.%d.bin"
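/*
 * These templates are filled in with the versions the driver was built
 * against and passed to request_firmware(), yielding names such as
 * "t3fw-7.0.0.bin" (version numbers illustrative only).
 */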
776 static int upgrade_fw(struct adapter *adap)
780 const struct firmware *fw;
781 struct device *dev = &adap->pdev->dev;
783 snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
784 FW_VERSION_MINOR, FW_VERSION_MICRO);
785 ret = request_firmware(&fw, buf, dev);
787 dev_err(dev, "could not upgrade firmware: unable to load %s\n",
791 ret = t3_load_fw(adap, fw->data, fw->size);
792 release_firmware(fw);
795 dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
796 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
798 dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
799 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
804 static inline char t3rev2char(struct adapter *adapter)
808 switch(adapter->params.rev) {
820 static int update_tpsram(struct adapter *adap)
822 const struct firmware *tpsram;
824 struct device *dev = &adap->pdev->dev;
828 rev = t3rev2char(adap);
832 snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
833 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
835 ret = request_firmware(&tpsram, buf, dev);
837 dev_err(dev, "could not load TP SRAM: unable to load %s\n",
842 ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
846 ret = t3_set_proto_sram(adap, tpsram->data);
849 "successful update of protocol engine "
851 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
853 dev_err(dev, "failed to update of protocol engine %d.%d.%d\n",
854 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
856 dev_err(dev, "loading protocol SRAM failed\n");
859 release_firmware(tpsram);
865 * cxgb_up - enable the adapter
866 * @adapter: adapter being enabled
868 * Called when the first port is enabled, this function performs the
869 * actions necessary to make an adapter operational, such as completing
870 * the initialization of HW modules, and enabling interrupts.
872 * Must be called with the rtnl lock held.
874 static int cxgb_up(struct adapter *adap)
879 if (!(adap->flags & FULL_INIT_DONE)) {
880 err = t3_check_fw_version(adap, &must_load);
881 if (err == -EINVAL) {
882 err = upgrade_fw(adap);
883 if (err && must_load)
887 err = t3_check_tpsram_version(adap, &must_load);
888 if (err == -EINVAL) {
889 err = update_tpsram(adap);
890 if (err && must_load)
894 err = t3_init_hw(adap, 0);
898 t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
899 t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
901 err = setup_sge_qsets(adap);
906 if (!(adap->flags & NAPI_INIT))
908 adap->flags |= FULL_INIT_DONE;
913 if (adap->flags & USING_MSIX) {
914 name_msix_vecs(adap);
915 err = request_irq(adap->msix_info[0].vec,
916 t3_async_intr_handler, 0,
917 adap->msix_info[0].desc, adap);
921 err = request_msix_data_irqs(adap);
923 free_irq(adap->msix_info[0].vec, adap);
926 } else if ((err = request_irq(adap->pdev->irq,
927 t3_intr_handler(adap,
928 adap->sge.qs[0].rspq.
930 (adap->flags & USING_MSI) ?
935 enable_all_napi(adap);
937 t3_intr_enable(adap);
939 if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
940 is_offload(adap) && init_tp_parity(adap) == 0)
941 adap->flags |= TP_PARITY_INIT;
943 if (adap->flags & TP_PARITY_INIT) {
944 t3_write_reg(adap, A_TP_INT_CAUSE,
945 F_CMCACHEPERR | F_ARPLUTPERR);
946 t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
949 if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
951 adap->flags |= QUEUES_BOUND;
956 CH_ERR(adap, "request_irq failed, err %d\n", err);
961 * Release resources when all the ports and offloading have been stopped.
963 static void cxgb_down(struct adapter *adapter)
965 t3_sge_stop(adapter);
966 spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */
967 t3_intr_disable(adapter);
968 spin_unlock_irq(&adapter->work_lock);
970 if (adapter->flags & USING_MSIX) {
973 free_irq(adapter->msix_info[0].vec, adapter);
974 for_each_port(adapter, i)
975 n += adap2pinfo(adapter, i)->nqsets;
977 for (i = 0; i < n; ++i)
978 free_irq(adapter->msix_info[i + 1].vec,
979 &adapter->sge.qs[i]);
981 free_irq(adapter->pdev->irq, adapter);
983 flush_workqueue(cxgb3_wq); /* wait for external IRQ handler */
987 static void schedule_chk_task(struct adapter *adap)
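/* linkpoll_period is in tenths of a second, stats_update_period in seconds */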
991 timeo = adap->params.linkpoll_period ?
992 (HZ * adap->params.linkpoll_period) / 10 :
993 adap->params.stats_update_period * HZ;
995 queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
998 static int offload_open(struct net_device *dev)
1000 struct port_info *pi = netdev_priv(dev);
1001 struct adapter *adapter = pi->adapter;
1002 struct t3cdev *tdev = dev2t3cdev(dev);
1003 int adap_up = adapter->open_device_map & PORT_MASK;
1006 if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1009 if (!adap_up && (err = cxgb_up(adapter)) < 0)
1012 t3_tp_set_offload_mode(adapter, 1);
1013 tdev->lldev = adapter->port[0];
1014 err = cxgb3_offload_activate(adapter);
1018 init_port_mtus(adapter);
1019 t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
1020 adapter->params.b_wnd,
1021 adapter->params.rev == 0 ?
1022 adapter->port[0]->mtu : 0xffff);
1025 if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
1026 dev_dbg(&dev->dev, "cannot create sysfs group\n");
1028 /* Call back all registered clients */
1029 cxgb3_add_clients(tdev);
1032 /* restore them in case the offload module has changed them */
1034 t3_tp_set_offload_mode(adapter, 0);
1035 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1036 cxgb3_set_dummy_ops(tdev);
1041 static int offload_close(struct t3cdev *tdev)
1043 struct adapter *adapter = tdev2adap(tdev);
1045 if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1048 /* Call back all registered clients */
1049 cxgb3_remove_clients(tdev);
1051 sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
1054 cxgb3_set_dummy_ops(tdev);
1055 t3_tp_set_offload_mode(adapter, 0);
1056 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1058 if (!adapter->open_device_map)
1061 cxgb3_offload_deactivate(adapter);
1065 static int cxgb_open(struct net_device *dev)
1067 struct port_info *pi = netdev_priv(dev);
1068 struct adapter *adapter = pi->adapter;
1069 int other_ports = adapter->open_device_map & PORT_MASK;
1072 if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
1075 set_bit(pi->port_id, &adapter->open_device_map);
1076 if (is_offload(adapter) && !ofld_disable) {
1077 err = offload_open(dev);
1080 "Could not initialize offload capabilities\n");
1084 t3_port_intr_enable(adapter, pi->port_id);
1085 netif_start_queue(dev);
1087 schedule_chk_task(adapter);
1092 static int cxgb_close(struct net_device *dev)
1094 struct port_info *pi = netdev_priv(dev);
1095 struct adapter *adapter = pi->adapter;
1097 t3_port_intr_disable(adapter, pi->port_id);
1098 netif_stop_queue(dev);
1099 pi->phy.ops->power_down(&pi->phy, 1);
1100 netif_carrier_off(dev);
1101 t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
1103 spin_lock(&adapter->work_lock); /* sync with update task */
1104 clear_bit(pi->port_id, &adapter->open_device_map);
1105 spin_unlock(&adapter->work_lock);
1107 if (!(adapter->open_device_map & PORT_MASK))
1108 cancel_rearming_delayed_workqueue(cxgb3_wq,
1109 &adapter->adap_check_task);
1111 if (!adapter->open_device_map)
1117 static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
1119 struct port_info *pi = netdev_priv(dev);
1120 struct adapter *adapter = pi->adapter;
1121 struct net_device_stats *ns = &pi->netstats;
1122 const struct mac_stats *pstats;
1124 spin_lock(&adapter->stats_lock);
1125 pstats = t3_mac_update_stats(&pi->mac);
1126 spin_unlock(&adapter->stats_lock);
1128 ns->tx_bytes = pstats->tx_octets;
1129 ns->tx_packets = pstats->tx_frames;
1130 ns->rx_bytes = pstats->rx_octets;
1131 ns->rx_packets = pstats->rx_frames;
1132 ns->multicast = pstats->rx_mcast_frames;
1134 ns->tx_errors = pstats->tx_underrun;
1135 ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
1136 pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
1137 pstats->rx_fifo_ovfl;
1139 /* detailed rx_errors */
1140 ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
1141 ns->rx_over_errors = 0;
1142 ns->rx_crc_errors = pstats->rx_fcs_errs;
1143 ns->rx_frame_errors = pstats->rx_symbol_errs;
1144 ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
1145 ns->rx_missed_errors = pstats->rx_cong_drops;
1147 /* detailed tx_errors */
1148 ns->tx_aborted_errors = 0;
1149 ns->tx_carrier_errors = 0;
1150 ns->tx_fifo_errors = pstats->tx_underrun;
1151 ns->tx_heartbeat_errors = 0;
1152 ns->tx_window_errors = 0;
1156 static u32 get_msglevel(struct net_device *dev)
1158 struct port_info *pi = netdev_priv(dev);
1159 struct adapter *adapter = pi->adapter;
1161 return adapter->msg_enable;
1164 static void set_msglevel(struct net_device *dev, u32 val)
1166 struct port_info *pi = netdev_priv(dev);
1167 struct adapter *adapter = pi->adapter;
1169 adapter->msg_enable = val;
1172 static char stats_strings[][ETH_GSTRING_LEN] = {
1175 "TxMulticastFramesOK",
1176 "TxBroadcastFramesOK",
1183 "TxFrames128To255 ",
1184 "TxFrames256To511 ",
1185 "TxFrames512To1023 ",
1186 "TxFrames1024To1518 ",
1187 "TxFrames1519ToMax ",
1191 "RxMulticastFramesOK",
1192 "RxBroadcastFramesOK",
1203 "RxFrames128To255 ",
1204 "RxFrames256To511 ",
1205 "RxFrames512To1023 ",
1206 "RxFrames1024To1518 ",
1207 "RxFrames1519ToMax ",
1220 "CheckTXEnToggled ",
1225 static int get_sset_count(struct net_device *dev, int sset)
1229 return ARRAY_SIZE(stats_strings);
1235 #define T3_REGMAP_SIZE (3 * 1024)
1237 static int get_regs_len(struct net_device *dev)
1239 return T3_REGMAP_SIZE;
1242 static int get_eeprom_len(struct net_device *dev)
1247 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1249 struct port_info *pi = netdev_priv(dev);
1250 struct adapter *adapter = pi->adapter;
1254 t3_get_fw_version(adapter, &fw_vers);
1255 t3_get_tp_version(adapter, &tp_vers);
1257 strcpy(info->driver, DRV_NAME);
1258 strcpy(info->version, DRV_VERSION);
1259 strcpy(info->bus_info, pci_name(adapter->pdev));
1261 strcpy(info->fw_version, "N/A");
1263 snprintf(info->fw_version, sizeof(info->fw_version),
1264 "%s %u.%u.%u TP %u.%u.%u",
1265 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1266 G_FW_VERSION_MAJOR(fw_vers),
1267 G_FW_VERSION_MINOR(fw_vers),
1268 G_FW_VERSION_MICRO(fw_vers),
1269 G_TP_VERSION_MAJOR(tp_vers),
1270 G_TP_VERSION_MINOR(tp_vers),
1271 G_TP_VERSION_MICRO(tp_vers));
1275 static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
1277 if (stringset == ETH_SS_STATS)
1278 memcpy(data, stats_strings, sizeof(stats_strings));
1281 static unsigned long collect_sge_port_stats(struct adapter *adapter,
1282 struct port_info *p, int idx)
1285 unsigned long tot = 0;
1287 for (i = 0; i < p->nqsets; ++i)
1288 tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
1292 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1295 struct port_info *pi = netdev_priv(dev);
1296 struct adapter *adapter = pi->adapter;
1297 const struct mac_stats *s;
1299 spin_lock(&adapter->stats_lock);
1300 s = t3_mac_update_stats(&pi->mac);
1301 spin_unlock(&adapter->stats_lock);
1303 *data++ = s->tx_octets;
1304 *data++ = s->tx_frames;
1305 *data++ = s->tx_mcast_frames;
1306 *data++ = s->tx_bcast_frames;
1307 *data++ = s->tx_pause;
1308 *data++ = s->tx_underrun;
1309 *data++ = s->tx_fifo_urun;
1311 *data++ = s->tx_frames_64;
1312 *data++ = s->tx_frames_65_127;
1313 *data++ = s->tx_frames_128_255;
1314 *data++ = s->tx_frames_256_511;
1315 *data++ = s->tx_frames_512_1023;
1316 *data++ = s->tx_frames_1024_1518;
1317 *data++ = s->tx_frames_1519_max;
1319 *data++ = s->rx_octets;
1320 *data++ = s->rx_frames;
1321 *data++ = s->rx_mcast_frames;
1322 *data++ = s->rx_bcast_frames;
1323 *data++ = s->rx_pause;
1324 *data++ = s->rx_fcs_errs;
1325 *data++ = s->rx_symbol_errs;
1326 *data++ = s->rx_short;
1327 *data++ = s->rx_jabber;
1328 *data++ = s->rx_too_long;
1329 *data++ = s->rx_fifo_ovfl;
1331 *data++ = s->rx_frames_64;
1332 *data++ = s->rx_frames_65_127;
1333 *data++ = s->rx_frames_128_255;
1334 *data++ = s->rx_frames_256_511;
1335 *data++ = s->rx_frames_512_1023;
1336 *data++ = s->rx_frames_1024_1518;
1337 *data++ = s->rx_frames_1519_max;
1339 *data++ = pi->phy.fifo_errors;
1341 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1342 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1343 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1344 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1345 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1346 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_AGGR);
1347 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_FLUSHED);
1348 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_NO_DESC);
1349 *data++ = s->rx_cong_drops;
1351 *data++ = s->num_toggled;
1352 *data++ = s->num_resets;
1355 static inline void reg_block_dump(struct adapter *ap, void *buf,
1356 unsigned int start, unsigned int end)
1358 u32 *p = buf + start;
1360 for (; start <= end; start += sizeof(u32))
1361 *p++ = t3_read_reg(ap, start);
1364 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1367 struct port_info *pi = netdev_priv(dev);
1368 struct adapter *ap = pi->adapter;
1372 * bits 0..9: chip version
1373 * bits 10..15: chip revision
1374 * bit 31: set for PCIe cards
1376 regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1379 * We skip the MAC statistics registers because they are clear-on-read.
1380 * Also reading multi-register stats would need to synchronize with the
1381 * periodic mac stats accumulation. Hard to justify the complexity.
1383 memset(buf, 0, T3_REGMAP_SIZE);
1384 reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1385 reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1386 reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1387 reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1388 reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1389 reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1390 XGM_REG(A_XGM_SERDES_STAT3, 1));
1391 reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1392 XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1395 static int restart_autoneg(struct net_device *dev)
1397 struct port_info *p = netdev_priv(dev);
1399 if (!netif_running(dev))
1401 if (p->link_config.autoneg != AUTONEG_ENABLE)
1403 p->phy.ops->autoneg_restart(&p->phy);
1407 static int cxgb3_phys_id(struct net_device *dev, u32 data)
1409 struct port_info *pi = netdev_priv(dev);
1410 struct adapter *adapter = pi->adapter;
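/* Blink the port LED by toggling GPIO0 every 500 ms, for roughly 'data' seconds in total. */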
1416 for (i = 0; i < data * 2; i++) {
1417 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1418 (i & 1) ? F_GPIO0_OUT_VAL : 0);
1419 if (msleep_interruptible(500))
1422 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1427 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1429 struct port_info *p = netdev_priv(dev);
1431 cmd->supported = p->link_config.supported;
1432 cmd->advertising = p->link_config.advertising;
1434 if (netif_carrier_ok(dev)) {
1435 cmd->speed = p->link_config.speed;
1436 cmd->duplex = p->link_config.duplex;
1442 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1443 cmd->phy_address = p->phy.addr;
1444 cmd->transceiver = XCVR_EXTERNAL;
1445 cmd->autoneg = p->link_config.autoneg;
1451 static int speed_duplex_to_caps(int speed, int duplex)
1457 if (duplex == DUPLEX_FULL)
1458 cap = SUPPORTED_10baseT_Full;
1460 cap = SUPPORTED_10baseT_Half;
1463 if (duplex == DUPLEX_FULL)
1464 cap = SUPPORTED_100baseT_Full;
1466 cap = SUPPORTED_100baseT_Half;
1469 if (duplex == DUPLEX_FULL)
1470 cap = SUPPORTED_1000baseT_Full;
1472 cap = SUPPORTED_1000baseT_Half;
1475 if (duplex == DUPLEX_FULL)
1476 cap = SUPPORTED_10000baseT_Full;
1481 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1482 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1483 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1484 ADVERTISED_10000baseT_Full)
1486 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1488 struct port_info *p = netdev_priv(dev);
1489 struct link_config *lc = &p->link_config;
1491 if (!(lc->supported & SUPPORTED_Autoneg))
1492 return -EOPNOTSUPP; /* can't change speed/duplex */
1494 if (cmd->autoneg == AUTONEG_DISABLE) {
1495 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1497 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
1499 lc->requested_speed = cmd->speed;
1500 lc->requested_duplex = cmd->duplex;
1501 lc->advertising = 0;
1503 cmd->advertising &= ADVERTISED_MASK;
1504 cmd->advertising &= lc->supported;
1505 if (!cmd->advertising)
1507 lc->requested_speed = SPEED_INVALID;
1508 lc->requested_duplex = DUPLEX_INVALID;
1509 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1511 lc->autoneg = cmd->autoneg;
1512 if (netif_running(dev))
1513 t3_link_start(&p->phy, &p->mac, lc);
1517 static void get_pauseparam(struct net_device *dev,
1518 struct ethtool_pauseparam *epause)
1520 struct port_info *p = netdev_priv(dev);
1522 epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1523 epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1524 epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1527 static int set_pauseparam(struct net_device *dev,
1528 struct ethtool_pauseparam *epause)
1530 struct port_info *p = netdev_priv(dev);
1531 struct link_config *lc = &p->link_config;
1533 if (epause->autoneg == AUTONEG_DISABLE)
1534 lc->requested_fc = 0;
1535 else if (lc->supported & SUPPORTED_Autoneg)
1536 lc->requested_fc = PAUSE_AUTONEG;
1540 if (epause->rx_pause)
1541 lc->requested_fc |= PAUSE_RX;
1542 if (epause->tx_pause)
1543 lc->requested_fc |= PAUSE_TX;
1544 if (lc->autoneg == AUTONEG_ENABLE) {
1545 if (netif_running(dev))
1546 t3_link_start(&p->phy, &p->mac, lc);
1548 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1549 if (netif_running(dev))
1550 t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1555 static u32 get_rx_csum(struct net_device *dev)
1557 struct port_info *p = netdev_priv(dev);
1559 return p->rx_csum_offload;
1562 static int set_rx_csum(struct net_device *dev, u32 data)
1564 struct port_info *p = netdev_priv(dev);
1566 p->rx_csum_offload = data;
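/* LRO relies on Rx checksum offload, so it is turned off on this port's qsets when checksumming is disabled. */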
1568 struct adapter *adap = p->adapter;
1571 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
1572 adap->sge.qs[i].lro_enabled = 0;
1577 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1579 struct port_info *pi = netdev_priv(dev);
1580 struct adapter *adapter = pi->adapter;
1581 const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1583 e->rx_max_pending = MAX_RX_BUFFERS;
1584 e->rx_mini_max_pending = 0;
1585 e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1586 e->tx_max_pending = MAX_TXQ_ENTRIES;
1588 e->rx_pending = q->fl_size;
1589 e->rx_mini_pending = q->rspq_size;
1590 e->rx_jumbo_pending = q->jumbo_size;
1591 e->tx_pending = q->txq_size[0];
1594 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1596 struct port_info *pi = netdev_priv(dev);
1597 struct adapter *adapter = pi->adapter;
1598 struct qset_params *q;
1601 if (e->rx_pending > MAX_RX_BUFFERS ||
1602 e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1603 e->tx_pending > MAX_TXQ_ENTRIES ||
1604 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1605 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1606 e->rx_pending < MIN_FL_ENTRIES ||
1607 e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1608 e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1611 if (adapter->flags & FULL_INIT_DONE)
1614 q = &adapter->params.sge.qset[pi->first_qset];
1615 for (i = 0; i < pi->nqsets; ++i, ++q) {
1616 q->rspq_size = e->rx_mini_pending;
1617 q->fl_size = e->rx_pending;
1618 q->jumbo_size = e->rx_jumbo_pending;
1619 q->txq_size[0] = e->tx_pending;
1620 q->txq_size[1] = e->tx_pending;
1621 q->txq_size[2] = e->tx_pending;
1626 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1628 struct port_info *pi = netdev_priv(dev);
1629 struct adapter *adapter = pi->adapter;
1630 struct qset_params *qsp = &adapter->params.sge.qset[0];
1631 struct sge_qset *qs = &adapter->sge.qs[0];
1633 if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1636 qsp->coalesce_usecs = c->rx_coalesce_usecs;
1637 t3_update_qset_coalesce(qs, qsp);
1641 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1643 struct port_info *pi = netdev_priv(dev);
1644 struct adapter *adapter = pi->adapter;
1645 struct qset_params *q = adapter->params.sge.qset;
1647 c->rx_coalesce_usecs = q->coalesce_usecs;
1651 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1654 struct port_info *pi = netdev_priv(dev);
1655 struct adapter *adapter = pi->adapter;
1658 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1662 e->magic = EEPROM_MAGIC;
1663 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1664 err = t3_seeprom_read(adapter, i, (__le32 *) & buf[i]);
1667 memcpy(data, buf + e->offset, e->len);
1672 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1675 struct port_info *pi = netdev_priv(dev);
1676 struct adapter *adapter = pi->adapter;
1677 u32 aligned_offset, aligned_len;
1682 if (eeprom->magic != EEPROM_MAGIC)
1685 aligned_offset = eeprom->offset & ~3;
1686 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
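/*
 * Writes are done in aligned 4-byte words; for a partial word at either
 * boundary, read back the existing contents and merge the user data in.
 */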
1688 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1689 buf = kmalloc(aligned_len, GFP_KERNEL);
1692 err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
1693 if (!err && aligned_len > 4)
1694 err = t3_seeprom_read(adapter,
1695 aligned_offset + aligned_len - 4,
1696 (__le32 *) & buf[aligned_len - 4]);
1699 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1703 err = t3_seeprom_wp(adapter, 0);
1707 for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
1708 err = t3_seeprom_write(adapter, aligned_offset, *p);
1709 aligned_offset += 4;
1713 err = t3_seeprom_wp(adapter, 1);
1720 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1724 memset(&wol->sopass, 0, sizeof(wol->sopass));
1727 static const struct ethtool_ops cxgb_ethtool_ops = {
1728 .get_settings = get_settings,
1729 .set_settings = set_settings,
1730 .get_drvinfo = get_drvinfo,
1731 .get_msglevel = get_msglevel,
1732 .set_msglevel = set_msglevel,
1733 .get_ringparam = get_sge_param,
1734 .set_ringparam = set_sge_param,
1735 .get_coalesce = get_coalesce,
1736 .set_coalesce = set_coalesce,
1737 .get_eeprom_len = get_eeprom_len,
1738 .get_eeprom = get_eeprom,
1739 .set_eeprom = set_eeprom,
1740 .get_pauseparam = get_pauseparam,
1741 .set_pauseparam = set_pauseparam,
1742 .get_rx_csum = get_rx_csum,
1743 .set_rx_csum = set_rx_csum,
1744 .set_tx_csum = ethtool_op_set_tx_csum,
1745 .set_sg = ethtool_op_set_sg,
1746 .get_link = ethtool_op_get_link,
1747 .get_strings = get_strings,
1748 .phys_id = cxgb3_phys_id,
1749 .nway_reset = restart_autoneg,
1750 .get_sset_count = get_sset_count,
1751 .get_ethtool_stats = get_stats,
1752 .get_regs_len = get_regs_len,
1753 .get_regs = get_regs,
1755 .set_tso = ethtool_op_set_tso,
1758 static int in_range(int val, int lo, int hi)
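/* A negative value means "leave unchanged" and always passes; the qset ioctls below skip such fields. */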
1760 return val < 0 || (val <= hi && val >= lo);
1763 static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1765 struct port_info *pi = netdev_priv(dev);
1766 struct adapter *adapter = pi->adapter;
1770 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1774 case CHELSIO_SET_QSET_PARAMS:{
1776 struct qset_params *q;
1777 struct ch_qset_params t;
1779 if (!capable(CAP_NET_ADMIN))
1781 if (copy_from_user(&t, useraddr, sizeof(t)))
1783 if (t.qset_idx >= SGE_QSETS)
1785 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
1786 !in_range(t.cong_thres, 0, 255) ||
1787 !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
1789 !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
1791 !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
1792 MAX_CTRL_TXQ_ENTRIES) ||
1793 !in_range(t.fl_size[0], MIN_FL_ENTRIES,
1795 || !in_range(t.fl_size[1], MIN_FL_ENTRIES,
1796 MAX_RX_JUMBO_BUFFERS)
1797 || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
1800 if ((adapter->flags & FULL_INIT_DONE) &&
1801 (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
1802 t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
1803 t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
1804 t.polling >= 0 || t.cong_thres >= 0))
1807 q = &adapter->params.sge.qset[t.qset_idx];
1809 if (t.rspq_size >= 0)
1810 q->rspq_size = t.rspq_size;
1811 if (t.fl_size[0] >= 0)
1812 q->fl_size = t.fl_size[0];
1813 if (t.fl_size[1] >= 0)
1814 q->jumbo_size = t.fl_size[1];
1815 if (t.txq_size[0] >= 0)
1816 q->txq_size[0] = t.txq_size[0];
1817 if (t.txq_size[1] >= 0)
1818 q->txq_size[1] = t.txq_size[1];
1819 if (t.txq_size[2] >= 0)
1820 q->txq_size[2] = t.txq_size[2];
1821 if (t.cong_thres >= 0)
1822 q->cong_thres = t.cong_thres;
1823 if (t.intr_lat >= 0) {
1824 struct sge_qset *qs =
1825 &adapter->sge.qs[t.qset_idx];
1827 q->coalesce_usecs = t.intr_lat;
1828 t3_update_qset_coalesce(qs, q);
1830 if (t.polling >= 0) {
1831 if (adapter->flags & USING_MSIX)
1832 q->polling = t.polling;
1834 /* No polling with INTx for T3A */
1835 if (adapter->params.rev == 0 &&
1836 !(adapter->flags & USING_MSI))
1839 for (i = 0; i < SGE_QSETS; i++) {
1840 q = &adapter->params.sge.
1842 q->polling = t.polling;
1847 struct sge_qset *qs = &adapter->sge.qs[t.qset_idx];
1849 qs->lro_enabled = t.lro;
1853 case CHELSIO_GET_QSET_PARAMS:{
1854 struct qset_params *q;
1855 struct ch_qset_params t;
1857 if (copy_from_user(&t, useraddr, sizeof(t)))
1859 if (t.qset_idx >= SGE_QSETS)
1862 q = &adapter->params.sge.qset[t.qset_idx];
1863 t.rspq_size = q->rspq_size;
1864 t.txq_size[0] = q->txq_size[0];
1865 t.txq_size[1] = q->txq_size[1];
1866 t.txq_size[2] = q->txq_size[2];
1867 t.fl_size[0] = q->fl_size;
1868 t.fl_size[1] = q->jumbo_size;
1869 t.polling = q->polling;
1871 t.intr_lat = q->coalesce_usecs;
1872 t.cong_thres = q->cong_thres;
1874 if (copy_to_user(useraddr, &t, sizeof(t)))
1878 case CHELSIO_SET_QSET_NUM:{
1879 struct ch_reg edata;
1880 unsigned int i, first_qset = 0, other_qsets = 0;
1882 if (!capable(CAP_NET_ADMIN))
1884 if (adapter->flags & FULL_INIT_DONE)
1886 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1888 if (edata.val < 1 ||
1889 (edata.val > 1 && !(adapter->flags & USING_MSIX)))
1892 for_each_port(adapter, i)
1893 if (adapter->port[i] && adapter->port[i] != dev)
1894 other_qsets += adap2pinfo(adapter, i)->nqsets;
1896 if (edata.val + other_qsets > SGE_QSETS)
1899 pi->nqsets = edata.val;
1901 for_each_port(adapter, i)
1902 if (adapter->port[i]) {
1903 pi = adap2pinfo(adapter, i);
1904 pi->first_qset = first_qset;
1905 first_qset += pi->nqsets;
1909 case CHELSIO_GET_QSET_NUM:{
1910 struct ch_reg edata;
1912 edata.cmd = CHELSIO_GET_QSET_NUM;
1913 edata.val = pi->nqsets;
1914 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1918 case CHELSIO_LOAD_FW:{
1920 struct ch_mem_range t;
1922 if (!capable(CAP_SYS_RAWIO))
1924 if (copy_from_user(&t, useraddr, sizeof(t)))
1926 /* Check t.len sanity ? */
1927 fw_data = kmalloc(t.len, GFP_KERNEL);
1932 (fw_data, useraddr + sizeof(t), t.len)) {
1937 ret = t3_load_fw(adapter, fw_data, t.len);
1943 case CHELSIO_SETMTUTAB:{
1947 if (!is_offload(adapter))
1949 if (!capable(CAP_NET_ADMIN))
1951 if (offload_running(adapter))
1953 if (copy_from_user(&m, useraddr, sizeof(m)))
1955 if (m.nmtus != NMTUS)
1957 if (m.mtus[0] < 81) /* accommodate SACK */
1960 /* MTUs must be in ascending order */
1961 for (i = 1; i < NMTUS; ++i)
1962 if (m.mtus[i] < m.mtus[i - 1])
1965 memcpy(adapter->params.mtus, m.mtus,
1966 sizeof(adapter->params.mtus));
1969 case CHELSIO_GET_PM:{
1970 struct tp_params *p = &adapter->params.tp;
1971 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
1973 if (!is_offload(adapter))
1975 m.tx_pg_sz = p->tx_pg_size;
1976 m.tx_num_pg = p->tx_num_pgs;
1977 m.rx_pg_sz = p->rx_pg_size;
1978 m.rx_num_pg = p->rx_num_pgs;
1979 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
1980 if (copy_to_user(useraddr, &m, sizeof(m)))
1984 case CHELSIO_SET_PM:{
1986 struct tp_params *p = &adapter->params.tp;
1988 if (!is_offload(adapter))
1990 if (!capable(CAP_NET_ADMIN))
1992 if (adapter->flags & FULL_INIT_DONE)
1994 if (copy_from_user(&m, useraddr, sizeof(m)))
1996 if (!is_power_of_2(m.rx_pg_sz) ||
1997 !is_power_of_2(m.tx_pg_sz))
1998 return -EINVAL; /* not power of 2 */
1999 if (!(m.rx_pg_sz & 0x14000))
2000 return -EINVAL; /* not 16KB or 64KB */
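/* likewise the Tx page size must be 16KB, 64KB, 256KB, 1MB, 4MB or 16MB */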
2001 if (!(m.tx_pg_sz & 0x1554000))
2003 if (m.tx_num_pg == -1)
2004 m.tx_num_pg = p->tx_num_pgs;
2005 if (m.rx_num_pg == -1)
2006 m.rx_num_pg = p->rx_num_pgs;
2007 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
2009 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
2010 m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
2012 p->rx_pg_size = m.rx_pg_sz;
2013 p->tx_pg_size = m.tx_pg_sz;
2014 p->rx_num_pgs = m.rx_num_pg;
2015 p->tx_num_pgs = m.tx_num_pg;
2018 case CHELSIO_GET_MEM:{
2019 struct ch_mem_range t;
2023 if (!is_offload(adapter))
2025 if (!(adapter->flags & FULL_INIT_DONE))
2026 return -EIO; /* need the memory controllers */
2027 if (copy_from_user(&t, useraddr, sizeof(t)))
2029 if ((t.addr & 7) || (t.len & 7))
2031 if (t.mem_id == MEM_CM)
2033 else if (t.mem_id == MEM_PMRX)
2034 mem = &adapter->pmrx;
2035 else if (t.mem_id == MEM_PMTX)
2036 mem = &adapter->pmtx;
2042 * bits 0..9: chip version
2043 * bits 10..15: chip revision
2045 t.version = 3 | (adapter->params.rev << 10);
2046 if (copy_to_user(useraddr, &t, sizeof(t)))
2050 * Read 256 bytes at a time as len can be large and we don't
2051 * want to use huge intermediate buffers.
2053 useraddr += sizeof(t); /* advance to start of buffer */
2055 unsigned int chunk =
2056 min_t(unsigned int, t.len, sizeof(buf));
2059 t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
2063 if (copy_to_user(useraddr, buf, chunk))
2071 case CHELSIO_SET_TRACE_FILTER:{
2073 const struct trace_params *tp;
2075 if (!capable(CAP_NET_ADMIN))
2077 if (!offload_running(adapter))
2079 if (copy_from_user(&t, useraddr, sizeof(t)))
2082 tp = (const struct trace_params *)&t.sip;
2084 t3_config_trace_filter(adapter, tp, 0,
2088 t3_config_trace_filter(adapter, tp, 1,
2099 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2101 struct mii_ioctl_data *data = if_mii(req);
2102 struct port_info *pi = netdev_priv(dev);
2103 struct adapter *adapter = pi->adapter;
2108 data->phy_id = pi->phy.addr;
2112 struct cphy *phy = &pi->phy;
2114 if (!phy->mdio_read)
2116 if (is_10G(adapter)) {
2117 mmd = data->phy_id >> 8;
2120 else if (mmd > MDIO_DEV_XGXS)
2124 phy->mdio_read(adapter, data->phy_id & 0x1f,
2125 mmd, data->reg_num, &val);
2128 phy->mdio_read(adapter, data->phy_id & 0x1f,
2129 0, data->reg_num & 0x1f,
2132 data->val_out = val;
2136 struct cphy *phy = &pi->phy;
2138 if (!capable(CAP_NET_ADMIN))
2140 if (!phy->mdio_write)
2142 if (is_10G(adapter)) {
2143 mmd = data->phy_id >> 8;
2146 else if (mmd > MDIO_DEV_XGXS)
2150 phy->mdio_write(adapter,
2151 data->phy_id & 0x1f, mmd,
2156 phy->mdio_write(adapter,
2157 data->phy_id & 0x1f, 0,
2158 data->reg_num & 0x1f,
2163 return cxgb_extension_ioctl(dev, req->ifr_data);
2170 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2172 struct port_info *pi = netdev_priv(dev);
2173 struct adapter *adapter = pi->adapter;
2176 if (new_mtu < 81) /* accommodate SACK */
2178 if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2181 init_port_mtus(adapter);
2182 if (adapter->params.rev == 0 && offload_running(adapter))
2183 t3_load_mtus(adapter, adapter->params.mtus,
2184 adapter->params.a_wnd, adapter->params.b_wnd,
2185 adapter->port[0]->mtu);
2189 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2191 struct port_info *pi = netdev_priv(dev);
2192 struct adapter *adapter = pi->adapter;
2193 struct sockaddr *addr = p;
2195 if (!is_valid_ether_addr(addr->sa_data))
2198 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2199 t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2200 if (offload_running(adapter))
2201 write_smt_entry(adapter, pi->port_id);
2206 * t3_synchronize_rx - wait for current Rx processing on a port to complete
2207 * @adap: the adapter
2210 * Ensures that current Rx processing on any of the queues associated with
2211 * the given port completes before returning. We do this by acquiring and
2212 * releasing the locks of the response queues associated with the port.
2214 static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2218 for (i = 0; i < p->nqsets; i++) {
2219 struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;
2221 spin_lock_irq(&q->lock);
2222 spin_unlock_irq(&q->lock);
2226 static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2228 struct port_info *pi = netdev_priv(dev);
2229 struct adapter *adapter = pi->adapter;
2232 if (adapter->params.rev > 0)
2233 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2235 /* single control for all ports */
2236 unsigned int i, have_vlans = 0;
2237 for_each_port(adapter, i)
2238 have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2240 t3_set_vlan_accel(adapter, 1, have_vlans);
2242 t3_synchronize_rx(adapter, pi);
2245 #ifdef CONFIG_NET_POLL_CONTROLLER
2246 static void cxgb_netpoll(struct net_device *dev)
2248 struct port_info *pi = netdev_priv(dev);
2249 struct adapter *adapter = pi->adapter;
2252 for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2253 struct sge_qset *qs = &adapter->sge.qs[qidx];
2256 if (adapter->flags & USING_MSIX)
2261 t3_intr_handler(adapter, qs->rspq.polling) (0, source);
2267 * Periodic accumulation of MAC statistics.
2269 static void mac_stats_update(struct adapter *adapter)
2273 for_each_port(adapter, i) {
2274 struct net_device *dev = adapter->port[i];
2275 struct port_info *p = netdev_priv(dev);
2277 if (netif_running(dev)) {
2278 spin_lock(&adapter->stats_lock);
2279 t3_mac_update_stats(&p->mac);
2280 spin_unlock(&adapter->stats_lock);
2285 static void check_link_status(struct adapter *adapter)
2289 for_each_port(adapter, i) {
2290 struct net_device *dev = adapter->port[i];
2291 struct port_info *p = netdev_priv(dev);
2293 if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
2294 t3_link_changed(adapter, i);
2298 static void check_t3b2_mac(struct adapter *adapter)
2302 if (!rtnl_trylock()) /* synchronize with ifdown */
2305 for_each_port(adapter, i) {
2306 struct net_device *dev = adapter->port[i];
2307 struct port_info *p = netdev_priv(dev);
2310 if (!netif_running(dev))
2314 if (netif_running(dev) && netif_carrier_ok(dev))
2315 status = t3b2_mac_watchdog_task(&p->mac);
2317 p->mac.stats.num_toggled++;
2318 else if (status == 2) {
2319 struct cmac *mac = &p->mac;
2321 t3_mac_set_mtu(mac, dev->mtu);
2322 t3_mac_set_address(mac, 0, dev->dev_addr);
2323 cxgb_set_rxmode(dev);
2324 t3_link_start(&p->phy, mac, &p->link_config);
2325 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2326 t3_port_intr_enable(adapter, p->port_id);
2327 p->mac.stats.num_resets++;
2334 static void t3_adap_check_task(struct work_struct *work)
2336 struct adapter *adapter = container_of(work, struct adapter,
2337 adap_check_task.work);
2338 const struct adapter_params *p = &adapter->params;
2340 adapter->check_task_cnt++;
2342 /* Check link status for PHYs without interrupts */
2343 if (p->linkpoll_period)
2344 check_link_status(adapter);
2346 /* Accumulate MAC stats if needed */
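/* check_task_cnt * linkpoll_period / 10 is the elapsed time in seconds (linkpoll_period is in tenths of a second) */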
2347 if (!p->linkpoll_period ||
2348 (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2349 p->stats_update_period) {
2350 mac_stats_update(adapter);
2351 adapter->check_task_cnt = 0;
2354 if (p->rev == T3_REV_B2)
2355 check_t3b2_mac(adapter);
2357 /* Schedule the next check update if any port is active. */
2358 spin_lock(&adapter->work_lock);
2359 if (adapter->open_device_map & PORT_MASK)
2360 schedule_chk_task(adapter);
2361 spin_unlock(&adapter->work_lock);
2365 * Processes external (PHY) interrupts in process context.
2367 static void ext_intr_task(struct work_struct *work)
2369 struct adapter *adapter = container_of(work, struct adapter,
2370 ext_intr_handler_task);
2372 t3_phy_intr_handler(adapter);
2374 /* Now reenable external interrupts */
2375 spin_lock_irq(&adapter->work_lock);
2376 if (adapter->slow_intr_mask) {
2377 adapter->slow_intr_mask |= F_T3DBG;
2378 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2379 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2380 adapter->slow_intr_mask);
2382 spin_unlock_irq(&adapter->work_lock);
2386 * Interrupt-context handler for external (PHY) interrupts.
2388 void t3_os_ext_intr_handler(struct adapter *adapter)
2391 * Schedule a task to handle external interrupts as they may be slow
2392 * and we use a mutex to protect MDIO registers. We disable PHY
2393 * interrupts in the meantime and let the task reenable them when it's done.
2396 spin_lock(&adapter->work_lock);
2397 if (adapter->slow_intr_mask) {
2398 adapter->slow_intr_mask &= ~F_T3DBG;
2399 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2400 adapter->slow_intr_mask);
2401 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2403 spin_unlock(&adapter->work_lock);
void t3_fatal_err(struct adapter *adapter)
{
	unsigned int fw_status[4];

	if (adapter->flags & FULL_INIT_DONE) {
		t3_sge_stop(adapter);
		t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
		t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
		t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
		t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
		t3_intr_disable(adapter);
	}
	CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
	if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
		CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
			 fw_status[0], fw_status[1],
			 fw_status[2], fw_status[3]);
}

/**
 * t3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
					     pci_channel_state_t state)
{
	struct adapter *adapter = pci_get_drvdata(pdev);
	int i;

	/* Stop all ports */
	for_each_port(adapter, i) {
		struct net_device *netdev = adapter->port[i];

		if (netif_running(netdev))
			cxgb_close(netdev);
	}

	if (is_offload(adapter) &&
	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		offload_close(&adapter->tdev);

	adapter->flags &= ~FULL_INIT_DONE;

	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * t3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto err;
	}
	pci_set_master(pdev);
	pci_restore_state(pdev);

	/* Free sge resources */
	t3_free_sge_resources(adapter);

	if (t3_replay_prep_adapter(adapter))
		goto err;

	return PCI_ERS_RESULT_RECOVERED;
err:
	return PCI_ERS_RESULT_DISCONNECT;
}

/**
 * t3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void t3_io_resume(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);
	int i;

	/* Restart the ports */
	for_each_port(adapter, i) {
		struct net_device *netdev = adapter->port[i];

		if (netif_running(netdev)) {
			if (cxgb_open(netdev)) {
				dev_err(&pdev->dev,
					"can't bring device back up after reset\n");
				continue;
			}
			netif_device_attach(netdev);
		}
	}
}

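/*
 * PCI error-recovery callbacks.  The PCI core invokes error_detected() when
 * a bus error is reported, slot_reset() after the slot has been reset, and
 * resume() once normal traffic may flow again.
 */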
static struct pci_error_handlers t3_err_handler = {
	.error_detected = t3_io_error_detected,
	.slot_reset = t3_io_slot_reset,
	.resume = t3_io_resume,
};

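/*
 * Request one MSI-X vector per SGE queue set plus one for asynchronous
 * events.  Returns 0 on success; a positive pci_enable_msix() return value
 * reports how many vectors were actually available, in which case MSI-X is
 * not used.
 */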
static int __devinit cxgb_enable_msix(struct adapter *adap)
{
	struct msix_entry entries[SGE_QSETS + 1];
	int i, err;

	for (i = 0; i < ARRAY_SIZE(entries); ++i)
		entries[i].entry = i;

	err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
	if (!err) {
		for (i = 0; i < ARRAY_SIZE(entries); ++i)
			adap->msix_info[i].vec = entries[i].vector;
	} else if (err > 0)
		dev_info(&adap->pdev->dev,
			 "only %d MSI-X vectors left, not using MSI-X\n", err);
	return err;
}

static void __devinit print_port_info(struct adapter *adap,
				      const struct adapter_info *ai)
{
	static const char *pci_variant[] = {
		"PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
	};

	int i;
	char buf[80];

	if (is_pcie(adap))
		snprintf(buf, sizeof(buf), "%s x%d",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.width);
	else
		snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.speed, adap->params.pci.width);

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		const struct port_info *pi = netdev_priv(dev);

		if (!test_bit(i, &adap->registered_device_map))
			continue;
		printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
		       dev->name, ai->desc, pi->port_type->desc,
		       is_offload(adap) ? "R" : "", adap->params.rev, buf,
		       (adap->flags & USING_MSIX) ? " MSI-X" :
		       (adap->flags & USING_MSI) ? " MSI" : "");
		if (adap->name == dev->name && adap->params.vpd.mclk)
			printk(KERN_INFO
			       "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
			       adap->name, t3_mc7_size(&adap->cm) >> 20,
			       t3_mc7_size(&adap->pmtx) >> 20,
			       t3_mc7_size(&adap->pmrx) >> 20,
			       adap->params.vpd.sn);
	}
}

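/*
 * PCI probe entry point: map BAR0, allocate the adapter and one net_device
 * per port, prepare the hardware, register the ports and choose an
 * interrupt scheme (MSI-X, MSI, then legacy INTx).
 */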
static int __devinit init_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	static int version_printed;

	int i, err, pci_using_dac = 0;
	unsigned long mmio_start, mmio_len;
	const struct adapter_info *ai;
	struct adapter *adapter = NULL;
	struct port_info *pi;

	if (!version_printed) {
		printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
		++version_printed;
	}

	if (!cxgb3_wq) {
		cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
		if (!cxgb3_wq) {
			printk(KERN_ERR DRV_NAME
			       ": cannot initialize work queue\n");
			return -ENOMEM;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_disable_device;
		}
	} else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		goto out_disable_device;
	}

	pci_set_master(pdev);
	pci_save_state(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	ai = t3_get_adapter_info(ent->driver_data);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_disable_device;
	}

	adapter->regs = ioremap_nocache(mmio_start, mmio_len);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->pdev = pdev;
	adapter->name = pci_name(pdev);
	adapter->msg_enable = dflt_msg_enable;
	adapter->mmio_len = mmio_len;

	mutex_init(&adapter->mdio_lock);
	spin_lock_init(&adapter->work_lock);
	spin_lock_init(&adapter->stats_lock);

	INIT_LIST_HEAD(&adapter->adapter_list);
	INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
	INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);

	for (i = 0; i < ai->nports; ++i) {
		struct net_device *netdev;

		netdev = alloc_etherdev(sizeof(struct port_info));
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->rx_csum_offload = 1;
		pi->nqsets = 1;
		pi->first_qset = i;
		pi->activity = 0;
		pi->port_id = i;
		netif_carrier_off(netdev);
		netdev->irq = pdev->irq;
		netdev->mem_start = mmio_start;
		netdev->mem_end = mmio_start + mmio_len - 1;
		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
		netdev->features |= NETIF_F_LLTX;
		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;

		netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
		netdev->vlan_rx_register = vlan_rx_register;

		netdev->open = cxgb_open;
		netdev->stop = cxgb_close;
		netdev->hard_start_xmit = t3_eth_xmit;
		netdev->get_stats = cxgb_get_stats;
		netdev->set_multicast_list = cxgb_set_rxmode;
		netdev->do_ioctl = cxgb_ioctl;
		netdev->change_mtu = cxgb_change_mtu;
		netdev->set_mac_address = cxgb_set_mac_addr;
#ifdef CONFIG_NET_POLL_CONTROLLER
		netdev->poll_controller = cxgb_netpoll;
#endif

		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
	}

	pci_set_drvdata(pdev, adapter);
	if (t3_prep_adapter(adapter, ai, 1) < 0) {
		err = -ENODEV;
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		err = register_netdev(adapter->port[i]);
		if (err)
			dev_warn(&pdev->dev,
				 "cannot register net device %s, skipping\n",
				 adapter->port[i]->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i]->name;

			__set_bit(i, &adapter->registered_device_map);
		}
	}
	if (!adapter->registered_device_map) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}

	/* Driver's ready. Reflect it on LEDs */
	t3_led_ready(adapter);

	if (is_offload(adapter)) {
		__set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
		cxgb3_adapter_ofld(adapter);
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && cxgb_enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	err = sysfs_create_group(&adapter->port[0]->dev.kobj,
				 &cxgb3_attr_group);

	print_port_info(adapter, ai);
	return 0;

out_free_dev:
	iounmap(adapter->regs);
	for (i = ai->nports - 1; i >= 0; --i)
		if (adapter->port[i])
			free_netdev(adapter->port[i]);

out_free_adapter:
	kfree(adapter);

out_disable_device:
	pci_disable_device(pdev);
out_release_regions:
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

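/*
 * PCI remove entry point: tear down in roughly the reverse order of
 * init_one() for the adapter bound to this PCI device.
 */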
static void __devexit remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (adapter) {
		int i;

		t3_sge_stop(adapter);
		sysfs_remove_group(&adapter->port[0]->dev.kobj,
				   &cxgb3_attr_group);

		if (is_offload(adapter)) {
			cxgb3_adapter_unofld(adapter);
			if (test_bit(OFFLOAD_DEVMAP_BIT,
				     &adapter->open_device_map))
				offload_close(&adapter->tdev);
		}

		for_each_port(adapter, i)
			if (test_bit(i, &adapter->registered_device_map))
				unregister_netdev(adapter->port[i]);

		t3_free_sge_resources(adapter);
		cxgb_disable_msi(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i])
				free_netdev(adapter->port[i]);

		iounmap(adapter->regs);
		kfree(adapter);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

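/*
 * Glue for the PCI core: device table, probe/remove entry points and the
 * error-recovery callbacks above.
 */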
static struct pci_driver driver = {
	.name = DRV_NAME,
	.id_table = cxgb3_pci_tbl,
	.probe = init_one,
	.remove = __devexit_p(remove_one),
	.err_handler = &t3_err_handler,
};

static int __init cxgb3_init_module(void)
{
	int ret;

	cxgb3_offload_init();
	ret = pci_register_driver(&driver);
	return ret;
}

static void __exit cxgb3_cleanup_module(void)
{
	pci_unregister_driver(&driver);
	if (cxgb3_wq)
		destroy_workqueue(cxgb3_wq);
}

module_init(cxgb3_init_module);
module_exit(cxgb3_cleanup_module);