2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 #include <linux/module.h>
33 #include <linux/moduleparam.h>
34 #include <linux/init.h>
35 #include <linux/pci.h>
36 #include <linux/dma-mapping.h>
37 #include <linux/netdevice.h>
38 #include <linux/etherdevice.h>
39 #include <linux/if_vlan.h>
40 #include <linux/mii.h>
41 #include <linux/sockios.h>
42 #include <linux/workqueue.h>
43 #include <linux/proc_fs.h>
44 #include <linux/rtnetlink.h>
45 #include <asm/uaccess.h>
48 #include "cxgb3_ioctl.h"
50 #include "cxgb3_offload.h"
53 #include "cxgb3_ctl_defs.h"
55 #include "firmware_exports.h"
58 MAX_TXQ_ENTRIES = 16384,
59 MAX_CTRL_TXQ_ENTRIES = 1024,
60 MAX_RSPQ_ENTRIES = 16384,
61 MAX_RX_BUFFERS = 16384,
62 MAX_RX_JUMBO_BUFFERS = 16384,
64 MIN_CTRL_TXQ_ENTRIES = 4,
65 MIN_RSPQ_ENTRIES = 32,
69 #define PORT_MASK ((1 << MAX_NPORTS) - 1)
71 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
72 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
73 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
75 #define EEPROM_MAGIC 0x38E2F10C
77 #define CH_DEVICE(devid, ssid, idx) \
78 { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
80 static const struct pci_device_id cxgb3_pci_tbl[] = {
81 CH_DEVICE(0x20, 1, 0), /* PE9000 */
82 CH_DEVICE(0x21, 1, 1), /* T302E */
83 CH_DEVICE(0x22, 1, 2), /* T310E */
84 CH_DEVICE(0x23, 1, 3), /* T320X */
85 CH_DEVICE(0x24, 1, 1), /* T302X */
86 CH_DEVICE(0x25, 1, 3), /* T320E */
87 CH_DEVICE(0x26, 1, 2), /* T310X */
88 CH_DEVICE(0x30, 1, 2), /* T3B10 */
89 CH_DEVICE(0x31, 1, 3), /* T3B20 */
90 CH_DEVICE(0x32, 1, 1), /* T3B02 */
94 MODULE_DESCRIPTION(DRV_DESC);
95 MODULE_AUTHOR("Chelsio Communications");
96 MODULE_LICENSE("Dual BSD/GPL");
97 MODULE_VERSION(DRV_VERSION);
98 MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
100 static int dflt_msg_enable = DFLT_MSG_ENABLE;
102 module_param(dflt_msg_enable, int, 0644);
103 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
106 * The driver uses the best interrupt scheme available on a platform in the
107 * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
108 * of these schemes the driver may consider as follows:
110 * msi = 2: choose from among all three options
111 * msi = 1: only consider MSI and pin interrupts
112 * msi = 0: force pin interrupts
116 module_param(msi, int, 0644);
117 MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
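/*
 * Example (assuming DRV_NAME is "cxgb3"): legacy pin interrupts can be
 * forced at load time with "modprobe cxgb3 msi=0".  The value is only
 * consulted when an adapter is probed (see init_one() below), so changing
 * it later through /sys/module/cxgb3/parameters/msi affects only
 * subsequent probes.
 */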
120 * The driver enables offload by default.
121 * To disable it, use ofld_disable = 1.
124 static int ofld_disable = 0;
126 module_param(ofld_disable, int, 0644);
127 MODULE_PARM_DESC(ofld_disable, "disable offload capabilities at init time (0 = offload enabled)");
130 * We have work elements that we need to cancel when an interface is taken
131 * down. Normally the work elements would be executed by keventd but that
132 * can deadlock because of linkwatch. If our close method takes the rtnl
133 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
134 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
135 * for our work to complete. Get our own work queue to solve this.
137 static struct workqueue_struct *cxgb3_wq;
140 * link_report - show link status and link speed/duplex
141 * @dev: the net device of the port whose settings are to be reported
143 * Shows the link status, speed, and duplex of a port.
145 static void link_report(struct net_device *dev)
147 if (!netif_carrier_ok(dev))
148 printk(KERN_INFO "%s: link down\n", dev->name);
150 const char *s = "10Mbps";
151 const struct port_info *p = netdev_priv(dev);
153 switch (p->link_config.speed) {
165 printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
166 p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
171 * t3_os_link_changed - handle link status changes
172 * @adapter: the adapter associated with the link change
173 * @port_id: the port index whose link status has changed
174 * @link_stat: the new status of the link
175 * @speed: the new speed setting
176 * @duplex: the new duplex setting
177 * @pause: the new flow-control setting
179 * This is the OS-dependent handler for link status changes. The OS
180 * neutral handler takes care of most of the processing for these events,
181 * then calls this handler for any OS-specific processing.
183 void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
184 int speed, int duplex, int pause)
186 struct net_device *dev = adapter->port[port_id];
188 /* Skip changes from disabled ports. */
189 if (!netif_running(dev))
192 if (link_stat != netif_carrier_ok(dev)) {
194 netif_carrier_on(dev);
196 netif_carrier_off(dev);
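/*
 * cxgb_set_rxmode - set a port's receive mode
 * @dev: the device whose Rx mode is being updated
 *
 * Builds a t3_rx_mode from the device's flags and multicast list and
 * programs it into the port's MAC.
 */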
201 static void cxgb_set_rxmode(struct net_device *dev)
203 struct t3_rx_mode rm;
204 struct port_info *pi = netdev_priv(dev);
206 init_rx_mode(&rm, dev, dev->mc_list);
207 t3_mac_set_rx_mode(&pi->mac, &rm);
211 * link_start - enable a port
212 * @dev: the device to enable
214 * Performs the MAC and PHY actions needed to enable a port.
216 static void link_start(struct net_device *dev)
218 struct t3_rx_mode rm;
219 struct port_info *pi = netdev_priv(dev);
220 struct cmac *mac = &pi->mac;
222 init_rx_mode(&rm, dev, dev->mc_list);
224 t3_mac_set_mtu(mac, dev->mtu);
225 t3_mac_set_address(mac, 0, dev->dev_addr);
226 t3_mac_set_rx_mode(mac, &rm);
227 t3_link_start(&pi->phy, mac, &pi->link_config);
228 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
231 static inline void cxgb_disable_msi(struct adapter *adapter)
233 if (adapter->flags & USING_MSIX) {
234 pci_disable_msix(adapter->pdev);
235 adapter->flags &= ~USING_MSIX;
236 } else if (adapter->flags & USING_MSI) {
237 pci_disable_msi(adapter->pdev);
238 adapter->flags &= ~USING_MSI;
243 * Interrupt handler for asynchronous events used with MSI-X.
245 static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
247 t3_slow_intr_handler(cookie);
252 * Name the MSI-X interrupts.
254 static void name_msix_vecs(struct adapter *adap)
256 int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
258 snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
259 adap->msix_info[0].desc[n] = 0;
261 for_each_port(adap, j) {
262 struct net_device *d = adap->port[j];
263 const struct port_info *pi = netdev_priv(d);
265 for (i = 0; i < pi->nqsets; i++, msi_idx++) {
266 snprintf(adap->msix_info[msi_idx].desc, n,
267 "%s (queue %d)", d->name, i);
268 adap->msix_info[msi_idx].desc[n] = 0;
273 static int request_msix_data_irqs(struct adapter *adap)
275 int i, j, err, qidx = 0;
277 for_each_port(adap, i) {
278 int nqsets = adap2pinfo(adap, i)->nqsets;
280 for (j = 0; j < nqsets; ++j) {
281 err = request_irq(adap->msix_info[qidx + 1].vec,
282 t3_intr_handler(adap,
285 adap->msix_info[qidx + 1].desc,
286 &adap->sge.qs[qidx]);
289 free_irq(adap->msix_info[qidx + 1].vec,
290 &adap->sge.qs[qidx]);
300 * setup_rss - configure RSS
303 * Sets up RSS to distribute packets to multiple receive queues. We
304 * configure the RSS CPU lookup table to distribute to the number of HW
305 * receive queues, and the response queue lookup table to narrow that
306 * down to the response queues actually configured for each port.
307 * We always configure the RSS mapping for two ports since the mapping
308 * table has plenty of entries.
310 static void setup_rss(struct adapter *adap)
313 unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
314 unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
315 u8 cpus[SGE_QSETS + 1];
316 u16 rspq_map[RSS_TABLE_SIZE];
318 for (i = 0; i < SGE_QSETS; ++i)
320 cpus[SGE_QSETS] = 0xff; /* terminator */
322 for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
323 rspq_map[i] = i % nq0;
324 rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
327 t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
328 F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
329 V_RRCPLCPUSIZE(6), cpus, rspq_map);
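/*
 * Worked example for the mapping loop above, with assumed sizes
 * (RSS_TABLE_SIZE == 64 is an assumption here): with nq0 = 4 queue sets on
 * port 0 and nq1 = 2 on port 1, entries 0..31 of rspq_map cycle through
 * response queues 0,1,2,3 and entries 32..63 cycle through 4,5
 * (i.e. (i % nq1) + nq0).
 */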
333 * If we have multiple receive queues per port serviced by NAPI we need one
334 * netdevice per queue as NAPI operates on netdevices. We already have one
335 * netdevice, namely the one associated with the interface, so we use dummy
336 * ones for any additional queues. Note that these netdevices exist purely
337 * so that NAPI has something to work with; they do not represent network
338 * ports and are not registered.
340 static int init_dummy_netdevs(struct adapter *adap)
342 int i, j, dummy_idx = 0;
343 struct net_device *nd;
345 for_each_port(adap, i) {
346 struct net_device *dev = adap->port[i];
347 const struct port_info *pi = netdev_priv(dev);
349 for (j = 0; j < pi->nqsets - 1; j++) {
350 if (!adap->dummy_netdev[dummy_idx]) {
351 nd = alloc_netdev(0, "", ether_setup);
357 set_bit(__LINK_STATE_START, &nd->state);
358 adap->dummy_netdev[dummy_idx] = nd;
360 strcpy(adap->dummy_netdev[dummy_idx]->name, dev->name);
367 while (--dummy_idx >= 0) {
368 free_netdev(adap->dummy_netdev[dummy_idx]);
369 adap->dummy_netdev[dummy_idx] = NULL;
375 * Wait until all NAPI handlers are descheduled. This includes the handlers of
376 * both netdevices representing interfaces and the dummy ones for the extra
379 static void quiesce_rx(struct adapter *adap)
382 struct net_device *dev;
384 for_each_port(adap, i) {
386 while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
390 for (i = 0; i < ARRAY_SIZE(adap->dummy_netdev); i++) {
391 dev = adap->dummy_netdev[i];
393 while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
399 * setup_sge_qsets - configure SGE Tx/Rx/response queues
402 * Determines how many sets of SGE queues to use and initializes them.
403 * We support multiple queue sets per port if we have MSI-X, otherwise
404 * just one queue set per port.
406 static int setup_sge_qsets(struct adapter *adap)
408 int i, j, err, irq_idx = 0, qset_idx = 0, dummy_dev_idx = 0;
409 unsigned int ntxq = is_offload(adap) ? SGE_TXQ_PER_SET : 1;
411 if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
414 for_each_port(adap, i) {
415 struct net_device *dev = adap->port[i];
416 const struct port_info *pi = netdev_priv(dev);
418 for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
419 err = t3_sge_alloc_qset(adap, qset_idx, 1,
420 (adap->flags & USING_MSIX) ? qset_idx + 1 :
422 &adap->params.sge.qset[qset_idx], ntxq,
424 adap->dummy_netdev[dummy_dev_idx++]);
426 t3_free_sge_resources(adap);
435 static ssize_t attr_show(struct device *d, struct device_attribute *attr,
437 ssize_t(*format) (struct adapter *, char *))
440 struct adapter *adap = to_net_dev(d)->priv;
442 /* Synchronize with ioctls that may shut down the device */
444 len = (*format) (adap, buf);
449 static ssize_t attr_store(struct device *d, struct device_attribute *attr,
450 const char *buf, size_t len,
451 ssize_t(*set) (struct adapter *, unsigned int),
452 unsigned int min_val, unsigned int max_val)
457 struct adapter *adap = to_net_dev(d)->priv;
459 if (!capable(CAP_NET_ADMIN))
462 val = simple_strtoul(buf, &endp, 0);
463 if (endp == buf || val < min_val || val > max_val)
467 ret = (*set) (adap, val);
474 #define CXGB3_SHOW(name, val_expr) \
475 static ssize_t format_##name(struct adapter *adap, char *buf) \
477 return sprintf(buf, "%u\n", val_expr); \
479 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
482 return attr_show(d, attr, buf, format_##name); \
485 static ssize_t set_nfilters(struct adapter *adap, unsigned int val)
487 if (adap->flags & FULL_INIT_DONE)
489 if (val && adap->params.rev == 0)
491 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers)
493 adap->params.mc5.nfilters = val;
497 static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
498 const char *buf, size_t len)
500 return attr_store(d, attr, buf, len, set_nfilters, 0, ~0);
503 static ssize_t set_nservers(struct adapter *adap, unsigned int val)
505 if (adap->flags & FULL_INIT_DONE)
507 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters)
509 adap->params.mc5.nservers = val;
513 static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
514 const char *buf, size_t len)
516 return attr_store(d, attr, buf, len, set_nservers, 0, ~0);
519 #define CXGB3_ATTR_R(name, val_expr) \
520 CXGB3_SHOW(name, val_expr) \
521 static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
523 #define CXGB3_ATTR_RW(name, val_expr, store_method) \
524 CXGB3_SHOW(name, val_expr) \
525 static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
527 CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
528 CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
529 CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
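/*
 * Usage sketch (interface name is hypothetical): the group below is created
 * on the first port's net device, so the MC5 sizing knobs appear as e.g.
 * /sys/class/net/eth2/cam_size (read-only), nfilters and nservers
 * (root-writable).  Writes go through attr_store() above, which requires
 * CAP_NET_ADMIN; set_nfilters()/set_nservers() additionally reject changes
 * once FULL_INIT_DONE is set.
 */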
531 static struct attribute *cxgb3_attrs[] = {
532 &dev_attr_cam_size.attr,
533 &dev_attr_nfilters.attr,
534 &dev_attr_nservers.attr,
538 static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
540 static ssize_t tm_attr_show(struct device *d, struct device_attribute *attr,
541 char *buf, int sched)
544 unsigned int v, addr, bpt, cpt;
545 struct adapter *adap = to_net_dev(d)->priv;
547 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
549 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
550 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
553 bpt = (v >> 8) & 0xff;
556 len = sprintf(buf, "disabled\n");
558 v = (adap->params.vpd.cclk * 1000) / cpt;
559 len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
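/*
 * The computation above: cpt and bpt are the scheduler's clocks-per-tick
 * and bytes-per-tick fields, and vpd.cclk appears to be the core clock in
 * kHz, so cclk * 1000 / cpt is ticks per second; multiplying by bpt gives
 * bytes per second and dividing by 125 converts that to Kbps (x8 / 1000).
 */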
565 static ssize_t tm_attr_store(struct device *d, struct device_attribute *attr,
566 const char *buf, size_t len, int sched)
571 struct adapter *adap = to_net_dev(d)->priv;
573 if (!capable(CAP_NET_ADMIN))
576 val = simple_strtoul(buf, &endp, 0);
577 if (endp == buf || val > 10000000)
581 ret = t3_config_sched(adap, val, sched);
588 #define TM_ATTR(name, sched) \
589 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
592 return tm_attr_show(d, attr, buf, sched); \
594 static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
595 const char *buf, size_t len) \
597 return tm_attr_store(d, attr, buf, len, sched); \
599 static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
610 static struct attribute *offload_attrs[] = {
611 &dev_attr_sched0.attr,
612 &dev_attr_sched1.attr,
613 &dev_attr_sched2.attr,
614 &dev_attr_sched3.attr,
615 &dev_attr_sched4.attr,
616 &dev_attr_sched5.attr,
617 &dev_attr_sched6.attr,
618 &dev_attr_sched7.attr,
622 static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
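/*
 * These sched0..sched7 files are added to the same net device by
 * offload_open() and removed by offload_close().  A write is passed to
 * t3_config_sched(); judging by the read side, the value is a rate limit
 * in Kbps, with 0 presumably disabling the scheduler.
 */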
625 * Sends an sk_buff to an offload queue driver
626 * after dealing with any active network taps.
628 static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
633 ret = t3_offload_tx(tdev, skb);
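/*
 * write_smt_entry - update a port's source MAC table entry
 *
 * Builds a CPL_SMT_WRITE_REQ carrying port @idx's MAC address and submits
 * it through the offload queue, keeping the SMT entry used for offloaded
 * traffic in sync with the net device's address.
 */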
638 static int write_smt_entry(struct adapter *adapter, int idx)
640 struct cpl_smt_write_req *req;
641 struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
646 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
647 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
648 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
649 req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */
651 memset(req->src_mac1, 0, sizeof(req->src_mac1));
652 memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
654 offload_tx(&adapter->tdev, skb);
658 static int init_smt(struct adapter *adapter)
662 for_each_port(adapter, i)
663 write_smt_entry(adapter, i);
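/*
 * Pack the per-port MTUs (port 1 in the upper 16 bits) into the TP MTU
 * port table register.
 */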
667 static void init_port_mtus(struct adapter *adapter)
669 unsigned int mtus = adapter->port[0]->mtu;
671 if (adapter->port[1])
672 mtus |= adapter->port[1]->mtu << 16;
673 t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
676 static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
680 struct mngt_pktsched_wr *req;
682 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
683 req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
684 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
685 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
691 t3_mgmt_tx(adap, skb);
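/*
 * bind_qsets - associate SGE queue sets with their ports
 *
 * Walks every port and issues one packet-scheduler management command per
 * queue set; scheduler selector 1 appears to bind each set to the port
 * that owns it.
 */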
694 static void bind_qsets(struct adapter *adap)
698 for_each_port(adap, i) {
699 const struct port_info *pi = adap2pinfo(adap, i);
701 for (j = 0; j < pi->nqsets; ++j)
702 send_pktsched_cmd(adap, 1, pi->first_qset + j, -1,
708 * cxgb_up - enable the adapter
709 * @adapter: adapter being enabled
711 * Called when the first port is enabled, this function performs the
712 * actions necessary to make an adapter operational, such as completing
713 * the initialization of HW modules, and enabling interrupts.
715 * Must be called with the rtnl lock held.
717 static int cxgb_up(struct adapter *adap)
721 if (!(adap->flags & FULL_INIT_DONE)) {
722 err = t3_check_fw_version(adap);
726 err = init_dummy_netdevs(adap);
730 err = t3_init_hw(adap, 0);
734 err = setup_sge_qsets(adap);
739 adap->flags |= FULL_INIT_DONE;
744 if (adap->flags & USING_MSIX) {
745 name_msix_vecs(adap);
746 err = request_irq(adap->msix_info[0].vec,
747 t3_async_intr_handler, 0,
748 adap->msix_info[0].desc, adap);
752 if (request_msix_data_irqs(adap)) {
753 free_irq(adap->msix_info[0].vec, adap);
756 } else if ((err = request_irq(adap->pdev->irq,
757 t3_intr_handler(adap,
758 adap->sge.qs[0].rspq.
760 (adap->flags & USING_MSI) ?
766 t3_intr_enable(adap);
768 if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
770 adap->flags |= QUEUES_BOUND;
775 CH_ERR(adap, "request_irq failed, err %d\n", err);
780 * Release resources when all the ports and offloading have been stopped.
782 static void cxgb_down(struct adapter *adapter)
784 t3_sge_stop(adapter);
785 spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */
786 t3_intr_disable(adapter);
787 spin_unlock_irq(&adapter->work_lock);
789 if (adapter->flags & USING_MSIX) {
792 free_irq(adapter->msix_info[0].vec, adapter);
793 for_each_port(adapter, i)
794 n += adap2pinfo(adapter, i)->nqsets;
796 for (i = 0; i < n; ++i)
797 free_irq(adapter->msix_info[i + 1].vec,
798 &adapter->sge.qs[i]);
800 free_irq(adapter->pdev->irq, adapter);
802 flush_workqueue(cxgb3_wq); /* wait for external IRQ handler */
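/*
 * Schedule the next run of the periodic adapter check task: if link polling
 * is needed the interval is linkpoll_period (in tenths of a second),
 * otherwise the MAC statistics update period (in seconds).
 */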
806 static void schedule_chk_task(struct adapter *adap)
810 timeo = adap->params.linkpoll_period ?
811 (HZ * adap->params.linkpoll_period) / 10 :
812 adap->params.stats_update_period * HZ;
814 queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
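/*
 * offload_open - bring up an adapter's offload capabilities
 *
 * Marks the offload device open, brings the adapter up if no port has done
 * so yet, switches TP into offload mode, programs the MTU table, exposes
 * the scheduler sysfs attributes and notifies all registered clients.  On
 * failure the offload mode and device map are restored.
 */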
817 static int offload_open(struct net_device *dev)
819 struct adapter *adapter = dev->priv;
820 struct t3cdev *tdev = T3CDEV(dev);
821 int adap_up = adapter->open_device_map & PORT_MASK;
824 if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
827 if (!adap_up && (err = cxgb_up(adapter)) < 0)
830 t3_tp_set_offload_mode(adapter, 1);
831 tdev->lldev = adapter->port[0];
832 err = cxgb3_offload_activate(adapter);
836 init_port_mtus(adapter);
837 t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
838 adapter->params.b_wnd,
839 adapter->params.rev == 0 ?
840 adapter->port[0]->mtu : 0xffff);
843 /* Never mind if the next step fails */
844 sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group);
846 /* Call back all registered clients */
847 cxgb3_add_clients(tdev);
850 /* restore them in case the offload module has changed them */
852 t3_tp_set_offload_mode(adapter, 0);
853 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
854 cxgb3_set_dummy_ops(tdev);
859 static int offload_close(struct t3cdev *tdev)
861 struct adapter *adapter = tdev2adap(tdev);
863 if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
866 /* Call back all registered clients */
867 cxgb3_remove_clients(tdev);
869 sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
872 cxgb3_set_dummy_ops(tdev);
873 t3_tp_set_offload_mode(adapter, 0);
874 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
876 if (!adapter->open_device_map)
879 cxgb3_offload_deactivate(adapter);
883 static int cxgb_open(struct net_device *dev)
886 struct adapter *adapter = dev->priv;
887 struct port_info *pi = netdev_priv(dev);
888 int other_ports = adapter->open_device_map & PORT_MASK;
890 if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
893 set_bit(pi->port_id, &adapter->open_device_map);
895 err = offload_open(dev);
898 "Could not initialize offload capabilities\n");
902 t3_port_intr_enable(adapter, pi->port_id);
903 netif_start_queue(dev);
905 schedule_chk_task(adapter);
910 static int cxgb_close(struct net_device *dev)
912 struct adapter *adapter = dev->priv;
913 struct port_info *p = netdev_priv(dev);
915 t3_port_intr_disable(adapter, p->port_id);
916 netif_stop_queue(dev);
917 p->phy.ops->power_down(&p->phy, 1);
918 netif_carrier_off(dev);
919 t3_mac_disable(&p->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
921 spin_lock(&adapter->work_lock); /* sync with update task */
922 clear_bit(p->port_id, &adapter->open_device_map);
923 spin_unlock(&adapter->work_lock);
925 if (!(adapter->open_device_map & PORT_MASK))
926 cancel_rearming_delayed_workqueue(cxgb3_wq,
927 &adapter->adap_check_task);
929 if (!adapter->open_device_map)
935 static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
937 struct adapter *adapter = dev->priv;
938 struct port_info *p = netdev_priv(dev);
939 struct net_device_stats *ns = &p->netstats;
940 const struct mac_stats *pstats;
942 spin_lock(&adapter->stats_lock);
943 pstats = t3_mac_update_stats(&p->mac);
944 spin_unlock(&adapter->stats_lock);
946 ns->tx_bytes = pstats->tx_octets;
947 ns->tx_packets = pstats->tx_frames;
948 ns->rx_bytes = pstats->rx_octets;
949 ns->rx_packets = pstats->rx_frames;
950 ns->multicast = pstats->rx_mcast_frames;
952 ns->tx_errors = pstats->tx_underrun;
953 ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
954 pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
955 pstats->rx_fifo_ovfl;
957 /* detailed rx_errors */
958 ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
959 ns->rx_over_errors = 0;
960 ns->rx_crc_errors = pstats->rx_fcs_errs;
961 ns->rx_frame_errors = pstats->rx_symbol_errs;
962 ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
963 ns->rx_missed_errors = pstats->rx_cong_drops;
965 /* detailed tx_errors */
966 ns->tx_aborted_errors = 0;
967 ns->tx_carrier_errors = 0;
968 ns->tx_fifo_errors = pstats->tx_underrun;
969 ns->tx_heartbeat_errors = 0;
970 ns->tx_window_errors = 0;
974 static u32 get_msglevel(struct net_device *dev)
976 struct adapter *adapter = dev->priv;
978 return adapter->msg_enable;
981 static void set_msglevel(struct net_device *dev, u32 val)
983 struct adapter *adapter = dev->priv;
985 adapter->msg_enable = val;
988 static char stats_strings[][ETH_GSTRING_LEN] = {
991 "TxMulticastFramesOK",
992 "TxBroadcastFramesOK",
1000 "TxFrames256To511 ",
1001 "TxFrames512To1023 ",
1002 "TxFrames1024To1518 ",
1003 "TxFrames1519ToMax ",
1007 "RxMulticastFramesOK",
1008 "RxBroadcastFramesOK",
1019 "RxFrames128To255 ",
1020 "RxFrames256To511 ",
1021 "RxFrames512To1023 ",
1022 "RxFrames1024To1518 ",
1023 "RxFrames1519ToMax ",
1034 static int get_stats_count(struct net_device *dev)
1036 return ARRAY_SIZE(stats_strings);
1039 #define T3_REGMAP_SIZE (3 * 1024)
1041 static int get_regs_len(struct net_device *dev)
1043 return T3_REGMAP_SIZE;
1046 static int get_eeprom_len(struct net_device *dev)
1051 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1054 struct adapter *adapter = dev->priv;
1056 t3_get_fw_version(adapter, &fw_vers);
1058 strcpy(info->driver, DRV_NAME);
1059 strcpy(info->version, DRV_VERSION);
1060 strcpy(info->bus_info, pci_name(adapter->pdev));
1062 strcpy(info->fw_version, "N/A");
1064 snprintf(info->fw_version, sizeof(info->fw_version),
1066 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1067 G_FW_VERSION_MAJOR(fw_vers),
1068 G_FW_VERSION_MINOR(fw_vers),
1069 G_FW_VERSION_MICRO(fw_vers));
1073 static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
1075 if (stringset == ETH_SS_STATS)
1076 memcpy(data, stats_strings, sizeof(stats_strings));
1079 static unsigned long collect_sge_port_stats(struct adapter *adapter,
1080 struct port_info *p, int idx)
1083 unsigned long tot = 0;
1085 for (i = 0; i < p->nqsets; ++i)
1086 tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
1090 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1093 struct adapter *adapter = dev->priv;
1094 struct port_info *pi = netdev_priv(dev);
1095 const struct mac_stats *s;
1097 spin_lock(&adapter->stats_lock);
1098 s = t3_mac_update_stats(&pi->mac);
1099 spin_unlock(&adapter->stats_lock);
1101 *data++ = s->tx_octets;
1102 *data++ = s->tx_frames;
1103 *data++ = s->tx_mcast_frames;
1104 *data++ = s->tx_bcast_frames;
1105 *data++ = s->tx_pause;
1106 *data++ = s->tx_underrun;
1107 *data++ = s->tx_fifo_urun;
1109 *data++ = s->tx_frames_64;
1110 *data++ = s->tx_frames_65_127;
1111 *data++ = s->tx_frames_128_255;
1112 *data++ = s->tx_frames_256_511;
1113 *data++ = s->tx_frames_512_1023;
1114 *data++ = s->tx_frames_1024_1518;
1115 *data++ = s->tx_frames_1519_max;
1117 *data++ = s->rx_octets;
1118 *data++ = s->rx_frames;
1119 *data++ = s->rx_mcast_frames;
1120 *data++ = s->rx_bcast_frames;
1121 *data++ = s->rx_pause;
1122 *data++ = s->rx_fcs_errs;
1123 *data++ = s->rx_symbol_errs;
1124 *data++ = s->rx_short;
1125 *data++ = s->rx_jabber;
1126 *data++ = s->rx_too_long;
1127 *data++ = s->rx_fifo_ovfl;
1129 *data++ = s->rx_frames_64;
1130 *data++ = s->rx_frames_65_127;
1131 *data++ = s->rx_frames_128_255;
1132 *data++ = s->rx_frames_256_511;
1133 *data++ = s->rx_frames_512_1023;
1134 *data++ = s->rx_frames_1024_1518;
1135 *data++ = s->rx_frames_1519_max;
1137 *data++ = pi->phy.fifo_errors;
1139 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1140 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1141 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1142 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1143 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1144 *data++ = s->rx_cong_drops;
1147 static inline void reg_block_dump(struct adapter *ap, void *buf,
1148 unsigned int start, unsigned int end)
1150 u32 *p = buf + start;
1152 for (; start <= end; start += sizeof(u32))
1153 *p++ = t3_read_reg(ap, start);
1156 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1159 struct adapter *ap = dev->priv;
1163 * bits 0..9: chip version
1164 * bits 10..15: chip revision
1165 * bit 31: set for PCIe cards
1167 regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1170 * We skip the MAC statistics registers because they are clear-on-read.
1171 * Also reading multi-register stats would need to synchronize with the
1172 * periodic mac stats accumulation. Hard to justify the complexity.
1174 memset(buf, 0, T3_REGMAP_SIZE);
1175 reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1176 reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1177 reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1178 reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1179 reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1180 reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1181 XGM_REG(A_XGM_SERDES_STAT3, 1));
1182 reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1183 XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1186 static int restart_autoneg(struct net_device *dev)
1188 struct port_info *p = netdev_priv(dev);
1190 if (!netif_running(dev))
1192 if (p->link_config.autoneg != AUTONEG_ENABLE)
1194 p->phy.ops->autoneg_restart(&p->phy);
1198 static int cxgb3_phys_id(struct net_device *dev, u32 data)
1201 struct adapter *adapter = dev->priv;
1206 for (i = 0; i < data * 2; i++) {
1207 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1208 (i & 1) ? F_GPIO0_OUT_VAL : 0);
1209 if (msleep_interruptible(500))
1212 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1217 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1219 struct port_info *p = netdev_priv(dev);
1221 cmd->supported = p->link_config.supported;
1222 cmd->advertising = p->link_config.advertising;
1224 if (netif_carrier_ok(dev)) {
1225 cmd->speed = p->link_config.speed;
1226 cmd->duplex = p->link_config.duplex;
1232 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1233 cmd->phy_address = p->phy.addr;
1234 cmd->transceiver = XCVR_EXTERNAL;
1235 cmd->autoneg = p->link_config.autoneg;
1241 static int speed_duplex_to_caps(int speed, int duplex)
1247 if (duplex == DUPLEX_FULL)
1248 cap = SUPPORTED_10baseT_Full;
1250 cap = SUPPORTED_10baseT_Half;
1253 if (duplex == DUPLEX_FULL)
1254 cap = SUPPORTED_100baseT_Full;
1256 cap = SUPPORTED_100baseT_Half;
1259 if (duplex == DUPLEX_FULL)
1260 cap = SUPPORTED_1000baseT_Full;
1262 cap = SUPPORTED_1000baseT_Half;
1265 if (duplex == DUPLEX_FULL)
1266 cap = SUPPORTED_10000baseT_Full;
1271 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1272 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1273 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1274 ADVERTISED_10000baseT_Full)
1276 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1278 struct port_info *p = netdev_priv(dev);
1279 struct link_config *lc = &p->link_config;
1281 if (!(lc->supported & SUPPORTED_Autoneg))
1282 return -EOPNOTSUPP; /* can't change speed/duplex */
1284 if (cmd->autoneg == AUTONEG_DISABLE) {
1285 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1287 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
1289 lc->requested_speed = cmd->speed;
1290 lc->requested_duplex = cmd->duplex;
1291 lc->advertising = 0;
1293 cmd->advertising &= ADVERTISED_MASK;
1294 cmd->advertising &= lc->supported;
1295 if (!cmd->advertising)
1297 lc->requested_speed = SPEED_INVALID;
1298 lc->requested_duplex = DUPLEX_INVALID;
1299 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1301 lc->autoneg = cmd->autoneg;
1302 if (netif_running(dev))
1303 t3_link_start(&p->phy, &p->mac, lc);
1307 static void get_pauseparam(struct net_device *dev,
1308 struct ethtool_pauseparam *epause)
1310 struct port_info *p = netdev_priv(dev);
1312 epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1313 epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1314 epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1317 static int set_pauseparam(struct net_device *dev,
1318 struct ethtool_pauseparam *epause)
1320 struct port_info *p = netdev_priv(dev);
1321 struct link_config *lc = &p->link_config;
1323 if (epause->autoneg == AUTONEG_DISABLE)
1324 lc->requested_fc = 0;
1325 else if (lc->supported & SUPPORTED_Autoneg)
1326 lc->requested_fc = PAUSE_AUTONEG;
1330 if (epause->rx_pause)
1331 lc->requested_fc |= PAUSE_RX;
1332 if (epause->tx_pause)
1333 lc->requested_fc |= PAUSE_TX;
1334 if (lc->autoneg == AUTONEG_ENABLE) {
1335 if (netif_running(dev))
1336 t3_link_start(&p->phy, &p->mac, lc);
1338 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1339 if (netif_running(dev))
1340 t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1345 static u32 get_rx_csum(struct net_device *dev)
1347 struct port_info *p = netdev_priv(dev);
1349 return p->rx_csum_offload;
1352 static int set_rx_csum(struct net_device *dev, u32 data)
1354 struct port_info *p = netdev_priv(dev);
1356 p->rx_csum_offload = data;
1360 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1362 struct adapter *adapter = dev->priv;
1364 e->rx_max_pending = MAX_RX_BUFFERS;
1365 e->rx_mini_max_pending = 0;
1366 e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1367 e->tx_max_pending = MAX_TXQ_ENTRIES;
1369 e->rx_pending = adapter->params.sge.qset[0].fl_size;
1370 e->rx_mini_pending = adapter->params.sge.qset[0].rspq_size;
1371 e->rx_jumbo_pending = adapter->params.sge.qset[0].jumbo_size;
1372 e->tx_pending = adapter->params.sge.qset[0].txq_size[0];
1375 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1378 struct adapter *adapter = dev->priv;
1380 if (e->rx_pending > MAX_RX_BUFFERS ||
1381 e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1382 e->tx_pending > MAX_TXQ_ENTRIES ||
1383 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1384 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1385 e->rx_pending < MIN_FL_ENTRIES ||
1386 e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1387 e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1390 if (adapter->flags & FULL_INIT_DONE)
1393 for (i = 0; i < SGE_QSETS; ++i) {
1394 struct qset_params *q = &adapter->params.sge.qset[i];
1396 q->rspq_size = e->rx_mini_pending;
1397 q->fl_size = e->rx_pending;
1398 q->jumbo_size = e->rx_jumbo_pending;
1399 q->txq_size[0] = e->tx_pending;
1400 q->txq_size[1] = e->tx_pending;
1401 q->txq_size[2] = e->tx_pending;
1406 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1408 struct adapter *adapter = dev->priv;
1409 struct qset_params *qsp = &adapter->params.sge.qset[0];
1410 struct sge_qset *qs = &adapter->sge.qs[0];
1412 if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1415 qsp->coalesce_usecs = c->rx_coalesce_usecs;
1416 t3_update_qset_coalesce(qs, qsp);
1420 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1422 struct adapter *adapter = dev->priv;
1423 struct qset_params *q = adapter->params.sge.qset;
1425 c->rx_coalesce_usecs = q->coalesce_usecs;
1429 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1433 struct adapter *adapter = dev->priv;
1435 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1439 e->magic = EEPROM_MAGIC;
1440 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1441 err = t3_seeprom_read(adapter, i, (u32 *)&buf[i]);
1444 memcpy(data, buf + e->offset, e->len);
1449 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1454 u32 aligned_offset, aligned_len, *p;
1455 struct adapter *adapter = dev->priv;
1457 if (eeprom->magic != EEPROM_MAGIC)
1460 aligned_offset = eeprom->offset & ~3;
1461 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1463 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1464 buf = kmalloc(aligned_len, GFP_KERNEL);
1467 err = t3_seeprom_read(adapter, aligned_offset, (u32 *) buf);
1468 if (!err && aligned_len > 4)
1469 err = t3_seeprom_read(adapter,
1470 aligned_offset + aligned_len - 4,
1471 (u32 *)&buf[aligned_len - 4]);
1474 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1478 err = t3_seeprom_wp(adapter, 0);
1482 for (p = (u32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
1483 err = t3_seeprom_write(adapter, aligned_offset, *p);
1484 aligned_offset += 4;
1488 err = t3_seeprom_wp(adapter, 1);
1495 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1499 memset(&wol->sopass, 0, sizeof(wol->sopass));
1502 static const struct ethtool_ops cxgb_ethtool_ops = {
1503 .get_settings = get_settings,
1504 .set_settings = set_settings,
1505 .get_drvinfo = get_drvinfo,
1506 .get_msglevel = get_msglevel,
1507 .set_msglevel = set_msglevel,
1508 .get_ringparam = get_sge_param,
1509 .set_ringparam = set_sge_param,
1510 .get_coalesce = get_coalesce,
1511 .set_coalesce = set_coalesce,
1512 .get_eeprom_len = get_eeprom_len,
1513 .get_eeprom = get_eeprom,
1514 .set_eeprom = set_eeprom,
1515 .get_pauseparam = get_pauseparam,
1516 .set_pauseparam = set_pauseparam,
1517 .get_rx_csum = get_rx_csum,
1518 .set_rx_csum = set_rx_csum,
1519 .get_tx_csum = ethtool_op_get_tx_csum,
1520 .set_tx_csum = ethtool_op_set_tx_csum,
1521 .get_sg = ethtool_op_get_sg,
1522 .set_sg = ethtool_op_set_sg,
1523 .get_link = ethtool_op_get_link,
1524 .get_strings = get_strings,
1525 .phys_id = cxgb3_phys_id,
1526 .nway_reset = restart_autoneg,
1527 .get_stats_count = get_stats_count,
1528 .get_ethtool_stats = get_stats,
1529 .get_regs_len = get_regs_len,
1530 .get_regs = get_regs,
1532 .get_tso = ethtool_op_get_tso,
1533 .set_tso = ethtool_op_set_tso,
1534 .get_perm_addr = ethtool_op_get_perm_addr
1537 static int in_range(int val, int lo, int hi)
1539 return val < 0 || (val <= hi && val >= lo);
1542 static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1546 struct adapter *adapter = dev->priv;
1548 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1552 case CHELSIO_SETREG:{
1553 struct ch_reg edata;
1555 if (!capable(CAP_NET_ADMIN))
1557 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1559 if ((edata.addr & 3) != 0
1560 || edata.addr >= adapter->mmio_len)
1562 writel(edata.val, adapter->regs + edata.addr);
1565 case CHELSIO_GETREG:{
1566 struct ch_reg edata;
1568 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1570 if ((edata.addr & 3) != 0
1571 || edata.addr >= adapter->mmio_len)
1573 edata.val = readl(adapter->regs + edata.addr);
1574 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1578 case CHELSIO_SET_QSET_PARAMS:{
1580 struct qset_params *q;
1581 struct ch_qset_params t;
1583 if (!capable(CAP_NET_ADMIN))
1585 if (copy_from_user(&t, useraddr, sizeof(t)))
1587 if (t.qset_idx >= SGE_QSETS)
1589 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
1590 !in_range(t.cong_thres, 0, 255) ||
1591 !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
1593 !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
1595 !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
1596 MAX_CTRL_TXQ_ENTRIES) ||
1597 !in_range(t.fl_size[0], MIN_FL_ENTRIES,
1599 || !in_range(t.fl_size[1], MIN_FL_ENTRIES,
1600 MAX_RX_JUMBO_BUFFERS)
1601 || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
1604 if ((adapter->flags & FULL_INIT_DONE) &&
1605 (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
1606 t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
1607 t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
1608 t.polling >= 0 || t.cong_thres >= 0))
1611 q = &adapter->params.sge.qset[t.qset_idx];
1613 if (t.rspq_size >= 0)
1614 q->rspq_size = t.rspq_size;
1615 if (t.fl_size[0] >= 0)
1616 q->fl_size = t.fl_size[0];
1617 if (t.fl_size[1] >= 0)
1618 q->jumbo_size = t.fl_size[1];
1619 if (t.txq_size[0] >= 0)
1620 q->txq_size[0] = t.txq_size[0];
1621 if (t.txq_size[1] >= 0)
1622 q->txq_size[1] = t.txq_size[1];
1623 if (t.txq_size[2] >= 0)
1624 q->txq_size[2] = t.txq_size[2];
1625 if (t.cong_thres >= 0)
1626 q->cong_thres = t.cong_thres;
1627 if (t.intr_lat >= 0) {
1628 struct sge_qset *qs =
1629 &adapter->sge.qs[t.qset_idx];
1631 q->coalesce_usecs = t.intr_lat;
1632 t3_update_qset_coalesce(qs, q);
1634 if (t.polling >= 0) {
1635 if (adapter->flags & USING_MSIX)
1636 q->polling = t.polling;
1638 /* No polling with INTx for T3A */
1639 if (adapter->params.rev == 0 &&
1640 !(adapter->flags & USING_MSI))
1643 for (i = 0; i < SGE_QSETS; i++) {
1644 q = &adapter->params.sge.
1646 q->polling = t.polling;
1652 case CHELSIO_GET_QSET_PARAMS:{
1653 struct qset_params *q;
1654 struct ch_qset_params t;
1656 if (copy_from_user(&t, useraddr, sizeof(t)))
1658 if (t.qset_idx >= SGE_QSETS)
1661 q = &adapter->params.sge.qset[t.qset_idx];
1662 t.rspq_size = q->rspq_size;
1663 t.txq_size[0] = q->txq_size[0];
1664 t.txq_size[1] = q->txq_size[1];
1665 t.txq_size[2] = q->txq_size[2];
1666 t.fl_size[0] = q->fl_size;
1667 t.fl_size[1] = q->jumbo_size;
1668 t.polling = q->polling;
1669 t.intr_lat = q->coalesce_usecs;
1670 t.cong_thres = q->cong_thres;
1672 if (copy_to_user(useraddr, &t, sizeof(t)))
1676 case CHELSIO_SET_QSET_NUM:{
1677 struct ch_reg edata;
1678 struct port_info *pi = netdev_priv(dev);
1679 unsigned int i, first_qset = 0, other_qsets = 0;
1681 if (!capable(CAP_NET_ADMIN))
1683 if (adapter->flags & FULL_INIT_DONE)
1685 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1687 if (edata.val < 1 ||
1688 (edata.val > 1 && !(adapter->flags & USING_MSIX)))
1691 for_each_port(adapter, i)
1692 if (adapter->port[i] && adapter->port[i] != dev)
1693 other_qsets += adap2pinfo(adapter, i)->nqsets;
1695 if (edata.val + other_qsets > SGE_QSETS)
1698 pi->nqsets = edata.val;
1700 for_each_port(adapter, i)
1701 if (adapter->port[i]) {
1702 pi = adap2pinfo(adapter, i);
1703 pi->first_qset = first_qset;
1704 first_qset += pi->nqsets;
1708 case CHELSIO_GET_QSET_NUM:{
1709 struct ch_reg edata;
1710 struct port_info *pi = netdev_priv(dev);
1712 edata.cmd = CHELSIO_GET_QSET_NUM;
1713 edata.val = pi->nqsets;
1714 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1718 case CHELSIO_LOAD_FW:{
1720 struct ch_mem_range t;
1722 if (!capable(CAP_NET_ADMIN))
1724 if (copy_from_user(&t, useraddr, sizeof(t)))
1727 fw_data = kmalloc(t.len, GFP_KERNEL);
1732 (fw_data, useraddr + sizeof(t), t.len)) {
1737 ret = t3_load_fw(adapter, fw_data, t.len);
1743 case CHELSIO_SETMTUTAB:{
1747 if (!is_offload(adapter))
1749 if (!capable(CAP_NET_ADMIN))
1751 if (offload_running(adapter))
1753 if (copy_from_user(&m, useraddr, sizeof(m)))
1755 if (m.nmtus != NMTUS)
1757 if (m.mtus[0] < 81) /* accommodate SACK */
1760 /* MTUs must be in ascending order */
1761 for (i = 1; i < NMTUS; ++i)
1762 if (m.mtus[i] < m.mtus[i - 1])
1765 memcpy(adapter->params.mtus, m.mtus,
1766 sizeof(adapter->params.mtus));
1769 case CHELSIO_GET_PM:{
1770 struct tp_params *p = &adapter->params.tp;
1771 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
1773 if (!is_offload(adapter))
1775 m.tx_pg_sz = p->tx_pg_size;
1776 m.tx_num_pg = p->tx_num_pgs;
1777 m.rx_pg_sz = p->rx_pg_size;
1778 m.rx_num_pg = p->rx_num_pgs;
1779 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
1780 if (copy_to_user(useraddr, &m, sizeof(m)))
1784 case CHELSIO_SET_PM:{
1786 struct tp_params *p = &adapter->params.tp;
1788 if (!is_offload(adapter))
1790 if (!capable(CAP_NET_ADMIN))
1792 if (adapter->flags & FULL_INIT_DONE)
1794 if (copy_from_user(&m, useraddr, sizeof(m)))
1796 if (!m.rx_pg_sz || (m.rx_pg_sz & (m.rx_pg_sz - 1)) ||
1797 !m.tx_pg_sz || (m.tx_pg_sz & (m.tx_pg_sz - 1)))
1798 return -EINVAL; /* not power of 2 */
1799 if (!(m.rx_pg_sz & 0x14000))
1800 return -EINVAL; /* not 16KB or 64KB */
1801 if (!(m.tx_pg_sz & 0x1554000))
1803 if (m.tx_num_pg == -1)
1804 m.tx_num_pg = p->tx_num_pgs;
1805 if (m.rx_num_pg == -1)
1806 m.rx_num_pg = p->rx_num_pgs;
1807 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
1809 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
1810 m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
1812 p->rx_pg_size = m.rx_pg_sz;
1813 p->tx_pg_size = m.tx_pg_sz;
1814 p->rx_num_pgs = m.rx_num_pg;
1815 p->tx_num_pgs = m.tx_num_pg;
1818 case CHELSIO_GET_MEM:{
1819 struct ch_mem_range t;
1823 if (!is_offload(adapter))
1825 if (!(adapter->flags & FULL_INIT_DONE))
1826 return -EIO; /* need the memory controllers */
1827 if (copy_from_user(&t, useraddr, sizeof(t)))
1829 if ((t.addr & 7) || (t.len & 7))
1831 if (t.mem_id == MEM_CM)
1833 else if (t.mem_id == MEM_PMRX)
1834 mem = &adapter->pmrx;
1835 else if (t.mem_id == MEM_PMTX)
1836 mem = &adapter->pmtx;
1842 * bits 0..9: chip version
1843 * bits 10..15: chip revision
1845 t.version = 3 | (adapter->params.rev << 10);
1846 if (copy_to_user(useraddr, &t, sizeof(t)))
1850 * Read 256 bytes at a time as len can be large and we don't
1851 * want to use huge intermediate buffers.
1853 useraddr += sizeof(t); /* advance to start of buffer */
1855 unsigned int chunk =
1856 min_t(unsigned int, t.len, sizeof(buf));
1859 t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
1863 if (copy_to_user(useraddr, buf, chunk))
1871 case CHELSIO_SET_TRACE_FILTER:{
1873 const struct trace_params *tp;
1875 if (!capable(CAP_NET_ADMIN))
1877 if (!offload_running(adapter))
1879 if (copy_from_user(&t, useraddr, sizeof(t)))
1882 tp = (const struct trace_params *)&t.sip;
1884 t3_config_trace_filter(adapter, tp, 0,
1888 t3_config_trace_filter(adapter, tp, 1,
1893 case CHELSIO_SET_PKTSCHED:{
1894 struct ch_pktsched_params p;
1896 if (!capable(CAP_NET_ADMIN))
1898 if (!adapter->open_device_map)
1899 return -EAGAIN; /* uP and SGE must be running */
1900 if (copy_from_user(&p, useraddr, sizeof(p)))
1902 send_pktsched_cmd(adapter, p.sched, p.idx, p.min, p.max,
1913 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
1916 struct adapter *adapter = dev->priv;
1917 struct port_info *pi = netdev_priv(dev);
1918 struct mii_ioctl_data *data = if_mii(req);
1922 data->phy_id = pi->phy.addr;
1926 struct cphy *phy = &pi->phy;
1928 if (!phy->mdio_read)
1930 if (is_10G(adapter)) {
1931 mmd = data->phy_id >> 8;
1934 else if (mmd > MDIO_DEV_XGXS)
1938 phy->mdio_read(adapter, data->phy_id & 0x1f,
1939 mmd, data->reg_num, &val);
1942 phy->mdio_read(adapter, data->phy_id & 0x1f,
1943 0, data->reg_num & 0x1f,
1946 data->val_out = val;
1950 struct cphy *phy = &pi->phy;
1952 if (!capable(CAP_NET_ADMIN))
1954 if (!phy->mdio_write)
1956 if (is_10G(adapter)) {
1957 mmd = data->phy_id >> 8;
1960 else if (mmd > MDIO_DEV_XGXS)
1964 phy->mdio_write(adapter,
1965 data->phy_id & 0x1f, mmd,
1970 phy->mdio_write(adapter,
1971 data->phy_id & 0x1f, 0,
1972 data->reg_num & 0x1f,
1977 return cxgb_extension_ioctl(dev, req->ifr_data);
1984 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
1987 struct adapter *adapter = dev->priv;
1988 struct port_info *pi = netdev_priv(dev);
1990 if (new_mtu < 81) /* accommodate SACK */
1992 if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
1995 init_port_mtus(adapter);
1996 if (adapter->params.rev == 0 && offload_running(adapter))
1997 t3_load_mtus(adapter, adapter->params.mtus,
1998 adapter->params.a_wnd, adapter->params.b_wnd,
1999 adapter->port[0]->mtu);
2003 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2005 struct adapter *adapter = dev->priv;
2006 struct port_info *pi = netdev_priv(dev);
2007 struct sockaddr *addr = p;
2009 if (!is_valid_ether_addr(addr->sa_data))
2012 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2013 t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2014 if (offload_running(adapter))
2015 write_smt_entry(adapter, pi->port_id);
2020 * t3_synchronize_rx - wait for current Rx processing on a port to complete
2021 * @adap: the adapter
2024 * Ensures that current Rx processing on any of the queues associated with
2025 * the given port completes before returning. We do this by acquiring and
2026 * releasing the locks of the response queues associated with the port.
2028 static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2032 for (i = 0; i < p->nqsets; i++) {
2033 struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;
2035 spin_lock_irq(&q->lock);
2036 spin_unlock_irq(&q->lock);
2040 static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2042 struct adapter *adapter = dev->priv;
2043 struct port_info *pi = netdev_priv(dev);
2046 if (adapter->params.rev > 0)
2047 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2049 /* single control for all ports */
2050 unsigned int i, have_vlans = 0;
2051 for_each_port(adapter, i)
2052 have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2054 t3_set_vlan_accel(adapter, 1, have_vlans);
2056 t3_synchronize_rx(adapter, pi);
2059 static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
2064 #ifdef CONFIG_NET_POLL_CONTROLLER
2065 static void cxgb_netpoll(struct net_device *dev)
2067 struct adapter *adapter = dev->priv;
2068 struct sge_qset *qs = dev2qset(dev);
2070 t3_intr_handler(adapter, qs->rspq.polling) (adapter->pdev->irq,
2076 * Periodic accumulation of MAC statistics.
2078 static void mac_stats_update(struct adapter *adapter)
2082 for_each_port(adapter, i) {
2083 struct net_device *dev = adapter->port[i];
2084 struct port_info *p = netdev_priv(dev);
2086 if (netif_running(dev)) {
2087 spin_lock(&adapter->stats_lock);
2088 t3_mac_update_stats(&p->mac);
2089 spin_unlock(&adapter->stats_lock);
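/*
 * Poll the link state of ports whose PHYs cannot report link changes
 * through an interrupt.
 */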
2094 static void check_link_status(struct adapter *adapter)
2098 for_each_port(adapter, i) {
2099 struct net_device *dev = adapter->port[i];
2100 struct port_info *p = netdev_priv(dev);
2102 if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
2103 t3_link_changed(adapter, i);
2107 static void t3_adap_check_task(struct work_struct *work)
2109 struct adapter *adapter = container_of(work, struct adapter,
2110 adap_check_task.work);
2111 const struct adapter_params *p = &adapter->params;
2113 adapter->check_task_cnt++;
2115 /* Check link status for PHYs without interrupts */
2116 if (p->linkpoll_period)
2117 check_link_status(adapter);
2119 /* Accumulate MAC stats if needed */
2120 if (!p->linkpoll_period ||
2121 (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2122 p->stats_update_period) {
2123 mac_stats_update(adapter);
2124 adapter->check_task_cnt = 0;
2127 /* Schedule the next check update if any port is active. */
2128 spin_lock(&adapter->work_lock);
2129 if (adapter->open_device_map & PORT_MASK)
2130 schedule_chk_task(adapter);
2131 spin_unlock(&adapter->work_lock);
2135 * Processes external (PHY) interrupts in process context.
2137 static void ext_intr_task(struct work_struct *work)
2139 struct adapter *adapter = container_of(work, struct adapter,
2140 ext_intr_handler_task);
2142 t3_phy_intr_handler(adapter);
2144 /* Now reenable external interrupts */
2145 spin_lock_irq(&adapter->work_lock);
2146 if (adapter->slow_intr_mask) {
2147 adapter->slow_intr_mask |= F_T3DBG;
2148 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2149 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2150 adapter->slow_intr_mask);
2152 spin_unlock_irq(&adapter->work_lock);
2156 * Interrupt-context handler for external (PHY) interrupts.
2158 void t3_os_ext_intr_handler(struct adapter *adapter)
2161 * Schedule a task to handle external interrupts as they may be slow
2162 * and we use a mutex to protect MDIO registers. We disable PHY
2163 * interrupts in the meantime and let the task reenable them when
2166 spin_lock(&adapter->work_lock);
2167 if (adapter->slow_intr_mask) {
2168 adapter->slow_intr_mask &= ~F_T3DBG;
2169 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2170 adapter->slow_intr_mask);
2171 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2173 spin_unlock(&adapter->work_lock);
2176 void t3_fatal_err(struct adapter *adapter)
2178 unsigned int fw_status[4];
2180 if (adapter->flags & FULL_INIT_DONE) {
2181 t3_sge_stop(adapter);
2182 t3_intr_disable(adapter);
2184 CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2185 if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2186 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2187 fw_status[0], fw_status[1],
2188 fw_status[2], fw_status[3]);
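/*
 * Try to enable MSI-X with one vector per SGE queue set plus one for slow
 * path (async) events.  On success the assigned vectors are recorded;
 * otherwise the number of available vectors is logged so the caller can
 * fall back to MSI or pin interrupts.
 */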
2192 static int __devinit cxgb_enable_msix(struct adapter *adap)
2194 struct msix_entry entries[SGE_QSETS + 1];
2197 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2198 entries[i].entry = i;
2200 err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
2202 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2203 adap->msix_info[i].vec = entries[i].vector;
2205 dev_info(&adap->pdev->dev,
2206 "only %d MSI-X vectors left, not using MSI-X\n", err);
2210 static void __devinit print_port_info(struct adapter *adap,
2211 const struct adapter_info *ai)
2213 static const char *pci_variant[] = {
2214 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
2221 snprintf(buf, sizeof(buf), "%s x%d",
2222 pci_variant[adap->params.pci.variant],
2223 adap->params.pci.width);
2225 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2226 pci_variant[adap->params.pci.variant],
2227 adap->params.pci.speed, adap->params.pci.width);
2229 for_each_port(adap, i) {
2230 struct net_device *dev = adap->port[i];
2231 const struct port_info *pi = netdev_priv(dev);
2233 if (!test_bit(i, &adap->registered_device_map))
2235 printk(KERN_INFO "%s: %s %s RNIC (rev %d) %s%s\n",
2236 dev->name, ai->desc, pi->port_type->desc,
2237 adap->params.rev, buf,
2238 (adap->flags & USING_MSIX) ? " MSI-X" :
2239 (adap->flags & USING_MSI) ? " MSI" : "");
2240 if (adap->name == dev->name && adap->params.vpd.mclk)
2241 printk(KERN_INFO "%s: %uMB CM, %uMB PMTX, %uMB PMRX\n",
2242 adap->name, t3_mc7_size(&adap->cm) >> 20,
2243 t3_mc7_size(&adap->pmtx) >> 20,
2244 t3_mc7_size(&adap->pmrx) >> 20);
2248 static int __devinit init_one(struct pci_dev *pdev,
2249 const struct pci_device_id *ent)
2251 static int version_printed;
2253 int i, err, pci_using_dac = 0;
2254 unsigned long mmio_start, mmio_len;
2255 const struct adapter_info *ai;
2256 struct adapter *adapter = NULL;
2257 struct port_info *pi;
2259 if (!version_printed) {
2260 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
2265 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
2267 printk(KERN_ERR DRV_NAME
2268 ": cannot initialize work queue\n");
2273 err = pci_request_regions(pdev, DRV_NAME);
2275 /* Just info, some other driver may have claimed the device. */
2276 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
2280 err = pci_enable_device(pdev);
2282 dev_err(&pdev->dev, "cannot enable PCI device\n");
2283 goto out_release_regions;
2286 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2288 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2290 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
2291 "coherent allocations\n");
2292 goto out_disable_device;
2294 } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
2295 dev_err(&pdev->dev, "no usable DMA configuration\n");
2296 goto out_disable_device;
2299 pci_set_master(pdev);
2301 mmio_start = pci_resource_start(pdev, 0);
2302 mmio_len = pci_resource_len(pdev, 0);
2303 ai = t3_get_adapter_info(ent->driver_data);
2305 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2308 goto out_disable_device;
2311 adapter->regs = ioremap_nocache(mmio_start, mmio_len);
2312 if (!adapter->regs) {
2313 dev_err(&pdev->dev, "cannot map device registers\n");
2315 goto out_free_adapter;
2318 adapter->pdev = pdev;
2319 adapter->name = pci_name(pdev);
2320 adapter->msg_enable = dflt_msg_enable;
2321 adapter->mmio_len = mmio_len;
2323 mutex_init(&adapter->mdio_lock);
2324 spin_lock_init(&adapter->work_lock);
2325 spin_lock_init(&adapter->stats_lock);
2327 INIT_LIST_HEAD(&adapter->adapter_list);
2328 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
2329 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
2331 for (i = 0; i < ai->nports; ++i) {
2332 struct net_device *netdev;
2334 netdev = alloc_etherdev(sizeof(struct port_info));
2340 SET_MODULE_OWNER(netdev);
2341 SET_NETDEV_DEV(netdev, &pdev->dev);
2343 adapter->port[i] = netdev;
2344 pi = netdev_priv(netdev);
2345 pi->rx_csum_offload = 1;
2350 netif_carrier_off(netdev);
2351 netdev->irq = pdev->irq;
2352 netdev->mem_start = mmio_start;
2353 netdev->mem_end = mmio_start + mmio_len - 1;
2354 netdev->priv = adapter;
2355 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
2356 netdev->features |= NETIF_F_LLTX;
2358 netdev->features |= NETIF_F_HIGHDMA;
2360 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2361 netdev->vlan_rx_register = vlan_rx_register;
2362 netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;
2364 netdev->open = cxgb_open;
2365 netdev->stop = cxgb_close;
2366 netdev->hard_start_xmit = t3_eth_xmit;
2367 netdev->get_stats = cxgb_get_stats;
2368 netdev->set_multicast_list = cxgb_set_rxmode;
2369 netdev->do_ioctl = cxgb_ioctl;
2370 netdev->change_mtu = cxgb_change_mtu;
2371 netdev->set_mac_address = cxgb_set_mac_addr;
2372 #ifdef CONFIG_NET_POLL_CONTROLLER
2373 netdev->poll_controller = cxgb_netpoll;
2375 netdev->weight = 64;
2377 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
2380 pci_set_drvdata(pdev, adapter->port[0]);
2381 if (t3_prep_adapter(adapter, ai, 1) < 0) {
2387 * The card is now ready to go. If any errors occur during device
2388 * registration we do not fail the whole card but rather proceed only
2389 * with the ports we manage to register successfully. However we must
2390 * register at least one net device.
2392 for_each_port(adapter, i) {
2393 err = register_netdev(adapter->port[i]);
2395 dev_warn(&pdev->dev,
2396 "cannot register net device %s, skipping\n",
2397 adapter->port[i]->name);
2400 * Change the name we use for messages to the name of
2401 * the first successfully registered interface.
2403 if (!adapter->registered_device_map)
2404 adapter->name = adapter->port[i]->name;
2406 __set_bit(i, &adapter->registered_device_map);
2409 if (!adapter->registered_device_map) {
2410 dev_err(&pdev->dev, "could not register any net devices\n");
2414 /* Driver's ready. Reflect it on LEDs */
2415 t3_led_ready(adapter);
2417 if (is_offload(adapter)) {
2418 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
2419 cxgb3_adapter_ofld(adapter);
2422 /* See what interrupts we'll be using */
2423 if (msi > 1 && cxgb_enable_msix(adapter) == 0)
2424 adapter->flags |= USING_MSIX;
2425 else if (msi > 0 && pci_enable_msi(pdev) == 0)
2426 adapter->flags |= USING_MSI;
2428 err = sysfs_create_group(&adapter->port[0]->dev.kobj,
2431 print_port_info(adapter, ai);
2435 iounmap(adapter->regs);
2436 for (i = ai->nports - 1; i >= 0; --i)
2437 if (adapter->port[i])
2438 free_netdev(adapter->port[i]);
2444 pci_disable_device(pdev);
2445 out_release_regions:
2446 pci_release_regions(pdev);
2447 pci_set_drvdata(pdev, NULL);
2451 static void __devexit remove_one(struct pci_dev *pdev)
2453 struct net_device *dev = pci_get_drvdata(pdev);
2457 struct adapter *adapter = dev->priv;
2459 t3_sge_stop(adapter);
2460 sysfs_remove_group(&adapter->port[0]->dev.kobj,
2463 for_each_port(adapter, i)
2464 if (test_bit(i, &adapter->registered_device_map))
2465 unregister_netdev(adapter->port[i]);
2467 if (is_offload(adapter)) {
2468 cxgb3_adapter_unofld(adapter);
2469 if (test_bit(OFFLOAD_DEVMAP_BIT,
2470 &adapter->open_device_map))
2471 offload_close(&adapter->tdev);
2474 t3_free_sge_resources(adapter);
2475 cxgb_disable_msi(adapter);
2477 for (i = 0; i < ARRAY_SIZE(adapter->dummy_netdev); i++)
2478 if (adapter->dummy_netdev[i]) {
2479 free_netdev(adapter->dummy_netdev[i]);
2480 adapter->dummy_netdev[i] = NULL;
2483 for_each_port(adapter, i)
2484 if (adapter->port[i])
2485 free_netdev(adapter->port[i]);
2487 iounmap(adapter->regs);
2489 pci_release_regions(pdev);
2490 pci_disable_device(pdev);
2491 pci_set_drvdata(pdev, NULL);
2495 static struct pci_driver driver = {
2497 .id_table = cxgb3_pci_tbl,
2499 .remove = __devexit_p(remove_one),
2502 static int __init cxgb3_init_module(void)
2506 cxgb3_offload_init();
2508 ret = pci_register_driver(&driver);
2512 static void __exit cxgb3_cleanup_module(void)
2514 pci_unregister_driver(&driver);
2516 destroy_workqueue(cxgb3_wq);
2519 module_init(cxgb3_init_module);
2520 module_exit(cxgb3_cleanup_module);