2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 #include <linux/module.h>
33 #include <linux/moduleparam.h>
34 #include <linux/init.h>
35 #include <linux/pci.h>
36 #include <linux/dma-mapping.h>
37 #include <linux/netdevice.h>
38 #include <linux/etherdevice.h>
39 #include <linux/if_vlan.h>
40 #include <linux/mii.h>
41 #include <linux/sockios.h>
42 #include <linux/workqueue.h>
43 #include <linux/proc_fs.h>
44 #include <linux/rtnetlink.h>
45 #include <asm/uaccess.h>
48 #include "cxgb3_ioctl.h"
50 #include "cxgb3_offload.h"
53 #include "cxgb3_ctl_defs.h"
55 #include "firmware_exports.h"
58 MAX_TXQ_ENTRIES = 16384,
59 MAX_CTRL_TXQ_ENTRIES = 1024,
60 MAX_RSPQ_ENTRIES = 16384,
61 MAX_RX_BUFFERS = 16384,
62 MAX_RX_JUMBO_BUFFERS = 16384,
64 MIN_CTRL_TXQ_ENTRIES = 4,
65 MIN_RSPQ_ENTRIES = 32,
69 #define PORT_MASK ((1 << MAX_NPORTS) - 1)
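/* Bits of open_device_map that correspond to network ports; the offload
 * device is tracked with a separate bit (OFFLOAD_DEVMAP_BIT) outside this
 * mask.
 */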
71 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
72 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
73 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
75 #define EEPROM_MAGIC 0x38E2F10C
77 #define CH_DEVICE(devid, ssid, idx) \
78 { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
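/* CH_DEVICE() builds a pci_device_id entry: devid and ssid select the PCI
 * device and subsystem IDs, and idx is stored in driver_data so that
 * init_one() can look up the matching adapter_info via t3_get_adapter_info().
 */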
80 static const struct pci_device_id cxgb3_pci_tbl[] = {
81 CH_DEVICE(0x20, 1, 0), /* PE9000 */
82 CH_DEVICE(0x21, 1, 1), /* T302E */
83 CH_DEVICE(0x22, 1, 2), /* T310E */
84 CH_DEVICE(0x23, 1, 3), /* T320X */
85 CH_DEVICE(0x24, 1, 1), /* T302X */
86 CH_DEVICE(0x25, 1, 3), /* T320E */
87 CH_DEVICE(0x26, 1, 2), /* T310X */
88 CH_DEVICE(0x30, 1, 2), /* T3B10 */
89 CH_DEVICE(0x31, 1, 3), /* T3B20 */
90 CH_DEVICE(0x32, 1, 1), /* T3B02 */
94 MODULE_DESCRIPTION(DRV_DESC);
95 MODULE_AUTHOR("Chelsio Communications");
96 MODULE_LICENSE("Dual BSD/GPL");
97 MODULE_VERSION(DRV_VERSION);
98 MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
100 static int dflt_msg_enable = DFLT_MSG_ENABLE;
102 module_param(dflt_msg_enable, int, 0644);
103 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
106 * The driver uses the best interrupt scheme available on a platform in the
107 * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
108 * of these schemes the driver may consider as follows:
110 * msi = 2: choose from among all three options
111 * msi = 1: only consider MSI and pin interrupts
112 * msi = 0: force pin interrupts
116 module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "interrupt scheme: 0 = INTx, 1 = MSI/INTx, 2 = MSI-X/MSI/INTx");
 * The driver enables offload by default.
 * To disable it, set ofld_disable = 1.
124 static int ofld_disable = 0;
126 module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to disable offload at init time");
130 * We have work elements that we need to cancel when an interface is taken
131 * down. Normally the work elements would be executed by keventd but that
132 * can deadlock because of linkwatch. If our close method takes the rtnl
133 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
134 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
135 * for our work to complete. Get our own work queue to solve this.
137 static struct workqueue_struct *cxgb3_wq;
140 * link_report - show link status and link speed/duplex
 * @dev: the port whose link status is to be reported
143 * Shows the link status, speed, and duplex of a port.
145 static void link_report(struct net_device *dev)
147 if (!netif_carrier_ok(dev))
148 printk(KERN_INFO "%s: link down\n", dev->name);
150 const char *s = "10Mbps";
151 const struct port_info *p = netdev_priv(dev);
153 switch (p->link_config.speed) {
165 printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
166 p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
171 * t3_os_link_changed - handle link status changes
172 * @adapter: the adapter associated with the link change
 * @port_id: the port index whose link status has changed
174 * @link_stat: the new status of the link
175 * @speed: the new speed setting
176 * @duplex: the new duplex setting
177 * @pause: the new flow-control setting
179 * This is the OS-dependent handler for link status changes. The OS
180 * neutral handler takes care of most of the processing for these events,
181 * then calls this handler for any OS-specific processing.
183 void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
184 int speed, int duplex, int pause)
186 struct net_device *dev = adapter->port[port_id];
188 /* Skip changes from disabled ports. */
189 if (!netif_running(dev))
192 if (link_stat != netif_carrier_ok(dev)) {
194 netif_carrier_on(dev);
196 netif_carrier_off(dev);
201 static void cxgb_set_rxmode(struct net_device *dev)
203 struct t3_rx_mode rm;
204 struct port_info *pi = netdev_priv(dev);
206 init_rx_mode(&rm, dev, dev->mc_list);
207 t3_mac_set_rx_mode(&pi->mac, &rm);
211 * link_start - enable a port
212 * @dev: the device to enable
214 * Performs the MAC and PHY actions needed to enable a port.
216 static void link_start(struct net_device *dev)
218 struct t3_rx_mode rm;
219 struct port_info *pi = netdev_priv(dev);
220 struct cmac *mac = &pi->mac;
222 init_rx_mode(&rm, dev, dev->mc_list);
224 t3_mac_set_mtu(mac, dev->mtu);
225 t3_mac_set_address(mac, 0, dev->dev_addr);
226 t3_mac_set_rx_mode(mac, &rm);
227 t3_link_start(&pi->phy, mac, &pi->link_config);
228 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
231 static inline void cxgb_disable_msi(struct adapter *adapter)
233 if (adapter->flags & USING_MSIX) {
234 pci_disable_msix(adapter->pdev);
235 adapter->flags &= ~USING_MSIX;
236 } else if (adapter->flags & USING_MSI) {
237 pci_disable_msi(adapter->pdev);
238 adapter->flags &= ~USING_MSI;
243 * Interrupt handler for asynchronous events used with MSI-X.
245 static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
247 t3_slow_intr_handler(cookie);
252 * Name the MSI-X interrupts.
254 static void name_msix_vecs(struct adapter *adap)
256 int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
258 snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
259 adap->msix_info[0].desc[n] = 0;
261 for_each_port(adap, j) {
262 struct net_device *d = adap->port[j];
263 const struct port_info *pi = netdev_priv(d);
265 for (i = 0; i < pi->nqsets; i++, msi_idx++) {
266 snprintf(adap->msix_info[msi_idx].desc, n,
267 "%s (queue %d)", d->name, i);
268 adap->msix_info[msi_idx].desc[n] = 0;
273 static int request_msix_data_irqs(struct adapter *adap)
275 int i, j, err, qidx = 0;
277 for_each_port(adap, i) {
278 int nqsets = adap2pinfo(adap, i)->nqsets;
280 for (j = 0; j < nqsets; ++j) {
281 err = request_irq(adap->msix_info[qidx + 1].vec,
282 t3_intr_handler(adap,
285 adap->msix_info[qidx + 1].desc,
286 &adap->sge.qs[qidx]);
289 free_irq(adap->msix_info[qidx + 1].vec,
290 &adap->sge.qs[qidx]);
300 * setup_rss - configure RSS
303 * Sets up RSS to distribute packets to multiple receive queues. We
304 * configure the RSS CPU lookup table to distribute to the number of HW
305 * receive queues, and the response queue lookup table to narrow that
306 * down to the response queues actually configured for each port.
307 * We always configure the RSS mapping for two ports since the mapping
308 * table has plenty of entries.
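 *
 * For example, with two queue sets per port (nq0 = nq1 = 2) the first half
 * of rspq_map alternates 0,1,0,1,... and the second half alternates
 * 2,3,2,3,..., so each port's traffic is spread across its own queue sets.
 */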
310 static void setup_rss(struct adapter *adap)
313 unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
314 unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
315 u8 cpus[SGE_QSETS + 1];
316 u16 rspq_map[RSS_TABLE_SIZE];
318 for (i = 0; i < SGE_QSETS; ++i)
320 cpus[SGE_QSETS] = 0xff; /* terminator */
322 for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
323 rspq_map[i] = i % nq0;
324 rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
327 t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
328 F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
329 V_RRCPLCPUSIZE(6), cpus, rspq_map);
333 * If we have multiple receive queues per port serviced by NAPI we need one
334 * netdevice per queue as NAPI operates on netdevices. We already have one
335 * netdevice, namely the one associated with the interface, so we use dummy
336 * ones for any additional queues. Note that these netdevices exist purely
337 * so that NAPI has something to work with, they do not represent network
338 * ports and are not registered.
340 static int init_dummy_netdevs(struct adapter *adap)
342 int i, j, dummy_idx = 0;
343 struct net_device *nd;
345 for_each_port(adap, i) {
346 struct net_device *dev = adap->port[i];
347 const struct port_info *pi = netdev_priv(dev);
349 for (j = 0; j < pi->nqsets - 1; j++) {
350 if (!adap->dummy_netdev[dummy_idx]) {
351 nd = alloc_netdev(0, "", ether_setup);
357 set_bit(__LINK_STATE_START, &nd->state);
358 adap->dummy_netdev[dummy_idx] = nd;
360 strcpy(adap->dummy_netdev[dummy_idx]->name, dev->name);
367 while (--dummy_idx >= 0) {
368 free_netdev(adap->dummy_netdev[dummy_idx]);
369 adap->dummy_netdev[dummy_idx] = NULL;
375 * Wait until all NAPI handlers are descheduled. This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
379 static void quiesce_rx(struct adapter *adap)
382 struct net_device *dev;
384 for_each_port(adap, i) {
386 while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
390 for (i = 0; i < ARRAY_SIZE(adap->dummy_netdev); i++) {
391 dev = adap->dummy_netdev[i];
393 while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
399 * setup_sge_qsets - configure SGE Tx/Rx/response queues
402 * Determines how many sets of SGE queues to use and initializes them.
403 * We support multiple queue sets per port if we have MSI-X, otherwise
404 * just one queue set per port.
406 static int setup_sge_qsets(struct adapter *adap)
408 int i, j, err, irq_idx = 0, qset_idx = 0, dummy_dev_idx = 0;
409 unsigned int ntxq = is_offload(adap) ? SGE_TXQ_PER_SET : 1;
411 if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
414 for_each_port(adap, i) {
415 struct net_device *dev = adap->port[i];
416 const struct port_info *pi = netdev_priv(dev);
418 for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
419 err = t3_sge_alloc_qset(adap, qset_idx, 1,
420 (adap->flags & USING_MSIX) ? qset_idx + 1 :
422 &adap->params.sge.qset[qset_idx], ntxq,
				adap->dummy_netdev[dummy_dev_idx++]);
426 t3_free_sge_resources(adap);
435 static ssize_t attr_show(struct device *d, struct device_attribute *attr,
437 ssize_t(*format) (struct net_device *, char *))
441 /* Synchronize with ioctls that may shut down the device */
443 len = (*format) (to_net_dev(d), buf);
448 static ssize_t attr_store(struct device *d, struct device_attribute *attr,
449 const char *buf, size_t len,
450 ssize_t(*set) (struct net_device *, unsigned int),
451 unsigned int min_val, unsigned int max_val)
457 if (!capable(CAP_NET_ADMIN))
460 val = simple_strtoul(buf, &endp, 0);
461 if (endp == buf || val < min_val || val > max_val)
465 ret = (*set) (to_net_dev(d), val);
472 #define CXGB3_SHOW(name, val_expr) \
473 static ssize_t format_##name(struct net_device *dev, char *buf) \
475 struct adapter *adap = dev->priv; \
476 return sprintf(buf, "%u\n", val_expr); \
478 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
481 return attr_show(d, attr, buf, format_##name); \
484 static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
486 struct adapter *adap = dev->priv;
488 if (adap->flags & FULL_INIT_DONE)
490 if (val && adap->params.rev == 0)
492 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers)
494 adap->params.mc5.nfilters = val;
498 static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
499 const char *buf, size_t len)
501 return attr_store(d, attr, buf, len, set_nfilters, 0, ~0);
504 static ssize_t set_nservers(struct net_device *dev, unsigned int val)
506 struct adapter *adap = dev->priv;
508 if (adap->flags & FULL_INIT_DONE)
510 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters)
512 adap->params.mc5.nservers = val;
516 static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
517 const char *buf, size_t len)
519 return attr_store(d, attr, buf, len, set_nservers, 0, ~0);
522 #define CXGB3_ATTR_R(name, val_expr) \
523 CXGB3_SHOW(name, val_expr) \
524 static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
526 #define CXGB3_ATTR_RW(name, val_expr, store_method) \
527 CXGB3_SHOW(name, val_expr) \
528 static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
530 CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
531 CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
532 CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
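/* These sysfs attributes expose the size of the MC5 (TCAM) lookup table and
 * let the administrator partition its entries between offload filters and
 * server entries.  Changes are only accepted before the adapter is fully
 * initialized.
 */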
534 static struct attribute *cxgb3_attrs[] = {
535 &dev_attr_cam_size.attr,
536 &dev_attr_nfilters.attr,
537 &dev_attr_nservers.attr,
541 static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
543 static ssize_t tm_attr_show(struct device *d, struct device_attribute *attr,
544 char *buf, int sched)
547 unsigned int v, addr, bpt, cpt;
548 struct adapter *adap = to_net_dev(d)->priv;
550 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
552 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
553 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
556 bpt = (v >> 8) & 0xff;
559 len = sprintf(buf, "disabled\n");
561 v = (adap->params.vpd.cclk * 1000) / cpt;
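		/*
		 * bpt bytes are sent every cpt core-clock cycles.  Assuming
		 * vpd.cclk is in kHz, v = cclk * 1000 / cpt is the number of
		 * sending opportunities per second, so v * bpt is bytes/s and
		 * dividing by 125 converts that to Kbits/s below.
		 */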
562 len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
568 static ssize_t tm_attr_store(struct device *d, struct device_attribute *attr,
569 const char *buf, size_t len, int sched)
574 struct adapter *adap = to_net_dev(d)->priv;
576 if (!capable(CAP_NET_ADMIN))
579 val = simple_strtoul(buf, &endp, 0);
580 if (endp == buf || val > 10000000)
584 ret = t3_config_sched(adap, val, sched);
591 #define TM_ATTR(name, sched) \
592 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
595 return tm_attr_show(d, attr, buf, sched); \
597 static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
598 const char *buf, size_t len) \
600 return tm_attr_store(d, attr, buf, len, sched); \
602 static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
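/*
 * One attribute per hardware Tx scheduler.  offload_attrs[] below references
 * dev_attr_sched0..dev_attr_sched7, so the expected instantiations are
 * sketched here.
 */
TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);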
613 static struct attribute *offload_attrs[] = {
614 &dev_attr_sched0.attr,
615 &dev_attr_sched1.attr,
616 &dev_attr_sched2.attr,
617 &dev_attr_sched3.attr,
618 &dev_attr_sched4.attr,
619 &dev_attr_sched5.attr,
620 &dev_attr_sched6.attr,
621 &dev_attr_sched7.attr,
625 static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
628 * Sends an sk_buff to an offload queue driver
629 * after dealing with any active network taps.
631 static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
636 ret = t3_offload_tx(tdev, skb);
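/*
 * Write entry idx of the hardware source MAC table (SMT) with the MAC
 * address of port idx, by sending a CPL_SMT_WRITE_REQ over the offload
 * queue.
 */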
641 static int write_smt_entry(struct adapter *adapter, int idx)
643 struct cpl_smt_write_req *req;
644 struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
649 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
650 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
651 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
652 req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */
654 memset(req->src_mac1, 0, sizeof(req->src_mac1));
655 memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
657 offload_tx(&adapter->tdev, skb);
661 static int init_smt(struct adapter *adapter)
665 for_each_port(adapter, i)
666 write_smt_entry(adapter, i);
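/*
 * Program the per-port MTU table: port 0's MTU goes in the low 16 bits and,
 * if present, port 1's MTU in the high 16 bits.
 */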
670 static void init_port_mtus(struct adapter *adapter)
672 unsigned int mtus = adapter->port[0]->mtu;
674 if (adapter->port[1])
675 mtus |= adapter->port[1]->mtu << 16;
676 t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
679 static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
683 struct mngt_pktsched_wr *req;
685 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
686 req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
687 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
688 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
694 t3_mgmt_tx(adap, skb);
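/*
 * Associate each port's queue sets with that port's Tx channel by issuing
 * packet-scheduler management commands.
 */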
697 static void bind_qsets(struct adapter *adap)
701 for_each_port(adap, i) {
702 const struct port_info *pi = adap2pinfo(adap, i);
704 for (j = 0; j < pi->nqsets; ++j)
705 send_pktsched_cmd(adap, 1, pi->first_qset + j, -1,
711 * cxgb_up - enable the adapter
712 * @adapter: adapter being enabled
714 * Called when the first port is enabled, this function performs the
715 * actions necessary to make an adapter operational, such as completing
716 * the initialization of HW modules, and enabling interrupts.
718 * Must be called with the rtnl lock held.
720 static int cxgb_up(struct adapter *adap)
724 if (!(adap->flags & FULL_INIT_DONE)) {
725 err = t3_check_fw_version(adap);
729 err = init_dummy_netdevs(adap);
733 err = t3_init_hw(adap, 0);
737 err = setup_sge_qsets(adap);
742 adap->flags |= FULL_INIT_DONE;
747 if (adap->flags & USING_MSIX) {
748 name_msix_vecs(adap);
749 err = request_irq(adap->msix_info[0].vec,
750 t3_async_intr_handler, 0,
751 adap->msix_info[0].desc, adap);
755 if (request_msix_data_irqs(adap)) {
756 free_irq(adap->msix_info[0].vec, adap);
759 } else if ((err = request_irq(adap->pdev->irq,
760 t3_intr_handler(adap,
761 adap->sge.qs[0].rspq.
763 (adap->flags & USING_MSI) ?
769 t3_intr_enable(adap);
771 if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
773 adap->flags |= QUEUES_BOUND;
778 CH_ERR(adap, "request_irq failed, err %d\n", err);
783 * Release resources when all the ports and offloading have been stopped.
785 static void cxgb_down(struct adapter *adapter)
787 t3_sge_stop(adapter);
788 spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */
789 t3_intr_disable(adapter);
790 spin_unlock_irq(&adapter->work_lock);
792 if (adapter->flags & USING_MSIX) {
795 free_irq(adapter->msix_info[0].vec, adapter);
796 for_each_port(adapter, i)
797 n += adap2pinfo(adapter, i)->nqsets;
799 for (i = 0; i < n; ++i)
800 free_irq(adapter->msix_info[i + 1].vec,
801 &adapter->sge.qs[i]);
803 free_irq(adapter->pdev->irq, adapter);
805 flush_workqueue(cxgb3_wq); /* wait for external IRQ handler */
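/*
 * Arm the periodic adapter check task: run every linkpoll_period tenths of
 * a second when the PHYs must be polled for link changes, otherwise at the
 * MAC statistics update period.
 */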
809 static void schedule_chk_task(struct adapter *adap)
813 timeo = adap->params.linkpoll_period ?
814 (HZ * adap->params.linkpoll_period) / 10 :
815 adap->params.stats_update_period * HZ;
817 queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
820 static int offload_open(struct net_device *dev)
822 struct adapter *adapter = dev->priv;
823 struct t3cdev *tdev = T3CDEV(dev);
824 int adap_up = adapter->open_device_map & PORT_MASK;
827 if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
830 if (!adap_up && (err = cxgb_up(adapter)) < 0)
833 t3_tp_set_offload_mode(adapter, 1);
834 tdev->lldev = adapter->port[0];
835 err = cxgb3_offload_activate(adapter);
839 init_port_mtus(adapter);
840 t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
841 adapter->params.b_wnd,
842 adapter->params.rev == 0 ?
843 adapter->port[0]->mtu : 0xffff);
846 /* Never mind if the next step fails */
847 sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group);
849 /* Call back all registered clients */
850 cxgb3_add_clients(tdev);
853 /* restore them in case the offload module has changed them */
855 t3_tp_set_offload_mode(adapter, 0);
856 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
857 cxgb3_set_dummy_ops(tdev);
862 static int offload_close(struct t3cdev *tdev)
864 struct adapter *adapter = tdev2adap(tdev);
866 if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
869 /* Call back all registered clients */
870 cxgb3_remove_clients(tdev);
872 sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
875 cxgb3_set_dummy_ops(tdev);
876 t3_tp_set_offload_mode(adapter, 0);
877 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
879 if (!adapter->open_device_map)
882 cxgb3_offload_deactivate(adapter);
886 static int cxgb_open(struct net_device *dev)
889 struct adapter *adapter = dev->priv;
890 struct port_info *pi = netdev_priv(dev);
891 int other_ports = adapter->open_device_map & PORT_MASK;
893 if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
896 set_bit(pi->port_id, &adapter->open_device_map);
898 err = offload_open(dev);
901 "Could not initialize offload capabilities\n");
905 t3_port_intr_enable(adapter, pi->port_id);
906 netif_start_queue(dev);
908 schedule_chk_task(adapter);
913 static int cxgb_close(struct net_device *dev)
915 struct adapter *adapter = dev->priv;
916 struct port_info *p = netdev_priv(dev);
918 t3_port_intr_disable(adapter, p->port_id);
919 netif_stop_queue(dev);
920 p->phy.ops->power_down(&p->phy, 1);
921 netif_carrier_off(dev);
922 t3_mac_disable(&p->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
924 spin_lock(&adapter->work_lock); /* sync with update task */
925 clear_bit(p->port_id, &adapter->open_device_map);
926 spin_unlock(&adapter->work_lock);
928 if (!(adapter->open_device_map & PORT_MASK))
929 cancel_rearming_delayed_workqueue(cxgb3_wq,
930 &adapter->adap_check_task);
932 if (!adapter->open_device_map)
938 static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
940 struct adapter *adapter = dev->priv;
941 struct port_info *p = netdev_priv(dev);
942 struct net_device_stats *ns = &p->netstats;
943 const struct mac_stats *pstats;
945 spin_lock(&adapter->stats_lock);
946 pstats = t3_mac_update_stats(&p->mac);
947 spin_unlock(&adapter->stats_lock);
949 ns->tx_bytes = pstats->tx_octets;
950 ns->tx_packets = pstats->tx_frames;
951 ns->rx_bytes = pstats->rx_octets;
952 ns->rx_packets = pstats->rx_frames;
953 ns->multicast = pstats->rx_mcast_frames;
955 ns->tx_errors = pstats->tx_underrun;
956 ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
957 pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
958 pstats->rx_fifo_ovfl;
960 /* detailed rx_errors */
961 ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
962 ns->rx_over_errors = 0;
963 ns->rx_crc_errors = pstats->rx_fcs_errs;
964 ns->rx_frame_errors = pstats->rx_symbol_errs;
965 ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
966 ns->rx_missed_errors = pstats->rx_cong_drops;
968 /* detailed tx_errors */
969 ns->tx_aborted_errors = 0;
970 ns->tx_carrier_errors = 0;
971 ns->tx_fifo_errors = pstats->tx_underrun;
972 ns->tx_heartbeat_errors = 0;
973 ns->tx_window_errors = 0;
977 static u32 get_msglevel(struct net_device *dev)
979 struct adapter *adapter = dev->priv;
981 return adapter->msg_enable;
984 static void set_msglevel(struct net_device *dev, u32 val)
986 struct adapter *adapter = dev->priv;
988 adapter->msg_enable = val;
991 static char stats_strings[][ETH_GSTRING_LEN] = {
994 "TxMulticastFramesOK",
995 "TxBroadcastFramesOK",
1002 "TxFrames128To255 ",
1003 "TxFrames256To511 ",
1004 "TxFrames512To1023 ",
1005 "TxFrames1024To1518 ",
1006 "TxFrames1519ToMax ",
1010 "RxMulticastFramesOK",
1011 "RxBroadcastFramesOK",
1022 "RxFrames128To255 ",
1023 "RxFrames256To511 ",
1024 "RxFrames512To1023 ",
1025 "RxFrames1024To1518 ",
1026 "RxFrames1519ToMax ",
1037 static int get_stats_count(struct net_device *dev)
1039 return ARRAY_SIZE(stats_strings);
1042 #define T3_REGMAP_SIZE (3 * 1024)
1044 static int get_regs_len(struct net_device *dev)
1046 return T3_REGMAP_SIZE;
1049 static int get_eeprom_len(struct net_device *dev)
1054 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1057 struct adapter *adapter = dev->priv;
1059 t3_get_fw_version(adapter, &fw_vers);
1061 strcpy(info->driver, DRV_NAME);
1062 strcpy(info->version, DRV_VERSION);
1063 strcpy(info->bus_info, pci_name(adapter->pdev));
1065 strcpy(info->fw_version, "N/A");
1067 snprintf(info->fw_version, sizeof(info->fw_version),
1069 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1070 G_FW_VERSION_MAJOR(fw_vers),
1071 G_FW_VERSION_MINOR(fw_vers),
1072 G_FW_VERSION_MICRO(fw_vers));
1076 static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
1078 if (stringset == ETH_SS_STATS)
1079 memcpy(data, stats_strings, sizeof(stats_strings));
1082 static unsigned long collect_sge_port_stats(struct adapter *adapter,
1083 struct port_info *p, int idx)
1086 unsigned long tot = 0;
1088 for (i = 0; i < p->nqsets; ++i)
1089 tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
1093 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1096 struct adapter *adapter = dev->priv;
1097 struct port_info *pi = netdev_priv(dev);
1098 const struct mac_stats *s;
1100 spin_lock(&adapter->stats_lock);
1101 s = t3_mac_update_stats(&pi->mac);
1102 spin_unlock(&adapter->stats_lock);
1104 *data++ = s->tx_octets;
1105 *data++ = s->tx_frames;
1106 *data++ = s->tx_mcast_frames;
1107 *data++ = s->tx_bcast_frames;
1108 *data++ = s->tx_pause;
1109 *data++ = s->tx_underrun;
1110 *data++ = s->tx_fifo_urun;
1112 *data++ = s->tx_frames_64;
1113 *data++ = s->tx_frames_65_127;
1114 *data++ = s->tx_frames_128_255;
1115 *data++ = s->tx_frames_256_511;
1116 *data++ = s->tx_frames_512_1023;
1117 *data++ = s->tx_frames_1024_1518;
1118 *data++ = s->tx_frames_1519_max;
1120 *data++ = s->rx_octets;
1121 *data++ = s->rx_frames;
1122 *data++ = s->rx_mcast_frames;
1123 *data++ = s->rx_bcast_frames;
1124 *data++ = s->rx_pause;
1125 *data++ = s->rx_fcs_errs;
1126 *data++ = s->rx_symbol_errs;
1127 *data++ = s->rx_short;
1128 *data++ = s->rx_jabber;
1129 *data++ = s->rx_too_long;
1130 *data++ = s->rx_fifo_ovfl;
1132 *data++ = s->rx_frames_64;
1133 *data++ = s->rx_frames_65_127;
1134 *data++ = s->rx_frames_128_255;
1135 *data++ = s->rx_frames_256_511;
1136 *data++ = s->rx_frames_512_1023;
1137 *data++ = s->rx_frames_1024_1518;
1138 *data++ = s->rx_frames_1519_max;
1140 *data++ = pi->phy.fifo_errors;
1142 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1143 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1144 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1145 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1146 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1147 *data++ = s->rx_cong_drops;
1150 static inline void reg_block_dump(struct adapter *ap, void *buf,
1151 unsigned int start, unsigned int end)
1153 u32 *p = buf + start;
1155 for (; start <= end; start += sizeof(u32))
1156 *p++ = t3_read_reg(ap, start);
1159 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1162 struct adapter *ap = dev->priv;
1166 * bits 0..9: chip version
1167 * bits 10..15: chip revision
1168 * bit 31: set for PCIe cards
1170 regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1173 * We skip the MAC statistics registers because they are clear-on-read.
1174 * Also reading multi-register stats would need to synchronize with the
1175 * periodic mac stats accumulation. Hard to justify the complexity.
1177 memset(buf, 0, T3_REGMAP_SIZE);
1178 reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1179 reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1180 reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1181 reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1182 reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1183 reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1184 XGM_REG(A_XGM_SERDES_STAT3, 1));
1185 reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1186 XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1189 static int restart_autoneg(struct net_device *dev)
1191 struct port_info *p = netdev_priv(dev);
1193 if (!netif_running(dev))
1195 if (p->link_config.autoneg != AUTONEG_ENABLE)
1197 p->phy.ops->autoneg_restart(&p->phy);
1201 static int cxgb3_phys_id(struct net_device *dev, u32 data)
1204 struct adapter *adapter = dev->priv;
1209 for (i = 0; i < data * 2; i++) {
1210 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1211 (i & 1) ? F_GPIO0_OUT_VAL : 0);
1212 if (msleep_interruptible(500))
1215 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1220 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1222 struct port_info *p = netdev_priv(dev);
1224 cmd->supported = p->link_config.supported;
1225 cmd->advertising = p->link_config.advertising;
1227 if (netif_carrier_ok(dev)) {
1228 cmd->speed = p->link_config.speed;
1229 cmd->duplex = p->link_config.duplex;
1235 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1236 cmd->phy_address = p->phy.addr;
1237 cmd->transceiver = XCVR_EXTERNAL;
1238 cmd->autoneg = p->link_config.autoneg;
1244 static int speed_duplex_to_caps(int speed, int duplex)
1250 if (duplex == DUPLEX_FULL)
1251 cap = SUPPORTED_10baseT_Full;
1253 cap = SUPPORTED_10baseT_Half;
1256 if (duplex == DUPLEX_FULL)
1257 cap = SUPPORTED_100baseT_Full;
1259 cap = SUPPORTED_100baseT_Half;
1262 if (duplex == DUPLEX_FULL)
1263 cap = SUPPORTED_1000baseT_Full;
1265 cap = SUPPORTED_1000baseT_Half;
1268 if (duplex == DUPLEX_FULL)
1269 cap = SUPPORTED_10000baseT_Full;
1274 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1275 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1276 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1277 ADVERTISED_10000baseT_Full)
1279 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1281 struct port_info *p = netdev_priv(dev);
1282 struct link_config *lc = &p->link_config;
1284 if (!(lc->supported & SUPPORTED_Autoneg))
1285 return -EOPNOTSUPP; /* can't change speed/duplex */
1287 if (cmd->autoneg == AUTONEG_DISABLE) {
1288 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1290 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
1292 lc->requested_speed = cmd->speed;
1293 lc->requested_duplex = cmd->duplex;
1294 lc->advertising = 0;
1296 cmd->advertising &= ADVERTISED_MASK;
1297 cmd->advertising &= lc->supported;
1298 if (!cmd->advertising)
1300 lc->requested_speed = SPEED_INVALID;
1301 lc->requested_duplex = DUPLEX_INVALID;
1302 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1304 lc->autoneg = cmd->autoneg;
1305 if (netif_running(dev))
1306 t3_link_start(&p->phy, &p->mac, lc);
1310 static void get_pauseparam(struct net_device *dev,
1311 struct ethtool_pauseparam *epause)
1313 struct port_info *p = netdev_priv(dev);
1315 epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1316 epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1317 epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1320 static int set_pauseparam(struct net_device *dev,
1321 struct ethtool_pauseparam *epause)
1323 struct port_info *p = netdev_priv(dev);
1324 struct link_config *lc = &p->link_config;
1326 if (epause->autoneg == AUTONEG_DISABLE)
1327 lc->requested_fc = 0;
1328 else if (lc->supported & SUPPORTED_Autoneg)
1329 lc->requested_fc = PAUSE_AUTONEG;
1333 if (epause->rx_pause)
1334 lc->requested_fc |= PAUSE_RX;
1335 if (epause->tx_pause)
1336 lc->requested_fc |= PAUSE_TX;
1337 if (lc->autoneg == AUTONEG_ENABLE) {
1338 if (netif_running(dev))
1339 t3_link_start(&p->phy, &p->mac, lc);
1341 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1342 if (netif_running(dev))
1343 t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1348 static u32 get_rx_csum(struct net_device *dev)
1350 struct port_info *p = netdev_priv(dev);
1352 return p->rx_csum_offload;
1355 static int set_rx_csum(struct net_device *dev, u32 data)
1357 struct port_info *p = netdev_priv(dev);
1359 p->rx_csum_offload = data;
1363 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1365 struct adapter *adapter = dev->priv;
1367 e->rx_max_pending = MAX_RX_BUFFERS;
1368 e->rx_mini_max_pending = 0;
1369 e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1370 e->tx_max_pending = MAX_TXQ_ENTRIES;
1372 e->rx_pending = adapter->params.sge.qset[0].fl_size;
1373 e->rx_mini_pending = adapter->params.sge.qset[0].rspq_size;
1374 e->rx_jumbo_pending = adapter->params.sge.qset[0].jumbo_size;
1375 e->tx_pending = adapter->params.sge.qset[0].txq_size[0];
1378 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1381 struct adapter *adapter = dev->priv;
1383 if (e->rx_pending > MAX_RX_BUFFERS ||
1384 e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1385 e->tx_pending > MAX_TXQ_ENTRIES ||
1386 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1387 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1388 e->rx_pending < MIN_FL_ENTRIES ||
1389 e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1390 e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1393 if (adapter->flags & FULL_INIT_DONE)
1396 for (i = 0; i < SGE_QSETS; ++i) {
1397 struct qset_params *q = &adapter->params.sge.qset[i];
1399 q->rspq_size = e->rx_mini_pending;
1400 q->fl_size = e->rx_pending;
1401 q->jumbo_size = e->rx_jumbo_pending;
1402 q->txq_size[0] = e->tx_pending;
1403 q->txq_size[1] = e->tx_pending;
1404 q->txq_size[2] = e->tx_pending;
1409 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1411 struct adapter *adapter = dev->priv;
1412 struct qset_params *qsp = &adapter->params.sge.qset[0];
1413 struct sge_qset *qs = &adapter->sge.qs[0];
1415 if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1418 qsp->coalesce_usecs = c->rx_coalesce_usecs;
1419 t3_update_qset_coalesce(qs, qsp);
1423 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1425 struct adapter *adapter = dev->priv;
1426 struct qset_params *q = adapter->params.sge.qset;
1428 c->rx_coalesce_usecs = q->coalesce_usecs;
1432 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1436 struct adapter *adapter = dev->priv;
1438 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1442 e->magic = EEPROM_MAGIC;
1443 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
		err = t3_seeprom_read(adapter, i, (u32 *)&buf[i]);
1447 memcpy(data, buf + e->offset, e->len);
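/*
 * The EEPROM is written in 32-bit words, so unaligned requests are handled
 * by reading the first and last words, merging in the user data, and
 * writing the aligned range back with write protection temporarily disabled.
 */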
1452 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1457 u32 aligned_offset, aligned_len, *p;
1458 struct adapter *adapter = dev->priv;
1460 if (eeprom->magic != EEPROM_MAGIC)
1463 aligned_offset = eeprom->offset & ~3;
1464 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1466 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1467 buf = kmalloc(aligned_len, GFP_KERNEL);
1470 err = t3_seeprom_read(adapter, aligned_offset, (u32 *) buf);
1471 if (!err && aligned_len > 4)
1472 err = t3_seeprom_read(adapter,
1473 aligned_offset + aligned_len - 4,
				      (u32 *)&buf[aligned_len - 4]);
1477 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1481 err = t3_seeprom_wp(adapter, 0);
1485 for (p = (u32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
1486 err = t3_seeprom_write(adapter, aligned_offset, *p);
1487 aligned_offset += 4;
1491 err = t3_seeprom_wp(adapter, 1);
1498 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1502 memset(&wol->sopass, 0, sizeof(wol->sopass));
1505 static const struct ethtool_ops cxgb_ethtool_ops = {
1506 .get_settings = get_settings,
1507 .set_settings = set_settings,
1508 .get_drvinfo = get_drvinfo,
1509 .get_msglevel = get_msglevel,
1510 .set_msglevel = set_msglevel,
1511 .get_ringparam = get_sge_param,
1512 .set_ringparam = set_sge_param,
1513 .get_coalesce = get_coalesce,
1514 .set_coalesce = set_coalesce,
1515 .get_eeprom_len = get_eeprom_len,
1516 .get_eeprom = get_eeprom,
1517 .set_eeprom = set_eeprom,
1518 .get_pauseparam = get_pauseparam,
1519 .set_pauseparam = set_pauseparam,
1520 .get_rx_csum = get_rx_csum,
1521 .set_rx_csum = set_rx_csum,
1522 .get_tx_csum = ethtool_op_get_tx_csum,
1523 .set_tx_csum = ethtool_op_set_tx_csum,
1524 .get_sg = ethtool_op_get_sg,
1525 .set_sg = ethtool_op_set_sg,
1526 .get_link = ethtool_op_get_link,
1527 .get_strings = get_strings,
1528 .phys_id = cxgb3_phys_id,
1529 .nway_reset = restart_autoneg,
1530 .get_stats_count = get_stats_count,
1531 .get_ethtool_stats = get_stats,
1532 .get_regs_len = get_regs_len,
1533 .get_regs = get_regs,
1535 .get_tso = ethtool_op_get_tso,
1536 .set_tso = ethtool_op_set_tso,
1537 .get_perm_addr = ethtool_op_get_perm_addr
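/*
 * In the extension ioctl below a negative value means "leave this setting
 * unchanged", so in_range() treats negative values as acceptable.
 */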
1540 static int in_range(int val, int lo, int hi)
1542 return val < 0 || (val <= hi && val >= lo);
1545 static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1549 struct adapter *adapter = dev->priv;
1551 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1555 case CHELSIO_SET_QSET_PARAMS:{
1557 struct qset_params *q;
1558 struct ch_qset_params t;
1560 if (!capable(CAP_NET_ADMIN))
1562 if (copy_from_user(&t, useraddr, sizeof(t)))
1564 if (t.qset_idx >= SGE_QSETS)
1566 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
1567 !in_range(t.cong_thres, 0, 255) ||
1568 !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
1570 !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
1572 !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
1573 MAX_CTRL_TXQ_ENTRIES) ||
1574 !in_range(t.fl_size[0], MIN_FL_ENTRIES,
1576 || !in_range(t.fl_size[1], MIN_FL_ENTRIES,
1577 MAX_RX_JUMBO_BUFFERS)
1578 || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
1581 if ((adapter->flags & FULL_INIT_DONE) &&
1582 (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
1583 t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
1584 t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
1585 t.polling >= 0 || t.cong_thres >= 0))
1588 q = &adapter->params.sge.qset[t.qset_idx];
1590 if (t.rspq_size >= 0)
1591 q->rspq_size = t.rspq_size;
1592 if (t.fl_size[0] >= 0)
1593 q->fl_size = t.fl_size[0];
1594 if (t.fl_size[1] >= 0)
1595 q->jumbo_size = t.fl_size[1];
1596 if (t.txq_size[0] >= 0)
1597 q->txq_size[0] = t.txq_size[0];
1598 if (t.txq_size[1] >= 0)
1599 q->txq_size[1] = t.txq_size[1];
1600 if (t.txq_size[2] >= 0)
1601 q->txq_size[2] = t.txq_size[2];
1602 if (t.cong_thres >= 0)
1603 q->cong_thres = t.cong_thres;
1604 if (t.intr_lat >= 0) {
1605 struct sge_qset *qs =
1606 &adapter->sge.qs[t.qset_idx];
1608 q->coalesce_usecs = t.intr_lat;
1609 t3_update_qset_coalesce(qs, q);
1611 if (t.polling >= 0) {
1612 if (adapter->flags & USING_MSIX)
1613 q->polling = t.polling;
1615 /* No polling with INTx for T3A */
1616 if (adapter->params.rev == 0 &&
1617 !(adapter->flags & USING_MSI))
1620 for (i = 0; i < SGE_QSETS; i++) {
1621 q = &adapter->params.sge.
1623 q->polling = t.polling;
1629 case CHELSIO_GET_QSET_PARAMS:{
1630 struct qset_params *q;
1631 struct ch_qset_params t;
1633 if (copy_from_user(&t, useraddr, sizeof(t)))
1635 if (t.qset_idx >= SGE_QSETS)
1638 q = &adapter->params.sge.qset[t.qset_idx];
1639 t.rspq_size = q->rspq_size;
1640 t.txq_size[0] = q->txq_size[0];
1641 t.txq_size[1] = q->txq_size[1];
1642 t.txq_size[2] = q->txq_size[2];
1643 t.fl_size[0] = q->fl_size;
1644 t.fl_size[1] = q->jumbo_size;
1645 t.polling = q->polling;
1646 t.intr_lat = q->coalesce_usecs;
1647 t.cong_thres = q->cong_thres;
1649 if (copy_to_user(useraddr, &t, sizeof(t)))
1653 case CHELSIO_SET_QSET_NUM:{
1654 struct ch_reg edata;
1655 struct port_info *pi = netdev_priv(dev);
1656 unsigned int i, first_qset = 0, other_qsets = 0;
1658 if (!capable(CAP_NET_ADMIN))
1660 if (adapter->flags & FULL_INIT_DONE)
1662 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1664 if (edata.val < 1 ||
1665 (edata.val > 1 && !(adapter->flags & USING_MSIX)))
1668 for_each_port(adapter, i)
1669 if (adapter->port[i] && adapter->port[i] != dev)
1670 other_qsets += adap2pinfo(adapter, i)->nqsets;
1672 if (edata.val + other_qsets > SGE_QSETS)
1675 pi->nqsets = edata.val;
1677 for_each_port(adapter, i)
1678 if (adapter->port[i]) {
1679 pi = adap2pinfo(adapter, i);
1680 pi->first_qset = first_qset;
1681 first_qset += pi->nqsets;
1685 case CHELSIO_GET_QSET_NUM:{
1686 struct ch_reg edata;
1687 struct port_info *pi = netdev_priv(dev);
1689 edata.cmd = CHELSIO_GET_QSET_NUM;
1690 edata.val = pi->nqsets;
1691 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1695 case CHELSIO_LOAD_FW:{
1697 struct ch_mem_range t;
1699 if (!capable(CAP_NET_ADMIN))
1701 if (copy_from_user(&t, useraddr, sizeof(t)))
1704 fw_data = kmalloc(t.len, GFP_KERNEL);
1709 (fw_data, useraddr + sizeof(t), t.len)) {
1714 ret = t3_load_fw(adapter, fw_data, t.len);
1720 case CHELSIO_SETMTUTAB:{
1724 if (!is_offload(adapter))
1726 if (!capable(CAP_NET_ADMIN))
1728 if (offload_running(adapter))
1730 if (copy_from_user(&m, useraddr, sizeof(m)))
1732 if (m.nmtus != NMTUS)
1734 if (m.mtus[0] < 81) /* accommodate SACK */
1737 /* MTUs must be in ascending order */
1738 for (i = 1; i < NMTUS; ++i)
1739 if (m.mtus[i] < m.mtus[i - 1])
1742 memcpy(adapter->params.mtus, m.mtus,
1743 sizeof(adapter->params.mtus));
1746 case CHELSIO_GET_PM:{
1747 struct tp_params *p = &adapter->params.tp;
1748 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
1750 if (!is_offload(adapter))
1752 m.tx_pg_sz = p->tx_pg_size;
1753 m.tx_num_pg = p->tx_num_pgs;
1754 m.rx_pg_sz = p->rx_pg_size;
1755 m.rx_num_pg = p->rx_num_pgs;
1756 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
1757 if (copy_to_user(useraddr, &m, sizeof(m)))
1761 case CHELSIO_SET_PM:{
1763 struct tp_params *p = &adapter->params.tp;
1765 if (!is_offload(adapter))
1767 if (!capable(CAP_NET_ADMIN))
1769 if (adapter->flags & FULL_INIT_DONE)
1771 if (copy_from_user(&m, useraddr, sizeof(m)))
1773 if (!m.rx_pg_sz || (m.rx_pg_sz & (m.rx_pg_sz - 1)) ||
1774 !m.tx_pg_sz || (m.tx_pg_sz & (m.tx_pg_sz - 1)))
1775 return -EINVAL; /* not power of 2 */
1776 if (!(m.rx_pg_sz & 0x14000))
1777 return -EINVAL; /* not 16KB or 64KB */
1778 if (!(m.tx_pg_sz & 0x1554000))
1780 if (m.tx_num_pg == -1)
1781 m.tx_num_pg = p->tx_num_pgs;
1782 if (m.rx_num_pg == -1)
1783 m.rx_num_pg = p->rx_num_pgs;
1784 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
1786 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
1787 m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
1789 p->rx_pg_size = m.rx_pg_sz;
1790 p->tx_pg_size = m.tx_pg_sz;
1791 p->rx_num_pgs = m.rx_num_pg;
1792 p->tx_num_pgs = m.tx_num_pg;
1795 case CHELSIO_GET_MEM:{
1796 struct ch_mem_range t;
1800 if (!is_offload(adapter))
1802 if (!(adapter->flags & FULL_INIT_DONE))
1803 return -EIO; /* need the memory controllers */
1804 if (copy_from_user(&t, useraddr, sizeof(t)))
1806 if ((t.addr & 7) || (t.len & 7))
1808 if (t.mem_id == MEM_CM)
1810 else if (t.mem_id == MEM_PMRX)
1811 mem = &adapter->pmrx;
1812 else if (t.mem_id == MEM_PMTX)
1813 mem = &adapter->pmtx;
1819 * bits 0..9: chip version
1820 * bits 10..15: chip revision
1822 t.version = 3 | (adapter->params.rev << 10);
1823 if (copy_to_user(useraddr, &t, sizeof(t)))
1827 * Read 256 bytes at a time as len can be large and we don't
1828 * want to use huge intermediate buffers.
1830 useraddr += sizeof(t); /* advance to start of buffer */
1832 unsigned int chunk =
1833 min_t(unsigned int, t.len, sizeof(buf));
1836 t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
1840 if (copy_to_user(useraddr, buf, chunk))
1848 case CHELSIO_SET_TRACE_FILTER:{
1850 const struct trace_params *tp;
1852 if (!capable(CAP_NET_ADMIN))
1854 if (!offload_running(adapter))
1856 if (copy_from_user(&t, useraddr, sizeof(t)))
1859 tp = (const struct trace_params *)&t.sip;
1861 t3_config_trace_filter(adapter, tp, 0,
1865 t3_config_trace_filter(adapter, tp, 1,
1876 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
1879 struct adapter *adapter = dev->priv;
1880 struct port_info *pi = netdev_priv(dev);
1881 struct mii_ioctl_data *data = if_mii(req);
1885 data->phy_id = pi->phy.addr;
1889 struct cphy *phy = &pi->phy;
1891 if (!phy->mdio_read)
1893 if (is_10G(adapter)) {
1894 mmd = data->phy_id >> 8;
1897 else if (mmd > MDIO_DEV_XGXS)
1901 phy->mdio_read(adapter, data->phy_id & 0x1f,
1902 mmd, data->reg_num, &val);
1905 phy->mdio_read(adapter, data->phy_id & 0x1f,
1906 0, data->reg_num & 0x1f,
1909 data->val_out = val;
1913 struct cphy *phy = &pi->phy;
1915 if (!capable(CAP_NET_ADMIN))
1917 if (!phy->mdio_write)
1919 if (is_10G(adapter)) {
1920 mmd = data->phy_id >> 8;
1923 else if (mmd > MDIO_DEV_XGXS)
1927 phy->mdio_write(adapter,
1928 data->phy_id & 0x1f, mmd,
1933 phy->mdio_write(adapter,
1934 data->phy_id & 0x1f, 0,
1935 data->reg_num & 0x1f,
1940 return cxgb_extension_ioctl(dev, req->ifr_data);
1947 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
1950 struct adapter *adapter = dev->priv;
1951 struct port_info *pi = netdev_priv(dev);
1953 if (new_mtu < 81) /* accommodate SACK */
1955 if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
1958 init_port_mtus(adapter);
1959 if (adapter->params.rev == 0 && offload_running(adapter))
1960 t3_load_mtus(adapter, adapter->params.mtus,
1961 adapter->params.a_wnd, adapter->params.b_wnd,
1962 adapter->port[0]->mtu);
1966 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
1968 struct adapter *adapter = dev->priv;
1969 struct port_info *pi = netdev_priv(dev);
1970 struct sockaddr *addr = p;
1972 if (!is_valid_ether_addr(addr->sa_data))
1975 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1976 t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
1977 if (offload_running(adapter))
1978 write_smt_entry(adapter, pi->port_id);
1983 * t3_synchronize_rx - wait for current Rx processing on a port to complete
 * @adap: the adapter
 * @p: the port
1987 * Ensures that current Rx processing on any of the queues associated with
1988 * the given port completes before returning. We do this by acquiring and
1989 * releasing the locks of the response queues associated with the port.
1991 static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
1995 for (i = 0; i < p->nqsets; i++) {
1996 struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;
1998 spin_lock_irq(&q->lock);
1999 spin_unlock_irq(&q->lock);
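/*
 * Enable or disable HW VLAN extraction for a port.  T3 rev 0 has a single
 * control shared by all ports, so extraction stays enabled while any port
 * has a VLAN group; later revisions are controlled per port.  Rx processing
 * is synchronized afterwards (t3_synchronize_rx()) so the change does not
 * race with receive handlers.
 */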
2003 static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2005 struct adapter *adapter = dev->priv;
2006 struct port_info *pi = netdev_priv(dev);
2009 if (adapter->params.rev > 0)
2010 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2012 /* single control for all ports */
2013 unsigned int i, have_vlans = 0;
2014 for_each_port(adapter, i)
2015 have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2017 t3_set_vlan_accel(adapter, 1, have_vlans);
2019 t3_synchronize_rx(adapter, pi);
2022 static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
2027 #ifdef CONFIG_NET_POLL_CONTROLLER
2028 static void cxgb_netpoll(struct net_device *dev)
2030 struct adapter *adapter = dev->priv;
2031 struct sge_qset *qs = dev2qset(dev);
2033 t3_intr_handler(adapter, qs->rspq.polling) (adapter->pdev->irq,
2039 * Periodic accumulation of MAC statistics.
2041 static void mac_stats_update(struct adapter *adapter)
2045 for_each_port(adapter, i) {
2046 struct net_device *dev = adapter->port[i];
2047 struct port_info *p = netdev_priv(dev);
2049 if (netif_running(dev)) {
2050 spin_lock(&adapter->stats_lock);
2051 t3_mac_update_stats(&p->mac);
2052 spin_unlock(&adapter->stats_lock);
2057 static void check_link_status(struct adapter *adapter)
2061 for_each_port(adapter, i) {
2062 struct net_device *dev = adapter->port[i];
2063 struct port_info *p = netdev_priv(dev);
2065 if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
2066 t3_link_changed(adapter, i);
2070 static void t3_adap_check_task(struct work_struct *work)
2072 struct adapter *adapter = container_of(work, struct adapter,
2073 adap_check_task.work);
2074 const struct adapter_params *p = &adapter->params;
2076 adapter->check_task_cnt++;
2078 /* Check link status for PHYs without interrupts */
2079 if (p->linkpoll_period)
2080 check_link_status(adapter);
2082 /* Accumulate MAC stats if needed */
2083 if (!p->linkpoll_period ||
2084 (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2085 p->stats_update_period) {
2086 mac_stats_update(adapter);
2087 adapter->check_task_cnt = 0;
2090 /* Schedule the next check update if any port is active. */
2091 spin_lock(&adapter->work_lock);
2092 if (adapter->open_device_map & PORT_MASK)
2093 schedule_chk_task(adapter);
2094 spin_unlock(&adapter->work_lock);
2098 * Processes external (PHY) interrupts in process context.
2100 static void ext_intr_task(struct work_struct *work)
2102 struct adapter *adapter = container_of(work, struct adapter,
2103 ext_intr_handler_task);
2105 t3_phy_intr_handler(adapter);
2107 /* Now reenable external interrupts */
2108 spin_lock_irq(&adapter->work_lock);
2109 if (adapter->slow_intr_mask) {
2110 adapter->slow_intr_mask |= F_T3DBG;
2111 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2112 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2113 adapter->slow_intr_mask);
2115 spin_unlock_irq(&adapter->work_lock);
2119 * Interrupt-context handler for external (PHY) interrupts.
2121 void t3_os_ext_intr_handler(struct adapter *adapter)
2124 * Schedule a task to handle external interrupts as they may be slow
2125 * and we use a mutex to protect MDIO registers. We disable PHY
 * interrupts in the meantime and let the task reenable them when
 * it's done.
2129 spin_lock(&adapter->work_lock);
2130 if (adapter->slow_intr_mask) {
2131 adapter->slow_intr_mask &= ~F_T3DBG;
2132 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2133 adapter->slow_intr_mask);
2134 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2136 spin_unlock(&adapter->work_lock);
2139 void t3_fatal_err(struct adapter *adapter)
2141 unsigned int fw_status[4];
2143 if (adapter->flags & FULL_INIT_DONE) {
2144 t3_sge_stop(adapter);
2145 t3_intr_disable(adapter);
2147 CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2148 if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2149 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2150 fw_status[0], fw_status[1],
2151 fw_status[2], fw_status[3]);
2155 static int __devinit cxgb_enable_msix(struct adapter *adap)
2157 struct msix_entry entries[SGE_QSETS + 1];
2160 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2161 entries[i].entry = i;
2163 err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
2165 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2166 adap->msix_info[i].vec = entries[i].vector;
2168 dev_info(&adap->pdev->dev,
2169 "only %d MSI-X vectors left, not using MSI-X\n", err);
2173 static void __devinit print_port_info(struct adapter *adap,
2174 const struct adapter_info *ai)
2176 static const char *pci_variant[] = {
2177 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
2184 snprintf(buf, sizeof(buf), "%s x%d",
2185 pci_variant[adap->params.pci.variant],
2186 adap->params.pci.width);
2188 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2189 pci_variant[adap->params.pci.variant],
2190 adap->params.pci.speed, adap->params.pci.width);
2192 for_each_port(adap, i) {
2193 struct net_device *dev = adap->port[i];
2194 const struct port_info *pi = netdev_priv(dev);
2196 if (!test_bit(i, &adap->registered_device_map))
2198 printk(KERN_INFO "%s: %s %s RNIC (rev %d) %s%s\n",
2199 dev->name, ai->desc, pi->port_type->desc,
2200 adap->params.rev, buf,
2201 (adap->flags & USING_MSIX) ? " MSI-X" :
2202 (adap->flags & USING_MSI) ? " MSI" : "");
2203 if (adap->name == dev->name && adap->params.vpd.mclk)
2204 printk(KERN_INFO "%s: %uMB CM, %uMB PMTX, %uMB PMRX\n",
2205 adap->name, t3_mc7_size(&adap->cm) >> 20,
2206 t3_mc7_size(&adap->pmtx) >> 20,
2207 t3_mc7_size(&adap->pmrx) >> 20);
2211 static int __devinit init_one(struct pci_dev *pdev,
2212 const struct pci_device_id *ent)
2214 static int version_printed;
2216 int i, err, pci_using_dac = 0;
2217 unsigned long mmio_start, mmio_len;
2218 const struct adapter_info *ai;
2219 struct adapter *adapter = NULL;
2220 struct port_info *pi;
2222 if (!version_printed) {
2223 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
2228 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
2230 printk(KERN_ERR DRV_NAME
2231 ": cannot initialize work queue\n");
2236 err = pci_request_regions(pdev, DRV_NAME);
2238 /* Just info, some other driver may have claimed the device. */
2239 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
2243 err = pci_enable_device(pdev);
2245 dev_err(&pdev->dev, "cannot enable PCI device\n");
2246 goto out_release_regions;
2249 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2251 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2253 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
2254 "coherent allocations\n");
2255 goto out_disable_device;
2257 } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
2258 dev_err(&pdev->dev, "no usable DMA configuration\n");
2259 goto out_disable_device;
2262 pci_set_master(pdev);
2264 mmio_start = pci_resource_start(pdev, 0);
2265 mmio_len = pci_resource_len(pdev, 0);
2266 ai = t3_get_adapter_info(ent->driver_data);
2268 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2271 goto out_disable_device;
2274 adapter->regs = ioremap_nocache(mmio_start, mmio_len);
2275 if (!adapter->regs) {
2276 dev_err(&pdev->dev, "cannot map device registers\n");
2278 goto out_free_adapter;
2281 adapter->pdev = pdev;
2282 adapter->name = pci_name(pdev);
2283 adapter->msg_enable = dflt_msg_enable;
2284 adapter->mmio_len = mmio_len;
2286 mutex_init(&adapter->mdio_lock);
2287 spin_lock_init(&adapter->work_lock);
2288 spin_lock_init(&adapter->stats_lock);
2290 INIT_LIST_HEAD(&adapter->adapter_list);
2291 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
2292 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
2294 for (i = 0; i < ai->nports; ++i) {
2295 struct net_device *netdev;
2297 netdev = alloc_etherdev(sizeof(struct port_info));
2303 SET_MODULE_OWNER(netdev);
2304 SET_NETDEV_DEV(netdev, &pdev->dev);
2306 adapter->port[i] = netdev;
2307 pi = netdev_priv(netdev);
2308 pi->rx_csum_offload = 1;
2313 netif_carrier_off(netdev);
2314 netdev->irq = pdev->irq;
2315 netdev->mem_start = mmio_start;
2316 netdev->mem_end = mmio_start + mmio_len - 1;
2317 netdev->priv = adapter;
2318 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
2319 netdev->features |= NETIF_F_LLTX;
2321 netdev->features |= NETIF_F_HIGHDMA;
2323 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2324 netdev->vlan_rx_register = vlan_rx_register;
2325 netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;
2327 netdev->open = cxgb_open;
2328 netdev->stop = cxgb_close;
2329 netdev->hard_start_xmit = t3_eth_xmit;
2330 netdev->get_stats = cxgb_get_stats;
2331 netdev->set_multicast_list = cxgb_set_rxmode;
2332 netdev->do_ioctl = cxgb_ioctl;
2333 netdev->change_mtu = cxgb_change_mtu;
2334 netdev->set_mac_address = cxgb_set_mac_addr;
2335 #ifdef CONFIG_NET_POLL_CONTROLLER
2336 netdev->poll_controller = cxgb_netpoll;
2338 netdev->weight = 64;
2340 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
2343 pci_set_drvdata(pdev, adapter->port[0]);
2344 if (t3_prep_adapter(adapter, ai, 1) < 0) {
2350 * The card is now ready to go. If any errors occur during device
2351 * registration we do not fail the whole card but rather proceed only
2352 * with the ports we manage to register successfully. However we must
2353 * register at least one net device.
2355 for_each_port(adapter, i) {
2356 err = register_netdev(adapter->port[i]);
2358 dev_warn(&pdev->dev,
2359 "cannot register net device %s, skipping\n",
2360 adapter->port[i]->name);
2363 * Change the name we use for messages to the name of
2364 * the first successfully registered interface.
2366 if (!adapter->registered_device_map)
2367 adapter->name = adapter->port[i]->name;
2369 __set_bit(i, &adapter->registered_device_map);
2372 if (!adapter->registered_device_map) {
2373 dev_err(&pdev->dev, "could not register any net devices\n");
2377 /* Driver's ready. Reflect it on LEDs */
2378 t3_led_ready(adapter);
2380 if (is_offload(adapter)) {
2381 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
2382 cxgb3_adapter_ofld(adapter);
2385 /* See what interrupts we'll be using */
2386 if (msi > 1 && cxgb_enable_msix(adapter) == 0)
2387 adapter->flags |= USING_MSIX;
2388 else if (msi > 0 && pci_enable_msi(pdev) == 0)
2389 adapter->flags |= USING_MSI;
2391 err = sysfs_create_group(&adapter->port[0]->dev.kobj,
2394 print_port_info(adapter, ai);
2398 iounmap(adapter->regs);
2399 for (i = ai->nports - 1; i >= 0; --i)
2400 if (adapter->port[i])
2401 free_netdev(adapter->port[i]);
2407 pci_disable_device(pdev);
2408 out_release_regions:
2409 pci_release_regions(pdev);
2410 pci_set_drvdata(pdev, NULL);
2414 static void __devexit remove_one(struct pci_dev *pdev)
2416 struct net_device *dev = pci_get_drvdata(pdev);
2420 struct adapter *adapter = dev->priv;
2422 t3_sge_stop(adapter);
2423 sysfs_remove_group(&adapter->port[0]->dev.kobj,
2426 for_each_port(adapter, i)
2427 if (test_bit(i, &adapter->registered_device_map))
2428 unregister_netdev(adapter->port[i]);
2430 if (is_offload(adapter)) {
2431 cxgb3_adapter_unofld(adapter);
2432 if (test_bit(OFFLOAD_DEVMAP_BIT,
2433 &adapter->open_device_map))
2434 offload_close(&adapter->tdev);
2437 t3_free_sge_resources(adapter);
2438 cxgb_disable_msi(adapter);
2440 for (i = 0; i < ARRAY_SIZE(adapter->dummy_netdev); i++)
2441 if (adapter->dummy_netdev[i]) {
2442 free_netdev(adapter->dummy_netdev[i]);
2443 adapter->dummy_netdev[i] = NULL;
2446 for_each_port(adapter, i)
2447 if (adapter->port[i])
2448 free_netdev(adapter->port[i]);
2450 iounmap(adapter->regs);
2452 pci_release_regions(pdev);
2453 pci_disable_device(pdev);
2454 pci_set_drvdata(pdev, NULL);
2458 static struct pci_driver driver = {
2460 .id_table = cxgb3_pci_tbl,
2462 .remove = __devexit_p(remove_one),
2465 static int __init cxgb3_init_module(void)
2469 cxgb3_offload_init();
2471 ret = pci_register_driver(&driver);
2475 static void __exit cxgb3_cleanup_module(void)
2477 pci_unregister_driver(&driver);
2479 destroy_workqueue(cxgb3_wq);
2482 module_init(cxgb3_init_module);
2483 module_exit(cxgb3_cleanup_module);