/*
 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"
#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"
enum {
	MAX_TXQ_ENTRIES = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES = 16384,
	MAX_RX_BUFFERS = 16384,
	MAX_RX_JUMBO_BUFFERS = 16384,
	MIN_TXQ_ENTRIES = 4,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES = 32,
	MIN_FL_ENTRIES = 32
};
#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C
#define CH_DEVICE(devid, ssid, idx) \
	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
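/*
 * Each table entry maps a (device id, subsystem id) pair to an index into
 * the adapter_info table; init_one() hands that index (ent->driver_data)
 * to t3_get_adapter_info() to look up the board description.
 */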
static const struct pci_device_id cxgb3_pci_tbl[] = {
	CH_DEVICE(0x20, 1, 0),	/* PE9000 */
	CH_DEVICE(0x21, 1, 1),	/* T302E */
	CH_DEVICE(0x22, 1, 2),	/* T310E */
	CH_DEVICE(0x23, 1, 3),	/* T320X */
	CH_DEVICE(0x24, 1, 1),	/* T302X */
	CH_DEVICE(0x25, 1, 3),	/* T320E */
	CH_DEVICE(0x26, 1, 2),	/* T310X */
	CH_DEVICE(0x30, 1, 2),	/* T3B10 */
	CH_DEVICE(0x31, 1, 3),	/* T3B20 */
	CH_DEVICE(0x32, 1, 1),	/* T3B02 */
	{0,}
};
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
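/*
 * Example: loading the driver with "modprobe cxgb3 msi=1" allows MSI and
 * legacy pin interrupts but keeps MSI-X off; msi=0 forces pin interrupts.
 */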
/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */
static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
static struct workqueue_struct *cxgb3_wq;
/**
 *	link_report - show link status and link speed/duplex
 *	@dev: the net device whose port settings are to be reported
 *
 *	Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		printk(KERN_INFO "%s: link down\n", dev->name);
	else {
		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_config.speed) {
		case SPEED_10000:
			s = "10Gbps";
			break;
		case SPEED_1000:
			s = "1Gbps";
			break;
		case SPEED_100:
			s = "100Mbps";
			break;
		}

		printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
		       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
	}
}
/**
 *	t3_os_link_changed - handle link status changes
 *	@adapter: the adapter associated with the link change
 *	@port_id: the port index whose link status has changed
 *	@link_stat: the new status of the link
 *	@speed: the new speed setting
 *	@duplex: the new duplex setting
 *	@pause: the new flow-control setting
 *
 *	This is the OS-dependent handler for link status changes.  The OS
 *	neutral handler takes care of most of the processing for these events,
 *	then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
{
	struct net_device *dev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	/* Skip changes from disabled ports. */
	if (!netif_running(dev))
		return;

	if (link_stat != netif_carrier_ok(dev)) {
		if (link_stat) {
			t3_mac_enable(mac, MAC_DIRECTION_RX);
			netif_carrier_on(dev);
		} else {
			netif_carrier_off(dev);
			pi->phy.ops->power_down(&pi->phy, 1);
			t3_mac_disable(mac, MAC_DIRECTION_RX);
			t3_link_start(&pi->phy, mac, &pi->link_config);
		}
		link_report(dev);
	}
}
static void cxgb_set_rxmode(struct net_device *dev)
{
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);

	init_rx_mode(&rm, dev, dev->mc_list);
	t3_mac_set_rx_mode(&pi->mac, &rm);
}
/**
 *	link_start - enable a port
 *	@dev: the device to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	init_rx_mode(&rm, dev, dev->mc_list);
	t3_mac_reset(mac);
	t3_mac_set_mtu(mac, dev->mtu);
	t3_mac_set_address(mac, 0, dev->dev_addr);
	t3_mac_set_rx_mode(mac, &rm);
	t3_link_start(&pi->phy, mac, &pi->link_config);
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}
static inline void cxgb_disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}
/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
	t3_slow_intr_handler(cookie);
	return IRQ_HANDLED;
}
/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

	snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
	adap->msix_info[0].desc[n] = 0;

	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++) {
			snprintf(adap->msix_info[msi_idx].desc, n,
				 "%s (queue %d)", d->name, i);
			adap->msix_info[msi_idx].desc[n] = 0;
		}
	}
}
static int request_msix_data_irqs(struct adapter *adap)
{
	int i, j, err, qidx = 0;

	for_each_port(adap, i) {
		int nqsets = adap2pinfo(adap, i)->nqsets;

		for (j = 0; j < nqsets; ++j) {
			err = request_irq(adap->msix_info[qidx + 1].vec,
					  t3_intr_handler(adap,
							  adap->sge.qs[qidx].
							  rspq.polling), 0,
					  adap->msix_info[qidx + 1].desc,
					  &adap->sge.qs[qidx]);
			if (err) {
				while (--qidx >= 0)
					free_irq(adap->msix_info[qidx + 1].vec,
						 &adap->sge.qs[qidx]);
				return err;
			}
			qidx++;
		}
	}
	return 0;
}
/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS to distribute packets to multiple receive queues.  We
 *	configure the RSS CPU lookup table to distribute to the number of HW
 *	receive queues, and the response queue lookup table to narrow that
 *	down to the response queues actually configured for each port.
 *	We always configure the RSS mapping for two ports since the mapping
 *	table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
	int i;
	unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
	unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
	u8 cpus[SGE_QSETS + 1];
	u16 rspq_map[RSS_TABLE_SIZE];

	for (i = 0; i < SGE_QSETS; ++i)
		cpus[i] = i;
	cpus[SGE_QSETS] = 0xff;	/* terminator */

	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
		rspq_map[i] = i % nq0;
		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
	}

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
		      V_RRCPLCPUSIZE(6), cpus, rspq_map);
}
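/*
 * Worked example: with nq0 = 2 and nq1 = 2 the first half of rspq_map
 * alternates 0,1,0,1,... (port 0's queue sets) and the second half
 * alternates 2,3,2,3,... (port 1's queue sets, offset by nq0).
 */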
static void init_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++) {
		struct sge_qset *qs = &adap->sge.qs[i];

		if (qs->adap)
			netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
				       64);
	}
}
/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_disable(&adap->sge.qs[i].napi);
}

static void enable_all_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_enable(&adap->sge.qs[i].napi);
}
/**
 *	setup_sge_qsets - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
	int i, j, err, irq_idx = 0, qset_idx = 0;
	unsigned int ntxq = SGE_TXQ_PER_SET;

	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
		irq_idx = -1;

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);

		pi->qs = &adap->sge.qs[pi->first_qset];
		for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
			err = t3_sge_alloc_qset(adap, qset_idx, 1,
				(adap->flags & USING_MSIX) ? qset_idx + 1 :
					irq_idx,
				&adap->params.sge.qset[qset_idx], ntxq, dev);
			if (err) {
				t3_free_sge_resources(adap);
				return err;
			}
		}
	}

	return 0;
}
static ssize_t attr_show(struct device *d, struct device_attribute *attr,
			 char *buf,
			 ssize_t(*format) (struct net_device *, char *))
{
	ssize_t len;

	/* Synchronize with ioctls that may shut down the device */
	rtnl_lock();
	len = (*format) (to_net_dev(d), buf);
	rtnl_unlock();
	return len;
}

static ssize_t attr_store(struct device *d, struct device_attribute *attr,
			  const char *buf, size_t len,
			  ssize_t(*set) (struct net_device *, unsigned int),
			  unsigned int min_val, unsigned int max_val)
{
	char *endp;
	ssize_t ret;
	unsigned int val;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val < min_val || val > max_val)
		return -EINVAL;

	rtnl_lock();
	ret = (*set) (to_net_dev(d), val);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}
#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
	struct port_info *pi = netdev_priv(dev); \
	struct adapter *adap = pi->adapter; \
	return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return attr_show(d, attr, buf, format_##name); \
}
static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val && adap->params.rev == 0)
		return -EINVAL;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	    min_tids)
		return -EINVAL;
	adap->params.mc5.nfilters = val;
	return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, attr, buf, len, set_nfilters, 0, ~0);
}
static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
	    MC5_MIN_TIDS)
		return -EINVAL;
	adap->params.mc5.nservers = val;
	return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, attr, buf, len, set_nservers, 0, ~0);
}
#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
	&dev_attr_cam_size.attr,
	&dev_attr_nfilters.attr,
	&dev_attr_nservers.attr,
	NULL
};

static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
static ssize_t tm_attr_show(struct device *d, struct device_attribute *attr,
			    char *buf, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int v, addr, bpt, cpt;
	ssize_t len;

	addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
	rtnl_lock();
	t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v >>= 16;
	bpt = (v >> 8) & 0xff;
	cpt = v & 0xff;
	if (!cpt)
		len = sprintf(buf, "disabled\n");
	else {
		v = (adap->params.vpd.cclk * 1000) / cpt;
		len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
	}
	rtnl_unlock();
	return len;
}
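/*
 * Each 32-bit rate-limit register holds two schedulers: sched/2 selects the
 * register below A_TP_TX_MOD_Q1_Q0_RATE_LIMIT and odd-numbered schedulers
 * live in the upper 16 bits, hence the "v >>= 16" above.
 */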
static ssize_t tm_attr_store(struct device *d, struct device_attribute *attr,
			     const char *buf, size_t len, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int val;
	char *endp;
	ssize_t ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val > 10000000)
		return -EINVAL;

	rtnl_lock();
	ret = t3_config_sched(adap, val, sched);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return tm_attr_show(d, attr, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
			    const char *buf, size_t len) \
{ \
	return tm_attr_store(d, attr, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);
static struct attribute *offload_attrs[] = {
	&dev_attr_sched0.attr,
	&dev_attr_sched1.attr,
	&dev_attr_sched2.attr,
	&dev_attr_sched3.attr,
	&dev_attr_sched4.attr,
	&dev_attr_sched5.attr,
	&dev_attr_sched6.attr,
	&dev_attr_sched7.attr,
	NULL
};

static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = t3_offload_tx(tdev, skb);
	local_bh_enable();
	return ret;
}
static int write_smt_entry(struct adapter *adapter, int idx)
{
	struct cpl_smt_write_req *req;
	struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;

	req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
	req->mtu_idx = NMTUS - 1;	/* should be 0 but there's a T3 bug */
	req->iff = idx;
	memset(req->src_mac1, 0, sizeof(req->src_mac1));
	memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
	skb->priority = 1;
	offload_tx(&adapter->tdev, skb);
	return 0;
}
static int init_smt(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i)
	    write_smt_entry(adapter, i);
	return 0;
}

static void init_port_mtus(struct adapter *adapter)
{
	unsigned int mtus = adapter->port[0]->mtu;

	if (adapter->port[1])
		mtus |= adapter->port[1]->mtu << 16;
	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}
static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
			      int hi, int port)
{
	struct sk_buff *skb;
	struct mngt_pktsched_wr *req;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
	req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
	req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
	req->sched = sched;
	req->idx = qidx;
	req->min = lo;
	req->max = hi;
	req->binding = port;
	t3_mgmt_tx(adap, skb);
}

static void bind_qsets(struct adapter *adap)
{
	int i, j;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		for (j = 0; j < pi->nqsets; ++j)
			send_pktsched_cmd(adap, 1, pi->first_qset + j, -1,
					  -1, i);
	}
}
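/*
 * Note: bind_qsets() issues one PKTSCHED_SET work request per queue set,
 * binding each of a port's qsets to that port on scheduler 1; the -1
 * min/max arguments apparently mean "leave those limits unchanged".
 */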
#define FW_FNAME "t3fw-%d.%d.%d.bin"
#define TPSRAM_NAME "t3%c_protocol_sram-%d.%d.%d.bin"

static int upgrade_fw(struct adapter *adap)
{
	int ret;
	char buf[64];
	const struct firmware *fw;
	struct device *dev = &adap->pdev->dev;

	snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
		 FW_VERSION_MINOR, FW_VERSION_MICRO);
	ret = request_firmware(&fw, buf, dev);
	if (ret < 0) {
		dev_err(dev, "could not upgrade firmware: unable to load %s\n",
			buf);
		return ret;
	}
	ret = t3_load_fw(adap, fw->data, fw->size);
	release_firmware(fw);

	if (ret == 0)
		dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
			 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
	else
		dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
			FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

	return ret;
}
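/*
 * The firmware image is fetched by name through the standard firmware
 * loader; for example, a build with FW_VERSION 4.0.0 would request
 * "t3fw-4.0.0.bin" (the version numbers here are illustrative, the real
 * ones come from firmware_exports.h).
 */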
static inline char t3rev2char(struct adapter *adapter)
{
	char rev = 0;

	switch(adapter->params.rev) {
	case T3_REV_B:
	case T3_REV_B2:
		rev = 'b';
		break;
	}
	return rev;
}
static int update_tpsram(struct adapter *adap)
{
	const struct firmware *tpsram;
	char buf[64];
	struct device *dev = &adap->pdev->dev;
	int ret;
	char rev;

	rev = t3rev2char(adap);
	if (!rev)
		return 0;

	snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
		 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);

	ret = request_firmware(&tpsram, buf, dev);
	if (ret < 0) {
		dev_err(dev, "could not load TP SRAM: unable to load %s\n",
			buf);
		return ret;
	}

	ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
	if (ret)
		goto release_tpsram;

	ret = t3_set_proto_sram(adap, tpsram->data);
	if (ret == 0)
		dev_info(dev,
			 "successful update of protocol engine "
			 "to %d.%d.%d\n",
			 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	else
		dev_err(dev, "failed update of protocol engine %d.%d.%d\n",
			TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	if (ret)
		dev_err(dev, "loading protocol SRAM failed\n");

release_tpsram:
	release_firmware(tpsram);

	return ret;
}
/**
 *	cxgb_up - enable the adapter
 *	@adap: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err = 0;
	int must_load;

	if (!(adap->flags & FULL_INIT_DONE)) {
		err = t3_check_fw_version(adap, &must_load);
		if (err == -EINVAL) {
			err = upgrade_fw(adap);
			if (err && must_load)
				goto out;
		}

		err = t3_check_tpsram_version(adap, &must_load);
		if (err == -EINVAL) {
			err = update_tpsram(adap);
			if (err && must_load)
				goto out;
		}

		err = t3_init_hw(adap, 0);
		if (err)
			goto out;

		t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

		err = setup_sge_qsets(adap);
		if (err)
			goto out;

		setup_rss(adap);
		init_napi(adap);
		adap->flags |= FULL_INIT_DONE;
	}

	t3_intr_clear(adap);

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec,
				  t3_async_intr_handler, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		if (request_msix_data_irqs(adap)) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else if ((err = request_irq(adap->pdev->irq,
				      t3_intr_handler(adap,
						      adap->sge.qs[0].rspq.
						      polling),
				      (adap->flags & USING_MSI) ?
				       0 : IRQF_SHARED,
				      adap->name, adap)))
		goto irq_err;

	enable_all_napi(adap);
	t3_sge_start(adap);
	t3_intr_enable(adap);

	if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
		bind_qsets(adap);
	adap->flags |= QUEUES_BOUND;

out:
	return err;
irq_err:
	CH_ERR(adap, "request_irq failed, err %d\n", err);
	goto out;
}
/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
	t3_sge_stop(adapter);
	spin_lock_irq(&adapter->work_lock);	/* sync with PHY intr task */
	t3_intr_disable(adapter);
	spin_unlock_irq(&adapter->work_lock);

	if (adapter->flags & USING_MSIX) {
		int i, n = 0;

		free_irq(adapter->msix_info[0].vec, adapter);
		for_each_port(adapter, i)
		    n += adap2pinfo(adapter, i)->nqsets;

		for (i = 0; i < n; ++i)
			free_irq(adapter->msix_info[i + 1].vec,
				 &adapter->sge.qs[i]);
	} else
		free_irq(adapter->pdev->irq, adapter);

	flush_workqueue(cxgb3_wq);	/* wait for external IRQ handler */
	quiesce_rx(adapter);
}
static void schedule_chk_task(struct adapter *adap)
{
	unsigned int timeo;

	timeo = adap->params.linkpoll_period ?
	    (HZ * adap->params.linkpoll_period) / 10 :
	    adap->params.stats_update_period * HZ;
	if (timeo)
		queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}
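/*
 * linkpoll_period is in tenths of a second (hence HZ * period / 10); when
 * link polling is disabled the check task runs at the slower
 * stats_update_period, which is in seconds.
 */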
static int offload_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct t3cdev *tdev = dev2t3cdev(dev);
	int adap_up = adapter->open_device_map & PORT_MASK;
	int err = 0;

	if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	if (!adap_up && (err = cxgb_up(adapter)) < 0)
		return err;

	t3_tp_set_offload_mode(adapter, 1);
	tdev->lldev = adapter->port[0];
	err = cxgb3_offload_activate(adapter);
	if (err)
		goto out;

	init_port_mtus(adapter);
	t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
		     adapter->params.b_wnd,
		     adapter->params.rev == 0 ?
		     adapter->port[0]->mtu : 0xffff);
	init_smt(adapter);

	/* Never mind if the next step fails */
	sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group);

	/* Call back all registered clients */
	cxgb3_add_clients(tdev);

out:
	/* restore them in case the offload module has changed them */
	if (err) {
		t3_tp_set_offload_mode(adapter, 0);
		clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
		cxgb3_set_dummy_ops(tdev);
	}
	return err;
}
static int offload_close(struct t3cdev *tdev)
{
	struct adapter *adapter = tdev2adap(tdev);

	if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	/* Call back all registered clients */
	cxgb3_remove_clients(tdev);

	sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

	tdev->lldev = NULL;
	cxgb3_set_dummy_ops(tdev);
	t3_tp_set_offload_mode(adapter, 0);
	clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	cxgb3_offload_deactivate(adapter);
	return 0;
}
static int cxgb_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int other_ports = adapter->open_device_map & PORT_MASK;
	int err;

	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) {
		quiesce_rx(adapter);
		return err;
	}

	set_bit(pi->port_id, &adapter->open_device_map);
	if (is_offload(adapter) && !ofld_disable) {
		err = offload_open(dev);
		if (err)
			printk(KERN_WARNING
			       "Could not initialize offload capabilities\n");
	}

	link_start(dev);
	t3_port_intr_enable(adapter, pi->port_id);
	netif_start_queue(dev);
	if (!other_ports)
		schedule_chk_task(adapter);

	return 0;
}
static int cxgb_close(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	t3_port_intr_disable(adapter, pi->port_id);
	netif_stop_queue(dev);
	pi->phy.ops->power_down(&pi->phy, 1);
	netif_carrier_off(dev);
	t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

	spin_lock(&adapter->work_lock);	/* sync with update task */
	clear_bit(pi->port_id, &adapter->open_device_map);
	spin_unlock(&adapter->work_lock);

	if (!(adapter->open_device_map & PORT_MASK))
		cancel_rearming_delayed_workqueue(cxgb3_wq,
						  &adapter->adap_check_task);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	return 0;
}
static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct net_device_stats *ns = &pi->netstats;
	const struct mac_stats *pstats;

	spin_lock(&adapter->stats_lock);
	pstats = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes = pstats->tx_octets;
	ns->tx_packets = pstats->tx_frames;
	ns->rx_bytes = pstats->rx_octets;
	ns->rx_packets = pstats->rx_frames;
	ns->multicast = pstats->rx_mcast_frames;

	ns->tx_errors = pstats->tx_underrun;
	ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
	    pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
	    pstats->rx_fifo_ovfl;

	/* detailed rx_errors */
	ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
	ns->rx_over_errors = 0;
	ns->rx_crc_errors = pstats->rx_fcs_errs;
	ns->rx_frame_errors = pstats->rx_symbol_errs;
	ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
	ns->rx_missed_errors = pstats->rx_cong_drops;

	/* detailed tx_errors */
	ns->tx_aborted_errors = 0;
	ns->tx_carrier_errors = 0;
	ns->tx_fifo_errors = pstats->tx_underrun;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors = 0;
	return ns;
}
static u32 get_msglevel(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	adapter->msg_enable = val;
}
static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK         ",
	"TxFramesOK         ",
	"TxMulticastFramesOK",
	"TxBroadcastFramesOK",
	"TxPauseFrames      ",
	"TxUnderrun         ",
	"TxExtUnderrun      ",

	"TxFrames64         ",
	"TxFrames65To127    ",
	"TxFrames128To255   ",
	"TxFrames256To511   ",
	"TxFrames512To1023  ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax  ",

	"RxOctetsOK         ",
	"RxFramesOK         ",
	"RxMulticastFramesOK",
	"RxBroadcastFramesOK",
	"RxPauseFrames      ",
	"RxFCSErrors        ",
	"RxSymbolErrors     ",
	"RxShortErrors      ",
	"RxJabberErrors     ",
	"RxLengthErrors     ",
	"RxFIFOoverflow     ",

	"RxFrames64         ",
	"RxFrames65To127    ",
	"RxFrames128To255   ",
	"RxFrames256To511   ",
	"RxFrames512To1023  ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax  ",

	"PhyFIFOErrors      ",
	"TSO                ",
	"VLANextractions    ",
	"VLANinsertions     ",
	"TxCsumOffload      ",
	"RxCsumGood         ",
	"RxDrops            ",

	"CheckTXEnToggled   ",
	"CheckResets        ",
};
static int get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(stats_strings);
	default:
		return -EOPNOTSUPP;
	}
}

#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
	return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 fw_vers = 0;
	u32 tp_vers = 0;

	t3_get_fw_version(adapter, &fw_vers);
	t3_get_tp_version(adapter, &tp_vers);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(adapter->pdev));
	if (!fw_vers)
		strcpy(info->fw_version, "N/A");
	else {
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "%s %u.%u.%u TP %u.%u.%u",
			 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
			 G_FW_VERSION_MAJOR(fw_vers),
			 G_FW_VERSION_MINOR(fw_vers),
			 G_FW_VERSION_MICRO(fw_vers),
			 G_TP_VERSION_MAJOR(tp_vers),
			 G_TP_VERSION_MINOR(tp_vers),
			 G_TP_VERSION_MICRO(tp_vers));
	}
}
static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}

static unsigned long collect_sge_port_stats(struct adapter *adapter,
					    struct port_info *p, int idx)
{
	int i;
	unsigned long tot = 0;

	for (i = 0; i < p->nqsets; ++i)
		tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
	return tot;
}
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	const struct mac_stats *s;

	spin_lock(&adapter->stats_lock);
	s = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	*data++ = s->tx_octets;
	*data++ = s->tx_frames;
	*data++ = s->tx_mcast_frames;
	*data++ = s->tx_bcast_frames;
	*data++ = s->tx_pause;
	*data++ = s->tx_underrun;
	*data++ = s->tx_fifo_urun;

	*data++ = s->tx_frames_64;
	*data++ = s->tx_frames_65_127;
	*data++ = s->tx_frames_128_255;
	*data++ = s->tx_frames_256_511;
	*data++ = s->tx_frames_512_1023;
	*data++ = s->tx_frames_1024_1518;
	*data++ = s->tx_frames_1519_max;

	*data++ = s->rx_octets;
	*data++ = s->rx_frames;
	*data++ = s->rx_mcast_frames;
	*data++ = s->rx_bcast_frames;
	*data++ = s->rx_pause;
	*data++ = s->rx_fcs_errs;
	*data++ = s->rx_symbol_errs;
	*data++ = s->rx_short;
	*data++ = s->rx_jabber;
	*data++ = s->rx_too_long;
	*data++ = s->rx_fifo_ovfl;

	*data++ = s->rx_frames_64;
	*data++ = s->rx_frames_65_127;
	*data++ = s->rx_frames_128_255;
	*data++ = s->rx_frames_256_511;
	*data++ = s->rx_frames_512_1023;
	*data++ = s->rx_frames_1024_1518;
	*data++ = s->rx_frames_1519_max;

	*data++ = pi->phy.fifo_errors;

	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
	*data++ = s->rx_cong_drops;

	*data++ = s->num_toggled;
	*data++ = s->num_resets;
}
static inline void reg_block_dump(struct adapter *ap, void *buf,
				  unsigned int start, unsigned int end)
{
	u32 *p = buf + start;

	for (; start <= end; start += sizeof(u32))
		*p++ = t3_read_reg(ap, start);
}
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *ap = pi->adapter;

	/*
	 * Version scheme:
	 * bits 0..9: chip version
	 * bits 10..15: chip revision
	 * bit 31: set for PCIe cards
	 */
	regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);

	/*
	 * We skip the MAC statistics registers because they are clear-on-read.
	 * Also reading multi-register stats would need to synchronize with the
	 * periodic mac stats accumulation.  Hard to justify the complexity.
	 */
	memset(buf, 0, T3_REGMAP_SIZE);
	reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
	reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
	reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
	reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
	reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
	reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
		       XGM_REG(A_XGM_SERDES_STAT3, 1));
	reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
		       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}
static int restart_autoneg(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;
	if (p->link_config.autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	p->phy.ops->autoneg_restart(&p->phy);
	return 0;
}

static int cxgb3_phys_id(struct net_device *dev, u32 data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int i;

	if (data == 0)
		data = 2;

	for (i = 0; i < data * 2; i++) {
		t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
				 (i & 1) ? F_GPIO0_OUT_VAL : 0);
		if (msleep_interruptible(500))
			break;
	}
	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
			 F_GPIO0_OUT_VAL);
	return 0;
}
static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port_info *p = netdev_priv(dev);

	cmd->supported = p->link_config.supported;
	cmd->advertising = p->link_config.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = p->link_config.speed;
		cmd->duplex = p->link_config.duplex;
	} else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
	cmd->phy_address = p->phy.addr;
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->autoneg = p->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
static int speed_duplex_to_caps(int speed, int duplex)
{
	int cap = 0;

	switch (speed) {
	case SPEED_10:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10baseT_Full;
		else
			cap = SUPPORTED_10baseT_Half;
		break;
	case SPEED_100:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_100baseT_Full;
		else
			cap = SUPPORTED_100baseT_Half;
		break;
	case SPEED_1000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_1000baseT_Full;
		else
			cap = SUPPORTED_1000baseT_Half;
		break;
	case SPEED_10000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10000baseT_Full;
	}
	return cap;
}
#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
		      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
		      ADVERTISED_10000baseT_Full)
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (!(lc->supported & SUPPORTED_Autoneg))
		return -EOPNOTSUPP;	/* can't change speed/duplex */

	if (cmd->autoneg == AUTONEG_DISABLE) {
		int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

		if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
			return -EINVAL;
		lc->requested_speed = cmd->speed;
		lc->requested_duplex = cmd->duplex;
		lc->advertising = 0;
	} else {
		cmd->advertising &= ADVERTISED_MASK;
		cmd->advertising &= lc->supported;
		if (!cmd->advertising)
			return -EINVAL;
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->autoneg;
	if (netif_running(dev))
		t3_link_start(&p->phy, &p->mac, lc);
	return 0;
}
static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);

	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & SUPPORTED_Autoneg)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (lc->autoneg == AUTONEG_ENABLE) {
		if (netif_running(dev))
			t3_link_start(&p->phy, &p->mac, lc);
	} else {
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		if (netif_running(dev))
			t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
	}
	return 0;
}
static u32 get_rx_csum(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	return p->rx_csum_offload;
}

static int set_rx_csum(struct net_device *dev, u32 data)
{
	struct port_info *p = netdev_priv(dev);

	p->rx_csum_offload = data;
	return 0;
}
static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_mini_max_pending = 0;
	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
	e->tx_max_pending = MAX_TXQ_ENTRIES;

	e->rx_pending = q->fl_size;
	e->rx_mini_pending = q->rspq_size;
	e->rx_jumbo_pending = q->jumbo_size;
	e->tx_pending = q->txq_size[0];
}

static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *q;
	int i;

	if (e->rx_pending > MAX_RX_BUFFERS ||
	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
	    e->tx_pending > MAX_TXQ_ENTRIES ||
	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES ||
	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
	    e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	q = &adapter->params.sge.qset[pi->first_qset];
	for (i = 0; i < pi->nqsets; ++i, ++q) {
		q->rspq_size = e->rx_mini_pending;
		q->fl_size = e->rx_pending;
		q->jumbo_size = e->rx_jumbo_pending;
		q->txq_size[0] = e->tx_pending;
		q->txq_size[1] = e->tx_pending;
		q->txq_size[2] = e->tx_pending;
	}
	return 0;
}
static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *qsp = &adapter->params.sge.qset[0];
	struct sge_qset *qs = &adapter->sge.qs[0];

	if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
		return -EINVAL;

	qsp->coalesce_usecs = c->rx_coalesce_usecs;
	t3_update_qset_coalesce(qs, qsp);
	return 0;
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *q = adapter->params.sge.qset;

	c->rx_coalesce_usecs = q->coalesce_usecs;
	return 0;
}
static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 * data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int i, err = 0;

	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	e->magic = EEPROM_MAGIC;
	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
		err = t3_seeprom_read(adapter, i, (u32 *) & buf[i]);

	if (!err)
		memcpy(data, buf + e->offset, e->len);
	kfree(buf);
	return err;
}

static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 * data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 aligned_offset, aligned_len, *p;
	u8 *buf;
	int err = 0;

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		buf = kmalloc(aligned_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		err = t3_seeprom_read(adapter, aligned_offset, (u32 *) buf);
		if (!err && aligned_len > 4)
			err = t3_seeprom_read(adapter,
					      aligned_offset + aligned_len - 4,
					      (u32 *) & buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
	} else
		buf = data;

	err = t3_seeprom_wp(adapter, 0);
	if (err)
		goto out;

	for (p = (u32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = t3_seeprom_write(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t3_seeprom_wp(adapter, 1);
out:
	if (buf != data)
		kfree(buf);
	return err;
}
static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}
static const struct ethtool_ops cxgb_ethtool_ops = {
	.get_settings = get_settings,
	.set_settings = set_settings,
	.get_drvinfo = get_drvinfo,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_ringparam = get_sge_param,
	.set_ringparam = set_sge_param,
	.get_coalesce = get_coalesce,
	.set_coalesce = set_coalesce,
	.get_eeprom_len = get_eeprom_len,
	.get_eeprom = get_eeprom,
	.set_eeprom = set_eeprom,
	.get_pauseparam = get_pauseparam,
	.set_pauseparam = set_pauseparam,
	.get_rx_csum = get_rx_csum,
	.set_rx_csum = set_rx_csum,
	.set_tx_csum = ethtool_op_set_tx_csum,
	.set_sg = ethtool_op_set_sg,
	.get_link = ethtool_op_get_link,
	.get_strings = get_strings,
	.phys_id = cxgb3_phys_id,
	.nway_reset = restart_autoneg,
	.get_sset_count = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len = get_regs_len,
	.get_regs = get_regs,
	.get_wol = get_wol,
	.set_tso = ethtool_op_set_tso,
};
static int in_range(int val, int lo, int hi)
{
	return val < 0 || (val <= hi && val >= lo);
}
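/*
 * Negative values pass in_range() by design: the extension ioctls below use
 * -1 to mean "leave this parameter unchanged", so only non-negative values
 * are range-checked.
 */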
static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 cmd;
	int ret;

	if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
		return -EFAULT;

	switch (cmd) {
	case CHELSIO_SET_QSET_PARAMS:{
		int i;
		struct qset_params *q;
		struct ch_qset_params t;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if (t.qset_idx >= SGE_QSETS)
			return -EINVAL;
		if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
			!in_range(t.cong_thres, 0, 255) ||
			!in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
				  MAX_TXQ_ENTRIES) ||
			!in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
				  MAX_TXQ_ENTRIES) ||
			!in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
				  MAX_CTRL_TXQ_ENTRIES) ||
			!in_range(t.fl_size[0], MIN_FL_ENTRIES,
				  MAX_RX_BUFFERS)
			|| !in_range(t.fl_size[1], MIN_FL_ENTRIES,
					MAX_RX_JUMBO_BUFFERS)
			|| !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
					MAX_RSPQ_ENTRIES))
			return -EINVAL;
		if ((adapter->flags & FULL_INIT_DONE) &&
			(t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
			t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
			t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
			t.polling >= 0 || t.cong_thres >= 0))
			return -EBUSY;

		q = &adapter->params.sge.qset[t.qset_idx];

		if (t.rspq_size >= 0)
			q->rspq_size = t.rspq_size;
		if (t.fl_size[0] >= 0)
			q->fl_size = t.fl_size[0];
		if (t.fl_size[1] >= 0)
			q->jumbo_size = t.fl_size[1];
		if (t.txq_size[0] >= 0)
			q->txq_size[0] = t.txq_size[0];
		if (t.txq_size[1] >= 0)
			q->txq_size[1] = t.txq_size[1];
		if (t.txq_size[2] >= 0)
			q->txq_size[2] = t.txq_size[2];
		if (t.cong_thres >= 0)
			q->cong_thres = t.cong_thres;
		if (t.intr_lat >= 0) {
			struct sge_qset *qs =
				&adapter->sge.qs[t.qset_idx];

			q->coalesce_usecs = t.intr_lat;
			t3_update_qset_coalesce(qs, q);
		}
		if (t.polling >= 0) {
			if (adapter->flags & USING_MSIX)
				q->polling = t.polling;
			else {
				/* No polling with INTx for T3A */
				if (adapter->params.rev == 0 &&
				    !(adapter->flags & USING_MSI))
					t.polling = 0;

				for (i = 0; i < SGE_QSETS; i++) {
					q = &adapter->params.sge.
						qset[i];
					q->polling = t.polling;
				}
			}
		}
		break;
	}
	case CHELSIO_GET_QSET_PARAMS:{
		struct qset_params *q;
		struct ch_qset_params t;

		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if (t.qset_idx >= SGE_QSETS)
			return -EINVAL;

		q = &adapter->params.sge.qset[t.qset_idx];
		t.rspq_size = q->rspq_size;
		t.txq_size[0] = q->txq_size[0];
		t.txq_size[1] = q->txq_size[1];
		t.txq_size[2] = q->txq_size[2];
		t.fl_size[0] = q->fl_size;
		t.fl_size[1] = q->jumbo_size;
		t.polling = q->polling;
		t.intr_lat = q->coalesce_usecs;
		t.cong_thres = q->cong_thres;

		if (copy_to_user(useraddr, &t, sizeof(t)))
			return -EFAULT;
		break;
	}
	case CHELSIO_SET_QSET_NUM:{
		struct ch_reg edata;
		unsigned int i, first_qset = 0, other_qsets = 0;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&edata, useraddr, sizeof(edata)))
			return -EFAULT;
		if (edata.val < 1 ||
			(edata.val > 1 && !(adapter->flags & USING_MSIX)))
			return -EINVAL;

		for_each_port(adapter, i)
			if (adapter->port[i] && adapter->port[i] != dev)
				other_qsets += adap2pinfo(adapter, i)->nqsets;

		if (edata.val + other_qsets > SGE_QSETS)
			return -EINVAL;

		pi->nqsets = edata.val;

		for_each_port(adapter, i)
			if (adapter->port[i]) {
				pi = adap2pinfo(adapter, i);
				pi->first_qset = first_qset;
				first_qset += pi->nqsets;
			}
		break;
	}
	case CHELSIO_GET_QSET_NUM:{
		struct ch_reg edata;

		edata.cmd = CHELSIO_GET_QSET_NUM;
		edata.val = pi->nqsets;
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
			return -EFAULT;
		break;
	}
	case CHELSIO_LOAD_FW:{
		u8 *fw_data;
		struct ch_mem_range t;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;

		fw_data = kmalloc(t.len, GFP_KERNEL);
		if (!fw_data)
			return -ENOMEM;

		if (copy_from_user
			(fw_data, useraddr + sizeof(t), t.len)) {
			kfree(fw_data);
			return -EFAULT;
		}

		ret = t3_load_fw(adapter, fw_data, t.len);
		kfree(fw_data);
		if (ret)
			return ret;
		break;
	}
	case CHELSIO_SETMTUTAB:{
		struct ch_mtus m;
		int i;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (offload_running(adapter))
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
		if (m.nmtus != NMTUS)
			return -EINVAL;
		if (m.mtus[0] < 81)	/* accommodate SACK */
			return -EINVAL;

		/* MTUs must be in ascending order */
		for (i = 1; i < NMTUS; ++i)
			if (m.mtus[i] < m.mtus[i - 1])
				return -EINVAL;

		memcpy(adapter->params.mtus, m.mtus,
			sizeof(adapter->params.mtus));
		break;
	}
	case CHELSIO_GET_PM:{
		struct tp_params *p = &adapter->params.tp;
		struct ch_pm m = {.cmd = CHELSIO_GET_PM };

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		m.tx_pg_sz = p->tx_pg_size;
		m.tx_num_pg = p->tx_num_pgs;
		m.rx_pg_sz = p->rx_pg_size;
		m.rx_num_pg = p->rx_num_pgs;
		m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
		if (copy_to_user(useraddr, &m, sizeof(m)))
			return -EFAULT;
		break;
	}
	case CHELSIO_SET_PM:{
		struct ch_pm m;
		struct tp_params *p = &adapter->params.tp;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
		if (!is_power_of_2(m.rx_pg_sz) ||
			!is_power_of_2(m.tx_pg_sz))
			return -EINVAL;	/* not power of 2 */
		if (!(m.rx_pg_sz & 0x14000))
			return -EINVAL;	/* not 16KB or 64KB */
		if (!(m.tx_pg_sz & 0x1554000))
			return -EINVAL;
		if (m.tx_num_pg == -1)
			m.tx_num_pg = p->tx_num_pgs;
		if (m.rx_num_pg == -1)
			m.rx_num_pg = p->rx_num_pgs;
		if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
			return -EINVAL;
		if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
			m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
			return -EINVAL;
		p->rx_pg_size = m.rx_pg_sz;
		p->tx_pg_size = m.tx_pg_sz;
		p->rx_num_pgs = m.rx_num_pg;
		p->tx_num_pgs = m.tx_num_pg;
		break;
	}
	case CHELSIO_GET_MEM:{
		struct ch_mem_range t;
		struct mc7 *mem;
		u64 buf[32];

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!(adapter->flags & FULL_INIT_DONE))
			return -EIO;	/* need the memory controllers */
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if ((t.addr & 7) || (t.len & 7))
			return -EINVAL;
		if (t.mem_id == MEM_CM)
			mem = &adapter->cm;
		else if (t.mem_id == MEM_PMRX)
			mem = &adapter->pmrx;
		else if (t.mem_id == MEM_PMTX)
			mem = &adapter->pmtx;
		else
			return -EINVAL;

		/*
		 * Version scheme:
		 * bits 0..9: chip version
		 * bits 10..15: chip revision
		 */
		t.version = 3 | (adapter->params.rev << 10);
		if (copy_to_user(useraddr, &t, sizeof(t)))
			return -EFAULT;

		/*
		 * Read 256 bytes at a time as len can be large and we don't
		 * want to use huge intermediate buffers.
		 */
		useraddr += sizeof(t);	/* advance to start of buffer */
		while (t.len) {
			unsigned int chunk =
				min_t(unsigned int, t.len, sizeof(buf));

			ret =
				t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
						buf);
			if (ret)
				return ret;
			if (copy_to_user(useraddr, buf, chunk))
				return -EFAULT;
			useraddr += chunk;
			t.addr += chunk;
			t.len -= chunk;
		}
		break;
	}
	case CHELSIO_SET_TRACE_FILTER:{
		struct ch_trace t;
		const struct trace_params *tp;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (!offload_running(adapter))
			return -EAGAIN;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;

		tp = (const struct trace_params *)&t.sip;
		if (t.config_tx)
			t3_config_trace_filter(adapter, tp, 0,
						t.invert_match,
						t.trace_tx);
		if (t.config_rx)
			t3_config_trace_filter(adapter, tp, 1,
						t.invert_match,
						t.trace_rx);
		break;
	}
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct mii_ioctl_data *data = if_mii(req);
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret, mmd;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = pi->phy.addr;
		/* FALLTHRU */
	case SIOCGMIIREG:{
		u32 val;
		struct cphy *phy = &pi->phy;

		if (!phy->mdio_read)
			return -EOPNOTSUPP;
		if (is_10G(adapter)) {
			mmd = data->phy_id >> 8;
			if (!mmd)
				mmd = MDIO_DEV_PCS;
			else if (mmd > MDIO_DEV_XGXS)
				return -EINVAL;

			ret =
				phy->mdio_read(adapter, data->phy_id & 0x1f,
						mmd, data->reg_num, &val);
		} else
			ret =
				phy->mdio_read(adapter, data->phy_id & 0x1f,
						0, data->reg_num & 0x1f,
						&val);
		if (!ret)
			data->val_out = val;
		break;
	}
	case SIOCSMIIREG:{
		struct cphy *phy = &pi->phy;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (!phy->mdio_write)
			return -EOPNOTSUPP;
		if (is_10G(adapter)) {
			mmd = data->phy_id >> 8;
			if (!mmd)
				mmd = MDIO_DEV_PCS;
			else if (mmd > MDIO_DEV_XGXS)
				return -EINVAL;

			ret =
				phy->mdio_write(adapter,
						data->phy_id & 0x1f, mmd,
						data->reg_num,
						data->val_in);
		} else
			ret =
				phy->mdio_write(adapter,
						data->phy_id & 0x1f, 0,
						data->reg_num & 0x1f,
						data->val_in);
		break;
	}
	case SIOCCHIOCTL:
		return cxgb_extension_ioctl(dev, req->ifr_data);
	default:
		return -EOPNOTSUPP;
	}
	return ret;
}
static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;

	if (new_mtu < 81)	/* accommodate SACK */
		return -EINVAL;
	if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
		return ret;
	dev->mtu = new_mtu;
	init_port_mtus(adapter);
	if (adapter->params.rev == 0 && offload_running(adapter))
		t3_load_mtus(adapter, adapter->params.mtus,
			     adapter->params.a_wnd, adapter->params.b_wnd,
			     adapter->port[0]->mtu);
	return 0;
}
static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
	if (offload_running(adapter))
		write_smt_entry(adapter, pi->port_id);
	return 0;
}
/**
 * t3_synchronize_rx - wait for current Rx processing on a port to complete
 * @adap: the adapter
 * @p: the port
 *
 * Ensures that current Rx processing on any of the queues associated with
 * the given port completes before returning.  We do this by acquiring and
 * releasing the locks of the response queues associated with the port.
 */
static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
{
	int i;

	for (i = 0; i < p->nqsets; i++) {
		struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;

		spin_lock_irq(&q->lock);
		spin_unlock_irq(&q->lock);
	}
}
static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	pi->vlan_grp = grp;
	if (adapter->params.rev > 0)
		t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
	else {
		/* single control for all ports */
		unsigned int i, have_vlans = 0;
		for_each_port(adapter, i)
		    have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;

		t3_set_vlan_accel(adapter, 1, have_vlans);
	}
	t3_synchronize_rx(adapter, pi);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int qidx;

	for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
		struct sge_qset *qs = &adapter->sge.qs[qidx];
		void *source;

		if (adapter->flags & USING_MSIX)
			source = qs;
		else
			source = adapter;

		t3_intr_handler(adapter, qs->rspq.polling) (0, source);
	}
}
#endif
/*
 * Periodic accumulation of MAC statistics.
 */
static void mac_stats_update(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		if (netif_running(dev)) {
			spin_lock(&adapter->stats_lock);
			t3_mac_update_stats(&p->mac);
			spin_unlock(&adapter->stats_lock);
		}
	}
}
static void check_link_status(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
			t3_link_changed(adapter, i);
	}
}
static void check_t3b2_mac(struct adapter *adapter)
{
	int i;

	if (!rtnl_trylock())	/* synchronize with ifdown */
		return;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);
		int status;

		if (!netif_running(dev))
			continue;

		status = 0;
		if (netif_running(dev) && netif_carrier_ok(dev))
			status = t3b2_mac_watchdog_task(&p->mac);
		if (status == 1)
			p->mac.stats.num_toggled++;
		else if (status == 2) {
			struct cmac *mac = &p->mac;

			t3_mac_set_mtu(mac, dev->mtu);
			t3_mac_set_address(mac, 0, dev->dev_addr);
			cxgb_set_rxmode(dev);
			t3_link_start(&p->phy, mac, &p->link_config);
			t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
			t3_port_intr_enable(adapter, p->port_id);
			p->mac.stats.num_resets++;
		}
	}
	rtnl_unlock();
}
static void t3_adap_check_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       adap_check_task.work);
	const struct adapter_params *p = &adapter->params;

	adapter->check_task_cnt++;

	/* Check link status for PHYs without interrupts */
	if (p->linkpoll_period)
		check_link_status(adapter);

	/* Accumulate MAC stats if needed */
	if (!p->linkpoll_period ||
	    (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
	    p->stats_update_period) {
		mac_stats_update(adapter);
		adapter->check_task_cnt = 0;
	}

	if (p->rev == T3_REV_B2)
		check_t3b2_mac(adapter);

	/* Schedule the next check update if any port is active. */
	spin_lock(&adapter->work_lock);
	if (adapter->open_device_map & PORT_MASK)
		schedule_chk_task(adapter);
	spin_unlock(&adapter->work_lock);
}
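/*
 * Cadence math for the stats test above: the task wakes every
 * linkpoll_period/10 seconds, so once check_task_cnt * linkpoll_period / 10
 * reaches stats_update_period seconds' worth of wakeups the MAC counters
 * are folded in and the counter resets.
 */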
/*
 * Processes external (PHY) interrupts in process context.
 */
static void ext_intr_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       ext_intr_handler_task);

	t3_phy_intr_handler(adapter);

	/* Now reenable external interrupts */
	spin_lock_irq(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask |= F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
	}
	spin_unlock_irq(&adapter->work_lock);
}
/*
 * Interrupt-context handler for external (PHY) interrupts.
 */
void t3_os_ext_intr_handler(struct adapter *adapter)
{
	/*
	 * Schedule a task to handle external interrupts as they may be slow
	 * and we use a mutex to protect MDIO registers.  We disable PHY
	 * interrupts in the meantime and let the task reenable them when
	 * it's done.
	 */
	spin_lock(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask &= ~F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
		queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
	}
	spin_unlock(&adapter->work_lock);
}
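/*
 * The F_T3DBG bit gates PHY interrupts: the hard-IRQ handler above masks it
 * before queuing ext_intr_task(), and the task adds it back to
 * slow_intr_mask and clears/re-enables the cause once the MDIO work is done.
 */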
void t3_fatal_err(struct adapter *adapter)
{
	unsigned int fw_status[4];

	if (adapter->flags & FULL_INIT_DONE) {
		t3_sge_stop(adapter);
		t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
		t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
		t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
		t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
		t3_intr_disable(adapter);
	}
	CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
	if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
		CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
			 fw_status[0], fw_status[1],
			 fw_status[2], fw_status[3]);
}
static int __devinit cxgb_enable_msix(struct adapter *adap)
{
	struct msix_entry entries[SGE_QSETS + 1];
	int i, err;

	for (i = 0; i < ARRAY_SIZE(entries); ++i)
		entries[i].entry = i;

	err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
	if (!err) {
		for (i = 0; i < ARRAY_SIZE(entries); ++i)
			adap->msix_info[i].vec = entries[i].vector;
	} else if (err > 0)
		dev_info(&adap->pdev->dev,
			 "only %d MSI-X vectors left, not using MSI-X\n", err);
	return err;
}
static void __devinit print_port_info(struct adapter *adap,
				      const struct adapter_info *ai)
{
	static const char *pci_variant[] = {
		"PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
	};

	int i;
	char buf[80];

	if (is_pcie(adap))
		snprintf(buf, sizeof(buf), "%s x%d",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.width);
	else
		snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.speed, adap->params.pci.width);

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		const struct port_info *pi = netdev_priv(dev);

		if (!test_bit(i, &adap->registered_device_map))
			continue;
		printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
		       dev->name, ai->desc, pi->port_type->desc,
		       is_offload(adap) ? "R" : "", adap->params.rev, buf,
		       (adap->flags & USING_MSIX) ? " MSI-X" :
		       (adap->flags & USING_MSI) ? " MSI" : "");
		if (adap->name == dev->name && adap->params.vpd.mclk)
			printk(KERN_INFO
			       "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
			       adap->name, t3_mc7_size(&adap->cm) >> 20,
			       t3_mc7_size(&adap->pmtx) >> 20,
			       t3_mc7_size(&adap->pmrx) >> 20,
			       adap->params.vpd.sn);
	}
}
static int __devinit init_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	static int version_printed;

	int i, err, pci_using_dac = 0;
	unsigned long mmio_start, mmio_len;
	const struct adapter_info *ai;
	struct adapter *adapter = NULL;
	struct port_info *pi;

	if (!version_printed) {
		printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
		++version_printed;
	}

	if (!cxgb3_wq) {
		cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
		if (!cxgb3_wq) {
			printk(KERN_ERR DRV_NAME
			       ": cannot initialize work queue\n");
			return -ENOMEM;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_disable_device;
		}
	} else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		goto out_disable_device;
	}

	pci_set_master(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	ai = t3_get_adapter_info(ent->driver_data);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_disable_device;
	}

	adapter->regs = ioremap_nocache(mmio_start, mmio_len);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->pdev = pdev;
	adapter->name = pci_name(pdev);
	adapter->msg_enable = dflt_msg_enable;
	adapter->mmio_len = mmio_len;

	mutex_init(&adapter->mdio_lock);
	spin_lock_init(&adapter->work_lock);
	spin_lock_init(&adapter->stats_lock);

	INIT_LIST_HEAD(&adapter->adapter_list);
	INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
	INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);

	for (i = 0; i < ai->nports; ++i) {
		struct net_device *netdev;

		netdev = alloc_etherdev(sizeof(struct port_info));
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->rx_csum_offload = 1;
		pi->nqsets = 1;
		pi->first_qset = i;
		pi->activity = 0;
		pi->port_id = i;
		netif_carrier_off(netdev);
		netdev->irq = pdev->irq;
		netdev->mem_start = mmio_start;
		netdev->mem_end = mmio_start + mmio_len - 1;
		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
		netdev->features |= NETIF_F_LLTX;
		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;

		netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
		netdev->vlan_rx_register = vlan_rx_register;

		netdev->open = cxgb_open;
		netdev->stop = cxgb_close;
		netdev->hard_start_xmit = t3_eth_xmit;
		netdev->get_stats = cxgb_get_stats;
		netdev->set_multicast_list = cxgb_set_rxmode;
		netdev->do_ioctl = cxgb_ioctl;
		netdev->change_mtu = cxgb_change_mtu;
		netdev->set_mac_address = cxgb_set_mac_addr;
#ifdef CONFIG_NET_POLL_CONTROLLER
		netdev->poll_controller = cxgb_netpoll;
#endif

		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
	}

	pci_set_drvdata(pdev, adapter);
	if (t3_prep_adapter(adapter, ai, 1) < 0) {
		err = -ENODEV;
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		err = register_netdev(adapter->port[i]);
		if (err)
			dev_warn(&pdev->dev,
				 "cannot register net device %s, skipping\n",
				 adapter->port[i]->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i]->name;

			__set_bit(i, &adapter->registered_device_map);
		}
	}
	if (!adapter->registered_device_map) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}

	/* Driver's ready. Reflect it on LEDs */
	t3_led_ready(adapter);

	if (is_offload(adapter)) {
		__set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
		cxgb3_adapter_ofld(adapter);
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && cxgb_enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	err = sysfs_create_group(&adapter->port[0]->dev.kobj,
				 &cxgb3_attr_group);

	print_port_info(adapter, ai);
	return 0;

out_free_dev:
	iounmap(adapter->regs);
	for (i = ai->nports - 1; i >= 0; --i)
		if (adapter->port[i])
			free_netdev(adapter->port[i]);

out_free_adapter:
	kfree(adapter);

out_disable_device:
	pci_disable_device(pdev);
out_release_regions:
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
static void __devexit remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (adapter) {
		int i;

		t3_sge_stop(adapter);
		sysfs_remove_group(&adapter->port[0]->dev.kobj,
				   &cxgb3_attr_group);

		for_each_port(adapter, i)
		    if (test_bit(i, &adapter->registered_device_map))
			unregister_netdev(adapter->port[i]);

		if (is_offload(adapter)) {
			cxgb3_adapter_unofld(adapter);
			if (test_bit(OFFLOAD_DEVMAP_BIT,
				     &adapter->open_device_map))
				offload_close(&adapter->tdev);
		}

		t3_free_sge_resources(adapter);
		cxgb_disable_msi(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i])
				free_netdev(adapter->port[i]);

		iounmap(adapter->regs);
		kfree(adapter);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
static struct pci_driver driver = {
	.name = DRV_NAME,
	.id_table = cxgb3_pci_tbl,
	.probe = init_one,
	.remove = __devexit_p(remove_one),
};

static int __init cxgb3_init_module(void)
{
	int ret;

	cxgb3_offload_init();

	ret = pci_register_driver(&driver);
	return ret;
}

static void __exit cxgb3_cleanup_module(void)
{
	pci_unregister_driver(&driver);
	if (cxgb3_wq)
		destroy_workqueue(cxgb3_wq);
}

module_init(cxgb3_init_module);
module_exit(cxgb3_cleanup_module);