/*
 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

enum {
        MAX_TXQ_ENTRIES = 16384,
        MAX_CTRL_TXQ_ENTRIES = 1024,
        MAX_RSPQ_ENTRIES = 16384,
        MAX_RX_BUFFERS = 16384,
        MAX_RX_JUMBO_BUFFERS = 16384,
        MIN_TXQ_ENTRIES = 4,
        MIN_CTRL_TXQ_ENTRIES = 4,
        MIN_RSPQ_ENTRIES = 32,
        MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, ssid, idx) \
        { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }

static const struct pci_device_id cxgb3_pci_tbl[] = {
        CH_DEVICE(0x20, 1, 0),  /* PE9000 */
        CH_DEVICE(0x21, 1, 1),  /* T302E */
        CH_DEVICE(0x22, 1, 2),  /* T310E */
        CH_DEVICE(0x23, 1, 3),  /* T320X */
        CH_DEVICE(0x24, 1, 1),  /* T302X */
        CH_DEVICE(0x25, 1, 3),  /* T320E */
        CH_DEVICE(0x26, 1, 2),  /* T310X */
        CH_DEVICE(0x30, 1, 2),  /* T3B10 */
        CH_DEVICE(0x31, 1, 3),  /* T3B20 */
        CH_DEVICE(0x32, 1, 1),  /* T3B02 */
        {0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
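
/*
 * Usage sketch (not part of the driver): the parameter can be given at
 * load time, e.g. "modprobe cxgb3 msi=1", and since it is declared with
 * mode 0644 it is also visible at runtime under
 * /sys/module/cxgb3/parameters/msi.
 */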

/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */

static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
static struct workqueue_struct *cxgb3_wq;

/**
 *      link_report - show link status and link speed/duplex
 *      @dev: the net device whose link state is to be reported
 *
 *      Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                printk(KERN_INFO "%s: link down\n", dev->name);
        else {
                const char *s = "10Mbps";
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_config.speed) {
                case SPEED_10000:
                        s = "10Gbps";
                        break;
                case SPEED_1000:
                        s = "1000Mbps";
                        break;
                case SPEED_100:
                        s = "100Mbps";
                        break;
                }

                printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
                       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
        }
}

/**
 *      t3_os_link_changed - handle link status changes
 *      @adapter: the adapter associated with the link change
 *      @port_id: the port index whose link status has changed
 *      @link_stat: the new status of the link
 *      @speed: the new speed setting
 *      @duplex: the new duplex setting
 *      @pause: the new flow-control setting
 *
 *      This is the OS-dependent handler for link status changes.  The OS
 *      neutral handler takes care of most of the processing for these events,
 *      then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
                        int speed, int duplex, int pause)
{
        struct net_device *dev = adapter->port[port_id];

        /* Skip changes from disabled ports. */
        if (!netif_running(dev))
                return;

        if (link_stat != netif_carrier_ok(dev)) {
                if (link_stat)
                        netif_carrier_on(dev);
                else
                        netif_carrier_off(dev);
                link_report(dev);
        }
}

static void cxgb_set_rxmode(struct net_device *dev)
{
        struct t3_rx_mode rm;
        struct port_info *pi = netdev_priv(dev);

        init_rx_mode(&rm, dev, dev->mc_list);
        t3_mac_set_rx_mode(&pi->mac, &rm);
}

/**
 *      link_start - enable a port
 *      @dev: the device to enable
 *
 *      Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
        struct t3_rx_mode rm;
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        init_rx_mode(&rm, dev, dev->mc_list);
        t3_mac_reset(mac);
        t3_mac_set_mtu(mac, dev->mtu);
        t3_mac_set_address(mac, 0, dev->dev_addr);
        t3_mac_set_rx_mode(mac, &rm);
        t3_link_start(&pi->phy, mac, &pi->link_config);
        t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

static inline void cxgb_disable_msi(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~USING_MSIX;
        } else if (adapter->flags & USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~USING_MSI;
        }
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
        t3_slow_intr_handler(cookie);
        return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
        int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

        snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
        adap->msix_info[0].desc[n] = 0;

        for_each_port(adap, j) {
                struct net_device *d = adap->port[j];
                const struct port_info *pi = netdev_priv(d);

                for (i = 0; i < pi->nqsets; i++, msi_idx++) {
                        snprintf(adap->msix_info[msi_idx].desc, n,
                                 "%s (queue %d)", d->name, i);
                        adap->msix_info[msi_idx].desc[n] = 0;
                }
        }
}
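
/*
 * Illustrative example (hypothetical names): on an adapter whose first
 * port is "eth0" with two queue sets, the loop above produces the
 * descriptions "eth0 (queue 0)" and "eth0 (queue 1)", while entry 0
 * keeps the bare adapter name for the asynchronous-event vector.  These
 * are the strings later passed to request_irq(), i.e. what shows up in
 * /proc/interrupts.
 */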

static int request_msix_data_irqs(struct adapter *adap)
{
        int i, j, err, qidx = 0;

        for_each_port(adap, i) {
                int nqsets = adap2pinfo(adap, i)->nqsets;

                for (j = 0; j < nqsets; ++j) {
                        err = request_irq(adap->msix_info[qidx + 1].vec,
                                          t3_intr_handler(adap,
                                                          adap->sge.qs[qidx].
                                                          rspq.polling), 0,
                                          adap->msix_info[qidx + 1].desc,
                                          &adap->sge.qs[qidx]);
                        if (err) {
                                while (--qidx >= 0)
                                        free_irq(adap->msix_info[qidx + 1].vec,
                                                 &adap->sge.qs[qidx]);
                                return err;
                        }
                        qidx++;
                }
        }
        return 0;
}

/**
 *      setup_rss - configure RSS
 *      @adap: the adapter
 *
 *      Sets up RSS to distribute packets to multiple receive queues.  We
 *      configure the RSS CPU lookup table to distribute to the number of HW
 *      receive queues, and the response queue lookup table to narrow that
 *      down to the response queues actually configured for each port.
 *      We always configure the RSS mapping for two ports since the mapping
 *      table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
        int i;
        unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
        unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
        u8 cpus[SGE_QSETS + 1];
        u16 rspq_map[RSS_TABLE_SIZE];

        for (i = 0; i < SGE_QSETS; ++i)
                cpus[i] = i;
        cpus[SGE_QSETS] = 0xff; /* terminator */

        for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
                rspq_map[i] = i % nq0;
                rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
        }

        t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
                      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
                      V_RRCPLCPUSIZE(6), cpus, rspq_map);
}
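
/*
 * Worked example of the mapping above (made-up sizes): with nq0 = 2
 * queue sets on port 0 and nq1 = 2 on port 1, the first half of
 * rspq_map cycles 0,1,0,1,... and the second half cycles 2,3,2,3,...,
 * so hash values landing in either half are spread over that port's
 * own response queues only.
 */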

/*
 * If we have multiple receive queues per port serviced by NAPI we need one
 * netdevice per queue as NAPI operates on netdevices.  We already have one
 * netdevice, namely the one associated with the interface, so we use dummy
 * ones for any additional queues.  Note that these netdevices exist purely
 * so that NAPI has something to work with, they do not represent network
 * ports and are not registered.
 */
static int init_dummy_netdevs(struct adapter *adap)
{
        int i, j, dummy_idx = 0;
        struct net_device *nd;

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                const struct port_info *pi = netdev_priv(dev);

                for (j = 0; j < pi->nqsets - 1; j++) {
                        if (!adap->dummy_netdev[dummy_idx]) {
                                nd = alloc_netdev(0, "", ether_setup);
                                if (!nd)
                                        goto free_all;

                                nd->priv = adap;
                                nd->weight = 64;
                                set_bit(__LINK_STATE_START, &nd->state);
                                adap->dummy_netdev[dummy_idx] = nd;
                        }
                        strcpy(adap->dummy_netdev[dummy_idx]->name, dev->name);
                        dummy_idx++;
                }
        }
        return 0;

free_all:
        while (--dummy_idx >= 0) {
                free_netdev(adap->dummy_netdev[dummy_idx]);
                adap->dummy_netdev[dummy_idx] = NULL;
        }
        return -ENOMEM;
}

/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
        int i;
        struct net_device *dev;

        for_each_port(adap, i) {
                dev = adap->port[i];
                while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
                        msleep(1);
        }

        for (i = 0; i < ARRAY_SIZE(adap->dummy_netdev); i++) {
                dev = adap->dummy_netdev[i];
                if (dev)
                        while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
                                msleep(1);
        }
}

/**
 *      setup_sge_qsets - configure SGE Tx/Rx/response queues
 *      @adap: the adapter
 *
 *      Determines how many sets of SGE queues to use and initializes them.
 *      We support multiple queue sets per port if we have MSI-X, otherwise
 *      just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
        int i, j, err, irq_idx = 0, qset_idx = 0, dummy_dev_idx = 0;
        unsigned int ntxq = is_offload(adap) ? SGE_TXQ_PER_SET : 1;

        if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
                irq_idx = -1;

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                const struct port_info *pi = netdev_priv(dev);

                for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
                        err = t3_sge_alloc_qset(adap, qset_idx, 1,
                                (adap->flags & USING_MSIX) ? qset_idx + 1 :
                                                             irq_idx,
                                &adap->params.sge.qset[qset_idx], ntxq,
                                j == 0 ? dev :
                                         adap->dummy_netdev[dummy_dev_idx++]);
                        if (err) {
                                t3_free_sge_resources(adap);
                                return err;
                        }
                }
        }

        return 0;
}

static ssize_t attr_show(struct device *d, struct device_attribute *attr,
                         char *buf,
                         ssize_t(*format) (struct adapter *, char *))
{
        ssize_t len;
        struct adapter *adap = to_net_dev(d)->priv;

        /* Synchronize with ioctls that may shut down the device */
        rtnl_lock();
        len = (*format) (adap, buf);
        rtnl_unlock();
        return len;
}

static ssize_t attr_store(struct device *d, struct device_attribute *attr,
                          const char *buf, size_t len,
                          ssize_t(*set) (struct adapter *, unsigned int),
                          unsigned int min_val, unsigned int max_val)
{
        char *endp;
        ssize_t ret;
        unsigned int val;
        struct adapter *adap = to_net_dev(d)->priv;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        val = simple_strtoul(buf, &endp, 0);
        if (endp == buf || val < min_val || val > max_val)
                return -EINVAL;

        rtnl_lock();
        ret = (*set) (adap, val);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct adapter *adap, char *buf) \
{ \
        return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return attr_show(d, attr, buf, format_##name); \
}

static ssize_t set_nfilters(struct adapter *adap, unsigned int val)
{
        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val && adap->params.rev == 0)
                return -EINVAL;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers)
                return -EINVAL;
        adap->params.mc5.nfilters = val;
        return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, attr, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct adapter *adap, unsigned int val)
{
        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters)
                return -EINVAL;
        adap->params.mc5.nservers = val;
        return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, attr, buf, len, set_nservers, 0, ~0);
}

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
        &dev_attr_cam_size.attr,
        &dev_attr_nfilters.attr,
        &dev_attr_nservers.attr,
        NULL
};

static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
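
/*
 * Usage sketch (the exact sysfs path depends on where the group is
 * registered; /sys/class/net/eth0 is only an assumed example):
 *
 *     cat /sys/class/net/eth0/cam_size
 *     echo 16384 > /sys/class/net/eth0/nfilters
 *
 * Writes fail with -EBUSY once FULL_INIT_DONE is set, so the filter and
 * server region sizes must be changed before the adapter is first
 * brought up.
 */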

static ssize_t tm_attr_show(struct device *d, struct device_attribute *attr,
                            char *buf, int sched)
{
        ssize_t len;
        unsigned int v, addr, bpt, cpt;
        struct adapter *adap = to_net_dev(d)->priv;

        addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
        rtnl_lock();
        t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
        v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
        if (sched & 1)
                v >>= 16;
        bpt = (v >> 8) & 0xff;
        cpt = v & 0xff;
        if (!cpt)
                len = sprintf(buf, "disabled\n");
        else {
                v = (adap->params.vpd.cclk * 1000) / cpt;
                len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
        }
        rtnl_unlock();
        return len;
}
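
/*
 * Worked example of the rate math above, with made-up values and
 * assuming cclk is stored in kHz: cclk * 1000 is core clocks per
 * second; dividing by cpt (clocks per scheduler tick) gives ticks per
 * second; multiplying by bpt (bytes per tick) gives bytes per second;
 * dividing by 125 converts bytes/s to Kbps, since 1 Kbit = 125 bytes.
 * E.g. for v = 1,000,000 ticks/s and bpt = 125, (v * bpt) / 125 =
 * 1,000,000 Kbps, i.e. a 1 Gbps scheduler.
 */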

static ssize_t tm_attr_store(struct device *d, struct device_attribute *attr,
                             const char *buf, size_t len, int sched)
{
        char *endp;
        ssize_t ret;
        unsigned int val;
        struct adapter *adap = to_net_dev(d)->priv;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        val = simple_strtoul(buf, &endp, 0);
        if (endp == buf || val > 10000000)
                return -EINVAL;

        rtnl_lock();
        ret = t3_config_sched(adap, val, sched);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return tm_attr_show(d, attr, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
                            const char *buf, size_t len) \
{ \
        return tm_attr_store(d, attr, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
        &dev_attr_sched0.attr,
        &dev_attr_sched1.attr,
        &dev_attr_sched2.attr,
        &dev_attr_sched3.attr,
        &dev_attr_sched4.attr,
        &dev_attr_sched5.attr,
        &dev_attr_sched6.attr,
        &dev_attr_sched7.attr,
        NULL
};

static struct attribute_group offload_attr_group = {.attrs = offload_attrs };

/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
        int ret;

        local_bh_disable();
        ret = t3_offload_tx(tdev, skb);
        local_bh_enable();
        return ret;
}

static int write_smt_entry(struct adapter *adapter, int idx)
{
        struct cpl_smt_write_req *req;
        struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

        if (!skb)
                return -ENOMEM;

        req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
        req->mtu_idx = NMTUS - 1;       /* should be 0 but there's a T3 bug */
        req->iff = idx;
        memset(req->src_mac1, 0, sizeof(req->src_mac1));
        memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
        skb->priority = 1;
        offload_tx(&adapter->tdev, skb);
        return 0;
}

static int init_smt(struct adapter *adapter)
{
        int i;

        for_each_port(adapter, i)
            write_smt_entry(adapter, i);
        return 0;
}

static void init_port_mtus(struct adapter *adapter)
{
        unsigned int mtus = adapter->port[0]->mtu;

        if (adapter->port[1])
                mtus |= adapter->port[1]->mtu << 16;
        t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}

static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
                              int hi, int port)
{
        struct sk_buff *skb;
        struct mngt_pktsched_wr *req;

        skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
        req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
        req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
        req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
        req->sched = sched;
        req->idx = qidx;
        req->min = lo;
        req->max = hi;
        req->binding = port;
        t3_mgmt_tx(adap, skb);
}

static void bind_qsets(struct adapter *adap)
{
        int i, j;

        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                for (j = 0; j < pi->nqsets; ++j)
                        send_pktsched_cmd(adap, 1, pi->first_qset + j, -1,
                                          -1, i);
        }
}

/**
 *      cxgb_up - enable the adapter
 *      @adap: the adapter being enabled
 *
 *      Called when the first port is enabled, this function performs the
 *      actions necessary to make an adapter operational, such as completing
 *      the initialization of HW modules, and enabling interrupts.
 *
 *      Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
        int err = 0;

        if (!(adap->flags & FULL_INIT_DONE)) {
                err = t3_check_fw_version(adap);
                if (err)
                        goto out;

                err = init_dummy_netdevs(adap);
                if (err)
                        goto out;

                err = t3_init_hw(adap, 0);
                if (err)
                        goto out;

                err = setup_sge_qsets(adap);
                if (err)
                        goto out;

                setup_rss(adap);
                adap->flags |= FULL_INIT_DONE;
        }

        t3_intr_clear(adap);

        if (adap->flags & USING_MSIX) {
                name_msix_vecs(adap);
                err = request_irq(adap->msix_info[0].vec,
                                  t3_async_intr_handler, 0,
                                  adap->msix_info[0].desc, adap);
                if (err)
                        goto irq_err;

                if (request_msix_data_irqs(adap)) {
                        free_irq(adap->msix_info[0].vec, adap);
                        goto irq_err;
                }
        } else if ((err = request_irq(adap->pdev->irq,
                                      t3_intr_handler(adap,
                                                      adap->sge.qs[0].rspq.
                                                      polling),
                                      (adap->flags & USING_MSI) ?
                                       0 : IRQF_SHARED,
                                      adap->name, adap)))
                goto irq_err;

        t3_sge_start(adap);
        t3_intr_enable(adap);

        if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
                bind_qsets(adap);
        adap->flags |= QUEUES_BOUND;

out:
        return err;
irq_err:
        CH_ERR(adap, "request_irq failed, err %d\n", err);
        goto out;
}

/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
        t3_sge_stop(adapter);
        spin_lock_irq(&adapter->work_lock);     /* sync with PHY intr task */
        t3_intr_disable(adapter);
        spin_unlock_irq(&adapter->work_lock);

        if (adapter->flags & USING_MSIX) {
                int i, n = 0;

                free_irq(adapter->msix_info[0].vec, adapter);
                for_each_port(adapter, i)
                    n += adap2pinfo(adapter, i)->nqsets;

                for (i = 0; i < n; ++i)
                        free_irq(adapter->msix_info[i + 1].vec,
                                 &adapter->sge.qs[i]);
        } else
                free_irq(adapter->pdev->irq, adapter);

        flush_workqueue(cxgb3_wq);      /* wait for external IRQ handler */
        quiesce_rx(adapter);
}

static void schedule_chk_task(struct adapter *adap)
{
        unsigned int timeo;

        timeo = adap->params.linkpoll_period ?
            (HZ * adap->params.linkpoll_period) / 10 :
            adap->params.stats_update_period * HZ;
        if (timeo)
                queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}

static int offload_open(struct net_device *dev)
{
        struct adapter *adapter = dev->priv;
        struct t3cdev *tdev = T3CDEV(dev);
        int adap_up = adapter->open_device_map & PORT_MASK;
        int err = 0;

        if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        if (!adap_up && (err = cxgb_up(adapter)) < 0)
                return err;

        t3_tp_set_offload_mode(adapter, 1);
        tdev->lldev = adapter->port[0];
        err = cxgb3_offload_activate(adapter);
        if (err)
                goto out;

        init_port_mtus(adapter);
        t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
                     adapter->params.b_wnd,
                     adapter->params.rev == 0 ?
                     adapter->port[0]->mtu : 0xffff);
        init_smt(adapter);

        /* Never mind if the next step fails */
        sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group);

        /* Call back all registered clients */
        cxgb3_add_clients(tdev);

out:
        /* restore them in case the offload module has changed them */
        if (err) {
                t3_tp_set_offload_mode(adapter, 0);
                clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
                cxgb3_set_dummy_ops(tdev);
        }
        return err;
}

static int offload_close(struct t3cdev *tdev)
{
        struct adapter *adapter = tdev2adap(tdev);

        if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        /* Call back all registered clients */
        cxgb3_remove_clients(tdev);

        sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

        tdev->lldev = NULL;
        cxgb3_set_dummy_ops(tdev);
        t3_tp_set_offload_mode(adapter, 0);
        clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

        if (!adapter->open_device_map)
                cxgb_down(adapter);

        cxgb3_offload_deactivate(adapter);
        return 0;
}

static int cxgb_open(struct net_device *dev)
{
        int err;
        struct adapter *adapter = dev->priv;
        struct port_info *pi = netdev_priv(dev);
        int other_ports = adapter->open_device_map & PORT_MASK;

        if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
                return err;

        set_bit(pi->port_id, &adapter->open_device_map);
        if (!ofld_disable) {
                err = offload_open(dev);
                if (err)
                        printk(KERN_WARNING
                               "Could not initialize offload capabilities\n");
        }

        link_start(dev);
        t3_port_intr_enable(adapter, pi->port_id);
        netif_start_queue(dev);
        if (!other_ports)
                schedule_chk_task(adapter);

        return 0;
}

static int cxgb_close(struct net_device *dev)
{
        struct adapter *adapter = dev->priv;
        struct port_info *p = netdev_priv(dev);

        t3_port_intr_disable(adapter, p->port_id);
        netif_stop_queue(dev);
        p->phy.ops->power_down(&p->phy, 1);
        netif_carrier_off(dev);
        t3_mac_disable(&p->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

        spin_lock(&adapter->work_lock); /* sync with update task */
        clear_bit(p->port_id, &adapter->open_device_map);
        spin_unlock(&adapter->work_lock);

        if (!(adapter->open_device_map & PORT_MASK))
                cancel_rearming_delayed_workqueue(cxgb3_wq,
                                                  &adapter->adap_check_task);

        if (!adapter->open_device_map)
                cxgb_down(adapter);

        return 0;
}

static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
        struct adapter *adapter = dev->priv;
        struct port_info *p = netdev_priv(dev);
        struct net_device_stats *ns = &p->netstats;
        const struct mac_stats *pstats;

        spin_lock(&adapter->stats_lock);
        pstats = t3_mac_update_stats(&p->mac);
        spin_unlock(&adapter->stats_lock);

        ns->tx_bytes = pstats->tx_octets;
        ns->tx_packets = pstats->tx_frames;
        ns->rx_bytes = pstats->rx_octets;
        ns->rx_packets = pstats->rx_frames;
        ns->multicast = pstats->rx_mcast_frames;

        ns->tx_errors = pstats->tx_underrun;
        ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
            pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
            pstats->rx_fifo_ovfl;

        /* detailed rx_errors */
        ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
        ns->rx_over_errors = 0;
        ns->rx_crc_errors = pstats->rx_fcs_errs;
        ns->rx_frame_errors = pstats->rx_symbol_errs;
        ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
        ns->rx_missed_errors = pstats->rx_cong_drops;

        /* detailed tx_errors */
        ns->tx_aborted_errors = 0;
        ns->tx_carrier_errors = 0;
        ns->tx_fifo_errors = pstats->tx_underrun;
        ns->tx_heartbeat_errors = 0;
        ns->tx_window_errors = 0;
        return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
        struct adapter *adapter = dev->priv;

        return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
        struct adapter *adapter = dev->priv;

        adapter->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
        "TxOctetsOK         ",
        "TxFramesOK         ",
        "TxMulticastFramesOK",
        "TxBroadcastFramesOK",
        "TxPauseFrames      ",
        "TxUnderrun         ",
        "TxExtUnderrun      ",

        "TxFrames64         ",
        "TxFrames65To127    ",
        "TxFrames128To255   ",
        "TxFrames256To511   ",
        "TxFrames512To1023  ",
        "TxFrames1024To1518 ",
        "TxFrames1519ToMax  ",

        "RxOctetsOK         ",
        "RxFramesOK         ",
        "RxMulticastFramesOK",
        "RxBroadcastFramesOK",
        "RxPauseFrames      ",
        "RxFCSErrors        ",
        "RxSymbolErrors     ",
        "RxShortErrors      ",
        "RxJabberErrors     ",
        "RxLengthErrors     ",
        "RxFIFOoverflow     ",

        "RxFrames64         ",
        "RxFrames65To127    ",
        "RxFrames128To255   ",
        "RxFrames256To511   ",
        "RxFrames512To1023  ",
        "RxFrames1024To1518 ",
        "RxFrames1519ToMax  ",

        "PhyFIFOErrors      ",
        "TSO                ",
        "VLANextractions    ",
        "VLANinsertions     ",
        "TxCsumOffload      ",
        "RxCsumGood         ",
        "RxDrops            "
};

static int get_stats_count(struct net_device *dev)
{
        return ARRAY_SIZE(stats_strings);
}

#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
        return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
        return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        u32 fw_vers = 0;
        struct adapter *adapter = dev->priv;

        t3_get_fw_version(adapter, &fw_vers);

        strcpy(info->driver, DRV_NAME);
        strcpy(info->version, DRV_VERSION);
        strcpy(info->bus_info, pci_name(adapter->pdev));
        if (!fw_vers)
                strcpy(info->fw_version, "N/A");
        else {
                snprintf(info->fw_version, sizeof(info->fw_version),
                         "%s %u.%u.%u",
                         G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
                         G_FW_VERSION_MAJOR(fw_vers),
                         G_FW_VERSION_MINOR(fw_vers),
                         G_FW_VERSION_MICRO(fw_vers));
        }
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        if (stringset == ETH_SS_STATS)
                memcpy(data, stats_strings, sizeof(stats_strings));
}

static unsigned long collect_sge_port_stats(struct adapter *adapter,
                                            struct port_info *p, int idx)
{
        int i;
        unsigned long tot = 0;

        for (i = 0; i < p->nqsets; ++i)
                tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
        return tot;
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
                      u64 *data)
{
        struct adapter *adapter = dev->priv;
        struct port_info *pi = netdev_priv(dev);
        const struct mac_stats *s;

        spin_lock(&adapter->stats_lock);
        s = t3_mac_update_stats(&pi->mac);
        spin_unlock(&adapter->stats_lock);

        *data++ = s->tx_octets;
        *data++ = s->tx_frames;
        *data++ = s->tx_mcast_frames;
        *data++ = s->tx_bcast_frames;
        *data++ = s->tx_pause;
        *data++ = s->tx_underrun;
        *data++ = s->tx_fifo_urun;

        *data++ = s->tx_frames_64;
        *data++ = s->tx_frames_65_127;
        *data++ = s->tx_frames_128_255;
        *data++ = s->tx_frames_256_511;
        *data++ = s->tx_frames_512_1023;
        *data++ = s->tx_frames_1024_1518;
        *data++ = s->tx_frames_1519_max;

        *data++ = s->rx_octets;
        *data++ = s->rx_frames;
        *data++ = s->rx_mcast_frames;
        *data++ = s->rx_bcast_frames;
        *data++ = s->rx_pause;
        *data++ = s->rx_fcs_errs;
        *data++ = s->rx_symbol_errs;
        *data++ = s->rx_short;
        *data++ = s->rx_jabber;
        *data++ = s->rx_too_long;
        *data++ = s->rx_fifo_ovfl;

        *data++ = s->rx_frames_64;
        *data++ = s->rx_frames_65_127;
        *data++ = s->rx_frames_128_255;
        *data++ = s->rx_frames_256_511;
        *data++ = s->rx_frames_512_1023;
        *data++ = s->rx_frames_1024_1518;
        *data++ = s->rx_frames_1519_max;

        *data++ = pi->phy.fifo_errors;

        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
        *data++ = s->rx_cong_drops;
}

static inline void reg_block_dump(struct adapter *ap, void *buf,
                                  unsigned int start, unsigned int end)
{
        u32 *p = buf + start;

        for (; start <= end; start += sizeof(u32))
                *p++ = t3_read_reg(ap, start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
                     void *buf)
{
        struct adapter *ap = dev->priv;

        /*
         * Version scheme:
         * bits 0..9: chip version
         * bits 10..15: chip revision
         * bit 31: set for PCIe cards
         */
        regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);

        /*
         * We skip the MAC statistics registers because they are clear-on-read.
         * Also reading multi-register stats would need to synchronize with the
         * periodic mac stats accumulation.  Hard to justify the complexity.
         */
        memset(buf, 0, T3_REGMAP_SIZE);
        reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
        reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
        reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
        reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
        reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
        reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
                       XGM_REG(A_XGM_SERDES_STAT3, 1));
        reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
                       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}
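
/*
 * A sketch of how a consumer could decode regs->version (illustrative
 * only; ethtool itself just reports the raw dump):
 *
 *     unsigned int chip = version & 0x3ff;        (bits 0..9, 3 for T3)
 *     unsigned int rev  = (version >> 10) & 0x3f; (bits 10..15)
 *     unsigned int pcie = version >> 31;          (set for PCIe cards)
 */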

static int restart_autoneg(struct net_device *dev)
{
        struct port_info *p = netdev_priv(dev);

        if (!netif_running(dev))
                return -EAGAIN;
        if (p->link_config.autoneg != AUTONEG_ENABLE)
                return -EINVAL;
        p->phy.ops->autoneg_restart(&p->phy);
        return 0;
}

static int cxgb3_phys_id(struct net_device *dev, u32 data)
{
        int i;
        struct adapter *adapter = dev->priv;

        if (data == 0)
                data = 2;

        for (i = 0; i < data * 2; i++) {
                t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
                                 (i & 1) ? F_GPIO0_OUT_VAL : 0);
                if (msleep_interruptible(500))
                        break;
        }
        t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
                         F_GPIO0_OUT_VAL);
        return 0;
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct port_info *p = netdev_priv(dev);

        cmd->supported = p->link_config.supported;
        cmd->advertising = p->link_config.advertising;

        if (netif_carrier_ok(dev)) {
                cmd->speed = p->link_config.speed;
                cmd->duplex = p->link_config.duplex;
        } else {
                cmd->speed = -1;
                cmd->duplex = -1;
        }

        cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
        cmd->phy_address = p->phy.addr;
        cmd->transceiver = XCVR_EXTERNAL;
        cmd->autoneg = p->link_config.autoneg;
        cmd->maxtxpkt = 0;
        cmd->maxrxpkt = 0;
        return 0;
}

static int speed_duplex_to_caps(int speed, int duplex)
{
        int cap = 0;

        switch (speed) {
        case SPEED_10:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_10baseT_Full;
                else
                        cap = SUPPORTED_10baseT_Half;
                break;
        case SPEED_100:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_100baseT_Full;
                else
                        cap = SUPPORTED_100baseT_Half;
                break;
        case SPEED_1000:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_1000baseT_Full;
                else
                        cap = SUPPORTED_1000baseT_Half;
                break;
        case SPEED_10000:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_10000baseT_Full;
        }
        return cap;
}

#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
                      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
                      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
                      ADVERTISED_10000baseT_Full)

static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct port_info *p = netdev_priv(dev);
        struct link_config *lc = &p->link_config;

        if (!(lc->supported & SUPPORTED_Autoneg))
                return -EOPNOTSUPP;     /* can't change speed/duplex */

        if (cmd->autoneg == AUTONEG_DISABLE) {
                int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

                if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
                        return -EINVAL;
                lc->requested_speed = cmd->speed;
                lc->requested_duplex = cmd->duplex;
                lc->advertising = 0;
        } else {
                cmd->advertising &= ADVERTISED_MASK;
                cmd->advertising &= lc->supported;
                if (!cmd->advertising)
                        return -EINVAL;
                lc->requested_speed = SPEED_INVALID;
                lc->requested_duplex = DUPLEX_INVALID;
                lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
        }
        lc->autoneg = cmd->autoneg;
        if (netif_running(dev))
                t3_link_start(&p->phy, &p->mac, lc);
        return 0;
}

static void get_pauseparam(struct net_device *dev,
                           struct ethtool_pauseparam *epause)
{
        struct port_info *p = netdev_priv(dev);

        epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
        epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
        epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
                          struct ethtool_pauseparam *epause)
{
        struct port_info *p = netdev_priv(dev);
        struct link_config *lc = &p->link_config;

        if (epause->autoneg == AUTONEG_DISABLE)
                lc->requested_fc = 0;
        else if (lc->supported & SUPPORTED_Autoneg)
                lc->requested_fc = PAUSE_AUTONEG;
        else
                return -EINVAL;

        if (epause->rx_pause)
                lc->requested_fc |= PAUSE_RX;
        if (epause->tx_pause)
                lc->requested_fc |= PAUSE_TX;
        if (lc->autoneg == AUTONEG_ENABLE) {
                if (netif_running(dev))
                        t3_link_start(&p->phy, &p->mac, lc);
        } else {
                lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
                if (netif_running(dev))
                        t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
        }
        return 0;
}

static u32 get_rx_csum(struct net_device *dev)
{
        struct port_info *p = netdev_priv(dev);

        return p->rx_csum_offload;
}

static int set_rx_csum(struct net_device *dev, u32 data)
{
        struct port_info *p = netdev_priv(dev);

        p->rx_csum_offload = data;
        return 0;
}

static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
        struct adapter *adapter = dev->priv;

        e->rx_max_pending = MAX_RX_BUFFERS;
        e->rx_mini_max_pending = 0;
        e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
        e->tx_max_pending = MAX_TXQ_ENTRIES;

        e->rx_pending = adapter->params.sge.qset[0].fl_size;
        e->rx_mini_pending = adapter->params.sge.qset[0].rspq_size;
        e->rx_jumbo_pending = adapter->params.sge.qset[0].jumbo_size;
        e->tx_pending = adapter->params.sge.qset[0].txq_size[0];
}

static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
        int i;
        struct adapter *adapter = dev->priv;

        if (e->rx_pending > MAX_RX_BUFFERS ||
            e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
            e->tx_pending > MAX_TXQ_ENTRIES ||
            e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
            e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
            e->rx_pending < MIN_FL_ENTRIES ||
            e->rx_jumbo_pending < MIN_FL_ENTRIES ||
            e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
                return -EINVAL;

        if (adapter->flags & FULL_INIT_DONE)
                return -EBUSY;

        for (i = 0; i < SGE_QSETS; ++i) {
                struct qset_params *q = &adapter->params.sge.qset[i];

                q->rspq_size = e->rx_mini_pending;
                q->fl_size = e->rx_pending;
                q->jumbo_size = e->rx_jumbo_pending;
                q->txq_size[0] = e->tx_pending;
                q->txq_size[1] = e->tx_pending;
                q->txq_size[2] = e->tx_pending;
        }
        return 0;
}

static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
        struct adapter *adapter = dev->priv;
        struct qset_params *qsp = &adapter->params.sge.qset[0];
        struct sge_qset *qs = &adapter->sge.qs[0];

        if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
                return -EINVAL;

        qsp->coalesce_usecs = c->rx_coalesce_usecs;
        t3_update_qset_coalesce(qs, qsp);
        return 0;
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
        struct adapter *adapter = dev->priv;
        struct qset_params *q = adapter->params.sge.qset;

        c->rx_coalesce_usecs = q->coalesce_usecs;
        return 0;
}

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
                      u8 *data)
{
        int i, err = 0;
        struct adapter *adapter = dev->priv;

        u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        e->magic = EEPROM_MAGIC;
        for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
                err = t3_seeprom_read(adapter, i, (u32 *)&buf[i]);

        if (!err)
                memcpy(data, buf + e->offset, e->len);
        kfree(buf);
        return err;
}

static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
                      u8 *data)
{
        u8 *buf;
        int err = 0;
        u32 aligned_offset, aligned_len, *p;
        struct adapter *adapter = dev->priv;

        if (eeprom->magic != EEPROM_MAGIC)
                return -EINVAL;

        aligned_offset = eeprom->offset & ~3;
        aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

        if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
                buf = kmalloc(aligned_len, GFP_KERNEL);
                if (!buf)
                        return -ENOMEM;
                err = t3_seeprom_read(adapter, aligned_offset, (u32 *)buf);
                if (!err && aligned_len > 4)
                        err = t3_seeprom_read(adapter,
                                              aligned_offset + aligned_len - 4,
                                              (u32 *)&buf[aligned_len - 4]);
                if (err)
                        goto out;
                memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
        } else
                buf = data;

        err = t3_seeprom_wp(adapter, 0);
        if (err)
                goto out;

        for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
                err = t3_seeprom_write(adapter, aligned_offset, *p);
                aligned_offset += 4;
        }

        if (!err)
                err = t3_seeprom_wp(adapter, 1);
out:
        if (buf != data)
                kfree(buf);
        return err;
}

static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
        wol->supported = 0;
        wol->wolopts = 0;
        memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static const struct ethtool_ops cxgb_ethtool_ops = {
        .get_settings = get_settings,
        .set_settings = set_settings,
        .get_drvinfo = get_drvinfo,
        .get_msglevel = get_msglevel,
        .set_msglevel = set_msglevel,
        .get_ringparam = get_sge_param,
        .set_ringparam = set_sge_param,
        .get_coalesce = get_coalesce,
        .set_coalesce = set_coalesce,
        .get_eeprom_len = get_eeprom_len,
        .get_eeprom = get_eeprom,
        .set_eeprom = set_eeprom,
        .get_pauseparam = get_pauseparam,
        .set_pauseparam = set_pauseparam,
        .get_rx_csum = get_rx_csum,
        .set_rx_csum = set_rx_csum,
        .get_tx_csum = ethtool_op_get_tx_csum,
        .set_tx_csum = ethtool_op_set_tx_csum,
        .get_sg = ethtool_op_get_sg,
        .set_sg = ethtool_op_set_sg,
        .get_link = ethtool_op_get_link,
        .get_strings = get_strings,
        .phys_id = cxgb3_phys_id,
        .nway_reset = restart_autoneg,
        .get_stats_count = get_stats_count,
        .get_ethtool_stats = get_stats,
        .get_regs_len = get_regs_len,
        .get_regs = get_regs,
        .get_wol = get_wol,
        .get_tso = ethtool_op_get_tso,
        .set_tso = ethtool_op_set_tso,
        .get_perm_addr = ethtool_op_get_perm_addr
};

/*
 * Range check used by the extension ioctl below: a negative value means
 * "leave this parameter unchanged", so it is accepted as in range.
 */
static int in_range(int val, int lo, int hi)
{
        return val < 0 || (val <= hi && val >= lo);
}
1541
1542 static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1543 {
1544         int ret;
1545         u32 cmd;
1546         struct adapter *adapter = dev->priv;
1547
1548         if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1549                 return -EFAULT;
1550
1551         switch (cmd) {
1552         case CHELSIO_SETREG:{
1553                 struct ch_reg edata;
1554
1555                 if (!capable(CAP_NET_ADMIN))
1556                         return -EPERM;
1557                 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1558                         return -EFAULT;
                if ((edata.addr & 3) != 0 || edata.addr >= adapter->mmio_len)
                        return -EINVAL;
1562                 writel(edata.val, adapter->regs + edata.addr);
1563                 break;
1564         }
1565         case CHELSIO_GETREG:{
1566                 struct ch_reg edata;
1567
1568                 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1569                         return -EFAULT;
                if ((edata.addr & 3) != 0 || edata.addr >= adapter->mmio_len)
                        return -EINVAL;
1573                 edata.val = readl(adapter->regs + edata.addr);
1574                 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1575                         return -EFAULT;
1576                 break;
1577         }
1578         case CHELSIO_SET_QSET_PARAMS:{
1579                 int i;
1580                 struct qset_params *q;
1581                 struct ch_qset_params t;
1582
1583                 if (!capable(CAP_NET_ADMIN))
1584                         return -EPERM;
1585                 if (copy_from_user(&t, useraddr, sizeof(t)))
1586                         return -EFAULT;
1587                 if (t.qset_idx >= SGE_QSETS)
1588                         return -EINVAL;
                if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
                    !in_range(t.cong_thres, 0, 255) ||
                    !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
                              MAX_TXQ_ENTRIES) ||
                    !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
                              MAX_TXQ_ENTRIES) ||
                    !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
                              MAX_CTRL_TXQ_ENTRIES) ||
                    !in_range(t.fl_size[0], MIN_FL_ENTRIES,
                              MAX_RX_BUFFERS) ||
                    !in_range(t.fl_size[1], MIN_FL_ENTRIES,
                              MAX_RX_JUMBO_BUFFERS) ||
                    !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
                              MAX_RSPQ_ENTRIES))
                        return -EINVAL;
                if ((adapter->flags & FULL_INIT_DONE) &&
                    (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
                     t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
                     t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
                     t.polling >= 0 || t.cong_thres >= 0))
                        return -EBUSY;
1610
1611                 q = &adapter->params.sge.qset[t.qset_idx];
1612
1613                 if (t.rspq_size >= 0)
1614                         q->rspq_size = t.rspq_size;
1615                 if (t.fl_size[0] >= 0)
1616                         q->fl_size = t.fl_size[0];
1617                 if (t.fl_size[1] >= 0)
1618                         q->jumbo_size = t.fl_size[1];
1619                 if (t.txq_size[0] >= 0)
1620                         q->txq_size[0] = t.txq_size[0];
1621                 if (t.txq_size[1] >= 0)
1622                         q->txq_size[1] = t.txq_size[1];
1623                 if (t.txq_size[2] >= 0)
1624                         q->txq_size[2] = t.txq_size[2];
1625                 if (t.cong_thres >= 0)
1626                         q->cong_thres = t.cong_thres;
1627                 if (t.intr_lat >= 0) {
                        struct sge_qset *qs = &adapter->sge.qs[t.qset_idx];
1630
1631                         q->coalesce_usecs = t.intr_lat;
1632                         t3_update_qset_coalesce(qs, q);
1633                 }
1634                 if (t.polling >= 0) {
1635                         if (adapter->flags & USING_MSIX)
1636                                 q->polling = t.polling;
1637                         else {
1638                                 /* No polling with INTx for T3A */
1639                                 if (adapter->params.rev == 0 &&
1640                                         !(adapter->flags & USING_MSI))
1641                                         t.polling = 0;
1642
1643                                 for (i = 0; i < SGE_QSETS; i++) {
                                        q = &adapter->params.sge.qset[i];
1646                                         q->polling = t.polling;
1647                                 }
1648                         }
1649                 }
1650                 break;
1651         }
1652         case CHELSIO_GET_QSET_PARAMS:{
1653                 struct qset_params *q;
1654                 struct ch_qset_params t;
1655
1656                 if (copy_from_user(&t, useraddr, sizeof(t)))
1657                         return -EFAULT;
1658                 if (t.qset_idx >= SGE_QSETS)
1659                         return -EINVAL;
1660
1661                 q = &adapter->params.sge.qset[t.qset_idx];
1662                 t.rspq_size = q->rspq_size;
1663                 t.txq_size[0] = q->txq_size[0];
1664                 t.txq_size[1] = q->txq_size[1];
1665                 t.txq_size[2] = q->txq_size[2];
1666                 t.fl_size[0] = q->fl_size;
1667                 t.fl_size[1] = q->jumbo_size;
1668                 t.polling = q->polling;
1669                 t.intr_lat = q->coalesce_usecs;
1670                 t.cong_thres = q->cong_thres;
1671
1672                 if (copy_to_user(useraddr, &t, sizeof(t)))
1673                         return -EFAULT;
1674                 break;
1675         }
1676         case CHELSIO_SET_QSET_NUM:{
1677                 struct ch_reg edata;
1678                 struct port_info *pi = netdev_priv(dev);
1679                 unsigned int i, first_qset = 0, other_qsets = 0;
1680
1681                 if (!capable(CAP_NET_ADMIN))
1682                         return -EPERM;
1683                 if (adapter->flags & FULL_INIT_DONE)
1684                         return -EBUSY;
1685                 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1686                         return -EFAULT;
                if (edata.val < 1 ||
                    (edata.val > 1 && !(adapter->flags & USING_MSIX)))
                        return -EINVAL;
1690
1691                 for_each_port(adapter, i)
1692                         if (adapter->port[i] && adapter->port[i] != dev)
1693                                 other_qsets += adap2pinfo(adapter, i)->nqsets;
1694
1695                 if (edata.val + other_qsets > SGE_QSETS)
1696                         return -EINVAL;
1697
1698                 pi->nqsets = edata.val;
1699
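                /* Renumber first_qset so the queue sets stay contiguous
                 * across the adapter's ports. */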
1700                 for_each_port(adapter, i)
1701                         if (adapter->port[i]) {
1702                                 pi = adap2pinfo(adapter, i);
1703                                 pi->first_qset = first_qset;
1704                                 first_qset += pi->nqsets;
1705                         }
1706                 break;
1707         }
1708         case CHELSIO_GET_QSET_NUM:{
1709                 struct ch_reg edata;
1710                 struct port_info *pi = netdev_priv(dev);
1711
1712                 edata.cmd = CHELSIO_GET_QSET_NUM;
1713                 edata.val = pi->nqsets;
1714                 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1715                         return -EFAULT;
1716                 break;
1717         }
1718         case CHELSIO_LOAD_FW:{
1719                 u8 *fw_data;
1720                 struct ch_mem_range t;
1721
1722                 if (!capable(CAP_NET_ADMIN))
1723                         return -EPERM;
1724                 if (copy_from_user(&t, useraddr, sizeof(t)))
1725                         return -EFAULT;
1726
1727                 fw_data = kmalloc(t.len, GFP_KERNEL);
1728                 if (!fw_data)
1729                         return -ENOMEM;
1730
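                /* The firmware image itself follows the request header
                 * in the user buffer. */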
                if (copy_from_user(fw_data, useraddr + sizeof(t), t.len)) {
1733                         kfree(fw_data);
1734                         return -EFAULT;
1735                 }
1736
1737                 ret = t3_load_fw(adapter, fw_data, t.len);
1738                 kfree(fw_data);
1739                 if (ret)
1740                         return ret;
1741                 break;
1742         }
1743         case CHELSIO_SETMTUTAB:{
1744                 struct ch_mtus m;
1745                 int i;
1746
1747                 if (!is_offload(adapter))
1748                         return -EOPNOTSUPP;
1749                 if (!capable(CAP_NET_ADMIN))
1750                         return -EPERM;
1751                 if (offload_running(adapter))
1752                         return -EBUSY;
1753                 if (copy_from_user(&m, useraddr, sizeof(m)))
1754                         return -EFAULT;
1755                 if (m.nmtus != NMTUS)
1756                         return -EINVAL;
1757                 if (m.mtus[0] < 81)     /* accommodate SACK */
1758                         return -EINVAL;
1759
1760                 /* MTUs must be in ascending order */
1761                 for (i = 1; i < NMTUS; ++i)
1762                         if (m.mtus[i] < m.mtus[i - 1])
1763                                 return -EINVAL;
1764
1765                 memcpy(adapter->params.mtus, m.mtus,
1766                         sizeof(adapter->params.mtus));
1767                 break;
1768         }
1769         case CHELSIO_GET_PM:{
1770                 struct tp_params *p = &adapter->params.tp;
1771                 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
1772
1773                 if (!is_offload(adapter))
1774                         return -EOPNOTSUPP;
1775                 m.tx_pg_sz = p->tx_pg_size;
1776                 m.tx_num_pg = p->tx_num_pgs;
1777                 m.rx_pg_sz = p->rx_pg_size;
1778                 m.rx_num_pg = p->rx_num_pgs;
1779                 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
1780                 if (copy_to_user(useraddr, &m, sizeof(m)))
1781                         return -EFAULT;
1782                 break;
1783         }
1784         case CHELSIO_SET_PM:{
1785                 struct ch_pm m;
1786                 struct tp_params *p = &adapter->params.tp;
1787
1788                 if (!is_offload(adapter))
1789                         return -EOPNOTSUPP;
1790                 if (!capable(CAP_NET_ADMIN))
1791                         return -EPERM;
1792                 if (adapter->flags & FULL_INIT_DONE)
1793                         return -EBUSY;
1794                 if (copy_from_user(&m, useraddr, sizeof(m)))
1795                         return -EFAULT;
1796                 if (!m.rx_pg_sz || (m.rx_pg_sz & (m.rx_pg_sz - 1)) ||
1797                         !m.tx_pg_sz || (m.tx_pg_sz & (m.tx_pg_sz - 1)))
1798                         return -EINVAL; /* not power of 2 */
1799                 if (!(m.rx_pg_sz & 0x14000))
1800                         return -EINVAL; /* not 16KB or 64KB */
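                /* tx page size must be one of 16KB..16MB in powers of 4 */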
1801                 if (!(m.tx_pg_sz & 0x1554000))
1802                         return -EINVAL;
1803                 if (m.tx_num_pg == -1)
1804                         m.tx_num_pg = p->tx_num_pgs;
1805                 if (m.rx_num_pg == -1)
1806                         m.rx_num_pg = p->rx_num_pgs;
1807                 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
1808                         return -EINVAL;
1809                 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
1810                         m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
1811                         return -EINVAL;
1812                 p->rx_pg_size = m.rx_pg_sz;
1813                 p->tx_pg_size = m.tx_pg_sz;
1814                 p->rx_num_pgs = m.rx_num_pg;
1815                 p->tx_num_pgs = m.tx_num_pg;
1816                 break;
1817         }
1818         case CHELSIO_GET_MEM:{
1819                 struct ch_mem_range t;
1820                 struct mc7 *mem;
1821                 u64 buf[32];
1822
1823                 if (!is_offload(adapter))
1824                         return -EOPNOTSUPP;
1825                 if (!(adapter->flags & FULL_INIT_DONE))
1826                         return -EIO;    /* need the memory controllers */
1827                 if (copy_from_user(&t, useraddr, sizeof(t)))
1828                         return -EFAULT;
1829                 if ((t.addr & 7) || (t.len & 7))
1830                         return -EINVAL;
1831                 if (t.mem_id == MEM_CM)
1832                         mem = &adapter->cm;
1833                 else if (t.mem_id == MEM_PMRX)
1834                         mem = &adapter->pmrx;
1835                 else if (t.mem_id == MEM_PMTX)
1836                         mem = &adapter->pmtx;
1837                 else
1838                         return -EINVAL;
1839
                /*
                 * Version scheme:
                 * bits 0..9: chip version
                 * bits 10..15: chip revision
                 */
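                /* e.g. a rev-2 T3 chip yields 3 | (2 << 10) == 0x803 */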
1845                 t.version = 3 | (adapter->params.rev << 10);
1846                 if (copy_to_user(useraddr, &t, sizeof(t)))
1847                         return -EFAULT;
1848
1849                 /*
1850                  * Read 256 bytes at a time as len can be large and we don't
1851                  * want to use huge intermediate buffers.
1852                  */
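                /* buf holds 32 u64s, i.e. the 256 bytes read per pass */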
1853                 useraddr += sizeof(t);  /* advance to start of buffer */
1854                 while (t.len) {
                        unsigned int chunk = min_t(unsigned int, t.len,
                                                   sizeof(buf));

                        ret = t3_mc7_bd_read(mem, t.addr / 8, chunk / 8, buf);
1861                         if (ret)
1862                                 return ret;
1863                         if (copy_to_user(useraddr, buf, chunk))
1864                                 return -EFAULT;
1865                         useraddr += chunk;
1866                         t.addr += chunk;
1867                         t.len -= chunk;
1868                 }
1869                 break;
1870         }
1871         case CHELSIO_SET_TRACE_FILTER:{
1872                 struct ch_trace t;
1873                 const struct trace_params *tp;
1874
1875                 if (!capable(CAP_NET_ADMIN))
1876                         return -EPERM;
1877                 if (!offload_running(adapter))
1878                         return -EAGAIN;
1879                 if (copy_from_user(&t, useraddr, sizeof(t)))
1880                         return -EFAULT;
1881
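                /*
                 * This cast assumes the sip..trace_rx fields of ch_trace
                 * are laid out exactly like struct trace_params.
                 */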
1882                 tp = (const struct trace_params *)&t.sip;
1883                 if (t.config_tx)
1884                         t3_config_trace_filter(adapter, tp, 0,
1885                                                 t.invert_match,
1886                                                 t.trace_tx);
1887                 if (t.config_rx)
1888                         t3_config_trace_filter(adapter, tp, 1,
1889                                                 t.invert_match,
1890                                                 t.trace_rx);
1891                 break;
1892         }
1893         case CHELSIO_SET_PKTSCHED:{
1894                 struct ch_pktsched_params p;
1895
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                if (!adapter->open_device_map)
                        return -EAGAIN; /* uP and SGE must be running */
                if (copy_from_user(&p, useraddr, sizeof(p)))
                        return -EFAULT;
                send_pktsched_cmd(adapter, p.sched, p.idx, p.min, p.max,
                                  p.binding);
                break;
1906         }
1907         default:
1908                 return -EOPNOTSUPP;
1909         }
1910         return 0;
1911 }
1912
1913 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
1914 {
1915         int ret, mmd;
1916         struct adapter *adapter = dev->priv;
1917         struct port_info *pi = netdev_priv(dev);
1918         struct mii_ioctl_data *data = if_mii(req);
1919
1920         switch (cmd) {
1921         case SIOCGMIIPHY:
1922                 data->phy_id = pi->phy.addr;
1923                 /* FALLTHRU */
1924         case SIOCGMIIREG:{
1925                 u32 val;
1926                 struct cphy *phy = &pi->phy;
1927
1928                 if (!phy->mdio_read)
1929                         return -EOPNOTSUPP;
1930                 if (is_10G(adapter)) {
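                        /*
                         * 10G PHYs are clause-45 MDIO devices; the MMD is
                         * carried in the upper bits of phy_id and defaults
                         * to the PCS device.
                         */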
1931                         mmd = data->phy_id >> 8;
1932                         if (!mmd)
1933                                 mmd = MDIO_DEV_PCS;
1934                         else if (mmd > MDIO_DEV_XGXS)
1935                                 return -EINVAL;
1936
                        ret = phy->mdio_read(adapter, data->phy_id & 0x1f,
                                             mmd, data->reg_num, &val);
                } else
                        ret = phy->mdio_read(adapter, data->phy_id & 0x1f, 0,
                                             data->reg_num & 0x1f, &val);
1945                 if (!ret)
1946                         data->val_out = val;
1947                 break;
1948         }
1949         case SIOCSMIIREG:{
1950                 struct cphy *phy = &pi->phy;
1951
1952                 if (!capable(CAP_NET_ADMIN))
1953                         return -EPERM;
1954                 if (!phy->mdio_write)
1955                         return -EOPNOTSUPP;
1956                 if (is_10G(adapter)) {
1957                         mmd = data->phy_id >> 8;
1958                         if (!mmd)
1959                                 mmd = MDIO_DEV_PCS;
1960                         else if (mmd > MDIO_DEV_XGXS)
1961                                 return -EINVAL;
1962
                        ret = phy->mdio_write(adapter, data->phy_id & 0x1f,
                                              mmd, data->reg_num,
                                              data->val_in);
                } else
                        ret = phy->mdio_write(adapter, data->phy_id & 0x1f, 0,
                                              data->reg_num & 0x1f,
                                              data->val_in);
1974                 break;
1975         }
1976         case SIOCCHIOCTL:
1977                 return cxgb_extension_ioctl(dev, req->ifr_data);
1978         default:
1979                 return -EOPNOTSUPP;
1980         }
1981         return ret;
1982 }
1983
1984 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
1985 {
1986         int ret;
1987         struct adapter *adapter = dev->priv;
1988         struct port_info *pi = netdev_priv(dev);
1989
1990         if (new_mtu < 81)       /* accommodate SACK */
1991                 return -EINVAL;
1992         if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
1993                 return ret;
1994         dev->mtu = new_mtu;
1995         init_port_mtus(adapter);
1996         if (adapter->params.rev == 0 && offload_running(adapter))
1997                 t3_load_mtus(adapter, adapter->params.mtus,
1998                              adapter->params.a_wnd, adapter->params.b_wnd,
1999                              adapter->port[0]->mtu);
2000         return 0;
2001 }
2002
2003 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2004 {
2005         struct adapter *adapter = dev->priv;
2006         struct port_info *pi = netdev_priv(dev);
2007         struct sockaddr *addr = p;
2008
2009         if (!is_valid_ether_addr(addr->sa_data))
2010                 return -EINVAL;
2011
2012         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2013         t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2014         if (offload_running(adapter))
2015                 write_smt_entry(adapter, pi->port_id);
2016         return 0;
2017 }
2018
2019 /**
2020  * t3_synchronize_rx - wait for current Rx processing on a port to complete
2021  * @adap: the adapter
2022  * @p: the port
2023  *
 * Ensures that current Rx processing on any of the queues associated with
 * the given port completes before returning.  We do this by acquiring and
 * releasing the locks of the response queues associated with the port: any
 * Rx handler still running holds its queue's lock, so an empty lock/unlock
 * pair cannot complete until that handler has finished.
2027  */
2028 static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2029 {
2030         int i;
2031
2032         for (i = 0; i < p->nqsets; i++) {
2033                 struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;
2034
2035                 spin_lock_irq(&q->lock);
2036                 spin_unlock_irq(&q->lock);
2037         }
2038 }
2039
2040 static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2041 {
2042         struct adapter *adapter = dev->priv;
2043         struct port_info *pi = netdev_priv(dev);
2044
2045         pi->vlan_grp = grp;
2046         if (adapter->params.rev > 0)
2047                 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2048         else {
2049                 /* single control for all ports */
2050                 unsigned int i, have_vlans = 0;
2051                 for_each_port(adapter, i)
                        have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2053
2054                 t3_set_vlan_accel(adapter, 1, have_vlans);
2055         }
2056         t3_synchronize_rx(adapter, pi);
2057 }
2058
2059 static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
2060 {
2061         /* nothing */
2062 }
2063
2064 #ifdef CONFIG_NET_POLL_CONTROLLER
2065 static void cxgb_netpoll(struct net_device *dev)
2066 {
2067         struct adapter *adapter = dev->priv;
2068         struct sge_qset *qs = dev2qset(dev);
2069
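        /* Run the handler t3_intr_handler() selects for the current
         * interrupt mode, as if the interrupt had actually fired. */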
        t3_intr_handler(adapter, qs->rspq.polling)(adapter->pdev->irq,
                                                   adapter);
2072 }
2073 #endif
2074
2075 /*
2076  * Periodic accumulation of MAC statistics.
2077  */
2078 static void mac_stats_update(struct adapter *adapter)
2079 {
2080         int i;
2081
2082         for_each_port(adapter, i) {
2083                 struct net_device *dev = adapter->port[i];
2084                 struct port_info *p = netdev_priv(dev);
2085
2086                 if (netif_running(dev)) {
2087                         spin_lock(&adapter->stats_lock);
2088                         t3_mac_update_stats(&p->mac);
2089                         spin_unlock(&adapter->stats_lock);
2090                 }
2091         }
2092 }
2093
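/* Poll the link state of ports whose PHYs cannot signal it by interrupt. */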
2094 static void check_link_status(struct adapter *adapter)
2095 {
2096         int i;
2097
2098         for_each_port(adapter, i) {
2099                 struct net_device *dev = adapter->port[i];
2100                 struct port_info *p = netdev_priv(dev);
2101
2102                 if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
2103                         t3_link_changed(adapter, i);
2104         }
2105 }
2106
2107 static void t3_adap_check_task(struct work_struct *work)
2108 {
2109         struct adapter *adapter = container_of(work, struct adapter,
2110                                                adap_check_task.work);
2111         const struct adapter_params *p = &adapter->params;
2112
2113         adapter->check_task_cnt++;
2114
2115         /* Check link status for PHYs without interrupts */
2116         if (p->linkpoll_period)
2117                 check_link_status(adapter);
2118
2119         /* Accumulate MAC stats if needed */
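        /* (linkpoll_period is apparently in 0.1s units, so this fires
         *  roughly once every stats_update_period seconds.) */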
2120         if (!p->linkpoll_period ||
2121             (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2122             p->stats_update_period) {
2123                 mac_stats_update(adapter);
2124                 adapter->check_task_cnt = 0;
2125         }
2126
2127         /* Schedule the next check update if any port is active. */
2128         spin_lock(&adapter->work_lock);
2129         if (adapter->open_device_map & PORT_MASK)
2130                 schedule_chk_task(adapter);
2131         spin_unlock(&adapter->work_lock);
2132 }
2133
2134 /*
2135  * Processes external (PHY) interrupts in process context.
2136  */
2137 static void ext_intr_task(struct work_struct *work)
2138 {
2139         struct adapter *adapter = container_of(work, struct adapter,
2140                                                ext_intr_handler_task);
2141
2142         t3_phy_intr_handler(adapter);
2143
2144         /* Now reenable external interrupts */
2145         spin_lock_irq(&adapter->work_lock);
2146         if (adapter->slow_intr_mask) {
2147                 adapter->slow_intr_mask |= F_T3DBG;
2148                 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2149                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2150                              adapter->slow_intr_mask);
2151         }
2152         spin_unlock_irq(&adapter->work_lock);
2153 }
2154
2155 /*
2156  * Interrupt-context handler for external (PHY) interrupts.
2157  */
2158 void t3_os_ext_intr_handler(struct adapter *adapter)
2159 {
2160         /*
2161          * Schedule a task to handle external interrupts as they may be slow
2162          * and we use a mutex to protect MDIO registers.  We disable PHY
2163          * interrupts in the meantime and let the task reenable them when
2164          * it's done.
2165          */
2166         spin_lock(&adapter->work_lock);
2167         if (adapter->slow_intr_mask) {
2168                 adapter->slow_intr_mask &= ~F_T3DBG;
2169                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2170                              adapter->slow_intr_mask);
2171                 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2172         }
2173         spin_unlock(&adapter->work_lock);
2174 }
2175
2176 void t3_fatal_err(struct adapter *adapter)
2177 {
2178         unsigned int fw_status[4];
2179
2180         if (adapter->flags & FULL_INIT_DONE) {
2181                 t3_sge_stop(adapter);
2182                 t3_intr_disable(adapter);
2183         }
2184         CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2185         if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2186                 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2187                          fw_status[0], fw_status[1],
2188                          fw_status[2], fw_status[3]);
}
2191
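/*
 * Request one MSI-X vector per queue set, plus one extra for slow-path
 * (non-data) interrupts.
 */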
2192 static int __devinit cxgb_enable_msix(struct adapter *adap)
2193 {
2194         struct msix_entry entries[SGE_QSETS + 1];
2195         int i, err;
2196
2197         for (i = 0; i < ARRAY_SIZE(entries); ++i)
2198                 entries[i].entry = i;
2199
2200         err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
2201         if (!err) {
2202                 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2203                         adap->msix_info[i].vec = entries[i].vector;
2204         } else if (err > 0)
                dev_info(&adap->pdev->dev,
                         "only %d MSI-X vectors left, not using MSI-X\n", err);
2207         return err;
2208 }
2209
2210 static void __devinit print_port_info(struct adapter *adap,
2211                                       const struct adapter_info *ai)
2212 {
2213         static const char *pci_variant[] = {
2214                 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
2215         };
2216
2217         int i;
2218         char buf[80];
2219
2220         if (is_pcie(adap))
2221                 snprintf(buf, sizeof(buf), "%s x%d",
2222                          pci_variant[adap->params.pci.variant],
2223                          adap->params.pci.width);
2224         else
2225                 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2226                          pci_variant[adap->params.pci.variant],
2227                          adap->params.pci.speed, adap->params.pci.width);
2228
2229         for_each_port(adap, i) {
2230                 struct net_device *dev = adap->port[i];
2231                 const struct port_info *pi = netdev_priv(dev);
2232
2233                 if (!test_bit(i, &adap->registered_device_map))
2234                         continue;
2235                 printk(KERN_INFO "%s: %s %s RNIC (rev %d) %s%s\n",
2236                        dev->name, ai->desc, pi->port_type->desc,
2237                        adap->params.rev, buf,
2238                        (adap->flags & USING_MSIX) ? " MSI-X" :
2239                        (adap->flags & USING_MSI) ? " MSI" : "");
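                /* adap->name points at the first registered netdev's
                 * name, so this pointer compare identifies that device. */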
2240                 if (adap->name == dev->name && adap->params.vpd.mclk)
2241                         printk(KERN_INFO "%s: %uMB CM, %uMB PMTX, %uMB PMRX\n",
2242                                adap->name, t3_mc7_size(&adap->cm) >> 20,
2243                                t3_mc7_size(&adap->pmtx) >> 20,
2244                                t3_mc7_size(&adap->pmrx) >> 20);
2245         }
2246 }
2247
2248 static int __devinit init_one(struct pci_dev *pdev,
2249                               const struct pci_device_id *ent)
2250 {
2251         static int version_printed;
2252
2253         int i, err, pci_using_dac = 0;
2254         unsigned long mmio_start, mmio_len;
2255         const struct adapter_info *ai;
2256         struct adapter *adapter = NULL;
2257         struct port_info *pi;
2258
2259         if (!version_printed) {
2260                 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
2261                 ++version_printed;
2262         }
2263
2264         if (!cxgb3_wq) {
2265                 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
2266                 if (!cxgb3_wq) {
2267                         printk(KERN_ERR DRV_NAME
2268                                ": cannot initialize work queue\n");
2269                         return -ENOMEM;
2270                 }
2271         }
2272
2273         err = pci_request_regions(pdev, DRV_NAME);
2274         if (err) {
2275                 /* Just info, some other driver may have claimed the device. */
2276                 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
2277                 return err;
2278         }
2279
2280         err = pci_enable_device(pdev);
2281         if (err) {
2282                 dev_err(&pdev->dev, "cannot enable PCI device\n");
2283                 goto out_release_regions;
2284         }
2285
2286         if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2287                 pci_using_dac = 1;
2288                 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2289                 if (err) {
2290                         dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
2291                                "coherent allocations\n");
2292                         goto out_disable_device;
2293                 }
2294         } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
2295                 dev_err(&pdev->dev, "no usable DMA configuration\n");
2296                 goto out_disable_device;
2297         }
2298
2299         pci_set_master(pdev);
2300
2301         mmio_start = pci_resource_start(pdev, 0);
2302         mmio_len = pci_resource_len(pdev, 0);
2303         ai = t3_get_adapter_info(ent->driver_data);
2304
2305         adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2306         if (!adapter) {
2307                 err = -ENOMEM;
2308                 goto out_disable_device;
2309         }
2310
2311         adapter->regs = ioremap_nocache(mmio_start, mmio_len);
2312         if (!adapter->regs) {
2313                 dev_err(&pdev->dev, "cannot map device registers\n");
2314                 err = -ENOMEM;
2315                 goto out_free_adapter;
2316         }
2317
2318         adapter->pdev = pdev;
2319         adapter->name = pci_name(pdev);
2320         adapter->msg_enable = dflt_msg_enable;
2321         adapter->mmio_len = mmio_len;
2322
2323         mutex_init(&adapter->mdio_lock);
2324         spin_lock_init(&adapter->work_lock);
2325         spin_lock_init(&adapter->stats_lock);
2326
2327         INIT_LIST_HEAD(&adapter->adapter_list);
2328         INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
2329         INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
2330
2331         for (i = 0; i < ai->nports; ++i) {
2332                 struct net_device *netdev;
2333
2334                 netdev = alloc_etherdev(sizeof(struct port_info));
2335                 if (!netdev) {
2336                         err = -ENOMEM;
2337                         goto out_free_dev;
2338                 }
2339
2340                 SET_MODULE_OWNER(netdev);
2341                 SET_NETDEV_DEV(netdev, &pdev->dev);
2342
2343                 adapter->port[i] = netdev;
2344                 pi = netdev_priv(netdev);
2345                 pi->rx_csum_offload = 1;
2346                 pi->nqsets = 1;
2347                 pi->first_qset = i;
2348                 pi->activity = 0;
2349                 pi->port_id = i;
2350                 netif_carrier_off(netdev);
2351                 netdev->irq = pdev->irq;
2352                 netdev->mem_start = mmio_start;
2353                 netdev->mem_end = mmio_start + mmio_len - 1;
2354                 netdev->priv = adapter;
2355                 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
2356                 netdev->features |= NETIF_F_LLTX;
2357                 if (pci_using_dac)
2358                         netdev->features |= NETIF_F_HIGHDMA;
2359
2360                 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2361                 netdev->vlan_rx_register = vlan_rx_register;
2362                 netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;
2363
2364                 netdev->open = cxgb_open;
2365                 netdev->stop = cxgb_close;
2366                 netdev->hard_start_xmit = t3_eth_xmit;
2367                 netdev->get_stats = cxgb_get_stats;
2368                 netdev->set_multicast_list = cxgb_set_rxmode;
2369                 netdev->do_ioctl = cxgb_ioctl;
2370                 netdev->change_mtu = cxgb_change_mtu;
2371                 netdev->set_mac_address = cxgb_set_mac_addr;
2372 #ifdef CONFIG_NET_POLL_CONTROLLER
2373                 netdev->poll_controller = cxgb_netpoll;
2374 #endif
2375                 netdev->weight = 64;
2376
2377                 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
2378         }
2379
2380         pci_set_drvdata(pdev, adapter->port[0]);
2381         if (t3_prep_adapter(adapter, ai, 1) < 0) {
2382                 err = -ENODEV;
2383                 goto out_free_dev;
2384         }
2385
2386         /*
2387          * The card is now ready to go.  If any errors occur during device
2388          * registration we do not fail the whole card but rather proceed only
2389          * with the ports we manage to register successfully.  However we must
2390          * register at least one net device.
2391          */
2392         for_each_port(adapter, i) {
2393                 err = register_netdev(adapter->port[i]);
2394                 if (err)
2395                         dev_warn(&pdev->dev,
2396                                  "cannot register net device %s, skipping\n",
2397                                  adapter->port[i]->name);
2398                 else {
2399                         /*
2400                          * Change the name we use for messages to the name of
2401                          * the first successfully registered interface.
2402                          */
2403                         if (!adapter->registered_device_map)
2404                                 adapter->name = adapter->port[i]->name;
2405
2406                         __set_bit(i, &adapter->registered_device_map);
2407                 }
2408         }
2409         if (!adapter->registered_device_map) {
2410                 dev_err(&pdev->dev, "could not register any net devices\n");
2411                 goto out_free_dev;
2412         }
2413
2414         /* Driver's ready. Reflect it on LEDs */
2415         t3_led_ready(adapter);
2416
2417         if (is_offload(adapter)) {
2418                 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
2419                 cxgb3_adapter_ofld(adapter);
2420         }
2421
2422         /* See what interrupts we'll be using */
2423         if (msi > 1 && cxgb_enable_msix(adapter) == 0)
2424                 adapter->flags |= USING_MSIX;
2425         else if (msi > 0 && pci_enable_msi(pdev) == 0)
2426                 adapter->flags |= USING_MSI;
2427
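        /* The sysfs attribute group appears to be best-effort: a failure
         * here is not treated as fatal. */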
2428         err = sysfs_create_group(&adapter->port[0]->dev.kobj,
2429                                  &cxgb3_attr_group);
2430
2431         print_port_info(adapter, ai);
2432         return 0;
2433
2434 out_free_dev:
2435         iounmap(adapter->regs);
2436         for (i = ai->nports - 1; i >= 0; --i)
2437                 if (adapter->port[i])
2438                         free_netdev(adapter->port[i]);
2439
2440 out_free_adapter:
2441         kfree(adapter);
2442
2443 out_disable_device:
2444         pci_disable_device(pdev);
2445 out_release_regions:
2446         pci_release_regions(pdev);
2447         pci_set_drvdata(pdev, NULL);
2448         return err;
2449 }
2450
2451 static void __devexit remove_one(struct pci_dev *pdev)
2452 {
2453         struct net_device *dev = pci_get_drvdata(pdev);
2454
2455         if (dev) {
2456                 int i;
2457                 struct adapter *adapter = dev->priv;
2458
2459                 t3_sge_stop(adapter);
2460                 sysfs_remove_group(&adapter->port[0]->dev.kobj,
2461                                    &cxgb3_attr_group);
2462
2463                 for_each_port(adapter, i)
                        if (test_bit(i, &adapter->registered_device_map))
                                unregister_netdev(adapter->port[i]);
2466
2467                 if (is_offload(adapter)) {
2468                         cxgb3_adapter_unofld(adapter);
2469                         if (test_bit(OFFLOAD_DEVMAP_BIT,
2470                                      &adapter->open_device_map))
2471                                 offload_close(&adapter->tdev);
2472                 }
2473
2474                 t3_free_sge_resources(adapter);
2475                 cxgb_disable_msi(adapter);
2476
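                /* Free the dummy netdevs, apparently used as NAPI anchors
                 * for the extra offload queue sets. */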
2477                 for (i = 0; i < ARRAY_SIZE(adapter->dummy_netdev); i++)
2478                         if (adapter->dummy_netdev[i]) {
2479                                 free_netdev(adapter->dummy_netdev[i]);
2480                                 adapter->dummy_netdev[i] = NULL;
2481                         }
2482
2483                 for_each_port(adapter, i)
2484                         if (adapter->port[i])
2485                                 free_netdev(adapter->port[i]);
2486
2487                 iounmap(adapter->regs);
2488                 kfree(adapter);
2489                 pci_release_regions(pdev);
2490                 pci_disable_device(pdev);
2491                 pci_set_drvdata(pdev, NULL);
2492         }
2493 }
2494
2495 static struct pci_driver driver = {
2496         .name = DRV_NAME,
2497         .id_table = cxgb3_pci_tbl,
2498         .probe = init_one,
2499         .remove = __devexit_p(remove_one),
2500 };
2501
2502 static int __init cxgb3_init_module(void)
2503 {
2504         int ret;
2505
2506         cxgb3_offload_init();
2507
2508         ret = pci_register_driver(&driver);
2509         return ret;
2510 }
2511
2512 static void __exit cxgb3_cleanup_module(void)
2513 {
2514         pci_unregister_driver(&driver);
2515         if (cxgb3_wq)
2516                 destroy_workqueue(cxgb3_wq);
2517 }
2518
2519 module_init(cxgb3_init_module);
2520 module_exit(cxgb3_cleanup_module);