/*
 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

enum {
        MAX_TXQ_ENTRIES = 16384,
        MAX_CTRL_TXQ_ENTRIES = 1024,
        MAX_RSPQ_ENTRIES = 16384,
        MAX_RX_BUFFERS = 16384,
        MAX_RX_JUMBO_BUFFERS = 16384,
        MIN_TXQ_ENTRIES = 4,
        MIN_CTRL_TXQ_ENTRIES = 4,
        MIN_RSPQ_ENTRIES = 32,
        MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, ssid, idx) \
        { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }

static const struct pci_device_id cxgb3_pci_tbl[] = {
        CH_DEVICE(0x20, 1, 0),  /* PE9000 */
        CH_DEVICE(0x21, 1, 1),  /* T302E */
        CH_DEVICE(0x22, 1, 2),  /* T310E */
        CH_DEVICE(0x23, 1, 3),  /* T320X */
        CH_DEVICE(0x24, 1, 1),  /* T302X */
        CH_DEVICE(0x25, 1, 3),  /* T320E */
        CH_DEVICE(0x26, 1, 2),  /* T310X */
        CH_DEVICE(0x30, 1, 2),  /* T3B10 */
        CH_DEVICE(0x31, 1, 3),  /* T3B20 */
        CH_DEVICE(0x32, 1, 1),  /* T3B02 */
        {0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "interrupt scheme: 0 = legacy pin only, 1 = allow MSI, 2 = also allow MSI-X");

/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */

static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
static struct workqueue_struct *cxgb3_wq;
/**
 *      link_report - show link status and link speed/duplex
 *      @dev: the port whose link settings are to be reported
 *
 *      Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                printk(KERN_INFO "%s: link down\n", dev->name);
        else {
                const char *s = "10Mbps";
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_config.speed) {
                case SPEED_10000:
                        s = "10Gbps";
                        break;
                case SPEED_1000:
                        s = "1000Mbps";
                        break;
                case SPEED_100:
                        s = "100Mbps";
                        break;
                }

                printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
                       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
        }
}

/**
 *      t3_os_link_changed - handle link status changes
 *      @adapter: the adapter associated with the link change
 *      @port_id: the port index whose link status has changed
 *      @link_stat: the new status of the link
 *      @speed: the new speed setting
 *      @duplex: the new duplex setting
 *      @pause: the new flow-control setting
 *
 *      This is the OS-dependent handler for link status changes.  The OS
 *      neutral handler takes care of most of the processing for these events,
 *      then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
                        int speed, int duplex, int pause)
{
        struct net_device *dev = adapter->port[port_id];
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        /* Skip changes from disabled ports. */
        if (!netif_running(dev))
                return;

        if (link_stat != netif_carrier_ok(dev)) {
                if (link_stat) {
                        t3_mac_enable(mac, MAC_DIRECTION_RX);
                        netif_carrier_on(dev);
                } else {
                        netif_carrier_off(dev);
                        pi->phy.ops->power_down(&pi->phy, 1);
                        t3_mac_disable(mac, MAC_DIRECTION_RX);
                        t3_link_start(&pi->phy, mac, &pi->link_config);
                }

                link_report(dev);
        }
}

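/*
 * Apply a port's current receive mode (promiscuity and multicast filters)
 * from its net_device state to the MAC.
 */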
static void cxgb_set_rxmode(struct net_device *dev)
{
        struct t3_rx_mode rm;
        struct port_info *pi = netdev_priv(dev);

        init_rx_mode(&rm, dev, dev->mc_list);
        t3_mac_set_rx_mode(&pi->mac, &rm);
}

/**
 *      link_start - enable a port
 *      @dev: the device to enable
 *
 *      Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
        struct t3_rx_mode rm;
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        init_rx_mode(&rm, dev, dev->mc_list);
        t3_mac_reset(mac);
        t3_mac_set_mtu(mac, dev->mtu);
        t3_mac_set_address(mac, 0, dev->dev_addr);
        t3_mac_set_rx_mode(mac, &rm);
        t3_link_start(&pi->phy, mac, &pi->link_config);
        t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

static inline void cxgb_disable_msi(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~USING_MSIX;
        } else if (adapter->flags & USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~USING_MSI;
        }
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
        t3_slow_intr_handler(cookie);
        return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
        int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

        snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
        adap->msix_info[0].desc[n] = 0;

        for_each_port(adap, j) {
                struct net_device *d = adap->port[j];
                const struct port_info *pi = netdev_priv(d);

                for (i = 0; i < pi->nqsets; i++, msi_idx++) {
                        snprintf(adap->msix_info[msi_idx].desc, n,
                                 "%s (queue %d)", d->name, i);
                        adap->msix_info[msi_idx].desc[n] = 0;
                }
        }
}

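/*
 * Request the per-queue-set MSI-X data interrupts.  Vector 0 is reserved
 * for asynchronous/slow-path events (see cxgb_up()), so queue set i uses
 * vector i + 1.  On failure, any vectors already requested are released.
 */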
static int request_msix_data_irqs(struct adapter *adap)
{
        int i, j, err, qidx = 0;

        for_each_port(adap, i) {
                int nqsets = adap2pinfo(adap, i)->nqsets;

                for (j = 0; j < nqsets; ++j) {
                        err = request_irq(adap->msix_info[qidx + 1].vec,
                                          t3_intr_handler(adap,
                                                          adap->sge.qs[qidx].
                                                          rspq.polling), 0,
                                          adap->msix_info[qidx + 1].desc,
                                          &adap->sge.qs[qidx]);
                        if (err) {
                                while (--qidx >= 0)
                                        free_irq(adap->msix_info[qidx + 1].vec,
                                                 &adap->sge.qs[qidx]);
                                return err;
                        }
                        qidx++;
                }
        }
        return 0;
}

/**
 *      setup_rss - configure RSS
 *      @adap: the adapter
 *
 *      Sets up RSS to distribute packets to multiple receive queues.  We
 *      configure the RSS CPU lookup table to distribute to the number of HW
 *      receive queues, and the response queue lookup table to narrow that
 *      down to the response queues actually configured for each port.
 *      We always configure the RSS mapping for two ports since the mapping
 *      table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
        int i;
        unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
        unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
        u8 cpus[SGE_QSETS + 1];
        u16 rspq_map[RSS_TABLE_SIZE];

        for (i = 0; i < SGE_QSETS; ++i)
                cpus[i] = i;
        cpus[SGE_QSETS] = 0xff; /* terminator */

        for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
                rspq_map[i] = i % nq0;
                rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
        }

        t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
                      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
                      V_RRCPLCPUSIZE(6), cpus, rspq_map);
}

/*
 * If we have multiple receive queues per port serviced by NAPI we need one
 * netdevice per queue as NAPI operates on netdevices.  We already have one
 * netdevice, namely the one associated with the interface, so we use dummy
 * ones for any additional queues.  Note that these netdevices exist purely
 * so that NAPI has something to work with, they do not represent network
 * ports and are not registered.
 */
static int init_dummy_netdevs(struct adapter *adap)
{
        int i, j, dummy_idx = 0;
        struct net_device *nd;

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                const struct port_info *pi = netdev_priv(dev);

                for (j = 0; j < pi->nqsets - 1; j++) {
                        if (!adap->dummy_netdev[dummy_idx]) {
                                struct port_info *p;

                                nd = alloc_netdev(sizeof(*p), "", ether_setup);
                                if (!nd)
                                        goto free_all;

                                p = netdev_priv(nd);
                                p->adapter = adap;
                                nd->weight = 64;
                                set_bit(__LINK_STATE_START, &nd->state);
                                adap->dummy_netdev[dummy_idx] = nd;
                        }
                        strcpy(adap->dummy_netdev[dummy_idx]->name, dev->name);
                        dummy_idx++;
                }
        }
        return 0;

free_all:
        while (--dummy_idx >= 0) {
                free_netdev(adap->dummy_netdev[dummy_idx]);
                adap->dummy_netdev[dummy_idx] = NULL;
        }
        return -ENOMEM;
}

/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
        int i;
        struct net_device *dev;

        for_each_port(adap, i) {
                dev = adap->port[i];
                while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
                        msleep(1);
        }

        for (i = 0; i < ARRAY_SIZE(adap->dummy_netdev); i++) {
                dev = adap->dummy_netdev[i];
                if (dev)
                        while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
                                msleep(1);
        }
}

/**
 *      setup_sge_qsets - configure SGE Tx/Rx/response queues
 *      @adap: the adapter
 *
 *      Determines how many sets of SGE queues to use and initializes them.
 *      We support multiple queue sets per port if we have MSI-X, otherwise
 *      just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
        int i, j, err, irq_idx = 0, qset_idx = 0, dummy_dev_idx = 0;
        unsigned int ntxq = SGE_TXQ_PER_SET;

        if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
                irq_idx = -1;

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                const struct port_info *pi = netdev_priv(dev);

                for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
                        err = t3_sge_alloc_qset(adap, qset_idx, 1,
                                (adap->flags & USING_MSIX) ? qset_idx + 1 :
                                                             irq_idx,
                                &adap->params.sge.qset[qset_idx], ntxq,
                                j == 0 ? dev :
                                         adap->dummy_netdev[dummy_dev_idx++]);
                        if (err) {
                                t3_free_sge_resources(adap);
                                return err;
                        }
                }
        }

        return 0;
}

static ssize_t attr_show(struct device *d, struct device_attribute *attr,
                         char *buf,
                         ssize_t (*format)(struct net_device *, char *))
{
        ssize_t len;

        /* Synchronize with ioctls that may shut down the device */
        rtnl_lock();
        len = (*format)(to_net_dev(d), buf);
        rtnl_unlock();
        return len;
}

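/*
 * sysfs store helper: parse an unsigned integer, range-check it against
 * [min_val, max_val], and apply it via @set under the RTNL lock so stores
 * cannot race with ioctls that reconfigure the device.  Requires
 * CAP_NET_ADMIN.
 */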
static ssize_t attr_store(struct device *d, struct device_attribute *attr,
                          const char *buf, size_t len,
                          ssize_t (*set)(struct net_device *, unsigned int),
                          unsigned int min_val, unsigned int max_val)
{
        char *endp;
        ssize_t ret;
        unsigned int val;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        val = simple_strtoul(buf, &endp, 0);
        if (endp == buf || val < min_val || val > max_val)
                return -EINVAL;

        rtnl_lock();
        ret = (*set)(to_net_dev(d), val);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
        struct port_info *pi = netdev_priv(dev); \
        struct adapter *adap = pi->adapter; \
        return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return attr_show(d, attr, buf, format_##name); \
}

static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val && adap->params.rev == 0)
                return -EINVAL;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
            min_tids)
                return -EINVAL;
        adap->params.mc5.nfilters = val;
        return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, attr, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
            MC5_MIN_TIDS)
                return -EINVAL;
        adap->params.mc5.nservers = val;
        return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, attr, buf, len, set_nservers, 0, ~0);
}

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
        &dev_attr_cam_size.attr,
        &dev_attr_nfilters.attr,
        &dev_attr_nservers.attr,
        NULL
};

static struct attribute_group cxgb3_attr_group = { .attrs = cxgb3_attrs };

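/*
 * Show the rate of one TX traffic scheduler.  Each 32-bit TM PIO word holds
 * the (bytes-per-tick, clocks-per-tick) pair for two schedulers; odd-numbered
 * schedulers use the upper halfword.  With cclk apparently in kHz, the rate
 * works out as (cclk * 1000 / cpt) ticks/sec times bpt bytes/tick, and
 * dividing bytes/sec by 125 converts it to Kbps.  A cpt of 0 means the
 * scheduler is disabled.
 */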
static ssize_t tm_attr_show(struct device *d, struct device_attribute *attr,
                            char *buf, int sched)
{
        struct port_info *pi = netdev_priv(to_net_dev(d));
        struct adapter *adap = pi->adapter;
        unsigned int v, addr, bpt, cpt;
        ssize_t len;

        addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
        rtnl_lock();
        t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
        v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
        if (sched & 1)
                v >>= 16;
        bpt = (v >> 8) & 0xff;
        cpt = v & 0xff;
        if (!cpt)
                len = sprintf(buf, "disabled\n");
        else {
                v = (adap->params.vpd.cclk * 1000) / cpt;
                len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
        }
        rtnl_unlock();
        return len;
}

static ssize_t tm_attr_store(struct device *d, struct device_attribute *attr,
                             const char *buf, size_t len, int sched)
{
        struct port_info *pi = netdev_priv(to_net_dev(d));
        struct adapter *adap = pi->adapter;
        unsigned int val;
        char *endp;
        ssize_t ret;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        val = simple_strtoul(buf, &endp, 0);
        if (endp == buf || val > 10000000)
                return -EINVAL;

        rtnl_lock();
        ret = t3_config_sched(adap, val, sched);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return tm_attr_show(d, attr, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
                            const char *buf, size_t len) \
{ \
        return tm_attr_store(d, attr, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
        &dev_attr_sched0.attr,
        &dev_attr_sched1.attr,
        &dev_attr_sched2.attr,
        &dev_attr_sched3.attr,
        &dev_attr_sched4.attr,
        &dev_attr_sched5.attr,
        &dev_attr_sched6.attr,
        &dev_attr_sched7.attr,
        NULL
};

static struct attribute_group offload_attr_group = { .attrs = offload_attrs };

/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
        int ret;

        local_bh_disable();
        ret = t3_offload_tx(tdev, skb);
        local_bh_enable();
        return ret;
}

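/*
 * Program one entry of the source MAC table (SMT), which the offload engine
 * presumably uses to pick the source MAC for egress connections, with the
 * corresponding port's MAC address.
 */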
static int write_smt_entry(struct adapter *adapter, int idx)
{
        struct cpl_smt_write_req *req;
        struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

        if (!skb)
                return -ENOMEM;

        req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
        req->mtu_idx = NMTUS - 1;       /* should be 0 but there's a T3 bug */
        req->iff = idx;
        memset(req->src_mac1, 0, sizeof(req->src_mac1));
        memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
        skb->priority = 1;
        offload_tx(&adapter->tdev, skb);
        return 0;
}

static int init_smt(struct adapter *adapter)
{
        int i;

        for_each_port(adapter, i)
                write_smt_entry(adapter, i);
        return 0;
}

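/*
 * Pack both ports' MTUs into the per-port MTU table register: port 0 in the
 * low 16 bits, port 1 in the high 16 bits.
 */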
static void init_port_mtus(struct adapter *adapter)
{
        unsigned int mtus = adapter->port[0]->mtu;

        if (adapter->port[1])
                mtus |= adapter->port[1]->mtu << 16;
        t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}

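/*
 * Send a management work request to the firmware to bind SGE queue set
 * @qidx to TX packet scheduler @sched and port @port.  A lo/hi of -1
 * presumably leaves the corresponding rate bound unchanged.
 */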
static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
                              int hi, int port)
{
        struct sk_buff *skb;
        struct mngt_pktsched_wr *req;

        skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
        req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
        req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
        req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
        req->sched = sched;
        req->idx = qidx;
        req->min = lo;
        req->max = hi;
        req->binding = port;
        t3_mgmt_tx(adap, skb);
}

static void bind_qsets(struct adapter *adap)
{
        int i, j;

        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                for (j = 0; j < pi->nqsets; ++j)
                        send_pktsched_cmd(adap, 1, pi->first_qset + j, -1,
                                          -1, i);
        }
}

#define FW_FNAME "t3fw-%d.%d.%d.bin"
#define TPSRAM_NAME "t3%c_protocol_sram-%d.%d.%d.bin"

static int upgrade_fw(struct adapter *adap)
{
        int ret;
        char buf[64];
        const struct firmware *fw;
        struct device *dev = &adap->pdev->dev;

        snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
                 FW_VERSION_MINOR, FW_VERSION_MICRO);
        ret = request_firmware(&fw, buf, dev);
        if (ret < 0) {
                dev_err(dev, "could not upgrade firmware: unable to load %s\n",
                        buf);
                return ret;
        }
        ret = t3_load_fw(adap, fw->data, fw->size);
        release_firmware(fw);

        if (ret == 0)
                dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
                         FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
        else
                dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
                        FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

        return ret;
}

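/*
 * Map the chip revision to the character used in the protocol-SRAM image
 * file name ('b' for T3B parts).  A return of 0 means no per-revision image
 * exists and update_tpsram() skips the update.
 */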
static inline char t3rev2char(struct adapter *adapter)
{
        char rev = 0;

        switch (adapter->params.rev) {
        case T3_REV_B:
        case T3_REV_B2:
                rev = 'b';
                break;
        }
        return rev;
}

int update_tpsram(struct adapter *adap)
{
        const struct firmware *tpsram;
        char buf[64];
        struct device *dev = &adap->pdev->dev;
        int ret;
        char rev;

        rev = t3rev2char(adap);
        if (!rev)
                return 0;

        snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
                 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);

        ret = request_firmware(&tpsram, buf, dev);
        if (ret < 0) {
                dev_err(dev, "could not load TP SRAM: unable to load %s\n",
                        buf);
                return ret;
        }

        ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
        if (ret)
                goto release_tpsram;

        ret = t3_set_proto_sram(adap, tpsram->data);
        if (ret == 0)
                dev_info(dev,
                         "successful update of protocol engine "
                         "to %d.%d.%d\n",
                         TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
        else
                dev_err(dev, "failed to update protocol engine to %d.%d.%d\n",
                        TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
        if (ret)
                dev_err(dev, "loading protocol SRAM failed\n");

release_tpsram:
        release_firmware(tpsram);

        return ret;
}

/**
 *      cxgb_up - enable the adapter
 *      @adapter: adapter being enabled
 *
 *      Called when the first port is enabled, this function performs the
 *      actions necessary to make an adapter operational, such as completing
 *      the initialization of HW modules, and enabling interrupts.
 *
 *      Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
        int err = 0;
        int must_load;

        if (!(adap->flags & FULL_INIT_DONE)) {
                err = t3_check_fw_version(adap);
                if (err == -EINVAL)
                        err = upgrade_fw(adap);
                if (err)
                        goto out;

                err = t3_check_tpsram_version(adap, &must_load);
                if (err == -EINVAL) {
                        err = update_tpsram(adap);
                        if (err && must_load)
                                goto out;
                }

                err = init_dummy_netdevs(adap);
                if (err)
                        goto out;

                err = t3_init_hw(adap, 0);
                if (err)
                        goto out;

                t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

                err = setup_sge_qsets(adap);
                if (err)
                        goto out;

                setup_rss(adap);
                adap->flags |= FULL_INIT_DONE;
        }

        t3_intr_clear(adap);

        if (adap->flags & USING_MSIX) {
                name_msix_vecs(adap);
                err = request_irq(adap->msix_info[0].vec,
                                  t3_async_intr_handler, 0,
                                  adap->msix_info[0].desc, adap);
                if (err)
                        goto irq_err;

                err = request_msix_data_irqs(adap);
                if (err) {
                        free_irq(adap->msix_info[0].vec, adap);
                        goto irq_err;
                }
        } else if ((err = request_irq(adap->pdev->irq,
                                      t3_intr_handler(adap,
                                                      adap->sge.qs[0].rspq.
                                                      polling),
                                      (adap->flags & USING_MSI) ?
                                       0 : IRQF_SHARED,
                                      adap->name, adap)))
                goto irq_err;

        t3_sge_start(adap);
        t3_intr_enable(adap);

        if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
                bind_qsets(adap);
        adap->flags |= QUEUES_BOUND;

out:
        return err;
irq_err:
        CH_ERR(adap, "request_irq failed, err %d\n", err);
        goto out;
}

/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
        t3_sge_stop(adapter);
        spin_lock_irq(&adapter->work_lock);     /* sync with PHY intr task */
        t3_intr_disable(adapter);
        spin_unlock_irq(&adapter->work_lock);

        if (adapter->flags & USING_MSIX) {
                int i, n = 0;

                free_irq(adapter->msix_info[0].vec, adapter);
                for_each_port(adapter, i)
                        n += adap2pinfo(adapter, i)->nqsets;

                for (i = 0; i < n; ++i)
                        free_irq(adapter->msix_info[i + 1].vec,
                                 &adapter->sge.qs[i]);
        } else
                free_irq(adapter->pdev->irq, adapter);

        flush_workqueue(cxgb3_wq);      /* wait for external IRQ handler */
        quiesce_rx(adapter);
}

static void schedule_chk_task(struct adapter *adap)
{
        unsigned int timeo;

        timeo = adap->params.linkpoll_period ?
            (HZ * adap->params.linkpoll_period) / 10 :
            adap->params.stats_update_period * HZ;
        if (timeo)
                queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}

static int offload_open(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct t3cdev *tdev = dev2t3cdev(dev);
        int adap_up = adapter->open_device_map & PORT_MASK;
        int err = 0;

        if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        if (!adap_up && (err = cxgb_up(adapter)) < 0)
                return err;

        t3_tp_set_offload_mode(adapter, 1);
        tdev->lldev = adapter->port[0];
        err = cxgb3_offload_activate(adapter);
        if (err)
                goto out;

        init_port_mtus(adapter);
        t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
                     adapter->params.b_wnd,
                     adapter->params.rev == 0 ?
                     adapter->port[0]->mtu : 0xffff);
        init_smt(adapter);

        /* Never mind if the next step fails */
        sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group);

        /* Call back all registered clients */
        cxgb3_add_clients(tdev);

out:
        /* restore them in case the offload module has changed them */
        if (err) {
                t3_tp_set_offload_mode(adapter, 0);
                clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
                cxgb3_set_dummy_ops(tdev);
        }
        return err;
}

static int offload_close(struct t3cdev *tdev)
{
        struct adapter *adapter = tdev2adap(tdev);

        if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        /* Call back all registered clients */
        cxgb3_remove_clients(tdev);

        sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

        tdev->lldev = NULL;
        cxgb3_set_dummy_ops(tdev);
        t3_tp_set_offload_mode(adapter, 0);
        clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

        if (!adapter->open_device_map)
                cxgb_down(adapter);

        cxgb3_offload_deactivate(adapter);
        return 0;
}

static int cxgb_open(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        int other_ports = adapter->open_device_map & PORT_MASK;
        int err;

        if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
                return err;

        set_bit(pi->port_id, &adapter->open_device_map);
        if (is_offload(adapter) && !ofld_disable) {
                err = offload_open(dev);
                if (err)
                        printk(KERN_WARNING
                               "Could not initialize offload capabilities\n");
        }

        link_start(dev);
        t3_port_intr_enable(adapter, pi->port_id);
        netif_start_queue(dev);
        if (!other_ports)
                schedule_chk_task(adapter);

        return 0;
}

static int cxgb_close(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        t3_port_intr_disable(adapter, pi->port_id);
        netif_stop_queue(dev);
        pi->phy.ops->power_down(&pi->phy, 1);
        netif_carrier_off(dev);
        t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

        spin_lock(&adapter->work_lock); /* sync with update task */
        clear_bit(pi->port_id, &adapter->open_device_map);
        spin_unlock(&adapter->work_lock);

        if (!(adapter->open_device_map & PORT_MASK))
                cancel_rearming_delayed_workqueue(cxgb3_wq,
                                                  &adapter->adap_check_task);

        if (!adapter->open_device_map)
                cxgb_down(adapter);

        return 0;
}

static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct net_device_stats *ns = &pi->netstats;
        const struct mac_stats *pstats;

        spin_lock(&adapter->stats_lock);
        pstats = t3_mac_update_stats(&pi->mac);
        spin_unlock(&adapter->stats_lock);

        ns->tx_bytes = pstats->tx_octets;
        ns->tx_packets = pstats->tx_frames;
        ns->rx_bytes = pstats->rx_octets;
        ns->rx_packets = pstats->rx_frames;
        ns->multicast = pstats->rx_mcast_frames;

        ns->tx_errors = pstats->tx_underrun;
        ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
            pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
            pstats->rx_fifo_ovfl;

        /* detailed rx_errors */
        ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
        ns->rx_over_errors = 0;
        ns->rx_crc_errors = pstats->rx_fcs_errs;
        ns->rx_frame_errors = pstats->rx_symbol_errs;
        ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
        ns->rx_missed_errors = pstats->rx_cong_drops;

        /* detailed tx_errors */
        ns->tx_aborted_errors = 0;
        ns->tx_carrier_errors = 0;
        ns->tx_fifo_errors = pstats->tx_underrun;
        ns->tx_heartbeat_errors = 0;
        ns->tx_window_errors = 0;
        return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        adapter->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
        "TxOctetsOK         ",
        "TxFramesOK         ",
        "TxMulticastFramesOK",
        "TxBroadcastFramesOK",
        "TxPauseFrames      ",
        "TxUnderrun         ",
        "TxExtUnderrun      ",

        "TxFrames64         ",
        "TxFrames65To127    ",
        "TxFrames128To255   ",
        "TxFrames256To511   ",
        "TxFrames512To1023  ",
        "TxFrames1024To1518 ",
        "TxFrames1519ToMax  ",

        "RxOctetsOK         ",
        "RxFramesOK         ",
        "RxMulticastFramesOK",
        "RxBroadcastFramesOK",
        "RxPauseFrames      ",
        "RxFCSErrors        ",
        "RxSymbolErrors     ",
        "RxShortErrors      ",
        "RxJabberErrors     ",
        "RxLengthErrors     ",
        "RxFIFOoverflow     ",

        "RxFrames64         ",
        "RxFrames65To127    ",
        "RxFrames128To255   ",
        "RxFrames256To511   ",
        "RxFrames512To1023  ",
        "RxFrames1024To1518 ",
        "RxFrames1519ToMax  ",

        "PhyFIFOErrors      ",
        "TSO                ",
        "VLANextractions    ",
        "VLANinsertions     ",
        "TxCsumOffload      ",
        "RxCsumGood         ",
        "RxDrops            ",

        "CheckTXEnToggled   ",
        "CheckResets        ",
};

static int get_stats_count(struct net_device *dev)
{
        return ARRAY_SIZE(stats_strings);
}

#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
        return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
        return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        u32 fw_vers = 0;
        u32 tp_vers = 0;

        t3_get_fw_version(adapter, &fw_vers);
        t3_get_tp_version(adapter, &tp_vers);

        strcpy(info->driver, DRV_NAME);
        strcpy(info->version, DRV_VERSION);
        strcpy(info->bus_info, pci_name(adapter->pdev));
        if (!fw_vers)
                strcpy(info->fw_version, "N/A");
        else {
                snprintf(info->fw_version, sizeof(info->fw_version),
                         "%s %u.%u.%u TP %u.%u.%u",
                         G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
                         G_FW_VERSION_MAJOR(fw_vers),
                         G_FW_VERSION_MINOR(fw_vers),
                         G_FW_VERSION_MICRO(fw_vers),
                         G_TP_VERSION_MAJOR(tp_vers),
                         G_TP_VERSION_MINOR(tp_vers),
                         G_TP_VERSION_MICRO(tp_vers));
        }
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        if (stringset == ETH_SS_STATS)
                memcpy(data, stats_strings, sizeof(stats_strings));
}

static unsigned long collect_sge_port_stats(struct adapter *adapter,
                                            struct port_info *p, int idx)
{
        int i;
        unsigned long tot = 0;

        for (i = 0; i < p->nqsets; ++i)
                tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
        return tot;
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
                      u64 *data)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        const struct mac_stats *s;

        spin_lock(&adapter->stats_lock);
        s = t3_mac_update_stats(&pi->mac);
        spin_unlock(&adapter->stats_lock);

        *data++ = s->tx_octets;
        *data++ = s->tx_frames;
        *data++ = s->tx_mcast_frames;
        *data++ = s->tx_bcast_frames;
        *data++ = s->tx_pause;
        *data++ = s->tx_underrun;
        *data++ = s->tx_fifo_urun;

        *data++ = s->tx_frames_64;
        *data++ = s->tx_frames_65_127;
        *data++ = s->tx_frames_128_255;
        *data++ = s->tx_frames_256_511;
        *data++ = s->tx_frames_512_1023;
        *data++ = s->tx_frames_1024_1518;
        *data++ = s->tx_frames_1519_max;

        *data++ = s->rx_octets;
        *data++ = s->rx_frames;
        *data++ = s->rx_mcast_frames;
        *data++ = s->rx_bcast_frames;
        *data++ = s->rx_pause;
        *data++ = s->rx_fcs_errs;
        *data++ = s->rx_symbol_errs;
        *data++ = s->rx_short;
        *data++ = s->rx_jabber;
        *data++ = s->rx_too_long;
        *data++ = s->rx_fifo_ovfl;

        *data++ = s->rx_frames_64;
        *data++ = s->rx_frames_65_127;
        *data++ = s->rx_frames_128_255;
        *data++ = s->rx_frames_256_511;
        *data++ = s->rx_frames_512_1023;
        *data++ = s->rx_frames_1024_1518;
        *data++ = s->rx_frames_1519_max;

        *data++ = pi->phy.fifo_errors;

        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
        *data++ = s->rx_cong_drops;

        *data++ = s->num_toggled;
        *data++ = s->num_resets;
}

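/*
 * Copy the registers in the byte-offset range [start, end] into buf at the
 * same offsets, one 32-bit word at a time.
 */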
static inline void reg_block_dump(struct adapter *ap, void *buf,
                                  unsigned int start, unsigned int end)
{
        u32 *p = buf + start;

        for (; start <= end; start += sizeof(u32))
                *p++ = t3_read_reg(ap, start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
                     void *buf)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *ap = pi->adapter;

        /*
         * Version scheme:
         * bits 0..9: chip version
         * bits 10..15: chip revision
         * bit 31: set for PCIe cards
         */
        regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);

        /*
         * We skip the MAC statistics registers because they are clear-on-read.
         * Also reading multi-register stats would need to synchronize with the
         * periodic mac stats accumulation.  Hard to justify the complexity.
         */
        memset(buf, 0, T3_REGMAP_SIZE);
        reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
        reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
        reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
        reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
        reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
        reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
                       XGM_REG(A_XGM_SERDES_STAT3, 1));
        reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
                       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}

static int restart_autoneg(struct net_device *dev)
{
        struct port_info *p = netdev_priv(dev);

        if (!netif_running(dev))
                return -EAGAIN;
        if (p->link_config.autoneg != AUTONEG_ENABLE)
                return -EINVAL;
        p->phy.ops->autoneg_restart(&p->phy);
        return 0;
}

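/*
 * ethtool identify: blink the LED on GPIO0 for roughly @data seconds
 * (500 ms per toggle, default 2 seconds), then leave it driven high.
 */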
1339 static int cxgb3_phys_id(struct net_device *dev, u32 data)
1340 {
1341         struct port_info *pi = netdev_priv(dev);
1342         struct adapter *adapter = pi->adapter;
1343         int i;
1344
1345         if (data == 0)
1346                 data = 2;
1347
1348         for (i = 0; i < data * 2; i++) {
1349                 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1350                                  (i & 1) ? F_GPIO0_OUT_VAL : 0);
1351                 if (msleep_interruptible(500))
1352                         break;
1353         }
1354         t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1355                          F_GPIO0_OUT_VAL);
1356         return 0;
1357 }
1358
1359 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1360 {
1361         struct port_info *p = netdev_priv(dev);
1362
1363         cmd->supported = p->link_config.supported;
1364         cmd->advertising = p->link_config.advertising;
1365
1366         if (netif_carrier_ok(dev)) {
1367                 cmd->speed = p->link_config.speed;
1368                 cmd->duplex = p->link_config.duplex;
1369         } else {
1370                 cmd->speed = -1;
1371                 cmd->duplex = -1;
1372         }
1373
1374         cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1375         cmd->phy_address = p->phy.addr;
1376         cmd->transceiver = XCVR_EXTERNAL;
1377         cmd->autoneg = p->link_config.autoneg;
1378         cmd->maxtxpkt = 0;
1379         cmd->maxrxpkt = 0;
1380         return 0;
1381 }
1382
1383 static int speed_duplex_to_caps(int speed, int duplex)
1384 {
1385         int cap = 0;
1386
1387         switch (speed) {
1388         case SPEED_10:
1389                 if (duplex == DUPLEX_FULL)
1390                         cap = SUPPORTED_10baseT_Full;
1391                 else
1392                         cap = SUPPORTED_10baseT_Half;
1393                 break;
1394         case SPEED_100:
1395                 if (duplex == DUPLEX_FULL)
1396                         cap = SUPPORTED_100baseT_Full;
1397                 else
1398                         cap = SUPPORTED_100baseT_Half;
1399                 break;
1400         case SPEED_1000:
1401                 if (duplex == DUPLEX_FULL)
1402                         cap = SUPPORTED_1000baseT_Full;
1403                 else
1404                         cap = SUPPORTED_1000baseT_Half;
1405                 break;
1406         case SPEED_10000:
1407                 if (duplex == DUPLEX_FULL)
1408                         cap = SUPPORTED_10000baseT_Full;
1409         }
1410         return cap;
1411 }
1412
1413 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1414                       ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1415                       ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1416                       ADVERTISED_10000baseT_Full)
1417
1418 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1419 {
1420         struct port_info *p = netdev_priv(dev);
1421         struct link_config *lc = &p->link_config;
1422
1423         if (!(lc->supported & SUPPORTED_Autoneg))
1424                 return -EOPNOTSUPP;     /* can't change speed/duplex */
1425
1426         if (cmd->autoneg == AUTONEG_DISABLE) {
1427                 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1428
1429                 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
1430                         return -EINVAL;
1431                 lc->requested_speed = cmd->speed;
1432                 lc->requested_duplex = cmd->duplex;
1433                 lc->advertising = 0;
1434         } else {
1435                 cmd->advertising &= ADVERTISED_MASK;
1436                 cmd->advertising &= lc->supported;
1437                 if (!cmd->advertising)
1438                         return -EINVAL;
1439                 lc->requested_speed = SPEED_INVALID;
1440                 lc->requested_duplex = DUPLEX_INVALID;
1441                 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1442         }
1443         lc->autoneg = cmd->autoneg;
1444         if (netif_running(dev))
1445                 t3_link_start(&p->phy, &p->mac, lc);
1446         return 0;
1447 }
1448
1449 static void get_pauseparam(struct net_device *dev,
1450                            struct ethtool_pauseparam *epause)
1451 {
1452         struct port_info *p = netdev_priv(dev);
1453
1454         epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1455         epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1456         epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1457 }
1458
1459 static int set_pauseparam(struct net_device *dev,
1460                           struct ethtool_pauseparam *epause)
1461 {
1462         struct port_info *p = netdev_priv(dev);
1463         struct link_config *lc = &p->link_config;
1464
1465         if (epause->autoneg == AUTONEG_DISABLE)
1466                 lc->requested_fc = 0;
1467         else if (lc->supported & SUPPORTED_Autoneg)
1468                 lc->requested_fc = PAUSE_AUTONEG;
1469         else
1470                 return -EINVAL;
1471
1472         if (epause->rx_pause)
1473                 lc->requested_fc |= PAUSE_RX;
1474         if (epause->tx_pause)
1475                 lc->requested_fc |= PAUSE_TX;
1476         if (lc->autoneg == AUTONEG_ENABLE) {
1477                 if (netif_running(dev))
1478                         t3_link_start(&p->phy, &p->mac, lc);
1479         } else {
1480                 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1481                 if (netif_running(dev))
1482                         t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1483         }
1484         return 0;
1485 }
1486
1487 static u32 get_rx_csum(struct net_device *dev)
1488 {
1489         struct port_info *p = netdev_priv(dev);
1490
1491         return p->rx_csum_offload;
1492 }
1493
1494 static int set_rx_csum(struct net_device *dev, u32 data)
1495 {
1496         struct port_info *p = netdev_priv(dev);
1497
1498         p->rx_csum_offload = data;
1499         return 0;
1500 }
1501
static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];

        e->rx_max_pending = MAX_RX_BUFFERS;
        e->rx_mini_max_pending = 0;
        e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
        e->tx_max_pending = MAX_TXQ_ENTRIES;

        e->rx_pending = q->fl_size;
        e->rx_mini_pending = q->rspq_size;
        e->rx_jumbo_pending = q->jumbo_size;
        e->tx_pending = q->txq_size[0];
}

static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct qset_params *q;
        int i;

        if (e->rx_pending > MAX_RX_BUFFERS ||
            e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
            e->tx_pending > MAX_TXQ_ENTRIES ||
            e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
            e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
            e->rx_pending < MIN_FL_ENTRIES ||
            e->rx_jumbo_pending < MIN_FL_ENTRIES ||
            e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
                return -EINVAL;

        if (adapter->flags & FULL_INIT_DONE)
                return -EBUSY;

        q = &adapter->params.sge.qset[pi->first_qset];
        for (i = 0; i < pi->nqsets; ++i, ++q) {
                q->rspq_size = e->rx_mini_pending;
                q->fl_size = e->rx_pending;
                q->jumbo_size = e->rx_jumbo_pending;
                q->txq_size[0] = e->tx_pending;
                q->txq_size[1] = e->tx_pending;
                q->txq_size[2] = e->tx_pending;
        }
        return 0;
}

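/*
 * Interrupt coalescing is stored per queue set, but these ethtool hooks
 * operate on queue set 0 only.  The microsecond value is scaled by 10 to
 * match the SGE hold-off timer's tick, so the maximum delay is
 * M_NEWTIMER / 10 microseconds.
 */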
static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct qset_params *qsp = &adapter->params.sge.qset[0];
        struct sge_qset *qs = &adapter->sge.qs[0];

        if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
                return -EINVAL;

        qsp->coalesce_usecs = c->rx_coalesce_usecs;
        t3_update_qset_coalesce(qs, qsp);
        return 0;
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct qset_params *q = adapter->params.sge.qset;

        c->rx_coalesce_usecs = q->coalesce_usecs;
        return 0;
}

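/*
 * The serial EEPROM is accessed in 4-byte words.  Read whole words covering
 * the requested range into a scratch buffer, then copy out exactly the
 * bytes the caller asked for.
 */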
static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
                      u8 *data)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        int i, err = 0;

        u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        e->magic = EEPROM_MAGIC;
        for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
                err = t3_seeprom_read(adapter, i, (u32 *)&buf[i]);

        if (!err)
                memcpy(data, buf + e->offset, e->len);
        kfree(buf);
        return err;
}

static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
                      u8 *data)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        u32 aligned_offset, aligned_len, *p;
        u8 *buf;
        int err = 0;

        if (eeprom->magic != EEPROM_MAGIC)
                return -EINVAL;

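        /*
         * Writes must be whole 4-byte words.  Round the range out to word
         * boundaries; if that changes it, read the boundary words first and
         * merge the caller's bytes in (read-modify-write).  For example,
         * offset 6 / len 4 becomes aligned_offset 4, aligned_len 8.
         */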
        aligned_offset = eeprom->offset & ~3;
        aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

        if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
                buf = kmalloc(aligned_len, GFP_KERNEL);
                if (!buf)
                        return -ENOMEM;
                err = t3_seeprom_read(adapter, aligned_offset, (u32 *)buf);
                if (!err && aligned_len > 4)
                        err = t3_seeprom_read(adapter,
                                              aligned_offset + aligned_len - 4,
                                              (u32 *)&buf[aligned_len - 4]);
                if (err)
                        goto out;
                memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
        } else {
                buf = data;
        }

        err = t3_seeprom_wp(adapter, 0);
        if (err)
                goto out;

        for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
                err = t3_seeprom_write(adapter, aligned_offset, *p);
                aligned_offset += 4;
        }

        if (!err)
                err = t3_seeprom_wp(adapter, 1);
out:
        if (buf != data)
                kfree(buf);
        return err;
}

static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
        wol->supported = 0;
        wol->wolopts = 0;
        memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static const struct ethtool_ops cxgb_ethtool_ops = {
        .get_settings = get_settings,
        .set_settings = set_settings,
        .get_drvinfo = get_drvinfo,
        .get_msglevel = get_msglevel,
        .set_msglevel = set_msglevel,
        .get_ringparam = get_sge_param,
        .set_ringparam = set_sge_param,
        .get_coalesce = get_coalesce,
        .set_coalesce = set_coalesce,
        .get_eeprom_len = get_eeprom_len,
        .get_eeprom = get_eeprom,
        .set_eeprom = set_eeprom,
        .get_pauseparam = get_pauseparam,
        .set_pauseparam = set_pauseparam,
        .get_rx_csum = get_rx_csum,
        .set_rx_csum = set_rx_csum,
        .get_tx_csum = ethtool_op_get_tx_csum,
        .set_tx_csum = ethtool_op_set_tx_csum,
        .get_sg = ethtool_op_get_sg,
        .set_sg = ethtool_op_set_sg,
        .get_link = ethtool_op_get_link,
        .get_strings = get_strings,
        .phys_id = cxgb3_phys_id,
        .nway_reset = restart_autoneg,
        .get_stats_count = get_stats_count,
        .get_ethtool_stats = get_stats,
        .get_regs_len = get_regs_len,
        .get_regs = get_regs,
        .get_wol = get_wol,
        .get_tso = ethtool_op_get_tso,
        .set_tso = ethtool_op_set_tso,
};

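/*
 * In the private-ioctl parameter blocks below, a negative value means
 * "leave this setting unchanged", so range checks must accept any negative
 * value.
 */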
static int in_range(int val, int lo, int hi)
{
        return val < 0 || (val <= hi && val >= lo);
}

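/*
 * Handler for the SIOCCHIOCTL extension ioctls.  The user buffer starts
 * with a u32 command word that selects the operation; the rest of the
 * buffer is the command-specific parameter block.
 */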
static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        u32 cmd;
        int ret;

        if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
                return -EFAULT;

        switch (cmd) {
        case CHELSIO_SET_QSET_PARAMS:{
                int i;
                struct qset_params *q;
                struct ch_qset_params t;

                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                if (copy_from_user(&t, useraddr, sizeof(t)))
                        return -EFAULT;
                if (t.qset_idx >= SGE_QSETS)
                        return -EINVAL;
                if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
                    !in_range(t.cong_thres, 0, 255) ||
                    !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
                              MAX_TXQ_ENTRIES) ||
                    !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
                              MAX_TXQ_ENTRIES) ||
                    !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
                              MAX_CTRL_TXQ_ENTRIES) ||
                    !in_range(t.fl_size[0], MIN_FL_ENTRIES,
                              MAX_RX_BUFFERS) ||
                    !in_range(t.fl_size[1], MIN_FL_ENTRIES,
                              MAX_RX_JUMBO_BUFFERS) ||
                    !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
                              MAX_RSPQ_ENTRIES))
                        return -EINVAL;
                if ((adapter->flags & FULL_INIT_DONE) &&
                    (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
                     t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
                     t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
                     t.polling >= 0 || t.cong_thres >= 0))
                        return -EBUSY;

                q = &adapter->params.sge.qset[t.qset_idx];

                if (t.rspq_size >= 0)
                        q->rspq_size = t.rspq_size;
                if (t.fl_size[0] >= 0)
                        q->fl_size = t.fl_size[0];
                if (t.fl_size[1] >= 0)
                        q->jumbo_size = t.fl_size[1];
                if (t.txq_size[0] >= 0)
                        q->txq_size[0] = t.txq_size[0];
                if (t.txq_size[1] >= 0)
                        q->txq_size[1] = t.txq_size[1];
                if (t.txq_size[2] >= 0)
                        q->txq_size[2] = t.txq_size[2];
                if (t.cong_thres >= 0)
                        q->cong_thres = t.cong_thres;
                if (t.intr_lat >= 0) {
                        struct sge_qset *qs = &adapter->sge.qs[t.qset_idx];

                        q->coalesce_usecs = t.intr_lat;
                        t3_update_qset_coalesce(qs, q);
                }
                if (t.polling >= 0) {
                        if (adapter->flags & USING_MSIX)
                                q->polling = t.polling;
                        else {
                                /* No polling with INTx for T3A */
                                if (adapter->params.rev == 0 &&
                                    !(adapter->flags & USING_MSI))
                                        t.polling = 0;

                                for (i = 0; i < SGE_QSETS; i++) {
                                        q = &adapter->params.sge.qset[i];
                                        q->polling = t.polling;
                                }
                        }
                }
                break;
        }
        case CHELSIO_GET_QSET_PARAMS:{
                struct qset_params *q;
                struct ch_qset_params t;

                if (copy_from_user(&t, useraddr, sizeof(t)))
                        return -EFAULT;
                if (t.qset_idx >= SGE_QSETS)
                        return -EINVAL;

                q = &adapter->params.sge.qset[t.qset_idx];
                t.rspq_size = q->rspq_size;
                t.txq_size[0] = q->txq_size[0];
                t.txq_size[1] = q->txq_size[1];
                t.txq_size[2] = q->txq_size[2];
                t.fl_size[0] = q->fl_size;
                t.fl_size[1] = q->jumbo_size;
                t.polling = q->polling;
                t.intr_lat = q->coalesce_usecs;
                t.cong_thres = q->cong_thres;

                if (copy_to_user(useraddr, &t, sizeof(t)))
                        return -EFAULT;
                break;
        }
        case CHELSIO_SET_QSET_NUM:{
                struct ch_reg edata;
                struct port_info *pi = netdev_priv(dev);
                unsigned int i, first_qset = 0, other_qsets = 0;

                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                if (adapter->flags & FULL_INIT_DONE)
                        return -EBUSY;
                if (copy_from_user(&edata, useraddr, sizeof(edata)))
                        return -EFAULT;
                if (edata.val < 1 ||
                    (edata.val > 1 && !(adapter->flags & USING_MSIX)))
                        return -EINVAL;

                for_each_port(adapter, i)
                        if (adapter->port[i] && adapter->port[i] != dev)
                                other_qsets += adap2pinfo(adapter, i)->nqsets;

                if (edata.val + other_qsets > SGE_QSETS)
                        return -EINVAL;

                pi->nqsets = edata.val;

                for_each_port(adapter, i)
                        if (adapter->port[i]) {
                                pi = adap2pinfo(adapter, i);
                                pi->first_qset = first_qset;
                                first_qset += pi->nqsets;
                        }
                break;
        }
        case CHELSIO_GET_QSET_NUM:{
                struct ch_reg edata;
                struct port_info *pi = netdev_priv(dev);

                edata.cmd = CHELSIO_GET_QSET_NUM;
                edata.val = pi->nqsets;
                if (copy_to_user(useraddr, &edata, sizeof(edata)))
                        return -EFAULT;
                break;
        }
        case CHELSIO_LOAD_FW:{
                u8 *fw_data;
                struct ch_mem_range t;

                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                if (copy_from_user(&t, useraddr, sizeof(t)))
                        return -EFAULT;

                fw_data = kmalloc(t.len, GFP_KERNEL);
                if (!fw_data)
                        return -ENOMEM;

                if (copy_from_user(fw_data, useraddr + sizeof(t), t.len)) {
                        kfree(fw_data);
                        return -EFAULT;
                }

                ret = t3_load_fw(adapter, fw_data, t.len);
                kfree(fw_data);
                if (ret)
                        return ret;
                break;
        }
        case CHELSIO_SETMTUTAB:{
                struct ch_mtus m;
                int i;

                if (!is_offload(adapter))
                        return -EOPNOTSUPP;
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                if (offload_running(adapter))
                        return -EBUSY;
                if (copy_from_user(&m, useraddr, sizeof(m)))
                        return -EFAULT;
                if (m.nmtus != NMTUS)
                        return -EINVAL;
                if (m.mtus[0] < 81)     /* accommodate SACK */
                        return -EINVAL;

                /* MTUs must be in ascending order */
                for (i = 1; i < NMTUS; ++i)
                        if (m.mtus[i] < m.mtus[i - 1])
                                return -EINVAL;

                memcpy(adapter->params.mtus, m.mtus,
                       sizeof(adapter->params.mtus));
                break;
        }
        case CHELSIO_GET_PM:{
                struct tp_params *p = &adapter->params.tp;
                struct ch_pm m = {.cmd = CHELSIO_GET_PM };

                if (!is_offload(adapter))
                        return -EOPNOTSUPP;
                m.tx_pg_sz = p->tx_pg_size;
                m.tx_num_pg = p->tx_num_pgs;
                m.rx_pg_sz = p->rx_pg_size;
                m.rx_num_pg = p->rx_num_pgs;
                m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
                if (copy_to_user(useraddr, &m, sizeof(m)))
                        return -EFAULT;
                break;
        }
        case CHELSIO_SET_PM:{
                struct ch_pm m;
                struct tp_params *p = &adapter->params.tp;

                if (!is_offload(adapter))
                        return -EOPNOTSUPP;
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                if (adapter->flags & FULL_INIT_DONE)
                        return -EBUSY;
                if (copy_from_user(&m, useraddr, sizeof(m)))
                        return -EFAULT;
                if (!is_power_of_2(m.rx_pg_sz) ||
                    !is_power_of_2(m.tx_pg_sz))
                        return -EINVAL; /* page sizes must be powers of 2 */
                if (!(m.rx_pg_sz & 0x14000))
                        return -EINVAL; /* rx page must be 16KB or 64KB */
                if (!(m.tx_pg_sz & 0x1554000))
                        return -EINVAL; /* tx page: 16KB-16MB, power-of-4 steps */
                if (m.tx_num_pg == -1)
                        m.tx_num_pg = p->tx_num_pgs;
                if (m.rx_num_pg == -1)
                        m.rx_num_pg = p->rx_num_pgs;
                if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
                        return -EINVAL;
                if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
                    m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
                        return -EINVAL;
                p->rx_pg_size = m.rx_pg_sz;
                p->tx_pg_size = m.tx_pg_sz;
                p->rx_num_pgs = m.rx_num_pg;
                p->tx_num_pgs = m.tx_num_pg;
                break;
        }
        case CHELSIO_GET_MEM:{
                struct ch_mem_range t;
                struct mc7 *mem;
                u64 buf[32];

                if (!is_offload(adapter))
                        return -EOPNOTSUPP;
                if (!(adapter->flags & FULL_INIT_DONE))
                        return -EIO;    /* need the memory controllers */
                if (copy_from_user(&t, useraddr, sizeof(t)))
                        return -EFAULT;
                if ((t.addr & 7) || (t.len & 7))
                        return -EINVAL;
                if (t.mem_id == MEM_CM)
                        mem = &adapter->cm;
                else if (t.mem_id == MEM_PMRX)
                        mem = &adapter->pmrx;
                else if (t.mem_id == MEM_PMTX)
                        mem = &adapter->pmtx;
                else
                        return -EINVAL;

                /*
                 * Version scheme:
                 * bits 0..9: chip version
                 * bits 10..15: chip revision
                 */
                t.version = 3 | (adapter->params.rev << 10);
                if (copy_to_user(useraddr, &t, sizeof(t)))
                        return -EFAULT;

                /*
                 * Read 256 bytes at a time as len can be large and we don't
                 * want to use huge intermediate buffers.
                 */
                useraddr += sizeof(t);  /* advance to start of buffer */
                while (t.len) {
                        unsigned int chunk = min_t(unsigned int, t.len,
                                                   sizeof(buf));

                        ret = t3_mc7_bd_read(mem, t.addr / 8, chunk / 8, buf);
                        if (ret)
                                return ret;
                        if (copy_to_user(useraddr, buf, chunk))
                                return -EFAULT;
                        useraddr += chunk;
                        t.addr += chunk;
                        t.len -= chunk;
                }
                break;
        }
        case CHELSIO_SET_TRACE_FILTER:{
                struct ch_trace t;
                const struct trace_params *tp;

                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                if (!offload_running(adapter))
                        return -EAGAIN;
                if (copy_from_user(&t, useraddr, sizeof(t)))
                        return -EFAULT;

                tp = (const struct trace_params *)&t.sip;
                if (t.config_tx)
                        t3_config_trace_filter(adapter, tp, 0,
                                               t.invert_match, t.trace_tx);
                if (t.config_rx)
                        t3_config_trace_filter(adapter, tp, 1,
                                               t.invert_match, t.trace_rx);
                break;
        }
        default:
                return -EOPNOTSUPP;
        }
        return 0;
}

static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
        struct mii_ioctl_data *data = if_mii(req);
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        int ret, mmd;

        switch (cmd) {
        case SIOCGMIIPHY:
                data->phy_id = pi->phy.addr;
                /* FALLTHRU */
        case SIOCGMIIREG:{
                u32 val;
                struct cphy *phy = &pi->phy;

                if (!phy->mdio_read)
                        return -EOPNOTSUPP;
                if (is_10G(adapter)) {
                        mmd = data->phy_id >> 8;
                        if (!mmd)
                                mmd = MDIO_DEV_PCS;
                        else if (mmd > MDIO_DEV_XGXS)
                                return -EINVAL;

                        ret = phy->mdio_read(adapter, data->phy_id & 0x1f,
                                             mmd, data->reg_num, &val);
                } else {
                        ret = phy->mdio_read(adapter, data->phy_id & 0x1f,
                                             0, data->reg_num & 0x1f, &val);
                }
                if (!ret)
                        data->val_out = val;
                break;
        }
        case SIOCSMIIREG:{
                struct cphy *phy = &pi->phy;

                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                if (!phy->mdio_write)
                        return -EOPNOTSUPP;
                if (is_10G(adapter)) {
                        mmd = data->phy_id >> 8;
                        if (!mmd)
                                mmd = MDIO_DEV_PCS;
                        else if (mmd > MDIO_DEV_XGXS)
                                return -EINVAL;

                        ret = phy->mdio_write(adapter, data->phy_id & 0x1f,
                                              mmd, data->reg_num,
                                              data->val_in);
                } else {
                        ret = phy->mdio_write(adapter, data->phy_id & 0x1f,
                                              0, data->reg_num & 0x1f,
                                              data->val_in);
                }
                break;
        }
        case SIOCCHIOCTL:
                return cxgb_extension_ioctl(dev, req->ifr_data);
        default:
                return -EOPNOTSUPP;
        }
        return ret;
}

static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        int ret;

        if (new_mtu < 81)       /* accommodate SACK */
                return -EINVAL;
        ret = t3_mac_set_mtu(&pi->mac, new_mtu);
        if (ret)
                return ret;
        dev->mtu = new_mtu;
        init_port_mtus(adapter);
        if (adapter->params.rev == 0 && offload_running(adapter))
                t3_load_mtus(adapter, adapter->params.mtus,
                             adapter->params.a_wnd, adapter->params.b_wnd,
                             adapter->port[0]->mtu);
        return 0;
}

static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct sockaddr *addr = p;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EINVAL;

        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
        t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
        if (offload_running(adapter))
                write_smt_entry(adapter, pi->port_id);
        return 0;
}

/**
 * t3_synchronize_rx - wait for current Rx processing on a port to complete
 * @adap: the adapter
 * @p: the port
 *
 * Ensures that current Rx processing on any of the queues associated with
 * the given port completes before returning.  We do this by acquiring and
 * releasing the locks of the response queues associated with the port.
 */
static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
{
        int i;

        for (i = 0; i < p->nqsets; i++) {
                struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;

                spin_lock_irq(&q->lock);
                spin_unlock_irq(&q->lock);
        }
}

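/*
 * T3A (rev 0) has a single, chip-wide VLAN extraction control, so it is
 * enabled whenever any port has a VLAN group; later revisions can enable
 * it per port.
 */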
static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        pi->vlan_grp = grp;
        if (adapter->params.rev > 0)
                t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
        else {
                /* single control for all ports */
                unsigned int i, have_vlans = 0;

                for_each_port(adapter, i)
                        have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;

                t3_set_vlan_accel(adapter, 1, have_vlans);
        }
        t3_synchronize_rx(adapter, pi);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
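/*
 * Poll each of the port's queue sets by invoking the interrupt handler
 * directly.  With MSI-X each queue set has its own vector, so the handler
 * is passed the queue set; with MSI/INTx it gets the whole adapter.
 */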
static void cxgb_netpoll(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        int qidx;

        for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
                struct sge_qset *qs = &adapter->sge.qs[qidx];
                void *source;

                if (adapter->flags & USING_MSIX)
                        source = qs;
                else
                        source = adapter;

                t3_intr_handler(adapter, qs->rspq.polling)(0, source);
        }
}
#endif


/*
 * Periodic accumulation of MAC statistics.
 */
static void mac_stats_update(struct adapter *adapter)
{
        int i;

        for_each_port(adapter, i) {
                struct net_device *dev = adapter->port[i];
                struct port_info *p = netdev_priv(dev);

                if (netif_running(dev)) {
                        spin_lock(&adapter->stats_lock);
                        t3_mac_update_stats(&p->mac);
                        spin_unlock(&adapter->stats_lock);
                }
        }
}

static void check_link_status(struct adapter *adapter)
{
        int i;

        for_each_port(adapter, i) {
                struct net_device *dev = adapter->port[i];
                struct port_info *p = netdev_priv(dev);

                if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
                        t3_link_changed(adapter, i);
        }
}

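/*
 * T3B2 MAC watchdog.  Per the handling below, t3b2_mac_watchdog_task()
 * reports either that the MAC was toggled (just count it) or that it is
 * wedged and needs a full reset, in which case the MAC, rx mode, link and
 * port interrupts are all reinitialized.
 */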
static void check_t3b2_mac(struct adapter *adapter)
{
        int i;

        if (!rtnl_trylock())    /* synchronize with ifdown */
                return;

        for_each_port(adapter, i) {
                struct net_device *dev = adapter->port[i];
                struct port_info *p = netdev_priv(dev);
                int status;

                if (!netif_running(dev))
                        continue;

                status = 0;
                if (netif_carrier_ok(dev))
                        status = t3b2_mac_watchdog_task(&p->mac);
                if (status == 1)
                        p->mac.stats.num_toggled++;
                else if (status == 2) {
                        struct cmac *mac = &p->mac;

                        t3_mac_set_mtu(mac, dev->mtu);
                        t3_mac_set_address(mac, 0, dev->dev_addr);
                        cxgb_set_rxmode(dev);
                        t3_link_start(&p->phy, mac, &p->link_config);
                        t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
                        t3_port_intr_enable(adapter, p->port_id);
                        p->mac.stats.num_resets++;
                }
        }
        rtnl_unlock();
}

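/*
 * Periodic slow-path maintenance: polls link state for PHYs without
 * interrupts, accumulates MAC statistics, runs the T3B2 MAC watchdog, and
 * reschedules itself while any port is up.
 */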
static void t3_adap_check_task(struct work_struct *work)
{
        struct adapter *adapter = container_of(work, struct adapter,
                                               adap_check_task.work);
        const struct adapter_params *p = &adapter->params;

        adapter->check_task_cnt++;

        /* Check link status for PHYs without interrupts */
        if (p->linkpoll_period)
                check_link_status(adapter);

        /* Accumulate MAC stats if needed */
        if (!p->linkpoll_period ||
            (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
            p->stats_update_period) {
                mac_stats_update(adapter);
                adapter->check_task_cnt = 0;
        }

        if (p->rev == T3_REV_B2)
                check_t3b2_mac(adapter);

        /* Schedule the next check update if any port is active. */
        spin_lock(&adapter->work_lock);
        if (adapter->open_device_map & PORT_MASK)
                schedule_chk_task(adapter);
        spin_unlock(&adapter->work_lock);
}

/*
 * Processes external (PHY) interrupts in process context.
 */
static void ext_intr_task(struct work_struct *work)
{
        struct adapter *adapter = container_of(work, struct adapter,
                                               ext_intr_handler_task);

        t3_phy_intr_handler(adapter);

        /* Now reenable external interrupts */
        spin_lock_irq(&adapter->work_lock);
        if (adapter->slow_intr_mask) {
                adapter->slow_intr_mask |= F_T3DBG;
                t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
                t3_write_reg(adapter, A_PL_INT_ENABLE0,
                             adapter->slow_intr_mask);
        }
        spin_unlock_irq(&adapter->work_lock);
}

/*
 * Interrupt-context handler for external (PHY) interrupts.
 */
void t3_os_ext_intr_handler(struct adapter *adapter)
{
        /*
         * Schedule a task to handle external interrupts as they may be slow
         * and we use a mutex to protect MDIO registers.  We disable PHY
         * interrupts in the meantime and let the task reenable them when
         * it's done.
         */
        spin_lock(&adapter->work_lock);
        if (adapter->slow_intr_mask) {
                adapter->slow_intr_mask &= ~F_T3DBG;
                t3_write_reg(adapter, A_PL_INT_ENABLE0,
                             adapter->slow_intr_mask);
                queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
        }
        spin_unlock(&adapter->work_lock);
}

void t3_fatal_err(struct adapter *adapter)
{
        unsigned int fw_status[4];

        if (adapter->flags & FULL_INIT_DONE) {
                t3_sge_stop(adapter);
                t3_intr_disable(adapter);
        }
        CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
        if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
                CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
                         fw_status[0], fw_status[1],
                         fw_status[2], fw_status[3]);
}

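/*
 * Try to switch to MSI-X: one vector per SGE queue set plus one for the
 * slow path.  If fewer vectors are available we fall back to MSI or INTx
 * rather than run with a partial set.
 */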
static int __devinit cxgb_enable_msix(struct adapter *adap)
{
        struct msix_entry entries[SGE_QSETS + 1];
        int i, err;

        for (i = 0; i < ARRAY_SIZE(entries); ++i)
                entries[i].entry = i;

        err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
        if (!err) {
                for (i = 0; i < ARRAY_SIZE(entries); ++i)
                        adap->msix_info[i].vec = entries[i].vector;
        } else if (err > 0)
                dev_info(&adap->pdev->dev,
                         "only %d MSI-X vectors left, not using MSI-X\n", err);
        return err;
}

static void __devinit print_port_info(struct adapter *adap,
                                      const struct adapter_info *ai)
{
        static const char *pci_variant[] = {
                "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
        };

        int i;
        char buf[80];

        if (is_pcie(adap))
                snprintf(buf, sizeof(buf), "%s x%d",
                         pci_variant[adap->params.pci.variant],
                         adap->params.pci.width);
        else
                snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
                         pci_variant[adap->params.pci.variant],
                         adap->params.pci.speed, adap->params.pci.width);

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                const struct port_info *pi = netdev_priv(dev);

                if (!test_bit(i, &adap->registered_device_map))
                        continue;
                printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
                       dev->name, ai->desc, pi->port_type->desc,
                       is_offload(adap) ? "R" : "", adap->params.rev, buf,
                       (adap->flags & USING_MSIX) ? " MSI-X" :
                       (adap->flags & USING_MSI) ? " MSI" : "");
                if (adap->name == dev->name && adap->params.vpd.mclk)
                        printk(KERN_INFO "%s: %uMB CM, %uMB PMTX, %uMB PMRX\n",
                               adap->name, t3_mc7_size(&adap->cm) >> 20,
                               t3_mc7_size(&adap->pmtx) >> 20,
                               t3_mc7_size(&adap->pmrx) >> 20);
        }
}

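/*
 * PCI probe: claim the BARs, set the DMA masks, map BAR 0, allocate the
 * adapter and one net_device per port, then register whatever ports we
 * can.
 */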
static int __devinit init_one(struct pci_dev *pdev,
                              const struct pci_device_id *ent)
{
        static int version_printed;

        int i, err, pci_using_dac = 0;
        unsigned long mmio_start, mmio_len;
        const struct adapter_info *ai;
        struct adapter *adapter = NULL;
        struct port_info *pi;

        if (!version_printed) {
                printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
                ++version_printed;
        }

        if (!cxgb3_wq) {
                cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
                if (!cxgb3_wq) {
                        printk(KERN_ERR DRV_NAME
                               ": cannot initialize work queue\n");
                        return -ENOMEM;
                }
        }

        err = pci_request_regions(pdev, DRV_NAME);
        if (err) {
                /* Just info, some other driver may have claimed the device. */
                dev_info(&pdev->dev, "cannot obtain PCI resources\n");
                return err;
        }

        err = pci_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "cannot enable PCI device\n");
                goto out_release_regions;
        }

        if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
                pci_using_dac = 1;
                err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
                if (err) {
                        dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
                                "coherent allocations\n");
                        goto out_disable_device;
                }
        } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
                dev_err(&pdev->dev, "no usable DMA configuration\n");
                goto out_disable_device;
        }

        pci_set_master(pdev);

        mmio_start = pci_resource_start(pdev, 0);
        mmio_len = pci_resource_len(pdev, 0);
        ai = t3_get_adapter_info(ent->driver_data);

        adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
        if (!adapter) {
                err = -ENOMEM;
                goto out_disable_device;
        }

        adapter->regs = ioremap_nocache(mmio_start, mmio_len);
        if (!adapter->regs) {
                dev_err(&pdev->dev, "cannot map device registers\n");
                err = -ENOMEM;
                goto out_free_adapter;
        }

        adapter->pdev = pdev;
        adapter->name = pci_name(pdev);
        adapter->msg_enable = dflt_msg_enable;
        adapter->mmio_len = mmio_len;

        mutex_init(&adapter->mdio_lock);
        spin_lock_init(&adapter->work_lock);
        spin_lock_init(&adapter->stats_lock);

        INIT_LIST_HEAD(&adapter->adapter_list);
        INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
        INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);

        for (i = 0; i < ai->nports; ++i) {
                struct net_device *netdev;

                netdev = alloc_etherdev(sizeof(struct port_info));
                if (!netdev) {
                        err = -ENOMEM;
                        goto out_free_dev;
                }

                SET_MODULE_OWNER(netdev);
                SET_NETDEV_DEV(netdev, &pdev->dev);

                adapter->port[i] = netdev;
                pi = netdev_priv(netdev);
                pi->adapter = adapter;
                pi->rx_csum_offload = 1;
                pi->nqsets = 1;
                pi->first_qset = i;
                pi->activity = 0;
                pi->port_id = i;
                netif_carrier_off(netdev);
                netdev->irq = pdev->irq;
                netdev->mem_start = mmio_start;
                netdev->mem_end = mmio_start + mmio_len - 1;
                netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
                netdev->features |= NETIF_F_LLTX;
                if (pci_using_dac)
                        netdev->features |= NETIF_F_HIGHDMA;

                netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
                netdev->vlan_rx_register = vlan_rx_register;

                netdev->open = cxgb_open;
                netdev->stop = cxgb_close;
                netdev->hard_start_xmit = t3_eth_xmit;
                netdev->get_stats = cxgb_get_stats;
                netdev->set_multicast_list = cxgb_set_rxmode;
                netdev->do_ioctl = cxgb_ioctl;
                netdev->change_mtu = cxgb_change_mtu;
                netdev->set_mac_address = cxgb_set_mac_addr;
#ifdef CONFIG_NET_POLL_CONTROLLER
                netdev->poll_controller = cxgb_netpoll;
#endif
                netdev->weight = 64;

                SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
        }

        pci_set_drvdata(pdev, adapter);
        if (t3_prep_adapter(adapter, ai, 1) < 0) {
                err = -ENODEV;
                goto out_free_dev;
        }

        /*
         * The card is now ready to go.  If any errors occur during device
         * registration we do not fail the whole card but rather proceed only
         * with the ports we manage to register successfully.  However we must
         * register at least one net device.
         */
        for_each_port(adapter, i) {
                err = register_netdev(adapter->port[i]);
                if (err)
                        dev_warn(&pdev->dev,
                                 "cannot register net device %s, skipping\n",
                                 adapter->port[i]->name);
                else {
                        /*
                         * Change the name we use for messages to the name of
                         * the first successfully registered interface.
                         */
                        if (!adapter->registered_device_map)
                                adapter->name = adapter->port[i]->name;

                        __set_bit(i, &adapter->registered_device_map);
                }
        }
        if (!adapter->registered_device_map) {
                dev_err(&pdev->dev, "could not register any net devices\n");
                goto out_free_dev;
        }

        /* Driver's ready. Reflect it on LEDs */
        t3_led_ready(adapter);

        if (is_offload(adapter)) {
                __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
                cxgb3_adapter_ofld(adapter);
        }

        /* See what interrupts we'll be using */
        if (msi > 1 && cxgb_enable_msix(adapter) == 0)
                adapter->flags |= USING_MSIX;
        else if (msi > 0 && pci_enable_msi(pdev) == 0)
                adapter->flags |= USING_MSI;

        err = sysfs_create_group(&adapter->port[0]->dev.kobj,
                                 &cxgb3_attr_group);
        if (err)
                dev_warn(&pdev->dev, "cannot create sysfs attributes\n");

        print_port_info(adapter, ai);
        return 0;

out_free_dev:
        iounmap(adapter->regs);
        for (i = ai->nports - 1; i >= 0; --i)
                if (adapter->port[i])
                        free_netdev(adapter->port[i]);

out_free_adapter:
        kfree(adapter);

out_disable_device:
        pci_disable_device(pdev);
out_release_regions:
        pci_release_regions(pdev);
        pci_set_drvdata(pdev, NULL);
        return err;
}

static void __devexit remove_one(struct pci_dev *pdev)
{
        struct adapter *adapter = pci_get_drvdata(pdev);

        if (adapter) {
                int i;

                t3_sge_stop(adapter);
                sysfs_remove_group(&adapter->port[0]->dev.kobj,
                                   &cxgb3_attr_group);

                for_each_port(adapter, i)
                        if (test_bit(i, &adapter->registered_device_map))
                                unregister_netdev(adapter->port[i]);

                if (is_offload(adapter)) {
                        cxgb3_adapter_unofld(adapter);
                        if (test_bit(OFFLOAD_DEVMAP_BIT,
                                     &adapter->open_device_map))
                                offload_close(&adapter->tdev);
                }

                t3_free_sge_resources(adapter);
                cxgb_disable_msi(adapter);

                for (i = 0; i < ARRAY_SIZE(adapter->dummy_netdev); i++)
                        if (adapter->dummy_netdev[i]) {
                                free_netdev(adapter->dummy_netdev[i]);
                                adapter->dummy_netdev[i] = NULL;
                        }

                for_each_port(adapter, i)
                        if (adapter->port[i])
                                free_netdev(adapter->port[i]);

                iounmap(adapter->regs);
                kfree(adapter);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
                pci_set_drvdata(pdev, NULL);
        }
}

static struct pci_driver driver = {
        .name = DRV_NAME,
        .id_table = cxgb3_pci_tbl,
        .probe = init_one,
        .remove = __devexit_p(remove_one),
};

static int __init cxgb3_init_module(void)
{
        int ret;

        cxgb3_offload_init();

        ret = pci_register_driver(&driver);
        return ret;
}

static void __exit cxgb3_cleanup_module(void)
{
        pci_unregister_driver(&driver);
        if (cxgb3_wq)
                destroy_workqueue(cxgb3_wq);
}

module_init(cxgb3_init_module);
module_exit(cxgb3_cleanup_module);