/*
 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

enum {
        MAX_TXQ_ENTRIES = 16384,
        MAX_CTRL_TXQ_ENTRIES = 1024,
        MAX_RSPQ_ENTRIES = 16384,
        MAX_RX_BUFFERS = 16384,
        MAX_RX_JUMBO_BUFFERS = 16384,
        MIN_TXQ_ENTRIES = 4,
        MIN_CTRL_TXQ_ENTRIES = 4,
        MIN_RSPQ_ENTRIES = 32,
        MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
        { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }

static const struct pci_device_id cxgb3_pci_tbl[] = {
        CH_DEVICE(0x20, 0),     /* PE9000 */
        CH_DEVICE(0x21, 1),     /* T302E */
        CH_DEVICE(0x22, 2),     /* T310E */
        CH_DEVICE(0x23, 3),     /* T320X */
        CH_DEVICE(0x24, 1),     /* T302X */
        CH_DEVICE(0x25, 3),     /* T320E */
        CH_DEVICE(0x26, 2),     /* T310X */
        CH_DEVICE(0x30, 2),     /* T3B10 */
        CH_DEVICE(0x31, 3),     /* T3B20 */
        CH_DEVICE(0x32, 1),     /* T3B02 */
        {0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");

/*
 * The driver enables offload by default.
 * To disable it, set ofld_disable = 1.
 */

static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
static struct workqueue_struct *cxgb3_wq;

/**
 *      link_report - show link status and link speed/duplex
 *      @dev: the net device whose port settings are to be reported
 *
 *      Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                printk(KERN_INFO "%s: link down\n", dev->name);
        else {
                const char *s = "10Mbps";
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_config.speed) {
                case SPEED_10000:
                        s = "10Gbps";
                        break;
                case SPEED_1000:
                        s = "1000Mbps";
                        break;
                case SPEED_100:
                        s = "100Mbps";
                        break;
                }

                printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
                       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
        }
}

/**
 *      t3_os_link_changed - handle link status changes
 *      @adapter: the adapter associated with the link change
 *      @port_id: the port index whose link status has changed
 *      @link_stat: the new status of the link
 *      @speed: the new speed setting
 *      @duplex: the new duplex setting
 *      @pause: the new flow-control setting
 *
 *      This is the OS-dependent handler for link status changes.  The OS
 *      neutral handler takes care of most of the processing for these events,
 *      then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
                        int speed, int duplex, int pause)
{
        struct net_device *dev = adapter->port[port_id];
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        /* Skip changes from disabled ports. */
        if (!netif_running(dev))
                return;

        if (link_stat != netif_carrier_ok(dev)) {
                if (link_stat) {
                        t3_mac_enable(mac, MAC_DIRECTION_RX);
                        netif_carrier_on(dev);
                } else {
                        netif_carrier_off(dev);
                        pi->phy.ops->power_down(&pi->phy, 1);
                        t3_mac_disable(mac, MAC_DIRECTION_RX);
                        t3_link_start(&pi->phy, mac, &pi->link_config);
                }

                link_report(dev);
        }
}

static void cxgb_set_rxmode(struct net_device *dev)
{
        struct t3_rx_mode rm;
        struct port_info *pi = netdev_priv(dev);

        init_rx_mode(&rm, dev, dev->mc_list);
        t3_mac_set_rx_mode(&pi->mac, &rm);
}

/**
 *      link_start - enable a port
 *      @dev: the device to enable
 *
 *      Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
        struct t3_rx_mode rm;
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        init_rx_mode(&rm, dev, dev->mc_list);
        t3_mac_reset(mac);
        t3_mac_set_mtu(mac, dev->mtu);
        t3_mac_set_address(mac, 0, dev->dev_addr);
        t3_mac_set_rx_mode(mac, &rm);
        t3_link_start(&pi->phy, mac, &pi->link_config);
        t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

static inline void cxgb_disable_msi(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~USING_MSIX;
        } else if (adapter->flags & USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~USING_MSI;
        }
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
        t3_slow_intr_handler(cookie);
        return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
        int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

        snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
        adap->msix_info[0].desc[n] = 0;

        for_each_port(adap, j) {
                struct net_device *d = adap->port[j];
                const struct port_info *pi = netdev_priv(d);

                for (i = 0; i < pi->nqsets; i++, msi_idx++) {
                        snprintf(adap->msix_info[msi_idx].desc, n,
                                 "%s (queue %d)", d->name, i);
                        adap->msix_info[msi_idx].desc[n] = 0;
                }
        }
}

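/*
 * Request the MSI-X data interrupts, one per SGE queue set.  Vector 0 is
 * reserved for the asynchronous-event interrupt, so queue set i uses
 * vector i + 1.  On failure, release any vectors already requested.
 */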
static int request_msix_data_irqs(struct adapter *adap)
{
        int i, j, err, qidx = 0;

        for_each_port(adap, i) {
                int nqsets = adap2pinfo(adap, i)->nqsets;

                for (j = 0; j < nqsets; ++j) {
                        err = request_irq(adap->msix_info[qidx + 1].vec,
                                          t3_intr_handler(adap,
                                                          adap->sge.qs[qidx].
                                                          rspq.polling), 0,
                                          adap->msix_info[qidx + 1].desc,
                                          &adap->sge.qs[qidx]);
                        if (err) {
                                while (--qidx >= 0)
                                        free_irq(adap->msix_info[qidx + 1].vec,
                                                 &adap->sge.qs[qidx]);
                                return err;
                        }
                        qidx++;
                }
        }
        return 0;
}

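/*
 * Wait for queue set 0's response queue offload packet count to advance by
 * n beyond init_cnt, i.e. for n management replies to arrive.  Polls up to
 * 5 times at 10 ms intervals before timing out.
 */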
static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
                              unsigned long n)
{
        int attempts = 5;

        while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
                if (!--attempts)
                        return -ETIMEDOUT;
                msleep(10);
        }
        return 0;
}

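/*
 * Write benign values to every SMT, L2T, and routing table entry, plus one
 * TCB field, apparently so the TP memories hold valid parity before parity
 * error reporting is enabled (see the TP_PARITY_INIT handling in cxgb_up).
 * Offload mode is turned on temporarily so the management packets flow.
 */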
static int init_tp_parity(struct adapter *adap)
{
        int i;
        struct sk_buff *skb;
        struct cpl_set_tcb_field *greq;
        unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

        t3_tp_set_offload_mode(adap, 1);

        for (i = 0; i < 16; i++) {
                struct cpl_smt_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
                req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
                req->iff = i;
                t3_mgmt_tx(adap, skb);
        }

        for (i = 0; i < 2048; i++) {
                struct cpl_l2t_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
                req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
                req->params = htonl(V_L2T_W_IDX(i));
                t3_mgmt_tx(adap, skb);
        }

        for (i = 0; i < 2048; i++) {
                struct cpl_rte_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
                req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
                req->l2t_idx = htonl(V_L2T_W_IDX(i));
                t3_mgmt_tx(adap, skb);
        }

        skb = alloc_skb(sizeof(*greq), GFP_KERNEL | __GFP_NOFAIL);
        greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
        memset(greq, 0, sizeof(*greq));
        greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
        greq->mask = cpu_to_be64(1);
        t3_mgmt_tx(adap, skb);

        i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
        t3_tp_set_offload_mode(adap, 0);
        return i;
}

/**
 *      setup_rss - configure RSS
 *      @adap: the adapter
 *
 *      Sets up RSS to distribute packets to multiple receive queues.  We
 *      configure the RSS CPU lookup table to distribute to the number of HW
 *      receive queues, and the response queue lookup table to narrow that
 *      down to the response queues actually configured for each port.
 *      We always configure the RSS mapping for two ports since the mapping
 *      table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
        int i;
        unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
        unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
        u8 cpus[SGE_QSETS + 1];
        u16 rspq_map[RSS_TABLE_SIZE];

        for (i = 0; i < SGE_QSETS; ++i)
                cpus[i] = i;
        cpus[SGE_QSETS] = 0xff; /* terminator */

        for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
                rspq_map[i] = i % nq0;
                rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
        }

        t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
                      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
                      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}

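/*
 * Register a NAPI handler for each queue set; qs->adap is non-NULL only
 * for queue sets that have actually been allocated.
 */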
static void init_napi(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++) {
                struct sge_qset *qs = &adap->sge.qs[i];

                if (qs->adap)
                        netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
                                       64);
        }
}

/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++)
                if (adap->sge.qs[i].adap)
                        napi_disable(&adap->sge.qs[i].napi);
}

static void enable_all_napi(struct adapter *adap)
{
        int i;
        for (i = 0; i < SGE_QSETS; i++)
                if (adap->sge.qs[i].adap)
                        napi_enable(&adap->sge.qs[i].napi);
}

/**
 *      setup_sge_qsets - configure SGE Tx/Rx/response queues
 *      @adap: the adapter
 *
 *      Determines how many sets of SGE queues to use and initializes them.
 *      We support multiple queue sets per port if we have MSI-X, otherwise
 *      just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
        int i, j, err, irq_idx = 0, qset_idx = 0;
        unsigned int ntxq = SGE_TXQ_PER_SET;

        if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
                irq_idx = -1;

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                struct port_info *pi = netdev_priv(dev);

                pi->qs = &adap->sge.qs[pi->first_qset];
                for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
                        err = t3_sge_alloc_qset(adap, qset_idx, 1,
                                (adap->flags & USING_MSIX) ? qset_idx + 1 :
                                                             irq_idx,
                                &adap->params.sge.qset[qset_idx], ntxq, dev);
                        if (err) {
                                t3_free_sge_resources(adap);
                                return err;
                        }
                }
        }

        return 0;
}

static ssize_t attr_show(struct device *d, char *buf,
                         ssize_t(*format) (struct net_device *, char *))
{
        ssize_t len;

        /* Synchronize with ioctls that may shut down the device */
        rtnl_lock();
        len = (*format) (to_net_dev(d), buf);
        rtnl_unlock();
        return len;
}

static ssize_t attr_store(struct device *d,
                          const char *buf, size_t len,
                          ssize_t(*set) (struct net_device *, unsigned int),
                          unsigned int min_val, unsigned int max_val)
{
        char *endp;
        ssize_t ret;
        unsigned int val;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        val = simple_strtoul(buf, &endp, 0);
        if (endp == buf || val < min_val || val > max_val)
                return -EINVAL;

        rtnl_lock();
        ret = (*set) (to_net_dev(d), val);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
        struct port_info *pi = netdev_priv(dev); \
        struct adapter *adap = pi->adapter; \
        return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return attr_show(d, buf, format_##name); \
}

static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val && adap->params.rev == 0)
                return -EINVAL;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
            min_tids)
                return -EINVAL;
        adap->params.mc5.nfilters = val;
        return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
            MC5_MIN_TIDS)
                return -EINVAL;
        adap->params.mc5.nservers = val;
        return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, buf, len, set_nservers, 0, ~0);
}

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
        &dev_attr_cam_size.attr,
        &dev_attr_nfilters.attr,
        &dev_attr_nservers.attr,
        NULL
};

static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };

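/*
 * Report the rate of one of the eight HW Tx schedulers.  Each PIO word
 * holds the parameters of two schedulers: bytes per tick (bpt) and core
 * clocks per tick (cpt).  Assuming cclk is the core clock in kHz,
 * cclk * 1000 / cpt is ticks/sec, times bpt gives bytes/sec, and dividing
 * by 125 converts that to Kbps.
 */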
static ssize_t tm_attr_show(struct device *d,
                            char *buf, int sched)
{
        struct port_info *pi = netdev_priv(to_net_dev(d));
        struct adapter *adap = pi->adapter;
        unsigned int v, addr, bpt, cpt;
        ssize_t len;

        addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
        rtnl_lock();
        t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
        v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
        if (sched & 1)
                v >>= 16;
        bpt = (v >> 8) & 0xff;
        cpt = v & 0xff;
        if (!cpt)
                len = sprintf(buf, "disabled\n");
        else {
                v = (adap->params.vpd.cclk * 1000) / cpt;
                len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
        }
        rtnl_unlock();
        return len;
}

static ssize_t tm_attr_store(struct device *d,
                             const char *buf, size_t len, int sched)
{
        struct port_info *pi = netdev_priv(to_net_dev(d));
        struct adapter *adap = pi->adapter;
        unsigned int val;
        char *endp;
        ssize_t ret;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        val = simple_strtoul(buf, &endp, 0);
        if (endp == buf || val > 10000000)
                return -EINVAL;

        rtnl_lock();
        ret = t3_config_sched(adap, val, sched);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
                            const char *buf, size_t len) \
{ \
        return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
        &dev_attr_sched0.attr,
        &dev_attr_sched1.attr,
        &dev_attr_sched2.attr,
        &dev_attr_sched3.attr,
        &dev_attr_sched4.attr,
        &dev_attr_sched5.attr,
        &dev_attr_sched6.attr,
        &dev_attr_sched7.attr,
        NULL
};

static struct attribute_group offload_attr_group = {.attrs = offload_attrs };

/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
        int ret;

        local_bh_disable();
        ret = t3_offload_tx(tdev, skb);
        local_bh_enable();
        return ret;
}

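/*
 * Program one source MAC table (SMT) entry with the MAC address of the
 * corresponding port, via a CPL_SMT_WRITE_REQ sent on the offload queue.
 */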
static int write_smt_entry(struct adapter *adapter, int idx)
{
        struct cpl_smt_write_req *req;
        struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

        if (!skb)
                return -ENOMEM;

        req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
        req->mtu_idx = NMTUS - 1;       /* should be 0 but there's a T3 bug */
        req->iff = idx;
        memset(req->src_mac1, 0, sizeof(req->src_mac1));
        memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
        skb->priority = 1;
        offload_tx(&adapter->tdev, skb);
        return 0;
}

static int init_smt(struct adapter *adapter)
{
        int i;

        for_each_port(adapter, i)
            write_smt_entry(adapter, i);
        return 0;
}

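/*
 * The two ports' MTUs are packed into a single 32-bit register: port 0 in
 * the low 16 bits, port 1 in the high 16 bits.
 */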
static void init_port_mtus(struct adapter *adapter)
{
        unsigned int mtus = adapter->port[0]->mtu;

        if (adapter->port[1])
                mtus |= adapter->port[1]->mtu << 16;
        t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}

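/*
 * Send a management work request to the firmware packet scheduler: bind
 * queue qidx to scheduler sched with the given min/max parameters and
 * port binding.
 */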
static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
                              int hi, int port)
{
        struct sk_buff *skb;
        struct mngt_pktsched_wr *req;

        skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
        req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
        req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
        req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
        req->sched = sched;
        req->idx = qidx;
        req->min = lo;
        req->max = hi;
        req->binding = port;
        t3_mgmt_tx(adap, skb);
}

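/*
 * Bind each SGE queue set to its port in the firmware packet scheduler
 * (scheduler 1, with default min/max rates).
 */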
static void bind_qsets(struct adapter *adap)
{
        int i, j;

        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                for (j = 0; j < pi->nqsets; ++j)
                        send_pktsched_cmd(adap, 1, pi->first_qset + j, -1,
                                          -1, i);
        }
}

#define FW_FNAME "t3fw-%d.%d.%d.bin"
#define TPSRAM_NAME "t3%c_protocol_sram-%d.%d.%d.bin"

static int upgrade_fw(struct adapter *adap)
{
        int ret;
        char buf[64];
        const struct firmware *fw;
        struct device *dev = &adap->pdev->dev;

        snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
                 FW_VERSION_MINOR, FW_VERSION_MICRO);
        ret = request_firmware(&fw, buf, dev);
        if (ret < 0) {
                dev_err(dev, "could not upgrade firmware: unable to load %s\n",
                        buf);
                return ret;
        }
        ret = t3_load_fw(adap, fw->data, fw->size);
        release_firmware(fw);

        if (ret == 0)
                dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
                         FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
        else
                dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
                        FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

        return ret;
}

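/*
 * Map the chip revision to the character used in protocol SRAM image file
 * names ('b' for T3B, 'c' for T3C); returns 0 for revisions that need no
 * protocol SRAM image.
 */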
static inline char t3rev2char(struct adapter *adapter)
{
        char rev = 0;

        switch(adapter->params.rev) {
        case T3_REV_B:
        case T3_REV_B2:
                rev = 'b';
                break;
        case T3_REV_C:
                rev = 'c';
                break;
        }
        return rev;
}

static int update_tpsram(struct adapter *adap)
{
        const struct firmware *tpsram;
        char buf[64];
        struct device *dev = &adap->pdev->dev;
        int ret;
        char rev;

        rev = t3rev2char(adap);
        if (!rev)
                return 0;

        snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
                 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);

        ret = request_firmware(&tpsram, buf, dev);
        if (ret < 0) {
                dev_err(dev, "could not load TP SRAM: unable to load %s\n",
                        buf);
                return ret;
        }

        ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
        if (ret)
                goto release_tpsram;

        ret = t3_set_proto_sram(adap, tpsram->data);
        if (ret == 0)
                dev_info(dev,
                         "successful update of protocol engine "
                         "to %d.%d.%d\n",
                         TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
        else
                dev_err(dev, "failed to update protocol engine to %d.%d.%d\n",
                        TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
        if (ret)
                dev_err(dev, "loading protocol SRAM failed\n");

release_tpsram:
        release_firmware(tpsram);

        return ret;
}

/**
 *      cxgb_up - enable the adapter
 *      @adapter: adapter being enabled
 *
 *      Called when the first port is enabled, this function performs the
 *      actions necessary to make an adapter operational, such as completing
 *      the initialization of HW modules, and enabling interrupts.
 *
 *      Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
        int err;
        int must_load;

        if (!(adap->flags & FULL_INIT_DONE)) {
                err = t3_check_fw_version(adap, &must_load);
                if (err == -EINVAL) {
                        err = upgrade_fw(adap);
                        if (err && must_load)
                                goto out;
                }

                err = t3_check_tpsram_version(adap, &must_load);
                if (err == -EINVAL) {
                        err = update_tpsram(adap);
                        if (err && must_load)
                                goto out;
                }

                err = t3_init_hw(adap, 0);
                if (err)
                        goto out;

                t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
                t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

                err = setup_sge_qsets(adap);
                if (err)
                        goto out;

                setup_rss(adap);
                init_napi(adap);
                adap->flags |= FULL_INIT_DONE;
        }

        t3_intr_clear(adap);

        if (adap->flags & USING_MSIX) {
                name_msix_vecs(adap);
                err = request_irq(adap->msix_info[0].vec,
                                  t3_async_intr_handler, 0,
                                  adap->msix_info[0].desc, adap);
                if (err)
                        goto irq_err;

                err = request_msix_data_irqs(adap);
                if (err) {
                        free_irq(adap->msix_info[0].vec, adap);
                        goto irq_err;
                }
        } else if ((err = request_irq(adap->pdev->irq,
                                      t3_intr_handler(adap,
                                                      adap->sge.qs[0].rspq.
                                                      polling),
                                      (adap->flags & USING_MSI) ?
                                       0 : IRQF_SHARED,
                                      adap->name, adap)))
                goto irq_err;

        enable_all_napi(adap);
        t3_sge_start(adap);
        t3_intr_enable(adap);

        if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
            is_offload(adap) && init_tp_parity(adap) == 0)
                adap->flags |= TP_PARITY_INIT;

        if (adap->flags & TP_PARITY_INIT) {
                t3_write_reg(adap, A_TP_INT_CAUSE,
                             F_CMCACHEPERR | F_ARPLUTPERR);
                t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
        }

        if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
                bind_qsets(adap);
        adap->flags |= QUEUES_BOUND;

out:
        return err;
irq_err:
        CH_ERR(adap, "request_irq failed, err %d\n", err);
        goto out;
}

/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
        t3_sge_stop(adapter);
        spin_lock_irq(&adapter->work_lock);     /* sync with PHY intr task */
        t3_intr_disable(adapter);
        spin_unlock_irq(&adapter->work_lock);

        if (adapter->flags & USING_MSIX) {
                int i, n = 0;

                free_irq(adapter->msix_info[0].vec, adapter);
                for_each_port(adapter, i)
                    n += adap2pinfo(adapter, i)->nqsets;

                for (i = 0; i < n; ++i)
                        free_irq(adapter->msix_info[i + 1].vec,
                                 &adapter->sge.qs[i]);
        } else
                free_irq(adapter->pdev->irq, adapter);

        flush_workqueue(cxgb3_wq);      /* wait for external IRQ handler */
        quiesce_rx(adapter);
}

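/*
 * Schedule the periodic adapter check task.  The interval is the link
 * polling period (in tenths of a second) when link polling is in use,
 * otherwise the MAC statistics update period (in seconds).
 */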
static void schedule_chk_task(struct adapter *adap)
{
        unsigned int timeo;

        timeo = adap->params.linkpoll_period ?
            (HZ * adap->params.linkpoll_period) / 10 :
            adap->params.stats_update_period * HZ;
        if (timeo)
                queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}

static int offload_open(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct t3cdev *tdev = dev2t3cdev(dev);
        int adap_up = adapter->open_device_map & PORT_MASK;
        int err;

        if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        if (!adap_up && (err = cxgb_up(adapter)) < 0)
                return err;

        t3_tp_set_offload_mode(adapter, 1);
        tdev->lldev = adapter->port[0];
        err = cxgb3_offload_activate(adapter);
        if (err)
                goto out;

        init_port_mtus(adapter);
        t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
                     adapter->params.b_wnd,
                     adapter->params.rev == 0 ?
                     adapter->port[0]->mtu : 0xffff);
        init_smt(adapter);

        /* Never mind if the next step fails */
        sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group);

        /* Call back all registered clients */
        cxgb3_add_clients(tdev);

out:
        /* restore them in case the offload module has changed them */
        if (err) {
                t3_tp_set_offload_mode(adapter, 0);
                clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
                cxgb3_set_dummy_ops(tdev);
        }
        return err;
}

static int offload_close(struct t3cdev *tdev)
{
        struct adapter *adapter = tdev2adap(tdev);

        if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        /* Call back all registered clients */
        cxgb3_remove_clients(tdev);

        sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

        tdev->lldev = NULL;
        cxgb3_set_dummy_ops(tdev);
        t3_tp_set_offload_mode(adapter, 0);
        clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

        if (!adapter->open_device_map)
                cxgb_down(adapter);

        cxgb3_offload_deactivate(adapter);
        return 0;
}

static int cxgb_open(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        int other_ports = adapter->open_device_map & PORT_MASK;
        int err;

        if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) {
                quiesce_rx(adapter);
                return err;
        }

        set_bit(pi->port_id, &adapter->open_device_map);
        if (is_offload(adapter) && !ofld_disable) {
                err = offload_open(dev);
                if (err)
                        printk(KERN_WARNING
                               "Could not initialize offload capabilities\n");
        }

        link_start(dev);
        t3_port_intr_enable(adapter, pi->port_id);
        netif_start_queue(dev);
        if (!other_ports)
                schedule_chk_task(adapter);

        return 0;
}

static int cxgb_close(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        t3_port_intr_disable(adapter, pi->port_id);
        netif_stop_queue(dev);
        pi->phy.ops->power_down(&pi->phy, 1);
        netif_carrier_off(dev);
        t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

        spin_lock(&adapter->work_lock); /* sync with update task */
        clear_bit(pi->port_id, &adapter->open_device_map);
        spin_unlock(&adapter->work_lock);

        if (!(adapter->open_device_map & PORT_MASK))
                cancel_rearming_delayed_workqueue(cxgb3_wq,
                                                  &adapter->adap_check_task);

        if (!adapter->open_device_map)
                cxgb_down(adapter);

        return 0;
}

static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct net_device_stats *ns = &pi->netstats;
        const struct mac_stats *pstats;

        spin_lock(&adapter->stats_lock);
        pstats = t3_mac_update_stats(&pi->mac);
        spin_unlock(&adapter->stats_lock);

        ns->tx_bytes = pstats->tx_octets;
        ns->tx_packets = pstats->tx_frames;
        ns->rx_bytes = pstats->rx_octets;
        ns->rx_packets = pstats->rx_frames;
        ns->multicast = pstats->rx_mcast_frames;

        ns->tx_errors = pstats->tx_underrun;
        ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
            pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
            pstats->rx_fifo_ovfl;

        /* detailed rx_errors */
        ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
        ns->rx_over_errors = 0;
        ns->rx_crc_errors = pstats->rx_fcs_errs;
        ns->rx_frame_errors = pstats->rx_symbol_errs;
        ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
        ns->rx_missed_errors = pstats->rx_cong_drops;

        /* detailed tx_errors */
        ns->tx_aborted_errors = 0;
        ns->tx_carrier_errors = 0;
        ns->tx_fifo_errors = pstats->tx_underrun;
        ns->tx_heartbeat_errors = 0;
        ns->tx_window_errors = 0;
        return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        adapter->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
        "TxOctetsOK         ",
        "TxFramesOK         ",
        "TxMulticastFramesOK",
        "TxBroadcastFramesOK",
        "TxPauseFrames      ",
        "TxUnderrun         ",
        "TxExtUnderrun      ",

        "TxFrames64         ",
        "TxFrames65To127    ",
        "TxFrames128To255   ",
        "TxFrames256To511   ",
        "TxFrames512To1023  ",
        "TxFrames1024To1518 ",
        "TxFrames1519ToMax  ",

        "RxOctetsOK         ",
        "RxFramesOK         ",
        "RxMulticastFramesOK",
        "RxBroadcastFramesOK",
        "RxPauseFrames      ",
        "RxFCSErrors        ",
        "RxSymbolErrors     ",
        "RxShortErrors      ",
        "RxJabberErrors     ",
        "RxLengthErrors     ",
        "RxFIFOoverflow     ",

        "RxFrames64         ",
        "RxFrames65To127    ",
        "RxFrames128To255   ",
        "RxFrames256To511   ",
        "RxFrames512To1023  ",
        "RxFrames1024To1518 ",
        "RxFrames1519ToMax  ",

        "PhyFIFOErrors      ",
        "TSO                ",
        "VLANextractions    ",
        "VLANinsertions     ",
        "TxCsumOffload      ",
        "RxCsumGood         ",
        "RxDrops            ",

        "CheckTXEnToggled   ",
        "CheckResets        ",
};

static int get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(stats_strings);
        default:
                return -EOPNOTSUPP;
        }
}

#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
        return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
        return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        u32 fw_vers = 0;
        u32 tp_vers = 0;

        t3_get_fw_version(adapter, &fw_vers);
        t3_get_tp_version(adapter, &tp_vers);

        strcpy(info->driver, DRV_NAME);
        strcpy(info->version, DRV_VERSION);
        strcpy(info->bus_info, pci_name(adapter->pdev));
        if (!fw_vers)
                strcpy(info->fw_version, "N/A");
        else {
                snprintf(info->fw_version, sizeof(info->fw_version),
                         "%s %u.%u.%u TP %u.%u.%u",
                         G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
                         G_FW_VERSION_MAJOR(fw_vers),
                         G_FW_VERSION_MINOR(fw_vers),
                         G_FW_VERSION_MICRO(fw_vers),
                         G_TP_VERSION_MAJOR(tp_vers),
                         G_TP_VERSION_MINOR(tp_vers),
                         G_TP_VERSION_MICRO(tp_vers));
        }
}

static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
{
        if (stringset == ETH_SS_STATS)
                memcpy(data, stats_strings, sizeof(stats_strings));
}

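/*
 * Sum one per-queue statistic across all the queue sets of a port.
 */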
static unsigned long collect_sge_port_stats(struct adapter *adapter,
                                            struct port_info *p, int idx)
{
        int i;
        unsigned long tot = 0;

        for (i = 0; i < p->nqsets; ++i)
                tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
        return tot;
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
                      u64 *data)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        const struct mac_stats *s;

        spin_lock(&adapter->stats_lock);
        s = t3_mac_update_stats(&pi->mac);
        spin_unlock(&adapter->stats_lock);

        *data++ = s->tx_octets;
        *data++ = s->tx_frames;
        *data++ = s->tx_mcast_frames;
        *data++ = s->tx_bcast_frames;
        *data++ = s->tx_pause;
        *data++ = s->tx_underrun;
        *data++ = s->tx_fifo_urun;

        *data++ = s->tx_frames_64;
        *data++ = s->tx_frames_65_127;
        *data++ = s->tx_frames_128_255;
        *data++ = s->tx_frames_256_511;
        *data++ = s->tx_frames_512_1023;
        *data++ = s->tx_frames_1024_1518;
        *data++ = s->tx_frames_1519_max;

        *data++ = s->rx_octets;
        *data++ = s->rx_frames;
        *data++ = s->rx_mcast_frames;
        *data++ = s->rx_bcast_frames;
        *data++ = s->rx_pause;
        *data++ = s->rx_fcs_errs;
        *data++ = s->rx_symbol_errs;
        *data++ = s->rx_short;
        *data++ = s->rx_jabber;
        *data++ = s->rx_too_long;
        *data++ = s->rx_fifo_ovfl;

        *data++ = s->rx_frames_64;
        *data++ = s->rx_frames_65_127;
        *data++ = s->rx_frames_128_255;
        *data++ = s->rx_frames_256_511;
        *data++ = s->rx_frames_512_1023;
        *data++ = s->rx_frames_1024_1518;
        *data++ = s->rx_frames_1519_max;

        *data++ = pi->phy.fifo_errors;

        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
        *data++ = s->rx_cong_drops;

        *data++ = s->num_toggled;
        *data++ = s->num_resets;
}

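/*
 * Copy the register range [start, end] into the dump buffer at the offset
 * matching the register addresses, so each register appears at its own
 * address within the dump.
 */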
static inline void reg_block_dump(struct adapter *ap, void *buf,
                                  unsigned int start, unsigned int end)
{
        u32 *p = buf + start;

        for (; start <= end; start += sizeof(u32))
                *p++ = t3_read_reg(ap, start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
                     void *buf)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *ap = pi->adapter;

        /*
         * Version scheme:
         * bits 0..9: chip version
         * bits 10..15: chip revision
         * bit 31: set for PCIe cards
         */
        regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);

        /*
         * We skip the MAC statistics registers because they are clear-on-read.
         * Also reading multi-register stats would need to synchronize with the
         * periodic mac stats accumulation.  Hard to justify the complexity.
         */
        memset(buf, 0, T3_REGMAP_SIZE);
        reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
        reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
        reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
        reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
        reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
        reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
                       XGM_REG(A_XGM_SERDES_STAT3, 1));
        reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
                       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}

static int restart_autoneg(struct net_device *dev)
{
        struct port_info *p = netdev_priv(dev);

        if (!netif_running(dev))
                return -EAGAIN;
        if (p->link_config.autoneg != AUTONEG_ENABLE)
                return -EINVAL;
        p->phy.ops->autoneg_restart(&p->phy);
        return 0;
}

static int cxgb3_phys_id(struct net_device *dev, u32 data)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        int i;

        if (data == 0)
                data = 2;

        for (i = 0; i < data * 2; i++) {
                t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
                                 (i & 1) ? F_GPIO0_OUT_VAL : 0);
                if (msleep_interruptible(500))
                        break;
        }
        t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
                         F_GPIO0_OUT_VAL);
        return 0;
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct port_info *p = netdev_priv(dev);

        cmd->supported = p->link_config.supported;
        cmd->advertising = p->link_config.advertising;

        if (netif_carrier_ok(dev)) {
                cmd->speed = p->link_config.speed;
                cmd->duplex = p->link_config.duplex;
        } else {
                cmd->speed = -1;
                cmd->duplex = -1;
        }

        cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
        cmd->phy_address = p->phy.addr;
        cmd->transceiver = XCVR_EXTERNAL;
        cmd->autoneg = p->link_config.autoneg;
        cmd->maxtxpkt = 0;
        cmd->maxrxpkt = 0;
        return 0;
}

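/*
 * Translate a speed/duplex pair into the matching ethtool SUPPORTED_*
 * capability bit.  10Gbps half-duplex has no capability bit, so it maps
 * to 0.
 */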
1439 static int speed_duplex_to_caps(int speed, int duplex)
1440 {
1441         int cap = 0;
1442
1443         switch (speed) {
1444         case SPEED_10:
1445                 if (duplex == DUPLEX_FULL)
1446                         cap = SUPPORTED_10baseT_Full;
1447                 else
1448                         cap = SUPPORTED_10baseT_Half;
1449                 break;
1450         case SPEED_100:
1451                 if (duplex == DUPLEX_FULL)
1452                         cap = SUPPORTED_100baseT_Full;
1453                 else
1454                         cap = SUPPORTED_100baseT_Half;
1455                 break;
1456         case SPEED_1000:
1457                 if (duplex == DUPLEX_FULL)
1458                         cap = SUPPORTED_1000baseT_Full;
1459                 else
1460                         cap = SUPPORTED_1000baseT_Half;
1461                 break;
1462         case SPEED_10000:
1463                 if (duplex == DUPLEX_FULL)
1464                         cap = SUPPORTED_10000baseT_Full;
1465         }
1466         return cap;
1467 }
1468
1469 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1470                       ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1471                       ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1472                       ADVERTISED_10000baseT_Full)
1473
1474 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1475 {
1476         struct port_info *p = netdev_priv(dev);
1477         struct link_config *lc = &p->link_config;
1478
1479         if (!(lc->supported & SUPPORTED_Autoneg))
1480                 return -EOPNOTSUPP;     /* can't change speed/duplex */
1481
1482         if (cmd->autoneg == AUTONEG_DISABLE) {
1483                 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1484
1485                 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
1486                         return -EINVAL;
1487                 lc->requested_speed = cmd->speed;
1488                 lc->requested_duplex = cmd->duplex;
1489                 lc->advertising = 0;
1490         } else {
1491                 cmd->advertising &= ADVERTISED_MASK;
1492                 cmd->advertising &= lc->supported;
1493                 if (!cmd->advertising)
1494                         return -EINVAL;
1495                 lc->requested_speed = SPEED_INVALID;
1496                 lc->requested_duplex = DUPLEX_INVALID;
1497                 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1498         }
1499         lc->autoneg = cmd->autoneg;
1500         if (netif_running(dev))
1501                 t3_link_start(&p->phy, &p->mac, lc);
1502         return 0;
1503 }
1504
1505 static void get_pauseparam(struct net_device *dev,
1506                            struct ethtool_pauseparam *epause)
1507 {
1508         struct port_info *p = netdev_priv(dev);
1509
1510         epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1511         epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1512         epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1513 }
1514
1515 static int set_pauseparam(struct net_device *dev,
1516                           struct ethtool_pauseparam *epause)
1517 {
1518         struct port_info *p = netdev_priv(dev);
1519         struct link_config *lc = &p->link_config;
1520
1521         if (epause->autoneg == AUTONEG_DISABLE)
1522                 lc->requested_fc = 0;
1523         else if (lc->supported & SUPPORTED_Autoneg)
1524                 lc->requested_fc = PAUSE_AUTONEG;
1525         else
1526                 return -EINVAL;
1527
1528         if (epause->rx_pause)
1529                 lc->requested_fc |= PAUSE_RX;
1530         if (epause->tx_pause)
1531                 lc->requested_fc |= PAUSE_TX;
1532         if (lc->autoneg == AUTONEG_ENABLE) {
1533                 if (netif_running(dev))
1534                         t3_link_start(&p->phy, &p->mac, lc);
1535         } else {
1536                 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1537                 if (netif_running(dev))
1538                         t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1539         }
1540         return 0;
1541 }
1542
1543 static u32 get_rx_csum(struct net_device *dev)
1544 {
1545         struct port_info *p = netdev_priv(dev);
1546
1547         return p->rx_csum_offload;
1548 }
1549
1550 static int set_rx_csum(struct net_device *dev, u32 data)
1551 {
1552         struct port_info *p = netdev_priv(dev);
1553
1554         p->rx_csum_offload = data;
1555         return 0;
1556 }
1557
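/*
 * Ethtool ring parameters.  The hardware has no "mini" Rx ring, so the
 * rx_mini fields are repurposed here to report and size the response
 * queue instead.
 */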
1558 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1559 {
1560         struct port_info *pi = netdev_priv(dev);
1561         struct adapter *adapter = pi->adapter;
1562         const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1563
1564         e->rx_max_pending = MAX_RX_BUFFERS;
1565         e->rx_mini_max_pending = 0;
1566         e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1567         e->tx_max_pending = MAX_TXQ_ENTRIES;
1568
1569         e->rx_pending = q->fl_size;
1570         e->rx_mini_pending = q->rspq_size;
1571         e->rx_jumbo_pending = q->jumbo_size;
1572         e->tx_pending = q->txq_size[0];
1573 }
1574
1575 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1576 {
1577         struct port_info *pi = netdev_priv(dev);
1578         struct adapter *adapter = pi->adapter;
1579         struct qset_params *q;
1580         int i;
1581
1582         if (e->rx_pending > MAX_RX_BUFFERS ||
1583             e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1584             e->tx_pending > MAX_TXQ_ENTRIES ||
1585             e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1586             e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1587             e->rx_pending < MIN_FL_ENTRIES ||
1588             e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1589             e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1590                 return -EINVAL;
1591
1592         if (adapter->flags & FULL_INIT_DONE)
1593                 return -EBUSY;
1594
1595         q = &adapter->params.sge.qset[pi->first_qset];
1596         for (i = 0; i < pi->nqsets; ++i, ++q) {
1597                 q->rspq_size = e->rx_mini_pending;
1598                 q->fl_size = e->rx_pending;
1599                 q->jumbo_size = e->rx_jumbo_pending;
1600                 q->txq_size[0] = e->tx_pending;
1601                 q->txq_size[1] = e->tx_pending;
1602                 q->txq_size[2] = e->tx_pending;
1603         }
1604         return 0;
1605 }
1606
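/*
 * Set the Rx interrupt holdoff time.  Note that only queue set 0 is
 * updated here; the x10 scaling against M_NEWTIMER suggests the
 * hardware timer counts in ~100ns units, though that is an assumption
 * drawn from the conversion rather than anything documented here.
 */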
1607 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1608 {
1609         struct port_info *pi = netdev_priv(dev);
1610         struct adapter *adapter = pi->adapter;
1611         struct qset_params *qsp = &adapter->params.sge.qset[0];
1612         struct sge_qset *qs = &adapter->sge.qs[0];
1613
1614         if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1615                 return -EINVAL;
1616
1617         qsp->coalesce_usecs = c->rx_coalesce_usecs;
1618         t3_update_qset_coalesce(qs, qsp);
1619         return 0;
1620 }
1621
1622 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1623 {
1624         struct port_info *pi = netdev_priv(dev);
1625         struct adapter *adapter = pi->adapter;
1626         struct qset_params *q = adapter->params.sge.qset;
1627
1628         c->rx_coalesce_usecs = q->coalesce_usecs;
1629         return 0;
1630 }
1631
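/*
 * EEPROM reads are performed as aligned 32-bit words, so the loop
 * starts at the word below the requested offset and the user's exact
 * byte range is copied out of the bounce buffer afterwards.
 */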
1632 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1633                       u8 *data)
1634 {
1635         struct port_info *pi = netdev_priv(dev);
1636         struct adapter *adapter = pi->adapter;
1637         int i, err = 0;
1638
1639         u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1640         if (!buf)
1641                 return -ENOMEM;
1642
1643         e->magic = EEPROM_MAGIC;
1644         for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1645                 err = t3_seeprom_read(adapter, i, (__le32 *)&buf[i]);
1646
1647         if (!err)
1648                 memcpy(data, buf + e->offset, e->len);
1649         kfree(buf);
1650         return err;
1651 }
1652
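/*
 * EEPROM writes are word-based too: for unaligned requests the partial
 * words at both ends are read back and merged with the user data, then
 * the whole aligned range is written with write protection lifted.
 */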
1653 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1654                       u8 *data)
1655 {
1656         struct port_info *pi = netdev_priv(dev);
1657         struct adapter *adapter = pi->adapter;
1658         u32 aligned_offset, aligned_len;
1659         __le32 *p;
1660         u8 *buf;
1661         int err;
1662
1663         if (eeprom->magic != EEPROM_MAGIC)
1664                 return -EINVAL;
1665
1666         aligned_offset = eeprom->offset & ~3;
1667         aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1668
1669         if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1670                 buf = kmalloc(aligned_len, GFP_KERNEL);
1671                 if (!buf)
1672                         return -ENOMEM;
1673                 err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
1674                 if (!err && aligned_len > 4)
1675                         err = t3_seeprom_read(adapter,
1676                                               aligned_offset + aligned_len - 4,
1677                                               (__le32 *)&buf[aligned_len - 4]);
1678                 if (err)
1679                         goto out;
1680                 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1681         } else
1682                 buf = data;
1683
1684         err = t3_seeprom_wp(adapter, 0);
1685         if (err)
1686                 goto out;
1687
1688         for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
1689                 err = t3_seeprom_write(adapter, aligned_offset, *p);
1690                 aligned_offset += 4;
1691         }
1692
1693         if (!err)
1694                 err = t3_seeprom_wp(adapter, 1);
1695 out:
1696         if (buf != data)
1697                 kfree(buf);
1698         return err;
1699 }
1700
1701 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1702 {
1703         wol->supported = 0;
1704         wol->wolopts = 0;
1705         memset(&wol->sopass, 0, sizeof(wol->sopass));
1706 }
1707
1708 static const struct ethtool_ops cxgb_ethtool_ops = {
1709         .get_settings = get_settings,
1710         .set_settings = set_settings,
1711         .get_drvinfo = get_drvinfo,
1712         .get_msglevel = get_msglevel,
1713         .set_msglevel = set_msglevel,
1714         .get_ringparam = get_sge_param,
1715         .set_ringparam = set_sge_param,
1716         .get_coalesce = get_coalesce,
1717         .set_coalesce = set_coalesce,
1718         .get_eeprom_len = get_eeprom_len,
1719         .get_eeprom = get_eeprom,
1720         .set_eeprom = set_eeprom,
1721         .get_pauseparam = get_pauseparam,
1722         .set_pauseparam = set_pauseparam,
1723         .get_rx_csum = get_rx_csum,
1724         .set_rx_csum = set_rx_csum,
1725         .set_tx_csum = ethtool_op_set_tx_csum,
1726         .set_sg = ethtool_op_set_sg,
1727         .get_link = ethtool_op_get_link,
1728         .get_strings = get_strings,
1729         .phys_id = cxgb3_phys_id,
1730         .nway_reset = restart_autoneg,
1731         .get_sset_count = get_sset_count,
1732         .get_ethtool_stats = get_stats,
1733         .get_regs_len = get_regs_len,
1734         .get_regs = get_regs,
1735         .get_wol = get_wol,
1736         .set_tso = ethtool_op_set_tso,
1737 };
1738
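/*
 * Several ioctl structures below use negative field values to mean
 * "leave this setting unchanged", so in_range() deliberately passes any
 * negative value and range-checks only non-negative ones.
 */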
1739 static int in_range(int val, int lo, int hi)
1740 {
1741         return val < 0 || (val <= hi && val >= lo);
1742 }
1743
1744 static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1745 {
1746         struct port_info *pi = netdev_priv(dev);
1747         struct adapter *adapter = pi->adapter;
1748         u32 cmd;
1749         int ret;
1750
1751         if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1752                 return -EFAULT;
1753
1754         switch (cmd) {
1755         case CHELSIO_SET_QSET_PARAMS:{
1756                 int i;
1757                 struct qset_params *q;
1758                 struct ch_qset_params t;
1759
1760                 if (!capable(CAP_NET_ADMIN))
1761                         return -EPERM;
1762                 if (copy_from_user(&t, useraddr, sizeof(t)))
1763                         return -EFAULT;
1764                 if (t.qset_idx >= SGE_QSETS)
1765                         return -EINVAL;
1766                 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
1767                     !in_range(t.cong_thres, 0, 255) ||
1768                     !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
1769                               MAX_TXQ_ENTRIES) ||
1770                     !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
1771                               MAX_TXQ_ENTRIES) ||
1772                     !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
1773                               MAX_CTRL_TXQ_ENTRIES) ||
1774                     !in_range(t.fl_size[0], MIN_FL_ENTRIES,
1775                               MAX_RX_BUFFERS) ||
1776                     !in_range(t.fl_size[1], MIN_FL_ENTRIES,
1777                               MAX_RX_JUMBO_BUFFERS) ||
1778                     !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
1779                               MAX_RSPQ_ENTRIES))
1780                         return -EINVAL;
1781                 if ((adapter->flags & FULL_INIT_DONE) &&
1782                         (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
1783                         t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
1784                         t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
1785                         t.polling >= 0 || t.cong_thres >= 0))
1786                         return -EBUSY;
1787
1788                 q = &adapter->params.sge.qset[t.qset_idx];
1789
1790                 if (t.rspq_size >= 0)
1791                         q->rspq_size = t.rspq_size;
1792                 if (t.fl_size[0] >= 0)
1793                         q->fl_size = t.fl_size[0];
1794                 if (t.fl_size[1] >= 0)
1795                         q->jumbo_size = t.fl_size[1];
1796                 if (t.txq_size[0] >= 0)
1797                         q->txq_size[0] = t.txq_size[0];
1798                 if (t.txq_size[1] >= 0)
1799                         q->txq_size[1] = t.txq_size[1];
1800                 if (t.txq_size[2] >= 0)
1801                         q->txq_size[2] = t.txq_size[2];
1802                 if (t.cong_thres >= 0)
1803                         q->cong_thres = t.cong_thres;
1804                 if (t.intr_lat >= 0) {
1805                         struct sge_qset *qs =
1806                                 &adapter->sge.qs[t.qset_idx];
1807
1808                         q->coalesce_usecs = t.intr_lat;
1809                         t3_update_qset_coalesce(qs, q);
1810                 }
1811                 if (t.polling >= 0) {
1812                         if (adapter->flags & USING_MSIX)
1813                                 q->polling = t.polling;
1814                         else {
1815                                 /* No polling with INTx for T3A */
1816                                 if (adapter->params.rev == 0 &&
1817                                         !(adapter->flags & USING_MSI))
1818                                         t.polling = 0;
1819
1820                                 for (i = 0; i < SGE_QSETS; i++) {
1821                                         q = &adapter->params.sge.
1822                                                 qset[i];
1823                                         q->polling = t.polling;
1824                                 }
1825                         }
1826                 }
1827                 break;
1828         }
1829         case CHELSIO_GET_QSET_PARAMS:{
1830                 struct qset_params *q;
1831                 struct ch_qset_params t;
1832
1833                 if (copy_from_user(&t, useraddr, sizeof(t)))
1834                         return -EFAULT;
1835                 if (t.qset_idx >= SGE_QSETS)
1836                         return -EINVAL;
1837
1838                 q = &adapter->params.sge.qset[t.qset_idx];
1839                 t.rspq_size = q->rspq_size;
1840                 t.txq_size[0] = q->txq_size[0];
1841                 t.txq_size[1] = q->txq_size[1];
1842                 t.txq_size[2] = q->txq_size[2];
1843                 t.fl_size[0] = q->fl_size;
1844                 t.fl_size[1] = q->jumbo_size;
1845                 t.polling = q->polling;
1846                 t.intr_lat = q->coalesce_usecs;
1847                 t.cong_thres = q->cong_thres;
1848
1849                 if (copy_to_user(useraddr, &t, sizeof(t)))
1850                         return -EFAULT;
1851                 break;
1852         }
1853         case CHELSIO_SET_QSET_NUM:{
1854                 struct ch_reg edata;
1855                 unsigned int i, first_qset = 0, other_qsets = 0;
1856
1857                 if (!capable(CAP_NET_ADMIN))
1858                         return -EPERM;
1859                 if (adapter->flags & FULL_INIT_DONE)
1860                         return -EBUSY;
1861                 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1862                         return -EFAULT;
1863                 if (edata.val < 1 ||
1864                         (edata.val > 1 && !(adapter->flags & USING_MSIX)))
1865                         return -EINVAL;
1866
1867                 for_each_port(adapter, i)
1868                         if (adapter->port[i] && adapter->port[i] != dev)
1869                                 other_qsets += adap2pinfo(adapter, i)->nqsets;
1870
1871                 if (edata.val + other_qsets > SGE_QSETS)
1872                         return -EINVAL;
1873
1874                 pi->nqsets = edata.val;
1875
1876                 for_each_port(adapter, i)
1877                         if (adapter->port[i]) {
1878                                 pi = adap2pinfo(adapter, i);
1879                                 pi->first_qset = first_qset;
1880                                 first_qset += pi->nqsets;
1881                         }
1882                 break;
1883         }
1884         case CHELSIO_GET_QSET_NUM:{
1885                 struct ch_reg edata;
1886
1887                 edata.cmd = CHELSIO_GET_QSET_NUM;
1888                 edata.val = pi->nqsets;
1889                 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1890                         return -EFAULT;
1891                 break;
1892         }
1893         case CHELSIO_LOAD_FW:{
1894                 u8 *fw_data;
1895                 struct ch_mem_range t;
1896
1897                 if (!capable(CAP_NET_ADMIN))
1898                         return -EPERM;
1899                 if (copy_from_user(&t, useraddr, sizeof(t)))
1900                         return -EFAULT;
1901
1902                 fw_data = kmalloc(t.len, GFP_KERNEL);
1903                 if (!fw_data)
1904                         return -ENOMEM;
1905
1906                 if (copy_from_user(fw_data, useraddr + sizeof(t),
1907                                    t.len)) {
1908                         kfree(fw_data);
1909                         return -EFAULT;
1910                 }
1911
1912                 ret = t3_load_fw(adapter, fw_data, t.len);
1913                 kfree(fw_data);
1914                 if (ret)
1915                         return ret;
1916                 break;
1917         }
1918         case CHELSIO_SETMTUTAB:{
1919                 struct ch_mtus m;
1920                 int i;
1921
1922                 if (!is_offload(adapter))
1923                         return -EOPNOTSUPP;
1924                 if (!capable(CAP_NET_ADMIN))
1925                         return -EPERM;
1926                 if (offload_running(adapter))
1927                         return -EBUSY;
1928                 if (copy_from_user(&m, useraddr, sizeof(m)))
1929                         return -EFAULT;
1930                 if (m.nmtus != NMTUS)
1931                         return -EINVAL;
1932                 if (m.mtus[0] < 81)     /* accommodate SACK */
1933                         return -EINVAL;
1934
1935                 /* MTUs must be in ascending order */
1936                 for (i = 1; i < NMTUS; ++i)
1937                         if (m.mtus[i] < m.mtus[i - 1])
1938                                 return -EINVAL;
1939
1940                 memcpy(adapter->params.mtus, m.mtus,
1941                         sizeof(adapter->params.mtus));
1942                 break;
1943         }
1944         case CHELSIO_GET_PM:{
1945                 struct tp_params *p = &adapter->params.tp;
1946                 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
1947
1948                 if (!is_offload(adapter))
1949                         return -EOPNOTSUPP;
1950                 m.tx_pg_sz = p->tx_pg_size;
1951                 m.tx_num_pg = p->tx_num_pgs;
1952                 m.rx_pg_sz = p->rx_pg_size;
1953                 m.rx_num_pg = p->rx_num_pgs;
1954                 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
1955                 if (copy_to_user(useraddr, &m, sizeof(m)))
1956                         return -EFAULT;
1957                 break;
1958         }
1959         case CHELSIO_SET_PM:{
1960                 struct ch_pm m;
1961                 struct tp_params *p = &adapter->params.tp;
1962
1963                 if (!is_offload(adapter))
1964                         return -EOPNOTSUPP;
1965                 if (!capable(CAP_NET_ADMIN))
1966                         return -EPERM;
1967                 if (adapter->flags & FULL_INIT_DONE)
1968                         return -EBUSY;
1969                 if (copy_from_user(&m, useraddr, sizeof(m)))
1970                         return -EFAULT;
1971                 if (!is_power_of_2(m.rx_pg_sz) ||
1972                     !is_power_of_2(m.tx_pg_sz))
1973                         return -EINVAL; /* not a power of 2 */
1974                 if (!(m.rx_pg_sz & 0x14000))
1975                         return -EINVAL; /* not 16KB or 64KB */
1976                 if (!(m.tx_pg_sz & 0x1554000))
1977                         return -EINVAL; /* not 16KB..16MB in powers of 4 */
1978                 if (m.tx_num_pg == -1)
1979                         m.tx_num_pg = p->tx_num_pgs;
1980                 if (m.rx_num_pg == -1)
1981                         m.rx_num_pg = p->rx_num_pgs;
1982                 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
1983                         return -EINVAL;
1984                 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
1985                         m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
1986                         return -EINVAL;
1987                 p->rx_pg_size = m.rx_pg_sz;
1988                 p->tx_pg_size = m.tx_pg_sz;
1989                 p->rx_num_pgs = m.rx_num_pg;
1990                 p->tx_num_pgs = m.tx_num_pg;
1991                 break;
1992         }
1993         case CHELSIO_GET_MEM:{
1994                 struct ch_mem_range t;
1995                 struct mc7 *mem;
1996                 u64 buf[32];
1997
1998                 if (!is_offload(adapter))
1999                         return -EOPNOTSUPP;
2000                 if (!(adapter->flags & FULL_INIT_DONE))
2001                         return -EIO;    /* need the memory controllers */
2002                 if (copy_from_user(&t, useraddr, sizeof(t)))
2003                         return -EFAULT;
2004                 if ((t.addr & 7) || (t.len & 7))
2005                         return -EINVAL;
2006                 if (t.mem_id == MEM_CM)
2007                         mem = &adapter->cm;
2008                 else if (t.mem_id == MEM_PMRX)
2009                         mem = &adapter->pmrx;
2010                 else if (t.mem_id == MEM_PMTX)
2011                         mem = &adapter->pmtx;
2012                 else
2013                         return -EINVAL;
2014
2015                 /*
2016                  * Version scheme:
2017                  * bits 0..9: chip version
2018                  * bits 10..15: chip revision
2019                  */
2020                 t.version = 3 | (adapter->params.rev << 10);
2021                 if (copy_to_user(useraddr, &t, sizeof(t)))
2022                         return -EFAULT;
2023
2024                 /*
2025                  * Read 256 bytes at a time as len can be large and we don't
2026                  * want to use huge intermediate buffers.
2027                  */
2028                 useraddr += sizeof(t);  /* advance to start of buffer */
2029                 while (t.len) {
2030                         unsigned int chunk =
2031                                 min_t(unsigned int, t.len, sizeof(buf));
2032
2033                         ret = t3_mc7_bd_read(mem, t.addr / 8,
2034                                              chunk / 8,
2035                                              buf);
2036                         if (ret)
2037                                 return ret;
2038                         if (copy_to_user(useraddr, buf, chunk))
2039                                 return -EFAULT;
2040                         useraddr += chunk;
2041                         t.addr += chunk;
2042                         t.len -= chunk;
2043                 }
2044                 break;
2045         }
2046         case CHELSIO_SET_TRACE_FILTER:{
2047                 struct ch_trace t;
2048                 const struct trace_params *tp;
2049
2050                 if (!capable(CAP_NET_ADMIN))
2051                         return -EPERM;
2052                 if (!offload_running(adapter))
2053                         return -EAGAIN;
2054                 if (copy_from_user(&t, useraddr, sizeof(t)))
2055                         return -EFAULT;
2056
2057                 tp = (const struct trace_params *)&t.sip;
2058                 if (t.config_tx)
2059                         t3_config_trace_filter(adapter, tp, 0,
2060                                                 t.invert_match,
2061                                                 t.trace_tx);
2062                 if (t.config_rx)
2063                         t3_config_trace_filter(adapter, tp, 1,
2064                                                 t.invert_match,
2065                                                 t.trace_rx);
2066                 break;
2067         }
2068         default:
2069                 return -EOPNOTSUPP;
2070         }
2071         return 0;
2072 }
2073
2074 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2075 {
2076         struct mii_ioctl_data *data = if_mii(req);
2077         struct port_info *pi = netdev_priv(dev);
2078         struct adapter *adapter = pi->adapter;
2079         int ret, mmd;
2080
2081         switch (cmd) {
2082         case SIOCGMIIPHY:
2083                 data->phy_id = pi->phy.addr;
2084                 /* FALLTHRU */
2085         case SIOCGMIIREG:{
2086                 u32 val;
2087                 struct cphy *phy = &pi->phy;
2088
2089                 if (!phy->mdio_read)
2090                         return -EOPNOTSUPP;
2091                 if (is_10G(adapter)) {
2092                         mmd = data->phy_id >> 8;
2093                         if (!mmd)
2094                                 mmd = MDIO_DEV_PCS;
2095                         else if (mmd > MDIO_DEV_XGXS)
2096                                 return -EINVAL;
2097
2098                         ret = phy->mdio_read(adapter,
2099                                              data->phy_id & 0x1f,
2100                                              mmd, data->reg_num, &val);
2101                 } else
2102                         ret = phy->mdio_read(adapter,
2103                                              data->phy_id & 0x1f,
2104                                              0, data->reg_num & 0x1f,
2105                                              &val);
2106                 if (!ret)
2107                         data->val_out = val;
2108                 break;
2109         }
2110         case SIOCSMIIREG:{
2111                 struct cphy *phy = &pi->phy;
2112
2113                 if (!capable(CAP_NET_ADMIN))
2114                         return -EPERM;
2115                 if (!phy->mdio_write)
2116                         return -EOPNOTSUPP;
2117                 if (is_10G(adapter)) {
2118                         mmd = data->phy_id >> 8;
2119                         if (!mmd)
2120                                 mmd = MDIO_DEV_PCS;
2121                         else if (mmd > MDIO_DEV_XGXS)
2122                                 return -EINVAL;
2123
2124                         ret = phy->mdio_write(adapter,
2125                                               data->phy_id & 0x1f,
2126                                               mmd,
2127                                               data->reg_num,
2128                                               data->val_in);
2129                 } else
2130                         ret = phy->mdio_write(adapter,
2131                                               data->phy_id & 0x1f,
2132                                               0,
2133                                               data->reg_num & 0x1f,
2134                                               data->val_in);
2135                 break;
2136         }
2137         case SIOCCHIOCTL:
2138                 return cxgb_extension_ioctl(dev, req->ifr_data);
2139         default:
2140                 return -EOPNOTSUPP;
2141         }
2142         return ret;
2143 }
2144
2145 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2146 {
2147         struct port_info *pi = netdev_priv(dev);
2148         struct adapter *adapter = pi->adapter;
2149         int ret;
2150
2151         if (new_mtu < 81)       /* accommodate SACK */
2152                 return -EINVAL;
2153         if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2154                 return ret;
2155         dev->mtu = new_mtu;
2156         init_port_mtus(adapter);
2157         if (adapter->params.rev == 0 && offload_running(adapter))
2158                 t3_load_mtus(adapter, adapter->params.mtus,
2159                              adapter->params.a_wnd, adapter->params.b_wnd,
2160                              adapter->port[0]->mtu);
2161         return 0;
2162 }
2163
2164 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2165 {
2166         struct port_info *pi = netdev_priv(dev);
2167         struct adapter *adapter = pi->adapter;
2168         struct sockaddr *addr = p;
2169
2170         if (!is_valid_ether_addr(addr->sa_data))
2171                 return -EINVAL;
2172
2173         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2174         t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2175         if (offload_running(adapter))
2176                 write_smt_entry(adapter, pi->port_id);
2177         return 0;
2178 }
2179
2180 /**
2181  * t3_synchronize_rx - wait for current Rx processing on a port to complete
2182  * @adap: the adapter
2183  * @p: the port
2184  *
2185  * Ensures that current Rx processing on any of the queues associated with
2186  * the given port completes before returning.  We do this by acquiring and
2187  * releasing the locks of the response queues associated with the port.
2188  */
2189 static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2190 {
2191         int i;
2192
2193         for (i = 0; i < p->nqsets; i++) {
2194                 struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;
2195
2196                 spin_lock_irq(&q->lock);
2197                 spin_unlock_irq(&q->lock);
2198         }
2199 }
2200
2201 static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2202 {
2203         struct port_info *pi = netdev_priv(dev);
2204         struct adapter *adapter = pi->adapter;
2205
2206         pi->vlan_grp = grp;
2207         if (adapter->params.rev > 0)
2208                 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2209         else {
2210                 /* single control for all ports */
2211                 unsigned int i, have_vlans = 0;
2212                 for_each_port(adapter, i)
2213                     have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2214
2215                 t3_set_vlan_accel(adapter, 1, have_vlans);
2216         }
2217         t3_synchronize_rx(adapter, pi);
2218 }
2219
2220 #ifdef CONFIG_NET_POLL_CONTROLLER
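/*
 * netpoll entry point: service this port's queue sets by invoking the
 * interrupt handler directly.  Under MSI-X each queue set is its own
 * interrupt source; otherwise the whole adapter is passed as dev_id.
 */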
2221 static void cxgb_netpoll(struct net_device *dev)
2222 {
2223         struct port_info *pi = netdev_priv(dev);
2224         struct adapter *adapter = pi->adapter;
2225         int qidx;
2226
2227         for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2228                 struct sge_qset *qs = &adapter->sge.qs[qidx];
2229                 void *source;
2230
2231                 if (adapter->flags & USING_MSIX)
2232                         source = qs;
2233                 else
2234                         source = adapter;
2235
2236                 t3_intr_handler(adapter, qs->rspq.polling)(0, source);
2237         }
2238 }
2239 #endif
2240
2241 /*
2242  * Periodic accumulation of MAC statistics.
2243  */
2244 static void mac_stats_update(struct adapter *adapter)
2245 {
2246         int i;
2247
2248         for_each_port(adapter, i) {
2249                 struct net_device *dev = adapter->port[i];
2250                 struct port_info *p = netdev_priv(dev);
2251
2252                 if (netif_running(dev)) {
2253                         spin_lock(&adapter->stats_lock);
2254                         t3_mac_update_stats(&p->mac);
2255                         spin_unlock(&adapter->stats_lock);
2256                 }
2257         }
2258 }
2259
2260 static void check_link_status(struct adapter *adapter)
2261 {
2262         int i;
2263
2264         for_each_port(adapter, i) {
2265                 struct net_device *dev = adapter->port[i];
2266                 struct port_info *p = netdev_priv(dev);
2267
2268                 if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
2269                         t3_link_changed(adapter, i);
2270         }
2271 }
2272
2273 static void check_t3b2_mac(struct adapter *adapter)
2274 {
2275         int i;
2276
2277         if (!rtnl_trylock())    /* synchronize with ifdown */
2278                 return;
2279
2280         for_each_port(adapter, i) {
2281                 struct net_device *dev = adapter->port[i];
2282                 struct port_info *p = netdev_priv(dev);
2283                 int status;
2284
2285                 if (!netif_running(dev))
2286                         continue;
2287
2288                 status = 0;
2289                 if (netif_carrier_ok(dev)) /* running was checked above */
2290                         status = t3b2_mac_watchdog_task(&p->mac);
2291                 if (status == 1)
2292                         p->mac.stats.num_toggled++;
2293                 else if (status == 2) {
2294                         struct cmac *mac = &p->mac;
2295
2296                         t3_mac_set_mtu(mac, dev->mtu);
2297                         t3_mac_set_address(mac, 0, dev->dev_addr);
2298                         cxgb_set_rxmode(dev);
2299                         t3_link_start(&p->phy, mac, &p->link_config);
2300                         t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2301                         t3_port_intr_enable(adapter, p->port_id);
2302                         p->mac.stats.num_resets++;
2303                 }
2304         }
2305         rtnl_unlock();
2306 }
2307
2308
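/*
 * Periodic slow-path housekeeping: polls link state for PHYs without
 * interrupts, accumulates MAC statistics once enough link-poll periods
 * have elapsed (the /10 scaling suggests linkpoll_period is in 0.1s
 * units, an assumption drawn from the arithmetic), and runs the T3B2
 * MAC watchdog.  Reschedules itself while any port is open.
 */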
2309 static void t3_adap_check_task(struct work_struct *work)
2310 {
2311         struct adapter *adapter = container_of(work, struct adapter,
2312                                                adap_check_task.work);
2313         const struct adapter_params *p = &adapter->params;
2314
2315         adapter->check_task_cnt++;
2316
2317         /* Check link status for PHYs without interrupts */
2318         if (p->linkpoll_period)
2319                 check_link_status(adapter);
2320
2321         /* Accumulate MAC stats if needed */
2322         if (!p->linkpoll_period ||
2323             (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2324             p->stats_update_period) {
2325                 mac_stats_update(adapter);
2326                 adapter->check_task_cnt = 0;
2327         }
2328
2329         if (p->rev == T3_REV_B2)
2330                 check_t3b2_mac(adapter);
2331
2332         /* Schedule the next check update if any port is active. */
2333         spin_lock(&adapter->work_lock);
2334         if (adapter->open_device_map & PORT_MASK)
2335                 schedule_chk_task(adapter);
2336         spin_unlock(&adapter->work_lock);
2337 }
2338
2339 /*
2340  * Processes external (PHY) interrupts in process context.
2341  */
2342 static void ext_intr_task(struct work_struct *work)
2343 {
2344         struct adapter *adapter = container_of(work, struct adapter,
2345                                                ext_intr_handler_task);
2346
2347         t3_phy_intr_handler(adapter);
2348
2349         /* Now reenable external interrupts */
2350         spin_lock_irq(&adapter->work_lock);
2351         if (adapter->slow_intr_mask) {
2352                 adapter->slow_intr_mask |= F_T3DBG;
2353                 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2354                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2355                              adapter->slow_intr_mask);
2356         }
2357         spin_unlock_irq(&adapter->work_lock);
2358 }
2359
2360 /*
2361  * Interrupt-context handler for external (PHY) interrupts.
2362  */
2363 void t3_os_ext_intr_handler(struct adapter *adapter)
2364 {
2365         /*
2366          * Schedule a task to handle external interrupts as they may be slow
2367          * and we use a mutex to protect MDIO registers.  We disable PHY
2368          * interrupts in the meantime and let the task reenable them when
2369          * it's done.
2370          */
2371         spin_lock(&adapter->work_lock);
2372         if (adapter->slow_intr_mask) {
2373                 adapter->slow_intr_mask &= ~F_T3DBG;
2374                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2375                              adapter->slow_intr_mask);
2376                 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2377         }
2378         spin_unlock(&adapter->work_lock);
2379 }
2380
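/*
 * Quiesce the hardware after an unrecoverable error: stop the SGE and
 * both MACs, mask interrupts, and dump the firmware status registers
 * to aid post-mortem analysis.
 */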
2381 void t3_fatal_err(struct adapter *adapter)
2382 {
2383         unsigned int fw_status[4];
2384
2385         if (adapter->flags & FULL_INIT_DONE) {
2386                 t3_sge_stop(adapter);
2387                 t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
2388                 t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
2389                 t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
2390                 t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
2391                 t3_intr_disable(adapter);
2392         }
2393         CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2394         if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2395                 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2396                          fw_status[0], fw_status[1],
2397                          fw_status[2], fw_status[3]);
2398
2399 }
2400
2401 /**
2402  * t3_io_error_detected - called when PCI error is detected
2403  * @pdev: Pointer to PCI device
2404  * @state: The current pci connection state
2405  *
2406  * This function is called after a PCI bus error affecting
2407  * this device has been detected.
2408  */
2409 static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
2410                                              pci_channel_state_t state)
2411 {
2412         struct adapter *adapter = pci_get_drvdata(pdev);
2413         int i;
2414
2415         /* Stop all ports */
2416         for_each_port(adapter, i) {
2417                 struct net_device *netdev = adapter->port[i];
2418
2419                 if (netif_running(netdev))
2420                         cxgb_close(netdev);
2421         }
2422
2423         if (is_offload(adapter) &&
2424             test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
2425                 offload_close(&adapter->tdev);
2426
2427         /* Free sge resources */
2428         t3_free_sge_resources(adapter);
2429
2430         adapter->flags &= ~FULL_INIT_DONE;
2431
2432         pci_disable_device(pdev);
2433
2434         /* Request a slot reset. */
2435         return PCI_ERS_RESULT_NEED_RESET;
2436 }
2437
2438 /**
2439  * t3_io_slot_reset - called after the pci bus has been reset.
2440  * @pdev: Pointer to PCI device
2441  *
2442  * Restart the card from scratch, as if from a cold boot.
2443  */
2444 static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
2445 {
2446         struct adapter *adapter = pci_get_drvdata(pdev);
2447
2448         if (pci_enable_device(pdev)) {
2449                 dev_err(&pdev->dev,
2450                         "Cannot re-enable PCI device after reset.\n");
2451                 return PCI_ERS_RESULT_DISCONNECT;
2452         }
2453         pci_set_master(pdev);
2454
2455         t3_prep_adapter(adapter, adapter->params.info, 1);
2456
2457         return PCI_ERS_RESULT_RECOVERED;
2458 }
2459
2460 /**
2461  * t3_io_resume - called when traffic can start flowing again.
2462  * @pdev: Pointer to PCI device
2463  *
2464  * This callback is called when the error recovery driver tells us that
2465  * it's OK to resume normal operation.
2466  */
2467 static void t3_io_resume(struct pci_dev *pdev)
2468 {
2469         struct adapter *adapter = pci_get_drvdata(pdev);
2470         int i;
2471
2472         /* Restart the ports */
2473         for_each_port(adapter, i) {
2474                 struct net_device *netdev = adapter->port[i];
2475
2476                 if (netif_running(netdev)) {
2477                         if (cxgb_open(netdev)) {
2478                                 dev_err(&pdev->dev,
2479                                         "can't bring device back up"
2480                                         " after reset\n");
2481                                 continue;
2482                         }
2483                         netif_device_attach(netdev);
2484                 }
2485         }
2486
2487         if (is_offload(adapter)) {
2488                 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
2489                 if (offload_open(adapter->port[0]))
2490                         printk(KERN_WARNING
2491                                "Could not bring back offload capabilities\n");
2492         }
2493 }
2494
2495 static struct pci_error_handlers t3_err_handler = {
2496         .error_detected = t3_io_error_detected,
2497         .slot_reset = t3_io_slot_reset,
2498         .resume = t3_io_resume,
2499 };
2500
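/*
 * Try to allocate one MSI-X vector per queue set plus one for
 * slow-path events.  A positive return from pci_enable_msix() means
 * only that many vectors were available, so we fall back from MSI-X.
 */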
2501 static int __devinit cxgb_enable_msix(struct adapter *adap)
2502 {
2503         struct msix_entry entries[SGE_QSETS + 1];
2504         int i, err;
2505
2506         for (i = 0; i < ARRAY_SIZE(entries); ++i)
2507                 entries[i].entry = i;
2508
2509         err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
2510         if (!err) {
2511                 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2512                         adap->msix_info[i].vec = entries[i].vector;
2513         } else if (err > 0)
2514                 dev_info(&adap->pdev->dev,
2515                        "only %d MSI-X vectors left, not using MSI-X\n", err);
2516         return err;
2517 }
2518
2519 static void __devinit print_port_info(struct adapter *adap,
2520                                       const struct adapter_info *ai)
2521 {
2522         static const char *pci_variant[] = {
2523                 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
2524         };
2525
2526         int i;
2527         char buf[80];
2528
2529         if (is_pcie(adap))
2530                 snprintf(buf, sizeof(buf), "%s x%d",
2531                          pci_variant[adap->params.pci.variant],
2532                          adap->params.pci.width);
2533         else
2534                 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2535                          pci_variant[adap->params.pci.variant],
2536                          adap->params.pci.speed, adap->params.pci.width);
2537
2538         for_each_port(adap, i) {
2539                 struct net_device *dev = adap->port[i];
2540                 const struct port_info *pi = netdev_priv(dev);
2541
2542                 if (!test_bit(i, &adap->registered_device_map))
2543                         continue;
2544                 printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
2545                        dev->name, ai->desc, pi->port_type->desc,
2546                        is_offload(adap) ? "R" : "", adap->params.rev, buf,
2547                        (adap->flags & USING_MSIX) ? " MSI-X" :
2548                        (adap->flags & USING_MSI) ? " MSI" : "");
2549                 if (adap->name == dev->name && adap->params.vpd.mclk)
2550                         printk(KERN_INFO
2551                                "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
2552                                adap->name, t3_mc7_size(&adap->cm) >> 20,
2553                                t3_mc7_size(&adap->pmtx) >> 20,
2554                                t3_mc7_size(&adap->pmrx) >> 20,
2555                                adap->params.vpd.sn);
2556         }
2557 }
2558
2559 static int __devinit init_one(struct pci_dev *pdev,
2560                               const struct pci_device_id *ent)
2561 {
2562         static int version_printed;
2563
2564         int i, err, pci_using_dac = 0;
2565         unsigned long mmio_start, mmio_len;
2566         const struct adapter_info *ai;
2567         struct adapter *adapter = NULL;
2568         struct port_info *pi;
2569
2570         if (!version_printed) {
2571                 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
2572                 ++version_printed;
2573         }
2574
2575         if (!cxgb3_wq) {
2576                 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
2577                 if (!cxgb3_wq) {
2578                         printk(KERN_ERR DRV_NAME
2579                                ": cannot initialize work queue\n");
2580                         return -ENOMEM;
2581                 }
2582         }
2583
2584         err = pci_request_regions(pdev, DRV_NAME);
2585         if (err) {
2586                 /* Just info, some other driver may have claimed the device. */
2587                 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
2588                 return err;
2589         }
2590
2591         err = pci_enable_device(pdev);
2592         if (err) {
2593                 dev_err(&pdev->dev, "cannot enable PCI device\n");
2594                 goto out_release_regions;
2595         }
2596
2597         if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2598                 pci_using_dac = 1;
2599                 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2600                 if (err) {
2601                         dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
2602                                "coherent allocations\n");
2603                         goto out_disable_device;
2604                 }
2605         } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
2606                 dev_err(&pdev->dev, "no usable DMA configuration\n");
2607                 goto out_disable_device;
2608         }
2609
2610         pci_set_master(pdev);
2611
2612         mmio_start = pci_resource_start(pdev, 0);
2613         mmio_len = pci_resource_len(pdev, 0);
2614         ai = t3_get_adapter_info(ent->driver_data);
2615
2616         adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2617         if (!adapter) {
2618                 err = -ENOMEM;
2619                 goto out_disable_device;
2620         }
2621
2622         adapter->regs = ioremap_nocache(mmio_start, mmio_len);
2623         if (!adapter->regs) {
2624                 dev_err(&pdev->dev, "cannot map device registers\n");
2625                 err = -ENOMEM;
2626                 goto out_free_adapter;
2627         }
2628
2629         adapter->pdev = pdev;
2630         adapter->name = pci_name(pdev);
2631         adapter->msg_enable = dflt_msg_enable;
2632         adapter->mmio_len = mmio_len;
2633
2634         mutex_init(&adapter->mdio_lock);
2635         spin_lock_init(&adapter->work_lock);
2636         spin_lock_init(&adapter->stats_lock);
2637
2638         INIT_LIST_HEAD(&adapter->adapter_list);
2639         INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
2640         INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
2641
2642         for (i = 0; i < ai->nports; ++i) {
2643                 struct net_device *netdev;
2644
2645                 netdev = alloc_etherdev(sizeof(struct port_info));
2646                 if (!netdev) {
2647                         err = -ENOMEM;
2648                         goto out_free_dev;
2649                 }
2650
2651                 SET_NETDEV_DEV(netdev, &pdev->dev);
2652
2653                 adapter->port[i] = netdev;
2654                 pi = netdev_priv(netdev);
2655                 pi->adapter = adapter;
2656                 pi->rx_csum_offload = 1;
2657                 pi->nqsets = 1;
2658                 pi->first_qset = i;
2659                 pi->activity = 0;
2660                 pi->port_id = i;
2661                 netif_carrier_off(netdev);
2662                 netdev->irq = pdev->irq;
2663                 netdev->mem_start = mmio_start;
2664                 netdev->mem_end = mmio_start + mmio_len - 1;
2665                 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
2666                 netdev->features |= NETIF_F_LLTX;
2667                 if (pci_using_dac)
2668                         netdev->features |= NETIF_F_HIGHDMA;
2669
2670                 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2671                 netdev->vlan_rx_register = vlan_rx_register;
2672
2673                 netdev->open = cxgb_open;
2674                 netdev->stop = cxgb_close;
2675                 netdev->hard_start_xmit = t3_eth_xmit;
2676                 netdev->get_stats = cxgb_get_stats;
2677                 netdev->set_multicast_list = cxgb_set_rxmode;
2678                 netdev->do_ioctl = cxgb_ioctl;
2679                 netdev->change_mtu = cxgb_change_mtu;
2680                 netdev->set_mac_address = cxgb_set_mac_addr;
2681 #ifdef CONFIG_NET_POLL_CONTROLLER
2682                 netdev->poll_controller = cxgb_netpoll;
2683 #endif
2684
2685                 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
2686         }
2687
2688         pci_set_drvdata(pdev, adapter);
2689         if (t3_prep_adapter(adapter, ai, 1) < 0) {
2690                 err = -ENODEV;
2691                 goto out_free_dev;
2692         }
2693
2694         /*
2695          * The card is now ready to go.  If any errors occur during device
2696          * registration we do not fail the whole card but rather proceed only
2697          * with the ports we manage to register successfully.  However we must
2698          * register at least one net device.
2699          */
2700         for_each_port(adapter, i) {
2701                 err = register_netdev(adapter->port[i]);
2702                 if (err)
2703                         dev_warn(&pdev->dev,
2704                                  "cannot register net device %s, skipping\n",
2705                                  adapter->port[i]->name);
2706                 else {
2707                         /*
2708                          * Change the name we use for messages to the name of
2709                          * the first successfully registered interface.
2710                          */
2711                         if (!adapter->registered_device_map)
2712                                 adapter->name = adapter->port[i]->name;
2713
2714                         __set_bit(i, &adapter->registered_device_map);
2715                 }
2716         }
2717         if (!adapter->registered_device_map) {
2718                 dev_err(&pdev->dev, "could not register any net devices\n");
2719                 goto out_free_dev;
2720         }
2721
2722         /* Driver's ready. Reflect it on LEDs */
2723         t3_led_ready(adapter);
2724
2725         if (is_offload(adapter)) {
2726                 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
2727                 cxgb3_adapter_ofld(adapter);
2728         }
2729
2730         /* See what interrupts we'll be using */
2731         if (msi > 1 && cxgb_enable_msix(adapter) == 0)
2732                 adapter->flags |= USING_MSIX;
2733         else if (msi > 0 && pci_enable_msi(pdev) == 0)
2734                 adapter->flags |= USING_MSI;
2735
2736         /* sysfs attributes are best-effort; probe succeeds even if this fails */
2737         err = sysfs_create_group(&adapter->port[0]->dev.kobj, &cxgb3_attr_group);
2738
2739         print_port_info(adapter, ai);
2740         return 0;
2741
2742 out_free_dev:
2743         iounmap(adapter->regs);
2744         for (i = ai->nports - 1; i >= 0; --i)
2745                 if (adapter->port[i])
2746                         free_netdev(adapter->port[i]);
2747
2748 out_free_adapter:
2749         kfree(adapter);
2750
2751 out_disable_device:
2752         pci_disable_device(pdev);
2753 out_release_regions:
2754         pci_release_regions(pdev);
2755         pci_set_drvdata(pdev, NULL);
2756         return err;
2757 }
2758
2759 static void __devexit remove_one(struct pci_dev *pdev)
2760 {
2761         struct adapter *adapter = pci_get_drvdata(pdev);
2762
2763         if (adapter) {
2764                 int i;
2765
2766                 t3_sge_stop(adapter);
2767                 sysfs_remove_group(&adapter->port[0]->dev.kobj,
2768                                    &cxgb3_attr_group);
2769
2770                 if (is_offload(adapter)) {
2771                         cxgb3_adapter_unofld(adapter);
2772                         if (test_bit(OFFLOAD_DEVMAP_BIT,
2773                                      &adapter->open_device_map))
2774                                 offload_close(&adapter->tdev);
2775                 }
2776
2777                 for_each_port(adapter, i)
2778                     if (test_bit(i, &adapter->registered_device_map))
2779                         unregister_netdev(adapter->port[i]);
2780
2781                 t3_free_sge_resources(adapter);
2782                 cxgb_disable_msi(adapter);
2783
2784                 for_each_port(adapter, i)
2785                         if (adapter->port[i])
2786                                 free_netdev(adapter->port[i]);
2787
2788                 iounmap(adapter->regs);
2789                 kfree(adapter);
2790                 pci_release_regions(pdev);
2791                 pci_disable_device(pdev);
2792                 pci_set_drvdata(pdev, NULL);
2793         }
2794 }
2795
2796 static struct pci_driver driver = {
2797         .name = DRV_NAME,
2798         .id_table = cxgb3_pci_tbl,
2799         .probe = init_one,
2800         .remove = __devexit_p(remove_one),
2801         .err_handler = &t3_err_handler,
2802 };
2803
2804 static int __init cxgb3_init_module(void)
2805 {
2806         int ret;
2807
2808         cxgb3_offload_init();
2809
2810         ret = pci_register_driver(&driver);
2811         return ret;
2812 }
2813
2814 static void __exit cxgb3_cleanup_module(void)
2815 {
2816         pci_unregister_driver(&driver);
2817         if (cxgb3_wq)
2818                 destroy_workqueue(cxgb3_wq);
2819 }
2820
2821 module_init(cxgb3_init_module);
2822 module_exit(cxgb3_cleanup_module);