drivers/net/cxgb3/cxgb3_main.c
/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mdio.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

enum {
        MAX_TXQ_ENTRIES = 16384,
        MAX_CTRL_TXQ_ENTRIES = 1024,
        MAX_RSPQ_ENTRIES = 16384,
        MAX_RX_BUFFERS = 16384,
        MAX_RX_JUMBO_BUFFERS = 16384,
        MIN_TXQ_ENTRIES = 4,
        MIN_CTRL_TXQ_ENTRIES = 4,
        MIN_RSPQ_ENTRIES = 32,
        MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
        { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }

static const struct pci_device_id cxgb3_pci_tbl[] = {
        CH_DEVICE(0x20, 0),     /* PE9000 */
        CH_DEVICE(0x21, 1),     /* T302E */
        CH_DEVICE(0x22, 2),     /* T310E */
        CH_DEVICE(0x23, 3),     /* T320X */
        CH_DEVICE(0x24, 1),     /* T302X */
        CH_DEVICE(0x25, 3),     /* T320E */
        CH_DEVICE(0x26, 2),     /* T310X */
        CH_DEVICE(0x30, 2),     /* T3B10 */
        CH_DEVICE(0x31, 3),     /* T3B20 */
        CH_DEVICE(0x32, 1),     /* T3B02 */
        CH_DEVICE(0x35, 6),     /* T3C20-derived T3C10 */
        CH_DEVICE(0x36, 3),     /* S320E-CR */
        CH_DEVICE(0x37, 7),     /* N320E-G2 */
        {0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");

/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */

static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
static struct workqueue_struct *cxgb3_wq;

/**
 *      link_report - show link status and link speed/duplex
 *      @dev: the net device whose settings are to be reported
 *
 *      Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                printk(KERN_INFO "%s: link down\n", dev->name);
        else {
                const char *s = "10Mbps";
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_config.speed) {
                case SPEED_10000:
                        s = "10Gbps";
                        break;
                case SPEED_1000:
                        s = "1000Mbps";
                        break;
                case SPEED_100:
                        s = "100Mbps";
                        break;
                }

                printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
                       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
        }
}

void t3_os_link_fault(struct adapter *adap, int port_id, int state)
{
        struct net_device *dev = adap->port[port_id];
        struct port_info *pi = netdev_priv(dev);

        if (state == netif_carrier_ok(dev))
                return;

        if (state) {
                struct cmac *mac = &pi->mac;

                netif_carrier_on(dev);

                /* Clear local faults */
                t3_xgm_intr_disable(adap, pi->port_id);
                t3_read_reg(adap, A_XGM_INT_STATUS +
                                    pi->mac.offset);
                t3_write_reg(adap,
                             A_XGM_INT_CAUSE + pi->mac.offset,
                             F_XGM_INT);

                t3_set_reg_field(adap,
                                 A_XGM_INT_ENABLE +
                                 pi->mac.offset,
                                 F_XGM_INT, F_XGM_INT);
                t3_xgm_intr_enable(adap, pi->port_id);

                t3_mac_enable(mac, MAC_DIRECTION_TX);
        } else
                netif_carrier_off(dev);

        link_report(dev);
}

/**
 *      t3_os_link_changed - handle link status changes
 *      @adapter: the adapter associated with the link change
 *      @port_id: the port index whose link status has changed
 *      @link_stat: the new status of the link
 *      @speed: the new speed setting
 *      @duplex: the new duplex setting
 *      @pause: the new flow-control setting
 *
 *      This is the OS-dependent handler for link status changes.  The OS
 *      neutral handler takes care of most of the processing for these events,
 *      then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
                        int speed, int duplex, int pause)
{
        struct net_device *dev = adapter->port[port_id];
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        /* Skip changes from disabled ports. */
        if (!netif_running(dev))
                return;

        if (link_stat != netif_carrier_ok(dev)) {
                if (link_stat) {
                        t3_mac_enable(mac, MAC_DIRECTION_RX);

                        /* Clear local faults */
                        t3_xgm_intr_disable(adapter, pi->port_id);
                        t3_read_reg(adapter, A_XGM_INT_STATUS +
                                    pi->mac.offset);
                        t3_write_reg(adapter,
                                     A_XGM_INT_CAUSE + pi->mac.offset,
                                     F_XGM_INT);

                        t3_set_reg_field(adapter,
                                         A_XGM_INT_ENABLE + pi->mac.offset,
                                         F_XGM_INT, F_XGM_INT);
                        t3_xgm_intr_enable(adapter, pi->port_id);

                        netif_carrier_on(dev);
                } else {
                        netif_carrier_off(dev);

                        t3_xgm_intr_disable(adapter, pi->port_id);
                        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
                        t3_set_reg_field(adapter,
                                         A_XGM_INT_ENABLE + pi->mac.offset,
                                         F_XGM_INT, 0);

                        if (is_10G(adapter))
                                pi->phy.ops->power_down(&pi->phy, 1);

                        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
                        t3_mac_disable(mac, MAC_DIRECTION_RX);
                        t3_link_start(&pi->phy, mac, &pi->link_config);
                }

                link_report(dev);
        }
}

/**
 *      t3_os_phymod_changed - handle PHY module changes
 *      @adap: the adapter associated with the module change
 *      @port_id: the port index of the PHY whose module changed
 *
 *      This is the OS-dependent handler for PHY module changes.  It is
 *      invoked when a PHY module is removed or inserted for any OS-specific
 *      processing.
 */
void t3_os_phymod_changed(struct adapter *adap, int port_id)
{
        static const char *mod_str[] = {
                NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
        };

        const struct net_device *dev = adap->port[port_id];
        const struct port_info *pi = netdev_priv(dev);

        if (pi->phy.modtype == phy_modtype_none)
                printk(KERN_INFO "%s: PHY module unplugged\n", dev->name);
        else
                printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name,
                       mod_str[pi->phy.modtype]);
}

static void cxgb_set_rxmode(struct net_device *dev)
{
        struct t3_rx_mode rm;
        struct port_info *pi = netdev_priv(dev);

        init_rx_mode(&rm, dev, dev->mc_list);
        t3_mac_set_rx_mode(&pi->mac, &rm);
}

/**
 *      link_start - enable a port
 *      @dev: the device to enable
 *
 *      Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
        struct t3_rx_mode rm;
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        init_rx_mode(&rm, dev, dev->mc_list);
        t3_mac_reset(mac);
        t3_mac_set_mtu(mac, dev->mtu);
        t3_mac_set_address(mac, 0, dev->dev_addr);
        t3_mac_set_rx_mode(mac, &rm);
        t3_link_start(&pi->phy, mac, &pi->link_config);
        t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

static inline void cxgb_disable_msi(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~USING_MSIX;
        } else if (adapter->flags & USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~USING_MSI;
        }
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
        t3_slow_intr_handler(cookie);
        return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
        int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

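        /*
         * Vector 0 is the slow-path (async) interrupt and carries the
         * adapter's name; each data vector is named "<interface>-<qset>"
         * so it can be identified per queue in /proc/interrupts.
         */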
        snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
        adap->msix_info[0].desc[n] = 0;

        for_each_port(adap, j) {
                struct net_device *d = adap->port[j];
                const struct port_info *pi = netdev_priv(d);

                for (i = 0; i < pi->nqsets; i++, msi_idx++) {
                        snprintf(adap->msix_info[msi_idx].desc, n,
                                 "%s-%d", d->name, pi->first_qset + i);
                        adap->msix_info[msi_idx].desc[n] = 0;
                }
        }
}

static int request_msix_data_irqs(struct adapter *adap)
{
        int i, j, err, qidx = 0;

        for_each_port(adap, i) {
                int nqsets = adap2pinfo(adap, i)->nqsets;

                for (j = 0; j < nqsets; ++j) {
                        err = request_irq(adap->msix_info[qidx + 1].vec,
                                          t3_intr_handler(adap,
                                                          adap->sge.qs[qidx].
                                                          rspq.polling), 0,
                                          adap->msix_info[qidx + 1].desc,
                                          &adap->sge.qs[qidx]);
                        if (err) {
                                while (--qidx >= 0)
                                        free_irq(adap->msix_info[qidx + 1].vec,
                                                 &adap->sge.qs[qidx]);
                                return err;
                        }
                        qidx++;
                }
        }
        return 0;
}

static void free_irq_resources(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                int i, n = 0;

                free_irq(adapter->msix_info[0].vec, adapter);
                for_each_port(adapter, i)
                        n += adap2pinfo(adapter, i)->nqsets;

                for (i = 0; i < n; ++i)
                        free_irq(adapter->msix_info[i + 1].vec,
                                 &adapter->sge.qs[i]);
        } else
                free_irq(adapter->pdev->irq, adapter);
}

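/*
 * Poll response queue 0's offload-packet counter until @n new management
 * replies have arrived, giving up after ~50 ms (5 polls, 10 ms apart).
 */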
static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
                              unsigned long n)
{
        int attempts = 5;

        while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
                if (!--attempts)
                        return -ETIMEDOUT;
                msleep(10);
        }
        return 0;
}

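/*
 * Prime the TP's parity-protected memories: write every SMT, L2T, and
 * routing-table entry (plus one TCB field) through the management Tx path,
 * then wait for the replies so each location ends up with valid parity.
 */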
static int init_tp_parity(struct adapter *adap)
{
        int i;
        struct sk_buff *skb;
        struct cpl_set_tcb_field *greq;
        unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

        t3_tp_set_offload_mode(adap, 1);

        for (i = 0; i < 16; i++) {
                struct cpl_smt_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
                req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
                req->iff = i;
                t3_mgmt_tx(adap, skb);
        }

        for (i = 0; i < 2048; i++) {
                struct cpl_l2t_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
                req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
                req->params = htonl(V_L2T_W_IDX(i));
                t3_mgmt_tx(adap, skb);
        }

        for (i = 0; i < 2048; i++) {
                struct cpl_rte_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
                req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
                req->l2t_idx = htonl(V_L2T_W_IDX(i));
                t3_mgmt_tx(adap, skb);
        }

        skb = alloc_skb(sizeof(*greq), GFP_KERNEL | __GFP_NOFAIL);
        greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
        memset(greq, 0, sizeof(*greq));
        greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
        greq->mask = cpu_to_be64(1);
        t3_mgmt_tx(adap, skb);

        i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
        t3_tp_set_offload_mode(adap, 0);
        return i;
}

/**
 *      setup_rss - configure RSS
 *      @adap: the adapter
 *
 *      Sets up RSS to distribute packets to multiple receive queues.  We
 *      configure the RSS CPU lookup table to distribute to the number of HW
 *      receive queues, and the response queue lookup table to narrow that
 *      down to the response queues actually configured for each port.
 *      We always configure the RSS mapping for two ports since the mapping
 *      table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
        int i;
        unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
        unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
        u8 cpus[SGE_QSETS + 1];
        u16 rspq_map[RSS_TABLE_SIZE];

        for (i = 0; i < SGE_QSETS; ++i)
                cpus[i] = i;
        cpus[SGE_QSETS] = 0xff; /* terminator */

        for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
                rspq_map[i] = i % nq0;
                rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
        }

        t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
                      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
                      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}

static void init_napi(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++) {
                struct sge_qset *qs = &adap->sge.qs[i];

                if (qs->adap)
                        netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
                                       64);
        }

        /*
         * netif_napi_add() can be called only once per napi_struct because it
         * adds each new napi_struct to a list.  Be careful not to call it a
         * second time, e.g., during EEH recovery, by making a note of it.
         */
        adap->flags |= NAPI_INIT;
}

/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++)
                if (adap->sge.qs[i].adap)
                        napi_disable(&adap->sge.qs[i].napi);
}

static void enable_all_napi(struct adapter *adap)
{
        int i;
        for (i = 0; i < SGE_QSETS; i++)
                if (adap->sge.qs[i].adap)
                        napi_enable(&adap->sge.qs[i].napi);
}

/**
 *      set_qset_lro - Turn a queue set's LRO capability on and off
 *      @dev: the device the qset is attached to
 *      @qset_idx: the queue set index
 *      @val: the LRO switch
 *
 *      Sets LRO on or off for a particular queue set.  The device's
 *      features flag is updated to reflect the LRO capability when all
 *      queues belonging to the device are in the same state.
 */
static void set_qset_lro(struct net_device *dev, int qset_idx, int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        adapter->params.sge.qset[qset_idx].lro = !!val;
        adapter->sge.qs[qset_idx].lro_enabled = !!val;
}

/**
 *      setup_sge_qsets - configure SGE Tx/Rx/response queues
 *      @adap: the adapter
 *
 *      Determines how many sets of SGE queues to use and initializes them.
 *      We support multiple queue sets per port if we have MSI-X, otherwise
 *      just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
        int i, j, err, irq_idx = 0, qset_idx = 0;
        unsigned int ntxq = SGE_TXQ_PER_SET;

        if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
                irq_idx = -1;

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                struct port_info *pi = netdev_priv(dev);

                pi->qs = &adap->sge.qs[pi->first_qset];
                for (j = pi->first_qset; j < pi->first_qset + pi->nqsets;
                     ++j, ++qset_idx) {
                        set_qset_lro(dev, qset_idx, pi->rx_offload & T3_LRO);
                        err = t3_sge_alloc_qset(adap, qset_idx, 1,
                                (adap->flags & USING_MSIX) ? qset_idx + 1 :
                                                             irq_idx,
                                &adap->params.sge.qset[qset_idx], ntxq, dev,
                                netdev_get_tx_queue(dev, j));
                        if (err) {
                                t3_free_sge_resources(adap);
                                return err;
                        }
                }
        }

        return 0;
}

static ssize_t attr_show(struct device *d, char *buf,
                         ssize_t(*format) (struct net_device *, char *))
{
        ssize_t len;

        /* Synchronize with ioctls that may shut down the device */
        rtnl_lock();
        len = (*format) (to_net_dev(d), buf);
        rtnl_unlock();
        return len;
}

static ssize_t attr_store(struct device *d,
                          const char *buf, size_t len,
                          ssize_t(*set) (struct net_device *, unsigned int),
                          unsigned int min_val, unsigned int max_val)
{
        char *endp;
        ssize_t ret;
        unsigned int val;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        val = simple_strtoul(buf, &endp, 0);
        if (endp == buf || val < min_val || val > max_val)
                return -EINVAL;

        rtnl_lock();
        ret = (*set) (to_net_dev(d), val);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
        struct port_info *pi = netdev_priv(dev); \
        struct adapter *adap = pi->adapter; \
        return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return attr_show(d, buf, format_##name); \
}

static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val && adap->params.rev == 0)
                return -EINVAL;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
            min_tids)
                return -EINVAL;
        adap->params.mc5.nfilters = val;
        return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
            MC5_MIN_TIDS)
                return -EINVAL;
        adap->params.mc5.nservers = val;
        return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, buf, len, set_nservers, 0, ~0);
}

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
        &dev_attr_cam_size.attr,
        &dev_attr_nfilters.attr,
        &dev_attr_nservers.attr,
        NULL
};

static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };

static ssize_t tm_attr_show(struct device *d,
                            char *buf, int sched)
{
        struct port_info *pi = netdev_priv(to_net_dev(d));
        struct adapter *adap = pi->adapter;
        unsigned int v, addr, bpt, cpt;
        ssize_t len;

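        /*
         * Each rate-limit register holds two Tx schedulers: even-numbered
         * schedulers in the low 16 bits, odd-numbered ones in the high 16.
         */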
        addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
        rtnl_lock();
        t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
        v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
        if (sched & 1)
                v >>= 16;
        bpt = (v >> 8) & 0xff;
        cpt = v & 0xff;
        if (!cpt)
                len = sprintf(buf, "disabled\n");
        else {
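                /*
                 * The scheduler sends bpt bytes every cpt core-clock ticks;
                 * cclk is presumably in kHz, so dividing bytes/sec by 125
                 * yields Kbps.
                 */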
                v = (adap->params.vpd.cclk * 1000) / cpt;
                len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
        }
        rtnl_unlock();
        return len;
}

static ssize_t tm_attr_store(struct device *d,
                             const char *buf, size_t len, int sched)
{
        struct port_info *pi = netdev_priv(to_net_dev(d));
        struct adapter *adap = pi->adapter;
        unsigned int val;
        char *endp;
        ssize_t ret;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        val = simple_strtoul(buf, &endp, 0);
        if (endp == buf || val > 10000000)
                return -EINVAL;

        rtnl_lock();
        ret = t3_config_sched(adap, val, sched);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
                            const char *buf, size_t len) \
{ \
        return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
        &dev_attr_sched0.attr,
        &dev_attr_sched1.attr,
        &dev_attr_sched2.attr,
        &dev_attr_sched3.attr,
        &dev_attr_sched4.attr,
        &dev_attr_sched5.attr,
        &dev_attr_sched6.attr,
        &dev_attr_sched7.attr,
        NULL
};

static struct attribute_group offload_attr_group = {.attrs = offload_attrs };

/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
        int ret;

        local_bh_disable();
        ret = t3_offload_tx(tdev, skb);
        local_bh_enable();
        return ret;
}

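/*
 * Program one source-MAC-table entry with the corresponding port's MAC
 * address by sending a CPL_SMT_WRITE_REQ down the offload Tx path.
 */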
static int write_smt_entry(struct adapter *adapter, int idx)
{
        struct cpl_smt_write_req *req;
        struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

        if (!skb)
                return -ENOMEM;

        req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
        req->mtu_idx = NMTUS - 1;       /* should be 0 but there's a T3 bug */
        req->iff = idx;
        memset(req->src_mac1, 0, sizeof(req->src_mac1));
        memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
        skb->priority = 1;
        offload_tx(&adapter->tdev, skb);
        return 0;
}

static int init_smt(struct adapter *adapter)
{
        int i;

        for_each_port(adapter, i)
            write_smt_entry(adapter, i);
        return 0;
}

static void init_port_mtus(struct adapter *adapter)
{
        unsigned int mtus = adapter->port[0]->mtu;

        if (adapter->port[1])
                mtus |= adapter->port[1]->mtu << 16;
        t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}

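/*
 * Build and send a firmware management work request configuring one
 * packet-scheduler entry: its min/max parameters and its port binding.
 */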
static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
                              int hi, int port)
{
        struct sk_buff *skb;
        struct mngt_pktsched_wr *req;
        int ret;

        skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
        req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
        req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
        req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
        req->sched = sched;
        req->idx = qidx;
        req->min = lo;
        req->max = hi;
        req->binding = port;
        ret = t3_mgmt_tx(adap, skb);

        return ret;
}

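/*
 * Associate every queue set with its port in the firmware packet scheduler.
 * The -1 values for lo and hi presumably leave the rate limits untouched;
 * only the port binding is being established here.
 */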
static int bind_qsets(struct adapter *adap)
{
        int i, j, err = 0;

        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                for (j = 0; j < pi->nqsets; ++j) {
                        int ret = send_pktsched_cmd(adap, 1,
                                                    pi->first_qset + j, -1,
                                                    -1, i);
                        if (ret)
                                err = ret;
                }
        }

        return err;
}

#define FW_FNAME "cxgb3/t3fw-%d.%d.%d.bin"
#define TPSRAM_NAME "cxgb3/t3%c_psram-%d.%d.%d.bin"

static int upgrade_fw(struct adapter *adap)
{
        int ret;
        char buf[64];
        const struct firmware *fw;
        struct device *dev = &adap->pdev->dev;

        snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
                 FW_VERSION_MINOR, FW_VERSION_MICRO);
        ret = request_firmware(&fw, buf, dev);
        if (ret < 0) {
                dev_err(dev, "could not upgrade firmware: unable to load %s\n",
                        buf);
                return ret;
        }
        ret = t3_load_fw(adap, fw->data, fw->size);
        release_firmware(fw);

        if (ret == 0)
                dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
                         FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
        else
                dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
                        FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

        return ret;
}

static inline char t3rev2char(struct adapter *adapter)
{
        char rev = 0;

        switch(adapter->params.rev) {
        case T3_REV_B:
        case T3_REV_B2:
                rev = 'b';
                break;
        case T3_REV_C:
                rev = 'c';
                break;
        }
        return rev;
}

static int update_tpsram(struct adapter *adap)
{
        const struct firmware *tpsram;
        char buf[64];
        struct device *dev = &adap->pdev->dev;
        int ret;
        char rev;

        rev = t3rev2char(adap);
        if (!rev)
                return 0;

        snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
                 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);

        ret = request_firmware(&tpsram, buf, dev);
        if (ret < 0) {
                dev_err(dev, "could not load TP SRAM: unable to load %s\n",
                        buf);
                return ret;
        }

        ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
        if (ret)
                goto release_tpsram;

        ret = t3_set_proto_sram(adap, tpsram->data);
        if (ret == 0)
                dev_info(dev,
                         "successful update of protocol engine "
                         "to %d.%d.%d\n",
                         TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
        else
                dev_err(dev, "failed to update protocol engine to %d.%d.%d\n",
                        TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
        if (ret)
                dev_err(dev, "loading protocol SRAM failed\n");

release_tpsram:
        release_firmware(tpsram);

        return ret;
}

/**
 *      cxgb_up - enable the adapter
 *      @adap: the adapter being enabled
 *
 *      Called when the first port is enabled, this function performs the
 *      actions necessary to make an adapter operational, such as completing
 *      the initialization of HW modules, and enabling interrupts.
 *
 *      Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
        int err;

        if (!(adap->flags & FULL_INIT_DONE)) {
                err = t3_check_fw_version(adap);
                if (err == -EINVAL) {
                        err = upgrade_fw(adap);
                        CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
                                FW_VERSION_MAJOR, FW_VERSION_MINOR,
                                FW_VERSION_MICRO, err ? "failed" : "succeeded");
                }

                err = t3_check_tpsram_version(adap);
                if (err == -EINVAL) {
                        err = update_tpsram(adap);
                        CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
                                TP_VERSION_MAJOR, TP_VERSION_MINOR,
                                TP_VERSION_MICRO, err ? "failed" : "succeeded");
                }

                /*
                 * Clear interrupts now to catch errors if t3_init_hw fails.
                 * We clear them again later as initialization may trigger
                 * conditions that can interrupt.
                 */
                t3_intr_clear(adap);

                err = t3_init_hw(adap, 0);
                if (err)
                        goto out;

                t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
                t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

                err = setup_sge_qsets(adap);
                if (err)
                        goto out;

                setup_rss(adap);
                if (!(adap->flags & NAPI_INIT))
                        init_napi(adap);

                t3_start_sge_timers(adap);
                adap->flags |= FULL_INIT_DONE;
        }

        t3_intr_clear(adap);

        if (adap->flags & USING_MSIX) {
                name_msix_vecs(adap);
                err = request_irq(adap->msix_info[0].vec,
                                  t3_async_intr_handler, 0,
                                  adap->msix_info[0].desc, adap);
                if (err)
                        goto irq_err;

                err = request_msix_data_irqs(adap);
                if (err) {
                        free_irq(adap->msix_info[0].vec, adap);
                        goto irq_err;
                }
        } else if ((err = request_irq(adap->pdev->irq,
                                      t3_intr_handler(adap,
                                                      adap->sge.qs[0].rspq.
                                                      polling),
                                      (adap->flags & USING_MSI) ?
                                       0 : IRQF_SHARED,
                                      adap->name, adap)))
                goto irq_err;

        enable_all_napi(adap);
        t3_sge_start(adap);
        t3_intr_enable(adap);

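        /*
         * On T3C parts with offload enabled, run the one-time TP parity
         * priming, then acknowledge the parity-error causes the priming
         * may have latched before unmasking the TP interrupts.
         */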
        if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
            is_offload(adap) && init_tp_parity(adap) == 0)
                adap->flags |= TP_PARITY_INIT;

        if (adap->flags & TP_PARITY_INIT) {
                t3_write_reg(adap, A_TP_INT_CAUSE,
                             F_CMCACHEPERR | F_ARPLUTPERR);
                t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
        }

        if (!(adap->flags & QUEUES_BOUND)) {
                err = bind_qsets(adap);
                if (err) {
                        CH_ERR(adap, "failed to bind qsets, err %d\n", err);
                        t3_intr_disable(adap);
                        free_irq_resources(adap);
                        goto out;
                }
                adap->flags |= QUEUES_BOUND;
        }

out:
        return err;
irq_err:
        CH_ERR(adap, "request_irq failed, err %d\n", err);
        goto out;
}

/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
        t3_sge_stop(adapter);
        spin_lock_irq(&adapter->work_lock);     /* sync with PHY intr task */
        t3_intr_disable(adapter);
        spin_unlock_irq(&adapter->work_lock);

        free_irq_resources(adapter);
        quiesce_rx(adapter);
        flush_workqueue(cxgb3_wq);      /* wait for external IRQ handler */
}

static void schedule_chk_task(struct adapter *adap)
{
        unsigned int timeo;

        timeo = adap->params.linkpoll_period ?
            (HZ * adap->params.linkpoll_period) / 10 :
            adap->params.stats_update_period * HZ;
        if (timeo)
                queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}

static int offload_open(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct t3cdev *tdev = dev2t3cdev(dev);
        int adap_up = adapter->open_device_map & PORT_MASK;
        int err;

        if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        if (!adap_up && (err = cxgb_up(adapter)) < 0)
                goto out;

        t3_tp_set_offload_mode(adapter, 1);
        tdev->lldev = adapter->port[0];
        err = cxgb3_offload_activate(adapter);
        if (err)
                goto out;

        init_port_mtus(adapter);
        t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
                     adapter->params.b_wnd,
                     adapter->params.rev == 0 ?
                     adapter->port[0]->mtu : 0xffff);
        init_smt(adapter);

        if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
                dev_dbg(&dev->dev, "cannot create sysfs group\n");

        /* Call back all registered clients */
        cxgb3_add_clients(tdev);

out:
        /* restore them in case the offload module has changed them */
        if (err) {
                t3_tp_set_offload_mode(adapter, 0);
                clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
                cxgb3_set_dummy_ops(tdev);
        }
        return err;
}

static int offload_close(struct t3cdev *tdev)
{
        struct adapter *adapter = tdev2adap(tdev);

        if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        /* Call back all registered clients */
        cxgb3_remove_clients(tdev);

        sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

        /* Flush work scheduled while releasing TIDs */
        flush_scheduled_work();

        tdev->lldev = NULL;
        cxgb3_set_dummy_ops(tdev);
        t3_tp_set_offload_mode(adapter, 0);
        clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

        if (!adapter->open_device_map)
                cxgb_down(adapter);

        cxgb3_offload_deactivate(adapter);
        return 0;
}

static int cxgb_open(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        int other_ports = adapter->open_device_map & PORT_MASK;
        int err;

        if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
                return err;

        set_bit(pi->port_id, &adapter->open_device_map);
        if (is_offload(adapter) && !ofld_disable) {
                err = offload_open(dev);
                if (err)
                        printk(KERN_WARNING
                               "Could not initialize offload capabilities\n");
        }

        dev->real_num_tx_queues = pi->nqsets;
        link_start(dev);
        t3_port_intr_enable(adapter, pi->port_id);
        netif_tx_start_all_queues(dev);
        if (!other_ports)
                schedule_chk_task(adapter);

        return 0;
}

static int cxgb_close(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        if (!adapter->open_device_map)
                return 0;

        /* Stop link fault interrupts */
        t3_xgm_intr_disable(adapter, pi->port_id);
        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);

        t3_port_intr_disable(adapter, pi->port_id);
        netif_tx_stop_all_queues(dev);
        pi->phy.ops->power_down(&pi->phy, 1);
        netif_carrier_off(dev);
        t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

        spin_lock_irq(&adapter->work_lock);     /* sync with update task */
        clear_bit(pi->port_id, &adapter->open_device_map);
        spin_unlock_irq(&adapter->work_lock);

        if (!(adapter->open_device_map & PORT_MASK))
                cancel_delayed_work_sync(&adapter->adap_check_task);

        if (!adapter->open_device_map)
                cxgb_down(adapter);

        return 0;
}

static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct net_device_stats *ns = &pi->netstats;
        const struct mac_stats *pstats;

        spin_lock(&adapter->stats_lock);
        pstats = t3_mac_update_stats(&pi->mac);
        spin_unlock(&adapter->stats_lock);

        ns->tx_bytes = pstats->tx_octets;
        ns->tx_packets = pstats->tx_frames;
        ns->rx_bytes = pstats->rx_octets;
        ns->rx_packets = pstats->rx_frames;
        ns->multicast = pstats->rx_mcast_frames;

        ns->tx_errors = pstats->tx_underrun;
        ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
            pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
            pstats->rx_fifo_ovfl;

        /* detailed rx_errors */
        ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
        ns->rx_over_errors = 0;
        ns->rx_crc_errors = pstats->rx_fcs_errs;
        ns->rx_frame_errors = pstats->rx_symbol_errs;
        ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
        ns->rx_missed_errors = pstats->rx_cong_drops;

        /* detailed tx_errors */
        ns->tx_aborted_errors = 0;
        ns->tx_carrier_errors = 0;
        ns->tx_fifo_errors = pstats->tx_underrun;
        ns->tx_heartbeat_errors = 0;
        ns->tx_window_errors = 0;
        return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        adapter->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
        "TxOctetsOK         ",
        "TxFramesOK         ",
        "TxMulticastFramesOK",
        "TxBroadcastFramesOK",
        "TxPauseFrames      ",
        "TxUnderrun         ",
        "TxExtUnderrun      ",

        "TxFrames64         ",
        "TxFrames65To127    ",
        "TxFrames128To255   ",
        "TxFrames256To511   ",
        "TxFrames512To1023  ",
        "TxFrames1024To1518 ",
        "TxFrames1519ToMax  ",

        "RxOctetsOK         ",
        "RxFramesOK         ",
        "RxMulticastFramesOK",
        "RxBroadcastFramesOK",
        "RxPauseFrames      ",
        "RxFCSErrors        ",
        "RxSymbolErrors     ",
        "RxShortErrors      ",
        "RxJabberErrors     ",
        "RxLengthErrors     ",
        "RxFIFOoverflow     ",

        "RxFrames64         ",
        "RxFrames65To127    ",
        "RxFrames128To255   ",
        "RxFrames256To511   ",
        "RxFrames512To1023  ",
        "RxFrames1024To1518 ",
        "RxFrames1519ToMax  ",

        "PhyFIFOErrors      ",
        "TSO                ",
        "VLANextractions    ",
        "VLANinsertions     ",
        "TxCsumOffload      ",
        "RxCsumGood         ",
        "LroAggregated      ",
        "LroFlushed         ",
        "LroNoDesc          ",
        "RxDrops            ",

        "CheckTXEnToggled   ",
        "CheckResets        ",

        "LinkFaults         ",
};

static int get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(stats_strings);
        default:
                return -EOPNOTSUPP;
        }
}

#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
        return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
        return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        u32 fw_vers = 0;
        u32 tp_vers = 0;

        spin_lock(&adapter->stats_lock);
        t3_get_fw_version(adapter, &fw_vers);
        t3_get_tp_version(adapter, &tp_vers);
        spin_unlock(&adapter->stats_lock);

        strcpy(info->driver, DRV_NAME);
        strcpy(info->version, DRV_VERSION);
        strcpy(info->bus_info, pci_name(adapter->pdev));
        if (!fw_vers)
                strcpy(info->fw_version, "N/A");
        else {
                snprintf(info->fw_version, sizeof(info->fw_version),
                         "%s %u.%u.%u TP %u.%u.%u",
                         G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
                         G_FW_VERSION_MAJOR(fw_vers),
                         G_FW_VERSION_MINOR(fw_vers),
                         G_FW_VERSION_MICRO(fw_vers),
                         G_TP_VERSION_MAJOR(tp_vers),
                         G_TP_VERSION_MINOR(tp_vers),
                         G_TP_VERSION_MICRO(tp_vers));
        }
}

static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
{
        if (stringset == ETH_SS_STATS)
                memcpy(data, stats_strings, sizeof(stats_strings));
}

static unsigned long collect_sge_port_stats(struct adapter *adapter,
                                            struct port_info *p, int idx)
{
        int i;
        unsigned long tot = 0;

        for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
                tot += adapter->sge.qs[i].port_stats[idx];
        return tot;
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
                      u64 *data)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        const struct mac_stats *s;

        spin_lock(&adapter->stats_lock);
        s = t3_mac_update_stats(&pi->mac);
        spin_unlock(&adapter->stats_lock);

        *data++ = s->tx_octets;
        *data++ = s->tx_frames;
        *data++ = s->tx_mcast_frames;
        *data++ = s->tx_bcast_frames;
        *data++ = s->tx_pause;
        *data++ = s->tx_underrun;
        *data++ = s->tx_fifo_urun;

        *data++ = s->tx_frames_64;
        *data++ = s->tx_frames_65_127;
        *data++ = s->tx_frames_128_255;
        *data++ = s->tx_frames_256_511;
        *data++ = s->tx_frames_512_1023;
        *data++ = s->tx_frames_1024_1518;
        *data++ = s->tx_frames_1519_max;

        *data++ = s->rx_octets;
        *data++ = s->rx_frames;
        *data++ = s->rx_mcast_frames;
        *data++ = s->rx_bcast_frames;
        *data++ = s->rx_pause;
        *data++ = s->rx_fcs_errs;
        *data++ = s->rx_symbol_errs;
        *data++ = s->rx_short;
        *data++ = s->rx_jabber;
        *data++ = s->rx_too_long;
        *data++ = s->rx_fifo_ovfl;

        *data++ = s->rx_frames_64;
        *data++ = s->rx_frames_65_127;
        *data++ = s->rx_frames_128_255;
        *data++ = s->rx_frames_256_511;
        *data++ = s->rx_frames_512_1023;
        *data++ = s->rx_frames_1024_1518;
        *data++ = s->rx_frames_1519_max;

        *data++ = pi->phy.fifo_errors;

        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
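        /*
         * The three LRO slots (LroAggregated/LroFlushed/LroNoDesc) are
         * reported as zeros; the entries remain so the ethtool string
         * table stays aligned.
         */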
        *data++ = 0;
        *data++ = 0;
        *data++ = 0;
        *data++ = s->rx_cong_drops;

        *data++ = s->num_toggled;
        *data++ = s->num_resets;

        *data++ = s->link_faults;
}

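/*
 * Copy a contiguous block of registers into the snapshot buffer; a
 * register's address doubles as its offset within the dump.
 */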
1510 static inline void reg_block_dump(struct adapter *ap, void *buf,
1511                                   unsigned int start, unsigned int end)
1512 {
1513         u32 *p = buf + start;
1514
1515         for (; start <= end; start += sizeof(u32))
1516                 *p++ = t3_read_reg(ap, start);
1517 }
1518
1519 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1520                      void *buf)
1521 {
1522         struct port_info *pi = netdev_priv(dev);
1523         struct adapter *ap = pi->adapter;
1524
1525         /*
1526          * Version scheme:
1527          * bits 0..9: chip version
1528          * bits 10..15: chip revision
1529          * bit 31: set for PCIe cards
1530          */
1531         regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1532
1533         /*
1534          * We skip the MAC statistics registers because they are clear-on-read.
1535          * Also reading multi-register stats would need to synchronize with the
1536          * periodic mac stats accumulation.  Hard to justify the complexity.
1537          */
1538         memset(buf, 0, T3_REGMAP_SIZE);
1539         reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1540         reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1541         reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1542         reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1543         reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1544         reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1545                        XGM_REG(A_XGM_SERDES_STAT3, 1));
1546         reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1547                        XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1548 }
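
/*
 * Illustrative only: a consumer of this dump (e.g. a parser for
 * "ethtool -d" output) could decode the version word built above as
 *
 *	unsigned int chip_ver = version & 0x3ff;        (bits 0..9)
 *	unsigned int chip_rev = (version >> 10) & 0x3f; (bits 10..15)
 *	unsigned int pcie     = (version >> 31) & 1;    (bit 31)
 */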
1549
1550 static int restart_autoneg(struct net_device *dev)
1551 {
1552         struct port_info *p = netdev_priv(dev);
1553
1554         if (!netif_running(dev))
1555                 return -EAGAIN;
1556         if (p->link_config.autoneg != AUTONEG_ENABLE)
1557                 return -EINVAL;
1558         p->phy.ops->autoneg_restart(&p->phy);
1559         return 0;
1560 }
1561
1562 static int cxgb3_phys_id(struct net_device *dev, u32 data)
1563 {
1564         struct port_info *pi = netdev_priv(dev);
1565         struct adapter *adapter = pi->adapter;
1566         int i;
1567
1568         if (data == 0)
1569                 data = 2;
1570
1571         for (i = 0; i < data * 2; i++) {
1572                 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1573                                  (i & 1) ? F_GPIO0_OUT_VAL : 0);
1574                 if (msleep_interruptible(500))
1575                         break;
1576         }
1577         t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1578                          F_GPIO0_OUT_VAL);
1579         return 0;
1580 }
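
/*
 * Usage sketch: this handler backs "ethtool -p", so
 *
 *	$ ethtool -p eth0 5
 *
 * blinks the GPIO0 LED for 5 seconds (a 0 argument defaults to 2
 * seconds); the final t3_set_reg_field() leaves the LED driven high.
 */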
1581
1582 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1583 {
1584         struct port_info *p = netdev_priv(dev);
1585
1586         cmd->supported = p->link_config.supported;
1587         cmd->advertising = p->link_config.advertising;
1588
1589         if (netif_carrier_ok(dev)) {
1590                 cmd->speed = p->link_config.speed;
1591                 cmd->duplex = p->link_config.duplex;
1592         } else {
1593                 cmd->speed = -1;
1594                 cmd->duplex = -1;
1595         }
1596
1597         cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1598         cmd->phy_address = p->phy.mdio.prtad;
1599         cmd->transceiver = XCVR_EXTERNAL;
1600         cmd->autoneg = p->link_config.autoneg;
1601         cmd->maxtxpkt = 0;
1602         cmd->maxrxpkt = 0;
1603         return 0;
1604 }
1605
1606 static int speed_duplex_to_caps(int speed, int duplex)
1607 {
1608         int cap = 0;
1609
1610         switch (speed) {
1611         case SPEED_10:
1612                 if (duplex == DUPLEX_FULL)
1613                         cap = SUPPORTED_10baseT_Full;
1614                 else
1615                         cap = SUPPORTED_10baseT_Half;
1616                 break;
1617         case SPEED_100:
1618                 if (duplex == DUPLEX_FULL)
1619                         cap = SUPPORTED_100baseT_Full;
1620                 else
1621                         cap = SUPPORTED_100baseT_Half;
1622                 break;
1623         case SPEED_1000:
1624                 if (duplex == DUPLEX_FULL)
1625                         cap = SUPPORTED_1000baseT_Full;
1626                 else
1627                         cap = SUPPORTED_1000baseT_Half;
1628                 break;
1629         case SPEED_10000:
1630                 if (duplex == DUPLEX_FULL)
1631                         cap = SUPPORTED_10000baseT_Full;
1632         }
1633         return cap;
1634 }
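
/*
 * For reference: speed_duplex_to_caps(SPEED_100, DUPLEX_HALF) yields
 * SUPPORTED_100baseT_Half, while an unsupported combination such as
 * SPEED_10000/DUPLEX_HALF falls through and yields 0, which the
 * callers below treat as -EINVAL.
 */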
1635
1636 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1637                       ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1638                       ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1639                       ADVERTISED_10000baseT_Full)
1640
1641 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1642 {
1643         struct port_info *p = netdev_priv(dev);
1644         struct link_config *lc = &p->link_config;
1645
1646         if (!(lc->supported & SUPPORTED_Autoneg)) {
1647                 /*
1648                  * PHY offers a single speed/duplex.  See if that's what's
1649                  * being requested.
1650                  */
1651                 if (cmd->autoneg == AUTONEG_DISABLE) {
1652                         int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1653                         if (lc->supported & cap)
1654                                 return 0;
1655                 }
1656                 return -EINVAL;
1657         }
1658
1659         if (cmd->autoneg == AUTONEG_DISABLE) {
1660                 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1661
1662                 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
1663                         return -EINVAL;
1664                 lc->requested_speed = cmd->speed;
1665                 lc->requested_duplex = cmd->duplex;
1666                 lc->advertising = 0;
1667         } else {
1668                 cmd->advertising &= ADVERTISED_MASK;
1669                 cmd->advertising &= lc->supported;
1670                 if (!cmd->advertising)
1671                         return -EINVAL;
1672                 lc->requested_speed = SPEED_INVALID;
1673                 lc->requested_duplex = DUPLEX_INVALID;
1674                 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1675         }
1676         lc->autoneg = cmd->autoneg;
1677         if (netif_running(dev))
1678                 t3_link_start(&p->phy, &p->mac, lc);
1679         return 0;
1680 }
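
/*
 * Usage sketch: forcing a fixed speed corresponds to
 *
 *	$ ethtool -s eth0 speed 100 duplex full autoneg off
 *
 * Note that set_settings() deliberately rejects a forced SPEED_1000:
 * on this hardware 1Gb/s links must be autonegotiated.
 */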
1681
1682 static void get_pauseparam(struct net_device *dev,
1683                            struct ethtool_pauseparam *epause)
1684 {
1685         struct port_info *p = netdev_priv(dev);
1686
1687         epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1688         epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1689         epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1690 }
1691
1692 static int set_pauseparam(struct net_device *dev,
1693                           struct ethtool_pauseparam *epause)
1694 {
1695         struct port_info *p = netdev_priv(dev);
1696         struct link_config *lc = &p->link_config;
1697
1698         if (epause->autoneg == AUTONEG_DISABLE)
1699                 lc->requested_fc = 0;
1700         else if (lc->supported & SUPPORTED_Autoneg)
1701                 lc->requested_fc = PAUSE_AUTONEG;
1702         else
1703                 return -EINVAL;
1704
1705         if (epause->rx_pause)
1706                 lc->requested_fc |= PAUSE_RX;
1707         if (epause->tx_pause)
1708                 lc->requested_fc |= PAUSE_TX;
1709         if (lc->autoneg == AUTONEG_ENABLE) {
1710                 if (netif_running(dev))
1711                         t3_link_start(&p->phy, &p->mac, lc);
1712         } else {
1713                 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1714                 if (netif_running(dev))
1715                         t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1716         }
1717         return 0;
1718 }
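
/*
 * Usage sketch: "ethtool -A eth0 autoneg off rx on tx off" arrives
 * here with epause->autoneg == AUTONEG_DISABLE.  If link
 * autonegotiation is off as well, the new setting takes effect
 * immediately through t3_mac_set_speed_duplex_fc(); the -1 arguments
 * leave speed and duplex as they are.
 */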
1719
1720 static u32 get_rx_csum(struct net_device *dev)
1721 {
1722         struct port_info *p = netdev_priv(dev);
1723
1724         return p->rx_offload & T3_RX_CSUM;
1725 }
1726
1727 static int set_rx_csum(struct net_device *dev, u32 data)
1728 {
1729         struct port_info *p = netdev_priv(dev);
1730
1731         if (data) {
1732                 p->rx_offload |= T3_RX_CSUM;
1733         } else {
1734                 int i;
1735
1736                 p->rx_offload &= ~(T3_RX_CSUM | T3_LRO);
1737                 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
1738                         set_qset_lro(dev, i, 0);
1739         }
1740         return 0;
1741 }
1742
1743 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1744 {
1745         struct port_info *pi = netdev_priv(dev);
1746         struct adapter *adapter = pi->adapter;
1747         const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1748
1749         e->rx_max_pending = MAX_RX_BUFFERS;
1750         e->rx_mini_max_pending = 0;
1751         e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1752         e->tx_max_pending = MAX_TXQ_ENTRIES;
1753
1754         e->rx_pending = q->fl_size;
1755         e->rx_mini_pending = q->rspq_size;
1756         e->rx_jumbo_pending = q->jumbo_size;
1757         e->tx_pending = q->txq_size[0];
1758 }
1759
1760 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1761 {
1762         struct port_info *pi = netdev_priv(dev);
1763         struct adapter *adapter = pi->adapter;
1764         struct qset_params *q;
1765         int i;
1766
1767         if (e->rx_pending > MAX_RX_BUFFERS ||
1768             e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1769             e->tx_pending > MAX_TXQ_ENTRIES ||
1770             e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1771             e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1772             e->rx_pending < MIN_FL_ENTRIES ||
1773             e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1774             e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1775                 return -EINVAL;
1776
1777         if (adapter->flags & FULL_INIT_DONE)
1778                 return -EBUSY;
1779
1780         q = &adapter->params.sge.qset[pi->first_qset];
1781         for (i = 0; i < pi->nqsets; ++i, ++q) {
1782                 q->rspq_size = e->rx_mini_pending;
1783                 q->fl_size = e->rx_pending;
1784                 q->jumbo_size = e->rx_jumbo_pending;
1785                 q->txq_size[0] = e->tx_pending;
1786                 q->txq_size[1] = e->tx_pending;
1787                 q->txq_size[2] = e->tx_pending;
1788         }
1789         return 0;
1790 }
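
/*
 * Usage sketch: these two handlers back "ethtool -g/-G", e.g.
 *
 *	$ ethtool -G eth0 rx 1024 rx-jumbo 512 tx 2048
 *
 * One quirk worth noting: the "rx-mini" ring parameter is reused here
 * to expose the response-queue size (rspq_size) rather than a real
 * mini ring.
 */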
1791
1792 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1793 {
1794         struct port_info *pi = netdev_priv(dev);
1795         struct adapter *adapter = pi->adapter;
1796         struct qset_params *qsp = &adapter->params.sge.qset[0];
1797         struct sge_qset *qs = &adapter->sge.qs[0];
1798
1799         if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1800                 return -EINVAL;
1801
1802         qsp->coalesce_usecs = c->rx_coalesce_usecs;
1803         t3_update_qset_coalesce(qs, qsp);
1804         return 0;
1805 }
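
/*
 * Usage sketch: "ethtool -C eth0 rx-usecs 50" lands here.  Note that
 * set_coalesce() programs queue set 0 only; per-queue-set interrupt
 * latencies can still be set via the CHELSIO_SET_QSET_PARAMS private
 * ioctl (t.intr_lat) below.
 */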
1806
1807 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1808 {
1809         struct port_info *pi = netdev_priv(dev);
1810         struct adapter *adapter = pi->adapter;
1811         struct qset_params *q = adapter->params.sge.qset;
1812
1813         c->rx_coalesce_usecs = q->coalesce_usecs;
1814         return 0;
1815 }
1816
1817 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1818                       u8 *data)
1819 {
1820         struct port_info *pi = netdev_priv(dev);
1821         struct adapter *adapter = pi->adapter;
1822         int i, err = 0;
1823
1824         u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1825         if (!buf)
1826                 return -ENOMEM;
1827
1828         e->magic = EEPROM_MAGIC;
1829         for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1830                 err = t3_seeprom_read(adapter, i, (__le32 *)&buf[i]);
1831
1832         if (!err)
1833                 memcpy(data, buf + e->offset, e->len);
1834         kfree(buf);
1835         return err;
1836 }
1837
1838 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1839                       u8 *data)
1840 {
1841         struct port_info *pi = netdev_priv(dev);
1842         struct adapter *adapter = pi->adapter;
1843         u32 aligned_offset, aligned_len;
1844         __le32 *p;
1845         u8 *buf;
1846         int err;
1847
1848         if (eeprom->magic != EEPROM_MAGIC)
1849                 return -EINVAL;
1850
1851         aligned_offset = eeprom->offset & ~3;
1852         aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1853
1854         if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1855                 buf = kmalloc(aligned_len, GFP_KERNEL);
1856                 if (!buf)
1857                         return -ENOMEM;
1858                 err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
1859                 if (!err && aligned_len > 4)
1860                         err = t3_seeprom_read(adapter,
1861                                               aligned_offset + aligned_len - 4,
1862                                               (__le32 *)&buf[aligned_len - 4]);
1863                 if (err)
1864                         goto out;
1865                 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1866         } else
1867                 buf = data;
1868
1869         err = t3_seeprom_wp(adapter, 0);
1870         if (err)
1871                 goto out;
1872
1873         for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
1874                 err = t3_seeprom_write(adapter, aligned_offset, *p);
1875                 aligned_offset += 4;
1876         }
1877
1878         if (!err)
1879                 err = t3_seeprom_wp(adapter, 1);
1880 out:
1881         if (buf != data)
1882                 kfree(buf);
1883         return err;
1884 }
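
/*
 * Worked example of the alignment logic above: a write of len 3 at
 * offset 2 gives aligned_offset = 0 and aligned_len = (3 + 2 + 3) & ~3
 * = 8, so both covering words are read back, the three payload bytes
 * are spliced in at offset 2, and the two words are rewritten.
 */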
1885
1886 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1887 {
1888         wol->supported = 0;
1889         wol->wolopts = 0;
1890         memset(&wol->sopass, 0, sizeof(wol->sopass));
1891 }
1892
1893 static const struct ethtool_ops cxgb_ethtool_ops = {
1894         .get_settings = get_settings,
1895         .set_settings = set_settings,
1896         .get_drvinfo = get_drvinfo,
1897         .get_msglevel = get_msglevel,
1898         .set_msglevel = set_msglevel,
1899         .get_ringparam = get_sge_param,
1900         .set_ringparam = set_sge_param,
1901         .get_coalesce = get_coalesce,
1902         .set_coalesce = set_coalesce,
1903         .get_eeprom_len = get_eeprom_len,
1904         .get_eeprom = get_eeprom,
1905         .set_eeprom = set_eeprom,
1906         .get_pauseparam = get_pauseparam,
1907         .set_pauseparam = set_pauseparam,
1908         .get_rx_csum = get_rx_csum,
1909         .set_rx_csum = set_rx_csum,
1910         .set_tx_csum = ethtool_op_set_tx_csum,
1911         .set_sg = ethtool_op_set_sg,
1912         .get_link = ethtool_op_get_link,
1913         .get_strings = get_strings,
1914         .phys_id = cxgb3_phys_id,
1915         .nway_reset = restart_autoneg,
1916         .get_sset_count = get_sset_count,
1917         .get_ethtool_stats = get_stats,
1918         .get_regs_len = get_regs_len,
1919         .get_regs = get_regs,
1920         .get_wol = get_wol,
1921         .set_tso = ethtool_op_set_tso,
1922 };
1923
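/*
 * Range check helper for the private ioctls below: a negative value
 * means "parameter not supplied" and passes, matching the t.xxx >= 0
 * tests that decide whether each field is actually applied.
 */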
1924 static int in_range(int val, int lo, int hi)
1925 {
1926         return val < 0 || (val <= hi && val >= lo);
1927 }
1928
1929 static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1930 {
1931         struct port_info *pi = netdev_priv(dev);
1932         struct adapter *adapter = pi->adapter;
1933         u32 cmd;
1934         int ret;
1935
1936         if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1937                 return -EFAULT;
1938
1939         switch (cmd) {
1940         case CHELSIO_SET_QSET_PARAMS:{
1941                 int i;
1942                 struct qset_params *q;
1943                 struct ch_qset_params t;
1944                 int q1 = pi->first_qset;
1945                 int nqsets = pi->nqsets;
1946
1947                 if (!capable(CAP_NET_ADMIN))
1948                         return -EPERM;
1949                 if (copy_from_user(&t, useraddr, sizeof(t)))
1950                         return -EFAULT;
1951                 if (t.qset_idx >= SGE_QSETS)
1952                         return -EINVAL;
1953                 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
1954                         !in_range(t.cong_thres, 0, 255) ||
1955                         !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
1956                                 MAX_TXQ_ENTRIES) ||
1957                         !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
1958                                 MAX_TXQ_ENTRIES) ||
1959                         !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
1960                                 MAX_CTRL_TXQ_ENTRIES) ||
1961                         !in_range(t.fl_size[0], MIN_FL_ENTRIES,
1962                                 MAX_RX_BUFFERS) ||
1963                         !in_range(t.fl_size[1], MIN_FL_ENTRIES,
1964                                 MAX_RX_JUMBO_BUFFERS) ||
1965                         !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
1966                                 MAX_RSPQ_ENTRIES))
1967                         return -EINVAL;
1968
1969                 if ((adapter->flags & FULL_INIT_DONE) && t.lro > 0)
1970                         for_each_port(adapter, i) {
1971                                 pi = adap2pinfo(adapter, i);
1972                                 if (t.qset_idx >= pi->first_qset &&
1973                                     t.qset_idx < pi->first_qset + pi->nqsets &&
1974                                     !(pi->rx_offload & T3_RX_CSUM))
1975                                         return -EINVAL;
1976                         }
1977
1978                 if ((adapter->flags & FULL_INIT_DONE) &&
1979                         (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
1980                         t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
1981                         t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
1982                         t.polling >= 0 || t.cong_thres >= 0))
1983                         return -EBUSY;
1984
1985                 /* Allow setting of any available qset when offload enabled */
1986                 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
1987                         q1 = 0;
1988                         for_each_port(adapter, i) {
1989                                 pi = adap2pinfo(adapter, i);
1990                                 nqsets += pi->first_qset + pi->nqsets;
1991                         }
1992                 }
1993
1994                 if (t.qset_idx < q1)
1995                         return -EINVAL;
1996                 if (t.qset_idx > q1 + nqsets - 1)
1997                         return -EINVAL;
1998
1999                 q = &adapter->params.sge.qset[t.qset_idx];
2000
2001                 if (t.rspq_size >= 0)
2002                         q->rspq_size = t.rspq_size;
2003                 if (t.fl_size[0] >= 0)
2004                         q->fl_size = t.fl_size[0];
2005                 if (t.fl_size[1] >= 0)
2006                         q->jumbo_size = t.fl_size[1];
2007                 if (t.txq_size[0] >= 0)
2008                         q->txq_size[0] = t.txq_size[0];
2009                 if (t.txq_size[1] >= 0)
2010                         q->txq_size[1] = t.txq_size[1];
2011                 if (t.txq_size[2] >= 0)
2012                         q->txq_size[2] = t.txq_size[2];
2013                 if (t.cong_thres >= 0)
2014                         q->cong_thres = t.cong_thres;
2015                 if (t.intr_lat >= 0) {
2016                         struct sge_qset *qs = &adapter->sge.qs[t.qset_idx];
2018
2019                         q->coalesce_usecs = t.intr_lat;
2020                         t3_update_qset_coalesce(qs, q);
2021                 }
2022                 if (t.polling >= 0) {
2023                         if (adapter->flags & USING_MSIX)
2024                                 q->polling = t.polling;
2025                         else {
2026                                 /* No polling with INTx for T3A */
2027                                 if (adapter->params.rev == 0 &&
2028                                         !(adapter->flags & USING_MSI))
2029                                         t.polling = 0;
2030
2031                                 for (i = 0; i < SGE_QSETS; i++) {
2032                                         q = &adapter->params.sge.qset[i];
2034                                         q->polling = t.polling;
2035                                 }
2036                         }
2037                 }
2038                 if (t.lro >= 0)
2039                         set_qset_lro(dev, t.qset_idx, t.lro);
2040
2041                 break;
2042         }
2043         case CHELSIO_GET_QSET_PARAMS:{
2044                 struct qset_params *q;
2045                 struct ch_qset_params t;
2046                 int q1 = pi->first_qset;
2047                 int nqsets = pi->nqsets;
2048                 int i;
2049
2050                 if (copy_from_user(&t, useraddr, sizeof(t)))
2051                         return -EFAULT;
2052
2053                 /* Display qsets for all ports when offload enabled */
2054                 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2055                         q1 = 0;
2056                         for_each_port(adapter, i) {
2057                                 pi = adap2pinfo(adapter, i);
2058                                 nqsets = pi->first_qset + pi->nqsets;
2059                         }
2060                 }
2061
2062                 if (t.qset_idx >= nqsets)
2063                         return -EINVAL;
2064
2065                 q = &adapter->params.sge.qset[q1 + t.qset_idx];
2066                 t.rspq_size = q->rspq_size;
2067                 t.txq_size[0] = q->txq_size[0];
2068                 t.txq_size[1] = q->txq_size[1];
2069                 t.txq_size[2] = q->txq_size[2];
2070                 t.fl_size[0] = q->fl_size;
2071                 t.fl_size[1] = q->jumbo_size;
2072                 t.polling = q->polling;
2073                 t.lro = q->lro;
2074                 t.intr_lat = q->coalesce_usecs;
2075                 t.cong_thres = q->cong_thres;
2076                 t.qnum = q1;
2077
2078                 if (adapter->flags & USING_MSIX)
2079                         t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
2080                 else
2081                         t.vector = adapter->pdev->irq;
2082
2083                 if (copy_to_user(useraddr, &t, sizeof(t)))
2084                         return -EFAULT;
2085                 break;
2086         }
2087         case CHELSIO_SET_QSET_NUM:{
2088                 struct ch_reg edata;
2089                 unsigned int i, first_qset = 0, other_qsets = 0;
2090
2091                 if (!capable(CAP_NET_ADMIN))
2092                         return -EPERM;
2093                 if (adapter->flags & FULL_INIT_DONE)
2094                         return -EBUSY;
2095                 if (copy_from_user(&edata, useraddr, sizeof(edata)))
2096                         return -EFAULT;
2097                 if (edata.val < 1 ||
2098                         (edata.val > 1 && !(adapter->flags & USING_MSIX)))
2099                         return -EINVAL;
2100
2101                 for_each_port(adapter, i)
2102                         if (adapter->port[i] && adapter->port[i] != dev)
2103                                 other_qsets += adap2pinfo(adapter, i)->nqsets;
2104
2105                 if (edata.val + other_qsets > SGE_QSETS)
2106                         return -EINVAL;
2107
2108                 pi->nqsets = edata.val;
2109
2110                 for_each_port(adapter, i)
2111                         if (adapter->port[i]) {
2112                                 pi = adap2pinfo(adapter, i);
2113                                 pi->first_qset = first_qset;
2114                                 first_qset += pi->nqsets;
2115                         }
2116                 break;
2117         }
2118         case CHELSIO_GET_QSET_NUM:{
2119                 struct ch_reg edata;
2120
2121                 edata.cmd = CHELSIO_GET_QSET_NUM;
2122                 edata.val = pi->nqsets;
2123                 if (copy_to_user(useraddr, &edata, sizeof(edata)))
2124                         return -EFAULT;
2125                 break;
2126         }
2127         case CHELSIO_LOAD_FW:{
2128                 u8 *fw_data;
2129                 struct ch_mem_range t;
2130
2131                 if (!capable(CAP_SYS_RAWIO))
2132                         return -EPERM;
2133                 if (copy_from_user(&t, useraddr, sizeof(t)))
2134                         return -EFAULT;
2135                 /* XXX: t.len is user-supplied; only kmalloc() bounds it */
2136                 fw_data = kmalloc(t.len, GFP_KERNEL);
2137                 if (!fw_data)
2138                         return -ENOMEM;
2139
2140                 if (copy_from_user(fw_data, useraddr + sizeof(t), t.len)) {
2142                         kfree(fw_data);
2143                         return -EFAULT;
2144                 }
2145
2146                 ret = t3_load_fw(adapter, fw_data, t.len);
2147                 kfree(fw_data);
2148                 if (ret)
2149                         return ret;
2150                 break;
2151         }
2152         case CHELSIO_SETMTUTAB:{
2153                 struct ch_mtus m;
2154                 int i;
2155
2156                 if (!is_offload(adapter))
2157                         return -EOPNOTSUPP;
2158                 if (!capable(CAP_NET_ADMIN))
2159                         return -EPERM;
2160                 if (offload_running(adapter))
2161                         return -EBUSY;
2162                 if (copy_from_user(&m, useraddr, sizeof(m)))
2163                         return -EFAULT;
2164                 if (m.nmtus != NMTUS)
2165                         return -EINVAL;
2166                 if (m.mtus[0] < 81)     /* accommodate SACK */
2167                         return -EINVAL;
2168
2169                 /* MTUs must be in ascending order */
2170                 for (i = 1; i < NMTUS; ++i)
2171                         if (m.mtus[i] < m.mtus[i - 1])
2172                                 return -EINVAL;
2173
2174                 memcpy(adapter->params.mtus, m.mtus,
2175                         sizeof(adapter->params.mtus));
2176                 break;
2177         }
2178         case CHELSIO_GET_PM:{
2179                 struct tp_params *p = &adapter->params.tp;
2180                 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
2181
2182                 if (!is_offload(adapter))
2183                         return -EOPNOTSUPP;
2184                 m.tx_pg_sz = p->tx_pg_size;
2185                 m.tx_num_pg = p->tx_num_pgs;
2186                 m.rx_pg_sz = p->rx_pg_size;
2187                 m.rx_num_pg = p->rx_num_pgs;
2188                 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
2189                 if (copy_to_user(useraddr, &m, sizeof(m)))
2190                         return -EFAULT;
2191                 break;
2192         }
2193         case CHELSIO_SET_PM:{
2194                 struct ch_pm m;
2195                 struct tp_params *p = &adapter->params.tp;
2196
2197                 if (!is_offload(adapter))
2198                         return -EOPNOTSUPP;
2199                 if (!capable(CAP_NET_ADMIN))
2200                         return -EPERM;
2201                 if (adapter->flags & FULL_INIT_DONE)
2202                         return -EBUSY;
2203                 if (copy_from_user(&m, useraddr, sizeof(m)))
2204                         return -EFAULT;
2205                 if (!is_power_of_2(m.rx_pg_sz) ||
2206                         !is_power_of_2(m.tx_pg_sz))
2207                         return -EINVAL; /* not power of 2 */
2208                 if (!(m.rx_pg_sz & 0x14000))
2209                         return -EINVAL; /* not 16KB or 64KB */
2210                 if (!(m.tx_pg_sz & 0x1554000))
2211                         return -EINVAL; /* not 16KB..16MB, power-of-4 steps */
2212                 if (m.tx_num_pg == -1)
2213                         m.tx_num_pg = p->tx_num_pgs;
2214                 if (m.rx_num_pg == -1)
2215                         m.rx_num_pg = p->rx_num_pgs;
2216                 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
2217                         return -EINVAL;
2218                 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
2219                         m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
2220                         return -EINVAL;
2221                 p->rx_pg_size = m.rx_pg_sz;
2222                 p->tx_pg_size = m.tx_pg_sz;
2223                 p->rx_num_pgs = m.rx_num_pg;
2224                 p->tx_num_pgs = m.tx_num_pg;
2225                 break;
2226         }
2227         case CHELSIO_GET_MEM:{
2228                 struct ch_mem_range t;
2229                 struct mc7 *mem;
2230                 u64 buf[32];
2231
2232                 if (!is_offload(adapter))
2233                         return -EOPNOTSUPP;
2234                 if (!(adapter->flags & FULL_INIT_DONE))
2235                         return -EIO;    /* need the memory controllers */
2236                 if (copy_from_user(&t, useraddr, sizeof(t)))
2237                         return -EFAULT;
2238                 if ((t.addr & 7) || (t.len & 7))
2239                         return -EINVAL;
2240                 if (t.mem_id == MEM_CM)
2241                         mem = &adapter->cm;
2242                 else if (t.mem_id == MEM_PMRX)
2243                         mem = &adapter->pmrx;
2244                 else if (t.mem_id == MEM_PMTX)
2245                         mem = &adapter->pmtx;
2246                 else
2247                         return -EINVAL;
2248
2249                 /*
2250                  * Version scheme:
2251                  * bits 0..9: chip version
2252                  * bits 10..15: chip revision
2253                  */
2254                 t.version = 3 | (adapter->params.rev << 10);
2255                 if (copy_to_user(useraddr, &t, sizeof(t)))
2256                         return -EFAULT;
2257
2258                 /*
2259                  * Read 256 bytes at a time as len can be large and we don't
2260                  * want to use huge intermediate buffers.
2261                  */
2262                 useraddr += sizeof(t);  /* advance to start of buffer */
2263                 while (t.len) {
2264                         unsigned int chunk =
2265                                 min_t(unsigned int, t.len, sizeof(buf));
2266
2267                         ret = t3_mc7_bd_read(mem, t.addr / 8, chunk / 8, buf);
2270                         if (ret)
2271                                 return ret;
2272                         if (copy_to_user(useraddr, buf, chunk))
2273                                 return -EFAULT;
2274                         useraddr += chunk;
2275                         t.addr += chunk;
2276                         t.len -= chunk;
2277                 }
2278                 break;
2279         }
2280         case CHELSIO_SET_TRACE_FILTER:{
2281                 struct ch_trace t;
2282                 const struct trace_params *tp;
2283
2284                 if (!capable(CAP_NET_ADMIN))
2285                         return -EPERM;
2286                 if (!offload_running(adapter))
2287                         return -EAGAIN;
2288                 if (copy_from_user(&t, useraddr, sizeof(t)))
2289                         return -EFAULT;
2290
2291                 tp = (const struct trace_params *)&t.sip;
2292                 if (t.config_tx)
2293                         t3_config_trace_filter(adapter, tp, 0,
2294                                                 t.invert_match,
2295                                                 t.trace_tx);
2296                 if (t.config_rx)
2297                         t3_config_trace_filter(adapter, tp, 1,
2298                                                 t.invert_match,
2299                                                 t.trace_rx);
2300                 break;
2301         }
2302         default:
2303                 return -EOPNOTSUPP;
2304         }
2305         return 0;
2306 }
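
/*
 * Illustrative user-space invocation of one of the extension commands
 * above (hypothetical helper, not shipped with the driver):
 *
 *	struct ifreq ifr;
 *	struct ch_reg edata = { .cmd = CHELSIO_GET_QSET_NUM };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (void *)&edata;
 *	if (ioctl(fd, SIOCCHIOCTL, &ifr) == 0)
 *		printf("%u queue sets\n", edata.val);
 *
 * The first u32 of the payload always carries the sub-command, which
 * is what the copy_from_user(&cmd, ...) at the top of the function
 * reads before dispatching.
 */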
2307
2308 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2309 {
2310         struct mii_ioctl_data *data = if_mii(req);
2311         struct port_info *pi = netdev_priv(dev);
2312         struct adapter *adapter = pi->adapter;
2313
2314         switch (cmd) {
2315         case SIOCGMIIREG:
2316         case SIOCSMIIREG:
2317                 /* Convert phy_id from older PRTAD/DEVAD format */
2318                 if (is_10G(adapter) &&
2319                     !mdio_phy_id_is_c45(data->phy_id) &&
2320                     (data->phy_id & 0x1f00) &&
2321                     !(data->phy_id & 0xe0e0))
2322                         data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
2323                                                        data->phy_id & 0x1f);
2324                 /* FALLTHRU */
2325         case SIOCGMIIPHY:
2326                 return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
2327         case SIOCCHIOCTL:
2328                 return cxgb_extension_ioctl(dev, req->ifr_data);
2329         default:
2330                 return -EOPNOTSUPP;
2331         }
2332 }
2333
2334 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2335 {
2336         struct port_info *pi = netdev_priv(dev);
2337         struct adapter *adapter = pi->adapter;
2338         int ret;
2339
2340         if (new_mtu < 81)       /* accommodate SACK */
2341                 return -EINVAL;
2342         if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2343                 return ret;
2344         dev->mtu = new_mtu;
2345         init_port_mtus(adapter);
2346         if (adapter->params.rev == 0 && offload_running(adapter))
2347                 t3_load_mtus(adapter, adapter->params.mtus,
2348                              adapter->params.a_wnd, adapter->params.b_wnd,
2349                              adapter->port[0]->mtu);
2350         return 0;
2351 }
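
/*
 * The 81-byte floor mirrors the m.mtus[0] < 81 check in
 * CHELSIO_SETMTUTAB above: the smallest MTU must still leave room for
 * TCP options such as SACK, hence the "accommodate SACK" comment in
 * both places.
 */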
2352
2353 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2354 {
2355         struct port_info *pi = netdev_priv(dev);
2356         struct adapter *adapter = pi->adapter;
2357         struct sockaddr *addr = p;
2358
2359         if (!is_valid_ether_addr(addr->sa_data))
2360                 return -EINVAL;
2361
2362         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2363         t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2364         if (offload_running(adapter))
2365                 write_smt_entry(adapter, pi->port_id);
2366         return 0;
2367 }
2368
2369 /**
2370  * t3_synchronize_rx - wait for current Rx processing on a port to complete
2371  * @adap: the adapter
2372  * @p: the port
2373  *
2374  * Ensures that current Rx processing on any of the queues associated with
2375  * the given port completes before returning.  We do this by acquiring and
2376  * releasing the locks of the response queues associated with the port.
2377  */
2378 static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2379 {
2380         int i;
2381
2382         for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
2383                 struct sge_rspq *q = &adap->sge.qs[i].rspq;
2384
2385                 spin_lock_irq(&q->lock);
2386                 spin_unlock_irq(&q->lock);
2387         }
2388 }
2389
2390 static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2391 {
2392         struct port_info *pi = netdev_priv(dev);
2393         struct adapter *adapter = pi->adapter;
2394
2395         pi->vlan_grp = grp;
2396         if (adapter->params.rev > 0)
2397                 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2398         else {
2399                 /* single control for all ports */
2400                 unsigned int i, have_vlans = 0;
2401                 for_each_port(adapter, i)
2402                     have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2403
2404                 t3_set_vlan_accel(adapter, 1, have_vlans);
2405         }
2406         t3_synchronize_rx(adapter, pi);
2407 }
2408
2409 #ifdef CONFIG_NET_POLL_CONTROLLER
2410 static void cxgb_netpoll(struct net_device *dev)
2411 {
2412         struct port_info *pi = netdev_priv(dev);
2413         struct adapter *adapter = pi->adapter;
2414         int qidx;
2415
2416         for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2417                 struct sge_qset *qs = &adapter->sge.qs[qidx];
2418                 void *source;
2419
2420                 if (adapter->flags & USING_MSIX)
2421                         source = qs;
2422                 else
2423                         source = adapter;
2424
2425                 t3_intr_handler(adapter, qs->rspq.polling)(0, source);
2426         }
2427 }
2428 #endif
2429
2430 /*
2431  * Periodic accumulation of MAC statistics.
2432  */
2433 static void mac_stats_update(struct adapter *adapter)
2434 {
2435         int i;
2436
2437         for_each_port(adapter, i) {
2438                 struct net_device *dev = adapter->port[i];
2439                 struct port_info *p = netdev_priv(dev);
2440
2441                 if (netif_running(dev)) {
2442                         spin_lock(&adapter->stats_lock);
2443                         t3_mac_update_stats(&p->mac);
2444                         spin_unlock(&adapter->stats_lock);
2445                 }
2446         }
2447 }
2448
2449 static void check_link_status(struct adapter *adapter)
2450 {
2451         int i;
2452
2453         for_each_port(adapter, i) {
2454                 struct net_device *dev = adapter->port[i];
2455                 struct port_info *p = netdev_priv(dev);
2456                 int link_fault;
2457
2458                 spin_lock_irq(&adapter->work_lock);
2459                 link_fault = p->link_fault;
2460                 spin_unlock_irq(&adapter->work_lock);
2461
2462                 if (link_fault) {
2463                         t3_link_fault(adapter, i);
2464                         continue;
2465                 }
2466
2467                 if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
2468                         t3_xgm_intr_disable(adapter, i);
2469                         t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2470
2471                         t3_link_changed(adapter, i);
2472                         t3_xgm_intr_enable(adapter, i);
2473                 }
2474         }
2475 }
2476
2477 static void check_t3b2_mac(struct adapter *adapter)
2478 {
2479         int i;
2480
2481         if (!rtnl_trylock())    /* synchronize with ifdown */
2482                 return;
2483
2484         for_each_port(adapter, i) {
2485                 struct net_device *dev = adapter->port[i];
2486                 struct port_info *p = netdev_priv(dev);
2487                 int status;
2488
2489                 if (!netif_running(dev))
2490                         continue;
2491
2492                 status = 0;
2493                 if (netif_carrier_ok(dev))      /* netif_running checked above */
2494                         status = t3b2_mac_watchdog_task(&p->mac);
2495                 if (status == 1)
2496                         p->mac.stats.num_toggled++;
2497                 else if (status == 2) {
2498                         struct cmac *mac = &p->mac;
2499
2500                         t3_mac_set_mtu(mac, dev->mtu);
2501                         t3_mac_set_address(mac, 0, dev->dev_addr);
2502                         cxgb_set_rxmode(dev);
2503                         t3_link_start(&p->phy, mac, &p->link_config);
2504                         t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2505                         t3_port_intr_enable(adapter, p->port_id);
2506                         p->mac.stats.num_resets++;
2507                 }
2508         }
2509         rtnl_unlock();
2510 }
2511
2512
2513 static void t3_adap_check_task(struct work_struct *work)
2514 {
2515         struct adapter *adapter = container_of(work, struct adapter,
2516                                                adap_check_task.work);
2517         const struct adapter_params *p = &adapter->params;
2518         int port;
2519         unsigned int v, status, reset;
2520
2521         adapter->check_task_cnt++;
2522
2523         check_link_status(adapter);
2524
2525         /* Accumulate MAC stats if needed */
2526         if (!p->linkpoll_period ||
2527             (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2528             p->stats_update_period) {
2529                 mac_stats_update(adapter);
2530                 adapter->check_task_cnt = 0;
2531         }
2532
2533         if (p->rev == T3_REV_B2)
2534                 check_t3b2_mac(adapter);
2535
2536         /*
2537          * Scan the XGMAC's to check for various conditions which we want to
2538          * monitor in a periodic polling manner rather than via an interrupt
2539          * condition.  This is used for conditions which would otherwise flood
2540          * the system with interrupts and we only really need to know that the
2541          * conditions are "happening" ...  For each condition we count the
2542          * detection of the condition and reset it for the next polling loop.
2543          */
2544         for_each_port(adapter, port) {
2545                 struct cmac *mac =  &adap2pinfo(adapter, port)->mac;
2546                 u32 cause;
2547
2548                 cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
2549                 reset = 0;
2550                 if (cause & F_RXFIFO_OVERFLOW) {
2551                         mac->stats.rx_fifo_ovfl++;
2552                         reset |= F_RXFIFO_OVERFLOW;
2553                 }
2554
2555                 t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
2556         }
2557
2558         /*
2559          * We do the same as above for FL_EMPTY interrupts.
2560          */
2561         status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2562         reset = 0;
2563
2564         if (status & F_FLEMPTY) {
2565                 struct sge_qset *qs = &adapter->sge.qs[0];
2566                 int i = 0;
2567
2568                 reset |= F_FLEMPTY;
2569
2570                 v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
2571                     0xffff;
2572
2573                 while (v) {     /* bits alternate FL0/FL1 per qset */
2574                         qs->fl[i].empty += (v & 1);
2575                         if (i)
2576                                 qs++;
2577                         i ^= 1;
2578                         v >>= 1;
2579                 }
2580         }
2581
2582         t3_write_reg(adapter, A_SG_INT_CAUSE, reset);
2583
2584         /* Schedule the next check update if any port is active. */
2585         spin_lock_irq(&adapter->work_lock);
2586         if (adapter->open_device_map & PORT_MASK)
2587                 schedule_chk_task(adapter);
2588         spin_unlock_irq(&adapter->work_lock);
2589 }
2590
2591 /*
2592  * Processes external (PHY) interrupts in process context.
2593  */
2594 static void ext_intr_task(struct work_struct *work)
2595 {
2596         struct adapter *adapter = container_of(work, struct adapter,
2597                                                ext_intr_handler_task);
2598         int i;
2599
2600         /* Disable link fault interrupts */
2601         for_each_port(adapter, i) {
2602                 struct net_device *dev = adapter->port[i];
2603                 struct port_info *p = netdev_priv(dev);
2604
2605                 t3_xgm_intr_disable(adapter, i);
2606                 t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2607         }
2608
2609         t3_phy_intr_handler(adapter);
2610
2611         /* Re-enable link fault interrupts */
2612         for_each_port(adapter, i)
2613                 t3_xgm_intr_enable(adapter, i);
2614
2615         /* Now reenable external interrupts */
2616         spin_lock_irq(&adapter->work_lock);
2617         if (adapter->slow_intr_mask) {
2618                 adapter->slow_intr_mask |= F_T3DBG;
2619                 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2620                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2621                              adapter->slow_intr_mask);
2622         }
2623         spin_unlock_irq(&adapter->work_lock);
2624 }
2625
2626 /*
2627  * Interrupt-context handler for external (PHY) interrupts.
2628  */
2629 void t3_os_ext_intr_handler(struct adapter *adapter)
2630 {
2631         /*
2632          * Schedule a task to handle external interrupts as they may be slow
2633          * and we use a mutex to protect MDIO registers.  We disable PHY
2634          * interrupts in the meantime and let the task reenable them when
2635          * it's done.
2636          */
2637         spin_lock(&adapter->work_lock);
2638         if (adapter->slow_intr_mask) {
2639                 adapter->slow_intr_mask &= ~F_T3DBG;
2640                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2641                              adapter->slow_intr_mask);
2642                 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2643         }
2644         spin_unlock(&adapter->work_lock);
2645 }
2646
2647 void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
2648 {
2649         struct net_device *netdev = adapter->port[port_id];
2650         struct port_info *pi = netdev_priv(netdev);
2651
2652         spin_lock(&adapter->work_lock);
2653         pi->link_fault = 1;
2654         spin_unlock(&adapter->work_lock);
2655 }
2656
2657 static int t3_adapter_error(struct adapter *adapter, int reset)
2658 {
2659         int i, ret = 0;
2660
2661         if (is_offload(adapter) &&
2662             test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2663                 cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
2664                 offload_close(&adapter->tdev);
2665         }
2666
2667         /* Stop all ports */
2668         for_each_port(adapter, i) {
2669                 struct net_device *netdev = adapter->port[i];
2670
2671                 if (netif_running(netdev))
2672                         cxgb_close(netdev);
2673         }
2674
2675         /* Stop SGE timers */
2676         t3_stop_sge_timers(adapter);
2677
2678         adapter->flags &= ~FULL_INIT_DONE;
2679
2680         if (reset)
2681                 ret = t3_reset_adapter(adapter);
2682
2683         pci_disable_device(adapter->pdev);
2684
2685         return ret;
2686 }
2687
2688 static int t3_reenable_adapter(struct adapter *adapter)
2689 {
2690         if (pci_enable_device(adapter->pdev)) {
2691                 dev_err(&adapter->pdev->dev,
2692                         "Cannot re-enable PCI device after reset.\n");
2693                 goto err;
2694         }
2695         pci_set_master(adapter->pdev);
2696         pci_restore_state(adapter->pdev);
2697
2698         /* Free sge resources */
2699         t3_free_sge_resources(adapter);
2700
2701         if (t3_replay_prep_adapter(adapter))
2702                 goto err;
2703
2704         return 0;
2705 err:
2706         return -1;
2707 }
2708
2709 static void t3_resume_ports(struct adapter *adapter)
2710 {
2711         int i;
2712
2713         /* Restart the ports */
2714         for_each_port(adapter, i) {
2715                 struct net_device *netdev = adapter->port[i];
2716
2717                 if (netif_running(netdev)) {
2718                         if (cxgb_open(netdev)) {
2719                                 dev_err(&adapter->pdev->dev,
2720                                         "can't bring device back up after reset\n");
2722                                 continue;
2723                         }
2724                 }
2725         }
2726
2727         if (is_offload(adapter) && !ofld_disable)
2728                 cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
2729 }
2730
2731 /*
2732  * Processes a fatal error:
2733  * Bring the ports down, reset the chip, bring the ports back up.
2734  */
2735 static void fatal_error_task(struct work_struct *work)
2736 {
2737         struct adapter *adapter = container_of(work, struct adapter,
2738                                                fatal_error_handler_task);
2739         int err = 0;
2740
2741         rtnl_lock();
2742         err = t3_adapter_error(adapter, 1);
2743         if (!err)
2744                 err = t3_reenable_adapter(adapter);
2745         if (!err)
2746                 t3_resume_ports(adapter);
2747
2748         CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
2749         rtnl_unlock();
2750 }
2751
2752 void t3_fatal_err(struct adapter *adapter)
2753 {
2754         unsigned int fw_status[4];
2755
2756         if (adapter->flags & FULL_INIT_DONE) {
2757                 t3_sge_stop(adapter);
2758                 t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
2759                 t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
2760                 t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
2761                 t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
2762
2763                 spin_lock(&adapter->work_lock);
2764                 t3_intr_disable(adapter);
2765                 queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
2766                 spin_unlock(&adapter->work_lock);
2767         }
2768         CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2769         if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2770                 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2771                          fw_status[0], fw_status[1],
2772                          fw_status[2], fw_status[3]);
2773 }
2774
2775 /**
2776  * t3_io_error_detected - called when PCI error is detected
2777  * @pdev: Pointer to PCI device
2778  * @state: The current pci connection state
2779  *
2780  * This function is called after a PCI bus error affecting
2781  * this device has been detected.
2782  */
2783 static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
2784                                              pci_channel_state_t state)
2785 {
2786         struct adapter *adapter = pci_get_drvdata(pdev);
2787         int ret;
2788
2789         if (state == pci_channel_io_perm_failure)
2790                 return PCI_ERS_RESULT_DISCONNECT;
2791
2792         ret = t3_adapter_error(adapter, 0);
2793
2794         /* Request a slot reset. */
2795         return PCI_ERS_RESULT_NEED_RESET;
2796 }
2797
2798 /**
2799  * t3_io_slot_reset - called after the pci bus has been reset.
2800  * @pdev: Pointer to PCI device
2801  *
2802  * Restart the card from scratch, as if from a cold-boot.
2803  */
2804 static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
2805 {
2806         struct adapter *adapter = pci_get_drvdata(pdev);
2807
2808         if (!t3_reenable_adapter(adapter))
2809                 return PCI_ERS_RESULT_RECOVERED;
2810
2811         return PCI_ERS_RESULT_DISCONNECT;
2812 }
2813
2814 /**
2815  * t3_io_resume - called when traffic can start flowing again.
2816  * @pdev: Pointer to PCI device
2817  *
2818  * This callback is called when the error recovery driver tells us that
2819  * it's OK to resume normal operation.
2820  */
2821 static void t3_io_resume(struct pci_dev *pdev)
2822 {
2823         struct adapter *adapter = pci_get_drvdata(pdev);
2824
2825         CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
2826                  t3_read_reg(adapter, A_PCIE_PEX_ERR));
2827
2828         t3_resume_ports(adapter);
2829 }
2830
2831 static struct pci_error_handlers t3_err_handler = {
2832         .error_detected = t3_io_error_detected,
2833         .slot_reset = t3_io_slot_reset,
2834         .resume = t3_io_resume,
2835 };
2836
2837 /*
2838  * Set the number of qsets based on the number of CPUs and the number of ports,
2839  * not to exceed the number of available qsets, assuming there are enough qsets
2840  * per port in HW.
2841  */
2842 static void set_nqsets(struct adapter *adap)
2843 {
2844         int i, j = 0;
2845         int num_cpus = num_online_cpus();
2846         int hwports = adap->params.nports;
2847         int nqsets = adap->msix_nvectors - 1;
2848
2849         if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
2850                 if (hwports == 2 &&
2851                     (hwports * nqsets > SGE_QSETS ||
2852                      num_cpus >= nqsets / hwports))
2853                         nqsets /= hwports;
2854                 if (nqsets > num_cpus)
2855                         nqsets = num_cpus;
2856                 if (nqsets < 1 || hwports == 4)
2857                         nqsets = 1;
2858         } else
2859                 nqsets = 1;
2860
2861         for_each_port(adap, i) {
2862                 struct port_info *pi = adap2pinfo(adap, i);
2863
2864                 pi->first_qset = j;
2865                 pi->nqsets = nqsets;
2866                 j = pi->first_qset + nqsets;
2867
2868                 dev_info(&adap->pdev->dev,
2869                          "Port %d using %d queue sets.\n", i, nqsets);
2870         }
2871 }
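
/*
 * Worked example (illustrative, assuming SGE_QSETS == 8 as on T3): a
 * two-port adapter granted the full SGE_QSETS + 1 = 9 MSI-X vectors
 * starts from nqsets = 8; the hwports == 2 rule halves that to 4
 * (2 * 8 > SGE_QSETS), so with at least 4 online CPUs each port ends
 * up with 4 queue sets, at first_qset 0 and 4 respectively.
 */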
2872
2873 static int __devinit cxgb_enable_msix(struct adapter *adap)
2874 {
2875         struct msix_entry entries[SGE_QSETS + 1];
2876         int vectors;
2877         int i, err;
2878
2879         vectors = ARRAY_SIZE(entries);
2880         for (i = 0; i < vectors; ++i)
2881                 entries[i].entry = i;
2882
2883         while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0)
2884                 vectors = err;
2885
2886         if (err < 0)
2887                 pci_disable_msix(adap->pdev);
2888
2889         if (!err && vectors < (adap->params.nports + 1)) {
2890                 pci_disable_msix(adap->pdev);
2891                 err = -1;
2892         }
2893
2894         if (!err) {
2895                 for (i = 0; i < vectors; ++i)
2896                         adap->msix_info[i].vec = entries[i].vector;
2897                 adap->msix_nvectors = vectors;
2898         }
2899
2900         return err;
2901 }
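
/*
 * Note on the retry loop above: pci_enable_msix() in this kernel
 * returns a positive count of the vectors that could be allocated, so
 * the loop retries with that smaller number until it succeeds (0) or
 * fails outright (< 0).  At least nports + 1 vectors are required: one
 * queue set per port plus entry 0 for slow-path interrupts.
 */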
2902
2903 static void __devinit print_port_info(struct adapter *adap,
2904                                       const struct adapter_info *ai)
2905 {
2906         static const char *pci_variant[] = {
2907                 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
2908         };
2909
2910         int i;
2911         char buf[80];
2912
2913         if (is_pcie(adap))
2914                 snprintf(buf, sizeof(buf), "%s x%d",
2915                          pci_variant[adap->params.pci.variant],
2916                          adap->params.pci.width);
2917         else
2918                 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2919                          pci_variant[adap->params.pci.variant],
2920                          adap->params.pci.speed, adap->params.pci.width);
2921
2922         for_each_port(adap, i) {
2923                 struct net_device *dev = adap->port[i];
2924                 const struct port_info *pi = netdev_priv(dev);
2925
2926                 if (!test_bit(i, &adap->registered_device_map))
2927                         continue;
2928                 printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
2929                        dev->name, ai->desc, pi->phy.desc,
2930                        is_offload(adap) ? "R" : "", adap->params.rev, buf,
2931                        (adap->flags & USING_MSIX) ? " MSI-X" :
2932                        (adap->flags & USING_MSI) ? " MSI" : "");
2933                 if (adap->name == dev->name && adap->params.vpd.mclk)
2934                         printk(KERN_INFO
2935                                "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
2936                                adap->name, t3_mc7_size(&adap->cm) >> 20,
2937                                t3_mc7_size(&adap->pmtx) >> 20,
2938                                t3_mc7_size(&adap->pmrx) >> 20,
2939                                adap->params.vpd.sn);
2940         }
2941 }
2942
static const struct net_device_ops cxgb_netdev_ops = {
	.ndo_open		= cxgb_open,
	.ndo_stop		= cxgb_close,
	.ndo_start_xmit		= t3_eth_xmit,
	.ndo_get_stats		= cxgb_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= cxgb_set_rxmode,
	.ndo_do_ioctl		= cxgb_ioctl,
	.ndo_change_mtu		= cxgb_change_mtu,
	.ndo_set_mac_address	= cxgb_set_mac_addr,
	.ndo_vlan_rx_register	= vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cxgb_netpoll,
#endif
};

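/*
 * Probe routine.  Claims the PCI resources, maps BAR0, sets up DMA,
 * allocates the adapter and one net device per port, registers the
 * ports, and selects the interrupt mode (MSI-X, MSI, or legacy INTx).
 * On failure everything acquired so far is released in reverse order.
 */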
static int __devinit init_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	static int version_printed;

	int i, err, pci_using_dac = 0;
	resource_size_t mmio_start, mmio_len;
	const struct adapter_info *ai;
	struct adapter *adapter = NULL;
	struct port_info *pi;

	if (!version_printed) {
		printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
		++version_printed;
	}

	if (!cxgb3_wq) {
		/* One workqueue is shared by all cxgb3 adapters. */
		cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
		if (!cxgb3_wq) {
			printk(KERN_ERR DRV_NAME
			       ": cannot initialize work queue\n");
			return -ENOMEM;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	/* Prefer 64-bit DMA, falling back to a 32-bit mask if unavailable. */
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_disable_device;
		}
	} else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		goto out_disable_device;
	}

	pci_set_master(pdev);
	pci_save_state(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	ai = t3_get_adapter_info(ent->driver_data);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_disable_device;
	}

	adapter->regs = ioremap_nocache(mmio_start, mmio_len);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->pdev = pdev;
	adapter->name = pci_name(pdev);
	adapter->msg_enable = dflt_msg_enable;
	adapter->mmio_len = mmio_len;

	mutex_init(&adapter->mdio_lock);
	spin_lock_init(&adapter->work_lock);
	spin_lock_init(&adapter->stats_lock);

	INIT_LIST_HEAD(&adapter->adapter_list);
	INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
	INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
	INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);

	/* Allocate one net device per port, each with SGE_QSETS TX queues. */
	for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->rx_offload = T3_RX_CSUM | T3_LRO;
		pi->port_id = i;
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		netdev->irq = pdev->irq;
		netdev->mem_start = mmio_start;
		netdev->mem_end = mmio_start + mmio_len - 1;
		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
		netdev->features |= NETIF_F_GRO;
		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;

		netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
		netdev->netdev_ops = &cxgb_netdev_ops;
		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
	}

	pci_set_drvdata(pdev, adapter);
	if (t3_prep_adapter(adapter, ai, 1) < 0) {
		err = -ENODEV;
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		err = register_netdev(adapter->port[i]);
		if (err)
			dev_warn(&pdev->dev,
				 "cannot register net device %s, skipping\n",
				 adapter->port[i]->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i]->name;

			__set_bit(i, &adapter->registered_device_map);
		}
	}
	if (!adapter->registered_device_map) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}

	/* Driver is ready; reflect it on the LEDs. */
	t3_led_ready(adapter);

	if (is_offload(adapter)) {
		__set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
		cxgb3_adapter_ofld(adapter);
	}

	/* See what interrupts we'll be using. */
	if (msi > 1 && cxgb_enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	set_nqsets(adapter);

	err = sysfs_create_group(&adapter->port[0]->dev.kobj,
				 &cxgb3_attr_group);
	if (err)
		dev_warn(&pdev->dev, "cannot create sysfs attributes\n");

	print_port_info(adapter, ai);
	return 0;

out_free_dev:
	iounmap(adapter->regs);
	for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
		if (adapter->port[i])
			free_netdev(adapter->port[i]);

out_free_adapter:
	kfree(adapter);

out_disable_device:
	pci_disable_device(pdev);
out_release_regions:
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

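/*
 * Remove routine.  Quiesces the SGE, detaches the offload device if it
 * was attached, unregisters the ports, and releases everything that
 * init_one() acquired, in roughly the reverse order.
 */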
static void __devexit remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (adapter) {
		int i;

		t3_sge_stop(adapter);
		sysfs_remove_group(&adapter->port[0]->dev.kobj,
				   &cxgb3_attr_group);

		if (is_offload(adapter)) {
			cxgb3_adapter_unofld(adapter);
			if (test_bit(OFFLOAD_DEVMAP_BIT,
				     &adapter->open_device_map))
				offload_close(&adapter->tdev);
		}

		for_each_port(adapter, i)
			if (test_bit(i, &adapter->registered_device_map))
				unregister_netdev(adapter->port[i]);

		t3_stop_sge_timers(adapter);
		t3_free_sge_resources(adapter);
		cxgb_disable_msi(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i])
				free_netdev(adapter->port[i]);

		iounmap(adapter->regs);
		kfree(adapter);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}

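/* PCI driver glue; t3_err_handler provides the PCI error recovery hooks. */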
static struct pci_driver driver = {
	.name = DRV_NAME,
	.id_table = cxgb3_pci_tbl,
	.probe = init_one,
	.remove = __devexit_p(remove_one),
	.err_handler = &t3_err_handler,
};

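/*
 * Module init/exit.  The shared workqueue is created lazily by the
 * first probe in init_one() but destroyed here, as probe may never run.
 */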
static int __init cxgb3_init_module(void)
{
	cxgb3_offload_init();

	return pci_register_driver(&driver);
}

static void __exit cxgb3_cleanup_module(void)
{
	pci_unregister_driver(&driver);
	if (cxgb3_wq)
		destroy_workqueue(cxgb3_wq);
}

module_init(cxgb3_init_module);
module_exit(cxgb3_cleanup_module);