/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
 
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <asm/uaccess.h>
 
#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"
 
enum {
	MAX_TXQ_ENTRIES = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES = 16384,
	MAX_RX_BUFFERS = 16384,
	MAX_RX_JUMBO_BUFFERS = 16384,
	MIN_TXQ_ENTRIES = 4,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES = 32,
	MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)
 
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
 
static const struct pci_device_id cxgb3_pci_tbl[] = {
	CH_DEVICE(0x20, 0),	/* PE9000 */
	CH_DEVICE(0x21, 1),	/* T302E */
	CH_DEVICE(0x22, 2),	/* T310E */
	CH_DEVICE(0x23, 3),	/* T320X */
	CH_DEVICE(0x24, 1),	/* T302X */
	CH_DEVICE(0x25, 3),	/* T320E */
	CH_DEVICE(0x26, 2),	/* T310X */
	CH_DEVICE(0x30, 2),	/* T3B10 */
	CH_DEVICE(0x31, 3),	/* T3B20 */
	CH_DEVICE(0x32, 1),	/* T3B02 */
	{0,}
};
 
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
 
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
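
/*
 * Illustrative sketch (not the driver's actual probe path) of the fallback
 * order described above.  example_pick_intr_scheme() and its vector count
 * are hypothetical; the real driver sizes and requests its vectors during
 * PCI probe.
 */
static void example_pick_intr_scheme(struct adapter *adap, int nvec)
{
	struct msix_entry entries[SGE_QSETS + 1];
	int i;

	nvec = min(nvec, SGE_QSETS + 1);	/* one slot per qset + errors */
	for (i = 0; i < nvec; i++)
		entries[i].entry = i;

	if (msi > 1 && pci_enable_msix(adap->pdev, entries, nvec) == 0) {
		adap->flags |= USING_MSIX;	/* msi = 2: MSI-X allowed */
		return;
	}
	if (msi > 0 && pci_enable_msi(adap->pdev) == 0) {
		adap->flags |= USING_MSI;	/* msi >= 1: plain MSI */
		return;
	}
	/* msi = 0, or both failed: legacy INTx pin interrupts remain */
}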
 
/*
 * The driver enables offload by default.
 * To disable it, use ofld_disable = 1.
 */
static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
 
/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
static struct workqueue_struct *cxgb3_wq;
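
/*
 * Sketch of the init/exit pairing implied above (the real module init also
 * registers the PCI driver; function names here are illustrative only).
 * A dedicated single-threaded queue keeps this driver's work items off
 * keventd and away from linkwatch.
 */
static int __init example_wq_init(void)
{
	cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
	if (!cxgb3_wq) {
		printk(KERN_ERR "%s: cannot initialize work queue\n",
		       DRV_NAME);
		return -ENOMEM;
	}
	return 0;
}

static void __exit example_wq_exit(void)
{
	destroy_workqueue(cxgb3_wq);
}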
 
/**
 *	link_report - show link status and link speed/duplex
 *	@dev: the port whose settings are to be reported
 *
 *	Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		printk(KERN_INFO "%s: link down\n", dev->name);
	else {
		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_config.speed) {
		case SPEED_10000: s = "10Gbps"; break;
		case SPEED_1000:  s = "1000Mbps"; break;
		case SPEED_100:   s = "100Mbps"; break;
		}

		printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
		       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
	}
}
 
/**
 *	t3_os_link_changed - handle link status changes
 *	@adapter: the adapter associated with the link change
 *	@port_id: the port index whose link status has changed
 *	@link_stat: the new status of the link
 *	@speed: the new speed setting
 *	@duplex: the new duplex setting
 *	@pause: the new flow-control setting
 *
 *	This is the OS-dependent handler for link status changes.  The OS
 *	neutral handler takes care of most of the processing for these events,
 *	then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
{
	struct net_device *dev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	/* Skip changes from disabled ports. */
	if (!netif_running(dev))
		return;

	if (link_stat != netif_carrier_ok(dev)) {
		if (link_stat) {
			t3_mac_enable(mac, MAC_DIRECTION_RX);
			netif_carrier_on(dev);
		} else {
			netif_carrier_off(dev);
			pi->phy.ops->power_down(&pi->phy, 1);
			t3_mac_disable(mac, MAC_DIRECTION_RX);
			t3_link_start(&pi->phy, mac, &pi->link_config);
		}

		link_report(dev);
	}
}
 
/**
 *	t3_os_phymod_changed - handle PHY module changes
 *	@adap: the adapter associated with the PHY reporting the module change
 *	@port_id: the port index of the PHY's port
 *
 *	This is the OS-dependent handler for PHY module changes.  It is
 *	invoked when a PHY module is removed or inserted for any OS-specific
 *	processing.
 */
void t3_os_phymod_changed(struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->phy.modtype == phy_modtype_none)
		printk(KERN_INFO "%s: PHY module unplugged\n", dev->name);
	else
		printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name,
		       mod_str[pi->phy.modtype]);
}
 
static void cxgb_set_rxmode(struct net_device *dev)
{
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);

	init_rx_mode(&rm, dev, dev->mc_list);
	t3_mac_set_rx_mode(&pi->mac, &rm);
}
 
/**
 *	link_start - enable a port
 *	@dev: the device to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	init_rx_mode(&rm, dev, dev->mc_list);
	t3_mac_reset(mac);
	t3_mac_set_mtu(mac, dev->mtu);
	t3_mac_set_address(mac, 0, dev->dev_addr);
	t3_mac_set_rx_mode(mac, &rm);
	t3_link_start(&pi->phy, mac, &pi->link_config);
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}
 
static inline void cxgb_disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}
 
/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
	t3_slow_intr_handler(cookie);
	return IRQ_HANDLED;
}
 
/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

	snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
	adap->msix_info[0].desc[n] = 0;

	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++) {
			snprintf(adap->msix_info[msi_idx].desc, n,
				 "%s-%d", d->name, pi->first_qset + i);
			adap->msix_info[msi_idx].desc[n] = 0;
		}
	}
}
 
static int request_msix_data_irqs(struct adapter *adap)
{
	int i, j, err, qidx = 0;

	for_each_port(adap, i) {
		int nqsets = adap2pinfo(adap, i)->nqsets;

		for (j = 0; j < nqsets; ++j) {
			err = request_irq(adap->msix_info[qidx + 1].vec,
					  t3_intr_handler(adap,
							  adap->sge.qs[qidx].
							  rspq.polling), 0,
					  adap->msix_info[qidx + 1].desc,
					  &adap->sge.qs[qidx]);
			if (err) {
				while (--qidx >= 0)
					free_irq(adap->msix_info[qidx + 1].vec,
						 &adap->sge.qs[qidx]);
				return err;
			}
			qidx++;
		}
	}
	return 0;
}
 
static void free_irq_resources(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		int i, n = 0;

		free_irq(adapter->msix_info[0].vec, adapter);
		for_each_port(adapter, i)
			n += adap2pinfo(adapter, i)->nqsets;

		for (i = 0; i < n; ++i)
			free_irq(adapter->msix_info[i + 1].vec,
				 &adapter->sge.qs[i]);
	} else
		free_irq(adapter->pdev->irq, adapter);
}
 
static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
			      unsigned long n)
{
	int attempts = 5;

	while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
		if (!--attempts)
			return -ETIMEDOUT;
		msleep(10);
	}
	return 0;
}
 
static int init_tp_parity(struct adapter *adap)
{
	int i;
	struct sk_buff *skb;
	struct cpl_set_tcb_field *greq;
	unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

	t3_tp_set_offload_mode(adap, 1);

	for (i = 0; i < 16; i++) {
		struct cpl_smt_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
		req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
		req->iff = i;
		t3_mgmt_tx(adap, skb);
	}

	for (i = 0; i < 2048; i++) {
		struct cpl_l2t_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
		req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
		req->params = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
	}

	for (i = 0; i < 2048; i++) {
		struct cpl_rte_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
		req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
		req->l2t_idx = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
	}

	skb = alloc_skb(sizeof(*greq), GFP_KERNEL | __GFP_NOFAIL);
	greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
	memset(greq, 0, sizeof(*greq));
	greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
	greq->mask = cpu_to_be64(1);
	t3_mgmt_tx(adap, skb);

	i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
	t3_tp_set_offload_mode(adap, 0);
	return i;
}
 
/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS to distribute packets to multiple receive queues.  We
 *	configure the RSS CPU lookup table to distribute to the number of HW
 *	receive queues, and the response queue lookup table to narrow that
 *	down to the response queues actually configured for each port.
 *	We always configure the RSS mapping for two ports since the mapping
 *	table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
	int i;
	unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
	unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
	u8 cpus[SGE_QSETS + 1];
	u16 rspq_map[RSS_TABLE_SIZE];

	for (i = 0; i < SGE_QSETS; ++i)
		cpus[i] = i;
	cpus[SGE_QSETS] = 0xff;	/* terminator */

	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
		rspq_map[i] = i % nq0;
		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
	}

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}
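
/*
 * Worked example of the mapping above (illustrative; assumes a 64-entry
 * rspq table): with nq0 = 2 queue sets on port 0 and nq1 = 2 on port 1,
 *
 *	rspq_map[0..31]  = 0, 1, 0, 1, ...	(i % nq0)
 *	rspq_map[32..63] = 2, 3, 2, 3, ...	((i % nq1) + nq0)
 *
 * so each port's half of the hash buckets round-robins over that port's
 * own queue sets, with port 1's entries offset past port 0's.
 */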
 
static void init_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++) {
		struct sge_qset *qs = &adap->sge.qs[i];

		if (qs->adap)
			netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
				       64);
	}

	/*
	 * netif_napi_add() can be called only once per napi_struct because it
	 * adds each new napi_struct to a list.  Be careful not to call it a
	 * second time, e.g., during EEH recovery, by making a note of it.
	 */
	adap->flags |= NAPI_INIT;
}
 
/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_disable(&adap->sge.qs[i].napi);
}

static void enable_all_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_enable(&adap->sge.qs[i].napi);
}
 
/**
 *	setup_sge_qsets - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
	int i, j, err, irq_idx = 0, qset_idx = 0;
	unsigned int ntxq = SGE_TXQ_PER_SET;

	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
		irq_idx = -1;

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);

		pi->qs = &adap->sge.qs[pi->first_qset];
		for (j = pi->first_qset; j < pi->first_qset + pi->nqsets;
		     ++j, ++qset_idx) {
			if (!pi->rx_csum_offload)
				adap->params.sge.qset[qset_idx].lro = 0;
			err = t3_sge_alloc_qset(adap, qset_idx, 1,
				(adap->flags & USING_MSIX) ? qset_idx + 1 :
							     irq_idx,
				&adap->params.sge.qset[qset_idx], ntxq, dev);
			if (err) {
				t3_stop_sge_timers(adap);
				t3_free_sge_resources(adap);
				return err;
			}
		}
	}

	return 0;
}
 
static ssize_t attr_show(struct device *d, char *buf,
			 ssize_t(*format) (struct net_device *, char *))
{
	ssize_t len;

	/* Synchronize with ioctls that may shut down the device */
	rtnl_lock();
	len = (*format) (to_net_dev(d), buf);
	rtnl_unlock();
	return len;
}

static ssize_t attr_store(struct device *d,
			  const char *buf, size_t len,
			  ssize_t(*set) (struct net_device *, unsigned int),
			  unsigned int min_val, unsigned int max_val)
{
	char *endp;
	ssize_t ret;
	unsigned int val;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val < min_val || val > max_val)
		return -EINVAL;

	rtnl_lock();
	ret = (*set) (to_net_dev(d), val);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}
 
#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
	struct port_info *pi = netdev_priv(dev); \
	struct adapter *adap = pi->adapter; \
	return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return attr_show(d, buf, format_##name); \
}
 
static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val && adap->params.rev == 0)
		return -EINVAL;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	    min_tids)
		return -EINVAL;
	adap->params.mc5.nfilters = val;
	return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nfilters, 0, ~0);
}
 
static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
	    MC5_MIN_TIDS)
		return -EINVAL;
	adap->params.mc5.nservers = val;
	return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nservers, 0, ~0);
}
 
#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
	&dev_attr_cam_size.attr,
	&dev_attr_nfilters.attr,
	&dev_attr_nservers.attr,
	NULL
};

static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
 
static ssize_t tm_attr_show(struct device *d,
			    char *buf, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int v, addr, bpt, cpt;
	ssize_t len;

	addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
	rtnl_lock();	/* synchronize with ioctls */
	t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v >>= 16;
	bpt = (v >> 8) & 0xff;
	cpt = v & 0xff;
	if (!cpt)
		len = sprintf(buf, "disabled\n");
	else {
		v = (adap->params.vpd.cclk * 1000) / cpt;
		len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
	}
	rtnl_unlock();
	return len;
}
 
static ssize_t tm_attr_store(struct device *d,
			     const char *buf, size_t len, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int val;
	char *endp;
	ssize_t ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val > 10000000)
		return -EINVAL;

	rtnl_lock();
	ret = t3_config_sched(adap, val, sched);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
			    const char *buf, size_t len) \
{ \
	return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);
 
static struct attribute *offload_attrs[] = {
	&dev_attr_sched0.attr,
	&dev_attr_sched1.attr,
	&dev_attr_sched2.attr,
	&dev_attr_sched3.attr,
	&dev_attr_sched4.attr,
	&dev_attr_sched5.attr,
	&dev_attr_sched6.attr,
	&dev_attr_sched7.attr,
	NULL
};

static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
 
/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = t3_offload_tx(tdev, skb);
	local_bh_enable();
	return ret;
}

static int write_smt_entry(struct adapter *adapter, int idx)
{
	struct cpl_smt_write_req *req;
	struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;

	req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
	req->mtu_idx = NMTUS - 1;	/* should be 0 but there's a T3 bug */
	req->iff = idx;
	memset(req->src_mac1, 0, sizeof(req->src_mac1));
	memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
	skb->priority = 1;
	offload_tx(&adapter->tdev, skb);
	return 0;
}
 
static int init_smt(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i)
		write_smt_entry(adapter, i);
	return 0;
}

static void init_port_mtus(struct adapter *adapter)
{
	unsigned int mtus = adapter->port[0]->mtu;

	if (adapter->port[1])
		mtus |= adapter->port[1]->mtu << 16;
	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}
 
static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
			     int hi, int port)
{
	struct sk_buff *skb;
	struct mngt_pktsched_wr *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
	req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
	req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
	req->sched = sched;
	req->idx = qidx;
	req->min = lo;
	req->max = hi;
	req->binding = port;
	ret = t3_mgmt_tx(adap, skb);

	return ret;
}

static int bind_qsets(struct adapter *adap)
{
	int i, j, err = 0;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		for (j = 0; j < pi->nqsets; ++j) {
			int ret = send_pktsched_cmd(adap, 1,
						    pi->first_qset + j, -1,
						    -1, i);
			if (ret)
				err = ret;
		}
	}

	return err;
}
 
#define FW_FNAME "t3fw-%d.%d.%d.bin"
#define TPSRAM_NAME "t3%c_protocol_sram-%d.%d.%d.bin"
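
/*
 * For example, with firmware version 7.0.0 FW_FNAME expands to
 * "t3fw-7.0.0.bin", and a rev-B adapter would request
 * "t3b_protocol_sram-7.0.0.bin" (version numbers illustrative; the real
 * ones come from the FW_VERSION_* and TP_VERSION_* constants used below).
 */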
 
static int upgrade_fw(struct adapter *adap)
{
	int ret;
	char buf[64];
	const struct firmware *fw;
	struct device *dev = &adap->pdev->dev;

	snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
		 FW_VERSION_MINOR, FW_VERSION_MICRO);
	ret = request_firmware(&fw, buf, dev);
	if (ret < 0) {
		dev_err(dev, "could not upgrade firmware: unable to load %s\n",
			buf);
		return ret;
	}
	ret = t3_load_fw(adap, fw->data, fw->size);
	release_firmware(fw);

	if (ret == 0)
		dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
			 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
	else
		dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
			FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

	return ret;
}
 
static inline char t3rev2char(struct adapter *adapter)
{
	char rev = 0;

	switch(adapter->params.rev) {
	case T3_REV_B:
	case T3_REV_B2:
		rev = 'b';
		break;
	case T3_REV_C:
		rev = 'c';
		break;
	}
	return rev;
}
 
static int update_tpsram(struct adapter *adap)
{
	const struct firmware *tpsram;
	char buf[64];
	struct device *dev = &adap->pdev->dev;
	int ret;
	char rev;

	rev = t3rev2char(adap);
	if (!rev)
		return 0;

	snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
		 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);

	ret = request_firmware(&tpsram, buf, dev);
	if (ret < 0) {
		dev_err(dev, "could not load TP SRAM: unable to load %s\n",
			buf);
		return ret;
	}

	ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
	if (ret)
		goto release_tpsram;

	ret = t3_set_proto_sram(adap, tpsram->data);
	if (ret == 0)
		dev_info(dev,
			 "successful update of protocol engine "
			 "to %d.%d.%d\n",
			 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	else
		dev_err(dev, "failed update of protocol engine %d.%d.%d\n",
			TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	if (ret)
		dev_err(dev, "loading protocol SRAM failed\n");

release_tpsram:
	release_firmware(tpsram);

	return ret;
}
 
/**
 *	cxgb_up - enable the adapter
 *	@adap: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err = 0;

	if (!(adap->flags & FULL_INIT_DONE)) {
		int must_load;

		err = t3_check_fw_version(adap, &must_load);
		if (err == -EINVAL) {
			err = upgrade_fw(adap);
			if (err && must_load)
				goto out;
		}

		err = t3_check_tpsram_version(adap, &must_load);
		if (err == -EINVAL) {
			err = update_tpsram(adap);
			if (err && must_load)
				goto out;
		}

		/*
		 * Clear interrupts now to catch errors if t3_init_hw fails.
		 * We clear them again later as initialization may trigger
		 * conditions that can interrupt.
		 */
		t3_intr_clear(adap);

		err = t3_init_hw(adap, 0);
		if (err)
			goto out;

		t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
		t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

		err = setup_sge_qsets(adap);
		if (err)
			goto out;

		setup_rss(adap);
		if (!(adap->flags & NAPI_INIT))
			init_napi(adap);
		adap->flags |= FULL_INIT_DONE;
	}

	t3_intr_clear(adap);

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec,
				  t3_async_intr_handler, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		err = request_msix_data_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else if ((err = request_irq(adap->pdev->irq,
				      t3_intr_handler(adap,
						      adap->sge.qs[0].rspq.
						      polling),
				      (adap->flags & USING_MSI) ?
				       0 : IRQF_SHARED,
				      adap->name, adap)))
		goto irq_err;

	enable_all_napi(adap);
	t3_sge_start(adap);
	t3_intr_enable(adap);

	if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
	    is_offload(adap) && init_tp_parity(adap) == 0)
		adap->flags |= TP_PARITY_INIT;

	if (adap->flags & TP_PARITY_INIT) {
		t3_write_reg(adap, A_TP_INT_CAUSE,
			     F_CMCACHEPERR | F_ARPLUTPERR);
		t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
	}

	if (!(adap->flags & QUEUES_BOUND)) {
		err = bind_qsets(adap);
		if (err) {
			CH_ERR(adap, "failed to bind qsets, err %d\n", err);
			t3_intr_disable(adap);
			free_irq_resources(adap);
			goto out;
		}
		adap->flags |= QUEUES_BOUND;
	}

out:
	return err;
irq_err:
	CH_ERR(adap, "request_irq failed, err %d\n", err);
	goto out;
}
 
/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
	t3_sge_stop(adapter);
	spin_lock_irq(&adapter->work_lock);	/* sync with PHY intr task */
	t3_intr_disable(adapter);
	spin_unlock_irq(&adapter->work_lock);

	free_irq_resources(adapter);
	flush_workqueue(cxgb3_wq);	/* wait for external IRQ handler */
	quiesce_rx(adapter);
}
 
static void schedule_chk_task(struct adapter *adap)
{
	unsigned int timeo;

	timeo = adap->params.linkpoll_period ?
	    (HZ * adap->params.linkpoll_period) / 10 :
	    adap->params.stats_update_period * HZ;
	if (timeo)
		queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}
 
static int offload_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct t3cdev *tdev = dev2t3cdev(dev);
	int adap_up = adapter->open_device_map & PORT_MASK;
	int err;

	if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	if (!adap_up && (err = cxgb_up(adapter)) < 0)
		return err;

	t3_tp_set_offload_mode(adapter, 1);
	tdev->lldev = adapter->port[0];
	err = cxgb3_offload_activate(adapter);
	if (err)
		goto out;

	init_port_mtus(adapter);
	t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
		     adapter->params.b_wnd,
		     adapter->params.rev == 0 ?
		     adapter->port[0]->mtu : 0xffff);
	init_smt(adapter);

	if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
		dev_dbg(&dev->dev, "cannot create sysfs group\n");

	/* Call back all registered clients */
	cxgb3_add_clients(tdev);

out:
	/* restore them in case the offload module has changed them */
	if (err) {
		t3_tp_set_offload_mode(adapter, 0);
		clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
		cxgb3_set_dummy_ops(tdev);
	}
	return err;
}
 
static int offload_close(struct t3cdev *tdev)
{
	struct adapter *adapter = tdev2adap(tdev);

	if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	/* Call back all registered clients */
	cxgb3_remove_clients(tdev);

	sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

	tdev->lldev = NULL;
	cxgb3_set_dummy_ops(tdev);
	t3_tp_set_offload_mode(adapter, 0);
	clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	cxgb3_offload_deactivate(adapter);
	return 0;
}
 
static int cxgb_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int other_ports = adapter->open_device_map & PORT_MASK;
	int err;

	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
		return err;

	set_bit(pi->port_id, &adapter->open_device_map);
	if (is_offload(adapter) && !ofld_disable) {
		err = offload_open(dev);
		if (err)
			printk(KERN_WARNING
			       "Could not initialize offload capabilities\n");
	}

	link_start(dev);
	t3_port_intr_enable(adapter, pi->port_id);
	netif_start_queue(dev);
	if (!other_ports)
		schedule_chk_task(adapter);

	return 0;
}
 
static int cxgb_close(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	t3_port_intr_disable(adapter, pi->port_id);
	netif_stop_queue(dev);
	pi->phy.ops->power_down(&pi->phy, 1);
	netif_carrier_off(dev);
	t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

	spin_lock_irq(&adapter->work_lock);	/* sync with update task */
	clear_bit(pi->port_id, &adapter->open_device_map);
	spin_unlock_irq(&adapter->work_lock);

	if (!(adapter->open_device_map & PORT_MASK))
		cancel_rearming_delayed_workqueue(cxgb3_wq,
						  &adapter->adap_check_task);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	return 0;
}
 
static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct net_device_stats *ns = &pi->netstats;
	const struct mac_stats *pstats;

	spin_lock(&adapter->stats_lock);
	pstats = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes = pstats->tx_octets;
	ns->tx_packets = pstats->tx_frames;
	ns->rx_bytes = pstats->rx_octets;
	ns->rx_packets = pstats->rx_frames;
	ns->multicast = pstats->rx_mcast_frames;

	ns->tx_errors = pstats->tx_underrun;
	ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
	    pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
	    pstats->rx_fifo_ovfl;

	/* detailed rx_errors */
	ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
	ns->rx_over_errors = 0;
	ns->rx_crc_errors = pstats->rx_fcs_errs;
	ns->rx_frame_errors = pstats->rx_symbol_errs;
	ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
	ns->rx_missed_errors = pstats->rx_cong_drops;

	/* detailed tx_errors */
	ns->tx_aborted_errors = 0;
	ns->tx_carrier_errors = 0;
	ns->tx_fifo_errors = pstats->tx_underrun;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors = 0;
	return ns;
}
 
static u32 get_msglevel(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	adapter->msg_enable = val;
}
 
static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK         ",
	"TxFramesOK         ",
	"TxMulticastFramesOK",
	"TxBroadcastFramesOK",
	"TxPauseFrames      ",
	"TxUnderrun         ",
	"TxExtUnderrun      ",

	"TxFrames64         ",
	"TxFrames65To127    ",
	"TxFrames128To255   ",
	"TxFrames256To511   ",
	"TxFrames512To1023  ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax  ",

	"RxOctetsOK         ",
	"RxFramesOK         ",
	"RxMulticastFramesOK",
	"RxBroadcastFramesOK",
	"RxPauseFrames      ",
	"RxFCSErrors        ",
	"RxSymbolErrors     ",
	"RxShortErrors      ",
	"RxJabberErrors     ",
	"RxLengthErrors     ",
	"RxFIFOoverflow     ",

	"RxFrames64         ",
	"RxFrames65To127    ",
	"RxFrames128To255   ",
	"RxFrames256To511   ",
	"RxFrames512To1023  ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax  ",

	"PhyFIFOErrors      ",
	"TSO                ",
	"VLANextractions    ",
	"VLANinsertions     ",
	"TxCsumOffload      ",
	"RxCsumGood         ",
	"LroAggregated      ",
	"LroFlushed         ",
	"LroNoDesc          ",
	"RxDrops            ",

	"CheckTXEnToggled   ",
	"CheckResets        ",
};
 
static int get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(stats_strings);
	default:
		return -EOPNOTSUPP;
	}
}
 
#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
	return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}
 
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 fw_vers = 0;
	u32 tp_vers = 0;

	spin_lock(&adapter->stats_lock);
	t3_get_fw_version(adapter, &fw_vers);
	t3_get_tp_version(adapter, &tp_vers);
	spin_unlock(&adapter->stats_lock);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(adapter->pdev));
	if (!fw_vers)
		strcpy(info->fw_version, "N/A");
	else {
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "%s %u.%u.%u TP %u.%u.%u",
			 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
			 G_FW_VERSION_MAJOR(fw_vers),
			 G_FW_VERSION_MINOR(fw_vers),
			 G_FW_VERSION_MICRO(fw_vers),
			 G_TP_VERSION_MAJOR(tp_vers),
			 G_TP_VERSION_MINOR(tp_vers),
			 G_TP_VERSION_MICRO(tp_vers));
	}
}
 
static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}

static unsigned long collect_sge_port_stats(struct adapter *adapter,
					    struct port_info *p, int idx)
{
	int i;
	unsigned long tot = 0;

	for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
		tot += adapter->sge.qs[i].port_stats[idx];
	return tot;
}
 
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	const struct mac_stats *s;

	spin_lock(&adapter->stats_lock);
	s = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	*data++ = s->tx_octets;
	*data++ = s->tx_frames;
	*data++ = s->tx_mcast_frames;
	*data++ = s->tx_bcast_frames;
	*data++ = s->tx_pause;
	*data++ = s->tx_underrun;
	*data++ = s->tx_fifo_urun;

	*data++ = s->tx_frames_64;
	*data++ = s->tx_frames_65_127;
	*data++ = s->tx_frames_128_255;
	*data++ = s->tx_frames_256_511;
	*data++ = s->tx_frames_512_1023;
	*data++ = s->tx_frames_1024_1518;
	*data++ = s->tx_frames_1519_max;

	*data++ = s->rx_octets;
	*data++ = s->rx_frames;
	*data++ = s->rx_mcast_frames;
	*data++ = s->rx_bcast_frames;
	*data++ = s->rx_pause;
	*data++ = s->rx_fcs_errs;
	*data++ = s->rx_symbol_errs;
	*data++ = s->rx_short;
	*data++ = s->rx_jabber;
	*data++ = s->rx_too_long;
	*data++ = s->rx_fifo_ovfl;

	*data++ = s->rx_frames_64;
	*data++ = s->rx_frames_65_127;
	*data++ = s->rx_frames_128_255;
	*data++ = s->rx_frames_256_511;
	*data++ = s->rx_frames_512_1023;
	*data++ = s->rx_frames_1024_1518;
	*data++ = s->rx_frames_1519_max;

	*data++ = pi->phy.fifo_errors;

	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_AGGR);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_FLUSHED);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_NO_DESC);
	*data++ = s->rx_cong_drops;

	*data++ = s->num_toggled;
	*data++ = s->num_resets;
}
 
static inline void reg_block_dump(struct adapter *ap, void *buf,
				  unsigned int start, unsigned int end)
{
	u32 *p = buf + start;

	for (; start <= end; start += sizeof(u32))
		*p++ = t3_read_reg(ap, start);
}
 
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *ap = pi->adapter;

	/*
	 * Version scheme:
	 * bits 0..9: chip version
	 * bits 10..15: chip revision
	 * bit 31: set for PCIe cards
	 */
	regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);

	/*
	 * We skip the MAC statistics registers because they are clear-on-read.
	 * Also reading multi-register stats would need to synchronize with the
	 * periodic mac stats accumulation.  Hard to justify the complexity.
	 */
	memset(buf, 0, T3_REGMAP_SIZE);
	reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
	reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
	reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
	reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
	reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
	reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
		       XGM_REG(A_XGM_SERDES_STAT3, 1));
	reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
		       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}
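
/*
 * Illustrative decoding of the version word built in get_regs() above,
 * following the bit layout documented there:
 *
 *	chip version: regs->version & 0x3ff		(bits 0..9)
 *	chip revision: (regs->version >> 10) & 0x3f	(bits 10..15)
 *	PCIe card:     (regs->version >> 31) & 1	(bit 31)
 */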
 
static int restart_autoneg(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;
	if (p->link_config.autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	p->phy.ops->autoneg_restart(&p->phy);
	return 0;
}
 
static int cxgb3_phys_id(struct net_device *dev, u32 data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int i;

	if (data == 0)
		data = 2;

	for (i = 0; i < data * 2; i++) {
		t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
				 (i & 1) ? F_GPIO0_OUT_VAL : 0);
		if (msleep_interruptible(500))
			break;
	}
	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
			 F_GPIO0_OUT_VAL);
	return 0;
}
 
static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port_info *p = netdev_priv(dev);

	cmd->supported = p->link_config.supported;
	cmd->advertising = p->link_config.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = p->link_config.speed;
		cmd->duplex = p->link_config.duplex;
	} else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
	cmd->phy_address = p->phy.addr;
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->autoneg = p->link_config.autoneg;
	return 0;
}
 
static int speed_duplex_to_caps(int speed, int duplex)
{
	int cap = 0;

	switch (speed) {
	case SPEED_10:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10baseT_Full;
		else
			cap = SUPPORTED_10baseT_Half;
		break;
	case SPEED_100:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_100baseT_Full;
		else
			cap = SUPPORTED_100baseT_Half;
		break;
	case SPEED_1000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_1000baseT_Full;
		else
			cap = SUPPORTED_1000baseT_Half;
		break;
	case SPEED_10000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10000baseT_Full;
	}
	return cap;
}

#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
		      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
		      ADVERTISED_10000baseT_Full)
 
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	int cap;
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (!(lc->supported & SUPPORTED_Autoneg)) {
		/*
		 * PHY offers a single speed/duplex.  See if that's what's
		 * being requested.
		 */
		if (cmd->autoneg == AUTONEG_DISABLE) {
			cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
			if (lc->supported & cap)
				return 0;
		}
		return -EINVAL;
	}

	if (cmd->autoneg == AUTONEG_DISABLE) {
		int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

		if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
			return -EINVAL;
		lc->requested_speed = cmd->speed;
		lc->requested_duplex = cmd->duplex;
		lc->advertising = 0;
	} else {
		cmd->advertising &= ADVERTISED_MASK;
		cmd->advertising &= lc->supported;
		if (!cmd->advertising)
			return -EINVAL;
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->autoneg;
	if (netif_running(dev))
		t3_link_start(&p->phy, &p->mac, lc);
	return 0;
}
 
static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);

	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}
 
static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & SUPPORTED_Autoneg)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (lc->autoneg == AUTONEG_ENABLE) {
		if (netif_running(dev))
			t3_link_start(&p->phy, &p->mac, lc);
	} else {
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		if (netif_running(dev))
			t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
	}
	return 0;
}
 
static u32 get_rx_csum(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	return p->rx_csum_offload;
}

static int set_rx_csum(struct net_device *dev, u32 data)
{
	struct port_info *p = netdev_priv(dev);

	p->rx_csum_offload = data;
	if (!data) {
		struct adapter *adap = p->adapter;
		int i;

		for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
			adap->params.sge.qset[i].lro = 0;
			adap->sge.qs[i].lro_enabled = 0;
		}
	}
	return 0;
}
 
static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_mini_max_pending = 0;
	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
	e->tx_max_pending = MAX_TXQ_ENTRIES;

	e->rx_pending = q->fl_size;
	e->rx_mini_pending = q->rspq_size;
	e->rx_jumbo_pending = q->jumbo_size;
	e->tx_pending = q->txq_size[0];
}
 
static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *q;
	int i;

	if (e->rx_pending > MAX_RX_BUFFERS ||
	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
	    e->tx_pending > MAX_TXQ_ENTRIES ||
	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES ||
	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
	    e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	q = &adapter->params.sge.qset[pi->first_qset];
	for (i = 0; i < pi->nqsets; ++i, ++q) {
		q->rspq_size = e->rx_mini_pending;
		q->fl_size = e->rx_pending;
		q->jumbo_size = e->rx_jumbo_pending;
		q->txq_size[0] = e->tx_pending;
		q->txq_size[1] = e->tx_pending;
		q->txq_size[2] = e->tx_pending;
	}
	return 0;
}
 
static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *qsp = &adapter->params.sge.qset[0];
	struct sge_qset *qs = &adapter->sge.qs[0];

	if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
		return -EINVAL;

	qsp->coalesce_usecs = c->rx_coalesce_usecs;
	t3_update_qset_coalesce(qs, qsp);
	return 0;
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *q = adapter->params.sge.qset;

	c->rx_coalesce_usecs = q->coalesce_usecs;
	return 0;
}
 
static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int i, err = 0;

	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	e->magic = EEPROM_MAGIC;
	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
		err = t3_seeprom_read(adapter, i, (__le32 *) & buf[i]);

	if (!err)
		memcpy(data, buf + e->offset, e->len);
	kfree(buf);
	return err;
}
 
static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 aligned_offset, aligned_len;
	__le32 *p;
	u8 *buf;
	int err;

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
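
	/*
	 * Worked example of the alignment above: a 6-byte write at offset 5
	 * yields aligned_offset = 4 and aligned_len = (6 + 1 + 3) & ~3 = 8,
	 * i.e. two 32-bit words covering bytes 4..11; the partial first and
	 * last words are read back below before being merged and rewritten.
	 */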
 
	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		buf = kmalloc(aligned_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
		if (!err && aligned_len > 4)
			err = t3_seeprom_read(adapter,
					      aligned_offset + aligned_len - 4,
					      (__le32 *) & buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
	} else
		buf = data;

	err = t3_seeprom_wp(adapter, 0);
	if (err)
		goto out;

	for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = t3_seeprom_write(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t3_seeprom_wp(adapter, 1);
out:
	if (buf != data)
		kfree(buf);
	return err;
}
 
static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}
 
static const struct ethtool_ops cxgb_ethtool_ops = {
	.get_settings = get_settings,
	.set_settings = set_settings,
	.get_drvinfo = get_drvinfo,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_ringparam = get_sge_param,
	.set_ringparam = set_sge_param,
	.get_coalesce = get_coalesce,
	.set_coalesce = set_coalesce,
	.get_eeprom_len = get_eeprom_len,
	.get_eeprom = get_eeprom,
	.set_eeprom = set_eeprom,
	.get_pauseparam = get_pauseparam,
	.set_pauseparam = set_pauseparam,
	.get_rx_csum = get_rx_csum,
	.set_rx_csum = set_rx_csum,
	.set_tx_csum = ethtool_op_set_tx_csum,
	.set_sg = ethtool_op_set_sg,
	.get_link = ethtool_op_get_link,
	.get_strings = get_strings,
	.phys_id = cxgb3_phys_id,
	.nway_reset = restart_autoneg,
	.get_sset_count = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len = get_regs_len,
	.get_regs = get_regs,
	.get_wol = get_wol,
	.set_tso = ethtool_op_set_tso,
};
 
/* A negative value means "leave this parameter unchanged". */
static int in_range(int val, int lo, int hi)
{
	return val < 0 || (val <= hi && val >= lo);
}
 
1834 static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 
1836         struct port_info *pi = netdev_priv(dev);
 
1837         struct adapter *adapter = pi->adapter;
 
1841         if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
 
1845         case CHELSIO_SET_QSET_PARAMS:{
 
1847                 struct qset_params *q;
 
1848                 struct ch_qset_params t;
 
1849                 int q1 = pi->first_qset;
 
1850                 int nqsets = pi->nqsets;
 
1852                 if (!capable(CAP_NET_ADMIN))
 
1854                 if (copy_from_user(&t, useraddr, sizeof(t)))
 
1856                 if (t.qset_idx >= SGE_QSETS)
 
                if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
                    !in_range(t.cong_thres, 0, 255) ||
                    !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
                              MAX_TXQ_ENTRIES) ||
                    !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
                              MAX_TXQ_ENTRIES) ||
                    !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
                              MAX_CTRL_TXQ_ENTRIES) ||
                    !in_range(t.fl_size[0], MIN_FL_ENTRIES,
                              MAX_RX_BUFFERS) ||
                    !in_range(t.fl_size[1], MIN_FL_ENTRIES,
                              MAX_RX_JUMBO_BUFFERS) ||
                    !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
                              MAX_RSPQ_ENTRIES))
                        return -EINVAL;
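                /* Enabling LRO on a queue set requires Rx checksum offload
                 * on the port that owns it. */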
 
1874                 if ((adapter->flags & FULL_INIT_DONE) && t.lro > 0)
 
1875                         for_each_port(adapter, i) {
 
1876                                 pi = adap2pinfo(adapter, i);
 
1877                                 if (t.qset_idx >= pi->first_qset &&
 
1878                                     t.qset_idx < pi->first_qset + pi->nqsets &&
 
1879                                     !pi->rx_csum_offload)
 
                /* Queue sizes cannot be changed once the adapter is up */
                if ((adapter->flags & FULL_INIT_DONE) &&
 
1884                         (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
 
1885                         t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
 
1886                         t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
 
                        t.polling >= 0 || t.cong_thres >= 0))
                        return -EBUSY;
 
1890                 /* Allow setting of any available qset when offload enabled */
 
1891                 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
 
1893                         for_each_port(adapter, i) {
 
1894                                 pi = adap2pinfo(adapter, i);
 
1895                                 nqsets += pi->first_qset + pi->nqsets;
 
1899                 if (t.qset_idx < q1)
 
1901                 if (t.qset_idx > q1 + nqsets - 1)
 
1904                 q = &adapter->params.sge.qset[t.qset_idx];
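                /* Fields the caller left negative are not modified. */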
 
1906                 if (t.rspq_size >= 0)
 
1907                         q->rspq_size = t.rspq_size;
 
1908                 if (t.fl_size[0] >= 0)
 
1909                         q->fl_size = t.fl_size[0];
 
1910                 if (t.fl_size[1] >= 0)
 
1911                         q->jumbo_size = t.fl_size[1];
 
1912                 if (t.txq_size[0] >= 0)
 
1913                         q->txq_size[0] = t.txq_size[0];
 
1914                 if (t.txq_size[1] >= 0)
 
1915                         q->txq_size[1] = t.txq_size[1];
 
1916                 if (t.txq_size[2] >= 0)
 
1917                         q->txq_size[2] = t.txq_size[2];
 
1918                 if (t.cong_thres >= 0)
 
1919                         q->cong_thres = t.cong_thres;
 
1920                 if (t.intr_lat >= 0) {
 
1921                         struct sge_qset *qs =
 
1922                                 &adapter->sge.qs[t.qset_idx];
 
1924                         q->coalesce_usecs = t.intr_lat;
 
1925                         t3_update_qset_coalesce(qs, q);
 
1927                 if (t.polling >= 0) {
 
1928                         if (adapter->flags & USING_MSIX)
 
1929                                 q->polling = t.polling;
 
1931                                 /* No polling with INTx for T3A */
 
1932                                 if (adapter->params.rev == 0 &&
 
1933                                         !(adapter->flags & USING_MSI))
 
1936                                 for (i = 0; i < SGE_QSETS; i++) {
 
                                        q = &adapter->params.sge.qset[i];
 
1939                                         q->polling = t.polling;
 
1944                         struct sge_qset *qs = &adapter->sge.qs[t.qset_idx];
 
1946                         qs->lro_enabled = t.lro;
 
1950         case CHELSIO_GET_QSET_PARAMS:{
 
1951                 struct qset_params *q;
 
1952                 struct ch_qset_params t;
 
1953                 int q1 = pi->first_qset;
 
1954                 int nqsets = pi->nqsets;
 
1957                 if (copy_from_user(&t, useraddr, sizeof(t)))
 
1960                 /* Display qsets for all ports when offload enabled */
 
1961                 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
 
1963                         for_each_port(adapter, i) {
 
1964                                 pi = adap2pinfo(adapter, i);
 
1965                                 nqsets = pi->first_qset + pi->nqsets;
 
1969                 if (t.qset_idx >= nqsets)
 
1972                 q = &adapter->params.sge.qset[q1 + t.qset_idx];
 
1973                 t.rspq_size = q->rspq_size;
 
1974                 t.txq_size[0] = q->txq_size[0];
 
1975                 t.txq_size[1] = q->txq_size[1];
 
1976                 t.txq_size[2] = q->txq_size[2];
 
1977                 t.fl_size[0] = q->fl_size;
 
1978                 t.fl_size[1] = q->jumbo_size;
 
1979                 t.polling = q->polling;
 
1981                 t.intr_lat = q->coalesce_usecs;
 
1982                 t.cong_thres = q->cong_thres;
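                /* MSI-X vector 0 handles slow-path interrupts, so queue set
                 * vectors start at entry 1. */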
 
                if (adapter->flags & USING_MSIX)
                        t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
                else
                        t.vector = adapter->pdev->irq;
 
1990                 if (copy_to_user(useraddr, &t, sizeof(t)))
 
1994         case CHELSIO_SET_QSET_NUM:{
 
1995                 struct ch_reg edata;
 
1996                 unsigned int i, first_qset = 0, other_qsets = 0;
 
1998                 if (!capable(CAP_NET_ADMIN))
 
2000                 if (adapter->flags & FULL_INIT_DONE)
 
2002                 if (copy_from_user(&edata, useraddr, sizeof(edata)))
 
2004                 if (edata.val < 1 ||
 
2005                         (edata.val > 1 && !(adapter->flags & USING_MSIX)))
 
                /* Count the qsets claimed by the other ports; the new total
                 * must stay within SGE_QSETS. */
                for_each_port(adapter, i)
 
2009                         if (adapter->port[i] && adapter->port[i] != dev)
 
2010                                 other_qsets += adap2pinfo(adapter, i)->nqsets;
 
2012                 if (edata.val + other_qsets > SGE_QSETS)
 
2015                 pi->nqsets = edata.val;
 
2017                 for_each_port(adapter, i)
 
2018                         if (adapter->port[i]) {
 
2019                                 pi = adap2pinfo(adapter, i);
 
2020                                 pi->first_qset = first_qset;
 
2021                                 first_qset += pi->nqsets;
 
2025         case CHELSIO_GET_QSET_NUM:{
 
2026                 struct ch_reg edata;
 
2028                 edata.cmd = CHELSIO_GET_QSET_NUM;
 
2029                 edata.val = pi->nqsets;
 
2030                 if (copy_to_user(useraddr, &edata, sizeof(edata)))
 
2034         case CHELSIO_LOAD_FW:{
 
2036                 struct ch_mem_range t;
 
2038                 if (!capable(CAP_SYS_RAWIO))
 
2040                 if (copy_from_user(&t, useraddr, sizeof(t)))
 
                /* XXX: t.len is user-controlled and not validated before the
                 * allocation below. */
                fw_data = kmalloc(t.len, GFP_KERNEL);
                if (!fw_data)
                        return -ENOMEM;
                if (copy_from_user(fw_data, useraddr + sizeof(t), t.len)) {
 
2053                 ret = t3_load_fw(adapter, fw_data, t.len);
 
2059         case CHELSIO_SETMTUTAB:{
 
2063                 if (!is_offload(adapter))
 
2065                 if (!capable(CAP_NET_ADMIN))
 
2067                 if (offload_running(adapter))
 
2069                 if (copy_from_user(&m, useraddr, sizeof(m)))
 
2071                 if (m.nmtus != NMTUS)
 
2073                 if (m.mtus[0] < 81)     /* accommodate SACK */
 
2076                 /* MTUs must be in ascending order */
 
2077                 for (i = 1; i < NMTUS; ++i)
 
2078                         if (m.mtus[i] < m.mtus[i - 1])
 
2081                 memcpy(adapter->params.mtus, m.mtus,
 
2082                         sizeof(adapter->params.mtus));
 
2085         case CHELSIO_GET_PM:{
 
2086                 struct tp_params *p = &adapter->params.tp;
 
2087                 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
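                /* Report the payload memory (PMTX/PMRX) page configuration. */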
 
2089                 if (!is_offload(adapter))
 
2091                 m.tx_pg_sz = p->tx_pg_size;
 
2092                 m.tx_num_pg = p->tx_num_pgs;
 
2093                 m.rx_pg_sz = p->rx_pg_size;
 
2094                 m.rx_num_pg = p->rx_num_pgs;
 
2095                 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
 
2096                 if (copy_to_user(useraddr, &m, sizeof(m)))
 
2100         case CHELSIO_SET_PM:{
 
2102                 struct tp_params *p = &adapter->params.tp;
 
2104                 if (!is_offload(adapter))
 
2106                 if (!capable(CAP_NET_ADMIN))
 
2108                 if (adapter->flags & FULL_INIT_DONE)
 
2110                 if (copy_from_user(&m, useraddr, sizeof(m)))
 
2112                 if (!is_power_of_2(m.rx_pg_sz) ||
 
2113                         !is_power_of_2(m.tx_pg_sz))
 
2114                         return -EINVAL; /* not power of 2 */
 
2115                 if (!(m.rx_pg_sz & 0x14000))
 
2116                         return -EINVAL; /* not 16KB or 64KB */
 
                if (!(m.tx_pg_sz & 0x1554000))
                        return -EINVAL; /* not a power of 4 from 16KB to 16MB */
 
2119                 if (m.tx_num_pg == -1)
 
2120                         m.tx_num_pg = p->tx_num_pgs;
 
2121                 if (m.rx_num_pg == -1)
 
2122                         m.rx_num_pg = p->rx_num_pgs;
 
                if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
                        return -EINVAL; /* page counts must be multiples of 24 */
 
2125                 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
 
2126                         m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
 
2128                 p->rx_pg_size = m.rx_pg_sz;
 
2129                 p->tx_pg_size = m.tx_pg_sz;
 
2130                 p->rx_num_pgs = m.rx_num_pg;
 
2131                 p->tx_num_pgs = m.tx_num_pg;
 
2134         case CHELSIO_GET_MEM:{
 
2135                 struct ch_mem_range t;
 
2139                 if (!is_offload(adapter))
 
2141                 if (!(adapter->flags & FULL_INIT_DONE))
 
2142                         return -EIO;    /* need the memory controllers */
 
2143                 if (copy_from_user(&t, useraddr, sizeof(t)))
 
2145                 if ((t.addr & 7) || (t.len & 7))
 
                if (t.mem_id == MEM_CM)
                        mem = &adapter->cm;
 
2149                 else if (t.mem_id == MEM_PMRX)
 
2150                         mem = &adapter->pmrx;
 
2151                 else if (t.mem_id == MEM_PMTX)
 
2152                         mem = &adapter->pmtx;
 
                /*
                 * Version scheme:
                 * bits 0..9: chip version
                 * bits 10..15: chip revision
                 */
                t.version = 3 | (adapter->params.rev << 10);
 
2162                 if (copy_to_user(useraddr, &t, sizeof(t)))
 
                /*
                 * Read 256 bytes at a time as len can be large and we don't
                 * want to use huge intermediate buffers.
                 */
 
2169                 useraddr += sizeof(t);  /* advance to start of buffer */
 
2171                         unsigned int chunk =
 
2172                                 min_t(unsigned int, t.len, sizeof(buf));
 
                        ret = t3_mc7_bd_read(mem, t.addr / 8, chunk / 8, buf);
                        if (ret)
                                return ret;
 
2179                         if (copy_to_user(useraddr, buf, chunk))
 
2187         case CHELSIO_SET_TRACE_FILTER:{
 
2189                 const struct trace_params *tp;
 
2191                 if (!capable(CAP_NET_ADMIN))
 
2193                 if (!offload_running(adapter))
 
2195                 if (copy_from_user(&t, useraddr, sizeof(t)))
 
2198                 tp = (const struct trace_params *)&t.sip;
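                /* The ch_trace fields from sip onward mirror struct
                 * trace_params, so point directly into the copied request. */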
 
                if (t.config_tx)
                        t3_config_trace_filter(adapter, tp, 0,
                                               t.invert_match, t.trace_tx);
                if (t.config_rx)
                        t3_config_trace_filter(adapter, tp, 1,
                                               t.invert_match, t.trace_rx);
 
2215 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
 
2217         struct mii_ioctl_data *data = if_mii(req);
 
2218         struct port_info *pi = netdev_priv(dev);
 
2219         struct adapter *adapter = pi->adapter;
 
2224                 data->phy_id = pi->phy.addr;
 
2228                 struct cphy *phy = &pi->phy;
 
2230                 if (!phy->mdio_read)
 
2232                 if (is_10G(adapter)) {
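                        /* 10G PHYs use clause-45 MDIO; the MMD is carried in
                         * the upper byte of phy_id. */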
 
2233                         mmd = data->phy_id >> 8;
 
2236                         else if (mmd > MDIO_DEV_VEND2)
 
2240                                 phy->mdio_read(adapter, data->phy_id & 0x1f,
 
2241                                                 mmd, data->reg_num, &val);
 
2244                                 phy->mdio_read(adapter, data->phy_id & 0x1f,
 
2245                                                 0, data->reg_num & 0x1f,
 
2248                         data->val_out = val;
 
2252                 struct cphy *phy = &pi->phy;
 
2254                 if (!capable(CAP_NET_ADMIN))
 
2256                 if (!phy->mdio_write)
 
2258                 if (is_10G(adapter)) {
 
2259                         mmd = data->phy_id >> 8;
 
2262                         else if (mmd > MDIO_DEV_VEND2)
 
2266                                 phy->mdio_write(adapter,
 
2267                                                 data->phy_id & 0x1f, mmd,
 
2272                                 phy->mdio_write(adapter,
 
2273                                                 data->phy_id & 0x1f, 0,
 
2274                                                 data->reg_num & 0x1f,
 
2279                 return cxgb_extension_ioctl(dev, req->ifr_data);
 
2286 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
 
2288         struct port_info *pi = netdev_priv(dev);
 
2289         struct adapter *adapter = pi->adapter;
 
2292         if (new_mtu < 81)       /* accommodate SACK */
 
        ret = t3_mac_set_mtu(&pi->mac, new_mtu);
        if (ret)
                return ret;
        dev->mtu = new_mtu;
 
2297         init_port_mtus(adapter);
 
2298         if (adapter->params.rev == 0 && offload_running(adapter))
 
2299                 t3_load_mtus(adapter, adapter->params.mtus,
 
2300                              adapter->params.a_wnd, adapter->params.b_wnd,
 
2301                              adapter->port[0]->mtu);
 
2305 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
 
2307         struct port_info *pi = netdev_priv(dev);
 
2308         struct adapter *adapter = pi->adapter;
 
2309         struct sockaddr *addr = p;
 
2311         if (!is_valid_ether_addr(addr->sa_data))
 
2314         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
 
2315         t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
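        /* Keep the offload engine's source MAC table (SMT) in sync. */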
 
2316         if (offload_running(adapter))
 
2317                 write_smt_entry(adapter, pi->port_id);
 
/**
 * t3_synchronize_rx - wait for current Rx processing on a port to complete
 * @adap: the adapter
 * @p: the port
 *
 * Ensures that current Rx processing on any of the queues associated with
 * the given port completes before returning.  We do this by acquiring and
 * releasing the locks of the response queues associated with the port.
 */
 
2330 static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
 
2334         for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
 
2335                 struct sge_rspq *q = &adap->sge.qs[i].rspq;
 
2337                 spin_lock_irq(&q->lock);
 
2338                 spin_unlock_irq(&q->lock);
 
2342 static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
 
2344         struct port_info *pi = netdev_priv(dev);
 
2345         struct adapter *adapter = pi->adapter;
 
2348         if (adapter->params.rev > 0)
 
2349                 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
 
2351                 /* single control for all ports */
 
2352                 unsigned int i, have_vlans = 0;
 
                for_each_port(adapter, i)
                        have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
 
2356                 t3_set_vlan_accel(adapter, 1, have_vlans);
 
2358         t3_synchronize_rx(adapter, pi);
 
2361 #ifdef CONFIG_NET_POLL_CONTROLLER
 
2362 static void cxgb_netpoll(struct net_device *dev)
 
2364         struct port_info *pi = netdev_priv(dev);
 
2365         struct adapter *adapter = pi->adapter;
 
2368         for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
 
2369                 struct sge_qset *qs = &adapter->sge.qs[qidx];
 
                if (adapter->flags & USING_MSIX)
                        source = qs;
                else
                        source = adapter;

                t3_intr_handler(adapter, qs->rspq.polling)(0, source);
 
/*
 * Periodic accumulation of MAC statistics.
 */
 
2385 static void mac_stats_update(struct adapter *adapter)
 
2389         for_each_port(adapter, i) {
 
2390                 struct net_device *dev = adapter->port[i];
 
2391                 struct port_info *p = netdev_priv(dev);
 
2393                 if (netif_running(dev)) {
 
2394                         spin_lock(&adapter->stats_lock);
 
2395                         t3_mac_update_stats(&p->mac);
 
2396                         spin_unlock(&adapter->stats_lock);
 
2401 static void check_link_status(struct adapter *adapter)
 
2405         for_each_port(adapter, i) {
 
2406                 struct net_device *dev = adapter->port[i];
 
2407                 struct port_info *p = netdev_priv(dev);
 
2409                 if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev))
 
2410                         t3_link_changed(adapter, i);
 
2414 static void check_t3b2_mac(struct adapter *adapter)
 
2418         if (!rtnl_trylock())    /* synchronize with ifdown */
 
2421         for_each_port(adapter, i) {
 
2422                 struct net_device *dev = adapter->port[i];
 
2423                 struct port_info *p = netdev_priv(dev);
 
2426                 if (!netif_running(dev))
 
                if (netif_running(dev) && netif_carrier_ok(dev))
                        status = t3b2_mac_watchdog_task(&p->mac);
                /* status 1: the MAC was toggled; status 2: it needs a reset */
                if (status == 1)
                        p->mac.stats.num_toggled++;
                else if (status == 2) {
 
2435                         struct cmac *mac = &p->mac;
 
2437                         t3_mac_set_mtu(mac, dev->mtu);
 
2438                         t3_mac_set_address(mac, 0, dev->dev_addr);
 
2439                         cxgb_set_rxmode(dev);
 
2440                         t3_link_start(&p->phy, mac, &p->link_config);
 
2441                         t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
 
2442                         t3_port_intr_enable(adapter, p->port_id);
 
2443                         p->mac.stats.num_resets++;
 
2450 static void t3_adap_check_task(struct work_struct *work)
 
2452         struct adapter *adapter = container_of(work, struct adapter,
 
2453                                                adap_check_task.work);
 
2454         const struct adapter_params *p = &adapter->params;
 
2456         adapter->check_task_cnt++;
 
2458         /* Check link status for PHYs without interrupts */
 
2459         if (p->linkpoll_period)
 
2460                 check_link_status(adapter);
 
2462         /* Accumulate MAC stats if needed */
 
2463         if (!p->linkpoll_period ||
 
2464             (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
 
2465             p->stats_update_period) {
 
2466                 mac_stats_update(adapter);
 
2467                 adapter->check_task_cnt = 0;
 
2470         if (p->rev == T3_REV_B2)
 
2471                 check_t3b2_mac(adapter);
 
2473         /* Schedule the next check update if any port is active. */
 
2474         spin_lock_irq(&adapter->work_lock);
 
2475         if (adapter->open_device_map & PORT_MASK)
 
2476                 schedule_chk_task(adapter);
 
2477         spin_unlock_irq(&adapter->work_lock);
 
/*
 * Processes external (PHY) interrupts in process context.
 */
 
2483 static void ext_intr_task(struct work_struct *work)
 
2485         struct adapter *adapter = container_of(work, struct adapter,
 
2486                                                ext_intr_handler_task);
 
2488         t3_phy_intr_handler(adapter);
 
2490         /* Now reenable external interrupts */
 
2491         spin_lock_irq(&adapter->work_lock);
 
2492         if (adapter->slow_intr_mask) {
 
2493                 adapter->slow_intr_mask |= F_T3DBG;
 
2494                 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
 
2495                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
 
2496                              adapter->slow_intr_mask);
 
2498         spin_unlock_irq(&adapter->work_lock);
 
/*
 * Interrupt-context handler for external (PHY) interrupts.
 */
 
2504 void t3_os_ext_intr_handler(struct adapter *adapter)
 
        /*
         * Schedule a task to handle external interrupts as they may be slow
         * and we use a mutex to protect MDIO registers.  We disable PHY
         * interrupts in the meantime and let the task reenable them when
         * it's done.
         */
 
2512         spin_lock(&adapter->work_lock);
 
2513         if (adapter->slow_intr_mask) {
 
2514                 adapter->slow_intr_mask &= ~F_T3DBG;
 
2515                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
 
2516                              adapter->slow_intr_mask);
 
2517                 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
 
2519         spin_unlock(&adapter->work_lock);
 
2522 static int t3_adapter_error(struct adapter *adapter, int reset)
 
2526         /* Stop all ports */
 
2527         for_each_port(adapter, i) {
 
2528                 struct net_device *netdev = adapter->port[i];
 
                if (netif_running(netdev))
                        cxgb_close(netdev);
 
2534         if (is_offload(adapter) &&
 
2535             test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
 
2536                 offload_close(&adapter->tdev);
 
2538         /* Stop SGE timers */
 
2539         t3_stop_sge_timers(adapter);
 
2541         adapter->flags &= ~FULL_INIT_DONE;
 
        if (reset)
                ret = t3_reset_adapter(adapter);
 
2546         pci_disable_device(adapter->pdev);
 
2551 static int t3_reenable_adapter(struct adapter *adapter)
 
2553         if (pci_enable_device(adapter->pdev)) {
 
2554                 dev_err(&adapter->pdev->dev,
 
2555                         "Cannot re-enable PCI device after reset.\n");
 
2558         pci_set_master(adapter->pdev);
 
2559         pci_restore_state(adapter->pdev);
 
2561         /* Free sge resources */
 
2562         t3_free_sge_resources(adapter);
 
2564         if (t3_replay_prep_adapter(adapter))
 
2572 static void t3_resume_ports(struct adapter *adapter)
 
2576         /* Restart the ports */
 
2577         for_each_port(adapter, i) {
 
2578                 struct net_device *netdev = adapter->port[i];
 
2580                 if (netif_running(netdev)) {
 
2581                         if (cxgb_open(netdev)) {
 
                                dev_err(&adapter->pdev->dev,
                                        "can't bring device back up"
                                        " after reset\n");
 
/*
 * Processes a fatal error.
 * Bring the ports down, reset the chip, bring the ports back up.
 */
 
2595 static void fatal_error_task(struct work_struct *work)
 
2597         struct adapter *adapter = container_of(work, struct adapter,
 
2598                                                fatal_error_handler_task);
 
        err = t3_adapter_error(adapter, 1);
        if (!err)
                err = t3_reenable_adapter(adapter);
        if (!err)
                t3_resume_ports(adapter);
 
2608         CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
 
2612 void t3_fatal_err(struct adapter *adapter)
 
2614         unsigned int fw_status[4];
 
2616         if (adapter->flags & FULL_INIT_DONE) {
 
2617                 t3_sge_stop(adapter);
 
2618                 t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
 
2619                 t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
 
2620                 t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
 
2621                 t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
 
2623                 spin_lock(&adapter->work_lock);
 
2624                 t3_intr_disable(adapter);
 
2625                 queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
 
2626                 spin_unlock(&adapter->work_lock);
 
2628         CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
 
2629         if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
 
2630                 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
 
2631                          fw_status[0], fw_status[1],
 
2632                          fw_status[2], fw_status[3]);
 
/**
 * t3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
 
2644 static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
 
2645                                              pci_channel_state_t state)
 
2647         struct adapter *adapter = pci_get_drvdata(pdev);
 
2650         ret = t3_adapter_error(adapter, 0);
 
2652         /* Request a slot reset. */
 
2653         return PCI_ERS_RESULT_NEED_RESET;
 
/**
 * t3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
 
2662 static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
 
2664         struct adapter *adapter = pci_get_drvdata(pdev);
 
2666         if (!t3_reenable_adapter(adapter))
 
2667                 return PCI_ERS_RESULT_RECOVERED;
 
2669         return PCI_ERS_RESULT_DISCONNECT;
 
/**
 * t3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
 
2679 static void t3_io_resume(struct pci_dev *pdev)
 
2681         struct adapter *adapter = pci_get_drvdata(pdev);
 
2683         t3_resume_ports(adapter);
 
2686 static struct pci_error_handlers t3_err_handler = {
 
2687         .error_detected = t3_io_error_detected,
 
2688         .slot_reset = t3_io_slot_reset,
 
2689         .resume = t3_io_resume,
 
/*
 * Set the number of qsets based on the number of CPUs and the number of ports,
 * not to exceed the number of available qsets, assuming there are enough qsets
 * per port in any case.
 */
 
2697 static void set_nqsets(struct adapter *adap)
 
2700         int num_cpus = num_online_cpus();
 
2701         int hwports = adap->params.nports;
 
2702         int nqsets = SGE_QSETS;
 
        if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
                if (hwports == 2 &&
                    (hwports * nqsets > SGE_QSETS ||
                     num_cpus >= nqsets / hwports))
                        nqsets /= hwports;
                if (nqsets > num_cpus)
                        nqsets = num_cpus;
                if (nqsets < 1 || hwports == 4)
                        nqsets = 1;
        } else
                nqsets = 1;
 
2716         for_each_port(adap, i) {
 
2717                 struct port_info *pi = adap2pinfo(adap, i);
 
                pi->first_qset = j;
                pi->nqsets = nqsets;
                j = pi->first_qset + nqsets;
 
2723                 dev_info(&adap->pdev->dev,
 
2724                          "Port %d using %d queue sets.\n", i, nqsets);
 
2728 static int __devinit cxgb_enable_msix(struct adapter *adap)
 
2730         struct msix_entry entries[SGE_QSETS + 1];
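        /* one MSI-X vector per queue set, plus one for slow-path events */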
 
2733         for (i = 0; i < ARRAY_SIZE(entries); ++i)
 
2734                 entries[i].entry = i;
 
        err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
        if (!err) {
                for (i = 0; i < ARRAY_SIZE(entries); ++i)
                        adap->msix_info[i].vec = entries[i].vector;
        } else if (err > 0)
                dev_info(&adap->pdev->dev,
                         "only %d MSI-X vectors left, not using MSI-X\n", err);
        return err;
 
2746 static void __devinit print_port_info(struct adapter *adap,
 
2747                                       const struct adapter_info *ai)
 
2749         static const char *pci_variant[] = {
 
2750                 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
 
2757                 snprintf(buf, sizeof(buf), "%s x%d",
 
2758                          pci_variant[adap->params.pci.variant],
 
2759                          adap->params.pci.width);
 
2761                 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
 
2762                          pci_variant[adap->params.pci.variant],
 
2763                          adap->params.pci.speed, adap->params.pci.width);
 
2765         for_each_port(adap, i) {
 
2766                 struct net_device *dev = adap->port[i];
 
2767                 const struct port_info *pi = netdev_priv(dev);
 
2769                 if (!test_bit(i, &adap->registered_device_map))
 
2771                 printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
 
2772                        dev->name, ai->desc, pi->phy.desc,
 
2773                        is_offload(adap) ? "R" : "", adap->params.rev, buf,
 
2774                        (adap->flags & USING_MSIX) ? " MSI-X" :
 
2775                        (adap->flags & USING_MSI) ? " MSI" : "");
 
                if (adap->name == dev->name && adap->params.vpd.mclk)
                        printk(KERN_INFO
                               "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
 
2779                                adap->name, t3_mc7_size(&adap->cm) >> 20,
 
2780                                t3_mc7_size(&adap->pmtx) >> 20,
 
2781                                t3_mc7_size(&adap->pmrx) >> 20,
 
2782                                adap->params.vpd.sn);
 
2786 static int __devinit init_one(struct pci_dev *pdev,
 
2787                               const struct pci_device_id *ent)
 
2789         static int version_printed;
 
2791         int i, err, pci_using_dac = 0;
 
2792         unsigned long mmio_start, mmio_len;
 
2793         const struct adapter_info *ai;
 
2794         struct adapter *adapter = NULL;
 
2795         struct port_info *pi;
 
        if (!version_printed) {
                printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
                ++version_printed;
        }
 
        if (!cxgb3_wq) {
                cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
                if (!cxgb3_wq) {
                        printk(KERN_ERR DRV_NAME
                               ": cannot initialize work queue\n");
                        return -ENOMEM;
                }
        }
 
2811         err = pci_request_regions(pdev, DRV_NAME);
 
2813                 /* Just info, some other driver may have claimed the device. */
 
2814                 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
 
2818         err = pci_enable_device(pdev);
 
2820                 dev_err(&pdev->dev, "cannot enable PCI device\n");
 
2821                 goto out_release_regions;
 
        if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
                pci_using_dac = 1;
                err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
                if (err) {
                        dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
                                "coherent allocations\n");
                        goto out_disable_device;
                }
 
2832         } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
 
2833                 dev_err(&pdev->dev, "no usable DMA configuration\n");
 
2834                 goto out_disable_device;
 
2837         pci_set_master(pdev);
 
2838         pci_save_state(pdev);
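        /* Saved state is restored by t3_reenable_adapter() after a reset. */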
 
2840         mmio_start = pci_resource_start(pdev, 0);
 
2841         mmio_len = pci_resource_len(pdev, 0);
 
2842         ai = t3_get_adapter_info(ent->driver_data);
 
        adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
        if (!adapter) {
                err = -ENOMEM;
                goto out_disable_device;
        }
 
2850         adapter->regs = ioremap_nocache(mmio_start, mmio_len);
 
2851         if (!adapter->regs) {
 
                dev_err(&pdev->dev, "cannot map device registers\n");
                err = -ENOMEM;
                goto out_free_adapter;
 
2857         adapter->pdev = pdev;
 
2858         adapter->name = pci_name(pdev);
 
2859         adapter->msg_enable = dflt_msg_enable;
 
2860         adapter->mmio_len = mmio_len;
 
2862         mutex_init(&adapter->mdio_lock);
 
2863         spin_lock_init(&adapter->work_lock);
 
2864         spin_lock_init(&adapter->stats_lock);
 
2866         INIT_LIST_HEAD(&adapter->adapter_list);
 
2867         INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
 
2868         INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
 
2869         INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
 
2871         for (i = 0; i < ai->nports; ++i) {
 
2872                 struct net_device *netdev;
 
                netdev = alloc_etherdev(sizeof(struct port_info));
                if (!netdev) {
                        err = -ENOMEM;
                        goto out_free_dev;
                }
 
2880                 SET_NETDEV_DEV(netdev, &pdev->dev);
 
2882                 adapter->port[i] = netdev;
 
2883                 pi = netdev_priv(netdev);
 
2884                 pi->adapter = adapter;
 
2885                 pi->rx_csum_offload = 1;
 
2887                 netif_carrier_off(netdev);
 
2888                 netdev->irq = pdev->irq;
 
2889                 netdev->mem_start = mmio_start;
 
2890                 netdev->mem_end = mmio_start + mmio_len - 1;
 
2891                 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
 
2892                 netdev->features |= NETIF_F_LLTX;
 
                if (pci_using_dac)
                        netdev->features |= NETIF_F_HIGHDMA;
 
2896                 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
 
2897                 netdev->vlan_rx_register = vlan_rx_register;
 
2899                 netdev->open = cxgb_open;
 
2900                 netdev->stop = cxgb_close;
 
2901                 netdev->hard_start_xmit = t3_eth_xmit;
 
2902                 netdev->get_stats = cxgb_get_stats;
 
2903                 netdev->set_multicast_list = cxgb_set_rxmode;
 
2904                 netdev->do_ioctl = cxgb_ioctl;
 
2905                 netdev->change_mtu = cxgb_change_mtu;
 
2906                 netdev->set_mac_address = cxgb_set_mac_addr;
 
2907 #ifdef CONFIG_NET_POLL_CONTROLLER
 
2908                 netdev->poll_controller = cxgb_netpoll;
 
2911                 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
 
2914         pci_set_drvdata(pdev, adapter);
 
        if (t3_prep_adapter(adapter, ai, 1) < 0) {
                err = -ENODEV;
                goto out_free_dev;
        }
 
2921          * The card is now ready to go.  If any errors occur during device
 
2922          * registration we do not fail the whole card but rather proceed only
 
2923          * with the ports we manage to register successfully.  However we must
 
2924          * register at least one net device.
 
2926         for_each_port(adapter, i) {
 
                err = register_netdev(adapter->port[i]);
                if (err)
                        dev_warn(&pdev->dev,
                                 "cannot register net device %s, skipping\n",
                                 adapter->port[i]->name);
                else {
                        /*
                         * Change the name we use for messages to the name of
                         * the first successfully registered interface.
                         */
                        if (!adapter->registered_device_map)
                                adapter->name = adapter->port[i]->name;

                        __set_bit(i, &adapter->registered_device_map);
                }
 
2943         if (!adapter->registered_device_map) {
 
2944                 dev_err(&pdev->dev, "could not register any net devices\n");
 
2948         /* Driver's ready. Reflect it on LEDs */
 
2949         t3_led_ready(adapter);
 
2951         if (is_offload(adapter)) {
 
2952                 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
 
2953                 cxgb3_adapter_ofld(adapter);
 
2956         /* See what interrupts we'll be using */
 
2957         if (msi > 1 && cxgb_enable_msix(adapter) == 0)
 
2958                 adapter->flags |= USING_MSIX;
 
2959         else if (msi > 0 && pci_enable_msi(pdev) == 0)
 
2960                 adapter->flags |= USING_MSI;
 
2962         set_nqsets(adapter);
 
        err = sysfs_create_group(&adapter->port[0]->dev.kobj,
                                 &cxgb3_attr_group);
 
2967         print_port_info(adapter, ai);
 
out_free_dev:
        iounmap(adapter->regs);
 
2972         for (i = ai->nports - 1; i >= 0; --i)
 
2973                 if (adapter->port[i])
 
2974                         free_netdev(adapter->port[i]);
 
out_free_adapter:
        kfree(adapter);
out_disable_device:
        pci_disable_device(pdev);
 
2981 out_release_regions:
 
2982         pci_release_regions(pdev);
 
        pci_set_drvdata(pdev, NULL);
        return err;
 
2987 static void __devexit remove_one(struct pci_dev *pdev)
 
2989         struct adapter *adapter = pci_get_drvdata(pdev);
 
2994                 t3_sge_stop(adapter);
 
                sysfs_remove_group(&adapter->port[0]->dev.kobj,
                                   &cxgb3_attr_group);
 
2998                 if (is_offload(adapter)) {
 
2999                         cxgb3_adapter_unofld(adapter);
 
3000                         if (test_bit(OFFLOAD_DEVMAP_BIT,
 
3001                                      &adapter->open_device_map))
 
3002                                 offload_close(&adapter->tdev);
 
                for_each_port(adapter, i)
                        if (test_bit(i, &adapter->registered_device_map))
                                unregister_netdev(adapter->port[i]);
 
3009                 t3_stop_sge_timers(adapter);
 
3010                 t3_free_sge_resources(adapter);
 
3011                 cxgb_disable_msi(adapter);
 
3013                 for_each_port(adapter, i)
 
3014                         if (adapter->port[i])
 
3015                                 free_netdev(adapter->port[i]);
 
3017                 iounmap(adapter->regs);
 
3019                 pci_release_regions(pdev);
 
3020                 pci_disable_device(pdev);
 
3021                 pci_set_drvdata(pdev, NULL);
 
static struct pci_driver driver = {
        .name = DRV_NAME,
        .id_table = cxgb3_pci_tbl,
        .probe = init_one,
        .remove = __devexit_p(remove_one),
 
3030         .err_handler = &t3_err_handler,
 
3033 static int __init cxgb3_init_module(void)
 
3037         cxgb3_offload_init();
 
3039         ret = pci_register_driver(&driver);
 
3043 static void __exit cxgb3_cleanup_module(void)
 
3045         pci_unregister_driver(&driver);
 
        if (cxgb3_wq)
                destroy_workqueue(cxgb3_wq);
 
3050 module_init(cxgb3_init_module);
 
3051 module_exit(cxgb3_cleanup_module);