/*
 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
 
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"
 
enum {
        MAX_TXQ_ENTRIES = 16384,
        MAX_CTRL_TXQ_ENTRIES = 1024,
        MAX_RSPQ_ENTRIES = 16384,
        MAX_RX_BUFFERS = 16384,
        MAX_RX_JUMBO_BUFFERS = 16384,
        MIN_TXQ_ENTRIES = 4,
        MIN_CTRL_TXQ_ENTRIES = 4,
        MIN_RSPQ_ENTRIES = 32,
        MIN_FL_ENTRIES = 32
};
 
#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C
 
#define CH_DEVICE(devid, ssid, idx) \
        { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }

static const struct pci_device_id cxgb3_pci_tbl[] = {
        CH_DEVICE(0x20, 1, 0),  /* PE9000 */
        CH_DEVICE(0x21, 1, 1),  /* T302E */
        CH_DEVICE(0x22, 1, 2),  /* T310E */
        CH_DEVICE(0x23, 1, 3),  /* T320X */
        CH_DEVICE(0x24, 1, 1),  /* T302X */
        CH_DEVICE(0x25, 1, 3),  /* T320E */
        CH_DEVICE(0x26, 1, 2),  /* T310X */
        CH_DEVICE(0x30, 1, 2),  /* T3B10 */
        CH_DEVICE(0x31, 1, 3),  /* T3B20 */
        CH_DEVICE(0x32, 1, 1),  /* T3B02 */
        {0,}
};
 
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
 
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
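
/*
 * For example, loading the module with "modprobe cxgb3 msi=1" restricts
 * the driver to MSI and legacy pin interrupts, and "msi=0" forces pin
 * interrupts.
 */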
 
/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */
static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
 
/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
static struct workqueue_struct *cxgb3_wq;
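
/*
 * Creation sketch (the call site is outside this excerpt): a private queue
 * like this would be set up once at module load, e.g.
 *
 *      cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
 *      if (!cxgb3_wq)
 *              return -ENOMEM;
 */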
 
/**
 *      link_report - show link status and link speed/duplex
 *      @dev: the port whose settings are to be reported
 *
 *      Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                printk(KERN_INFO "%s: link down\n", dev->name);
        else {
                const char *s = "10Mbps";
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_config.speed) {
                case SPEED_10000:
                        s = "10Gbps";
                        break;
                case SPEED_1000:
                        s = "1000Mbps";
                        break;
                case SPEED_100:
                        s = "100Mbps";
                        break;
                }

                printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
                       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
        }
}
 
/**
 *      t3_os_link_changed - handle link status changes
 *      @adapter: the adapter associated with the link change
 *      @port_id: the port index whose link status has changed
 *      @link_stat: the new status of the link
 *      @speed: the new speed setting
 *      @duplex: the new duplex setting
 *      @pause: the new flow-control setting
 *
 *      This is the OS-dependent handler for link status changes.  The OS
 *      neutral handler takes care of most of the processing for these events,
 *      then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
                        int speed, int duplex, int pause)
{
        struct net_device *dev = adapter->port[port_id];
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        /* Skip changes from disabled ports. */
        if (!netif_running(dev))
                return;

        if (link_stat != netif_carrier_ok(dev)) {
                if (link_stat) {
                        t3_mac_enable(mac, MAC_DIRECTION_RX);
                        netif_carrier_on(dev);
                } else {
                        netif_carrier_off(dev);
                        pi->phy.ops->power_down(&pi->phy, 1);
                        t3_mac_disable(mac, MAC_DIRECTION_RX);
                        t3_link_start(&pi->phy, mac, &pi->link_config);
                }
                link_report(dev);
        }
}
 
static void cxgb_set_rxmode(struct net_device *dev)
{
        struct t3_rx_mode rm;
        struct port_info *pi = netdev_priv(dev);

        init_rx_mode(&rm, dev, dev->mc_list);
        t3_mac_set_rx_mode(&pi->mac, &rm);
}
 
/**
 *      link_start - enable a port
 *      @dev: the device to enable
 *
 *      Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
        struct t3_rx_mode rm;
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        init_rx_mode(&rm, dev, dev->mc_list);
        t3_mac_reset(mac);
        t3_mac_set_mtu(mac, dev->mtu);
        t3_mac_set_address(mac, 0, dev->dev_addr);
        t3_mac_set_rx_mode(mac, &rm);
        t3_link_start(&pi->phy, mac, &pi->link_config);
        t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}
 
static inline void cxgb_disable_msi(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~USING_MSIX;
        } else if (adapter->flags & USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~USING_MSI;
        }
}
 
/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
        t3_slow_intr_handler(cookie);
        return IRQ_HANDLED;
}
 
/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
        int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

        snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
        adap->msix_info[0].desc[n] = 0;

        for_each_port(adap, j) {
                struct net_device *d = adap->port[j];
                const struct port_info *pi = netdev_priv(d);

                for (i = 0; i < pi->nqsets; i++, msi_idx++) {
                        snprintf(adap->msix_info[msi_idx].desc, n,
                                 "%s (queue %d)", d->name, i);
                        adap->msix_info[msi_idx].desc[n] = 0;
                }
        }
}
 
static int request_msix_data_irqs(struct adapter *adap)
{
        int i, j, err, qidx = 0;

        for_each_port(adap, i) {
                int nqsets = adap2pinfo(adap, i)->nqsets;

                for (j = 0; j < nqsets; ++j) {
                        err = request_irq(adap->msix_info[qidx + 1].vec,
                                          t3_intr_handler(adap,
                                                          adap->sge.qs[qidx].
                                                          rspq.polling), 0,
                                          adap->msix_info[qidx + 1].desc,
                                          &adap->sge.qs[qidx]);
                        if (err) {
                                while (--qidx >= 0)
                                        free_irq(adap->msix_info[qidx + 1].vec,
                                                 &adap->sge.qs[qidx]);
                                return err;
                        }
                        qidx++;
                }
        }
        return 0;
}
 
/**
 *      setup_rss - configure RSS
 *      @adap: the adapter
 *
 *      Sets up RSS to distribute packets to multiple receive queues.  We
 *      configure the RSS CPU lookup table to distribute to the number of HW
 *      receive queues, and the response queue lookup table to narrow that
 *      down to the response queues actually configured for each port.
 *      We always configure the RSS mapping for two ports since the mapping
 *      table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
        int i;
        unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
        unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
        u8 cpus[SGE_QSETS + 1];
        u16 rspq_map[RSS_TABLE_SIZE];

        for (i = 0; i < SGE_QSETS; ++i)
                cpus[i] = i;
        cpus[SGE_QSETS] = 0xff; /* terminator */

        for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
                rspq_map[i] = i % nq0;
                rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
        }

        t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
                      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
                      V_RRCPLCPUSIZE(6), cpus, rspq_map);
}
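
/*
 * Worked example of the mapping above (illustrative values): with nq0 = 2
 * and nq1 = 2, the first half of rspq_map cycles 0,1,0,1,... over port 0's
 * queue sets and the second half cycles 2,3,2,3,... over port 1's, i.e.
 * port 1's entries are offset by nq0.
 */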
 
/*
 * If we have multiple receive queues per port serviced by NAPI we need one
 * netdevice per queue as NAPI operates on netdevices.  We already have one
 * netdevice, namely the one associated with the interface, so we use dummy
 * ones for any additional queues.  Note that these netdevices exist purely
 * so that NAPI has something to work with, they do not represent network
 * ports and are not registered.
 */
static int init_dummy_netdevs(struct adapter *adap)
{
        int i, j, dummy_idx = 0;
        struct net_device *nd;

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                const struct port_info *pi = netdev_priv(dev);

                for (j = 0; j < pi->nqsets - 1; j++) {
                        if (!adap->dummy_netdev[dummy_idx]) {
                                nd = alloc_netdev(0, "", ether_setup);
                                if (!nd)
                                        goto free_all;

                                nd->priv = adap;
                                nd->weight = 64;
                                set_bit(__LINK_STATE_START, &nd->state);
                                adap->dummy_netdev[dummy_idx] = nd;
                        }
                        strcpy(adap->dummy_netdev[dummy_idx]->name, dev->name);
                        dummy_idx++;
                }
        }
        return 0;

free_all:
        while (--dummy_idx >= 0) {
                free_netdev(adap->dummy_netdev[dummy_idx]);
                adap->dummy_netdev[dummy_idx] = NULL;
        }
        return -ENOMEM;
}
 
/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
        int i;
        struct net_device *dev;

        for_each_port(adap, i) {
                dev = adap->port[i];
                while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
                        msleep(1);
        }

        for (i = 0; i < ARRAY_SIZE(adap->dummy_netdev); i++) {
                dev = adap->dummy_netdev[i];
                if (dev)
                        while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
                                msleep(1);
        }
}
 
/**
 *      setup_sge_qsets - configure SGE Tx/Rx/response queues
 *      @adap: the adapter
 *
 *      Determines how many sets of SGE queues to use and initializes them.
 *      We support multiple queue sets per port if we have MSI-X, otherwise
 *      just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
        int i, j, err, irq_idx = 0, qset_idx = 0, dummy_dev_idx = 0;
        unsigned int ntxq = SGE_TXQ_PER_SET;

        if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
                irq_idx = -1;

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                const struct port_info *pi = netdev_priv(dev);

                for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
                        err = t3_sge_alloc_qset(adap, qset_idx, 1,
                                (adap->flags & USING_MSIX) ? qset_idx + 1 :
                                                             irq_idx,
                                &adap->params.sge.qset[qset_idx], ntxq,
                                j == 0 ? dev :
                                         adap->dummy_netdev[dummy_dev_idx++]);
                        if (err) {
                                t3_free_sge_resources(adap);
                                return err;
                        }
                }
        }

        return 0;
}
 
static ssize_t attr_show(struct device *d, struct device_attribute *attr,
                         char *buf,
                         ssize_t(*format) (struct net_device *, char *))
{
        ssize_t len;

        /* Synchronize with ioctls that may shut down the device */
        rtnl_lock();
        len = (*format) (to_net_dev(d), buf);
        rtnl_unlock();
        return len;
}
 
static ssize_t attr_store(struct device *d, struct device_attribute *attr,
                          const char *buf, size_t len,
                          ssize_t(*set) (struct net_device *, unsigned int),
                          unsigned int min_val, unsigned int max_val)
{
        char *endp;
        ssize_t ret;
        unsigned int val;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        val = simple_strtoul(buf, &endp, 0);
        if (endp == buf || val < min_val || val > max_val)
                return -EINVAL;

        rtnl_lock();
        ret = (*set) (to_net_dev(d), val);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}
 
#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
        struct adapter *adap = dev->priv; \
        return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return attr_show(d, attr, buf, format_##name); \
}
 
static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
        struct adapter *adap = dev->priv;
        int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val && adap->params.rev == 0)
                return -EINVAL;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
            min_tids)
                return -EINVAL;
        adap->params.mc5.nfilters = val;
        return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, attr, buf, len, set_nfilters, 0, ~0);
}
 
static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
        struct adapter *adap = dev->priv;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
            MC5_MIN_TIDS)
                return -EINVAL;
        adap->params.mc5.nservers = val;
        return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, attr, buf, len, set_nservers, 0, ~0);
}
 
#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
        &dev_attr_cam_size.attr,
        &dev_attr_nfilters.attr,
        &dev_attr_nservers.attr,
        NULL
};

static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
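
/*
 * Registration sketch (the actual call site is outside this excerpt): the
 * attributes show up under the port's sysfs directory once the group is
 * attached to the net device's kobject, e.g.
 *
 *      sysfs_create_group(&adapter->port[0]->dev.kobj, &cxgb3_attr_group);
 */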
 
static ssize_t tm_attr_show(struct device *d, struct device_attribute *attr,
                            char *buf, int sched)
{
        ssize_t len;
        unsigned int v, addr, bpt, cpt;
        struct adapter *adap = to_net_dev(d)->priv;

        addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
        rtnl_lock();
        t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
        v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
        if (sched & 1)
                v >>= 16;
        bpt = (v >> 8) & 0xff;
        cpt = v & 0xff;
        if (!cpt)
                len = sprintf(buf, "disabled\n");
        else {
                v = (adap->params.vpd.cclk * 1000) / cpt;
                len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
        }
        rtnl_unlock();
        return len;
}
 
static ssize_t tm_attr_store(struct device *d, struct device_attribute *attr,
                             const char *buf, size_t len, int sched)
{
        char *endp;
        ssize_t ret;
        unsigned int val;
        struct adapter *adap = to_net_dev(d)->priv;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        val = simple_strtoul(buf, &endp, 0);
        if (endp == buf || val > 10000000)
                return -EINVAL;

        rtnl_lock();
        ret = t3_config_sched(adap, val, sched);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return tm_attr_show(d, attr, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
                            const char *buf, size_t len) \
{ \
        return tm_attr_store(d, attr, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
 
TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
        &dev_attr_sched0.attr,
        &dev_attr_sched1.attr,
        &dev_attr_sched2.attr,
        &dev_attr_sched3.attr,
        &dev_attr_sched4.attr,
        &dev_attr_sched5.attr,
        &dev_attr_sched6.attr,
        &dev_attr_sched7.attr,
        NULL
};

static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
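
/*
 * Usage illustration (interface name assumed): once offload_open() below
 * registers offload_attr_group on the port's kobject, scheduler 0's rate
 * can be queried from user space with
 *
 *      cat /sys/class/net/eth0/sched0
 *
 * which prints either "disabled" or the configured rate in Kbps.
 */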
 
/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
        int ret;

        local_bh_disable();
        ret = t3_offload_tx(tdev, skb);
        local_bh_enable();
        return ret;
}
 
static int write_smt_entry(struct adapter *adapter, int idx)
{
        struct cpl_smt_write_req *req;
        struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

        if (!skb)
                return -ENOMEM;

        req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
        req->mtu_idx = NMTUS - 1;       /* should be 0 but there's a T3 bug */
        req->iff = idx;
        memset(req->src_mac1, 0, sizeof(req->src_mac1));
        memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
        skb->priority = 1;
        offload_tx(&adapter->tdev, skb);
        return 0;
}

static int init_smt(struct adapter *adapter)
{
        int i;

        for_each_port(adapter, i)
            write_smt_entry(adapter, i);
        return 0;
}
 
static void init_port_mtus(struct adapter *adapter)
{
        unsigned int mtus = adapter->port[0]->mtu;

        if (adapter->port[1])
                mtus |= adapter->port[1]->mtu << 16;
        t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}
 
static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
                              int hi, int port)
{
        struct sk_buff *skb;
        struct mngt_pktsched_wr *req;

        skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
        req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
        req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
        req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
        req->sched = sched;
        req->idx = qidx;
        req->min = lo;
        req->max = hi;
        req->binding = port;
        t3_mgmt_tx(adap, skb);
}
 
static void bind_qsets(struct adapter *adap)
{
        int i, j;

        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                for (j = 0; j < pi->nqsets; ++j)
                        send_pktsched_cmd(adap, 1, pi->first_qset + j, -1,
                                          -1, i);
        }
}
 
#define FW_FNAME "t3fw-%d.%d.%d.bin"

static int upgrade_fw(struct adapter *adap)
{
        int ret;
        char buf[64];
        const struct firmware *fw;
        struct device *dev = &adap->pdev->dev;

        snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
                 FW_VERSION_MINOR, FW_VERSION_MICRO);
        ret = request_firmware(&fw, buf, dev);
        if (ret < 0) {
                dev_err(dev, "could not upgrade firmware: unable to load %s\n",
                        buf);
                return ret;
        }
        ret = t3_load_fw(adap, fw->data, fw->size);
        release_firmware(fw);
        return ret;
}
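
/*
 * For example, with FW_VERSION_MAJOR/MINOR/MICRO of 4, 0 and 0 (values
 * illustrative), the request above asks userspace for "t3fw-4.0.0.bin",
 * typically served out of /lib/firmware by the firmware helper.
 */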
 
/**
 *      cxgb_up - enable the adapter
 *      @adapter: adapter being enabled
 *
 *      Called when the first port is enabled, this function performs the
 *      actions necessary to make an adapter operational, such as completing
 *      the initialization of HW modules, and enabling interrupts.
 *
 *      Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
        int err = 0;

        if (!(adap->flags & FULL_INIT_DONE)) {
                err = t3_check_fw_version(adap);
                if (err == -EINVAL)
                        err = upgrade_fw(adap);
                if (err)
                        goto out;

                err = init_dummy_netdevs(adap);
                if (err)
                        goto out;

                err = t3_init_hw(adap, 0);
                if (err)
                        goto out;

                t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

                err = setup_sge_qsets(adap);
                if (err)
                        goto out;

                setup_rss(adap);
                adap->flags |= FULL_INIT_DONE;
        }

        t3_intr_clear(adap);

        if (adap->flags & USING_MSIX) {
                name_msix_vecs(adap);
                err = request_irq(adap->msix_info[0].vec,
                                  t3_async_intr_handler, 0,
                                  adap->msix_info[0].desc, adap);
                if (err)
                        goto irq_err;

                if (request_msix_data_irqs(adap)) {
                        free_irq(adap->msix_info[0].vec, adap);
                        goto irq_err;
                }
        } else if ((err = request_irq(adap->pdev->irq,
                                      t3_intr_handler(adap,
                                                      adap->sge.qs[0].rspq.
                                                      polling),
                                      (adap->flags & USING_MSI) ?
                                       0 : IRQF_SHARED,
                                      adap->name, adap)))
                goto irq_err;

        t3_sge_start(adap);
        t3_intr_enable(adap);

        if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
                bind_qsets(adap);
        adap->flags |= QUEUES_BOUND;

out:
        return err;
irq_err:
        CH_ERR(adap, "request_irq failed, err %d\n", err);
        goto out;
}
 
/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
        t3_sge_stop(adapter);
        spin_lock_irq(&adapter->work_lock);     /* sync with PHY intr task */
        t3_intr_disable(adapter);
        spin_unlock_irq(&adapter->work_lock);

        if (adapter->flags & USING_MSIX) {
                int i, n = 0;

                free_irq(adapter->msix_info[0].vec, adapter);
                for_each_port(adapter, i)
                    n += adap2pinfo(adapter, i)->nqsets;

                for (i = 0; i < n; ++i)
                        free_irq(adapter->msix_info[i + 1].vec,
                                 &adapter->sge.qs[i]);
        } else
                free_irq(adapter->pdev->irq, adapter);

        flush_workqueue(cxgb3_wq);      /* wait for external IRQ handler */
        quiesce_rx(adapter);
}
 
static void schedule_chk_task(struct adapter *adap)
{
        unsigned int timeo;

        timeo = adap->params.linkpoll_period ?
            (HZ * adap->params.linkpoll_period) / 10 :
            adap->params.stats_update_period * HZ;
        if (timeo)
                queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}
 
static int offload_open(struct net_device *dev)
{
        struct adapter *adapter = dev->priv;
        struct t3cdev *tdev = T3CDEV(dev);
        int adap_up = adapter->open_device_map & PORT_MASK;
        int err = 0;

        if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        if (!adap_up && (err = cxgb_up(adapter)) < 0)
                return err;

        t3_tp_set_offload_mode(adapter, 1);
        tdev->lldev = adapter->port[0];
        err = cxgb3_offload_activate(adapter);
        if (err)
                goto out;

        init_port_mtus(adapter);
        t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
                     adapter->params.b_wnd,
                     adapter->params.rev == 0 ?
                     adapter->port[0]->mtu : 0xffff);
        init_smt(adapter);

        /* Never mind if the next step fails */
        sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group);

        /* Call back all registered clients */
        cxgb3_add_clients(tdev);

out:
        /* restore them in case the offload module has changed them */
        if (err) {
                t3_tp_set_offload_mode(adapter, 0);
                clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
                cxgb3_set_dummy_ops(tdev);
        }
        return err;
}
 
static int offload_close(struct t3cdev *tdev)
{
        struct adapter *adapter = tdev2adap(tdev);

        if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        /* Call back all registered clients */
        cxgb3_remove_clients(tdev);

        sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

        tdev->lldev = NULL;
        cxgb3_set_dummy_ops(tdev);
        t3_tp_set_offload_mode(adapter, 0);
        clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

        if (!adapter->open_device_map)
                cxgb_down(adapter);

        cxgb3_offload_deactivate(adapter);
        return 0;
}
 
static int cxgb_open(struct net_device *dev)
{
        int err;
        struct adapter *adapter = dev->priv;
        struct port_info *pi = netdev_priv(dev);
        int other_ports = adapter->open_device_map & PORT_MASK;

        if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
                return err;

        set_bit(pi->port_id, &adapter->open_device_map);
        if (is_offload(adapter) && !ofld_disable) {
                err = offload_open(dev);
                if (err)
                        printk(KERN_WARNING
                               "Could not initialize offload capabilities\n");
        }

        link_start(dev);
        t3_port_intr_enable(adapter, pi->port_id);
        netif_start_queue(dev);
        if (!other_ports)
                schedule_chk_task(adapter);

        return 0;
}
 
static int cxgb_close(struct net_device *dev)
{
        struct adapter *adapter = dev->priv;
        struct port_info *p = netdev_priv(dev);

        t3_port_intr_disable(adapter, p->port_id);
        netif_stop_queue(dev);
        p->phy.ops->power_down(&p->phy, 1);
        netif_carrier_off(dev);
        t3_mac_disable(&p->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

        spin_lock(&adapter->work_lock); /* sync with update task */
        clear_bit(p->port_id, &adapter->open_device_map);
        spin_unlock(&adapter->work_lock);

        if (!(adapter->open_device_map & PORT_MASK))
                cancel_rearming_delayed_workqueue(cxgb3_wq,
                                                  &adapter->adap_check_task);

        if (!adapter->open_device_map)
                cxgb_down(adapter);

        return 0;
}
 
static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
        struct adapter *adapter = dev->priv;
        struct port_info *p = netdev_priv(dev);
        struct net_device_stats *ns = &p->netstats;
        const struct mac_stats *pstats;

        spin_lock(&adapter->stats_lock);
        pstats = t3_mac_update_stats(&p->mac);
        spin_unlock(&adapter->stats_lock);

        ns->tx_bytes = pstats->tx_octets;
        ns->tx_packets = pstats->tx_frames;
        ns->rx_bytes = pstats->rx_octets;
        ns->rx_packets = pstats->rx_frames;
        ns->multicast = pstats->rx_mcast_frames;

        ns->tx_errors = pstats->tx_underrun;
        ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
            pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
            pstats->rx_fifo_ovfl;

        /* detailed rx_errors */
        ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
        ns->rx_over_errors = 0;
        ns->rx_crc_errors = pstats->rx_fcs_errs;
        ns->rx_frame_errors = pstats->rx_symbol_errs;
        ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
        ns->rx_missed_errors = pstats->rx_cong_drops;

        /* detailed tx_errors */
        ns->tx_aborted_errors = 0;
        ns->tx_carrier_errors = 0;
        ns->tx_fifo_errors = pstats->tx_underrun;
        ns->tx_heartbeat_errors = 0;
        ns->tx_window_errors = 0;
        return ns;
}
 
static u32 get_msglevel(struct net_device *dev)
{
        struct adapter *adapter = dev->priv;

        return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
        struct adapter *adapter = dev->priv;

        adapter->msg_enable = val;
}
 
static char stats_strings[][ETH_GSTRING_LEN] = {
        "TxOctetsOK         ",
        "TxFramesOK         ",
        "TxMulticastFramesOK",
        "TxBroadcastFramesOK",
        "TxPauseFrames      ",
        "TxUnderrun         ",
        "TxExtUnderrun      ",

        "TxFrames64         ",
        "TxFrames65To127    ",
        "TxFrames128To255   ",
        "TxFrames256To511   ",
        "TxFrames512To1023  ",
        "TxFrames1024To1518 ",
        "TxFrames1519ToMax  ",

        "RxOctetsOK         ",
        "RxFramesOK         ",
        "RxMulticastFramesOK",
        "RxBroadcastFramesOK",
        "RxPauseFrames      ",
        "RxFCSErrors        ",
        "RxSymbolErrors     ",
        "RxShortErrors      ",
        "RxJabberErrors     ",
        "RxLengthErrors     ",
        "RxFIFOoverflow     ",

        "RxFrames64         ",
        "RxFrames65To127    ",
        "RxFrames128To255   ",
        "RxFrames256To511   ",
        "RxFrames512To1023  ",
        "RxFrames1024To1518 ",
        "RxFrames1519ToMax  ",

        "PhyFIFOErrors      ",
        "TSO                ",
        "VLANextractions    ",
        "VLANinsertions     ",
        "TxCsumOffload      ",
        "RxCsumGood         ",
        "RxDrops            ",

        "CheckTXEnToggled   ",
        "CheckResets        ",
};
 
static int get_stats_count(struct net_device *dev)
{
        return ARRAY_SIZE(stats_strings);
}

#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
        return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
        return EEPROMSIZE;
}
 
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        u32 fw_vers = 0;
        struct adapter *adapter = dev->priv;

        t3_get_fw_version(adapter, &fw_vers);

        strcpy(info->driver, DRV_NAME);
        strcpy(info->version, DRV_VERSION);
        strcpy(info->bus_info, pci_name(adapter->pdev));
        if (!fw_vers)
                strcpy(info->fw_version, "N/A");
        else {
                snprintf(info->fw_version, sizeof(info->fw_version),
                         "%s %u.%u.%u",
                         G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
                         G_FW_VERSION_MAJOR(fw_vers),
                         G_FW_VERSION_MINOR(fw_vers),
                         G_FW_VERSION_MICRO(fw_vers));
        }
}
 
static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        if (stringset == ETH_SS_STATS)
                memcpy(data, stats_strings, sizeof(stats_strings));
}
 
static unsigned long collect_sge_port_stats(struct adapter *adapter,
                                            struct port_info *p, int idx)
{
        int i;
        unsigned long tot = 0;

        for (i = 0; i < p->nqsets; ++i)
                tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
        return tot;
}
 
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
                      u64 *data)
{
        struct adapter *adapter = dev->priv;
        struct port_info *pi = netdev_priv(dev);
        const struct mac_stats *s;

        spin_lock(&adapter->stats_lock);
        s = t3_mac_update_stats(&pi->mac);
        spin_unlock(&adapter->stats_lock);

        *data++ = s->tx_octets;
        *data++ = s->tx_frames;
        *data++ = s->tx_mcast_frames;
        *data++ = s->tx_bcast_frames;
        *data++ = s->tx_pause;
        *data++ = s->tx_underrun;
        *data++ = s->tx_fifo_urun;

        *data++ = s->tx_frames_64;
        *data++ = s->tx_frames_65_127;
        *data++ = s->tx_frames_128_255;
        *data++ = s->tx_frames_256_511;
        *data++ = s->tx_frames_512_1023;
        *data++ = s->tx_frames_1024_1518;
        *data++ = s->tx_frames_1519_max;

        *data++ = s->rx_octets;
        *data++ = s->rx_frames;
        *data++ = s->rx_mcast_frames;
        *data++ = s->rx_bcast_frames;
        *data++ = s->rx_pause;
        *data++ = s->rx_fcs_errs;
        *data++ = s->rx_symbol_errs;
        *data++ = s->rx_short;
        *data++ = s->rx_jabber;
        *data++ = s->rx_too_long;
        *data++ = s->rx_fifo_ovfl;

        *data++ = s->rx_frames_64;
        *data++ = s->rx_frames_65_127;
        *data++ = s->rx_frames_128_255;
        *data++ = s->rx_frames_256_511;
        *data++ = s->rx_frames_512_1023;
        *data++ = s->rx_frames_1024_1518;
        *data++ = s->rx_frames_1519_max;

        *data++ = pi->phy.fifo_errors;

        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
        *data++ = s->rx_cong_drops;

        *data++ = s->num_toggled;
        *data++ = s->num_resets;
}
 
static inline void reg_block_dump(struct adapter *ap, void *buf,
                                  unsigned int start, unsigned int end)
{
        u32 *p = buf + start;

        for (; start <= end; start += sizeof(u32))
                *p++ = t3_read_reg(ap, start);
}
 
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
                     void *buf)
{
        struct adapter *ap = dev->priv;

        /*
         * Version scheme:
         * bits 0..9: chip version
         * bits 10..15: chip revision
         * bit 31: set for PCIe cards
         */
        regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);

        /*
         * We skip the MAC statistics registers because they are clear-on-read.
         * Also reading multi-register stats would need to synchronize with the
         * periodic mac stats accumulation.  Hard to justify the complexity.
         */
        memset(buf, 0, T3_REGMAP_SIZE);
        reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
        reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
        reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
        reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
        reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
        reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
                       XGM_REG(A_XGM_SERDES_STAT3, 1));
        reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
                       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}
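
/*
 * Decoding sketch for the version word set above: the chip version lives
 * in bits 0..9, the revision in bits 10..15, and the PCIe flag in bit 31:
 *
 *      unsigned int chip_ver = regs->version & 0x3ff;
 *      unsigned int chip_rev = (regs->version >> 10) & 0x3f;
 *      unsigned int is_pcie_card = (regs->version >> 31) & 1;
 */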
 
static int restart_autoneg(struct net_device *dev)
{
        struct port_info *p = netdev_priv(dev);

        if (!netif_running(dev))
                return -EAGAIN;
        if (p->link_config.autoneg != AUTONEG_ENABLE)
                return -EINVAL;
        p->phy.ops->autoneg_restart(&p->phy);
        return 0;
}
 
static int cxgb3_phys_id(struct net_device *dev, u32 data)
{
        int i;
        struct adapter *adapter = dev->priv;

        if (data == 0)
                data = 2;

        for (i = 0; i < data * 2; i++) {
                t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
                                 (i & 1) ? F_GPIO0_OUT_VAL : 0);
                if (msleep_interruptible(500))
                        break;
        }
        t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
                         F_GPIO0_OUT_VAL);
        return 0;
}
 
static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct port_info *p = netdev_priv(dev);

        cmd->supported = p->link_config.supported;
        cmd->advertising = p->link_config.advertising;

        if (netif_carrier_ok(dev)) {
                cmd->speed = p->link_config.speed;
                cmd->duplex = p->link_config.duplex;
        } else {
                cmd->speed = -1;
                cmd->duplex = -1;
        }

        cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
        cmd->phy_address = p->phy.addr;
        cmd->transceiver = XCVR_EXTERNAL;
        cmd->autoneg = p->link_config.autoneg;
        return 0;
}
 
static int speed_duplex_to_caps(int speed, int duplex)
{
        int cap = 0;

        switch (speed) {
        case SPEED_10:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_10baseT_Full;
                else
                        cap = SUPPORTED_10baseT_Half;
                break;
        case SPEED_100:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_100baseT_Full;
                else
                        cap = SUPPORTED_100baseT_Half;
                break;
        case SPEED_1000:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_1000baseT_Full;
                else
                        cap = SUPPORTED_1000baseT_Half;
                break;
        case SPEED_10000:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_10000baseT_Full;
        }
        return cap;
}
 
#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
                      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
                      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
                      ADVERTISED_10000baseT_Full)
 
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct port_info *p = netdev_priv(dev);
        struct link_config *lc = &p->link_config;

        if (!(lc->supported & SUPPORTED_Autoneg))
                return -EOPNOTSUPP;     /* can't change speed/duplex */

        if (cmd->autoneg == AUTONEG_DISABLE) {
                int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

                if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
                        return -EINVAL;
                lc->requested_speed = cmd->speed;
                lc->requested_duplex = cmd->duplex;
                lc->advertising = 0;
        } else {
                cmd->advertising &= ADVERTISED_MASK;
                cmd->advertising &= lc->supported;
                if (!cmd->advertising)
                        return -EINVAL;
                lc->requested_speed = SPEED_INVALID;
                lc->requested_duplex = DUPLEX_INVALID;
                lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
        }
        lc->autoneg = cmd->autoneg;
        if (netif_running(dev))
                t3_link_start(&p->phy, &p->mac, lc);
        return 0;
}
 
static void get_pauseparam(struct net_device *dev,
                           struct ethtool_pauseparam *epause)
{
        struct port_info *p = netdev_priv(dev);

        epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
        epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
        epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}
 
static int set_pauseparam(struct net_device *dev,
                          struct ethtool_pauseparam *epause)
{
        struct port_info *p = netdev_priv(dev);
        struct link_config *lc = &p->link_config;

        if (epause->autoneg == AUTONEG_DISABLE)
                lc->requested_fc = 0;
        else if (lc->supported & SUPPORTED_Autoneg)
                lc->requested_fc = PAUSE_AUTONEG;
        else
                return -EINVAL;

        if (epause->rx_pause)
                lc->requested_fc |= PAUSE_RX;
        if (epause->tx_pause)
                lc->requested_fc |= PAUSE_TX;
        if (lc->autoneg == AUTONEG_ENABLE) {
                if (netif_running(dev))
                        t3_link_start(&p->phy, &p->mac, lc);
        } else {
                lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
                if (netif_running(dev))
                        t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
        }
        return 0;
}
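
/*
 * From user space this path is reached via "ethtool -A", e.g.
 *
 *      ethtool -A eth0 autoneg off rx on tx off
 *
 * (interface name illustrative).
 */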
 
static u32 get_rx_csum(struct net_device *dev)
{
        struct port_info *p = netdev_priv(dev);

        return p->rx_csum_offload;
}

static int set_rx_csum(struct net_device *dev, u32 data)
{
        struct port_info *p = netdev_priv(dev);

        p->rx_csum_offload = data;
        return 0;
}
 
static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
        const struct adapter *adapter = dev->priv;
        const struct port_info *pi = netdev_priv(dev);
        const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];

        e->rx_max_pending = MAX_RX_BUFFERS;
        e->rx_mini_max_pending = 0;
        e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
        e->tx_max_pending = MAX_TXQ_ENTRIES;

        e->rx_pending = q->fl_size;
        e->rx_mini_pending = q->rspq_size;
        e->rx_jumbo_pending = q->jumbo_size;
        e->tx_pending = q->txq_size[0];
}
 
static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
        int i;
        struct qset_params *q;
        struct adapter *adapter = dev->priv;
        const struct port_info *pi = netdev_priv(dev);

        if (e->rx_pending > MAX_RX_BUFFERS ||
            e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
            e->tx_pending > MAX_TXQ_ENTRIES ||
            e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
            e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
            e->rx_pending < MIN_FL_ENTRIES ||
            e->rx_jumbo_pending < MIN_FL_ENTRIES ||
            e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
                return -EINVAL;

        if (adapter->flags & FULL_INIT_DONE)
                return -EBUSY;

        q = &adapter->params.sge.qset[pi->first_qset];
        for (i = 0; i < pi->nqsets; ++i, ++q) {
                q->rspq_size = e->rx_mini_pending;
                q->fl_size = e->rx_pending;
                q->jumbo_size = e->rx_jumbo_pending;
                q->txq_size[0] = e->tx_pending;
                q->txq_size[1] = e->tx_pending;
                q->txq_size[2] = e->tx_pending;
        }
        return 0;
}
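
/*
 * Reached from user space via "ethtool -G", e.g.
 *
 *      ethtool -G eth0 rx 1024 rx-jumbo 1024 tx 1024
 *
 * (interface name illustrative).  Note the -EBUSY above: ring sizes can
 * only be changed before the adapter is fully initialized.
 */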
 
static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
        struct adapter *adapter = dev->priv;
        struct qset_params *qsp = &adapter->params.sge.qset[0];
        struct sge_qset *qs = &adapter->sge.qs[0];

        if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
                return -EINVAL;

        qsp->coalesce_usecs = c->rx_coalesce_usecs;
        t3_update_qset_coalesce(qs, qsp);
        return 0;
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
        struct adapter *adapter = dev->priv;
        struct qset_params *q = adapter->params.sge.qset;

        c->rx_coalesce_usecs = q->coalesce_usecs;
        return 0;
}
 
static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
                      u8 *data)
{
        int i, err = 0;
        struct adapter *adapter = dev->priv;

        u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        e->magic = EEPROM_MAGIC;
        for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
                err = t3_seeprom_read(adapter, i, (u32 *)&buf[i]);

        if (!err)
                memcpy(data, buf + e->offset, e->len);
        kfree(buf);
        return err;
}
 
static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
                      u8 *data)
{
        u8 *buf;
        int err = 0;
        u32 aligned_offset, aligned_len, *p;
        struct adapter *adapter = dev->priv;

        if (eeprom->magic != EEPROM_MAGIC)
                return -EINVAL;

        aligned_offset = eeprom->offset & ~3;
        aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

        if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
                buf = kmalloc(aligned_len, GFP_KERNEL);
                if (!buf)
                        return -ENOMEM;
                err = t3_seeprom_read(adapter, aligned_offset, (u32 *) buf);
                if (!err && aligned_len > 4)
                        err = t3_seeprom_read(adapter,
                                              aligned_offset + aligned_len - 4,
                                              (u32 *)&buf[aligned_len - 4]);
                if (err)
                        goto out;
                memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
        } else
                buf = data;

        err = t3_seeprom_wp(adapter, 0);
        if (err)
                goto out;

        for (p = (u32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
                err = t3_seeprom_write(adapter, aligned_offset, *p);
                aligned_offset += 4;
        }

        if (!err)
                err = t3_seeprom_wp(adapter, 1);
out:
        if (buf != data)
                kfree(buf);
        return err;
}
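
/*
 * Note the EEPROM_MAGIC gate in set_eeprom(): user-space writes must quote
 * the magic, e.g. "ethtool -E eth0 magic 0x38E2F10C offset 0 value 0xff"
 * (interface and values illustrative), or the request fails with -EINVAL.
 */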
 
static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
        wol->supported = 0;
        wol->wolopts = 0;
        memset(&wol->sopass, 0, sizeof(wol->sopass));
}
 
static const struct ethtool_ops cxgb_ethtool_ops = {
        .get_settings = get_settings,
        .set_settings = set_settings,
        .get_drvinfo = get_drvinfo,
        .get_msglevel = get_msglevel,
        .set_msglevel = set_msglevel,
        .get_ringparam = get_sge_param,
        .set_ringparam = set_sge_param,
        .get_coalesce = get_coalesce,
        .set_coalesce = set_coalesce,
        .get_eeprom_len = get_eeprom_len,
        .get_eeprom = get_eeprom,
        .set_eeprom = set_eeprom,
        .get_pauseparam = get_pauseparam,
        .set_pauseparam = set_pauseparam,
        .get_rx_csum = get_rx_csum,
        .set_rx_csum = set_rx_csum,
        .get_tx_csum = ethtool_op_get_tx_csum,
        .set_tx_csum = ethtool_op_set_tx_csum,
        .get_sg = ethtool_op_get_sg,
        .set_sg = ethtool_op_set_sg,
        .get_link = ethtool_op_get_link,
        .get_strings = get_strings,
        .phys_id = cxgb3_phys_id,
        .nway_reset = restart_autoneg,
        .get_stats_count = get_stats_count,
        .get_ethtool_stats = get_stats,
        .get_regs_len = get_regs_len,
        .get_regs = get_regs,
        .get_wol = get_wol,
        .get_tso = ethtool_op_get_tso,
        .set_tso = ethtool_op_set_tso,
        .get_perm_addr = ethtool_op_get_perm_addr
};
 
static int in_range(int val, int lo, int hi)
{
        return val < 0 || (val <= hi && val >= lo);
}
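
/*
 * Note: in_range() deliberately treats negative values as valid; the
 * extension ioctl below uses -1 in a field to mean "leave this parameter
 * unchanged", so only non-negative values are range-checked.
 */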
 
1594 static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 
1598         struct adapter *adapter = dev->priv;
 
1600         if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
 
1604         case CHELSIO_SET_QSET_PARAMS:{
 
1606                 struct qset_params *q;
 
1607                 struct ch_qset_params t;
 
1609                 if (!capable(CAP_NET_ADMIN))
 
1611                 if (copy_from_user(&t, useraddr, sizeof(t)))
 
1613                 if (t.qset_idx >= SGE_QSETS)
 
1615                 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
 
1616                         !in_range(t.cong_thres, 0, 255) ||
 
1617                         !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
 
1619                         !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
 
1621                         !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
 
1622                                 MAX_CTRL_TXQ_ENTRIES) ||
 
1623                         !in_range(t.fl_size[0], MIN_FL_ENTRIES,
 
1625                         || !in_range(t.fl_size[1], MIN_FL_ENTRIES,
 
1626                                         MAX_RX_JUMBO_BUFFERS)
 
1627                         || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
 
1630                 if ((adapter->flags & FULL_INIT_DONE) &&
 
1631                         (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
 
1632                         t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
 
1633                         t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
 
1634                         t.polling >= 0 || t.cong_thres >= 0))
 
1637                 q = &adapter->params.sge.qset[t.qset_idx];
 
1639                 if (t.rspq_size >= 0)
 
1640                         q->rspq_size = t.rspq_size;
 
1641                 if (t.fl_size[0] >= 0)
 
1642                         q->fl_size = t.fl_size[0];
 
1643                 if (t.fl_size[1] >= 0)
 
1644                         q->jumbo_size = t.fl_size[1];
 
1645                 if (t.txq_size[0] >= 0)
 
1646                         q->txq_size[0] = t.txq_size[0];
 
1647                 if (t.txq_size[1] >= 0)
 
1648                         q->txq_size[1] = t.txq_size[1];
 
1649                 if (t.txq_size[2] >= 0)
 
1650                         q->txq_size[2] = t.txq_size[2];
 
1651                 if (t.cong_thres >= 0)
 
1652                         q->cong_thres = t.cong_thres;
 
1653                 if (t.intr_lat >= 0) {
 
1654                         struct sge_qset *qs =
 
1655                                 &adapter->sge.qs[t.qset_idx];
 
1657                         q->coalesce_usecs = t.intr_lat;
 
1658                         t3_update_qset_coalesce(qs, q);
 
1660                 if (t.polling >= 0) {
 
1661                         if (adapter->flags & USING_MSIX)
 
1662                                 q->polling = t.polling;
 
1664                                 /* No polling with INTx for T3A */
 
1665                                 if (adapter->params.rev == 0 &&
 
1666                                         !(adapter->flags & USING_MSI))
 
1669                                 for (i = 0; i < SGE_QSETS; i++) {
 
1670                                         q = &adapter->params.sge.
 
1672                                         q->polling = t.polling;
 
1678         case CHELSIO_GET_QSET_PARAMS:{
 
1679                 struct qset_params *q;
 
1680                 struct ch_qset_params t;
 
1682                 if (copy_from_user(&t, useraddr, sizeof(t)))
 
1684                 if (t.qset_idx >= SGE_QSETS)
 
1687                 q = &adapter->params.sge.qset[t.qset_idx];
 
1688                 t.rspq_size = q->rspq_size;
 
1689                 t.txq_size[0] = q->txq_size[0];
 
1690                 t.txq_size[1] = q->txq_size[1];
 
1691                 t.txq_size[2] = q->txq_size[2];
 
1692                 t.fl_size[0] = q->fl_size;
 
1693                 t.fl_size[1] = q->jumbo_size;
 
1694                 t.polling = q->polling;
 
1695                 t.intr_lat = q->coalesce_usecs;
 
1696                 t.cong_thres = q->cong_thres;
 
1698                 if (copy_to_user(useraddr, &t, sizeof(t)))
 
1702         case CHELSIO_SET_QSET_NUM:{
 
1703                 struct ch_reg edata;
 
1704                 struct port_info *pi = netdev_priv(dev);
 
1705                 unsigned int i, first_qset = 0, other_qsets = 0;
 
1707                 if (!capable(CAP_NET_ADMIN))
 
1709                 if (adapter->flags & FULL_INIT_DONE)
 
1711                 if (copy_from_user(&edata, useraddr, sizeof(edata)))
 
1713                 if (edata.val < 1 ||
 
1714                         (edata.val > 1 && !(adapter->flags & USING_MSIX)))
 
1717                 for_each_port(adapter, i)
 
1718                         if (adapter->port[i] && adapter->port[i] != dev)
 
1719                                 other_qsets += adap2pinfo(adapter, i)->nqsets;
 
1721                 if (edata.val + other_qsets > SGE_QSETS)
 
1724                 pi->nqsets = edata.val;
 
1726                 for_each_port(adapter, i)
 
1727                         if (adapter->port[i]) {
 
1728                                 pi = adap2pinfo(adapter, i);
 
1729                                 pi->first_qset = first_qset;
 
1730                                 first_qset += pi->nqsets;
 
1734         case CHELSIO_GET_QSET_NUM:{
 
1735                 struct ch_reg edata;
 
1736                 struct port_info *pi = netdev_priv(dev);
 
1738                 edata.cmd = CHELSIO_GET_QSET_NUM;
 
1739                 edata.val = pi->nqsets;
 
1740                 if (copy_to_user(useraddr, &edata, sizeof(edata)))
 
1744         case CHELSIO_LOAD_FW:{
 
1746                 struct ch_mem_range t;
 
1748                 if (!capable(CAP_NET_ADMIN))
 
1750                 if (copy_from_user(&t, useraddr, sizeof(t)))
 
1753                 fw_data = kmalloc(t.len, GFP_KERNEL);
 
1758                         (fw_data, useraddr + sizeof(t), t.len)) {
 
1763                 ret = t3_load_fw(adapter, fw_data, t.len);
 
1769         case CHELSIO_SETMTUTAB:{
 
1773                 if (!is_offload(adapter))
 
1775                 if (!capable(CAP_NET_ADMIN))
 
1777                 if (offload_running(adapter))
 
1779                 if (copy_from_user(&m, useraddr, sizeof(m)))
 
1781                 if (m.nmtus != NMTUS)
 
1783                 if (m.mtus[0] < 81)     /* accommodate SACK */
 
1786                 /* MTUs must be in ascending order */
 
1787                 for (i = 1; i < NMTUS; ++i)
 
1788                         if (m.mtus[i] < m.mtus[i - 1])
 
1791                 memcpy(adapter->params.mtus, m.mtus,
 
1792                         sizeof(adapter->params.mtus));
 
1795         case CHELSIO_GET_PM:{
 
1796                 struct tp_params *p = &adapter->params.tp;
 
1797                 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
 
1799                 if (!is_offload(adapter))
 
1801                 m.tx_pg_sz = p->tx_pg_size;
 
1802                 m.tx_num_pg = p->tx_num_pgs;
 
1803                 m.rx_pg_sz = p->rx_pg_size;
 
1804                 m.rx_num_pg = p->rx_num_pgs;
 
1805                 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
 
1806                 if (copy_to_user(useraddr, &m, sizeof(m)))
 
1810         case CHELSIO_SET_PM:{
 
1812                 struct tp_params *p = &adapter->params.tp;
 
1814                 if (!is_offload(adapter))
 
1816                 if (!capable(CAP_NET_ADMIN))
 
1818                 if (adapter->flags & FULL_INIT_DONE)
 
1820                 if (copy_from_user(&m, useraddr, sizeof(m)))
 
1822                 if (!is_power_of_2(m.rx_pg_sz) ||
 
1823                         !is_power_of_2(m.tx_pg_sz))
 
1824                         return -EINVAL; /* not power of 2 */
 
1825                 if (!(m.rx_pg_sz & 0x14000))
 
1826                         return -EINVAL; /* not 16KB or 64KB */
 
1827                 if (!(m.tx_pg_sz & 0x1554000))
 
1829                 if (m.tx_num_pg == -1)
 
1830                         m.tx_num_pg = p->tx_num_pgs;
 
1831                 if (m.rx_num_pg == -1)
 
1832                         m.rx_num_pg = p->rx_num_pgs;
 
1833                 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
 
1835                 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
 
1836                         m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
 
1838                 p->rx_pg_size = m.rx_pg_sz;
 
1839                 p->tx_pg_size = m.tx_pg_sz;
 
1840                 p->rx_num_pgs = m.rx_num_pg;
 
1841                 p->tx_num_pgs = m.tx_num_pg;
 
1844         case CHELSIO_GET_MEM:{
 
1845                 struct ch_mem_range t;

1846                 struct mc7 *mem;

1847                 u64 buf[32];
 
1849                 if (!is_offload(adapter))
 
1851                 if (!(adapter->flags & FULL_INIT_DONE))
 
1852                         return -EIO;    /* need the memory controllers */
 
1853                 if (copy_from_user(&t, useraddr, sizeof(t)))
 
1855                 if ((t.addr & 7) || (t.len & 7))
 
1857                 if (t.mem_id == MEM_CM)

1858                         mem = &adapter->cm;
 
1859                 else if (t.mem_id == MEM_PMRX)
 
1860                         mem = &adapter->pmrx;
 
1861                 else if (t.mem_id == MEM_PMTX)
 
1862                         mem = &adapter->pmtx;
 
1863                 else

1864                         return -EINVAL;

1866                 /*

1867                  * Version scheme:

1868                  * bits 0..9: chip version

1869                  * bits 10..15: chip revision

1870                  */

1871                 t.version = 3 | (adapter->params.rev << 10);
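/*
 * A user-space consumer of this ioctl can take the word apart with
 * the matching masks, e.g. (illustrative snippet):
 *
 *	unsigned int chip = t.version & 0x3ff;           bits 0..9
 *	unsigned int rev  = (t.version >> 10) & 0x3f;    bits 10..15
 */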
 
1872                 if (copy_to_user(useraddr, &t, sizeof(t)))
 
1875                 /*

1876                  * Read 256 bytes at a time as len can be large and we don't

1877                  * want to use huge intermediate buffers.

1878                  */
 
1879                 useraddr += sizeof(t);  /* advance to start of buffer */
 
1880                 while (t.len) {

1881                         unsigned int chunk =

1882                                 min_t(unsigned int, t.len, sizeof(buf));
 
1884                         ret =

1885                                 t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,

1886                                                buf);
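/*
 * t3_mc7_bd_read() (a "backdoor" MC7 read) transfers whole 64-bit
 * words, which is why addr and len are divided by 8 here and why the
 * (t.addr & 7) || (t.len & 7) test above rejects requests that are
 * not 8-byte aligned.
 */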
 
1889                         if (copy_to_user(useraddr, buf, chunk))
 
1897         case CHELSIO_SET_TRACE_FILTER:{
 
1899                 const struct trace_params *tp;
 
1901                 if (!capable(CAP_NET_ADMIN))
 
1903                 if (!offload_running(adapter))
 
1905                 if (copy_from_user(&t, useraddr, sizeof(t)))
 
1908                 tp = (const struct trace_params *)&t.sip;
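/*
 * This cast relies on struct ch_trace being laid out so that its
 * members from .sip onward mirror struct trace_params field for
 * field; the driver reinterprets that region in place rather than
 * copying it.  Any change to either structure must keep the two
 * layouts in sync.
 */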
 
1909                 if (t.config_tx)

1910                         t3_config_trace_filter(adapter, tp, 0,

1911                                                t.invert_match,

1912                                                t.trace_tx);

1913                 if (t.config_rx)

1914                         t3_config_trace_filter(adapter, tp, 1,

1915                                                t.invert_match,

1916                                                t.trace_rx);
 
1925 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
 
1926 {

1927         int ret, mmd;

1928         struct adapter *adapter = dev->priv;
 
1929         struct port_info *pi = netdev_priv(dev);
 
1930         struct mii_ioctl_data *data = if_mii(req);
 
1932         switch (cmd) {

1933         case SIOCGMIIPHY:

1934                 data->phy_id = pi->phy.addr;

1935                 /* FALLTHRU */

1937         case SIOCGMIIREG:{
 
1938                 struct cphy *phy = &pi->phy;

1939                 unsigned int val = 0;
 
1940                 if (!phy->mdio_read)
 
1942                 if (is_10G(adapter)) {
 
1943                         mmd = data->phy_id >> 8;
 
1944                         if (!mmd)

1945                                 mmd = MDIO_DEV_PCS;

1946                         else if (mmd > MDIO_DEV_XGXS)

1947                                 return -EINVAL;
 
1949                         ret =

1950                                 phy->mdio_read(adapter, data->phy_id & 0x1f,

1951                                                 mmd, data->reg_num, &val);

1952                 } else

1953                         ret =

1954                                 phy->mdio_read(adapter, data->phy_id & 0x1f,

1955                                                 0, data->reg_num & 0x1f,

1956                                                 &val);

1957                 if (!ret)

1958                         data->val_out = val;
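/*
 * Two MDIO addressing schemes meet here: 10G PHYs use clause-45
 * style addressing, with the MMD (MDIO manageable device) packed
 * into bits 8+ of phy_id and the port address in the low 5 bits,
 * while 1G PHYs use plain clause-22 access with a 5-bit register
 * number (hence the reg_num & 0x1f).
 */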
 
1961         case SIOCSMIIREG:{

1962                 struct cphy *phy = &pi->phy;
 
1964                 if (!capable(CAP_NET_ADMIN))
 
1966                 if (!phy->mdio_write)
 
1968                 if (is_10G(adapter)) {
 
1969                         mmd = data->phy_id >> 8;
 
1970                         if (!mmd)

1971                                 mmd = MDIO_DEV_PCS;

1972                         else if (mmd > MDIO_DEV_XGXS)

1973                                 return -EINVAL;
 
1975                         ret =

1976                                 phy->mdio_write(adapter,

1977                                                 data->phy_id & 0x1f, mmd,

1978                                                 data->reg_num, data->val_in);

1980                 } else

1981                         ret =

1982                                 phy->mdio_write(adapter,

1983                                                 data->phy_id & 0x1f, 0,

1984                                                 data->reg_num & 0x1f,

1985                                                 data->val_in);
 
1988         case SIOCCHIOCTL:

1989                 return cxgb_extension_ioctl(dev, req->ifr_data);
 
1996 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
 
1999         struct adapter *adapter = dev->priv;
 
2000         struct port_info *pi = netdev_priv(dev);
 
2002         if (new_mtu < 81)       /* accommodate SACK */
 
2004         if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
 
2005                 return ret;

2006         dev->mtu = new_mtu;

2007         init_port_mtus(adapter);
 
2008         if (adapter->params.rev == 0 && offload_running(adapter))
 
2009                 t3_load_mtus(adapter, adapter->params.mtus,
 
2010                              adapter->params.a_wnd, adapter->params.b_wnd,
 
2011                              adapter->port[0]->mtu);
 
2015 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
 
2017         struct adapter *adapter = dev->priv;
 
2018         struct port_info *pi = netdev_priv(dev);
 
2019         struct sockaddr *addr = p;
 
2021         if (!is_valid_ether_addr(addr->sa_data))
 
2024         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
 
2025         t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
 
2026         if (offload_running(adapter))
 
2027                 write_smt_entry(adapter, pi->port_id);
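/*
 * When offload is active the MAC address lives in two places: the
 * MAC itself and the adapter's SMT (source MAC table), which
 * offloaded connections use on egress; the write_smt_entry() call
 * keeps the second copy in step.
 */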
 
2032  * t3_synchronize_rx - wait for current Rx processing on a port to complete
 
2033  * @adap: the adapter
 
2036  * Ensures that current Rx processing on any of the queues associated with
 
2037  * the given port completes before returning.  We do this by acquiring and
 
2038  * releasing the locks of the response queues associated with the port.
 
2040 static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
 
2044         for (i = 0; i < p->nqsets; i++) {
 
2045                 struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;
 
2047                 spin_lock_irq(&q->lock);
 
2048                 spin_unlock_irq(&q->lock);
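/*
 * This works because response-queue processing runs with the queue's
 * lock held: once every lock has been acquired and released, any
 * handler that started before this call has finished, so the caller
 * may safely tear down per-port Rx state (see vlan_rx_register()
 * below).
 */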
 
2052 static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
 
2054         struct adapter *adapter = dev->priv;
 
2055         struct port_info *pi = netdev_priv(dev);
 
2057         pi->vlan_grp = grp;

2058         if (adapter->params.rev > 0)

2059                 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);

2060         else {

2061                 /* single control for all ports */
 
2062                 unsigned int i, have_vlans = 0;
 
2063                 for_each_port(adapter, i)
 
2064                     have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
 
2066                 t3_set_vlan_accel(adapter, 1, have_vlans);
 
2068         t3_synchronize_rx(adapter, pi);
 
2071 #ifdef CONFIG_NET_POLL_CONTROLLER
 
2072 static void cxgb_netpoll(struct net_device *dev)
 
2074         struct adapter *adapter = dev->priv;
 
2075         struct port_info *pi = netdev_priv(dev);

2076         int qidx;
 
2078         for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
 
2079                 struct sge_qset *qs = &adapter->sge.qs[qidx];
 
2080                 void *source;

2082                 if (adapter->flags & USING_MSIX)

2083                         source = qs;

2084                 else

2085                         source = adapter;
 
2087                 t3_intr_handler(adapter, qs->rspq.polling) (0, source);
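/*
 * t3_intr_handler() returns the handler the driver registered for
 * the current interrupt mode (MSI-X, MSI, or legacy pin; polling or
 * not), and it is called directly with the dev_id cookie a real
 * interrupt would carry: the queue set under MSI-X, the adapter
 * otherwise.
 */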
 
2092 #define TPSRAM_NAME "t3%c_protocol_sram-%d.%d.%d.bin"
 
2093 int update_tpsram(struct adapter *adap)
 
2095         const struct firmware *tpsram;

2096         char buf[64];

2097         struct device *dev = &adap->pdev->dev;

2098         int ret;

2099         char rev;
 
2101         rev = adap->params.rev == T3_REV_B2 ? 'b' : 'a';
 
2103         snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
 
2104                  TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
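/*
 * For example, a rev-B2 part with TP microcode 1.0.0 would request
 * "t3b_protocol_sram-1.0.0.bin" (version numbers illustrative; the
 * real ones come from TP_VERSION_MAJOR/MINOR/MICRO).
 */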
 
2106         ret = request_firmware(&tpsram, buf, dev);
 
2107         if (ret < 0) {

2108                 dev_err(dev, "could not load TP SRAM: unable to load %s\n",

2109                         buf);

2110                 return ret;
 
2113         ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
 
2115                 goto release_tpsram;
 
2117         ret = t3_set_proto_sram(adap, tpsram->data);
 
2118         if (ret)

2119                 dev_err(dev, "loading protocol SRAM failed\n");
 
2121 release_tpsram:

2122         release_firmware(tpsram);
 
2129  * Periodic accumulation of MAC statistics.
 
2131 static void mac_stats_update(struct adapter *adapter)
 
2135         for_each_port(adapter, i) {
 
2136                 struct net_device *dev = adapter->port[i];
 
2137                 struct port_info *p = netdev_priv(dev);
 
2139                 if (netif_running(dev)) {
 
2140                         spin_lock(&adapter->stats_lock);
 
2141                         t3_mac_update_stats(&p->mac);
 
2142                         spin_unlock(&adapter->stats_lock);
 
2147 static void check_link_status(struct adapter *adapter)
 
2151         for_each_port(adapter, i) {
 
2152                 struct net_device *dev = adapter->port[i];
 
2153                 struct port_info *p = netdev_priv(dev);
 
2155                 if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
 
2156                         t3_link_changed(adapter, i);
 
2160 static void check_t3b2_mac(struct adapter *adapter)
 
2164         if (!rtnl_trylock())    /* synchronize with ifdown */
 
2167         for_each_port(adapter, i) {
 
2168                 struct net_device *dev = adapter->port[i];
 
2169                 struct port_info *p = netdev_priv(dev);

2170                 int status;

2172                 if (!netif_running(dev))

2173                         continue;

2175                 status = 0;

2176                 if (netif_running(dev) && netif_carrier_ok(dev))
 
2177                         status = t3b2_mac_watchdog_task(&p->mac);
 
2178                 if (status == 1)

2179                         p->mac.stats.num_toggled++;
 
2180                 else if (status == 2) {
 
2181                         struct cmac *mac = &p->mac;
 
2183                         t3_mac_set_mtu(mac, dev->mtu);
 
2184                         t3_mac_set_address(mac, 0, dev->dev_addr);
 
2185                         cxgb_set_rxmode(dev);
 
2186                         t3_link_start(&p->phy, mac, &p->link_config);
 
2187                         t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
 
2188                         t3_port_intr_enable(adapter, p->port_id);
 
2189                         p->mac.stats.num_resets++;
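/*
 * The watchdog's return value drives recovery: 1 appears to mean the
 * MAC was merely toggled (only a counter is bumped), while 2 means
 * the MAC was reset and must be reprogrammed from scratch: MTU,
 * address, Rx mode, link, and port interrupts.
 */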
 
2196 static void t3_adap_check_task(struct work_struct *work)
 
2198         struct adapter *adapter = container_of(work, struct adapter,
 
2199                                                adap_check_task.work);
 
2200         const struct adapter_params *p = &adapter->params;
 
2202         adapter->check_task_cnt++;
 
2204         /* Check link status for PHYs without interrupts */
 
2205         if (p->linkpoll_period)
 
2206                 check_link_status(adapter);
 
2208         /* Accumulate MAC stats if needed */
 
2209         if (!p->linkpoll_period ||
 
2210             (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
 
2211             p->stats_update_period) {
 
2212                 mac_stats_update(adapter);
 
2213                 adapter->check_task_cnt = 0;
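/*
 * linkpoll_period is evidently in tenths of a second (note the /10):
 * the task runs at the link-poll rate and folds in MAC stats only
 * once enough ticks add up to stats_update_period seconds.  E.g.
 * with linkpoll_period = 7 (0.7s) and stats_update_period = 5 (5s),
 * stats are gathered roughly every 8th run.
 */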
 
2216         if (p->rev == T3_REV_B2)
 
2217                 check_t3b2_mac(adapter);
 
2219         /* Schedule the next check update if any port is active. */
 
2220         spin_lock(&adapter->work_lock);
 
2221         if (adapter->open_device_map & PORT_MASK)
 
2222                 schedule_chk_task(adapter);
 
2223         spin_unlock(&adapter->work_lock);
 
2227  * Processes external (PHY) interrupts in process context.
 
2229 static void ext_intr_task(struct work_struct *work)
 
2231         struct adapter *adapter = container_of(work, struct adapter,
 
2232                                                ext_intr_handler_task);
 
2234         t3_phy_intr_handler(adapter);
 
2236         /* Now reenable external interrupts */
 
2237         spin_lock_irq(&adapter->work_lock);
 
2238         if (adapter->slow_intr_mask) {
 
2239                 adapter->slow_intr_mask |= F_T3DBG;
 
2240                 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
 
2241                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
 
2242                              adapter->slow_intr_mask);
 
2244         spin_unlock_irq(&adapter->work_lock);
 
2248  * Interrupt-context handler for external (PHY) interrupts.
 
2250 void t3_os_ext_intr_handler(struct adapter *adapter)
 
2253          * Schedule a task to handle external interrupts as they may be slow
 
2254          * and we use a mutex to protect MDIO registers.  We disable PHY
 
2255          * interrupts in the meantime and let the task reenable them when
 
2258         spin_lock(&adapter->work_lock);
 
2259         if (adapter->slow_intr_mask) {
 
2260                 adapter->slow_intr_mask &= ~F_T3DBG;
 
2261                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
 
2262                              adapter->slow_intr_mask);
 
2263                 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
 
2265         spin_unlock(&adapter->work_lock);
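/*
 * The handshake between the two routines above: the interrupt-time
 * handler clears F_T3DBG from slow_intr_mask (masking further PHY
 * interrupts) and queues the task; the task services the PHY, sets
 * F_T3DBG again, acknowledges the latched cause bit, and rewrites
 * the enable register.  work_lock orders the mask updates, and a
 * zero slow_intr_mask presumably means interrupts are being torn
 * down, so neither side touches the hardware then.
 */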
 
2268 void t3_fatal_err(struct adapter *adapter)
 
2270         unsigned int fw_status[4];
 
2272         if (adapter->flags & FULL_INIT_DONE) {
 
2273                 t3_sge_stop(adapter);
 
2274                 t3_intr_disable(adapter);
 
2276         CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
 
2277         if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
 
2278                 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
 
2279                          fw_status[0], fw_status[1],
 
2280                          fw_status[2], fw_status[3]);
 
2284 static int __devinit cxgb_enable_msix(struct adapter *adap)
 
2286         struct msix_entry entries[SGE_QSETS + 1];
 
2289         for (i = 0; i < ARRAY_SIZE(entries); ++i)
 
2290                 entries[i].entry = i;
 
2292         err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
 
2293         if (!err) {

2294                 for (i = 0; i < ARRAY_SIZE(entries); ++i)
 
2295                         adap->msix_info[i].vec = entries[i].vector;
 
2296         } else if (err > 0)

2297                 dev_info(&adap->pdev->dev,
 
2298                        "only %d MSI-X vectors left, not using MSI-X\n", err);
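/*
 * One MSI-X vector is requested per queue set, plus one extra that
 * presumably carries the non-data (slow path) interrupts, hence
 * SGE_QSETS + 1.  pci_enable_msix() is all-or-nothing here: a
 * positive return reports how many vectors were actually available,
 * and the driver falls back to MSI or legacy interrupts rather than
 * run with fewer vectors.
 */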
 
2302 static void __devinit print_port_info(struct adapter *adap,
 
2303                                       const struct adapter_info *ai)
 
2305         static const char *pci_variant[] = {
 
2306                 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
 
2312         if (adap->params.pci.variant == PCI_VARIANT_PCIE)

2313                 snprintf(buf, sizeof(buf), "%s x%d",
 
2314                          pci_variant[adap->params.pci.variant],
 
2315                          adap->params.pci.width);
 
2316         else

2317                 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
 
2318                          pci_variant[adap->params.pci.variant],
 
2319                          adap->params.pci.speed, adap->params.pci.width);
 
2321         for_each_port(adap, i) {
 
2322                 struct net_device *dev = adap->port[i];
 
2323                 const struct port_info *pi = netdev_priv(dev);
 
2325                 if (!test_bit(i, &adap->registered_device_map))
 
2327                 printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
 
2328                        dev->name, ai->desc, pi->port_type->desc,
 
2329                        is_offload(adap) ? "R" : "", adap->params.rev, buf,
 
2330                        (adap->flags & USING_MSIX) ? " MSI-X" :
 
2331                        (adap->flags & USING_MSI) ? " MSI" : "");
 
2332                 if (adap->name == dev->name && adap->params.vpd.mclk)
 
2333                         printk(KERN_INFO "%s: %uMB CM, %uMB PMTX, %uMB PMRX\n",
 
2334                                adap->name, t3_mc7_size(&adap->cm) >> 20,
 
2335                                t3_mc7_size(&adap->pmtx) >> 20,
 
2336                                t3_mc7_size(&adap->pmrx) >> 20);
 
2340 static int __devinit init_one(struct pci_dev *pdev,
 
2341                               const struct pci_device_id *ent)
 
2343         static int version_printed;
 
2345         int i, err, pci_using_dac = 0;
 
2346         unsigned long mmio_start, mmio_len;
 
2347         const struct adapter_info *ai;
 
2348         struct adapter *adapter = NULL;
 
2349         struct port_info *pi;
 
2351         if (!version_printed) {
 
2352                 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
 
2357                 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
 
2358                 if (!cxgb3_wq) {

2359                         printk(KERN_ERR DRV_NAME
 
2360                                ": cannot initialize work queue\n");
 
2365         err = pci_request_regions(pdev, DRV_NAME);
 
2366         if (err) {

2367                 /* Just info, some other driver may have claimed the device. */

2368                 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
 
2372         err = pci_enable_device(pdev);
 
2373         if (err) {

2374                 dev_err(&pdev->dev, "cannot enable PCI device\n");
 
2375                 goto out_release_regions;
 
2378         if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
 
2379                 pci_using_dac = 1;

2380                 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
 
2382                         dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
 
2383                                "coherent allocations\n");
 
2384                         goto out_disable_device;
 
2386         } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
 
2387                 dev_err(&pdev->dev, "no usable DMA configuration\n");
 
2388                 goto out_disable_device;
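/*
 * DMA-mask ladder: prefer full 64-bit masks (recording pci_using_dac
 * so NETIF_F_HIGHDMA can be set on the ports later), fall back to a
 * 32-bit streaming mask, and fail the probe only if even that is
 * unusable.
 */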
 
2391         pci_set_master(pdev);
 
2393         mmio_start = pci_resource_start(pdev, 0);
 
2394         mmio_len = pci_resource_len(pdev, 0);
 
2395         ai = t3_get_adapter_info(ent->driver_data);
 
2397         adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
 
2398         if (!adapter) {

2399                 err = -ENOMEM;

2400                 goto out_disable_device;
 
2403         adapter->regs = ioremap_nocache(mmio_start, mmio_len);
 
2404         if (!adapter->regs) {
 
2405                 dev_err(&pdev->dev, "cannot map device registers\n");
 
2406                 err = -ENOMEM;

2407                 goto out_free_adapter;
 
2410         adapter->pdev = pdev;
 
2411         adapter->name = pci_name(pdev);
 
2412         adapter->msg_enable = dflt_msg_enable;
 
2413         adapter->mmio_len = mmio_len;
 
2415         mutex_init(&adapter->mdio_lock);
 
2416         spin_lock_init(&adapter->work_lock);
 
2417         spin_lock_init(&adapter->stats_lock);
 
2419         INIT_LIST_HEAD(&adapter->adapter_list);
 
2420         INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
 
2421         INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
 
2423         for (i = 0; i < ai->nports; ++i) {
 
2424                 struct net_device *netdev;
 
2426                 netdev = alloc_etherdev(sizeof(struct port_info));
 
2432                 SET_MODULE_OWNER(netdev);
 
2433                 SET_NETDEV_DEV(netdev, &pdev->dev);
 
2435                 adapter->port[i] = netdev;
 
2436                 pi = netdev_priv(netdev);
 
2437                 pi->rx_csum_offload = 1;
 
2442                 netif_carrier_off(netdev);
 
2443                 netdev->irq = pdev->irq;
 
2444                 netdev->mem_start = mmio_start;
 
2445                 netdev->mem_end = mmio_start + mmio_len - 1;
 
2446                 netdev->priv = adapter;
 
2447                 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
 
2448                 netdev->features |= NETIF_F_LLTX;
 
2449                 if (pci_using_dac)

2450                         netdev->features |= NETIF_F_HIGHDMA;
 
2452                 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
 
2453                 netdev->vlan_rx_register = vlan_rx_register;
 
2455                 netdev->open = cxgb_open;
 
2456                 netdev->stop = cxgb_close;
 
2457                 netdev->hard_start_xmit = t3_eth_xmit;
 
2458                 netdev->get_stats = cxgb_get_stats;
 
2459                 netdev->set_multicast_list = cxgb_set_rxmode;
 
2460                 netdev->do_ioctl = cxgb_ioctl;
 
2461                 netdev->change_mtu = cxgb_change_mtu;
 
2462                 netdev->set_mac_address = cxgb_set_mac_addr;
 
2463 #ifdef CONFIG_NET_POLL_CONTROLLER
 
2464                 netdev->poll_controller = cxgb_netpoll;
 
2465 #endif

2466                 netdev->weight = 64;
 
2468                 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
 
2471         pci_set_drvdata(pdev, adapter->port[0]);
 
2472         if (t3_prep_adapter(adapter, ai, 1) < 0) {
 
2477         err = t3_check_tpsram_version(adapter);
 
2478         if (err == -EINVAL)

2479                 err = update_tpsram(adapter);
 
2485          * The card is now ready to go.  If any errors occur during device
 
2486          * registration we do not fail the whole card but rather proceed only
 
2487          * with the ports we manage to register successfully.  However we must
 
2488          * register at least one net device.
 
2490         for_each_port(adapter, i) {
 
2491                 err = register_netdev(adapter->port[i]);
 
2492                 if (err)

2493                         dev_warn(&pdev->dev,
 
2494                                  "cannot register net device %s, skipping\n",
 
2495                                  adapter->port[i]->name);
 
2496                 else {

2497                         /*

2498                          * Change the name we use for messages to the name of
 
2499                          * the first successfully registered interface.
 
2501                         if (!adapter->registered_device_map)
 
2502                                 adapter->name = adapter->port[i]->name;
 
2504                         __set_bit(i, &adapter->registered_device_map);
 
2507         if (!adapter->registered_device_map) {
 
2508                 dev_err(&pdev->dev, "could not register any net devices\n");
 
2512         /* Driver's ready. Reflect it on LEDs */
 
2513         t3_led_ready(adapter);
 
2515         if (is_offload(adapter)) {
 
2516                 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
 
2517                 cxgb3_adapter_ofld(adapter);
 
2520         /* See what interrupts we'll be using */
 
2521         if (msi > 1 && cxgb_enable_msix(adapter) == 0)
 
2522                 adapter->flags |= USING_MSIX;
 
2523         else if (msi > 0 && pci_enable_msi(pdev) == 0)
 
2524                 adapter->flags |= USING_MSI;
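/*
 * The "msi" module parameter selects the interrupt mode as a ladder:
 * msi > 1 tries MSI-X first, msi > 0 falls back to MSI, and anything
 * else leaves the device on legacy pin interrupts; each step is
 * tried only if the more capable one before it failed.
 */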
 
2526         err = sysfs_create_group(&adapter->port[0]->dev.kobj,

2527                                  &cxgb3_attr_group);
 
2529         print_port_info(adapter, ai);
 
2533         iounmap(adapter->regs);
 
2534         for (i = ai->nports - 1; i >= 0; --i)
 
2535                 if (adapter->port[i])
 
2536                         free_netdev(adapter->port[i]);
 
2542         pci_disable_device(pdev);
 
2543 out_release_regions:
 
2544         pci_release_regions(pdev);
 
2545         pci_set_drvdata(pdev, NULL);
 
2549 static void __devexit remove_one(struct pci_dev *pdev)
 
2551         struct net_device *dev = pci_get_drvdata(pdev);
 
2553         if (dev) {

2554                 int i;

2555                 struct adapter *adapter = dev->priv;
 
2557                 t3_sge_stop(adapter);
 
2558                 sysfs_remove_group(&adapter->port[0]->dev.kobj,

2559                                    &cxgb3_attr_group);
 
2561                 for_each_port(adapter, i)
 
2562                     if (test_bit(i, &adapter->registered_device_map))
 
2563                         unregister_netdev(adapter->port[i]);
 
2565                 if (is_offload(adapter)) {
 
2566                         cxgb3_adapter_unofld(adapter);
 
2567                         if (test_bit(OFFLOAD_DEVMAP_BIT,
 
2568                                      &adapter->open_device_map))
 
2569                                 offload_close(&adapter->tdev);
 
2572                 t3_free_sge_resources(adapter);
 
2573                 cxgb_disable_msi(adapter);
 
2575                 for (i = 0; i < ARRAY_SIZE(adapter->dummy_netdev); i++)
 
2576                         if (adapter->dummy_netdev[i]) {
 
2577                                 free_netdev(adapter->dummy_netdev[i]);
 
2578                                 adapter->dummy_netdev[i] = NULL;
 
2581                 for_each_port(adapter, i)
 
2582                         if (adapter->port[i])
 
2583                                 free_netdev(adapter->port[i]);
 
2585                 iounmap(adapter->regs);
 
2587                 pci_release_regions(pdev);
 
2588                 pci_disable_device(pdev);
 
2589                 pci_set_drvdata(pdev, NULL);
 
2593 static struct pci_driver driver = {

2594         .name = DRV_NAME,

2595         .id_table = cxgb3_pci_tbl,

2596         .probe = init_one,

2597         .remove = __devexit_p(remove_one),
 
2600 static int __init cxgb3_init_module(void)
 
2604         cxgb3_offload_init();
 
2606         ret = pci_register_driver(&driver);
 
2610 static void __exit cxgb3_cleanup_module(void)
 
2612         pci_unregister_driver(&driver);
 
2613         if (cxgb3_wq)

2614                 destroy_workqueue(cxgb3_wq);
 
2617 module_init(cxgb3_init_module);
 
2618 module_exit(cxgb3_cleanup_module);