/*
 * QLogic QLA3xxx NIC HBA Driver
 * Copyright (c)  2003-2006 QLogic Corporation
 *
 * See LICENSE.qla3xxx for copyright and licensing details.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>

#include "qla3xxx.h"
#define DRV_NAME	"qla3xxx"
#define DRV_STRING	"QLogic ISP3XXX Network Driver"
#define DRV_VERSION	"v2.03.00-k3"
#define PFX		DRV_NAME " "

static const char ql3xxx_driver_name[] = DRV_NAME;
static const char ql3xxx_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("QLogic Corporation");
MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
static const u32 default_msg
    = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;

static int debug = -1;		/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static int msi;
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");
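/*
 * Example (illustrative only): load the driver with MSI enabled and the
 * debug level raised.  Assuming the standard netif_msg_init()
 * interpretation, "debug" selects how many NETIF_MSG_* categories are
 * enabled, so 16 turns them all on:
 *
 *	modprobe qla3xxx msi=1 debug=16
 */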
static struct pci_device_id ql3xxx_pci_tbl[] __devinitdata = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl);
/*
 * Caller must take hw_lock.
 */
static int ql_sem_spinlock(struct ql3_adapter *qdev,
			   u32 sem_mask, u32 sem_bits)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	u32 value;
	unsigned int seconds = 3;

	do {
		writel((sem_mask | sem_bits),
		       &port_regs->CommonRegs.semaphoreReg);
		value = readl(&port_regs->CommonRegs.semaphoreReg);
		if ((value & (sem_mask >> 16)) == sem_bits)
			return 0;
		ssleep(1);
	} while (--seconds);
	return -1;
}
static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	writel(sem_mask, &port_regs->CommonRegs.semaphoreReg);
	readl(&port_regs->CommonRegs.semaphoreReg);
}
static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	u32 value;

	writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg);
	value = readl(&port_regs->CommonRegs.semaphoreReg);
	return ((value & (sem_mask >> 16)) == sem_bits);
}
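/*
 * Note: in the semaphore register the upper 16 bits of a write act as a
 * write-enable mask for the lower 16 bits (sem_mask arrives here already
 * shifted into the top half).  Reading the register back and comparing
 * against sem_bits verifies that we actually own the semaphore and did
 * not lose the race to the other function.
 */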
/*
 * Caller holds hw_lock.
 */
static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
{
	int i = 0;

	while (1) {
		if (!ql_sem_lock(qdev,
				 QL_DRVR_SEM_MASK,
				 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
				  * 2) << 1)) {
			if (i < 10) {
				ssleep(1);
				i++;
			} else {
				printk(KERN_ERR PFX "%s: Timed out waiting for "
				       "driver lock...\n",
				       qdev->ndev->name);
				return 0;
			}
		} else {
			printk(KERN_DEBUG PFX
			       "%s: driver lock acquired.\n",
			       qdev->ndev->name);
			return 1;
		}
	}
}
static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;

	writel(((ISP_CONTROL_NP_MASK << 16) | page),
	       &port_regs->CommonRegs.ispControlStatus);
	readl(&port_regs->CommonRegs.ispControlStatus);
	qdev->current_page = page;
}
static u32 ql_read_common_reg_l(struct ql3_adapter *qdev,
				u32 __iomem *reg)
{
	u32 value;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	value = readl(reg);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	return value;
}

static u32 ql_read_common_reg(struct ql3_adapter *qdev,
			      u32 __iomem *reg)
{
	return readl(reg);
}
static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	u32 value;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	value = readl(reg);

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return value;
}

static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	return readl(reg);
}
static void ql_write_common_reg_l(struct ql3_adapter *qdev,
				  u32 __iomem *reg, u32 value)
{
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	writel(value, reg);
	readl(reg);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
}

static void ql_write_common_reg(struct ql3_adapter *qdev,
				u32 __iomem *reg, u32 value)
{
	writel(value, reg);
	readl(reg);
}

static void ql_write_nvram_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	writel(value, reg);
	readl(reg);
	udelay(1);
}

static void ql_write_page0_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	writel(value, reg);
	readl(reg);
}
/*
 * Caller holds hw_lock. Only called during init.
 */
static void ql_write_page1_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 1)
		ql_set_register_page(qdev, 1);
	writel(value, reg);
	readl(reg);
}

/*
 * Caller holds hw_lock. Only called during init.
 */
static void ql_write_page2_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 2)
		ql_set_register_page(qdev, 2);
	writel(value, reg);
	readl(reg);
}
static void ql_disable_interrupts(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;

	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
			      (ISP_IMR_ENABLE_INT << 16));
}

static void ql_enable_interrupts(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;

	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
			      ((0xff << 16) | ISP_IMR_ENABLE_INT));
}
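/*
 * The interrupt mask register appears to follow the same upper-16-bit
 * write-enable convention: writing (ISP_IMR_ENABLE_INT << 16) with the
 * low bit clear disables completion interrupts, while writing the
 * enable bit together with the mask re-enables them.
 */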
static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
					    struct ql_rcv_buf_cb *lrg_buf_cb)
{
	dma_addr_t map;
	int err;

	lrg_buf_cb->next = NULL;

	if (qdev->lrg_buf_free_tail == NULL) {	/* The list is empty  */
		qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb;
	} else {
		qdev->lrg_buf_free_tail->next = lrg_buf_cb;
		qdev->lrg_buf_free_tail = lrg_buf_cb;
	}

	if (!lrg_buf_cb->skb) {
		lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
						   qdev->lrg_buffer_len);
		if (unlikely(!lrg_buf_cb->skb)) {
			printk(KERN_ERR PFX "%s: failed netdev_alloc_skb().\n",
			       qdev->ndev->name);
			qdev->lrg_buf_skb_check++;
		} else {
			/*
			 * We save some space to copy the ethhdr from first
			 * buffer
			 */
			skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
			map = pci_map_single(qdev->pdev,
					     lrg_buf_cb->skb->data,
					     qdev->lrg_buffer_len -
					     QL_HEADER_SPACE,
					     PCI_DMA_FROMDEVICE);
			err = pci_dma_mapping_error(map);
			if (err) {
				printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
				       qdev->ndev->name, err);
				dev_kfree_skb(lrg_buf_cb->skb);
				lrg_buf_cb->skb = NULL;

				qdev->lrg_buf_skb_check++;
				return;
			}

			lrg_buf_cb->buf_phy_addr_low =
			    cpu_to_le32(LS_64BITS(map));
			lrg_buf_cb->buf_phy_addr_high =
			    cpu_to_le32(MS_64BITS(map));
			pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
			pci_unmap_len_set(lrg_buf_cb, maplen,
					  qdev->lrg_buffer_len -
					  QL_HEADER_SPACE);
		}
	}

	qdev->lrg_buf_free_count++;
}
static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter
							   *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb;

	if ((lrg_buf_cb = qdev->lrg_buf_free_head) != NULL) {
		if ((qdev->lrg_buf_free_head = lrg_buf_cb->next) == NULL)
			qdev->lrg_buf_free_tail = NULL;
		qdev->lrg_buf_free_count--;
	}

	return lrg_buf_cb;
}
static u32 addrBits = EEPROM_NO_ADDR_BITS;
static u32 dataBits = EEPROM_NO_DATA_BITS;

static void fm93c56a_deselect(struct ql3_adapter *qdev);
static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
			    unsigned short *value);
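/*
 * The FM93C56A is a Microwire-style serial EEPROM, accessed by
 * bit-banging serialPortInterfaceReg: assert chip select
 * (fm93c56a_select), clock out a start bit, opcode, and address on the
 * DO line (fm93c56a_cmd), clock the data back in on the DI line
 * (fm93c56a_datain), and finally drop chip select (fm93c56a_deselect).
 */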
/*
 * Caller holds hw_lock.
 */
static void fm93c56a_select(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
}
/*
 * Caller holds hw_lock.
 */
static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
{
	int i;
	u32 mask;
	u32 dataBit;
	u32 previousBit;
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	/* Clock in a zero, then do the start bit */
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			   AUBURN_EEPROM_DO_1);
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			   AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE);
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			   AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL);

	mask = 1 << (FM93C56A_CMD_BITS - 1);
	/* Force the previous data bit to be different */
	previousBit = 0xffff;
	for (i = 0; i < FM93C56A_CMD_BITS; i++) {
		dataBit =
		    (cmd & mask) ? AUBURN_EEPROM_DO_1 : AUBURN_EEPROM_DO_0;
		if (previousBit != dataBit) {
			/*
			 * If the bit changed, then change the DO state to
			 * match
			 */
			ql_write_nvram_reg(qdev,
					   &port_regs->CommonRegs.
					   serialPortInterfaceReg,
					   ISP_NVRAM_MASK | qdev->
					   eeprom_cmd_data | dataBit);
			previousBit = dataBit;
		}
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK | qdev->
				   eeprom_cmd_data | dataBit |
				   AUBURN_EEPROM_CLK_RISE);
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK | qdev->
				   eeprom_cmd_data | dataBit |
				   AUBURN_EEPROM_CLK_FALL);
		cmd = cmd << 1;
	}

	mask = 1 << (addrBits - 1);
	/* Force the previous data bit to be different */
	previousBit = 0xffff;
	for (i = 0; i < addrBits; i++) {
		dataBit =
		    (eepromAddr & mask) ? AUBURN_EEPROM_DO_1 :
		    AUBURN_EEPROM_DO_0;
		if (previousBit != dataBit) {
			/*
			 * If the bit changed, then change the DO state to
			 * match
			 */
			ql_write_nvram_reg(qdev,
					   &port_regs->CommonRegs.
					   serialPortInterfaceReg,
					   ISP_NVRAM_MASK | qdev->
					   eeprom_cmd_data | dataBit);
			previousBit = dataBit;
		}
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK | qdev->
				   eeprom_cmd_data | dataBit |
				   AUBURN_EEPROM_CLK_RISE);
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK | qdev->
				   eeprom_cmd_data | dataBit |
				   AUBURN_EEPROM_CLK_FALL);
		eepromAddr = eepromAddr << 1;
	}
}
/*
 * Caller holds hw_lock.
 */
static void fm93c56a_deselect(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
}
/*
 * Caller holds hw_lock.
 */
static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
{
	int i;
	u32 data = 0;
	u32 dataBit;
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	/* Read the data bits */
	/* The first bit is a dummy.  Clock right over it. */
	for (i = 0; i < dataBits; i++) {
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   AUBURN_EEPROM_CLK_RISE);
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.
				   serialPortInterfaceReg,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   AUBURN_EEPROM_CLK_FALL);
		dataBit =
		    (ql_read_common_reg
		     (qdev,
		      &port_regs->CommonRegs.
		      serialPortInterfaceReg) & AUBURN_EEPROM_DI_1) ? 1 : 0;
		data = (data << 1) | dataBit;
	}
	*value = (u16)data;
}
/*
 * Caller holds hw_lock.
 */
static void eeprom_readword(struct ql3_adapter *qdev,
			    u32 eepromAddr, unsigned short *value)
{
	fm93c56a_select(qdev);
	fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr);
	fm93c56a_datain(qdev, value);
	fm93c56a_deselect(qdev);
}
static void ql_swap_mac_addr(u8 *macAddress)
{
#ifdef __BIG_ENDIAN
	u8 temp;
	temp = macAddress[0];
	macAddress[0] = macAddress[1];
	macAddress[1] = temp;
	temp = macAddress[2];
	macAddress[2] = macAddress[3];
	macAddress[3] = temp;
	temp = macAddress[4];
	macAddress[4] = macAddress[5];
	macAddress[5] = temp;
#endif
}
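/*
 * Example: the EEPROM is read as 16-bit words, so on a big-endian CPU a
 * MAC address stored as 00:c0:dd:11:22:33 would land in memory as
 * c0:00:11:dd:33:22; swapping each byte pair restores the original order.
 */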
static int ql_get_nvram_params(struct ql3_adapter *qdev)
{
	u16 *pEEPROMData;
	u16 checksum = 0;
	u32 index;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	pEEPROMData = (u16 *)&qdev->nvram_data;
	qdev->eeprom_cmd_data = 0;
	if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 10)) {
		printk(KERN_ERR PFX "%s: Failed ql_sem_spinlock().\n",
		       __func__);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return -1;
	}

	for (index = 0; index < EEPROM_SIZE; index++) {
		eeprom_readword(qdev, index, pEEPROMData);
		checksum += *pEEPROMData;
		pEEPROMData++;
	}
	ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);

	if (checksum != 0) {
		printk(KERN_ERR PFX "%s: checksum should be zero, is %x!!\n",
		       qdev->ndev->name, checksum);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return -1;
	}

	/*
	 * We have a problem with endianness for the MAC addresses
	 * and for the two 8-bit values, version and numPorts.  We
	 * have to swap them on big endian systems.
	 */
	ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn0.macAddress);
	ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn1.macAddress);
	ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn2.macAddress);
	ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn3.macAddress);
	pEEPROMData = (u16 *)&qdev->nvram_data.version;
	*pEEPROMData = le16_to_cpu(*pEEPROMData);

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return checksum;
}
static const u32 PHYAddr[2] = {
	PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS
};

static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 temp;
	int count = 1000;

	while (count) {
		temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg);
		if (!(temp & MAC_MII_STATUS_BSY))
			return 0;
		udelay(10);
		count--;
	}
	return -1;
}
static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 scanControl;

	if (qdev->numPorts > 1) {
		/* Auto scan will cycle through multiple ports */
		scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC;
	} else {
		scanControl = MAC_MII_CONTROL_SC;
	}

	/*
	 * Scan register 1 of PHY/PETBI; set up to scan both devices.
	 * The autoscan starts from the first register and completes
	 * the last one before rolling over to the first.
	 */
	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[0] | MII_SCAN_REGISTER);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (scanControl) |
			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16));
}
static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev)
{
	u8 scanWasEnabled = 0;
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	/* See if scan mode is enabled before we turn it off */
	if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) &
	    (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) {
		/* Scan is enabled */
		scanWasEnabled = 1;
	} else {
		/* Scan is disabled */
		scanWasEnabled = 0;
	}

	/*
	 * When disabling scan mode you must first change the MII register
	 * address
	 */
	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[0] | MII_SCAN_REGISTER);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS |
			     MAC_MII_CONTROL_RC) << 16));

	return scanWasEnabled;
}
static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
			       u16 regAddr, u16 value, u32 mac_index)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u8 scanWasEnabled;

	scanWasEnabled = ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[mac_index] | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

	/* Wait for write to complete 9/10/04 SJP */
	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free after issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	if (scanWasEnabled)
		ql_mii_enable_scan_mode(qdev);

	return 0;
}
static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
			      u16 *value, u32 mac_index)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u8 scanWasEnabled;
	u32 temp;

	scanWasEnabled = ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[mac_index] | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16));

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

	/* Wait for the read to complete */
	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free after issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
	*value = (u16)temp;

	if (scanWasEnabled)
		ql_mii_enable_scan_mode(qdev);

	return 0;
}
static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   qdev->PHYAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

	/* Wait for write to complete. */
	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free after issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	ql_mii_enable_scan_mode(qdev);

	return 0;
}
static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
{
	u32 temp;
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   qdev->PHYAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16));

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

	/* Wait for the read to complete */
	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free after issuing command.\n",
			       qdev->ndev->name);
		return -1;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
	*value = (u16)temp;

	ql_mii_enable_scan_mode(qdev);

	return 0;
}
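/*
 * Example usage (illustrative only): read the high word of the PHY ID
 * through the management interface.  MII_PHYSID1 is the generic register
 * offset from <linux/mii.h>.
 *
 *	u16 id;
 *
 *	if (ql_mii_read_reg(qdev, MII_PHYSID1, &id) == 0)
 *		printk(KERN_DEBUG PFX "%s: PHY ID1 = 0x%04x\n",
 *		       qdev->ndev->name, id);
 */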
static void ql_petbi_reset(struct ql3_adapter *qdev)
{
	ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET);
}

static void ql_petbi_start_neg(struct ql3_adapter *qdev)
{
	u16 reg;

	/* Enable Auto-negotiation sense */
	ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg);
	reg |= PETBI_TBI_AUTO_SENSE;
	ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg);

	ql_mii_write_reg(qdev, PETBI_NEG_ADVER,
			 PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX);

	ql_mii_write_reg(qdev, PETBI_CONTROL_REG,
			 PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
			 PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000);
}
static void ql_petbi_reset_ex(struct ql3_adapter *qdev, u32 mac_index)
{
	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET,
			    mac_index);
}

static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev, u32 mac_index)
{
	u16 reg;

	/* Enable Auto-negotiation sense */
	ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg, mac_index);
	reg |= PETBI_TBI_AUTO_SENSE;
	ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg, mac_index);

	ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER,
			    PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX, mac_index);

	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG,
			    PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
			    PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000,
			    mac_index);
}

static void ql_petbi_init(struct ql3_adapter *qdev)
{
	ql_petbi_reset(qdev);
	ql_petbi_start_neg(qdev);
}

static void ql_petbi_init_ex(struct ql3_adapter *qdev, u32 mac_index)
{
	ql_petbi_reset_ex(qdev, mac_index);
	ql_petbi_start_neg_ex(qdev, mac_index);
}
static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0)
		return 0;

	return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE;
}

static int ql_phy_get_speed(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
		return 0;

	reg = (((reg & 0x18) >> 3) & 3);

	if (reg == 2)
		return SPEED_1000;
	else if (reg == 1)
		return SPEED_100;
	else if (reg == 0)
		return SPEED_10;
	else
		return -1;
}

static int ql_is_full_dup(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
		return 0;

	return (reg & PHY_AUX_DUPLEX_STAT) != 0;
}

static int ql_is_phy_neg_pause(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0)
		return 0;

	return (reg & PHY_NEG_PAUSE) != 0;
}
/*
 * Caller holds hw_lock.
 */
static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16));
	else
		value = (MAC_CONFIG_REG_PE << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16));
	else
		value = (MAC_CONFIG_REG_SR << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16));
	else
		value = (MAC_CONFIG_REG_GM << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16));
	else
		value = (MAC_CONFIG_REG_FD << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 value;

	if (enable)
		value =
		    ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) |
		     ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16));
	else
		value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}
/*
 * Caller holds hw_lock.
 */
static int ql_is_fiber(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_SM0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_SM1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	return (temp & bitToCheck) != 0;
}
static int ql_is_auto_cfg(struct ql3_adapter *qdev)
{
	u16 reg;
	ql_mii_read_reg(qdev, 0x00, &reg);
	return (reg & 0x1000) != 0;
}
/*
 * Caller holds hw_lock.
 */
static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_AC0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_AC1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		if (netif_msg_link(qdev))
			printk(KERN_INFO PFX
			       "%s: Auto-Negotiate complete.\n",
			       qdev->ndev->name);
		return 1;
	} else {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Auto-Negotiate incomplete.\n",
			       qdev->ndev->name);
		return 0;
	}
}
/*
 *  ql_is_neg_pause() returns 1 if pause was negotiated to be on
 */
static int ql_is_neg_pause(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return ql_is_petbi_neg_pause(qdev);
	else
		return ql_is_phy_neg_pause(qdev);
}
static int ql_auto_neg_error(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_AE0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_AE1;
		break;
	}
	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	return (temp & bitToCheck) != 0;
}
static u32 ql_get_link_speed(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return SPEED_1000;
	else
		return ql_phy_get_speed(qdev);
}

static int ql_is_link_full_dup(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return 1;
	else
		return ql_is_full_dup(qdev);
}
/*
 * Caller holds hw_lock.
 */
static int ql_link_down_detect(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = ISP_CONTROL_LINK_DN_0;
		break;
	case 1:
		bitToCheck = ISP_CONTROL_LINK_DN_1;
		break;
	}

	temp =
	    ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
	return (temp & bitToCheck) != 0;
}
/*
 * Caller holds hw_lock.
 */
static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	switch (qdev->mac_index) {
	case 0:
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    (ISP_CONTROL_LINK_DN_0) |
				    (ISP_CONTROL_LINK_DN_0 << 16));
		break;

	case 1:
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    (ISP_CONTROL_LINK_DN_1) |
				    (ISP_CONTROL_LINK_DN_1 << 16));
		break;

	default:
		return -1;
	}

	return 0;
}
/*
 * Caller holds hw_lock.
 */
static int ql_this_adapter_controls_port(struct ql3_adapter *qdev,
					 u32 mac_index)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_F1_ENABLED;
		break;
	case 1:
		bitToCheck = PORT_STATUS_F3_ENABLED;
		break;
	default:
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		if (netif_msg_link(qdev))
			printk(KERN_DEBUG PFX
			       "%s: is not link master.\n", qdev->ndev->name);
		return 0;
	} else {
		if (netif_msg_link(qdev))
			printk(KERN_DEBUG PFX
			       "%s: is link master.\n", qdev->ndev->name);
		return 1;
	}
}
static void ql_phy_reset_ex(struct ql3_adapter *qdev, u32 mac_index)
{
	ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET, mac_index);
}

static void ql_phy_start_neg_ex(struct ql3_adapter *qdev, u32 mac_index)
{
	u16 reg;

	ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER,
			    PHY_NEG_PAUSE | PHY_NEG_ADV_SPEED | 1, mac_index);

	ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, mac_index);
	ql_mii_write_reg_ex(qdev, CONTROL_REG, reg | PHY_CTRL_RESTART_NEG,
			    mac_index);
}

static void ql_phy_init_ex(struct ql3_adapter *qdev, u32 mac_index)
{
	ql_phy_reset_ex(qdev, mac_index);
	ql_phy_start_neg_ex(qdev, mac_index);
}
/*
 * Caller holds hw_lock.
 */
static u32 ql_get_link_state(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp, linkState;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_UP0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_UP1;
		break;
	}
	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		linkState = LS_UP;
	} else {
		linkState = LS_DOWN;
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Link is down.\n", qdev->ndev->name);
	}
	return linkState;
}
static int ql_port_start(struct ql3_adapter *qdev)
{
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7)) {
		printk(KERN_ERR "%s: Could not get hw lock for GIO\n",
		       qdev->ndev->name);
		return -1;
	}

	if (ql_is_fiber(qdev)) {
		ql_petbi_init(qdev);
	} else {
		/* Copper port */
		ql_phy_init_ex(qdev, qdev->mac_index);
	}

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}
static int ql_finish_auto_neg(struct ql3_adapter *qdev)
{
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7))
		return -1;

	if (!ql_auto_neg_error(qdev)) {
		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
			/* configure the MAC */
			if (netif_msg_link(qdev))
				printk(KERN_DEBUG PFX
				       "%s: Configuring link.\n",
				       qdev->ndev->name);
			ql_mac_cfg_soft_reset(qdev, 1);
			ql_mac_cfg_gig(qdev,
				       (ql_get_link_speed(qdev) ==
					SPEED_1000));
			ql_mac_cfg_full_dup(qdev,
					    ql_is_link_full_dup(qdev));
			ql_mac_cfg_pause(qdev,
					 ql_is_neg_pause(qdev));
			ql_mac_cfg_soft_reset(qdev, 0);

			/* enable the MAC */
			if (netif_msg_link(qdev))
				printk(KERN_DEBUG PFX
				       "%s: Enabling mac.\n",
				       qdev->ndev->name);
			ql_mac_enable(qdev, 1);
		}

		if (netif_msg_link(qdev))
			printk(KERN_DEBUG PFX
			       "%s: Change port_link_state LS_DOWN to LS_UP.\n",
			       qdev->ndev->name);
		qdev->port_link_state = LS_UP;
		netif_start_queue(qdev->ndev);
		netif_carrier_on(qdev->ndev);
		if (netif_msg_link(qdev))
			printk(KERN_INFO PFX
			       "%s: Link is up at %d Mbps, %s duplex.\n",
			       qdev->ndev->name,
			       ql_get_link_speed(qdev),
			       ql_is_link_full_dup(qdev)
			       ? "full" : "half");

	} else {	/* Remote error detected */

		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
			if (netif_msg_link(qdev))
				printk(KERN_DEBUG PFX
				       "%s: Remote error detected. "
				       "Calling ql_port_start().\n",
				       qdev->ndev->name);
			/*
			 * ql_port_start() is shared code and needs
			 * to lock the PHY on its own.
			 */
			ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
			if (ql_port_start(qdev))	/* Restart port */
				return -1;
			else
				return 0;
		}
	}
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}
static void ql_link_state_machine(struct ql3_adapter *qdev)
{
	u32 curr_link_state;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	curr_link_state = ql_get_link_state(qdev);

	if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) {
		if (netif_msg_link(qdev))
			printk(KERN_INFO PFX
			       "%s: Reset in progress, skip processing link "
			       "state.\n", qdev->ndev->name);

		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return;
	}

	switch (qdev->port_link_state) {
	default:
		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
			ql_port_start(qdev);
		}
		qdev->port_link_state = LS_DOWN;
		/* Fall Through */

	case LS_DOWN:
		if (netif_msg_link(qdev))
			printk(KERN_DEBUG PFX
			       "%s: port_link_state = LS_DOWN.\n",
			       qdev->ndev->name);
		if (curr_link_state == LS_UP) {
			if (netif_msg_link(qdev))
				printk(KERN_DEBUG PFX
				       "%s: curr_link_state = LS_UP.\n",
				       qdev->ndev->name);
			if (ql_is_auto_neg_complete(qdev))
				ql_finish_auto_neg(qdev);

			if (qdev->port_link_state == LS_UP)
				ql_link_down_detect_clear(qdev);
		}
		break;

	case LS_UP:
		/*
		 * See if the link is currently down or went down and came
		 * back up
		 */
		if ((curr_link_state == LS_DOWN) || ql_link_down_detect(qdev)) {
			if (netif_msg_link(qdev))
				printk(KERN_INFO PFX "%s: Link is down.\n",
				       qdev->ndev->name);
			qdev->port_link_state = LS_DOWN;
		}
		break;
	}

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
}
/*
 * Caller must take hw_lock and QL_PHY_GIO_SEM.
 */
static void ql_get_phy_owner(struct ql3_adapter *qdev)
{
	if (ql_this_adapter_controls_port(qdev, qdev->mac_index))
		set_bit(QL_LINK_MASTER, &qdev->flags);
	else
		clear_bit(QL_LINK_MASTER, &qdev->flags);
}
/*
 * Caller must take hw_lock and QL_PHY_GIO_SEM.
 */
static void ql_init_scan_mode(struct ql3_adapter *qdev)
{
	ql_mii_enable_scan_mode(qdev);

	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
		if (ql_this_adapter_controls_port(qdev, qdev->mac_index))
			ql_petbi_init_ex(qdev, qdev->mac_index);
	} else {
		if (ql_this_adapter_controls_port(qdev, qdev->mac_index))
			ql_phy_init_ex(qdev, qdev->mac_index);
	}
}
/*
 * MII_Setup needs to be called before taking the PHY out of reset so that the
 * management interface clock speed can be set properly.  It would be better if
 * we had a way to disable MDC until after the PHY is out of reset, but we
 * don't have that capability.
 */
static int ql_mii_setup(struct ql3_adapter *qdev)
{
	u32 reg;
	struct ql3xxx_port_registers __iomem *port_regs =
			qdev->mem_map_registers;

	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7))
		return -1;

	if (qdev->device_id == QL3032_DEVICE_ID)
		ql_write_page0_reg(qdev,
			&port_regs->macMIIMgmtControlReg, 0x0f00000);

	/* Divide 125MHz clock by 28 to meet PHY timing requirements */
	reg = MAC_MII_CONTROL_CLK_SEL_DIV28;

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16));

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	return 0;
}
static u32 ql_supported_modes(struct ql3_adapter *qdev)
{
	u32 supported;

	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
		supported = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE
		    | SUPPORTED_Autoneg;
	} else {
		supported = SUPPORTED_10baseT_Half
		    | SUPPORTED_10baseT_Full
		    | SUPPORTED_100baseT_Half
		    | SUPPORTED_100baseT_Full
		    | SUPPORTED_1000baseT_Half
		    | SUPPORTED_1000baseT_Full
		    | SUPPORTED_Autoneg | SUPPORTED_TP;
	}

	return supported;
}
static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
{
	int status;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_is_auto_cfg(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

static u32 ql_get_speed(struct ql3_adapter *qdev)
{
	u32 status;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_get_link_speed(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

static int ql_get_full_dup(struct ql3_adapter *qdev)
{
	int status;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7)) {
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return 0;
	}
	status = ql_is_link_full_dup(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}
static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	ecmd->transceiver = XCVR_INTERNAL;
	ecmd->supported = ql_supported_modes(qdev);

	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
		ecmd->port = PORT_FIBRE;
	} else {
		ecmd->port = PORT_TP;
		ecmd->phy_address = qdev->PHYAddr;
	}
	ecmd->advertising = ql_supported_modes(qdev);
	ecmd->autoneg = ql_get_auto_cfg_status(qdev);
	ecmd->speed = ql_get_speed(qdev);
	ecmd->duplex = ql_get_full_dup(qdev);
	return 0;
}
static void ql_get_drvinfo(struct net_device *ndev,
			   struct ethtool_drvinfo *drvinfo)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	strncpy(drvinfo->driver, ql3xxx_driver_name, 32);
	strncpy(drvinfo->version, ql3xxx_driver_version, 32);
	strncpy(drvinfo->fw_version, "N/A", 32);
	strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
	drvinfo->n_stats = 0;
	drvinfo->testinfo_len = 0;
	drvinfo->regdump_len = 0;
	drvinfo->eedump_len = 0;
}
static u32 ql_get_msglevel(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	return qdev->msg_enable;
}

static void ql_set_msglevel(struct net_device *ndev, u32 value)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	qdev->msg_enable = value;
}
static const struct ethtool_ops ql3xxx_ethtool_ops = {
	.get_settings = ql_get_settings,
	.get_drvinfo = ql_get_drvinfo,
	.get_perm_addr = ethtool_op_get_perm_addr,
	.get_link = ethtool_op_get_link,
	.get_msglevel = ql_get_msglevel,
	.set_msglevel = ql_set_msglevel,
};
static int ql_populate_free_queue(struct ql3_adapter *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
	dma_addr_t map;
	int err;

	while (lrg_buf_cb) {
		if (!lrg_buf_cb->skb) {
			lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
							   qdev->lrg_buffer_len);
			if (unlikely(!lrg_buf_cb->skb)) {
				printk(KERN_DEBUG PFX
				       "%s: Failed netdev_alloc_skb().\n",
				       qdev->ndev->name);
				break;
			} else {
				/*
				 * We save some space to copy the ethhdr from
				 * first buffer
				 */
				skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
				map = pci_map_single(qdev->pdev,
						     lrg_buf_cb->skb->data,
						     qdev->lrg_buffer_len -
						     QL_HEADER_SPACE,
						     PCI_DMA_FROMDEVICE);

				err = pci_dma_mapping_error(map);
				if (err) {
					printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
					       qdev->ndev->name, err);
					dev_kfree_skb(lrg_buf_cb->skb);
					lrg_buf_cb->skb = NULL;
					break;
				}

				lrg_buf_cb->buf_phy_addr_low =
				    cpu_to_le32(LS_64BITS(map));
				lrg_buf_cb->buf_phy_addr_high =
				    cpu_to_le32(MS_64BITS(map));
				pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
				pci_unmap_len_set(lrg_buf_cb, maplen,
						  qdev->lrg_buffer_len -
						  QL_HEADER_SPACE);
				--qdev->lrg_buf_skb_check;
				if (!qdev->lrg_buf_skb_check)
					return 1;
			}
		}
		lrg_buf_cb = lrg_buf_cb->next;
	}
	return 0;
}
/*
 * Caller holds hw_lock.
 */
static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;

	if (qdev->small_buf_release_cnt >= 16) {
		while (qdev->small_buf_release_cnt >= 16) {
			qdev->small_buf_q_producer_index++;

			if (qdev->small_buf_q_producer_index ==
			    NUM_SBUFQ_ENTRIES)
				qdev->small_buf_q_producer_index = 0;
			qdev->small_buf_release_cnt -= 8;
		}
		writel(qdev->small_buf_q_producer_index,
		       &port_regs->CommonRegs.rxSmallQProducerIndex);
	}
}
/*
 * Caller holds hw_lock.
 */
static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
{
	struct bufq_addr_element *lrg_buf_q_ele;
	int i;
	struct ql_rcv_buf_cb *lrg_buf_cb;
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;

	if ((qdev->lrg_buf_free_count >= 8)
	    && (qdev->lrg_buf_release_cnt >= 16)) {

		if (qdev->lrg_buf_skb_check)
			if (!ql_populate_free_queue(qdev))
				return;

		lrg_buf_q_ele = qdev->lrg_buf_next_free;

		while ((qdev->lrg_buf_release_cnt >= 16)
		       && (qdev->lrg_buf_free_count >= 8)) {

			for (i = 0; i < 8; i++) {
				lrg_buf_cb =
				    ql_get_from_lrg_buf_free_list(qdev);
				lrg_buf_q_ele->addr_high =
				    lrg_buf_cb->buf_phy_addr_high;
				lrg_buf_q_ele->addr_low =
				    lrg_buf_cb->buf_phy_addr_low;
				lrg_buf_q_ele++;

				qdev->lrg_buf_release_cnt--;
			}

			qdev->lrg_buf_q_producer_index++;

			if (qdev->lrg_buf_q_producer_index == qdev->num_lbufq_entries)
				qdev->lrg_buf_q_producer_index = 0;

			if (qdev->lrg_buf_q_producer_index ==
			    (qdev->num_lbufq_entries - 1)) {
				lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
			}
		}

		qdev->lrg_buf_next_free = lrg_buf_q_ele;
		writel(qdev->lrg_buf_q_producer_index,
		       &port_regs->CommonRegs.rxLargeQProducerIndex);
	}
}
static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
				   struct ob_mac_iocb_rsp *mac_rsp)
{
	struct ql_tx_buf_cb *tx_cb;
	int i;
	int retval = 0;

	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
		printk(KERN_WARNING "Frame short, but frame was padded and sent.\n");
	}

	tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];

	/*  Check the transmit response flags for any errors */
	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
		printk(KERN_ERR "Frame too short to be legal, frame not sent.\n");

		qdev->stats.tx_errors++;
		retval = -EIO;
		goto frame_not_sent;
	}

	if (tx_cb->seg_count == 0) {
		printk(KERN_ERR "tx_cb->seg_count == 0: %d\n", mac_rsp->transaction_id);

		qdev->stats.tx_errors++;
		retval = -EIO;
		goto invalid_seg_count;
	}

	pci_unmap_single(qdev->pdev,
			 pci_unmap_addr(&tx_cb->map[0], mapaddr),
			 pci_unmap_len(&tx_cb->map[0], maplen),
			 PCI_DMA_TODEVICE);
	if (tx_cb->seg_count) {
		for (i = 1; i < tx_cb->seg_count; i++) {
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(&tx_cb->map[i],
						      mapaddr),
				       pci_unmap_len(&tx_cb->map[i], maplen),
				       PCI_DMA_TODEVICE);
		}
	}
	qdev->stats.tx_packets++;
	qdev->stats.tx_bytes += tx_cb->skb->len;

frame_not_sent:
	dev_kfree_skb_irq(tx_cb->skb);
	tx_cb->skb = NULL;

invalid_seg_count:
	atomic_inc(&qdev->tx_count);
}
void ql_get_sbuf(struct ql3_adapter *qdev)
{
	if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
		qdev->small_buf_index = 0;
	qdev->small_buf_release_cnt++;
}

struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb = NULL;

	lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index];
	qdev->lrg_buf_release_cnt++;
	if (++qdev->lrg_buf_index == qdev->num_large_buffers)
		qdev->lrg_buf_index = 0;
	return lrg_buf_cb;
}
/*
 * The difference between 3022 and 3032 for inbound completions:
 * 3022 uses two buffers per completion.  The first buffer contains
 * (some) header info, the second the remainder of the headers plus
 * the data.  For this chip we reserve some space at the top of the
 * receive buffer so that the header info in buffer one can be
 * prepended to buffer two.  Buffer two is then sent up while
 * buffer one is returned to the hardware to be reused.
 * 3032 receives all of its data and headers in one buffer for a
 * simpler process.  3032 also supports checksum verification as
 * can be seen in ql_process_macip_rx_intr().
 */
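/*
 * Pictorially (layout illustrative, not to scale):
 *
 *   3022:  buffer 1 [ ethhdr | header info ]    -> recycled to hardware
 *          buffer 2 [ headroom | hdrs + data ]  -> ethhdr copied into the
 *                                                  reserved headroom, then
 *                                                  handed to the stack
 *   3032:  buffer   [ hdrs + data ]             -> handed to the stack
 */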
static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
				   struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
{
	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
	struct sk_buff *skb;
	u16 length = le16_to_cpu(ib_mac_rsp_ptr->length);

	/*
	 * Get the inbound address list (small buffer).
	 */
	ql_get_sbuf(qdev);

	if (qdev->device_id == QL3022_DEVICE_ID)
		lrg_buf_cb1 = ql_get_lbuf(qdev);

	/* start of second buffer */
	lrg_buf_cb2 = ql_get_lbuf(qdev);
	skb = lrg_buf_cb2->skb;

	qdev->stats.rx_packets++;
	qdev->stats.rx_bytes += length;

	skb_put(skb, length);
	pci_unmap_single(qdev->pdev,
			 pci_unmap_addr(lrg_buf_cb2, mapaddr),
			 pci_unmap_len(lrg_buf_cb2, maplen),
			 PCI_DMA_FROMDEVICE);
	prefetch(skb->data);
	skb->ip_summed = CHECKSUM_NONE;
	skb->protocol = eth_type_trans(skb, qdev->ndev);

	netif_receive_skb(skb);
	qdev->ndev->last_rx = jiffies;
	lrg_buf_cb2->skb = NULL;

	if (qdev->device_id == QL3022_DEVICE_ID)
		ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
}
static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
				     struct ib_ip_iocb_rsp *ib_ip_rsp_ptr)
{
	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
	struct sk_buff *skb1 = NULL, *skb2;
	struct net_device *ndev = qdev->ndev;
	u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
	u16 size = 0;

	/*
	 * Get the inbound address list (small buffer).
	 */
	ql_get_sbuf(qdev);

	if (qdev->device_id == QL3022_DEVICE_ID) {
		/* start of first buffer on 3022 */
		lrg_buf_cb1 = ql_get_lbuf(qdev);
		skb1 = lrg_buf_cb1->skb;
		size = ETH_HLEN;
		if (*((u16 *) skb1->data) != 0xFFFF)
			size += VLAN_ETH_HLEN - ETH_HLEN;
	}

	/* start of second buffer */
	lrg_buf_cb2 = ql_get_lbuf(qdev);
	skb2 = lrg_buf_cb2->skb;

	skb_put(skb2, length);	/* Just the second buffer length here. */
	pci_unmap_single(qdev->pdev,
			 pci_unmap_addr(lrg_buf_cb2, mapaddr),
			 pci_unmap_len(lrg_buf_cb2, maplen),
			 PCI_DMA_FROMDEVICE);
	prefetch(skb2->data);

	skb2->ip_summed = CHECKSUM_NONE;
	if (qdev->device_id == QL3022_DEVICE_ID) {
		/*
		 * Copy the ethhdr from first buffer to second. This
		 * is necessary for 3022 IP completions.
		 */
		skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN,
						 skb_push(skb2, size), size);
	} else {
		u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum);
		if (checksum &
		    (IB_IP_IOCB_RSP_3032_ICE |
		     IB_IP_IOCB_RSP_3032_CE)) {
			printk(KERN_ERR
			       "%s: Bad checksum for this %s packet, checksum = %x.\n",
			       __func__,
			       ((checksum &
				 IB_IP_IOCB_RSP_3032_TCP) ? "TCP" :
				"UDP"), checksum);
		} else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) ||
			   (checksum & IB_IP_IOCB_RSP_3032_UDP &&
			    !(checksum & IB_IP_IOCB_RSP_3032_NUC))) {
			skb2->ip_summed = CHECKSUM_UNNECESSARY;
		}
	}
	skb2->protocol = eth_type_trans(skb2, qdev->ndev);

	netif_receive_skb(skb2);
	qdev->stats.rx_packets++;
	qdev->stats.rx_bytes += length;
	ndev->last_rx = jiffies;
	lrg_buf_cb2->skb = NULL;

	if (qdev->device_id == QL3022_DEVICE_ID)
		ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
}
static int ql_tx_rx_clean(struct ql3_adapter *qdev,
			  int *tx_cleaned, int *rx_cleaned, int work_to_do)
{
	struct net_rsp_iocb *net_rsp;
	struct net_device *ndev = qdev->ndev;
	int work_done = 0;

	/* While there are entries in the completion queue. */
	while ((le32_to_cpu(*(qdev->prsp_producer_index)) !=
		qdev->rsp_consumer_index) && (work_done < work_to_do)) {

		net_rsp = qdev->rsp_current;
		switch (net_rsp->opcode) {

		case OPCODE_OB_MAC_IOCB_FN0:
		case OPCODE_OB_MAC_IOCB_FN2:
			ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *)
					       net_rsp);
			(*tx_cleaned)++;
			break;

		case OPCODE_IB_MAC_IOCB:
		case OPCODE_IB_3032_MAC_IOCB:
			ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
					       net_rsp);
			(*rx_cleaned)++;
			break;

		case OPCODE_IB_IP_IOCB:
		case OPCODE_IB_3032_IP_IOCB:
			ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
						 net_rsp);
			(*rx_cleaned)++;
			break;
		default:
			{
				u32 *tmp = (u32 *)net_rsp;
				printk(KERN_ERR PFX
				       "%s: Hit default case, not "
				       "handled!\n"
				       "	dropping the packet, opcode = "
				       "%x.\n",
				       ndev->name, net_rsp->opcode);
				printk(KERN_ERR PFX
				       "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",
				       (unsigned long int)tmp[0],
				       (unsigned long int)tmp[1],
				       (unsigned long int)tmp[2],
				       (unsigned long int)tmp[3]);
			}
			break;
		}

		qdev->rsp_consumer_index++;

		if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) {
			qdev->rsp_consumer_index = 0;
			qdev->rsp_current = qdev->rsp_q_virt_addr;
		} else {
			qdev->rsp_current++;
		}

		work_done = *tx_cleaned + *rx_cleaned;
	}

	return work_done;
}
static int ql_poll(struct net_device *ndev, int *budget)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	int work_to_do = min(*budget, ndev->quota);
	int rx_cleaned = 0, tx_cleaned = 0;
	unsigned long hw_flags;
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;

	if (!netif_carrier_ok(ndev))
		goto quit_polling;

	ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, work_to_do);
	*budget -= rx_cleaned;
	ndev->quota -= rx_cleaned;

	if (tx_cleaned + rx_cleaned != work_to_do ||
	    !netif_running(ndev)) {
quit_polling:
		netif_rx_complete(ndev);

		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
		ql_update_small_bufq_prod_index(qdev);
		ql_update_lrg_bufq_prod_index(qdev);
		writel(qdev->rsp_consumer_index,
		       &port_regs->CommonRegs.rspQConsumerIndex);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

		ql_enable_interrupts(qdev);
		return 0;
	}
	return 1;
}
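/*
 * Note: this is the original net_device-based NAPI interface.  Returning
 * 0 tells the core that this device is done polling (which is why
 * interrupts are re-enabled first, above); returning 1 leaves the device
 * on the poll list for another pass.
 */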
static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct ql3_adapter *qdev = netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	u32 value;
	int handled = 1;
	u32 var;

	value =
	    ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);

	if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) {
		spin_lock(&qdev->adapter_lock);
		netif_stop_queue(qdev->ndev);
		netif_carrier_off(qdev->ndev);
		ql_disable_interrupts(qdev);
		qdev->port_link_state = LS_DOWN;
		set_bit(QL_RESET_ACTIVE, &qdev->flags);

		if (value & ISP_CONTROL_FE) {
			/*
			 * Chip Fatal Error.
			 */
			var =
			    ql_read_page0_reg_l(qdev,
						&port_regs->PortFatalErrStatus);
			printk(KERN_WARNING PFX
			       "%s: Resetting chip. PortFatalErrStatus "
			       "register = 0x%x\n", ndev->name, var);
			set_bit(QL_RESET_START, &qdev->flags);
		} else {
			/*
			 * Soft Reset Requested.
			 */
			set_bit(QL_RESET_PER_SCSI, &qdev->flags);
			printk(KERN_ERR PFX
			       "%s: Another function issued a reset to the "
			       "chip. ISR value = %x.\n", ndev->name, value);
		}
		queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
		spin_unlock(&qdev->adapter_lock);
	} else if (value & ISP_IMR_DISABLE_CMPL_INT) {
		ql_disable_interrupts(qdev);
		if (likely(netif_rx_schedule_prep(ndev))) {
			__netif_rx_schedule(ndev);
		}
	} else {
		return IRQ_NONE;
	}

	return IRQ_RETVAL(handled);
}
/*
 * Get the total number of segments needed for the
 * given number of fragments.  This is necessary because
 * outbound address lists (OAL) will be used when more than
 * two frags are given.  Each address list has 5 addr/len
 * pairs.  The 5th pair in each OAL is used to point to
 * the next OAL if more frags are coming.
 * That is why the frags:segment count ratio is not linear.
 */
static int ql_get_seg_count(struct ql3_adapter *qdev,
			    unsigned short frags)
{
	if (qdev->device_id == QL3022_DEVICE_ID)
		return 1;

	switch (frags) {
	case 0:  return 1;	/* just the skb->data seg */
	case 1:  return 2;	/* skb->data + 1 frag */
	case 2:  return 3;	/* skb->data + 2 frags */
	case 3:  return 5;	/* skb->data + 1 frag + 1 OAL containing 2 frags */
	case 4:  return 6;
	case 5:  return 7;
	case 6:  return 8;
	case 7:  return 10;
	case 8:  return 11;
	case 9:  return 12;
	case 10: return 13;
	case 11: return 15;
	case 12: return 16;
	case 13: return 17;
	case 14: return 18;
	case 15: return 20;
	case 16: return 21;
	case 17: return 22;
	case 18: return 23;
	}
	return -1;
}
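/*
 * Worked example: a 4-frag skb carries skb->data plus 4 frags, i.e. 5
 * data segments, but the 3rd ALP of the IOCB must then point to an OAL,
 * giving 6 entries total.  Each further OAL costs one more continuation
 * entry, which is why the table jumps by an extra entry after 3, 7, 11,
 * and 15 frags.
 */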
static void ql_hw_csum_setup(struct sk_buff *skb,
			     struct ob_mac_iocb_req *mac_iocb_ptr)
{
	struct ethhdr *eth;
	struct iphdr *ip = NULL;
	u8 offset = ETH_HLEN;

	eth = (struct ethhdr *)(skb->data);

	if (eth->h_proto == __constant_htons(ETH_P_IP)) {
		ip = (struct iphdr *)&skb->data[ETH_HLEN];
	} else if (eth->h_proto == htons(ETH_P_8021Q) &&
		   ((struct vlan_ethhdr *)skb->data)->
		   h_vlan_encapsulated_proto == __constant_htons(ETH_P_IP)) {
		ip = (struct iphdr *)&skb->data[VLAN_ETH_HLEN];
		offset = VLAN_ETH_HLEN;
	}

	if (ip) {
		if (ip->protocol == IPPROTO_TCP) {
			mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC |
				OB_3032MAC_IOCB_REQ_IC;
			mac_iocb_ptr->ip_hdr_off = offset;
			mac_iocb_ptr->ip_hdr_len = ip->ihl;
		} else if (ip->protocol == IPPROTO_UDP) {
			mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC |
				OB_3032MAC_IOCB_REQ_IC;
			mac_iocb_ptr->ip_hdr_off = offset;
			mac_iocb_ptr->ip_hdr_len = ip->ihl;
		}
	}
}
/*
 * Map the buffers for this transmit.  This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int ql_send_map(struct ql3_adapter *qdev,
		       struct ob_mac_iocb_req *mac_iocb_ptr,
		       struct ql_tx_buf_cb *tx_cb,
		       struct sk_buff *skb)
{
	struct oal *oal;
	struct oal_entry *oal_entry;
	int len = skb_headlen(skb);
	dma_addr_t map;
	int err;
	int completed_segs, i;
	int seg_cnt, seg = 0;
	int frag_cnt = (int)skb_shinfo(skb)->nr_frags;

	seg_cnt = tx_cb->seg_count;
	/*
	 * Map the skb buffer first.
	 */
	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

	err = pci_dma_mapping_error(map);
	if (err) {
		printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
		       qdev->ndev->name, err);

		return NETDEV_TX_BUSY;
	}

	oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
	oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
	oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
	oal_entry->len = cpu_to_le32(len);
	pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
	pci_unmap_len_set(&tx_cb->map[seg], maplen, len);
	seg++;

	if (seg_cnt == 1) {
		/* Terminate the last segment. */
		oal_entry->len =
		    cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY);
	} else {
		oal = tx_cb->oal;
		for (completed_segs = 0; completed_segs < frag_cnt; completed_segs++, seg++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs];
			oal_entry++;
			if ((seg == 2 && seg_cnt > 3) ||	/* Check for continuation */
			    (seg == 7 && seg_cnt > 8) ||	/* requirements. It's strange */
			    (seg == 12 && seg_cnt > 13) ||	/* but necessary. */
			    (seg == 17 && seg_cnt > 18)) {
				/* Continuation entry points to outbound address list. */
				map = pci_map_single(qdev->pdev, oal,
						     sizeof(struct oal),
						     PCI_DMA_TODEVICE);

				err = pci_dma_mapping_error(map);
				if (err) {
					printk(KERN_ERR "%s: PCI mapping outbound address list with error: %d\n",
					       qdev->ndev->name, err);
					goto map_error;
				}

				oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
				oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
				oal_entry->len =
				    cpu_to_le32(sizeof(struct oal) |
						OAL_CONT_ENTRY);
				pci_unmap_addr_set(&tx_cb->map[seg], mapaddr,
						   map);
				pci_unmap_len_set(&tx_cb->map[seg], maplen,
						  sizeof(struct oal));
				oal_entry = (struct oal_entry *)oal;
				oal++;
				seg++;
			}

			map =
			    pci_map_page(qdev->pdev, frag->page,
					 frag->page_offset, frag->size,
					 PCI_DMA_TODEVICE);

			err = pci_dma_mapping_error(map);
			if (err) {
				printk(KERN_ERR "%s: PCI mapping frags failed with error: %d\n",
				       qdev->ndev->name, err);
				goto map_error;
			}

			oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
			oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
			oal_entry->len = cpu_to_le32(frag->size);
			pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
			pci_unmap_len_set(&tx_cb->map[seg], maplen,
					  frag->size);
		}
		/* Terminate the last segment. */
		oal_entry->len =
		    cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY);
	}

	return NETDEV_TX_OK;

map_error:
	/* A PCI mapping failed and now we will need to back out.
	 * We need to traverse through the OALs and associated pages which
	 * have been mapped and now we must unmap them to clean up properly.
	 */

	seg = 1;
	oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
	oal = tx_cb->oal;
	for (i = 0; i < completed_segs; i++, seg++) {
		oal_entry++;

		if ((seg == 2 && seg_cnt > 3) ||	/* Check for continuation */
		    (seg == 7 && seg_cnt > 8) ||	/* requirements. It's strange */
		    (seg == 12 && seg_cnt > 13) ||	/* but necessary. */
		    (seg == 17 && seg_cnt > 18)) {
			pci_unmap_single(qdev->pdev,
					 pci_unmap_addr(&tx_cb->map[seg], mapaddr),
					 pci_unmap_len(&tx_cb->map[seg], maplen),
					 PCI_DMA_TODEVICE);
			oal++;
			seg++;
		}

		pci_unmap_page(qdev->pdev,
			       pci_unmap_addr(&tx_cb->map[seg], mapaddr),
			       pci_unmap_len(&tx_cb->map[seg], maplen),
			       PCI_DMA_TODEVICE);
	}

	pci_unmap_single(qdev->pdev,
			 pci_unmap_addr(&tx_cb->map[0], mapaddr),
			 pci_unmap_len(&tx_cb->map[0], maplen),
			 PCI_DMA_TODEVICE);

	return NETDEV_TX_BUSY;
}
/*
 * The difference between 3022 and 3032 sends:
 * 3022 only supports a simple single segment transmission.
 * 3032 supports checksumming and scatter/gather lists (fragments).
 * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
 * in the IOCB plus a chain of outbound address lists (OAL) that
 * each contain 5 ALPs.  The last ALP of the IOCB (3rd) or OAL (5th)
 * will be used to point to an OAL when more ALP entries are required.
 * The IOCB is always the top of the chain followed by one or more
 * OALs (when necessary).
 */
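/*
 * Chain layout for a large scatter/gather list (illustrative):
 *
 *   IOCB:  [ ALP0 ][ ALP1 ][ ALP2 -> OAL a ]
 *   OAL a: [ ALP ][ ALP ][ ALP ][ ALP ][ ALP -> OAL b ]
 *   OAL b: [ ALP ][ ALP ][ ALP ][ last ALP, OAL_LAST_ENTRY set ]
 */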
static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
{
	struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	struct ql_tx_buf_cb *tx_cb;
	u32 tot_len = skb->len;
	struct ob_mac_iocb_req *mac_iocb_ptr;

	if (unlikely(atomic_read(&qdev->tx_count) < 2)) {
		return NETDEV_TX_BUSY;
	}

	tx_cb = &qdev->tx_buf[qdev->req_producer_index];
	if ((tx_cb->seg_count = ql_get_seg_count(qdev,
						 skb_shinfo(skb)->nr_frags)) == -1) {
		printk(KERN_ERR PFX "%s: invalid segment count!\n", __func__);
		return NETDEV_TX_OK;
	}

	mac_iocb_ptr = tx_cb->queue_entry;
	memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req));
	mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
	mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X;
	mac_iocb_ptr->flags |= qdev->mb_bit_mask;
	mac_iocb_ptr->transaction_id = qdev->req_producer_index;
	mac_iocb_ptr->data_len = cpu_to_le16((u16)tot_len);
	tx_cb->skb = skb;
	if (qdev->device_id == QL3032_DEVICE_ID &&
	    skb->ip_summed == CHECKSUM_PARTIAL)
		ql_hw_csum_setup(skb, mac_iocb_ptr);

	if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) {
		printk(KERN_ERR PFX "%s: Could not map the segments!\n", __func__);
		return NETDEV_TX_BUSY;
	}

	qdev->req_producer_index++;
	if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
		qdev->req_producer_index = 0;

	ql_write_common_reg_l(qdev,
			      &port_regs->CommonRegs.reqQProducerIndex,
			      qdev->req_producer_index);

	ndev->trans_start = jiffies;
	if (netif_msg_tx_queued(qdev))
		printk(KERN_DEBUG PFX "%s: tx queued, slot %d, len %d\n",
		       ndev->name, qdev->req_producer_index, skb->len);

	atomic_dec(&qdev->tx_count);
	return NETDEV_TX_OK;
}
static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
{
	qdev->req_q_size =
	    (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req));

	qdev->req_q_virt_addr =
	    pci_alloc_consistent(qdev->pdev,
				 (size_t) qdev->req_q_size,
				 &qdev->req_q_phy_addr);

	if ((qdev->req_q_virt_addr == NULL) ||
	    LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) {
		printk(KERN_ERR PFX "%s: reqQ failed.\n",
		       qdev->ndev->name);
		return -ENOMEM;
	}

	qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb);

	qdev->rsp_q_virt_addr =
	    pci_alloc_consistent(qdev->pdev,
				 (size_t) qdev->rsp_q_size,
				 &qdev->rsp_q_phy_addr);

	if ((qdev->rsp_q_virt_addr == NULL) ||
	    LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) {
		printk(KERN_ERR PFX
		       "%s: rspQ allocation failed\n",
		       qdev->ndev->name);
		pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size,
				    qdev->req_q_virt_addr,
				    qdev->req_q_phy_addr);
		return -ENOMEM;
	}

	set_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);

	return 0;
}
static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev)
{
	if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags)) {
		printk(KERN_INFO PFX
		       "%s: Already done.\n", qdev->ndev->name);
		return;
	}

	pci_free_consistent(qdev->pdev,
			    qdev->req_q_size,
			    qdev->req_q_virt_addr, qdev->req_q_phy_addr);

	qdev->req_q_virt_addr = NULL;

	pci_free_consistent(qdev->pdev,
			    qdev->rsp_q_size,
			    qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr);

	qdev->rsp_q_virt_addr = NULL;

	clear_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
}
2464 static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
2466 /* Create Large Buffer Queue */
2467 qdev->lrg_buf_q_size =
2468 qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry);
2469 if (qdev->lrg_buf_q_size < PAGE_SIZE)
2470 qdev->lrg_buf_q_alloc_size = PAGE_SIZE;
2472 qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;
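/*
 * The physical-address checks in ql_alloc_net_req_rsp_queues()
 * suggest the chip requires a queue not to straddle an alignment
 * boundary of its own size; padding the allocation to a full page,
 * or to twice the queue size, is assumed to leave enough slack to
 * satisfy that constraint.
 */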
2474 qdev->lrg_buf = kmalloc(qdev->num_large_buffers * sizeof(struct ql_rcv_buf_cb), GFP_KERNEL);
2475 if (qdev->lrg_buf == NULL) {
2477 "%s: qdev->lrg_buf alloc failed.\n", qdev->ndev->name);
2481 qdev->lrg_buf_q_alloc_virt_addr =
2482 pci_alloc_consistent(qdev->pdev,
2483 qdev->lrg_buf_q_alloc_size,
2484 &qdev->lrg_buf_q_alloc_phy_addr);
2486 if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
2488 "%s: lBufQ failed\n", qdev->ndev->name);
2491 qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
2492 qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr;
2494 /* Create Small Buffer Queue */
2495 qdev->small_buf_q_size =
2496 NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
2497 if (qdev->small_buf_q_size < PAGE_SIZE)
2498 qdev->small_buf_q_alloc_size = PAGE_SIZE;
2500 qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2;
2502 qdev->small_buf_q_alloc_virt_addr =
2503 pci_alloc_consistent(qdev->pdev,
2504 qdev->small_buf_q_alloc_size,
2505 &qdev->small_buf_q_alloc_phy_addr);
2507 if (qdev->small_buf_q_alloc_virt_addr == NULL) {
2509 "%s: Small Buffer Queue allocation failed.\n",
2511 pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size,
2512 qdev->lrg_buf_q_alloc_virt_addr,
2513 qdev->lrg_buf_q_alloc_phy_addr);
2517 qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr;
2518 qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr;
2519 set_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags);
2523 static void ql_free_buffer_queues(struct ql3_adapter *qdev)
2525 if (!test_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags)) {
2526 printk(KERN_INFO PFX
2527 "%s: Already done.\n", qdev->ndev->name);
2530 kfree(qdev->lrg_buf);
2531 pci_free_consistent(qdev->pdev,
2532 qdev->lrg_buf_q_alloc_size,
2533 qdev->lrg_buf_q_alloc_virt_addr,
2534 qdev->lrg_buf_q_alloc_phy_addr);
2536 qdev->lrg_buf_q_virt_addr = NULL;
2538 pci_free_consistent(qdev->pdev,
2539 qdev->small_buf_q_alloc_size,
2540 qdev->small_buf_q_alloc_virt_addr,
2541 qdev->small_buf_q_alloc_phy_addr);
2543 qdev->small_buf_q_virt_addr = NULL;
2545 clear_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags);
2548 static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
2551 struct bufq_addr_element *small_buf_q_entry;
2553 /* Currently we allocate one chunk of memory and use it for small buffers */
2554 qdev->small_buf_total_size =
2555 (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES *
2556 QL_SMALL_BUFFER_SIZE);
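/*
 * A single contiguous DMA block is carved into fixed-size small
 * buffers.  With hypothetical values of 4 address elements per bufq
 * entry, 32 SBUFQ entries and 256-byte buffers, this would be one
 * 32 KB allocation holding 128 buffers.
 */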
2558 qdev->small_buf_virt_addr =
2559 pci_alloc_consistent(qdev->pdev,
2560 qdev->small_buf_total_size,
2561 &qdev->small_buf_phy_addr);
2563 if (qdev->small_buf_virt_addr == NULL) {
2565 "%s: Failed to get small buffer memory.\n",
2570 qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr);
2571 qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr);
2573 small_buf_q_entry = qdev->small_buf_q_virt_addr;
2575 /* Initialize the small buffer queue. */
2576 for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) {
2577 small_buf_q_entry->addr_high =
2578 cpu_to_le32(qdev->small_buf_phy_addr_high);
2579 small_buf_q_entry->addr_low =
2580 cpu_to_le32(qdev->small_buf_phy_addr_low +
2581 (i * QL_SMALL_BUFFER_SIZE));
2582 small_buf_q_entry++;
2584 qdev->small_buf_index = 0;
2585 set_bit(QL_ALLOC_SMALL_BUF_DONE,&qdev->flags);
2589 static void ql_free_small_buffers(struct ql3_adapter *qdev)
2591 if (!test_bit(QL_ALLOC_SMALL_BUF_DONE,&qdev->flags)) {
2592 printk(KERN_INFO PFX
2593 "%s: Already done.\n", qdev->ndev->name);
2596 if (qdev->small_buf_virt_addr != NULL) {
2597 pci_free_consistent(qdev->pdev,
2598 qdev->small_buf_total_size,
2599 qdev->small_buf_virt_addr,
2600 qdev->small_buf_phy_addr);
2602 qdev->small_buf_virt_addr = NULL;
2606 static void ql_free_large_buffers(struct ql3_adapter *qdev)
2609 struct ql_rcv_buf_cb *lrg_buf_cb;
2611 for (i = 0; i < qdev->num_large_buffers; i++) {
2612 lrg_buf_cb = &qdev->lrg_buf[i];
2613 if (lrg_buf_cb->skb) {
2614 dev_kfree_skb(lrg_buf_cb->skb);
2615 pci_unmap_single(qdev->pdev,
2616 pci_unmap_addr(lrg_buf_cb, mapaddr),
2617 pci_unmap_len(lrg_buf_cb, maplen),
2618 PCI_DMA_FROMDEVICE);
2619 memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
2626 static void ql_init_large_buffers(struct ql3_adapter *qdev)
2629 struct ql_rcv_buf_cb *lrg_buf_cb;
2630 struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr;
2632 for (i = 0; i < qdev->num_large_buffers; i++) {
2633 lrg_buf_cb = &qdev->lrg_buf[i];
2634 buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high;
2635 buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low;
2638 qdev->lrg_buf_index = 0;
2639 qdev->lrg_buf_skb_check = 0;
2642 static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
2645 struct ql_rcv_buf_cb *lrg_buf_cb;
2646 struct sk_buff *skb;
2650 for (i = 0; i < qdev->num_large_buffers; i++) {
2651 skb = netdev_alloc_skb(qdev->ndev,
2652 qdev->lrg_buffer_len);
2653 if (unlikely(!skb)) {
2654 /* Better luck next round */
2656 "%s: large buff alloc failed, "
2657 "for %d bytes at index %d.\n",
2659 qdev->lrg_buffer_len * 2, i);
2660 ql_free_large_buffers(qdev);
2664 lrg_buf_cb = &qdev->lrg_buf[i];
2665 memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
2666 lrg_buf_cb->index = i;
2667 lrg_buf_cb->skb = skb;
2669 * We save some space to copy the ethhdr from first
2672 skb_reserve(skb, QL_HEADER_SPACE);
2673 map = pci_map_single(qdev->pdev,
2675 qdev->lrg_buffer_len -
2677 PCI_DMA_FROMDEVICE);
2679 err = pci_dma_mapping_error(map);
2681 printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
2682 qdev->ndev->name, err);
2683 ql_free_large_buffers(qdev);
2687 pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
2688 pci_unmap_len_set(lrg_buf_cb, maplen,
2689 qdev->lrg_buffer_len -
2691 lrg_buf_cb->buf_phy_addr_low =
2692 cpu_to_le32(LS_64BITS(map));
2693 lrg_buf_cb->buf_phy_addr_high =
2694 cpu_to_le32(MS_64BITS(map));
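/*
 * The chip consumes DMA addresses as two little-endian 32-bit halves,
 * so the 64-bit mapping is split with LS_64BITS()/MS_64BITS() and
 * converted with cpu_to_le32() before being handed to hardware.
 */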
2700 static void ql_free_send_free_list(struct ql3_adapter *qdev)
2702 struct ql_tx_buf_cb *tx_cb;
2705 tx_cb = &qdev->tx_buf[0];
2706 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
2715 static int ql_create_send_free_list(struct ql3_adapter *qdev)
2717 struct ql_tx_buf_cb *tx_cb;
2719 struct ob_mac_iocb_req *req_q_curr =
2720 qdev->req_q_virt_addr;
2722 /* Create free list of transmit buffers */
2723 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
2725 tx_cb = &qdev->tx_buf[i];
2727 tx_cb->queue_entry = req_q_curr;
2729 tx_cb->oal = kmalloc(512, GFP_KERNEL);
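/*
 * 512 bytes per transmit slot is assumed to cover the worst-case OAL
 * chain: even at MAX_SKB_FRAGS fragments only a handful of 5-ALP
 * OALs are ever chained (see the comment above ql3xxx_send()).
 */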
2730 if (tx_cb->oal == NULL)
2736 static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
2738 if (qdev->ndev->mtu == NORMAL_MTU_SIZE) {
2739 qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES;
2740 qdev->lrg_buffer_len = NORMAL_MTU_SIZE;
2742 else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) {
2744 * Bigger buffers, so less of them.
2746 qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES;
2747 qdev->lrg_buffer_len = JUMBO_MTU_SIZE;
2750 "%s: Invalid mtu size. Only 1500 and 9000 are accepted.\n",
2754 qdev->num_large_buffers = qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY;
2755 qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE;
2756 qdev->max_frame_size =
2757 (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE;
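/*
 * lrg_buffer_len now includes the MTU, the VLAN/Ethernet headers and
 * the driver's reserved headroom; max_frame_size is the on-wire frame
 * size, so the headroom is subtracted back out and the CRC added in.
 */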
2760 * First allocate a page of shared memory and use it for shadow
2761 * locations of Network Request Queue Consumer Address Register and
2762 * Network Completion Queue Producer Index Register
2764 qdev->shadow_reg_virt_addr =
2765 pci_alloc_consistent(qdev->pdev,
2766 PAGE_SIZE, &qdev->shadow_reg_phy_addr);
2768 if (qdev->shadow_reg_virt_addr != NULL) {
2769 qdev->preq_consumer_index = (u16 *) qdev->shadow_reg_virt_addr;
2770 qdev->req_consumer_index_phy_addr_high =
2771 MS_64BITS(qdev->shadow_reg_phy_addr);
2772 qdev->req_consumer_index_phy_addr_low =
2773 LS_64BITS(qdev->shadow_reg_phy_addr);
2775 qdev->prsp_producer_index =
2776 (u32 *) (((u8 *) qdev->preq_consumer_index) + 8);
2777 qdev->rsp_producer_index_phy_addr_high =
2778 qdev->req_consumer_index_phy_addr_high;
2779 qdev->rsp_producer_index_phy_addr_low =
2780 qdev->req_consumer_index_phy_addr_low + 8;
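/*
 * Shadow page layout (a single page, by construction above):
 *
 *	offset 0: request queue consumer index, written by the chip
 *	offset 8: response queue producer index, written by the chip
 *
 * hence the response producer's physical address is simply the
 * request consumer's address plus 8.
 */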
2783 "%s: shadowReg Alloc failed.\n", qdev->ndev->name);
2787 if (ql_alloc_net_req_rsp_queues(qdev) != 0) {
2789 "%s: ql_alloc_net_req_rsp_queues failed.\n",
2794 if (ql_alloc_buffer_queues(qdev) != 0) {
2796 "%s: ql_alloc_buffer_queues failed.\n",
2798 goto err_buffer_queues;
2801 if (ql_alloc_small_buffers(qdev) != 0) {
2803 "%s: ql_alloc_small_buffers failed\n", qdev->ndev->name);
2804 goto err_small_buffers;
2807 if (ql_alloc_large_buffers(qdev) != 0) {
2809 "%s: ql_alloc_large_buffers failed\n", qdev->ndev->name);
2810 goto err_small_buffers;
2813 /* Initialize the large buffer queue. */
2814 ql_init_large_buffers(qdev);
2815 if (ql_create_send_free_list(qdev))
2818 qdev->rsp_current = qdev->rsp_q_virt_addr;
2822 ql_free_send_free_list(qdev);
2824 ql_free_buffer_queues(qdev);
2826 ql_free_net_req_rsp_queues(qdev);
2828 pci_free_consistent(qdev->pdev,
2830 qdev->shadow_reg_virt_addr,
2831 qdev->shadow_reg_phy_addr);
2836 static void ql_free_mem_resources(struct ql3_adapter *qdev)
2838 ql_free_send_free_list(qdev);
2839 ql_free_large_buffers(qdev);
2840 ql_free_small_buffers(qdev);
2841 ql_free_buffer_queues(qdev);
2842 ql_free_net_req_rsp_queues(qdev);
2843 if (qdev->shadow_reg_virt_addr != NULL) {
2844 pci_free_consistent(qdev->pdev,
2846 qdev->shadow_reg_virt_addr,
2847 qdev->shadow_reg_phy_addr);
2848 qdev->shadow_reg_virt_addr = NULL;
2852 static int ql_init_misc_registers(struct ql3_adapter *qdev)
2854 struct ql3xxx_local_ram_registers __iomem *local_ram =
2855 (void __iomem *)qdev->mem_map_registers;
2857 if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK,
2858 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
2862 ql_write_page2_reg(qdev,
2863 &local_ram->bufletSize, qdev->nvram_data.bufletSize);
2865 ql_write_page2_reg(qdev,
2866 &local_ram->maxBufletCount,
2867 qdev->nvram_data.bufletCount);
2869 ql_write_page2_reg(qdev,
2870 &local_ram->freeBufletThresholdLow,
2871 (qdev->nvram_data.tcpWindowThreshold25 << 16) |
2872 (qdev->nvram_data.tcpWindowThreshold0));
2874 ql_write_page2_reg(qdev,
2875 &local_ram->freeBufletThresholdHigh,
2876 qdev->nvram_data.tcpWindowThreshold50);
2878 ql_write_page2_reg(qdev,
2879 &local_ram->ipHashTableBase,
2880 (qdev->nvram_data.ipHashTableBaseHi << 16) |
2881 qdev->nvram_data.ipHashTableBaseLo);
2882 ql_write_page2_reg(qdev,
2883 &local_ram->ipHashTableCount,
2884 qdev->nvram_data.ipHashTableSize);
2885 ql_write_page2_reg(qdev,
2886 &local_ram->tcpHashTableBase,
2887 (qdev->nvram_data.tcpHashTableBaseHi << 16) |
2888 qdev->nvram_data.tcpHashTableBaseLo);
2889 ql_write_page2_reg(qdev,
2890 &local_ram->tcpHashTableCount,
2891 qdev->nvram_data.tcpHashTableSize);
2892 ql_write_page2_reg(qdev,
2893 &local_ram->ncbBase,
2894 (qdev->nvram_data.ncbTableBaseHi << 16) |
2895 qdev->nvram_data.ncbTableBaseLo);
2896 ql_write_page2_reg(qdev,
2897 &local_ram->maxNcbCount,
2898 qdev->nvram_data.ncbTableSize);
2899 ql_write_page2_reg(qdev,
2900 &local_ram->drbBase,
2901 (qdev->nvram_data.drbTableBaseHi << 16) |
2902 qdev->nvram_data.drbTableBaseLo);
2903 ql_write_page2_reg(qdev,
2904 &local_ram->maxDrbCount,
2905 qdev->nvram_data.drbTableSize);
2906 ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK);
2910 static int ql_adapter_initialize(struct ql3_adapter *qdev)
2913 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
2914 struct ql3xxx_host_memory_registers __iomem *hmem_regs =
2915 (void __iomem *)port_regs;
2919 if (ql_mii_setup(qdev))
2922 /* Bring the PHY out of reset */
2923 ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
2924 (ISP_SERIAL_PORT_IF_WE |
2925 (ISP_SERIAL_PORT_IF_WE << 16)));
2927 qdev->port_link_state = LS_DOWN;
2928 netif_carrier_off(qdev->ndev);
2930 /* V2 chip fix for ARS-39168. */
2931 ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
2932 (ISP_SERIAL_PORT_IF_SDE |
2933 (ISP_SERIAL_PORT_IF_SDE << 16)));
2935 /* Request Queue Registers */
2936 *((u32 *) (qdev->preq_consumer_index)) = 0;
2937 atomic_set(&qdev->tx_count,NUM_REQ_Q_ENTRIES);
2938 qdev->req_producer_index = 0;
2940 ql_write_page1_reg(qdev,
2941 &hmem_regs->reqConsumerIndexAddrHigh,
2942 qdev->req_consumer_index_phy_addr_high);
2943 ql_write_page1_reg(qdev,
2944 &hmem_regs->reqConsumerIndexAddrLow,
2945 qdev->req_consumer_index_phy_addr_low);
2947 ql_write_page1_reg(qdev,
2948 &hmem_regs->reqBaseAddrHigh,
2949 MS_64BITS(qdev->req_q_phy_addr));
2950 ql_write_page1_reg(qdev,
2951 &hmem_regs->reqBaseAddrLow,
2952 LS_64BITS(qdev->req_q_phy_addr));
2953 ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES);
2955 /* Response Queue Registers */
2956 *((u16 *) (qdev->prsp_producer_index)) = 0;
2957 qdev->rsp_consumer_index = 0;
2958 qdev->rsp_current = qdev->rsp_q_virt_addr;
2960 ql_write_page1_reg(qdev,
2961 &hmem_regs->rspProducerIndexAddrHigh,
2962 qdev->rsp_producer_index_phy_addr_high);
2964 ql_write_page1_reg(qdev,
2965 &hmem_regs->rspProducerIndexAddrLow,
2966 qdev->rsp_producer_index_phy_addr_low);
2968 ql_write_page1_reg(qdev,
2969 &hmem_regs->rspBaseAddrHigh,
2970 MS_64BITS(qdev->rsp_q_phy_addr));
2972 ql_write_page1_reg(qdev,
2973 &hmem_regs->rspBaseAddrLow,
2974 LS_64BITS(qdev->rsp_q_phy_addr));
2976 ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES);
2978 /* Large Buffer Queue */
2979 ql_write_page1_reg(qdev,
2980 &hmem_regs->rxLargeQBaseAddrHigh,
2981 MS_64BITS(qdev->lrg_buf_q_phy_addr));
2983 ql_write_page1_reg(qdev,
2984 &hmem_regs->rxLargeQBaseAddrLow,
2985 LS_64BITS(qdev->lrg_buf_q_phy_addr));
2987 ql_write_page1_reg(qdev, &hmem_regs->rxLargeQLength, qdev->num_lbufq_entries);
2989 ql_write_page1_reg(qdev,
2990 &hmem_regs->rxLargeBufferLength,
2991 qdev->lrg_buffer_len);
2993 /* Small Buffer Queue */
2994 ql_write_page1_reg(qdev,
2995 &hmem_regs->rxSmallQBaseAddrHigh,
2996 MS_64BITS(qdev->small_buf_q_phy_addr));
2998 ql_write_page1_reg(qdev,
2999 &hmem_regs->rxSmallQBaseAddrLow,
3000 LS_64BITS(qdev->small_buf_q_phy_addr));
3002 ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES);
3003 ql_write_page1_reg(qdev,
3004 &hmem_regs->rxSmallBufferLength,
3005 QL_SMALL_BUFFER_SIZE);
3007 qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1;
3008 qdev->small_buf_release_cnt = 8;
3009 qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1;
3010 qdev->lrg_buf_release_cnt = 8;
3011 qdev->lrg_buf_next_free =
3012 (struct bufq_addr_element *)qdev->lrg_buf_q_virt_addr;
3013 qdev->small_buf_index = 0;
3014 qdev->lrg_buf_index = 0;
3015 qdev->lrg_buf_free_count = 0;
3016 qdev->lrg_buf_free_head = NULL;
3017 qdev->lrg_buf_free_tail = NULL;
3019 ql_write_common_reg(qdev,
3020 &port_regs->CommonRegs.
3021 rxSmallQProducerIndex,
3022 qdev->small_buf_q_producer_index);
3023 ql_write_common_reg(qdev,
3024 &port_regs->CommonRegs.
3025 rxLargeQProducerIndex,
3026 qdev->lrg_buf_q_producer_index);
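/*
 * Both receive buffer queues start out completely full, so each
 * producer index is parked at the last entry (entry count - 1).
 * The release counters are primed at 8, presumably the batch size
 * used when returning buffers to the hardware.
 */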
3029 * Find out if the chip has already been initialized. If it has, then
3030 * we skip some of the initialization.
3032 clear_bit(QL_LINK_MASTER, &qdev->flags);
3033 value = ql_read_page0_reg(qdev, &port_regs->portStatus);
3034 if ((value & PORT_STATUS_IC) == 0) {
3036 /* Chip has not been configured yet, so let it rip. */
3037 if (ql_init_misc_registers(qdev)) {
3042 value = qdev->nvram_data.tcpMaxWindowSize;
3043 ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value);
3045 value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig;
3047 if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK,
3048 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
3053 ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value);
3054 ql_write_page0_reg(qdev, &port_regs->InternalChipConfig,
3055 (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) <<
3056 16) | (INTERNAL_CHIP_SD |
3057 INTERNAL_CHIP_WE)));
3058 ql_sem_unlock(qdev, QL_FLASH_SEM_MASK);
3061 if (qdev->mac_index)
3062 ql_write_page0_reg(qdev,
3063 &port_regs->mac1MaxFrameLengthReg,
3064 qdev->max_frame_size);
3066 ql_write_page0_reg(qdev,
3067 &port_regs->mac0MaxFrameLengthReg,
3068 qdev->max_frame_size);
3070 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
3071 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
3077 ql_init_scan_mode(qdev);
3078 ql_get_phy_owner(qdev);
3080 /* Load the MAC Configuration */
3082 /* Program lower 32 bits of the MAC address */
3083 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3084 (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
3085 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3086 ((qdev->ndev->dev_addr[2] << 24)
3087 | (qdev->ndev->dev_addr[3] << 16)
3088 | (qdev->ndev->dev_addr[4] << 8)
3089 | qdev->ndev->dev_addr[5]));
3091 /* Program top 16 bits of the MAC address */
3092 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3093 ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
3094 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3095 ((qdev->ndev->dev_addr[0] << 8)
3096 | qdev->ndev->dev_addr[1]));
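/*
 * For a hypothetical MAC address 00:a0:b1:c2:d3:e4 the two data
 * register writes above are 0xb1c2d3e4 (bytes 2-5) and 0x00a0
 * (bytes 0-1); the indirect pointer register selects which half of
 * the station address is being loaded.
 */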
3098 /* Enable Primary MAC */
3099 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3100 ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) |
3101 MAC_ADDR_INDIRECT_PTR_REG_PE));
3103 /* Clear Primary and Secondary IP addresses */
3104 ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
3105 ((IP_ADDR_INDEX_REG_MASK << 16) |
3106 (qdev->mac_index << 2)));
3107 ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
3109 ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
3110 ((IP_ADDR_INDEX_REG_MASK << 16) |
3111 ((qdev->mac_index << 2) + 1)));
3112 ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
3114 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
3116 /* Indicate Configuration Complete */
3117 ql_write_page0_reg(qdev,
3118 &port_regs->portControl,
3119 ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC));
3122 value = ql_read_page0_reg(qdev, &port_regs->portStatus);
3123 if (value & PORT_STATUS_IC)
3130 "%s: Hw Initialization timeout.\n", qdev->ndev->name);
3135 /* Enable Ethernet Function */
3136 if (qdev->device_id == QL3032_DEVICE_ID) {
3138 (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE |
3139 QL3032_PORT_CONTROL_EIv6 | QL3032_PORT_CONTROL_EIv4 |
3140 QL3032_PORT_CONTROL_ET);
3141 ql_write_page0_reg(qdev, &port_regs->functionControl,
3142 ((value << 16) | value));
3145 (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI |
3147 ql_write_page0_reg(qdev, &port_regs->portControl,
3148 ((value << 16) | value));
3157 * Caller holds hw_lock.
3159 static int ql_adapter_reset(struct ql3_adapter *qdev)
3161 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
3166 set_bit(QL_RESET_ACTIVE, &qdev->flags);
3167 clear_bit(QL_RESET_DONE, &qdev->flags);
3170 * Issue soft reset to chip.
3172 printk(KERN_DEBUG PFX
3173 "%s: Issue soft reset to chip.\n",
3175 ql_write_common_reg(qdev,
3176 &port_regs->CommonRegs.ispControlStatus,
3177 ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR));
3179 /* Wait up to 3 seconds for the reset to complete. */
3180 printk(KERN_DEBUG PFX
3181 "%s: Waiting for reset to complete.\n",
3184 /* Wait until the firmware tells us the Soft Reset is done */
3188 ql_read_common_reg(qdev,
3189 &port_regs->CommonRegs.ispControlStatus);
3190 if ((value & ISP_CONTROL_SR) == 0)
3194 } while (--max_wait_time);
3197 * Also, make sure that the Network Reset Interrupt bit has been
3198 * cleared after the soft reset has taken place.
3201 ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
3202 if (value & ISP_CONTROL_RI) {
3203 printk(KERN_DEBUG PFX
3204 "ql_adapter_reset: clearing RI after reset.\n");
3205 ql_write_common_reg(qdev,
3206 &port_regs->CommonRegs.
3208 ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
3211 if (max_wait_time == 0) {
3212 /* Issue Force Soft Reset */
3213 ql_write_common_reg(qdev,
3214 &port_regs->CommonRegs.
3216 ((ISP_CONTROL_FSR << 16) |
3219 * Wait until the firmware tells us the Force Soft Reset is
3225 ql_read_common_reg(qdev,
3226 &port_regs->CommonRegs.
3228 if ((value & ISP_CONTROL_FSR) == 0) {
3232 } while (--max_wait_time);
3234 if (max_wait_time == 0)
3237 clear_bit(QL_RESET_ACTIVE, &qdev->flags);
3238 set_bit(QL_RESET_DONE, &qdev->flags);
3242 static void ql_set_mac_info(struct ql3_adapter *qdev)
3244 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
3245 u32 value, port_status;
3248 /* Get the function number */
3250 ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
3251 func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK);
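/*
 * The function number is folded into each outbound opcode below,
 * presumably so the chip can associate IOCBs and their completions
 * with the issuing PCI function; the FN*_SCSI encodings belong to
 * the storage personality of the same ASIC and fall through to the
 * diagnostic in the default case.
 */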
3252 port_status = ql_read_page0_reg(qdev, &port_regs->portStatus);
3253 switch (value & ISP_CONTROL_FN_MASK) {
3254 case ISP_CONTROL_FN0_NET:
3255 qdev->mac_index = 0;
3256 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
3257 qdev->tcp_ob_opcode = OUTBOUND_TCP_IOCB | func_number;
3258 qdev->update_ob_opcode = UPDATE_NCB_IOCB | func_number;
3259 qdev->mb_bit_mask = FN0_MA_BITS_MASK;
3260 qdev->PHYAddr = PORT0_PHY_ADDRESS;
3261 if (port_status & PORT_STATUS_SM0)
3262 set_bit(QL_LINK_OPTICAL,&qdev->flags);
3264 clear_bit(QL_LINK_OPTICAL,&qdev->flags);
3267 case ISP_CONTROL_FN1_NET:
3268 qdev->mac_index = 1;
3269 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
3270 qdev->tcp_ob_opcode = OUTBOUND_TCP_IOCB | func_number;
3271 qdev->update_ob_opcode = UPDATE_NCB_IOCB | func_number;
3272 qdev->mb_bit_mask = FN1_MA_BITS_MASK;
3273 qdev->PHYAddr = PORT1_PHY_ADDRESS;
3274 if (port_status & PORT_STATUS_SM1)
3275 set_bit(QL_LINK_OPTICAL,&qdev->flags);
3277 clear_bit(QL_LINK_OPTICAL,&qdev->flags);
3280 case ISP_CONTROL_FN0_SCSI:
3281 case ISP_CONTROL_FN1_SCSI:
3283 printk(KERN_DEBUG PFX
3284 "%s: Invalid function number, ispControlStatus = 0x%x\n",
3285 qdev->ndev->name,value);
3288 qdev->numPorts = qdev->nvram_data.numPorts;
3291 static void ql_display_dev_info(struct net_device *ndev)
3293 struct ql3_adapter *qdev = netdev_priv(ndev);
3294 struct pci_dev *pdev = qdev->pdev;
3296 printk(KERN_INFO PFX
3297 "\n%s Adapter %d RevisionID %d found %s on PCI slot %d.\n",
3298 DRV_NAME, qdev->index, qdev->chip_rev_id,
3299 (qdev->device_id == QL3032_DEVICE_ID) ? "QLA3032" : "QLA3022",
3301 printk(KERN_INFO PFX
3303 test_bit(QL_LINK_OPTICAL,&qdev->flags) ? "OPTICAL" : "COPPER");
3306 * Print PCI bus width/type.
3308 printk(KERN_INFO PFX
3309 "Bus interface is %s %s.\n",
3310 ((qdev->pci_width == 64) ? "64-bit" : "32-bit"),
3311 ((qdev->pci_x) ? "PCI-X" : "PCI"));
3313 printk(KERN_INFO PFX
3314 "mem IO base address adjusted = 0x%p\n",
3315 qdev->mem_map_registers);
3316 printk(KERN_INFO PFX "Interrupt number = %d\n", pdev->irq);
3318 if (netif_msg_probe(qdev))
3319 printk(KERN_INFO PFX
3320 "%s: MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
3321 ndev->name, ndev->dev_addr[0], ndev->dev_addr[1],
3322 ndev->dev_addr[2], ndev->dev_addr[3], ndev->dev_addr[4],
3326 static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
3328 struct net_device *ndev = qdev->ndev;
3331 netif_stop_queue(ndev);
3332 netif_carrier_off(ndev);
3334 clear_bit(QL_ADAPTER_UP,&qdev->flags);
3335 clear_bit(QL_LINK_MASTER,&qdev->flags);
3337 ql_disable_interrupts(qdev);
3339 free_irq(qdev->pdev->irq, ndev);
3341 if (qdev->msi && test_bit(QL_MSI_ENABLED,&qdev->flags)) {
3342 printk(KERN_INFO PFX
3343 "%s: calling pci_disable_msi().\n", qdev->ndev->name);
3344 clear_bit(QL_MSI_ENABLED,&qdev->flags);
3345 pci_disable_msi(qdev->pdev);
3348 del_timer_sync(&qdev->adapter_timer);
3350 netif_poll_disable(ndev);
3354 unsigned long hw_flags;
3356 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3357 if (ql_wait_for_drvr_lock(qdev)) {
3358 if ((soft_reset = ql_adapter_reset(qdev))) {
3360 "%s: ql_adapter_reset(%d) FAILED!\n",
3361 ndev->name, qdev->index);
3364 "%s: Releaseing driver lock via chip reset.\n",ndev->name);
3367 "%s: Could not acquire driver lock to do "
3368 "reset!\n", ndev->name);
3371 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3373 ql_free_mem_resources(qdev);
3377 static int ql_adapter_up(struct ql3_adapter *qdev)
3379 struct net_device *ndev = qdev->ndev;
3381 unsigned long irq_flags = IRQF_SAMPLE_RANDOM | IRQF_SHARED;
3382 unsigned long hw_flags;
3384 if (ql_alloc_mem_resources(qdev)) {
3386 "%s Unable to allocate buffers.\n", ndev->name);
3391 if (pci_enable_msi(qdev->pdev)) {
3393 "%s: User requested MSI, but MSI failed to "
3394 "initialize. Continuing without MSI.\n",
3398 printk(KERN_INFO PFX "%s: MSI Enabled...\n", qdev->ndev->name);
3399 set_bit(QL_MSI_ENABLED,&qdev->flags);
3400 irq_flags &= ~IRQF_SHARED;
3404 if ((err = request_irq(qdev->pdev->irq,
3406 irq_flags, ndev->name, ndev))) {
3408 "%s: Failed to reserve interrupt %d already in use.\n",
3409 ndev->name, qdev->pdev->irq);
3413 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3415 if ((err = ql_wait_for_drvr_lock(qdev))) {
3416 if ((err = ql_adapter_initialize(qdev))) {
3418 "%s: Unable to initialize adapter.\n",
3423 "%s: Releaseing driver lock.\n",ndev->name);
3424 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
3427 "%s: Could not aquire driver lock.\n",
3432 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3434 set_bit(QL_ADAPTER_UP,&qdev->flags);
3436 mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
3438 netif_poll_enable(ndev);
3439 ql_enable_interrupts(qdev);
3443 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
3445 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3446 free_irq(qdev->pdev->irq, ndev);
3448 if (qdev->msi && test_bit(QL_MSI_ENABLED,&qdev->flags)) {
3449 printk(KERN_INFO PFX
3450 "%s: calling pci_disable_msi().\n",
3452 clear_bit(QL_MSI_ENABLED,&qdev->flags);
3453 pci_disable_msi(qdev->pdev);
3458 static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset)
3460 if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) {
3462 "%s: Driver up/down cycle failed, "
3463 "closing device\n",qdev->ndev->name);
3464 dev_close(qdev->ndev);
3470 static int ql3xxx_close(struct net_device *ndev)
3472 struct ql3_adapter *qdev = netdev_priv(ndev);
3475 * Wait for device to recover from a reset.
3476 * (Rarely happens, but possible.)
3478 while (!test_bit(QL_ADAPTER_UP,&qdev->flags))
3481 ql_adapter_down(qdev,QL_DO_RESET);
3485 static int ql3xxx_open(struct net_device *ndev)
3487 struct ql3_adapter *qdev = netdev_priv(ndev);
3488 return (ql_adapter_up(qdev));
3491 static struct net_device_stats *ql3xxx_get_stats(struct net_device *dev)
3493 struct ql3_adapter *qdev = netdev_priv(dev);
3494 return &qdev->stats;
3497 static void ql3xxx_set_multicast_list(struct net_device *ndev)
3500 * Multicast filtering is not supported; IFF_MULTICAST is cleared in ql3xxx_probe(), so there is nothing to do here.
3505 static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
3507 struct ql3_adapter *qdev = netdev_priv(ndev);
3508 struct ql3xxx_port_registers __iomem *port_regs =
3509 qdev->mem_map_registers;
3510 struct sockaddr *addr = p;
3511 unsigned long hw_flags;
3513 if (netif_running(ndev))
3516 if (!is_valid_ether_addr(addr->sa_data))
3517 return -EADDRNOTAVAIL;
3519 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
3521 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3522 /* Program lower 32 bits of the MAC address */
3523 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3524 (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
3525 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3526 ((ndev->dev_addr[2] << 24) | (ndev->
3527 dev_addr[3] << 16) |
3528 (ndev->dev_addr[4] << 8) | ndev->dev_addr[5]));
3530 /* Program top 16 bits of the MAC address */
3531 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3532 ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
3533 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3534 ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1]));
3535 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3540 static void ql3xxx_tx_timeout(struct net_device *ndev)
3542 struct ql3_adapter *qdev = netdev_priv(ndev);
3544 printk(KERN_ERR PFX "%s: Resetting...\n", ndev->name);
3546 * Stop the queues, we've got a problem.
3548 netif_stop_queue(ndev);
3551 * Wake up the worker to process this event.
3553 queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0);
3556 static void ql_reset_work(struct work_struct *work)
3558 struct ql3_adapter *qdev =
3559 container_of(work, struct ql3_adapter, reset_work.work);
3560 struct net_device *ndev = qdev->ndev;
3562 struct ql_tx_buf_cb *tx_cb;
3563 int max_wait_time, i;
3564 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
3565 unsigned long hw_flags;
3567 if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) || test_bit(QL_RESET_START, &qdev->flags)) {
3568 clear_bit(QL_LINK_MASTER,&qdev->flags);
3571 * Loop through the active list and return the skb.
3573 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
3575 tx_cb = &qdev->tx_buf[i];
3577 printk(KERN_DEBUG PFX
3578 "%s: Freeing lost SKB.\n",
3580 pci_unmap_single(qdev->pdev,
3581 pci_unmap_addr(&tx_cb->map[0], mapaddr),
3582 pci_unmap_len(&tx_cb->map[0], maplen),
3584 for (j = 1; j < tx_cb->seg_count; j++) {
3585 pci_unmap_page(qdev->pdev,
3586 pci_unmap_addr(&tx_cb->map[j], mapaddr),
3587 pci_unmap_len(&tx_cb->map[j], maplen),
3590 dev_kfree_skb(tx_cb->skb);
3596 "%s: Clearing NRI after reset.\n", qdev->ndev->name);
3597 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3598 ql_write_common_reg(qdev,
3599 &port_regs->CommonRegs.
3601 ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
3603 * Wait for the Soft Reset to complete.
3607 value = ql_read_common_reg(qdev,
3608 &port_regs->CommonRegs.
3611 if ((value & ISP_CONTROL_SR) == 0) {
3612 printk(KERN_DEBUG PFX
3613 "%s: reset completed.\n",
3618 if (value & ISP_CONTROL_RI) {
3619 printk(KERN_DEBUG PFX
3620 "%s: clearing NRI after reset.\n",
3622 ql_write_common_reg(qdev,
3627 16) | ISP_CONTROL_RI));
3631 } while (--max_wait_time);
3632 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3634 if (value & ISP_CONTROL_SR) {
3637 * Set the reset flags and clear the board again.
3638 * Nothing else to do...
3641 "%s: Timed out waiting for reset to "
3642 "complete.\n", ndev->name);
3644 "%s: Do a reset.\n", ndev->name);
3645 clear_bit(QL_RESET_PER_SCSI,&qdev->flags);
3646 clear_bit(QL_RESET_START,&qdev->flags);
3647 ql_cycle_adapter(qdev,QL_DO_RESET);
3651 clear_bit(QL_RESET_ACTIVE,&qdev->flags);
3652 clear_bit(QL_RESET_PER_SCSI,&qdev->flags);
3653 clear_bit(QL_RESET_START,&qdev->flags);
3654 ql_cycle_adapter(qdev,QL_NO_RESET);
3658 static void ql_tx_timeout_work(struct work_struct *work)
3660 struct ql3_adapter *qdev =
3661 container_of(work, struct ql3_adapter, tx_timeout_work.work);
3663 ql_cycle_adapter(qdev, QL_DO_RESET);
3666 static void ql_get_board_info(struct ql3_adapter *qdev)
3668 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
3671 value = ql_read_page0_reg_l(qdev, &port_regs->portStatus);
3673 qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12);
3674 if (value & PORT_STATUS_64)
3675 qdev->pci_width = 64;
3677 qdev->pci_width = 32;
3678 if (value & PORT_STATUS_X)
3682 qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn);
3685 static void ql3xxx_timer(unsigned long ptr)
3687 struct ql3_adapter *qdev = (struct ql3_adapter *)ptr;
3689 if (test_bit(QL_RESET_ACTIVE,&qdev->flags)) {
3690 printk(KERN_DEBUG PFX
3691 "%s: Reset in progress.\n",
3696 ql_link_state_machine(qdev);
3698 /* Restart timer on 1 second interval. */
3700 mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
3703 static int __devinit ql3xxx_probe(struct pci_dev *pdev,
3704 const struct pci_device_id *pci_entry)
3706 struct net_device *ndev = NULL;
3707 struct ql3_adapter *qdev = NULL;
3708 static int cards_found = 0;
3709 int pci_using_dac, err;
3711 err = pci_enable_device(pdev);
3713 printk(KERN_ERR PFX "%s cannot enable PCI device\n",
3718 err = pci_request_regions(pdev, DRV_NAME);
3720 printk(KERN_ERR PFX "%s cannot obtain PCI resources\n",
3722 goto err_out_disable_pdev;
3725 pci_set_master(pdev);
3727 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
3729 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
3730 } else if (!(err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
3732 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3736 printk(KERN_ERR PFX "%s no usable DMA configuration\n",
3738 goto err_out_free_regions;
3741 ndev = alloc_etherdev(sizeof(struct ql3_adapter));
3743 printk(KERN_ERR PFX "%s could not alloc etherdev\n",
3746 goto err_out_free_regions;
3749 SET_MODULE_OWNER(ndev);
3750 SET_NETDEV_DEV(ndev, &pdev->dev);
3752 pci_set_drvdata(pdev, ndev);
3754 qdev = netdev_priv(ndev);
3755 qdev->index = cards_found;
3758 qdev->device_id = pci_entry->device;
3759 qdev->port_link_state = LS_DOWN;
3763 qdev->msg_enable = netif_msg_init(debug, default_msg);
3766 ndev->features |= NETIF_F_HIGHDMA;
3767 if (qdev->device_id == QL3032_DEVICE_ID)
3768 ndev->features |= (NETIF_F_HW_CSUM | NETIF_F_SG);
3770 qdev->mem_map_registers =
3771 ioremap_nocache(pci_resource_start(pdev, 1),
3772 pci_resource_len(qdev->pdev, 1));
3773 if (!qdev->mem_map_registers) {
3774 printk(KERN_ERR PFX "%s: cannot map device registers\n",
3777 goto err_out_free_ndev;
3780 spin_lock_init(&qdev->adapter_lock);
3781 spin_lock_init(&qdev->hw_lock);
3783 /* Set driver entry points */
3784 ndev->open = ql3xxx_open;
3785 ndev->hard_start_xmit = ql3xxx_send;
3786 ndev->stop = ql3xxx_close;
3787 ndev->get_stats = ql3xxx_get_stats;
3788 ndev->set_multicast_list = ql3xxx_set_multicast_list;
3789 SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops);
3790 ndev->set_mac_address = ql3xxx_set_mac_address;
3791 ndev->tx_timeout = ql3xxx_tx_timeout;
3792 ndev->watchdog_timeo = 5 * HZ;
3794 ndev->poll = &ql_poll;
3797 ndev->irq = pdev->irq;
3799 /* make sure the EEPROM is good */
3800 if (ql_get_nvram_params(qdev)) {
3801 printk(KERN_ALERT PFX
3802 "ql3xxx_probe: Adapter #%d, Invalid NVRAM parameters.\n",
3805 goto err_out_iounmap;
3808 ql_set_mac_info(qdev);
3810 /* Validate and set parameters */
3811 if (qdev->mac_index) {
3812 ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac;
3813 memcpy(ndev->dev_addr, &qdev->nvram_data.funcCfg_fn2.macAddress,
3816 ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac;
3817 memcpy(ndev->dev_addr, &qdev->nvram_data.funcCfg_fn0.macAddress,
3820 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
3822 ndev->tx_queue_len = NUM_REQ_Q_ENTRIES;
3824 /* Turn off support for multicasting */
3825 ndev->flags &= ~IFF_MULTICAST;
3827 /* Record PCI bus information. */
3828 ql_get_board_info(qdev);
3831 * Set the Maximum Memory Read Byte Count value. We do this to handle
3835 pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036);
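/*
 * 0x4e is assumed to be the PCI-X command register in this device's
 * configuration space; 0x0036 programs, among other fields, the
 * Maximum Memory Read Byte Count mentioned above.  Both values are
 * device-specific magic numbers.
 */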
3838 err = register_netdev(ndev);
3840 printk(KERN_ERR PFX "%s: cannot register net device\n",
3842 goto err_out_iounmap;
3845 /* we're going to reset, so assume we have no link for now */
3847 netif_carrier_off(ndev);
3848 netif_stop_queue(ndev);
3850 qdev->workqueue = create_singlethread_workqueue(ndev->name);
3851 INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work);
3852 INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work);
3854 init_timer(&qdev->adapter_timer);
3855 qdev->adapter_timer.function = ql3xxx_timer;
3856 qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */
3857 qdev->adapter_timer.data = (unsigned long)qdev;
3860 printk(KERN_ALERT PFX "%s\n", DRV_STRING);
3861 printk(KERN_ALERT PFX "Driver name: %s, Version: %s.\n",
3862 DRV_NAME, DRV_VERSION);
3864 ql_display_dev_info(ndev);
3870 iounmap(qdev->mem_map_registers);
3873 err_out_free_regions:
3874 pci_release_regions(pdev);
3875 err_out_disable_pdev:
3876 pci_disable_device(pdev);
3877 pci_set_drvdata(pdev, NULL);
3882 static void __devexit ql3xxx_remove(struct pci_dev *pdev)
3884 struct net_device *ndev = pci_get_drvdata(pdev);
3885 struct ql3_adapter *qdev = netdev_priv(ndev);
3887 unregister_netdev(ndev);
3890 ql_disable_interrupts(qdev);
3892 if (qdev->workqueue) {
3893 cancel_delayed_work(&qdev->reset_work);
3894 cancel_delayed_work(&qdev->tx_timeout_work);
3895 destroy_workqueue(qdev->workqueue);
3896 qdev->workqueue = NULL;
3899 iounmap(qdev->mem_map_registers);
3900 pci_release_regions(pdev);
3901 pci_set_drvdata(pdev, NULL);
3905 static struct pci_driver ql3xxx_driver = {
3908 .id_table = ql3xxx_pci_tbl,
3909 .probe = ql3xxx_probe,
3910 .remove = __devexit_p(ql3xxx_remove),
3913 static int __init ql3xxx_init_module(void)
3915 return pci_register_driver(&ql3xxx_driver);
3918 static void __exit ql3xxx_exit(void)
3920 pci_unregister_driver(&ql3xxx_driver);
3923 module_init(ql3xxx_init_module);
3924 module_exit(ql3xxx_exit);