 * QLogic QLA3xxx NIC HBA Driver
 * Copyright (c) 2003-2006 QLogic Corporation
 * See LICENSE.qla3xxx for copyright and licensing details.
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>

#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#define DRV_NAME	"qla3xxx"
#define DRV_STRING	"QLogic ISP3XXX Network Driver"
#define DRV_VERSION	"v2.03.00-k4"
#define PFX		DRV_NAME " "

static const char ql3xxx_driver_name[] = DRV_NAME;
static const char ql3xxx_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("QLogic Corporation");
MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static const u32 default_msg
    = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;

static int debug = -1;		/* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");

static struct pci_device_id ql3xxx_pci_tbl[] __devinitdata = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)},
	/* required last entry */

MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl);
/*
 * These are the known PHYs which are used.
 */

	PHY_DEVICE_et phyDevice;

static const PHY_DEVICE_INFO_t PHY_DEVICES[] =
	{{PHY_TYPE_UNKNOWN,    0x000000, 0x0, "PHY_TYPE_UNKNOWN"},
	 {PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"},
	 {PHY_AGERE_ET1011C,   0x00a0bc, 0x1, "PHY_AGERE_ET1011C"},
/*
 * Caller must take hw_lock.
 */
static int ql_sem_spinlock(struct ql3_adapter *qdev,
			   u32 sem_mask, u32 sem_bits)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	u32 value;
	unsigned int seconds = 3;

	do {
		writel((sem_mask | sem_bits),
		       &port_regs->CommonRegs.semaphoreReg);
		value = readl(&port_regs->CommonRegs.semaphoreReg);
		if ((value & (sem_mask >> 16)) == sem_bits)
			return 0;
		ssleep(1);
	} while (--seconds);
	return -1;
}

static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;

	writel(sem_mask, &port_regs->CommonRegs.semaphoreReg);
	readl(&port_regs->CommonRegs.semaphoreReg);
}

static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	u32 value;

	writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg);
	value = readl(&port_regs->CommonRegs.semaphoreReg);
	return ((value & (sem_mask >> 16)) == sem_bits);
}
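
/*
 * Editor's note, not part of the original source: the semaphore
 * convention inferred from ql_sem_lock()/ql_sem_spinlock() above.
 * The upper halfword of the written value acts as a per-field enable
 * mask and the lower halfword carries the requested owner bits, so a
 * single write updates one semaphore field without disturbing the
 * others.  Ownership is confirmed only if the read-back shows the
 * masked field holding exactly the requested bits -- see the
 * QL_NVRAM_SEM_MASK acquisition in ql_get_nvram_params() below for a
 * typical caller.
 */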
/*
 * Caller holds hw_lock.
 */
static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)

		if (!ql_sem_lock(qdev,
				 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)

			printk(KERN_ERR PFX "%s: Timed out waiting for "

			printk(KERN_DEBUG PFX
			       "%s: driver lock acquired.\n",

static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;

	writel(((ISP_CONTROL_NP_MASK << 16) | page),
	       &port_regs->CommonRegs.ispControlStatus);
	readl(&port_regs->CommonRegs.ispControlStatus);
	qdev->current_page = page;
}
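
/*
 * Editor's note (inferred from the write above, not from chip
 * documentation): the register file is paged, the page number lives in
 * the ISP_CONTROL_NP field of ispControlStatus, and -- as with the
 * semaphore register -- the upper halfword of the write acts as the
 * enable mask for that field, so the remaining control/status bits are
 * left untouched.  The read-back flushes the posted write before any
 * access to the newly selected page.
 */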
static u32 ql_read_common_reg_l(struct ql3_adapter *qdev,
				u32 __iomem *reg)
{
	u32 value;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	value = readl(reg);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return value;
}

static u32 ql_read_common_reg(struct ql3_adapter *qdev,
			      u32 __iomem *reg)
{
	return readl(reg);
}

static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	u32 value;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	value = readl(reg);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return value;
}

static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
{
	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	return readl(reg);
}

static void ql_write_common_reg_l(struct ql3_adapter *qdev,
				  u32 __iomem *reg, u32 value)
{
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	writel(value, reg);
	readl(reg);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
}

static void ql_write_common_reg(struct ql3_adapter *qdev,
				u32 __iomem *reg, u32 value)
{
	writel(value, reg);
	readl(reg);
}

static void ql_write_nvram_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	writel(value, reg);
	readl(reg);
	udelay(1);
}

static void ql_write_page0_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 0)
		ql_set_register_page(qdev, 0);
	writel(value, reg);
	readl(reg);
}

/*
 * Caller holds hw_lock.  Only called during init.
 */
static void ql_write_page1_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 1)
		ql_set_register_page(qdev, 1);
	writel(value, reg);
	readl(reg);
}

/*
 * Caller holds hw_lock.  Only called during init.
 */
static void ql_write_page2_reg(struct ql3_adapter *qdev,
			       u32 __iomem *reg, u32 value)
{
	if (qdev->current_page != 2)
		ql_set_register_page(qdev, 2);
	writel(value, reg);
	readl(reg);
}

static void ql_disable_interrupts(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;

	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
			      (ISP_IMR_ENABLE_INT << 16));
}

static void ql_enable_interrupts(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;

	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
			      ((0xff << 16) | ISP_IMR_ENABLE_INT));
}
static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
					    struct ql_rcv_buf_cb *lrg_buf_cb)
{
	dma_addr_t map;
	int err;

	lrg_buf_cb->next = NULL;

	if (qdev->lrg_buf_free_tail == NULL) {	/* The list is empty */
		qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb;
	} else {
		qdev->lrg_buf_free_tail->next = lrg_buf_cb;
		qdev->lrg_buf_free_tail = lrg_buf_cb;
	}

	if (!lrg_buf_cb->skb) {
		lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
						   qdev->lrg_buffer_len);
		if (unlikely(!lrg_buf_cb->skb)) {
			printk(KERN_ERR PFX "%s: failed netdev_alloc_skb().\n",

			qdev->lrg_buf_skb_check++;
		} else {
			/*
			 * We save some space to copy the ethhdr from the
			 * first buffer.
			 */
			skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
			map = pci_map_single(qdev->pdev,
					     lrg_buf_cb->skb->data,
					     qdev->lrg_buffer_len -
					     QL_HEADER_SPACE,
					     PCI_DMA_FROMDEVICE);
			err = pci_dma_mapping_error(map);
			if (err) {
				printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
				       qdev->ndev->name, err);
				dev_kfree_skb(lrg_buf_cb->skb);
				lrg_buf_cb->skb = NULL;

				qdev->lrg_buf_skb_check++;
				return;
			}

			lrg_buf_cb->buf_phy_addr_low =
			    cpu_to_le32(LS_64BITS(map));
			lrg_buf_cb->buf_phy_addr_high =
			    cpu_to_le32(MS_64BITS(map));
			pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
			pci_unmap_len_set(lrg_buf_cb, maplen,
					  qdev->lrg_buffer_len -
					  QL_HEADER_SPACE);
		}
	}

	qdev->lrg_buf_free_count++;
}
static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter
							   *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb;

	if ((lrg_buf_cb = qdev->lrg_buf_free_head) != NULL) {
		if ((qdev->lrg_buf_free_head = lrg_buf_cb->next) == NULL)
			qdev->lrg_buf_free_tail = NULL;
		qdev->lrg_buf_free_count--;
	}

	return lrg_buf_cb;
}

static u32 addrBits = EEPROM_NO_ADDR_BITS;
static u32 dataBits = EEPROM_NO_DATA_BITS;

static void fm93c56a_deselect(struct ql3_adapter *qdev);
static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
			    unsigned short *value);

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_select(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;

	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
}
/*
 * Caller holds hw_lock.
 */
static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
{
	int i;
	u32 mask;
	u32 dataBit;
	u32 previousBit;
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;

	/* Clock in a zero, then do the start bit */
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |

	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			   AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE);
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
			   AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL);

	mask = 1 << (FM93C56A_CMD_BITS - 1);
	/* Force the previous data bit to be different */
	previousBit = 0xffff;
	for (i = 0; i < FM93C56A_CMD_BITS; i++) {
		dataBit =
		    (cmd & mask) ? AUBURN_EEPROM_DO_1 : AUBURN_EEPROM_DO_0;
		if (previousBit != dataBit) {
			/*
			 * If the bit changed, then change the DO state to
			 * match.
			 */
			ql_write_nvram_reg(qdev,
					   &port_regs->CommonRegs.serialPortInterfaceReg,
					   ISP_NVRAM_MASK |
					   qdev->eeprom_cmd_data | dataBit);
			previousBit = dataBit;
		}
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.serialPortInterfaceReg,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   dataBit | AUBURN_EEPROM_CLK_RISE);
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.serialPortInterfaceReg,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   dataBit | AUBURN_EEPROM_CLK_FALL);

	mask = 1 << (addrBits - 1);
	/* Force the previous data bit to be different */
	previousBit = 0xffff;
	for (i = 0; i < addrBits; i++) {
		dataBit =
		    (eepromAddr & mask) ? AUBURN_EEPROM_DO_1 :
		    AUBURN_EEPROM_DO_0;
		if (previousBit != dataBit) {
			/*
			 * If the bit changed, then change the DO state to
			 * match.
			 */
			ql_write_nvram_reg(qdev,
					   &port_regs->CommonRegs.serialPortInterfaceReg,
					   ISP_NVRAM_MASK |
					   qdev->eeprom_cmd_data | dataBit);
			previousBit = dataBit;
		}
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.serialPortInterfaceReg,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   dataBit | AUBURN_EEPROM_CLK_RISE);
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.serialPortInterfaceReg,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   dataBit | AUBURN_EEPROM_CLK_FALL);
		eepromAddr = eepromAddr << 1;
	}
/*
 * Caller holds hw_lock.
 */
static void fm93c56a_deselect(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;

	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			   ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
}

/*
 * Caller holds hw_lock.
 */
static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
{
	int i;
	u32 data = 0;
	u32 dataBit;
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;

	/* Read the data bits */
	/* The first bit is a dummy.  Clock right over it. */
	for (i = 0; i < dataBits; i++) {
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.serialPortInterfaceReg,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   AUBURN_EEPROM_CLK_RISE);
		ql_write_nvram_reg(qdev,
				   &port_regs->CommonRegs.serialPortInterfaceReg,
				   ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
				   AUBURN_EEPROM_CLK_FALL);

		    &port_regs->CommonRegs.serialPortInterfaceReg)
		     & AUBURN_EEPROM_DI_1) ? 1 : 0;
		data = (data << 1) | dataBit;
/*
 * Caller holds hw_lock.
 */
static void eeprom_readword(struct ql3_adapter *qdev,
			    u32 eepromAddr, unsigned short *value)
{
	fm93c56a_select(qdev);
	fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr);
	fm93c56a_datain(qdev, value);
	fm93c56a_deselect(qdev);
}

static void ql_swap_mac_addr(u8 *macAddress)

	temp = macAddress[0];
	macAddress[0] = macAddress[1];
	macAddress[1] = temp;
	temp = macAddress[2];
	macAddress[2] = macAddress[3];
	macAddress[3] = temp;
	temp = macAddress[4];
	macAddress[4] = macAddress[5];
	macAddress[5] = temp;
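
/*
 * Editor's sketch, not part of the original driver (and not called by
 * it): a worked example of the repair above.  The EEPROM image is read
 * back as 16-bit words, so on a big-endian host the two bytes of each
 * word of the MAC address land swapped -- see the endianness comment
 * in ql_get_nvram_params() below.
 */
static void ql_swap_mac_addr_example(void)
{
	/* a MAC of 00:c0:dd:01:02:03 as read back word-by-word */
	u8 mac[ETH_ALEN] = { 0xc0, 0x00, 0x01, 0xdd, 0x03, 0x02 };

	ql_swap_mac_addr(mac);	/* restores 00:c0:dd:01:02:03 */
}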
static int ql_get_nvram_params(struct ql3_adapter *qdev)
{
	u16 *pEEPROMData;
	u16 checksum = 0;
	u32 index;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	pEEPROMData = (u16 *)&qdev->nvram_data;
	qdev->eeprom_cmd_data = 0;
	if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *

		printk(KERN_ERR PFX "%s: Failed ql_sem_spinlock().\n",

		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	for (index = 0; index < EEPROM_SIZE; index++) {
		eeprom_readword(qdev, index, pEEPROMData);
		checksum += *pEEPROMData;
	}

	ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);

		printk(KERN_ERR PFX "%s: checksum should be zero, is %x!!\n",
		       qdev->ndev->name, checksum);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	/*
	 * We have a problem with endianness for the MAC addresses
	 * and for the two 8-bit values version and numPorts.  We
	 * have to swap them on big-endian systems.
	 */
	ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn0.macAddress);
	ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn1.macAddress);
	ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn2.macAddress);
	ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn3.macAddress);
	pEEPROMData = (u16 *)&qdev->nvram_data.version;
	*pEEPROMData = le16_to_cpu(*pEEPROMData);

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
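
/*
 * Editor's sketch, not part of the original source: the validity rule
 * the read loop above enforces.  The image is expected to carry a
 * trailing word chosen so that the wrapping 16-bit sum of all
 * EEPROM_SIZE words is exactly zero; any corruption makes the sum
 * nonzero and the probe fails.
 */
static u16 ql_eeprom_sum_sketch(const u16 *words, int nwords)
{
	u16 sum = 0;
	int i;

	for (i = 0; i < nwords; i++)
		sum += words[i];
	return sum;	/* zero for a valid image */
}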
static const u32 PHYAddr[2] = {
	PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS
};

static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 temp;

		temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg);
		if (!(temp & MAC_MII_STATUS_BSY))
			return 0;

static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 scanControl;

	if (qdev->numPorts > 1) {
		/* Auto scan will cycle through multiple ports */
		scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC;
	} else {
		scanControl = MAC_MII_CONTROL_SC;
	}

	/*
	 * Scan register 1 of PHY/PETBI.
	 * Set up to scan both devices.
	 * The autoscan starts from the first register, and completes
	 * the last one before rolling over to the first.
	 */
	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[0] | MII_SCAN_REGISTER);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (scanControl) |
			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16));
}

static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev)
{
	u8 scanWasEnabled;
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;

	/* See if scan mode is enabled before we turn it off */
	if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) &
	    (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) {
		/* Scan is enabled */
		scanWasEnabled = 1;
	} else {
		/* Scan is disabled */
		scanWasEnabled = 0;
	}

	/*
	 * When disabling scan mode you must first change the MII register
	 * address.
	 */
	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   PHYAddr[0] | MII_SCAN_REGISTER);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS |
			     MAC_MII_CONTROL_RC) << 16));

	return scanWasEnabled;
}
static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
			       u16 regAddr, u16 value, u32 phyAddr)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u8 scanWasEnabled;

	scanWasEnabled = ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

	/* Wait for write to complete 9/10/04 SJP */
	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free after issuing command.\n",

	if (scanWasEnabled)
		ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
			      u16 *value, u32 phyAddr)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u8 scanWasEnabled;
	u32 temp;

	scanWasEnabled = ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16));

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

	/* Wait for the read to complete */
	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free after issuing command.\n",

	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
	*value = (u16)temp;

	if (scanWasEnabled)
		ql_mii_enable_scan_mode(qdev);

	return 0;
}
static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;

	ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   qdev->PHYAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);

	/* Wait for write to complete. */
	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free after issuing command.\n",

	ql_mii_enable_scan_mode(qdev);

	return 0;
}

static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
{
	u32 temp;
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;

	ql_mii_disable_scan_mode(qdev);

	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free before issuing command.\n",

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
			   qdev->PHYAddr | regAddr);

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16));

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);

	/* Wait for the read to complete */
	if (ql_wait_for_mii_ready(qdev)) {
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Timed out waiting for management port to "
			       "get free after issuing command.\n",

	temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
	*value = (u16)temp;

	ql_mii_enable_scan_mode(qdev);

	return 0;
}
static void ql_petbi_reset(struct ql3_adapter *qdev)
{
	ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET);
}

static void ql_petbi_start_neg(struct ql3_adapter *qdev)
{
	u16 reg;

	/* Enable Auto-negotiation sense */
	ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg);
	reg |= PETBI_TBI_AUTO_SENSE;
	ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg);

	ql_mii_write_reg(qdev, PETBI_NEG_ADVER,
			 PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX);

	ql_mii_write_reg(qdev, PETBI_CONTROL_REG,
			 PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
			 PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000);
}

static void ql_petbi_reset_ex(struct ql3_adapter *qdev)
{
	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET,
			    PHYAddr[qdev->mac_index]);
}

static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev)
{
	u16 reg;

	/* Enable Auto-negotiation sense */
	ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg,
			   PHYAddr[qdev->mac_index]);
	reg |= PETBI_TBI_AUTO_SENSE;
	ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg,
			    PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER,
			    PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX,
			    PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG,
			    PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
			    PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000,
			    PHYAddr[qdev->mac_index]);
}

static void ql_petbi_init(struct ql3_adapter *qdev)
{
	ql_petbi_reset(qdev);
	ql_petbi_start_neg(qdev);
}

static void ql_petbi_init_ex(struct ql3_adapter *qdev)
{
	ql_petbi_reset_ex(qdev);
	ql_petbi_start_neg_ex(qdev);
}

static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0)

	return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE;
}
static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
{
	printk(KERN_INFO "%s: enabling Agere specific PHY\n", qdev->ndev->name);
	/* power down device bit 11 = 1 */
	ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr);
	/* enable diagnostic mode bit 2 = 1 */
	ql_mii_write_reg_ex(qdev, 0x12, 0x840e, miiAddr);
	/* 1000MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x10, 0x8805, miiAddr);
	/* 1000MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0xf03e, miiAddr);
	/* 100MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x10, 0x8806, miiAddr);
	/* 100MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0x003e, miiAddr);
	/* 10MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x10, 0x8807, miiAddr);
	/* 10MB amplitude adjust (see Agere errata) */
	ql_mii_write_reg_ex(qdev, 0x11, 0x1f00, miiAddr);
	/* point to hidden reg 0x2806 */
	ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr);
	/* Write new PHYAD w/bit 5 set */
	ql_mii_write_reg_ex(qdev, 0x11,
			    0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr);
	/*
	 * Disable diagnostic mode bit 2 = 0
	 * Power up device bit 11 = 0
	 * Link up (on) and activity (blink)
	 */
	ql_mii_write_reg(qdev, 0x12, 0x840a);
	ql_mii_write_reg(qdev, 0x00, 0x1140);
	ql_mii_write_reg(qdev, 0x1c, 0xfaf0);
}
static PHY_DEVICE_et getPhyType(struct ql3_adapter *qdev,
				u16 phyIdReg0, u16 phyIdReg1)
{
	PHY_DEVICE_et result = PHY_TYPE_UNKNOWN;

	if (phyIdReg0 == 0xffff) {

	if (phyIdReg1 == 0xffff) {

	/* oui is split between two registers */
	oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10);

	model = (phyIdReg1 & PHY_MODEL_MASK) >> 4;

	/* Scan table for this PHY */
	for (i = 0; i < MAX_PHY_DEV_TYPES; i++) {
		if ((oui == PHY_DEVICES[i].phyIdOUI) &&
		    (model == PHY_DEVICES[i].phyIdModel)) {
			result = PHY_DEVICES[i].phyDevice;

			printk(KERN_INFO "%s: Phy: %s\n",
			       qdev->ndev->name, PHY_DEVICES[i].name);
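
/*
 * Editor's worked example, not part of the original source (the mask
 * values live in qla3xxx.h and are assumed here to be the usual
 * clause-22 layout, PHY_OUI_1_MASK == 0xfc00 and PHY_MODEL_MASK ==
 * 0x03f0): for the Vitesse VSC8211 entry in PHY_DEVICES[], ID
 * registers of phyIdReg0 = 0x000f and phyIdReg1 = 0xc4b2 decode as
 *
 *	oui   = (0x000f << 6) | ((0xc4b2 & 0xfc00) >> 10) = 0x0003f1
 *	model = (0xc4b2 & 0x03f0) >> 4                    = 0xb
 *
 * matching the table above.
 */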
static int ql_phy_get_speed(struct ql3_adapter *qdev)
{
	u16 reg;

	switch (qdev->phyType) {
	case PHY_AGERE_ET1011C:

		if (ql_mii_read_reg(qdev, 0x1A, &reg) < 0)

		reg = (reg >> 8) & 3;

		if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)

		reg = (((reg & 0x18) >> 3) & 3);

static int ql_is_full_dup(struct ql3_adapter *qdev)
{
	u16 reg;

	switch (qdev->phyType) {
	case PHY_AGERE_ET1011C:

		if (ql_mii_read_reg(qdev, 0x1A, &reg))

		return ((reg & 0x0080) && (reg & 0x1000)) != 0;

	case PHY_VITESSE_VSC8211:

		if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)

		return (reg & PHY_AUX_DUPLEX_STAT) != 0;

static int ql_is_phy_neg_pause(struct ql3_adapter *qdev)
{
	u16 reg;

	if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0)

	return (reg & PHY_NEG_PAUSE) != 0;
}
static int PHY_Setup(struct ql3_adapter *qdev)
{
	u16 reg1;
	u16 reg2;
	bool agereAddrChangeNeeded = false;
	u32 miiAddr = 0;
	int err;

	/* Determine the PHY we are using by reading the ID's */
	err = ql_mii_read_reg(qdev, PHY_ID_0_REG, &reg1);
	if (err != 0) {
		printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG\n",

	err = ql_mii_read_reg(qdev, PHY_ID_1_REG, &reg2);
	if (err != 0) {
		printk(KERN_ERR "%s: Could not read from reg PHY_ID_1_REG\n",

	/* Check if we have an Agere PHY */
	if ((reg1 == 0xffff) || (reg2 == 0xffff)) {

		/*
		 * Determine which MII address we should be using,
		 * based on the index of the card.
		 */
		if (qdev->mac_index == 0) {
			miiAddr = MII_AGERE_ADDR_1;
		} else {
			miiAddr = MII_AGERE_ADDR_2;
		}

		err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr);
		if (err != 0) {
			printk(KERN_ERR "%s: Could not read from reg PHY_ID_0_REG after Agere detected\n",

		err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr);
		if (err != 0) {
			printk(KERN_ERR "%s: Could not read from reg PHY_ID_1_REG after Agere detected\n",

		/* We need to remember to initialize the Agere PHY */
		agereAddrChangeNeeded = true;
	}

	/*
	 * Determine the particular PHY we have on board to apply
	 * PHY specific initializations.
	 */
	qdev->phyType = getPhyType(qdev, reg1, reg2);

	if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) {
		/* need this here so address gets changed */
		phyAgereSpecificInit(qdev, miiAddr);
	} else if (qdev->phyType == PHY_TYPE_UNKNOWN) {
		printk(KERN_ERR "%s: PHY is unknown\n", qdev->ndev->name);
/*
 * Caller holds hw_lock.
 */
static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16));
	else
		value = (MAC_CONFIG_REG_PE << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16));
	else
		value = (MAC_CONFIG_REG_SR << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16));
	else
		value = (MAC_CONFIG_REG_GM << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 value;

	if (enable)
		value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16));
	else
		value = (MAC_CONFIG_REG_FD << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}

/*
 * Caller holds hw_lock.
 */
static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 value;

	if (enable)
		value =
		    ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) |
		     ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16));
	else
		value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16);

	if (qdev->mac_index)
		ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
	else
		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
}
/*
 * Caller holds hw_lock.
 */
static int ql_is_fiber(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_SM0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_SM1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	return (temp & bitToCheck) != 0;
}

static int ql_is_auto_cfg(struct ql3_adapter *qdev)
{
	u16 reg;

	ql_mii_read_reg(qdev, 0x00, &reg);
	return (reg & 0x1000) != 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_AC0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_AC1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		if (netif_msg_link(qdev))
			printk(KERN_INFO PFX
			       "%s: Auto-Negotiate complete.\n",

	if (netif_msg_link(qdev))
		printk(KERN_WARNING PFX
		       "%s: Auto-Negotiate incomplete.\n",

/*
 * ql_is_neg_pause() returns 1 if pause was negotiated to be on.
 */
static int ql_is_neg_pause(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return ql_is_petbi_neg_pause(qdev);
	else
		return ql_is_phy_neg_pause(qdev);
}
static int ql_auto_neg_error(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_AE0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_AE1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	return (temp & bitToCheck) != 0;
}

static u32 ql_get_link_speed(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return SPEED_1000;
	else
		return ql_phy_get_speed(qdev);
}

static int ql_is_link_full_dup(struct ql3_adapter *qdev)
{
	if (ql_is_fiber(qdev))
		return 1;
	else
		return ql_is_full_dup(qdev);
}

/*
 * Caller holds hw_lock.
 */
static int ql_link_down_detect(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = ISP_CONTROL_LINK_DN_0;
		break;
	case 1:
		bitToCheck = ISP_CONTROL_LINK_DN_1;
		break;
	}

	temp =
	    ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
	return (temp & bitToCheck) != 0;
}

/*
 * Caller holds hw_lock.
 */
static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;

	switch (qdev->mac_index) {
	case 0:
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    (ISP_CONTROL_LINK_DN_0) |
				    (ISP_CONTROL_LINK_DN_0 << 16));
		break;
	case 1:
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.ispControlStatus,
				    (ISP_CONTROL_LINK_DN_1) |
				    (ISP_CONTROL_LINK_DN_1 << 16));
		break;
/*
 * Caller holds hw_lock.
 */
static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_F1_ENABLED;
		break;
	case 1:
		bitToCheck = PORT_STATUS_F3_ENABLED;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		if (netif_msg_link(qdev))
			printk(KERN_DEBUG PFX
			       "%s: is not link master.\n", qdev->ndev->name);
		return 0;
	}

	if (netif_msg_link(qdev))
		printk(KERN_DEBUG PFX
		       "%s: is link master.\n", qdev->ndev->name);
	return 1;
}
static void ql_phy_reset_ex(struct ql3_adapter *qdev)
{
	ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET,
			    PHYAddr[qdev->mac_index]);
}

static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
{
	u16 reg;
	u16 portConfiguration;

	if (qdev->phyType == PHY_AGERE_ET1011C) {
		/* turn off external loopback */
		ql_mii_write_reg(qdev, 0x13, 0x0000);
	}

	if (qdev->mac_index == 0)
		portConfiguration = qdev->nvram_data.macCfg_port0.portConfiguration;
	else
		portConfiguration = qdev->nvram_data.macCfg_port1.portConfiguration;

	/*
	 * Some HBAs in the field are set to 0 and they need to
	 * be reinterpreted with a default value.
	 */
	if (portConfiguration == 0)
		portConfiguration = PORT_CONFIG_DEFAULT;

	/* Set the 1000 advertisements */
	ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, &reg,
			   PHYAddr[qdev->mac_index]);
	reg &= ~PHY_GIG_ALL_PARAMS;

	if ((portConfiguration & PORT_CONFIG_1000MB_SPEED) &&
	    (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED)) {
		reg |= PHY_GIG_ADV_1000F;
	}

	if ((portConfiguration & PORT_CONFIG_1000MB_SPEED) &&
	    (portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED)) {
		reg |= PHY_GIG_ADV_1000H;
	}

	ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg,
			    PHYAddr[qdev->mac_index]);

	/* Set the 10/100 & pause negotiation advertisements */
	ql_mii_read_reg_ex(qdev, PHY_NEG_ADVER, &reg,
			   PHYAddr[qdev->mac_index]);
	reg &= ~PHY_NEG_ALL_PARAMS;

	if (portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED)
		reg |= PHY_NEG_ASY_PAUSE | PHY_NEG_SYM_PAUSE;

	if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) {
		if (portConfiguration & PORT_CONFIG_100MB_SPEED)
			reg |= PHY_NEG_ADV_100F;

		if (portConfiguration & PORT_CONFIG_10MB_SPEED)
			reg |= PHY_NEG_ADV_10F;
	}

	if (portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) {
		if (portConfiguration & PORT_CONFIG_100MB_SPEED)
			reg |= PHY_NEG_ADV_100H;

		if (portConfiguration & PORT_CONFIG_10MB_SPEED)
			reg |= PHY_NEG_ADV_10H;
	}

	if (portConfiguration & PORT_CONFIG_1000MB_SPEED) {

	ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg,
			    PHYAddr[qdev->mac_index]);

	ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, PHYAddr[qdev->mac_index]);

	ql_mii_write_reg_ex(qdev, CONTROL_REG,
			    reg | PHY_CTRL_RESTART_NEG | PHY_CTRL_AUTO_NEG,
			    PHYAddr[qdev->mac_index]);
}

static void ql_phy_init_ex(struct ql3_adapter *qdev)
{
	ql_phy_reset_ex(qdev);

	ql_phy_start_neg_ex(qdev);
}
/*
 * Caller holds hw_lock.
 */
static u32 ql_get_link_state(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	u32 bitToCheck = 0;
	u32 temp, linkState;

	switch (qdev->mac_index) {
	case 0:
		bitToCheck = PORT_STATUS_UP0;
		break;
	case 1:
		bitToCheck = PORT_STATUS_UP1;
		break;
	}

	temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if (temp & bitToCheck) {
		linkState = LS_UP;
	} else {
		linkState = LS_DOWN;
		if (netif_msg_link(qdev))
			printk(KERN_WARNING PFX
			       "%s: Link is down.\n", qdev->ndev->name);
	}
	return linkState;
}
static int ql_port_start(struct ql3_adapter *qdev)
{
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *

		printk(KERN_ERR "%s: Could not get hw lock for GIO\n",

	if (ql_is_fiber(qdev)) {
		ql_petbi_init(qdev);
	} else {
		/* Copper port */
		ql_phy_init_ex(qdev);
	}

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);

static int ql_finish_auto_neg(struct ql3_adapter *qdev)
{
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *

	if (!ql_auto_neg_error(qdev)) {
		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
			/* configure the MAC */
			if (netif_msg_link(qdev))
				printk(KERN_DEBUG PFX
				       "%s: Configuring link.\n",

			ql_mac_cfg_soft_reset(qdev, 1);
			ql_mac_cfg_gig(qdev,

			ql_mac_cfg_full_dup(qdev,

			ql_mac_cfg_pause(qdev,

			ql_mac_cfg_soft_reset(qdev, 0);

			/* enable the MAC */
			if (netif_msg_link(qdev))
				printk(KERN_DEBUG PFX
				       "%s: Enabling mac.\n",

			ql_mac_enable(qdev, 1);
		}

		if (netif_msg_link(qdev))
			printk(KERN_DEBUG PFX
			       "%s: Change port_link_state LS_DOWN to LS_UP.\n",

		qdev->port_link_state = LS_UP;
		netif_start_queue(qdev->ndev);
		netif_carrier_on(qdev->ndev);
		if (netif_msg_link(qdev))
			printk(KERN_INFO PFX
			       "%s: Link is up at %d Mbps, %s duplex.\n",
			       qdev->ndev->name,
			       ql_get_link_speed(qdev),
			       ql_is_link_full_dup(qdev)

	} else {	/* Remote error detected */

		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
			if (netif_msg_link(qdev))
				printk(KERN_DEBUG PFX
				       "%s: Remote error detected. "
				       "Calling ql_port_start().\n",

			/*
			 * ql_port_start() is shared code and needs
			 * to lock the PHY on its own.
			 */
			ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
			if (ql_port_start(qdev)) {	/* Restart port */

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
static void ql_link_state_machine(struct ql3_adapter *qdev)
{
	u32 curr_link_state;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	curr_link_state = ql_get_link_state(qdev);

	if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) {
		if (netif_msg_link(qdev))
			printk(KERN_INFO PFX
			       "%s: Reset in progress, skip processing link "
			       "state.\n", qdev->ndev->name);

		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
		return;
	}

	switch (qdev->port_link_state) {
	case LS_UNKNOWN:
		if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
			ql_port_start(qdev);
		}
		qdev->port_link_state = LS_DOWN;
		/* Fall through */

	case LS_DOWN:
		if (netif_msg_link(qdev))
			printk(KERN_DEBUG PFX
			       "%s: port_link_state = LS_DOWN.\n",

		if (curr_link_state == LS_UP) {
			if (netif_msg_link(qdev))
				printk(KERN_DEBUG PFX
				       "%s: curr_link_state = LS_UP.\n",

			if (ql_is_auto_neg_complete(qdev))
				ql_finish_auto_neg(qdev);

			if (qdev->port_link_state == LS_UP)
				ql_link_down_detect_clear(qdev);
		}
		break;

	case LS_UP:
		/*
		 * See if the link is currently down or went down and came
		 * back up.
		 */
		if ((curr_link_state == LS_DOWN) || ql_link_down_detect(qdev)) {
			if (netif_msg_link(qdev))
				printk(KERN_INFO PFX "%s: Link is down.\n",

			qdev->port_link_state = LS_DOWN;
		}
		break;
	}

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
}
/*
 * Caller must take hw_lock and QL_PHY_GIO_SEM.
 */
static void ql_get_phy_owner(struct ql3_adapter *qdev)
{
	if (ql_this_adapter_controls_port(qdev))
		set_bit(QL_LINK_MASTER, &qdev->flags);
	else
		clear_bit(QL_LINK_MASTER, &qdev->flags);
}

/*
 * Caller must take hw_lock and QL_PHY_GIO_SEM.
 */
static void ql_init_scan_mode(struct ql3_adapter *qdev)
{
	ql_mii_enable_scan_mode(qdev);

	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
		if (ql_this_adapter_controls_port(qdev))
			ql_petbi_init_ex(qdev);
	} else {
		if (ql_this_adapter_controls_port(qdev))
			ql_phy_init_ex(qdev);
	}
}

/*
 * MII_Setup needs to be called before taking the PHY out of reset so that the
 * management interface clock speed can be set properly.  It would be better if
 * we had a way to disable MDC until after the PHY is out of reset, but we
 * don't have that capability.
 */
static int ql_mii_setup(struct ql3_adapter *qdev)
{
	u32 reg;
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;

	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *

	if (qdev->device_id == QL3032_DEVICE_ID)
		ql_write_page0_reg(qdev,
				   &port_regs->macMIIMgmtControlReg, 0x0f00000);

	/* Divide 125MHz clock by 28 to meet PHY timing requirements */
	reg = MAC_MII_CONTROL_CLK_SEL_DIV28;

	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
			   reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16));

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);

static u32 ql_supported_modes(struct ql3_adapter *qdev)
{
	u32 supported;

	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
		supported = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE
		    | SUPPORTED_Autoneg;
	} else {
		supported = SUPPORTED_10baseT_Half
		    | SUPPORTED_10baseT_Full
		    | SUPPORTED_100baseT_Half
		    | SUPPORTED_100baseT_Full
		    | SUPPORTED_1000baseT_Half
		    | SUPPORTED_1000baseT_Full
		    | SUPPORTED_Autoneg | SUPPORTED_TP;
	}

	return supported;
}
static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
{
	int status;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *

		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	status = ql_is_auto_cfg(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

static u32 ql_get_speed(struct ql3_adapter *qdev)
{
	u32 status;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *

		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	status = ql_get_link_speed(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}

static int ql_get_full_dup(struct ql3_adapter *qdev)
{
	int status;
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *

		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	status = ql_is_link_full_dup(qdev);
	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return status;
}
static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	ecmd->transceiver = XCVR_INTERNAL;
	ecmd->supported = ql_supported_modes(qdev);

	if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
		ecmd->port = PORT_FIBRE;
	} else {
		ecmd->port = PORT_TP;
		ecmd->phy_address = qdev->PHYAddr;
	}
	ecmd->advertising = ql_supported_modes(qdev);
	ecmd->autoneg = ql_get_auto_cfg_status(qdev);
	ecmd->speed = ql_get_speed(qdev);
	ecmd->duplex = ql_get_full_dup(qdev);
	return 0;
}

static void ql_get_drvinfo(struct net_device *ndev,
			   struct ethtool_drvinfo *drvinfo)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	strncpy(drvinfo->driver, ql3xxx_driver_name, 32);
	strncpy(drvinfo->version, ql3xxx_driver_version, 32);
	strncpy(drvinfo->fw_version, "N/A", 32);
	strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
	drvinfo->n_stats = 0;
	drvinfo->testinfo_len = 0;
	drvinfo->regdump_len = 0;
	drvinfo->eedump_len = 0;
}
static u32 ql_get_msglevel(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	return qdev->msg_enable;
}

static void ql_set_msglevel(struct net_device *ndev, u32 value)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	qdev->msg_enable = value;
}

static void ql_get_pauseparam(struct net_device *ndev,
			      struct ethtool_pauseparam *pause)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	u32 reg;

	if (qdev->mac_index == 0)
		reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg);
	else
		reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg);

	pause->autoneg = ql_get_auto_cfg_status(qdev);
	pause->rx_pause = (reg & MAC_CONFIG_REG_RF) >> 2;
	pause->tx_pause = (reg & MAC_CONFIG_REG_TF) >> 1;
}

static const struct ethtool_ops ql3xxx_ethtool_ops = {
	.get_settings = ql_get_settings,
	.get_drvinfo = ql_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_msglevel = ql_get_msglevel,
	.set_msglevel = ql_set_msglevel,
	.get_pauseparam = ql_get_pauseparam,
};
static int ql_populate_free_queue(struct ql3_adapter *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
	dma_addr_t map;
	int err;

	while (lrg_buf_cb) {
		if (!lrg_buf_cb->skb) {
			lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
							   qdev->lrg_buffer_len);
			if (unlikely(!lrg_buf_cb->skb)) {
				printk(KERN_DEBUG PFX
				       "%s: Failed netdev_alloc_skb().\n",

			} else {
				/*
				 * We save some space to copy the ethhdr from
				 * the first buffer.
				 */
				skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
				map = pci_map_single(qdev->pdev,
						     lrg_buf_cb->skb->data,
						     qdev->lrg_buffer_len -
						     QL_HEADER_SPACE,
						     PCI_DMA_FROMDEVICE);

				err = pci_dma_mapping_error(map);
				if (err) {
					printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
					       qdev->ndev->name, err);
					dev_kfree_skb(lrg_buf_cb->skb);
					lrg_buf_cb->skb = NULL;

				lrg_buf_cb->buf_phy_addr_low =
				    cpu_to_le32(LS_64BITS(map));
				lrg_buf_cb->buf_phy_addr_high =
				    cpu_to_le32(MS_64BITS(map));
				pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
				pci_unmap_len_set(lrg_buf_cb, maplen,
						  qdev->lrg_buffer_len -
						  QL_HEADER_SPACE);
				--qdev->lrg_buf_skb_check;
				if (!qdev->lrg_buf_skb_check)
					return 1;
			}
		}
		lrg_buf_cb = lrg_buf_cb->next;
	}
	return 0;
}
/*
 * Caller holds hw_lock.
 */
static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;

	if (qdev->small_buf_release_cnt >= 16) {
		while (qdev->small_buf_release_cnt >= 16) {
			qdev->small_buf_q_producer_index++;

			if (qdev->small_buf_q_producer_index ==

				qdev->small_buf_q_producer_index = 0;
			qdev->small_buf_release_cnt -= 8;
		}

		writel(qdev->small_buf_q_producer_index,
		       &port_regs->CommonRegs.rxSmallQProducerIndex);
	}
}

/*
 * Caller holds hw_lock.
 */
static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
{
	struct bufq_addr_element *lrg_buf_q_ele;
	int i;
	struct ql_rcv_buf_cb *lrg_buf_cb;
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;

	if ((qdev->lrg_buf_free_count >= 8) &&
	    (qdev->lrg_buf_release_cnt >= 16)) {

		if (qdev->lrg_buf_skb_check)
			if (!ql_populate_free_queue(qdev))
				return;

		lrg_buf_q_ele = qdev->lrg_buf_next_free;

		while ((qdev->lrg_buf_release_cnt >= 16) &&
		       (qdev->lrg_buf_free_count >= 8)) {

			for (i = 0; i < 8; i++) {
				lrg_buf_cb =
				    ql_get_from_lrg_buf_free_list(qdev);
				lrg_buf_q_ele->addr_high =
				    lrg_buf_cb->buf_phy_addr_high;
				lrg_buf_q_ele->addr_low =
				    lrg_buf_cb->buf_phy_addr_low;
				lrg_buf_q_ele++;

				qdev->lrg_buf_release_cnt--;
			}

			qdev->lrg_buf_q_producer_index++;

			if (qdev->lrg_buf_q_producer_index ==
			    qdev->num_lbufq_entries)
				qdev->lrg_buf_q_producer_index = 0;

			if (qdev->lrg_buf_q_producer_index ==
			    (qdev->num_lbufq_entries - 1)) {
				lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
			}
		}

		qdev->lrg_buf_next_free = lrg_buf_q_ele;
		writel(qdev->lrg_buf_q_producer_index,
		       &port_regs->CommonRegs.rxLargeQProducerIndex);
	}
}
static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
				   struct ob_mac_iocb_rsp *mac_rsp)
{
	struct ql_tx_buf_cb *tx_cb;
	int i;

	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
		printk(KERN_WARNING
		       "Frame was short but it was padded and sent.\n");
	}

	tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];

	/* Check the transmit response flags for any errors */
	if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
		printk(KERN_ERR
		       "Frame too short to be legal, frame not sent.\n");

		qdev->stats.tx_errors++;

		goto frame_not_sent;
	}

	if (tx_cb->seg_count == 0) {
		printk(KERN_ERR "tx_cb->seg_count == 0: %d\n",
		       mac_rsp->transaction_id);

		qdev->stats.tx_errors++;

		goto invalid_seg_count;
	}

	pci_unmap_single(qdev->pdev,
			 pci_unmap_addr(&tx_cb->map[0], mapaddr),
			 pci_unmap_len(&tx_cb->map[0], maplen),
			 PCI_DMA_TODEVICE);

	if (tx_cb->seg_count) {
		for (i = 1; i < tx_cb->seg_count; i++) {
			pci_unmap_page(qdev->pdev,
				       pci_unmap_addr(&tx_cb->map[i],
						      mapaddr),
				       pci_unmap_len(&tx_cb->map[i], maplen),
				       PCI_DMA_TODEVICE);
		}
	}
	qdev->stats.tx_packets++;
	qdev->stats.tx_bytes += tx_cb->skb->len;

frame_not_sent:
	dev_kfree_skb_irq(tx_cb->skb);

invalid_seg_count:
	atomic_inc(&qdev->tx_count);
}
static void ql_get_sbuf(struct ql3_adapter *qdev)
{
	if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
		qdev->small_buf_index = 0;
	qdev->small_buf_release_cnt++;
}

static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev)
{
	struct ql_rcv_buf_cb *lrg_buf_cb = NULL;

	lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index];
	qdev->lrg_buf_release_cnt++;
	if (++qdev->lrg_buf_index == qdev->num_large_buffers)
		qdev->lrg_buf_index = 0;
	return lrg_buf_cb;
}
/*
 * The difference between 3022 and 3032 for inbound completions:
 * 3022 uses two buffers per completion.  The first buffer contains
 * (some) header info, the second the remainder of the headers plus
 * the data.  For this chip we reserve some space at the top of the
 * receive buffer so that the header info in buffer one can be
 * prepended to buffer two.  Buffer two is then sent up while
 * buffer one is returned to the hardware to be reused.
 * 3032 receives all of its data and headers in one buffer for a
 * simpler process.  3032 also supports checksum verification as
 * can be seen in ql_process_macip_rx_intr().
 */
static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
				   struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
{
	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
	struct sk_buff *skb;
	u16 length = le16_to_cpu(ib_mac_rsp_ptr->length);

	/*
	 * Get the inbound address list (small buffer).
	 */
	ql_get_sbuf(qdev);

	if (qdev->device_id == QL3022_DEVICE_ID)
		lrg_buf_cb1 = ql_get_lbuf(qdev);

	/* start of second buffer */
	lrg_buf_cb2 = ql_get_lbuf(qdev);
	skb = lrg_buf_cb2->skb;

	qdev->stats.rx_packets++;
	qdev->stats.rx_bytes += length;

	skb_put(skb, length);
	pci_unmap_single(qdev->pdev,
			 pci_unmap_addr(lrg_buf_cb2, mapaddr),
			 pci_unmap_len(lrg_buf_cb2, maplen),
			 PCI_DMA_FROMDEVICE);
	prefetch(skb->data);
	skb->ip_summed = CHECKSUM_NONE;
	skb->protocol = eth_type_trans(skb, qdev->ndev);

	netif_receive_skb(skb);
	qdev->ndev->last_rx = jiffies;
	lrg_buf_cb2->skb = NULL;

	if (qdev->device_id == QL3022_DEVICE_ID)
		ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
}
static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
				     struct ib_ip_iocb_rsp *ib_ip_rsp_ptr)
{
	struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
	struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
	struct sk_buff *skb1 = NULL, *skb2;
	struct net_device *ndev = qdev->ndev;
	u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
	u16 size = 0;

	/*
	 * Get the inbound address list (small buffer).
	 */
	ql_get_sbuf(qdev);

	if (qdev->device_id == QL3022_DEVICE_ID) {
		/* start of first buffer on 3022 */
		lrg_buf_cb1 = ql_get_lbuf(qdev);
		skb1 = lrg_buf_cb1->skb;
		size = ETH_HLEN;
		if (*((u16 *) skb1->data) != 0xFFFF)
			size += VLAN_ETH_HLEN - ETH_HLEN;
	}

	/* start of second buffer */
	lrg_buf_cb2 = ql_get_lbuf(qdev);
	skb2 = lrg_buf_cb2->skb;

	skb_put(skb2, length);	/* Just the second buffer length here. */
	pci_unmap_single(qdev->pdev,
			 pci_unmap_addr(lrg_buf_cb2, mapaddr),
			 pci_unmap_len(lrg_buf_cb2, maplen),
			 PCI_DMA_FROMDEVICE);
	prefetch(skb2->data);

	skb2->ip_summed = CHECKSUM_NONE;
	if (qdev->device_id == QL3022_DEVICE_ID) {
		/*
		 * Copy the ethhdr from first buffer to second.  This
		 * is necessary for 3022 IP completions.
		 */
		skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN,
						 skb_push(skb2, size), size);
	} else {
		u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum);
		if (checksum &
		    (IB_IP_IOCB_RSP_3032_ICE |
		     IB_IP_IOCB_RSP_3032_CE)) {
			printk(KERN_ERR
			       "%s: Bad checksum for this %s packet, checksum = %x.\n",
			       ndev->name,
			       ((checksum & IB_IP_IOCB_RSP_3032_TCP) ?
				"TCP" : "UDP"), checksum);
		} else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) ||
			   (checksum & IB_IP_IOCB_RSP_3032_UDP &&
			    !(checksum & IB_IP_IOCB_RSP_3032_NUC))) {
			skb2->ip_summed = CHECKSUM_UNNECESSARY;
		}
	}
	skb2->protocol = eth_type_trans(skb2, qdev->ndev);

	netif_receive_skb(skb2);
	qdev->stats.rx_packets++;
	qdev->stats.rx_bytes += length;
	ndev->last_rx = jiffies;
	lrg_buf_cb2->skb = NULL;

	if (qdev->device_id == QL3022_DEVICE_ID)
		ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
	ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
}
static int ql_tx_rx_clean(struct ql3_adapter *qdev,
			  int *tx_cleaned, int *rx_cleaned, int work_to_do)
{
	struct net_rsp_iocb *net_rsp;
	struct net_device *ndev = qdev->ndev;
	int work_done = 0;

	/* While there are entries in the completion queue. */
	while ((le32_to_cpu(*(qdev->prsp_producer_index)) !=
		qdev->rsp_consumer_index) && (work_done < work_to_do)) {

		net_rsp = qdev->rsp_current;
		switch (net_rsp->opcode) {

		case OPCODE_OB_MAC_IOCB_FN0:
		case OPCODE_OB_MAC_IOCB_FN2:
			ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *)
					       net_rsp);
			(*tx_cleaned)++;
			break;

		case OPCODE_IB_MAC_IOCB:
		case OPCODE_IB_3032_MAC_IOCB:
			ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
					       net_rsp);
			(*rx_cleaned)++;
			break;

		case OPCODE_IB_IP_IOCB:
		case OPCODE_IB_3032_IP_IOCB:
			ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
						 net_rsp);
			(*rx_cleaned)++;
			break;

		default:
			{
				u32 *tmp = (u32 *) net_rsp;
				printk(KERN_ERR PFX
				       "%s: Hit default case, not "
				       "handled!\n"
				       "	dropping the packet, opcode = "
				       "%x.\n",
				       ndev->name, net_rsp->opcode);
				printk(KERN_ERR PFX
				       "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",
				       (unsigned long int)tmp[0],
				       (unsigned long int)tmp[1],
				       (unsigned long int)tmp[2],
				       (unsigned long int)tmp[3]);
			}
		}

		qdev->rsp_consumer_index++;

		if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) {
			qdev->rsp_consumer_index = 0;
			qdev->rsp_current = qdev->rsp_q_virt_addr;
		} else {
			qdev->rsp_current++;
		}

	work_done = *tx_cleaned + *rx_cleaned;
	return work_done;
}
static int ql_poll(struct net_device *ndev, int *budget)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	int work_to_do = min(*budget, ndev->quota);
	int rx_cleaned = 0, tx_cleaned = 0;
	unsigned long hw_flags;
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;

	if (!netif_carrier_ok(ndev))
		goto quit_polling;

	ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, work_to_do);
	*budget -= rx_cleaned;
	ndev->quota -= rx_cleaned;

	if (tx_cleaned + rx_cleaned != work_to_do ||
	    !netif_running(ndev)) {
quit_polling:
		netif_rx_complete(ndev);

		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
		ql_update_small_bufq_prod_index(qdev);
		ql_update_lrg_bufq_prod_index(qdev);
		writel(qdev->rsp_consumer_index,
		       &port_regs->CommonRegs.rspQConsumerIndex);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

		ql_enable_interrupts(qdev);
		return 0;
	}
	return 1;
}
static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct ql3_adapter *qdev = netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	u32 value;
	int handled = 1;
	u32 var;

	value =
	    ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);

	if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) {
		spin_lock(&qdev->adapter_lock);
		netif_stop_queue(qdev->ndev);
		netif_carrier_off(qdev->ndev);
		ql_disable_interrupts(qdev);
		qdev->port_link_state = LS_DOWN;
		set_bit(QL_RESET_ACTIVE, &qdev->flags);

		if (value & ISP_CONTROL_FE) {
			/*
			 * Chip Fatal Error.
			 */
			var =
			    ql_read_page0_reg_l(qdev,
						&port_regs->PortFatalErrStatus);
			printk(KERN_WARNING PFX
			       "%s: Resetting chip. PortFatalErrStatus "
			       "register = 0x%x\n", ndev->name, var);
			set_bit(QL_RESET_START, &qdev->flags);
		} else {
			/*
			 * Soft Reset Requested.
			 */
			set_bit(QL_RESET_PER_SCSI, &qdev->flags);
			printk(KERN_WARNING PFX
			       "%s: Another function issued a reset to the "
			       "chip. ISR value = %x.\n", ndev->name, value);
		}
		queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
		spin_unlock(&qdev->adapter_lock);
	} else if (value & ISP_IMR_DISABLE_CMPL_INT) {
		ql_disable_interrupts(qdev);
		if (likely(netif_rx_schedule_prep(ndev))) {
			__netif_rx_schedule(ndev);
		}

	return IRQ_RETVAL(handled);
}
/*
 * Get the total number of segments needed for the
 * given number of fragments.  This is necessary because
 * outbound address lists (OAL) will be used when more than
 * two frags are given.  Each address list has 5 addr/len
 * pairs.  The 5th pair in each OAL is used to point to
 * the next OAL if more frags are coming.
 * That is why the frags:segment count ratio is not linear.
 */
static int ql_get_seg_count(struct ql3_adapter *qdev,
			    unsigned short frags)
{
	if (qdev->device_id == QL3022_DEVICE_ID)
		return 1;

	switch (frags) {
	case 0:	return 1;	/* just the skb->data seg */
	case 1:	return 2;	/* skb->data + 1 frag */
	case 2:	return 3;	/* skb->data + 2 frags */
	case 3:	return 5;	/* skb->data + 1 frag + 1 OAL containing 2 frags */
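
/*
 * Editor's sketch, not part of the original driver: the mapping above
 * written out as the arithmetic it encodes.  The IOCB itself carries
 * three addr/len pairs and each chained OAL carries five; the last
 * pair of each is spent on a chain pointer whenever more entries
 * follow, so the segment count jumps by an extra one at 3 frags, again
 * at 7, and so on.  Assumes the full switch continues the same pattern
 * beyond the cases shown.
 */
static int ql_seg_count_sketch(unsigned short frags)
{
	int bufs = frags + 1;	/* skb->data plus one buffer per frag */
	int segs = 0;
	int room = 3;		/* addr/len pairs available in the IOCB */

	while (bufs > room) {
		/* room - 1 data pairs, plus one pair as chain pointer */
		segs += room;
		bufs -= room - 1;
		room = 5;	/* each chained OAL holds five pairs */
	}
	return segs + bufs;
}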
static void ql_hw_csum_setup(const struct sk_buff *skb,
			     struct ob_mac_iocb_req *mac_iocb_ptr)
{
	const struct iphdr *ip = ip_hdr(skb);

	mac_iocb_ptr->ip_hdr_off = skb_network_offset(skb);
	mac_iocb_ptr->ip_hdr_len = ip->ihl;

	if (ip->protocol == IPPROTO_TCP) {
		mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC |
		    OB_3032MAC_IOCB_REQ_IC;
	} else {
		mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC |
		    OB_3032MAC_IOCB_REQ_IC;
	}
}
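
/*
 * Editor's note, not part of the original source: ql_hw_csum_setup()
 * only applies to the 3032 (the OB_3032MAC_* flags above), and the
 * expected call site -- outside this excerpt, so the exact code is an
 * assumption -- is the transmit path, gated on skbs that request
 * hardware checksumming, roughly:
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL)
 *		ql_hw_csum_setup(skb, mac_iocb_ptr);
 */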
/*
 * Map the buffers for this transmit.  This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 */
static int ql_send_map(struct ql3_adapter *qdev,
		       struct ob_mac_iocb_req *mac_iocb_ptr,
		       struct ql_tx_buf_cb *tx_cb,
		       struct sk_buff *skb)
{
	struct oal *oal;
	struct oal_entry *oal_entry;
	int len = skb_headlen(skb);
	dma_addr_t map;
	int err;
	int completed_segs, i;
	int seg_cnt, seg = 0;
	int frag_cnt = (int)skb_shinfo(skb)->nr_frags;

	seg_cnt = tx_cb->seg_count;
	/*
	 * Map the skb buffer first.
	 */
	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

	err = pci_dma_mapping_error(map);
	if (err) {
		printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
		       qdev->ndev->name, err);
		return NETDEV_TX_BUSY;
	}

	oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
	oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
	oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
	oal_entry->len = cpu_to_le32(len);
	pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
	pci_unmap_len_set(&tx_cb->map[seg], maplen, len);
	seg++;

	if (seg_cnt == 1) {
		/* Terminate the last segment. */
		oal_entry->len =
		    cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY);
	} else {
		oal = tx_cb->oal;
		for (completed_segs = 0; completed_segs < frag_cnt;
		     completed_segs++, seg++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs];
			oal_entry++;
			if ((seg == 2 && seg_cnt > 3) ||   /* Check for continuation */
			    (seg == 7 && seg_cnt > 8) ||   /* requirements. It's strange */
			    (seg == 12 && seg_cnt > 13) || /* but necessary. */
			    (seg == 17 && seg_cnt > 18)) {
				/* Continuation entry points to outbound address list. */
				map = pci_map_single(qdev->pdev, oal,
						     sizeof(struct oal),
						     PCI_DMA_TODEVICE);

				err = pci_dma_mapping_error(map);
				if (err) {
					printk(KERN_ERR "%s: PCI mapping outbound address list failed with error: %d\n",
					       qdev->ndev->name, err);
					goto map_error;
				}

				oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
				oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
				oal_entry->len =
				    cpu_to_le32(sizeof(struct oal) |
						OAL_CONT_ENTRY);
				pci_unmap_addr_set(&tx_cb->map[seg], mapaddr,
						   map);
				pci_unmap_len_set(&tx_cb->map[seg], maplen,
						  sizeof(struct oal));
				oal_entry = (struct oal_entry *)oal;
				oal++;
				seg++;
			}

			map = pci_map_page(qdev->pdev, frag->page,
					   frag->page_offset, frag->size,
					   PCI_DMA_TODEVICE);

			err = pci_dma_mapping_error(map);
			if (err) {
				printk(KERN_ERR "%s: PCI mapping frags failed with error: %d\n",
				       qdev->ndev->name, err);
				goto map_error;
			}

			oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
			oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
			oal_entry->len = cpu_to_le32(frag->size);
			pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
			pci_unmap_len_set(&tx_cb->map[seg], maplen,
					  frag->size);
		}
		/* Terminate the last segment. */
		oal_entry->len =
		    cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY);
	}

	return NETDEV_TX_OK;

map_error:
	/* A PCI mapping failed, so back out: traverse the OALs and the
	 * pages that have been mapped so far and unmap them to clean up
	 * properly.
	 */
	seg = 1;
	oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
	oal = tx_cb->oal;
	for (i = 0; i < completed_segs; i++, seg++) {
		oal_entry++;

		if ((seg == 2 && seg_cnt > 3) ||   /* Check for continuation */
		    (seg == 7 && seg_cnt > 8) ||   /* requirements. It's strange */
		    (seg == 12 && seg_cnt > 13) || /* but necessary. */
		    (seg == 17 && seg_cnt > 18)) {
			pci_unmap_single(qdev->pdev,
					 pci_unmap_addr(&tx_cb->map[seg], mapaddr),
					 pci_unmap_len(&tx_cb->map[seg], maplen),
					 PCI_DMA_TODEVICE);
			oal++;
			seg++;
		}

		pci_unmap_page(qdev->pdev,
			       pci_unmap_addr(&tx_cb->map[seg], mapaddr),
			       pci_unmap_len(&tx_cb->map[seg], maplen),
			       PCI_DMA_TODEVICE);
	}

	pci_unmap_single(qdev->pdev,
			 pci_unmap_addr(&tx_cb->map[0], mapaddr),
			 pci_unmap_len(&tx_cb->map[0], maplen),
			 PCI_DMA_TODEVICE);

	return NETDEV_TX_BUSY;
}
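
/*
 * The continuation checks above (seg 2/7/12/17) fall out of the OAL
 * geometry: entry 2 is the last of the three IOCB addr/len pairs, and
 * entries 7, 12 and 17 are the fifth slot of each successive OAL.
 * Whenever more segments remain at one of those slots, the slot is
 * consumed by a pointer to the next OAL instead of user data, which is
 * why 'seg' is advanced twice on that iteration.
 */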
/*
 * The difference between 3022 and 3032 sends:
 * 3022 only supports a simple single segment transmission.
 * 3032 supports checksumming and scatter/gather lists (fragments).
 * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
 * in the IOCB plus a chain of outbound address lists (OAL) that
 * each contain 5 ALPs.  The last ALP of the IOCB (3rd) or OAL (5th)
 * will be used to point to an OAL when more ALP entries are required.
 * The IOCB is always the top of the chain followed by one or more
 * OALs (when necessary).
 */
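
/*
 * For example, a 6-fragment (7 user segment) send on the 3032 is laid
 * out as:
 *
 *   IOCB:  [seg0][seg1][cont --> OAL]
 *   OAL:   [seg2][seg3][seg4][seg5][seg6 | OAL_LAST_ENTRY]
 *
 * which is the 8 entries ql_get_seg_count() reports for 6 frags.
 */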
static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
{
	struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	struct ql_tx_buf_cb *tx_cb;
	u32 tot_len = skb->len;
	struct ob_mac_iocb_req *mac_iocb_ptr;

	if (unlikely(atomic_read(&qdev->tx_count) < 2))
		return NETDEV_TX_BUSY;

	tx_cb = &qdev->tx_buf[qdev->req_producer_index];
	if ((tx_cb->seg_count = ql_get_seg_count(qdev,
						 skb_shinfo(skb)->nr_frags)) == -1) {
		printk(KERN_ERR PFX "%s: invalid segment count!\n", __func__);
		return NETDEV_TX_OK;
	}

	mac_iocb_ptr = tx_cb->queue_entry;
	memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req));
	mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
	mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X;
	mac_iocb_ptr->flags |= qdev->mb_bit_mask;
	mac_iocb_ptr->transaction_id = qdev->req_producer_index;
	mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len);
	tx_cb->skb = skb;
	if (qdev->device_id == QL3032_DEVICE_ID &&
	    skb->ip_summed == CHECKSUM_PARTIAL)
		ql_hw_csum_setup(skb, mac_iocb_ptr);

	if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) {
		printk(KERN_ERR PFX "%s: Could not map the segments!\n", __func__);
		return NETDEV_TX_BUSY;
	}

	wmb();
	qdev->req_producer_index++;
	if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
		qdev->req_producer_index = 0;

	ql_write_common_reg_l(qdev,
			      &port_regs->CommonRegs.reqQProducerIndex,
			      qdev->req_producer_index);

	ndev->trans_start = jiffies;
	if (netif_msg_tx_queued(qdev))
		printk(KERN_DEBUG PFX "%s: tx queued, slot %d, len %d\n",
		       ndev->name, qdev->req_producer_index, skb->len);

	atomic_dec(&qdev->tx_count);
	return NETDEV_TX_OK;
}
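
/*
 * Example of the producer-index wrap above, assuming (for illustration
 * only) NUM_REQ_Q_ENTRIES == 256: slots are handed out 0, 1, ... 255
 * and then wrap back to 0, so the doorbell write always publishes the
 * index of the next free IOCB, never one past the end of the ring.
 * The wmb() before the increment makes sure the IOCB contents are
 * visible to the chip before the new index is.
 */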
static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
{
	qdev->req_q_size =
	    (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req));

	qdev->req_q_virt_addr =
	    pci_alloc_consistent(qdev->pdev,
				 (size_t) qdev->req_q_size,
				 &qdev->req_q_phy_addr);

	if ((qdev->req_q_virt_addr == NULL) ||
	    LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) {
		printk(KERN_ERR PFX "%s: reqQ allocation failed.\n",
		       qdev->ndev->name);
		return -ENOMEM;
	}

	qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb);

	qdev->rsp_q_virt_addr =
	    pci_alloc_consistent(qdev->pdev,
				 (size_t) qdev->rsp_q_size,
				 &qdev->rsp_q_phy_addr);

	if ((qdev->rsp_q_virt_addr == NULL) ||
	    LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) {
		printk(KERN_ERR PFX
		       "%s: rspQ allocation failed\n",
		       qdev->ndev->name);
		pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size,
				    qdev->req_q_virt_addr,
				    qdev->req_q_phy_addr);
		return -ENOMEM;
	}

	set_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);

	return 0;
}
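
/*
 * The (addr & (size - 1)) tests above are power-of-two alignment
 * checks: if, say, req_q_size were 0x4000, the queue's physical
 * address is suitably aligned exactly when its low 14 bits are zero.
 * A misaligned allocation is treated the same as a failed one,
 * apparently because the chip requires naturally aligned rings.
 */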
static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev)
{
	if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags)) {
		printk(KERN_INFO PFX
		       "%s: Already done.\n", qdev->ndev->name);
		return;
	}

	pci_free_consistent(qdev->pdev,
			    qdev->req_q_size,
			    qdev->req_q_virt_addr, qdev->req_q_phy_addr);

	qdev->req_q_virt_addr = NULL;

	pci_free_consistent(qdev->pdev,
			    qdev->rsp_q_size,
			    qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr);

	qdev->rsp_q_virt_addr = NULL;

	clear_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
}
static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
{
	/* Create Large Buffer Queue */
	qdev->lrg_buf_q_size =
	    qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry);
	if (qdev->lrg_buf_q_size < PAGE_SIZE)
		qdev->lrg_buf_q_alloc_size = PAGE_SIZE;
	else
		qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;

	qdev->lrg_buf = kmalloc(qdev->num_large_buffers *
				sizeof(struct ql_rcv_buf_cb), GFP_KERNEL);
	if (qdev->lrg_buf == NULL) {
		printk(KERN_ERR PFX
		       "%s: qdev->lrg_buf alloc failed.\n", qdev->ndev->name);
		return -ENOMEM;
	}

	qdev->lrg_buf_q_alloc_virt_addr =
	    pci_alloc_consistent(qdev->pdev,
				 qdev->lrg_buf_q_alloc_size,
				 &qdev->lrg_buf_q_alloc_phy_addr);

	if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
		printk(KERN_ERR PFX
		       "%s: lBufQ allocation failed\n", qdev->ndev->name);
		return -ENOMEM;
	}
	qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
	qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr;

	/* Create Small Buffer Queue */
	qdev->small_buf_q_size =
	    NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
	if (qdev->small_buf_q_size < PAGE_SIZE)
		qdev->small_buf_q_alloc_size = PAGE_SIZE;
	else
		qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2;

	qdev->small_buf_q_alloc_virt_addr =
	    pci_alloc_consistent(qdev->pdev,
				 qdev->small_buf_q_alloc_size,
				 &qdev->small_buf_q_alloc_phy_addr);

	if (qdev->small_buf_q_alloc_virt_addr == NULL) {
		printk(KERN_ERR PFX
		       "%s: Small Buffer Queue allocation failed.\n",
		       qdev->ndev->name);
		pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size,
				    qdev->lrg_buf_q_alloc_virt_addr,
				    qdev->lrg_buf_q_alloc_phy_addr);
		return -ENOMEM;
	}

	qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr;
	qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr;
	set_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
	return 0;
}
static void ql_free_buffer_queues(struct ql3_adapter *qdev)
{
	if (!test_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags)) {
		printk(KERN_INFO PFX
		       "%s: Already done.\n", qdev->ndev->name);
		return;
	}
	kfree(qdev->lrg_buf);
	pci_free_consistent(qdev->pdev,
			    qdev->lrg_buf_q_alloc_size,
			    qdev->lrg_buf_q_alloc_virt_addr,
			    qdev->lrg_buf_q_alloc_phy_addr);

	qdev->lrg_buf_q_virt_addr = NULL;

	pci_free_consistent(qdev->pdev,
			    qdev->small_buf_q_alloc_size,
			    qdev->small_buf_q_alloc_virt_addr,
			    qdev->small_buf_q_alloc_phy_addr);

	qdev->small_buf_q_virt_addr = NULL;

	clear_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
}
static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
{
	int i;
	struct bufq_addr_element *small_buf_q_entry;

	/* Currently we allocate one chunk of memory and carve it up for
	 * all of the small buffers. */
	qdev->small_buf_total_size =
	    (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES *
	     QL_SMALL_BUFFER_SIZE);

	qdev->small_buf_virt_addr =
	    pci_alloc_consistent(qdev->pdev,
				 qdev->small_buf_total_size,
				 &qdev->small_buf_phy_addr);

	if (qdev->small_buf_virt_addr == NULL) {
		printk(KERN_ERR PFX
		       "%s: Failed to get small buffer memory.\n",
		       qdev->ndev->name);
		return -ENOMEM;
	}

	qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr);
	qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr);

	small_buf_q_entry = qdev->small_buf_q_virt_addr;

	/* Initialize the small buffer queue. */
	for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) {
		small_buf_q_entry->addr_high =
		    cpu_to_le32(qdev->small_buf_phy_addr_high);
		small_buf_q_entry->addr_low =
		    cpu_to_le32(qdev->small_buf_phy_addr_low +
				(i * QL_SMALL_BUFFER_SIZE));
		small_buf_q_entry++;
	}
	qdev->small_buf_index = 0;
	set_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags);
	return 0;
}
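
/*
 * Example of the carve-up above: buffer i lives at
 * small_buf_phy_addr + i * QL_SMALL_BUFFER_SIZE, so the single
 * pci_alloc_consistent() chunk is sliced into
 * QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES equal pieces purely
 * by index arithmetic; no per-buffer allocation or mapping is needed.
 */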
static void ql_free_small_buffers(struct ql3_adapter *qdev)
{
	if (!test_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags)) {
		printk(KERN_INFO PFX
		       "%s: Already done.\n", qdev->ndev->name);
		return;
	}
	if (qdev->small_buf_virt_addr != NULL) {
		pci_free_consistent(qdev->pdev,
				    qdev->small_buf_total_size,
				    qdev->small_buf_virt_addr,
				    qdev->small_buf_phy_addr);

		qdev->small_buf_virt_addr = NULL;
	}
}
static void ql_free_large_buffers(struct ql3_adapter *qdev)
{
	int i = 0;
	struct ql_rcv_buf_cb *lrg_buf_cb;

	for (i = 0; i < qdev->num_large_buffers; i++) {
		lrg_buf_cb = &qdev->lrg_buf[i];
		if (lrg_buf_cb->skb) {
			dev_kfree_skb(lrg_buf_cb->skb);
			pci_unmap_single(qdev->pdev,
					 pci_unmap_addr(lrg_buf_cb, mapaddr),
					 pci_unmap_len(lrg_buf_cb, maplen),
					 PCI_DMA_FROMDEVICE);
			memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
		} else {
			break;
		}
	}
}
static void ql_init_large_buffers(struct ql3_adapter *qdev)
{
	int i;
	struct ql_rcv_buf_cb *lrg_buf_cb;
	struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr;

	for (i = 0; i < qdev->num_large_buffers; i++) {
		lrg_buf_cb = &qdev->lrg_buf[i];
		buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high;
		buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low;
		buf_addr_ele++;
	}
	qdev->lrg_buf_index = 0;
	qdev->lrg_buf_skb_check = 0;
}
static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
{
	int i;
	struct ql_rcv_buf_cb *lrg_buf_cb;
	struct sk_buff *skb;
	dma_addr_t map;
	int err;

	for (i = 0; i < qdev->num_large_buffers; i++) {
		skb = netdev_alloc_skb(qdev->ndev,
				       qdev->lrg_buffer_len);
		if (unlikely(!skb)) {
			/* Better luck next round */
			printk(KERN_ERR PFX
			       "%s: large buff alloc failed "
			       "for %d bytes at index %d.\n",
			       qdev->ndev->name,
			       qdev->lrg_buffer_len * 2, i);
			ql_free_large_buffers(qdev);
			return -ENOMEM;
		} else {
			lrg_buf_cb = &qdev->lrg_buf[i];
			memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
			lrg_buf_cb->index = i;
			lrg_buf_cb->skb = skb;
			/*
			 * We save some space to copy the ethhdr from the
			 * first buffer
			 */
			skb_reserve(skb, QL_HEADER_SPACE);
			map = pci_map_single(qdev->pdev,
					     skb->data,
					     qdev->lrg_buffer_len -
					     QL_HEADER_SPACE,
					     PCI_DMA_FROMDEVICE);

			err = pci_dma_mapping_error(map);
			if (err) {
				printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
				       qdev->ndev->name, err);
				ql_free_large_buffers(qdev);
				return -ENOMEM;
			}

			pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
			pci_unmap_len_set(lrg_buf_cb, maplen,
					  qdev->lrg_buffer_len -
					  QL_HEADER_SPACE);
			lrg_buf_cb->buf_phy_addr_low =
			    cpu_to_le32(LS_64BITS(map));
			lrg_buf_cb->buf_phy_addr_high =
			    cpu_to_le32(MS_64BITS(map));
		}
	}
	return 0;
}
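
/*
 * LS_64BITS()/MS_64BITS() split the dma_addr_t into the 32-bit halves
 * the hardware expects.  For example, a mapping at physical address
 * 0x0000000123456780 is stored as buf_phy_addr_low = 0x23456780 and
 * buf_phy_addr_high = 0x00000001, each converted to little-endian for
 * the chip.
 */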
static void ql_free_send_free_list(struct ql3_adapter *qdev)
{
	struct ql_tx_buf_cb *tx_cb;
	int i;

	tx_cb = &qdev->tx_buf[0];
	for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
		if (tx_cb->oal) {
			kfree(tx_cb->oal);
			tx_cb->oal = NULL;
		}
		tx_cb++;
	}
}

static int ql_create_send_free_list(struct ql3_adapter *qdev)
{
	struct ql_tx_buf_cb *tx_cb;
	int i;
	struct ob_mac_iocb_req *req_q_curr =
	    qdev->req_q_virt_addr;

	/* Create free list of transmit buffers */
	for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
		tx_cb = &qdev->tx_buf[i];
		tx_cb->skb = NULL;
		tx_cb->queue_entry = req_q_curr;
		req_q_curr++;
		tx_cb->oal = kmalloc(512, GFP_KERNEL);
		if (tx_cb->oal == NULL)
			return -1;
	}
	return 0;
}
static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
{
	if (qdev->ndev->mtu == NORMAL_MTU_SIZE) {
		qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES;
		qdev->lrg_buffer_len = NORMAL_MTU_SIZE;
	} else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) {
		/*
		 * Bigger buffers, so less of them.
		 */
		qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES;
		qdev->lrg_buffer_len = JUMBO_MTU_SIZE;
	} else {
		printk(KERN_ERR PFX
		       "%s: Invalid mtu size. Only 1500 and 9000 are accepted.\n",
		       qdev->ndev->name);
		return -ENOMEM;
	}
	qdev->num_large_buffers = qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY;
	qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE;
	qdev->max_frame_size =
	    (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE;
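
	/*
	 * To illustrate the arithmetic above for a 1500-byte MTU:
	 * lrg_buffer_len becomes 1500 plus the VLAN Ethernet header,
	 * the VLAN ID length and the reserved QL_HEADER_SPACE, while
	 * max_frame_size is that total minus QL_HEADER_SPACE plus the
	 * 4-byte Ethernet CRC, i.e. the largest on-wire frame
	 * (including CRC) that fits in one receive buffer.
	 */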
	/*
	 * First allocate a page of shared memory and use it for shadow
	 * locations of Network Request Queue Consumer Address Register and
	 * Network Completion Queue Producer Index Register
	 */
	qdev->shadow_reg_virt_addr =
	    pci_alloc_consistent(qdev->pdev,
				 PAGE_SIZE, &qdev->shadow_reg_phy_addr);

	if (qdev->shadow_reg_virt_addr != NULL) {
		qdev->preq_consumer_index = (u16 *) qdev->shadow_reg_virt_addr;
		qdev->req_consumer_index_phy_addr_high =
		    MS_64BITS(qdev->shadow_reg_phy_addr);
		qdev->req_consumer_index_phy_addr_low =
		    LS_64BITS(qdev->shadow_reg_phy_addr);

		qdev->prsp_producer_index =
		    (u32 *) (((u8 *) qdev->preq_consumer_index) + 8);
		qdev->rsp_producer_index_phy_addr_high =
		    qdev->req_consumer_index_phy_addr_high;
		qdev->rsp_producer_index_phy_addr_low =
		    qdev->req_consumer_index_phy_addr_low + 8;
	} else {
		printk(KERN_ERR PFX
		       "%s: shadowReg Alloc failed.\n", qdev->ndev->name);
		return -ENOMEM;
	}

	if (ql_alloc_net_req_rsp_queues(qdev) != 0) {
		printk(KERN_ERR PFX
		       "%s: ql_alloc_net_req_rsp_queues failed.\n",
		       qdev->ndev->name);
		goto err_req_rsp;
	}

	if (ql_alloc_buffer_queues(qdev) != 0) {
		printk(KERN_ERR PFX
		       "%s: ql_alloc_buffer_queues failed.\n",
		       qdev->ndev->name);
		goto err_buffer_queues;
	}

	if (ql_alloc_small_buffers(qdev) != 0) {
		printk(KERN_ERR PFX
		       "%s: ql_alloc_small_buffers failed\n", qdev->ndev->name);
		goto err_small_buffers;
	}

	if (ql_alloc_large_buffers(qdev) != 0) {
		printk(KERN_ERR PFX
		       "%s: ql_alloc_large_buffers failed\n", qdev->ndev->name);
		goto err_small_buffers;
	}

	/* Initialize the large buffer queue. */
	ql_init_large_buffers(qdev);
	if (ql_create_send_free_list(qdev))
		goto err_free_list;

	qdev->rsp_current = qdev->rsp_q_virt_addr;

	return 0;
err_free_list:
	ql_free_send_free_list(qdev);
err_small_buffers:
	ql_free_buffer_queues(qdev);
err_buffer_queues:
	ql_free_net_req_rsp_queues(qdev);
err_req_rsp:
	pci_free_consistent(qdev->pdev,
			    PAGE_SIZE,
			    qdev->shadow_reg_virt_addr,
			    qdev->shadow_reg_phy_addr);

	return -ENOMEM;
}
static void ql_free_mem_resources(struct ql3_adapter *qdev)
{
	ql_free_send_free_list(qdev);
	ql_free_large_buffers(qdev);
	ql_free_small_buffers(qdev);
	ql_free_buffer_queues(qdev);
	ql_free_net_req_rsp_queues(qdev);
	if (qdev->shadow_reg_virt_addr != NULL) {
		pci_free_consistent(qdev->pdev,
				    PAGE_SIZE,
				    qdev->shadow_reg_virt_addr,
				    qdev->shadow_reg_phy_addr);
		qdev->shadow_reg_virt_addr = NULL;
	}
}
static int ql_init_misc_registers(struct ql3_adapter *qdev)
{
	struct ql3xxx_local_ram_registers __iomem *local_ram =
	    (void __iomem *)qdev->mem_map_registers;

	if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 4))
		return -1;

	ql_write_page2_reg(qdev,
			   &local_ram->bufletSize, qdev->nvram_data.bufletSize);

	ql_write_page2_reg(qdev,
			   &local_ram->maxBufletCount,
			   qdev->nvram_data.bufletCount);

	ql_write_page2_reg(qdev,
			   &local_ram->freeBufletThresholdLow,
			   (qdev->nvram_data.tcpWindowThreshold25 << 16) |
			   (qdev->nvram_data.tcpWindowThreshold0));

	ql_write_page2_reg(qdev,
			   &local_ram->freeBufletThresholdHigh,
			   qdev->nvram_data.tcpWindowThreshold50);

	ql_write_page2_reg(qdev,
			   &local_ram->ipHashTableBase,
			   (qdev->nvram_data.ipHashTableBaseHi << 16) |
			   qdev->nvram_data.ipHashTableBaseLo);
	ql_write_page2_reg(qdev,
			   &local_ram->ipHashTableCount,
			   qdev->nvram_data.ipHashTableSize);
	ql_write_page2_reg(qdev,
			   &local_ram->tcpHashTableBase,
			   (qdev->nvram_data.tcpHashTableBaseHi << 16) |
			   qdev->nvram_data.tcpHashTableBaseLo);
	ql_write_page2_reg(qdev,
			   &local_ram->tcpHashTableCount,
			   qdev->nvram_data.tcpHashTableSize);
	ql_write_page2_reg(qdev,
			   &local_ram->ncbBase,
			   (qdev->nvram_data.ncbTableBaseHi << 16) |
			   qdev->nvram_data.ncbTableBaseLo);
	ql_write_page2_reg(qdev,
			   &local_ram->maxNcbCount,
			   qdev->nvram_data.ncbTableSize);
	ql_write_page2_reg(qdev,
			   &local_ram->drbBase,
			   (qdev->nvram_data.drbTableBaseHi << 16) |
			   qdev->nvram_data.drbTableBaseLo);
	ql_write_page2_reg(qdev,
			   &local_ram->maxDrbCount,
			   qdev->nvram_data.drbTableSize);
	ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK);
	return 0;
}
static int ql_adapter_initialize(struct ql3_adapter *qdev)
{
	u32 value;
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	struct ql3xxx_host_memory_registers __iomem *hmem_regs =
	    (void __iomem *)port_regs;
	u32 delay = 10;
	int status = 0;

	if (ql_mii_setup(qdev))
		return -1;

	/* Bring the PHY out of reset. */
	ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			    (ISP_SERIAL_PORT_IF_WE |
			     (ISP_SERIAL_PORT_IF_WE << 16)));

	qdev->port_link_state = LS_DOWN;
	netif_carrier_off(qdev->ndev);

	/* V2 chip fix for ARS-39168. */
	ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
			    (ISP_SERIAL_PORT_IF_SDE |
			     (ISP_SERIAL_PORT_IF_SDE << 16)));

	/* Request Queue Registers */
	*((u32 *) (qdev->preq_consumer_index)) = 0;
	atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES);
	qdev->req_producer_index = 0;

	ql_write_page1_reg(qdev,
			   &hmem_regs->reqConsumerIndexAddrHigh,
			   qdev->req_consumer_index_phy_addr_high);
	ql_write_page1_reg(qdev,
			   &hmem_regs->reqConsumerIndexAddrLow,
			   qdev->req_consumer_index_phy_addr_low);

	ql_write_page1_reg(qdev,
			   &hmem_regs->reqBaseAddrHigh,
			   MS_64BITS(qdev->req_q_phy_addr));
	ql_write_page1_reg(qdev,
			   &hmem_regs->reqBaseAddrLow,
			   LS_64BITS(qdev->req_q_phy_addr));
	ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES);

	/* Response Queue Registers */
	*((u16 *) (qdev->prsp_producer_index)) = 0;
	qdev->rsp_consumer_index = 0;
	qdev->rsp_current = qdev->rsp_q_virt_addr;

	ql_write_page1_reg(qdev,
			   &hmem_regs->rspProducerIndexAddrHigh,
			   qdev->rsp_producer_index_phy_addr_high);

	ql_write_page1_reg(qdev,
			   &hmem_regs->rspProducerIndexAddrLow,
			   qdev->rsp_producer_index_phy_addr_low);

	ql_write_page1_reg(qdev,
			   &hmem_regs->rspBaseAddrHigh,
			   MS_64BITS(qdev->rsp_q_phy_addr));

	ql_write_page1_reg(qdev,
			   &hmem_regs->rspBaseAddrLow,
			   LS_64BITS(qdev->rsp_q_phy_addr));

	ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES);

	/* Large Buffer Queue */
	ql_write_page1_reg(qdev,
			   &hmem_regs->rxLargeQBaseAddrHigh,
			   MS_64BITS(qdev->lrg_buf_q_phy_addr));

	ql_write_page1_reg(qdev,
			   &hmem_regs->rxLargeQBaseAddrLow,
			   LS_64BITS(qdev->lrg_buf_q_phy_addr));

	ql_write_page1_reg(qdev, &hmem_regs->rxLargeQLength,
			   qdev->num_lbufq_entries);

	ql_write_page1_reg(qdev,
			   &hmem_regs->rxLargeBufferLength,
			   qdev->lrg_buffer_len);

	/* Small Buffer Queue */
	ql_write_page1_reg(qdev,
			   &hmem_regs->rxSmallQBaseAddrHigh,
			   MS_64BITS(qdev->small_buf_q_phy_addr));

	ql_write_page1_reg(qdev,
			   &hmem_regs->rxSmallQBaseAddrLow,
			   LS_64BITS(qdev->small_buf_q_phy_addr));

	ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES);
	ql_write_page1_reg(qdev,
			   &hmem_regs->rxSmallBufferLength,
			   QL_SMALL_BUFFER_SIZE);

	qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1;
	qdev->small_buf_release_cnt = 8;
	qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1;
	qdev->lrg_buf_release_cnt = 8;
	qdev->lrg_buf_next_free =
	    (struct bufq_addr_element *)qdev->lrg_buf_q_virt_addr;
	qdev->small_buf_index = 0;
	qdev->lrg_buf_index = 0;
	qdev->lrg_buf_free_count = 0;
	qdev->lrg_buf_free_head = NULL;
	qdev->lrg_buf_free_tail = NULL;

	ql_write_common_reg(qdev,
			    &port_regs->CommonRegs.
			    rxSmallQProducerIndex,
			    qdev->small_buf_q_producer_index);
	ql_write_common_reg(qdev,
			    &port_regs->CommonRegs.
			    rxLargeQProducerIndex,
			    qdev->lrg_buf_q_producer_index);

	/*
	 * Find out if the chip has already been initialized.  If it has, then
	 * we skip some of the initialization.
	 */
	clear_bit(QL_LINK_MASTER, &qdev->flags);
	value = ql_read_page0_reg(qdev, &port_regs->portStatus);
	if ((value & PORT_STATUS_IC) == 0) {

		/* Chip has not been configured yet, so let it rip. */
		if (ql_init_misc_registers(qdev)) {
			status = -1;
			goto out;
		}

		value = qdev->nvram_data.tcpMaxWindowSize;
		ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value);

		value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig;

		if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK,
				    (QL_RESOURCE_BITS_BASE_CODE |
				     (qdev->mac_index) * 2) << 13)) {
			status = -1;
			goto out;
		}
		ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value);
		ql_write_page0_reg(qdev, &port_regs->InternalChipConfig,
				   (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) <<
				     16) | (INTERNAL_CHIP_SD |
					    INTERNAL_CHIP_WE)));
		ql_sem_unlock(qdev, QL_FLASH_SEM_MASK);
	}

	if (qdev->mac_index)
		ql_write_page0_reg(qdev,
				   &port_regs->mac1MaxFrameLengthReg,
				   qdev->max_frame_size);
	else
		ql_write_page0_reg(qdev,
				   &port_regs->mac0MaxFrameLengthReg,
				   qdev->max_frame_size);

	if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
			    (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
			     2) << 7)) {
		status = -1;
		goto out;
	}

	ql_init_scan_mode(qdev);
	ql_get_phy_owner(qdev);

	/* Load the MAC Configuration */

	/* Program lower 32 bits of the MAC address */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
			   ((qdev->ndev->dev_addr[2] << 24)
			    | (qdev->ndev->dev_addr[3] << 16)
			    | (qdev->ndev->dev_addr[4] << 8)
			    | qdev->ndev->dev_addr[5]));

	/* Program top 16 bits of the MAC address */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
			   ((qdev->ndev->dev_addr[0] << 8)
			    | qdev->ndev->dev_addr[1]));
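
	/*
	 * Example of the split above: for MAC 00:c0:dd:01:02:03 the
	 * first write latches 0xdd010203 (bytes 2-5) at indirect
	 * pointer 0, and the second latches 0x000000c0 (bytes 0-1) at
	 * indirect pointer 1.
	 */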
	/* Enable Primary MAC */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) |
			    MAC_ADDR_INDIRECT_PTR_REG_PE));

	/* Clear Primary and Secondary IP addresses */
	ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
			   ((IP_ADDR_INDEX_REG_MASK << 16) |
			    (qdev->mac_index << 2)));
	ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);

	ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
			   ((IP_ADDR_INDEX_REG_MASK << 16) |
			    ((qdev->mac_index << 2) + 1)));
	ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);

	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);

	/* Indicate Configuration Complete */
	ql_write_page0_reg(qdev,
			   &port_regs->portControl,
			   ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC));

	do {
		value = ql_read_page0_reg(qdev, &port_regs->portStatus);
		if (value & PORT_STATUS_IC)
			break;
		msleep(500);
	} while (--delay);

	if (delay == 0) {
		printk(KERN_ERR PFX
		       "%s: Hw Initialization timeout.\n", qdev->ndev->name);
		status = -1;
		goto out;
	}

	/* Enable Ethernet Function */
	if (qdev->device_id == QL3032_DEVICE_ID) {
		value =
		    (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE |
		     QL3032_PORT_CONTROL_EIv6 | QL3032_PORT_CONTROL_EIv4 |
		     QL3032_PORT_CONTROL_ET);
		ql_write_page0_reg(qdev, &port_regs->functionControl,
				   ((value << 16) | value));
	} else {
		value =
		    (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI |
		     PORT_CONTROL_HH);
		ql_write_page0_reg(qdev, &port_regs->portControl,
				   ((value << 16) | value));
	}

out:
	return status;
}
/*
 * Caller holds hw_lock.
 */
static int ql_adapter_reset(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	int status = 0;
	u32 value;
	int max_wait_time;

	set_bit(QL_RESET_ACTIVE, &qdev->flags);
	clear_bit(QL_RESET_DONE, &qdev->flags);

	/*
	 * Issue soft reset to chip.
	 */
	printk(KERN_DEBUG PFX
	       "%s: Issue soft reset to chip.\n",
	       qdev->ndev->name);
	ql_write_common_reg(qdev,
			    &port_regs->CommonRegs.ispControlStatus,
			    ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR));

	/* Wait up to five seconds for the reset to complete. */
	printk(KERN_DEBUG PFX
	       "%s: Waiting for reset to complete.\n",
	       qdev->ndev->name);

	/* Wait until the firmware tells us the Soft Reset is done */
	max_wait_time = 5;
	do {
		value =
		    ql_read_common_reg(qdev,
				       &port_regs->CommonRegs.ispControlStatus);
		if ((value & ISP_CONTROL_SR) == 0)
			break;

		ssleep(1);
	} while ((--max_wait_time));

	/*
	 * Also, make sure that the Network Reset Interrupt bit has been
	 * cleared after the soft reset has taken place.
	 */
	value =
	    ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
	if (value & ISP_CONTROL_RI) {
		printk(KERN_DEBUG PFX
		       "ql_adapter_reset: clearing RI after reset.\n");
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.
				    ispControlStatus,
				    ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
	}

	if (max_wait_time == 0) {
		/* Issue Force Soft Reset */
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.
				    ispControlStatus,
				    ((ISP_CONTROL_FSR << 16) |
				     ISP_CONTROL_FSR));
		/*
		 * Wait until the firmware tells us the Force Soft Reset is
		 * done
		 */
		max_wait_time = 5;
		do {
			value =
			    ql_read_common_reg(qdev,
					       &port_regs->CommonRegs.
					       ispControlStatus);
			if ((value & ISP_CONTROL_FSR) == 0)
				break;
			ssleep(1);
		} while ((--max_wait_time));
	}
	if (max_wait_time == 0)
		status = 1;

	clear_bit(QL_RESET_ACTIVE, &qdev->flags);
	set_bit(QL_RESET_DONE, &qdev->flags);
	return status;
}
static void ql_set_mac_info(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	u32 value, port_status;
	u8 func_number;

	/* Get the function number */
	value =
	    ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
	func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK);
	port_status = ql_read_page0_reg(qdev, &port_regs->portStatus);
	switch (value & ISP_CONTROL_FN_MASK) {
	case ISP_CONTROL_FN0_NET:
		qdev->mac_index = 0;
		qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
		qdev->tcp_ob_opcode = OUTBOUND_TCP_IOCB | func_number;
		qdev->update_ob_opcode = UPDATE_NCB_IOCB | func_number;
		qdev->mb_bit_mask = FN0_MA_BITS_MASK;
		qdev->PHYAddr = PORT0_PHY_ADDRESS;
		if (port_status & PORT_STATUS_SM0)
			set_bit(QL_LINK_OPTICAL, &qdev->flags);
		else
			clear_bit(QL_LINK_OPTICAL, &qdev->flags);
		break;

	case ISP_CONTROL_FN1_NET:
		qdev->mac_index = 1;
		qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
		qdev->tcp_ob_opcode = OUTBOUND_TCP_IOCB | func_number;
		qdev->update_ob_opcode = UPDATE_NCB_IOCB | func_number;
		qdev->mb_bit_mask = FN1_MA_BITS_MASK;
		qdev->PHYAddr = PORT1_PHY_ADDRESS;
		if (port_status & PORT_STATUS_SM1)
			set_bit(QL_LINK_OPTICAL, &qdev->flags);
		else
			clear_bit(QL_LINK_OPTICAL, &qdev->flags);
		break;

	case ISP_CONTROL_FN0_SCSI:
	case ISP_CONTROL_FN1_SCSI:
	default:
		printk(KERN_DEBUG PFX
		       "%s: Invalid function number, ispControlStatus = 0x%x\n",
		       qdev->ndev->name, value);
		break;
	}
	qdev->numPorts = qdev->nvram_data.numPorts;
}
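
/*
 * A sketch of the decode above: ispControlStatus carries the PCI
 * function type in ISP_CONTROL_FN_MASK and the function number in the
 * bits selected by (value >> 4) & OPCODE_FUNC_ID_MASK.  A value whose
 * masked type equals ISP_CONTROL_FN1_NET therefore selects mac_index 1
 * and PORT1_PHY_ADDRESS, and the function number is OR'd into each
 * outbound opcode so the chip can route completions back to this
 * function.
 */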
static void ql_display_dev_info(struct net_device *ndev)
{
	struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
	struct pci_dev *pdev = qdev->pdev;

	printk(KERN_INFO PFX
	       "\n%s Adapter %d RevisionID %d found %s on PCI slot %d.\n",
	       DRV_NAME, qdev->index, qdev->chip_rev_id,
	       (qdev->device_id == QL3032_DEVICE_ID) ? "QLA3032" : "QLA3022",
	       qdev->pci_slot);
	printk(KERN_INFO PFX
	       "%s Interface.\n",
	       test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER");

	/*
	 * Print PCI bus width/type.
	 */
	printk(KERN_INFO PFX
	       "Bus interface is %s %s.\n",
	       ((qdev->pci_width == 64) ? "64-bit" : "32-bit"),
	       ((qdev->pci_x) ? "PCI-X" : "PCI"));

	printk(KERN_INFO PFX
	       "mem IO base address adjusted = 0x%p\n",
	       qdev->mem_map_registers);
	printk(KERN_INFO PFX "Interrupt number = %d\n", pdev->irq);

	if (netif_msg_probe(qdev))
		printk(KERN_INFO PFX
		       "%s: MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
		       ndev->name, ndev->dev_addr[0], ndev->dev_addr[1],
		       ndev->dev_addr[2], ndev->dev_addr[3], ndev->dev_addr[4],
		       ndev->dev_addr[5]);
}
static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
{
	struct net_device *ndev = qdev->ndev;
	int retval = 0;

	netif_stop_queue(ndev);
	netif_carrier_off(ndev);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	clear_bit(QL_LINK_MASTER, &qdev->flags);

	ql_disable_interrupts(qdev);

	free_irq(qdev->pdev->irq, ndev);

	if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
		printk(KERN_INFO PFX
		       "%s: calling pci_disable_msi().\n", qdev->ndev->name);
		clear_bit(QL_MSI_ENABLED, &qdev->flags);
		pci_disable_msi(qdev->pdev);
	}

	del_timer_sync(&qdev->adapter_timer);

	netif_poll_disable(ndev);

	if (do_reset) {
		int soft_reset;
		unsigned long hw_flags;

		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
		if (ql_wait_for_drvr_lock(qdev)) {
			if ((soft_reset = ql_adapter_reset(qdev))) {
				printk(KERN_ERR PFX
				       "%s: ql_adapter_reset(%d) FAILED!\n",
				       ndev->name, qdev->index);
			}
			printk(KERN_ERR PFX
			       "%s: Releasing driver lock via chip reset.\n",
			       ndev->name);
		} else {
			printk(KERN_ERR PFX
			       "%s: Could not acquire driver lock to do "
			       "reset!\n", ndev->name);
			retval = -1;
		}
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	}
	ql_free_mem_resources(qdev);
	return retval;
}
static int ql_adapter_up(struct ql3_adapter *qdev)
{
	struct net_device *ndev = qdev->ndev;
	int err;
	unsigned long irq_flags = IRQF_SAMPLE_RANDOM | IRQF_SHARED;
	unsigned long hw_flags;

	if (ql_alloc_mem_resources(qdev)) {
		printk(KERN_ERR PFX
		       "%s: Unable to allocate buffers.\n", ndev->name);
		return -ENOMEM;
	}

	if (qdev->msi) {
		if (pci_enable_msi(qdev->pdev)) {
			printk(KERN_ERR PFX
			       "%s: User requested MSI, but MSI failed to "
			       "initialize. Continuing without MSI.\n",
			       qdev->ndev->name);
			qdev->msi = 0;
		} else {
			printk(KERN_INFO PFX "%s: MSI Enabled...\n", qdev->ndev->name);
			set_bit(QL_MSI_ENABLED, &qdev->flags);
			irq_flags &= ~IRQF_SHARED;
		}
	}

	if ((err = request_irq(qdev->pdev->irq,
			       ql3xxx_isr,
			       irq_flags, ndev->name, ndev))) {
		printk(KERN_ERR PFX
		       "%s: Failed to reserve interrupt %d; already in use.\n",
		       ndev->name, qdev->pdev->irq);
		goto err_irq;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);

	if ((err = ql_wait_for_drvr_lock(qdev))) {
		if ((err = ql_adapter_initialize(qdev))) {
			printk(KERN_ERR PFX
			       "%s: Unable to initialize adapter.\n",
			       ndev->name);
			goto err_init;
		}
		printk(KERN_ERR PFX
		       "%s: Releasing driver lock.\n", ndev->name);
		ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
	} else {
		printk(KERN_ERR PFX
		       "%s: Could not acquire driver lock.\n",
		       ndev->name);
		goto err_lock;
	}

	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	set_bit(QL_ADAPTER_UP, &qdev->flags);

	mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);

	netif_poll_enable(ndev);
	ql_enable_interrupts(qdev);
	return 0;

err_init:
	ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
err_lock:
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	free_irq(qdev->pdev->irq, ndev);
err_irq:
	if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
		printk(KERN_INFO PFX
		       "%s: calling pci_disable_msi().\n",
		       qdev->ndev->name);
		clear_bit(QL_MSI_ENABLED, &qdev->flags);
		pci_disable_msi(qdev->pdev);
	}
	return err;
}
static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset)
{
	if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) {
		printk(KERN_ERR PFX
		       "%s: Driver up/down cycle failed, "
		       "closing device\n", qdev->ndev->name);
		dev_close(qdev->ndev);
		return -1;
	}
	return 0;
}

static int ql3xxx_close(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);

	/*
	 * Wait for device to recover from a reset.
	 * (Rarely happens, but possible.)
	 */
	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
		msleep(50);

	ql_adapter_down(qdev, QL_DO_RESET);
	return 0;
}

static int ql3xxx_open(struct net_device *ndev)
{
	struct ql3_adapter *qdev = netdev_priv(ndev);
	return ql_adapter_up(qdev);
}

static struct net_device_stats *ql3xxx_get_stats(struct net_device *dev)
{
	struct ql3_adapter *qdev = netdev_priv(dev);
	return &qdev->stats;
}
static void ql3xxx_set_multicast_list(struct net_device *ndev)
{
	/*
	 * We are manually parsing the list in the net_device structure.
	 * Multicast filtering is not enabled; IFF_MULTICAST is cleared
	 * at probe time.
	 */
}

static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
{
	struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
	struct ql3xxx_port_registers __iomem *port_regs =
	    qdev->mem_map_registers;
	struct sockaddr *addr = p;
	unsigned long hw_flags;

	if (netif_running(ndev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	/* Program lower 32 bits of the MAC address */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
			   ((ndev->dev_addr[2] << 24)
			    | (ndev->dev_addr[3] << 16)
			    | (ndev->dev_addr[4] << 8)
			    | ndev->dev_addr[5]));

	/* Program top 16 bits of the MAC address */
	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
			   ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
			   ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1]));
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

	return 0;
}
static void ql3xxx_tx_timeout(struct net_device *ndev)
{
	struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);

	printk(KERN_ERR PFX "%s: Resetting...\n", ndev->name);
	/*
	 * Stop the queues, we've got a problem.
	 */
	netif_stop_queue(ndev);

	/*
	 * Wake up the worker to process this event.
	 */
	queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0);
}
static void ql_reset_work(struct work_struct *work)
{
	struct ql3_adapter *qdev =
		container_of(work, struct ql3_adapter, reset_work.work);
	struct net_device *ndev = qdev->ndev;
	u32 value;
	struct ql_tx_buf_cb *tx_cb;
	int max_wait_time, i;
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	unsigned long hw_flags;

	if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) ||
	    test_bit(QL_RESET_START, &qdev->flags)) {
		clear_bit(QL_LINK_MASTER, &qdev->flags);

		/*
		 * Loop through the active list and return the skb.
		 */
		for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
			int j;
			tx_cb = &qdev->tx_buf[i];
			if (tx_cb->skb) {
				printk(KERN_DEBUG PFX
				       "%s: Freeing lost SKB.\n",
				       qdev->ndev->name);
				pci_unmap_single(qdev->pdev,
						 pci_unmap_addr(&tx_cb->map[0], mapaddr),
						 pci_unmap_len(&tx_cb->map[0], maplen),
						 PCI_DMA_TODEVICE);
				for (j = 1; j < tx_cb->seg_count; j++) {
					pci_unmap_page(qdev->pdev,
						       pci_unmap_addr(&tx_cb->map[j], mapaddr),
						       pci_unmap_len(&tx_cb->map[j], maplen),
						       PCI_DMA_TODEVICE);
				}
				dev_kfree_skb(tx_cb->skb);
				tx_cb->skb = NULL;
			}
		}

		printk(KERN_ERR PFX
		       "%s: Clearing NRI after reset.\n", qdev->ndev->name);
		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
		ql_write_common_reg(qdev,
				    &port_regs->CommonRegs.
				    ispControlStatus,
				    ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
		/*
		 * Wait for the Soft Reset to Complete.
		 */
		max_wait_time = 10;
		do {
			value = ql_read_common_reg(qdev,
						   &port_regs->CommonRegs.
						   ispControlStatus);
			if ((value & ISP_CONTROL_SR) == 0) {
				printk(KERN_DEBUG PFX
				       "%s: reset completed.\n",
				       qdev->ndev->name);
				break;
			}

			if (value & ISP_CONTROL_RI) {
				printk(KERN_DEBUG PFX
				       "%s: clearing NRI after reset.\n",
				       qdev->ndev->name);
				ql_write_common_reg(qdev,
						    &port_regs->
						    CommonRegs.
						    ispControlStatus,
						    ((ISP_CONTROL_RI <<
						      16) | ISP_CONTROL_RI));
			}

			ssleep(1);
		} while (--max_wait_time);
		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);

		if (value & ISP_CONTROL_SR) {
			/*
			 * Set the reset flags and clear the board again.
			 * Nothing else to do...
			 */
			printk(KERN_ERR PFX
			       "%s: Timed out waiting for reset to "
			       "complete.\n", ndev->name);
			printk(KERN_ERR PFX
			       "%s: Do a reset.\n", ndev->name);
			clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
			clear_bit(QL_RESET_START, &qdev->flags);
			ql_cycle_adapter(qdev, QL_DO_RESET);
			return;
		}

		clear_bit(QL_RESET_ACTIVE, &qdev->flags);
		clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
		clear_bit(QL_RESET_START, &qdev->flags);
		ql_cycle_adapter(qdev, QL_NO_RESET);
	}
}
static void ql_tx_timeout_work(struct work_struct *work)
{
	struct ql3_adapter *qdev =
		container_of(work, struct ql3_adapter, tx_timeout_work.work);

	ql_cycle_adapter(qdev, QL_DO_RESET);
}

static void ql_get_board_info(struct ql3_adapter *qdev)
{
	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
	u32 value;

	value = ql_read_page0_reg_l(qdev, &port_regs->portStatus);

	qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12);
	if (value & PORT_STATUS_64)
		qdev->pci_width = 64;
	else
		qdev->pci_width = 32;
	if (value & PORT_STATUS_X)
		qdev->pci_x = 1;
	else
		qdev->pci_x = 0;
	qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn);
}
static void ql3xxx_timer(unsigned long ptr)
{
	struct ql3_adapter *qdev = (struct ql3_adapter *)ptr;

	if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) {
		printk(KERN_DEBUG PFX
		       "%s: Reset in progress.\n",
		       qdev->ndev->name);
		goto end;
	}

	ql_link_state_machine(qdev);

	/* Restart timer on a one second interval. */
end:
	mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
}
static int __devinit ql3xxx_probe(struct pci_dev *pdev,
				  const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql3_adapter *qdev = NULL;
	static int cards_found = 0;
	int pci_using_dac, err;

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "%s cannot enable PCI device\n",
		       pci_name(pdev));
		goto err_out;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		printk(KERN_ERR PFX "%s cannot obtain PCI resources\n",
		       pci_name(pdev));
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
	} else if (!(err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
		pci_using_dac = 0;
		err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
	}

	if (err) {
		printk(KERN_ERR PFX "%s no usable DMA configuration\n",
		       pci_name(pdev));
		goto err_out_free_regions;
	}
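
	/*
	 * The fallback above is the usual two-step DMA negotiation: try
	 * a 64-bit mask first (so pci_using_dac can enable
	 * NETIF_F_HIGHDMA below), drop to a 32-bit mask if the platform
	 * cannot do 64-bit DMA, and only if both fail abandon the probe.
	 */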
	ndev = alloc_etherdev(sizeof(struct ql3_adapter));
	if (!ndev) {
		printk(KERN_ERR PFX "%s could not alloc etherdev\n",
		       pci_name(pdev));
		err = -ENOMEM;
		goto err_out_free_regions;
	}

	SET_MODULE_OWNER(ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);

	pci_set_drvdata(pdev, ndev);

	qdev = netdev_priv(ndev);
	qdev->index = cards_found;
	qdev->ndev = ndev;
	qdev->pdev = pdev;
	qdev->device_id = pci_entry->device;
	qdev->port_link_state = LS_DOWN;
	if (msi)
		qdev->msi = 1;

	qdev->msg_enable = netif_msg_init(debug, default_msg);

	if (pci_using_dac)
		ndev->features |= NETIF_F_HIGHDMA;
	if (qdev->device_id == QL3032_DEVICE_ID)
		ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;

	qdev->mem_map_registers =
	    ioremap_nocache(pci_resource_start(pdev, 1),
			    pci_resource_len(qdev->pdev, 1));
	if (!qdev->mem_map_registers) {
		printk(KERN_ERR PFX "%s: cannot map device registers\n",
		       pci_name(pdev));
		err = -EIO;
		goto err_out_free_ndev;
	}

	spin_lock_init(&qdev->adapter_lock);
	spin_lock_init(&qdev->hw_lock);

	/* Set driver entry points */
	ndev->open = ql3xxx_open;
	ndev->hard_start_xmit = ql3xxx_send;
	ndev->stop = ql3xxx_close;
	ndev->get_stats = ql3xxx_get_stats;
	ndev->set_multicast_list = ql3xxx_set_multicast_list;
	SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops);
	ndev->set_mac_address = ql3xxx_set_mac_address;
	ndev->tx_timeout = ql3xxx_tx_timeout;
	ndev->watchdog_timeo = 5 * HZ;

	ndev->poll = &ql_poll;
	ndev->weight = 64;

	ndev->irq = pdev->irq;

	/* make sure the EEPROM is good */
	if (ql_get_nvram_params(qdev)) {
		printk(KERN_ALERT PFX
		       "ql3xxx_probe: Adapter #%d, Invalid NVRAM parameters.\n",
		       qdev->index);
		err = -EIO;
		goto err_out_iounmap;
	}

	ql_set_mac_info(qdev);

	/* Validate and set parameters */
	if (qdev->mac_index) {
		ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac;
		memcpy(ndev->dev_addr, &qdev->nvram_data.funcCfg_fn2.macAddress,
		       ETH_ALEN);
	} else {
		ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac;
		memcpy(ndev->dev_addr, &qdev->nvram_data.funcCfg_fn0.macAddress,
		       ETH_ALEN);
	}
	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	ndev->tx_queue_len = NUM_REQ_Q_ENTRIES;

	/* Turn off support for multicasting */
	ndev->flags &= ~IFF_MULTICAST;

	/* Record PCI bus information. */
	ql_get_board_info(qdev);

	/*
	 * Set the Maximum Memory Read Byte Count value. We do this to handle
	 * jumbo frames.
	 */
	if (qdev->pci_x) {
		pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036);
	}

	err = register_netdev(ndev);
	if (err) {
		printk(KERN_ERR PFX "%s: cannot register net device\n",
		       pci_name(pdev));
		goto err_out_iounmap;
	}

	/* we're going to reset, so assume we have no link for now */

	netif_carrier_off(ndev);
	netif_stop_queue(ndev);

	qdev->workqueue = create_singlethread_workqueue(ndev->name);
	INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work);
	INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work);

	init_timer(&qdev->adapter_timer);
	qdev->adapter_timer.function = ql3xxx_timer;
	qdev->adapter_timer.expires = jiffies + HZ * 2;	/* two second delay */
	qdev->adapter_timer.data = (unsigned long)qdev;

	if (!cards_found) {
		printk(KERN_ALERT PFX "%s\n", DRV_STRING);
		printk(KERN_ALERT PFX "Driver name: %s, Version: %s.\n",
		       DRV_NAME, DRV_VERSION);
	}
	ql_display_dev_info(ndev);

	cards_found++;
	return 0;

err_out_iounmap:
	iounmap(qdev->mem_map_registers);
err_out_free_ndev:
	free_netdev(ndev);
err_out_free_regions:
	pci_release_regions(pdev);
err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
err_out:
	return err;
}
static void __devexit ql3xxx_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql3_adapter *qdev = netdev_priv(ndev);

	unregister_netdev(ndev);

	ql_disable_interrupts(qdev);

	if (qdev->workqueue) {
		cancel_delayed_work(&qdev->reset_work);
		cancel_delayed_work(&qdev->tx_timeout_work);
		destroy_workqueue(qdev->workqueue);
		qdev->workqueue = NULL;
	}

	iounmap(qdev->mem_map_registers);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(ndev);
}
static struct pci_driver ql3xxx_driver = {
	.name = DRV_NAME,
	.id_table = ql3xxx_pci_tbl,
	.probe = ql3xxx_probe,
	.remove = __devexit_p(ql3xxx_remove),
};

static int __init ql3xxx_init_module(void)
{
	return pci_register_driver(&ql3xxx_driver);
}

static void __exit ql3xxx_exit(void)
{
	pci_unregister_driver(&ql3xxx_driver);
}

module_init(ql3xxx_init_module);
module_exit(ql3xxx_exit);