/*
 * Copyright (C) 2003 - 2009 NetXen, Inc.
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
 * MA  02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.
 *
 * Contact Information:
 *    info@netxen.com
 * NetXen Inc,
 * Cupertino, CA 95014-0701
 *
 */
#include <linux/netdevice.h>
#include <linux/delay.h>
#include "netxen_nic.h"
#include "netxen_nic_hw.h"
#include "netxen_nic_phan_reg.h"
struct crb_addr_pair {
	u32 addr;
	u32 data;
};
#define NETXEN_MAX_CRB_XFORM 60
static unsigned int crb_addr_xform[NETXEN_MAX_CRB_XFORM];
#define NETXEN_ADDR_ERROR (0xffffffff)

#define crb_addr_transform(name) \
	crb_addr_xform[NETXEN_HW_PX_MAP_CRB_##name] = \
	NETXEN_HW_CRB_HUB_AGT_ADR_##name << 20
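/*
 * For example, crb_addr_transform(XDMA) expands to:
 *	crb_addr_xform[NETXEN_HW_PX_MAP_CRB_XDMA] =
 *		NETXEN_HW_CRB_HUB_AGT_ADR_XDMA << 20;
 */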
#define NETXEN_NIC_XDMA_RESET	0x8000ff

static void
netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter, uint32_t ringid);
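/* Fill crb_addr_xform[] with the hub agent address of each CRB block. */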
static void crb_addr_transform_setup(void)
{
	crb_addr_transform(XDMA);
	crb_addr_transform(TIMR);
	crb_addr_transform(SRE);
	crb_addr_transform(SQN3);
	crb_addr_transform(SQN2);
	crb_addr_transform(SQN1);
	crb_addr_transform(SQN0);
	crb_addr_transform(SQS3);
	crb_addr_transform(SQS2);
	crb_addr_transform(SQS1);
	crb_addr_transform(SQS0);
	crb_addr_transform(RPMX7);
	crb_addr_transform(RPMX6);
	crb_addr_transform(RPMX5);
	crb_addr_transform(RPMX4);
	crb_addr_transform(RPMX3);
	crb_addr_transform(RPMX2);
	crb_addr_transform(RPMX1);
	crb_addr_transform(RPMX0);
	crb_addr_transform(ROMUSB);
	crb_addr_transform(SN);
	crb_addr_transform(QMN);
	crb_addr_transform(QMS);
	crb_addr_transform(PGNI);
	crb_addr_transform(PGND);
	crb_addr_transform(PGN3);
	crb_addr_transform(PGN2);
	crb_addr_transform(PGN1);
	crb_addr_transform(PGN0);
	crb_addr_transform(PGSI);
	crb_addr_transform(PGSD);
	crb_addr_transform(PGS3);
	crb_addr_transform(PGS2);
	crb_addr_transform(PGS1);
	crb_addr_transform(PGS0);
	crb_addr_transform(PS);
	crb_addr_transform(PH);
	crb_addr_transform(NIU);
	crb_addr_transform(I2Q);
	crb_addr_transform(EG);
	crb_addr_transform(MN);
	crb_addr_transform(MS);
	crb_addr_transform(CAS2);
	crb_addr_transform(CAS1);
	crb_addr_transform(CAS0);
	crb_addr_transform(CAM);
	crb_addr_transform(C2C1);
	crb_addr_transform(C2C0);
	crb_addr_transform(SMB);
	crb_addr_transform(OCM0);
	crb_addr_transform(I2C0);
}
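/*
 * netxen_init_firmware - wait for the command peg to finish initializing,
 * advertise the host's interrupt scheme, MSI mode and multi-port mode, and
 * acknowledge the handshake through CRB_CMDPEG_STATE.
 */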
int netxen_init_firmware(struct netxen_adapter *adapter)
{
	u32 state = 0, loops = 0, err = 0;

	/* Window 1 call */
	state = adapter->pci_read_normalize(adapter, CRB_CMDPEG_STATE);

	if (state == PHAN_INITIALIZE_ACK)
		return 0;

	while (state != PHAN_INITIALIZE_COMPLETE && loops < 2000) {
		msleep(1);
		/* Window 1 call */
		state = adapter->pci_read_normalize(adapter, CRB_CMDPEG_STATE);
		loops++;
	}
	if (loops >= 2000) {
		printk(KERN_ERR "Cmd Peg initialization not complete:%x.\n",
				state);
		err = -EIO;
		return err;
	}
	/* Window 1 call */
	adapter->pci_write_normalize(adapter,
			CRB_NIC_CAPABILITIES_HOST, INTR_SCHEME_PERPORT);
	adapter->pci_write_normalize(adapter,
			CRB_NIC_MSI_MODE_HOST, MSI_MODE_MULTIFUNC);
	adapter->pci_write_normalize(adapter,
			CRB_MPORT_MODE, MPORT_MULTI_FUNCTION_MODE);
	adapter->pci_write_normalize(adapter,
			CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);

	return err;
}
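/* Unmap and free any receive buffers still held by the host. */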
void netxen_release_rx_buffers(struct netxen_adapter *adapter)
{
	struct netxen_recv_context *recv_ctx;
	struct nx_host_rds_ring *rds_ring;
	struct netxen_rx_buffer *rx_buf;
	int i, ring;

	recv_ctx = &adapter->recv_ctx;
	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &recv_ctx->rds_rings[ring];
		for (i = 0; i < rds_ring->max_rx_desc_count; ++i) {
			rx_buf = &(rds_ring->rx_buf_arr[i]);
			if (rx_buf->state == NETXEN_BUFFER_FREE)
				continue;
			pci_unmap_single(adapter->pdev,
					rx_buf->dma,
					rds_ring->dma_size,
					PCI_DMA_FROMDEVICE);
			if (rx_buf->skb != NULL)
				dev_kfree_skb_any(rx_buf->skb);
		}
	}
}
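/* Unmap all pending transmit fragments and free their skbs. */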
void netxen_release_tx_buffers(struct netxen_adapter *adapter)
{
	struct netxen_cmd_buffer *cmd_buf;
	struct netxen_skb_frag *buffrag;
	int i, j;

	cmd_buf = adapter->cmd_buf_arr;
	for (i = 0; i < adapter->max_tx_desc_count; i++) {
		buffrag = cmd_buf->frag_array;
		if (buffrag->dma) {
			pci_unmap_single(adapter->pdev, buffrag->dma,
					buffrag->length, PCI_DMA_TODEVICE);
			buffrag->dma = 0ULL;
		}
		for (j = 0; j < cmd_buf->frag_count; j++) {
			buffrag++;
			if (buffrag->dma) {
				pci_unmap_page(adapter->pdev, buffrag->dma,
						buffrag->length,
						PCI_DMA_TODEVICE);
				buffrag->dma = 0ULL;
			}
		}
		/* Free the skb we received in netxen_nic_xmit_frame */
		if (cmd_buf->skb) {
			dev_kfree_skb_any(cmd_buf->skb);
			cmd_buf->skb = NULL;
		}
		cmd_buf++;
	}
}
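/*
 * Free the software rx/tx buffer arrays allocated by
 * netxen_alloc_sw_resources().
 */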
void netxen_free_sw_resources(struct netxen_adapter *adapter)
{
	struct netxen_recv_context *recv_ctx;
	struct nx_host_rds_ring *rds_ring;
	int ring;

	recv_ctx = &adapter->recv_ctx;
	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &recv_ctx->rds_rings[ring];
		if (rds_ring->rx_buf_arr) {
			vfree(rds_ring->rx_buf_arr);
			rds_ring->rx_buf_arr = NULL;
		}
	}

	if (adapter->cmd_buf_arr)
		vfree(adapter->cmd_buf_arr);
}
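/*
 * netxen_alloc_sw_resources - allocate the host tx buffer array and one rx
 * buffer array per RDS ring (normal, jumbo, LRO), then thread every rx
 * buffer onto its ring's free list.
 */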
int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
{
	struct netxen_recv_context *recv_ctx;
	struct nx_host_rds_ring *rds_ring;
	struct netxen_rx_buffer *rx_buf;
	int ring, i, num_rx_bufs;

	struct netxen_cmd_buffer *cmd_buf_arr;
	struct net_device *netdev = adapter->netdev;

	cmd_buf_arr = (struct netxen_cmd_buffer *)vmalloc(TX_RINGSIZE);
	if (cmd_buf_arr == NULL) {
		printk(KERN_ERR "%s: Failed to allocate cmd buffer ring\n",
				netdev->name);
		return -ENOMEM;
	}
	memset(cmd_buf_arr, 0, TX_RINGSIZE);
	adapter->cmd_buf_arr = cmd_buf_arr;

	recv_ctx = &adapter->recv_ctx;
	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &recv_ctx->rds_rings[ring];
		switch (RCV_DESC_TYPE(ring)) {
		case RCV_DESC_NORMAL:
			rds_ring->max_rx_desc_count =
				adapter->max_rx_desc_count;
			rds_ring->flags = RCV_DESC_NORMAL;
			if (adapter->ahw.cut_through) {
				rds_ring->dma_size =
					NX_CT_DEFAULT_RX_BUF_LEN;
				rds_ring->skb_size =
					NX_CT_DEFAULT_RX_BUF_LEN;
			} else {
				rds_ring->dma_size = RX_DMA_MAP_LEN;
				rds_ring->skb_size =
					MAX_RX_BUFFER_LENGTH;
			}
			break;

		case RCV_DESC_JUMBO:
			rds_ring->max_rx_desc_count =
				adapter->max_jumbo_rx_desc_count;
			rds_ring->flags = RCV_DESC_JUMBO;
			if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
				rds_ring->dma_size =
					NX_P3_RX_JUMBO_BUF_MAX_LEN;
			else
				rds_ring->dma_size =
					NX_P2_RX_JUMBO_BUF_MAX_LEN;
			rds_ring->skb_size =
				rds_ring->dma_size + NET_IP_ALIGN;
			break;

		case RCV_DESC_LRO:
			rds_ring->max_rx_desc_count =
				adapter->max_lro_rx_desc_count;
			rds_ring->flags = RCV_DESC_LRO;
			rds_ring->dma_size = RX_LRO_DMA_MAP_LEN;
			rds_ring->skb_size = MAX_RX_LRO_BUFFER_LENGTH;
			break;

		}
		rds_ring->rx_buf_arr = (struct netxen_rx_buffer *)
			vmalloc(RCV_BUFFSIZE);
		if (rds_ring->rx_buf_arr == NULL) {
			printk(KERN_ERR "%s: Failed to allocate "
					"rx buffer ring %d\n",
					netdev->name, ring);
			/* free whatever was already allocated */
			goto err_out;
		}
		memset(rds_ring->rx_buf_arr, 0, RCV_BUFFSIZE);
		INIT_LIST_HEAD(&rds_ring->free_list);
		/*
		 * Now go through all of them, set reference handles
		 * and put them in the queues.
		 */
		num_rx_bufs = rds_ring->max_rx_desc_count;
		rx_buf = rds_ring->rx_buf_arr;
		for (i = 0; i < num_rx_bufs; i++) {
			list_add_tail(&rx_buf->list,
					&rds_ring->free_list);
			rx_buf->ref_handle = i;
			rx_buf->state = NETXEN_BUFFER_FREE;
			rx_buf++;
		}
	}

	return 0;

err_out:
	netxen_free_sw_resources(adapter);
	return -ENOMEM;
}
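/*
 * netxen_initialize_adapter_ops - hook up the GbE or XGBE NIU callbacks;
 * P3 chips override the MTU and promiscuous handlers with firmware commands.
 */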
void netxen_initialize_adapter_ops(struct netxen_adapter *adapter)
{
	switch (adapter->ahw.port_type) {
	case NETXEN_NIC_GBE:
		adapter->enable_phy_interrupts =
		    netxen_niu_gbe_enable_phy_interrupts;
		adapter->disable_phy_interrupts =
		    netxen_niu_gbe_disable_phy_interrupts;
		adapter->macaddr_set = netxen_niu_macaddr_set;
		adapter->set_mtu = netxen_nic_set_mtu_gb;
		adapter->set_promisc = netxen_niu_set_promiscuous_mode;
		adapter->phy_read = netxen_niu_gbe_phy_read;
		adapter->phy_write = netxen_niu_gbe_phy_write;
		adapter->init_port = netxen_niu_gbe_init_port;
		adapter->stop_port = netxen_niu_disable_gbe_port;
		break;

	case NETXEN_NIC_XGBE:
		adapter->enable_phy_interrupts =
		    netxen_niu_xgbe_enable_phy_interrupts;
		adapter->disable_phy_interrupts =
		    netxen_niu_xgbe_disable_phy_interrupts;
		adapter->macaddr_set = netxen_niu_xg_macaddr_set;
		adapter->set_mtu = netxen_nic_set_mtu_xgb;
		adapter->init_port = netxen_niu_xg_init_port;
		adapter->set_promisc = netxen_niu_xg_set_promiscuous_mode;
		adapter->stop_port = netxen_niu_disable_xg_port;
		break;

	default:
		break;
	}

	if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
		adapter->set_mtu = nx_fw_cmd_set_mtu;
		adapter->set_promisc = netxen_p3_nic_set_promisc;
	}
}
/*
 * netxen_decode_crb_addr() - utility to translate from internal Phantom CRB
 * address to external PCI CRB address.
 */
static u32 netxen_decode_crb_addr(u32 addr)
{
	int i;
	u32 base_addr, offset, pci_base;

	crb_addr_transform_setup();

	pci_base = NETXEN_ADDR_ERROR;
	base_addr = addr & 0xfff00000;
	offset = addr & 0x000fffff;

	for (i = 0; i < NETXEN_MAX_CRB_XFORM; i++) {
		if (crb_addr_xform[i] == base_addr) {
			pci_base = i << 20;
			break;
		}
	}
	if (pci_base == NETXEN_ADDR_ERROR)
		return pci_base;
	else
		return (pci_base + offset);
}
static long rom_max_timeout = 100;
static long rom_lock_timeout = 10000;
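/*
 * rom_lock - acquire PCIe semaphore 2 before touching the flash ROM,
 * retrying until the hardware grants it or rom_lock_timeout expires.
 */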
static int rom_lock(struct netxen_adapter *adapter)
{
	int iter;
	u32 done = 0;
	int timeout = 0;

	while (!done) {
		/* acquire semaphore2 from PCI HW block */
		netxen_nic_read_w0(adapter, NETXEN_PCIE_REG(PCIE_SEM2_LOCK),
				&done);
		if (done == 1)
			break;
		if (timeout >= rom_lock_timeout)
			return -EIO;

		timeout++;
		/* yield the CPU before retrying */
		if (!in_atomic())
			schedule();
		else {
			for (iter = 0; iter < 20; iter++)
				cpu_relax();	/* This a nop instr on i386 */
		}
	}
	netxen_nic_reg_write(adapter, NETXEN_ROM_LOCK_ID, ROM_LOCK_DRIVER);
	return 0;
}
static int netxen_wait_rom_done(struct netxen_adapter *adapter)
{
	long timeout = 0;
	long done = 0;

	while (done == 0) {
		done = netxen_nic_reg_read(adapter, NETXEN_ROMUSB_GLB_STATUS);
		done &= 2;
		timeout++;
		if (timeout >= rom_max_timeout) {
			printk("Timeout reached waiting for rom done\n");
			return -EIO;
		}
	}
	return 0;
}
static void netxen_rom_unlock(struct netxen_adapter *adapter)
{
	u32 val;

	/* release semaphore2 */
	netxen_nic_read_w0(adapter, NETXEN_PCIE_REG(PCIE_SEM2_UNLOCK), &val);
}
static int do_rom_fast_read(struct netxen_adapter *adapter,
		int addr, int *valp)
{
	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ADDRESS, addr);
	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 3);
	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE, 0xb);
	if (netxen_wait_rom_done(adapter)) {
		printk("Error waiting for rom done\n");
		return -EIO;
	}
	/* reset abyte_cnt and dummy_byte_cnt */
	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0);
	udelay(10);
	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);

	*valp = netxen_nic_reg_read(adapter, NETXEN_ROMUSB_ROM_RDATA);
	return 0;
}
static int do_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
		u8 *bytes, size_t size)
{
	int addridx;
	int ret = 0;

	for (addridx = addr; addridx < (addr + size); addridx += 4) {
		int v;
		ret = do_rom_fast_read(adapter, addridx, &v);
		if (ret != 0)
			break;

		*(__le32 *)bytes = cpu_to_le32(v);
		bytes += 4;
	}

	return ret;
}
int
netxen_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
		u8 *bytes, size_t size)
{
	int ret;

	ret = rom_lock(adapter);
	if (ret < 0)
		return ret;

	ret = do_rom_fast_read_words(adapter, addr, bytes, size);

	netxen_rom_unlock(adapter);

	return ret;
}
int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp)
{
	int ret;

	if (rom_lock(adapter) != 0)
		return -EIO;

	ret = do_rom_fast_read(adapter, addr, valp);
	netxen_rom_unlock(adapter);
	return ret;
}
#define NETXEN_BOARDTYPE	0x4008
#define NETXEN_BOARDNUM		0x400c
#define NETXEN_CHIPNUM		0x4010
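/*
 * netxen_pinit_from_rom - reset the chip and replay the CRB init
 * (address, data) pairs stored in flash, skipping registers that must not
 * be written from the host.
 */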
int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
{
	int addr, val;
	int i, n, init_delay = 0;
	struct crb_addr_pair *buf;
	unsigned offset;
	u32 off;

	/* resetall */
	rom_lock(adapter);
	netxen_crb_writelit_adapter(adapter, NETXEN_ROMUSB_GLB_SW_RESET,
			0xffffffff);
	netxen_rom_unlock(adapter);

	if (verbose) {
		if (netxen_rom_fast_read(adapter, NETXEN_BOARDTYPE, &val) == 0)
			printk("P2 ROM board type: 0x%08x\n", val);
		else
			printk("Could not read board type\n");
		if (netxen_rom_fast_read(adapter, NETXEN_BOARDNUM, &val) == 0)
			printk("P2 ROM board num: 0x%08x\n", val);
		else
			printk("Could not read board number\n");
		if (netxen_rom_fast_read(adapter, NETXEN_CHIPNUM, &val) == 0)
			printk("P2 ROM chip num: 0x%08x\n", val);
		else
			printk("Could not read chip number\n");
	}
	if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
		if (netxen_rom_fast_read(adapter, 0, &n) != 0 ||
			(n != 0xcafecafe) ||
			netxen_rom_fast_read(adapter, 4, &n) != 0) {
			printk(KERN_ERR "%s: ERROR Reading crb_init area: "
					"n: %08x\n", netxen_nic_driver_name, n);
			return -EIO;
		}
		offset = n & 0xffffU;
		n = (n >> 16) & 0xffffU;
	} else {
		if (netxen_rom_fast_read(adapter, 0, &n) != 0 ||
			!(n & 0x80000000)) {
			printk(KERN_ERR "%s: ERROR Reading crb_init area: "
					"n: %08x\n", netxen_nic_driver_name, n);
			return -EIO;
		}
		offset = 1;
		n &= ~0x80000000;
	}

	if (n < 1024) {
		if (verbose)
			printk(KERN_DEBUG "%s: %d CRB init values found"
					" in ROM.\n", netxen_nic_driver_name, n);
	} else {
		printk(KERN_ERR "%s:n=0x%x Error! NetXen card flash not"
				" initialized.\n", __func__, n);
		return -EIO;
	}

	buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL);
	if (buf == NULL) {
		printk("%s: netxen_pinit_from_rom: Unable to calloc memory.\n",
				netxen_nic_driver_name);
		return -ENOMEM;
	}
	for (i = 0; i < n; i++) {
		if (netxen_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 ||
		netxen_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) {
			kfree(buf);
			return -EIO;
		}

		buf[i].addr = addr;
		buf[i].data = val;

		if (verbose)
			printk(KERN_DEBUG "%s: PCI: 0x%08x == 0x%08x\n",
				netxen_nic_driver_name,
				(u32)netxen_decode_crb_addr(addr), val);
	}
	for (i = 0; i < n; i++) {

		off = netxen_decode_crb_addr(buf[i].addr);
		if (off == NETXEN_ADDR_ERROR) {
			printk(KERN_ERR "CRB init value out of range %x\n",
					buf[i].addr);
			continue;
		}
		off += NETXEN_PCI_CRBSPACE;
		/* skipping cold reboot MAGIC */
		if (off == NETXEN_CAM_RAM(0x1fc))
			continue;

		if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
			/* do not reset PCI */
			if (off == (ROMUSB_GLB + 0xbc))
				continue;
			if (off == (ROMUSB_GLB + 0xa8))
				continue;
			if (off == (ROMUSB_GLB + 0xc8)) /* core clock */
				continue;
			if (off == (ROMUSB_GLB + 0x24)) /* MN clock */
				continue;
			if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */
				continue;
			if (off == (NETXEN_CRB_PEG_NET_1 + 0x18))
				buf[i].data = 0x1020;
			/* skip the function enable register */
			if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION))
				continue;
			if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION2))
				continue;
			if ((off & 0x0ff00000) == NETXEN_CRB_SMB)
				continue;
		}

		if (off == NETXEN_ADDR_ERROR) {
			printk(KERN_ERR "%s: Err: Unknown addr: 0x%08x\n",
					netxen_nic_driver_name, buf[i].addr);
			continue;
		}

		init_delay = 1;
		/* After writing this register, HW needs time for CRB */
		/* to quiet down (else crb_window returns 0xffffffff) */
		if (off == NETXEN_ROMUSB_GLB_SW_RESET) {
			init_delay = 1000;
			if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
				/* hold xdma in reset also */
				buf[i].data = NETXEN_NIC_XDMA_RESET;
			}
		}

		adapter->hw_write_wx(adapter, off, &buf[i].data, 4);

		msleep(init_delay);
	}
	kfree(buf);
	/* disable_peg_cache_all */

	/* unreset_net_cache */
	if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
		adapter->hw_read_wx(adapter,
				NETXEN_ROMUSB_GLB_SW_RESET, &val, 4);
		netxen_crb_writelit_adapter(adapter,
				NETXEN_ROMUSB_GLB_SW_RESET, (val & 0xffffff0f));
	}

	/* p2dn replyCount */
	netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_D + 0xec, 0x1e);
	/* disable_peg_cache 0 */
	netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_D + 0x4c, 8);
	/* disable_peg_cache 1 */
	netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_I + 0x4c, 8);

	/* peg_clr_all */
	/* peg_clr 0 */
	netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_0 + 0x8, 0);
	netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_0 + 0xc, 0);
	/* peg_clr 1 */
	netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_1 + 0x8, 0);
	netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_1 + 0xc, 0);
	/* peg_clr 2 */
	netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_2 + 0x8, 0);
	netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_2 + 0xc, 0);
	/* peg_clr 3 */
	netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_3 + 0x8, 0);
	netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_3 + 0xc, 0);

	return 0;
}
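/*
 * netxen_initialize_adapter_offload - allocate the dummy DMA buffer used by
 * the firmware and program its bus address into the CRB scratch registers.
 */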
int netxen_initialize_adapter_offload(struct netxen_adapter *adapter)
{
	uint64_t addr;
	uint32_t hi;
	uint32_t lo;

	adapter->dummy_dma.addr =
	    pci_alloc_consistent(adapter->pdev,
			NETXEN_HOST_DUMMY_DMA_SIZE,
			&adapter->dummy_dma.phys_addr);
	if (adapter->dummy_dma.addr == NULL) {
		printk("%s: ERROR: Could not allocate dummy DMA memory\n",
				__func__);
		return -ENOMEM;
	}

	addr = (uint64_t) adapter->dummy_dma.phys_addr;
	hi = (addr >> 32) & 0xffffffff;
	lo = addr & 0xffffffff;

	adapter->pci_write_normalize(adapter, CRB_HOST_DUMMY_BUF_ADDR_HI, hi);
	adapter->pci_write_normalize(adapter, CRB_HOST_DUMMY_BUF_ADDR_LO, lo);

	if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
		uint32_t temp = 0;
		adapter->hw_write_wx(adapter, CRB_HOST_DUMMY_BUF, &temp, 4);
	}

	return 0;
}
void netxen_free_adapter_offload(struct netxen_adapter *adapter)
{
	int i = 100;

	if (!adapter->dummy_dma.addr)
		return;

	if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
		do {
			if (dma_watchdog_shutdown_request(adapter) == 1)
				break;
			msleep(50);
			if (dma_watchdog_shutdown_poll_result(adapter) == 1)
				break;
		} while (--i);
	}

	if (i) {
		pci_free_consistent(adapter->pdev,
				NETXEN_HOST_DUMMY_DMA_SIZE,
				adapter->dummy_dma.addr,
				adapter->dummy_dma.phys_addr);
		adapter->dummy_dma.addr = NULL;
	} else {
		printk(KERN_ERR "%s: dma_watchdog_shutdown failed\n",
				adapter->netdev->name);
	}
}
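/*
 * netxen_phantom_init - poll CRB_CMDPEG_STATE until the firmware reports
 * initialization complete (or acked), giving up after a bounded number of
 * retries.
 */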
int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val)
{
	u32 val = 0;
	int retries = 60;

	if (!pegtune_val) {
		do {
			val = adapter->pci_read_normalize(adapter,
					CRB_CMDPEG_STATE);

			if (val == PHAN_INITIALIZE_COMPLETE ||
				val == PHAN_INITIALIZE_ACK)
				return 0;

			msleep(500);

		} while (--retries);

		if (!retries) {
			pegtune_val = adapter->pci_read_normalize(adapter,
					NETXEN_ROMUSB_GLB_PEGTUNE_DONE);
			printk(KERN_WARNING "netxen_phantom_init: init failed, "
					"pegtune_val=%x\n", pegtune_val);
			return -1;
		}
	}

	return 0;
}
int netxen_receive_peg_ready(struct netxen_adapter *adapter)
{
	u32 val = 0;
	int retries = 2000;

	do {
		val = adapter->pci_read_normalize(adapter, CRB_RCVPEG_STATE);

		if (val == PHAN_PEG_RCV_INITIALIZED)
			return 0;

		msleep(10);

	} while (--retries);

	if (!retries) {
		printk(KERN_ERR "Receive Peg initialization not "
				"complete, state: 0x%x.\n", val);
		return -EIO;
	}

	return 0;
}
static struct sk_buff *netxen_process_rxbuf(struct netxen_adapter *adapter,
		struct nx_host_rds_ring *rds_ring, u16 index, u16 cksum)
{
	struct netxen_rx_buffer *buffer;
	struct sk_buff *skb;

	buffer = &rds_ring->rx_buf_arr[index];

	pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
			PCI_DMA_FROMDEVICE);

	skb = buffer->skb;
	if (!skb)
		goto no_skb;

	if (likely(adapter->rx_csum && cksum == STATUS_CKSUM_OK)) {
		adapter->stats.csummed++;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
		skb->ip_summed = CHECKSUM_NONE;

	skb->dev = adapter->netdev;

	buffer->skb = NULL;
no_skb:
	buffer->state = NETXEN_BUFFER_FREE;
	buffer->lro_current_frags = 0;
	buffer->lro_expected_frags = 0;
	list_add_tail(&buffer->list, &rds_ring->free_list);
	return skb;
}
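/*
 * netxen_process_rcv - hand one received packet (or completed LRO
 * aggregate) described by a status descriptor up to the network stack.
 */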
static void netxen_process_rcv(struct netxen_adapter *adapter,
		struct status_desc *desc)
{
	struct net_device *netdev = adapter->netdev;
	u64 sts_data = le64_to_cpu(desc->status_desc_data);
	int index = netxen_get_sts_refhandle(sts_data);
	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
	struct netxen_rx_buffer *buffer;
	struct sk_buff *skb;
	u32 length = netxen_get_sts_totallength(sts_data);
	u32 desc_ctx;
	u16 pkt_offset = 0, cksum;
	struct nx_host_rds_ring *rds_ring;

	desc_ctx = netxen_get_sts_type(sts_data);
	if (unlikely(desc_ctx >= NUM_RCV_DESC_RINGS)) {
		/* invalid ring type reported by firmware; drop */
		return;
	}

	rds_ring = &recv_ctx->rds_rings[desc_ctx];
	if (unlikely(index > rds_ring->max_rx_desc_count)) {
		/* reference handle out of range; drop */
		return;
	}
	buffer = &rds_ring->rx_buf_arr[index];
	if (desc_ctx == RCV_DESC_LRO_CTXID) {
		buffer->lro_current_frags++;
		if (netxen_get_sts_desc_lro_last_frag(desc)) {
			buffer->lro_expected_frags =
			    netxen_get_sts_desc_lro_cnt(desc);
			buffer->lro_length = length;
		}
		if (buffer->lro_current_frags != buffer->lro_expected_frags) {
			/* wait for the remaining LRO fragments */
			return;
		}
	}

	cksum = netxen_get_sts_status(sts_data);

	skb = netxen_process_rxbuf(adapter, rds_ring, index, cksum);
	if (!skb)
		return;

	if (desc_ctx == RCV_DESC_LRO_CTXID) {
		/* True length was only available on the last pkt */
		skb_put(skb, buffer->lro_length);
	} else {
		if (length > rds_ring->skb_size)
			skb_put(skb, rds_ring->skb_size);
		else
			skb_put(skb, length);

		pkt_offset = netxen_get_sts_pkt_offset(sts_data);
		if (pkt_offset)
			skb_pull(skb, pkt_offset);
	}

	skb->protocol = eth_type_trans(skb, netdev);

	netif_receive_skb(skb);

	adapter->stats.no_rcv++;
	adapter->stats.rxbytes += length;
}
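/*
 * netxen_process_rcv_ring - drain up to 'max' status descriptors, then
 * replenish every RDS ring and advance the status ring consumer index.
 */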
int
netxen_process_rcv_ring(struct netxen_adapter *adapter, int max)
{
	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
	struct status_desc *desc_head = recv_ctx->rcv_status_desc_head;
	struct status_desc *desc;
	u32 consumer = recv_ctx->status_rx_consumer;
	int count = 0, ring;
	u64 sts_data;
	u16 opcode;

	while (count < max) {
		desc = &desc_head[consumer];
		sts_data = le64_to_cpu(desc->status_desc_data);

		if (!(sts_data & STATUS_OWNER_HOST))
			break;

		opcode = netxen_get_sts_opcode(sts_data);

		netxen_process_rcv(adapter, desc);

		desc->status_desc_data = cpu_to_le64(STATUS_OWNER_PHANTOM);

		consumer = get_next_index(consumer,
				adapter->max_rx_desc_count);
		count++;
	}

	for (ring = 0; ring < adapter->max_rds_rings; ring++)
		netxen_post_rx_buffers_nodb(adapter, ring);

	/* update the consumer index in phantom */
	if (count) {
		recv_ctx->status_rx_consumer = consumer;
		adapter->pci_write_normalize(adapter,
				recv_ctx->crb_sts_consumer, consumer);
	}

	return count;
}
/* Process Command status ring */
int netxen_process_cmd_ring(struct netxen_adapter *adapter)
{
	u32 last_consumer, consumer;
	int count = 0, i;
	struct netxen_cmd_buffer *buffer;
	struct pci_dev *pdev = adapter->pdev;
	struct net_device *netdev = adapter->netdev;
	struct netxen_skb_frag *frag;
	int done = 0;

	last_consumer = adapter->last_cmd_consumer;
	barrier(); /* cmd_consumer can change underneath */
	consumer = le32_to_cpu(*(adapter->cmd_consumer));

	while (last_consumer != consumer) {
		buffer = &adapter->cmd_buf_arr[last_consumer];
		if (buffer->skb) {
			frag = &buffer->frag_array[0];
			pci_unmap_single(pdev, frag->dma, frag->length,
					PCI_DMA_TODEVICE);

			for (i = 1; i < buffer->frag_count; i++) {
				frag++;	/* Get the next frag */
				pci_unmap_page(pdev, frag->dma, frag->length,
						PCI_DMA_TODEVICE);
			}

			adapter->stats.xmitfinished++;
			dev_kfree_skb_any(buffer->skb);
			buffer->skb = NULL;
		}

		last_consumer = get_next_index(last_consumer,
				adapter->max_tx_desc_count);
		if (++count >= MAX_STATUS_HANDLE)
			break;
	}

	if (count) {
		adapter->last_cmd_consumer = last_consumer;
		smp_mb();
		if (netif_queue_stopped(netdev) && netif_running(netdev)) {
			netif_tx_lock(netdev);
			netif_wake_queue(netdev);
			smp_mb();
			netif_tx_unlock(netdev);
		}
	}
	/*
	 * If everything is freed up to consumer then check if the ring is full
	 * If the ring is full then check if more needs to be freed and
	 * schedule the call back again.
	 *
	 * This happens when there are 2 CPUs. One could be freeing and the
	 * other filling it. If the ring is full when we get out of here and
	 * the card has already interrupted the host then the host can miss the
	 * interrupt.
	 *
	 * There is still a possible race condition and the host could miss an
	 * interrupt. The card has to take care of this.
	 */
	barrier(); /* cmd_consumer can change underneath */
	consumer = le32_to_cpu(*(adapter->cmd_consumer));
	done = (last_consumer == consumer);

	return (done);
}
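/*
 * netxen_post_rx_buffers - allocate and map a fresh skb for every free rx
 * buffer, write the receive descriptors and publish the new producer index
 * (by doorbell message on pre-4.0 firmware).
 */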
void
netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid)
{
	struct pci_dev *pdev = adapter->pdev;
	struct sk_buff *skb;
	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
	struct nx_host_rds_ring *rds_ring = NULL;
	uint32_t producer;
	struct rcv_desc *pdesc;
	struct netxen_rx_buffer *buffer;
	int count = 0;
	netxen_ctx_msg msg = 0;
	dma_addr_t dma;
	struct list_head *head;

	rds_ring = &recv_ctx->rds_rings[ringid];

	producer = rds_ring->producer;
	head = &rds_ring->free_list;

	/* We can start writing rx descriptors into the phantom memory. */
	while (!list_empty(head)) {

		skb = dev_alloc_skb(rds_ring->skb_size);
		if (unlikely(!skb)) {
			break;
		}

		if (!adapter->ahw.cut_through)
			skb_reserve(skb, 2);

		dma = pci_map_single(pdev, skb->data,
				rds_ring->dma_size, PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(pdev, dma)) {
			dev_kfree_skb_any(skb);
			break;
		}

		count++;
		buffer = list_entry(head->next, struct netxen_rx_buffer, list);
		list_del(&buffer->list);

		buffer->skb = skb;
		buffer->state = NETXEN_BUFFER_BUSY;
		buffer->dma = dma;

		/* make a rcv descriptor */
		pdesc = &rds_ring->desc_head[producer];
		pdesc->addr_buffer = cpu_to_le64(dma);
		pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);

		producer = get_next_index(producer, rds_ring->max_rx_desc_count);
	}
	/* if we did allocate buffers, then write the count to Phantom */
	if (count) {
		rds_ring->producer = producer;
		/* Window = 1 */
		adapter->pci_write_normalize(adapter,
				rds_ring->crb_rcv_producer,
				(producer - 1) & (rds_ring->max_rx_desc_count - 1));

		if (adapter->fw_major < 4) {
			/*
			 * Write a doorbell msg to tell phanmon of change in
			 * receive ring producer
			 * Only for firmware version < 4.0.0
			 */
			netxen_set_msg_peg_id(msg, NETXEN_RCV_PEG_DB_ID);
			netxen_set_msg_privid(msg);
			netxen_set_msg_count(msg,
					((producer - 1) &
					 (rds_ring->max_rx_desc_count - 1)));
			netxen_set_msg_ctxid(msg, adapter->portnum);
			netxen_set_msg_opcode(msg, NETXEN_RCV_PRODUCER(ringid));
			writel(msg,
				DB_NORMALIZE(adapter,
					NETXEN_RCV_PRODUCER_OFFSET));
		}
	}
}
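/* Same as netxen_post_rx_buffers(), but never writes a doorbell message. */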
static void
netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter, uint32_t ringid)
{
	struct pci_dev *pdev = adapter->pdev;
	struct sk_buff *skb;
	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
	struct nx_host_rds_ring *rds_ring = NULL;
	uint32_t producer;
	struct rcv_desc *pdesc;
	struct netxen_rx_buffer *buffer;
	int count = 0;
	dma_addr_t dma;
	struct list_head *head;

	rds_ring = &recv_ctx->rds_rings[ringid];

	producer = rds_ring->producer;
	head = &rds_ring->free_list;
	/* We can start writing rx descriptors into the phantom memory. */
	while (!list_empty(head)) {

		skb = dev_alloc_skb(rds_ring->skb_size);
		if (unlikely(!skb)) {
			break;
		}

		if (!adapter->ahw.cut_through)
			skb_reserve(skb, 2);

		dma = pci_map_single(pdev, skb->data,
				rds_ring->dma_size, PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(pdev, dma)) {
			dev_kfree_skb_any(skb);
			break;
		}

		count++;
		buffer = list_entry(head->next, struct netxen_rx_buffer, list);
		list_del(&buffer->list);

		buffer->skb = skb;
		buffer->state = NETXEN_BUFFER_BUSY;
		buffer->dma = dma;

		/* make a rcv descriptor */
		pdesc = &rds_ring->desc_head[producer];
		pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
		pdesc->addr_buffer = cpu_to_le64(buffer->dma);

		producer = get_next_index(producer, rds_ring->max_rx_desc_count);
	}

	/* if we did allocate buffers, then write the count to Phantom */
	if (count) {
		rds_ring->producer = producer;
		/* Window = 1 */
		adapter->pci_write_normalize(adapter,
				rds_ring->crb_rcv_producer,
				(producer - 1) & (rds_ring->max_rx_desc_count - 1));
	}
}
void netxen_nic_clear_stats(struct netxen_adapter *adapter)
{
	memset(&adapter->stats, 0, sizeof(adapter->stats));
}