/*
 * Copyright (C) 2003 - 2006 NetXen, Inc.
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
 * MA  02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.
 *
 * Contact Information:
 * 3965 Freedom Circle, Fourth floor,
 * Santa Clara, CA 95054
 *
 * Source file for NIC routines to initialize the Phantom Hardware
 */

#include <linux/netdevice.h>
#include <linux/delay.h>
#include "netxen_nic.h"
#include "netxen_nic_hw.h"
#include "netxen_nic_phan_reg.h"

struct crb_addr_pair {
	u32 addr;
	u32 data;
};

#define NETXEN_MAX_CRB_XFORM 60
static unsigned int crb_addr_xform[NETXEN_MAX_CRB_XFORM];
#define NETXEN_ADDR_ERROR (0xffffffff)

#define crb_addr_transform(name) \
	crb_addr_xform[NETXEN_HW_PX_MAP_CRB_##name] = \
	NETXEN_HW_CRB_HUB_AGT_ADR_##name << 20

#define NETXEN_NIC_XDMA_RESET 0x8000ff

static void netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
		uint32_t ctx, uint32_t ringid);
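
/*
 * crb_addr_xform[] maps each CRB block id (NETXEN_HW_PX_MAP_CRB_*) to its
 * hub agent address, pre-shifted into the upper bits of a CRB offset.
 * netxen_decode_crb_addr() searches this table to translate addresses
 * found in the flash CRB init list into PCI CRB offsets.
 */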
static void crb_addr_transform_setup(void)
{
	crb_addr_transform(XDMA);
	crb_addr_transform(TIMR);
	crb_addr_transform(SRE);
	crb_addr_transform(SQN3);
	crb_addr_transform(SQN2);
	crb_addr_transform(SQN1);
	crb_addr_transform(SQN0);
	crb_addr_transform(SQS3);
	crb_addr_transform(SQS2);
	crb_addr_transform(SQS1);
	crb_addr_transform(SQS0);
	crb_addr_transform(RPMX7);
	crb_addr_transform(RPMX6);
	crb_addr_transform(RPMX5);
	crb_addr_transform(RPMX4);
	crb_addr_transform(RPMX3);
	crb_addr_transform(RPMX2);
	crb_addr_transform(RPMX1);
	crb_addr_transform(RPMX0);
	crb_addr_transform(ROMUSB);
	crb_addr_transform(SN);
	crb_addr_transform(QMN);
	crb_addr_transform(QMS);
	crb_addr_transform(PGNI);
	crb_addr_transform(PGND);
	crb_addr_transform(PGN3);
	crb_addr_transform(PGN2);
	crb_addr_transform(PGN1);
	crb_addr_transform(PGN0);
	crb_addr_transform(PGSI);
	crb_addr_transform(PGSD);
	crb_addr_transform(PGS3);
	crb_addr_transform(PGS2);
	crb_addr_transform(PGS1);
	crb_addr_transform(PGS0);
	crb_addr_transform(PS);
	crb_addr_transform(PH);
	crb_addr_transform(NIU);
	crb_addr_transform(I2Q);
	crb_addr_transform(EG);
	crb_addr_transform(MN);
	crb_addr_transform(MS);
	crb_addr_transform(CAS2);
	crb_addr_transform(CAS1);
	crb_addr_transform(CAS0);
	crb_addr_transform(CAM);
	crb_addr_transform(C2C1);
	crb_addr_transform(C2C0);
	crb_addr_transform(SMB);
	crb_addr_transform(OCM0);
	crb_addr_transform(I2C0);
}
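
/*
 * Poll the firmware command peg until it reports that its own
 * initialization is complete, then advertise the host's per-port
 * interrupt scheme, MSI mode and multi-port mode, and acknowledge
 * with PHAN_INITIALIZE_ACK.
 */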
int netxen_init_firmware(struct netxen_adapter *adapter)
{
	u32 state = 0, loops = 0, err = 0;

	state = adapter->pci_read_normalize(adapter, CRB_CMDPEG_STATE);

	if (state == PHAN_INITIALIZE_ACK)
		return 0;

	while (state != PHAN_INITIALIZE_COMPLETE && loops < 2000) {
		msleep(1);
		state = adapter->pci_read_normalize(adapter, CRB_CMDPEG_STATE);
		loops++;
	}
	if (loops >= 2000) {
		printk(KERN_ERR "Cmd Peg initialization not complete:%x.\n",
		       state);
		err = -EIO;
		return err;
	}

	adapter->pci_write_normalize(adapter,
			CRB_NIC_CAPABILITIES_HOST, INTR_SCHEME_PERPORT);
	adapter->pci_write_normalize(adapter,
			CRB_NIC_MSI_MODE_HOST, MSI_MODE_MULTIFUNC);
	adapter->pci_write_normalize(adapter,
			CRB_MPORT_MODE, MPORT_MULTI_FUNCTION_MODE);
	adapter->pci_write_normalize(adapter,
			CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);

	return err;
}
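
/*
 * Unmap and free every receive buffer that is still owned by the host,
 * across all receive contexts and RDS rings; used during teardown.
 */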
void netxen_release_rx_buffers(struct netxen_adapter *adapter)
{
	struct netxen_recv_context *recv_ctx;
	struct nx_host_rds_ring *rds_ring;
	struct netxen_rx_buffer *rx_buf;
	int i, ctxid, ring;

	for (ctxid = 0; ctxid < MAX_RCV_CTX; ++ctxid) {
		recv_ctx = &adapter->recv_ctx[ctxid];
		for (ring = 0; ring < adapter->max_rds_rings; ring++) {
			rds_ring = &recv_ctx->rds_rings[ring];
			for (i = 0; i < rds_ring->max_rx_desc_count; ++i) {
				rx_buf = &(rds_ring->rx_buf_arr[i]);
				if (rx_buf->state == NETXEN_BUFFER_FREE)
					continue;
				pci_unmap_single(adapter->pdev,
						rx_buf->dma,
						rds_ring->dma_size,
						PCI_DMA_FROMDEVICE);
				if (rx_buf->skb != NULL)
					dev_kfree_skb_any(rx_buf->skb);
			}
		}
	}
}
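
/*
 * Unmap and free any pending transmit buffers: the skb handed to the
 * driver in netxen_nic_xmit_frame() plus its mapped fragments.
 */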
void netxen_release_tx_buffers(struct netxen_adapter *adapter)
{
	struct netxen_cmd_buffer *cmd_buf;
	struct netxen_skb_frag *buffrag;
	int i, j;

	cmd_buf = adapter->cmd_buf_arr;
	for (i = 0; i < adapter->max_tx_desc_count; i++) {
		buffrag = cmd_buf->frag_array;
		pci_unmap_single(adapter->pdev, buffrag->dma,
				 buffrag->length, PCI_DMA_TODEVICE);
		for (j = 0; j < cmd_buf->frag_count; j++) {
			buffrag++;	/* Get the next frag */
			pci_unmap_page(adapter->pdev, buffrag->dma,
				       buffrag->length, PCI_DMA_TODEVICE);
		}
		/* Free the skb we received in netxen_nic_xmit_frame */
		if (cmd_buf->skb) {
			dev_kfree_skb_any(cmd_buf->skb);
			cmd_buf->skb = NULL;
		}
		cmd_buf++;
	}
}
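
/*
 * Free the software descriptor arrays allocated by
 * netxen_alloc_sw_resources(): every per-ring rx_buf_arr and the
 * transmit cmd_buf_arr.
 */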
void netxen_free_sw_resources(struct netxen_adapter *adapter)
{
	struct netxen_recv_context *recv_ctx;
	struct nx_host_rds_ring *rds_ring;
	int ctx, ring;

	for (ctx = 0; ctx < MAX_RCV_CTX; ctx++) {
		recv_ctx = &adapter->recv_ctx[ctx];
		for (ring = 0; ring < adapter->max_rds_rings; ring++) {
			rds_ring = &recv_ctx->rds_rings[ring];
			if (rds_ring->rx_buf_arr) {
				vfree(rds_ring->rx_buf_arr);
				rds_ring->rx_buf_arr = NULL;
			}
		}
	}

	if (adapter->cmd_buf_arr)
		vfree(adapter->cmd_buf_arr);
}
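
/*
 * Allocate the transmit command buffer array and, for every RDS ring in
 * every receive context, the rx buffer array.  Each ring's DMA and skb
 * sizes are chosen according to its type (normal, jumbo or LRO), and all
 * buffers start out on the ring's free list.
 */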
int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
{
	struct netxen_recv_context *recv_ctx;
	struct nx_host_rds_ring *rds_ring;
	struct netxen_rx_buffer *rx_buf;
	int ctx, ring, i, num_rx_bufs;

	struct netxen_cmd_buffer *cmd_buf_arr;
	struct net_device *netdev = adapter->netdev;

	cmd_buf_arr = (struct netxen_cmd_buffer *)vmalloc(TX_RINGSIZE);
	if (cmd_buf_arr == NULL) {
		printk(KERN_ERR "%s: Failed to allocate cmd buffer ring\n",
		       netdev->name);
		return -ENOMEM;
	}
	memset(cmd_buf_arr, 0, TX_RINGSIZE);
	adapter->cmd_buf_arr = cmd_buf_arr;

	for (ctx = 0; ctx < MAX_RCV_CTX; ctx++) {
		recv_ctx = &adapter->recv_ctx[ctx];
		for (ring = 0; ring < adapter->max_rds_rings; ring++) {
			rds_ring = &recv_ctx->rds_rings[ring];
			switch (RCV_DESC_TYPE(ring)) {
			case RCV_DESC_NORMAL:
				rds_ring->max_rx_desc_count =
					adapter->max_rx_desc_count;
				rds_ring->flags = RCV_DESC_NORMAL;
				if (adapter->ahw.cut_through) {
					rds_ring->dma_size =
						NX_CT_DEFAULT_RX_BUF_LEN;
					rds_ring->skb_size =
						NX_CT_DEFAULT_RX_BUF_LEN;
				} else {
					rds_ring->dma_size = RX_DMA_MAP_LEN;
					rds_ring->skb_size =
						MAX_RX_BUFFER_LENGTH;
				}
				break;

			case RCV_DESC_JUMBO:
				rds_ring->max_rx_desc_count =
					adapter->max_jumbo_rx_desc_count;
				rds_ring->flags = RCV_DESC_JUMBO;
				if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
					rds_ring->dma_size =
						NX_P3_RX_JUMBO_BUF_MAX_LEN;
				else
					rds_ring->dma_size =
						NX_P2_RX_JUMBO_BUF_MAX_LEN;
				rds_ring->skb_size =
					rds_ring->dma_size + NET_IP_ALIGN;
				break;

			case RCV_DESC_LRO:
				rds_ring->max_rx_desc_count =
					adapter->max_lro_rx_desc_count;
				rds_ring->flags = RCV_DESC_LRO;
				rds_ring->dma_size = RX_LRO_DMA_MAP_LEN;
				rds_ring->skb_size = MAX_RX_LRO_BUFFER_LENGTH;
				break;
			}
			rds_ring->rx_buf_arr = (struct netxen_rx_buffer *)
				vmalloc(RCV_BUFFSIZE);
			if (rds_ring->rx_buf_arr == NULL) {
				printk(KERN_ERR "%s: Failed to allocate "
					"rx buffer ring %d\n",
					netdev->name, ring);
				/* free whatever was already allocated */
				goto err_out;
			}
			memset(rds_ring->rx_buf_arr, 0, RCV_BUFFSIZE);
			INIT_LIST_HEAD(&rds_ring->free_list);
			/*
			 * Now go through all of them, set reference handles
			 * and put them in the queues.
			 */
			num_rx_bufs = rds_ring->max_rx_desc_count;
			rx_buf = rds_ring->rx_buf_arr;
			for (i = 0; i < num_rx_bufs; i++) {
				list_add_tail(&rx_buf->list,
						&rds_ring->free_list);
				rx_buf->ref_handle = i;
				rx_buf->state = NETXEN_BUFFER_FREE;
				rx_buf++;
			}
		}
	}

	return 0;

err_out:
	netxen_free_sw_resources(adapter);
	return -ENOMEM;
}
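
/*
 * Install the board-specific hardware access callbacks: one set for the
 * GbE boards and one for the XGbE boards.  On P3 silicon the MTU and
 * promiscuous-mode hooks are overridden with firmware-command versions.
 */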
void netxen_initialize_adapter_ops(struct netxen_adapter *adapter)
{
	switch (adapter->ahw.board_type) {
	case NETXEN_NIC_GBE:
		adapter->enable_phy_interrupts =
		    netxen_niu_gbe_enable_phy_interrupts;
		adapter->disable_phy_interrupts =
		    netxen_niu_gbe_disable_phy_interrupts;
		adapter->macaddr_set = netxen_niu_macaddr_set;
		adapter->set_mtu = netxen_nic_set_mtu_gb;
		adapter->set_promisc = netxen_niu_set_promiscuous_mode;
		adapter->phy_read = netxen_niu_gbe_phy_read;
		adapter->phy_write = netxen_niu_gbe_phy_write;
		adapter->init_port = netxen_niu_gbe_init_port;
		adapter->stop_port = netxen_niu_disable_gbe_port;
		break;

	case NETXEN_NIC_XGBE:
		adapter->enable_phy_interrupts =
		    netxen_niu_xgbe_enable_phy_interrupts;
		adapter->disable_phy_interrupts =
		    netxen_niu_xgbe_disable_phy_interrupts;
		adapter->macaddr_set = netxen_niu_xg_macaddr_set;
		adapter->set_mtu = netxen_nic_set_mtu_xgb;
		adapter->init_port = netxen_niu_xg_init_port;
		adapter->set_promisc = netxen_niu_xg_set_promiscuous_mode;
		adapter->stop_port = netxen_niu_disable_xg_port;
		break;

	default:
		break;
	}

	if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
		adapter->set_mtu = nx_fw_cmd_set_mtu;
		adapter->set_promisc = netxen_p3_nic_set_promisc;
	}
}

/*
 * netxen_decode_crb_addr() - utility to translate from internal Phantom CRB
 * address to external PCI CRB address.
 */
static u32 netxen_decode_crb_addr(u32 addr)
{
	int i;
	u32 base_addr, offset, pci_base;

	crb_addr_transform_setup();

	pci_base = NETXEN_ADDR_ERROR;
	base_addr = addr & 0xfff00000;
	offset = addr & 0x000fffff;

	for (i = 0; i < NETXEN_MAX_CRB_XFORM; i++) {
		if (crb_addr_xform[i] == base_addr) {
			pci_base = i << 20;
			break;
		}
	}
	if (pci_base == NETXEN_ADDR_ERROR)
		return pci_base;
	else
		return (pci_base + offset);
}
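
/*
 * Flash ROM access helpers.  Every access is bracketed by the PCIE_SEM2
 * hardware semaphore (rom_lock()/netxen_rom_unlock()) so that concurrent
 * accesses do not interleave their ROM register programming.
 */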

static long rom_max_timeout = 100;
static long rom_lock_timeout = 10000;

static int rom_lock(struct netxen_adapter *adapter)
{
	int iter;
	u32 done = 0;
	int timeout = 0;

	while (!done) {
		/* acquire semaphore2 from PCI HW block */
		netxen_nic_read_w0(adapter, NETXEN_PCIE_REG(PCIE_SEM2_LOCK),
				   &done);
		if (done == 1)
			break;
		if (timeout >= rom_lock_timeout)
			return -EIO;

		timeout++;
		/* yield the CPU while waiting for the semaphore */
		if (!in_atomic())
			schedule();
		else {
			for (iter = 0; iter < 20; iter++)
				cpu_relax();	/*This a nop instr on i386 */
		}
	}
	netxen_nic_reg_write(adapter, NETXEN_ROM_LOCK_ID, ROM_LOCK_DRIVER);
	return 0;
}

static int netxen_wait_rom_done(struct netxen_adapter *adapter)
{
	long timeout = 0;
	long done = 0;

	while (done == 0) {
		done = netxen_nic_reg_read(adapter, NETXEN_ROMUSB_GLB_STATUS);
		done &= 2;
		timeout++;
		if (timeout >= rom_max_timeout) {
			printk("Timeout reached waiting for rom done\n");
			return -EIO;
		}
	}
	return 0;
}

static void netxen_rom_unlock(struct netxen_adapter *adapter)
{
	u32 val;

	/* release semaphore2 */
	netxen_nic_read_w0(adapter, NETXEN_PCIE_REG(PCIE_SEM2_UNLOCK), &val);
}

static int do_rom_fast_read(struct netxen_adapter *adapter,
			    int addr, int *valp)
{
	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ADDRESS, addr);
	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 3);
	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE, 0xb);
	if (netxen_wait_rom_done(adapter)) {
		printk("Error waiting for rom done\n");
		return -EIO;
	}
	/* reset abyte_cnt and dummy_byte_cnt */
	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0);
	udelay(10);
	netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);

	*valp = netxen_nic_reg_read(adapter, NETXEN_ROMUSB_ROM_RDATA);
	return 0;
}

static int do_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
				  u8 *bytes, size_t size)
{
	int addridx;
	int ret = 0;

	for (addridx = addr; addridx < (addr + size); addridx += 4) {
		int v;

		ret = do_rom_fast_read(adapter, addridx, &v);
		if (ret != 0)
			break;
		*(__le32 *)bytes = cpu_to_le32(v);
		bytes += 4;
	}

	return ret;
}

int
netxen_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
			   u8 *bytes, size_t size)
{
	int ret;

	ret = rom_lock(adapter);
	if (ret < 0)
		return ret;

	ret = do_rom_fast_read_words(adapter, addr, bytes, size);

	netxen_rom_unlock(adapter);

	return ret;
}

int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp)
{
	int ret;

	if (rom_lock(adapter) != 0)
		return -EIO;

	ret = do_rom_fast_read(adapter, addr, valp);
	netxen_rom_unlock(adapter);

	return ret;
}

#define NETXEN_BOARDTYPE	0x4008
#define NETXEN_BOARDNUM		0x400c
#define NETXEN_CHIPNUM		0x4010
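
/*
 * Read the CRB initialization table from flash, translate each entry's
 * Phantom CRB address with netxen_decode_crb_addr(), apply the handful of
 * chip-revision-specific exceptions, and write the values to the hardware.
 * Finishes with the post-reset peg cache and counter writes.
 */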
int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
{
	int addr, val;
	int i, n, init_delay = 0;
	struct crb_addr_pair *buf;
	unsigned offset;
	u32 off;

	/* resetall */
	rom_lock(adapter);
	netxen_crb_writelit_adapter(adapter, NETXEN_ROMUSB_GLB_SW_RESET,
				    0xffffffff);
	netxen_rom_unlock(adapter);

	if (verbose) {
		if (netxen_rom_fast_read(adapter, NETXEN_BOARDTYPE, &val) == 0)
			printk("P2 ROM board type: 0x%08x\n", val);
		else
			printk("Could not read board type\n");
		if (netxen_rom_fast_read(adapter, NETXEN_BOARDNUM, &val) == 0)
			printk("P2 ROM board num: 0x%08x\n", val);
		else
			printk("Could not read board number\n");
		if (netxen_rom_fast_read(adapter, NETXEN_CHIPNUM, &val) == 0)
			printk("P2 ROM chip num: 0x%08x\n", val);
		else
			printk("Could not read chip number\n");
	}

	if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
		if (netxen_rom_fast_read(adapter, 0, &n) != 0 ||
			(n != 0xcafecafe) ||
			netxen_rom_fast_read(adapter, 4, &n) != 0) {
			printk(KERN_ERR "%s: ERROR Reading crb_init area: "
					"n: %08x\n", netxen_nic_driver_name, n);
			return -EIO;
		}
		offset = n & 0xffffU;
		n = (n >> 16) & 0xffffU;
	} else {
		if (netxen_rom_fast_read(adapter, 0, &n) != 0 ||
			!(n & 0x80000000)) {
			printk(KERN_ERR "%s: ERROR Reading crb_init area: "
					"n: %08x\n", netxen_nic_driver_name, n);
			return -EIO;
		}
		offset = 1;
		n &= ~0x80000000;
	}

	if (n < 1024) {
		if (verbose)
			printk(KERN_DEBUG "%s: %d CRB init values found"
			       " in ROM.\n", netxen_nic_driver_name, n);
	} else {
		printk(KERN_ERR "%s:n=0x%x Error! NetXen card flash not"
		       " initialized.\n", __func__, n);
		return -EIO;
	}

	buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL);
	if (buf == NULL) {
		printk("%s: netxen_pinit_from_rom: Unable to calloc memory.\n",
				netxen_nic_driver_name);
		return -ENOMEM;
	}

	for (i = 0; i < n; i++) {
		if (netxen_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 ||
		netxen_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) {
			kfree(buf);
			return -EIO;
		}

		buf[i].addr = addr;
		buf[i].data = val;

		if (verbose)
			printk(KERN_DEBUG "%s: PCI: 0x%08x == 0x%08x\n",
				netxen_nic_driver_name,
				(u32)netxen_decode_crb_addr(addr), val);
	}

	for (i = 0; i < n; i++) {
		off = netxen_decode_crb_addr(buf[i].addr);
		if (off == NETXEN_ADDR_ERROR) {
			printk(KERN_ERR "CRB init value out of range %x\n",
					buf[i].addr);
			continue;
		}
		off += NETXEN_PCI_CRBSPACE;

		/* skipping cold reboot MAGIC */
		if (off == NETXEN_CAM_RAM(0x1fc))
			continue;

		if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
			/* do not reset PCI */
			if (off == (ROMUSB_GLB + 0xbc))
				continue;
			if (off == (ROMUSB_GLB + 0xa8))
				continue;
			if (off == (ROMUSB_GLB + 0xc8)) /* core clock */
				continue;
			if (off == (ROMUSB_GLB + 0x24)) /* MN clock */
				continue;
			if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */
				continue;
			if (off == (NETXEN_CRB_PEG_NET_1 + 0x18))
				buf[i].data = 0x1020;
			/* skip the function enable register */
			if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION))
				continue;
			if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION2))
				continue;
			if ((off & 0x0ff00000) == NETXEN_CRB_SMB)
				continue;
		}

		if (off == NETXEN_ADDR_ERROR) {
			printk(KERN_ERR "%s: Err: Unknown addr: 0x%08x\n",
					netxen_nic_driver_name, buf[i].addr);
			continue;
		}

		init_delay = 1;
		/* After writing this register, HW needs time for CRB */
		/* to quiet down (else crb_window returns 0xffffffff) */
		if (off == NETXEN_ROMUSB_GLB_SW_RESET) {
			init_delay = 1000;
			if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
				/* hold xdma in reset also */
				buf[i].data = NETXEN_NIC_XDMA_RESET;
			}
		}

		adapter->hw_write_wx(adapter, off, &buf[i].data, 4);

		msleep(init_delay);
	}
	kfree(buf);

	/* disable_peg_cache_all */

	/* unreset_net_cache */
	if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
		adapter->hw_read_wx(adapter,
				NETXEN_ROMUSB_GLB_SW_RESET, &val, 4);
		netxen_crb_writelit_adapter(adapter,
				NETXEN_ROMUSB_GLB_SW_RESET, (val & 0xffffff0f));
	}

	/* p2dn replyCount */
	netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_D + 0xec, 0x1e);
	/* disable_peg_cache 0 */
	netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_D + 0x4c, 8);
	/* disable_peg_cache 1 */
	netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_I + 0x4c, 8);

	/* peg_clr 0 */
	netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_0 + 0x8, 0);
	netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_0 + 0xc, 0);
	/* peg_clr 1 */
	netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_1 + 0x8, 0);
	netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_1 + 0xc, 0);
	/* peg_clr 2 */
	netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_2 + 0x8, 0);
	netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_2 + 0xc, 0);
	/* peg_clr 3 */
	netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_3 + 0x8, 0);
	netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_3 + 0xc, 0);

	return 0;
}
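
/*
 * Allocate the "dummy DMA" buffer used by the firmware and program its
 * physical address (split into high and low halves) into the CRB.
 */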
int netxen_initialize_adapter_offload(struct netxen_adapter *adapter)
{
	uint64_t addr;
	uint32_t hi;
	uint32_t lo;

	adapter->dummy_dma.addr =
	    pci_alloc_consistent(adapter->pdev,
				 NETXEN_HOST_DUMMY_DMA_SIZE,
				 &adapter->dummy_dma.phys_addr);
	if (adapter->dummy_dma.addr == NULL) {
		printk("%s: ERROR: Could not allocate dummy DMA memory\n",
		       __func__);
		return -ENOMEM;
	}

	addr = (uint64_t) adapter->dummy_dma.phys_addr;
	hi = (addr >> 32) & 0xffffffff;
	lo = addr & 0xffffffff;

	adapter->pci_write_normalize(adapter, CRB_HOST_DUMMY_BUF_ADDR_HI, hi);
	adapter->pci_write_normalize(adapter, CRB_HOST_DUMMY_BUF_ADDR_LO, lo);

	if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
		uint32_t temp = NETXEN_DUMMY_DMA_SIGNATURE;
		adapter->hw_write_wx(adapter, CRB_HOST_DUMMY_BUF, &temp, 4);
	}

	return 0;
}
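
/*
 * Release the dummy DMA buffer.  On P2 silicon the DMA watchdog must be
 * asked to shut down first, and the buffer is only freed once the
 * shutdown has been confirmed.
 */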
void netxen_free_adapter_offload(struct netxen_adapter *adapter)
{
	int i = 100;

	if (!adapter->dummy_dma.addr)
		return;

	if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
		do {
			if (dma_watchdog_shutdown_request(adapter) == 1)
				break;
			msleep(50);
			if (dma_watchdog_shutdown_poll_result(adapter) == 1)
				break;
		} while (--i);
	}

	if (i) {
		pci_free_consistent(adapter->pdev,
				    NETXEN_HOST_DUMMY_DMA_SIZE,
				    adapter->dummy_dma.addr,
				    adapter->dummy_dma.phys_addr);
		adapter->dummy_dma.addr = NULL;
	} else {
		printk(KERN_ERR "%s: dma_watchdog_shutdown failed\n",
				adapter->netdev->name);
	}
}
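
/*
 * Wait for the firmware command peg to come up; if it never reports
 * PHAN_INITIALIZE_COMPLETE/ACK, dump the PEGTUNE_DONE register for
 * diagnostics and fail.
 */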
int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val)
{
	u32 val = 0;
	int retries = 60;

	if (!pegtune_val) {
		do {
			val = adapter->pci_read_normalize(adapter,
					CRB_CMDPEG_STATE);

			if (val == PHAN_INITIALIZE_COMPLETE ||
				val == PHAN_INITIALIZE_ACK)
				return 0;

			msleep(500);

		} while (--retries);

		if (!retries) {
			pegtune_val = adapter->pci_read_normalize(adapter,
					NETXEN_ROMUSB_GLB_PEGTUNE_DONE);
			printk(KERN_WARNING "netxen_phantom_init: init failed, "
					"pegtune_val=%x\n", pegtune_val);
			return -1;
		}
	}

	return 0;
}

int netxen_receive_peg_ready(struct netxen_adapter *adapter)
{
	u32 val = 0;
	int retries = 2000;

	do {
		val = adapter->pci_read_normalize(adapter, CRB_RCVPEG_STATE);

		if (val == PHAN_PEG_RCV_INITIALIZED)
			return 0;

		msleep(10);

	} while (--retries);

	if (!retries) {
		printk(KERN_ERR "Receive Peg initialization not "
		       "complete, state: 0x%x.\n", val);
		return -EIO;
	}

	return 0;
}
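
/*
 * Unmap a completed rx buffer, attach checksum status to its skb, recycle
 * the buffer onto the ring's free list and hand the skb back to the caller.
 */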
static struct sk_buff *netxen_process_rxbuf(struct netxen_adapter *adapter,
		struct nx_host_rds_ring *rds_ring, u16 index, u16 cksum)
{
	struct netxen_rx_buffer *buffer;
	struct sk_buff *skb;

	buffer = &rds_ring->rx_buf_arr[index];

	pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
			PCI_DMA_FROMDEVICE);

	skb = buffer->skb;
	if (!skb)
		goto no_skb;

	if (likely(adapter->rx_csum && cksum == STATUS_CKSUM_OK)) {
		adapter->stats.csummed++;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
		skb->ip_summed = CHECKSUM_NONE;

	skb->dev = adapter->netdev;

	buffer->skb = NULL;

no_skb:
	buffer->state = NETXEN_BUFFER_FREE;
	buffer->lro_current_frags = 0;
	buffer->lro_expected_frags = 0;
	list_add_tail(&buffer->list, &rds_ring->free_list);
	return skb;
}

/*
 * netxen_process_rcv() sends the received packet to the protocol stack,
 * and if the number of receives exceeds RX_BUFFERS_REFILL, then we
 * invoke the routine to send more rx buffers to the Phantom...
 */
static void netxen_process_rcv(struct netxen_adapter *adapter, int ctxid,
		struct status_desc *desc, struct status_desc *frag_desc)
{
	struct net_device *netdev = adapter->netdev;
	u64 sts_data = le64_to_cpu(desc->status_desc_data);
	int index = netxen_get_sts_refhandle(sts_data);
	struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctxid]);
	struct netxen_rx_buffer *buffer;
	struct sk_buff *skb;
	u32 length = netxen_get_sts_totallength(sts_data);
	u32 desc_ctx;
	u16 pkt_offset = 0, cksum;
	struct nx_host_rds_ring *rds_ring;

	desc_ctx = netxen_get_sts_type(sts_data);
	if (unlikely(desc_ctx >= NUM_RCV_DESC_RINGS)) {
		printk("%s: %s Bad Rcv descriptor ring\n",
		       netxen_nic_driver_name, netdev->name);
		return;
	}

	rds_ring = &recv_ctx->rds_rings[desc_ctx];
	if (unlikely(index > rds_ring->max_rx_desc_count)) {
		DPRINTK(ERR, "Got a buffer index:%x Max is %x\n",
			index, rds_ring->max_rx_desc_count);
		return;
	}
	buffer = &rds_ring->rx_buf_arr[index];
	if (desc_ctx == RCV_DESC_LRO_CTXID) {
		buffer->lro_current_frags++;
		if (netxen_get_sts_desc_lro_last_frag(desc)) {
			buffer->lro_expected_frags =
			    netxen_get_sts_desc_lro_cnt(desc);
			buffer->lro_length = length;
		}
		if (buffer->lro_current_frags != buffer->lro_expected_frags) {
			if (buffer->lro_expected_frags != 0) {
				printk("LRO: (refhandle:%x) recv frag. "
				       "wait for last. flags: %x expected:%d "
				       "have:%d\n", index,
				       netxen_get_sts_desc_lro_last_frag(desc),
				       buffer->lro_expected_frags,
				       buffer->lro_current_frags);
			}
			return;
		}
	}

	cksum = netxen_get_sts_status(sts_data);

	skb = netxen_process_rxbuf(adapter, rds_ring, index, cksum);
	if (!skb)
		return;

	if (desc_ctx == RCV_DESC_LRO_CTXID) {
		/* True length was only available on the last pkt */
		skb_put(skb, buffer->lro_length);
	} else {
		if (length > rds_ring->skb_size)
			skb_put(skb, rds_ring->skb_size);
		else
			skb_put(skb, length);

		pkt_offset = netxen_get_sts_pkt_offset(sts_data);
		if (pkt_offset)
			skb_pull(skb, pkt_offset);
	}

	skb->protocol = eth_type_trans(skb, netdev);

	/*
	 * rx buffer chaining is disabled, walk and free
	 * any spurious rx buffer chain.
	 */
	if (frag_desc) {
		u16 i, nr_frags = desc->nr_frags;

		dev_kfree_skb_any(skb);
		for (i = 0; i < nr_frags; i++) {
			index = le16_to_cpu(frag_desc->frag_handles[i]);
			skb = netxen_process_rxbuf(adapter,
					rds_ring, index, cksum);
			if (skb)
				dev_kfree_skb_any(skb);
		}

		adapter->stats.rxdropped++;
	} else {
		netif_receive_skb(skb);

		adapter->stats.no_rcv++;
		adapter->stats.rxbytes += length;
	}
}

/* Process Receive status ring */
u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctxid, int max)
{
	struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctxid]);
	struct status_desc *desc_head = recv_ctx->rcv_status_desc_head;
	struct status_desc *desc, *frag_desc;
	u32 consumer = recv_ctx->status_rx_consumer;
	int count = 0, ring;
	u64 sts_data;
	u16 opcode;

	while (count < max) {
		desc = &desc_head[consumer];
		if (!(netxen_get_sts_owner(desc) & STATUS_OWNER_HOST)) {
			DPRINTK(ERR, "desc %p ownedby %x\n", desc,
				netxen_get_sts_owner(desc));
			break;
		}

		sts_data = le64_to_cpu(desc->status_desc_data);
		opcode = netxen_get_sts_opcode(sts_data);
		frag_desc = NULL;
		if (opcode == NETXEN_NIC_RXPKT_DESC) {
			if (desc->nr_frags) {
				consumer = get_next_index(consumer,
						adapter->max_rx_desc_count);
				frag_desc = &desc_head[consumer];
				netxen_set_sts_owner(frag_desc,
						STATUS_OWNER_PHANTOM);
			}
		}

		netxen_process_rcv(adapter, ctxid, desc, frag_desc);

		netxen_set_sts_owner(desc, STATUS_OWNER_PHANTOM);

		consumer = get_next_index(consumer,
				adapter->max_rx_desc_count);
		count++;
	}
	for (ring = 0; ring < adapter->max_rds_rings; ring++)
		netxen_post_rx_buffers_nodb(adapter, ctxid, ring);

	/* update the consumer index in phantom */
	if (count) {
		recv_ctx->status_rx_consumer = consumer;

		adapter->pci_write_normalize(adapter,
				recv_ctx->crb_sts_consumer, consumer);
	}

	return count;
}

/* Process Command status ring */
int netxen_process_cmd_ring(struct netxen_adapter *adapter)
{
	u32 last_consumer, consumer;
	int count = 0, i;
	struct netxen_cmd_buffer *buffer;
	struct pci_dev *pdev = adapter->pdev;
	struct net_device *netdev = adapter->netdev;
	struct netxen_skb_frag *frag;
	int done;

	last_consumer = adapter->last_cmd_consumer;
	consumer = le32_to_cpu(*(adapter->cmd_consumer));

	while (last_consumer != consumer) {
		buffer = &adapter->cmd_buf_arr[last_consumer];
		if (buffer->skb) {
			frag = &buffer->frag_array[0];
			pci_unmap_single(pdev, frag->dma, frag->length,
					 PCI_DMA_TODEVICE);
			frag->dma = 0ULL;
			for (i = 1; i < buffer->frag_count; i++) {
				frag++;	/* Get the next frag */
				pci_unmap_page(pdev, frag->dma, frag->length,
					       PCI_DMA_TODEVICE);
				frag->dma = 0ULL;
			}

			adapter->stats.xmitfinished++;
			dev_kfree_skb_any(buffer->skb);
			buffer->skb = NULL;
		}

		last_consumer = get_next_index(last_consumer,
					       adapter->max_tx_desc_count);
		if (++count >= MAX_STATUS_HANDLE)
			break;
	}

	adapter->last_cmd_consumer = last_consumer;

	if (netif_queue_stopped(netdev) && netif_running(netdev)) {
		netif_tx_lock(netdev);
		netif_wake_queue(netdev);
		smp_mb();
		netif_tx_unlock(netdev);
	}
	/*
	 * If everything is freed up to consumer then check if the ring is full
	 * If the ring is full then check if more needs to be freed and
	 * schedule the call back again.
	 *
	 * This happens when there are 2 CPUs. One could be freeing and the
	 * other filling it. If the ring is full when we get out of here and
	 * the card has already interrupted the host then the host can miss the
	 * interrupt.
	 *
	 * There is still a possible race condition and the host could miss an
	 * interrupt. The card has to take care of this.
	 */
	consumer = le32_to_cpu(*(adapter->cmd_consumer));
	done = (last_consumer == consumer);

	return done;
}

/*
 * netxen_post_rx_buffers puts buffers into the Phantom memory.
 */
void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid)
{
	struct pci_dev *pdev = adapter->pdev;
	struct sk_buff *skb;
	struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctx]);
	struct nx_host_rds_ring *rds_ring = NULL;
	uint producer;
	struct rcv_desc *pdesc;
	struct netxen_rx_buffer *buffer;
	int count = 0;
	netxen_ctx_msg msg = 0;
	dma_addr_t dma;
	struct list_head *head;

	rds_ring = &recv_ctx->rds_rings[ringid];

	producer = rds_ring->producer;
	head = &rds_ring->free_list;

	/* We can start writing rx descriptors into the phantom memory. */
	while (!list_empty(head)) {

		skb = dev_alloc_skb(rds_ring->skb_size);
		if (unlikely(!skb)) {
			break;
		}

		if (!adapter->ahw.cut_through)
			skb_reserve(skb, 2);

		dma = pci_map_single(pdev, skb->data,
				rds_ring->dma_size, PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(pdev, dma)) {
			dev_kfree_skb_any(skb);
			break;
		}

		count++;
		buffer = list_entry(head->next, struct netxen_rx_buffer, list);
		list_del(&buffer->list);

		buffer->skb = skb;
		buffer->state = NETXEN_BUFFER_BUSY;
		buffer->dma = dma;

		/* make a rcv descriptor */
		pdesc = &rds_ring->desc_head[producer];
		pdesc->addr_buffer = cpu_to_le64(dma);
		pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);

		producer = get_next_index(producer, rds_ring->max_rx_desc_count);
	}
	/* if we did allocate buffers, then write the count to Phantom */
	if (count) {
		rds_ring->producer = producer;
		adapter->pci_write_normalize(adapter,
				rds_ring->crb_rcv_producer,
				(producer - 1) & (rds_ring->max_rx_desc_count - 1));

		if (adapter->fw_major < 4) {
			/*
			 * Write a doorbell msg to tell phanmon of change in
			 * receive ring producer
			 * Only for firmware version < 4.0.0
			 */
			netxen_set_msg_peg_id(msg, NETXEN_RCV_PEG_DB_ID);
			netxen_set_msg_privid(msg);
			netxen_set_msg_count(msg,
					     ((producer - 1) &
					      (rds_ring->max_rx_desc_count - 1)));
			netxen_set_msg_ctxid(msg, adapter->portnum);
			netxen_set_msg_opcode(msg, NETXEN_RCV_PRODUCER(ringid));
			writel(msg,
			       DB_NORMALIZE(adapter,
					    NETXEN_RCV_PRODUCER_OFFSET));
		}
	}
}
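
/*
 * Same as netxen_post_rx_buffers(), but without the doorbell write; used
 * from the rx processing path (netxen_process_rcv_ring) to refill rings.
 */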
static void netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
		uint32_t ctx, uint32_t ringid)
{
	struct pci_dev *pdev = adapter->pdev;
	struct sk_buff *skb;
	struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctx]);
	struct nx_host_rds_ring *rds_ring = NULL;
	uint producer;
	struct rcv_desc *pdesc;
	struct netxen_rx_buffer *buffer;
	int count = 0;
	dma_addr_t dma;
	struct list_head *head;

	rds_ring = &recv_ctx->rds_rings[ringid];

	producer = rds_ring->producer;
	head = &rds_ring->free_list;
	/* We can start writing rx descriptors into the phantom memory. */
	while (!list_empty(head)) {

		skb = dev_alloc_skb(rds_ring->skb_size);
		if (unlikely(!skb)) {
			break;
		}

		if (!adapter->ahw.cut_through)
			skb_reserve(skb, 2);

		dma = pci_map_single(pdev, skb->data,
				rds_ring->dma_size, PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(pdev, dma)) {
			dev_kfree_skb_any(skb);
			break;
		}

		count++;
		buffer = list_entry(head->next, struct netxen_rx_buffer, list);
		list_del(&buffer->list);

		buffer->skb = skb;
		buffer->state = NETXEN_BUFFER_BUSY;
		buffer->dma = dma;

		/* make a rcv descriptor */
		pdesc = &rds_ring->desc_head[producer];
		pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
		pdesc->addr_buffer = cpu_to_le64(buffer->dma);

		producer = get_next_index(producer, rds_ring->max_rx_desc_count);
	}

	/* if we did allocate buffers, then write the count to Phantom */
	if (count) {
		rds_ring->producer = producer;
		adapter->pci_write_normalize(adapter,
				rds_ring->crb_rcv_producer,
				(producer - 1) & (rds_ring->max_rx_desc_count - 1));
	}
}

void netxen_nic_clear_stats(struct netxen_adapter *adapter)
{
	memset(&adapter->stats, 0, sizeof(adapter->stats));
}