/*
 * Copyright (C) 2003 - 2009 NetXen, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
 * MA  02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.
 *
 * Contact Information:
 * Cupertino, CA 95014-0701
 */
#include <linux/netdevice.h>
#include <linux/delay.h>

#include "netxen_nic.h"
#include "netxen_nic_hw.h"
#include "netxen_nic_phan_reg.h"
struct crb_addr_pair {
        u32 addr;
        u32 data;
};
#define NETXEN_MAX_CRB_XFORM 60
static unsigned int crb_addr_xform[NETXEN_MAX_CRB_XFORM];

#define NETXEN_ADDR_ERROR (0xffffffff)
#define crb_addr_transform(name) \
        crb_addr_xform[NETXEN_HW_PX_MAP_CRB_##name] = \
        NETXEN_HW_CRB_HUB_AGT_ADR_##name << 20
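/*
 * Illustrative expansion (not part of the original source): for a block
 * named NIU, crb_addr_transform(NIU) stores
 * NETXEN_HW_CRB_HUB_AGT_ADR_NIU << 20 into
 * crb_addr_xform[NETXEN_HW_PX_MAP_CRB_NIU], i.e. the hub agent number for
 * that block placed in the upper bits of its CRB base address.
 */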
#define NETXEN_NIC_XDMA_RESET 0x8000ff
static void
netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
                struct nx_host_rds_ring *rds_ring);
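/*
 * Build the CRB address transform table used by netxen_decode_crb_addr():
 * one entry per hardware block, giving the hub agent address for that
 * block's CRB window.
 */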
static void crb_addr_transform_setup(void)
{
        crb_addr_transform(XDMA);
        crb_addr_transform(TIMR);
        crb_addr_transform(SRE);
        crb_addr_transform(SQN3);
        crb_addr_transform(SQN2);
        crb_addr_transform(SQN1);
        crb_addr_transform(SQN0);
        crb_addr_transform(SQS3);
        crb_addr_transform(SQS2);
        crb_addr_transform(SQS1);
        crb_addr_transform(SQS0);
        crb_addr_transform(RPMX7);
        crb_addr_transform(RPMX6);
        crb_addr_transform(RPMX5);
        crb_addr_transform(RPMX4);
        crb_addr_transform(RPMX3);
        crb_addr_transform(RPMX2);
        crb_addr_transform(RPMX1);
        crb_addr_transform(RPMX0);
        crb_addr_transform(ROMUSB);
        crb_addr_transform(SN);
        crb_addr_transform(QMN);
        crb_addr_transform(QMS);
        crb_addr_transform(PGNI);
        crb_addr_transform(PGND);
        crb_addr_transform(PGN3);
        crb_addr_transform(PGN2);
        crb_addr_transform(PGN1);
        crb_addr_transform(PGN0);
        crb_addr_transform(PGSI);
        crb_addr_transform(PGSD);
        crb_addr_transform(PGS3);
        crb_addr_transform(PGS2);
        crb_addr_transform(PGS1);
        crb_addr_transform(PGS0);
        crb_addr_transform(PS);
        crb_addr_transform(PH);
        crb_addr_transform(NIU);
        crb_addr_transform(I2Q);
        crb_addr_transform(EG);
        crb_addr_transform(MN);
        crb_addr_transform(MS);
        crb_addr_transform(CAS2);
        crb_addr_transform(CAS1);
        crb_addr_transform(CAS0);
        crb_addr_transform(CAM);
        crb_addr_transform(C2C1);
        crb_addr_transform(C2C0);
        crb_addr_transform(SMB);
        crb_addr_transform(OCM0);
        crb_addr_transform(I2C0);
}
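/*
 * Wait for the command peg to finish its own initialization, then
 * advertise the host's interrupt scheme, MSI mode and multi-port mode,
 * and acknowledge the handshake through CRB_CMDPEG_STATE.
 */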
int netxen_init_firmware(struct netxen_adapter *adapter)
{
        u32 state = 0, loops = 0, err = 0;

        state = adapter->pci_read_normalize(adapter, CRB_CMDPEG_STATE);
        if (state == PHAN_INITIALIZE_ACK)
                return 0;

        while (state != PHAN_INITIALIZE_COMPLETE && loops < 2000) {
                msleep(1);
                state = adapter->pci_read_normalize(adapter, CRB_CMDPEG_STATE);
                loops++;
        }
        if (loops >= 2000) {
                printk(KERN_ERR "Cmd Peg initialization not complete:%x.\n",
                                state);
                return -EIO;
        }

        adapter->pci_write_normalize(adapter,
                        CRB_NIC_CAPABILITIES_HOST, INTR_SCHEME_PERPORT);
        adapter->pci_write_normalize(adapter,
                        CRB_NIC_MSI_MODE_HOST, MSI_MODE_MULTIFUNC);
        adapter->pci_write_normalize(adapter,
                        CRB_MPORT_MODE, MPORT_MULTI_FUNCTION_MODE);
        adapter->pci_write_normalize(adapter,
                        CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);

        return err;
}
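/*
 * Unmap and free every receive buffer still owned by the driver, across
 * all RDS rings.  Called on teardown before the rings themselves are
 * released.
 */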
void netxen_release_rx_buffers(struct netxen_adapter *adapter)
{
        struct netxen_recv_context *recv_ctx;
        struct nx_host_rds_ring *rds_ring;
        struct netxen_rx_buffer *rx_buf;
        int i, ring;

        recv_ctx = &adapter->recv_ctx;
        for (ring = 0; ring < adapter->max_rds_rings; ring++) {
                rds_ring = &recv_ctx->rds_rings[ring];
                for (i = 0; i < rds_ring->num_desc; ++i) {
                        rx_buf = &(rds_ring->rx_buf_arr[i]);
                        if (rx_buf->state == NETXEN_BUFFER_FREE)
                                continue;
                        pci_unmap_single(adapter->pdev,
                                        rx_buf->dma,
                                        rds_ring->dma_size,
                                        PCI_DMA_FROMDEVICE);
                        if (rx_buf->skb != NULL)
                                dev_kfree_skb_any(rx_buf->skb);
                }
        }
}
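/*
 * Unmap the DMA fragments and free the skbs of any transmit command
 * buffers that are still outstanding on the TX ring.
 */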
void netxen_release_tx_buffers(struct netxen_adapter *adapter)
{
        struct netxen_cmd_buffer *cmd_buf;
        struct netxen_skb_frag *buffrag;
        int i, j;

        cmd_buf = adapter->cmd_buf_arr;
        for (i = 0; i < adapter->num_txd; i++) {
                buffrag = cmd_buf->frag_array;
                pci_unmap_single(adapter->pdev, buffrag->dma,
                                buffrag->length, PCI_DMA_TODEVICE);
                for (j = 0; j < cmd_buf->frag_count; j++) {
                        buffrag++;      /* get the next frag */
                        pci_unmap_page(adapter->pdev, buffrag->dma,
                                        buffrag->length, PCI_DMA_TODEVICE);
                }
                if (cmd_buf->skb)
                        dev_kfree_skb_any(cmd_buf->skb);
                cmd_buf++;
        }
}
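/*
 * Free the software state for both directions: the rx_buf array of every
 * RDS ring and the TX command buffer array allocated in
 * netxen_alloc_sw_resources().
 */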
void netxen_free_sw_resources(struct netxen_adapter *adapter)
{
        struct netxen_recv_context *recv_ctx;
        struct nx_host_rds_ring *rds_ring;
        int ring;

        recv_ctx = &adapter->recv_ctx;
        for (ring = 0; ring < adapter->max_rds_rings; ring++) {
                rds_ring = &recv_ctx->rds_rings[ring];
                if (rds_ring->rx_buf_arr) {
                        vfree(rds_ring->rx_buf_arr);
                        rds_ring->rx_buf_arr = NULL;
                }
        }

        if (adapter->cmd_buf_arr)
                vfree(adapter->cmd_buf_arr);
}
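/*
 * Allocate the host-side bookkeeping for the TX command ring and for each
 * RDS ring (normal, jumbo, LRO): size the rx buffers for the ring type and
 * chip revision, seed the per-ring free lists, and initialize the status
 * (SDS) rings.
 */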
int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
{
        struct netxen_recv_context *recv_ctx;
        struct nx_host_rds_ring *rds_ring;
        struct nx_host_sds_ring *sds_ring;
        struct netxen_rx_buffer *rx_buf;
        int ring, i, num_rx_bufs;

        struct netxen_cmd_buffer *cmd_buf_arr;
        struct net_device *netdev = adapter->netdev;

        cmd_buf_arr =
                (struct netxen_cmd_buffer *)vmalloc(TX_BUFF_RINGSIZE(adapter));
        if (cmd_buf_arr == NULL) {
                printk(KERN_ERR "%s: Failed to allocate cmd buffer ring\n",
                                netdev->name);
                return -ENOMEM;
        }
        memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(adapter));
        adapter->cmd_buf_arr = cmd_buf_arr;

        recv_ctx = &adapter->recv_ctx;
        for (ring = 0; ring < adapter->max_rds_rings; ring++) {
                rds_ring = &recv_ctx->rds_rings[ring];
                switch (ring) {
                case RCV_RING_NORMAL:
                        rds_ring->num_desc = adapter->num_rxd;
                        if (adapter->ahw.cut_through) {
                                rds_ring->dma_size =
                                        NX_CT_DEFAULT_RX_BUF_LEN;
                                rds_ring->skb_size =
                                        NX_CT_DEFAULT_RX_BUF_LEN;
                        } else {
                                rds_ring->dma_size = RX_DMA_MAP_LEN;
                                rds_ring->skb_size =
                                        MAX_RX_BUFFER_LENGTH;
                        }
                        break;

                case RCV_RING_JUMBO:
                        rds_ring->num_desc = adapter->num_jumbo_rxd;
                        if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
                                rds_ring->dma_size =
                                        NX_P3_RX_JUMBO_BUF_MAX_LEN;
                        else
                                rds_ring->dma_size =
                                        NX_P2_RX_JUMBO_BUF_MAX_LEN;
                        rds_ring->skb_size =
                                rds_ring->dma_size + NET_IP_ALIGN;
                        break;

                case RCV_RING_LRO:
                        rds_ring->num_desc = adapter->num_lro_rxd;
                        rds_ring->dma_size = RX_LRO_DMA_MAP_LEN;
                        rds_ring->skb_size = MAX_RX_LRO_BUFFER_LENGTH;
                        break;
                }

                rds_ring->rx_buf_arr = (struct netxen_rx_buffer *)
                        vmalloc(RCV_BUFF_RINGSIZE(rds_ring));
                if (rds_ring->rx_buf_arr == NULL) {
                        printk(KERN_ERR "%s: Failed to allocate "
                                        "rx buffer ring %d\n",
                                        netdev->name, ring);
                        /* free whatever was already allocated */
                        goto err_out;
                }
                memset(rds_ring->rx_buf_arr, 0, RCV_BUFF_RINGSIZE(rds_ring));
                INIT_LIST_HEAD(&rds_ring->free_list);
                /*
                 * Now go through all of them, set reference handles
                 * and put them in the queues.
                 */
                num_rx_bufs = rds_ring->num_desc;
                rx_buf = rds_ring->rx_buf_arr;
                for (i = 0; i < num_rx_bufs; i++) {
                        list_add_tail(&rx_buf->list,
                                        &rds_ring->free_list);
                        rx_buf->ref_handle = i;
                        rx_buf->state = NETXEN_BUFFER_FREE;
                        rx_buf++;
                }
                spin_lock_init(&rds_ring->lock);
        }

        for (ring = 0; ring < adapter->max_sds_rings; ring++) {
                sds_ring = &recv_ctx->sds_rings[ring];
                sds_ring->irq = adapter->msix_entries[ring].vector;
                sds_ring->clean_tx = (ring == 0);
                sds_ring->post_rxd = (ring == 0);
                sds_ring->adapter = adapter;
                sds_ring->num_desc = adapter->num_rxd;

                for (i = 0; i < NUM_RCV_DESC_RINGS; i++)
                        INIT_LIST_HEAD(&sds_ring->free_list[i]);
        }

        return 0;

err_out:
        netxen_free_sw_resources(adapter);
        return -ENOMEM;
}
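/*
 * Hook up the NIU access routines that differ between the 1G and 10G
 * MACs; on P3 silicon the MTU and promiscuous-mode handlers are routed
 * through firmware commands instead.
 */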
void netxen_initialize_adapter_ops(struct netxen_adapter *adapter)
{
        switch (adapter->ahw.port_type) {
        case NETXEN_NIC_GBE:
                adapter->enable_phy_interrupts =
                        netxen_niu_gbe_enable_phy_interrupts;
                adapter->disable_phy_interrupts =
                        netxen_niu_gbe_disable_phy_interrupts;
                adapter->macaddr_set = netxen_niu_macaddr_set;
                adapter->set_mtu = netxen_nic_set_mtu_gb;
                adapter->set_promisc = netxen_niu_set_promiscuous_mode;
                adapter->phy_read = netxen_niu_gbe_phy_read;
                adapter->phy_write = netxen_niu_gbe_phy_write;
                adapter->init_port = netxen_niu_gbe_init_port;
                adapter->stop_port = netxen_niu_disable_gbe_port;
                break;

        case NETXEN_NIC_XGBE:
                adapter->enable_phy_interrupts =
                        netxen_niu_xgbe_enable_phy_interrupts;
                adapter->disable_phy_interrupts =
                        netxen_niu_xgbe_disable_phy_interrupts;
                adapter->macaddr_set = netxen_niu_xg_macaddr_set;
                adapter->set_mtu = netxen_nic_set_mtu_xgb;
                adapter->init_port = netxen_niu_xg_init_port;
                adapter->set_promisc = netxen_niu_xg_set_promiscuous_mode;
                adapter->stop_port = netxen_niu_disable_xg_port;
                break;

        default:
                break;
        }

        if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
                adapter->set_mtu = nx_fw_cmd_set_mtu;
                adapter->set_promisc = netxen_p3_nic_set_promisc;
        }
}
/*
 * netxen_decode_crb_addr() - utility to translate from internal Phantom CRB
 * address to external PCI CRB address.
 */
static u32 netxen_decode_crb_addr(u32 addr)
{
        int i;
        u32 base_addr, offset, pci_base;

        crb_addr_transform_setup();

        pci_base = NETXEN_ADDR_ERROR;
        base_addr = addr & 0xfff00000;
        offset = addr & 0x000fffff;

        for (i = 0; i < NETXEN_MAX_CRB_XFORM; i++) {
                if (crb_addr_xform[i] == base_addr) {
                        pci_base = i << 20;
                        break;
                }
        }
        if (pci_base == NETXEN_ADDR_ERROR)
                return pci_base;
        else
                return (pci_base + offset);
}
static long rom_max_timeout = 100;
static long rom_lock_timeout = 10000;
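/*
 * Serialize flash access between the driver and firmware by grabbing
 * hardware semaphore 2 in the PCIe block, then claim the ROM lock ID.
 */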
static int rom_lock(struct netxen_adapter *adapter)
        /* acquire semaphore2 from PCI HW block */
        netxen_nic_read_w0(adapter, NETXEN_PCIE_REG(PCIE_SEM2_LOCK),
        if (timeout >= rom_lock_timeout)
                return -EIO;
        for (iter = 0; iter < 20; iter++)
                cpu_relax();    /* This is a nop instruction on i386 */
        netxen_nic_reg_write(adapter, NETXEN_ROM_LOCK_ID, ROM_LOCK_DRIVER);
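/*
 * Poll the ROMUSB glue status register until the flash controller reports
 * the previous ROM instruction as done, or give up after rom_max_timeout
 * iterations.
 */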
static int netxen_wait_rom_done(struct netxen_adapter *adapter)
        done = netxen_nic_reg_read(adapter, NETXEN_ROMUSB_GLB_STATUS);
        if (timeout >= rom_max_timeout) {
                printk("Timeout reached waiting for rom done\n");
                return -EIO;
        }
static void netxen_rom_unlock(struct netxen_adapter *adapter)
{
        u32 val;
        /* release semaphore2 */
        netxen_nic_read_w0(adapter, NETXEN_PCIE_REG(PCIE_SEM2_UNLOCK), &val);
}
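/*
 * Issue a single ROM fast-read instruction (opcode 0xb) for the given
 * flash address through the ROMUSB registers and return the 32-bit word
 * that was read.
 */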
static int do_rom_fast_read(struct netxen_adapter *adapter,
                int addr, int *valp)
{
        netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ADDRESS, addr);
        netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
        netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 3);
        netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE, 0xb);
        if (netxen_wait_rom_done(adapter)) {
                printk("Error waiting for rom done\n");
                return -EIO;
        }
        /* reset abyte_cnt and dummy_byte_cnt */
        netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0);
        netxen_nic_reg_write(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);

        *valp = netxen_nic_reg_read(adapter, NETXEN_ROMUSB_ROM_RDATA);
        return 0;
}
static int do_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
                u8 *bytes, size_t size)
{
        for (addridx = addr; addridx < (addr + size); addridx += 4) {
                ret = do_rom_fast_read(adapter, addridx, &v);
                *(__le32 *)bytes = cpu_to_le32(v);
                bytes += 4;
        }
        return ret;
}
int
netxen_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
                u8 *bytes, size_t size)
        ret = rom_lock(adapter);
        ret = do_rom_fast_read_words(adapter, addr, bytes, size);
        netxen_rom_unlock(adapter);
int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp)
{
        int ret;
        if (rom_lock(adapter) != 0)
                return -EIO;
        ret = do_rom_fast_read(adapter, addr, valp);
        netxen_rom_unlock(adapter);
        return ret;
}
#define NETXEN_BOARDTYPE 0x4008
#define NETXEN_BOARDNUM 0x400c
#define NETXEN_CHIPNUM 0x4010
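/*
 * Replay the CRB initialization table stored in flash: reset the chip,
 * read the (addr, value) pairs written at manufacturing time, translate
 * each address with netxen_decode_crb_addr(), skip or patch the entries
 * that must not be replayed on this revision, and write the rest to the
 * hardware.  Finishes by clearing the peg scratch registers.
 */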
int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
{
        int addr, val;
        int i, n, init_delay = 0;
        struct crb_addr_pair *buf;
        unsigned offset;
        u32 off;

        /* resetall */
        netxen_crb_writelit_adapter(adapter, NETXEN_ROMUSB_GLB_SW_RESET,
                        0xffffffff);
        netxen_rom_unlock(adapter);

        if (verbose) {
                if (netxen_rom_fast_read(adapter, NETXEN_BOARDTYPE, &val) == 0)
                        printk("P2 ROM board type: 0x%08x\n", val);
                else
                        printk("Could not read board type\n");
                if (netxen_rom_fast_read(adapter, NETXEN_BOARDNUM, &val) == 0)
                        printk("P2 ROM board num: 0x%08x\n", val);
                else
                        printk("Could not read board number\n");
                if (netxen_rom_fast_read(adapter, NETXEN_CHIPNUM, &val) == 0)
                        printk("P2 ROM chip num: 0x%08x\n", val);
                else
                        printk("Could not read chip number\n");
        }

        if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
                if (netxen_rom_fast_read(adapter, 0, &n) != 0 ||
                        netxen_rom_fast_read(adapter, 4, &n) != 0) {
                        printk(KERN_ERR "%s: ERROR Reading crb_init area: "
                                        "n: %08x\n", netxen_nic_driver_name, n);
                        return -EIO;
                }
                offset = n & 0xffffU;
                n = (n >> 16) & 0xffffU;
        } else {
                if (netxen_rom_fast_read(adapter, 0, &n) != 0 ||
                                !(n & 0x80000000)) {
                        printk(KERN_ERR "%s: ERROR Reading crb_init area: "
                                        "n: %08x\n", netxen_nic_driver_name, n);
                        return -EIO;
                }
                offset = 1;
                n &= ~0x80000000;
        }

        if (verbose)
                printk(KERN_DEBUG "%s: %d CRB init values found"
                                " in ROM.\n", netxen_nic_driver_name, n);
        if (n >= 1024) {
                printk(KERN_ERR "%s: n=0x%x Error! NetXen card flash not"
                                " initialized.\n", __func__, n);
                return -EIO;
        }

        buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL);
        if (buf == NULL) {
                printk("%s: netxen_pinit_from_rom: Unable to allocate memory.\n",
                                netxen_nic_driver_name);
                return -ENOMEM;
        }

        for (i = 0; i < n; i++) {
                if (netxen_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 ||
                netxen_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) {
                        kfree(buf);
                        return -EIO;
                }

                buf[i].addr = addr;
                buf[i].data = val;

                if (verbose)
                        printk(KERN_DEBUG "%s: PCI: 0x%08x == 0x%08x\n",
                                        netxen_nic_driver_name,
                                        (u32)netxen_decode_crb_addr(addr), val);
        }

        for (i = 0; i < n; i++) {

                off = netxen_decode_crb_addr(buf[i].addr);
                if (off == NETXEN_ADDR_ERROR) {
                        printk(KERN_ERR "CRB init value out of range %x\n",
                                        buf[i].addr);
                        continue;
                }
                off += NETXEN_PCI_CRBSPACE;
                /* skipping cold reboot MAGIC */
                if (off == NETXEN_CAM_RAM(0x1fc))
                        continue;

                if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
                        /* do not reset PCI */
                        if (off == (ROMUSB_GLB + 0xbc))
                                continue;
                        if (off == (ROMUSB_GLB + 0xa8))
                                continue;
                        if (off == (ROMUSB_GLB + 0xc8)) /* core clock */
                                continue;
                        if (off == (ROMUSB_GLB + 0x24)) /* MN clock */
                                continue;
                        if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */
                                continue;
                        if (off == (NETXEN_CRB_PEG_NET_1 + 0x18))
                                buf[i].data = 0x1020;
                        /* skip the function enable register */
                        if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION))
                                continue;
                        if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION2))
                                continue;
                        if ((off & 0x0ff00000) == NETXEN_CRB_SMB)
                                continue;
                }

                if (off == NETXEN_ADDR_ERROR) {
                        printk(KERN_ERR "%s: Err: Unknown addr: 0x%08x\n",
                                        netxen_nic_driver_name, buf[i].addr);
                        continue;
                }

                /* After writing this register, HW needs time for CRB */
                /* to quiet down (else crb_window returns 0xffffffff) */
                if (off == NETXEN_ROMUSB_GLB_SW_RESET) {
                        init_delay = 1;
                        if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
                                /* hold xdma in reset also */
                                buf[i].data = NETXEN_NIC_XDMA_RESET;
                        }
                }

                adapter->hw_write_wx(adapter, off, &buf[i].data, 4);
        }
        kfree(buf);

        /* disable_peg_cache_all */

        /* unreset_net_cache */
        if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
                adapter->hw_read_wx(adapter,
                                NETXEN_ROMUSB_GLB_SW_RESET, &val, 4);
                netxen_crb_writelit_adapter(adapter,
                                NETXEN_ROMUSB_GLB_SW_RESET, (val & 0xffffff0f));
        }

        /* p2dn replyCount */
        netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_D + 0xec, 0x1e);
        /* disable_peg_cache 0 */
        netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_D + 0x4c, 8);
        /* disable_peg_cache 1 */
        netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_I + 0x4c, 8);

        /* peg_clr_all */
        netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_0 + 0x8, 0);
        netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_0 + 0xc, 0);
        netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_1 + 0x8, 0);
        netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_1 + 0xc, 0);
        netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_2 + 0x8, 0);
        netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_2 + 0xc, 0);
        netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_3 + 0x8, 0);
        netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_3 + 0xc, 0);

        return 0;
}
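/*
 * Allocate the "dummy DMA" buffer that the firmware's DMA watchdog uses
 * as a scratch target, and publish its bus address to the firmware
 * through the CRB.
 */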
int netxen_initialize_adapter_offload(struct netxen_adapter *adapter)
{
        uint64_t addr;
        uint32_t hi;
        uint32_t lo;

        adapter->dummy_dma.addr =
                pci_alloc_consistent(adapter->pdev,
                                NETXEN_HOST_DUMMY_DMA_SIZE,
                                &adapter->dummy_dma.phys_addr);
        if (adapter->dummy_dma.addr == NULL) {
                printk("%s: ERROR: Could not allocate dummy DMA memory\n",
                                adapter->netdev->name);
                return -ENOMEM;
        }

        addr = (uint64_t) adapter->dummy_dma.phys_addr;
        hi = (addr >> 32) & 0xffffffff;
        lo = addr & 0xffffffff;

        adapter->pci_write_normalize(adapter, CRB_HOST_DUMMY_BUF_ADDR_HI, hi);
        adapter->pci_write_normalize(adapter, CRB_HOST_DUMMY_BUF_ADDR_LO, lo);

        if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
                adapter->hw_write_wx(adapter, CRB_HOST_DUMMY_BUF, &temp, 4);
        }
        return 0;
}
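/*
 * Undo netxen_initialize_adapter_offload(): on P2 hardware ask the
 * firmware to stop its DMA watchdog first, then free the dummy DMA
 * buffer.
 */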
void netxen_free_adapter_offload(struct netxen_adapter *adapter)
{
        int i = 100;

        if (!adapter->dummy_dma.addr)
                return;

        if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
                do {
                        if (dma_watchdog_shutdown_request(adapter) == 1)
                                break;
                        msleep(50);
                        if (dma_watchdog_shutdown_poll_result(adapter) == 1)
                                break;
                } while (--i);
        }

        if (i) {
                pci_free_consistent(adapter->pdev,
                                NETXEN_HOST_DUMMY_DMA_SIZE,
                                adapter->dummy_dma.addr,
                                adapter->dummy_dma.phys_addr);
                adapter->dummy_dma.addr = NULL;
        } else
                printk(KERN_ERR "%s: dma_watchdog_shutdown failed\n",
                                adapter->netdev->name);
}
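/*
 * Check whether the command peg reports PHAN_INITIALIZE_COMPLETE (or ACK)
 * in CRB_CMDPEG_STATE; on failure, report the PEGTUNE_DONE value to aid
 * debugging.
 */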
int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val)
        val = adapter->pci_read_normalize(adapter,
                        CRB_CMDPEG_STATE);

        if (val == PHAN_INITIALIZE_COMPLETE ||
                val == PHAN_INITIALIZE_ACK)
                return 0;

        pegtune_val = adapter->pci_read_normalize(adapter,
                        NETXEN_ROMUSB_GLB_PEGTUNE_DONE);
        printk(KERN_WARNING "netxen_phantom_init: init failed, "
                        "pegtune_val=%x\n", pegtune_val);
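/*
 * Verify that the receive peg reports PHAN_PEG_RCV_INITIALIZED in
 * CRB_RCVPEG_STATE before the rx path is brought up.
 */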
int netxen_receive_peg_ready(struct netxen_adapter *adapter)
        val = adapter->pci_read_normalize(adapter, CRB_RCVPEG_STATE);

        if (val == PHAN_PEG_RCV_INITIALIZED)
                return 0;

        printk(KERN_ERR "Receive Peg initialization not "
                        "complete, state: 0x%x.\n", val);
        return -EIO;
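/*
 * Allocate and DMA-map one receive skb for the given RDS ring; on legacy
 * (non cut-through) hardware the data pointer is offset for IP header
 * alignment.
 */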
static int
netxen_alloc_rx_skb(struct netxen_adapter *adapter,
                struct nx_host_rds_ring *rds_ring,
                struct netxen_rx_buffer *buffer)
{
        struct sk_buff *skb;
        dma_addr_t dma;
        struct pci_dev *pdev = adapter->pdev;

        buffer->skb = dev_alloc_skb(rds_ring->skb_size);
        skb = buffer->skb;

        if (!adapter->ahw.cut_through)
                skb_reserve(skb, 2);

        dma = pci_map_single(pdev, skb->data,
                        rds_ring->dma_size, PCI_DMA_FROMDEVICE);

        if (pci_dma_mapping_error(pdev, dma)) {
                dev_kfree_skb_any(skb);
                buffer->skb = NULL;
                return 1;
        }

        buffer->dma = dma;
        buffer->state = NETXEN_BUFFER_BUSY;
        return 0;
}
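/*
 * Reclaim the skb for a completed rx descriptor: unmap it, set the
 * checksum status reported by hardware, and mark the buffer slot free.
 */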
static struct sk_buff *netxen_process_rxbuf(struct netxen_adapter *adapter,
                struct nx_host_rds_ring *rds_ring, u16 index, u16 cksum)
{
        struct netxen_rx_buffer *buffer;
        struct sk_buff *skb;

        buffer = &rds_ring->rx_buf_arr[index];

        pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
                        PCI_DMA_FROMDEVICE);

        skb = buffer->skb;

        if (likely(adapter->rx_csum && cksum == STATUS_CKSUM_OK)) {
                adapter->stats.csummed++;
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        } else
                skb->ip_summed = CHECKSUM_NONE;

        skb->dev = adapter->netdev;

        buffer->state = NETXEN_BUFFER_FREE;
        return skb;
}
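/*
 * Handle one received packet: look up the rx buffer by reference handle,
 * reclaim its skb, trim it to the reported length, strip the hardware
 * packet offset and hand it to the stack.  Returns the buffer so the
 * caller can recycle it onto the free list.
 */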
static struct netxen_rx_buffer *
netxen_process_rcv(struct netxen_adapter *adapter,
                int ring, int index, int length, int cksum, int pkt_offset)
{
        struct net_device *netdev = adapter->netdev;
        struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
        struct netxen_rx_buffer *buffer;
        struct sk_buff *skb;

        struct nx_host_rds_ring *rds_ring = &recv_ctx->rds_rings[ring];

        if (unlikely(index > rds_ring->num_desc))
                return NULL;

        buffer = &rds_ring->rx_buf_arr[index];

        skb = netxen_process_rxbuf(adapter, rds_ring, index, cksum);

        if (length > rds_ring->skb_size)
                skb_put(skb, rds_ring->skb_size);
        else
                skb_put(skb, length);

        if (pkt_offset)
                skb_pull(skb, pkt_offset);

        skb->protocol = eth_type_trans(skb, netdev);

        netif_receive_skb(skb);

        adapter->stats.no_rcv++;
        adapter->stats.rxbytes += length;

        return buffer;
}
#define netxen_merge_rx_buffers(list, head) \
        do { list_splice_tail_init(list, head); } while (0)
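/*
 * Drain up to "max" entries from a status (SDS) ring: hand each completed
 * packet to netxen_process_rcv(), give the descriptors back to the
 * firmware, refill the RDS rings from the per-ring free lists and advance
 * the consumer index.
 */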
int
netxen_process_rcv_ring(struct nx_host_sds_ring *sds_ring, int max)
{
        struct netxen_adapter *adapter = sds_ring->adapter;

        struct list_head *cur;

        struct status_desc *desc;
        struct netxen_rx_buffer *rxbuf;

        u32 consumer = sds_ring->consumer;

        int count = 0;
        u64 sts_data;
        int opcode, ring, index, length, cksum, pkt_offset;

        while (count < max) {
                desc = &sds_ring->desc_head[consumer];
                sts_data = le64_to_cpu(desc->status_desc_data);

                if (!(sts_data & STATUS_OWNER_HOST))
                        break;

                ring = netxen_get_sts_type(sts_data);
                if (ring > RCV_RING_JUMBO)
                        goto skip;

                opcode = netxen_get_sts_opcode(sts_data);

                index = netxen_get_sts_refhandle(sts_data);
                length = netxen_get_sts_totallength(sts_data);
                cksum = netxen_get_sts_status(sts_data);
                pkt_offset = netxen_get_sts_pkt_offset(sts_data);

                rxbuf = netxen_process_rcv(adapter, ring, index,
                                length, cksum, pkt_offset);

                if (rxbuf)
                        list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);

skip:
                desc->status_desc_data = cpu_to_le64(STATUS_OWNER_PHANTOM);

                consumer = get_next_index(consumer, sds_ring->num_desc);
                count++;
        }

        for (ring = 0; ring < adapter->max_rds_rings; ring++) {
                struct nx_host_rds_ring *rds_ring =
                        &adapter->recv_ctx.rds_rings[ring];

                if (!list_empty(&sds_ring->free_list[ring])) {
                        list_for_each(cur, &sds_ring->free_list[ring]) {
                                rxbuf = list_entry(cur,
                                                struct netxen_rx_buffer, list);
                                netxen_alloc_rx_skb(adapter, rds_ring, rxbuf);
                        }
                        spin_lock(&rds_ring->lock);
                        netxen_merge_rx_buffers(&sds_ring->free_list[ring],
                                        &rds_ring->free_list);
                        spin_unlock(&rds_ring->lock);
                }

                netxen_post_rx_buffers_nodb(adapter, rds_ring);
        }

        if (count) {
                sds_ring->consumer = consumer;
                adapter->pci_write_normalize(adapter,
                                sds_ring->crb_sts_consumer, consumer);
        }

        return count;
}
/* Process Command status ring */
int netxen_process_cmd_ring(struct netxen_adapter *adapter)
{
        u32 last_consumer, consumer;
        int count = 0, i;
        struct netxen_cmd_buffer *buffer;
        struct pci_dev *pdev = adapter->pdev;
        struct net_device *netdev = adapter->netdev;
        struct netxen_skb_frag *frag;
        int done;

        if (!spin_trylock(&adapter->tx_clean_lock))
                return 1;

        last_consumer = adapter->last_cmd_consumer;
        barrier(); /* cmd_consumer can change underneath */
        consumer = le32_to_cpu(*(adapter->cmd_consumer));

        while (last_consumer != consumer) {
                buffer = &adapter->cmd_buf_arr[last_consumer];

                frag = &buffer->frag_array[0];
                pci_unmap_single(pdev, frag->dma, frag->length,
                                PCI_DMA_TODEVICE);

                for (i = 1; i < buffer->frag_count; i++) {
                        frag++; /* Get the next frag */
                        pci_unmap_page(pdev, frag->dma, frag->length,
                                        PCI_DMA_TODEVICE);
                }

                adapter->stats.xmitfinished++;
                dev_kfree_skb_any(buffer->skb);

                last_consumer = get_next_index(last_consumer,
                                adapter->num_txd);
                if (++count >= MAX_STATUS_HANDLE)
                        break;
        }

        adapter->last_cmd_consumer = last_consumer;

        if (netif_queue_stopped(netdev) && netif_running(netdev)) {
                netif_tx_lock(netdev);
                netif_wake_queue(netdev);
                netif_tx_unlock(netdev);
        }
        /*
         * If everything is freed up to consumer then check if the ring is full
         * If the ring is full then check if more needs to be freed and
         * schedule the call back again.
         *
         * This happens when there are 2 CPUs. One could be freeing and the
         * other filling it. If the ring is full when we get out of here and
         * the card has already interrupted the host then the host can miss the
         * interrupt.
         *
         * There is still a possible race condition and the host could miss an
         * interrupt. The card has to take care of this.
         */
        barrier(); /* cmd_consumer can change underneath */
        consumer = le32_to_cpu(*(adapter->cmd_consumer));
        done = (last_consumer == consumer);
        spin_unlock(&adapter->tx_clean_lock);

        return done;
}
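/*
 * Refill an RDS ring from its free list: allocate and map skbs, build
 * receive descriptors, advance the producer index, and (on pre-4.0
 * firmware) ring the doorbell so the receive peg notices the new buffers.
 */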
void
netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid,
        struct nx_host_rds_ring *rds_ring)
{
        struct rcv_desc *pdesc;
        struct netxen_rx_buffer *buffer;
        int producer, count = 0;
        netxen_ctx_msg msg = 0;
        struct list_head *head;

        producer = rds_ring->producer;

        spin_lock(&rds_ring->lock);
        head = &rds_ring->free_list;
        while (!list_empty(head)) {

                buffer = list_entry(head->next, struct netxen_rx_buffer, list);

                if (netxen_alloc_rx_skb(adapter, rds_ring, buffer))
                        break;

                count++;
                list_del(&buffer->list);

                /* make a rcv descriptor */
                pdesc = &rds_ring->desc_head[producer];
                pdesc->addr_buffer = cpu_to_le64(buffer->dma);
                pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
                pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);

                producer = get_next_index(producer, rds_ring->num_desc);
        }
        spin_unlock(&rds_ring->lock);

        if (count) {
                rds_ring->producer = producer;
                adapter->pci_write_normalize(adapter,
                                rds_ring->crb_rcv_producer,
                                (producer - 1) & (rds_ring->num_desc - 1));

                if (adapter->fw_major < 4) {
                        /*
                         * Write a doorbell msg to tell phanmon of change in
                         * receive ring producer
                         * Only for firmware version < 4.0.0
                         */
                        netxen_set_msg_peg_id(msg, NETXEN_RCV_PEG_DB_ID);
                        netxen_set_msg_privid(msg);
                        netxen_set_msg_count(msg,
                                        ((producer - 1) &
                                        (rds_ring->num_desc - 1)));
                        netxen_set_msg_ctxid(msg, adapter->portnum);
                        netxen_set_msg_opcode(msg, NETXEN_RCV_PRODUCER(ringid));
                        writel(msg,
                                        DB_NORMALIZE(adapter,
                                        NETXEN_RCV_PRODUCER_OFFSET));
                }
        }
}
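/*
 * Same refill as netxen_post_rx_buffers(), but without the doorbell
 * write; used from the rx fast path, where the ring lock is only
 * try-acquired and the refill is simply skipped if it is contended.
 */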
static void
netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
                struct nx_host_rds_ring *rds_ring)
{
        struct rcv_desc *pdesc;
        struct netxen_rx_buffer *buffer;
        int producer, count = 0;
        struct list_head *head;

        producer = rds_ring->producer;
        if (!spin_trylock(&rds_ring->lock))
                return;

        head = &rds_ring->free_list;
        while (!list_empty(head)) {

                buffer = list_entry(head->next, struct netxen_rx_buffer, list);

                if (netxen_alloc_rx_skb(adapter, rds_ring, buffer))
                        break;

                count++;
                list_del(&buffer->list);

                /* make a rcv descriptor */
                pdesc = &rds_ring->desc_head[producer];
                pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
                pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
                pdesc->addr_buffer = cpu_to_le64(buffer->dma);

                producer = get_next_index(producer, rds_ring->num_desc);
        }

        if (count) {
                rds_ring->producer = producer;
                adapter->pci_write_normalize(adapter,
                                rds_ring->crb_rcv_producer,
                                (producer - 1) & (rds_ring->num_desc - 1));
        }
        spin_unlock(&rds_ring->lock);
}
void netxen_nic_clear_stats(struct netxen_adapter *adapter)
{
        memset(&adapter->stats, 0, sizeof(adapter->stats));
}