/*
 * Copyright (C) Freescale Semiconductor, Inc. 2006. All rights reserved.
 *
 * Author: Shlomi Gridish <gridish@freescale.com>
 *
 * QE UCC Gigabit Ethernet Driver
 *
 * Jul 6, 2006 Li Yang <LeoLi@freescale.com>
 * - Rearrange code and style fixes
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/fsl_devices.h>
#include <linux/platform_device.h>
#include <linux/mii.h>

#include <asm/uaccess.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/immap_qe.h>
#include <asm/qe.h>
#include <asm/ucc.h>
#include <asm/ucc_fast.h>

#include "ucc_geth.h"
#include "ucc_geth_phy.h"
#define DRV_DESC "QE UCC Gigabit Ethernet Controller version: Sept 11, 2006"
#define DRV_NAME "ucc_geth"

#define ugeth_printk(level, format, arg...)  \
	printk(level format "\n", ## arg)

#define ugeth_dbg(format, arg...)            \
	ugeth_printk(KERN_DEBUG, format, ## arg)
#define ugeth_err(format, arg...)            \
	ugeth_printk(KERN_ERR, format, ## arg)
#define ugeth_info(format, arg...)           \
	ugeth_printk(KERN_INFO, format, ## arg)
#define ugeth_warn(format, arg...)           \
	ugeth_printk(KERN_WARNING, format, ## arg)

#ifdef UGETH_VERBOSE_DEBUG
#define ugeth_vdbg ugeth_dbg
#else
#define ugeth_vdbg(fmt, args...) do { } while (0)
#endif				/* UGETH_VERBOSE_DEBUG */
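/*
 * Illustrative expansion (not from the original source): a call such as
 * ugeth_info("UCC%d up", 1) becomes printk(KERN_INFO "UCC%d up" "\n", 1),
 * and ugeth_vdbg() compiles to nothing unless UGETH_VERBOSE_DEBUG is set.
 */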
static DEFINE_SPINLOCK(ugeth_lock);
static ucc_geth_info_t ugeth_primary_info = {
	.uf_info = {
		    .bd_mem_part = MEM_PART_SYSTEM,
		    .rtsm = UCC_FAST_SEND_IDLES_BETWEEN_FRAMES,
		    .max_rx_buf_length = 1536,
		    /* FIXME: should be changed in run time for 1G and 100M */
#ifdef CONFIG_UGETH_HAS_GIGA
		    .urfs = UCC_GETH_URFS_GIGA_INIT,
		    .urfet = UCC_GETH_URFET_GIGA_INIT,
		    .urfset = UCC_GETH_URFSET_GIGA_INIT,
		    .utfs = UCC_GETH_UTFS_GIGA_INIT,
		    .utfet = UCC_GETH_UTFET_GIGA_INIT,
		    .utftt = UCC_GETH_UTFTT_GIGA_INIT,
#else
		    .urfs = UCC_GETH_URFS_INIT,
		    .urfet = UCC_GETH_URFET_INIT,
		    .urfset = UCC_GETH_URFSET_INIT,
		    .utfs = UCC_GETH_UTFS_INIT,
		    .utfet = UCC_GETH_UTFET_INIT,
		    .utftt = UCC_GETH_UTFTT_INIT,
#endif
		    .mode = UCC_FAST_PROTOCOL_MODE_ETHERNET,
		    .ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
		    .tenc = UCC_FAST_TX_ENCODING_NRZ,
		    .renc = UCC_FAST_RX_ENCODING_NRZ,
		    .tcrc = UCC_FAST_16_BIT_CRC,
		    .synl = UCC_FAST_SYNC_LEN_NOT_USED,
		    },
	.numQueuesTx = 1,
	.numQueuesRx = 1,
	.extendedFilteringChainPointer = ((uint32_t) NULL),
	.typeorlen = 3072 /* 1536 */ ,
	.nonBackToBackIfgPart1 = 0x40,
	.nonBackToBackIfgPart2 = 0x60,
	.miminumInterFrameGapEnforcement = 0x50,
	.backToBackInterFrameGap = 0x60,
	.strictpriorityq = 0xff,
	.altBebTruncation = 0xa,
	.maxRetransmission = 0xf,
	.collisionWindow = 0x37,
	.receiveFlowControl = 1,
	.maxGroupAddrInHash = 4,
	.maxIndAddrInHash = 4,
	.maxFrameLength = 1518,
	.minFrameLength = 64,
	.ecamptr = ((uint32_t) NULL),
	.eventRegMask = UCCE_OTHER,
	.pausePeriod = 0xf000,
	.interruptcoalescingmaxvalue = {1, 1, 1, 1, 1, 1, 1, 1},
	.numStationAddresses = UCC_GETH_NUM_OF_STATION_ADDRESSES_1,
	.largestexternallookupkeysize =
	    QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE,
	.statisticsMode = UCC_GETH_STATISTICS_GATHERING_MODE_NONE,
	.vlanOperationTagged = UCC_GETH_VLAN_OPERATION_TAGGED_NOP,
	.vlanOperationNonTagged = UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP,
	.rxQoSMode = UCC_GETH_QOS_MODE_DEFAULT,
	.aufc = UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE,
	.padAndCrc = MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC,
	.numThreadsTx = UCC_GETH_NUM_OF_THREADS_4,
	.numThreadsRx = UCC_GETH_NUM_OF_THREADS_4,
	.riscTx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
	.riscRx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
};

static ucc_geth_info_t ugeth_info[8];
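/*
 * ugeth_info[] holds one ucc_geth_info_t per UCC (indexed by ucc_num);
 * each slot is presumably seeded from ugeth_primary_info at probe time
 * before board-specific fields are filled in.
 */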
static void mem_disp(u8 *addr, int size)
{
	u8 *i;
	int size16Aling = (size >> 4) << 4;
	int size4Aling = (size >> 2) << 2;
	int notAlign = 0;
	if (size % 16)
		notAlign = 1;

	for (i = addr; (u32) i < (u32) addr + size16Aling; i += 16)
		printk("0x%08x: %08x %08x %08x %08x\r\n",
		       (u32) i,
		       *((u32 *) (i)),
		       *((u32 *) (i + 4)),
		       *((u32 *) (i + 8)), *((u32 *) (i + 12)));
	if (notAlign == 1)
		printk("0x%08x: ", (u32) i);
	for (; (u32) i < (u32) addr + size4Aling; i += 4)
		printk("%08x ", *((u32 *) (i)));
	for (; (u32) i < (u32) addr + size; i++)
		printk("%02x", *((u8 *) (i)));
	if (notAlign == 1)
		printk("\r\n");
}
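/*
 * Worked example: size = 22 gives size16Aling = 16 and size4Aling = 20,
 * so mem_disp() prints one 16-byte row, then one 4-byte word, then the
 * final two bytes one at a time.
 */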
#ifdef CONFIG_UGETH_FILTERING
static void enqueue(struct list_head *node, struct list_head *lh)
{
	unsigned long flags;

	spin_lock_irqsave(&ugeth_lock, flags);
	list_add_tail(node, lh);
	spin_unlock_irqrestore(&ugeth_lock, flags);
}
#endif				/* CONFIG_UGETH_FILTERING */
static struct list_head *dequeue(struct list_head *lh)
{
	unsigned long flags;

	spin_lock_irqsave(&ugeth_lock, flags);
	if (!list_empty(lh)) {
		struct list_head *node = lh->next;

		list_del(node);
		spin_unlock_irqrestore(&ugeth_lock, flags);
		return node;
	} else {
		spin_unlock_irqrestore(&ugeth_lock, flags);
		return NULL;
	}
}
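/* Both helpers serialize all list manipulation through the single
 * global ugeth_lock spinlock. */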
static int get_interface_details(enet_interface_e enet_interface,
				 enet_speed_e *speed,
				 int *r10m,
				 int *rmm,
				 int *rpm,
				 int *tbi, int *limited_to_full_duplex)
{
	/* Analyze enet_interface according to Interface Mode
	   Configuration table */
	switch (enet_interface) {
	case ENET_10_MII:
		*speed = ENET_SPEED_10BT;
		break;
	case ENET_10_RMII:
		*speed = ENET_SPEED_10BT;
		*r10m = 1;
		*rmm = 1;
		break;
	case ENET_10_RGMII:
		*speed = ENET_SPEED_10BT;
		*rpm = 1;
		*r10m = 1;
		*limited_to_full_duplex = 1;
		break;
	case ENET_100_MII:
		*speed = ENET_SPEED_100BT;
		break;
	case ENET_100_RMII:
		*speed = ENET_SPEED_100BT;
		*rmm = 1;
		break;
	case ENET_100_RGMII:
		*speed = ENET_SPEED_100BT;
		*rpm = 1;
		*limited_to_full_duplex = 1;
		break;
	case ENET_1000_GMII:
		*speed = ENET_SPEED_1000BT;
		*limited_to_full_duplex = 1;
		break;
	case ENET_1000_RGMII:
		*speed = ENET_SPEED_1000BT;
		*rpm = 1;
		*limited_to_full_duplex = 1;
		break;
	case ENET_1000_TBI:
		*speed = ENET_SPEED_1000BT;
		*tbi = 1;
		*limited_to_full_duplex = 1;
		break;
	case ENET_1000_RTBI:
		*speed = ENET_SPEED_1000BT;
		*rpm = 1;
		*tbi = 1;
		*rmm = 1;
		*limited_to_full_duplex = 1;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static struct sk_buff *get_new_skb(ucc_geth_private_t *ugeth, u8 *bd)
{
	struct sk_buff *skb = NULL;

	skb = dev_alloc_skb(ugeth->ug_info->uf_info.max_rx_buf_length +
			    UCC_GETH_RX_DATA_BUF_ALIGNMENT);

	if (skb == NULL)
		return NULL;

	/* We need the data buffer to be aligned properly.  We will reserve
	 * as many bytes as needed to align the data properly
	 */
	skb_reserve(skb,
		    UCC_GETH_RX_DATA_BUF_ALIGNMENT -
		    (((unsigned)skb->data) & (UCC_GETH_RX_DATA_BUF_ALIGNMENT -
					      1)));

	skb->dev = ugeth->dev;

	BD_BUFFER_SET(bd,
		      dma_map_single(NULL,
				     skb->data,
				     ugeth->ug_info->uf_info.max_rx_buf_length +
				     UCC_GETH_RX_DATA_BUF_ALIGNMENT,
				     DMA_FROM_DEVICE));

	BD_STATUS_AND_LENGTH_SET(bd,
				 (R_E | R_I |
				  (BD_STATUS_AND_LENGTH(bd) & R_W)));

	return skb;
}
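/*
 * Alignment example (assuming UCC_GETH_RX_DATA_BUF_ALIGNMENT is 64, as
 * defined in ucc_geth.h): if skb->data has low bits 0x24, skb_reserve()
 * advances it by 64 - 0x24 = 28 bytes, so the buffer handed to the BD
 * is 64-byte aligned.
 */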
static int rx_bd_buffer_set(ucc_geth_private_t *ugeth, u8 rxQ)
{
	u8 *bd;
	u32 bd_status;
	struct sk_buff *skb;
	int i;

	bd = ugeth->p_rx_bd_ring[rxQ];
	i = 0;

	do {
		bd_status = BD_STATUS_AND_LENGTH(bd);
		skb = get_new_skb(ugeth, bd);

		if (!skb)	/* If can not allocate data buffer,
				   abort. Cleanup will be elsewhere */
			return -ENOMEM;

		ugeth->rx_skbuff[rxQ][i] = skb;

		/* advance the BD pointer */
		bd += UCC_GETH_SIZE_OF_BD;
		i++;
	} while (!(bd_status & R_W));

	return 0;
}
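/* The do/while keys off R_W, the wrap bit set on the last BD of the
 * ring, so each descriptor in the ring receives exactly one fresh skb. */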
static int fill_init_enet_entries(ucc_geth_private_t *ugeth,
				  volatile u32 *p_start,
				  u8 num_entries,
				  u32 thread_size,
				  u32 thread_alignment,
				  qe_risc_allocation_e risc,
				  int skip_page_for_first_entry)
{
	u32 init_enet_offset;
	u8 i;
	int snum;

	for (i = 0; i < num_entries; i++) {
		if ((snum = qe_get_snum()) < 0) {
			ugeth_err("fill_init_enet_entries: Can not get SNUM.");
			return snum;
		}
		if ((i == 0) && skip_page_for_first_entry)
			/* First entry of Rx does not have page */
			init_enet_offset = 0;
		else {
			init_enet_offset =
			    qe_muram_alloc(thread_size, thread_alignment);
			if (IS_MURAM_ERR(init_enet_offset)) {
				ugeth_err
				    ("fill_init_enet_entries: Can not allocate DPRAM memory.");
				qe_put_snum((u8) snum);
				return -ENOMEM;
			}
		}
		*(p_start++) =
		    ((u8) snum << ENET_INIT_PARAM_SNUM_SHIFT) | init_enet_offset
		    | risc;
	}

	return 0;
}
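/* Each init-enet entry pairs a QE serial number (SNUM) with a MURAM
 * page for the thread; on MURAM allocation failure the SNUM is handed
 * back before the error is returned. */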
static int return_init_enet_entries(ucc_geth_private_t *ugeth,
				    volatile u32 *p_start,
				    u8 num_entries,
				    qe_risc_allocation_e risc,
				    int skip_page_for_first_entry)
{
	u32 init_enet_offset;
	u8 i;
	int snum;

	for (i = 0; i < num_entries; i++) {
		/* Check that this entry was actually valid --
		   needed in case failed in allocations */
		if ((*p_start & ENET_INIT_PARAM_RISC_MASK) == risc) {
			snum =
			    (u32) (*p_start & ENET_INIT_PARAM_SNUM_MASK) >>
			    ENET_INIT_PARAM_SNUM_SHIFT;
			qe_put_snum((u8) snum);
			if (!((i == 0) && skip_page_for_first_entry)) {
				/* First entry of Rx does not have page */
				init_enet_offset =
				    (*p_start &
				     ENET_INIT_PARAM_PTR_MASK);
				qe_muram_free(init_enet_offset);
			}
			*(p_start++) = 0;	/* Just for cosmetics */
		}
	}

	return 0;
}
static int dump_init_enet_entries(ucc_geth_private_t *ugeth,
				  volatile u32 *p_start,
				  u8 num_entries,
				  u32 thread_size,
				  qe_risc_allocation_e risc,
				  int skip_page_for_first_entry)
{
	u32 init_enet_offset;
	u8 i;
	int snum;

	for (i = 0; i < num_entries; i++) {
		/* Check that this entry was actually valid --
		   needed in case failed in allocations */
		if ((*p_start & ENET_INIT_PARAM_RISC_MASK) == risc) {
			snum =
			    (u32) (*p_start & ENET_INIT_PARAM_SNUM_MASK) >>
			    ENET_INIT_PARAM_SNUM_SHIFT;
			qe_put_snum((u8) snum);
			if (!((i == 0) && skip_page_for_first_entry)) {
				/* First entry of Rx does not have page */
				init_enet_offset =
				    (*p_start &
				     ENET_INIT_PARAM_PTR_MASK);
				ugeth_info("Init enet entry %d:", i);
				ugeth_info("Base address: 0x%08x",
					   (u32)
					   qe_muram_addr(init_enet_offset));
				mem_disp(qe_muram_addr(init_enet_offset),
					 thread_size);
			}
			p_start++;
		}
	}

	return 0;
}
#ifdef CONFIG_UGETH_FILTERING
static enet_addr_container_t *get_enet_addr_container(void)
{
	enet_addr_container_t *enet_addr_cont;

	/* allocate memory */
	enet_addr_cont = kmalloc(sizeof(enet_addr_container_t), GFP_KERNEL);
	if (!enet_addr_cont) {
		ugeth_err("%s: No memory for enet_addr_container_t object.",
			  __FUNCTION__);
		return NULL;
	}

	return enet_addr_cont;
}
#endif				/* CONFIG_UGETH_FILTERING */
static void put_enet_addr_container(enet_addr_container_t *enet_addr_cont)
{
	kfree(enet_addr_cont);
}
#ifdef CONFIG_UGETH_FILTERING
static int hw_add_addr_in_paddr(ucc_geth_private_t *ugeth,
				enet_addr_t *p_enet_addr, u8 paddr_num)
{
	ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;

	if (!(paddr_num < NUM_OF_PADDRS)) {
		ugeth_warn("%s: Illegal paddr_num.", __FUNCTION__);
		return -EINVAL;
	}

	p_82xx_addr_filt =
	    (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram->
	    addressfiltering;

	/* Ethernet frames are defined in Little Endian mode, */
	/* therefore to insert the address we reverse the bytes. */
	out_be16(&p_82xx_addr_filt->paddr[paddr_num].h,
		 (u16) (((u16) (((u16) ((*p_enet_addr)[5])) << 8)) |
			(u16) (*p_enet_addr)[4]));
	out_be16(&p_82xx_addr_filt->paddr[paddr_num].m,
		 (u16) (((u16) (((u16) ((*p_enet_addr)[3])) << 8)) |
			(u16) (*p_enet_addr)[2]));
	out_be16(&p_82xx_addr_filt->paddr[paddr_num].l,
		 (u16) (((u16) (((u16) ((*p_enet_addr)[1])) << 8)) |
			(u16) (*p_enet_addr)[0]));

	return 0;
}
#endif				/* CONFIG_UGETH_FILTERING */
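/*
 * Worked example: for the station address 00:04:9f:01:02:03
 * (addr[0]..addr[5]), the writes above produce paddr.h = 0x0302,
 * paddr.m = 0x019f and paddr.l = 0x0400, each 16-bit half being
 * byte-swapped relative to wire order.
 */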
static int hw_clear_addr_in_paddr(ucc_geth_private_t *ugeth, u8 paddr_num)
{
	ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;

	if (!(paddr_num < NUM_OF_PADDRS)) {
		ugeth_warn("%s: Illegal paddr_num.", __FUNCTION__);
		return -EINVAL;
	}

	p_82xx_addr_filt =
	    (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram->
	    addressfiltering;

	/* Writing address ff.ff.ff.ff.ff.ff disables address
	   recognition for this register */
	out_be16(&p_82xx_addr_filt->paddr[paddr_num].h, 0xffff);
	out_be16(&p_82xx_addr_filt->paddr[paddr_num].m, 0xffff);
	out_be16(&p_82xx_addr_filt->paddr[paddr_num].l, 0xffff);

	return 0;
}
static void hw_add_addr_in_hash(ucc_geth_private_t *ugeth,
				enet_addr_t *p_enet_addr)
{
	ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
	u32 cecr_subblock;

	p_82xx_addr_filt =
	    (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram->
	    addressfiltering;

	cecr_subblock =
	    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);

	/* Ethernet frames are defined in Little Endian mode,
	   therefore to insert the address into the hash (Big Endian mode),
	   we reverse the bytes. */
	out_be16(&p_82xx_addr_filt->taddr.h,
		 (u16) (((u16) (((u16) ((*p_enet_addr)[5])) << 8)) |
			(u16) (*p_enet_addr)[4]));
	out_be16(&p_82xx_addr_filt->taddr.m,
		 (u16) (((u16) (((u16) ((*p_enet_addr)[3])) << 8)) |
			(u16) (*p_enet_addr)[2]));
	out_be16(&p_82xx_addr_filt->taddr.l,
		 (u16) (((u16) (((u16) ((*p_enet_addr)[1])) << 8)) |
			(u16) (*p_enet_addr)[0]));

	qe_issue_cmd(QE_SET_GROUP_ADDRESS, cecr_subblock,
		     (u8) QE_CR_PROTOCOL_ETHERNET, 0);
}
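/* The QE firmware computes the hash bucket itself: the driver only
 * writes the byte-reversed address into taddr and then issues the
 * QE_SET_GROUP_ADDRESS host command. */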
#ifdef CONFIG_UGETH_MAGIC_PACKET
static void magic_packet_detection_enable(ucc_geth_private_t *ugeth)
{
	ucc_fast_private_t *uccf;
	ucc_geth_t *ug_regs;
	u32 maccfg2, uccm;

	uccf = ugeth->uccf;
	ug_regs = ugeth->ug_regs;

	/* Enable interrupts for magic packet detection */
	uccm = in_be32(uccf->p_uccm);
	uccm |= UCCE_MPD;
	out_be32(uccf->p_uccm, uccm);

	/* Enable magic packet detection */
	maccfg2 = in_be32(&ug_regs->maccfg2);
	maccfg2 |= MACCFG2_MPE;
	out_be32(&ug_regs->maccfg2, maccfg2);
}

static void magic_packet_detection_disable(ucc_geth_private_t *ugeth)
{
	ucc_fast_private_t *uccf;
	ucc_geth_t *ug_regs;
	u32 maccfg2, uccm;

	uccf = ugeth->uccf;
	ug_regs = ugeth->ug_regs;

	/* Disable interrupts for magic packet detection */
	uccm = in_be32(uccf->p_uccm);
	uccm &= ~UCCE_MPD;
	out_be32(uccf->p_uccm, uccm);

	/* Disable magic packet detection */
	maccfg2 = in_be32(&ug_regs->maccfg2);
	maccfg2 &= ~MACCFG2_MPE;
	out_be32(&ug_regs->maccfg2, maccfg2);
}
#endif				/* CONFIG_UGETH_MAGIC_PACKET */
static inline int compare_addr(enet_addr_t *addr1, enet_addr_t *addr2)
{
	return memcmp(addr1, addr2, ENET_NUM_OCTETS_PER_ADDRESS);
}
static void get_statistics(ucc_geth_private_t *ugeth,
			   ucc_geth_tx_firmware_statistics_t *
			   tx_firmware_statistics,
			   ucc_geth_rx_firmware_statistics_t *
			   rx_firmware_statistics,
			   ucc_geth_hardware_statistics_t *hardware_statistics)
{
	ucc_fast_t *uf_regs;
	ucc_geth_t *ug_regs;
	ucc_geth_tx_firmware_statistics_pram_t *p_tx_fw_statistics_pram;
	ucc_geth_rx_firmware_statistics_pram_t *p_rx_fw_statistics_pram;
	int i;

	ug_regs = ugeth->ug_regs;
	uf_regs = (ucc_fast_t *) ug_regs;
	p_tx_fw_statistics_pram = ugeth->p_tx_fw_statistics_pram;
	p_rx_fw_statistics_pram = ugeth->p_rx_fw_statistics_pram;

	/* Tx firmware only if user handed pointer and driver actually
	   gathers Tx firmware statistics */
	if (tx_firmware_statistics && p_tx_fw_statistics_pram) {
		tx_firmware_statistics->sicoltx =
		    in_be32(&p_tx_fw_statistics_pram->sicoltx);
		tx_firmware_statistics->mulcoltx =
		    in_be32(&p_tx_fw_statistics_pram->mulcoltx);
		tx_firmware_statistics->latecoltxfr =
		    in_be32(&p_tx_fw_statistics_pram->latecoltxfr);
		tx_firmware_statistics->frabortduecol =
		    in_be32(&p_tx_fw_statistics_pram->frabortduecol);
		tx_firmware_statistics->frlostinmactxer =
		    in_be32(&p_tx_fw_statistics_pram->frlostinmactxer);
		tx_firmware_statistics->carriersenseertx =
		    in_be32(&p_tx_fw_statistics_pram->carriersenseertx);
		tx_firmware_statistics->frtxok =
		    in_be32(&p_tx_fw_statistics_pram->frtxok);
		tx_firmware_statistics->txfrexcessivedefer =
		    in_be32(&p_tx_fw_statistics_pram->txfrexcessivedefer);
		tx_firmware_statistics->txpkts256 =
		    in_be32(&p_tx_fw_statistics_pram->txpkts256);
		tx_firmware_statistics->txpkts512 =
		    in_be32(&p_tx_fw_statistics_pram->txpkts512);
		tx_firmware_statistics->txpkts1024 =
		    in_be32(&p_tx_fw_statistics_pram->txpkts1024);
		tx_firmware_statistics->txpktsjumbo =
		    in_be32(&p_tx_fw_statistics_pram->txpktsjumbo);
	}

	/* Rx firmware only if user handed pointer and driver actually
	 * gathers Rx firmware statistics */
	if (rx_firmware_statistics && p_rx_fw_statistics_pram) {
		rx_firmware_statistics->frrxfcser =
		    in_be32(&p_rx_fw_statistics_pram->frrxfcser);
		rx_firmware_statistics->fraligner =
		    in_be32(&p_rx_fw_statistics_pram->fraligner);
		rx_firmware_statistics->inrangelenrxer =
		    in_be32(&p_rx_fw_statistics_pram->inrangelenrxer);
		rx_firmware_statistics->outrangelenrxer =
		    in_be32(&p_rx_fw_statistics_pram->outrangelenrxer);
		rx_firmware_statistics->frtoolong =
		    in_be32(&p_rx_fw_statistics_pram->frtoolong);
		rx_firmware_statistics->runt =
		    in_be32(&p_rx_fw_statistics_pram->runt);
		rx_firmware_statistics->verylongevent =
		    in_be32(&p_rx_fw_statistics_pram->verylongevent);
		rx_firmware_statistics->symbolerror =
		    in_be32(&p_rx_fw_statistics_pram->symbolerror);
		rx_firmware_statistics->dropbsy =
		    in_be32(&p_rx_fw_statistics_pram->dropbsy);
		for (i = 0; i < 0x8; i++)
			rx_firmware_statistics->res0[i] =
			    p_rx_fw_statistics_pram->res0[i];
		rx_firmware_statistics->mismatchdrop =
		    in_be32(&p_rx_fw_statistics_pram->mismatchdrop);
		rx_firmware_statistics->underpkts =
		    in_be32(&p_rx_fw_statistics_pram->underpkts);
		rx_firmware_statistics->pkts256 =
		    in_be32(&p_rx_fw_statistics_pram->pkts256);
		rx_firmware_statistics->pkts512 =
		    in_be32(&p_rx_fw_statistics_pram->pkts512);
		rx_firmware_statistics->pkts1024 =
		    in_be32(&p_rx_fw_statistics_pram->pkts1024);
		rx_firmware_statistics->pktsjumbo =
		    in_be32(&p_rx_fw_statistics_pram->pktsjumbo);
		rx_firmware_statistics->frlossinmacer =
		    in_be32(&p_rx_fw_statistics_pram->frlossinmacer);
		rx_firmware_statistics->pausefr =
		    in_be32(&p_rx_fw_statistics_pram->pausefr);
		for (i = 0; i < 0x4; i++)
			rx_firmware_statistics->res1[i] =
			    p_rx_fw_statistics_pram->res1[i];
		rx_firmware_statistics->removevlan =
		    in_be32(&p_rx_fw_statistics_pram->removevlan);
		rx_firmware_statistics->replacevlan =
		    in_be32(&p_rx_fw_statistics_pram->replacevlan);
		rx_firmware_statistics->insertvlan =
		    in_be32(&p_rx_fw_statistics_pram->insertvlan);
	}

	/* Hardware only if user handed pointer and driver actually
	   gathers hardware statistics */
	if (hardware_statistics && (in_be32(&uf_regs->upsmr) & UPSMR_HSE)) {
		hardware_statistics->tx64 = in_be32(&ug_regs->tx64);
		hardware_statistics->tx127 = in_be32(&ug_regs->tx127);
		hardware_statistics->tx255 = in_be32(&ug_regs->tx255);
		hardware_statistics->rx64 = in_be32(&ug_regs->rx64);
		hardware_statistics->rx127 = in_be32(&ug_regs->rx127);
		hardware_statistics->rx255 = in_be32(&ug_regs->rx255);
		hardware_statistics->txok = in_be32(&ug_regs->txok);
		hardware_statistics->txcf = in_be16(&ug_regs->txcf);
		hardware_statistics->tmca = in_be32(&ug_regs->tmca);
		hardware_statistics->tbca = in_be32(&ug_regs->tbca);
		hardware_statistics->rxfok = in_be32(&ug_regs->rxfok);
		hardware_statistics->rxbok = in_be32(&ug_regs->rxbok);
		hardware_statistics->rbyt = in_be32(&ug_regs->rbyt);
		hardware_statistics->rmca = in_be32(&ug_regs->rmca);
		hardware_statistics->rbca = in_be32(&ug_regs->rbca);
	}
}
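/* Statistics come from three independent sources (Tx firmware PRAM,
 * Rx firmware PRAM and the MAC hardware counters); each block is copied
 * only if the caller supplied a destination and that source is enabled. */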
static void dump_bds(ucc_geth_private_t *ugeth)
{
	int i;
	int length;

	for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
		if (ugeth->p_tx_bd_ring[i]) {
			length =
			    (ugeth->ug_info->bdRingLenTx[i] *
			     UCC_GETH_SIZE_OF_BD);
			ugeth_info("TX BDs[%d]", i);
			mem_disp(ugeth->p_tx_bd_ring[i], length);
		}
	}
	for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
		if (ugeth->p_rx_bd_ring[i]) {
			length =
			    (ugeth->ug_info->bdRingLenRx[i] *
			     UCC_GETH_SIZE_OF_BD);
			ugeth_info("RX BDs[%d]", i);
			mem_disp(ugeth->p_rx_bd_ring[i], length);
		}
	}
}
static void dump_regs(ucc_geth_private_t *ugeth)
{
	int i;

	ugeth_info("UCC%d Geth registers:", ugeth->ug_info->uf_info.ucc_num);
	ugeth_info("Base address: 0x%08x", (u32) ugeth->ug_regs);

	ugeth_info("maccfg1    : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->maccfg1,
		   in_be32(&ugeth->ug_regs->maccfg1));
	ugeth_info("maccfg2    : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->maccfg2,
		   in_be32(&ugeth->ug_regs->maccfg2));
	ugeth_info("ipgifg     : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->ipgifg,
		   in_be32(&ugeth->ug_regs->ipgifg));
	ugeth_info("hafdup     : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->hafdup,
		   in_be32(&ugeth->ug_regs->hafdup));
	ugeth_info("miimcfg    : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->miimng.miimcfg,
		   in_be32(&ugeth->ug_regs->miimng.miimcfg));
	ugeth_info("miimcom    : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->miimng.miimcom,
		   in_be32(&ugeth->ug_regs->miimng.miimcom));
	ugeth_info("miimadd    : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->miimng.miimadd,
		   in_be32(&ugeth->ug_regs->miimng.miimadd));
	ugeth_info("miimcon    : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->miimng.miimcon,
		   in_be32(&ugeth->ug_regs->miimng.miimcon));
	ugeth_info("miimstat   : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->miimng.miimstat,
		   in_be32(&ugeth->ug_regs->miimng.miimstat));
	ugeth_info("miimmind   : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->miimng.miimind,
		   in_be32(&ugeth->ug_regs->miimng.miimind));
	ugeth_info("ifctl      : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->ifctl,
		   in_be32(&ugeth->ug_regs->ifctl));
	ugeth_info("ifstat     : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->ifstat,
		   in_be32(&ugeth->ug_regs->ifstat));
	ugeth_info("macstnaddr1: addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->macstnaddr1,
		   in_be32(&ugeth->ug_regs->macstnaddr1));
	ugeth_info("macstnaddr2: addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->macstnaddr2,
		   in_be32(&ugeth->ug_regs->macstnaddr2));
	ugeth_info("uempr      : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->uempr,
		   in_be32(&ugeth->ug_regs->uempr));
	ugeth_info("utbipar    : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->utbipar,
		   in_be32(&ugeth->ug_regs->utbipar));
	ugeth_info("uescr      : addr - 0x%08x, val - 0x%04x",
		   (u32) &ugeth->ug_regs->uescr,
		   in_be16(&ugeth->ug_regs->uescr));
	ugeth_info("tx64       : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->tx64,
		   in_be32(&ugeth->ug_regs->tx64));
	ugeth_info("tx127      : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->tx127,
		   in_be32(&ugeth->ug_regs->tx127));
	ugeth_info("tx255      : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->tx255,
		   in_be32(&ugeth->ug_regs->tx255));
	ugeth_info("rx64       : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->rx64,
		   in_be32(&ugeth->ug_regs->rx64));
	ugeth_info("rx127      : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->rx127,
		   in_be32(&ugeth->ug_regs->rx127));
	ugeth_info("rx255      : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->rx255,
		   in_be32(&ugeth->ug_regs->rx255));
	ugeth_info("txok       : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->txok,
		   in_be32(&ugeth->ug_regs->txok));
	ugeth_info("txcf       : addr - 0x%08x, val - 0x%04x",
		   (u32) &ugeth->ug_regs->txcf,
		   in_be16(&ugeth->ug_regs->txcf));
	ugeth_info("tmca       : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->tmca,
		   in_be32(&ugeth->ug_regs->tmca));
	ugeth_info("tbca       : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->tbca,
		   in_be32(&ugeth->ug_regs->tbca));
	ugeth_info("rxfok      : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->rxfok,
		   in_be32(&ugeth->ug_regs->rxfok));
	ugeth_info("rxbok      : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->rxbok,
		   in_be32(&ugeth->ug_regs->rxbok));
	ugeth_info("rbyt       : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->rbyt,
		   in_be32(&ugeth->ug_regs->rbyt));
	ugeth_info("rmca       : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->rmca,
		   in_be32(&ugeth->ug_regs->rmca));
	ugeth_info("rbca       : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->rbca,
		   in_be32(&ugeth->ug_regs->rbca));
	ugeth_info("scar       : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->scar,
		   in_be32(&ugeth->ug_regs->scar));
	ugeth_info("scam       : addr - 0x%08x, val - 0x%08x",
		   (u32) &ugeth->ug_regs->scam,
		   in_be32(&ugeth->ug_regs->scam));
	if (ugeth->p_thread_data_tx) {
		int numThreadsTxNumerical;
		switch (ugeth->ug_info->numThreadsTx) {
		case UCC_GETH_NUM_OF_THREADS_1:
			numThreadsTxNumerical = 1;
			break;
		case UCC_GETH_NUM_OF_THREADS_2:
			numThreadsTxNumerical = 2;
			break;
		case UCC_GETH_NUM_OF_THREADS_4:
			numThreadsTxNumerical = 4;
			break;
		case UCC_GETH_NUM_OF_THREADS_6:
			numThreadsTxNumerical = 6;
			break;
		case UCC_GETH_NUM_OF_THREADS_8:
			numThreadsTxNumerical = 8;
			break;
		default:
			numThreadsTxNumerical = 0;
			break;
		}

		ugeth_info("Thread data TXs:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_thread_data_tx);
		for (i = 0; i < numThreadsTxNumerical; i++) {
			ugeth_info("Thread data TX[%d]:", i);
			ugeth_info("Base address: 0x%08x",
				   (u32) &ugeth->p_thread_data_tx[i]);
			mem_disp((u8 *) &ugeth->p_thread_data_tx[i],
				 sizeof(ucc_geth_thread_data_tx_t));
		}
	}
	if (ugeth->p_thread_data_rx) {
		int numThreadsRxNumerical;
		switch (ugeth->ug_info->numThreadsRx) {
		case UCC_GETH_NUM_OF_THREADS_1:
			numThreadsRxNumerical = 1;
			break;
		case UCC_GETH_NUM_OF_THREADS_2:
			numThreadsRxNumerical = 2;
			break;
		case UCC_GETH_NUM_OF_THREADS_4:
			numThreadsRxNumerical = 4;
			break;
		case UCC_GETH_NUM_OF_THREADS_6:
			numThreadsRxNumerical = 6;
			break;
		case UCC_GETH_NUM_OF_THREADS_8:
			numThreadsRxNumerical = 8;
			break;
		default:
			numThreadsRxNumerical = 0;
			break;
		}

		ugeth_info("Thread data RX:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_thread_data_rx);
		for (i = 0; i < numThreadsRxNumerical; i++) {
			ugeth_info("Thread data RX[%d]:", i);
			ugeth_info("Base address: 0x%08x",
				   (u32) &ugeth->p_thread_data_rx[i]);
			mem_disp((u8 *) &ugeth->p_thread_data_rx[i],
				 sizeof(ucc_geth_thread_data_rx_t));
		}
	}
	if (ugeth->p_exf_glbl_param) {
		ugeth_info("EXF global param:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_exf_glbl_param);
		mem_disp((u8 *) ugeth->p_exf_glbl_param,
			 sizeof(*ugeth->p_exf_glbl_param));
	}
	if (ugeth->p_tx_glbl_pram) {
		ugeth_info("TX global param:");
		ugeth_info("Base address: 0x%08x", (u32) ugeth->p_tx_glbl_pram);
		ugeth_info("temoder      : addr - 0x%08x, val - 0x%04x",
			   (u32) &ugeth->p_tx_glbl_pram->temoder,
			   in_be16(&ugeth->p_tx_glbl_pram->temoder));
		ugeth_info("sqptr        : addr - 0x%08x, val - 0x%08x",
			   (u32) &ugeth->p_tx_glbl_pram->sqptr,
			   in_be32(&ugeth->p_tx_glbl_pram->sqptr));
		ugeth_info("schedulerbasepointer: addr - 0x%08x, val - 0x%08x",
			   (u32) &ugeth->p_tx_glbl_pram->schedulerbasepointer,
			   in_be32(&ugeth->p_tx_glbl_pram->
				   schedulerbasepointer));
		ugeth_info("txrmonbaseptr: addr - 0x%08x, val - 0x%08x",
			   (u32) &ugeth->p_tx_glbl_pram->txrmonbaseptr,
			   in_be32(&ugeth->p_tx_glbl_pram->txrmonbaseptr));
		ugeth_info("tstate       : addr - 0x%08x, val - 0x%08x",
			   (u32) &ugeth->p_tx_glbl_pram->tstate,
			   in_be32(&ugeth->p_tx_glbl_pram->tstate));
		for (i = 0; i < 8; i++)
			ugeth_info("iphoffset[%d] : addr - 0x%08x, val - 0x%02x",
				   i,
				   (u32) &ugeth->p_tx_glbl_pram->iphoffset[i],
				   ugeth->p_tx_glbl_pram->iphoffset[i]);
		for (i = 0; i < 8; i++)
			ugeth_info("vtagtable[%d] : addr - 0x%08x, val - 0x%08x",
				   i,
				   (u32) &ugeth->p_tx_glbl_pram->vtagtable[i],
				   in_be32(&ugeth->p_tx_glbl_pram->
					   vtagtable[i]));
		ugeth_info("tqptr        : addr - 0x%08x, val - 0x%08x",
			   (u32) &ugeth->p_tx_glbl_pram->tqptr,
			   in_be32(&ugeth->p_tx_glbl_pram->tqptr));
	}
	if (ugeth->p_rx_glbl_pram) {
		ugeth_info("RX global param:");
		ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_glbl_pram);
		ugeth_info("remoder         : addr - 0x%08x, val - 0x%08x",
			   (u32) &ugeth->p_rx_glbl_pram->remoder,
			   in_be32(&ugeth->p_rx_glbl_pram->remoder));
		ugeth_info("rqptr           : addr - 0x%08x, val - 0x%08x",
			   (u32) &ugeth->p_rx_glbl_pram->rqptr,
			   in_be32(&ugeth->p_rx_glbl_pram->rqptr));
		ugeth_info("typeorlen       : addr - 0x%08x, val - 0x%04x",
			   (u32) &ugeth->p_rx_glbl_pram->typeorlen,
			   in_be16(&ugeth->p_rx_glbl_pram->typeorlen));
		ugeth_info("rxgstpack       : addr - 0x%08x, val - 0x%02x",
			   (u32) &ugeth->p_rx_glbl_pram->rxgstpack,
			   ugeth->p_rx_glbl_pram->rxgstpack);
		ugeth_info("rxrmonbaseptr   : addr - 0x%08x, val - 0x%08x",
			   (u32) &ugeth->p_rx_glbl_pram->rxrmonbaseptr,
			   in_be32(&ugeth->p_rx_glbl_pram->rxrmonbaseptr));
		ugeth_info("intcoalescingptr: addr - 0x%08x, val - 0x%08x",
			   (u32) &ugeth->p_rx_glbl_pram->intcoalescingptr,
			   in_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr));
		ugeth_info("rstate          : addr - 0x%08x, val - 0x%02x",
			   (u32) &ugeth->p_rx_glbl_pram->rstate,
			   ugeth->p_rx_glbl_pram->rstate);
		ugeth_info("mrblr           : addr - 0x%08x, val - 0x%04x",
			   (u32) &ugeth->p_rx_glbl_pram->mrblr,
			   in_be16(&ugeth->p_rx_glbl_pram->mrblr));
		ugeth_info("rbdqptr         : addr - 0x%08x, val - 0x%08x",
			   (u32) &ugeth->p_rx_glbl_pram->rbdqptr,
			   in_be32(&ugeth->p_rx_glbl_pram->rbdqptr));
		ugeth_info("mflr            : addr - 0x%08x, val - 0x%04x",
			   (u32) &ugeth->p_rx_glbl_pram->mflr,
			   in_be16(&ugeth->p_rx_glbl_pram->mflr));
		ugeth_info("minflr          : addr - 0x%08x, val - 0x%04x",
			   (u32) &ugeth->p_rx_glbl_pram->minflr,
			   in_be16(&ugeth->p_rx_glbl_pram->minflr));
		ugeth_info("maxd1           : addr - 0x%08x, val - 0x%04x",
			   (u32) &ugeth->p_rx_glbl_pram->maxd1,
			   in_be16(&ugeth->p_rx_glbl_pram->maxd1));
		ugeth_info("maxd2           : addr - 0x%08x, val - 0x%04x",
			   (u32) &ugeth->p_rx_glbl_pram->maxd2,
			   in_be16(&ugeth->p_rx_glbl_pram->maxd2));
		ugeth_info("ecamptr         : addr - 0x%08x, val - 0x%08x",
			   (u32) &ugeth->p_rx_glbl_pram->ecamptr,
			   in_be32(&ugeth->p_rx_glbl_pram->ecamptr));
		ugeth_info("l2qt            : addr - 0x%08x, val - 0x%08x",
			   (u32) &ugeth->p_rx_glbl_pram->l2qt,
			   in_be32(&ugeth->p_rx_glbl_pram->l2qt));
		for (i = 0; i < 8; i++)
			ugeth_info("l3qt[%d]         : addr - 0x%08x, val - 0x%08x",
				   i,
				   (u32) &ugeth->p_rx_glbl_pram->l3qt[i],
				   in_be32(&ugeth->p_rx_glbl_pram->l3qt[i]));
		ugeth_info("vlantype        : addr - 0x%08x, val - 0x%04x",
			   (u32) &ugeth->p_rx_glbl_pram->vlantype,
			   in_be16(&ugeth->p_rx_glbl_pram->vlantype));
		ugeth_info("vlantci         : addr - 0x%08x, val - 0x%04x",
			   (u32) &ugeth->p_rx_glbl_pram->vlantci,
			   in_be16(&ugeth->p_rx_glbl_pram->vlantci));
		for (i = 0; i < 64; i++)
			ugeth_info
			    ("addressfiltering[%d]: addr - 0x%08x, val - 0x%02x",
			     i,
			     (u32) &ugeth->p_rx_glbl_pram->addressfiltering[i],
			     ugeth->p_rx_glbl_pram->addressfiltering[i]);
		ugeth_info("exfGlobalParam  : addr - 0x%08x, val - 0x%08x",
			   (u32) &ugeth->p_rx_glbl_pram->exfGlobalParam,
			   in_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam));
	}
	if (ugeth->p_send_q_mem_reg) {
		ugeth_info("Send Q memory registers:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_send_q_mem_reg);
		for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
			ugeth_info("SQQD[%d]:", i);
			ugeth_info("Base address: 0x%08x",
				   (u32) &ugeth->p_send_q_mem_reg->sqqd[i]);
			mem_disp((u8 *) &ugeth->p_send_q_mem_reg->sqqd[i],
				 sizeof(ucc_geth_send_queue_qd_t));
		}
	}
	if (ugeth->p_scheduler) {
		ugeth_info("Scheduler:");
		ugeth_info("Base address: 0x%08x", (u32) ugeth->p_scheduler);
		mem_disp((u8 *) ugeth->p_scheduler,
			 sizeof(*ugeth->p_scheduler));
	}
	if (ugeth->p_tx_fw_statistics_pram) {
		ugeth_info("TX FW statistics pram:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_tx_fw_statistics_pram);
		mem_disp((u8 *) ugeth->p_tx_fw_statistics_pram,
			 sizeof(*ugeth->p_tx_fw_statistics_pram));
	}
	if (ugeth->p_rx_fw_statistics_pram) {
		ugeth_info("RX FW statistics pram:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_rx_fw_statistics_pram);
		mem_disp((u8 *) ugeth->p_rx_fw_statistics_pram,
			 sizeof(*ugeth->p_rx_fw_statistics_pram));
	}
	if (ugeth->p_rx_irq_coalescing_tbl) {
		ugeth_info("RX IRQ coalescing tables:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_rx_irq_coalescing_tbl);
		for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
			ugeth_info("RX IRQ coalescing table entry[%d]:", i);
			ugeth_info("Base address: 0x%08x",
				   (u32) &ugeth->p_rx_irq_coalescing_tbl->
				   coalescingentry[i]);
			ugeth_info
			    ("interruptcoalescingmaxvalue: addr - 0x%08x, val - 0x%08x",
			     (u32) &ugeth->p_rx_irq_coalescing_tbl->
			     coalescingentry[i].interruptcoalescingmaxvalue,
			     in_be32(&ugeth->p_rx_irq_coalescing_tbl->
				     coalescingentry[i].
				     interruptcoalescingmaxvalue));
			ugeth_info
			    ("interruptcoalescingcounter : addr - 0x%08x, val - 0x%08x",
			     (u32) &ugeth->p_rx_irq_coalescing_tbl->
			     coalescingentry[i].interruptcoalescingcounter,
			     in_be32(&ugeth->p_rx_irq_coalescing_tbl->
				     coalescingentry[i].
				     interruptcoalescingcounter));
		}
	}
	if (ugeth->p_rx_bd_qs_tbl) {
		ugeth_info("RX BD QS tables:");
		ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_bd_qs_tbl);
		for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
			ugeth_info("RX BD QS table[%d]:", i);
			ugeth_info("Base address: 0x%08x",
				   (u32) &ugeth->p_rx_bd_qs_tbl[i]);
			ugeth_info
			    ("bdbaseptr        : addr - 0x%08x, val - 0x%08x",
			     (u32) &ugeth->p_rx_bd_qs_tbl[i].bdbaseptr,
			     in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr));
			ugeth_info
			    ("bdptr            : addr - 0x%08x, val - 0x%08x",
			     (u32) &ugeth->p_rx_bd_qs_tbl[i].bdptr,
			     in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdptr));
			ugeth_info
			    ("externalbdbaseptr: addr - 0x%08x, val - 0x%08x",
			     (u32) &ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
			     in_be32(&ugeth->p_rx_bd_qs_tbl[i].
				     externalbdbaseptr));
			ugeth_info
			    ("externalbdptr    : addr - 0x%08x, val - 0x%08x",
			     (u32) &ugeth->p_rx_bd_qs_tbl[i].externalbdptr,
			     in_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdptr));
			ugeth_info("ucode RX Prefetched BDs:");
			ugeth_info("Base address: 0x%08x",
				   (u32)
				   qe_muram_addr(in_be32
						 (&ugeth->p_rx_bd_qs_tbl[i].
						  bdbaseptr)));
			mem_disp((u8 *)
				 qe_muram_addr(in_be32
					       (&ugeth->p_rx_bd_qs_tbl[i].
						bdbaseptr)),
				 sizeof(ucc_geth_rx_prefetched_bds_t));
		}
	}
	if (ugeth->p_init_enet_param_shadow) {
		int size;
		ugeth_info("Init enet param shadow:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_init_enet_param_shadow);
		mem_disp((u8 *) ugeth->p_init_enet_param_shadow,
			 sizeof(*ugeth->p_init_enet_param_shadow));

		size = sizeof(ucc_geth_thread_rx_pram_t);
		if (ugeth->ug_info->rxExtendedFiltering) {
			size +=
			    THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
			if (ugeth->ug_info->largestexternallookupkeysize ==
			    QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
				size +=
				    THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
			if (ugeth->ug_info->largestexternallookupkeysize ==
			    QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
				size +=
				    THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
		}

		dump_init_enet_entries(ugeth,
				       &(ugeth->p_init_enet_param_shadow->
					 txthread[0]),
				       ENET_INIT_PARAM_MAX_ENTRIES_TX,
				       sizeof(ucc_geth_thread_tx_pram_t),
				       ugeth->ug_info->riscTx, 0);
		dump_init_enet_entries(ugeth,
				       &(ugeth->p_init_enet_param_shadow->
					 rxthread[0]),
				       ENET_INIT_PARAM_MAX_ENTRIES_RX, size,
				       ugeth->ug_info->riscRx, 1);
	}
}
static void init_default_reg_vals(volatile u32 *upsmr_register,
				  volatile u32 *maccfg1_register,
				  volatile u32 *maccfg2_register)
{
	out_be32(upsmr_register, UCC_GETH_UPSMR_INIT);
	out_be32(maccfg1_register, UCC_GETH_MACCFG1_INIT);
	out_be32(maccfg2_register, UCC_GETH_MACCFG2_INIT);
}
static int init_half_duplex_params(int alt_beb,
				   int back_pressure_no_backoff,
				   int no_backoff,
				   int excess_defer,
				   u8 alt_beb_truncation,
				   u8 max_retransmissions,
				   u8 collision_window,
				   volatile u32 *hafdup_register)
{
	u32 value = 0;

	if ((alt_beb_truncation > HALFDUP_ALT_BEB_TRUNCATION_MAX) ||
	    (max_retransmissions > HALFDUP_MAX_RETRANSMISSION_MAX) ||
	    (collision_window > HALFDUP_COLLISION_WINDOW_MAX))
		return -EINVAL;

	value = (u32) (alt_beb_truncation << HALFDUP_ALT_BEB_TRUNCATION_SHIFT);

	if (alt_beb)
		value |= HALFDUP_ALT_BEB;
	if (back_pressure_no_backoff)
		value |= HALFDUP_BACK_PRESSURE_NO_BACKOFF;
	if (no_backoff)
		value |= HALFDUP_NO_BACKOFF;
	if (excess_defer)
		value |= HALFDUP_EXCESSIVE_DEFER;

	value |= (max_retransmissions << HALFDUP_MAX_RETRANSMISSION_SHIFT);

	value |= collision_window;

	out_be32(hafdup_register, value);
	return 0;
}
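/*
 * Example with the driver defaults above (altBebTruncation = 0xa,
 * maxRetransmission = 0xf, collisionWindow = 0x37): all three values
 * pass the range checks and are packed into a single 32-bit HAFDUP
 * value, each field shifted into its own bit range.
 */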
static int init_inter_frame_gap_params(u8 non_btb_cs_ipg,
				       u8 non_btb_ipg,
				       u8 min_ifg,
				       u8 btb_ipg,
				       volatile u32 *ipgifg_register)
{
	u32 value = 0;

	/* Non-Back-to-back IPG part 1 should be <= Non-Back-to-back
	   IPG part 2 */
	if (non_btb_cs_ipg > non_btb_ipg)
		return -EINVAL;

	if ((non_btb_cs_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART1_MAX) ||
	    (non_btb_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART2_MAX) ||
	    /* (min_ifg > IPGIFG_MINIMUM_IFG_ENFORCEMENT_MAX) || */
	    (btb_ipg > IPGIFG_BACK_TO_BACK_IFG_MAX))
		return -EINVAL;

	value |=
	    ((non_btb_cs_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT) &
	     IPGIFG_NBTB_CS_IPG_MASK);
	value |=
	    ((non_btb_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT) &
	     IPGIFG_NBTB_IPG_MASK);
	value |=
	    ((min_ifg << IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT) &
	     IPGIFG_MIN_IFG_MASK);
	value |= (btb_ipg & IPGIFG_BTB_IPG_MASK);

	out_be32(ipgifg_register, value);
	return 0;
}
static int init_flow_control_params(u32 automatic_flow_control_mode,
				    int rx_flow_control_enable,
				    int tx_flow_control_enable,
				    u16 pause_period,
				    u16 extension_field,
				    volatile u32 *upsmr_register,
				    volatile u32 *uempr_register,
				    volatile u32 *maccfg1_register)
{
	u32 value = 0;

	/* Set UEMPR register */
	value = (u32) pause_period << UEMPR_PAUSE_TIME_VALUE_SHIFT;
	value |= (u32) extension_field << UEMPR_EXTENDED_PAUSE_TIME_VALUE_SHIFT;
	out_be32(uempr_register, value);

	/* Set UPSMR register */
	value = in_be32(upsmr_register);
	value |= automatic_flow_control_mode;
	out_be32(upsmr_register, value);

	value = in_be32(maccfg1_register);
	if (rx_flow_control_enable)
		value |= MACCFG1_FLOW_RX;
	if (tx_flow_control_enable)
		value |= MACCFG1_FLOW_TX;
	out_be32(maccfg1_register, value);

	return 0;
}
static int init_hw_statistics_gathering_mode(int enable_hardware_statistics,
					     int auto_zero_hardware_statistics,
					     volatile u32 *upsmr_register,
					     volatile u16 *uescr_register)
{
	u32 upsmr_value = 0;
	u16 uescr_value = 0;
	/* Enable hardware statistics gathering if requested */
	if (enable_hardware_statistics) {
		upsmr_value = in_be32(upsmr_register);
		upsmr_value |= UPSMR_HSE;
		out_be32(upsmr_register, upsmr_value);
	}

	/* Clear hardware statistics counters */
	uescr_value = in_be16(uescr_register);
	uescr_value |= UESCR_CLRCNT;
	/* Automatically zero hardware statistics counters on read,
	   if requested */
	if (auto_zero_hardware_statistics)
		uescr_value |= UESCR_AUTOZ;
	out_be16(uescr_register, uescr_value);

	return 0;
}
static int init_firmware_statistics_gathering_mode(int
		enable_tx_firmware_statistics,
		int enable_rx_firmware_statistics,
		volatile u32 *tx_rmon_base_ptr,
		u32 tx_firmware_statistics_structure_address,
		volatile u32 *rx_rmon_base_ptr,
		u32 rx_firmware_statistics_structure_address,
		volatile u16 *temoder_register,
		volatile u32 *remoder_register)
{
	/* Note: this function does not check if */
	/* the parameters it receives are NULL */
	u16 temoder_value;
	u32 remoder_value;

	if (enable_tx_firmware_statistics) {
		out_be32(tx_rmon_base_ptr,
			 tx_firmware_statistics_structure_address);
		temoder_value = in_be16(temoder_register);
		temoder_value |= TEMODER_TX_RMON_STATISTICS_ENABLE;
		out_be16(temoder_register, temoder_value);
	}

	if (enable_rx_firmware_statistics) {
		out_be32(rx_rmon_base_ptr,
			 rx_firmware_statistics_structure_address);
		remoder_value = in_be32(remoder_register);
		remoder_value |= REMODER_RX_RMON_STATISTICS_ENABLE;
		out_be32(remoder_register, remoder_value);
	}

	return 0;
}
static int init_mac_station_addr_regs(u8 address_byte_0,
				      u8 address_byte_1,
				      u8 address_byte_2,
				      u8 address_byte_3,
				      u8 address_byte_4,
				      u8 address_byte_5,
				      volatile u32 *macstnaddr1_register,
				      volatile u32 *macstnaddr2_register)
{
	u32 value = 0;

	/* Example: for a station address of 0x12345678ABCD, */
	/* 0x12 is byte 0, 0x34 is byte 1 and so on and 0xCD is byte 5 */

	/* MACSTNADDR1 Register: */

	/* 0                      7   8                      15  */
	/* station address byte 5     station address byte 4     */
	/* 16                     23  24                     31  */
	/* station address byte 3     station address byte 2     */
	value |= (u32) ((address_byte_2 << 0) & 0x000000FF);
	value |= (u32) ((address_byte_3 << 8) & 0x0000FF00);
	value |= (u32) ((address_byte_4 << 16) & 0x00FF0000);
	value |= (u32) ((address_byte_5 << 24) & 0xFF000000);

	out_be32(macstnaddr1_register, value);

	/* MACSTNADDR2 Register: */

	/* 0                      7   8                      15  */
	/* station address byte 1     station address byte 0     */
	/* 16                     23  24                     31  */
	/* reserved                   reserved                   */
	value = 0;
	value |= (u32) ((address_byte_0 << 16) & 0x00FF0000);
	value |= (u32) ((address_byte_1 << 24) & 0xFF000000);

	out_be32(macstnaddr2_register, value);

	return 0;
}
static int init_mac_duplex_mode(int full_duplex,
				int limited_to_full_duplex,
				volatile u32 *maccfg2_register)
{
	u32 value = 0;

	/* some interfaces must work in full duplex mode */
	if ((full_duplex == 0) && (limited_to_full_duplex == 1))
		return -EINVAL;

	value = in_be32(maccfg2_register);

	if (full_duplex)
		value |= MACCFG2_FDX;
	else
		value &= ~MACCFG2_FDX;

	out_be32(maccfg2_register, value);
	return 0;
}
static int init_check_frame_length_mode(int length_check,
					volatile u32 *maccfg2_register)
{
	u32 value = 0;

	value = in_be32(maccfg2_register);

	if (length_check)
		value |= MACCFG2_LC;
	else
		value &= ~MACCFG2_LC;

	out_be32(maccfg2_register, value);
	return 0;
}
static int init_preamble_length(u8 preamble_length,
				volatile u32 *maccfg2_register)
{
	u32 value = 0;

	if ((preamble_length < 3) || (preamble_length > 7))
		return -EINVAL;

	value = in_be32(maccfg2_register);
	value &= ~MACCFG2_PREL_MASK;
	value |= (preamble_length << MACCFG2_PREL_SHIFT);
	out_be32(maccfg2_register, value);
	return 0;
}
static int init_mii_management_configuration(int reset_mgmt,
					     int preamble_suppress,
					     volatile u32 *miimcfg_register,
					     volatile u32 *miimind_register)
{
	unsigned int timeout = PHY_INIT_TIMEOUT;
	u32 value = 0;

	value = in_be32(miimcfg_register);
	if (reset_mgmt) {
		value |= MIIMCFG_RESET_MANAGEMENT;
		out_be32(miimcfg_register, value);
	}

	value = 0;

	if (preamble_suppress)
		value |= MIIMCFG_NO_PREAMBLE;

	value |= UCC_GETH_MIIMCFG_MNGMNT_CLC_DIV_INIT;
	out_be32(miimcfg_register, value);

	/* Wait until the bus is free */
	while ((in_be32(miimind_register) & MIIMIND_BUSY) && timeout--)
		cpu_relax();

	if (timeout <= 0) {
		ugeth_err("%s: The MII Bus is stuck!", __FUNCTION__);
		return -ETIMEDOUT;
	}

	return 0;
}
static int init_rx_parameters(int reject_broadcast,
			      int receive_short_frames,
			      int promiscuous, volatile u32 *upsmr_register)
{
	u32 value = 0;

	value = in_be32(upsmr_register);

	if (reject_broadcast)
		value |= UPSMR_BRO;
	else
		value &= ~UPSMR_BRO;

	if (receive_short_frames)
		value |= UPSMR_RSH;
	else
		value &= ~UPSMR_RSH;

	if (promiscuous)
		value |= UPSMR_PRO;
	else
		value &= ~UPSMR_PRO;

	out_be32(upsmr_register, value);

	return 0;
}
static int init_max_rx_buff_len(u16 max_rx_buf_len,
				volatile u16 *mrblr_register)
{
	/* max_rx_buf_len value must be a multiple of 128 */
	if ((max_rx_buf_len == 0)
	    || (max_rx_buf_len % UCC_GETH_MRBLR_ALIGNMENT))
		return -EINVAL;

	out_be16(mrblr_register, max_rx_buf_len);
	return 0;
}
static int init_min_frame_len(u16 min_frame_length,
			      volatile u16 *minflr_register,
			      volatile u16 *mrblr_register)
{
	u16 mrblr_value = 0;

	mrblr_value = in_be16(mrblr_register);
	if (min_frame_length >= (mrblr_value - 4))
		return -EINVAL;

	out_be16(minflr_register, min_frame_length);
	return 0;
}
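/*
 * adjust_enet_interface() below reprograms MACCFG2, UPSMR and UTBIPAR
 * from the mode bits returned by get_interface_details(); for TBI modes
 * it additionally clears the autonegotiation bit in the TBI PHY control
 * register through the MII interface.
 */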
static int adjust_enet_interface(ucc_geth_private_t *ugeth)
{
	ucc_geth_info_t *ug_info;
	ucc_geth_t *ug_regs;
	ucc_fast_t *uf_regs;
	enet_speed_e speed;
	int ret_val, rpm = 0, tbi = 0, r10m = 0, rmm =
	    0, limited_to_full_duplex = 0;
	u32 upsmr, maccfg2, utbipar, tbiBaseAddress;
	u16 value;

	ugeth_vdbg("%s: IN", __FUNCTION__);

	ug_info = ugeth->ug_info;
	ug_regs = ugeth->ug_regs;
	uf_regs = ugeth->uccf->uf_regs;

	/* Analyze enet_interface according to Interface Mode Configuration
	   table */
	ret_val =
	    get_interface_details(ug_info->enet_interface, &speed, &r10m, &rmm,
				  &rpm, &tbi, &limited_to_full_duplex);
	if (ret_val != 0) {
		ugeth_err
		    ("%s: half duplex not supported in requested configuration.",
		     __FUNCTION__);
		return ret_val;
	}

	/* Set MACCFG2 */
	maccfg2 = in_be32(&ug_regs->maccfg2);
	maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;
	if ((speed == ENET_SPEED_10BT) || (speed == ENET_SPEED_100BT))
		maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
	else if (speed == ENET_SPEED_1000BT)
		maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
	maccfg2 |= ug_info->padAndCrc;
	out_be32(&ug_regs->maccfg2, maccfg2);

	/* Set UPSMR */
	upsmr = in_be32(&uf_regs->upsmr);
	upsmr &= ~(UPSMR_RPM | UPSMR_R10M | UPSMR_TBIM | UPSMR_RMM);
	if (rpm)
		upsmr |= UPSMR_RPM;
	if (r10m)
		upsmr |= UPSMR_R10M;
	if (tbi)
		upsmr |= UPSMR_TBIM;
	if (rmm)
		upsmr |= UPSMR_RMM;
	out_be32(&uf_regs->upsmr, upsmr);

	/* Set UTBIPAR */
	utbipar = in_be32(&ug_regs->utbipar);
	utbipar &= ~UTBIPAR_PHY_ADDRESS_MASK;
	if (tbi)
		utbipar |=
		    (ug_info->phy_address +
		     ugeth->ug_info->uf_info.
		     ucc_num) << UTBIPAR_PHY_ADDRESS_SHIFT;
	else
		utbipar |=
		    (0x10 +
		     ugeth->ug_info->uf_info.
		     ucc_num) << UTBIPAR_PHY_ADDRESS_SHIFT;
	out_be32(&ug_regs->utbipar, utbipar);

	/* Disable autonegotiation in tbi mode, because by default it
	   comes up in autonegotiation mode. */
	/* Note that this depends on proper setting in utbipar register. */
	if (tbi) {
		tbiBaseAddress = in_be32(&ug_regs->utbipar);
		tbiBaseAddress &= UTBIPAR_PHY_ADDRESS_MASK;
		tbiBaseAddress >>= UTBIPAR_PHY_ADDRESS_SHIFT;
		value =
		    ugeth->mii_info->mdio_read(ugeth->dev, (u8) tbiBaseAddress,
					       ENET_TBI_MII_CR);
		value &= ~0x1000;	/* Turn off autonegotiation */
		ugeth->mii_info->mdio_write(ugeth->dev, (u8) tbiBaseAddress,
					    ENET_TBI_MII_CR, value);
	}

	ret_val = init_mac_duplex_mode(1,
				       limited_to_full_duplex,
				       &ug_regs->maccfg2);
	if (ret_val != 0) {
		ugeth_err
		    ("%s: half duplex not supported in requested configuration.",
		     __FUNCTION__);
		return ret_val;
	}

	init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2);

	ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2);
	if (ret_val != 0) {
		ugeth_err
		    ("%s: Preamble length must be between 3 and 7 inclusive.",
		     __FUNCTION__);
		return ret_val;
	}

	return 0;
}
/* Called every time the controller might need to be made
 * aware of new link state.  The PHY code conveys this
 * information through variables in the ugeth structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
	ucc_geth_private_t *ugeth = netdev_priv(dev);
	ucc_geth_t *ug_regs;
	u32 tempval;
	struct ugeth_mii_info *mii_info = ugeth->mii_info;

	ug_regs = ugeth->ug_regs;

	if (mii_info->link) {
		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (mii_info->duplex != ugeth->oldduplex) {
			if (!(mii_info->duplex)) {
				tempval = in_be32(&ug_regs->maccfg2);
				tempval &= ~(MACCFG2_FDX);
				out_be32(&ug_regs->maccfg2, tempval);

				ugeth_info("%s: Half Duplex", dev->name);
			} else {
				tempval = in_be32(&ug_regs->maccfg2);
				tempval |= MACCFG2_FDX;
				out_be32(&ug_regs->maccfg2, tempval);

				ugeth_info("%s: Full Duplex", dev->name);
			}

			ugeth->oldduplex = mii_info->duplex;
		}

		if (mii_info->speed != ugeth->oldspeed) {
			switch (mii_info->speed) {
			case 1000:
#ifdef CONFIG_MPC836x
/* FIXME: This code is for 100Mbs BUG fixing,
   remove this when it is fixed!!! */
				if (ugeth->ug_info->enet_interface ==
				    ENET_1000_GMII)
				/* Run the commands which initialize the PHY */
				{
					tempval =
					    (u32) mii_info->mdio_read(ugeth->
						dev, mii_info->mii_id, 0x1b);
					tempval |= 0x000f;
					mii_info->mdio_write(ugeth->dev,
						mii_info->mii_id, 0x1b,
						(u16) tempval);
					tempval =
					    (u32) mii_info->mdio_read(ugeth->
						dev, mii_info->mii_id,
						MII_BMCR);
					mii_info->mdio_write(ugeth->dev,
						mii_info->mii_id, MII_BMCR,
						(u16) (tempval | BMCR_RESET));
				} else if (ugeth->ug_info->enet_interface ==
					   ENET_1000_RGMII)
				/* Run the commands which initialize the PHY */
				{
					tempval =
					    (u32) mii_info->mdio_read(ugeth->
						dev, mii_info->mii_id, 0x1b);
					tempval = (tempval & ~0x000f) | 0x000b;
					mii_info->mdio_write(ugeth->dev,
						mii_info->mii_id, 0x1b,
						(u16) tempval);
					tempval =
					    (u32) mii_info->mdio_read(ugeth->
						dev, mii_info->mii_id,
						MII_BMCR);
					mii_info->mdio_write(ugeth->dev,
						mii_info->mii_id, MII_BMCR,
						(u16) (tempval | BMCR_RESET));
				}
#endif				/* CONFIG_MPC836x */
				adjust_enet_interface(ugeth);
				break;
			case 100:
			case 10:
#ifdef CONFIG_MPC836x
/* FIXME: This code is for 100Mbs BUG fixing,
   remove these lines when it is fixed!!! */
				ugeth->ug_info->enet_interface = ENET_100_RGMII;
				tempval =
				    (u32) mii_info->mdio_read(ugeth->dev,
					mii_info->mii_id,
					0x1b);
				tempval = (tempval & ~0x000f) | 0x000b;
				mii_info->mdio_write(ugeth->dev,
					mii_info->mii_id, 0x1b,
					(u16) tempval);
				tempval =
				    (u32) mii_info->mdio_read(ugeth->dev,
					mii_info->mii_id,
					MII_BMCR);
				mii_info->mdio_write(ugeth->dev,
					mii_info->mii_id, MII_BMCR,
					(u16) (tempval | BMCR_RESET));
#endif				/* CONFIG_MPC836x */
				adjust_enet_interface(ugeth);
				break;
			default:
				ugeth_warn
				    ("%s: Ack! Speed (%d) is not 10/100/1000!",
				     dev->name, mii_info->speed);
				break;
			}

			ugeth_info("%s: Speed %dBT", dev->name,
				   mii_info->speed);

			ugeth->oldspeed = mii_info->speed;
		}

		if (!ugeth->oldlink) {
			ugeth_info("%s: Link is up", dev->name);
			ugeth->oldlink = 1;
			netif_carrier_on(dev);
			netif_schedule(dev);
		}
	} else {
		if (ugeth->oldlink) {
			ugeth_info("%s: Link is down", dev->name);
			ugeth->oldlink = 0;
			ugeth->oldspeed = 0;
			ugeth->oldduplex = -1;
			netif_carrier_off(dev);
		}
	}
}
/* Configure the PHY for dev.
 * returns 0 if success.  -1 if failure
 */
static int init_phy(struct net_device *dev)
{
	ucc_geth_private_t *ugeth = netdev_priv(dev);
	struct phy_info *curphy;
	ucc_mii_mng_t *mii_regs;
	struct ugeth_mii_info *mii_info;
	int err;

	mii_regs = &ugeth->ug_regs->miimng;

	ugeth->oldlink = 0;
	ugeth->oldspeed = 0;
	ugeth->oldduplex = -1;

	mii_info = kmalloc(sizeof(struct ugeth_mii_info), GFP_KERNEL);

	if (NULL == mii_info) {
		ugeth_err("%s: Could not allocate mii_info", dev->name);
		return -ENOMEM;
	}

	mii_info->mii_regs = mii_regs;
	mii_info->speed = SPEED_1000;
	mii_info->duplex = DUPLEX_FULL;
	mii_info->pause = 0;

	mii_info->advertising = (ADVERTISED_10baseT_Half |
				 ADVERTISED_10baseT_Full |
				 ADVERTISED_100baseT_Half |
				 ADVERTISED_100baseT_Full |
				 ADVERTISED_1000baseT_Full);
	mii_info->autoneg = 1;

	mii_info->mii_id = ugeth->ug_info->phy_address;

	mii_info->dev = dev;

	mii_info->mdio_read = &read_phy_reg;
	mii_info->mdio_write = &write_phy_reg;

	ugeth->mii_info = mii_info;

	spin_lock_irq(&ugeth->lock);

	/* Set this UCC to be the master of the MII management */
	ucc_set_qe_mux_mii_mng(ugeth->ug_info->uf_info.ucc_num);

	if (init_mii_management_configuration(1,
					      0,
					      &mii_regs->miimcfg,
					      &mii_regs->miimind)) {
		ugeth_err("%s: The MII Bus is stuck!", dev->name);
		err = -1;
		goto bus_fail;
	}

	spin_unlock_irq(&ugeth->lock);

	/* get info for this PHY */
	curphy = get_phy_info(ugeth->mii_info);

	if (curphy == NULL) {
		ugeth_err("%s: No PHY found", dev->name);
		err = -1;
		goto no_phy;
	}

	mii_info->phyinfo = curphy;
	/* Run the commands which initialize the PHY */
	if (curphy->init) {
		err = curphy->init(ugeth->mii_info);
		if (err)
			goto phy_init_fail;
	}

	return 0;

phy_init_fail:
no_phy:
	kfree(mii_info);
	return err;

bus_fail:
	spin_unlock_irq(&ugeth->lock);
	kfree(mii_info);
	return err;
}
#ifdef CONFIG_UGETH_TX_ON_DEMOND
static int ugeth_transmit_on_demand(ucc_geth_private_t *ugeth)
{
	ucc_fast_transmit_on_demand(ugeth->uccf);

	return 0;
}
#endif				/* CONFIG_UGETH_TX_ON_DEMOND */
static int ugeth_graceful_stop_tx(ucc_geth_private_t *ugeth)
{
	ucc_fast_private_t *uccf;
	u32 cecr_subblock;
	u32 temp;

	uccf = ugeth->uccf;

	/* Mask GRACEFUL STOP TX interrupt bit and clear it */
	temp = in_be32(uccf->p_uccm);
	temp &= ~UCCE_GRA;
	out_be32(uccf->p_uccm, temp);
	out_be32(uccf->p_ucce, UCCE_GRA);	/* clear by writing 1 */

	/* Issue host command */
	cecr_subblock =
	    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
	qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
		     (u8) QE_CR_PROTOCOL_ETHERNET, 0);

	/* Wait for command to complete */
	do {
		temp = in_be32(uccf->p_ucce);
	} while (!(temp & UCCE_GRA));

	uccf->stopped_tx = 1;

	return 0;
}
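/* Note that the UCCE_GRA poll above has no timeout; the Rx stop below
 * follows the same pattern but, per the spec comment, re-issues the
 * command while polling an acknowledge bit in the Rx global parameter
 * RAM instead of an event register. */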
static int ugeth_graceful_stop_rx(ucc_geth_private_t *ugeth)
{
	ucc_fast_private_t *uccf;
	u32 cecr_subblock;
	u8 temp;

	uccf = ugeth->uccf;

	/* Clear acknowledge bit */
	temp = ugeth->p_rx_glbl_pram->rxgstpack;
	temp &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX;
	ugeth->p_rx_glbl_pram->rxgstpack = temp;

	/* Keep issuing command and checking acknowledge bit until
	   it is asserted, according to spec */
	do {
		/* Issue host command */
		cecr_subblock =
		    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.
						ucc_num);
		qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock,
			     (u8) QE_CR_PROTOCOL_ETHERNET, 0);

		temp = ugeth->p_rx_glbl_pram->rxgstpack;
	} while (!(temp & GRACEFUL_STOP_ACKNOWLEDGE_RX));

	uccf->stopped_rx = 1;

	return 0;
}
static int ugeth_restart_tx(ucc_geth_private_t *ugeth)
{
	ucc_fast_private_t *uccf;
	u32 cecr_subblock;

	uccf = ugeth->uccf;

	cecr_subblock =
	    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
	qe_issue_cmd(QE_RESTART_TX, cecr_subblock, (u8) QE_CR_PROTOCOL_ETHERNET,
		     0);
	uccf->stopped_tx = 0;

	return 0;
}

static int ugeth_restart_rx(ucc_geth_private_t *ugeth)
{
	ucc_fast_private_t *uccf;
	u32 cecr_subblock;

	uccf = ugeth->uccf;

	cecr_subblock =
	    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
	qe_issue_cmd(QE_RESTART_RX, cecr_subblock, (u8) QE_CR_PROTOCOL_ETHERNET,
		     0);
	uccf->stopped_rx = 0;

	return 0;
}
static int ugeth_enable(ucc_geth_private_t *ugeth, comm_dir_e mode)
{
	ucc_fast_private_t *uccf;
	int enabled_tx, enabled_rx;

	uccf = ugeth->uccf;

	/* check if the UCC number is in range. */
	if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
		ugeth_err("%s: ucc_num out of range.", __FUNCTION__);
		return -EINVAL;
	}

	enabled_tx = uccf->enabled_tx;
	enabled_rx = uccf->enabled_rx;

	/* Get Tx and Rx going again, in case this channel was actively
	   disabled. */
	if ((mode & COMM_DIR_TX) && (!enabled_tx) && uccf->stopped_tx)
		ugeth_restart_tx(ugeth);
	if ((mode & COMM_DIR_RX) && (!enabled_rx) && uccf->stopped_rx)
		ugeth_restart_rx(ugeth);

	ucc_fast_enable(uccf, mode);	/* OK to do even if not disabled */

	return 0;
}
static int ugeth_disable(ucc_geth_private_t *ugeth, comm_dir_e mode)
{
	ucc_fast_private_t *uccf;

	uccf = ugeth->uccf;

	/* check if the UCC number is in range. */
	if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
		ugeth_err("%s: ucc_num out of range.", __FUNCTION__);
		return -EINVAL;
	}

	/* Stop any transmissions */
	if ((mode & COMM_DIR_TX) && uccf->enabled_tx && !uccf->stopped_tx)
		ugeth_graceful_stop_tx(ugeth);

	/* Stop any receptions */
	if ((mode & COMM_DIR_RX) && uccf->enabled_rx && !uccf->stopped_rx)
		ugeth_graceful_stop_rx(ugeth);

	ucc_fast_disable(ugeth->uccf, mode);	/* OK to do even if not enabled */

	return 0;
}
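/* ugeth_enable()/ugeth_disable() wrap the ucc_fast primitives with the
 * graceful stop/restart handshakes, so a direction is restarted only if
 * the driver itself had previously stopped it. */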
static void ugeth_dump_regs(ucc_geth_private_t *ugeth)
{
#ifdef DEBUG
	ucc_fast_dump_regs(ugeth->uccf);
	dump_regs(ugeth);
	dump_bds(ugeth);
#endif
}
#ifdef CONFIG_UGETH_FILTERING
static int ugeth_ext_filtering_serialize_tad(ucc_geth_tad_params_t *
					     p_UccGethTadParams,
					     qe_fltr_tad_t *qe_fltr_tad)
{
	u16 temp;

	/* Zero serialized TAD */
	memset(qe_fltr_tad, 0, QE_FLTR_TAD_SIZE);

	qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_V;	/* Must have this */
	if (p_UccGethTadParams->rx_non_dynamic_extended_features_mode ||
	    (p_UccGethTadParams->vtag_op != UCC_GETH_VLAN_OPERATION_TAGGED_NOP)
	    || (p_UccGethTadParams->vnontag_op !=
		UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP))
		qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_EF;
	if (p_UccGethTadParams->reject_frame)
		qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_REJ;
	temp = (u16) (((u16) p_UccGethTadParams->
		       vtag_op) << UCC_GETH_TAD_VTAG_OP_SHIFT);
	qe_fltr_tad->serialized[0] |= (u8) (temp >> 8);	/* upper bits */

	qe_fltr_tad->serialized[1] |= (u8) (temp & 0x00ff);	/* lower bits */
	if (p_UccGethTadParams->vnontag_op ==
	    UCC_GETH_VLAN_OPERATION_NON_TAGGED_Q_TAG_INSERT)
		qe_fltr_tad->serialized[1] |= UCC_GETH_TAD_V_NON_VTAG_OP;
	qe_fltr_tad->serialized[1] |=
	    p_UccGethTadParams->rqos << UCC_GETH_TAD_RQOS_SHIFT;

	qe_fltr_tad->serialized[2] |=
	    p_UccGethTadParams->vpri << UCC_GETH_TAD_V_PRIORITY_SHIFT;
	/* upper bits */
	qe_fltr_tad->serialized[2] |= (u8) (p_UccGethTadParams->vid >> 8);
	/* lower bits */
	qe_fltr_tad->serialized[3] |= (u8) (p_UccGethTadParams->vid & 0x00ff);

	return 0;
}
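
/* The group and individual hash lists are maintained as circular queues
 * (CQs). The lookup below rotates the queue: each entry is dequeued,
 * compared octet by octet, and re-enqueued on a mismatch, so after a full
 * pass the queue still holds the same elements. A matching entry is
 * returned while still dequeued; the caller re-adds or destroys it.
 */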
static enet_addr_container_t
    *ugeth_82xx_filtering_get_match_addr_in_hash(ucc_geth_private_t *ugeth,
						 enet_addr_t *p_enet_addr)
{
	enet_addr_container_t *enet_addr_cont;
	struct list_head *p_lh;
	u8 i, num;
	int32_t j;
	u8 *p_counter;

	if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) {
		p_lh = &ugeth->group_hash_q;
		p_counter = &(ugeth->numGroupAddrInHash);
	} else {
		p_lh = &ugeth->ind_hash_q;
		p_counter = &(ugeth->numIndAddrInHash);
	}

	if (!p_lh)
		return NULL;

	num = *p_counter;

	for (i = 0; i < num; i++) {
		enet_addr_cont =
		    (enet_addr_container_t *)
		    ENET_ADDR_CONT_ENTRY(dequeue(p_lh));
		for (j = ENET_NUM_OCTETS_PER_ADDRESS - 1; j >= 0; j--) {
			if ((*p_enet_addr)[j] != (enet_addr_cont->address)[j])
				break;
			if (j == 0)
				return enet_addr_cont;	/* Found */
		}
		enqueue(p_lh, &enet_addr_cont->node);	/* Put it back */
	}

	return NULL;
}
static int ugeth_82xx_filtering_add_addr_in_hash(ucc_geth_private_t *ugeth,
						 enet_addr_t *p_enet_addr)
{
	ucc_geth_enet_address_recognition_location_e location;
	enet_addr_container_t *enet_addr_cont;
	struct list_head *p_lh;
	u8 i;
	u32 limit;
	u8 *p_counter;

	if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) {
		p_lh = &ugeth->group_hash_q;
		limit = ugeth->ug_info->maxGroupAddrInHash;
		location =
		    UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_GROUP_HASH;
		p_counter = &(ugeth->numGroupAddrInHash);
	} else {
		p_lh = &ugeth->ind_hash_q;
		limit = ugeth->ug_info->maxIndAddrInHash;
		location =
		    UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_INDIVIDUAL_HASH;
		p_counter = &(ugeth->numIndAddrInHash);
	}

	if ((enet_addr_cont =
	     ugeth_82xx_filtering_get_match_addr_in_hash(ugeth, p_enet_addr))) {
		list_add(p_lh, &enet_addr_cont->node);	/* Put it back */
		return 0;
	}

	if ((!p_lh) || (!(*p_counter < limit)))
		return -EBUSY;
	if (!(enet_addr_cont = get_enet_addr_container()))
		return -ENOMEM;
	for (i = 0; i < ENET_NUM_OCTETS_PER_ADDRESS; i++)
		(enet_addr_cont->address)[i] = (*p_enet_addr)[i];
	enet_addr_cont->location = location;
	enqueue(p_lh, &enet_addr_cont->node);	/* Put it back */
	++(*p_counter);

	hw_add_addr_in_hash(ugeth, &(enet_addr_cont->address));

	return 0;
}
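
/* Removing one address from the hardware hash cannot simply clear its
 * filter bit, since several addresses may map onto the same bit. The
 * removal path below therefore quiesces the channel, zeroes gaddr/iaddr
 * entirely, and re-runs hw_add_addr_in_hash() for every address still in
 * the CQ.
 */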
static int ugeth_82xx_filtering_clear_addr_in_hash(ucc_geth_private_t *ugeth,
						   enet_addr_t *p_enet_addr)
{
	ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
	enet_addr_container_t *enet_addr_cont;
	ucc_fast_private_t *uccf;
	comm_dir_e comm_dir;
	u16 i, num;
	struct list_head *p_lh;
	u32 *addr_h, *addr_l;
	u8 *p_counter;

	uccf = ugeth->uccf;

	p_82xx_addr_filt =
	    (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram->
	    addressfiltering;

	if (!(enet_addr_cont =
	      ugeth_82xx_filtering_get_match_addr_in_hash(ugeth, p_enet_addr)))
		return -ENOENT;

	/* It's been found and removed from the CQ. */
	/* Now destroy its container */
	put_enet_addr_container(enet_addr_cont);

	if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) {
		addr_h = &(p_82xx_addr_filt->gaddr_h);
		addr_l = &(p_82xx_addr_filt->gaddr_l);
		p_lh = &ugeth->group_hash_q;
		p_counter = &(ugeth->numGroupAddrInHash);
	} else {
		addr_h = &(p_82xx_addr_filt->iaddr_h);
		addr_l = &(p_82xx_addr_filt->iaddr_l);
		p_lh = &ugeth->ind_hash_q;
		p_counter = &(ugeth->numIndAddrInHash);
	}

	comm_dir = 0;
	if (uccf->enabled_tx)
		comm_dir |= COMM_DIR_TX;
	if (uccf->enabled_rx)
		comm_dir |= COMM_DIR_RX;
	if (comm_dir)
		ugeth_disable(ugeth, comm_dir);

	/* Clear the hash table. */
	out_be32(addr_h, 0x00000000);
	out_be32(addr_l, 0x00000000);

	/* Add all remaining CQ elements back into hash */
	num = --(*p_counter);
	for (i = 0; i < num; i++) {
		enet_addr_cont =
		    (enet_addr_container_t *)
		    ENET_ADDR_CONT_ENTRY(dequeue(p_lh));
		hw_add_addr_in_hash(ugeth, &(enet_addr_cont->address));
		enqueue(p_lh, &enet_addr_cont->node);	/* Put it back */
	}

	if (comm_dir)
		ugeth_enable(ugeth, comm_dir);

	return 0;
}
#endif /* CONFIG_UGETH_FILTERING */
static int ugeth_82xx_filtering_clear_all_addr_in_hash(ucc_geth_private_t *
						       ugeth,
						       enet_addr_type_e
						       enet_addr_type)
{
	ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
	ucc_fast_private_t *uccf;
	comm_dir_e comm_dir;
	struct list_head *p_lh;
	u16 i, num;
	u32 *addr_h, *addr_l;
	u8 *p_counter;

	uccf = ugeth->uccf;

	p_82xx_addr_filt =
	    (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram->
	    addressfiltering;

	if (enet_addr_type == ENET_ADDR_TYPE_GROUP) {
		addr_h = &(p_82xx_addr_filt->gaddr_h);
		addr_l = &(p_82xx_addr_filt->gaddr_l);
		p_lh = &ugeth->group_hash_q;
		p_counter = &(ugeth->numGroupAddrInHash);
	} else if (enet_addr_type == ENET_ADDR_TYPE_INDIVIDUAL) {
		addr_h = &(p_82xx_addr_filt->iaddr_h);
		addr_l = &(p_82xx_addr_filt->iaddr_l);
		p_lh = &ugeth->ind_hash_q;
		p_counter = &(ugeth->numIndAddrInHash);
	} else
		return -EINVAL;

	comm_dir = 0;
	if (uccf->enabled_tx)
		comm_dir |= COMM_DIR_TX;
	if (uccf->enabled_rx)
		comm_dir |= COMM_DIR_RX;
	if (comm_dir)
		ugeth_disable(ugeth, comm_dir);

	/* Clear the hash table. */
	out_be32(addr_h, 0x00000000);
	out_be32(addr_l, 0x00000000);

	if (!p_lh)
		return 0;

	num = *p_counter;

	/* Delete all remaining CQ elements */
	for (i = 0; i < num; i++)
		put_enet_addr_container(ENET_ADDR_CONT_ENTRY(dequeue(p_lh)));

	*p_counter = 0;

	if (comm_dir)
		ugeth_enable(ugeth, comm_dir);

	return 0;
}
#ifdef CONFIG_UGETH_FILTERING
static int ugeth_82xx_filtering_add_addr_in_paddr(ucc_geth_private_t *ugeth,
						  enet_addr_t *p_enet_addr,
						  u8 paddr_num)
{
	u8 i;

	if ((*p_enet_addr)[0] & ENET_GROUP_ADDR)
		ugeth_warn
		    ("%s: multicast address added to paddr will have no "
		     "effect - is this what you wanted?",
		     __FUNCTION__);

	ugeth->indAddrRegUsed[paddr_num] = 1;	/* mark this paddr as used */
	/* store address in our database */
	for (i = 0; i < ENET_NUM_OCTETS_PER_ADDRESS; i++)
		ugeth->paddr[paddr_num][i] = (*p_enet_addr)[i];
	/* put in hardware */
	return hw_add_addr_in_paddr(ugeth, p_enet_addr, paddr_num);
}
#endif /* CONFIG_UGETH_FILTERING */

static int ugeth_82xx_filtering_clear_addr_in_paddr(ucc_geth_private_t *ugeth,
						    u8 paddr_num)
{
	ugeth->indAddrRegUsed[paddr_num] = 0;	/* mark this paddr as not used */
	return hw_clear_addr_in_paddr(ugeth, paddr_num);/* clear in hardware */
}
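
/* Single teardown path: releases the MURAM parameter RAM areas, BD rings,
 * DMA-mapped skbuffs and address containers. It is used both by
 * ucc_geth_stop() and by every error-unwind branch in ucc_geth_startup(),
 * so each block is guarded by a NULL check and the pointer is cleared
 * once freed, making the function safe to call on a partially built
 * channel.
 */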
static void ucc_geth_memclean(ucc_geth_private_t *ugeth)
{
	u16 i, j;
	u8 *bd;

	if (!ugeth)
		return;

	if (ugeth->uccf)
		ucc_fast_free(ugeth->uccf);

	if (ugeth->p_thread_data_tx) {
		qe_muram_free(ugeth->thread_dat_tx_offset);
		ugeth->p_thread_data_tx = NULL;
	}
	if (ugeth->p_thread_data_rx) {
		qe_muram_free(ugeth->thread_dat_rx_offset);
		ugeth->p_thread_data_rx = NULL;
	}
	if (ugeth->p_exf_glbl_param) {
		qe_muram_free(ugeth->exf_glbl_param_offset);
		ugeth->p_exf_glbl_param = NULL;
	}
	if (ugeth->p_rx_glbl_pram) {
		qe_muram_free(ugeth->rx_glbl_pram_offset);
		ugeth->p_rx_glbl_pram = NULL;
	}
	if (ugeth->p_tx_glbl_pram) {
		qe_muram_free(ugeth->tx_glbl_pram_offset);
		ugeth->p_tx_glbl_pram = NULL;
	}
	if (ugeth->p_send_q_mem_reg) {
		qe_muram_free(ugeth->send_q_mem_reg_offset);
		ugeth->p_send_q_mem_reg = NULL;
	}
	if (ugeth->p_scheduler) {
		qe_muram_free(ugeth->scheduler_offset);
		ugeth->p_scheduler = NULL;
	}
	if (ugeth->p_tx_fw_statistics_pram) {
		qe_muram_free(ugeth->tx_fw_statistics_pram_offset);
		ugeth->p_tx_fw_statistics_pram = NULL;
	}
	if (ugeth->p_rx_fw_statistics_pram) {
		qe_muram_free(ugeth->rx_fw_statistics_pram_offset);
		ugeth->p_rx_fw_statistics_pram = NULL;
	}
	if (ugeth->p_rx_irq_coalescing_tbl) {
		qe_muram_free(ugeth->rx_irq_coalescing_tbl_offset);
		ugeth->p_rx_irq_coalescing_tbl = NULL;
	}
	if (ugeth->p_rx_bd_qs_tbl) {
		qe_muram_free(ugeth->rx_bd_qs_tbl_offset);
		ugeth->p_rx_bd_qs_tbl = NULL;
	}
	if (ugeth->p_init_enet_param_shadow) {
		return_init_enet_entries(ugeth,
					 &(ugeth->p_init_enet_param_shadow->
					   rxthread[0]),
					 ENET_INIT_PARAM_MAX_ENTRIES_RX,
					 ugeth->ug_info->riscRx, 1);
		return_init_enet_entries(ugeth,
					 &(ugeth->p_init_enet_param_shadow->
					   txthread[0]),
					 ENET_INIT_PARAM_MAX_ENTRIES_TX,
					 ugeth->ug_info->riscTx, 0);
		kfree(ugeth->p_init_enet_param_shadow);
		ugeth->p_init_enet_param_shadow = NULL;
	}
	for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
		bd = ugeth->p_tx_bd_ring[i];
		for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) {
			if (ugeth->tx_skbuff[i][j]) {
				dma_unmap_single(NULL,
						 BD_BUFFER(bd),
						 (BD_STATUS_AND_LENGTH(bd) &
						  BD_LENGTH_MASK),
						 DMA_TO_DEVICE);
				dev_kfree_skb_any(ugeth->tx_skbuff[i][j]);
				ugeth->tx_skbuff[i][j] = NULL;
			}
			bd += UCC_GETH_SIZE_OF_BD;
		}

		kfree(ugeth->tx_skbuff[i]);

		if (ugeth->p_tx_bd_ring[i]) {
			if (ugeth->ug_info->uf_info.bd_mem_part ==
			    MEM_PART_SYSTEM)
				kfree((void *)ugeth->tx_bd_ring_offset[i]);
			else if (ugeth->ug_info->uf_info.bd_mem_part ==
				 MEM_PART_MURAM)
				qe_muram_free(ugeth->tx_bd_ring_offset[i]);
			ugeth->p_tx_bd_ring[i] = NULL;
		}
	}
	for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
		if (ugeth->p_rx_bd_ring[i]) {
			/* Return existing data buffers in ring */
			bd = ugeth->p_rx_bd_ring[i];
			for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) {
				if (ugeth->rx_skbuff[i][j]) {
					dma_unmap_single(NULL, BD_BUFFER(bd),
						ugeth->ug_info->uf_info.
						max_rx_buf_length +
						UCC_GETH_RX_DATA_BUF_ALIGNMENT,
						DMA_FROM_DEVICE);
					dev_kfree_skb_any(ugeth->
							  rx_skbuff[i][j]);
					ugeth->rx_skbuff[i][j] = NULL;
				}
				bd += UCC_GETH_SIZE_OF_BD;
			}

			kfree(ugeth->rx_skbuff[i]);

			if (ugeth->ug_info->uf_info.bd_mem_part ==
			    MEM_PART_SYSTEM)
				kfree((void *)ugeth->rx_bd_ring_offset[i]);
			else if (ugeth->ug_info->uf_info.bd_mem_part ==
				 MEM_PART_MURAM)
				qe_muram_free(ugeth->rx_bd_ring_offset[i]);
			ugeth->p_rx_bd_ring[i] = NULL;
		}
	}
	while (!list_empty(&ugeth->group_hash_q))
		put_enet_addr_container(ENET_ADDR_CONT_ENTRY
					(dequeue(&ugeth->group_hash_q)));
	while (!list_empty(&ugeth->ind_hash_q))
		put_enet_addr_container(ENET_ADDR_CONT_ENTRY
					(dequeue(&ugeth->ind_hash_q)));
}
static void ucc_geth_set_multi(struct net_device *dev)
{
	ucc_geth_private_t *ugeth;
	struct dev_mc_list *dmi;
	ucc_fast_t *uf_regs;
	ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
	enet_addr_t tempaddr;
	u8 *mcptr, *tdptr;
	int i, j;

	ugeth = netdev_priv(dev);

	uf_regs = ugeth->uccf->uf_regs;

	if (dev->flags & IFF_PROMISC) {

		uf_regs->upsmr |= UPSMR_PRO;

	} else {

		uf_regs->upsmr &= ~UPSMR_PRO;

		p_82xx_addr_filt =
		    (ucc_geth_82xx_address_filtering_pram_t *) ugeth->
		    p_rx_glbl_pram->addressfiltering;

		if (dev->flags & IFF_ALLMULTI) {
			/* Catch all multicast addresses, so set the
			 * filter to all 1's.
			 */
			out_be32(&p_82xx_addr_filt->gaddr_h, 0xffffffff);
			out_be32(&p_82xx_addr_filt->gaddr_l, 0xffffffff);
		} else {
			/* Clear filter and add the addresses in the list.
			 */
			out_be32(&p_82xx_addr_filt->gaddr_h, 0x0);
			out_be32(&p_82xx_addr_filt->gaddr_l, 0x0);

			dmi = dev->mc_list;

			for (i = 0; i < dev->mc_count; i++, dmi = dmi->next) {

				/* Only support group multicast for now.
				 */
				if (!(dmi->dmi_addr[0] & 1))
					continue;

				/* The address in dmi_addr is LSB first,
				 * and taddr is MSB first.  We have to
				 * copy bytes MSB first from dmi_addr.
				 */
				mcptr = (u8 *) dmi->dmi_addr + 5;
				tdptr = (u8 *)&tempaddr;
				for (j = 0; j < 6; j++)
					*tdptr++ = *mcptr--;

				/* Ask CPM to run CRC and set bit in
				 * filter mask.
				 */
				hw_add_addr_in_hash(ugeth, &tempaddr);
			}
		}
	}
}
static void ucc_geth_stop(ucc_geth_private_t *ugeth)
{
	ucc_geth_t *ug_regs = ugeth->ug_regs;
	u32 tempval;

	ugeth_vdbg("%s: IN", __FUNCTION__);

	/* Disable the controller */
	ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);

	/* Tell the kernel the link is down */
	ugeth->mii_info->link = 0;
	adjust_link(ugeth->dev);

	/* Mask all interrupts */
	out_be32(ugeth->uccf->p_uccm, 0x00000000);

	/* Clear all interrupts */
	out_be32(ugeth->uccf->p_ucce, 0xffffffff);

	/* Disable Rx and Tx */
	tempval = in_be32(&ug_regs->maccfg1);
	tempval &= ~(MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
	out_be32(&ug_regs->maccfg1, tempval);

	if (ugeth->ug_info->board_flags & FSL_UGETH_BRD_HAS_PHY_INTR) {
		/* Clear any pending interrupts */
		mii_clear_phy_interrupt(ugeth->mii_info);

		/* Disable PHY Interrupts */
		mii_configure_phy_interrupt(ugeth->mii_info,
					    MII_INTERRUPT_DISABLED);
	}

	free_irq(ugeth->ug_info->uf_info.irq, ugeth->dev);

	if (ugeth->ug_info->board_flags & FSL_UGETH_BRD_HAS_PHY_INTR) {
		free_irq(ugeth->ug_info->phy_interrupt, ugeth->dev);
	} else {
		del_timer_sync(&ugeth->phy_info_timer);
	}

	ucc_geth_memclean(ugeth);
}
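
/* Bring-up sequence: validate ug_info, initialize the fast UCC block,
 * program the MAC registers, allocate and initialize the Tx/Rx BD rings,
 * fill the Tx and Rx global parameter RAM pages in MURAM, build the
 * shadow InitEnet structure, and finally issue the QE_INIT_TX_RX host
 * command. Every failure branch unwinds through ucc_geth_memclean().
 */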
static int ucc_geth_startup(ucc_geth_private_t *ugeth)
{
	ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
	ucc_geth_init_pram_t *p_init_enet_pram;
	ucc_fast_private_t *uccf;
	ucc_geth_info_t *ug_info;
	ucc_fast_info_t *uf_info;
	ucc_fast_t *uf_regs;
	ucc_geth_t *ug_regs;
	int ret_val = -EINVAL;
	u32 remoder = UCC_GETH_REMODER_INIT;
	u32 init_enet_pram_offset, cecr_subblock, command, maccfg1;
	u32 ifstat, i, j, size, l2qt, l3qt, length;
	u16 temoder = UCC_GETH_TEMODER_INIT;
	u16 test;
	u8 function_code = 0;
	u8 *bd, *endOfRing;
	u8 numThreadsRxNumerical, numThreadsTxNumerical;

	ugeth_vdbg("%s: IN", __FUNCTION__);

	ug_info = ugeth->ug_info;
	uf_info = &ug_info->uf_info;

	if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) ||
	      (uf_info->bd_mem_part == MEM_PART_MURAM))) {
		ugeth_err("%s: Bad memory partition value.", __FUNCTION__);
		return -EINVAL;
	}

	/* Rx BD lengths */
	for (i = 0; i < ug_info->numQueuesRx; i++) {
		if ((ug_info->bdRingLenRx[i] < UCC_GETH_RX_BD_RING_SIZE_MIN) ||
		    (ug_info->bdRingLenRx[i] %
		     UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT)) {
			ugeth_err
			    ("%s: Rx BD ring length must be multiple of 4,"
			     " no smaller than 8.", __FUNCTION__);
			return -EINVAL;
		}
	}

	/* Tx BD lengths */
	for (i = 0; i < ug_info->numQueuesTx; i++) {
		if (ug_info->bdRingLenTx[i] < UCC_GETH_TX_BD_RING_SIZE_MIN) {
			ugeth_err
			    ("%s: Tx BD ring length must be no smaller than 2.",
			     __FUNCTION__);
			return -EINVAL;
		}
	}

	/* mrblr */
	if ((uf_info->max_rx_buf_length == 0) ||
	    (uf_info->max_rx_buf_length % UCC_GETH_MRBLR_ALIGNMENT)) {
		ugeth_err
		    ("%s: max_rx_buf_length must be non-zero multiple of 128.",
		     __FUNCTION__);
		return -EINVAL;
	}

	/* num Tx queues */
	if (ug_info->numQueuesTx > NUM_TX_QUEUES) {
		ugeth_err("%s: number of tx queues too large.", __FUNCTION__);
		return -EINVAL;
	}

	/* num Rx queues */
	if (ug_info->numQueuesRx > NUM_RX_QUEUES) {
		ugeth_err("%s: number of rx queues too large.", __FUNCTION__);
		return -EINVAL;
	}

	/* l2qt */
	for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) {
		if (ug_info->l2qt[i] >= ug_info->numQueuesRx) {
			ugeth_err
			    ("%s: VLAN priority table entry must not be"
			     " larger than number of Rx queues.",
			     __FUNCTION__);
			return -EINVAL;
		}
	}

	/* l3qt */
	for (i = 0; i < UCC_GETH_IP_PRIORITY_MAX; i++) {
		if (ug_info->l3qt[i] >= ug_info->numQueuesRx) {
			ugeth_err
			    ("%s: IP priority table entry must not be"
			     " larger than number of Rx queues.",
			     __FUNCTION__);
			return -EINVAL;
		}
	}

	if (ug_info->cam && !ug_info->ecamptr) {
		ugeth_err("%s: If cam mode is chosen, must supply cam ptr.",
			  __FUNCTION__);
		return -EINVAL;
	}

	if ((ug_info->numStationAddresses !=
	     UCC_GETH_NUM_OF_STATION_ADDRESSES_1)
	    && ug_info->rxExtendedFiltering) {
		ugeth_err("%s: Number of station addresses greater than 1 "
			  "not allowed in extended parsing mode.",
			  __FUNCTION__);
		return -EINVAL;
	}

	/* Generate uccm_mask for receive */
	uf_info->uccm_mask = ug_info->eventRegMask & UCCE_OTHER;/* Errors */
	for (i = 0; i < ug_info->numQueuesRx; i++)
		uf_info->uccm_mask |= (UCCE_RXBF_SINGLE_MASK << i);

	for (i = 0; i < ug_info->numQueuesTx; i++)
		uf_info->uccm_mask |= (UCCE_TXBF_SINGLE_MASK << i);
	/* Initialize the general fast UCC block. */
	if (ucc_fast_init(uf_info, &uccf)) {
		ugeth_err("%s: Failed to init uccf.", __FUNCTION__);
		ucc_geth_memclean(ugeth);
		return -ENOMEM;
	}
	ugeth->uccf = uccf;
	switch (ug_info->numThreadsRx) {
	case UCC_GETH_NUM_OF_THREADS_1:
		numThreadsRxNumerical = 1;
		break;
	case UCC_GETH_NUM_OF_THREADS_2:
		numThreadsRxNumerical = 2;
		break;
	case UCC_GETH_NUM_OF_THREADS_4:
		numThreadsRxNumerical = 4;
		break;
	case UCC_GETH_NUM_OF_THREADS_6:
		numThreadsRxNumerical = 6;
		break;
	case UCC_GETH_NUM_OF_THREADS_8:
		numThreadsRxNumerical = 8;
		break;
	default:
		ugeth_err("%s: Bad number of Rx threads value.", __FUNCTION__);
		ucc_geth_memclean(ugeth);
		return -EINVAL;
	}

	switch (ug_info->numThreadsTx) {
	case UCC_GETH_NUM_OF_THREADS_1:
		numThreadsTxNumerical = 1;
		break;
	case UCC_GETH_NUM_OF_THREADS_2:
		numThreadsTxNumerical = 2;
		break;
	case UCC_GETH_NUM_OF_THREADS_4:
		numThreadsTxNumerical = 4;
		break;
	case UCC_GETH_NUM_OF_THREADS_6:
		numThreadsTxNumerical = 6;
		break;
	case UCC_GETH_NUM_OF_THREADS_8:
		numThreadsTxNumerical = 8;
		break;
	default:
		ugeth_err("%s: Bad number of Tx threads value.", __FUNCTION__);
		ucc_geth_memclean(ugeth);
		return -EINVAL;
	}
	/* Calculate rx_extended_features */
	ugeth->rx_non_dynamic_extended_features = ug_info->ipCheckSumCheck ||
	    ug_info->ipAddressAlignment ||
	    (ug_info->numStationAddresses !=
	     UCC_GETH_NUM_OF_STATION_ADDRESSES_1);

	ugeth->rx_extended_features = ugeth->rx_non_dynamic_extended_features ||
	    (ug_info->vlanOperationTagged != UCC_GETH_VLAN_OPERATION_TAGGED_NOP)
	    || (ug_info->vlanOperationNonTagged !=
		UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP);

	uf_regs = uccf->uf_regs;
	ug_regs = (ucc_geth_t *) (uccf->uf_regs);
	ugeth->ug_regs = ug_regs;

	init_default_reg_vals(&uf_regs->upsmr,
			      &ug_regs->maccfg1, &ug_regs->maccfg2);

	/*                    Set UPSMR                      */
	/* For more details see the hardware spec.           */
	init_rx_parameters(ug_info->bro,
			   ug_info->rsh, ug_info->pro, &uf_regs->upsmr);

	/* We're going to ignore other registers for now, */
	/* except as needed to get up and running         */

	/*                    Set MACCFG1                    */
	/* For more details see the hardware spec.           */
	init_flow_control_params(ug_info->aufc,
				 ug_info->receiveFlowControl,
				 1,
				 ug_info->pausePeriod,
				 ug_info->extensionField,
				 &uf_regs->upsmr,
				 &ug_regs->uempr, &ug_regs->maccfg1);

	maccfg1 = in_be32(&ug_regs->maccfg1);
	maccfg1 |= MACCFG1_ENABLE_RX;
	maccfg1 |= MACCFG1_ENABLE_TX;
	out_be32(&ug_regs->maccfg1, maccfg1);

	/*                    Set IPGIFG                     */
	/* For more details see the hardware spec.           */
	ret_val = init_inter_frame_gap_params(ug_info->nonBackToBackIfgPart1,
					      ug_info->nonBackToBackIfgPart2,
					      ug_info->
					      miminumInterFrameGapEnforcement,
					      ug_info->backToBackInterFrameGap,
					      &ug_regs->ipgifg);
	if (ret_val != 0) {
		ugeth_err("%s: IPGIFG initialization parameter too large.",
			  __FUNCTION__);
		ucc_geth_memclean(ugeth);
		return ret_val;
	}

	/*                    Set HAFDUP                     */
	/* For more details see the hardware spec.           */
	ret_val = init_half_duplex_params(ug_info->altBeb,
					  ug_info->backPressureNoBackoff,
					  ug_info->noBackoff,
					  ug_info->excessDefer,
					  ug_info->altBebTruncation,
					  ug_info->maxRetransmission,
					  ug_info->collisionWindow,
					  &ug_regs->hafdup);
	if (ret_val != 0) {
		ugeth_err("%s: Half Duplex initialization parameter too large.",
			  __FUNCTION__);
		ucc_geth_memclean(ugeth);
		return ret_val;
	}

	/*                    Set IFSTAT                     */
	/* For more details see the hardware spec.           */
	/* Read only - resets upon read                      */
	ifstat = in_be32(&ug_regs->ifstat);

	/*                    Clear UEMPR                    */
	/* For more details see the hardware spec.           */
	out_be32(&ug_regs->uempr, 0);

	/*                    Set UESCR                      */
	/* For more details see the hardware spec.           */
	init_hw_statistics_gathering_mode((ug_info->statisticsMode &
				UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE),
				0, &uf_regs->upsmr, &ug_regs->uescr);
	/* Allocate Tx bds */
	for (j = 0; j < ug_info->numQueuesTx; j++) {
		/* Allocate in multiple of
		   UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT,
		   according to spec */
		length = ((ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD)
			  / UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
		    * UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
		if ((ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD) %
		    UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
			length += UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
		if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
			u32 align = 4;
			if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4)
				align = UCC_GETH_TX_BD_RING_ALIGNMENT;
			ugeth->tx_bd_ring_offset[j] =
			    (u32) (kmalloc((u32) (length + align),
					   GFP_KERNEL));
			if (ugeth->tx_bd_ring_offset[j] != 0)
				ugeth->p_tx_bd_ring[j] =
				    (void *)((ugeth->tx_bd_ring_offset[j] +
					      align) & ~(align - 1));
		} else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
			ugeth->tx_bd_ring_offset[j] =
			    qe_muram_alloc(length,
					   UCC_GETH_TX_BD_RING_ALIGNMENT);
			if (!IS_MURAM_ERR(ugeth->tx_bd_ring_offset[j]))
				ugeth->p_tx_bd_ring[j] =
				    (u8 *) qe_muram_addr(ugeth->
							 tx_bd_ring_offset[j]);
		}
		if (!ugeth->p_tx_bd_ring[j]) {
			ugeth_err
			    ("%s: Can not allocate memory for Tx bd rings.",
			     __FUNCTION__);
			ucc_geth_memclean(ugeth);
			return -ENOMEM;
		}
		/* Zero unused end of bd ring, according to spec */
		memset(ugeth->p_tx_bd_ring[j] +
		       ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD, 0,
		       length - ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD);
	}
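
	/* The MEM_PART_SYSTEM branch above over-allocates by 'align' bytes
	 * and rounds the kmalloc() result up with
	 * (addr + align) & ~(align - 1); e.g. with align = 32, an address
	 * of 0x1014 becomes 0x1020. The raw offset is kept in
	 * tx_bd_ring_offset[] so ucc_geth_memclean() can kfree() the
	 * original, unaligned pointer.
	 */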
	/* Allocate Rx bds */
	for (j = 0; j < ug_info->numQueuesRx; j++) {
		length = ug_info->bdRingLenRx[j] * UCC_GETH_SIZE_OF_BD;
		if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
			u32 align = 4;
			if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4)
				align = UCC_GETH_RX_BD_RING_ALIGNMENT;
			ugeth->rx_bd_ring_offset[j] =
			    (u32) (kmalloc((u32) (length + align), GFP_KERNEL));
			if (ugeth->rx_bd_ring_offset[j] != 0)
				ugeth->p_rx_bd_ring[j] =
				    (void *)((ugeth->rx_bd_ring_offset[j] +
					      align) & ~(align - 1));
		} else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
			ugeth->rx_bd_ring_offset[j] =
			    qe_muram_alloc(length,
					   UCC_GETH_RX_BD_RING_ALIGNMENT);
			if (!IS_MURAM_ERR(ugeth->rx_bd_ring_offset[j]))
				ugeth->p_rx_bd_ring[j] =
				    (u8 *) qe_muram_addr(ugeth->
							 rx_bd_ring_offset[j]);
		}
		if (!ugeth->p_rx_bd_ring[j]) {
			ugeth_err
			    ("%s: Can not allocate memory for Rx bd rings.",
			     __FUNCTION__);
			ucc_geth_memclean(ugeth);
			return -ENOMEM;
		}
	}
	/* Init Tx bds */
	for (j = 0; j < ug_info->numQueuesTx; j++) {
		/* Setup the skbuff rings */
		ugeth->tx_skbuff[j] =
		    (struct sk_buff **)kmalloc(sizeof(struct sk_buff *) *
					       ugeth->ug_info->bdRingLenTx[j],
					       GFP_KERNEL);

		if (ugeth->tx_skbuff[j] == NULL) {
			ugeth_err("%s: Could not allocate tx_skbuff",
				  __FUNCTION__);
			ucc_geth_memclean(ugeth);
			return -ENOMEM;
		}

		for (i = 0; i < ugeth->ug_info->bdRingLenTx[j]; i++)
			ugeth->tx_skbuff[j][i] = NULL;

		ugeth->skb_curtx[j] = ugeth->skb_dirtytx[j] = 0;
		bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j];
		for (i = 0; i < ug_info->bdRingLenTx[j]; i++) {
			BD_BUFFER_CLEAR(bd);
			BD_STATUS_AND_LENGTH_SET(bd, 0);
			bd += UCC_GETH_SIZE_OF_BD;
		}
		bd -= UCC_GETH_SIZE_OF_BD;
		BD_STATUS_AND_LENGTH_SET(bd, T_W);/* for last BD set Wrap bit */
	}

	/* Init Rx bds */
	for (j = 0; j < ug_info->numQueuesRx; j++) {
		/* Setup the skbuff rings */
		ugeth->rx_skbuff[j] =
		    (struct sk_buff **)kmalloc(sizeof(struct sk_buff *) *
					       ugeth->ug_info->bdRingLenRx[j],
					       GFP_KERNEL);

		if (ugeth->rx_skbuff[j] == NULL) {
			ugeth_err("%s: Could not allocate rx_skbuff",
				  __FUNCTION__);
			ucc_geth_memclean(ugeth);
			return -ENOMEM;
		}

		for (i = 0; i < ugeth->ug_info->bdRingLenRx[j]; i++)
			ugeth->rx_skbuff[j][i] = NULL;

		ugeth->skb_currx[j] = 0;
		bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j];
		for (i = 0; i < ug_info->bdRingLenRx[j]; i++) {
			BD_STATUS_AND_LENGTH_SET(bd, R_I);
			BD_BUFFER_CLEAR(bd);
			bd += UCC_GETH_SIZE_OF_BD;
		}
		bd -= UCC_GETH_SIZE_OF_BD;
		BD_STATUS_AND_LENGTH_SET(bd, R_W);/* for last BD set Wrap bit */
	}
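
	/* Each ring is terminated by setting the wrap (T_W/R_W) bit in its
	 * last descriptor, so both the QE and the driver fall back to the
	 * ring base instead of running past the allocation.
	 */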
	/*
	 * Global PRAM
	 */
	/* Tx global PRAM */
	/* Allocate global tx parameter RAM page */
	ugeth->tx_glbl_pram_offset =
	    qe_muram_alloc(sizeof(ucc_geth_tx_global_pram_t),
			   UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT);
	if (IS_MURAM_ERR(ugeth->tx_glbl_pram_offset)) {
		ugeth_err
		    ("%s: Can not allocate DPRAM memory for p_tx_glbl_pram.",
		     __FUNCTION__);
		ucc_geth_memclean(ugeth);
		return -ENOMEM;
	}
	ugeth->p_tx_glbl_pram =
	    (ucc_geth_tx_global_pram_t *) qe_muram_addr(ugeth->
							tx_glbl_pram_offset);
	/* Zero out p_tx_glbl_pram */
	memset(ugeth->p_tx_glbl_pram, 0, sizeof(ucc_geth_tx_global_pram_t));

	/* Fill global PRAM */

	/* TQPTR */
	/* Size varies with number of Tx threads */
	ugeth->thread_dat_tx_offset =
	    qe_muram_alloc(numThreadsTxNumerical *
			   sizeof(ucc_geth_thread_data_tx_t) +
			   32 * (numThreadsTxNumerical == 1),
			   UCC_GETH_THREAD_DATA_ALIGNMENT);
	if (IS_MURAM_ERR(ugeth->thread_dat_tx_offset)) {
		ugeth_err
		    ("%s: Can not allocate DPRAM memory for p_thread_data_tx.",
		     __FUNCTION__);
		ucc_geth_memclean(ugeth);
		return -ENOMEM;
	}

	ugeth->p_thread_data_tx =
	    (ucc_geth_thread_data_tx_t *) qe_muram_addr(ugeth->
							thread_dat_tx_offset);
	out_be32(&ugeth->p_tx_glbl_pram->tqptr, ugeth->thread_dat_tx_offset);

	/* vtagtable */
	for (i = 0; i < UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX; i++)
		out_be32(&ugeth->p_tx_glbl_pram->vtagtable[i],
			 ug_info->vtagtable[i]);

	/* iphoffset */
	for (i = 0; i < TX_IP_OFFSET_ENTRY_MAX; i++)
		ugeth->p_tx_glbl_pram->iphoffset[i] = ug_info->iphoffset[i];

	/* SQPTR */
	/* Size varies with number of Tx queues */
	ugeth->send_q_mem_reg_offset =
	    qe_muram_alloc(ug_info->numQueuesTx *
			   sizeof(ucc_geth_send_queue_qd_t),
			   UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT);
	if (IS_MURAM_ERR(ugeth->send_q_mem_reg_offset)) {
		ugeth_err
		    ("%s: Can not allocate DPRAM memory for p_send_q_mem_reg.",
		     __FUNCTION__);
		ucc_geth_memclean(ugeth);
		return -ENOMEM;
	}

	ugeth->p_send_q_mem_reg =
	    (ucc_geth_send_queue_mem_region_t *) qe_muram_addr(ugeth->
						send_q_mem_reg_offset);
	out_be32(&ugeth->p_tx_glbl_pram->sqptr, ugeth->send_q_mem_reg_offset);

	/* Setup the table */
	/* Assume BD rings are already established */
	for (i = 0; i < ug_info->numQueuesTx; i++) {
		endOfRing =
		    ugeth->p_tx_bd_ring[i] + (ug_info->bdRingLenTx[i] -
					      1) * UCC_GETH_SIZE_OF_BD;
		if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
			out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
				 (u32) virt_to_phys(ugeth->p_tx_bd_ring[i]));
			out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
				 last_bd_completed_address,
				 (u32) virt_to_phys(endOfRing));
		} else if (ugeth->ug_info->uf_info.bd_mem_part ==
			   MEM_PART_MURAM) {
			out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
				 (u32) immrbar_virt_to_phys(ugeth->
							    p_tx_bd_ring[i]));
			out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
				 last_bd_completed_address,
				 (u32) immrbar_virt_to_phys(endOfRing));
		}
	}
	/* schedulerbasepointer */

	if (ug_info->numQueuesTx > 1) {
		/* scheduler exists only if more than 1 tx queue */
		ugeth->scheduler_offset =
		    qe_muram_alloc(sizeof(ucc_geth_scheduler_t),
				   UCC_GETH_SCHEDULER_ALIGNMENT);
		if (IS_MURAM_ERR(ugeth->scheduler_offset)) {
			ugeth_err
			    ("%s: Can not allocate DPRAM memory for p_scheduler.",
			     __FUNCTION__);
			ucc_geth_memclean(ugeth);
			return -ENOMEM;
		}

		ugeth->p_scheduler =
		    (ucc_geth_scheduler_t *) qe_muram_addr(ugeth->
							   scheduler_offset);
		out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer,
			 ugeth->scheduler_offset);
		/* Zero out p_scheduler */
		memset(ugeth->p_scheduler, 0, sizeof(ucc_geth_scheduler_t));

		/* Set values in scheduler */
		out_be32(&ugeth->p_scheduler->mblinterval,
			 ug_info->mblinterval);
		out_be16(&ugeth->p_scheduler->nortsrbytetime,
			 ug_info->nortsrbytetime);
		ugeth->p_scheduler->fracsiz = ug_info->fracsiz;
		ugeth->p_scheduler->strictpriorityq = ug_info->strictpriorityq;
		ugeth->p_scheduler->txasap = ug_info->txasap;
		ugeth->p_scheduler->extrabw = ug_info->extrabw;
		for (i = 0; i < NUM_TX_QUEUES; i++)
			ugeth->p_scheduler->weightfactor[i] =
			    ug_info->weightfactor[i];

		/* Set pointers to cpucount registers in scheduler */
		ugeth->p_cpucount[0] = &(ugeth->p_scheduler->cpucount0);
		ugeth->p_cpucount[1] = &(ugeth->p_scheduler->cpucount1);
		ugeth->p_cpucount[2] = &(ugeth->p_scheduler->cpucount2);
		ugeth->p_cpucount[3] = &(ugeth->p_scheduler->cpucount3);
		ugeth->p_cpucount[4] = &(ugeth->p_scheduler->cpucount4);
		ugeth->p_cpucount[5] = &(ugeth->p_scheduler->cpucount5);
		ugeth->p_cpucount[6] = &(ugeth->p_scheduler->cpucount6);
		ugeth->p_cpucount[7] = &(ugeth->p_scheduler->cpucount7);
	}

	/* TxRMON_PTR (statistics) */
	if (ug_info->
	    statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) {
		ugeth->tx_fw_statistics_pram_offset =
		    qe_muram_alloc(sizeof
				   (ucc_geth_tx_firmware_statistics_pram_t),
				   UCC_GETH_TX_STATISTICS_ALIGNMENT);
		if (IS_MURAM_ERR(ugeth->tx_fw_statistics_pram_offset)) {
			ugeth_err
			    ("%s: Can not allocate DPRAM memory for"
			     " p_tx_fw_statistics_pram.", __FUNCTION__);
			ucc_geth_memclean(ugeth);
			return -ENOMEM;
		}
		ugeth->p_tx_fw_statistics_pram =
		    (ucc_geth_tx_firmware_statistics_pram_t *)
		    qe_muram_addr(ugeth->tx_fw_statistics_pram_offset);
		/* Zero out p_tx_fw_statistics_pram */
		memset(ugeth->p_tx_fw_statistics_pram,
		       0, sizeof(ucc_geth_tx_firmware_statistics_pram_t));
	}

	/* temoder */
	/* Already has speed set */

	if (ug_info->numQueuesTx > 1)
		temoder |= TEMODER_SCHEDULER_ENABLE;
	if (ug_info->ipCheckSumGenerate)
		temoder |= TEMODER_IP_CHECKSUM_GENERATE;
	temoder |= ((ug_info->numQueuesTx - 1) << TEMODER_NUM_OF_QUEUES_SHIFT);
	out_be16(&ugeth->p_tx_glbl_pram->temoder, temoder);

	test = in_be16(&ugeth->p_tx_glbl_pram->temoder);

	/* Function code register value to be used later */
	function_code = QE_BMR_BYTE_ORDER_BO_MOT | UCC_FAST_FUNCTION_CODE_GBL;
	/* Required for QE */

	/* function code register */
	out_be32(&ugeth->p_tx_glbl_pram->tstate, ((u32) function_code) << 24);
	/* Rx global PRAM */
	/* Allocate global rx parameter RAM page */
	ugeth->rx_glbl_pram_offset =
	    qe_muram_alloc(sizeof(ucc_geth_rx_global_pram_t),
			   UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT);
	if (IS_MURAM_ERR(ugeth->rx_glbl_pram_offset)) {
		ugeth_err
		    ("%s: Can not allocate DPRAM memory for p_rx_glbl_pram.",
		     __FUNCTION__);
		ucc_geth_memclean(ugeth);
		return -ENOMEM;
	}
	ugeth->p_rx_glbl_pram =
	    (ucc_geth_rx_global_pram_t *) qe_muram_addr(ugeth->
							rx_glbl_pram_offset);
	/* Zero out p_rx_glbl_pram */
	memset(ugeth->p_rx_glbl_pram, 0, sizeof(ucc_geth_rx_global_pram_t));

	/* Fill global PRAM */

	/* RQPTR */
	/* Size varies with number of Rx threads */
	ugeth->thread_dat_rx_offset =
	    qe_muram_alloc(numThreadsRxNumerical *
			   sizeof(ucc_geth_thread_data_rx_t),
			   UCC_GETH_THREAD_DATA_ALIGNMENT);
	if (IS_MURAM_ERR(ugeth->thread_dat_rx_offset)) {
		ugeth_err
		    ("%s: Can not allocate DPRAM memory for p_thread_data_rx.",
		     __FUNCTION__);
		ucc_geth_memclean(ugeth);
		return -ENOMEM;
	}

	ugeth->p_thread_data_rx =
	    (ucc_geth_thread_data_rx_t *) qe_muram_addr(ugeth->
							thread_dat_rx_offset);
	out_be32(&ugeth->p_rx_glbl_pram->rqptr, ugeth->thread_dat_rx_offset);

	/* typeorlen */
	out_be16(&ugeth->p_rx_glbl_pram->typeorlen, ug_info->typeorlen);

	/* rxrmonbaseptr (statistics) */
	if (ug_info->
	    statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) {
		ugeth->rx_fw_statistics_pram_offset =
		    qe_muram_alloc(sizeof
				   (ucc_geth_rx_firmware_statistics_pram_t),
				   UCC_GETH_RX_STATISTICS_ALIGNMENT);
		if (IS_MURAM_ERR(ugeth->rx_fw_statistics_pram_offset)) {
			ugeth_err
			    ("%s: Can not allocate DPRAM memory for"
			     " p_rx_fw_statistics_pram.", __FUNCTION__);
			ucc_geth_memclean(ugeth);
			return -ENOMEM;
		}
		ugeth->p_rx_fw_statistics_pram =
		    (ucc_geth_rx_firmware_statistics_pram_t *)
		    qe_muram_addr(ugeth->rx_fw_statistics_pram_offset);
		/* Zero out p_rx_fw_statistics_pram */
		memset(ugeth->p_rx_fw_statistics_pram, 0,
		       sizeof(ucc_geth_rx_firmware_statistics_pram_t));
	}

	/* intCoalescingPtr */

	/* Size varies with number of Rx queues */
	ugeth->rx_irq_coalescing_tbl_offset =
	    qe_muram_alloc(ug_info->numQueuesRx *
			   sizeof(ucc_geth_rx_interrupt_coalescing_entry_t),
			   UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT);
	if (IS_MURAM_ERR(ugeth->rx_irq_coalescing_tbl_offset)) {
		ugeth_err
		    ("%s: Can not allocate DPRAM memory for"
		     " p_rx_irq_coalescing_tbl.", __FUNCTION__);
		ucc_geth_memclean(ugeth);
		return -ENOMEM;
	}

	ugeth->p_rx_irq_coalescing_tbl =
	    (ucc_geth_rx_interrupt_coalescing_table_t *)
	    qe_muram_addr(ugeth->rx_irq_coalescing_tbl_offset);
	out_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr,
		 ugeth->rx_irq_coalescing_tbl_offset);

	/* Fill interrupt coalescing table */
	for (i = 0; i < ug_info->numQueuesRx; i++) {
		out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].
			 interruptcoalescingmaxvalue,
			 ug_info->interruptcoalescingmaxvalue[i]);
		out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].
			 interruptcoalescingcounter,
			 ug_info->interruptcoalescingmaxvalue[i]);
	}
	/* MRBLR */
	init_max_rx_buff_len(uf_info->max_rx_buf_length,
			     &ugeth->p_rx_glbl_pram->mrblr);
	/* MFLR */
	out_be16(&ugeth->p_rx_glbl_pram->mflr, ug_info->maxFrameLength);
	/* MINFLR */
	init_min_frame_len(ug_info->minFrameLength,
			   &ugeth->p_rx_glbl_pram->minflr,
			   &ugeth->p_rx_glbl_pram->mrblr);
	/* MAXD1 */
	out_be16(&ugeth->p_rx_glbl_pram->maxd1, ug_info->maxD1Length);
	/* MAXD2 */
	out_be16(&ugeth->p_rx_glbl_pram->maxd2, ug_info->maxD2Length);

	/* l2qt */
	l2qt = 0;
	for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++)
		l2qt |= (ug_info->l2qt[i] << (28 - 4 * i));
	out_be32(&ugeth->p_rx_glbl_pram->l2qt, l2qt);

	/* l3qt - 8 priorities are packed per 32-bit word */
	for (j = 0; j < UCC_GETH_IP_PRIORITY_MAX; j += 8) {
		l3qt = 0;
		for (i = 0; i < 8; i++)
			l3qt |= (ug_info->l3qt[j + i] << (28 - 4 * i));
		out_be32(&ugeth->p_rx_glbl_pram->l3qt[j / 8], l3qt);
	}

	/* vlantype */
	out_be16(&ugeth->p_rx_glbl_pram->vlantype, ug_info->vlantype);

	/* vlantci */
	out_be16(&ugeth->p_rx_glbl_pram->vlantci, ug_info->vlantci);

	/* ecamptr */
	out_be32(&ugeth->p_rx_glbl_pram->ecamptr, ug_info->ecamptr);
	/* RBDQPTR */
	/* Size varies with number of Rx queues */
	ugeth->rx_bd_qs_tbl_offset =
	    qe_muram_alloc(ug_info->numQueuesRx *
			   (sizeof(ucc_geth_rx_bd_queues_entry_t) +
			    sizeof(ucc_geth_rx_prefetched_bds_t)),
			   UCC_GETH_RX_BD_QUEUES_ALIGNMENT);
	if (IS_MURAM_ERR(ugeth->rx_bd_qs_tbl_offset)) {
		ugeth_err
		    ("%s: Can not allocate DPRAM memory for p_rx_bd_qs_tbl.",
		     __FUNCTION__);
		ucc_geth_memclean(ugeth);
		return -ENOMEM;
	}

	ugeth->p_rx_bd_qs_tbl =
	    (ucc_geth_rx_bd_queues_entry_t *) qe_muram_addr(ugeth->
							rx_bd_qs_tbl_offset);
	out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset);
	/* Zero out p_rx_bd_qs_tbl */
	memset(ugeth->p_rx_bd_qs_tbl,
	       0,
	       ug_info->numQueuesRx * (sizeof(ucc_geth_rx_bd_queues_entry_t) +
				       sizeof(ucc_geth_rx_prefetched_bds_t)));

	/* Setup the table */
	/* Assume BD rings are already established */
	for (i = 0; i < ug_info->numQueuesRx; i++) {
		if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
			out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
				 (u32) virt_to_phys(ugeth->p_rx_bd_ring[i]));
		} else if (ugeth->ug_info->uf_info.bd_mem_part ==
			   MEM_PART_MURAM) {
			out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
				 (u32) immrbar_virt_to_phys(ugeth->
							    p_rx_bd_ring[i]));
		}
		/* rest of fields handled by QE */
	}
	/* remoder */
	/* Already has speed set */

	if (ugeth->rx_extended_features)
		remoder |= REMODER_RX_EXTENDED_FEATURES;
	if (ug_info->rxExtendedFiltering)
		remoder |= REMODER_RX_EXTENDED_FILTERING;
	if (ug_info->dynamicMaxFrameLength)
		remoder |= REMODER_DYNAMIC_MAX_FRAME_LENGTH;
	if (ug_info->dynamicMinFrameLength)
		remoder |= REMODER_DYNAMIC_MIN_FRAME_LENGTH;
	remoder |=
	    ug_info->vlanOperationTagged << REMODER_VLAN_OPERATION_TAGGED_SHIFT;
	remoder |=
	    ug_info->
	    vlanOperationNonTagged << REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT;
	remoder |= ug_info->rxQoSMode << REMODER_RX_QOS_MODE_SHIFT;
	remoder |= ((ug_info->numQueuesRx - 1) << REMODER_NUM_OF_QUEUES_SHIFT);
	if (ug_info->ipCheckSumCheck)
		remoder |= REMODER_IP_CHECKSUM_CHECK;
	if (ug_info->ipAddressAlignment)
		remoder |= REMODER_IP_ADDRESS_ALIGNMENT;
	out_be32(&ugeth->p_rx_glbl_pram->remoder, remoder);

	/* Note that this function must be called */
	/* ONLY AFTER p_tx_fw_statistics_pram */
	/* and p_rx_fw_statistics_pram are allocated ! */
	init_firmware_statistics_gathering_mode((ug_info->
		statisticsMode &
		UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX),
		(ug_info->statisticsMode &
		 UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX),
		&ugeth->p_tx_glbl_pram->txrmonbaseptr,
		ugeth->tx_fw_statistics_pram_offset,
		&ugeth->p_rx_glbl_pram->rxrmonbaseptr,
		ugeth->rx_fw_statistics_pram_offset,
		&ugeth->p_tx_glbl_pram->temoder,
		&ugeth->p_rx_glbl_pram->remoder);

	/* function code register */
	ugeth->p_rx_glbl_pram->rstate = function_code;
	/* initialize extended filtering */
	if (ug_info->rxExtendedFiltering) {
		if (!ug_info->extendedFilteringChainPointer) {
			ugeth_err("%s: Null Extended Filtering Chain Pointer.",
				  __FUNCTION__);
			ucc_geth_memclean(ugeth);
			return -EINVAL;
		}

		/* Allocate memory for extended filtering Mode Global
		   Parameters */
		ugeth->exf_glbl_param_offset =
		    qe_muram_alloc(sizeof(ucc_geth_exf_global_pram_t),
		     UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT);
		if (IS_MURAM_ERR(ugeth->exf_glbl_param_offset)) {
			ugeth_err
			    ("%s: Can not allocate DPRAM memory for"
			     " p_exf_glbl_param.", __FUNCTION__);
			ucc_geth_memclean(ugeth);
			return -ENOMEM;
		}

		ugeth->p_exf_glbl_param =
		    (ucc_geth_exf_global_pram_t *) qe_muram_addr(ugeth->
						exf_glbl_param_offset);
		out_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam,
			 ugeth->exf_glbl_param_offset);
		out_be32(&ugeth->p_exf_glbl_param->l2pcdptr,
			 (u32) ug_info->extendedFilteringChainPointer);

	} else {		/* initialize 82xx style address filtering */

		/* Init individual address recognition registers to disabled */

		for (j = 0; j < NUM_OF_PADDRS; j++)
			ugeth_82xx_filtering_clear_addr_in_paddr(ugeth, (u8) j);

		/* Create CQs for hash tables */
		if (ug_info->maxGroupAddrInHash > 0) {
			INIT_LIST_HEAD(&ugeth->group_hash_q);
		}
		if (ug_info->maxIndAddrInHash > 0) {
			INIT_LIST_HEAD(&ugeth->ind_hash_q);
		}
		p_82xx_addr_filt =
		    (ucc_geth_82xx_address_filtering_pram_t *) ugeth->
		    p_rx_glbl_pram->addressfiltering;

		ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
			ENET_ADDR_TYPE_GROUP);
		ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
			ENET_ADDR_TYPE_INDIVIDUAL);
	}
	/*
	 * Initialize UCC at QE level
	 */

	command = QE_INIT_TX_RX;

	/* Allocate shadow InitEnet command parameter structure.
	 * This is needed because after the InitEnet command is executed,
	 * the structure in DPRAM is released, because DPRAM is a premium
	 * resource.
	 * This shadow structure keeps a copy of what was done so that the
	 * allocated resources can be released when the channel is freed.
	 */
	if (!(ugeth->p_init_enet_param_shadow =
	      (ucc_geth_init_pram_t *) kmalloc(sizeof(ucc_geth_init_pram_t),
					       GFP_KERNEL))) {
		ugeth_err
		    ("%s: Can not allocate memory for"
		     " p_UccInitEnetParamShadows.", __FUNCTION__);
		ucc_geth_memclean(ugeth);
		return -ENOMEM;
	}
	/* Zero out *p_init_enet_param_shadow */
	memset((char *)ugeth->p_init_enet_param_shadow,
	       0, sizeof(ucc_geth_init_pram_t));

	/* Fill shadow InitEnet command parameter structure */

	ugeth->p_init_enet_param_shadow->resinit1 =
	    ENET_INIT_PARAM_MAGIC_RES_INIT1;
	ugeth->p_init_enet_param_shadow->resinit2 =
	    ENET_INIT_PARAM_MAGIC_RES_INIT2;
	ugeth->p_init_enet_param_shadow->resinit3 =
	    ENET_INIT_PARAM_MAGIC_RES_INIT3;
	ugeth->p_init_enet_param_shadow->resinit4 =
	    ENET_INIT_PARAM_MAGIC_RES_INIT4;
	ugeth->p_init_enet_param_shadow->resinit5 =
	    ENET_INIT_PARAM_MAGIC_RES_INIT5;
	ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
	    ((u32) ug_info->numThreadsRx) << ENET_INIT_PARAM_RGF_SHIFT;
	ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
	    ((u32) ug_info->numThreadsTx) << ENET_INIT_PARAM_TGF_SHIFT;

	ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
	    ugeth->rx_glbl_pram_offset | ug_info->riscRx;
	if ((ug_info->largestexternallookupkeysize !=
	     QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE)
	    && (ug_info->largestexternallookupkeysize !=
		QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
	    && (ug_info->largestexternallookupkeysize !=
		QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) {
		ugeth_err("%s: Invalid largest External Lookup Key Size.",
			  __FUNCTION__);
		ucc_geth_memclean(ugeth);
		return -EINVAL;
	}
	ugeth->p_init_enet_param_shadow->largestexternallookupkeysize =
	    ug_info->largestexternallookupkeysize;
	size = sizeof(ucc_geth_thread_rx_pram_t);
	if (ug_info->rxExtendedFiltering) {
		size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
		if (ug_info->largestexternallookupkeysize ==
		    QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
			size +=
			    THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
		if (ug_info->largestexternallookupkeysize ==
		    QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
			size +=
			    THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
	}
	if ((ret_val = fill_init_enet_entries(ugeth, &(ugeth->
	     p_init_enet_param_shadow->rxthread[0]),
	     (u8) (numThreadsRxNumerical + 1)
	     /* Rx needs one extra for terminator */
	     , size, UCC_GETH_THREAD_RX_PRAM_ALIGNMENT,
	     ug_info->riscRx, 1)) != 0) {
		ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
			  __FUNCTION__);
		ucc_geth_memclean(ugeth);
		return ret_val;
	}

	ugeth->p_init_enet_param_shadow->txglobal =
	    ugeth->tx_glbl_pram_offset | ug_info->riscTx;
	if ((ret_val =
	     fill_init_enet_entries(ugeth,
				    &(ugeth->p_init_enet_param_shadow->
				      txthread[0]), numThreadsTxNumerical,
				    sizeof(ucc_geth_thread_tx_pram_t),
				    UCC_GETH_THREAD_TX_PRAM_ALIGNMENT,
				    ug_info->riscTx, 0)) != 0) {
		ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
			  __FUNCTION__);
		ucc_geth_memclean(ugeth);
		return ret_val;
	}

	/* Load Rx bds with buffers */
	for (i = 0; i < ug_info->numQueuesRx; i++) {
		if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) {
			ugeth_err("%s: Can not fill Rx bds with buffers.",
				  __FUNCTION__);
			ucc_geth_memclean(ugeth);
			return ret_val;
		}
	}

	/* Allocate InitEnet command parameter structure */
	init_enet_pram_offset = qe_muram_alloc(sizeof(ucc_geth_init_pram_t), 4);
	if (IS_MURAM_ERR(init_enet_pram_offset)) {
		ugeth_err
		    ("%s: Can not allocate DPRAM memory for p_init_enet_pram.",
		     __FUNCTION__);
		ucc_geth_memclean(ugeth);
		return -ENOMEM;
	}
	p_init_enet_pram =
	    (ucc_geth_init_pram_t *) qe_muram_addr(init_enet_pram_offset);

	/* Copy shadow InitEnet command parameter structure into PRAM */
	p_init_enet_pram->resinit1 = ugeth->p_init_enet_param_shadow->resinit1;
	p_init_enet_pram->resinit2 = ugeth->p_init_enet_param_shadow->resinit2;
	p_init_enet_pram->resinit3 = ugeth->p_init_enet_param_shadow->resinit3;
	p_init_enet_pram->resinit4 = ugeth->p_init_enet_param_shadow->resinit4;
	out_be16(&p_init_enet_pram->resinit5,
		 ugeth->p_init_enet_param_shadow->resinit5);
	p_init_enet_pram->largestexternallookupkeysize =
	    ugeth->p_init_enet_param_shadow->largestexternallookupkeysize;
	out_be32(&p_init_enet_pram->rgftgfrxglobal,
		 ugeth->p_init_enet_param_shadow->rgftgfrxglobal);
	for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_RX; i++)
		out_be32(&p_init_enet_pram->rxthread[i],
			 ugeth->p_init_enet_param_shadow->rxthread[i]);
	out_be32(&p_init_enet_pram->txglobal,
		 ugeth->p_init_enet_param_shadow->txglobal);
	for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_TX; i++)
		out_be32(&p_init_enet_pram->txthread[i],
			 ugeth->p_init_enet_param_shadow->txthread[i]);

	/* Issue QE command */
	cecr_subblock =
	    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
	qe_issue_cmd(command, cecr_subblock, (u8) QE_CR_PROTOCOL_ETHERNET,
		     init_enet_pram_offset);

	/* Free InitEnet command parameter */
	qe_muram_free(init_enet_pram_offset);

	return 0;
}
/* returns a net_device_stats structure pointer */
static struct net_device_stats *ucc_geth_get_stats(struct net_device *dev)
{
	ucc_geth_private_t *ugeth = netdev_priv(dev);

	return &(ugeth->stats);
}
/* ucc_geth_timeout gets called when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem. */
static void ucc_geth_timeout(struct net_device *dev)
{
	ucc_geth_private_t *ugeth = netdev_priv(dev);

	ugeth_vdbg("%s: IN", __FUNCTION__);

	ugeth->stats.tx_errors++;

	ugeth_dump_regs(ugeth);

	if (dev->flags & IFF_UP) {
		ucc_geth_stop(ugeth);
		ucc_geth_startup(ugeth);
	}

	netif_schedule(dev);
}
/* This is called by the kernel when a frame is ready for transmission. */
/* It is pointed to by the dev->hard_start_xmit function pointer */
static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	ucc_geth_private_t *ugeth = netdev_priv(dev);
	u8 *bd;			/* BD pointer */
	u32 bd_status;
	u8 txQ = 0;

	ugeth_vdbg("%s: IN", __FUNCTION__);

	spin_lock_irq(&ugeth->lock);

	ugeth->stats.tx_bytes += skb->len;

	/* Start from the next BD that should be filled */
	bd = ugeth->txBd[txQ];
	bd_status = BD_STATUS_AND_LENGTH(bd);
	/* Save the skb pointer so we can free it later */
	ugeth->tx_skbuff[txQ][ugeth->skb_curtx[txQ]] = skb;

	/* Update the current skb pointer (wrapping if this was the last) */
	ugeth->skb_curtx[txQ] =
	    (ugeth->skb_curtx[txQ] +
	     1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);

	/* set up the buffer descriptor */
	BD_BUFFER_SET(bd,
		      dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE));

	/* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */

	bd_status = (bd_status & T_W) | T_R | T_I | T_L | skb->len;

	BD_STATUS_AND_LENGTH_SET(bd, bd_status);

	dev->trans_start = jiffies;

	/* Move to next BD in the ring */
	if (!(bd_status & T_W))
		ugeth->txBd[txQ] = bd + UCC_GETH_SIZE_OF_BD;
	else
		ugeth->txBd[txQ] = ugeth->p_tx_bd_ring[txQ];

	/* If the next BD still needs to be cleaned up, then the bds
	   are full.  We need to tell the kernel to stop sending us stuff. */
	if (bd == ugeth->confBd[txQ]) {
		if (!netif_queue_stopped(dev))
			netif_stop_queue(dev);
	}

	if (ugeth->p_scheduler) {
		ugeth->cpucount[txQ]++;
		/* Indicate to QE that there are more Tx bds ready for
		   transmission */
		/* This is done by writing a running counter of the bd
		   count to the scheduler PRAM. */
		out_be16(ugeth->p_cpucount[txQ], ugeth->cpucount[txQ]);
	}

	spin_unlock_irq(&ugeth->lock);

	return 0;
}
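
/* BD ownership protocol: ucc_geth_start_xmit() hands a descriptor to the
 * QE by setting T_R (plus T_I, T_L and the length) after the buffer has
 * been DMA mapped; the QE clears T_R on completion, which is what
 * ucc_geth_tx() polls when reclaiming buffers.
 */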
static int ucc_geth_rx(ucc_geth_private_t *ugeth, u8 rxQ, int rx_work_limit)
{
	struct sk_buff *skb;
	u8 *bd;
	u16 length, howmany = 0;
	u32 bd_status;
	u8 *bdBuffer;

	ugeth_vdbg("%s: IN", __FUNCTION__);

	spin_lock(&ugeth->lock);
	/* collect received buffers */
	bd = ugeth->rxBd[rxQ];

	bd_status = BD_STATUS_AND_LENGTH(bd);

	/* while there are received buffers and BD is full (~R_E) */
	while (!((bd_status & (R_E)) || (--rx_work_limit < 0))) {
		bdBuffer = (u8 *) BD_BUFFER(bd);
		length = (u16) ((bd_status & BD_LENGTH_MASK) - 4);
		skb = ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]];

		/* determine whether buffer is first, last, first and last
		   (single buffer frame) or middle (not first and not last) */
		if (!skb ||
		    (!(bd_status & (R_F | R_L))) ||
		    (bd_status & R_ERRORS_FATAL)) {
			ugeth_vdbg("%s, %d: ERROR!!! skb - 0x%08x",
				   __FUNCTION__, __LINE__, (u32) skb);
			if (skb)
				dev_kfree_skb_any(skb);

			ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL;
			ugeth->stats.rx_dropped++;
		} else {
			ugeth->stats.rx_packets++;
			howmany++;

			/* Prep the skb for the packet */
			skb_put(skb, length);

			/* Tell the skb what kind of packet this is */
			skb->protocol = eth_type_trans(skb, ugeth->dev);

			ugeth->stats.rx_bytes += length;
			/* Send the packet up the stack */
#ifdef CONFIG_UGETH_NAPI
			netif_receive_skb(skb);
#else
			netif_rx(skb);
#endif				/* CONFIG_UGETH_NAPI */
		}

		ugeth->dev->last_rx = jiffies;

		/* Recycle the BD with a fresh buffer before advancing.
		   The single unlock after the loop also covers this
		   break path. */
		skb = get_new_skb(ugeth, bd);
		if (!skb) {
			ugeth_warn("%s: No Rx Data Buffer", __FUNCTION__);
			ugeth->stats.rx_dropped++;
			break;
		}

		ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = skb;

		/* update to point at the next skb */
		ugeth->skb_currx[rxQ] =
		    (ugeth->skb_currx[rxQ] +
		     1) & RX_RING_MOD_MASK(ugeth->ug_info->bdRingLenRx[rxQ]);

		if (bd_status & R_W)
			bd = ugeth->p_rx_bd_ring[rxQ];
		else
			bd += UCC_GETH_SIZE_OF_BD;

		bd_status = BD_STATUS_AND_LENGTH(bd);
	}

	ugeth->rxBd[rxQ] = bd;
	spin_unlock(&ugeth->lock);
	return howmany;
}
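
/* Reclaim transmitted buffers: walk confBd until a descriptor still owned
 * by the QE (T_R set) or the next-to-fill slot is reached, freeing each
 * skb and waking the queue if it had been stopped for lack of BDs.
 */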
static int ucc_geth_tx(struct net_device *dev, u8 txQ)
{
	/* Start from the next BD that should be filled */
	ucc_geth_private_t *ugeth = netdev_priv(dev);
	u8 *bd;			/* BD pointer */
	u32 bd_status;

	bd = ugeth->confBd[txQ];
	bd_status = BD_STATUS_AND_LENGTH(bd);

	/* Normal processing. */
	while ((bd_status & T_R) == 0) {
		/* BD contains already transmitted buffer.   */
		/* Handle the transmitted buffer and release */
		/* the BD to be used with the current frame  */

		if ((bd == ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0))
			break;

		ugeth->stats.tx_packets++;

		/* Free the sk buffer associated with this TxBD */
		dev_kfree_skb_irq(ugeth->
				  tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]]);
		ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL;
		ugeth->skb_dirtytx[txQ] =
		    (ugeth->skb_dirtytx[txQ] +
		     1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);

		/* We freed a buffer, so now we can restart transmission */
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);

		/* Advance the confirmation BD pointer */
		if (!(bd_status & T_W))
			ugeth->confBd[txQ] += UCC_GETH_SIZE_OF_BD;
		else
			ugeth->confBd[txQ] = ugeth->p_tx_bd_ring[txQ];

		bd = ugeth->confBd[txQ];
		bd_status = BD_STATUS_AND_LENGTH(bd);
	}

	return 0;
}
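
/* Old-style NAPI: the poll callback consumes up to min(*budget, dev->quota)
 * receive buffers, decrements both counters by the amount processed, and
 * returns nonzero while more work remains so the kernel keeps polling.
 */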
#ifdef CONFIG_UGETH_NAPI
static int ucc_geth_poll(struct net_device *dev, int *budget)
{
	ucc_geth_private_t *ugeth = netdev_priv(dev);
	int howmany;
	int rx_work_limit = *budget;
	u8 rxQ = 0;

	if (rx_work_limit > dev->quota)
		rx_work_limit = dev->quota;

	howmany = ucc_geth_rx(ugeth, rxQ, rx_work_limit);

	dev->quota -= howmany;
	rx_work_limit -= howmany;
	*budget -= howmany;

	if (rx_work_limit >= 0)
		netif_rx_complete(dev);

	return (rx_work_limit < 0) ? 1 : 0;
}
#endif				/* CONFIG_UGETH_NAPI */
static irqreturn_t ucc_geth_irq_handler(int irq, void *info,
					struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *)info;
	ucc_geth_private_t *ugeth = netdev_priv(dev);
	ucc_fast_private_t *uccf;
	ucc_geth_info_t *ug_info;
	register u32 ucce = 0;
	register u32 bit_mask = UCCE_RXBF_SINGLE_MASK;
	register u32 tx_mask = UCCE_TXBF_SINGLE_MASK;
	register u8 i;

	ugeth_vdbg("%s: IN", __FUNCTION__);

	if (!ugeth)
		return IRQ_NONE;

	uccf = ugeth->uccf;
	ug_info = ugeth->ug_info;

	do {
		ucce |= (u32) (in_be32(uccf->p_ucce) & in_be32(uccf->p_uccm));

		/* clear event bits for next time */
		/* Side effect here is to mask ucce variable
		   for future processing below. */
		out_be32(uccf->p_ucce, ucce);	/* Clear with ones,
						   but only bits in UCCM */

		/* We ignore Tx interrupts because Tx confirmation is
		   done inside Tx routine */

		for (i = 0; i < ug_info->numQueuesRx; i++) {
			if (ucce & bit_mask)
				ucc_geth_rx(ugeth, i,
					    (int)ugeth->ug_info->
					    bdRingLenRx[i]);
			ucce &= ~bit_mask;
			bit_mask <<= 1;
		}

		for (i = 0; i < ug_info->numQueuesTx; i++) {
			if (ucce & tx_mask)
				ucc_geth_tx(dev, i);
			ucce &= ~tx_mask;
			tx_mask <<= 1;
		}

		/* Exceptions */
		if (ucce & UCCE_BSY) {
			ugeth_vdbg("Got BUSY irq!!!!");
			ugeth->stats.rx_errors++;
			ucce &= ~UCCE_BSY;
		}
		if (ucce & UCCE_OTHER) {
			ugeth_vdbg("Got frame with error (ucce - 0x%08x)!!!!",
				   ucce);
			ugeth->stats.rx_errors++;
			ucce &= ~UCCE_OTHER;
		}
	}
	while (ucce);

	return IRQ_HANDLED;
}
static irqreturn_t phy_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *)dev_id;
	ucc_geth_private_t *ugeth = netdev_priv(dev);

	ugeth_vdbg("%s: IN", __FUNCTION__);

	/* Clear the interrupt */
	mii_clear_phy_interrupt(ugeth->mii_info);

	/* Disable PHY interrupts */
	mii_configure_phy_interrupt(ugeth->mii_info, MII_INTERRUPT_DISABLED);

	/* Schedule the phy change */
	schedule_work(&ugeth->tq);

	return IRQ_HANDLED;
}
/* Scheduled by the phy_interrupt/timer to handle PHY changes */
static void ugeth_phy_change(void *data)
{
	struct net_device *dev = (struct net_device *)data;
	ucc_geth_private_t *ugeth = netdev_priv(dev);
	ucc_geth_t *ug_regs;
	int result = 0;

	ugeth_vdbg("%s: IN", __FUNCTION__);

	ug_regs = ugeth->ug_regs;

	/* Delay to give the PHY a chance to change the
	 * register state */
	msleep(1);

	/* Update the link, speed, duplex */
	result = ugeth->mii_info->phyinfo->read_status(ugeth->mii_info);

	/* Adjust the known status as long as the link
	 * isn't still coming up */
	if ((0 == result) || (ugeth->mii_info->link == 0))
		adjust_link(dev);

	/* Reenable interrupts, if needed */
	if (ugeth->ug_info->board_flags & FSL_UGETH_BRD_HAS_PHY_INTR)
		mii_configure_phy_interrupt(ugeth->mii_info,
					    MII_INTERRUPT_ENABLED);
}
/* Called every so often on systems that don't interrupt
 * the core for PHY changes */
static void ugeth_phy_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	ucc_geth_private_t *ugeth = netdev_priv(dev);

	schedule_work(&ugeth->tq);

	mod_timer(&ugeth->phy_info_timer, jiffies + PHY_CHANGE_TIME * HZ);
}
/* Keep trying aneg for some time
 * If, after UGETH_AN_TIMEOUT seconds, it has not
 * finished, we switch to forced.
 * Either way, once the process has completed, we either
 * request the interrupt, or switch the timer over to
 * using ugeth_phy_timer to check status */
static void ugeth_phy_startup_timer(unsigned long data)
{
	struct ugeth_mii_info *mii_info = (struct ugeth_mii_info *)data;
	ucc_geth_private_t *ugeth = netdev_priv(mii_info->dev);
	static int secondary = UGETH_AN_TIMEOUT;
	int result;

	/* Configure the Auto-negotiation */
	result = mii_info->phyinfo->config_aneg(mii_info);

	/* If autonegotiation failed to start, and
	 * we haven't timed out, reset the timer, and return */
	if (result && secondary--) {
		mod_timer(&ugeth->phy_info_timer, jiffies + HZ);
		return;
	} else if (result) {
		/* Couldn't start autonegotiation.
		 * Try switching to forced */
		mii_info->autoneg = 0;
		result = mii_info->phyinfo->config_aneg(mii_info);

		/* Forcing failed!  Give up */
		if (result) {
			ugeth_err("%s: Forcing failed!", mii_info->dev->name);
			return;
		}
	}

	/* Kill the timer so it can be restarted */
	del_timer_sync(&ugeth->phy_info_timer);

	/* Grab the PHY interrupt, if necessary/possible */
	if (ugeth->ug_info->board_flags & FSL_UGETH_BRD_HAS_PHY_INTR) {
		if (request_irq(ugeth->ug_info->phy_interrupt,
				phy_interrupt,
				SA_SHIRQ, "phy_interrupt", mii_info->dev) < 0) {
			ugeth_err("%s: Can't get IRQ %d (PHY)",
				  mii_info->dev->name,
				  ugeth->ug_info->phy_interrupt);
		} else {
			mii_configure_phy_interrupt(ugeth->mii_info,
						    MII_INTERRUPT_ENABLED);
			return;
		}
	}

	/* Start the timer again, this time in order to
	 * handle a change in status */
	init_timer(&ugeth->phy_info_timer);
	ugeth->phy_info_timer.function = &ugeth_phy_timer;
	ugeth->phy_info_timer.data = (unsigned long)mii_info->dev;
	mod_timer(&ugeth->phy_info_timer, jiffies + PHY_CHANGE_TIME * HZ);
}
/* Called when something needs to use the ethernet device */
/* Returns 0 for success. */
static int ucc_geth_open(struct net_device *dev)
{
	ucc_geth_private_t *ugeth = netdev_priv(dev);
	int err;

	ugeth_vdbg("%s: IN", __FUNCTION__);

	/* Test station address */
	if (dev->dev_addr[0] & ENET_GROUP_ADDR) {
		ugeth_err("%s: Multicast address used for station address"
			  " - is this what you wanted?", __FUNCTION__);
		return -EINVAL;
	}

	err = ucc_geth_startup(ugeth);
	if (err) {
		ugeth_err("%s: Cannot configure net device, aborting.",
			  dev->name);
		return err;
	}

	err = adjust_enet_interface(ugeth);
	if (err) {
		ugeth_err("%s: Cannot configure net device, aborting.",
			  dev->name);
		return err;
	}

	/*       Set MACSTNADDR1, MACSTNADDR2                */
	/* For more details see the hardware spec.           */
	init_mac_station_addr_regs(dev->dev_addr[0],
				   dev->dev_addr[1],
				   dev->dev_addr[2],
				   dev->dev_addr[3],
				   dev->dev_addr[4],
				   dev->dev_addr[5],
				   &ugeth->ug_regs->macstnaddr1,
				   &ugeth->ug_regs->macstnaddr2);

	err = init_phy(dev);
	if (err) {
		ugeth_err("%s: Cannot initialize PHY, aborting.", dev->name);
		return err;
	}
#ifndef CONFIG_UGETH_NAPI
	err =
	    request_irq(ugeth->ug_info->uf_info.irq, ucc_geth_irq_handler, 0,
			"UCC Geth", dev);
	if (err) {
		ugeth_err("%s: Cannot get IRQ for net device, aborting.",
			  dev->name);
		ucc_geth_stop(ugeth);
		return err;
	}
#endif				/* CONFIG_UGETH_NAPI */

	/* Set up the PHY change work queue */
	INIT_WORK(&ugeth->tq, ugeth_phy_change, dev);

	init_timer(&ugeth->phy_info_timer);
	ugeth->phy_info_timer.function = &ugeth_phy_startup_timer;
	ugeth->phy_info_timer.data = (unsigned long)ugeth->mii_info;
	mod_timer(&ugeth->phy_info_timer, jiffies + HZ);

	err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
	if (err) {
		ugeth_err("%s: Cannot enable net device, aborting.", dev->name);
		ucc_geth_stop(ugeth);
		return err;
	}

	netif_start_queue(dev);

	return err;
}
/* Stops the kernel queue, and halts the controller */
static int ucc_geth_close(struct net_device *dev)
{
	ucc_geth_private_t *ugeth = netdev_priv(dev);

	ugeth_vdbg("%s: IN", __FUNCTION__);

	ucc_geth_stop(ugeth);

	/* Shutdown the PHY */
	if (ugeth->mii_info->phyinfo->close)
		ugeth->mii_info->phyinfo->close(ugeth->mii_info);

	kfree(ugeth->mii_info);

	netif_stop_queue(dev);

	return 0;
}
const struct ethtool_ops ucc_geth_ethtool_ops = { };
static int ucc_geth_probe(struct device *device)
{
	struct platform_device *pdev = to_platform_device(device);
	struct ucc_geth_platform_data *ugeth_pdata;
	struct net_device *dev = NULL;
	struct ucc_geth_private *ugeth = NULL;
	struct ucc_geth_info *ug_info;
	int err;
	static int mii_mng_configured = 0;

	ugeth_vdbg("%s: IN", __FUNCTION__);

	ugeth_pdata = (struct ucc_geth_platform_data *)pdev->dev.platform_data;

	ug_info = &ugeth_info[pdev->id];
	if (ug_info == NULL) {
		ugeth_err("%s: [%d] Missing additional data!", __FUNCTION__,
			  pdev->id);
		return -ENODEV;
	}

	ug_info->uf_info.ucc_num = pdev->id;
	ug_info->uf_info.rx_clock = ugeth_pdata->rx_clock;
	ug_info->uf_info.tx_clock = ugeth_pdata->tx_clock;
	ug_info->uf_info.regs = ugeth_pdata->phy_reg_addr;
	ug_info->uf_info.irq = platform_get_irq(pdev, 0);
	ug_info->phy_address = ugeth_pdata->phy_id;
	ug_info->enet_interface = ugeth_pdata->phy_interface;
	ug_info->board_flags = ugeth_pdata->board_flags;
	ug_info->phy_interrupt = ugeth_pdata->phy_interrupt;

	printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d)\n",
	       ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs,
	       ug_info->uf_info.irq);

	if (!mii_mng_configured) {
		ucc_set_qe_mux_mii_mng(ug_info->uf_info.ucc_num);
		mii_mng_configured = 1;
	}

	/* Create an ethernet device instance */
	dev = alloc_etherdev(sizeof(*ugeth));

	if (dev == NULL)
		return -ENOMEM;

	ugeth = netdev_priv(dev);
	spin_lock_init(&ugeth->lock);

	dev_set_drvdata(device, dev);

	/* Set the dev->base_addr to the UCC reg region */
	dev->base_addr = (unsigned long)(ug_info->uf_info.regs);

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, device);

	/* Fill in the dev structure */
	dev->open = ucc_geth_open;
	dev->hard_start_xmit = ucc_geth_start_xmit;
	dev->tx_timeout = ucc_geth_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef CONFIG_UGETH_NAPI
	dev->poll = ucc_geth_poll;
	dev->weight = UCC_GETH_DEV_WEIGHT;
#endif				/* CONFIG_UGETH_NAPI */
	dev->stop = ucc_geth_close;
	dev->get_stats = ucc_geth_get_stats;
	/* dev->change_mtu = ucc_geth_change_mtu; */
	dev->set_multicast_list = ucc_geth_set_multi;
	dev->ethtool_ops = &ucc_geth_ethtool_ops;

	err = register_netdev(dev);
	if (err) {
		ugeth_err("%s: Cannot register net device, aborting.",
			  dev->name);
		free_netdev(dev);
		return err;
	}

	ugeth->ug_info = ug_info;
	ugeth->dev = dev;
	memcpy(dev->dev_addr, ugeth_pdata->mac_addr, 6);

	return 0;
}
static int ucc_geth_remove(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct ucc_geth_private *ugeth = netdev_priv(dev);

	dev_set_drvdata(device, NULL);
	ucc_geth_memclean(ugeth);
	free_netdev(dev);

	return 0;
}
/* Structure for a device driver */
static struct device_driver ucc_geth_driver = {
	.name = DRV_NAME,
	.bus = &platform_bus_type,
	.probe = ucc_geth_probe,
	.remove = ucc_geth_remove,
};

static int __init ucc_geth_init(void)
{
	int i;

	printk(KERN_INFO "ucc_geth: " DRV_DESC "\n");
	for (i = 0; i < 8; i++)
		memcpy(&(ugeth_info[i]), &ugeth_primary_info,
		       sizeof(ugeth_primary_info));

	return driver_register(&ucc_geth_driver);
}

static void __exit ucc_geth_exit(void)
{
	driver_unregister(&ucc_geth_driver);
}

module_init(ucc_geth_init);
module_exit(ucc_geth_exit);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION(DRV_DESC);
MODULE_LICENSE("GPL");