 * Copyright (C) Freescale Semiconductor, Inc. 2006. All rights reserved.
4 * Author: Shlomi Gridish <gridish@freescale.com>
5 * Li Yang <leoli@freescale.com>
8 * QE UCC Gigabit Ethernet Driver
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
15 #include <linux/kernel.h>
16 #include <linux/init.h>
17 #include <linux/errno.h>
18 #include <linux/slab.h>
19 #include <linux/stddef.h>
20 #include <linux/interrupt.h>
21 #include <linux/netdevice.h>
22 #include <linux/etherdevice.h>
23 #include <linux/skbuff.h>
24 #include <linux/spinlock.h>
26 #include <linux/ethtool.h>
27 #include <linux/delay.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/fsl_devices.h>
31 #include <linux/mii.h>
32 #include <linux/workqueue.h>
34 #include <asm/of_platform.h>
35 #include <asm/uaccess.h>
38 #include <asm/immap_qe.h>
41 #include <asm/ucc_fast.h>
44 #include "ucc_geth_phy.h"
#define DRV_DESC "QE UCC Gigabit Ethernet Controller version: Sept 11, 2006"
49 #define DRV_NAME "ucc_geth"
51 #define ugeth_printk(level, format, arg...) \
52 printk(level format "\n", ## arg)
54 #define ugeth_dbg(format, arg...) \
55 ugeth_printk(KERN_DEBUG , format , ## arg)
56 #define ugeth_err(format, arg...) \
57 ugeth_printk(KERN_ERR , format , ## arg)
58 #define ugeth_info(format, arg...) \
59 ugeth_printk(KERN_INFO , format , ## arg)
60 #define ugeth_warn(format, arg...) \
61 ugeth_printk(KERN_WARNING , format , ## arg)
63 #ifdef UGETH_VERBOSE_DEBUG
64 #define ugeth_vdbg ugeth_dbg
#else
#define ugeth_vdbg(fmt, args...) do { } while (0)
67 #endif /* UGETH_VERBOSE_DEBUG */
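/*
 * Usage sketch (added for illustration; not in the original source): all
 * four wrappers expand through ugeth_printk(), which appends the trailing
 * newline, so callers pass bare format strings:
 *
 *	ugeth_info("UCC%d Geth registers:", ucc_num);
 *	ugeth_err("%s: Can not get SNUM.", __FUNCTION__);
 */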
69 static DEFINE_SPINLOCK(ugeth_lock);
71 static struct ucc_geth_info ugeth_primary_info = {
73 .bd_mem_part = MEM_PART_SYSTEM,
74 .rtsm = UCC_FAST_SEND_IDLES_BETWEEN_FRAMES,
75 .max_rx_buf_length = 1536,
	/* FIXME: should be changed at run time for 1G and 100M */
77 #ifdef CONFIG_UGETH_HAS_GIGA
78 .urfs = UCC_GETH_URFS_GIGA_INIT,
79 .urfet = UCC_GETH_URFET_GIGA_INIT,
80 .urfset = UCC_GETH_URFSET_GIGA_INIT,
81 .utfs = UCC_GETH_UTFS_GIGA_INIT,
82 .utfet = UCC_GETH_UTFET_GIGA_INIT,
83 .utftt = UCC_GETH_UTFTT_GIGA_INIT,
#else
	.urfs = UCC_GETH_URFS_INIT,
86 .urfet = UCC_GETH_URFET_INIT,
87 .urfset = UCC_GETH_URFSET_INIT,
88 .utfs = UCC_GETH_UTFS_INIT,
89 .utfet = UCC_GETH_UTFET_INIT,
	.utftt = UCC_GETH_UTFTT_INIT,
#endif
93 .mode = UCC_FAST_PROTOCOL_MODE_ETHERNET,
94 .ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
95 .tenc = UCC_FAST_TX_ENCODING_NRZ,
96 .renc = UCC_FAST_RX_ENCODING_NRZ,
97 .tcrc = UCC_FAST_16_BIT_CRC,
98 .synl = UCC_FAST_SYNC_LEN_NOT_USED,
102 .extendedFilteringChainPointer = ((uint32_t) NULL),
	.typeorlen = 3072 /* 1536 */,
104 .nonBackToBackIfgPart1 = 0x40,
105 .nonBackToBackIfgPart2 = 0x60,
106 .miminumInterFrameGapEnforcement = 0x50,
107 .backToBackInterFrameGap = 0x60,
111 .strictpriorityq = 0xff,
112 .altBebTruncation = 0xa,
114 .maxRetransmission = 0xf,
115 .collisionWindow = 0x37,
116 .receiveFlowControl = 1,
117 .maxGroupAddrInHash = 4,
118 .maxIndAddrInHash = 4,
120 .maxFrameLength = 1518,
121 .minFrameLength = 64,
125 .ecamptr = ((uint32_t) NULL),
126 .eventRegMask = UCCE_OTHER,
127 .pausePeriod = 0xf000,
128 .interruptcoalescingmaxvalue = {1, 1, 1, 1, 1, 1, 1, 1},
149 .numStationAddresses = UCC_GETH_NUM_OF_STATION_ADDRESSES_1,
150 .largestexternallookupkeysize =
151 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE,
152 .statisticsMode = UCC_GETH_STATISTICS_GATHERING_MODE_NONE,
153 .vlanOperationTagged = UCC_GETH_VLAN_OPERATION_TAGGED_NOP,
154 .vlanOperationNonTagged = UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP,
155 .rxQoSMode = UCC_GETH_QOS_MODE_DEFAULT,
156 .aufc = UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE,
157 .padAndCrc = MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC,
158 .numThreadsTx = UCC_GETH_NUM_OF_THREADS_4,
159 .numThreadsRx = UCC_GETH_NUM_OF_THREADS_4,
160 .riscTx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
161 .riscRx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
164 static struct ucc_geth_info ugeth_info[8];
static void mem_disp(u8 *addr, int size)
{
	u8 *i;
170 int size16Aling = (size >> 4) << 4;
171 int size4Aling = (size >> 2) << 2;
176 for (i = addr; (u32) i < (u32) addr + size16Aling; i += 16)
		printk("0x%08x: %08x %08x %08x %08x\r\n",
		       (u32) i,
		       *((u32 *) (i)),
		       *((u32 *) (i + 4)),
		       *((u32 *) (i + 8)), *((u32 *) (i + 12)));
183 printk("0x%08x: ", (u32) i);
184 for (; (u32) i < (u32) addr + size4Aling; i += 4)
185 printk("%08x ", *((u32 *) (i)));
	for (; (u32) i < (u32) addr + size; i++)
		printk("%02x", *((u8 *) (i)));
	printk("\r\n");
}
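/*
 * Usage sketch (added): mem_disp() hex-dumps an arbitrary buffer, 16 bytes
 * per line, and is used by dump_bds()/dump_regs() below, e.g.:
 *
 *	mem_disp(ugeth->p_tx_bd_ring[i],
 *		 ugeth->ug_info->bdRingLenTx[i] * sizeof(struct qe_bd));
 */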
193 #ifdef CONFIG_UGETH_FILTERING
static void enqueue(struct list_head *node, struct list_head *lh)
{
	unsigned long flags;

198 spin_lock_irqsave(&ugeth_lock, flags);
199 list_add_tail(node, lh);
	spin_unlock_irqrestore(&ugeth_lock, flags);
}
202 #endif /* CONFIG_UGETH_FILTERING */
static struct list_head *dequeue(struct list_head *lh)
{
	unsigned long flags;

208 spin_lock_irqsave(&ugeth_lock, flags);
	if (!list_empty(lh)) {
		struct list_head *node = lh->next;

		list_del(node);
		spin_unlock_irqrestore(&ugeth_lock, flags);
		return node;
	} else {
		spin_unlock_irqrestore(&ugeth_lock, flags);
		return NULL;
	}
}
220 static int get_interface_details(enum enet_interface enet_interface,
221 enum enet_speed *speed,
225 int *tbi, int *limited_to_full_duplex)
227 /* Analyze enet_interface according to Interface Mode
228 Configuration table */
229 switch (enet_interface) {
231 *speed = ENET_SPEED_10BT;
234 *speed = ENET_SPEED_10BT;
239 *speed = ENET_SPEED_10BT;
242 *limited_to_full_duplex = 1;
245 *speed = ENET_SPEED_100BT;
248 *speed = ENET_SPEED_100BT;
252 *speed = ENET_SPEED_100BT;
254 *limited_to_full_duplex = 1;
257 *speed = ENET_SPEED_1000BT;
258 *limited_to_full_duplex = 1;
260 case ENET_1000_RGMII:
261 *speed = ENET_SPEED_1000BT;
263 *limited_to_full_duplex = 1;
266 *speed = ENET_SPEED_1000BT;
268 *limited_to_full_duplex = 1;
271 *speed = ENET_SPEED_1000BT;
274 *limited_to_full_duplex = 1;
284 static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth, u8 *bd)
286 struct sk_buff *skb = NULL;
288 skb = dev_alloc_skb(ugeth->ug_info->uf_info.max_rx_buf_length +
289 UCC_GETH_RX_DATA_BUF_ALIGNMENT);
	/* We need the data buffer to be aligned properly.  We will
	 * reserve as many bytes as needed to align the data properly
	 */
	skb_reserve(skb,
		    UCC_GETH_RX_DATA_BUF_ALIGNMENT -
		    (((unsigned)skb->data) & (UCC_GETH_RX_DATA_BUF_ALIGNMENT -
					      1)));
302 skb->dev = ugeth->dev;
	out_be32(&((struct qe_bd *)bd)->buf,
		 dma_map_single(NULL,
				skb->data,
				ugeth->ug_info->uf_info.max_rx_buf_length +
				UCC_GETH_RX_DATA_BUF_ALIGNMENT,
				DMA_FROM_DEVICE));
	out_be32((u32 *)bd, (R_E | R_I | (in_be32((u32 *)bd) & R_W)));

	return skb;
}
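/*
 * Worked example (added; assuming the usual alignment value of 64): with
 * UCC_GETH_RX_DATA_BUF_ALIGNMENT == 64 and skb->data ending in 0x34, the
 * skb_reserve() above skips 64 - (0x34 & 63) = 12 bytes, leaving skb->data
 * ending in 0x40 -- a 64-byte-aligned address the UCC can DMA into.
 */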
static int rx_bd_buffer_set(struct ucc_geth_private *ugeth, u8 rxQ)
{
	u8 *bd;
	u32 bd_status;
	struct sk_buff *skb;
	int i;

	bd = ugeth->p_rx_bd_ring[rxQ];
	i = 0;

	do {
327 bd_status = in_be32((u32*)bd);
328 skb = get_new_skb(ugeth, bd);
		if (!skb)	/* If we cannot allocate a data buffer,
				   abort.  Cleanup will be done elsewhere. */
			return -ENOMEM;
334 ugeth->rx_skbuff[rxQ][i] = skb;
336 /* advance the BD pointer */
337 bd += sizeof(struct qe_bd);
		i++;
	} while (!(bd_status & R_W));

	return 0;
}
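/*
 * Note (added): the do/while above walks the whole Rx ring without a
 * separate length check -- the final BD is recognized by its R_W (wrap)
 * bit, after which the hardware itself wraps back to p_rx_bd_ring[rxQ],
 * so seeding stops exactly at ring length.
 */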
344 static int fill_init_enet_entries(struct ucc_geth_private *ugeth,
345 volatile u32 *p_start,
348 u32 thread_alignment,
349 enum qe_risc_allocation risc,
350 int skip_page_for_first_entry)
352 u32 init_enet_offset;
356 for (i = 0; i < num_entries; i++) {
357 if ((snum = qe_get_snum()) < 0) {
358 ugeth_err("fill_init_enet_entries: Can not get SNUM.");
361 if ((i == 0) && skip_page_for_first_entry)
362 /* First entry of Rx does not have page */
363 init_enet_offset = 0;
366 qe_muram_alloc(thread_size, thread_alignment);
367 if (IS_MURAM_ERR(init_enet_offset)) {
369 ("fill_init_enet_entries: Can not allocate DPRAM memory.");
				qe_put_snum((u8) snum);
				return -ENOMEM;
			}
		}
		*(p_start++) =
		    ((u8) snum << ENET_INIT_PARAM_SNUM_SHIFT) | init_enet_offset
		    | risc;
	}

	return 0;
}
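/*
 * Layout note (added): each init-enet entry is one 32-bit word packing
 * three fields --
 *
 *	entry = (snum << ENET_INIT_PARAM_SNUM_SHIFT)	// QE serial number
 *	      | init_enet_offset			// MURAM offset of
 *							// the thread pram
 *	      | risc;					// RISC allocation
 *
 * return_init_enet_entries() and dump_init_enet_entries() below undo the
 * packing with ENET_INIT_PARAM_SNUM_MASK and ENET_INIT_PARAM_PTR_MASK.
 */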
382 static int return_init_enet_entries(struct ucc_geth_private *ugeth,
383 volatile u32 *p_start,
385 enum qe_risc_allocation risc,
386 int skip_page_for_first_entry)
388 u32 init_enet_offset;
392 for (i = 0; i < num_entries; i++) {
393 /* Check that this entry was actually valid --
394 needed in case failed in allocations */
395 if ((*p_start & ENET_INIT_PARAM_RISC_MASK) == risc) {
			snum =
			    (u32) (*p_start & ENET_INIT_PARAM_SNUM_MASK) >>
			    ENET_INIT_PARAM_SNUM_SHIFT;
399 qe_put_snum((u8) snum);
400 if (!((i == 0) && skip_page_for_first_entry)) {
401 /* First entry of Rx does not have page */
				init_enet_offset =
				    (u32) (*p_start &
					   ENET_INIT_PARAM_PTR_MASK);
405 qe_muram_free(init_enet_offset);
407 *(p_start++) = 0; /* Just for cosmetics */
415 static int dump_init_enet_entries(struct ucc_geth_private *ugeth,
416 volatile u32 *p_start,
419 enum qe_risc_allocation risc,
420 int skip_page_for_first_entry)
422 u32 init_enet_offset;
426 for (i = 0; i < num_entries; i++) {
427 /* Check that this entry was actually valid --
428 needed in case failed in allocations */
429 if ((*p_start & ENET_INIT_PARAM_RISC_MASK) == risc) {
			snum =
			    (u32) (*p_start & ENET_INIT_PARAM_SNUM_MASK) >>
			    ENET_INIT_PARAM_SNUM_SHIFT;
433 qe_put_snum((u8) snum);
434 if (!((i == 0) && skip_page_for_first_entry)) {
435 /* First entry of Rx does not have page */
				init_enet_offset =
				    (u32) (*p_start &
					   ENET_INIT_PARAM_PTR_MASK);
439 ugeth_info("Init enet entry %d:", i);
440 ugeth_info("Base address: 0x%08x",
442 qe_muram_addr(init_enet_offset));
443 mem_disp(qe_muram_addr(init_enet_offset),
454 #ifdef CONFIG_UGETH_FILTERING
455 static struct enet_addr_container *get_enet_addr_container(void)
457 struct enet_addr_container *enet_addr_cont;
459 /* allocate memory */
460 enet_addr_cont = kmalloc(sizeof(struct enet_addr_container), GFP_KERNEL);
461 if (!enet_addr_cont) {
462 ugeth_err("%s: No memory for enet_addr_container object.",
467 return enet_addr_cont;
469 #endif /* CONFIG_UGETH_FILTERING */
471 static void put_enet_addr_container(struct enet_addr_container *enet_addr_cont)
473 kfree(enet_addr_cont);
476 static void set_mac_addr(__be16 __iomem *reg, u8 *mac)
	out_be16(&reg[0], ((u16)mac[5] << 8) | mac[4]);
	out_be16(&reg[1], ((u16)mac[3] << 8) | mac[2]);
	out_be16(&reg[2], ((u16)mac[1] << 8) | mac[0]);
}
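/*
 * Worked example (added; sample address is hypothetical): for the MAC
 * 00:04:9f:01:02:03 (mac[0] = 0x00 ... mac[5] = 0x03) the three 16-bit
 * stores above produce reg[0] = 0x0302, reg[1] = 0x019f, reg[2] = 0x0400,
 * i.e. the address is written byte-reversed, as the hardware expects.
 */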
483 #ifdef CONFIG_UGETH_FILTERING
484 static int hw_add_addr_in_paddr(struct ucc_geth_private *ugeth,
485 u8 *p_enet_addr, u8 paddr_num)
487 struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt;
489 if (!(paddr_num < NUM_OF_PADDRS)) {
490 ugeth_warn("%s: Illegal paddr_num.", __FUNCTION__);
495 (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram->
498 /* Ethernet frames are defined in Little Endian mode, */
499 /* therefore to insert the address we reverse the bytes. */
500 set_mac_addr(&p_82xx_addr_filt->paddr[paddr_num].h, p_enet_addr);
503 #endif /* CONFIG_UGETH_FILTERING */
505 static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num)
507 struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt;
509 if (!(paddr_num < NUM_OF_PADDRS)) {
		ugeth_warn("%s: Illegal paddr_num.", __FUNCTION__);
515 (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram->
518 /* Writing address ff.ff.ff.ff.ff.ff disables address
519 recognition for this register */
520 out_be16(&p_82xx_addr_filt->paddr[paddr_num].h, 0xffff);
521 out_be16(&p_82xx_addr_filt->paddr[paddr_num].m, 0xffff);
522 out_be16(&p_82xx_addr_filt->paddr[paddr_num].l, 0xffff);
527 static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth,
530 struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt;
534 (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram->
538 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
	/* Ethernet frames are defined in Little Endian mode;
	   therefore, to insert the address into the hash
	   (Big Endian mode), we reverse the bytes. */
544 set_mac_addr(&p_82xx_addr_filt->taddr.h, p_enet_addr);
	qe_issue_cmd(QE_SET_GROUP_ADDRESS, cecr_subblock,
		     QE_CR_PROTOCOL_ETHERNET, 0);
}
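/*
 * Flow note (added): the CPU never computes the hash bucket itself.  It
 * places the byte-reversed address in the temporary address register
 * (taddr) and the QE_SET_GROUP_ADDRESS host command asks the QE microcode
 * to fold it into the gaddr/iaddr hash registers.
 */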
550 #ifdef CONFIG_UGETH_MAGIC_PACKET
551 static void magic_packet_detection_enable(struct ucc_geth_private *ugeth)
553 struct ucc_fast_private *uccf;
554 struct ucc_geth *ug_regs;
558 ug_regs = ugeth->ug_regs;
560 /* Enable interrupts for magic packet detection */
561 uccm = in_be32(uccf->p_uccm);
563 out_be32(uccf->p_uccm, uccm);
565 /* Enable magic packet detection */
566 maccfg2 = in_be32(&ug_regs->maccfg2);
567 maccfg2 |= MACCFG2_MPE;
568 out_be32(&ug_regs->maccfg2, maccfg2);
571 static void magic_packet_detection_disable(struct ucc_geth_private *ugeth)
573 struct ucc_fast_private *uccf;
574 struct ucc_geth *ug_regs;
578 ug_regs = ugeth->ug_regs;
580 /* Disable interrupts for magic packet detection */
581 uccm = in_be32(uccf->p_uccm);
583 out_be32(uccf->p_uccm, uccm);
585 /* Disable magic packet detection */
586 maccfg2 = in_be32(&ug_regs->maccfg2);
587 maccfg2 &= ~MACCFG2_MPE;
588 out_be32(&ug_regs->maccfg2, maccfg2);
#endif /* CONFIG_UGETH_MAGIC_PACKET */
592 static inline int compare_addr(u8 **addr1, u8 **addr2)
594 return memcmp(addr1, addr2, ENET_NUM_OCTETS_PER_ADDRESS);
598 static void get_statistics(struct ucc_geth_private *ugeth,
599 struct ucc_geth_tx_firmware_statistics *
600 tx_firmware_statistics,
601 struct ucc_geth_rx_firmware_statistics *
602 rx_firmware_statistics,
603 struct ucc_geth_hardware_statistics *hardware_statistics)
605 struct ucc_fast *uf_regs;
606 struct ucc_geth *ug_regs;
607 struct ucc_geth_tx_firmware_statistics_pram *p_tx_fw_statistics_pram;
608 struct ucc_geth_rx_firmware_statistics_pram *p_rx_fw_statistics_pram;
610 ug_regs = ugeth->ug_regs;
611 uf_regs = (struct ucc_fast *) ug_regs;
612 p_tx_fw_statistics_pram = ugeth->p_tx_fw_statistics_pram;
613 p_rx_fw_statistics_pram = ugeth->p_rx_fw_statistics_pram;
615 /* Tx firmware only if user handed pointer and driver actually
616 gathers Tx firmware statistics */
617 if (tx_firmware_statistics && p_tx_fw_statistics_pram) {
618 tx_firmware_statistics->sicoltx =
619 in_be32(&p_tx_fw_statistics_pram->sicoltx);
620 tx_firmware_statistics->mulcoltx =
621 in_be32(&p_tx_fw_statistics_pram->mulcoltx);
622 tx_firmware_statistics->latecoltxfr =
623 in_be32(&p_tx_fw_statistics_pram->latecoltxfr);
624 tx_firmware_statistics->frabortduecol =
625 in_be32(&p_tx_fw_statistics_pram->frabortduecol);
626 tx_firmware_statistics->frlostinmactxer =
627 in_be32(&p_tx_fw_statistics_pram->frlostinmactxer);
628 tx_firmware_statistics->carriersenseertx =
629 in_be32(&p_tx_fw_statistics_pram->carriersenseertx);
630 tx_firmware_statistics->frtxok =
631 in_be32(&p_tx_fw_statistics_pram->frtxok);
632 tx_firmware_statistics->txfrexcessivedefer =
633 in_be32(&p_tx_fw_statistics_pram->txfrexcessivedefer);
634 tx_firmware_statistics->txpkts256 =
635 in_be32(&p_tx_fw_statistics_pram->txpkts256);
636 tx_firmware_statistics->txpkts512 =
637 in_be32(&p_tx_fw_statistics_pram->txpkts512);
638 tx_firmware_statistics->txpkts1024 =
639 in_be32(&p_tx_fw_statistics_pram->txpkts1024);
640 tx_firmware_statistics->txpktsjumbo =
641 in_be32(&p_tx_fw_statistics_pram->txpktsjumbo);
644 /* Rx firmware only if user handed pointer and driver actually
645 * gathers Rx firmware statistics */
646 if (rx_firmware_statistics && p_rx_fw_statistics_pram) {
648 rx_firmware_statistics->frrxfcser =
649 in_be32(&p_rx_fw_statistics_pram->frrxfcser);
650 rx_firmware_statistics->fraligner =
651 in_be32(&p_rx_fw_statistics_pram->fraligner);
652 rx_firmware_statistics->inrangelenrxer =
653 in_be32(&p_rx_fw_statistics_pram->inrangelenrxer);
654 rx_firmware_statistics->outrangelenrxer =
655 in_be32(&p_rx_fw_statistics_pram->outrangelenrxer);
656 rx_firmware_statistics->frtoolong =
657 in_be32(&p_rx_fw_statistics_pram->frtoolong);
658 rx_firmware_statistics->runt =
659 in_be32(&p_rx_fw_statistics_pram->runt);
660 rx_firmware_statistics->verylongevent =
661 in_be32(&p_rx_fw_statistics_pram->verylongevent);
662 rx_firmware_statistics->symbolerror =
663 in_be32(&p_rx_fw_statistics_pram->symbolerror);
664 rx_firmware_statistics->dropbsy =
665 in_be32(&p_rx_fw_statistics_pram->dropbsy);
666 for (i = 0; i < 0x8; i++)
667 rx_firmware_statistics->res0[i] =
668 p_rx_fw_statistics_pram->res0[i];
669 rx_firmware_statistics->mismatchdrop =
670 in_be32(&p_rx_fw_statistics_pram->mismatchdrop);
671 rx_firmware_statistics->underpkts =
672 in_be32(&p_rx_fw_statistics_pram->underpkts);
673 rx_firmware_statistics->pkts256 =
674 in_be32(&p_rx_fw_statistics_pram->pkts256);
675 rx_firmware_statistics->pkts512 =
676 in_be32(&p_rx_fw_statistics_pram->pkts512);
677 rx_firmware_statistics->pkts1024 =
678 in_be32(&p_rx_fw_statistics_pram->pkts1024);
679 rx_firmware_statistics->pktsjumbo =
680 in_be32(&p_rx_fw_statistics_pram->pktsjumbo);
681 rx_firmware_statistics->frlossinmacer =
682 in_be32(&p_rx_fw_statistics_pram->frlossinmacer);
683 rx_firmware_statistics->pausefr =
684 in_be32(&p_rx_fw_statistics_pram->pausefr);
685 for (i = 0; i < 0x4; i++)
686 rx_firmware_statistics->res1[i] =
687 p_rx_fw_statistics_pram->res1[i];
688 rx_firmware_statistics->removevlan =
689 in_be32(&p_rx_fw_statistics_pram->removevlan);
690 rx_firmware_statistics->replacevlan =
691 in_be32(&p_rx_fw_statistics_pram->replacevlan);
692 rx_firmware_statistics->insertvlan =
693 in_be32(&p_rx_fw_statistics_pram->insertvlan);
696 /* Hardware only if user handed pointer and driver actually
697 gathers hardware statistics */
698 if (hardware_statistics && (in_be32(&uf_regs->upsmr) & UPSMR_HSE)) {
699 hardware_statistics->tx64 = in_be32(&ug_regs->tx64);
700 hardware_statistics->tx127 = in_be32(&ug_regs->tx127);
701 hardware_statistics->tx255 = in_be32(&ug_regs->tx255);
702 hardware_statistics->rx64 = in_be32(&ug_regs->rx64);
703 hardware_statistics->rx127 = in_be32(&ug_regs->rx127);
704 hardware_statistics->rx255 = in_be32(&ug_regs->rx255);
705 hardware_statistics->txok = in_be32(&ug_regs->txok);
706 hardware_statistics->txcf = in_be16(&ug_regs->txcf);
707 hardware_statistics->tmca = in_be32(&ug_regs->tmca);
708 hardware_statistics->tbca = in_be32(&ug_regs->tbca);
709 hardware_statistics->rxfok = in_be32(&ug_regs->rxfok);
710 hardware_statistics->rxbok = in_be32(&ug_regs->rxbok);
711 hardware_statistics->rbyt = in_be32(&ug_regs->rbyt);
712 hardware_statistics->rmca = in_be32(&ug_regs->rmca);
713 hardware_statistics->rbca = in_be32(&ug_regs->rbca);
717 static void dump_bds(struct ucc_geth_private *ugeth)
722 for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
723 if (ugeth->p_tx_bd_ring[i]) {
725 (ugeth->ug_info->bdRingLenTx[i] *
726 sizeof(struct qe_bd));
727 ugeth_info("TX BDs[%d]", i);
728 mem_disp(ugeth->p_tx_bd_ring[i], length);
731 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
732 if (ugeth->p_rx_bd_ring[i]) {
734 (ugeth->ug_info->bdRingLenRx[i] *
735 sizeof(struct qe_bd));
736 ugeth_info("RX BDs[%d]", i);
737 mem_disp(ugeth->p_rx_bd_ring[i], length);
742 static void dump_regs(struct ucc_geth_private *ugeth)
746 ugeth_info("UCC%d Geth registers:", ugeth->ug_info->uf_info.ucc_num);
747 ugeth_info("Base address: 0x%08x", (u32) ugeth->ug_regs);
749 ugeth_info("maccfg1 : addr - 0x%08x, val - 0x%08x",
750 (u32) & ugeth->ug_regs->maccfg1,
751 in_be32(&ugeth->ug_regs->maccfg1));
752 ugeth_info("maccfg2 : addr - 0x%08x, val - 0x%08x",
753 (u32) & ugeth->ug_regs->maccfg2,
754 in_be32(&ugeth->ug_regs->maccfg2));
755 ugeth_info("ipgifg : addr - 0x%08x, val - 0x%08x",
756 (u32) & ugeth->ug_regs->ipgifg,
757 in_be32(&ugeth->ug_regs->ipgifg));
758 ugeth_info("hafdup : addr - 0x%08x, val - 0x%08x",
759 (u32) & ugeth->ug_regs->hafdup,
760 in_be32(&ugeth->ug_regs->hafdup));
761 ugeth_info("miimcfg : addr - 0x%08x, val - 0x%08x",
762 (u32) & ugeth->ug_regs->miimng.miimcfg,
763 in_be32(&ugeth->ug_regs->miimng.miimcfg));
764 ugeth_info("miimcom : addr - 0x%08x, val - 0x%08x",
765 (u32) & ugeth->ug_regs->miimng.miimcom,
766 in_be32(&ugeth->ug_regs->miimng.miimcom));
767 ugeth_info("miimadd : addr - 0x%08x, val - 0x%08x",
768 (u32) & ugeth->ug_regs->miimng.miimadd,
769 in_be32(&ugeth->ug_regs->miimng.miimadd));
770 ugeth_info("miimcon : addr - 0x%08x, val - 0x%08x",
771 (u32) & ugeth->ug_regs->miimng.miimcon,
772 in_be32(&ugeth->ug_regs->miimng.miimcon));
773 ugeth_info("miimstat : addr - 0x%08x, val - 0x%08x",
774 (u32) & ugeth->ug_regs->miimng.miimstat,
775 in_be32(&ugeth->ug_regs->miimng.miimstat));
776 ugeth_info("miimmind : addr - 0x%08x, val - 0x%08x",
777 (u32) & ugeth->ug_regs->miimng.miimind,
778 in_be32(&ugeth->ug_regs->miimng.miimind));
779 ugeth_info("ifctl : addr - 0x%08x, val - 0x%08x",
780 (u32) & ugeth->ug_regs->ifctl,
781 in_be32(&ugeth->ug_regs->ifctl));
782 ugeth_info("ifstat : addr - 0x%08x, val - 0x%08x",
783 (u32) & ugeth->ug_regs->ifstat,
784 in_be32(&ugeth->ug_regs->ifstat));
785 ugeth_info("macstnaddr1: addr - 0x%08x, val - 0x%08x",
786 (u32) & ugeth->ug_regs->macstnaddr1,
787 in_be32(&ugeth->ug_regs->macstnaddr1));
788 ugeth_info("macstnaddr2: addr - 0x%08x, val - 0x%08x",
789 (u32) & ugeth->ug_regs->macstnaddr2,
790 in_be32(&ugeth->ug_regs->macstnaddr2));
791 ugeth_info("uempr : addr - 0x%08x, val - 0x%08x",
792 (u32) & ugeth->ug_regs->uempr,
793 in_be32(&ugeth->ug_regs->uempr));
794 ugeth_info("utbipar : addr - 0x%08x, val - 0x%08x",
795 (u32) & ugeth->ug_regs->utbipar,
796 in_be32(&ugeth->ug_regs->utbipar));
797 ugeth_info("uescr : addr - 0x%08x, val - 0x%04x",
798 (u32) & ugeth->ug_regs->uescr,
799 in_be16(&ugeth->ug_regs->uescr));
800 ugeth_info("tx64 : addr - 0x%08x, val - 0x%08x",
801 (u32) & ugeth->ug_regs->tx64,
802 in_be32(&ugeth->ug_regs->tx64));
803 ugeth_info("tx127 : addr - 0x%08x, val - 0x%08x",
804 (u32) & ugeth->ug_regs->tx127,
805 in_be32(&ugeth->ug_regs->tx127));
806 ugeth_info("tx255 : addr - 0x%08x, val - 0x%08x",
807 (u32) & ugeth->ug_regs->tx255,
808 in_be32(&ugeth->ug_regs->tx255));
809 ugeth_info("rx64 : addr - 0x%08x, val - 0x%08x",
810 (u32) & ugeth->ug_regs->rx64,
811 in_be32(&ugeth->ug_regs->rx64));
812 ugeth_info("rx127 : addr - 0x%08x, val - 0x%08x",
813 (u32) & ugeth->ug_regs->rx127,
814 in_be32(&ugeth->ug_regs->rx127));
815 ugeth_info("rx255 : addr - 0x%08x, val - 0x%08x",
816 (u32) & ugeth->ug_regs->rx255,
817 in_be32(&ugeth->ug_regs->rx255));
818 ugeth_info("txok : addr - 0x%08x, val - 0x%08x",
819 (u32) & ugeth->ug_regs->txok,
820 in_be32(&ugeth->ug_regs->txok));
821 ugeth_info("txcf : addr - 0x%08x, val - 0x%04x",
822 (u32) & ugeth->ug_regs->txcf,
823 in_be16(&ugeth->ug_regs->txcf));
824 ugeth_info("tmca : addr - 0x%08x, val - 0x%08x",
825 (u32) & ugeth->ug_regs->tmca,
826 in_be32(&ugeth->ug_regs->tmca));
827 ugeth_info("tbca : addr - 0x%08x, val - 0x%08x",
828 (u32) & ugeth->ug_regs->tbca,
829 in_be32(&ugeth->ug_regs->tbca));
830 ugeth_info("rxfok : addr - 0x%08x, val - 0x%08x",
831 (u32) & ugeth->ug_regs->rxfok,
832 in_be32(&ugeth->ug_regs->rxfok));
833 ugeth_info("rxbok : addr - 0x%08x, val - 0x%08x",
834 (u32) & ugeth->ug_regs->rxbok,
835 in_be32(&ugeth->ug_regs->rxbok));
836 ugeth_info("rbyt : addr - 0x%08x, val - 0x%08x",
837 (u32) & ugeth->ug_regs->rbyt,
838 in_be32(&ugeth->ug_regs->rbyt));
839 ugeth_info("rmca : addr - 0x%08x, val - 0x%08x",
840 (u32) & ugeth->ug_regs->rmca,
841 in_be32(&ugeth->ug_regs->rmca));
842 ugeth_info("rbca : addr - 0x%08x, val - 0x%08x",
843 (u32) & ugeth->ug_regs->rbca,
844 in_be32(&ugeth->ug_regs->rbca));
845 ugeth_info("scar : addr - 0x%08x, val - 0x%08x",
846 (u32) & ugeth->ug_regs->scar,
847 in_be32(&ugeth->ug_regs->scar));
848 ugeth_info("scam : addr - 0x%08x, val - 0x%08x",
849 (u32) & ugeth->ug_regs->scam,
850 in_be32(&ugeth->ug_regs->scam));
852 if (ugeth->p_thread_data_tx) {
853 int numThreadsTxNumerical;
854 switch (ugeth->ug_info->numThreadsTx) {
855 case UCC_GETH_NUM_OF_THREADS_1:
856 numThreadsTxNumerical = 1;
858 case UCC_GETH_NUM_OF_THREADS_2:
859 numThreadsTxNumerical = 2;
861 case UCC_GETH_NUM_OF_THREADS_4:
862 numThreadsTxNumerical = 4;
864 case UCC_GETH_NUM_OF_THREADS_6:
865 numThreadsTxNumerical = 6;
867 case UCC_GETH_NUM_OF_THREADS_8:
868 numThreadsTxNumerical = 8;
871 numThreadsTxNumerical = 0;
875 ugeth_info("Thread data TXs:");
876 ugeth_info("Base address: 0x%08x",
877 (u32) ugeth->p_thread_data_tx);
878 for (i = 0; i < numThreadsTxNumerical; i++) {
879 ugeth_info("Thread data TX[%d]:", i);
880 ugeth_info("Base address: 0x%08x",
881 (u32) & ugeth->p_thread_data_tx[i]);
882 mem_disp((u8 *) & ugeth->p_thread_data_tx[i],
883 sizeof(struct ucc_geth_thread_data_tx));
886 if (ugeth->p_thread_data_rx) {
887 int numThreadsRxNumerical;
888 switch (ugeth->ug_info->numThreadsRx) {
889 case UCC_GETH_NUM_OF_THREADS_1:
890 numThreadsRxNumerical = 1;
892 case UCC_GETH_NUM_OF_THREADS_2:
893 numThreadsRxNumerical = 2;
895 case UCC_GETH_NUM_OF_THREADS_4:
896 numThreadsRxNumerical = 4;
898 case UCC_GETH_NUM_OF_THREADS_6:
899 numThreadsRxNumerical = 6;
901 case UCC_GETH_NUM_OF_THREADS_8:
902 numThreadsRxNumerical = 8;
905 numThreadsRxNumerical = 0;
909 ugeth_info("Thread data RX:");
910 ugeth_info("Base address: 0x%08x",
911 (u32) ugeth->p_thread_data_rx);
912 for (i = 0; i < numThreadsRxNumerical; i++) {
913 ugeth_info("Thread data RX[%d]:", i);
914 ugeth_info("Base address: 0x%08x",
915 (u32) & ugeth->p_thread_data_rx[i]);
916 mem_disp((u8 *) & ugeth->p_thread_data_rx[i],
917 sizeof(struct ucc_geth_thread_data_rx));
920 if (ugeth->p_exf_glbl_param) {
921 ugeth_info("EXF global param:");
922 ugeth_info("Base address: 0x%08x",
923 (u32) ugeth->p_exf_glbl_param);
924 mem_disp((u8 *) ugeth->p_exf_glbl_param,
925 sizeof(*ugeth->p_exf_glbl_param));
927 if (ugeth->p_tx_glbl_pram) {
928 ugeth_info("TX global param:");
929 ugeth_info("Base address: 0x%08x", (u32) ugeth->p_tx_glbl_pram);
930 ugeth_info("temoder : addr - 0x%08x, val - 0x%04x",
931 (u32) & ugeth->p_tx_glbl_pram->temoder,
932 in_be16(&ugeth->p_tx_glbl_pram->temoder));
933 ugeth_info("sqptr : addr - 0x%08x, val - 0x%08x",
934 (u32) & ugeth->p_tx_glbl_pram->sqptr,
935 in_be32(&ugeth->p_tx_glbl_pram->sqptr));
936 ugeth_info("schedulerbasepointer: addr - 0x%08x, val - 0x%08x",
937 (u32) & ugeth->p_tx_glbl_pram->schedulerbasepointer,
938 in_be32(&ugeth->p_tx_glbl_pram->
939 schedulerbasepointer));
940 ugeth_info("txrmonbaseptr: addr - 0x%08x, val - 0x%08x",
941 (u32) & ugeth->p_tx_glbl_pram->txrmonbaseptr,
942 in_be32(&ugeth->p_tx_glbl_pram->txrmonbaseptr));
943 ugeth_info("tstate : addr - 0x%08x, val - 0x%08x",
944 (u32) & ugeth->p_tx_glbl_pram->tstate,
945 in_be32(&ugeth->p_tx_glbl_pram->tstate));
946 ugeth_info("iphoffset[0] : addr - 0x%08x, val - 0x%02x",
947 (u32) & ugeth->p_tx_glbl_pram->iphoffset[0],
948 ugeth->p_tx_glbl_pram->iphoffset[0]);
949 ugeth_info("iphoffset[1] : addr - 0x%08x, val - 0x%02x",
950 (u32) & ugeth->p_tx_glbl_pram->iphoffset[1],
951 ugeth->p_tx_glbl_pram->iphoffset[1]);
952 ugeth_info("iphoffset[2] : addr - 0x%08x, val - 0x%02x",
953 (u32) & ugeth->p_tx_glbl_pram->iphoffset[2],
954 ugeth->p_tx_glbl_pram->iphoffset[2]);
955 ugeth_info("iphoffset[3] : addr - 0x%08x, val - 0x%02x",
956 (u32) & ugeth->p_tx_glbl_pram->iphoffset[3],
957 ugeth->p_tx_glbl_pram->iphoffset[3]);
958 ugeth_info("iphoffset[4] : addr - 0x%08x, val - 0x%02x",
959 (u32) & ugeth->p_tx_glbl_pram->iphoffset[4],
960 ugeth->p_tx_glbl_pram->iphoffset[4]);
961 ugeth_info("iphoffset[5] : addr - 0x%08x, val - 0x%02x",
962 (u32) & ugeth->p_tx_glbl_pram->iphoffset[5],
963 ugeth->p_tx_glbl_pram->iphoffset[5]);
964 ugeth_info("iphoffset[6] : addr - 0x%08x, val - 0x%02x",
965 (u32) & ugeth->p_tx_glbl_pram->iphoffset[6],
966 ugeth->p_tx_glbl_pram->iphoffset[6]);
967 ugeth_info("iphoffset[7] : addr - 0x%08x, val - 0x%02x",
968 (u32) & ugeth->p_tx_glbl_pram->iphoffset[7],
969 ugeth->p_tx_glbl_pram->iphoffset[7]);
970 ugeth_info("vtagtable[0] : addr - 0x%08x, val - 0x%08x",
971 (u32) & ugeth->p_tx_glbl_pram->vtagtable[0],
972 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[0]));
973 ugeth_info("vtagtable[1] : addr - 0x%08x, val - 0x%08x",
974 (u32) & ugeth->p_tx_glbl_pram->vtagtable[1],
975 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[1]));
976 ugeth_info("vtagtable[2] : addr - 0x%08x, val - 0x%08x",
977 (u32) & ugeth->p_tx_glbl_pram->vtagtable[2],
978 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[2]));
979 ugeth_info("vtagtable[3] : addr - 0x%08x, val - 0x%08x",
980 (u32) & ugeth->p_tx_glbl_pram->vtagtable[3],
981 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[3]));
982 ugeth_info("vtagtable[4] : addr - 0x%08x, val - 0x%08x",
983 (u32) & ugeth->p_tx_glbl_pram->vtagtable[4],
984 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[4]));
985 ugeth_info("vtagtable[5] : addr - 0x%08x, val - 0x%08x",
986 (u32) & ugeth->p_tx_glbl_pram->vtagtable[5],
987 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[5]));
988 ugeth_info("vtagtable[6] : addr - 0x%08x, val - 0x%08x",
989 (u32) & ugeth->p_tx_glbl_pram->vtagtable[6],
990 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[6]));
991 ugeth_info("vtagtable[7] : addr - 0x%08x, val - 0x%08x",
992 (u32) & ugeth->p_tx_glbl_pram->vtagtable[7],
993 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[7]));
994 ugeth_info("tqptr : addr - 0x%08x, val - 0x%08x",
995 (u32) & ugeth->p_tx_glbl_pram->tqptr,
996 in_be32(&ugeth->p_tx_glbl_pram->tqptr));
998 if (ugeth->p_rx_glbl_pram) {
999 ugeth_info("RX global param:");
1000 ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_glbl_pram);
1001 ugeth_info("remoder : addr - 0x%08x, val - 0x%08x",
1002 (u32) & ugeth->p_rx_glbl_pram->remoder,
1003 in_be32(&ugeth->p_rx_glbl_pram->remoder));
1004 ugeth_info("rqptr : addr - 0x%08x, val - 0x%08x",
1005 (u32) & ugeth->p_rx_glbl_pram->rqptr,
1006 in_be32(&ugeth->p_rx_glbl_pram->rqptr));
1007 ugeth_info("typeorlen : addr - 0x%08x, val - 0x%04x",
1008 (u32) & ugeth->p_rx_glbl_pram->typeorlen,
1009 in_be16(&ugeth->p_rx_glbl_pram->typeorlen));
1010 ugeth_info("rxgstpack : addr - 0x%08x, val - 0x%02x",
1011 (u32) & ugeth->p_rx_glbl_pram->rxgstpack,
1012 ugeth->p_rx_glbl_pram->rxgstpack);
1013 ugeth_info("rxrmonbaseptr : addr - 0x%08x, val - 0x%08x",
1014 (u32) & ugeth->p_rx_glbl_pram->rxrmonbaseptr,
1015 in_be32(&ugeth->p_rx_glbl_pram->rxrmonbaseptr));
1016 ugeth_info("intcoalescingptr: addr - 0x%08x, val - 0x%08x",
1017 (u32) & ugeth->p_rx_glbl_pram->intcoalescingptr,
1018 in_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr));
1019 ugeth_info("rstate : addr - 0x%08x, val - 0x%02x",
1020 (u32) & ugeth->p_rx_glbl_pram->rstate,
1021 ugeth->p_rx_glbl_pram->rstate);
1022 ugeth_info("mrblr : addr - 0x%08x, val - 0x%04x",
1023 (u32) & ugeth->p_rx_glbl_pram->mrblr,
1024 in_be16(&ugeth->p_rx_glbl_pram->mrblr));
1025 ugeth_info("rbdqptr : addr - 0x%08x, val - 0x%08x",
1026 (u32) & ugeth->p_rx_glbl_pram->rbdqptr,
1027 in_be32(&ugeth->p_rx_glbl_pram->rbdqptr));
1028 ugeth_info("mflr : addr - 0x%08x, val - 0x%04x",
1029 (u32) & ugeth->p_rx_glbl_pram->mflr,
1030 in_be16(&ugeth->p_rx_glbl_pram->mflr));
1031 ugeth_info("minflr : addr - 0x%08x, val - 0x%04x",
1032 (u32) & ugeth->p_rx_glbl_pram->minflr,
1033 in_be16(&ugeth->p_rx_glbl_pram->minflr));
1034 ugeth_info("maxd1 : addr - 0x%08x, val - 0x%04x",
1035 (u32) & ugeth->p_rx_glbl_pram->maxd1,
1036 in_be16(&ugeth->p_rx_glbl_pram->maxd1));
1037 ugeth_info("maxd2 : addr - 0x%08x, val - 0x%04x",
1038 (u32) & ugeth->p_rx_glbl_pram->maxd2,
1039 in_be16(&ugeth->p_rx_glbl_pram->maxd2));
1040 ugeth_info("ecamptr : addr - 0x%08x, val - 0x%08x",
1041 (u32) & ugeth->p_rx_glbl_pram->ecamptr,
1042 in_be32(&ugeth->p_rx_glbl_pram->ecamptr));
1043 ugeth_info("l2qt : addr - 0x%08x, val - 0x%08x",
1044 (u32) & ugeth->p_rx_glbl_pram->l2qt,
1045 in_be32(&ugeth->p_rx_glbl_pram->l2qt));
1046 ugeth_info("l3qt[0] : addr - 0x%08x, val - 0x%08x",
1047 (u32) & ugeth->p_rx_glbl_pram->l3qt[0],
1048 in_be32(&ugeth->p_rx_glbl_pram->l3qt[0]));
1049 ugeth_info("l3qt[1] : addr - 0x%08x, val - 0x%08x",
1050 (u32) & ugeth->p_rx_glbl_pram->l3qt[1],
1051 in_be32(&ugeth->p_rx_glbl_pram->l3qt[1]));
1052 ugeth_info("l3qt[2] : addr - 0x%08x, val - 0x%08x",
1053 (u32) & ugeth->p_rx_glbl_pram->l3qt[2],
1054 in_be32(&ugeth->p_rx_glbl_pram->l3qt[2]));
1055 ugeth_info("l3qt[3] : addr - 0x%08x, val - 0x%08x",
1056 (u32) & ugeth->p_rx_glbl_pram->l3qt[3],
1057 in_be32(&ugeth->p_rx_glbl_pram->l3qt[3]));
1058 ugeth_info("l3qt[4] : addr - 0x%08x, val - 0x%08x",
1059 (u32) & ugeth->p_rx_glbl_pram->l3qt[4],
1060 in_be32(&ugeth->p_rx_glbl_pram->l3qt[4]));
1061 ugeth_info("l3qt[5] : addr - 0x%08x, val - 0x%08x",
1062 (u32) & ugeth->p_rx_glbl_pram->l3qt[5],
1063 in_be32(&ugeth->p_rx_glbl_pram->l3qt[5]));
1064 ugeth_info("l3qt[6] : addr - 0x%08x, val - 0x%08x",
1065 (u32) & ugeth->p_rx_glbl_pram->l3qt[6],
1066 in_be32(&ugeth->p_rx_glbl_pram->l3qt[6]));
1067 ugeth_info("l3qt[7] : addr - 0x%08x, val - 0x%08x",
1068 (u32) & ugeth->p_rx_glbl_pram->l3qt[7],
1069 in_be32(&ugeth->p_rx_glbl_pram->l3qt[7]));
1070 ugeth_info("vlantype : addr - 0x%08x, val - 0x%04x",
1071 (u32) & ugeth->p_rx_glbl_pram->vlantype,
1072 in_be16(&ugeth->p_rx_glbl_pram->vlantype));
1073 ugeth_info("vlantci : addr - 0x%08x, val - 0x%04x",
1074 (u32) & ugeth->p_rx_glbl_pram->vlantci,
1075 in_be16(&ugeth->p_rx_glbl_pram->vlantci));
1076 for (i = 0; i < 64; i++)
1078 ("addressfiltering[%d]: addr - 0x%08x, val - 0x%02x",
1080 (u32) & ugeth->p_rx_glbl_pram->addressfiltering[i],
1081 ugeth->p_rx_glbl_pram->addressfiltering[i]);
1082 ugeth_info("exfGlobalParam : addr - 0x%08x, val - 0x%08x",
1083 (u32) & ugeth->p_rx_glbl_pram->exfGlobalParam,
1084 in_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam));
1086 if (ugeth->p_send_q_mem_reg) {
1087 ugeth_info("Send Q memory registers:");
1088 ugeth_info("Base address: 0x%08x",
1089 (u32) ugeth->p_send_q_mem_reg);
1090 for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
1091 ugeth_info("SQQD[%d]:", i);
1092 ugeth_info("Base address: 0x%08x",
1093 (u32) & ugeth->p_send_q_mem_reg->sqqd[i]);
1094 mem_disp((u8 *) & ugeth->p_send_q_mem_reg->sqqd[i],
1095 sizeof(struct ucc_geth_send_queue_qd));
1098 if (ugeth->p_scheduler) {
1099 ugeth_info("Scheduler:");
1100 ugeth_info("Base address: 0x%08x", (u32) ugeth->p_scheduler);
1101 mem_disp((u8 *) ugeth->p_scheduler,
1102 sizeof(*ugeth->p_scheduler));
1104 if (ugeth->p_tx_fw_statistics_pram) {
1105 ugeth_info("TX FW statistics pram:");
1106 ugeth_info("Base address: 0x%08x",
1107 (u32) ugeth->p_tx_fw_statistics_pram);
1108 mem_disp((u8 *) ugeth->p_tx_fw_statistics_pram,
1109 sizeof(*ugeth->p_tx_fw_statistics_pram));
1111 if (ugeth->p_rx_fw_statistics_pram) {
1112 ugeth_info("RX FW statistics pram:");
1113 ugeth_info("Base address: 0x%08x",
1114 (u32) ugeth->p_rx_fw_statistics_pram);
1115 mem_disp((u8 *) ugeth->p_rx_fw_statistics_pram,
1116 sizeof(*ugeth->p_rx_fw_statistics_pram));
1118 if (ugeth->p_rx_irq_coalescing_tbl) {
1119 ugeth_info("RX IRQ coalescing tables:");
1120 ugeth_info("Base address: 0x%08x",
1121 (u32) ugeth->p_rx_irq_coalescing_tbl);
1122 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
1123 ugeth_info("RX IRQ coalescing table entry[%d]:", i);
1124 ugeth_info("Base address: 0x%08x",
1125 (u32) & ugeth->p_rx_irq_coalescing_tbl->
1126 coalescingentry[i]);
1128 ("interruptcoalescingmaxvalue: addr - 0x%08x, val - 0x%08x",
1129 (u32) & ugeth->p_rx_irq_coalescing_tbl->
1130 coalescingentry[i].interruptcoalescingmaxvalue,
1131 in_be32(&ugeth->p_rx_irq_coalescing_tbl->
1133 interruptcoalescingmaxvalue));
1135 ("interruptcoalescingcounter : addr - 0x%08x, val - 0x%08x",
1136 (u32) & ugeth->p_rx_irq_coalescing_tbl->
1137 coalescingentry[i].interruptcoalescingcounter,
1138 in_be32(&ugeth->p_rx_irq_coalescing_tbl->
1140 interruptcoalescingcounter));
1143 if (ugeth->p_rx_bd_qs_tbl) {
1144 ugeth_info("RX BD QS tables:");
1145 ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_bd_qs_tbl);
1146 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
1147 ugeth_info("RX BD QS table[%d]:", i);
1148 ugeth_info("Base address: 0x%08x",
1149 (u32) & ugeth->p_rx_bd_qs_tbl[i]);
1151 ("bdbaseptr : addr - 0x%08x, val - 0x%08x",
1152 (u32) & ugeth->p_rx_bd_qs_tbl[i].bdbaseptr,
1153 in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr));
1155 ("bdptr : addr - 0x%08x, val - 0x%08x",
1156 (u32) & ugeth->p_rx_bd_qs_tbl[i].bdptr,
1157 in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdptr));
1159 ("externalbdbaseptr: addr - 0x%08x, val - 0x%08x",
1160 (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
1161 in_be32(&ugeth->p_rx_bd_qs_tbl[i].
1162 externalbdbaseptr));
1164 ("externalbdptr : addr - 0x%08x, val - 0x%08x",
1165 (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdptr,
1166 in_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdptr));
1167 ugeth_info("ucode RX Prefetched BDs:");
1168 ugeth_info("Base address: 0x%08x",
1170 qe_muram_addr(in_be32
1171 (&ugeth->p_rx_bd_qs_tbl[i].
1174 qe_muram_addr(in_be32
1175 (&ugeth->p_rx_bd_qs_tbl[i].
1177 sizeof(struct ucc_geth_rx_prefetched_bds));
1180 if (ugeth->p_init_enet_param_shadow) {
1182 ugeth_info("Init enet param shadow:");
1183 ugeth_info("Base address: 0x%08x",
1184 (u32) ugeth->p_init_enet_param_shadow);
1185 mem_disp((u8 *) ugeth->p_init_enet_param_shadow,
1186 sizeof(*ugeth->p_init_enet_param_shadow));
1188 size = sizeof(struct ucc_geth_thread_rx_pram);
1189 if (ugeth->ug_info->rxExtendedFiltering) {
1191 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
1192 if (ugeth->ug_info->largestexternallookupkeysize ==
1193 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
1195 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
1196 if (ugeth->ug_info->largestexternallookupkeysize ==
1197 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
1199 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
1202 dump_init_enet_entries(ugeth,
1203 &(ugeth->p_init_enet_param_shadow->
1205 ENET_INIT_PARAM_MAX_ENTRIES_TX,
1206 sizeof(struct ucc_geth_thread_tx_pram),
1207 ugeth->ug_info->riscTx, 0);
1208 dump_init_enet_entries(ugeth,
1209 &(ugeth->p_init_enet_param_shadow->
1211 ENET_INIT_PARAM_MAX_ENTRIES_RX, size,
1212 ugeth->ug_info->riscRx, 1);
1217 static void init_default_reg_vals(volatile u32 *upsmr_register,
1218 volatile u32 *maccfg1_register,
1219 volatile u32 *maccfg2_register)
1221 out_be32(upsmr_register, UCC_GETH_UPSMR_INIT);
1222 out_be32(maccfg1_register, UCC_GETH_MACCFG1_INIT);
1223 out_be32(maccfg2_register, UCC_GETH_MACCFG2_INIT);
1226 static int init_half_duplex_params(int alt_beb,
1227 int back_pressure_no_backoff,
1230 u8 alt_beb_truncation,
1231 u8 max_retransmissions,
1232 u8 collision_window,
1233 volatile u32 *hafdup_register)
1237 if ((alt_beb_truncation > HALFDUP_ALT_BEB_TRUNCATION_MAX) ||
1238 (max_retransmissions > HALFDUP_MAX_RETRANSMISSION_MAX) ||
1239 (collision_window > HALFDUP_COLLISION_WINDOW_MAX))
1242 value = (u32) (alt_beb_truncation << HALFDUP_ALT_BEB_TRUNCATION_SHIFT);
1245 value |= HALFDUP_ALT_BEB;
1246 if (back_pressure_no_backoff)
1247 value |= HALFDUP_BACK_PRESSURE_NO_BACKOFF;
1249 value |= HALFDUP_NO_BACKOFF;
1251 value |= HALFDUP_EXCESSIVE_DEFER;
1253 value |= (max_retransmissions << HALFDUP_MAX_RETRANSMISSION_SHIFT);
1255 value |= collision_window;
	out_be32(hafdup_register, value);

	return 0;
}
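/*
 * Worked example (added): with the ugeth_primary_info defaults --
 * alt_beb_truncation = 0xa, max_retransmissions = 0xf,
 * collision_window = 0x37 -- the HAFDUP value is assembled as
 *
 *	value = (0xa << HALFDUP_ALT_BEB_TRUNCATION_SHIFT)
 *	      | (0xf << HALFDUP_MAX_RETRANSMISSION_SHIFT)
 *	      | 0x37
 *
 * plus whichever of the HALFDUP_ALT_BEB / HALFDUP_BACK_PRESSURE_NO_BACKOFF
 * / HALFDUP_NO_BACKOFF / HALFDUP_EXCESSIVE_DEFER flags were requested.
 */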
1261 static int init_inter_frame_gap_params(u8 non_btb_cs_ipg,
1265 volatile u32 *ipgifg_register)
	/* Non-Back-to-back IPG part 1 should be <= Non-Back-to-back
	   IPG part 2 */
	if (non_btb_cs_ipg > non_btb_ipg)
		return -EINVAL;
1274 if ((non_btb_cs_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART1_MAX) ||
1275 (non_btb_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART2_MAX) ||
1276 /*(min_ifg > IPGIFG_MINIMUM_IFG_ENFORCEMENT_MAX) || */
1277 (btb_ipg > IPGIFG_BACK_TO_BACK_IFG_MAX))
1281 ((non_btb_cs_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT) &
1282 IPGIFG_NBTB_CS_IPG_MASK);
1284 ((non_btb_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT) &
1285 IPGIFG_NBTB_IPG_MASK);
1287 ((min_ifg << IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT) &
1288 IPGIFG_MIN_IFG_MASK);
1289 value |= (btb_ipg & IPGIFG_BTB_IPG_MASK);
	out_be32(ipgifg_register, value);

	return 0;
}
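/*
 * Worked example (added): with the ugeth_primary_info defaults --
 * non_btb_cs_ipg = 0x40, non_btb_ipg = 0x60, min_ifg = 0x50,
 * btb_ipg = 0x60 -- the four gap values are shifted, masked and OR-ed
 * into the single 32-bit IPGIFG word:
 *
 *	value = ((0x40 << IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT)
 *		 & IPGIFG_NBTB_CS_IPG_MASK)
 *	      | ((0x60 << IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT)
 *		 & IPGIFG_NBTB_IPG_MASK)
 *	      | ((0x50 << IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT)
 *		 & IPGIFG_MIN_IFG_MASK)
 *	      | (0x60 & IPGIFG_BTB_IPG_MASK);
 */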
1295 static int init_flow_control_params(u32 automatic_flow_control_mode,
1296 int rx_flow_control_enable,
1297 int tx_flow_control_enable,
1299 u16 extension_field,
1300 volatile u32 *upsmr_register,
1301 volatile u32 *uempr_register,
1302 volatile u32 *maccfg1_register)
1306 /* Set UEMPR register */
1307 value = (u32) pause_period << UEMPR_PAUSE_TIME_VALUE_SHIFT;
1308 value |= (u32) extension_field << UEMPR_EXTENDED_PAUSE_TIME_VALUE_SHIFT;
1309 out_be32(uempr_register, value);
1311 /* Set UPSMR register */
1312 value = in_be32(upsmr_register);
1313 value |= automatic_flow_control_mode;
1314 out_be32(upsmr_register, value);
1316 value = in_be32(maccfg1_register);
1317 if (rx_flow_control_enable)
1318 value |= MACCFG1_FLOW_RX;
1319 if (tx_flow_control_enable)
1320 value |= MACCFG1_FLOW_TX;
1321 out_be32(maccfg1_register, value);
1326 static int init_hw_statistics_gathering_mode(int enable_hardware_statistics,
1327 int auto_zero_hardware_statistics,
1328 volatile u32 *upsmr_register,
1329 volatile u16 *uescr_register)
1331 u32 upsmr_value = 0;
1332 u16 uescr_value = 0;
1333 /* Enable hardware statistics gathering if requested */
1334 if (enable_hardware_statistics) {
1335 upsmr_value = in_be32(upsmr_register);
1336 upsmr_value |= UPSMR_HSE;
1337 out_be32(upsmr_register, upsmr_value);
1340 /* Clear hardware statistics counters */
1341 uescr_value = in_be16(uescr_register);
1342 uescr_value |= UESCR_CLRCNT;
	/* Automatically zero hardware statistics counters on read,
	   if requested */
1345 if (auto_zero_hardware_statistics)
1346 uescr_value |= UESCR_AUTOZ;
1347 out_be16(uescr_register, uescr_value);
1352 static int init_firmware_statistics_gathering_mode(int
1353 enable_tx_firmware_statistics,
1354 int enable_rx_firmware_statistics,
1355 volatile u32 *tx_rmon_base_ptr,
1356 u32 tx_firmware_statistics_structure_address,
1357 volatile u32 *rx_rmon_base_ptr,
1358 u32 rx_firmware_statistics_structure_address,
1359 volatile u16 *temoder_register,
1360 volatile u32 *remoder_register)
1362 /* Note: this function does not check if */
1363 /* the parameters it receives are NULL */
1367 if (enable_tx_firmware_statistics) {
1368 out_be32(tx_rmon_base_ptr,
1369 tx_firmware_statistics_structure_address);
1370 temoder_value = in_be16(temoder_register);
1371 temoder_value |= TEMODER_TX_RMON_STATISTICS_ENABLE;
1372 out_be16(temoder_register, temoder_value);
1375 if (enable_rx_firmware_statistics) {
1376 out_be32(rx_rmon_base_ptr,
1377 rx_firmware_statistics_structure_address);
1378 remoder_value = in_be32(remoder_register);
1379 remoder_value |= REMODER_RX_RMON_STATISTICS_ENABLE;
1380 out_be32(remoder_register, remoder_value);
1386 static int init_mac_station_addr_regs(u8 address_byte_0,
1392 volatile u32 *macstnaddr1_register,
1393 volatile u32 *macstnaddr2_register)
1397 /* Example: for a station address of 0x12345678ABCD, */
1398 /* 0x12 is byte 0, 0x34 is byte 1 and so on and 0xCD is byte 5 */
1400 /* MACSTNADDR1 Register: */
1403 /* station address byte 5 station address byte 4 */
1405 /* station address byte 3 station address byte 2 */
1406 value |= (u32) ((address_byte_2 << 0) & 0x000000FF);
1407 value |= (u32) ((address_byte_3 << 8) & 0x0000FF00);
1408 value |= (u32) ((address_byte_4 << 16) & 0x00FF0000);
1409 value |= (u32) ((address_byte_5 << 24) & 0xFF000000);
1411 out_be32(macstnaddr1_register, value);
1413 /* MACSTNADDR2 Register: */
1416 /* station address byte 1 station address byte 0 */
1418 /* reserved reserved */
1420 value |= (u32) ((address_byte_0 << 16) & 0x00FF0000);
1421 value |= (u32) ((address_byte_1 << 24) & 0xFF000000);
	out_be32(macstnaddr2_register, value);

	return 0;
}
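/*
 * Worked example (added; continues the 0x12345678ABCD example above):
 * byte 0 = 0x12 ... byte 5 = 0xCD, so
 *
 *	MACSTNADDR1 = (0xCD << 24) | (0xAB << 16) | (0x78 << 8) | 0x56
 *		    = 0xCDAB7856
 *	MACSTNADDR2 = (0x34 << 24) | (0x12 << 16) = 0x34120000
 *
 * with the low 16 bits of MACSTNADDR2 reserved.
 */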
1428 static int init_mac_duplex_mode(int full_duplex,
1429 int limited_to_full_duplex,
1430 volatile u32 *maccfg2_register)
1434 /* some interfaces must work in full duplex mode */
1435 if ((full_duplex == 0) && (limited_to_full_duplex == 1))
1438 value = in_be32(maccfg2_register);
1441 value |= MACCFG2_FDX;
1443 value &= ~MACCFG2_FDX;
1445 out_be32(maccfg2_register, value);
1449 static int init_check_frame_length_mode(int length_check,
1450 volatile u32 *maccfg2_register)
1454 value = in_be32(maccfg2_register);
1457 value |= MACCFG2_LC;
1459 value &= ~MACCFG2_LC;
1461 out_be32(maccfg2_register, value);
1465 static int init_preamble_length(u8 preamble_length,
1466 volatile u32 *maccfg2_register)
1470 if ((preamble_length < 3) || (preamble_length > 7))
1473 value = in_be32(maccfg2_register);
1474 value &= ~MACCFG2_PREL_MASK;
1475 value |= (preamble_length << MACCFG2_PREL_SHIFT);
1476 out_be32(maccfg2_register, value);
1480 static int init_mii_management_configuration(int reset_mgmt,
1481 int preamble_supress,
1482 volatile u32 *miimcfg_register,
1483 volatile u32 *miimind_register)
1485 unsigned int timeout = PHY_INIT_TIMEOUT;
1488 value = in_be32(miimcfg_register);
1490 value |= MIIMCFG_RESET_MANAGEMENT;
1491 out_be32(miimcfg_register, value);
1496 if (preamble_supress)
1497 value |= MIIMCFG_NO_PREAMBLE;
1499 value |= UCC_GETH_MIIMCFG_MNGMNT_CLC_DIV_INIT;
1500 out_be32(miimcfg_register, value);
1502 /* Wait until the bus is free */
1503 while ((in_be32(miimind_register) & MIIMIND_BUSY) && timeout--)
1507 ugeth_err("%s: The MII Bus is stuck!", __FUNCTION__);
1514 static int init_rx_parameters(int reject_broadcast,
1515 int receive_short_frames,
1516 int promiscuous, volatile u32 *upsmr_register)
1520 value = in_be32(upsmr_register);
1522 if (reject_broadcast)
1525 value &= ~UPSMR_BRO;
1527 if (receive_short_frames)
1530 value &= ~UPSMR_RSH;
1535 value &= ~UPSMR_PRO;
1537 out_be32(upsmr_register, value);
1542 static int init_max_rx_buff_len(u16 max_rx_buf_len,
1543 volatile u16 *mrblr_register)
1545 /* max_rx_buf_len value must be a multiple of 128 */
	if ((max_rx_buf_len == 0)
	    || (max_rx_buf_len % UCC_GETH_MRBLR_ALIGNMENT))
		return -EINVAL;

	out_be16(mrblr_register, max_rx_buf_len);

	return 0;
}
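/*
 * Example (added): per the comment above, the buffer length must be a
 * non-zero multiple of 128 (UCC_GETH_MRBLR_ALIGNMENT), so the
 * ugeth_primary_info default of 1536 passes (1536 % 128 == 0) while
 * e.g. 1500 would be rejected.
 */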
1554 static int init_min_frame_len(u16 min_frame_length,
1555 volatile u16 *minflr_register,
1556 volatile u16 *mrblr_register)
1558 u16 mrblr_value = 0;
1560 mrblr_value = in_be16(mrblr_register);
	if (min_frame_length >= (mrblr_value - 4))
		return -EINVAL;
1564 out_be16(minflr_register, min_frame_length);
1568 static int adjust_enet_interface(struct ucc_geth_private *ugeth)
1570 struct ucc_geth_info *ug_info;
1571 struct ucc_geth *ug_regs;
1572 struct ucc_fast *uf_regs;
1573 enum enet_speed speed;
1574 int ret_val, rpm = 0, tbi = 0, r10m = 0, rmm =
1575 0, limited_to_full_duplex = 0;
1576 u32 upsmr, maccfg2, utbipar, tbiBaseAddress;
1579 ugeth_vdbg("%s: IN", __FUNCTION__);
1581 ug_info = ugeth->ug_info;
1582 ug_regs = ugeth->ug_regs;
1583 uf_regs = ugeth->uccf->uf_regs;
1585 /* Analyze enet_interface according to Interface Mode Configuration
1588 get_interface_details(ug_info->enet_interface, &speed, &r10m, &rmm,
1589 &rpm, &tbi, &limited_to_full_duplex);
1592 ("%s: half duplex not supported in requested configuration.",
1598 maccfg2 = in_be32(&ug_regs->maccfg2);
1599 maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;
1600 if ((speed == ENET_SPEED_10BT) || (speed == ENET_SPEED_100BT))
1601 maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
1602 else if (speed == ENET_SPEED_1000BT)
1603 maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
1604 maccfg2 |= ug_info->padAndCrc;
1605 out_be32(&ug_regs->maccfg2, maccfg2);
1608 upsmr = in_be32(&uf_regs->upsmr);
1609 upsmr &= ~(UPSMR_RPM | UPSMR_R10M | UPSMR_TBIM | UPSMR_RMM);
	if (rpm)
		upsmr |= UPSMR_RPM;
	if (r10m)
		upsmr |= UPSMR_R10M;
	if (tbi)
		upsmr |= UPSMR_TBIM;
	if (rmm)
		upsmr |= UPSMR_RMM;
	out_be32(&uf_regs->upsmr, upsmr);
1621 utbipar = in_be32(&ug_regs->utbipar);
1622 utbipar &= ~UTBIPAR_PHY_ADDRESS_MASK;
1625 (ug_info->phy_address +
1626 ugeth->ug_info->uf_info.
1627 ucc_num) << UTBIPAR_PHY_ADDRESS_SHIFT;
1631 ugeth->ug_info->uf_info.
1632 ucc_num) << UTBIPAR_PHY_ADDRESS_SHIFT;
1633 out_be32(&ug_regs->utbipar, utbipar);
1635 /* Disable autonegotiation in tbi mode, because by default it
1636 comes up in autonegotiation mode. */
1637 /* Note that this depends on proper setting in utbipar register. */
1639 tbiBaseAddress = in_be32(&ug_regs->utbipar);
1640 tbiBaseAddress &= UTBIPAR_PHY_ADDRESS_MASK;
1641 tbiBaseAddress >>= UTBIPAR_PHY_ADDRESS_SHIFT;
		value =
		    ugeth->mii_info->mdio_read(ugeth->dev, (u8) tbiBaseAddress,
					       ENET_TBI_MII_CR);
1645 value &= ~0x1000; /* Turn off autonegotiation */
1646 ugeth->mii_info->mdio_write(ugeth->dev, (u8) tbiBaseAddress,
1647 ENET_TBI_MII_CR, value);
1650 ret_val = init_mac_duplex_mode(1,
				       limited_to_full_duplex,
				       &ug_regs->maccfg2);
	if (ret_val != 0) {
		ugeth_err
1655 ("%s: half duplex not supported in requested configuration.",
1660 init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2);
1662 ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2);
1665 ("%s: Preamble length must be between 3 and 7 inclusive.",
1673 /* Called every time the controller might need to be made
1674 * aware of new link state. The PHY code conveys this
1675 * information through variables in the ugeth structure, and this
1676 * function converts those variables into the appropriate
1677 * register values, and can bring down the device if needed.
1679 static void adjust_link(struct net_device *dev)
1681 struct ucc_geth_private *ugeth = netdev_priv(dev);
1682 struct ucc_geth *ug_regs;
1684 struct ugeth_mii_info *mii_info = ugeth->mii_info;
1686 ug_regs = ugeth->ug_regs;
1688 if (mii_info->link) {
1689 /* Now we make sure that we can be in full duplex mode.
1690 * If not, we operate in half-duplex mode. */
1691 if (mii_info->duplex != ugeth->oldduplex) {
1692 if (!(mii_info->duplex)) {
1693 tempval = in_be32(&ug_regs->maccfg2);
1694 tempval &= ~(MACCFG2_FDX);
1695 out_be32(&ug_regs->maccfg2, tempval);
1697 ugeth_info("%s: Half Duplex", dev->name);
1699 tempval = in_be32(&ug_regs->maccfg2);
1700 tempval |= MACCFG2_FDX;
1701 out_be32(&ug_regs->maccfg2, tempval);
1703 ugeth_info("%s: Full Duplex", dev->name);
1706 ugeth->oldduplex = mii_info->duplex;
1709 if (mii_info->speed != ugeth->oldspeed) {
1710 switch (mii_info->speed) {
1712 ugeth->ug_info->enet_interface = ENET_1000_RGMII;
1715 ugeth->ug_info->enet_interface = ENET_100_RGMII;
1718 ugeth->ug_info->enet_interface = ENET_10_RGMII;
1722 ("%s: Ack! Speed (%d) is not 10/100/1000!",
1723 dev->name, mii_info->speed);
1726 adjust_enet_interface(ugeth);
1728 ugeth_info("%s: Speed %dBT", dev->name,
1731 ugeth->oldspeed = mii_info->speed;
1734 if (!ugeth->oldlink) {
1735 ugeth_info("%s: Link is up", dev->name);
1737 netif_carrier_on(dev);
1738 netif_schedule(dev);
1741 if (ugeth->oldlink) {
1742 ugeth_info("%s: Link is down", dev->name);
1744 ugeth->oldspeed = 0;
1745 ugeth->oldduplex = -1;
1746 netif_carrier_off(dev);
1751 /* Configure the PHY for dev.
 * Returns 0 on success, -1 on failure.
1754 static int init_phy(struct net_device *dev)
1756 struct ucc_geth_private *ugeth = netdev_priv(dev);
1757 struct phy_info *curphy;
1758 struct ucc_mii_mng *mii_regs;
1759 struct ugeth_mii_info *mii_info;
1762 mii_regs = &ugeth->ug_regs->miimng;
1765 ugeth->oldspeed = 0;
1766 ugeth->oldduplex = -1;
1768 mii_info = kmalloc(sizeof(struct ugeth_mii_info), GFP_KERNEL);
1770 if (NULL == mii_info) {
1771 ugeth_err("%s: Could not allocate mii_info", dev->name);
1775 mii_info->mii_regs = mii_regs;
1776 mii_info->speed = SPEED_1000;
1777 mii_info->duplex = DUPLEX_FULL;
1778 mii_info->pause = 0;
1781 mii_info->advertising = (ADVERTISED_10baseT_Half |
1782 ADVERTISED_10baseT_Full |
1783 ADVERTISED_100baseT_Half |
1784 ADVERTISED_100baseT_Full |
1785 ADVERTISED_1000baseT_Full);
1786 mii_info->autoneg = 1;
1788 mii_info->mii_id = ugeth->ug_info->phy_address;
1790 mii_info->dev = dev;
1792 mii_info->mdio_read = &read_phy_reg;
1793 mii_info->mdio_write = &write_phy_reg;
1795 spin_lock_init(&mii_info->mdio_lock);
1797 ugeth->mii_info = mii_info;
1799 spin_lock_irq(&ugeth->lock);
	/* Set this UCC to be the master of the MII management */
1802 ucc_set_qe_mux_mii_mng(ugeth->ug_info->uf_info.ucc_num);
1804 if (init_mii_management_configuration(1,
1808 &mii_regs->miimind)) {
1809 ugeth_err("%s: The MII Bus is stuck!", dev->name);
1814 spin_unlock_irq(&ugeth->lock);
1816 /* get info for this PHY */
1817 curphy = get_phy_info(ugeth->mii_info);
1819 if (curphy == NULL) {
1820 ugeth_err("%s: No PHY found", dev->name);
1825 mii_info->phyinfo = curphy;
1827 /* Run the commands which initialize the PHY */
1829 err = curphy->init(ugeth->mii_info);
1844 #ifdef CONFIG_UGETH_TX_ON_DEMOND
1845 static int ugeth_transmit_on_demand(struct ucc_geth_private *ugeth)
	ucc_fast_transmit_on_demand(ugeth->uccf);

	return 0;
}
1853 static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth)
1855 struct ucc_fast_private *uccf;
1861 /* Mask GRACEFUL STOP TX interrupt bit and clear it */
1862 temp = in_be32(uccf->p_uccm);
1864 out_be32(uccf->p_uccm, temp);
1865 out_be32(uccf->p_ucce, UCCE_GRA); /* clear by writing 1 */
1867 /* Issue host command */
1869 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
1870 qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
1871 QE_CR_PROTOCOL_ETHERNET, 0);
1873 /* Wait for command to complete */
1875 temp = in_be32(uccf->p_ucce);
1876 } while (!(temp & UCCE_GRA));
1878 uccf->stopped_tx = 1;
1883 static int ugeth_graceful_stop_rx(struct ucc_geth_private * ugeth)
1885 struct ucc_fast_private *uccf;
1891 /* Clear acknowledge bit */
1892 temp = ugeth->p_rx_glbl_pram->rxgstpack;
1893 temp &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX;
1894 ugeth->p_rx_glbl_pram->rxgstpack = temp;
1896 /* Keep issuing command and checking acknowledge bit until
1897 it is asserted, according to spec */
1899 /* Issue host command */
1901 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.
1903 qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock,
1904 QE_CR_PROTOCOL_ETHERNET, 0);
1906 temp = ugeth->p_rx_glbl_pram->rxgstpack;
1907 } while (!(temp & GRACEFUL_STOP_ACKNOWLEDGE_RX));
1909 uccf->stopped_rx = 1;
1914 static int ugeth_restart_tx(struct ucc_geth_private *ugeth)
1916 struct ucc_fast_private *uccf;
1922 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
1923 qe_issue_cmd(QE_RESTART_TX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET, 0);
1924 uccf->stopped_tx = 0;
1929 static int ugeth_restart_rx(struct ucc_geth_private *ugeth)
1931 struct ucc_fast_private *uccf;
1937 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
1938 qe_issue_cmd(QE_RESTART_RX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET,
1940 uccf->stopped_rx = 0;
1945 static int ugeth_enable(struct ucc_geth_private *ugeth, enum comm_dir mode)
1947 struct ucc_fast_private *uccf;
1948 int enabled_tx, enabled_rx;
1952 /* check if the UCC number is in range. */
1953 if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
1954 ugeth_err("%s: ucc_num out of range.", __FUNCTION__);
1958 enabled_tx = uccf->enabled_tx;
1959 enabled_rx = uccf->enabled_rx;
1961 /* Get Tx and Rx going again, in case this channel was actively
1963 if ((mode & COMM_DIR_TX) && (!enabled_tx) && uccf->stopped_tx)
1964 ugeth_restart_tx(ugeth);
1965 if ((mode & COMM_DIR_RX) && (!enabled_rx) && uccf->stopped_rx)
1966 ugeth_restart_rx(ugeth);
1968 ucc_fast_enable(uccf, mode); /* OK to do even if not disabled */
1974 static int ugeth_disable(struct ucc_geth_private * ugeth, enum comm_dir mode)
1976 struct ucc_fast_private *uccf;
1980 /* check if the UCC number is in range. */
1981 if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
1982 ugeth_err("%s: ucc_num out of range.", __FUNCTION__);
1986 /* Stop any transmissions */
1987 if ((mode & COMM_DIR_TX) && uccf->enabled_tx && !uccf->stopped_tx)
1988 ugeth_graceful_stop_tx(ugeth);
1990 /* Stop any receptions */
1991 if ((mode & COMM_DIR_RX) && uccf->enabled_rx && !uccf->stopped_rx)
1992 ugeth_graceful_stop_rx(ugeth);
	ucc_fast_disable(ugeth->uccf, mode);	/* OK to do even if not enabled */

	return 0;
}
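/*
 * Usage sketch (added): ugeth_disable()/ugeth_enable() are meant to
 * bracket live reconfiguration, as the 82xx filtering code below does:
 *
 *	ugeth_disable(ugeth, comm_dir);
 *	... rewrite hash registers / parameter RAM ...
 *	ugeth_enable(ugeth, comm_dir);
 *
 * A direction is only gracefully stopped if it is enabled and not already
 * stopped, and only restarted if it had been stopped, so the pair is safe
 * to call even when a side was already idle.
 */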
1999 static void ugeth_dump_regs(struct ucc_geth_private *ugeth)
2002 ucc_fast_dump_regs(ugeth->uccf);
2008 #ifdef CONFIG_UGETH_FILTERING
2009 static int ugeth_ext_filtering_serialize_tad(struct ucc_geth_tad_params *
2011 struct qe_fltr_tad *qe_fltr_tad)
2015 /* Zero serialized TAD */
2016 memset(qe_fltr_tad, 0, QE_FLTR_TAD_SIZE);
2018 qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_V; /* Must have this */
2019 if (p_UccGethTadParams->rx_non_dynamic_extended_features_mode ||
2020 (p_UccGethTadParams->vtag_op != UCC_GETH_VLAN_OPERATION_TAGGED_NOP)
2021 || (p_UccGethTadParams->vnontag_op !=
2022 UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP)
2024 qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_EF;
2025 if (p_UccGethTadParams->reject_frame)
2026 qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_REJ;
2028 (u16) (((u16) p_UccGethTadParams->
2029 vtag_op) << UCC_GETH_TAD_VTAG_OP_SHIFT);
2030 qe_fltr_tad->serialized[0] |= (u8) (temp >> 8); /* upper bits */
2032 qe_fltr_tad->serialized[1] |= (u8) (temp & 0x00ff); /* lower bits */
2033 if (p_UccGethTadParams->vnontag_op ==
2034 UCC_GETH_VLAN_OPERATION_NON_TAGGED_Q_TAG_INSERT)
2035 qe_fltr_tad->serialized[1] |= UCC_GETH_TAD_V_NON_VTAG_OP;
2036 qe_fltr_tad->serialized[1] |=
2037 p_UccGethTadParams->rqos << UCC_GETH_TAD_RQOS_SHIFT;
2039 qe_fltr_tad->serialized[2] |=
2040 p_UccGethTadParams->vpri << UCC_GETH_TAD_V_PRIORITY_SHIFT;
2042 qe_fltr_tad->serialized[2] |= (u8) (p_UccGethTadParams->vid >> 8);
	qe_fltr_tad->serialized[3] |= (u8) (p_UccGethTadParams->vid & 0x00ff);

	return 0;
}
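/*
 * Layout note (added): the serializer above packs the TAD parameters into
 * qe_fltr_tad->serialized[] byte by byte -- byte 0 holds the V (valid),
 * EF (extended features) and REJ flags plus the high bits of the VLAN tag
 * operation; byte 1 the low vtag_op bits, the non-tagged-op flag and RQoS;
 * bytes 2-3 the VLAN priority and the 12-bit VID split across the byte
 * boundary.
 */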
2049 static struct enet_addr_container_t
2050 *ugeth_82xx_filtering_get_match_addr_in_hash(struct ucc_geth_private *ugeth,
2051 struct enet_addr *p_enet_addr)
2053 struct enet_addr_container *enet_addr_cont;
2054 struct list_head *p_lh;
2059 if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) {
2060 p_lh = &ugeth->group_hash_q;
2061 p_counter = &(ugeth->numGroupAddrInHash);
2063 p_lh = &ugeth->ind_hash_q;
2064 p_counter = &(ugeth->numIndAddrInHash);
2072 for (i = 0; i < num; i++) {
2074 enet_addr_cont = (struct enet_addr_container *)
2075 ENET_ADDR_CONT_ENTRY(dequeue(p_lh));
2076 for (j = ENET_NUM_OCTETS_PER_ADDRESS - 1; j >= 0; j--) {
2077 if ((*p_enet_addr)[j] != (enet_addr_cont->address)[j])
2080 return enet_addr_cont; /* Found */
2082 enqueue(p_lh, &enet_addr_cont->node); /* Put it back */
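/*
 * Note the search idiom above: each element is dequeued and compared;
 * a miss is enqueued again at the tail, so a full unsuccessful pass
 * leaves the queue contents intact, while a hit is returned still
 * unlinked and the caller decides whether to re-link or destroy it.
 */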
2087 static int ugeth_82xx_filtering_add_addr_in_hash(struct ucc_geth_private *ugeth,
2088 struct enet_addr *p_enet_addr)
2090 enum ucc_geth_enet_address_recognition_location location;
2091 struct enet_addr_container *enet_addr_cont;
2092 struct list_head *p_lh;
2097 if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) {
2098 p_lh = &ugeth->group_hash_q;
2099 limit = ugeth->ug_info->maxGroupAddrInHash;
2101 location = UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_GROUP_HASH;
2102 p_counter = &(ugeth->numGroupAddrInHash);
} else {
2104 p_lh = &ugeth->ind_hash_q;
2105 limit = ugeth->ug_info->maxIndAddrInHash;
2107 location = UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_INDIVIDUAL_HASH;
2108 p_counter = &(ugeth->numIndAddrInHash);
2111 if ((enet_addr_cont =
2112 ugeth_82xx_filtering_get_match_addr_in_hash(ugeth, p_enet_addr))) {
2113 list_add(&enet_addr_cont->node, p_lh); /* Put it back */
2116 if ((!p_lh) || (!(*p_counter < limit)))
2118 if (!(enet_addr_cont = get_enet_addr_container()))
2120 for (i = 0; i < ENET_NUM_OCTETS_PER_ADDRESS; i++)
2121 (enet_addr_cont->address)[i] = (*p_enet_addr)[i];
2122 enet_addr_cont->location = location;
2123 enqueue(p_lh, &enet_addr_cont->node); /* Put it back */
2126 hw_add_addr_in_hash(ugeth, enet_addr_cont->address);
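/*
 * Usage sketch (hypothetical): adding the IPv4 all-hosts group address
 * to the group hash.  Assumes struct enet_addr is the six-octet array
 * type that the (*p_enet_addr)[i] indexing in these helpers implies.
 */
#if 0	/* illustrative only */
static int example_join_all_hosts(struct ucc_geth_private *ugeth)
{
	struct enet_addr mcast = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

	return ugeth_82xx_filtering_add_addr_in_hash(ugeth, &mcast);
}
#endif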
2130 static int ugeth_82xx_filtering_clear_addr_in_hash(struct ucc_geth_private *ugeth,
2131 struct enet_addr *p_enet_addr)
2133 struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt;
2134 struct enet_addr_container *enet_addr_cont;
2135 struct ucc_fast_private *uccf;
2136 enum comm_dir comm_dir;
2138 struct list_head *p_lh;
2139 u32 *addr_h, *addr_l;
2145 p_82xx_addr_filt = (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram->
if (!(enet_addr_cont =
2150 ugeth_82xx_filtering_get_match_addr_in_hash(ugeth, p_enet_addr)))
return -ENOENT;
2153 /* It's been found and removed from the CQ. */
2154 /* Now destroy its container */
2155 put_enet_addr_container(enet_addr_cont);
2157 if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) {
2158 addr_h = &(p_82xx_addr_filt->gaddr_h);
2159 addr_l = &(p_82xx_addr_filt->gaddr_l);
2160 p_lh = &ugeth->group_hash_q;
2161 p_counter = &(ugeth->numGroupAddrInHash);
} else {
2163 addr_h = &(p_82xx_addr_filt->iaddr_h);
2164 addr_l = &(p_82xx_addr_filt->iaddr_l);
2165 p_lh = &ugeth->ind_hash_q;
2166 p_counter = &(ugeth->numIndAddrInHash);
2170 if (uccf->enabled_tx)
2171 comm_dir |= COMM_DIR_TX;
2172 if (uccf->enabled_rx)
2173 comm_dir |= COMM_DIR_RX;
2175 ugeth_disable(ugeth, comm_dir);
2177 /* Clear the hash table. */
2178 out_be32(addr_h, 0x00000000);
2179 out_be32(addr_l, 0x00000000);
2181 /* Add all remaining CQ elements back into hash */
2182 num = --(*p_counter);
2183 for (i = 0; i < num; i++) {
2185 enet_addr_cont = (struct enet_addr_container *)
2186 ENET_ADDR_CONT_ENTRY(dequeue(p_lh));
2187 hw_add_addr_in_hash(ugeth, enet_addr_cont->address);
2188 enqueue(p_lh, &enet_addr_cont->node); /* Put it back */
2192 ugeth_enable(ugeth, comm_dir);
2196 #endif /* CONFIG_UGETH_FILTERING */
2198 static int ugeth_82xx_filtering_clear_all_addr_in_hash(struct ucc_geth_private *ugeth, enum enet_addr_type enet_addr_type)
2203 struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt;
2204 struct ucc_fast_private *uccf;
2205 enum comm_dir comm_dir;
2206 struct list_head *p_lh;
2208 u32 *addr_h, *addr_l;
2214 p_82xx_addr_filt = (struct ucc_geth_82xx_address_filtering_pram *) ugeth->p_rx_glbl_pram->
2217 if (enet_addr_type == ENET_ADDR_TYPE_GROUP) {
2218 addr_h = &(p_82xx_addr_filt->gaddr_h);
2219 addr_l = &(p_82xx_addr_filt->gaddr_l);
2220 p_lh = &ugeth->group_hash_q;
2221 p_counter = &(ugeth->numGroupAddrInHash);
2222 } else if (enet_addr_type == ENET_ADDR_TYPE_INDIVIDUAL) {
2223 addr_h = &(p_82xx_addr_filt->iaddr_h);
2224 addr_l = &(p_82xx_addr_filt->iaddr_l);
2225 p_lh = &ugeth->ind_hash_q;
2226 p_counter = &(ugeth->numIndAddrInHash);
2231 if (uccf->enabled_tx)
2232 comm_dir |= COMM_DIR_TX;
2233 if (uccf->enabled_rx)
2234 comm_dir |= COMM_DIR_RX;
2236 ugeth_disable(ugeth, comm_dir);
2238 /* Clear the hash table. */
2239 out_be32(addr_h, 0x00000000);
2240 out_be32(addr_l, 0x00000000);
2247 /* Delete all remaining CQ elements */
2248 for (i = 0; i < num; i++)
2249 put_enet_addr_container(ENET_ADDR_CONT_ENTRY(dequeue(p_lh)));
2254 ugeth_enable(ugeth, comm_dir);
2259 #ifdef CONFIG_UGETH_FILTERING
2260 static int ugeth_82xx_filtering_add_addr_in_paddr(struct ucc_geth_private *ugeth,
2261 struct enet_addr *p_enet_addr, u8 paddr_num)
2266 if ((*p_enet_addr)[0] & ENET_GROUP_ADDR)
2268 ("%s: multicast address added to paddr will have no "
2269 "effect - is this what you wanted?",
2272 ugeth->indAddrRegUsed[paddr_num] = 1; /* mark this paddr as used */
2273 /* store address in our database */
2274 for (i = 0; i < ENET_NUM_OCTETS_PER_ADDRESS; i++)
2275 ugeth->paddr[paddr_num][i] = (*p_enet_addr)[i];
2276 /* put in hardware */
2277 return hw_add_addr_in_paddr(ugeth, p_enet_addr, paddr_num);
2279 #endif /* CONFIG_UGETH_FILTERING */
2281 static int ugeth_82xx_filtering_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num)
2284 ugeth->indAddrRegUsed[paddr_num] = 0; /* mark this paddr as not used */
2285 return hw_clear_addr_in_paddr(ugeth, paddr_num);/* clear in hardware */
2288 static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
2297 ucc_fast_free(ugeth->uccf);
2299 if (ugeth->p_thread_data_tx) {
2300 qe_muram_free(ugeth->thread_dat_tx_offset);
2301 ugeth->p_thread_data_tx = NULL;
2303 if (ugeth->p_thread_data_rx) {
2304 qe_muram_free(ugeth->thread_dat_rx_offset);
2305 ugeth->p_thread_data_rx = NULL;
2307 if (ugeth->p_exf_glbl_param) {
2308 qe_muram_free(ugeth->exf_glbl_param_offset);
2309 ugeth->p_exf_glbl_param = NULL;
2311 if (ugeth->p_rx_glbl_pram) {
2312 qe_muram_free(ugeth->rx_glbl_pram_offset);
2313 ugeth->p_rx_glbl_pram = NULL;
2315 if (ugeth->p_tx_glbl_pram) {
2316 qe_muram_free(ugeth->tx_glbl_pram_offset);
2317 ugeth->p_tx_glbl_pram = NULL;
2319 if (ugeth->p_send_q_mem_reg) {
2320 qe_muram_free(ugeth->send_q_mem_reg_offset);
2321 ugeth->p_send_q_mem_reg = NULL;
2323 if (ugeth->p_scheduler) {
2324 qe_muram_free(ugeth->scheduler_offset);
2325 ugeth->p_scheduler = NULL;
2327 if (ugeth->p_tx_fw_statistics_pram) {
2328 qe_muram_free(ugeth->tx_fw_statistics_pram_offset);
2329 ugeth->p_tx_fw_statistics_pram = NULL;
2331 if (ugeth->p_rx_fw_statistics_pram) {
2332 qe_muram_free(ugeth->rx_fw_statistics_pram_offset);
2333 ugeth->p_rx_fw_statistics_pram = NULL;
2335 if (ugeth->p_rx_irq_coalescing_tbl) {
2336 qe_muram_free(ugeth->rx_irq_coalescing_tbl_offset);
2337 ugeth->p_rx_irq_coalescing_tbl = NULL;
2339 if (ugeth->p_rx_bd_qs_tbl) {
2340 qe_muram_free(ugeth->rx_bd_qs_tbl_offset);
2341 ugeth->p_rx_bd_qs_tbl = NULL;
2343 if (ugeth->p_init_enet_param_shadow) {
2344 return_init_enet_entries(ugeth,
2345 &(ugeth->p_init_enet_param_shadow->
2347 ENET_INIT_PARAM_MAX_ENTRIES_RX,
2348 ugeth->ug_info->riscRx, 1);
2349 return_init_enet_entries(ugeth,
2350 &(ugeth->p_init_enet_param_shadow->
2352 ENET_INIT_PARAM_MAX_ENTRIES_TX,
2353 ugeth->ug_info->riscTx, 0);
2354 kfree(ugeth->p_init_enet_param_shadow);
2355 ugeth->p_init_enet_param_shadow = NULL;
2357 for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
2358 bd = ugeth->p_tx_bd_ring[i];
2359 for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) {
2360 if (ugeth->tx_skbuff[i][j]) {
2361 dma_unmap_single(NULL,
2362 ((struct qe_bd *)bd)->buf,
2363 (in_be32((u32 *)bd) &
2366 dev_kfree_skb_any(ugeth->tx_skbuff[i][j]);
2367 ugeth->tx_skbuff[i][j] = NULL;
2371 kfree(ugeth->tx_skbuff[i]);
2373 if (ugeth->p_tx_bd_ring[i]) {
2374 if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM)
2376 kfree((void *)ugeth->tx_bd_ring_offset[i]);
2377 else if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_MURAM)
2379 qe_muram_free(ugeth->tx_bd_ring_offset[i]);
2380 ugeth->p_tx_bd_ring[i] = NULL;
2383 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
2384 if (ugeth->p_rx_bd_ring[i]) {
2385 /* Return existing data buffers in ring */
2386 bd = ugeth->p_rx_bd_ring[i];
2387 for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) {
2388 if (ugeth->rx_skbuff[i][j]) {
2389 dma_unmap_single(NULL,
2390 ((struct qe_bd *)bd)->buf,
2392 uf_info.max_rx_buf_length +
2393 UCC_GETH_RX_DATA_BUF_ALIGNMENT,
2396 ugeth->rx_skbuff[i][j]);
2397 ugeth->rx_skbuff[i][j] = NULL;
2399 bd += sizeof(struct qe_bd);
2402 kfree(ugeth->rx_skbuff[i]);
2404 if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM)
2406 kfree((void *)ugeth->rx_bd_ring_offset[i]);
2407 else if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_MURAM)
2409 qe_muram_free(ugeth->rx_bd_ring_offset[i]);
2410 ugeth->p_rx_bd_ring[i] = NULL;
2413 while (!list_empty(&ugeth->group_hash_q))
2414 put_enet_addr_container(ENET_ADDR_CONT_ENTRY
2415 (dequeue(&ugeth->group_hash_q)));
2416 while (!list_empty(&ugeth->ind_hash_q))
2417 put_enet_addr_container(ENET_ADDR_CONT_ENTRY
2418 (dequeue(&ugeth->ind_hash_q)));
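/*
 * Every block freed above is guarded by an "if (pointer)" test and the
 * pointer is cleared afterwards, so ucc_geth_memclean() is idempotent.
 * ucc_geth_startup() relies on this by calling it from every one of its
 * error paths, however far initialization got before failing.
 */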
2422 static void ucc_geth_set_multi(struct net_device *dev)
2424 struct ucc_geth_private *ugeth;
2425 struct dev_mc_list *dmi;
2426 struct ucc_fast *uf_regs;
2427 struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt;
2432 ugeth = netdev_priv(dev);
2434 uf_regs = ugeth->uccf->uf_regs;
2436 if (dev->flags & IFF_PROMISC) {
2438 uf_regs->upsmr |= UPSMR_PRO;
} else {
2442 uf_regs->upsmr &= ~UPSMR_PRO;
2445 p_82xx_addr_filt = (struct ucc_geth_82xx_address_filtering_pram *) ugeth->
2446 p_rx_glbl_pram->addressfiltering;
2448 if (dev->flags & IFF_ALLMULTI) {
2449 /* Catch all multicast addresses, so set the
2450 * filter to all 1's.
2452 out_be32(&p_82xx_addr_filt->gaddr_h, 0xffffffff);
2453 out_be32(&p_82xx_addr_filt->gaddr_l, 0xffffffff);
2455 /* Clear filter and add the addresses in the list.
2457 out_be32(&p_82xx_addr_filt->gaddr_h, 0x0);
2458 out_be32(&p_82xx_addr_filt->gaddr_l, 0x0);
2462 for (dmi = dev->mc_list, i = 0; i < dev->mc_count; i++, dmi = dmi->next) {
2464 /* Only support group multicast for now.
2466 if (!(dmi->dmi_addr[0] & 1))
2469 /* The address in dmi_addr is LSB first,
2470 * and taddr is MSB first. We have to
2471 * copy bytes MSB first from dmi_addr.
2473 mcptr = (u8 *) dmi->dmi_addr + 5;
2474 tdptr = (u8 *) tempaddr;
2475 for (j = 0; j < 6; j++)
2476 *tdptr++ = *mcptr--;
2478 /* Ask CPM to run CRC and set bit in
2481 hw_add_addr_in_hash(ugeth, tempaddr);
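/*
 * Byte-order example for the reversing copy above: if a list entry
 * holds 01:00:5e:0a:0b:0c in dmi_addr, tempaddr receives
 * 0c:0b:0a:5e:00:01, the MSB-first layout the CPM CRC/hash unit
 * expects.
 */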
2487 static void ucc_geth_stop(struct ucc_geth_private *ugeth)
2489 struct ucc_geth *ug_regs = ugeth->ug_regs;
2492 ugeth_vdbg("%s: IN", __FUNCTION__);
2494 /* Disable the controller */
2495 ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
2497 /* Tell the kernel the link is down */
2498 ugeth->mii_info->link = 0;
2499 adjust_link(ugeth->dev);
2501 /* Mask all interrupts */
2502 out_be32(ugeth->uccf->p_ucce, 0x00000000);
2504 /* Clear all interrupts */
2505 out_be32(ugeth->uccf->p_ucce, 0xffffffff);
2507 /* Disable Rx and Tx */
2508 tempval = in_be32(&ug_regs->maccfg1);
2509 tempval &= ~(MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
2510 out_be32(&ug_regs->maccfg1, tempval);
2512 if (ugeth->ug_info->board_flags & FSL_UGETH_BRD_HAS_PHY_INTR) {
2513 /* Clear any pending interrupts */
2514 mii_clear_phy_interrupt(ugeth->mii_info);
2516 /* Disable PHY Interrupts */
2517 mii_configure_phy_interrupt(ugeth->mii_info,
2518 MII_INTERRUPT_DISABLED);
2521 free_irq(ugeth->ug_info->uf_info.irq, ugeth->dev);
2523 if (ugeth->ug_info->board_flags & FSL_UGETH_BRD_HAS_PHY_INTR) {
2524 free_irq(ugeth->ug_info->phy_interrupt, ugeth->dev);
2526 del_timer_sync(&ugeth->phy_info_timer);
2529 ucc_geth_memclean(ugeth);
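/*
 * Shutdown ordering above, for reference: stop QE traffic first
 * (ugeth_disable), report link-down, mask and then clear all UCCE
 * events, drop the MAC Rx/Tx enables, quiesce the PHY interrupt, and
 * only then release the IRQs and, via ucc_geth_memclean(), the MURAM
 * and heap state.
 */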
2532 static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2534 struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt;
2535 struct ucc_geth_init_pram *p_init_enet_pram;
2536 struct ucc_fast_private *uccf;
2537 struct ucc_geth_info *ug_info;
2538 struct ucc_fast_info *uf_info;
2539 struct ucc_fast *uf_regs;
2540 struct ucc_geth *ug_regs;
2541 int ret_val = -EINVAL;
2542 u32 remoder = UCC_GETH_REMODER_INIT;
2543 u32 init_enet_pram_offset, cecr_subblock, command, maccfg1;
2544 u32 ifstat, i, j, size, l2qt, l3qt, length;
2545 u16 temoder = UCC_GETH_TEMODER_INIT;
2547 u8 function_code = 0;
2549 u8 numThreadsRxNumerical, numThreadsTxNumerical;
2551 ugeth_vdbg("%s: IN", __FUNCTION__);
2553 ug_info = ugeth->ug_info;
2554 uf_info = &ug_info->uf_info;
2556 if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) ||
2557 (uf_info->bd_mem_part == MEM_PART_MURAM))) {
2558 ugeth_err("%s: Bad memory partition value.", __FUNCTION__);
2563 for (i = 0; i < ug_info->numQueuesRx; i++) {
2564 if ((ug_info->bdRingLenRx[i] < UCC_GETH_RX_BD_RING_SIZE_MIN) ||
2565 (ug_info->bdRingLenRx[i] %
2566 UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT)) {
2568 ("%s: Rx BD ring length must be multiple of 4,"
2569 " no smaller than 8.", __FUNCTION__);
2575 for (i = 0; i < ug_info->numQueuesTx; i++) {
2576 if (ug_info->bdRingLenTx[i] < UCC_GETH_TX_BD_RING_SIZE_MIN) {
2578 ("%s: Tx BD ring length must be no smaller than 2.",
2585 if ((uf_info->max_rx_buf_length == 0) ||
2586 (uf_info->max_rx_buf_length % UCC_GETH_MRBLR_ALIGNMENT)) {
2588 ("%s: max_rx_buf_length must be non-zero multiple of 128.",
2594 if (ug_info->numQueuesTx > NUM_TX_QUEUES) {
2595 ugeth_err("%s: number of tx queues too large.", __FUNCTION__);
2600 if (ug_info->numQueuesRx > NUM_RX_QUEUES) {
2601 ugeth_err("%s: number of rx queues too large.", __FUNCTION__);
2606 for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) {
2607 if (ug_info->l2qt[i] >= ug_info->numQueuesRx) {
2609 ("%s: VLAN priority table entry must not be"
2610 " larger than number of Rx queues.",
2617 for (i = 0; i < UCC_GETH_IP_PRIORITY_MAX; i++) {
2618 if (ug_info->l3qt[i] >= ug_info->numQueuesRx) {
2620 ("%s: IP priority table entry must not be"
2621 " larger than number of Rx queues.",
2627 if (ug_info->cam && !ug_info->ecamptr) {
2628 ugeth_err("%s: If cam mode is chosen, must supply cam ptr.",
2633 if ((ug_info->numStationAddresses !=
2634 UCC_GETH_NUM_OF_STATION_ADDRESSES_1)
2635 && ug_info->rxExtendedFiltering) {
2636 ugeth_err("%s: Number of station addresses greater than 1 "
2637 "not allowed in extended parsing mode.",
2642 /* Generate uccm_mask for receive */
2643 uf_info->uccm_mask = ug_info->eventRegMask & UCCE_OTHER;/* Errors */
2644 for (i = 0; i < ug_info->numQueuesRx; i++)
2645 uf_info->uccm_mask |= (UCCE_RXBF_SINGLE_MASK << i);
2647 for (i = 0; i < ug_info->numQueuesTx; i++)
2648 uf_info->uccm_mask |= (UCCE_TXBF_SINGLE_MASK << i);
2649 /* Initialize the general fast UCC block. */
2650 if (ucc_fast_init(uf_info, &uccf)) {
2651 ugeth_err("%s: Failed to init uccf.", __FUNCTION__);
2652 ucc_geth_memclean(ugeth);
2657 switch (ug_info->numThreadsRx) {
2658 case UCC_GETH_NUM_OF_THREADS_1:
2659 numThreadsRxNumerical = 1;
2661 case UCC_GETH_NUM_OF_THREADS_2:
2662 numThreadsRxNumerical = 2;
2664 case UCC_GETH_NUM_OF_THREADS_4:
2665 numThreadsRxNumerical = 4;
2667 case UCC_GETH_NUM_OF_THREADS_6:
2668 numThreadsRxNumerical = 6;
2670 case UCC_GETH_NUM_OF_THREADS_8:
2671 numThreadsRxNumerical = 8;
2674 ugeth_err("%s: Bad number of Rx threads value.", __FUNCTION__);
2675 ucc_geth_memclean(ugeth);
2680 switch (ug_info->numThreadsTx) {
2681 case UCC_GETH_NUM_OF_THREADS_1:
2682 numThreadsTxNumerical = 1;
2684 case UCC_GETH_NUM_OF_THREADS_2:
2685 numThreadsTxNumerical = 2;
2687 case UCC_GETH_NUM_OF_THREADS_4:
2688 numThreadsTxNumerical = 4;
2690 case UCC_GETH_NUM_OF_THREADS_6:
2691 numThreadsTxNumerical = 6;
2693 case UCC_GETH_NUM_OF_THREADS_8:
2694 numThreadsTxNumerical = 8;
2697 ugeth_err("%s: Bad number of Tx threads value.", __FUNCTION__);
2698 ucc_geth_memclean(ugeth);
2703 /* Calculate rx_extended_features */
2704 ugeth->rx_non_dynamic_extended_features = ug_info->ipCheckSumCheck ||
2705 ug_info->ipAddressAlignment ||
2706 (ug_info->numStationAddresses !=
2707 UCC_GETH_NUM_OF_STATION_ADDRESSES_1);
2709 ugeth->rx_extended_features = ugeth->rx_non_dynamic_extended_features ||
2710 (ug_info->vlanOperationTagged != UCC_GETH_VLAN_OPERATION_TAGGED_NOP)
2711 || (ug_info->vlanOperationNonTagged !=
2712 UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP);
2714 uf_regs = uccf->uf_regs;
2715 ug_regs = (struct ucc_geth *) (uccf->uf_regs);
2716 ugeth->ug_regs = ug_regs;
2718 init_default_reg_vals(&uf_regs->upsmr,
2719 &ug_regs->maccfg1, &ug_regs->maccfg2);
2722 /* For more details see the hardware spec. */
2723 init_rx_parameters(ug_info->bro,
2724 ug_info->rsh, ug_info->pro, &uf_regs->upsmr);
2726 /* We're going to ignore other registers for now, */
2727 /* except as needed to get up and running */
2730 /* For more details see the hardware spec. */
2731 init_flow_control_params(ug_info->aufc,
2732 ug_info->receiveFlowControl,
2734 ug_info->pausePeriod,
2735 ug_info->extensionField,
2737 &ug_regs->uempr, &ug_regs->maccfg1);
2739 maccfg1 = in_be32(&ug_regs->maccfg1);
2740 maccfg1 |= MACCFG1_ENABLE_RX;
2741 maccfg1 |= MACCFG1_ENABLE_TX;
2742 out_be32(&ug_regs->maccfg1, maccfg1);
2745 /* For more details see the hardware spec. */
2746 ret_val = init_inter_frame_gap_params(ug_info->nonBackToBackIfgPart1,
2747 ug_info->nonBackToBackIfgPart2,
2749 miminumInterFrameGapEnforcement,
2750 ug_info->backToBackInterFrameGap,
2753 ugeth_err("%s: IPGIFG initialization parameter too large.",
2755 ucc_geth_memclean(ugeth);
2760 /* For more details see the hardware spec. */
2761 ret_val = init_half_duplex_params(ug_info->altBeb,
2762 ug_info->backPressureNoBackoff,
2764 ug_info->excessDefer,
2765 ug_info->altBebTruncation,
2766 ug_info->maxRetransmission,
2767 ug_info->collisionWindow,
2770 ugeth_err("%s: Half Duplex initialization parameter too large.",
2772 ucc_geth_memclean(ugeth);
2777 /* For more details see the hardware spec. */
2778 /* Read only - resets upon read */
2779 ifstat = in_be32(&ug_regs->ifstat);
2782 /* For more details see the hardware spec. */
2783 out_be32(&ug_regs->uempr, 0);
2786 /* For more details see the hardware spec. */
2787 init_hw_statistics_gathering_mode((ug_info->statisticsMode &
2788 UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE),
2789 0, &uf_regs->upsmr, &ug_regs->uescr);
2791 /* Allocate Tx bds */
2792 for (j = 0; j < ug_info->numQueuesTx; j++) {
2793 /* Allocate in multiple of
2794 UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT,
2795 according to spec */
2796 length = ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd))
2797 / UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
2798 * UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
2799 if ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)) %
2800 UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
2801 length += UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
2802 if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
u32 align = 4;
2804 if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4)
2805 align = UCC_GETH_TX_BD_RING_ALIGNMENT;
2806 ugeth->tx_bd_ring_offset[j] =
2807 kmalloc((u32) (length + align), GFP_KERNEL);
2809 if (ugeth->tx_bd_ring_offset[j] != 0)
2810 ugeth->p_tx_bd_ring[j] =
2811 (void *)((ugeth->tx_bd_ring_offset[j] +
2812 align) & ~(align - 1));
2813 } else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
2814 ugeth->tx_bd_ring_offset[j] =
2815 qe_muram_alloc(length,
2816 UCC_GETH_TX_BD_RING_ALIGNMENT);
2817 if (!IS_MURAM_ERR(ugeth->tx_bd_ring_offset[j]))
2818 ugeth->p_tx_bd_ring[j] =
2819 (u8 *) qe_muram_addr(ugeth->
2820 tx_bd_ring_offset[j]);
2822 if (!ugeth->p_tx_bd_ring[j]) {
2824 ("%s: Can not allocate memory for Tx bd rings.",
2826 ucc_geth_memclean(ugeth);
2829 /* Zero unused end of bd ring, according to spec */
2830 memset(ugeth->p_tx_bd_ring[j] +
2831 ug_info->bdRingLenTx[j] * sizeof(struct qe_bd), 0,
2832 length - ug_info->bdRingLenTx[j] * sizeof(struct qe_bd));
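/*
 * The MEM_PART_SYSTEM branch above over-allocates by 'align' bytes and
 * rounds the base up with the classic align-up idiom
 *
 *	aligned = (base + align) & ~(align - 1);
 *
 * which advances the pointer by 1..align bytes; the unrounded kmalloc()
 * result stays in tx_bd_ring_offset[] so ucc_geth_memclean() can
 * kfree() it later.
 */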
2835 /* Allocate Rx bds */
2836 for (j = 0; j < ug_info->numQueuesRx; j++) {
2837 length = ug_info->bdRingLenRx[j] * sizeof(struct qe_bd);
2838 if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
u32 align = 4;
2840 if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4)
2841 align = UCC_GETH_RX_BD_RING_ALIGNMENT;
2842 ugeth->rx_bd_ring_offset[j] =
2843 kmalloc((u32) (length + align), GFP_KERNEL);
2844 if (ugeth->rx_bd_ring_offset[j] != 0)
2845 ugeth->p_rx_bd_ring[j] =
2846 (void *)((ugeth->rx_bd_ring_offset[j] +
2847 align) & ~(align - 1));
2848 } else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
2849 ugeth->rx_bd_ring_offset[j] =
2850 qe_muram_alloc(length,
2851 UCC_GETH_RX_BD_RING_ALIGNMENT);
2852 if (!IS_MURAM_ERR(ugeth->rx_bd_ring_offset[j]))
2853 ugeth->p_rx_bd_ring[j] =
2854 (u8 *) qe_muram_addr(ugeth->
2855 rx_bd_ring_offset[j]);
2857 if (!ugeth->p_rx_bd_ring[j]) {
2859 ("%s: Can not allocate memory for Rx bd rings.",
2861 ucc_geth_memclean(ugeth);
2867 for (j = 0; j < ug_info->numQueuesTx; j++) {
2868 /* Setup the skbuff rings */
2869 ugeth->tx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) *
2870 ugeth->ug_info->bdRingLenTx[j],
2873 if (ugeth->tx_skbuff[j] == NULL) {
2874 ugeth_err("%s: Could not allocate tx_skbuff",
2876 ucc_geth_memclean(ugeth);
2880 for (i = 0; i < ugeth->ug_info->bdRingLenTx[j]; i++)
2881 ugeth->tx_skbuff[j][i] = NULL;
2883 ugeth->skb_curtx[j] = ugeth->skb_dirtytx[j] = 0;
2884 bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j];
2885 for (i = 0; i < ug_info->bdRingLenTx[j]; i++) {
2886 /* clear bd buffer */
2887 out_be32(&((struct qe_bd *)bd)->buf, 0);
2888 /* set bd status and length */
2889 out_be32((u32 *)bd, 0);
2890 bd += sizeof(struct qe_bd);
2892 bd -= sizeof(struct qe_bd);
2893 /* set bd status and length */
2894 out_be32((u32 *)bd, T_W); /* for last BD set Wrap bit */
2898 for (j = 0; j < ug_info->numQueuesRx; j++) {
2899 /* Setup the skbuff rings */
2900 ugeth->rx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) *
2901 ugeth->ug_info->bdRingLenRx[j],
2904 if (ugeth->rx_skbuff[j] == NULL) {
2905 ugeth_err("%s: Could not allocate rx_skbuff",
2907 ucc_geth_memclean(ugeth);
2911 for (i = 0; i < ugeth->ug_info->bdRingLenRx[j]; i++)
2912 ugeth->rx_skbuff[j][i] = NULL;
2914 ugeth->skb_currx[j] = 0;
2915 bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j];
2916 for (i = 0; i < ug_info->bdRingLenRx[j]; i++) {
2917 /* set bd status and length */
2918 out_be32((u32 *)bd, R_I);
2919 /* clear bd buffer */
2920 out_be32(&((struct qe_bd *)bd)->buf, 0);
2921 bd += sizeof(struct qe_bd);
2923 bd -= sizeof(struct qe_bd);
2924 /* set bd status and length */
2925 out_be32((u32 *)bd, R_W); /* for last BD set Wrap bit */
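/*
 * Only the last descriptor of each queue carries the wrap bit (T_W or
 * R_W above); it is what turns the flat BD array into a ring, telling
 * both the QE and the driver's own advance logic to fall back to the
 * ring base instead of running past the end.
 */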
2931 /* Tx global PRAM */
2932 /* Allocate global tx parameter RAM page */
2933 ugeth->tx_glbl_pram_offset =
2934 qe_muram_alloc(sizeof(struct ucc_geth_tx_global_pram),
2935 UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT);
2936 if (IS_MURAM_ERR(ugeth->tx_glbl_pram_offset)) {
2938 ("%s: Can not allocate DPRAM memory for p_tx_glbl_pram.",
2940 ucc_geth_memclean(ugeth);
2943 ugeth->p_tx_glbl_pram =
2944 (struct ucc_geth_tx_global_pram *) qe_muram_addr(ugeth->
2945 tx_glbl_pram_offset);
2946 /* Zero out p_tx_glbl_pram */
2947 memset(ugeth->p_tx_glbl_pram, 0, sizeof(struct ucc_geth_tx_global_pram));
2949 /* Fill global PRAM */
2952 /* Size varies with number of Tx threads */
2953 ugeth->thread_dat_tx_offset =
2954 qe_muram_alloc(numThreadsTxNumerical *
2955 sizeof(struct ucc_geth_thread_data_tx) +
2956 32 * (numThreadsTxNumerical == 1),
2957 UCC_GETH_THREAD_DATA_ALIGNMENT);
2958 if (IS_MURAM_ERR(ugeth->thread_dat_tx_offset)) {
2960 ("%s: Can not allocate DPRAM memory for p_thread_data_tx.",
2962 ucc_geth_memclean(ugeth);
2966 ugeth->p_thread_data_tx =
2967 (struct ucc_geth_thread_data_tx *) qe_muram_addr(ugeth->
2968 thread_dat_tx_offset);
2969 out_be32(&ugeth->p_tx_glbl_pram->tqptr, ugeth->thread_dat_tx_offset);
2972 for (i = 0; i < UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX; i++)
2973 out_be32(&ugeth->p_tx_glbl_pram->vtagtable[i],
2974 ug_info->vtagtable[i]);
2977 for (i = 0; i < TX_IP_OFFSET_ENTRY_MAX; i++)
2978 ugeth->p_tx_glbl_pram->iphoffset[i] = ug_info->iphoffset[i];
2981 /* Size varies with number of Tx queues */
2982 ugeth->send_q_mem_reg_offset =
2983 qe_muram_alloc(ug_info->numQueuesTx *
2984 sizeof(struct ucc_geth_send_queue_qd),
2985 UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT);
2986 if (IS_MURAM_ERR(ugeth->send_q_mem_reg_offset)) {
2988 ("%s: Can not allocate DPRAM memory for p_send_q_mem_reg.",
2990 ucc_geth_memclean(ugeth);
2994 ugeth->p_send_q_mem_reg =
2995 (struct ucc_geth_send_queue_mem_region *) qe_muram_addr(ugeth->
2996 send_q_mem_reg_offset);
2997 out_be32(&ugeth->p_tx_glbl_pram->sqptr, ugeth->send_q_mem_reg_offset);
2999 /* Setup the table */
3000 /* Assume BD rings are already established */
3001 for (i = 0; i < ug_info->numQueuesTx; i++) {
3003 ugeth->p_tx_bd_ring[i] + (ug_info->bdRingLenTx[i] -
3004 1) * sizeof(struct qe_bd);
3005 if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
3006 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
3007 (u32) virt_to_phys(ugeth->p_tx_bd_ring[i]));
3008 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
3009 last_bd_completed_address,
3010 (u32) virt_to_phys(endOfRing));
3011 } else if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_MURAM) {
3013 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
3014 (u32) immrbar_virt_to_phys(ugeth->
3016 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
3017 last_bd_completed_address,
3018 (u32) immrbar_virt_to_phys(endOfRing));
3022 /* schedulerbasepointer */
3024 if (ug_info->numQueuesTx > 1) {
3025 /* scheduler exists only if more than 1 tx queue */
3026 ugeth->scheduler_offset =
3027 qe_muram_alloc(sizeof(struct ucc_geth_scheduler),
3028 UCC_GETH_SCHEDULER_ALIGNMENT);
3029 if (IS_MURAM_ERR(ugeth->scheduler_offset)) {
3031 ("%s: Can not allocate DPRAM memory for p_scheduler.",
3033 ucc_geth_memclean(ugeth);
3037 ugeth->p_scheduler =
3038 (struct ucc_geth_scheduler *) qe_muram_addr(ugeth->
3040 out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer,
3041 ugeth->scheduler_offset);
3042 /* Zero out p_scheduler */
3043 memset(ugeth->p_scheduler, 0, sizeof(struct ucc_geth_scheduler));
3045 /* Set values in scheduler */
3046 out_be32(&ugeth->p_scheduler->mblinterval,
3047 ug_info->mblinterval);
3048 out_be16(&ugeth->p_scheduler->nortsrbytetime,
3049 ug_info->nortsrbytetime);
3050 ugeth->p_scheduler->fracsiz = ug_info->fracsiz;
3051 ugeth->p_scheduler->strictpriorityq = ug_info->strictpriorityq;
3052 ugeth->p_scheduler->txasap = ug_info->txasap;
3053 ugeth->p_scheduler->extrabw = ug_info->extrabw;
3054 for (i = 0; i < NUM_TX_QUEUES; i++)
3055 ugeth->p_scheduler->weightfactor[i] =
3056 ug_info->weightfactor[i];
3058 /* Set pointers to cpucount registers in scheduler */
3059 ugeth->p_cpucount[0] = &(ugeth->p_scheduler->cpucount0);
3060 ugeth->p_cpucount[1] = &(ugeth->p_scheduler->cpucount1);
3061 ugeth->p_cpucount[2] = &(ugeth->p_scheduler->cpucount2);
3062 ugeth->p_cpucount[3] = &(ugeth->p_scheduler->cpucount3);
3063 ugeth->p_cpucount[4] = &(ugeth->p_scheduler->cpucount4);
3064 ugeth->p_cpucount[5] = &(ugeth->p_scheduler->cpucount5);
3065 ugeth->p_cpucount[6] = &(ugeth->p_scheduler->cpucount6);
3066 ugeth->p_cpucount[7] = &(ugeth->p_scheduler->cpucount7);
3069 /* schedulerbasepointer */
3070 /* TxRMON_PTR (statistics) */
3072 if (ug_info->statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) {
3073 ugeth->tx_fw_statistics_pram_offset =
3074 qe_muram_alloc(sizeof
3075 (struct ucc_geth_tx_firmware_statistics_pram),
3076 UCC_GETH_TX_STATISTICS_ALIGNMENT);
3077 if (IS_MURAM_ERR(ugeth->tx_fw_statistics_pram_offset)) {
3079 ("%s: Can not allocate DPRAM memory for"
3080 " p_tx_fw_statistics_pram.", __FUNCTION__);
3081 ucc_geth_memclean(ugeth);
3084 ugeth->p_tx_fw_statistics_pram =
3085 (struct ucc_geth_tx_firmware_statistics_pram *)
3086 qe_muram_addr(ugeth->tx_fw_statistics_pram_offset);
3087 /* Zero out p_tx_fw_statistics_pram */
3088 memset(ugeth->p_tx_fw_statistics_pram,
3089 0, sizeof(struct ucc_geth_tx_firmware_statistics_pram));
3093 /* Already has speed set */
3095 if (ug_info->numQueuesTx > 1)
3096 temoder |= TEMODER_SCHEDULER_ENABLE;
3097 if (ug_info->ipCheckSumGenerate)
3098 temoder |= TEMODER_IP_CHECKSUM_GENERATE;
3099 temoder |= ((ug_info->numQueuesTx - 1) << TEMODER_NUM_OF_QUEUES_SHIFT);
3100 out_be16(&ugeth->p_tx_glbl_pram->temoder, temoder);
3102 test = in_be16(&ugeth->p_tx_glbl_pram->temoder);
3104 /* Function code register value to be used later */
3105 function_code = QE_BMR_BYTE_ORDER_BO_MOT | UCC_FAST_FUNCTION_CODE_GBL;
3106 /* Required for QE */
3108 /* function code register */
3109 out_be32(&ugeth->p_tx_glbl_pram->tstate, ((u32) function_code) << 24);
3111 /* Rx global PRAM */
3112 /* Allocate global rx parameter RAM page */
3113 ugeth->rx_glbl_pram_offset =
3114 qe_muram_alloc(sizeof(struct ucc_geth_rx_global_pram),
3115 UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT);
3116 if (IS_MURAM_ERR(ugeth->rx_glbl_pram_offset)) {
3118 ("%s: Can not allocate DPRAM memory for p_rx_glbl_pram.",
3120 ucc_geth_memclean(ugeth);
3123 ugeth->p_rx_glbl_pram =
3124 (struct ucc_geth_rx_global_pram *) qe_muram_addr(ugeth->
3125 rx_glbl_pram_offset);
3126 /* Zero out p_rx_glbl_pram */
3127 memset(ugeth->p_rx_glbl_pram, 0, sizeof(struct ucc_geth_rx_global_pram));
3129 /* Fill global PRAM */
3132 /* Size varies with number of Rx threads */
3133 ugeth->thread_dat_rx_offset =
3134 qe_muram_alloc(numThreadsRxNumerical *
3135 sizeof(struct ucc_geth_thread_data_rx),
3136 UCC_GETH_THREAD_DATA_ALIGNMENT);
3137 if (IS_MURAM_ERR(ugeth->thread_dat_rx_offset)) {
3139 ("%s: Can not allocate DPRAM memory for p_thread_data_rx.",
3141 ucc_geth_memclean(ugeth);
3145 ugeth->p_thread_data_rx =
3146 (struct ucc_geth_thread_data_rx *) qe_muram_addr(ugeth->
3147 thread_dat_rx_offset);
3148 out_be32(&ugeth->p_rx_glbl_pram->rqptr, ugeth->thread_dat_rx_offset);
3151 out_be16(&ugeth->p_rx_glbl_pram->typeorlen, ug_info->typeorlen);
3153 /* rxrmonbaseptr (statistics) */
3155 if (ug_info->statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) {
3156 ugeth->rx_fw_statistics_pram_offset =
3157 qe_muram_alloc(sizeof
3158 (struct ucc_geth_rx_firmware_statistics_pram),
3159 UCC_GETH_RX_STATISTICS_ALIGNMENT);
3160 if (IS_MURAM_ERR(ugeth->rx_fw_statistics_pram_offset)) {
3162 ("%s: Can not allocate DPRAM memory for"
3163 " p_rx_fw_statistics_pram.", __FUNCTION__);
3164 ucc_geth_memclean(ugeth);
3167 ugeth->p_rx_fw_statistics_pram =
3168 (struct ucc_geth_rx_firmware_statistics_pram *)
3169 qe_muram_addr(ugeth->rx_fw_statistics_pram_offset);
3170 /* Zero out p_rx_fw_statistics_pram */
3171 memset(ugeth->p_rx_fw_statistics_pram, 0,
3172 sizeof(struct ucc_geth_rx_firmware_statistics_pram));
3175 /* intCoalescingPtr */
3177 /* Size varies with number of Rx queues */
3178 ugeth->rx_irq_coalescing_tbl_offset =
3179 qe_muram_alloc(ug_info->numQueuesRx *
3180 sizeof(struct ucc_geth_rx_interrupt_coalescing_entry),
3181 UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT);
3182 if (IS_MURAM_ERR(ugeth->rx_irq_coalescing_tbl_offset)) {
3184 ("%s: Can not allocate DPRAM memory for"
3185 " p_rx_irq_coalescing_tbl.", __FUNCTION__);
3186 ucc_geth_memclean(ugeth);
3190 ugeth->p_rx_irq_coalescing_tbl =
3191 (struct ucc_geth_rx_interrupt_coalescing_table *)
3192 qe_muram_addr(ugeth->rx_irq_coalescing_tbl_offset);
3193 out_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr,
3194 ugeth->rx_irq_coalescing_tbl_offset);
3196 /* Fill interrupt coalescing table */
3197 for (i = 0; i < ug_info->numQueuesRx; i++) {
3198 out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].
3199 interruptcoalescingmaxvalue,
3200 ug_info->interruptcoalescingmaxvalue[i]);
3201 out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].
3202 interruptcoalescingcounter,
3203 ug_info->interruptcoalescingmaxvalue[i]);
3207 init_max_rx_buff_len(uf_info->max_rx_buf_length,
3208 &ugeth->p_rx_glbl_pram->mrblr);
3210 out_be16(&ugeth->p_rx_glbl_pram->mflr, ug_info->maxFrameLength);
3212 init_min_frame_len(ug_info->minFrameLength,
3213 &ugeth->p_rx_glbl_pram->minflr,
3214 &ugeth->p_rx_glbl_pram->mrblr);
3216 out_be16(&ugeth->p_rx_glbl_pram->maxd1, ug_info->maxD1Length);
3218 out_be16(&ugeth->p_rx_glbl_pram->maxd2, ug_info->maxD2Length);
3222 for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++)
3223 l2qt |= (ug_info->l2qt[i] << (28 - 4 * i));
3224 out_be32(&ugeth->p_rx_glbl_pram->l2qt, l2qt);
3227 for (j = 0; j < UCC_GETH_IP_PRIORITY_MAX; j += 8) {
3229 for (i = 0; i < 8; i++)
3230 l3qt |= (ug_info->l3qt[j + i] << (28 - 4 * i));
3231 out_be32(&ugeth->p_rx_glbl_pram->l3qt[j/8], l3qt);
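/*
 * l2qt and each l3qt word are packed nibble tables: eight 4-bit Rx
 * queue numbers per 32-bit register; entry i occupies bits
 * [31 - 4i : 28 - 4i], hence the (28 - 4 * i) shift in both loops.
 */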
3235 out_be16(&ugeth->p_rx_glbl_pram->vlantype, ug_info->vlantype);
3238 out_be16(&ugeth->p_rx_glbl_pram->vlantci, ug_info->vlantci);
3241 out_be32(&ugeth->p_rx_glbl_pram->ecamptr, ug_info->ecamptr);
3244 /* Size varies with number of Rx queues */
3245 ugeth->rx_bd_qs_tbl_offset =
3246 qe_muram_alloc(ug_info->numQueuesRx *
3247 (sizeof(struct ucc_geth_rx_bd_queues_entry) +
3248 sizeof(struct ucc_geth_rx_prefetched_bds)),
3249 UCC_GETH_RX_BD_QUEUES_ALIGNMENT);
3250 if (IS_MURAM_ERR(ugeth->rx_bd_qs_tbl_offset)) {
3252 ("%s: Can not allocate DPRAM memory for p_rx_bd_qs_tbl.",
3254 ucc_geth_memclean(ugeth);
3258 ugeth->p_rx_bd_qs_tbl =
3259 (struct ucc_geth_rx_bd_queues_entry *) qe_muram_addr(ugeth->
3260 rx_bd_qs_tbl_offset);
3261 out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset);
3262 /* Zero out p_rx_bd_qs_tbl */
3263 memset(ugeth->p_rx_bd_qs_tbl,
3265 ug_info->numQueuesRx * (sizeof(struct ucc_geth_rx_bd_queues_entry) +
3266 sizeof(struct ucc_geth_rx_prefetched_bds)));
3268 /* Setup the table */
3269 /* Assume BD rings are already established */
3270 for (i = 0; i < ug_info->numQueuesRx; i++) {
3271 if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
3272 out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
3273 (u32) virt_to_phys(ugeth->p_rx_bd_ring[i]));
3274 } else if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_MURAM) {
3276 out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
3277 (u32) immrbar_virt_to_phys(ugeth->
3280 /* rest of fields handled by QE */
3284 /* Already has speed set */
3286 if (ugeth->rx_extended_features)
3287 remoder |= REMODER_RX_EXTENDED_FEATURES;
3288 if (ug_info->rxExtendedFiltering)
3289 remoder |= REMODER_RX_EXTENDED_FILTERING;
3290 if (ug_info->dynamicMaxFrameLength)
3291 remoder |= REMODER_DYNAMIC_MAX_FRAME_LENGTH;
3292 if (ug_info->dynamicMinFrameLength)
3293 remoder |= REMODER_DYNAMIC_MIN_FRAME_LENGTH;
3295 remoder |= ug_info->vlanOperationTagged << REMODER_VLAN_OPERATION_TAGGED_SHIFT;
3298 remoder |= ug_info->vlanOperationNonTagged << REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT;
3299 remoder |= ug_info->rxQoSMode << REMODER_RX_QOS_MODE_SHIFT;
3300 remoder |= ((ug_info->numQueuesRx - 1) << REMODER_NUM_OF_QUEUES_SHIFT);
3301 if (ug_info->ipCheckSumCheck)
3302 remoder |= REMODER_IP_CHECKSUM_CHECK;
3303 if (ug_info->ipAddressAlignment)
3304 remoder |= REMODER_IP_ADDRESS_ALIGNMENT;
3305 out_be32(&ugeth->p_rx_glbl_pram->remoder, remoder);
3307 /* Note that this function must be called */
3308 /* ONLY AFTER p_tx_fw_statistics_pram */
3309 /* and p_rx_fw_statistics_pram are allocated! */
3310 init_firmware_statistics_gathering_mode((ug_info->
3312 UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX),
3313 (ug_info->statisticsMode &
3314 UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX),
3315 &ugeth->p_tx_glbl_pram->txrmonbaseptr,
3316 ugeth->tx_fw_statistics_pram_offset,
3317 &ugeth->p_rx_glbl_pram->rxrmonbaseptr,
3318 ugeth->rx_fw_statistics_pram_offset,
3319 &ugeth->p_tx_glbl_pram->temoder,
3320 &ugeth->p_rx_glbl_pram->remoder);
3322 /* function code register */
3323 ugeth->p_rx_glbl_pram->rstate = function_code;
3325 /* initialize extended filtering */
3326 if (ug_info->rxExtendedFiltering) {
3327 if (!ug_info->extendedFilteringChainPointer) {
3328 ugeth_err("%s: Null Extended Filtering Chain Pointer.",
3330 ucc_geth_memclean(ugeth);
3334 /* Allocate memory for extended filtering Mode Global
3336 ugeth->exf_glbl_param_offset =
3337 qe_muram_alloc(sizeof(struct ucc_geth_exf_global_pram),
3338 UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT);
3339 if (IS_MURAM_ERR(ugeth->exf_glbl_param_offset)) {
3341 ("%s: Can not allocate DPRAM memory for"
3342 " p_exf_glbl_param.", __FUNCTION__);
3343 ucc_geth_memclean(ugeth);
3347 ugeth->p_exf_glbl_param =
3348 (struct ucc_geth_exf_global_pram *) qe_muram_addr(ugeth->
3349 exf_glbl_param_offset);
3350 out_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam,
3351 ugeth->exf_glbl_param_offset);
3352 out_be32(&ugeth->p_exf_glbl_param->l2pcdptr,
3353 (u32) ug_info->extendedFilteringChainPointer);
3355 } else { /* initialize 82xx style address filtering */
3357 /* Init individual address recognition registers to disabled */
3359 for (j = 0; j < NUM_OF_PADDRS; j++)
3360 ugeth_82xx_filtering_clear_addr_in_paddr(ugeth, (u8) j);
3362 /* Create CQs for hash tables */
3363 if (ug_info->maxGroupAddrInHash > 0) {
3364 INIT_LIST_HEAD(&ugeth->group_hash_q);
3366 if (ug_info->maxIndAddrInHash > 0) {
3367 INIT_LIST_HEAD(&ugeth->ind_hash_q);
3370 p_82xx_addr_filt = (struct ucc_geth_82xx_address_filtering_pram *) ugeth->
3371 p_rx_glbl_pram->addressfiltering;
3373 ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
3374 ENET_ADDR_TYPE_GROUP);
3375 ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
3376 ENET_ADDR_TYPE_INDIVIDUAL);
3380 * Initialize UCC at QE level
3383 command = QE_INIT_TX_RX;
3385 /* Allocate shadow InitEnet command parameter structure.
3386 * This is needed because after the InitEnet command is executed,
3387 * the structure in DPRAM is released, because DPRAM is a premium resource.
3389 * This shadow structure keeps a copy of what was done so that the
3390 * allocated resources can be released when the channel is freed.
3392 if (!(ugeth->p_init_enet_param_shadow =
3393 kmalloc(sizeof(struct ucc_geth_init_pram), GFP_KERNEL))) {
3395 ("%s: Can not allocate memory for"
3396 " p_UccInitEnetParamShadows.", __FUNCTION__);
3397 ucc_geth_memclean(ugeth);
3400 /* Zero out *p_init_enet_param_shadow */
3401 memset((char *)ugeth->p_init_enet_param_shadow,
3402 0, sizeof(struct ucc_geth_init_pram));
3404 /* Fill shadow InitEnet command parameter structure */
3406 ugeth->p_init_enet_param_shadow->resinit1 =
3407 ENET_INIT_PARAM_MAGIC_RES_INIT1;
3408 ugeth->p_init_enet_param_shadow->resinit2 =
3409 ENET_INIT_PARAM_MAGIC_RES_INIT2;
3410 ugeth->p_init_enet_param_shadow->resinit3 =
3411 ENET_INIT_PARAM_MAGIC_RES_INIT3;
3412 ugeth->p_init_enet_param_shadow->resinit4 =
3413 ENET_INIT_PARAM_MAGIC_RES_INIT4;
3414 ugeth->p_init_enet_param_shadow->resinit5 =
3415 ENET_INIT_PARAM_MAGIC_RES_INIT5;
3416 ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
3417 ((u32) ug_info->numThreadsRx) << ENET_INIT_PARAM_RGF_SHIFT;
3418 ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
3419 ((u32) ug_info->numThreadsTx) << ENET_INIT_PARAM_TGF_SHIFT;
3421 ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
3422 ugeth->rx_glbl_pram_offset | ug_info->riscRx;
3423 if ((ug_info->largestexternallookupkeysize !=
3424 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE)
3425 && (ug_info->largestexternallookupkeysize !=
3426 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
3427 && (ug_info->largestexternallookupkeysize !=
3428 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) {
3429 ugeth_err("%s: Invalid largest External Lookup Key Size.",
3431 ucc_geth_memclean(ugeth);
3434 ugeth->p_init_enet_param_shadow->largestexternallookupkeysize =
3435 ug_info->largestexternallookupkeysize;
3436 size = sizeof(struct ucc_geth_thread_rx_pram);
3437 if (ug_info->rxExtendedFiltering) {
3438 size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
3439 if (ug_info->largestexternallookupkeysize ==
3440 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
3442 size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
3443 if (ug_info->largestexternallookupkeysize ==
3444 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
3446 size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
3449 if ((ret_val = fill_init_enet_entries(ugeth, &(ugeth->
3450 p_init_enet_param_shadow->rxthread[0]),
3451 (u8) (numThreadsRxNumerical + 1)
3452 /* Rx needs one extra for terminator */
3453 , size, UCC_GETH_THREAD_RX_PRAM_ALIGNMENT,
3454 ug_info->riscRx, 1)) != 0) {
3455 ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
3457 ucc_geth_memclean(ugeth);
3461 ugeth->p_init_enet_param_shadow->txglobal =
3462 ugeth->tx_glbl_pram_offset | ug_info->riscTx;
3464 if ((ret_val = fill_init_enet_entries(ugeth,
3465 &(ugeth->p_init_enet_param_shadow->
3466 txthread[0]), numThreadsTxNumerical,
3467 sizeof(struct ucc_geth_thread_tx_pram),
3468 UCC_GETH_THREAD_TX_PRAM_ALIGNMENT,
3469 ug_info->riscTx, 0)) != 0) {
3470 ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
3472 ucc_geth_memclean(ugeth);
3476 /* Load Rx bds with buffers */
3477 for (i = 0; i < ug_info->numQueuesRx; i++) {
3478 if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) {
3479 ugeth_err("%s: Can not fill Rx bds with buffers.",
3481 ucc_geth_memclean(ugeth);
3486 /* Allocate InitEnet command parameter structure */
3487 init_enet_pram_offset = qe_muram_alloc(sizeof(struct ucc_geth_init_pram), 4);
3488 if (IS_MURAM_ERR(init_enet_pram_offset)) {
3490 ("%s: Can not allocate DPRAM memory for p_init_enet_pram.",
3492 ucc_geth_memclean(ugeth);
3496 p_init_enet_pram = (struct ucc_geth_init_pram *) qe_muram_addr(init_enet_pram_offset);
3498 /* Copy shadow InitEnet command parameter structure into PRAM */
3499 p_init_enet_pram->resinit1 = ugeth->p_init_enet_param_shadow->resinit1;
3500 p_init_enet_pram->resinit2 = ugeth->p_init_enet_param_shadow->resinit2;
3501 p_init_enet_pram->resinit3 = ugeth->p_init_enet_param_shadow->resinit3;
3502 p_init_enet_pram->resinit4 = ugeth->p_init_enet_param_shadow->resinit4;
3503 out_be16(&p_init_enet_pram->resinit5,
3504 ugeth->p_init_enet_param_shadow->resinit5);
3505 p_init_enet_pram->largestexternallookupkeysize =
3506 ugeth->p_init_enet_param_shadow->largestexternallookupkeysize;
3507 out_be32(&p_init_enet_pram->rgftgfrxglobal,
3508 ugeth->p_init_enet_param_shadow->rgftgfrxglobal);
3509 for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_RX; i++)
3510 out_be32(&p_init_enet_pram->rxthread[i],
3511 ugeth->p_init_enet_param_shadow->rxthread[i]);
3512 out_be32(&p_init_enet_pram->txglobal,
3513 ugeth->p_init_enet_param_shadow->txglobal);
3514 for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_TX; i++)
3515 out_be32(&p_init_enet_pram->txthread[i],
3516 ugeth->p_init_enet_param_shadow->txthread[i]);
3518 /* Issue QE command */
3520 cecr_subblock = ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
3521 qe_issue_cmd(command, cecr_subblock, QE_CR_PROTOCOL_ETHERNET,
3522 init_enet_pram_offset);
3524 /* Free InitEnet command parameter */
3525 qe_muram_free(init_enet_pram_offset);
3530 /* returns a net_device_stats structure pointer */
3531 static struct net_device_stats *ucc_geth_get_stats(struct net_device *dev)
3533 struct ucc_geth_private *ugeth = netdev_priv(dev);
3535 return &(ugeth->stats);
3538 /* ucc_geth_timeout gets called when a packet has not been
3539 * transmitted after a set amount of time.
3540 * For now, assume that clearing out all the structures and
3541 * starting over will fix the problem. */
3542 static void ucc_geth_timeout(struct net_device *dev)
3544 struct ucc_geth_private *ugeth = netdev_priv(dev);
3546 ugeth_vdbg("%s: IN", __FUNCTION__);
3548 ugeth->stats.tx_errors++;
3550 ugeth_dump_regs(ugeth);
3552 if (dev->flags & IFF_UP) {
3553 ucc_geth_stop(ugeth);
3554 ucc_geth_startup(ugeth);
3557 netif_schedule(dev);
3560 /* This is called by the kernel when a frame is ready for transmission. */
3561 /* It is pointed to by the dev->hard_start_xmit function pointer */
3562 static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
3564 struct ucc_geth_private *ugeth = netdev_priv(dev);
3565 u8 *bd; /* BD pointer */
3569 ugeth_vdbg("%s: IN", __FUNCTION__);
3571 spin_lock_irq(&ugeth->lock);
3573 ugeth->stats.tx_bytes += skb->len;
3575 /* Start from the next BD that should be filled */
3576 bd = ugeth->txBd[txQ];
3577 bd_status = in_be32((u32 *)bd);
3578 /* Save the skb pointer so we can free it later */
3579 ugeth->tx_skbuff[txQ][ugeth->skb_curtx[txQ]] = skb;
3581 /* Update the current skb pointer (wrapping if this was the last) */
3582 ugeth->skb_curtx[txQ] =
3583 (ugeth->skb_curtx[txQ] +
3584 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);
3586 /* set up the buffer descriptor */
3587 out_be32(&((struct qe_bd *)bd)->buf,
3588 dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE));
3590 /* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */
3592 bd_status = (bd_status & T_W) | T_R | T_I | T_L | skb->len;
3594 /* set bd status and length */
3595 out_be32((u32 *)bd, bd_status);
3597 dev->trans_start = jiffies;
3599 /* Move to next BD in the ring */
3600 if (!(bd_status & T_W))
3601 bd += sizeof(struct qe_bd);
else
3603 bd = ugeth->p_tx_bd_ring[txQ];
3605 /* If the next BD still needs to be cleaned up, then the bds
3606 are full. We need to tell the kernel to stop sending us stuff. */
3607 if (bd == ugeth->confBd[txQ]) {
3608 if (!netif_queue_stopped(dev))
3609 netif_stop_queue(dev);
3612 ugeth->txBd[txQ] = bd;
3614 if (ugeth->p_scheduler) {
3615 ugeth->cpucount[txQ]++;
3616 /* Indicate to QE that there are more Tx bds ready for
3618 /* This is done by writing a running counter of the bd
3619 count to the scheduler PRAM. */
3620 out_be16(ugeth->p_cpucount[txQ], ugeth->cpucount[txQ]);
3623 spin_unlock_irq(&ugeth->lock);
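/*
 * Descriptor handoff ordering in the xmit path above: the buffer
 * pointer is written first and the status word (carrying T_R, the
 * owned-by-QE bit) last, so the QE never observes a half-initialized
 * BD.  ucc_geth_rx() and ucc_geth_tx() below apply the same discipline
 * in reverse, reading the status word before trusting the descriptor.
 */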
3628 static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit)
3630 struct sk_buff *skb;
3632 u16 length, howmany = 0;
3636 ugeth_vdbg("%s: IN", __FUNCTION__);
3638 spin_lock(&ugeth->lock);
3639 /* collect received buffers */
3640 bd = ugeth->rxBd[rxQ];
3642 bd_status = in_be32((u32 *)bd);
3644 /* while there are received buffers and BD is full (~R_E) */
3645 while (!((bd_status & (R_E)) || (--rx_work_limit < 0))) {
3646 bdBuffer = (u8 *) in_be32(&((struct qe_bd *)bd)->buf);
3647 length = (u16) ((bd_status & BD_LENGTH_MASK) - 4);
3648 skb = ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]];
3650 /* determine whether buffer is first, last, first and last
3651 (single buffer frame) or middle (not first and not last) */
if (!skb ||
3653 (!(bd_status & (R_F | R_L))) ||
3654 (bd_status & R_ERRORS_FATAL)) {
3655 ugeth_vdbg("%s, %d: ERROR!!! skb - 0x%08x",
3656 __FUNCTION__, __LINE__, (u32) skb);
3658 dev_kfree_skb_any(skb);
3660 ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL;
3661 ugeth->stats.rx_dropped++;
3663 ugeth->stats.rx_packets++;
3666 /* Prep the skb for the packet */
3667 skb_put(skb, length);
3669 /* Tell the skb what kind of packet this is */
3670 skb->protocol = eth_type_trans(skb, ugeth->dev);
3672 ugeth->stats.rx_bytes += length;
3673 /* Send the packet up the stack */
3674 #ifdef CONFIG_UGETH_NAPI
3675 netif_receive_skb(skb);
3678 #endif /* CONFIG_UGETH_NAPI */
3681 ugeth->dev->last_rx = jiffies;
3683 skb = get_new_skb(ugeth, bd);
if (!skb) {
3685 ugeth_warn("%s: No Rx Data Buffer", __FUNCTION__);
3686 spin_unlock(&ugeth->lock);
3687 ugeth->stats.rx_dropped++;
3691 ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = skb;
3693 /* update to point at the next skb */
3694 ugeth->skb_currx[rxQ] =
3695 (ugeth->skb_currx[rxQ] +
3696 1) & RX_RING_MOD_MASK(ugeth->ug_info->bdRingLenRx[rxQ]);
3698 if (bd_status & R_W)
3699 bd = ugeth->p_rx_bd_ring[rxQ];
else
3701 bd += sizeof(struct qe_bd);
3703 bd_status = in_be32((u32 *)bd);
3706 ugeth->rxBd[rxQ] = bd;
3707 spin_unlock(&ugeth->lock);
3711 static int ucc_geth_tx(struct net_device *dev, u8 txQ)
3713 /* Start from the next BD that should be filled */
3714 struct ucc_geth_private *ugeth = netdev_priv(dev);
3715 u8 *bd; /* BD pointer */
3718 bd = ugeth->confBd[txQ];
3719 bd_status = in_be32((u32 *)bd);
3721 /* Normal processing. */
3722 while ((bd_status & T_R) == 0) {
3723 /* BD contains already transmitted buffer. */
3724 /* Handle the transmitted buffer and release */
3725 /* the BD to be used with the current frame */
3727 if ((bd == ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0))
3730 ugeth->stats.tx_packets++;
3732 /* Free the sk buffer associated with this TxBD */
3733 dev_kfree_skb_irq(ugeth->
3734 tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]]);
3735 ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL;
3736 ugeth->skb_dirtytx[txQ] =
3737 (ugeth->skb_dirtytx[txQ] +
3738 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);
3740 /* We freed a buffer, so now we can restart transmission */
3741 if (netif_queue_stopped(dev))
3742 netif_wake_queue(dev);
3744 /* Advance the confirmation BD pointer */
3745 if (!(bd_status & T_W))
3746 bd += sizeof(struct qe_bd);
else
3748 bd = ugeth->p_tx_bd_ring[txQ];
3749 bd_status = in_be32((u32 *)bd);
3751 ugeth->confBd[txQ] = bd;
3755 #ifdef CONFIG_UGETH_NAPI
3756 static int ucc_geth_poll(struct net_device *dev, int *budget)
3758 struct ucc_geth_private *ugeth = netdev_priv(dev);
3760 int rx_work_limit = *budget;
3763 if (rx_work_limit > dev->quota)
3764 rx_work_limit = dev->quota;
3766 howmany = ucc_geth_rx(ugeth, rxQ, rx_work_limit);
3768 dev->quota -= howmany;
3769 rx_work_limit -= howmany;
3772 if (rx_work_limit >= 0)
3773 netif_rx_complete(dev);
3775 return (rx_work_limit < 0) ? 1 : 0;
3777 #endif /* CONFIG_UGETH_NAPI */
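/*
 * Old-style NAPI contract, for reference: ucc_geth_poll() consumes up
 * to min(*budget, dev->quota) packets, charges what it used against
 * dev->quota and its local work limit, and returns nonzero (stay on
 * the poll list) only when the limit was exhausted; otherwise it calls
 * netif_rx_complete() and returns 0.
 */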
3779 static irqreturn_t ucc_geth_irq_handler(int irq, void *info)
3781 struct net_device *dev = (struct net_device *)info;
3782 struct ucc_geth_private *ugeth = netdev_priv(dev);
3783 struct ucc_fast_private *uccf;
3784 struct ucc_geth_info *ug_info;
3785 register u32 ucce = 0;
3786 register u32 bit_mask = UCCE_RXBF_SINGLE_MASK;
3787 register u32 tx_mask = UCCE_TXBF_SINGLE_MASK;
3790 ugeth_vdbg("%s: IN", __FUNCTION__);
3796 ug_info = ugeth->ug_info;
3799 ucce |= (u32) (in_be32(uccf->p_ucce) & in_be32(uccf->p_uccm));
3801 /* clear event bits for next time */
3802 /* Side effect here is to mask ucce variable
3803 for future processing below. */
3804 out_be32(uccf->p_ucce, ucce); /* Clear with ones,
3805 but only bits in UCCM */
3807 /* We ignore Tx interrupts because Tx confirmation is
3808 done inside Tx routine */
3810 for (i = 0; i < ug_info->numQueuesRx; i++) {
3811 if (ucce & bit_mask)
3812 ucc_geth_rx(ugeth, i,
3813 (int)ugeth->ug_info->bdRingLenRx[i]);
3819 for (i = 0; i < ug_info->numQueuesTx; i++) {
if (ucce & tx_mask)
3821 ucc_geth_tx(dev, i);
3827 if (ucce & UCCE_BSY) {
3828 ugeth_vdbg("Got BUSY irq!!!!");
3829 ugeth->stats.rx_errors++;
3832 if (ucce & UCCE_OTHER) {
3833 ugeth_vdbg("Got frame with error (ucce - 0x%08x)!!!!",
3835 ugeth->stats.rx_errors++;
3844 static irqreturn_t phy_interrupt(int irq, void *dev_id)
3846 struct net_device *dev = (struct net_device *)dev_id;
3847 struct ucc_geth_private *ugeth = netdev_priv(dev);
3849 ugeth_vdbg("%s: IN", __FUNCTION__);
3851 /* Clear the interrupt */
3852 mii_clear_phy_interrupt(ugeth->mii_info);
3854 /* Disable PHY interrupts */
3855 mii_configure_phy_interrupt(ugeth->mii_info, MII_INTERRUPT_DISABLED);
3857 /* Schedule the phy change */
3858 schedule_work(&ugeth->tq);
3863 /* Scheduled by the phy_interrupt/timer to handle PHY changes */
3864 static void ugeth_phy_change(struct work_struct *work)
3866 struct ucc_geth_private *ugeth =
3867 container_of(work, struct ucc_geth_private, tq);
3868 struct net_device *dev = ugeth->dev;
3869 struct ucc_geth *ug_regs;
3872 ugeth_vdbg("%s: IN", __FUNCTION__);
3874 ug_regs = ugeth->ug_regs;
3876 /* Delay to give the PHY a chance to change the
3880 /* Update the link, speed, duplex */
3881 result = ugeth->mii_info->phyinfo->read_status(ugeth->mii_info);
3883 /* Adjust the known status as long as the link
3884 * isn't still coming up */
3885 if ((0 == result) || (ugeth->mii_info->link == 0))
3888 /* Reenable interrupts, if needed */
3889 if (ugeth->ug_info->board_flags & FSL_UGETH_BRD_HAS_PHY_INTR)
3890 mii_configure_phy_interrupt(ugeth->mii_info,
3891 MII_INTERRUPT_ENABLED);
3894 /* Called every so often on systems that don't interrupt
3895 * the core for PHY changes */
3896 static void ugeth_phy_timer(unsigned long data)
3898 struct net_device *dev = (struct net_device *)data;
3899 struct ucc_geth_private *ugeth = netdev_priv(dev);
3901 schedule_work(&ugeth->tq);
3903 mod_timer(&ugeth->phy_info_timer, jiffies + PHY_CHANGE_TIME * HZ);
3906 /* Keep trying aneg for some time
3907 * If, after UGETH_AN_TIMEOUT seconds, it has not
3908 * finished, we switch to forced.
3909 * Either way, once the process has completed, we either
3910 * request the interrupt, or switch the timer over to
3911 * using ugeth_phy_timer to check status */
3912 static void ugeth_phy_startup_timer(unsigned long data)
3914 struct ugeth_mii_info *mii_info = (struct ugeth_mii_info *)data;
3915 struct ucc_geth_private *ugeth = netdev_priv(mii_info->dev);
3916 static int secondary = UGETH_AN_TIMEOUT;
3919 /* Configure the Auto-negotiation */
3920 result = mii_info->phyinfo->config_aneg(mii_info);
3922 /* If autonegotiation failed to start, and
3923 * we haven't timed out, reset the timer, and return */
3924 if (result && secondary--) {
3925 mod_timer(&ugeth->phy_info_timer, jiffies + HZ);
3927 } else if (result) {
3928 /* Couldn't start autonegotiation.
3929 * Try switching to forced */
3930 mii_info->autoneg = 0;
3931 result = mii_info->phyinfo->config_aneg(mii_info);
3933 /* Forcing failed! Give up */
3935 ugeth_err("%s: Forcing failed!", mii_info->dev->name);
3940 /* Kill the timer so it can be restarted */
3941 del_timer_sync(&ugeth->phy_info_timer);
3943 /* Grab the PHY interrupt, if necessary/possible */
3944 if (ugeth->ug_info->board_flags & FSL_UGETH_BRD_HAS_PHY_INTR) {
3945 if (request_irq(ugeth->ug_info->phy_interrupt,
3946 phy_interrupt, IRQF_SHARED,
3947 "phy_interrupt", mii_info->dev) < 0) {
3948 ugeth_err("%s: Can't get IRQ %d (PHY)",
3949 mii_info->dev->name,
3950 ugeth->ug_info->phy_interrupt);
3952 mii_configure_phy_interrupt(ugeth->mii_info,
3953 MII_INTERRUPT_ENABLED);
3958 /* Start the timer again, this time in order to
3959 * handle a change in status */
3960 init_timer(&ugeth->phy_info_timer);
3961 ugeth->phy_info_timer.function = &ugeth_phy_timer;
3962 ugeth->phy_info_timer.data = (unsigned long)mii_info->dev;
3963 mod_timer(&ugeth->phy_info_timer, jiffies + PHY_CHANGE_TIME * HZ);
3966 /* Called when something needs to use the ethernet device */
3967 /* Returns 0 for success. */
3968 static int ucc_geth_open(struct net_device *dev)
3970 struct ucc_geth_private *ugeth = netdev_priv(dev);
3973 ugeth_vdbg("%s: IN", __FUNCTION__);
3975 /* Test station address */
3976 if (dev->dev_addr[0] & ENET_GROUP_ADDR) {
3977 ugeth_err("%s: Multicast address used for station address"
3978 " - is this what you wanted?", __FUNCTION__);
3982 err = ucc_geth_startup(ugeth);
3984 ugeth_err("%s: Cannot configure net device, aborting.",
3989 err = adjust_enet_interface(ugeth);
3991 ugeth_err("%s: Cannot configure net device, aborting.",
3996 /* Set MACSTNADDR1, MACSTNADDR2 */
3997 /* For more details see the hardware spec. */
3998 init_mac_station_addr_regs(dev->dev_addr[0],
4004 &ugeth->ug_regs->macstnaddr1,
4005 &ugeth->ug_regs->macstnaddr2);
4007 err = init_phy(dev);
4009 ugeth_err("%s: Cannot initialzie PHY, aborting.", dev->name);
4012 #ifndef CONFIG_UGETH_NAPI
4014 request_irq(ugeth->ug_info->uf_info.irq, ucc_geth_irq_handler, 0,
4017 ugeth_err("%s: Cannot get IRQ for net device, aborting.",
4019 ucc_geth_stop(ugeth);
4022 #endif /* CONFIG_UGETH_NAPI */
4024 /* Set up the PHY change work queue */
4025 INIT_WORK(&ugeth->tq, ugeth_phy_change);
4027 init_timer(&ugeth->phy_info_timer);
4028 ugeth->phy_info_timer.function = &ugeth_phy_startup_timer;
4029 ugeth->phy_info_timer.data = (unsigned long)ugeth->mii_info;
4030 mod_timer(&ugeth->phy_info_timer, jiffies + HZ);
4032 err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
4034 ugeth_err("%s: Cannot enable net device, aborting.", dev->name);
4035 ucc_geth_stop(ugeth);
4039 netif_start_queue(dev);
4044 /* Stops the kernel queue, and halts the controller */
4045 static int ucc_geth_close(struct net_device *dev)
4047 struct ucc_geth_private *ugeth = netdev_priv(dev);
4049 ugeth_vdbg("%s: IN", __FUNCTION__);
4051 ucc_geth_stop(ugeth);
4053 /* Shutdown the PHY */
4054 if (ugeth->mii_info->phyinfo->close)
4055 ugeth->mii_info->phyinfo->close(ugeth->mii_info);
4057 kfree(ugeth->mii_info);
4059 netif_stop_queue(dev);
4064 const struct ethtool_ops ucc_geth_ethtool_ops = { };
4066 static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *match)
4068 struct device *device = &ofdev->dev;
4069 struct device_node *np = ofdev->node;
4070 struct net_device *dev = NULL;
4071 struct ucc_geth_private *ugeth = NULL;
4072 struct ucc_geth_info *ug_info;
4073 struct resource res;
4074 struct device_node *phy;
4075 int err, ucc_num, phy_interface;
4076 static int mii_mng_configured = 0;
4078 const unsigned int *prop;
4079 const void *mac_addr;
4081 ugeth_vdbg("%s: IN", __FUNCTION__);
4083 prop = get_property(np, "device-id", NULL);
4084 ucc_num = *prop - 1;
4085 if ((ucc_num < 0) || (ucc_num > 7))
4088 ug_info = &ugeth_info[ucc_num];
4089 ug_info->uf_info.ucc_num = ucc_num;
4090 prop = get_property(np, "rx-clock", NULL);
4091 ug_info->uf_info.rx_clock = *prop;
4092 prop = get_property(np, "tx-clock", NULL);
4093 ug_info->uf_info.tx_clock = *prop;
4094 err = of_address_to_resource(np, 0, &res);
4098 ug_info->uf_info.regs = res.start;
4099 ug_info->uf_info.irq = irq_of_parse_and_map(np, 0);
4101 ph = get_property(np, "phy-handle", NULL);
4102 phy = of_find_node_by_phandle(*ph);

	prop = get_property(phy, "reg", NULL);
	ug_info->phy_address = *prop;
	prop = get_property(phy, "interface", NULL);
	ug_info->enet_interface = *prop;
	ug_info->phy_interrupt = irq_of_parse_and_map(phy, 0);
	ug_info->board_flags = (ug_info->phy_interrupt == NO_IRQ) ?
		0 : FSL_UGETH_BRD_HAS_PHY_INTR;
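
	/* Note: when the PHY node carries no interrupt (NO_IRQ), the
	 * board-flags bit stays clear and link changes must be picked up
	 * by the timer-based polling path instead. */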

	printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d)\n",
	       ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs,
	       ug_info->uf_info.irq);

	if (ug_info == NULL) {
		ugeth_err("%s: [%d] Missing additional data!", __FUNCTION__,
			  ucc_num);
		return -ENODEV;
	}

	/* FIXME: Work around for early chip rev.               */
	/* There's a bug in initial chip rev(s) in the RGMII ac */
	/* timing.                                              */
	/* The following compensates by writing to the reserved */
	/* QE Port Output Hold Registers (CPOH1?).              */
	prop = get_property(phy, "interface", NULL);
	phy_interface = *prop;
	if ((phy_interface == ENET_1000_RGMII) ||
	    (phy_interface == ENET_100_RGMII) ||
	    (phy_interface == ENET_10_RGMII)) {
		struct device_node *soc;
		phys_addr_t immrbase = -1;
		u32 *tmp_reg;
		u32 tmp_val;

		soc = of_find_node_by_type(NULL, "soc");
		if (soc) {
			unsigned int size;
			const void *prop = get_property(soc, "reg", &size);

			immrbase = of_translate_address(soc, prop);
			of_node_put(soc);
		}

		tmp_reg = (u32 *) ioremap(immrbase + 0x14A8, 0x4);
		tmp_val = in_be32(tmp_reg);
		if (ucc_num == 1)
			out_be32(tmp_reg, tmp_val | 0x00003000);
		else if (ucc_num == 2)
			out_be32(tmp_reg, tmp_val | 0x0c000000);
		iounmap(tmp_reg);
	}
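
	/* Note: 0x14A8 and the OR-ed masks are undocumented, rev-specific
	 * magic for the presumed CPOH1 register; they appear to stretch the
	 * output hold time on the RGMII pins of UCC2 (ucc_num 1) and UCC3
	 * (ucc_num 2). */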

	if (!mii_mng_configured) {
		ucc_set_qe_mux_mii_mng(ucc_num);
		mii_mng_configured = 1;
	}
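
	/* Note: the QE multiplexes MII management onto a single UCC, so
	 * the mux is programmed once, for the first device probed; the
	 * static flag prevents later probes from re-routing it. */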

	/* Create an ethernet device instance */
	dev = alloc_etherdev(sizeof(*ugeth));
	if (dev == NULL)
		return -ENOMEM;

	ugeth = netdev_priv(dev);
	spin_lock_init(&ugeth->lock);

	dev_set_drvdata(device, dev);

	/* Set the dev->base_addr to the ucc_geth reg region */
	dev->base_addr = (unsigned long)(ug_info->uf_info.regs);

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, device);

	/* Fill in the dev structure */
	dev->open = ucc_geth_open;
	dev->hard_start_xmit = ucc_geth_start_xmit;
	dev->tx_timeout = ucc_geth_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef CONFIG_UGETH_NAPI
	dev->poll = ucc_geth_poll;
	dev->weight = UCC_GETH_DEV_WEIGHT;
#endif /* CONFIG_UGETH_NAPI */
	dev->stop = ucc_geth_close;
	dev->get_stats = ucc_geth_get_stats;
	/* dev->change_mtu = ucc_geth_change_mtu; */
	dev->mtu = 1500;
	dev->set_multicast_list = ucc_geth_set_multi;
	dev->ethtool_ops = &ucc_geth_ethtool_ops;
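
	/* Note: this kernel generation wires netdev callbacks by assigning
	 * function pointers directly on struct net_device; the same hooks
	 * later moved into struct net_device_ops. */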

	err = register_netdev(dev);
	if (err) {
		ugeth_err("%s: Cannot register net device, aborting.",
			  dev->name);
		free_netdev(dev);
		return err;
	}

	ugeth->ug_info = ug_info;
	ugeth->dev = dev;

	mac_addr = of_get_mac_address(np);
	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, 6);

	return 0;
}

static int ucc_geth_remove(struct of_device *ofdev)
{
	struct device *device = &ofdev->dev;
	struct net_device *dev = dev_get_drvdata(device);
	struct ucc_geth_private *ugeth = netdev_priv(dev);

	dev_set_drvdata(device, NULL);
	ucc_geth_memclean(ugeth);
	free_netdev(dev);

	return 0;
}

static struct of_device_id ucc_geth_match[] = {
	{
		.type = "network",
		.compatible = "ucc_geth",
	},
	{},
};

MODULE_DEVICE_TABLE(of, ucc_geth_match);

static struct of_platform_driver ucc_geth_driver = {
	.name		= DRV_NAME,
	.match_table	= ucc_geth_match,
	.probe		= ucc_geth_probe,
	.remove		= ucc_geth_remove,
};

static int __init ucc_geth_init(void)
{
	int i;

	printk(KERN_INFO "ucc_geth: " DRV_DESC "\n");
	for (i = 0; i < 8; i++)
		memcpy(&(ugeth_info[i]), &ugeth_primary_info,
		       sizeof(ugeth_primary_info));

	return of_register_platform_driver(&ucc_geth_driver);
}

static void __exit ucc_geth_exit(void)
{
	of_unregister_platform_driver(&ucc_geth_driver);
}

module_init(ucc_geth_init);
module_exit(ucc_geth_exit);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION(DRV_DESC);
MODULE_LICENSE("GPL");