2 * Copyright (C) 2006-2007 Freescale Semiconductor, Inc. All rights reserved.
4 * Author: Shlomi Gridish <gridish@freescale.com>
5 * Li Yang <leoli@freescale.com>
8 * QE UCC Gigabit Ethernet Driver
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
15 #include <linux/kernel.h>
16 #include <linux/init.h>
17 #include <linux/errno.h>
18 #include <linux/slab.h>
19 #include <linux/stddef.h>
20 #include <linux/interrupt.h>
21 #include <linux/netdevice.h>
22 #include <linux/etherdevice.h>
23 #include <linux/skbuff.h>
24 #include <linux/spinlock.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/fsl_devices.h>
28 #include <linux/mii.h>
29 #include <linux/phy.h>
30 #include <linux/workqueue.h>
31 #include <linux/of_platform.h>
33 #include <asm/uaccess.h>
36 #include <asm/immap_qe.h>
39 #include <asm/ucc_fast.h>
42 #include "ucc_geth_mii.h"
46 #define ugeth_printk(level, format, arg...) \
47 printk(level format "\n", ## arg)
49 #define ugeth_dbg(format, arg...) \
50 ugeth_printk(KERN_DEBUG , format , ## arg)
51 #define ugeth_err(format, arg...) \
52 ugeth_printk(KERN_ERR , format , ## arg)
53 #define ugeth_info(format, arg...) \
54 ugeth_printk(KERN_INFO , format , ## arg)
55 #define ugeth_warn(format, arg...) \
56 ugeth_printk(KERN_WARNING , format , ## arg)
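/*
 * Illustrative expansion of the helpers above: ugeth_err("tx failed: %d", rc)
 * becomes printk(KERN_ERR "tx failed: %d" "\n", rc), i.e. the log level and a
 * trailing newline are added automatically.
 */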
58 #ifdef UGETH_VERBOSE_DEBUG
59 #define ugeth_vdbg ugeth_dbg
60 #else
61 #define ugeth_vdbg(fmt, args...) do { } while (0)
62 #endif /* UGETH_VERBOSE_DEBUG */
63 #define UGETH_MSG_DEFAULT ((NETIF_MSG_IFUP << 1) - 1)
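/*
 * Default netif_msg verbosity: every message class up to and including
 * NETIF_MSG_IFUP is enabled.  The "debug" module parameter below can
 * override this mask at load time.
 */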
66 static DEFINE_SPINLOCK(ugeth_lock);
72 module_param_named(debug, debug.msg_enable, int, 0);
73 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 0xffff=all)");
75 static struct ucc_geth_info ugeth_primary_info = {
77 .bd_mem_part = MEM_PART_SYSTEM,
78 .rtsm = UCC_FAST_SEND_IDLES_BETWEEN_FRAMES,
79 .max_rx_buf_length = 1536,
80 /* adjusted at startup if max-speed 1000 */
81 .urfs = UCC_GETH_URFS_INIT,
82 .urfet = UCC_GETH_URFET_INIT,
83 .urfset = UCC_GETH_URFSET_INIT,
84 .utfs = UCC_GETH_UTFS_INIT,
85 .utfet = UCC_GETH_UTFET_INIT,
86 .utftt = UCC_GETH_UTFTT_INIT,
88 .mode = UCC_FAST_PROTOCOL_MODE_ETHERNET,
89 .ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
90 .tenc = UCC_FAST_TX_ENCODING_NRZ,
91 .renc = UCC_FAST_RX_ENCODING_NRZ,
92 .tcrc = UCC_FAST_16_BIT_CRC,
93 .synl = UCC_FAST_SYNC_LEN_NOT_USED,
97 .extendedFilteringChainPointer = ((uint32_t) NULL),
98 .typeorlen = 3072, /* 1536 */
99 .nonBackToBackIfgPart1 = 0x40,
100 .nonBackToBackIfgPart2 = 0x60,
101 .miminumInterFrameGapEnforcement = 0x50,
102 .backToBackInterFrameGap = 0x60,
106 .strictpriorityq = 0xff,
107 .altBebTruncation = 0xa,
109 .maxRetransmission = 0xf,
110 .collisionWindow = 0x37,
111 .receiveFlowControl = 1,
112 .transmitFlowControl = 1,
113 .maxGroupAddrInHash = 4,
114 .maxIndAddrInHash = 4,
116 .maxFrameLength = 1518,
117 .minFrameLength = 64,
121 .ecamptr = ((uint32_t) NULL),
122 .eventRegMask = UCCE_OTHER,
123 .pausePeriod = 0xf000,
124 .interruptcoalescingmaxvalue = {1, 1, 1, 1, 1, 1, 1, 1},
145 .numStationAddresses = UCC_GETH_NUM_OF_STATION_ADDRESSES_1,
146 .largestexternallookupkeysize =
147 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE,
148 .statisticsMode = UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE |
149 UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX |
150 UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX,
151 .vlanOperationTagged = UCC_GETH_VLAN_OPERATION_TAGGED_NOP,
152 .vlanOperationNonTagged = UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP,
153 .rxQoSMode = UCC_GETH_QOS_MODE_DEFAULT,
154 .aufc = UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE,
155 .padAndCrc = MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC,
156 .numThreadsTx = UCC_GETH_NUM_OF_THREADS_1,
157 .numThreadsRx = UCC_GETH_NUM_OF_THREADS_1,
158 .riscTx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
159 .riscRx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
162 static struct ucc_geth_info ugeth_info[8];
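/*
 * Per-UCC working copies of the configuration; each entry is typically
 * seeded from ugeth_primary_info and then tuned for its device at probe
 * time.
 */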
165 static void mem_disp(u8 *addr, int size)
168 int size16Align = (size >> 4) << 4;
169 int size4Align = (size >> 2) << 2;
174 for (i = addr; (u32) i < (u32) addr + size16Align; i += 16)
175 printk("0x%08x: %08x %08x %08x %08x\r\n",
179 *((u32 *) (i + 8)), *((u32 *) (i + 12)));
181 printk("0x%08x: ", (u32) i);
182 for (; (u32) i < (u32) addr + size4Align; i += 4)
183 printk("%08x ", *((u32 *) (i)));
184 for (; (u32) i < (u32) addr + size; i++)
185 printk("%02x", *((u8 *) (i)));
191 static struct list_head *dequeue(struct list_head *lh)
195 spin_lock_irqsave(&ugeth_lock, flags);
196 if (!list_empty(lh)) {
197 struct list_head *node = lh->next;
199 spin_unlock_irqrestore(&ugeth_lock, flags);
202 spin_unlock_irqrestore(&ugeth_lock, flags);
207 static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth,
210 struct sk_buff *skb = NULL;
212 skb = dev_alloc_skb(ugeth->ug_info->uf_info.max_rx_buf_length +
213 UCC_GETH_RX_DATA_BUF_ALIGNMENT);
218 /* We need the data buffer to be aligned properly.  We will reserve
219 * as many bytes as needed to align it.
222 UCC_GETH_RX_DATA_BUF_ALIGNMENT -
223 (((unsigned)skb->data) & (UCC_GETH_RX_DATA_BUF_ALIGNMENT -
226 skb->dev = ugeth->dev;
228 out_be32(&((struct qe_bd __iomem *)bd)->buf,
229 dma_map_single(&ugeth->dev->dev,
231 ugeth->ug_info->uf_info.max_rx_buf_length +
232 UCC_GETH_RX_DATA_BUF_ALIGNMENT,
235 out_be32((u32 __iomem *)bd,
236 (R_E | R_I | (in_be32((u32 __iomem*)bd) & R_W)));
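/*
 * Populate every BD of the Rx ring for queue rxQ with a freshly allocated,
 * DMA-mapped skb.  The ring is walked BD by BD until the descriptor carrying
 * the wrap bit (R_W) has been processed.
 */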
241 static int rx_bd_buffer_set(struct ucc_geth_private *ugeth, u8 rxQ)
248 bd = ugeth->p_rx_bd_ring[rxQ];
252 bd_status = in_be32((u32 __iomem *)bd);
253 skb = get_new_skb(ugeth, bd);
255 if (!skb) /* If we cannot allocate a data buffer,
256 abort; cleanup will happen elsewhere */
259 ugeth->rx_skbuff[rxQ][i] = skb;
261 /* advance the BD pointer */
262 bd += sizeof(struct qe_bd);
264 } while (!(bd_status & R_W));
269 static int fill_init_enet_entries(struct ucc_geth_private *ugeth,
273 u32 thread_alignment,
274 enum qe_risc_allocation risc,
275 int skip_page_for_first_entry)
277 u32 init_enet_offset;
281 for (i = 0; i < num_entries; i++) {
282 if ((snum = qe_get_snum()) < 0) {
283 if (netif_msg_ifup(ugeth))
284 ugeth_err("fill_init_enet_entries: Can not get SNUM.");
287 if ((i == 0) && skip_page_for_first_entry)
288 /* First entry of Rx does not have page */
289 init_enet_offset = 0;
292 qe_muram_alloc(thread_size, thread_alignment);
293 if (IS_ERR_VALUE(init_enet_offset)) {
294 if (netif_msg_ifup(ugeth))
295 ugeth_err("fill_init_enet_entries: Can not allocate DPRAM memory.");
296 qe_put_snum((u8) snum);
301 ((u8) snum << ENET_INIT_PARAM_SNUM_SHIFT) | init_enet_offset
308 static int return_init_enet_entries(struct ucc_geth_private *ugeth,
311 enum qe_risc_allocation risc,
312 int skip_page_for_first_entry)
314 u32 init_enet_offset;
318 for (i = 0; i < num_entries; i++) {
321 /* Check that this entry was actually valid --
322 needed in case failed in allocations */
323 if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) {
325 (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >>
326 ENET_INIT_PARAM_SNUM_SHIFT;
327 qe_put_snum((u8) snum);
328 if (!((i == 0) && skip_page_for_first_entry)) {
329 /* First entry of Rx does not have page */
331 (val & ENET_INIT_PARAM_PTR_MASK);
332 qe_muram_free(init_enet_offset);
342 static int dump_init_enet_entries(struct ucc_geth_private *ugeth,
343 u32 __iomem *p_start,
346 enum qe_risc_allocation risc,
347 int skip_page_for_first_entry)
349 u32 init_enet_offset;
353 for (i = 0; i < num_entries; i++) {
354 u32 val = in_be32(p_start);
356 /* Check that this entry was actually valid --
357 needed in case failed in allocations */
358 if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) {
360 (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >>
361 ENET_INIT_PARAM_SNUM_SHIFT;
362 qe_put_snum((u8) snum);
363 if (!((i == 0) && skip_page_for_first_entry)) {
364 /* First entry of Rx does not have page */
367 ENET_INIT_PARAM_PTR_MASK);
368 ugeth_info("Init enet entry %d:", i);
369 ugeth_info("Base address: 0x%08x",
371 qe_muram_addr(init_enet_offset));
372 mem_disp(qe_muram_addr(init_enet_offset),
383 static void put_enet_addr_container(struct enet_addr_container *enet_addr_cont)
385 kfree(enet_addr_cont);
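/*
 * Write a 6-byte MAC address into three big-endian 16-bit registers in
 * reversed byte order, matching the layout the 82xx-style address filter
 * expects (see hw_add_addr_in_hash() below).
 */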
388 static void set_mac_addr(__be16 __iomem *reg, u8 *mac)
390 out_be16(&reg[0], ((u16)mac[5] << 8) | mac[4]);
391 out_be16(&reg[1], ((u16)mac[3] << 8) | mac[2]);
392 out_be16(&reg[2], ((u16)mac[1] << 8) | mac[0]);
395 static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num)
397 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
399 if (!(paddr_num < NUM_OF_PADDRS)) {
400 ugeth_warn("%s: Illegal paddr_num.", __func__);
405 (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram->
408 /* Writing address ff.ff.ff.ff.ff.ff disables address
409 recognition for this register */
410 out_be16(&p_82xx_addr_filt->paddr[paddr_num].h, 0xffff);
411 out_be16(&p_82xx_addr_filt->paddr[paddr_num].m, 0xffff);
412 out_be16(&p_82xx_addr_filt->paddr[paddr_num].l, 0xffff);
417 static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth,
420 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
424 (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram->
428 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
430 /* Ethernet frames are defined in Little Endian mode; therefore, to
431 * insert the address into the hash (Big Endian mode), we reverse the
432 * bytes. */
434 set_mac_addr(&p_82xx_addr_filt->taddr.h, p_enet_addr);
436 qe_issue_cmd(QE_SET_GROUP_ADDRESS, cecr_subblock,
437 QE_CR_PROTOCOL_ETHERNET, 0);
440 #ifdef CONFIG_UGETH_MAGIC_PACKET
441 static void magic_packet_detection_enable(struct ucc_geth_private *ugeth)
443 struct ucc_fast_private *uccf;
444 struct ucc_geth __iomem *ug_regs;
447 ug_regs = ugeth->ug_regs;
449 /* Enable interrupts for magic packet detection */
450 setbits32(uccf->p_uccm, UCC_GETH_UCCE_MPD);
452 /* Enable magic packet detection */
453 setbits32(&ug_regs->maccfg2, MACCFG2_MPE);
456 static void magic_packet_detection_disable(struct ucc_geth_private *ugeth)
458 struct ucc_fast_private *uccf;
459 struct ucc_geth __iomem *ug_regs;
462 ug_regs = ugeth->ug_regs;
464 /* Disable interrupts for magic packet detection */
465 clrbits32(uccf->p_uccm, UCC_GETH_UCCE_MPD);
467 /* Disable magic packet detection */
468 clrbits32(&ug_regs->maccfg2, MACCFG2_MPE);
470 #endif /* CONFIG_UGETH_MAGIC_PACKET */
472 static inline int compare_addr(u8 **addr1, u8 **addr2)
474 return memcmp(addr1, addr2, ENET_NUM_OCTETS_PER_ADDRESS);
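/*
 * Snapshot the Tx firmware, Rx firmware and hardware MIB counters.  Any of
 * the three destination pointers may be NULL; a block is filled in only if
 * it was requested and the driver actually gathers it (firmware statistics
 * PRAM allocated, or UPSMR[HSE] set for the hardware counters).
 */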
478 static void get_statistics(struct ucc_geth_private *ugeth,
479 struct ucc_geth_tx_firmware_statistics *
480 tx_firmware_statistics,
481 struct ucc_geth_rx_firmware_statistics *
482 rx_firmware_statistics,
483 struct ucc_geth_hardware_statistics *hardware_statistics)
485 struct ucc_fast __iomem *uf_regs;
486 struct ucc_geth __iomem *ug_regs;
487 struct ucc_geth_tx_firmware_statistics_pram *p_tx_fw_statistics_pram;
488 struct ucc_geth_rx_firmware_statistics_pram *p_rx_fw_statistics_pram;
490 ug_regs = ugeth->ug_regs;
491 uf_regs = (struct ucc_fast __iomem *) ug_regs;
492 p_tx_fw_statistics_pram = ugeth->p_tx_fw_statistics_pram;
493 p_rx_fw_statistics_pram = ugeth->p_rx_fw_statistics_pram;
495 /* Tx firmware only if user handed pointer and driver actually
496 gathers Tx firmware statistics */
497 if (tx_firmware_statistics && p_tx_fw_statistics_pram) {
498 tx_firmware_statistics->sicoltx =
499 in_be32(&p_tx_fw_statistics_pram->sicoltx);
500 tx_firmware_statistics->mulcoltx =
501 in_be32(&p_tx_fw_statistics_pram->mulcoltx);
502 tx_firmware_statistics->latecoltxfr =
503 in_be32(&p_tx_fw_statistics_pram->latecoltxfr);
504 tx_firmware_statistics->frabortduecol =
505 in_be32(&p_tx_fw_statistics_pram->frabortduecol);
506 tx_firmware_statistics->frlostinmactxer =
507 in_be32(&p_tx_fw_statistics_pram->frlostinmactxer);
508 tx_firmware_statistics->carriersenseertx =
509 in_be32(&p_tx_fw_statistics_pram->carriersenseertx);
510 tx_firmware_statistics->frtxok =
511 in_be32(&p_tx_fw_statistics_pram->frtxok);
512 tx_firmware_statistics->txfrexcessivedefer =
513 in_be32(&p_tx_fw_statistics_pram->txfrexcessivedefer);
514 tx_firmware_statistics->txpkts256 =
515 in_be32(&p_tx_fw_statistics_pram->txpkts256);
516 tx_firmware_statistics->txpkts512 =
517 in_be32(&p_tx_fw_statistics_pram->txpkts512);
518 tx_firmware_statistics->txpkts1024 =
519 in_be32(&p_tx_fw_statistics_pram->txpkts1024);
520 tx_firmware_statistics->txpktsjumbo =
521 in_be32(&p_tx_fw_statistics_pram->txpktsjumbo);
524 /* Rx firmware only if user handed pointer and driver actually
525 * gathers Rx firmware statistics */
526 if (rx_firmware_statistics && p_rx_fw_statistics_pram) {
528 rx_firmware_statistics->frrxfcser =
529 in_be32(&p_rx_fw_statistics_pram->frrxfcser);
530 rx_firmware_statistics->fraligner =
531 in_be32(&p_rx_fw_statistics_pram->fraligner);
532 rx_firmware_statistics->inrangelenrxer =
533 in_be32(&p_rx_fw_statistics_pram->inrangelenrxer);
534 rx_firmware_statistics->outrangelenrxer =
535 in_be32(&p_rx_fw_statistics_pram->outrangelenrxer);
536 rx_firmware_statistics->frtoolong =
537 in_be32(&p_rx_fw_statistics_pram->frtoolong);
538 rx_firmware_statistics->runt =
539 in_be32(&p_rx_fw_statistics_pram->runt);
540 rx_firmware_statistics->verylongevent =
541 in_be32(&p_rx_fw_statistics_pram->verylongevent);
542 rx_firmware_statistics->symbolerror =
543 in_be32(&p_rx_fw_statistics_pram->symbolerror);
544 rx_firmware_statistics->dropbsy =
545 in_be32(&p_rx_fw_statistics_pram->dropbsy);
546 for (i = 0; i < 0x8; i++)
547 rx_firmware_statistics->res0[i] =
548 p_rx_fw_statistics_pram->res0[i];
549 rx_firmware_statistics->mismatchdrop =
550 in_be32(&p_rx_fw_statistics_pram->mismatchdrop);
551 rx_firmware_statistics->underpkts =
552 in_be32(&p_rx_fw_statistics_pram->underpkts);
553 rx_firmware_statistics->pkts256 =
554 in_be32(&p_rx_fw_statistics_pram->pkts256);
555 rx_firmware_statistics->pkts512 =
556 in_be32(&p_rx_fw_statistics_pram->pkts512);
557 rx_firmware_statistics->pkts1024 =
558 in_be32(&p_rx_fw_statistics_pram->pkts1024);
559 rx_firmware_statistics->pktsjumbo =
560 in_be32(&p_rx_fw_statistics_pram->pktsjumbo);
561 rx_firmware_statistics->frlossinmacer =
562 in_be32(&p_rx_fw_statistics_pram->frlossinmacer);
563 rx_firmware_statistics->pausefr =
564 in_be32(&p_rx_fw_statistics_pram->pausefr);
565 for (i = 0; i < 0x4; i++)
566 rx_firmware_statistics->res1[i] =
567 p_rx_fw_statistics_pram->res1[i];
568 rx_firmware_statistics->removevlan =
569 in_be32(&p_rx_fw_statistics_pram->removevlan);
570 rx_firmware_statistics->replacevlan =
571 in_be32(&p_rx_fw_statistics_pram->replacevlan);
572 rx_firmware_statistics->insertvlan =
573 in_be32(&p_rx_fw_statistics_pram->insertvlan);
576 /* Hardware only if user handed pointer and driver actually
577 gathers hardware statistics */
578 if (hardware_statistics &&
579 (in_be32(&uf_regs->upsmr) & UCC_GETH_UPSMR_HSE)) {
580 hardware_statistics->tx64 = in_be32(&ug_regs->tx64);
581 hardware_statistics->tx127 = in_be32(&ug_regs->tx127);
582 hardware_statistics->tx255 = in_be32(&ug_regs->tx255);
583 hardware_statistics->rx64 = in_be32(&ug_regs->rx64);
584 hardware_statistics->rx127 = in_be32(&ug_regs->rx127);
585 hardware_statistics->rx255 = in_be32(&ug_regs->rx255);
586 hardware_statistics->txok = in_be32(&ug_regs->txok);
587 hardware_statistics->txcf = in_be16(&ug_regs->txcf);
588 hardware_statistics->tmca = in_be32(&ug_regs->tmca);
589 hardware_statistics->tbca = in_be32(&ug_regs->tbca);
590 hardware_statistics->rxfok = in_be32(&ug_regs->rxfok);
591 hardware_statistics->rxbok = in_be32(&ug_regs->rxbok);
592 hardware_statistics->rbyt = in_be32(&ug_regs->rbyt);
593 hardware_statistics->rmca = in_be32(&ug_regs->rmca);
594 hardware_statistics->rbca = in_be32(&ug_regs->rbca);
598 static void dump_bds(struct ucc_geth_private *ugeth)
603 for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
604 if (ugeth->p_tx_bd_ring[i]) {
606 (ugeth->ug_info->bdRingLenTx[i] *
607 sizeof(struct qe_bd));
608 ugeth_info("TX BDs[%d]", i);
609 mem_disp(ugeth->p_tx_bd_ring[i], length);
612 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
613 if (ugeth->p_rx_bd_ring[i]) {
615 (ugeth->ug_info->bdRingLenRx[i] *
616 sizeof(struct qe_bd));
617 ugeth_info("RX BDs[%d]", i);
618 mem_disp(ugeth->p_rx_bd_ring[i], length);
623 static void dump_regs(struct ucc_geth_private *ugeth)
627 ugeth_info("UCC%d Geth registers:", ugeth->ug_info->uf_info.ucc_num);
628 ugeth_info("Base address: 0x%08x", (u32) ugeth->ug_regs);
630 ugeth_info("maccfg1 : addr - 0x%08x, val - 0x%08x",
631 (u32) & ugeth->ug_regs->maccfg1,
632 in_be32(&ugeth->ug_regs->maccfg1));
633 ugeth_info("maccfg2 : addr - 0x%08x, val - 0x%08x",
634 (u32) & ugeth->ug_regs->maccfg2,
635 in_be32(&ugeth->ug_regs->maccfg2));
636 ugeth_info("ipgifg : addr - 0x%08x, val - 0x%08x",
637 (u32) & ugeth->ug_regs->ipgifg,
638 in_be32(&ugeth->ug_regs->ipgifg));
639 ugeth_info("hafdup : addr - 0x%08x, val - 0x%08x",
640 (u32) & ugeth->ug_regs->hafdup,
641 in_be32(&ugeth->ug_regs->hafdup));
642 ugeth_info("ifctl : addr - 0x%08x, val - 0x%08x",
643 (u32) & ugeth->ug_regs->ifctl,
644 in_be32(&ugeth->ug_regs->ifctl));
645 ugeth_info("ifstat : addr - 0x%08x, val - 0x%08x",
646 (u32) & ugeth->ug_regs->ifstat,
647 in_be32(&ugeth->ug_regs->ifstat));
648 ugeth_info("macstnaddr1: addr - 0x%08x, val - 0x%08x",
649 (u32) & ugeth->ug_regs->macstnaddr1,
650 in_be32(&ugeth->ug_regs->macstnaddr1));
651 ugeth_info("macstnaddr2: addr - 0x%08x, val - 0x%08x",
652 (u32) & ugeth->ug_regs->macstnaddr2,
653 in_be32(&ugeth->ug_regs->macstnaddr2));
654 ugeth_info("uempr : addr - 0x%08x, val - 0x%08x",
655 (u32) & ugeth->ug_regs->uempr,
656 in_be32(&ugeth->ug_regs->uempr));
657 ugeth_info("utbipar : addr - 0x%08x, val - 0x%08x",
658 (u32) & ugeth->ug_regs->utbipar,
659 in_be32(&ugeth->ug_regs->utbipar));
660 ugeth_info("uescr : addr - 0x%08x, val - 0x%04x",
661 (u32) & ugeth->ug_regs->uescr,
662 in_be16(&ugeth->ug_regs->uescr));
663 ugeth_info("tx64 : addr - 0x%08x, val - 0x%08x",
664 (u32) & ugeth->ug_regs->tx64,
665 in_be32(&ugeth->ug_regs->tx64));
666 ugeth_info("tx127 : addr - 0x%08x, val - 0x%08x",
667 (u32) & ugeth->ug_regs->tx127,
668 in_be32(&ugeth->ug_regs->tx127));
669 ugeth_info("tx255 : addr - 0x%08x, val - 0x%08x",
670 (u32) & ugeth->ug_regs->tx255,
671 in_be32(&ugeth->ug_regs->tx255));
672 ugeth_info("rx64 : addr - 0x%08x, val - 0x%08x",
673 (u32) & ugeth->ug_regs->rx64,
674 in_be32(&ugeth->ug_regs->rx64));
675 ugeth_info("rx127 : addr - 0x%08x, val - 0x%08x",
676 (u32) & ugeth->ug_regs->rx127,
677 in_be32(&ugeth->ug_regs->rx127));
678 ugeth_info("rx255 : addr - 0x%08x, val - 0x%08x",
679 (u32) & ugeth->ug_regs->rx255,
680 in_be32(&ugeth->ug_regs->rx255));
681 ugeth_info("txok : addr - 0x%08x, val - 0x%08x",
682 (u32) & ugeth->ug_regs->txok,
683 in_be32(&ugeth->ug_regs->txok));
684 ugeth_info("txcf : addr - 0x%08x, val - 0x%04x",
685 (u32) & ugeth->ug_regs->txcf,
686 in_be16(&ugeth->ug_regs->txcf));
687 ugeth_info("tmca : addr - 0x%08x, val - 0x%08x",
688 (u32) & ugeth->ug_regs->tmca,
689 in_be32(&ugeth->ug_regs->tmca));
690 ugeth_info("tbca : addr - 0x%08x, val - 0x%08x",
691 (u32) & ugeth->ug_regs->tbca,
692 in_be32(&ugeth->ug_regs->tbca));
693 ugeth_info("rxfok : addr - 0x%08x, val - 0x%08x",
694 (u32) & ugeth->ug_regs->rxfok,
695 in_be32(&ugeth->ug_regs->rxfok));
696 ugeth_info("rxbok : addr - 0x%08x, val - 0x%08x",
697 (u32) & ugeth->ug_regs->rxbok,
698 in_be32(&ugeth->ug_regs->rxbok));
699 ugeth_info("rbyt : addr - 0x%08x, val - 0x%08x",
700 (u32) & ugeth->ug_regs->rbyt,
701 in_be32(&ugeth->ug_regs->rbyt));
702 ugeth_info("rmca : addr - 0x%08x, val - 0x%08x",
703 (u32) & ugeth->ug_regs->rmca,
704 in_be32(&ugeth->ug_regs->rmca));
705 ugeth_info("rbca : addr - 0x%08x, val - 0x%08x",
706 (u32) & ugeth->ug_regs->rbca,
707 in_be32(&ugeth->ug_regs->rbca));
708 ugeth_info("scar : addr - 0x%08x, val - 0x%08x",
709 (u32) & ugeth->ug_regs->scar,
710 in_be32(&ugeth->ug_regs->scar));
711 ugeth_info("scam : addr - 0x%08x, val - 0x%08x",
712 (u32) & ugeth->ug_regs->scam,
713 in_be32(&ugeth->ug_regs->scam));
715 if (ugeth->p_thread_data_tx) {
716 int numThreadsTxNumerical;
717 switch (ugeth->ug_info->numThreadsTx) {
718 case UCC_GETH_NUM_OF_THREADS_1:
719 numThreadsTxNumerical = 1;
721 case UCC_GETH_NUM_OF_THREADS_2:
722 numThreadsTxNumerical = 2;
724 case UCC_GETH_NUM_OF_THREADS_4:
725 numThreadsTxNumerical = 4;
727 case UCC_GETH_NUM_OF_THREADS_6:
728 numThreadsTxNumerical = 6;
730 case UCC_GETH_NUM_OF_THREADS_8:
731 numThreadsTxNumerical = 8;
734 numThreadsTxNumerical = 0;
738 ugeth_info("Thread data TXs:");
739 ugeth_info("Base address: 0x%08x",
740 (u32) ugeth->p_thread_data_tx);
741 for (i = 0; i < numThreadsTxNumerical; i++) {
742 ugeth_info("Thread data TX[%d]:", i);
743 ugeth_info("Base address: 0x%08x",
744 (u32) & ugeth->p_thread_data_tx[i]);
745 mem_disp((u8 *) & ugeth->p_thread_data_tx[i],
746 sizeof(struct ucc_geth_thread_data_tx));
749 if (ugeth->p_thread_data_rx) {
750 int numThreadsRxNumerical;
751 switch (ugeth->ug_info->numThreadsRx) {
752 case UCC_GETH_NUM_OF_THREADS_1:
753 numThreadsRxNumerical = 1;
755 case UCC_GETH_NUM_OF_THREADS_2:
756 numThreadsRxNumerical = 2;
758 case UCC_GETH_NUM_OF_THREADS_4:
759 numThreadsRxNumerical = 4;
761 case UCC_GETH_NUM_OF_THREADS_6:
762 numThreadsRxNumerical = 6;
764 case UCC_GETH_NUM_OF_THREADS_8:
765 numThreadsRxNumerical = 8;
768 numThreadsRxNumerical = 0;
772 ugeth_info("Thread data RX:");
773 ugeth_info("Base address: 0x%08x",
774 (u32) ugeth->p_thread_data_rx);
775 for (i = 0; i < numThreadsRxNumerical; i++) {
776 ugeth_info("Thread data RX[%d]:", i);
777 ugeth_info("Base address: 0x%08x",
778 (u32) & ugeth->p_thread_data_rx[i]);
779 mem_disp((u8 *) & ugeth->p_thread_data_rx[i],
780 sizeof(struct ucc_geth_thread_data_rx));
783 if (ugeth->p_exf_glbl_param) {
784 ugeth_info("EXF global param:");
785 ugeth_info("Base address: 0x%08x",
786 (u32) ugeth->p_exf_glbl_param);
787 mem_disp((u8 *) ugeth->p_exf_glbl_param,
788 sizeof(*ugeth->p_exf_glbl_param));
790 if (ugeth->p_tx_glbl_pram) {
791 ugeth_info("TX global param:");
792 ugeth_info("Base address: 0x%08x", (u32) ugeth->p_tx_glbl_pram);
793 ugeth_info("temoder : addr - 0x%08x, val - 0x%04x",
794 (u32) & ugeth->p_tx_glbl_pram->temoder,
795 in_be16(&ugeth->p_tx_glbl_pram->temoder));
796 ugeth_info("sqptr : addr - 0x%08x, val - 0x%08x",
797 (u32) & ugeth->p_tx_glbl_pram->sqptr,
798 in_be32(&ugeth->p_tx_glbl_pram->sqptr));
799 ugeth_info("schedulerbasepointer: addr - 0x%08x, val - 0x%08x",
800 (u32) & ugeth->p_tx_glbl_pram->schedulerbasepointer,
801 in_be32(&ugeth->p_tx_glbl_pram->
802 schedulerbasepointer));
803 ugeth_info("txrmonbaseptr: addr - 0x%08x, val - 0x%08x",
804 (u32) & ugeth->p_tx_glbl_pram->txrmonbaseptr,
805 in_be32(&ugeth->p_tx_glbl_pram->txrmonbaseptr));
806 ugeth_info("tstate : addr - 0x%08x, val - 0x%08x",
807 (u32) & ugeth->p_tx_glbl_pram->tstate,
808 in_be32(&ugeth->p_tx_glbl_pram->tstate));
809 ugeth_info("iphoffset[0] : addr - 0x%08x, val - 0x%02x",
810 (u32) & ugeth->p_tx_glbl_pram->iphoffset[0],
811 ugeth->p_tx_glbl_pram->iphoffset[0]);
812 ugeth_info("iphoffset[1] : addr - 0x%08x, val - 0x%02x",
813 (u32) & ugeth->p_tx_glbl_pram->iphoffset[1],
814 ugeth->p_tx_glbl_pram->iphoffset[1]);
815 ugeth_info("iphoffset[2] : addr - 0x%08x, val - 0x%02x",
816 (u32) & ugeth->p_tx_glbl_pram->iphoffset[2],
817 ugeth->p_tx_glbl_pram->iphoffset[2]);
818 ugeth_info("iphoffset[3] : addr - 0x%08x, val - 0x%02x",
819 (u32) & ugeth->p_tx_glbl_pram->iphoffset[3],
820 ugeth->p_tx_glbl_pram->iphoffset[3]);
821 ugeth_info("iphoffset[4] : addr - 0x%08x, val - 0x%02x",
822 (u32) & ugeth->p_tx_glbl_pram->iphoffset[4],
823 ugeth->p_tx_glbl_pram->iphoffset[4]);
824 ugeth_info("iphoffset[5] : addr - 0x%08x, val - 0x%02x",
825 (u32) & ugeth->p_tx_glbl_pram->iphoffset[5],
826 ugeth->p_tx_glbl_pram->iphoffset[5]);
827 ugeth_info("iphoffset[6] : addr - 0x%08x, val - 0x%02x",
828 (u32) & ugeth->p_tx_glbl_pram->iphoffset[6],
829 ugeth->p_tx_glbl_pram->iphoffset[6]);
830 ugeth_info("iphoffset[7] : addr - 0x%08x, val - 0x%02x",
831 (u32) & ugeth->p_tx_glbl_pram->iphoffset[7],
832 ugeth->p_tx_glbl_pram->iphoffset[7]);
833 ugeth_info("vtagtable[0] : addr - 0x%08x, val - 0x%08x",
834 (u32) & ugeth->p_tx_glbl_pram->vtagtable[0],
835 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[0]));
836 ugeth_info("vtagtable[1] : addr - 0x%08x, val - 0x%08x",
837 (u32) & ugeth->p_tx_glbl_pram->vtagtable[1],
838 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[1]));
839 ugeth_info("vtagtable[2] : addr - 0x%08x, val - 0x%08x",
840 (u32) & ugeth->p_tx_glbl_pram->vtagtable[2],
841 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[2]));
842 ugeth_info("vtagtable[3] : addr - 0x%08x, val - 0x%08x",
843 (u32) & ugeth->p_tx_glbl_pram->vtagtable[3],
844 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[3]));
845 ugeth_info("vtagtable[4] : addr - 0x%08x, val - 0x%08x",
846 (u32) & ugeth->p_tx_glbl_pram->vtagtable[4],
847 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[4]));
848 ugeth_info("vtagtable[5] : addr - 0x%08x, val - 0x%08x",
849 (u32) & ugeth->p_tx_glbl_pram->vtagtable[5],
850 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[5]));
851 ugeth_info("vtagtable[6] : addr - 0x%08x, val - 0x%08x",
852 (u32) & ugeth->p_tx_glbl_pram->vtagtable[6],
853 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[6]));
854 ugeth_info("vtagtable[7] : addr - 0x%08x, val - 0x%08x",
855 (u32) & ugeth->p_tx_glbl_pram->vtagtable[7],
856 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[7]));
857 ugeth_info("tqptr : addr - 0x%08x, val - 0x%08x",
858 (u32) & ugeth->p_tx_glbl_pram->tqptr,
859 in_be32(&ugeth->p_tx_glbl_pram->tqptr));
861 if (ugeth->p_rx_glbl_pram) {
862 ugeth_info("RX global param:");
863 ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_glbl_pram);
864 ugeth_info("remoder : addr - 0x%08x, val - 0x%08x",
865 (u32) & ugeth->p_rx_glbl_pram->remoder,
866 in_be32(&ugeth->p_rx_glbl_pram->remoder));
867 ugeth_info("rqptr : addr - 0x%08x, val - 0x%08x",
868 (u32) & ugeth->p_rx_glbl_pram->rqptr,
869 in_be32(&ugeth->p_rx_glbl_pram->rqptr));
870 ugeth_info("typeorlen : addr - 0x%08x, val - 0x%04x",
871 (u32) & ugeth->p_rx_glbl_pram->typeorlen,
872 in_be16(&ugeth->p_rx_glbl_pram->typeorlen));
873 ugeth_info("rxgstpack : addr - 0x%08x, val - 0x%02x",
874 (u32) & ugeth->p_rx_glbl_pram->rxgstpack,
875 ugeth->p_rx_glbl_pram->rxgstpack);
876 ugeth_info("rxrmonbaseptr : addr - 0x%08x, val - 0x%08x",
877 (u32) & ugeth->p_rx_glbl_pram->rxrmonbaseptr,
878 in_be32(&ugeth->p_rx_glbl_pram->rxrmonbaseptr));
879 ugeth_info("intcoalescingptr: addr - 0x%08x, val - 0x%08x",
880 (u32) & ugeth->p_rx_glbl_pram->intcoalescingptr,
881 in_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr));
882 ugeth_info("rstate : addr - 0x%08x, val - 0x%02x",
883 (u32) & ugeth->p_rx_glbl_pram->rstate,
884 ugeth->p_rx_glbl_pram->rstate);
885 ugeth_info("mrblr : addr - 0x%08x, val - 0x%04x",
886 (u32) & ugeth->p_rx_glbl_pram->mrblr,
887 in_be16(&ugeth->p_rx_glbl_pram->mrblr));
888 ugeth_info("rbdqptr : addr - 0x%08x, val - 0x%08x",
889 (u32) & ugeth->p_rx_glbl_pram->rbdqptr,
890 in_be32(&ugeth->p_rx_glbl_pram->rbdqptr));
891 ugeth_info("mflr : addr - 0x%08x, val - 0x%04x",
892 (u32) & ugeth->p_rx_glbl_pram->mflr,
893 in_be16(&ugeth->p_rx_glbl_pram->mflr));
894 ugeth_info("minflr : addr - 0x%08x, val - 0x%04x",
895 (u32) & ugeth->p_rx_glbl_pram->minflr,
896 in_be16(&ugeth->p_rx_glbl_pram->minflr));
897 ugeth_info("maxd1 : addr - 0x%08x, val - 0x%04x",
898 (u32) & ugeth->p_rx_glbl_pram->maxd1,
899 in_be16(&ugeth->p_rx_glbl_pram->maxd1));
900 ugeth_info("maxd2 : addr - 0x%08x, val - 0x%04x",
901 (u32) & ugeth->p_rx_glbl_pram->maxd2,
902 in_be16(&ugeth->p_rx_glbl_pram->maxd2));
903 ugeth_info("ecamptr : addr - 0x%08x, val - 0x%08x",
904 (u32) & ugeth->p_rx_glbl_pram->ecamptr,
905 in_be32(&ugeth->p_rx_glbl_pram->ecamptr));
906 ugeth_info("l2qt : addr - 0x%08x, val - 0x%08x",
907 (u32) & ugeth->p_rx_glbl_pram->l2qt,
908 in_be32(&ugeth->p_rx_glbl_pram->l2qt));
909 ugeth_info("l3qt[0] : addr - 0x%08x, val - 0x%08x",
910 (u32) & ugeth->p_rx_glbl_pram->l3qt[0],
911 in_be32(&ugeth->p_rx_glbl_pram->l3qt[0]));
912 ugeth_info("l3qt[1] : addr - 0x%08x, val - 0x%08x",
913 (u32) & ugeth->p_rx_glbl_pram->l3qt[1],
914 in_be32(&ugeth->p_rx_glbl_pram->l3qt[1]));
915 ugeth_info("l3qt[2] : addr - 0x%08x, val - 0x%08x",
916 (u32) & ugeth->p_rx_glbl_pram->l3qt[2],
917 in_be32(&ugeth->p_rx_glbl_pram->l3qt[2]));
918 ugeth_info("l3qt[3] : addr - 0x%08x, val - 0x%08x",
919 (u32) & ugeth->p_rx_glbl_pram->l3qt[3],
920 in_be32(&ugeth->p_rx_glbl_pram->l3qt[3]));
921 ugeth_info("l3qt[4] : addr - 0x%08x, val - 0x%08x",
922 (u32) & ugeth->p_rx_glbl_pram->l3qt[4],
923 in_be32(&ugeth->p_rx_glbl_pram->l3qt[4]));
924 ugeth_info("l3qt[5] : addr - 0x%08x, val - 0x%08x",
925 (u32) & ugeth->p_rx_glbl_pram->l3qt[5],
926 in_be32(&ugeth->p_rx_glbl_pram->l3qt[5]));
927 ugeth_info("l3qt[6] : addr - 0x%08x, val - 0x%08x",
928 (u32) & ugeth->p_rx_glbl_pram->l3qt[6],
929 in_be32(&ugeth->p_rx_glbl_pram->l3qt[6]));
930 ugeth_info("l3qt[7] : addr - 0x%08x, val - 0x%08x",
931 (u32) & ugeth->p_rx_glbl_pram->l3qt[7],
932 in_be32(&ugeth->p_rx_glbl_pram->l3qt[7]));
933 ugeth_info("vlantype : addr - 0x%08x, val - 0x%04x",
934 (u32) & ugeth->p_rx_glbl_pram->vlantype,
935 in_be16(&ugeth->p_rx_glbl_pram->vlantype));
936 ugeth_info("vlantci : addr - 0x%08x, val - 0x%04x",
937 (u32) & ugeth->p_rx_glbl_pram->vlantci,
938 in_be16(&ugeth->p_rx_glbl_pram->vlantci));
939 for (i = 0; i < 64; i++)
941 ("addressfiltering[%d]: addr - 0x%08x, val - 0x%02x",
943 (u32) & ugeth->p_rx_glbl_pram->addressfiltering[i],
944 ugeth->p_rx_glbl_pram->addressfiltering[i]);
945 ugeth_info("exfGlobalParam : addr - 0x%08x, val - 0x%08x",
946 (u32) & ugeth->p_rx_glbl_pram->exfGlobalParam,
947 in_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam));
949 if (ugeth->p_send_q_mem_reg) {
950 ugeth_info("Send Q memory registers:");
951 ugeth_info("Base address: 0x%08x",
952 (u32) ugeth->p_send_q_mem_reg);
953 for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
954 ugeth_info("SQQD[%d]:", i);
955 ugeth_info("Base address: 0x%08x",
956 (u32) & ugeth->p_send_q_mem_reg->sqqd[i]);
957 mem_disp((u8 *) & ugeth->p_send_q_mem_reg->sqqd[i],
958 sizeof(struct ucc_geth_send_queue_qd));
961 if (ugeth->p_scheduler) {
962 ugeth_info("Scheduler:");
963 ugeth_info("Base address: 0x%08x", (u32) ugeth->p_scheduler);
964 mem_disp((u8 *) ugeth->p_scheduler,
965 sizeof(*ugeth->p_scheduler));
967 if (ugeth->p_tx_fw_statistics_pram) {
968 ugeth_info("TX FW statistics pram:");
969 ugeth_info("Base address: 0x%08x",
970 (u32) ugeth->p_tx_fw_statistics_pram);
971 mem_disp((u8 *) ugeth->p_tx_fw_statistics_pram,
972 sizeof(*ugeth->p_tx_fw_statistics_pram));
974 if (ugeth->p_rx_fw_statistics_pram) {
975 ugeth_info("RX FW statistics pram:");
976 ugeth_info("Base address: 0x%08x",
977 (u32) ugeth->p_rx_fw_statistics_pram);
978 mem_disp((u8 *) ugeth->p_rx_fw_statistics_pram,
979 sizeof(*ugeth->p_rx_fw_statistics_pram));
981 if (ugeth->p_rx_irq_coalescing_tbl) {
982 ugeth_info("RX IRQ coalescing tables:");
983 ugeth_info("Base address: 0x%08x",
984 (u32) ugeth->p_rx_irq_coalescing_tbl);
985 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
986 ugeth_info("RX IRQ coalescing table entry[%d]:", i);
987 ugeth_info("Base address: 0x%08x",
988 (u32) & ugeth->p_rx_irq_coalescing_tbl->
991 ("interruptcoalescingmaxvalue: addr - 0x%08x, val - 0x%08x",
992 (u32) & ugeth->p_rx_irq_coalescing_tbl->
993 coalescingentry[i].interruptcoalescingmaxvalue,
994 in_be32(&ugeth->p_rx_irq_coalescing_tbl->
996 interruptcoalescingmaxvalue));
998 ("interruptcoalescingcounter : addr - 0x%08x, val - 0x%08x",
999 (u32) & ugeth->p_rx_irq_coalescing_tbl->
1000 coalescingentry[i].interruptcoalescingcounter,
1001 in_be32(&ugeth->p_rx_irq_coalescing_tbl->
1003 interruptcoalescingcounter));
1006 if (ugeth->p_rx_bd_qs_tbl) {
1007 ugeth_info("RX BD QS tables:");
1008 ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_bd_qs_tbl);
1009 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
1010 ugeth_info("RX BD QS table[%d]:", i);
1011 ugeth_info("Base address: 0x%08x",
1012 (u32) & ugeth->p_rx_bd_qs_tbl[i]);
1014 ("bdbaseptr : addr - 0x%08x, val - 0x%08x",
1015 (u32) & ugeth->p_rx_bd_qs_tbl[i].bdbaseptr,
1016 in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr));
1018 ("bdptr : addr - 0x%08x, val - 0x%08x",
1019 (u32) & ugeth->p_rx_bd_qs_tbl[i].bdptr,
1020 in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdptr));
1022 ("externalbdbaseptr: addr - 0x%08x, val - 0x%08x",
1023 (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
1024 in_be32(&ugeth->p_rx_bd_qs_tbl[i].
1025 externalbdbaseptr));
1027 ("externalbdptr : addr - 0x%08x, val - 0x%08x",
1028 (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdptr,
1029 in_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdptr));
1030 ugeth_info("ucode RX Prefetched BDs:");
1031 ugeth_info("Base address: 0x%08x",
1033 qe_muram_addr(in_be32
1034 (&ugeth->p_rx_bd_qs_tbl[i].
1037 qe_muram_addr(in_be32
1038 (&ugeth->p_rx_bd_qs_tbl[i].
1040 sizeof(struct ucc_geth_rx_prefetched_bds));
1043 if (ugeth->p_init_enet_param_shadow) {
1045 ugeth_info("Init enet param shadow:");
1046 ugeth_info("Base address: 0x%08x",
1047 (u32) ugeth->p_init_enet_param_shadow);
1048 mem_disp((u8 *) ugeth->p_init_enet_param_shadow,
1049 sizeof(*ugeth->p_init_enet_param_shadow));
1051 size = sizeof(struct ucc_geth_thread_rx_pram);
1052 if (ugeth->ug_info->rxExtendedFiltering) {
1054 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
1055 if (ugeth->ug_info->largestexternallookupkeysize ==
1056 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
1058 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
1059 if (ugeth->ug_info->largestexternallookupkeysize ==
1060 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
1062 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
1065 dump_init_enet_entries(ugeth,
1066 &(ugeth->p_init_enet_param_shadow->
1068 ENET_INIT_PARAM_MAX_ENTRIES_TX,
1069 sizeof(struct ucc_geth_thread_tx_pram),
1070 ugeth->ug_info->riscTx, 0);
1071 dump_init_enet_entries(ugeth,
1072 &(ugeth->p_init_enet_param_shadow->
1074 ENET_INIT_PARAM_MAX_ENTRIES_RX, size,
1075 ugeth->ug_info->riscRx, 1);
1080 static void init_default_reg_vals(u32 __iomem *upsmr_register,
1081 u32 __iomem *maccfg1_register,
1082 u32 __iomem *maccfg2_register)
1084 out_be32(upsmr_register, UCC_GETH_UPSMR_INIT);
1085 out_be32(maccfg1_register, UCC_GETH_MACCFG1_INIT);
1086 out_be32(maccfg2_register, UCC_GETH_MACCFG2_INIT);
1089 static int init_half_duplex_params(int alt_beb,
1090 int back_pressure_no_backoff,
1093 u8 alt_beb_truncation,
1094 u8 max_retransmissions,
1095 u8 collision_window,
1096 u32 __iomem *hafdup_register)
1100 if ((alt_beb_truncation > HALFDUP_ALT_BEB_TRUNCATION_MAX) ||
1101 (max_retransmissions > HALFDUP_MAX_RETRANSMISSION_MAX) ||
1102 (collision_window > HALFDUP_COLLISION_WINDOW_MAX))
1105 value = (u32) (alt_beb_truncation << HALFDUP_ALT_BEB_TRUNCATION_SHIFT);
1108 value |= HALFDUP_ALT_BEB;
1109 if (back_pressure_no_backoff)
1110 value |= HALFDUP_BACK_PRESSURE_NO_BACKOFF;
1112 value |= HALFDUP_NO_BACKOFF;
1114 value |= HALFDUP_EXCESSIVE_DEFER;
1116 value |= (max_retransmissions << HALFDUP_MAX_RETRANSMISSION_SHIFT);
1118 value |= collision_window;
1120 out_be32(hafdup_register, value);
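/*
 * Pack the four inter-frame gap fields into IPGIFG.  Part 1 (carrier sense)
 * must not exceed part 2, and the non-back-to-back and back-to-back gaps are
 * range-checked against their architectural maxima before the register is
 * written.
 */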
1124 static int init_inter_frame_gap_params(u8 non_btb_cs_ipg,
1128 u32 __iomem *ipgifg_register)
1132 /* Non-Back-to-back IPG part 1 must not exceed Non-Back-to-back IPG part 2 */
1134 if (non_btb_cs_ipg > non_btb_ipg)
1137 if ((non_btb_cs_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART1_MAX) ||
1138 (non_btb_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART2_MAX) ||
1139 /*(min_ifg > IPGIFG_MINIMUM_IFG_ENFORCEMENT_MAX) || */
1140 (btb_ipg > IPGIFG_BACK_TO_BACK_IFG_MAX))
1144 ((non_btb_cs_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT) &
1145 IPGIFG_NBTB_CS_IPG_MASK);
1147 ((non_btb_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT) &
1148 IPGIFG_NBTB_IPG_MASK);
1150 ((min_ifg << IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT) &
1151 IPGIFG_MIN_IFG_MASK);
1152 value |= (btb_ipg & IPGIFG_BTB_IPG_MASK);
1154 out_be32(ipgifg_register, value);
1158 int init_flow_control_params(u32 automatic_flow_control_mode,
1159 int rx_flow_control_enable,
1160 int tx_flow_control_enable,
1162 u16 extension_field,
1163 u32 __iomem *upsmr_register,
1164 u32 __iomem *uempr_register,
1165 u32 __iomem *maccfg1_register)
1169 /* Set UEMPR register */
1170 value = (u32) pause_period << UEMPR_PAUSE_TIME_VALUE_SHIFT;
1171 value |= (u32) extension_field << UEMPR_EXTENDED_PAUSE_TIME_VALUE_SHIFT;
1172 out_be32(uempr_register, value);
1174 /* Set UPSMR register */
1175 setbits32(upsmr_register, automatic_flow_control_mode);
1177 value = in_be32(maccfg1_register);
1178 if (rx_flow_control_enable)
1179 value |= MACCFG1_FLOW_RX;
1180 if (tx_flow_control_enable)
1181 value |= MACCFG1_FLOW_TX;
1182 out_be32(maccfg1_register, value);
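/*
 * Optionally enable hardware MIB gathering via UPSMR[HSE]; the counters are
 * cleared through UESCR[CLRCNT], and automatic zero-on-read (UESCR[AUTOZ])
 * can be armed as well.
 */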
1187 static int init_hw_statistics_gathering_mode(int enable_hardware_statistics,
1188 int auto_zero_hardware_statistics,
1189 u32 __iomem *upsmr_register,
1190 u16 __iomem *uescr_register)
1192 u16 uescr_value = 0;
1194 /* Enable hardware statistics gathering if requested */
1195 if (enable_hardware_statistics)
1196 setbits32(upsmr_register, UCC_GETH_UPSMR_HSE);
1198 /* Clear hardware statistics counters */
1199 uescr_value = in_be16(uescr_register);
1200 uescr_value |= UESCR_CLRCNT;
1201 /* Automatically zero hardware statistics counters on read, if requested */
1203 if (auto_zero_hardware_statistics)
1204 uescr_value |= UESCR_AUTOZ;
1205 out_be16(uescr_register, uescr_value);
1210 static int init_firmware_statistics_gathering_mode(int
1211 enable_tx_firmware_statistics,
1212 int enable_rx_firmware_statistics,
1213 u32 __iomem *tx_rmon_base_ptr,
1214 u32 tx_firmware_statistics_structure_address,
1215 u32 __iomem *rx_rmon_base_ptr,
1216 u32 rx_firmware_statistics_structure_address,
1217 u16 __iomem *temoder_register,
1218 u32 __iomem *remoder_register)
1220 /* Note: this function does not check whether the parameters it
1221 * receives are NULL */
1223 if (enable_tx_firmware_statistics) {
1224 out_be32(tx_rmon_base_ptr,
1225 tx_firmware_statistics_structure_address);
1226 setbits16(temoder_register, TEMODER_TX_RMON_STATISTICS_ENABLE);
1229 if (enable_rx_firmware_statistics) {
1230 out_be32(rx_rmon_base_ptr,
1231 rx_firmware_statistics_structure_address);
1232 setbits32(remoder_register, REMODER_RX_RMON_STATISTICS_ENABLE);
1238 static int init_mac_station_addr_regs(u8 address_byte_0,
1244 u32 __iomem *macstnaddr1_register,
1245 u32 __iomem *macstnaddr2_register)
1249 /* Example: for a station address of 0x12345678ABCD, */
1250 /* 0x12 is byte 0, 0x34 is byte 1 and so on and 0xCD is byte 5 */
1252 /* MACSTNADDR1 Register: */
1255 /* station address byte 5 station address byte 4 */
1257 /* station address byte 3 station address byte 2 */
1258 value |= (u32) ((address_byte_2 << 0) & 0x000000FF);
1259 value |= (u32) ((address_byte_3 << 8) & 0x0000FF00);
1260 value |= (u32) ((address_byte_4 << 16) & 0x00FF0000);
1261 value |= (u32) ((address_byte_5 << 24) & 0xFF000000);
1263 out_be32(macstnaddr1_register, value);
1265 /* MACSTNADDR2 Register: */
1268 /* station address byte 1 station address byte 0 */
1270 /* reserved reserved */
1272 value |= (u32) ((address_byte_0 << 16) & 0x00FF0000);
1273 value |= (u32) ((address_byte_1 << 24) & 0xFF000000);
1275 out_be32(macstnaddr2_register, value);
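/*
 * Worked example (illustrative): for the station address 12:34:56:78:AB:CD
 * the code above produces MACSTNADDR1 = 0xCDAB7856 and
 * MACSTNADDR2 = 0x34120000.
 */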
1280 static int init_check_frame_length_mode(int length_check,
1281 u32 __iomem *maccfg2_register)
1285 value = in_be32(maccfg2_register);
1288 value |= MACCFG2_LC;
1290 value &= ~MACCFG2_LC;
1292 out_be32(maccfg2_register, value);
1296 static int init_preamble_length(u8 preamble_length,
1297 u32 __iomem *maccfg2_register)
1299 if ((preamble_length < 3) || (preamble_length > 7))
1302 clrsetbits_be32(maccfg2_register, MACCFG2_PREL_MASK,
1303 preamble_length << MACCFG2_PREL_SHIFT);
1308 static int init_rx_parameters(int reject_broadcast,
1309 int receive_short_frames,
1310 int promiscuous, u32 __iomem *upsmr_register)
1314 value = in_be32(upsmr_register);
1316 if (reject_broadcast)
1317 value |= UCC_GETH_UPSMR_BRO;
1319 value &= ~UCC_GETH_UPSMR_BRO;
1321 if (receive_short_frames)
1322 value |= UCC_GETH_UPSMR_RSH;
1324 value &= ~UCC_GETH_UPSMR_RSH;
1327 value |= UCC_GETH_UPSMR_PRO;
1329 value &= ~UCC_GETH_UPSMR_PRO;
1331 out_be32(upsmr_register, value);
1336 static int init_max_rx_buff_len(u16 max_rx_buf_len,
1337 u16 __iomem *mrblr_register)
1339 /* max_rx_buf_len must be a non-zero multiple of 128 */
1340 if ((max_rx_buf_len == 0)
1341 || (max_rx_buf_len % UCC_GETH_MRBLR_ALIGNMENT))
1344 out_be16(mrblr_register, max_rx_buf_len);
1348 static int init_min_frame_len(u16 min_frame_length,
1349 u16 __iomem *minflr_register,
1350 u16 __iomem *mrblr_register)
1352 u16 mrblr_value = 0;
1354 mrblr_value = in_be16(mrblr_register);
1355 if (min_frame_length >= (mrblr_value - 4))
1358 out_be16(minflr_register, min_frame_length);
1362 static int adjust_enet_interface(struct ucc_geth_private *ugeth)
1364 struct ucc_geth_info *ug_info;
1365 struct ucc_geth __iomem *ug_regs;
1366 struct ucc_fast __iomem *uf_regs;
1368 u32 upsmr, maccfg2, tbiBaseAddress;
1371 ugeth_vdbg("%s: IN", __func__);
1373 ug_info = ugeth->ug_info;
1374 ug_regs = ugeth->ug_regs;
1375 uf_regs = ugeth->uccf->uf_regs;
1378 maccfg2 = in_be32(&ug_regs->maccfg2);
1379 maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;
1380 if ((ugeth->max_speed == SPEED_10) ||
1381 (ugeth->max_speed == SPEED_100))
1382 maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
1383 else if (ugeth->max_speed == SPEED_1000)
1384 maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
1385 maccfg2 |= ug_info->padAndCrc;
1386 out_be32(&ug_regs->maccfg2, maccfg2);
1389 upsmr = in_be32(&uf_regs->upsmr);
1390 upsmr &= ~(UCC_GETH_UPSMR_RPM | UCC_GETH_UPSMR_R10M |
1391 UCC_GETH_UPSMR_TBIM | UCC_GETH_UPSMR_RMM);
1392 if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) ||
1393 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) ||
1394 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1395 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1396 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) ||
1397 (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
1398 upsmr |= UCC_GETH_UPSMR_RPM;
1399 switch (ugeth->max_speed) {
1401 upsmr |= UCC_GETH_UPSMR_R10M;
1404 if (ugeth->phy_interface != PHY_INTERFACE_MODE_RTBI)
1405 upsmr |= UCC_GETH_UPSMR_RMM;
1408 if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
1409 (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
1410 upsmr |= UCC_GETH_UPSMR_TBIM;
1412 out_be32(&uf_regs->upsmr, upsmr);
1414 /* Disable autonegotiation in TBI mode, because by default it comes
1415 up with autonegotiation enabled.  Note that this depends on the
1416 UTBIPAR register being set up properly. */
1417 if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
1418 (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
1419 tbiBaseAddress = in_be32(&ug_regs->utbipar);
1420 tbiBaseAddress &= UTBIPAR_PHY_ADDRESS_MASK;
1421 tbiBaseAddress >>= UTBIPAR_PHY_ADDRESS_SHIFT;
1422 value = ugeth->phydev->bus->read(ugeth->phydev->bus,
1423 (u8) tbiBaseAddress, ENET_TBI_MII_CR);
1424 value &= ~0x1000; /* Turn off autonegotiation */
1425 ugeth->phydev->bus->write(ugeth->phydev->bus,
1426 (u8) tbiBaseAddress, ENET_TBI_MII_CR, value);
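/*
 * Bit 0x1000 of the TBI control register corresponds to the ANENABLE bit of
 * a standard MII BMCR, so clearing it forces the link settings.
 */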
1429 init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2);
1431 ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2);
1433 if (netif_msg_probe(ugeth))
1434 ugeth_err("%s: Preamble length must be between 3 and 7 inclusive.",
1442 /* Called every time the controller might need to be made
1443 * aware of new link state. The PHY code conveys this
1444 * information through variables in the ugeth structure, and this
1445 * function converts those variables into the appropriate
1446 * register values, and can bring down the device if needed.
1449 static void adjust_link(struct net_device *dev)
1451 struct ucc_geth_private *ugeth = netdev_priv(dev);
1452 struct ucc_geth __iomem *ug_regs;
1453 struct ucc_fast __iomem *uf_regs;
1454 struct phy_device *phydev = ugeth->phydev;
1455 unsigned long flags;
1458 ug_regs = ugeth->ug_regs;
1459 uf_regs = ugeth->uccf->uf_regs;
1461 spin_lock_irqsave(&ugeth->lock, flags);
1464 u32 tempval = in_be32(&ug_regs->maccfg2);
1465 u32 upsmr = in_be32(&uf_regs->upsmr);
1466 /* Now we make sure that we can be in full duplex mode.
1467 * If not, we operate in half-duplex mode. */
1468 if (phydev->duplex != ugeth->oldduplex) {
1470 if (!(phydev->duplex))
1471 tempval &= ~(MACCFG2_FDX);
1473 tempval |= MACCFG2_FDX;
1474 ugeth->oldduplex = phydev->duplex;
1477 if (phydev->speed != ugeth->oldspeed) {
1479 switch (phydev->speed) {
1481 tempval = ((tempval &
1482 ~(MACCFG2_INTERFACE_MODE_MASK)) |
1483 MACCFG2_INTERFACE_MODE_BYTE);
1487 tempval = ((tempval &
1488 ~(MACCFG2_INTERFACE_MODE_MASK)) |
1489 MACCFG2_INTERFACE_MODE_NIBBLE);
1490 /* if reduced mode, re-set UPSMR.R10M */
1491 if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) ||
1492 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) ||
1493 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1494 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1495 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) ||
1496 (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
1497 if (phydev->speed == SPEED_10)
1498 upsmr |= UCC_GETH_UPSMR_R10M;
1500 upsmr &= ~UCC_GETH_UPSMR_R10M;
1504 if (netif_msg_link(ugeth))
1506 "%s: Ack! Speed (%d) is not 10/100/1000!",
1507 dev->name, phydev->speed);
1510 ugeth->oldspeed = phydev->speed;
1513 out_be32(&ug_regs->maccfg2, tempval);
1514 out_be32(&uf_regs->upsmr, upsmr);
1516 if (!ugeth->oldlink) {
1520 } else if (ugeth->oldlink) {
1523 ugeth->oldspeed = 0;
1524 ugeth->oldduplex = -1;
1527 if (new_state && netif_msg_link(ugeth))
1528 phy_print_status(phydev);
1530 spin_unlock_irqrestore(&ugeth->lock, flags);
1533 /* Configure the PHY for dev.
1534 * Returns 0 on success, -1 on failure.
1536 static int init_phy(struct net_device *dev)
1538 struct ucc_geth_private *priv = netdev_priv(dev);
1539 struct phy_device *phydev;
1540 char phy_id[BUS_ID_SIZE];
1544 priv->oldduplex = -1;
1546 snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, priv->ug_info->mdio_bus,
1547 priv->ug_info->phy_address);
1549 phydev = phy_connect(dev, phy_id, &adjust_link, 0, priv->phy_interface);
1551 if (IS_ERR(phydev)) {
1552 printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
1553 return PTR_ERR(phydev);
1556 phydev->supported &= (ADVERTISED_10baseT_Half |
1557 ADVERTISED_10baseT_Full |
1558 ADVERTISED_100baseT_Half |
1559 ADVERTISED_100baseT_Full);
1561 if (priv->max_speed == SPEED_1000)
1562 phydev->supported |= ADVERTISED_1000baseT_Full;
1564 phydev->advertising = phydev->supported;
1566 priv->phydev = phydev;
1573 static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth)
1575 struct ucc_fast_private *uccf;
1582 /* Mask GRACEFUL STOP TX interrupt bit and clear it */
1583 clrbits32(uccf->p_uccm, UCC_GETH_UCCE_GRA);
1584 out_be32(uccf->p_ucce, UCC_GETH_UCCE_GRA); /* clear by writing 1 */
1586 /* Issue host command */
1588 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
1589 qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
1590 QE_CR_PROTOCOL_ETHERNET, 0);
1592 /* Wait for command to complete */
1595 temp = in_be32(uccf->p_ucce);
1596 } while (!(temp & UCC_GETH_UCCE_GRA) && --i);
1598 uccf->stopped_tx = 1;
1603 static int ugeth_graceful_stop_rx(struct ucc_geth_private * ugeth)
1605 struct ucc_fast_private *uccf;
1612 /* Clear acknowledge bit */
1613 temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack);
1614 temp &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX;
1615 out_8(&ugeth->p_rx_glbl_pram->rxgstpack, temp);
1617 /* Keep issuing command and checking acknowledge bit until
1618 it is asserted, according to spec */
1620 /* Issue host command */
1622 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.
1624 qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock,
1625 QE_CR_PROTOCOL_ETHERNET, 0);
1627 temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack);
1628 } while (!(temp & GRACEFUL_STOP_ACKNOWLEDGE_RX) && --i);
1630 uccf->stopped_rx = 1;
1635 static int ugeth_restart_tx(struct ucc_geth_private *ugeth)
1637 struct ucc_fast_private *uccf;
1643 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
1644 qe_issue_cmd(QE_RESTART_TX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET, 0);
1645 uccf->stopped_tx = 0;
1650 static int ugeth_restart_rx(struct ucc_geth_private *ugeth)
1652 struct ucc_fast_private *uccf;
1658 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
1659 qe_issue_cmd(QE_RESTART_RX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET,
1661 uccf->stopped_rx = 0;
1666 static int ugeth_enable(struct ucc_geth_private *ugeth, enum comm_dir mode)
1668 struct ucc_fast_private *uccf;
1669 int enabled_tx, enabled_rx;
1673 /* check if the UCC number is in range. */
1674 if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
1675 if (netif_msg_probe(ugeth))
1676 ugeth_err("%s: ucc_num out of range.", __func__);
1680 enabled_tx = uccf->enabled_tx;
1681 enabled_rx = uccf->enabled_rx;
1683 /* Get Tx and Rx going again, in case this channel was actively disabled */
1685 if ((mode & COMM_DIR_TX) && (!enabled_tx) && uccf->stopped_tx)
1686 ugeth_restart_tx(ugeth);
1687 if ((mode & COMM_DIR_RX) && (!enabled_rx) && uccf->stopped_rx)
1688 ugeth_restart_rx(ugeth);
1690 ucc_fast_enable(uccf, mode); /* OK to do even if not disabled */
1696 static int ugeth_disable(struct ucc_geth_private * ugeth, enum comm_dir mode)
1698 struct ucc_fast_private *uccf;
1702 /* check if the UCC number is in range. */
1703 if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
1704 if (netif_msg_probe(ugeth))
1705 ugeth_err("%s: ucc_num out of range.", __func__);
1709 /* Stop any transmissions */
1710 if ((mode & COMM_DIR_TX) && uccf->enabled_tx && !uccf->stopped_tx)
1711 ugeth_graceful_stop_tx(ugeth);
1713 /* Stop any receptions */
1714 if ((mode & COMM_DIR_RX) && uccf->enabled_rx && !uccf->stopped_rx)
1715 ugeth_graceful_stop_rx(ugeth);
1717 ucc_fast_disable(ugeth->uccf, mode); /* OK to do even if not enabled */
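/*
 * ugeth_disable()/ugeth_enable() form a pause/resume pair.  Typical use when
 * reprogramming filters or parameter RAM while traffic may be flowing
 * (a sketch; see ugeth_82xx_filtering_clear_all_addr_in_hash() below):
 *
 *	ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
 *	... reprogram hash filter / parameter RAM ...
 *	ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
 */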
1722 static void ugeth_dump_regs(struct ucc_geth_private *ugeth)
1725 ucc_fast_dump_regs(ugeth->uccf);
1731 static int ugeth_82xx_filtering_clear_all_addr_in_hash(struct ucc_geth_private *
1736 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
1737 struct ucc_fast_private *uccf;
1738 enum comm_dir comm_dir;
1739 struct list_head *p_lh;
1741 u32 __iomem *addr_h;
1742 u32 __iomem *addr_l;
1748 (struct ucc_geth_82xx_address_filtering_pram __iomem *)
1749 ugeth->p_rx_glbl_pram->addressfiltering;
1751 if (enet_addr_type == ENET_ADDR_TYPE_GROUP) {
1752 addr_h = &(p_82xx_addr_filt->gaddr_h);
1753 addr_l = &(p_82xx_addr_filt->gaddr_l);
1754 p_lh = &ugeth->group_hash_q;
1755 p_counter = &(ugeth->numGroupAddrInHash);
1756 } else if (enet_addr_type == ENET_ADDR_TYPE_INDIVIDUAL) {
1757 addr_h = &(p_82xx_addr_filt->iaddr_h);
1758 addr_l = &(p_82xx_addr_filt->iaddr_l);
1759 p_lh = &ugeth->ind_hash_q;
1760 p_counter = &(ugeth->numIndAddrInHash);
1765 if (uccf->enabled_tx)
1766 comm_dir |= COMM_DIR_TX;
1767 if (uccf->enabled_rx)
1768 comm_dir |= COMM_DIR_RX;
1770 ugeth_disable(ugeth, comm_dir);
1772 /* Clear the hash table. */
1773 out_be32(addr_h, 0x00000000);
1774 out_be32(addr_l, 0x00000000);
1781 /* Delete all remaining CQ elements */
1782 for (i = 0; i < num; i++)
1783 put_enet_addr_container(ENET_ADDR_CONT_ENTRY(dequeue(p_lh)));
1788 ugeth_enable(ugeth, comm_dir);
1793 static int ugeth_82xx_filtering_clear_addr_in_paddr(struct ucc_geth_private *ugeth,
1796 ugeth->indAddrRegUsed[paddr_num] = 0; /* mark this paddr as not used */
1797 return hw_clear_addr_in_paddr(ugeth, paddr_num);/* clear in hardware */
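/*
 * Release everything the startup path allocated: MURAM parameter RAM areas,
 * BD rings, any skbs still queued on them, and the ioremapped register
 * window.  Every pointer is checked before being freed and NULLed afterwards,
 * so this is safe to call on a partially initialized device.
 */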
1800 static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
1809 ucc_fast_free(ugeth->uccf);
1813 if (ugeth->p_thread_data_tx) {
1814 qe_muram_free(ugeth->thread_dat_tx_offset);
1815 ugeth->p_thread_data_tx = NULL;
1817 if (ugeth->p_thread_data_rx) {
1818 qe_muram_free(ugeth->thread_dat_rx_offset);
1819 ugeth->p_thread_data_rx = NULL;
1821 if (ugeth->p_exf_glbl_param) {
1822 qe_muram_free(ugeth->exf_glbl_param_offset);
1823 ugeth->p_exf_glbl_param = NULL;
1825 if (ugeth->p_rx_glbl_pram) {
1826 qe_muram_free(ugeth->rx_glbl_pram_offset);
1827 ugeth->p_rx_glbl_pram = NULL;
1829 if (ugeth->p_tx_glbl_pram) {
1830 qe_muram_free(ugeth->tx_glbl_pram_offset);
1831 ugeth->p_tx_glbl_pram = NULL;
1833 if (ugeth->p_send_q_mem_reg) {
1834 qe_muram_free(ugeth->send_q_mem_reg_offset);
1835 ugeth->p_send_q_mem_reg = NULL;
1837 if (ugeth->p_scheduler) {
1838 qe_muram_free(ugeth->scheduler_offset);
1839 ugeth->p_scheduler = NULL;
1841 if (ugeth->p_tx_fw_statistics_pram) {
1842 qe_muram_free(ugeth->tx_fw_statistics_pram_offset);
1843 ugeth->p_tx_fw_statistics_pram = NULL;
1845 if (ugeth->p_rx_fw_statistics_pram) {
1846 qe_muram_free(ugeth->rx_fw_statistics_pram_offset);
1847 ugeth->p_rx_fw_statistics_pram = NULL;
1849 if (ugeth->p_rx_irq_coalescing_tbl) {
1850 qe_muram_free(ugeth->rx_irq_coalescing_tbl_offset);
1851 ugeth->p_rx_irq_coalescing_tbl = NULL;
1853 if (ugeth->p_rx_bd_qs_tbl) {
1854 qe_muram_free(ugeth->rx_bd_qs_tbl_offset);
1855 ugeth->p_rx_bd_qs_tbl = NULL;
1857 if (ugeth->p_init_enet_param_shadow) {
1858 return_init_enet_entries(ugeth,
1859 &(ugeth->p_init_enet_param_shadow->
1861 ENET_INIT_PARAM_MAX_ENTRIES_RX,
1862 ugeth->ug_info->riscRx, 1);
1863 return_init_enet_entries(ugeth,
1864 &(ugeth->p_init_enet_param_shadow->
1866 ENET_INIT_PARAM_MAX_ENTRIES_TX,
1867 ugeth->ug_info->riscTx, 0);
1868 kfree(ugeth->p_init_enet_param_shadow);
1869 ugeth->p_init_enet_param_shadow = NULL;
1871 for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
1872 bd = ugeth->p_tx_bd_ring[i];
1875 for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) {
1876 if (ugeth->tx_skbuff[i][j]) {
1877 dma_unmap_single(&ugeth->dev->dev,
1878 in_be32(&((struct qe_bd __iomem *)bd)->buf),
1879 (in_be32((u32 __iomem *)bd) &
1882 dev_kfree_skb_any(ugeth->tx_skbuff[i][j]);
1883 ugeth->tx_skbuff[i][j] = NULL;
1887 kfree(ugeth->tx_skbuff[i]);
1889 if (ugeth->p_tx_bd_ring[i]) {
1890 if (ugeth->ug_info->uf_info.bd_mem_part ==
1892 kfree((void *)ugeth->tx_bd_ring_offset[i]);
1893 else if (ugeth->ug_info->uf_info.bd_mem_part ==
1895 qe_muram_free(ugeth->tx_bd_ring_offset[i]);
1896 ugeth->p_tx_bd_ring[i] = NULL;
1899 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
1900 if (ugeth->p_rx_bd_ring[i]) {
1901 /* Return existing data buffers in ring */
1902 bd = ugeth->p_rx_bd_ring[i];
1903 for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) {
1904 if (ugeth->rx_skbuff[i][j]) {
1905 dma_unmap_single(&ugeth->dev->dev,
1906 in_be32(&((struct qe_bd __iomem *)bd)->buf),
1908 uf_info.max_rx_buf_length +
1909 UCC_GETH_RX_DATA_BUF_ALIGNMENT,
1912 ugeth->rx_skbuff[i][j]);
1913 ugeth->rx_skbuff[i][j] = NULL;
1915 bd += sizeof(struct qe_bd);
1918 kfree(ugeth->rx_skbuff[i]);
1920 if (ugeth->ug_info->uf_info.bd_mem_part ==
1922 kfree((void *)ugeth->rx_bd_ring_offset[i]);
1923 else if (ugeth->ug_info->uf_info.bd_mem_part ==
1925 qe_muram_free(ugeth->rx_bd_ring_offset[i]);
1926 ugeth->p_rx_bd_ring[i] = NULL;
1929 while (!list_empty(&ugeth->group_hash_q))
1930 put_enet_addr_container(ENET_ADDR_CONT_ENTRY
1931 (dequeue(&ugeth->group_hash_q)));
1932 while (!list_empty(&ugeth->ind_hash_q))
1933 put_enet_addr_container(ENET_ADDR_CONT_ENTRY
1934 (dequeue(&ugeth->ind_hash_q)));
1935 if (ugeth->ug_regs) {
1936 iounmap(ugeth->ug_regs);
1937 ugeth->ug_regs = NULL;
1941 static void ucc_geth_set_multi(struct net_device *dev)
1943 struct ucc_geth_private *ugeth;
1944 struct dev_mc_list *dmi;
1945 struct ucc_fast __iomem *uf_regs;
1946 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
1949 ugeth = netdev_priv(dev);
1951 uf_regs = ugeth->uccf->uf_regs;
1953 if (dev->flags & IFF_PROMISC) {
1954 setbits32(&uf_regs->upsmr, UCC_GETH_UPSMR_PRO);
1956 clrbits32(&uf_regs->upsmr, UCC_GETH_UPSMR_PRO);
1959 (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->
1960 p_rx_glbl_pram->addressfiltering;
1962 if (dev->flags & IFF_ALLMULTI) {
1963 /* Catch all multicast addresses, so set the
1964 * filter to all 1's.
1966 out_be32(&p_82xx_addr_filt->gaddr_h, 0xffffffff);
1967 out_be32(&p_82xx_addr_filt->gaddr_l, 0xffffffff);
1969 /* Clear filter and add the addresses in the list.
1971 out_be32(&p_82xx_addr_filt->gaddr_h, 0x0);
1972 out_be32(&p_82xx_addr_filt->gaddr_l, 0x0);
1976 for (i = 0; i < dev->mc_count; i++, dmi = dmi->next) {
1978 /* Only support group multicast for now.
1980 if (!(dmi->dmi_addr[0] & 1))
1983 /* Ask CPM to run CRC and set bit in
1986 hw_add_addr_in_hash(ugeth, dmi->dmi_addr);
1992 static void ucc_geth_stop(struct ucc_geth_private *ugeth)
1994 struct ucc_geth __iomem *ug_regs = ugeth->ug_regs;
1995 struct phy_device *phydev = ugeth->phydev;
1997 ugeth_vdbg("%s: IN", __func__);
1999 /* Disable the controller */
2000 ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
2002 /* Tell the kernel the link is down */
2005 /* Mask all interrupts */
2006 out_be32(ugeth->uccf->p_uccm, 0x00000000);
2008 /* Clear all interrupts */
2009 out_be32(ugeth->uccf->p_ucce, 0xffffffff);
2011 /* Disable Rx and Tx */
2012 clrbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
2014 ucc_geth_memclean(ugeth);
2017 static int ucc_struct_init(struct ucc_geth_private *ugeth)
2019 struct ucc_geth_info *ug_info;
2020 struct ucc_fast_info *uf_info;
2023 ug_info = ugeth->ug_info;
2024 uf_info = &ug_info->uf_info;
2026 if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) ||
2027 (uf_info->bd_mem_part == MEM_PART_MURAM))) {
2028 if (netif_msg_probe(ugeth))
2029 ugeth_err("%s: Bad memory partition value.",
2035 for (i = 0; i < ug_info->numQueuesRx; i++) {
2036 if ((ug_info->bdRingLenRx[i] < UCC_GETH_RX_BD_RING_SIZE_MIN) ||
2037 (ug_info->bdRingLenRx[i] %
2038 UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT)) {
2039 if (netif_msg_probe(ugeth))
2041 ("%s: Rx BD ring length must be multiple of 4, no smaller than 8.",
2048 for (i = 0; i < ug_info->numQueuesTx; i++) {
2049 if (ug_info->bdRingLenTx[i] < UCC_GETH_TX_BD_RING_SIZE_MIN) {
2050 if (netif_msg_probe(ugeth))
2052 ("%s: Tx BD ring length must be no smaller than 2.",
2059 if ((uf_info->max_rx_buf_length == 0) ||
2060 (uf_info->max_rx_buf_length % UCC_GETH_MRBLR_ALIGNMENT)) {
2061 if (netif_msg_probe(ugeth))
2063 ("%s: max_rx_buf_length must be non-zero multiple of 128.",
2069 if (ug_info->numQueuesTx > NUM_TX_QUEUES) {
2070 if (netif_msg_probe(ugeth))
2071 ugeth_err("%s: number of tx queues too large.", __func__);
2076 if (ug_info->numQueuesRx > NUM_RX_QUEUES) {
2077 if (netif_msg_probe(ugeth))
2078 ugeth_err("%s: number of rx queues too large.", __func__);
2083 for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) {
2084 if (ug_info->l2qt[i] >= ug_info->numQueuesRx) {
2085 if (netif_msg_probe(ugeth))
2087 ("%s: VLAN priority table entry must not be"
2088 " larger than number of Rx queues.",
2095 for (i = 0; i < UCC_GETH_IP_PRIORITY_MAX; i++) {
2096 if (ug_info->l3qt[i] >= ug_info->numQueuesRx) {
2097 if (netif_msg_probe(ugeth))
2099 ("%s: IP priority table entry must not be"
2100 " larger than number of Rx queues.",
2106 if (ug_info->cam && !ug_info->ecamptr) {
2107 if (netif_msg_probe(ugeth))
2108 ugeth_err("%s: If cam mode is chosen, must supply cam ptr.",
2113 if ((ug_info->numStationAddresses !=
2114 UCC_GETH_NUM_OF_STATION_ADDRESSES_1)
2115 && ug_info->rxExtendedFiltering) {
2116 if (netif_msg_probe(ugeth))
2117 ugeth_err("%s: Number of station addresses greater than 1 "
2118 "not allowed in extended parsing mode.",
2123 /* Generate uccm_mask for receive */
2124 uf_info->uccm_mask = ug_info->eventRegMask & UCCE_OTHER;/* Errors */
2125 for (i = 0; i < ug_info->numQueuesRx; i++)
2126 uf_info->uccm_mask |= (UCC_GETH_UCCE_RXF0 << i);
2128 for (i = 0; i < ug_info->numQueuesTx; i++)
2129 uf_info->uccm_mask |= (UCC_GETH_UCCE_TXB0 << i);
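/* With this mask only the RXF event of each Rx queue in use and the TXB event
 * of each Tx queue in use are unmasked, besides the error events selected
 * above, so queues that are not configured never raise interrupts. */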
2130 /* Initialize the general fast UCC block. */
2131 if (ucc_fast_init(uf_info, &ugeth->uccf)) {
2132 if (netif_msg_probe(ugeth))
2133 ugeth_err("%s: Failed to init uccf.", __func__);
2137 ugeth->ug_regs = ioremap(uf_info->regs, sizeof(*ugeth->ug_regs));
2138 if (!ugeth->ug_regs) {
2139 if (netif_msg_probe(ugeth))
2140 ugeth_err("%s: Failed to ioremap regs.", __func__);
2147 static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2149 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
2150 struct ucc_geth_init_pram __iomem *p_init_enet_pram;
2151 struct ucc_fast_private *uccf;
2152 struct ucc_geth_info *ug_info;
2153 struct ucc_fast_info *uf_info;
2154 struct ucc_fast __iomem *uf_regs;
2155 struct ucc_geth __iomem *ug_regs;
2156 int ret_val = -EINVAL;
2157 u32 remoder = UCC_GETH_REMODER_INIT;
2158 u32 init_enet_pram_offset, cecr_subblock, command;
2159 u32 ifstat, i, j, size, l2qt, l3qt, length;
2160 u16 temoder = UCC_GETH_TEMODER_INIT;
2161 u16 test;
2162 u8 function_code = 0;
2163 u8 __iomem *bd;
2164 u8 __iomem *endOfRing;
2165 u8 numThreadsRxNumerical, numThreadsTxNumerical;
2167 ugeth_vdbg("%s: IN", __func__);
2168 uccf = ugeth->uccf;
2169 ug_info = ugeth->ug_info;
2170 uf_info = &ug_info->uf_info;
2171 uf_regs = uccf->uf_regs;
2172 ug_regs = ugeth->ug_regs;
2174 switch (ug_info->numThreadsRx) {
2175 case UCC_GETH_NUM_OF_THREADS_1:
2176 numThreadsRxNumerical = 1;
2178 case UCC_GETH_NUM_OF_THREADS_2:
2179 numThreadsRxNumerical = 2;
2181 case UCC_GETH_NUM_OF_THREADS_4:
2182 numThreadsRxNumerical = 4;
2184 case UCC_GETH_NUM_OF_THREADS_6:
2185 numThreadsRxNumerical = 6;
2187 case UCC_GETH_NUM_OF_THREADS_8:
2188 numThreadsRxNumerical = 8;
2191 if (netif_msg_ifup(ugeth))
2192 ugeth_err("%s: Bad number of Rx threads value.",
2198 switch (ug_info->numThreadsTx) {
2199 case UCC_GETH_NUM_OF_THREADS_1:
2200 numThreadsTxNumerical = 1;
2202 case UCC_GETH_NUM_OF_THREADS_2:
2203 numThreadsTxNumerical = 2;
2205 case UCC_GETH_NUM_OF_THREADS_4:
2206 numThreadsTxNumerical = 4;
2208 case UCC_GETH_NUM_OF_THREADS_6:
2209 numThreadsTxNumerical = 6;
2211 case UCC_GETH_NUM_OF_THREADS_8:
2212 numThreadsTxNumerical = 8;
2215 if (netif_msg_ifup(ugeth))
2216 ugeth_err("%s: Bad number of Tx threads value.",
2222 /* Calculate rx_extended_features */
2223 ugeth->rx_non_dynamic_extended_features = ug_info->ipCheckSumCheck ||
2224 ug_info->ipAddressAlignment ||
2225 (ug_info->numStationAddresses !=
2226 UCC_GETH_NUM_OF_STATION_ADDRESSES_1);
2228 ugeth->rx_extended_features = ugeth->rx_non_dynamic_extended_features ||
2229 (ug_info->vlanOperationTagged != UCC_GETH_VLAN_OPERATION_TAGGED_NOP)
2230 || (ug_info->vlanOperationNonTagged !=
2231 UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP);
2233 init_default_reg_vals(&uf_regs->upsmr,
2234 &ug_regs->maccfg1, &ug_regs->maccfg2);
2237 /* For more details see the hardware spec. */
2238 init_rx_parameters(ug_info->bro,
2239 ug_info->rsh, ug_info->pro, &uf_regs->upsmr);
2241 /* We're going to ignore other registers for now, */
2242 /* except as needed to get up and running */
2245 /* For more details see the hardware spec. */
2246 init_flow_control_params(ug_info->aufc,
2247 ug_info->receiveFlowControl,
2248 ug_info->transmitFlowControl,
2249 ug_info->pausePeriod,
2250 ug_info->extensionField,
2252 &ug_regs->uempr, &ug_regs->maccfg1);
2254 setbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
2257 /* For more details see the hardware spec. */
2258 ret_val = init_inter_frame_gap_params(ug_info->nonBackToBackIfgPart1,
2259 ug_info->nonBackToBackIfgPart2,
2261 miminumInterFrameGapEnforcement,
2262 ug_info->backToBackInterFrameGap,
2265 if (netif_msg_ifup(ugeth))
2266 ugeth_err("%s: IPGIFG initialization parameter too large.",
2272 /* For more details see the hardware spec. */
2273 ret_val = init_half_duplex_params(ug_info->altBeb,
2274 ug_info->backPressureNoBackoff,
2276 ug_info->excessDefer,
2277 ug_info->altBebTruncation,
2278 ug_info->maxRetransmission,
2279 ug_info->collisionWindow,
2282 if (netif_msg_ifup(ugeth))
2283 ugeth_err("%s: Half Duplex initialization parameter too large.",
2289 /* For more details see the hardware spec. */
2290 /* Read only - resets upon read */
2291 ifstat = in_be32(&ug_regs->ifstat);
2294 /* For more details see the hardware spec. */
2295 out_be32(&ug_regs->uempr, 0);
2298 /* For more details see the hardware spec. */
2299 init_hw_statistics_gathering_mode((ug_info->statisticsMode &
2300 UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE),
2301 0, &uf_regs->upsmr, &ug_regs->uescr);
2303 /* Allocate Tx bds */
2304 for (j = 0; j < ug_info->numQueuesTx; j++) {
2305 /* Allocate in multiples of
2306 UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT,
2307 according to spec */
2308 length = ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd))
2309 / UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
2310 * UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
2311 if ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)) %
2312 UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
2313 length += UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
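/* The ring is placed either in system memory (kmalloc below, with the pointer
 * rounded up by hand to UCC_GETH_TX_BD_RING_ALIGNMENT) or in QE MURAM
 * (qe_muram_alloc, which takes the required alignment directly). */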
2314 if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
2315 u32 align = 4;
2316 if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4)
2317 align = UCC_GETH_TX_BD_RING_ALIGNMENT;
2318 ugeth->tx_bd_ring_offset[j] =
2319 (u32) kmalloc((u32) (length + align), GFP_KERNEL);
2321 if (ugeth->tx_bd_ring_offset[j] != 0)
2322 ugeth->p_tx_bd_ring[j] =
2323 (u8 __iomem *)((ugeth->tx_bd_ring_offset[j] +
2324 align) & ~(align - 1));
2325 } else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
2326 ugeth->tx_bd_ring_offset[j] =
2327 qe_muram_alloc(length,
2328 UCC_GETH_TX_BD_RING_ALIGNMENT);
2329 if (!IS_ERR_VALUE(ugeth->tx_bd_ring_offset[j]))
2330 ugeth->p_tx_bd_ring[j] =
2331 (u8 __iomem *) qe_muram_addr(ugeth->
2332 tx_bd_ring_offset[j]);
2334 if (!ugeth->p_tx_bd_ring[j]) {
2335 if (netif_msg_ifup(ugeth))
2337 ("%s: Can not allocate memory for Tx bd rings.",
2341 /* Zero unused end of bd ring, according to spec */
2342 memset_io((void __iomem *)(ugeth->p_tx_bd_ring[j] +
2343 ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)), 0,
2344 length - ug_info->bdRingLenTx[j] * sizeof(struct qe_bd));
2347 /* Allocate Rx bds */
2348 for (j = 0; j < ug_info->numQueuesRx; j++) {
2349 length = ug_info->bdRingLenRx[j] * sizeof(struct qe_bd);
2350 if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
2351 u32 align = 4;
2352 if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4)
2353 align = UCC_GETH_RX_BD_RING_ALIGNMENT;
2354 ugeth->rx_bd_ring_offset[j] =
2355 (u32) kmalloc((u32) (length + align), GFP_KERNEL);
2356 if (ugeth->rx_bd_ring_offset[j] != 0)
2357 ugeth->p_rx_bd_ring[j] =
2358 (u8 __iomem *)((ugeth->rx_bd_ring_offset[j] +
2359 align) & ~(align - 1));
2360 } else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
2361 ugeth->rx_bd_ring_offset[j] =
2362 qe_muram_alloc(length,
2363 UCC_GETH_RX_BD_RING_ALIGNMENT);
2364 if (!IS_ERR_VALUE(ugeth->rx_bd_ring_offset[j]))
2365 ugeth->p_rx_bd_ring[j] =
2366 (u8 __iomem *) qe_muram_addr(ugeth->
2367 rx_bd_ring_offset[j]);
2369 if (!ugeth->p_rx_bd_ring[j]) {
2370 if (netif_msg_ifup(ugeth))
2372 ("%s: Can not allocate memory for Rx bd rings.",
2379 for (j = 0; j < ug_info->numQueuesTx; j++) {
2380 /* Setup the skbuff rings */
2381 ugeth->tx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) *
2382 ugeth->ug_info->bdRingLenTx[j],
2385 if (ugeth->tx_skbuff[j] == NULL) {
2386 if (netif_msg_ifup(ugeth))
2387 ugeth_err("%s: Could not allocate tx_skbuff",
2392 for (i = 0; i < ugeth->ug_info->bdRingLenTx[j]; i++)
2393 ugeth->tx_skbuff[j][i] = NULL;
2395 ugeth->skb_curtx[j] = ugeth->skb_dirtytx[j] = 0;
2396 bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j];
2397 for (i = 0; i < ug_info->bdRingLenTx[j]; i++) {
2398 /* clear bd buffer */
2399 out_be32(&((struct qe_bd __iomem *)bd)->buf, 0);
2400 /* set bd status and length */
2401 out_be32((u32 __iomem *)bd, 0);
2402 bd += sizeof(struct qe_bd);
2404 bd -= sizeof(struct qe_bd);
2405 /* set bd status and length */
2406 out_be32((u32 __iomem *)bd, T_W); /* for last BD set Wrap bit */
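/* The wrap (T_W) bit in the last BD makes the table circular: the controller
 * returns to the first BD after processing this one. */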
2410 for (j = 0; j < ug_info->numQueuesRx; j++) {
2411 /* Setup the skbuff rings */
2412 ugeth->rx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) *
2413 ugeth->ug_info->bdRingLenRx[j],
2416 if (ugeth->rx_skbuff[j] == NULL) {
2417 if (netif_msg_ifup(ugeth))
2418 ugeth_err("%s: Could not allocate rx_skbuff",
2423 for (i = 0; i < ugeth->ug_info->bdRingLenRx[j]; i++)
2424 ugeth->rx_skbuff[j][i] = NULL;
2426 ugeth->skb_currx[j] = 0;
2427 bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j];
2428 for (i = 0; i < ug_info->bdRingLenRx[j]; i++) {
2429 /* set bd status and length */
2430 out_be32((u32 __iomem *)bd, R_I);
2431 /* clear bd buffer */
2432 out_be32(&((struct qe_bd __iomem *)bd)->buf, 0);
2433 bd += sizeof(struct qe_bd);
2435 bd -= sizeof(struct qe_bd);
2436 /* set bd status and length */
2437 out_be32((u32 __iomem *)bd, R_W); /* for last BD set Wrap bit */
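/* R_I requests an interrupt when a buffer completes; how often interrupts are
 * actually raised is bounded later by the Rx interrupt coalescing table.
 * R_W again marks the end of the ring. */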
2443 /* Tx global PRAM */
2444 /* Allocate global tx parameter RAM page */
2445 ugeth->tx_glbl_pram_offset =
2446 qe_muram_alloc(sizeof(struct ucc_geth_tx_global_pram),
2447 UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT);
2448 if (IS_ERR_VALUE(ugeth->tx_glbl_pram_offset)) {
2449 if (netif_msg_ifup(ugeth))
2451 ("%s: Can not allocate DPRAM memory for p_tx_glbl_pram.",
2455 ugeth->p_tx_glbl_pram =
2456 (struct ucc_geth_tx_global_pram __iomem *) qe_muram_addr(ugeth->
2457 tx_glbl_pram_offset);
2458 /* Zero out p_tx_glbl_pram */
2459 memset_io((void __iomem *)ugeth->p_tx_glbl_pram, 0, sizeof(struct ucc_geth_tx_global_pram));
2461 /* Fill global PRAM */
2464 /* Size varies with number of Tx threads */
2465 ugeth->thread_dat_tx_offset =
2466 qe_muram_alloc(numThreadsTxNumerical *
2467 sizeof(struct ucc_geth_thread_data_tx) +
2468 32 * (numThreadsTxNumerical == 1),
2469 UCC_GETH_THREAD_DATA_ALIGNMENT);
2470 if (IS_ERR_VALUE(ugeth->thread_dat_tx_offset)) {
2471 if (netif_msg_ifup(ugeth))
2473 ("%s: Can not allocate DPRAM memory for p_thread_data_tx.",
2478 ugeth->p_thread_data_tx =
2479 (struct ucc_geth_thread_data_tx __iomem *) qe_muram_addr(ugeth->
2480 thread_dat_tx_offset);
2481 out_be32(&ugeth->p_tx_glbl_pram->tqptr, ugeth->thread_dat_tx_offset);
2484 for (i = 0; i < UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX; i++)
2485 out_be32(&ugeth->p_tx_glbl_pram->vtagtable[i],
2486 ug_info->vtagtable[i]);
2489 for (i = 0; i < TX_IP_OFFSET_ENTRY_MAX; i++)
2490 out_8(&ugeth->p_tx_glbl_pram->iphoffset[i],
2491 ug_info->iphoffset[i]);
2494 /* Size varies with number of Tx queues */
2495 ugeth->send_q_mem_reg_offset =
2496 qe_muram_alloc(ug_info->numQueuesTx *
2497 sizeof(struct ucc_geth_send_queue_qd),
2498 UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT);
2499 if (IS_ERR_VALUE(ugeth->send_q_mem_reg_offset)) {
2500 if (netif_msg_ifup(ugeth))
2502 ("%s: Can not allocate DPRAM memory for p_send_q_mem_reg.",
2507 ugeth->p_send_q_mem_reg =
2508 (struct ucc_geth_send_queue_mem_region __iomem *) qe_muram_addr(ugeth->
2509 send_q_mem_reg_offset);
2510 out_be32(&ugeth->p_tx_glbl_pram->sqptr, ugeth->send_q_mem_reg_offset);
2512 /* Setup the table */
2513 /* Assume BD rings are already established */
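/* Each send queue descriptor holds the physical base address of its Tx BD ring
 * and the address of its last BD, translated with virt_to_phys() for rings in
 * system memory or immrbar_virt_to_phys() for rings in MURAM. */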
2514 for (i = 0; i < ug_info->numQueuesTx; i++) {
2516 endOfRing = ugeth->p_tx_bd_ring[i] + (ug_info->bdRingLenTx[i] -
2517 1) * sizeof(struct qe_bd);
2518 if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
2519 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
2520 (u32) virt_to_phys(ugeth->p_tx_bd_ring[i]));
2521 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
2522 last_bd_completed_address,
2523 (u32) virt_to_phys(endOfRing));
2524 } else if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_MURAM) {
2526 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
2527 (u32) immrbar_virt_to_phys(ugeth->p_tx_bd_ring[i]));
2529 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
2530 last_bd_completed_address,
2531 (u32) immrbar_virt_to_phys(endOfRing));
2535 /* schedulerbasepointer */
2537 if (ug_info->numQueuesTx > 1) {
2538 /* scheduler exists only if more than 1 tx queue */
2539 ugeth->scheduler_offset =
2540 qe_muram_alloc(sizeof(struct ucc_geth_scheduler),
2541 UCC_GETH_SCHEDULER_ALIGNMENT);
2542 if (IS_ERR_VALUE(ugeth->scheduler_offset)) {
2543 if (netif_msg_ifup(ugeth))
2545 ("%s: Can not allocate DPRAM memory for p_scheduler.",
2550 ugeth->p_scheduler =
2551 (struct ucc_geth_scheduler __iomem *) qe_muram_addr(ugeth->
2553 out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer,
2554 ugeth->scheduler_offset);
2555 /* Zero out p_scheduler */
2556 memset_io((void __iomem *)ugeth->p_scheduler, 0, sizeof(struct ucc_geth_scheduler));
2558 /* Set values in scheduler */
2559 out_be32(&ugeth->p_scheduler->mblinterval,
2560 ug_info->mblinterval);
2561 out_be16(&ugeth->p_scheduler->nortsrbytetime,
2562 ug_info->nortsrbytetime);
2563 out_8(&ugeth->p_scheduler->fracsiz, ug_info->fracsiz);
2564 out_8(&ugeth->p_scheduler->strictpriorityq,
2565 ug_info->strictpriorityq);
2566 out_8(&ugeth->p_scheduler->txasap, ug_info->txasap);
2567 out_8(&ugeth->p_scheduler->extrabw, ug_info->extrabw);
2568 for (i = 0; i < NUM_TX_QUEUES; i++)
2569 out_8(&ugeth->p_scheduler->weightfactor[i],
2570 ug_info->weightfactor[i]);
2572 /* Set pointers to cpucount registers in scheduler */
2573 ugeth->p_cpucount[0] = &(ugeth->p_scheduler->cpucount0);
2574 ugeth->p_cpucount[1] = &(ugeth->p_scheduler->cpucount1);
2575 ugeth->p_cpucount[2] = &(ugeth->p_scheduler->cpucount2);
2576 ugeth->p_cpucount[3] = &(ugeth->p_scheduler->cpucount3);
2577 ugeth->p_cpucount[4] = &(ugeth->p_scheduler->cpucount4);
2578 ugeth->p_cpucount[5] = &(ugeth->p_scheduler->cpucount5);
2579 ugeth->p_cpucount[6] = &(ugeth->p_scheduler->cpucount6);
2580 ugeth->p_cpucount[7] = &(ugeth->p_scheduler->cpucount7);
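/* The cpucount cells are written from ucc_geth_start_xmit() as a running count
 * of queued BDs; this is how the QE scheduler is told that more Tx work is
 * pending when several Tx queues are in use. */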
2583 /* schedulerbasepointer */
2584 /* TxRMON_PTR (statistics) */
2586 if (ug_info->statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) {
2587 ugeth->tx_fw_statistics_pram_offset =
2588 qe_muram_alloc(sizeof
2589 (struct ucc_geth_tx_firmware_statistics_pram),
2590 UCC_GETH_TX_STATISTICS_ALIGNMENT);
2591 if (IS_ERR_VALUE(ugeth->tx_fw_statistics_pram_offset)) {
2592 if (netif_msg_ifup(ugeth))
2594 ("%s: Can not allocate DPRAM memory for"
2595 " p_tx_fw_statistics_pram.",
2599 ugeth->p_tx_fw_statistics_pram =
2600 (struct ucc_geth_tx_firmware_statistics_pram __iomem *)
2601 qe_muram_addr(ugeth->tx_fw_statistics_pram_offset);
2602 /* Zero out p_tx_fw_statistics_pram */
2603 memset_io((void __iomem *)ugeth->p_tx_fw_statistics_pram,
2604 0, sizeof(struct ucc_geth_tx_firmware_statistics_pram));
2608 /* Already has speed set */
2610 if (ug_info->numQueuesTx > 1)
2611 temoder |= TEMODER_SCHEDULER_ENABLE;
2612 if (ug_info->ipCheckSumGenerate)
2613 temoder |= TEMODER_IP_CHECKSUM_GENERATE;
2614 temoder |= ((ug_info->numQueuesTx - 1) << TEMODER_NUM_OF_QUEUES_SHIFT);
2615 out_be16(&ugeth->p_tx_glbl_pram->temoder, temoder);
2617 test = in_be16(&ugeth->p_tx_glbl_pram->temoder);
2619 /* Function code register value to be used later */
2620 function_code = UCC_BMR_BO_BE | UCC_BMR_GBL;
2621 /* Required for QE */
2623 /* function code register */
2624 out_be32(&ugeth->p_tx_glbl_pram->tstate, ((u32) function_code) << 24);
2626 /* Rx global PRAM */
2627 /* Allocate global rx parameter RAM page */
2628 ugeth->rx_glbl_pram_offset =
2629 qe_muram_alloc(sizeof(struct ucc_geth_rx_global_pram),
2630 UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT);
2631 if (IS_ERR_VALUE(ugeth->rx_glbl_pram_offset)) {
2632 if (netif_msg_ifup(ugeth))
2634 ("%s: Can not allocate DPRAM memory for p_rx_glbl_pram.",
2638 ugeth->p_rx_glbl_pram =
2639 (struct ucc_geth_rx_global_pram __iomem *) qe_muram_addr(ugeth->
2640 rx_glbl_pram_offset);
2641 /* Zero out p_rx_glbl_pram */
2642 memset_io((void __iomem *)ugeth->p_rx_glbl_pram, 0, sizeof(struct ucc_geth_rx_global_pram));
2644 /* Fill global PRAM */
2647 /* Size varies with number of Rx threads */
2648 ugeth->thread_dat_rx_offset =
2649 qe_muram_alloc(numThreadsRxNumerical *
2650 sizeof(struct ucc_geth_thread_data_rx),
2651 UCC_GETH_THREAD_DATA_ALIGNMENT);
2652 if (IS_ERR_VALUE(ugeth->thread_dat_rx_offset)) {
2653 if (netif_msg_ifup(ugeth))
2655 ("%s: Can not allocate DPRAM memory for p_thread_data_rx.",
2660 ugeth->p_thread_data_rx =
2661 (struct ucc_geth_thread_data_rx __iomem *) qe_muram_addr(ugeth->
2662 thread_dat_rx_offset);
2663 out_be32(&ugeth->p_rx_glbl_pram->rqptr, ugeth->thread_dat_rx_offset);
2666 out_be16(&ugeth->p_rx_glbl_pram->typeorlen, ug_info->typeorlen);
2668 /* rxrmonbaseptr (statistics) */
2670 if (ug_info->statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) {
2671 ugeth->rx_fw_statistics_pram_offset =
2672 qe_muram_alloc(sizeof
2673 (struct ucc_geth_rx_firmware_statistics_pram),
2674 UCC_GETH_RX_STATISTICS_ALIGNMENT);
2675 if (IS_ERR_VALUE(ugeth->rx_fw_statistics_pram_offset)) {
2676 if (netif_msg_ifup(ugeth))
2678 ("%s: Can not allocate DPRAM memory for"
2679 " p_rx_fw_statistics_pram.", __func__);
2682 ugeth->p_rx_fw_statistics_pram =
2683 (struct ucc_geth_rx_firmware_statistics_pram __iomem *)
2684 qe_muram_addr(ugeth->rx_fw_statistics_pram_offset);
2685 /* Zero out p_rx_fw_statistics_pram */
2686 memset_io((void __iomem *)ugeth->p_rx_fw_statistics_pram, 0,
2687 sizeof(struct ucc_geth_rx_firmware_statistics_pram));
2690 /* intCoalescingPtr */
2692 /* Size varies with number of Rx queues */
2693 ugeth->rx_irq_coalescing_tbl_offset =
2694 qe_muram_alloc(ug_info->numQueuesRx *
2695 sizeof(struct ucc_geth_rx_interrupt_coalescing_entry)
2696 + 4, UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT);
2697 if (IS_ERR_VALUE(ugeth->rx_irq_coalescing_tbl_offset)) {
2698 if (netif_msg_ifup(ugeth))
2700 ("%s: Can not allocate DPRAM memory for"
2701 " p_rx_irq_coalescing_tbl.", __func__);
2705 ugeth->p_rx_irq_coalescing_tbl =
2706 (struct ucc_geth_rx_interrupt_coalescing_table __iomem *)
2707 qe_muram_addr(ugeth->rx_irq_coalescing_tbl_offset);
2708 out_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr,
2709 ugeth->rx_irq_coalescing_tbl_offset);
2711 /* Fill interrupt coalescing table */
2712 for (i = 0; i < ug_info->numQueuesRx; i++) {
2713 out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].
2714 interruptcoalescingmaxvalue,
2715 ug_info->interruptcoalescingmaxvalue[i]);
2716 out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].
2717 interruptcoalescingcounter,
2718 ug_info->interruptcoalescingmaxvalue[i]);
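/* Both the limit and the running counter start at interruptcoalescingmaxvalue;
 * the counter presumably ticks per received frame, so a value of 1 means an
 * interrupt for every frame on that queue. */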
2722 init_max_rx_buff_len(uf_info->max_rx_buf_length,
2723 &ugeth->p_rx_glbl_pram->mrblr);
2725 out_be16(&ugeth->p_rx_glbl_pram->mflr, ug_info->maxFrameLength);
2727 init_min_frame_len(ug_info->minFrameLength,
2728 &ugeth->p_rx_glbl_pram->minflr,
2729 &ugeth->p_rx_glbl_pram->mrblr);
2731 out_be16(&ugeth->p_rx_glbl_pram->maxd1, ug_info->maxD1Length);
2733 out_be16(&ugeth->p_rx_glbl_pram->maxd2, ug_info->maxD2Length);
2737 for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++)
2738 l2qt |= (ug_info->l2qt[i] << (28 - 4 * i));
2739 out_be32(&ugeth->p_rx_glbl_pram->l2qt, l2qt);
2742 for (j = 0; j < UCC_GETH_IP_PRIORITY_MAX; j += 8) {
2744 for (i = 0; i < 8; i++)
2745 l3qt |= (ug_info->l3qt[j + i] << (28 - 4 * i));
2746 out_be32(&ugeth->p_rx_glbl_pram->l3qt[j/8], l3qt);
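/* l2qt and l3qt each pack eight 4-bit Rx queue numbers per 32-bit word, with
 * the highest priority in the most significant nibble (hence the 28 - 4 * i
 * shift). */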
2750 out_be16(&ugeth->p_rx_glbl_pram->vlantype, ug_info->vlantype);
2753 out_be16(&ugeth->p_rx_glbl_pram->vlantci, ug_info->vlantci);
2756 out_be32(&ugeth->p_rx_glbl_pram->ecamptr, ug_info->ecamptr);
2759 /* Size varies with number of Rx queues */
2760 ugeth->rx_bd_qs_tbl_offset =
2761 qe_muram_alloc(ug_info->numQueuesRx *
2762 (sizeof(struct ucc_geth_rx_bd_queues_entry) +
2763 sizeof(struct ucc_geth_rx_prefetched_bds)),
2764 UCC_GETH_RX_BD_QUEUES_ALIGNMENT);
2765 if (IS_ERR_VALUE(ugeth->rx_bd_qs_tbl_offset)) {
2766 if (netif_msg_ifup(ugeth))
2768 ("%s: Can not allocate DPRAM memory for p_rx_bd_qs_tbl.",
2773 ugeth->p_rx_bd_qs_tbl =
2774 (struct ucc_geth_rx_bd_queues_entry __iomem *) qe_muram_addr(ugeth->
2775 rx_bd_qs_tbl_offset);
2776 out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset);
2777 /* Zero out p_rx_bd_qs_tbl */
2778 memset_io((void __iomem *)ugeth->p_rx_bd_qs_tbl,
2780 ug_info->numQueuesRx * (sizeof(struct ucc_geth_rx_bd_queues_entry) +
2781 sizeof(struct ucc_geth_rx_prefetched_bds)));
2783 /* Setup the table */
2784 /* Assume BD rings are already established */
2785 for (i = 0; i < ug_info->numQueuesRx; i++) {
2786 if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
2787 out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
2788 (u32) virt_to_phys(ugeth->p_rx_bd_ring[i]));
2789 } else if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_MURAM) {
2791 out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
2792 (u32) immrbar_virt_to_phys(ugeth->p_rx_bd_ring[i]));
2795 /* rest of fields handled by QE */
2799 /* Already has speed set */
2801 if (ugeth->rx_extended_features)
2802 remoder |= REMODER_RX_EXTENDED_FEATURES;
2803 if (ug_info->rxExtendedFiltering)
2804 remoder |= REMODER_RX_EXTENDED_FILTERING;
2805 if (ug_info->dynamicMaxFrameLength)
2806 remoder |= REMODER_DYNAMIC_MAX_FRAME_LENGTH;
2807 if (ug_info->dynamicMinFrameLength)
2808 remoder |= REMODER_DYNAMIC_MIN_FRAME_LENGTH;
2810 remoder |= ug_info->vlanOperationTagged << REMODER_VLAN_OPERATION_TAGGED_SHIFT;
2813 remoder |= ug_info->vlanOperationNonTagged << REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT;
2814 remoder |= ug_info->rxQoSMode << REMODER_RX_QOS_MODE_SHIFT;
2815 remoder |= ((ug_info->numQueuesRx - 1) << REMODER_NUM_OF_QUEUES_SHIFT);
2816 if (ug_info->ipCheckSumCheck)
2817 remoder |= REMODER_IP_CHECKSUM_CHECK;
2818 if (ug_info->ipAddressAlignment)
2819 remoder |= REMODER_IP_ADDRESS_ALIGNMENT;
2820 out_be32(&ugeth->p_rx_glbl_pram->remoder, remoder);
2822 /* Note that this function must be called */
2823 /* ONLY AFTER p_tx_fw_statistics_pram */
2824 /* and p_rx_fw_statistics_pram are allocated! */
2825 init_firmware_statistics_gathering_mode((ug_info->
2827 UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX),
2828 (ug_info->statisticsMode &
2829 UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX),
2830 &ugeth->p_tx_glbl_pram->txrmonbaseptr,
2831 ugeth->tx_fw_statistics_pram_offset,
2832 &ugeth->p_rx_glbl_pram->rxrmonbaseptr,
2833 ugeth->rx_fw_statistics_pram_offset,
2834 &ugeth->p_tx_glbl_pram->temoder,
2835 &ugeth->p_rx_glbl_pram->remoder);
2837 /* function code register */
2838 out_8(&ugeth->p_rx_glbl_pram->rstate, function_code);
2840 /* initialize extended filtering */
2841 if (ug_info->rxExtendedFiltering) {
2842 if (!ug_info->extendedFilteringChainPointer) {
2843 if (netif_msg_ifup(ugeth))
2844 ugeth_err("%s: Null Extended Filtering Chain Pointer.",
2849 /* Allocate memory for extended filtering Mode Global Parameters */
2851 ugeth->exf_glbl_param_offset =
2852 qe_muram_alloc(sizeof(struct ucc_geth_exf_global_pram),
2853 UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT);
2854 if (IS_ERR_VALUE(ugeth->exf_glbl_param_offset)) {
2855 if (netif_msg_ifup(ugeth))
2857 ("%s: Can not allocate DPRAM memory for"
2858 " p_exf_glbl_param.", __func__);
2862 ugeth->p_exf_glbl_param =
2863 (struct ucc_geth_exf_global_pram __iomem *) qe_muram_addr(ugeth->
2864 exf_glbl_param_offset);
2865 out_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam,
2866 ugeth->exf_glbl_param_offset);
2867 out_be32(&ugeth->p_exf_glbl_param->l2pcdptr,
2868 (u32) ug_info->extendedFilteringChainPointer);
2870 } else { /* initialize 82xx style address filtering */
2872 /* Init individual address recognition registers to disabled */
2874 for (j = 0; j < NUM_OF_PADDRS; j++)
2875 ugeth_82xx_filtering_clear_addr_in_paddr(ugeth, (u8) j);
2878 p_82xx_addr_filt = (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->
2879 p_rx_glbl_pram->addressfiltering;
2881 ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
2882 ENET_ADDR_TYPE_GROUP);
2883 ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
2884 ENET_ADDR_TYPE_INDIVIDUAL);
2888 * Initialize UCC at QE level
2891 command = QE_INIT_TX_RX;
2893 /* Allocate shadow InitEnet command parameter structure.
2894 * This is needed because after the InitEnet command is executed,
2895 * the structure in DPRAM is released, because DPRAM is a premium
2896 * resource.
2897 * This shadow structure keeps a copy of what was done so that the
2898 * allocated resources can be released when the channel is freed.
2899 */
2900 if (!(ugeth->p_init_enet_param_shadow =
2901 kmalloc(sizeof(struct ucc_geth_init_pram), GFP_KERNEL))) {
2902 if (netif_msg_ifup(ugeth))
2904 ("%s: Can not allocate memory for"
2905 " p_UccInitEnetParamShadows.", __func__);
2908 /* Zero out *p_init_enet_param_shadow */
2909 memset((char *)ugeth->p_init_enet_param_shadow,
2910 0, sizeof(struct ucc_geth_init_pram));
2912 /* Fill shadow InitEnet command parameter structure */
2914 ugeth->p_init_enet_param_shadow->resinit1 =
2915 ENET_INIT_PARAM_MAGIC_RES_INIT1;
2916 ugeth->p_init_enet_param_shadow->resinit2 =
2917 ENET_INIT_PARAM_MAGIC_RES_INIT2;
2918 ugeth->p_init_enet_param_shadow->resinit3 =
2919 ENET_INIT_PARAM_MAGIC_RES_INIT3;
2920 ugeth->p_init_enet_param_shadow->resinit4 =
2921 ENET_INIT_PARAM_MAGIC_RES_INIT4;
2922 ugeth->p_init_enet_param_shadow->resinit5 =
2923 ENET_INIT_PARAM_MAGIC_RES_INIT5;
2924 ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
2925 ((u32) ug_info->numThreadsRx) << ENET_INIT_PARAM_RGF_SHIFT;
2926 ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
2927 ((u32) ug_info->numThreadsTx) << ENET_INIT_PARAM_TGF_SHIFT;
2929 ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
2930 ugeth->rx_glbl_pram_offset | ug_info->riscRx;
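/* rgftgfrxglobal packs the Rx/Tx thread count codes (the RGF and TGF fields)
 * together with the Rx global parameter RAM offset and the Rx RISC
 * allocation. */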
2931 if ((ug_info->largestexternallookupkeysize !=
2932 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE)
2933 && (ug_info->largestexternallookupkeysize !=
2934 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
2935 && (ug_info->largestexternallookupkeysize !=
2936 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) {
2937 if (netif_msg_ifup(ugeth))
2938 ugeth_err("%s: Invalid largest External Lookup Key Size.",
2942 ugeth->p_init_enet_param_shadow->largestexternallookupkeysize =
2943 ug_info->largestexternallookupkeysize;
2944 size = sizeof(struct ucc_geth_thread_rx_pram);
2945 if (ug_info->rxExtendedFiltering) {
2946 size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
2947 if (ug_info->largestexternallookupkeysize ==
2948 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
2950 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
2951 if (ug_info->largestexternallookupkeysize ==
2952 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
2954 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
2957 if ((ret_val = fill_init_enet_entries(ugeth, &(ugeth->
2958 p_init_enet_param_shadow->rxthread[0]),
2959 (u8) (numThreadsRxNumerical + 1)
2960 /* Rx needs one extra for terminator */
2961 , size, UCC_GETH_THREAD_RX_PRAM_ALIGNMENT,
2962 ug_info->riscRx, 1)) != 0) {
2963 if (netif_msg_ifup(ugeth))
2964 ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
2969 ugeth->p_init_enet_param_shadow->txglobal =
2970 ugeth->tx_glbl_pram_offset | ug_info->riscTx;
2972 if ((ret_val = fill_init_enet_entries(ugeth,
2973 &(ugeth->p_init_enet_param_shadow->
2974 txthread[0]), numThreadsTxNumerical,
2975 sizeof(struct ucc_geth_thread_tx_pram),
2976 UCC_GETH_THREAD_TX_PRAM_ALIGNMENT,
2977 ug_info->riscTx, 0)) != 0) {
2978 if (netif_msg_ifup(ugeth))
2979 ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
2984 /* Load Rx bds with buffers */
2985 for (i = 0; i < ug_info->numQueuesRx; i++) {
2986 if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) {
2987 if (netif_msg_ifup(ugeth))
2988 ugeth_err("%s: Can not fill Rx bds with buffers.",
2994 /* Allocate InitEnet command parameter structure */
2995 init_enet_pram_offset = qe_muram_alloc(sizeof(struct ucc_geth_init_pram), 4);
2996 if (IS_ERR_VALUE(init_enet_pram_offset)) {
2997 if (netif_msg_ifup(ugeth))
2999 ("%s: Can not allocate DPRAM memory for p_init_enet_pram.",
3004 p_init_enet_pram = (struct ucc_geth_init_pram __iomem *) qe_muram_addr(init_enet_pram_offset);
3006 /* Copy shadow InitEnet command parameter structure into PRAM */
3007 out_8(&p_init_enet_pram->resinit1,
3008 ugeth->p_init_enet_param_shadow->resinit1);
3009 out_8(&p_init_enet_pram->resinit2,
3010 ugeth->p_init_enet_param_shadow->resinit2);
3011 out_8(&p_init_enet_pram->resinit3,
3012 ugeth->p_init_enet_param_shadow->resinit3);
3013 out_8(&p_init_enet_pram->resinit4,
3014 ugeth->p_init_enet_param_shadow->resinit4);
3015 out_be16(&p_init_enet_pram->resinit5,
3016 ugeth->p_init_enet_param_shadow->resinit5);
3017 out_8(&p_init_enet_pram->largestexternallookupkeysize,
3018 ugeth->p_init_enet_param_shadow->largestexternallookupkeysize);
3019 out_be32(&p_init_enet_pram->rgftgfrxglobal,
3020 ugeth->p_init_enet_param_shadow->rgftgfrxglobal);
3021 for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_RX; i++)
3022 out_be32(&p_init_enet_pram->rxthread[i],
3023 ugeth->p_init_enet_param_shadow->rxthread[i]);
3024 out_be32(&p_init_enet_pram->txglobal,
3025 ugeth->p_init_enet_param_shadow->txglobal);
3026 for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_TX; i++)
3027 out_be32(&p_init_enet_pram->txthread[i],
3028 ugeth->p_init_enet_param_shadow->txthread[i]);
3030 /* Issue QE command */
3032 cecr_subblock = ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
3033 qe_issue_cmd(command, cecr_subblock, QE_CR_PROTOCOL_ETHERNET,
3034 init_enet_pram_offset);
3036 /* Free InitEnet command parameter */
3037 qe_muram_free(init_enet_pram_offset);
3042 /* This is called by the kernel when a frame is ready for transmission. */
3043 /* It is pointed to by the dev->hard_start_xmit function pointer */
3044 static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
3046 struct ucc_geth_private *ugeth = netdev_priv(dev);
3047 #ifdef CONFIG_UGETH_TX_ON_DEMAND
3048 struct ucc_fast_private *uccf;
3050 u8 __iomem *bd; /* BD pointer */
3054 ugeth_vdbg("%s: IN", __func__);
3056 spin_lock_irq(&ugeth->lock);
3058 dev->stats.tx_bytes += skb->len;
3060 /* Start from the next BD that should be filled */
3061 bd = ugeth->txBd[txQ];
3062 bd_status = in_be32((u32 __iomem *)bd);
3063 /* Save the skb pointer so we can free it later */
3064 ugeth->tx_skbuff[txQ][ugeth->skb_curtx[txQ]] = skb;
3066 /* Update the current skb pointer (wrapping if this was the last) */
3067 ugeth->skb_curtx[txQ] =
3068 (ugeth->skb_curtx[txQ] +
3069 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);
3071 /* set up the buffer descriptor */
3072 out_be32(&((struct qe_bd __iomem *)bd)->buf,
3073 dma_map_single(&ugeth->dev->dev, skb->data,
3074 skb->len, DMA_TO_DEVICE));
3076 /* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */
3078 bd_status = (bd_status & T_W) | T_R | T_I | T_L | skb->len;
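/* Keep only the wrap bit of the previous status, then mark the BD ready (T_R),
 * request an interrupt (T_I), flag it as the last BD of the frame (T_L) and
 * put the frame length in the low-order bits. */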
3080 /* set bd status and length */
3081 out_be32((u32 __iomem *)bd, bd_status);
3083 dev->trans_start = jiffies;
3085 /* Move to next BD in the ring */
3086 if (!(bd_status & T_W))
3087 bd += sizeof(struct qe_bd);
3088 else
3089 bd = ugeth->p_tx_bd_ring[txQ];
3091 /* If the next BD still needs to be cleaned up, then the bds
3092 are full. We need to tell the kernel to stop sending us stuff. */
3093 if (bd == ugeth->confBd[txQ]) {
3094 if (!netif_queue_stopped(dev))
3095 netif_stop_queue(dev);
3098 ugeth->txBd[txQ] = bd;
3100 if (ugeth->p_scheduler) {
3101 ugeth->cpucount[txQ]++;
3102 /* Indicate to QE that there are more Tx bds ready for transmission */
3104 /* This is done by writing a running counter of the bd
3105 count to the scheduler PRAM. */
3106 out_be16(ugeth->p_cpucount[txQ], ugeth->cpucount[txQ]);
3109 #ifdef CONFIG_UGETH_TX_ON_DEMAND
3110 uccf = ugeth->uccf;
3111 out_be16(uccf->p_utodr, UCC_FAST_TOD);
3113 spin_unlock_irq(&ugeth->lock);
3118 static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit)
3120 struct sk_buff *skb;
3122 u16 length, howmany = 0;
3125 struct net_device *dev;
3127 ugeth_vdbg("%s: IN", __func__);
3131 /* collect received buffers */
3132 bd = ugeth->rxBd[rxQ];
3134 bd_status = in_be32((u32 __iomem *)bd);
3136 /* while there are received buffers and BD is full (~R_E) */
3137 while (!((bd_status & (R_E)) || (--rx_work_limit < 0))) {
3138 bdBuffer = (u8 *) in_be32(&((struct qe_bd __iomem *)bd)->buf);
3139 length = (u16) ((bd_status & BD_LENGTH_MASK) - 4);
3140 skb = ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]];
3142 /* determine whether buffer is first, last, first and last
3143 (single buffer frame) or middle (not first and not last) */
3144 if (!skb ||
3145 (!(bd_status & (R_F | R_L))) ||
3146 (bd_status & R_ERRORS_FATAL)) {
3147 if (netif_msg_rx_err(ugeth))
3148 ugeth_err("%s, %d: ERROR!!! skb - 0x%08x",
3149 __func__, __LINE__, (u32) skb);
3150 if (skb)
3151 dev_kfree_skb_any(skb);
3153 ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL;
3154 dev->stats.rx_dropped++;
3155 } else {
3156 dev->stats.rx_packets++;
3159 /* Prep the skb for the packet */
3160 skb_put(skb, length);
3162 /* Tell the skb what kind of packet this is */
3163 skb->protocol = eth_type_trans(skb, ugeth->dev);
3165 dev->stats.rx_bytes += length;
3166 /* Send the packet up the stack */
3167 netif_receive_skb(skb);
3170 skb = get_new_skb(ugeth, bd);
3171 if (!skb) {
3172 if (netif_msg_rx_err(ugeth))
3173 ugeth_warn("%s: No Rx Data Buffer", __func__);
3174 dev->stats.rx_dropped++;
3178 ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = skb;
3180 /* update to point at the next skb */
3181 ugeth->skb_currx[rxQ] =
3182 (ugeth->skb_currx[rxQ] +
3183 1) & RX_RING_MOD_MASK(ugeth->ug_info->bdRingLenRx[rxQ]);
3185 if (bd_status & R_W)
3186 bd = ugeth->p_rx_bd_ring[rxQ];
3187 else
3188 bd += sizeof(struct qe_bd);
3190 bd_status = in_be32((u32 __iomem *)bd);
3193 ugeth->rxBd[rxQ] = bd;
3197 static int ucc_geth_tx(struct net_device *dev, u8 txQ)
3199 /* Start from the next BD that should be filled */
3200 struct ucc_geth_private *ugeth = netdev_priv(dev);
3201 u8 __iomem *bd; /* BD pointer */
3204 bd = ugeth->confBd[txQ];
3205 bd_status = in_be32((u32 __iomem *)bd);
3207 /* Normal processing. */
3208 while ((bd_status & T_R) == 0) {
3209 /* BD contains already transmitted buffer. */
3210 /* Handle the transmitted buffer and release */
3211 /* the BD to be used with the current frame */
3213 if ((bd == ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0))
3214 break;
3216 dev->stats.tx_packets++;
3218 /* Free the sk buffer associated with this TxBD */
3219 dev_kfree_skb_irq(ugeth->
3220 tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]]);
3221 ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL;
3222 ugeth->skb_dirtytx[txQ] =
3223 (ugeth->skb_dirtytx[txQ] +
3224 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);
3226 /* We freed a buffer, so now we can restart transmission */
3227 if (netif_queue_stopped(dev))
3228 netif_wake_queue(dev);
3230 /* Advance the confirmation BD pointer */
3231 if (!(bd_status & T_W))
3232 bd += sizeof(struct qe_bd);
3233 else
3234 bd = ugeth->p_tx_bd_ring[txQ];
3235 bd_status = in_be32((u32 __iomem *)bd);
3237 ugeth->confBd[txQ] = bd;
3241 static int ucc_geth_poll(struct napi_struct *napi, int budget)
3243 struct ucc_geth_private *ugeth = container_of(napi, struct ucc_geth_private, napi);
3244 struct ucc_geth_info *ug_info;
3247 ug_info = ugeth->ug_info;
3250 for (i = 0; i < ug_info->numQueuesRx; i++)
3251 howmany += ucc_geth_rx(ugeth, i, budget - howmany);
3253 if (howmany < budget) {
3254 netif_rx_complete(napi);
3255 setbits32(ugeth->uccf->p_uccm, UCCE_RX_EVENTS);
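/* Receiving less than the budget means the rings are drained: complete NAPI
 * and unmask the Rx events again (they were masked in the interrupt handler
 * before the poll was scheduled). */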
3261 static irqreturn_t ucc_geth_irq_handler(int irq, void *info)
3263 struct net_device *dev = info;
3264 struct ucc_geth_private *ugeth = netdev_priv(dev);
3265 struct ucc_fast_private *uccf;
3266 struct ucc_geth_info *ug_info;
3269 register u32 tx_mask;
3272 ugeth_vdbg("%s: IN", __func__);
3274 uccf = ugeth->uccf;
3275 ug_info = ugeth->ug_info;
3277 /* read and clear events */
3278 ucce = (u32) in_be32(uccf->p_ucce);
3279 uccm = (u32) in_be32(uccf->p_uccm);
3281 out_be32(uccf->p_ucce, ucce);
3283 /* check for receive events that require processing */
3284 if (ucce & UCCE_RX_EVENTS) {
3285 if (netif_rx_schedule_prep(&ugeth->napi)) {
3286 uccm &= ~UCCE_RX_EVENTS;
3287 out_be32(uccf->p_uccm, uccm);
3288 __netif_rx_schedule(&ugeth->napi);
3292 /* Tx event processing */
3293 if (ucce & UCCE_TX_EVENTS) {
3294 spin_lock(&ugeth->lock);
3295 tx_mask = UCC_GETH_UCCE_TXB0;
3296 for (i = 0; i < ug_info->numQueuesTx; i++) {
3297 if (ucce & tx_mask)
3298 ucc_geth_tx(dev, i);
3302 spin_unlock(&ugeth->lock);
3305 /* Errors and other events */
3306 if (ucce & UCCE_OTHER) {
3307 if (ucce & UCC_GETH_UCCE_BSY)
3308 dev->stats.rx_errors++;
3309 if (ucce & UCC_GETH_UCCE_TXE)
3310 dev->stats.tx_errors++;
3316 #ifdef CONFIG_NET_POLL_CONTROLLER
3318 * Polling 'interrupt' - used by things like netconsole to send skbs
3319 * without having to re-enable interrupts. It's not called while
3320 * the interrupt routine is executing.
3322 static void ucc_netpoll(struct net_device *dev)
3324 struct ucc_geth_private *ugeth = netdev_priv(dev);
3325 int irq = ugeth->ug_info->uf_info.irq;
3328 ucc_geth_irq_handler(irq, dev);
3331 #endif /* CONFIG_NET_POLL_CONTROLLER */
3333 /* Called when something needs to use the ethernet device */
3334 /* Returns 0 for success. */
3335 static int ucc_geth_open(struct net_device *dev)
3337 struct ucc_geth_private *ugeth = netdev_priv(dev);
3340 ugeth_vdbg("%s: IN", __func__);
3342 /* Test station address */
3343 if (dev->dev_addr[0] & ENET_GROUP_ADDR) {
3344 if (netif_msg_ifup(ugeth))
3345 ugeth_err("%s: Multicast address used for station address"
3346 " - is this what you wanted?", __func__);
3350 err = ucc_struct_init(ugeth);
3352 if (netif_msg_ifup(ugeth))
3353 ugeth_err("%s: Cannot configure internal struct, aborting.", dev->name);
3357 napi_enable(&ugeth->napi);
3359 err = ucc_geth_startup(ugeth);
3361 if (netif_msg_ifup(ugeth))
3362 ugeth_err("%s: Cannot configure net device, aborting.",
3367 err = adjust_enet_interface(ugeth);
3369 if (netif_msg_ifup(ugeth))
3370 ugeth_err("%s: Cannot configure net device, aborting.",
3375 /* Set MACSTNADDR1, MACSTNADDR2 */
3376 /* For more details see the hardware spec. */
3377 init_mac_station_addr_regs(dev->dev_addr[0],
3383 &ugeth->ug_regs->macstnaddr1,
3384 &ugeth->ug_regs->macstnaddr2);
3386 err = init_phy(dev);
3388 if (netif_msg_ifup(ugeth))
3389 ugeth_err("%s: Cannot initialize PHY, aborting.", dev->name);
3393 phy_start(ugeth->phydev);
3395 err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
3397 if (netif_msg_ifup(ugeth))
3398 ugeth_err("%s: Cannot enable net device, aborting.", dev->name);
3402 err = request_irq(ugeth->ug_info->uf_info.irq, ucc_geth_irq_handler,
3403 0, "UCC Geth", dev);
3405 if (netif_msg_ifup(ugeth))
3406 ugeth_err("%s: Cannot get IRQ for net device, aborting.",
3411 netif_start_queue(dev);
3416 napi_disable(&ugeth->napi);
3418 ucc_geth_stop(ugeth);
3422 /* Stops the kernel queue, and halts the controller */
3423 static int ucc_geth_close(struct net_device *dev)
3425 struct ucc_geth_private *ugeth = netdev_priv(dev);
3427 ugeth_vdbg("%s: IN", __func__);
3429 napi_disable(&ugeth->napi);
3431 ucc_geth_stop(ugeth);
3433 free_irq(ugeth->ug_info->uf_info.irq, ugeth->dev);
3435 phy_disconnect(ugeth->phydev);
3436 ugeth->phydev = NULL;
3438 netif_stop_queue(dev);
3443 /* Reopen device. This will reset the MAC and PHY. */
3444 static void ucc_geth_timeout_work(struct work_struct *work)
3446 struct ucc_geth_private *ugeth;
3447 struct net_device *dev;
3449 ugeth = container_of(work, struct ucc_geth_private, timeout_work);
3450 dev = ugeth->dev;
3452 ugeth_vdbg("%s: IN", __func__);
3454 dev->stats.tx_errors++;
3456 ugeth_dump_regs(ugeth);
3458 if (dev->flags & IFF_UP) {
3460 * Must reset MAC *and* PHY. This is done by reopening
3461 * the device.
3462 */
3463 ucc_geth_close(dev);
3464 ucc_geth_open(dev);
3467 netif_tx_schedule_all(dev);
3471 * ucc_geth_timeout gets called when a packet has not been
3472 * transmitted after a set amount of time.
3474 static void ucc_geth_timeout(struct net_device *dev)
3476 struct ucc_geth_private *ugeth = netdev_priv(dev);
3478 netif_carrier_off(dev);
3479 schedule_work(&ugeth->timeout_work);
3482 static phy_interface_t to_phy_interface(const char *phy_connection_type)
3484 if (strcasecmp(phy_connection_type, "mii") == 0)
3485 return PHY_INTERFACE_MODE_MII;
3486 if (strcasecmp(phy_connection_type, "gmii") == 0)
3487 return PHY_INTERFACE_MODE_GMII;
3488 if (strcasecmp(phy_connection_type, "tbi") == 0)
3489 return PHY_INTERFACE_MODE_TBI;
3490 if (strcasecmp(phy_connection_type, "rmii") == 0)
3491 return PHY_INTERFACE_MODE_RMII;
3492 if (strcasecmp(phy_connection_type, "rgmii") == 0)
3493 return PHY_INTERFACE_MODE_RGMII;
3494 if (strcasecmp(phy_connection_type, "rgmii-id") == 0)
3495 return PHY_INTERFACE_MODE_RGMII_ID;
3496 if (strcasecmp(phy_connection_type, "rgmii-txid") == 0)
3497 return PHY_INTERFACE_MODE_RGMII_TXID;
3498 if (strcasecmp(phy_connection_type, "rgmii-rxid") == 0)
3499 return PHY_INTERFACE_MODE_RGMII_RXID;
3500 if (strcasecmp(phy_connection_type, "rtbi") == 0)
3501 return PHY_INTERFACE_MODE_RTBI;
3503 return PHY_INTERFACE_MODE_MII;
3506 static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *match)
3508 struct device *device = &ofdev->dev;
3509 struct device_node *np = ofdev->node;
3510 struct device_node *mdio;
3511 struct net_device *dev = NULL;
3512 struct ucc_geth_private *ugeth = NULL;
3513 struct ucc_geth_info *ug_info;
3514 struct resource res;
3515 struct device_node *phy;
3516 int err, ucc_num, max_speed = 0;
3518 const u32 *fixed_link;
3519 const unsigned int *prop;
3521 const void *mac_addr;
3522 phy_interface_t phy_interface;
3523 static const int enet_to_speed[] = {
3524 SPEED_10, SPEED_10, SPEED_10,
3525 SPEED_100, SPEED_100, SPEED_100,
3526 SPEED_1000, SPEED_1000, SPEED_1000, SPEED_1000,
3528 static const phy_interface_t enet_to_phy_interface[] = {
3529 PHY_INTERFACE_MODE_MII, PHY_INTERFACE_MODE_RMII,
3530 PHY_INTERFACE_MODE_RGMII, PHY_INTERFACE_MODE_MII,
3531 PHY_INTERFACE_MODE_RMII, PHY_INTERFACE_MODE_RGMII,
3532 PHY_INTERFACE_MODE_GMII, PHY_INTERFACE_MODE_RGMII,
3533 PHY_INTERFACE_MODE_TBI, PHY_INTERFACE_MODE_RTBI,
3536 ugeth_vdbg("%s: IN", __func__);
3538 prop = of_get_property(np, "cell-index", NULL);
3540 prop = of_get_property(np, "device-id", NULL);
3545 ucc_num = *prop - 1;
3546 if ((ucc_num < 0) || (ucc_num > 7))
3549 ug_info = &ugeth_info[ucc_num];
3550 if (ug_info == NULL) {
3551 if (netif_msg_probe(&debug))
3552 ugeth_err("%s: [%d] Missing additional data!",
3557 ug_info->uf_info.ucc_num = ucc_num;
3559 sprop = of_get_property(np, "rx-clock-name", NULL);
3561 ug_info->uf_info.rx_clock = qe_clock_source(sprop);
3562 if ((ug_info->uf_info.rx_clock < QE_CLK_NONE) ||
3563 (ug_info->uf_info.rx_clock > QE_CLK24)) {
3565 "ucc_geth: invalid rx-clock-name property\n");
3569 prop = of_get_property(np, "rx-clock", NULL);
3571 /* If both rx-clock-name and rx-clock are missing,
3572 we want to tell people to use rx-clock-name. */
3574 "ucc_geth: missing rx-clock-name property\n");
3577 if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) {
3579 "ucc_geth: invalid rx-clock propperty\n");
3582 ug_info->uf_info.rx_clock = *prop;
3585 sprop = of_get_property(np, "tx-clock-name", NULL);
3587 ug_info->uf_info.tx_clock = qe_clock_source(sprop);
3588 if ((ug_info->uf_info.tx_clock < QE_CLK_NONE) ||
3589 (ug_info->uf_info.tx_clock > QE_CLK24)) {
3591 "ucc_geth: invalid tx-clock-name property\n");
3595 prop = of_get_property(np, "tx-clock", NULL);
3598 "ucc_geth: mising tx-clock-name property\n");
3601 if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) {
3603 "ucc_geth: invalid tx-clock property\n");
3606 ug_info->uf_info.tx_clock = *prop;
3609 err = of_address_to_resource(np, 0, &res);
3613 ug_info->uf_info.regs = res.start;
3614 ug_info->uf_info.irq = irq_of_parse_and_map(np, 0);
3615 fixed_link = of_get_property(np, "fixed-link", NULL);
3617 snprintf(ug_info->mdio_bus, MII_BUS_ID_SIZE, "0");
3618 ug_info->phy_address = fixed_link[0];
3621 ph = of_get_property(np, "phy-handle", NULL);
3622 phy = of_find_node_by_phandle(*ph);
3627 /* set the PHY address */
3628 prop = of_get_property(phy, "reg", NULL);
3631 ug_info->phy_address = *prop;
3633 /* Set the bus id */
3634 mdio = of_get_parent(phy);
3639 err = of_address_to_resource(mdio, 0, &res);
3645 snprintf(ug_info->mdio_bus, MII_BUS_ID_SIZE, "%x", res.start);
3648 /* get the phy interface type, or default to MII */
3649 prop = of_get_property(np, "phy-connection-type", NULL);
3651 /* handle interface property present in old trees */
3652 prop = of_get_property(phy, "interface", NULL);
3654 phy_interface = enet_to_phy_interface[*prop];
3655 max_speed = enet_to_speed[*prop];
3657 phy_interface = PHY_INTERFACE_MODE_MII;
3659 phy_interface = to_phy_interface((const char *)prop);
3662 /* get speed, or derive from PHY interface */
3664 switch (phy_interface) {
3665 case PHY_INTERFACE_MODE_GMII:
3666 case PHY_INTERFACE_MODE_RGMII:
3667 case PHY_INTERFACE_MODE_RGMII_ID:
3668 case PHY_INTERFACE_MODE_RGMII_RXID:
3669 case PHY_INTERFACE_MODE_RGMII_TXID:
3670 case PHY_INTERFACE_MODE_TBI:
3671 case PHY_INTERFACE_MODE_RTBI:
3672 max_speed = SPEED_1000;
3675 max_speed = SPEED_100;
3679 if (max_speed == SPEED_1000) {
3680 /* configure muram FIFOs for gigabit operation */
3681 ug_info->uf_info.urfs = UCC_GETH_URFS_GIGA_INIT;
3682 ug_info->uf_info.urfet = UCC_GETH_URFET_GIGA_INIT;
3683 ug_info->uf_info.urfset = UCC_GETH_URFSET_GIGA_INIT;
3684 ug_info->uf_info.utfs = UCC_GETH_UTFS_GIGA_INIT;
3685 ug_info->uf_info.utfet = UCC_GETH_UTFET_GIGA_INIT;
3686 ug_info->uf_info.utftt = UCC_GETH_UTFTT_GIGA_INIT;
3687 ug_info->numThreadsTx = UCC_GETH_NUM_OF_THREADS_4;
3688 ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_4;
3691 if (netif_msg_probe(&debug))
3692 printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d) \n",
3693 ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs,
3694 ug_info->uf_info.irq);
3696 /* Create an ethernet device instance */
3697 dev = alloc_etherdev(sizeof(*ugeth));
3702 ugeth = netdev_priv(dev);
3703 spin_lock_init(&ugeth->lock);
3705 /* Create CQs for hash tables */
3706 INIT_LIST_HEAD(&ugeth->group_hash_q);
3707 INIT_LIST_HEAD(&ugeth->ind_hash_q);
3709 dev_set_drvdata(device, dev);
3711 /* Set dev->base_addr to the UCC register region */
3712 dev->base_addr = (unsigned long)(ug_info->uf_info.regs);
3714 SET_NETDEV_DEV(dev, device);
3716 /* Fill in the dev structure */
3717 uec_set_ethtool_ops(dev);
3718 dev->open = ucc_geth_open;
3719 dev->hard_start_xmit = ucc_geth_start_xmit;
3720 dev->tx_timeout = ucc_geth_timeout;
3721 dev->watchdog_timeo = TX_TIMEOUT;
3722 INIT_WORK(&ugeth->timeout_work, ucc_geth_timeout_work);
3723 netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, UCC_GETH_DEV_WEIGHT);
3724 #ifdef CONFIG_NET_POLL_CONTROLLER
3725 dev->poll_controller = ucc_netpoll;
3727 dev->stop = ucc_geth_close;
3728 // dev->change_mtu = ucc_geth_change_mtu;
3730 dev->set_multicast_list = ucc_geth_set_multi;
3732 ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT);
3733 ugeth->phy_interface = phy_interface;
3734 ugeth->max_speed = max_speed;
3736 err = register_netdev(dev);
3738 if (netif_msg_probe(ugeth))
3739 ugeth_err("%s: Cannot register net device, aborting.",
3745 mac_addr = of_get_mac_address(np);
3746 if (mac_addr)
3747 memcpy(dev->dev_addr, mac_addr, 6);
3749 ugeth->ug_info = ug_info;
3755 static int ucc_geth_remove(struct of_device* ofdev)
3757 struct device *device = &ofdev->dev;
3758 struct net_device *dev = dev_get_drvdata(device);
3759 struct ucc_geth_private *ugeth = netdev_priv(dev);
3761 unregister_netdev(dev);
3763 ucc_geth_memclean(ugeth);
3764 dev_set_drvdata(device, NULL);
3769 static struct of_device_id ucc_geth_match[] = {
3772 .compatible = "ucc_geth",
3777 MODULE_DEVICE_TABLE(of, ucc_geth_match);
3779 static struct of_platform_driver ucc_geth_driver = {
3781 .match_table = ucc_geth_match,
3782 .probe = ucc_geth_probe,
3783 .remove = ucc_geth_remove,
3786 static int __init ucc_geth_init(void)
3790 ret = uec_mdio_init();
3795 if (netif_msg_drv(&debug))
3796 printk(KERN_INFO "ucc_geth: " DRV_DESC "\n");
3797 for (i = 0; i < 8; i++)
3798 memcpy(&(ugeth_info[i]), &ugeth_primary_info,
3799 sizeof(ugeth_primary_info));
3801 ret = of_register_platform_driver(&ucc_geth_driver);
3809 static void __exit ucc_geth_exit(void)
3811 of_unregister_platform_driver(&ucc_geth_driver);
3815 module_init(ucc_geth_init);
3816 module_exit(ucc_geth_exit);
3818 MODULE_AUTHOR("Freescale Semiconductor, Inc");
3819 MODULE_DESCRIPTION(DRV_DESC);
3820 MODULE_VERSION(DRV_VERSION);
3821 MODULE_LICENSE("GPL");