1 /**************************************************************************/
3 /* IBM eServer i/pSeries Virtual Ethernet Device Driver */
4 /* Copyright (C) 2003 IBM Corp. */
5 /* Originally written by Dave Larson (larson1@us.ibm.com) */
6 /* Maintained by Santiago Leon (santil@us.ibm.com) */
8 /* This program is free software; you can redistribute it and/or modify */
9 /* it under the terms of the GNU General Public License as published by */
10 /* the Free Software Foundation; either version 2 of the License, or */
11 /* (at your option) any later version. */
13 /* This program is distributed in the hope that it will be useful, */
14 /* but WITHOUT ANY WARRANTY; without even the implied warranty of */
15 /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
16 /* GNU General Public License for more details. */
18 /* You should have received a copy of the GNU General Public License */
19 /* along with this program; if not, write to the Free Software */
20 /* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 */
23 /* This module contains the implementation of a virtual ethernet device */
24 /* for use with IBM i/pSeries LPAR Linux. It utilizes the logical LAN */
25 /* option of the RS/6000 Platform Architecture to interface with virtual */
26 /* ethernet NICs that are presented to the partition by the hypervisor. */
28 /**************************************************************************/
31 - remove frag processing code - no longer needed
32 - add support for sysfs
33 - possibly remove procfs support
36 #include <linux/config.h>
37 #include <linux/module.h>
38 #include <linux/types.h>
39 #include <linux/errno.h>
40 #include <linux/ioport.h>
41 #include <linux/dma-mapping.h>
42 #include <linux/kernel.h>
43 #include <linux/netdevice.h>
44 #include <linux/etherdevice.h>
45 #include <linux/skbuff.h>
46 #include <linux/init.h>
47 #include <linux/delay.h>
49 #include <linux/ethtool.h>
50 #include <linux/proc_fs.h>
51 #include <asm/semaphore.h>
52 #include <asm/hvcall.h>
53 #include <asm/atomic.h>
54 #include <asm/iommu.h>
56 #include <asm/uaccess.h>
57 #include <linux/seq_file.h>
63 #define ibmveth_printk(fmt, args...) \
64 printk(KERN_INFO "%s: " fmt, __FILE__, ## args)
66 #define ibmveth_error_printk(fmt, args...) \
67 printk(KERN_ERR "(%s:%3.3d ua:%x) ERROR: " fmt, __FILE__, __LINE__ , adapter->vdev->unit_address, ## args)
70 #define ibmveth_debug_printk_no_adapter(fmt, args...) \
71 printk(KERN_DEBUG "(%s:%3.3d): " fmt, __FILE__, __LINE__ , ## args)
72 #define ibmveth_debug_printk(fmt, args...) \
73 printk(KERN_DEBUG "(%s:%3.3d ua:%x): " fmt, __FILE__, __LINE__ , adapter->vdev->unit_address, ## args)
74 #define ibmveth_assert(expr) \
76 printk(KERN_DEBUG "assertion failed (%s:%3.3d ua:%x): %s\n", __FILE__, __LINE__, adapter->vdev->unit_address, #expr); \
80 #define ibmveth_debug_printk_no_adapter(fmt, args...)
81 #define ibmveth_debug_printk(fmt, args...)
82 #define ibmveth_assert(expr)
85 static int ibmveth_open(struct net_device *dev);
86 static int ibmveth_close(struct net_device *dev);
87 static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
88 static int ibmveth_poll(struct net_device *dev, int *budget);
89 static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *dev);
90 static struct net_device_stats *ibmveth_get_stats(struct net_device *dev);
91 static void ibmveth_set_multicast_list(struct net_device *dev);
92 static int ibmveth_change_mtu(struct net_device *dev, int new_mtu);
93 static void ibmveth_proc_register_driver(void);
94 static void ibmveth_proc_unregister_driver(void);
95 static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter);
96 static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter);
97 static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
98 static inline void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
99 static struct kobj_type ktype_veth_pool;
101 #ifdef CONFIG_PROC_FS
102 #define IBMVETH_PROC_DIR "net/ibmveth"
103 static struct proc_dir_entry *ibmveth_proc_dir;
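/* each adapter gets a read-only statistics file under /proc/net/ibmveth,
   named after its network interface (for example /proc/net/ibmveth/eth0;
   the actual interface name depends on the system) */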
106 static const char ibmveth_driver_name[] = "ibmveth";
107 static const char ibmveth_driver_string[] = "IBM i/pSeries Virtual Ethernet Driver";
108 #define ibmveth_driver_version "1.03"
110 MODULE_AUTHOR("Santiago Leon <santil@us.ibm.com>");
111 MODULE_DESCRIPTION("IBM i/pSeries Virtual Ethernet Driver");
112 MODULE_LICENSE("GPL");
113 MODULE_VERSION(ibmveth_driver_version);
115 /* simple methods of getting data from the current rxq entry */
116 static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
118 return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].toggle == adapter->rx_queue.toggle);
121 static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
123 return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].valid);
126 static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
128 return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].offset);
131 static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
133 return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);
136 /* set up the initial settings for a buffer pool */
137 static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, u32 pool_index, u32 pool_size, u32 buff_size, u32 pool_active)
139 pool->size = pool_size;
140 pool->index = pool_index;
141 pool->buff_size = buff_size;
142 pool->threshold = pool_size / 2;
143 pool->active = pool_active;
146 /* allocate and set up a buffer pool - called during open */
147 static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
151 pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);
153 if(!pool->free_map) {
157 pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
158 if(!pool->dma_addr) {
159 kfree(pool->free_map);
160 pool->free_map = NULL;
164 pool->skbuff = kmalloc(sizeof(void*) * pool->size, GFP_KERNEL);
167 kfree(pool->dma_addr);
168 pool->dma_addr = NULL;
170 kfree(pool->free_map);
171 pool->free_map = NULL;
175 memset(pool->skbuff, 0, sizeof(void*) * pool->size);
176 memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size);
178 for(i = 0; i < pool->size; ++i) {
179 pool->free_map[i] = i;
182 atomic_set(&pool->available, 0);
183 pool->producer_index = 0;
184 pool->consumer_index = 0;
189 /* replenish the buffers for a pool. note that we don't need to
190 * skb_reserve these since they are used for incoming...
192 static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struct ibmveth_buff_pool *pool)
195 u32 count = pool->size - atomic_read(&pool->available);
196 u32 buffers_added = 0;
200 for(i = 0; i < count; ++i) {
202 unsigned int free_index, index;
204 union ibmveth_buf_desc desc;
205 unsigned long lpar_rc;
208 skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
211 ibmveth_debug_printk("replenish: unable to allocate skb\n");
212 adapter->replenish_no_mem++;
216 free_index = pool->consumer_index++ % pool->size;
217 index = pool->free_map[free_index];
219 ibmveth_assert(index != IBM_VETH_INVALID_MAP);
220 ibmveth_assert(pool->skbuff[index] == NULL);
222 dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
223 pool->buff_size, DMA_FROM_DEVICE);
225 pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
226 pool->dma_addr[index] = dma_addr;
227 pool->skbuff[index] = skb;
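/* the correlator packs the pool index (upper 32 bits) and the buffer index
   (lower 32 bits); it is stashed at the start of the buffer so the receive
   path can map a completed buffer back to this skb */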
229 correlator = ((u64)pool->index << 32) | index;
230 *(u64*)skb->data = correlator;
233 desc.fields.valid = 1;
234 desc.fields.length = pool->buff_size;
235 desc.fields.address = dma_addr;
237 lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
239 if(lpar_rc != H_SUCCESS) {
240 pool->free_map[free_index] = index;
241 pool->skbuff[index] = NULL;
242 pool->consumer_index--;
243 dma_unmap_single(&adapter->vdev->dev,
244 pool->dma_addr[index], pool->buff_size,
246 dev_kfree_skb_any(skb);
247 adapter->replenish_add_buff_failure++;
251 adapter->replenish_add_buff_success++;
256 atomic_add(buffers_added, &(pool->available));
259 /* replenish all active buffer pools and refresh the rx no-buffer statistic */
260 static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
264 adapter->replenish_task_cycles++;
266 for(i = 0; i < IbmVethNumBufferPools; i++)
267 if(adapter->rx_buff_pool[i].active)
268 ibmveth_replenish_buffer_pool(adapter,
269 &adapter->rx_buff_pool[i]);
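/* the last 8 bytes of the buffer list page hold the firmware-maintained
   count of packets dropped because no receive buffer was available */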
271 adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);
274 /* empty and free a buffer pool - also used to do cleanup in error paths */
275 static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, struct ibmveth_buff_pool *pool)
279 kfree(pool->free_map);
280 pool->free_map = NULL;
282 if(pool->skbuff && pool->dma_addr) {
283 for(i = 0; i < pool->size; ++i) {
284 struct sk_buff *skb = pool->skbuff[i];
286 dma_unmap_single(&adapter->vdev->dev,
290 dev_kfree_skb_any(skb);
291 pool->skbuff[i] = NULL;
297 kfree(pool->dma_addr);
298 pool->dma_addr = NULL;
307 /* remove a buffer from a pool */
308 static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter, u64 correlator)
310 unsigned int pool = correlator >> 32;
311 unsigned int index = correlator & 0xffffffffUL;
312 unsigned int free_index;
315 ibmveth_assert(pool < IbmVethNumBufferPools);
316 ibmveth_assert(index < adapter->rx_buff_pool[pool].size);
318 skb = adapter->rx_buff_pool[pool].skbuff[index];
320 ibmveth_assert(skb != NULL);
322 adapter->rx_buff_pool[pool].skbuff[index] = NULL;
324 dma_unmap_single(&adapter->vdev->dev,
325 adapter->rx_buff_pool[pool].dma_addr[index],
326 adapter->rx_buff_pool[pool].buff_size,
329 free_index = adapter->rx_buff_pool[pool].producer_index++ % adapter->rx_buff_pool[pool].size;
330 adapter->rx_buff_pool[pool].free_map[free_index] = index;
334 atomic_dec(&(adapter->rx_buff_pool[pool].available));
337 /* get the current buffer on the rx queue */
338 static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter)
340 u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
341 unsigned int pool = correlator >> 32;
342 unsigned int index = correlator & 0xffffffffUL;
344 ibmveth_assert(pool < IbmVethNumBufferPools);
345 ibmveth_assert(index < adapter->rx_buff_pool[pool].size);
347 return adapter->rx_buff_pool[pool].skbuff[index];
350 /* recycle the current buffer on the rx queue */
351 static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
353 u32 q_index = adapter->rx_queue.index;
354 u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
355 unsigned int pool = correlator >> 32;
356 unsigned int index = correlator & 0xffffffffUL;
357 union ibmveth_buf_desc desc;
358 unsigned long lpar_rc;
360 ibmveth_assert(pool < IbmVethNumBufferPools);
361 ibmveth_assert(index < adapter->rx_buff_pool[pool].size);
363 if(!adapter->rx_buff_pool[pool].active) {
364 ibmveth_rxq_harvest_buffer(adapter);
365 ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
370 desc.fields.valid = 1;
371 desc.fields.length = adapter->rx_buff_pool[pool].buff_size;
372 desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];
374 lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
376 if(lpar_rc != H_SUCCESS) {
377 ibmveth_debug_printk("h_add_logical_lan_buffer failed during recycle rc=%ld\n", lpar_rc);
378 ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
381 if(++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
382 adapter->rx_queue.index = 0;
383 adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
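/* pull the current buffer off the receive queue without returning it to
   the hypervisor, then advance the queue index and toggle */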
387 static inline void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
389 ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
391 if(++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
392 adapter->rx_queue.index = 0;
393 adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
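/* unmap and free the buffer list page, filter list page and receive queue,
   and release any active buffer pools - used on close and in error paths */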
397 static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
401 if(adapter->buffer_list_addr != NULL) {
402 if(!dma_mapping_error(adapter->buffer_list_dma)) {
403 dma_unmap_single(&adapter->vdev->dev,
404 adapter->buffer_list_dma, 4096,
406 adapter->buffer_list_dma = DMA_ERROR_CODE;
408 free_page((unsigned long)adapter->buffer_list_addr);
409 adapter->buffer_list_addr = NULL;
412 if(adapter->filter_list_addr != NULL) {
413 if(!dma_mapping_error(adapter->filter_list_dma)) {
414 dma_unmap_single(&adapter->vdev->dev,
415 adapter->filter_list_dma, 4096,
417 adapter->filter_list_dma = DMA_ERROR_CODE;
419 free_page((unsigned long)adapter->filter_list_addr);
420 adapter->filter_list_addr = NULL;
423 if(adapter->rx_queue.queue_addr != NULL) {
424 if(!dma_mapping_error(adapter->rx_queue.queue_dma)) {
425 dma_unmap_single(&adapter->vdev->dev,
426 adapter->rx_queue.queue_dma,
427 adapter->rx_queue.queue_len,
429 adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
431 kfree(adapter->rx_queue.queue_addr);
432 adapter->rx_queue.queue_addr = NULL;
435 for(i = 0; i<IbmVethNumBufferPools; i++)
436 if (adapter->rx_buff_pool[i].active)
437 ibmveth_free_buffer_pool(adapter,
438 &adapter->rx_buff_pool[i]);
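/* open: allocate and DMA-map the buffer list, filter list and receive queue,
   register the logical LAN with the hypervisor, set up the active buffer
   pools, request the VIO interrupt and start the transmit queue */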
441 static int ibmveth_open(struct net_device *netdev)
443 struct ibmveth_adapter *adapter = netdev->priv;
446 unsigned long lpar_rc;
448 union ibmveth_buf_desc rxq_desc;
451 ibmveth_debug_printk("open starting\n");
453 for(i = 0; i<IbmVethNumBufferPools; i++)
454 rxq_entries += adapter->rx_buff_pool[i].size;
456 adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
457 adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
459 if(!adapter->buffer_list_addr || !adapter->filter_list_addr) {
460 ibmveth_error_printk("unable to allocate filter or buffer list pages\n");
461 ibmveth_cleanup(adapter);
465 adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) * rxq_entries;
466 adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len, GFP_KERNEL);
468 if(!adapter->rx_queue.queue_addr) {
469 ibmveth_error_printk("unable to allocate rx queue pages\n");
470 ibmveth_cleanup(adapter);
474 adapter->buffer_list_dma = dma_map_single(&adapter->vdev->dev,
475 adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
476 adapter->filter_list_dma = dma_map_single(&adapter->vdev->dev,
477 adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
478 adapter->rx_queue.queue_dma = dma_map_single(&adapter->vdev->dev,
479 adapter->rx_queue.queue_addr,
480 adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);
482 if((dma_mapping_error(adapter->buffer_list_dma) ) ||
483 (dma_mapping_error(adapter->filter_list_dma)) ||
484 (dma_mapping_error(adapter->rx_queue.queue_dma))) {
485 ibmveth_error_printk("unable to map filter or buffer list pages\n");
486 ibmveth_cleanup(adapter);
490 adapter->rx_queue.index = 0;
491 adapter->rx_queue.num_slots = rxq_entries;
492 adapter->rx_queue.toggle = 1;
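/* on this big-endian platform the 6-byte MAC lands in the high-order bytes
   of the u64; shift it down so the 48-bit address sits in the low-order bits
   as expected by h_register_logical_lan */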
494 memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
495 mac_address = mac_address >> 16;
498 rxq_desc.fields.valid = 1;
499 rxq_desc.fields.length = adapter->rx_queue.queue_len;
500 rxq_desc.fields.address = adapter->rx_queue.queue_dma;
502 ibmveth_debug_printk("buffer list @ 0x%p\n", adapter->buffer_list_addr);
503 ibmveth_debug_printk("filter list @ 0x%p\n", adapter->filter_list_addr);
504 ibmveth_debug_printk("receive q @ 0x%p\n", adapter->rx_queue.queue_addr);
507 lpar_rc = h_register_logical_lan(adapter->vdev->unit_address,
508 adapter->buffer_list_dma,
510 adapter->filter_list_dma,
513 if(lpar_rc != H_SUCCESS) {
514 ibmveth_error_printk("h_register_logical_lan failed with %ld\n", lpar_rc);
515 ibmveth_error_printk("buffer TCE:0x%lx filter TCE:0x%lx rxq desc:0x%lx MAC:0x%lx\n",
516 adapter->buffer_list_dma,
517 adapter->filter_list_dma,
520 ibmveth_cleanup(adapter);
524 for(i = 0; i<IbmVethNumBufferPools; i++) {
525 if(!adapter->rx_buff_pool[i].active)
527 if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
528 ibmveth_error_printk("unable to alloc pool\n");
529 adapter->rx_buff_pool[i].active = 0;
530 ibmveth_cleanup(adapter);
535 ibmveth_debug_printk("registering irq 0x%x\n", netdev->irq);
536 if((rc = request_irq(netdev->irq, &ibmveth_interrupt, 0, netdev->name, netdev)) != 0) {
537 ibmveth_error_printk("unable to request irq 0x%x, rc %d\n", netdev->irq, rc);
539 rc = h_free_logical_lan(adapter->vdev->unit_address);
540 } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));
542 ibmveth_cleanup(adapter);
546 ibmveth_debug_printk("initial replenish cycle\n");
547 ibmveth_interrupt(netdev->irq, netdev, NULL);
549 netif_start_queue(netdev);
551 ibmveth_debug_printk("open complete\n");
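/* close: stop the queue (unless a pool reconfiguration is in progress),
   free the interrupt, ask the hypervisor to free the logical LAN (retrying
   while busy) and release all resources */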
556 static int ibmveth_close(struct net_device *netdev)
558 struct ibmveth_adapter *adapter = netdev->priv;
561 ibmveth_debug_printk("close starting\n");
563 if (!adapter->pool_config)
564 netif_stop_queue(netdev);
566 free_irq(netdev->irq, netdev);
569 lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
570 } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));
572 if(lpar_rc != H_SUCCESS)
574 ibmveth_error_printk("h_free_logical_lan failed with %lx, continuing with close\n",
578 adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);
580 ibmveth_cleanup(adapter);
582 ibmveth_debug_printk("close complete\n");
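/* ethtool reporting: the virtual adapter has no physical PHY, so advertise
   a fixed 1 Gb/s full-duplex fibre link */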
587 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) {
588 cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE);
589 cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | ADVERTISED_FIBRE);
590 cmd->speed = SPEED_1000;
591 cmd->duplex = DUPLEX_FULL;
592 cmd->port = PORT_FIBRE;
593 cmd->phy_address = 0;
594 cmd->transceiver = XCVR_INTERNAL;
595 cmd->autoneg = AUTONEG_ENABLE;
601 static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info) {
602 strncpy(info->driver, ibmveth_driver_name, sizeof(info->driver) - 1);
603 strncpy(info->version, ibmveth_driver_version, sizeof(info->version) - 1);
606 static u32 netdev_get_link(struct net_device *dev) {
610 static struct ethtool_ops netdev_ethtool_ops = {
611 .get_drvinfo = netdev_get_drvinfo,
612 .get_settings = netdev_get_settings,
613 .get_link = netdev_get_link,
614 .get_sg = ethtool_op_get_sg,
615 .get_tx_csum = ethtool_op_get_tx_csum,
618 static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
623 #define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))
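/* transmit: DMA-map the linear part of the skb plus any page fragments,
   build up to IbmVethMaxSendFrags descriptors, and hand the frame to the
   hypervisor with h_send_logical_lan, retrying while it reports busy */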
625 static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
627 struct ibmveth_adapter *adapter = netdev->priv;
628 union ibmveth_buf_desc desc[IbmVethMaxSendFrags];
629 unsigned long lpar_rc;
630 int nfrags = 0, curfrag;
631 unsigned long correlator;
633 unsigned int retry_count;
634 unsigned int tx_dropped = 0;
635 unsigned int tx_bytes = 0;
636 unsigned int tx_packets = 0;
637 unsigned int tx_send_failed = 0;
638 unsigned int tx_map_failed = 0;
641 if ((skb_shinfo(skb)->nr_frags + 1) > IbmVethMaxSendFrags) {
646 memset(&desc, 0, sizeof(desc));
648 /* nfrags = number of frags after the initial fragment */
649 nfrags = skb_shinfo(skb)->nr_frags;
652 adapter->tx_multidesc_send++;
654 /* map the initial fragment */
655 desc[0].fields.length = nfrags ? skb->len - skb->data_len : skb->len;
656 desc[0].fields.address = dma_map_single(&adapter->vdev->dev, skb->data,
657 desc[0].fields.length, DMA_TO_DEVICE);
658 desc[0].fields.valid = 1;
660 if(dma_mapping_error(desc[0].fields.address)) {
661 ibmveth_error_printk("tx: unable to map initial fragment\n");
669 /* map fragments past the initial portion if there are any */
671 skb_frag_t *frag = &skb_shinfo(skb)->frags[curfrag];
672 desc[curfrag+1].fields.address
673 = dma_map_single(&adapter->vdev->dev,
674 page_address(frag->page) + frag->page_offset,
675 frag->size, DMA_TO_DEVICE);
676 desc[curfrag+1].fields.length = frag->size;
677 desc[curfrag+1].fields.valid = 1;
679 if(dma_mapping_error(desc[curfrag+1].fields.address)) {
680 ibmveth_error_printk("tx: unable to map fragment %d\n", curfrag);
683 /* Free all the mappings we just created */
684 while(curfrag < nfrags) {
685 dma_unmap_single(&adapter->vdev->dev,
686 desc[curfrag+1].fields.address,
687 desc[curfrag+1].fields.length,
695 /* send the frame. Arbitrarily set retrycount to 1024 */
699 lpar_rc = h_send_logical_lan(adapter->vdev->unit_address,
707 } while ((lpar_rc == H_BUSY) && (retry_count--));
709 if(lpar_rc != H_SUCCESS && lpar_rc != H_DROPPED) {
711 ibmveth_error_printk("tx: h_send_logical_lan failed with rc=%ld\n", lpar_rc);
712 for(i = 0; i < 6; i++) {
713 ibmveth_error_printk("tx: desc[%i] valid=%d, len=%d, address=0x%x\n", i,
714 desc[i].fields.valid, desc[i].fields.length, desc[i].fields.address);
720 tx_bytes += skb->len;
721 netdev->trans_start = jiffies;
725 dma_unmap_single(&adapter->vdev->dev,
726 desc[nfrags].fields.address,
727 desc[nfrags].fields.length, DMA_TO_DEVICE);
728 } while(--nfrags >= 0);
730 out: spin_lock_irqsave(&adapter->stats_lock, flags);
731 adapter->stats.tx_dropped += tx_dropped;
732 adapter->stats.tx_bytes += tx_bytes;
733 adapter->stats.tx_packets += tx_packets;
734 adapter->tx_send_failed += tx_send_failed;
735 adapter->tx_map_failed += tx_map_failed;
736 spin_unlock_irqrestore(&adapter->stats_lock, flags);
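/* NAPI poll: process up to the quota of receive queue entries, recycling
   invalid buffers and passing good frames up the stack, then replenish the
   buffer pools; re-enable interrupts only once the queue is drained */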
742 static int ibmveth_poll(struct net_device *netdev, int *budget)
744 struct ibmveth_adapter *adapter = netdev->priv;
745 int max_frames_to_process = netdev->quota;
746 int frames_processed = 0;
748 unsigned long lpar_rc;
752 struct net_device *netdev = adapter->netdev;
754 if(ibmveth_rxq_pending_buffer(adapter)) {
759 if(!ibmveth_rxq_buffer_valid(adapter)) {
760 wmb(); /* suggested by larson1 */
761 adapter->rx_invalid_buffer++;
762 ibmveth_debug_printk("recycling invalid buffer\n");
763 ibmveth_rxq_recycle_buffer(adapter);
765 int length = ibmveth_rxq_frame_length(adapter);
766 int offset = ibmveth_rxq_frame_offset(adapter);
767 skb = ibmveth_rxq_get_buffer(adapter);
769 ibmveth_rxq_harvest_buffer(adapter);
771 skb_reserve(skb, offset);
772 skb_put(skb, length);
774 skb->protocol = eth_type_trans(skb, netdev);
776 netif_receive_skb(skb); /* send it up */
778 adapter->stats.rx_packets++;
779 adapter->stats.rx_bytes += length;
781 netdev->last_rx = jiffies;
786 } while(more_work && (frames_processed < max_frames_to_process));
788 ibmveth_replenish_task(adapter);
791 /* more work to do - return that we are not done yet */
792 netdev->quota -= frames_processed;
793 *budget -= frames_processed;
797 /* we think we are done - reenable interrupts, then check once more to make sure we are done */
798 lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_ENABLE);
800 ibmveth_assert(lpar_rc == H_SUCCESS);
802 netif_rx_complete(netdev);
804 if(ibmveth_rxq_pending_buffer(adapter) && netif_rx_reschedule(netdev, frames_processed))
806 lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
807 ibmveth_assert(lpar_rc == H_SUCCESS);
812 netdev->quota -= frames_processed;
813 *budget -= frames_processed;
815 /* we really are done */
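/* interrupt handler: disable further VIO interrupts and schedule the
   poll routine */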
819 static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
821 struct net_device *netdev = dev_instance;
822 struct ibmveth_adapter *adapter = netdev->priv;
823 unsigned long lpar_rc;
825 if(netif_rx_schedule_prep(netdev)) {
826 lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
827 ibmveth_assert(lpar_rc == H_SUCCESS);
828 __netif_rx_schedule(netdev);
833 static struct net_device_stats *ibmveth_get_stats(struct net_device *dev)
835 struct ibmveth_adapter *adapter = dev->priv;
836 return &adapter->stats;
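/* update the hypervisor's multicast filter: disable filtering entirely in
   promiscuous mode or when the list exceeds the filter size, otherwise
   reload the filter table one address at a time */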
839 static void ibmveth_set_multicast_list(struct net_device *netdev)
841 struct ibmveth_adapter *adapter = netdev->priv;
842 unsigned long lpar_rc;
844 if((netdev->flags & IFF_PROMISC) || (netdev->mc_count > adapter->mcastFilterSize)) {
845 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
846 IbmVethMcastEnableRecv |
847 IbmVethMcastDisableFiltering,
849 if(lpar_rc != H_SUCCESS) {
850 ibmveth_error_printk("h_multicast_ctrl rc=%ld when entering promisc mode\n", lpar_rc);
853 struct dev_mc_list *mclist = netdev->mc_list;
855 /* clear the filter table & disable filtering */
856 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
857 IbmVethMcastEnableRecv |
858 IbmVethMcastDisableFiltering |
859 IbmVethMcastClearFilterTable,
861 if(lpar_rc != H_SUCCESS) {
862 ibmveth_error_printk("h_multicast_ctrl rc=%ld when attempting to clear filter table\n", lpar_rc);
864 /* add the addresses to the filter table */
865 for(i = 0; i < netdev->mc_count; ++i, mclist = mclist->next) {
866 /* add the multicast address to the filter table */
867 unsigned long mcast_addr = 0;
868 memcpy(((char *)&mcast_addr)+2, mclist->dmi_addr, 6);
869 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
870 IbmVethMcastAddFilter,
872 if(lpar_rc != H_SUCCESS) {
873 ibmveth_error_printk("h_multicast_ctrl rc=%ld when adding an entry to the filter table\n", lpar_rc);
877 /* re-enable filtering */
878 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
879 IbmVethMcastEnableFiltering,
881 if(lpar_rc != H_SUCCESS) {
882 ibmveth_error_printk("h_multicast_ctrl rc=%ld when enabling filtering\n", lpar_rc);
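/* the new MTU is acceptable only if an active buffer pool can hold a
   frame of that size plus the buffer overhead */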
887 static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
889 struct ibmveth_adapter *adapter = dev->priv;
890 int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
893 if (new_mtu < IBMVETH_MAX_MTU)
896 /* Look for an active buffer pool that can hold the new MTU */
897 for(i = 0; i<IbmVethNumBufferPools; i++) {
898 if (!adapter->rx_buff_pool[i].active)
900 if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
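/* probe: read the MAC address and multicast filter size from the device
   tree, allocate and initialize the net_device and its buffer pools,
   register the pool sysfs objects and finally the network interface */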
908 static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
911 struct net_device *netdev;
912 struct ibmveth_adapter *adapter = NULL;
914 unsigned char *mac_addr_p;
915 unsigned int *mcastFilterSize_p;
918 ibmveth_debug_printk_no_adapter("entering ibmveth_probe for UA 0x%x\n",
921 mac_addr_p = (unsigned char *) vio_get_attribute(dev, VETH_MAC_ADDR, 0);
923 printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find VETH_MAC_ADDR "
924 "attribute\n", __FILE__, __LINE__);
928 mcastFilterSize_p= (unsigned int *) vio_get_attribute(dev, VETH_MCAST_FILTER_SIZE, 0);
929 if(!mcastFilterSize_p) {
930 printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find "
931 "VETH_MCAST_FILTER_SIZE attribute\n",
936 netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));
941 SET_MODULE_OWNER(netdev);
943 adapter = netdev->priv;
944 memset(adapter, 0, sizeof(*adapter));
945 dev->dev.driver_data = netdev;
948 adapter->netdev = netdev;
949 adapter->mcastFilterSize= *mcastFilterSize_p;
950 adapter->pool_config = 0;
952 /* Some older boxes running PHYP non-natively have an OF that
953 returns an 8-byte local-mac-address field (and the first
954 2 bytes have to be ignored) while newer boxes' OF return
955 a 6-byte field. Note that IEEE 1275 specifies that
956 local-mac-address must be a 6-byte field.
957 The RPA doc specifies that the low two bits of the first byte must be 0b10, so
958 we'll just look for it to solve this 8 vs. 6 byte field issue */
960 if ((*mac_addr_p & 0x3) != 0x02)
963 adapter->mac_addr = 0;
964 memcpy(&adapter->mac_addr, mac_addr_p, 6);
966 adapter->liobn = dev->iommu_table->it_index;
968 netdev->irq = dev->irq;
969 netdev->open = ibmveth_open;
970 netdev->poll = ibmveth_poll;
972 netdev->stop = ibmveth_close;
973 netdev->hard_start_xmit = ibmveth_start_xmit;
974 netdev->get_stats = ibmveth_get_stats;
975 netdev->set_multicast_list = ibmveth_set_multicast_list;
976 netdev->do_ioctl = ibmveth_ioctl;
977 netdev->ethtool_ops = &netdev_ethtool_ops;
978 netdev->change_mtu = ibmveth_change_mtu;
979 SET_NETDEV_DEV(netdev, &dev->dev);
980 netdev->features |= NETIF_F_LLTX;
981 spin_lock_init(&adapter->stats_lock);
983 memcpy(&netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);
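/* expose each receive buffer pool as a sysfs kobject under the vio device
   so its size, buffer size and active state can be tuned at runtime */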
985 for(i = 0; i<IbmVethNumBufferPools; i++) {
986 struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
987 ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
988 pool_count[i], pool_size[i],
990 kobj->parent = &dev->dev.kobj;
991 sprintf(kobj->name, "pool%d", i);
992 kobj->ktype = &ktype_veth_pool;
993 kobject_register(kobj);
996 ibmveth_debug_printk("adapter @ 0x%p\n", adapter);
998 adapter->buffer_list_dma = DMA_ERROR_CODE;
999 adapter->filter_list_dma = DMA_ERROR_CODE;
1000 adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
1002 ibmveth_debug_printk("registering netdev...\n");
1004 rc = register_netdev(netdev);
1007 ibmveth_debug_printk("failed to register netdev rc=%d\n", rc);
1008 free_netdev(netdev);
1012 ibmveth_debug_printk("registered\n");
1014 ibmveth_proc_register_adapter(adapter);
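/* remove: tear down the sysfs pool objects, unregister the network device
   and proc entry, and free the net_device */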
1019 static int __devexit ibmveth_remove(struct vio_dev *dev)
1021 struct net_device *netdev = dev->dev.driver_data;
1022 struct ibmveth_adapter *adapter = netdev->priv;
1025 for(i = 0; i<IbmVethNumBufferPools; i++)
1026 kobject_unregister(&adapter->rx_buff_pool[i].kobj);
1028 unregister_netdev(netdev);
1030 ibmveth_proc_unregister_adapter(adapter);
1032 free_netdev(netdev);
1036 #ifdef CONFIG_PROC_FS
1037 static void ibmveth_proc_register_driver(void)
1039 ibmveth_proc_dir = proc_mkdir(IBMVETH_PROC_DIR, NULL);
1040 if (ibmveth_proc_dir) {
1041 SET_MODULE_OWNER(ibmveth_proc_dir);
1045 static void ibmveth_proc_unregister_driver(void)
1047 remove_proc_entry(IBMVETH_PROC_DIR, NULL);
1050 static void *ibmveth_seq_start(struct seq_file *seq, loff_t *pos)
1059 static void *ibmveth_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1065 static void ibmveth_seq_stop(struct seq_file *seq, void *v)
1069 static int ibmveth_seq_show(struct seq_file *seq, void *v)
1071 struct ibmveth_adapter *adapter = seq->private;
1072 char *current_mac = ((char*) &adapter->netdev->dev_addr);
1073 char *firmware_mac = ((char*) &adapter->mac_addr) ;
1075 seq_printf(seq, "%s %s\n\n", ibmveth_driver_string, ibmveth_driver_version);
1077 seq_printf(seq, "Unit Address: 0x%x\n", adapter->vdev->unit_address);
1078 seq_printf(seq, "LIOBN: 0x%lx\n", adapter->liobn);
1079 seq_printf(seq, "Current MAC: %02X:%02X:%02X:%02X:%02X:%02X\n",
1080 current_mac[0], current_mac[1], current_mac[2],
1081 current_mac[3], current_mac[4], current_mac[5]);
1082 seq_printf(seq, "Firmware MAC: %02X:%02X:%02X:%02X:%02X:%02X\n",
1083 firmware_mac[0], firmware_mac[1], firmware_mac[2],
1084 firmware_mac[3], firmware_mac[4], firmware_mac[5]);
1086 seq_printf(seq, "\nAdapter Statistics:\n");
1087 seq_printf(seq, " TX: skbuffs linearized: %ld\n", adapter->tx_linearized);
1088 seq_printf(seq, " multi-descriptor sends: %ld\n", adapter->tx_multidesc_send);
1089 seq_printf(seq, " skb_linearize failures: %ld\n", adapter->tx_linearize_failed);
1090 seq_printf(seq, " vio_map_single failures: %ld\n", adapter->tx_map_failed);
1091 seq_printf(seq, " send failures: %ld\n", adapter->tx_send_failed);
1092 seq_printf(seq, " RX: replenish task cycles: %ld\n", adapter->replenish_task_cycles);
1093 seq_printf(seq, " alloc_skb_failures: %ld\n", adapter->replenish_no_mem);
1094 seq_printf(seq, " add buffer failures: %ld\n", adapter->replenish_add_buff_failure);
1095 seq_printf(seq, " invalid buffers: %ld\n", adapter->rx_invalid_buffer);
1096 seq_printf(seq, " no buffers: %ld\n", adapter->rx_no_buffer);
1100 static struct seq_operations ibmveth_seq_ops = {
1101 .start = ibmveth_seq_start,
1102 .next = ibmveth_seq_next,
1103 .stop = ibmveth_seq_stop,
1104 .show = ibmveth_seq_show,
1107 static int ibmveth_proc_open(struct inode *inode, struct file *file)
1109 struct seq_file *seq;
1110 struct proc_dir_entry *proc;
1113 rc = seq_open(file, &ibmveth_seq_ops);
1115 /* recover the pointer buried in proc_dir_entry data */
1116 seq = file->private_data;
1118 seq->private = proc->data;
1123 static struct file_operations ibmveth_proc_fops = {
1124 .owner = THIS_MODULE,
1125 .open = ibmveth_proc_open,
1127 .llseek = seq_lseek,
1128 .release = seq_release,
1131 static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
1133 struct proc_dir_entry *entry;
1134 if (ibmveth_proc_dir) {
1135 entry = create_proc_entry(adapter->netdev->name, S_IFREG, ibmveth_proc_dir);
1137 ibmveth_error_printk("Cannot create adapter proc entry");
1139 entry->data = (void *) adapter;
1140 entry->proc_fops = &ibmveth_proc_fops;
1141 SET_MODULE_OWNER(entry);
1147 static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
1149 if (ibmveth_proc_dir) {
1150 remove_proc_entry(adapter->netdev->name, ibmveth_proc_dir);
1154 #else /* CONFIG_PROC_FS */
1155 static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
1159 static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
1162 static void ibmveth_proc_register_driver(void)
1166 static void ibmveth_proc_unregister_driver(void)
1169 #endif /* CONFIG_PROC_FS */
1171 static struct attribute veth_active_attr;
1172 static struct attribute veth_num_attr;
1173 static struct attribute veth_size_attr;
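/* sysfs show: report the pool's active flag, buffer count or buffer size
   depending on which attribute is being read */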
1175 static ssize_t veth_pool_show(struct kobject * kobj,
1176 struct attribute * attr, char * buf)
1178 struct ibmveth_buff_pool *pool = container_of(kobj,
1179 struct ibmveth_buff_pool,
1182 if (attr == &veth_active_attr)
1183 return sprintf(buf, "%d\n", pool->active);
1184 else if (attr == &veth_num_attr)
1185 return sprintf(buf, "%d\n", pool->size);
1186 else if (attr == &veth_size_attr)
1187 return sprintf(buf, "%d\n", pool->buff_size);
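/* sysfs store: change a pool's active state, buffer count or buffer size.
   Applying a change requires closing and reopening the device, so
   pool_config is set to keep ibmveth_close from stopping the tx queue */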
1191 static ssize_t veth_pool_store(struct kobject * kobj, struct attribute * attr,
1192 const char * buf, size_t count)
1194 struct ibmveth_buff_pool *pool = container_of(kobj,
1195 struct ibmveth_buff_pool,
1197 struct net_device *netdev =
1198 container_of(kobj->parent, struct device, kobj)->driver_data;
1199 struct ibmveth_adapter *adapter = netdev->priv;
1200 long value = simple_strtol(buf, NULL, 10);
1203 if (attr == &veth_active_attr) {
1204 if (value && !pool->active) {
1205 if(ibmveth_alloc_buffer_pool(pool)) {
1206 ibmveth_error_printk("unable to alloc pool\n");
1210 adapter->pool_config = 1;
1211 ibmveth_close(netdev);
1212 adapter->pool_config = 0;
1213 if ((rc = ibmveth_open(netdev)))
1215 } else if (!value && pool->active) {
1216 int mtu = netdev->mtu + IBMVETH_BUFF_OH;
1218 /* Make sure some other active buffer pool has buffers that
1219 can hold a packet as large as the current MTU */
1220 for(i = 0; i<IbmVethNumBufferPools; i++) {
1221 if (pool == &adapter->rx_buff_pool[i])
1223 if (!adapter->rx_buff_pool[i].active)
1225 if (mtu < adapter->rx_buff_pool[i].buff_size) {
1227 h_free_logical_lan_buffer(adapter->
1235 ibmveth_error_printk("no active pool >= MTU\n");
1239 } else if (attr == &veth_num_attr) {
1240 if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT)
1243 adapter->pool_config = 1;
1244 ibmveth_close(netdev);
1245 adapter->pool_config = 0;
1247 if ((rc = ibmveth_open(netdev)))
1250 } else if (attr == &veth_size_attr) {
1251 if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE)
1254 adapter->pool_config = 1;
1255 ibmveth_close(netdev);
1256 adapter->pool_config = 0;
1257 pool->buff_size = value;
1258 if ((rc = ibmveth_open(netdev)))
1263 /* kick the interrupt handler to allocate/deallocate pools */
1264 ibmveth_interrupt(netdev->irq, netdev, NULL);
1269 #define ATTR(_name, _mode) \
1270 struct attribute veth_##_name##_attr = { \
1271 .name = __stringify(_name), .mode = _mode, .owner = THIS_MODULE \
1274 static ATTR(active, 0644);
1275 static ATTR(num, 0644);
1276 static ATTR(size, 0644);
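/* Example usage (hypothetical paths - the unit address and pool number
   depend on the system):
     echo 0    > /sys/devices/vio/30000002/pool3/active
     echo 2048 > /sys/devices/vio/30000002/pool2/num
     echo 9216 > /sys/devices/vio/30000002/pool2/size
   deactivates pool 3 and resizes pool 2 via the store hook above */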
1278 static struct attribute * veth_pool_attrs[] = {
1285 static struct sysfs_ops veth_pool_ops = {
1286 .show = veth_pool_show,
1287 .store = veth_pool_store,
1290 static struct kobj_type ktype_veth_pool = {
1292 .sysfs_ops = &veth_pool_ops,
1293 .default_attrs = veth_pool_attrs,
1297 static struct vio_device_id ibmveth_device_table[] __devinitdata= {
1298 { "network", "IBM,l-lan"},
1301 MODULE_DEVICE_TABLE(vio, ibmveth_device_table);
1303 static struct vio_driver ibmveth_driver = {
1304 .id_table = ibmveth_device_table,
1305 .probe = ibmveth_probe,
1306 .remove = ibmveth_remove,
1308 .name = ibmveth_driver_name,
1309 .owner = THIS_MODULE,
1313 static int __init ibmveth_module_init(void)
1315 ibmveth_printk("%s: %s %s\n", ibmveth_driver_name, ibmveth_driver_string, ibmveth_driver_version);
1317 ibmveth_proc_register_driver();
1319 return vio_register_driver(&ibmveth_driver);
1322 static void __exit ibmveth_module_exit(void)
1324 vio_unregister_driver(&ibmveth_driver);
1325 ibmveth_proc_unregister_driver();
1328 module_init(ibmveth_module_init);
1329 module_exit(ibmveth_module_exit);