/**************************************************************************/
/*                                                                        */
/* IBM eServer i/pSeries Virtual Ethernet Device Driver                   */
/* Copyright (C) 2003 IBM Corp.                                           */
/*  Originally written by Dave Larson (larson1@us.ibm.com)                */
/*  Maintained by Santiago Leon (santil@us.ibm.com)                       */
/*                                                                        */
/*  This program is free software; you can redistribute it and/or modify  */
/*  it under the terms of the GNU General Public License as published by  */
/*  the Free Software Foundation; either version 2 of the License, or     */
/*  (at your option) any later version.                                   */
/*                                                                        */
/*  This program is distributed in the hope that it will be useful,       */
/*  but WITHOUT ANY WARRANTY; without even the implied warranty of        */
/*  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         */
/*  GNU General Public License for more details.                          */
/*                                                                        */
/*  You should have received a copy of the GNU General Public License     */
/*  along with this program; if not, write to the Free Software           */
/*  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307   */
/*  USA                                                                   */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device   */
/* for use with IBM i/pSeries LPAR Linux. It utilizes the logical LAN     */
/* option of the RS/6000 Platform Architecture to interface with virtual  */
/* ethernet NICs that are presented to the partition by the hypervisor.   */
/*                                                                        */
/**************************************************************************/
/*
  TODO:
  - add support for sysfs
  - possibly remove procfs support
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <net/net_namespace.h>
#include <asm/semaphore.h>
#include <asm/hvcall.h>
#include <asm/atomic.h>
#include <asm/vio.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>

#include "ibmveth.h"

#undef DEBUG
#define ibmveth_printk(fmt, args...) \
	printk(KERN_DEBUG "%s: " fmt, __FILE__, ## args)

#define ibmveth_error_printk(fmt, args...) \
	printk(KERN_ERR "(%s:%3.3d ua:%x) ERROR: " fmt, __FILE__, __LINE__ , adapter->vdev->unit_address, ## args)

#ifdef DEBUG
#define ibmveth_debug_printk_no_adapter(fmt, args...) \
	printk(KERN_DEBUG "(%s:%3.3d): " fmt, __FILE__, __LINE__ , ## args)
#define ibmveth_debug_printk(fmt, args...) \
	printk(KERN_DEBUG "(%s:%3.3d ua:%x): " fmt, __FILE__, __LINE__ , adapter->vdev->unit_address, ## args)
#define ibmveth_assert(expr) \
  if(!(expr)) {                                   \
    printk(KERN_DEBUG "assertion failed (%s:%3.3d ua:%x): %s\n", __FILE__, __LINE__, adapter->vdev->unit_address, #expr); \
    BUG(); \
  }
#else
#define ibmveth_debug_printk_no_adapter(fmt, args...)
#define ibmveth_debug_printk(fmt, args...)
#define ibmveth_assert(expr)
#endif
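
/*
 * Note: ibmveth_error_printk() and ibmveth_debug_printk() expand to a
 * reference to a local variable named "adapter", so they can only be
 * used in functions where such a variable is in scope.
 */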

static int ibmveth_open(struct net_device *dev);
static int ibmveth_close(struct net_device *dev);
static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static int ibmveth_poll(struct napi_struct *napi, int budget);
static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *dev);
static struct net_device_stats *ibmveth_get_stats(struct net_device *dev);
static void ibmveth_set_multicast_list(struct net_device *dev);
static int ibmveth_change_mtu(struct net_device *dev, int new_mtu);
static void ibmveth_proc_register_driver(void);
static void ibmveth_proc_unregister_driver(void);
static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter);
static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter);
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
static struct kobj_type ktype_veth_pool;

#ifdef CONFIG_PROC_FS
#define IBMVETH_PROC_DIR "ibmveth"
static struct proc_dir_entry *ibmveth_proc_dir;
#endif

static const char ibmveth_driver_name[] = "ibmveth";
static const char ibmveth_driver_string[] = "IBM i/pSeries Virtual Ethernet Driver";
#define ibmveth_driver_version "1.03"

MODULE_AUTHOR("Santiago Leon <santil@us.ibm.com>");
MODULE_DESCRIPTION("IBM i/pSeries Virtual Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(ibmveth_driver_version);

struct ibmveth_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVETH_STAT_OFF(stat) offsetof(struct ibmveth_adapter, stat)
#define IBMVETH_GET_STAT(a, off) *((u64 *)(((unsigned long)(a)) + off))

struct ibmveth_stat ibmveth_stats[] = {
	{ "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) },
	{ "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) },
	{ "replenish_add_buff_failure", IBMVETH_STAT_OFF(replenish_add_buff_failure) },
	{ "replenish_add_buff_success", IBMVETH_STAT_OFF(replenish_add_buff_success) },
	{ "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) },
	{ "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) },
	{ "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) },
	{ "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) },
};

/* simple methods of getting data from the current rxq entry */
static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
{
	return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].toggle == adapter->rx_queue.toggle);
}

static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
{
	return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].valid);
}

static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
{
	return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].offset);
}

static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
{
	return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);
}

static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
{
	return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].csum_good);
}
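
/*
 * The receive queue is a ring of ibmveth_rx_q_entry descriptors shared
 * with the hypervisor. An entry belongs to the driver when its toggle
 * bit matches rx_queue.toggle; the driver flips rx_queue.toggle each
 * time the index wraps (see ibmveth_rxq_recycle_buffer() and
 * ibmveth_rxq_harvest_buffer() below), so stale entries from the
 * previous pass around the ring are never mistaken for new ones.
 */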

/* set up the initial settings for a buffer pool */
static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, u32 pool_index, u32 pool_size, u32 buff_size, u32 pool_active)
{
	pool->size = pool_size;
	pool->index = pool_index;
	pool->buff_size = buff_size;
	pool->threshold = pool_size / 2;
	pool->active = pool_active;
}

/* allocate and set up a buffer pool - called during open */
static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
{
	int i;

	pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);
	if(!pool->free_map) {
		return -1;
	}

	pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
	if(!pool->dma_addr) {
		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	pool->skbuff = kmalloc(sizeof(void*) * pool->size, GFP_KERNEL);
	if(!pool->skbuff) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;
		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	memset(pool->skbuff, 0, sizeof(void*) * pool->size);
	memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size);

	for(i = 0; i < pool->size; ++i) {
		pool->free_map[i] = i;
	}

	atomic_set(&pool->available, 0);
	pool->producer_index = 0;
	pool->consumer_index = 0;

	return 0;
}
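
/*
 * free_map acts as a circular queue of free buffer indices:
 * ibmveth_replenish_buffer_pool() takes indices at consumer_index when
 * posting buffers to the hypervisor, and ibmveth_remove_buffer_from_pool()
 * returns them at producer_index once a buffer has been harvested.
 */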

/* replenish the buffers for a pool. note that we don't need to
 * skb_reserve these since they are used for incoming...
 */
static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struct ibmveth_buff_pool *pool)
{
	u32 i;
	u32 count = pool->size - atomic_read(&pool->available);
	u32 buffers_added = 0;

	mb();

	for(i = 0; i < count; ++i) {
		struct sk_buff *skb;
		unsigned int free_index, index;
		u64 correlator;
		union ibmveth_buf_desc desc;
		unsigned long lpar_rc;
		dma_addr_t dma_addr;

		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
		if(!skb) {
			ibmveth_debug_printk("replenish: unable to allocate skb\n");
			adapter->replenish_no_mem++;
			break;
		}

		free_index = pool->consumer_index;
		pool->consumer_index = (pool->consumer_index + 1) % pool->size;
		index = pool->free_map[free_index];

		ibmveth_assert(index != IBM_VETH_INVALID_MAP);
		ibmveth_assert(pool->skbuff[index] == NULL);

		dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
				pool->buff_size, DMA_FROM_DEVICE);

		pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
		pool->dma_addr[index] = dma_addr;
		pool->skbuff[index] = skb;
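
		/* Stash a 64-bit correlator at the head of the buffer so
		 * the receive path can find this skb again: pool index in
		 * the upper 32 bits, buffer index in the lower 32. */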
		correlator = ((u64)pool->index << 32) | index;
		*(u64*)skb->data = correlator;

		desc.desc = 0;
		desc.fields.valid = 1;
		desc.fields.length = pool->buff_size;
		desc.fields.address = dma_addr;

		lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);

		if(lpar_rc != H_SUCCESS) {
			pool->free_map[free_index] = index;
			pool->skbuff[index] = NULL;
			if (pool->consumer_index == 0)
				pool->consumer_index = pool->size - 1;
			else
				pool->consumer_index--;
			dma_unmap_single(&adapter->vdev->dev,
					pool->dma_addr[index], pool->buff_size,
					DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			adapter->replenish_add_buff_failure++;
			break;
		} else {
			buffers_added++;
			adapter->replenish_add_buff_success++;
		}
	}

	mb();
	atomic_add(buffers_added, &(pool->available));
}

/* replenish routine */
static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;

	for(i = 0; i < IbmVethNumBufferPools; i++)
		if(adapter->rx_buff_pool[i].active)
			ibmveth_replenish_buffer_pool(adapter,
						      &adapter->rx_buff_pool[i]);
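
	/* The last 8 bytes of the buffer list page hold a count of frames
	 * that could not be received because no suitable buffer was
	 * available; snapshot it into the rx_no_buffer statistic. */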
	adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);
}

/* empty and free a buffer pool - also used to do cleanup in error paths */
static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, struct ibmveth_buff_pool *pool)
{
	int i;

	kfree(pool->free_map);
	pool->free_map = NULL;

	if(pool->skbuff && pool->dma_addr) {
		for(i = 0; i < pool->size; ++i) {
			struct sk_buff *skb = pool->skbuff[i];
			if(skb) {
				dma_unmap_single(&adapter->vdev->dev,
						 pool->dma_addr[i],
						 pool->buff_size,
						 DMA_FROM_DEVICE);
				dev_kfree_skb_any(skb);
				pool->skbuff[i] = NULL;
			}
		}
	}

	if(pool->dma_addr) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;
	}

	if(pool->skbuff) {
		kfree(pool->skbuff);
		pool->skbuff = NULL;
	}
}

/* remove a buffer from a pool */
static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter, u64 correlator)
{
	unsigned int pool  = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	unsigned int free_index;
	struct sk_buff *skb;

	ibmveth_assert(pool < IbmVethNumBufferPools);
	ibmveth_assert(index < adapter->rx_buff_pool[pool].size);

	skb = adapter->rx_buff_pool[pool].skbuff[index];

	ibmveth_assert(skb != NULL);

	adapter->rx_buff_pool[pool].skbuff[index] = NULL;

	dma_unmap_single(&adapter->vdev->dev,
			 adapter->rx_buff_pool[pool].dma_addr[index],
			 adapter->rx_buff_pool[pool].buff_size,
			 DMA_FROM_DEVICE);

	free_index = adapter->rx_buff_pool[pool].producer_index;
	adapter->rx_buff_pool[pool].producer_index
		= (adapter->rx_buff_pool[pool].producer_index + 1)
		% adapter->rx_buff_pool[pool].size;
	adapter->rx_buff_pool[pool].free_map[free_index] = index;

	mb();

	atomic_dec(&(adapter->rx_buff_pool[pool].available));
}

/* get the current buffer on the rx queue */
static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter)
{
	u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;

	ibmveth_assert(pool < IbmVethNumBufferPools);
	ibmveth_assert(index < adapter->rx_buff_pool[pool].size);

	return adapter->rx_buff_pool[pool].skbuff[index];
}

/* recycle the current buffer on the rx queue */
static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
{
	u32 q_index = adapter->rx_queue.index;
	u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	union ibmveth_buf_desc desc;
	unsigned long lpar_rc;

	ibmveth_assert(pool < IbmVethNumBufferPools);
	ibmveth_assert(index < adapter->rx_buff_pool[pool].size);

	if(!adapter->rx_buff_pool[pool].active) {
		ibmveth_rxq_harvest_buffer(adapter);
		ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
		return;
	}

	desc.desc = 0;
	desc.fields.valid = 1;
	desc.fields.length = adapter->rx_buff_pool[pool].buff_size;
	desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];

	lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);

	if(lpar_rc != H_SUCCESS) {
		ibmveth_debug_printk("h_add_logical_lan_buffer failed during recycle rc=%ld", lpar_rc);
		ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
	}

	if(++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}
}

static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
{
	ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);

	if(++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}
}

static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
{
	int i;

	if(adapter->buffer_list_addr != NULL) {
		if(!dma_mapping_error(adapter->buffer_list_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					adapter->buffer_list_dma, 4096,
					DMA_BIDIRECTIONAL);
			adapter->buffer_list_dma = DMA_ERROR_CODE;
		}
		free_page((unsigned long)adapter->buffer_list_addr);
		adapter->buffer_list_addr = NULL;
	}

	if(adapter->filter_list_addr != NULL) {
		if(!dma_mapping_error(adapter->filter_list_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					adapter->filter_list_dma, 4096,
					DMA_BIDIRECTIONAL);
			adapter->filter_list_dma = DMA_ERROR_CODE;
		}
		free_page((unsigned long)adapter->filter_list_addr);
		adapter->filter_list_addr = NULL;
	}

	if(adapter->rx_queue.queue_addr != NULL) {
		if(!dma_mapping_error(adapter->rx_queue.queue_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					adapter->rx_queue.queue_dma,
					adapter->rx_queue.queue_len,
					DMA_BIDIRECTIONAL);
			adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
		}
		kfree(adapter->rx_queue.queue_addr);
		adapter->rx_queue.queue_addr = NULL;
	}

	for(i = 0; i<IbmVethNumBufferPools; i++)
		if (adapter->rx_buff_pool[i].active)
			ibmveth_free_buffer_pool(adapter,
						 &adapter->rx_buff_pool[i]);
}

static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
        union ibmveth_buf_desc rxq_desc, u64 mac_address)
{
	int rc, try_again = 1;

	/* After a kexec the adapter will still be open, so our attempt to
	 * open it will fail. So if we get a failure we free the adapter and
	 * try again, but only once. */
retry:
	rc = h_register_logical_lan(adapter->vdev->unit_address,
				    adapter->buffer_list_dma, rxq_desc.desc,
				    adapter->filter_list_dma, mac_address);

	if (rc != H_SUCCESS && try_again) {
		do {
			rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));

		try_again = 0;
		goto retry;
	}

	return rc;
}

static int ibmveth_open(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev->priv;
	u64 mac_address = 0;
	int rxq_entries = 1;
	unsigned long lpar_rc;
	int rc;
	union ibmveth_buf_desc rxq_desc;
	int i;

	ibmveth_debug_printk("open starting\n");

	napi_enable(&adapter->napi);

	for(i = 0; i<IbmVethNumBufferPools; i++)
		rxq_entries += adapter->rx_buff_pool[i].size;

	adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
	adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);

	if(!adapter->buffer_list_addr || !adapter->filter_list_addr) {
		ibmveth_error_printk("unable to allocate filter or buffer list pages\n");
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENOMEM;
	}

	adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) * rxq_entries;
	adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len, GFP_KERNEL);

	if(!adapter->rx_queue.queue_addr) {
		ibmveth_error_printk("unable to allocate rx queue pages\n");
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENOMEM;
	}

	adapter->buffer_list_dma = dma_map_single(&adapter->vdev->dev,
			adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
	adapter->filter_list_dma = dma_map_single(&adapter->vdev->dev,
			adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
	adapter->rx_queue.queue_dma = dma_map_single(&adapter->vdev->dev,
			adapter->rx_queue.queue_addr,
			adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);

	if((dma_mapping_error(adapter->buffer_list_dma)) ||
	   (dma_mapping_error(adapter->filter_list_dma)) ||
	   (dma_mapping_error(adapter->rx_queue.queue_dma))) {
		ibmveth_error_printk("unable to map filter or buffer list pages\n");
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENOMEM;
	}

	adapter->rx_queue.index = 0;
	adapter->rx_queue.num_slots = rxq_entries;
	adapter->rx_queue.toggle = 1;
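
	/* On big-endian ppc64 the 6-byte MAC lands in the top bytes of the
	 * u64, so shift right 16 bits to right-justify it for the hcall. */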
	memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
	mac_address = mac_address >> 16;

	rxq_desc.desc = 0;
	rxq_desc.fields.valid = 1;
	rxq_desc.fields.length = adapter->rx_queue.queue_len;
	rxq_desc.fields.address = adapter->rx_queue.queue_dma;

	ibmveth_debug_printk("buffer list @ 0x%p\n", adapter->buffer_list_addr);
	ibmveth_debug_printk("filter list @ 0x%p\n", adapter->filter_list_addr);
	ibmveth_debug_printk("receive q   @ 0x%p\n", adapter->rx_queue.queue_addr);

	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

	lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);

	if(lpar_rc != H_SUCCESS) {
		ibmveth_error_printk("h_register_logical_lan failed with %ld\n", lpar_rc);
		ibmveth_error_printk("buffer TCE:0x%lx filter TCE:0x%lx rxq desc:0x%lx MAC:0x%lx\n",
				     adapter->buffer_list_dma,
				     adapter->filter_list_dma,
				     adapter->rx_queue.queue_dma,
				     mac_address);
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENONET;
	}

	for(i = 0; i<IbmVethNumBufferPools; i++) {
		if(!adapter->rx_buff_pool[i].active)
			continue;
		if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
			ibmveth_error_printk("unable to alloc pool\n");
			adapter->rx_buff_pool[i].active = 0;
			ibmveth_cleanup(adapter);
			napi_disable(&adapter->napi);
			return -ENOMEM;
		}
	}

	ibmveth_debug_printk("registering irq 0x%x\n", netdev->irq);
	if((rc = request_irq(netdev->irq, &ibmveth_interrupt, 0, netdev->name, netdev)) != 0) {
		ibmveth_error_printk("unable to request irq 0x%x, rc %d\n", netdev->irq, rc);
		do {
			rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));

		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return rc;
	}

	ibmveth_debug_printk("initial replenish cycle\n");
	ibmveth_interrupt(netdev->irq, netdev);

	netif_start_queue(netdev);

	ibmveth_debug_printk("open complete\n");

	return 0;
}
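
/*
 * adapter->pool_config is set by the paths that bounce the interface
 * around a close/open pair (ethtool csum changes, MTU changes, sysfs
 * pool updates); while it is set, close leaves the tx queue running so
 * the brief reconfiguration is transparent to the stack.
 */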
static int ibmveth_close(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev->priv;
	long lpar_rc;

	ibmveth_debug_printk("close starting\n");

	napi_disable(&adapter->napi);

	if (!adapter->pool_config)
		netif_stop_queue(netdev);

	free_irq(netdev->irq, netdev);

	do {
		lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
	} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));

	if(lpar_rc != H_SUCCESS) {
		ibmveth_error_printk("h_free_logical_lan failed with %lx, continuing with close\n",
				     lpar_rc);
	}

	adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);

	ibmveth_cleanup(adapter);

	ibmveth_debug_printk("close complete\n");

	return 0;
}

static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) {
	cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE);
	cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | ADVERTISED_FIBRE);
	cmd->speed = SPEED_1000;
	cmd->duplex = DUPLEX_FULL;
	cmd->port = PORT_FIBRE;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_ENABLE;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 1;
	return 0;
}

static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info) {
	strncpy(info->driver, ibmveth_driver_name, sizeof(info->driver) - 1);
	strncpy(info->version, ibmveth_driver_version, sizeof(info->version) - 1);
}

static u32 netdev_get_link(struct net_device *dev) {
	return 1;
}
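
/*
 * The hypervisor link is always up; the 1000/FD "settings" above are
 * synthetic values reported for tools that expect a physical NIC.
 */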

static void ibmveth_set_rx_csum_flags(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = dev->priv;

	if (data)
		adapter->rx_csum = 1;
	else {
		/*
		 * Since the ibmveth firmware interface does not have the concept of
		 * separate tx/rx checksum offload enable, if rx checksum is disabled
		 * we also have to disable tx checksum offload. Once we disable rx
		 * checksum offload, we are no longer allowed to send tx buffers that
		 * are not properly checksummed.
		 */
		adapter->rx_csum = 0;
		dev->features &= ~NETIF_F_IP_CSUM;
	}
}

static void ibmveth_set_tx_csum_flags(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = dev->priv;

	if (data) {
		dev->features |= NETIF_F_IP_CSUM;
		adapter->rx_csum = 1;
	} else
		dev->features &= ~NETIF_F_IP_CSUM;
}

static int ibmveth_set_csum_offload(struct net_device *dev, u32 data,
				    void (*done) (struct net_device *, u32))
{
	struct ibmveth_adapter *adapter = dev->priv;
	union ibmveth_illan_attributes set_attr, clr_attr, ret_attr;
	long ret;
	int rc1 = 0, rc2 = 0;
	int restart = 0;

	if (netif_running(dev)) {
		restart = 1;
		adapter->pool_config = 1;
		ibmveth_close(dev);
		adapter->pool_config = 0;
	}

	set_attr.desc = 0;
	clr_attr.desc = 0;

	if (data)
		set_attr.fields.tcp_csum_offload_ipv4 = 1;
	else
		clr_attr.fields.tcp_csum_offload_ipv4 = 1;

	ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr.desc);

	if (ret == H_SUCCESS && !ret_attr.fields.active_trunk &&
	    !ret_attr.fields.trunk_priority &&
	    ret_attr.fields.csum_offload_padded_pkt_support) {
		ret = h_illan_attributes(adapter->vdev->unit_address, clr_attr.desc,
					 set_attr.desc, &ret_attr.desc);

		if (ret != H_SUCCESS) {
			rc1 = -EIO;
			ibmveth_error_printk("unable to change checksum offload settings."
					     " %d rc=%ld\n", data, ret);

			ret = h_illan_attributes(adapter->vdev->unit_address,
						 set_attr.desc, clr_attr.desc, &ret_attr.desc);
		} else
			done(dev, data);
	} else {
		rc1 = -EIO;
		ibmveth_error_printk("unable to change checksum offload settings."
				     " %d rc=%ld ret_attr=%lx\n", data, ret, ret_attr.desc);
	}

	if (restart)
		rc2 = ibmveth_open(dev);

	return rc1 ? rc1 : rc2;
}

static int ibmveth_set_rx_csum(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = dev->priv;

	if ((data && adapter->rx_csum) || (!data && !adapter->rx_csum))
		return 0;

	return ibmveth_set_csum_offload(dev, data, ibmveth_set_rx_csum_flags);
}

static int ibmveth_set_tx_csum(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = dev->priv;
	int rc = 0;

	if (data && (dev->features & NETIF_F_IP_CSUM))
		return 0;
	if (!data && !(dev->features & NETIF_F_IP_CSUM))
		return 0;

	if (data && !adapter->rx_csum)
		rc = ibmveth_set_csum_offload(dev, data, ibmveth_set_tx_csum_flags);
	else
		ibmveth_set_tx_csum_flags(dev, data);

	return rc;
}

static u32 ibmveth_get_rx_csum(struct net_device *dev)
{
	struct ibmveth_adapter *adapter = dev->priv;
	return adapter->rx_csum;
}

static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++, data += ETH_GSTRING_LEN)
		memcpy(data, ibmveth_stats[i].name, ETH_GSTRING_LEN);
}

static int ibmveth_get_stats_count(struct net_device *dev)
{
	return ARRAY_SIZE(ibmveth_stats);
}

static void ibmveth_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	int i;
	struct ibmveth_adapter *adapter = dev->priv;

	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++)
		data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset);
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.get_link		= netdev_get_link,
	.get_sg			= ethtool_op_get_sg,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ibmveth_set_tx_csum,
	.get_rx_csum		= ibmveth_get_rx_csum,
	.set_rx_csum		= ibmveth_set_rx_csum,
	.get_tso		= ethtool_op_get_tso,
	.get_ufo		= ethtool_op_get_ufo,
	.get_strings		= ibmveth_get_strings,
	.get_stats_count	= ibmveth_get_stats_count,
	.get_ethtool_stats	= ibmveth_get_ethtool_stats,
};

static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	return -EOPNOTSUPP;
}

#define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))

static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev->priv;
	union ibmveth_buf_desc desc;
	unsigned long lpar_rc;
	unsigned long correlator;
	unsigned long flags;
	unsigned int retry_count;
	unsigned int tx_dropped = 0;
	unsigned int tx_bytes = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_send_failed = 0;
	unsigned int tx_map_failed = 0;

	desc.desc = 0;
	desc.fields.length = skb->len;
	desc.fields.address = dma_map_single(&adapter->vdev->dev, skb->data,
					     desc.fields.length, DMA_TO_DEVICE);
	desc.fields.valid = 1;

	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    ip_hdr(skb)->protocol != IPPROTO_TCP && skb_checksum_help(skb)) {
		ibmveth_error_printk("tx: failed to checksum packet\n");
		tx_dropped++;
		goto out;
	}
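
	/* For TCP partial checksums the descriptor tells the hypervisor
	 * the frame needs no further checksumming (no_csum + csum_good);
	 * the on-wire checksum field must then be zero, hence the zeroing
	 * below. Non-TCP partial checksums were already resolved in
	 * software by skb_checksum_help() above. */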
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		unsigned char *buf = skb_transport_header(skb) + skb->csum_offset;

		desc.fields.no_csum = 1;
		desc.fields.csum_good = 1;

		/* Need to zero out the checksum */
		buf[0] = 0;
		buf[1] = 0;
	}

	if (dma_mapping_error(desc.fields.address)) {
		ibmveth_error_printk("tx: unable to map xmit buffer\n");
		tx_map_failed++;
		tx_dropped++;
		goto out;
	}

	/* send the frame. Arbitrarily set retrycount to 1024 */
	correlator = 0;
	retry_count = 1024;
	do {
		lpar_rc = h_send_logical_lan(adapter->vdev->unit_address,
					     desc.desc, 0, 0, 0, 0, 0,
					     correlator, &correlator);
	} while ((lpar_rc == H_BUSY) && (retry_count--));

	if(lpar_rc != H_SUCCESS && lpar_rc != H_DROPPED) {
		ibmveth_error_printk("tx: h_send_logical_lan failed with rc=%ld\n", lpar_rc);
		ibmveth_error_printk("tx: valid=%d, len=%d, address=0x%08x\n",
				     desc.fields.valid, desc.fields.length, desc.fields.address);
		tx_send_failed++;
		tx_dropped++;
	} else {
		tx_packets++;
		tx_bytes += skb->len;
		netdev->trans_start = jiffies;
	}

	dma_unmap_single(&adapter->vdev->dev, desc.fields.address,
			 desc.fields.length, DMA_TO_DEVICE);

out:	spin_lock_irqsave(&adapter->stats_lock, flags);
	adapter->stats.tx_dropped += tx_dropped;
	adapter->stats.tx_bytes += tx_bytes;
	adapter->stats.tx_packets += tx_packets;
	adapter->tx_send_failed += tx_send_failed;
	adapter->tx_map_failed += tx_map_failed;
	spin_unlock_irqrestore(&adapter->stats_lock, flags);

	dev_kfree_skb(skb);
	return 0;
}

static int ibmveth_poll(struct napi_struct *napi, int budget)
{
	struct ibmveth_adapter *adapter = container_of(napi, struct ibmveth_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	int frames_processed = 0;
	unsigned long lpar_rc;

restart_poll:
	do {
		struct sk_buff *skb;

		if (!ibmveth_rxq_pending_buffer(adapter))
			break;

		rmb();
		if (!ibmveth_rxq_buffer_valid(adapter)) {
			wmb(); /* suggested by larson1 */
			adapter->rx_invalid_buffer++;
			ibmveth_debug_printk("recycling invalid buffer\n");
			ibmveth_rxq_recycle_buffer(adapter);
		} else {
			int length = ibmveth_rxq_frame_length(adapter);
			int offset = ibmveth_rxq_frame_offset(adapter);
			int csum_good = ibmveth_rxq_csum_good(adapter);

			skb = ibmveth_rxq_get_buffer(adapter);
			if (csum_good)
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			ibmveth_rxq_harvest_buffer(adapter);

			skb_reserve(skb, offset);
			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, netdev);

			netif_receive_skb(skb);	/* send it up */

			adapter->stats.rx_packets++;
			adapter->stats.rx_bytes += length;
			frames_processed++;
			netdev->last_rx = jiffies;
		}
	} while (frames_processed < budget);

	ibmveth_replenish_task(adapter);

	if (frames_processed < budget) {
		/* We think we are done - reenable interrupts,
		 * then check once more to make sure we are done.
		 */
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_ENABLE);

		ibmveth_assert(lpar_rc == H_SUCCESS);

		netif_rx_complete(netdev, napi);

		if (ibmveth_rxq_pending_buffer(adapter) &&
		    netif_rx_reschedule(netdev, napi)) {
			lpar_rc = h_vio_signal(adapter->vdev->unit_address,
					       VIO_IRQ_DISABLE);
			goto restart_poll;
		}
	}

	return frames_processed;
}

static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
{
	struct net_device *netdev = dev_instance;
	struct ibmveth_adapter *adapter = netdev->priv;
	unsigned long lpar_rc;

	if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_DISABLE);
		ibmveth_assert(lpar_rc == H_SUCCESS);
		__netif_rx_schedule(netdev, &adapter->napi);
	}
	return IRQ_HANDLED;
}
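
/*
 * ibmveth_open() and veth_pool_store() also invoke this handler
 * directly: scheduling the poll loop is a convenient way to run a
 * replenish cycle (and to recycle buffers from deactivated pools)
 * outside of a real interrupt.
 */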
static struct net_device_stats *ibmveth_get_stats(struct net_device *dev)
{
	struct ibmveth_adapter *adapter = dev->priv;
	return &adapter->stats;
}

static void ibmveth_set_multicast_list(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev->priv;
	unsigned long lpar_rc;

	if((netdev->flags & IFF_PROMISC) || (netdev->mc_count > adapter->mcastFilterSize)) {
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableRecv |
					   IbmVethMcastDisableFiltering,
					   0);
		if(lpar_rc != H_SUCCESS) {
			ibmveth_error_printk("h_multicast_ctrl rc=%ld when entering promisc mode\n", lpar_rc);
		}
	} else {
		struct dev_mc_list *mclist = netdev->mc_list;
		int i;
		/* clear the filter table & disable filtering */
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableRecv |
					   IbmVethMcastDisableFiltering |
					   IbmVethMcastClearFilterTable,
					   0);
		if(lpar_rc != H_SUCCESS) {
			ibmveth_error_printk("h_multicast_ctrl rc=%ld when attempting to clear filter table\n", lpar_rc);
		}
		/* add the addresses to the filter table */
		for(i = 0; i < netdev->mc_count; ++i, mclist = mclist->next) {
			/* add the multicast address to the filter table */
			unsigned long mcast_addr = 0;
			memcpy(((char *)&mcast_addr)+2, mclist->dmi_addr, 6);
			lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
						   IbmVethMcastAddFilter,
						   mcast_addr);
			if(lpar_rc != H_SUCCESS) {
				ibmveth_error_printk("h_multicast_ctrl rc=%ld when adding an entry to the filter table\n", lpar_rc);
			}
		}

		/* re-enable filtering */
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableFiltering,
					   0);
		if(lpar_rc != H_SUCCESS) {
			ibmveth_error_printk("h_multicast_ctrl rc=%ld when enabling filtering\n", lpar_rc);
		}
	}
}

static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ibmveth_adapter *adapter = dev->priv;
	int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
	int reinit = 0;
	int i, rc;

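	/* Despite its name, IBMVETH_MAX_MTU is used here as the minimum
	 * supported MTU; anything smaller is rejected. */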
	if (new_mtu < IBMVETH_MAX_MTU)
		return -EINVAL;

	for (i = 0; i < IbmVethNumBufferPools; i++)
		if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size)
			break;

	if (i == IbmVethNumBufferPools)
		return -EINVAL;

	/* Look for an active buffer pool that can hold the new MTU */
	for(i = 0; i<IbmVethNumBufferPools; i++) {
		if (!adapter->rx_buff_pool[i].active) {
			adapter->rx_buff_pool[i].active = 1;
			reinit = 1;
		}

		if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
			if (reinit && netif_running(adapter->netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(adapter->netdev);
				adapter->pool_config = 0;
				dev->mtu = new_mtu;
				if ((rc = ibmveth_open(adapter->netdev)))
					return rc;
			} else
				dev->mtu = new_mtu;
			return 0;
		}
	}
	return -EINVAL;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void ibmveth_poll_controller(struct net_device *dev)
{
	ibmveth_replenish_task(dev->priv);
	ibmveth_interrupt(dev->irq, dev);
}
#endif

static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	int rc, i;
	long ret;
	struct net_device *netdev;
	struct ibmveth_adapter *adapter;
	union ibmveth_illan_attributes set_attr, ret_attr;

	unsigned char *mac_addr_p;
	unsigned int *mcastFilterSize_p;

	ibmveth_debug_printk_no_adapter("entering ibmveth_probe for UA 0x%x\n",
					dev->unit_address);

	mac_addr_p = (unsigned char *) vio_get_attribute(dev,
						VETH_MAC_ADDR, NULL);
	if(!mac_addr_p) {
		printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find VETH_MAC_ADDR "
				"attribute\n", __FILE__, __LINE__);
		return 0;
	}

	mcastFilterSize_p = (unsigned int *) vio_get_attribute(dev,
						VETH_MCAST_FILTER_SIZE, NULL);
	if(!mcastFilterSize_p) {
		printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find "
				"VETH_MCAST_FILTER_SIZE attribute\n",
				__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));

	if(!netdev)
		return -ENOMEM;

	SET_MODULE_OWNER(netdev);

	adapter = netdev->priv;
	dev->dev.driver_data = netdev;

	adapter->vdev = dev;
	adapter->netdev = netdev;
	adapter->mcastFilterSize = *mcastFilterSize_p;
	adapter->pool_config = 0;

	netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);

	/* 	Some older boxes running PHYP non-natively have an OF that
		returns a 8-byte local-mac-address field (and the first
		2 bytes have to be ignored) while newer boxes' OF return
		a 6-byte field. Note that IEEE 1275 specifies that
		local-mac-address must be a 6-byte field.
		The RPA doc specifies that the first byte must be 10b, so
		we'll just look for it to solve this 8 vs. 6 byte field issue */
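
	/* Illustrative example: an 8-byte field looks like
	 * 00 00 xx xx xx xx xx xx with the real MAC in the last 6 bytes;
	 * a MAC whose first byte has low-order bits 10b (binary) satisfies
	 * (*mac_addr_p & 0x3) == 0x02, which is what the test keys on. */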
	if ((*mac_addr_p & 0x3) != 0x02)
		mac_addr_p += 2;

	adapter->mac_addr = 0;
	memcpy(&adapter->mac_addr, mac_addr_p, 6);

	netdev->irq = dev->irq;
	netdev->open               = ibmveth_open;
	netdev->stop               = ibmveth_close;
	netdev->hard_start_xmit    = ibmveth_start_xmit;
	netdev->get_stats          = ibmveth_get_stats;
	netdev->set_multicast_list = ibmveth_set_multicast_list;
	netdev->do_ioctl           = ibmveth_ioctl;
	netdev->ethtool_ops        = &netdev_ethtool_ops;
	netdev->change_mtu         = ibmveth_change_mtu;
	SET_NETDEV_DEV(netdev, &dev->dev);
#ifdef CONFIG_NET_POLL_CONTROLLER
	netdev->poll_controller = ibmveth_poll_controller;
#endif
	netdev->features |= NETIF_F_LLTX;
	spin_lock_init(&adapter->stats_lock);

	memcpy(&netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);

	for(i = 0; i<IbmVethNumBufferPools; i++) {
		struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
		ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
					 pool_count[i], pool_size[i],
					 pool_active[i]);
		kobj->parent = &dev->dev.kobj;
		sprintf(kobj->name, "pool%d", i);
		kobj->ktype = &ktype_veth_pool;
		kobject_register(kobj);
	}

	ibmveth_debug_printk("adapter @ 0x%p\n", adapter);

	adapter->buffer_list_dma = DMA_ERROR_CODE;
	adapter->filter_list_dma = DMA_ERROR_CODE;
	adapter->rx_queue.queue_dma = DMA_ERROR_CODE;

	ibmveth_debug_printk("registering netdev...\n");

	ret = h_illan_attributes(dev->unit_address, 0, 0, &ret_attr.desc);

	if (ret == H_SUCCESS && !ret_attr.fields.active_trunk &&
	    !ret_attr.fields.trunk_priority &&
	    ret_attr.fields.csum_offload_padded_pkt_support) {
		set_attr.desc = 0;
		set_attr.fields.tcp_csum_offload_ipv4 = 1;

		ret = h_illan_attributes(dev->unit_address, 0, set_attr.desc,
					 &ret_attr.desc);

		if (ret == H_SUCCESS) {
			adapter->rx_csum = 1;
			netdev->features |= NETIF_F_IP_CSUM;
		} else
			ret = h_illan_attributes(dev->unit_address, set_attr.desc,
						 0, &ret_attr.desc);
	}

	rc = register_netdev(netdev);

	if(rc) {
		ibmveth_debug_printk("failed to register netdev rc=%d\n", rc);
		free_netdev(netdev);
		return rc;
	}

	ibmveth_debug_printk("registered\n");

	ibmveth_proc_register_adapter(adapter);

	return 0;
}

static int __devexit ibmveth_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev->dev.driver_data;
	struct ibmveth_adapter *adapter = netdev->priv;
	int i;

	for(i = 0; i<IbmVethNumBufferPools; i++)
		kobject_unregister(&adapter->rx_buff_pool[i].kobj);

	unregister_netdev(netdev);

	ibmveth_proc_unregister_adapter(adapter);

	free_netdev(netdev);

	return 0;
}

#ifdef CONFIG_PROC_FS
static void ibmveth_proc_register_driver(void)
{
	ibmveth_proc_dir = proc_mkdir(IBMVETH_PROC_DIR, init_net.proc_net);
	if (ibmveth_proc_dir) {
		SET_MODULE_OWNER(ibmveth_proc_dir);
	}
}

static void ibmveth_proc_unregister_driver(void)
{
	remove_proc_entry(IBMVETH_PROC_DIR, init_net.proc_net);
}
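
/*
 * Each adapter shows up as /proc/net/ibmveth/<unit address in hex>,
 * e.g. /proc/net/ibmveth/30000002 (the unit address here is only an
 * illustrative value; it varies by system).
 */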
static void *ibmveth_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos == 0) {
		return (void *)1;
	} else {
		return NULL;
	}
}

static void *ibmveth_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void ibmveth_seq_stop(struct seq_file *seq, void *v)
{
}

static int ibmveth_seq_show(struct seq_file *seq, void *v)
{
	struct ibmveth_adapter *adapter = seq->private;
	char *current_mac = ((char*) &adapter->netdev->dev_addr);
	char *firmware_mac = ((char*) &adapter->mac_addr);

	seq_printf(seq, "%s %s\n\n", ibmveth_driver_string, ibmveth_driver_version);

	seq_printf(seq, "Unit Address:    0x%x\n", adapter->vdev->unit_address);
	seq_printf(seq, "Current MAC:     %02X:%02X:%02X:%02X:%02X:%02X\n",
		   current_mac[0], current_mac[1], current_mac[2],
		   current_mac[3], current_mac[4], current_mac[5]);
	seq_printf(seq, "Firmware MAC:    %02X:%02X:%02X:%02X:%02X:%02X\n",
		   firmware_mac[0], firmware_mac[1], firmware_mac[2],
		   firmware_mac[3], firmware_mac[4], firmware_mac[5]);

	seq_printf(seq, "\nAdapter Statistics:\n");
	seq_printf(seq, "  TX:  vio_map_single failures:      %ld\n", adapter->tx_map_failed);
	seq_printf(seq, "       send failures:                %ld\n", adapter->tx_send_failed);
	seq_printf(seq, "  RX:  replenish task cycles:        %ld\n", adapter->replenish_task_cycles);
	seq_printf(seq, "       alloc_skb_failures:           %ld\n", adapter->replenish_no_mem);
	seq_printf(seq, "       add buffer failures:          %ld\n", adapter->replenish_add_buff_failure);
	seq_printf(seq, "       invalid buffers:              %ld\n", adapter->rx_invalid_buffer);
	seq_printf(seq, "       no buffers:                   %ld\n", adapter->rx_no_buffer);

	return 0;
}

static struct seq_operations ibmveth_seq_ops = {
	.start = ibmveth_seq_start,
	.next  = ibmveth_seq_next,
	.stop  = ibmveth_seq_stop,
	.show  = ibmveth_seq_show,
};

static int ibmveth_proc_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	struct proc_dir_entry *proc;
	int rc;

	rc = seq_open(file, &ibmveth_seq_ops);
	if (!rc) {
		/* recover the pointer buried in proc_dir_entry data */
		seq = file->private_data;
		proc = PDE(inode);
		seq->private = proc->data;
	}
	return rc;
}

static const struct file_operations ibmveth_proc_fops = {
	.owner	 = THIS_MODULE,
	.open    = ibmveth_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
{
	struct proc_dir_entry *entry;
	if (ibmveth_proc_dir) {
		char u_addr[10];
		sprintf(u_addr, "%x", adapter->vdev->unit_address);
		entry = create_proc_entry(u_addr, S_IFREG, ibmveth_proc_dir);
		if (!entry) {
			ibmveth_error_printk("Cannot create adapter proc entry");
		} else {
			entry->data = (void *) adapter;
			entry->proc_fops = &ibmveth_proc_fops;
			SET_MODULE_OWNER(entry);
		}
	}
	return;
}

static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
{
	if (ibmveth_proc_dir) {
		char u_addr[10];
		sprintf(u_addr, "%x", adapter->vdev->unit_address);
		remove_proc_entry(u_addr, ibmveth_proc_dir);
	}
}

#else /* CONFIG_PROC_FS */
static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
{
}

static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
{
}

static void ibmveth_proc_register_driver(void)
{
}

static void ibmveth_proc_unregister_driver(void)
{
}
#endif /* CONFIG_PROC_FS */

static struct attribute veth_active_attr;
static struct attribute veth_num_attr;
static struct attribute veth_size_attr;

static ssize_t veth_pool_show(struct kobject * kobj,
			      struct attribute * attr, char * buf)
{
	struct ibmveth_buff_pool *pool = container_of(kobj,
						      struct ibmveth_buff_pool,
						      kobj);

	if (attr == &veth_active_attr)
		return sprintf(buf, "%d\n", pool->active);
	else if (attr == &veth_num_attr)
		return sprintf(buf, "%d\n", pool->size);
	else if (attr == &veth_size_attr)
		return sprintf(buf, "%d\n", pool->buff_size);
	return 0;
}

static ssize_t veth_pool_store(struct kobject * kobj, struct attribute * attr,
			       const char * buf, size_t count)
{
	struct ibmveth_buff_pool *pool = container_of(kobj,
						      struct ibmveth_buff_pool,
						      kobj);
	struct net_device *netdev =
	    container_of(kobj->parent, struct device, kobj)->driver_data;
	struct ibmveth_adapter *adapter = netdev->priv;
	long value = simple_strtol(buf, NULL, 10);
	long rc;

	if (attr == &veth_active_attr) {
		if (value && !pool->active) {
			if (netif_running(netdev)) {
				if(ibmveth_alloc_buffer_pool(pool)) {
					ibmveth_error_printk("unable to alloc pool\n");
					return -ENOMEM;
				}
				pool->active = 1;
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else
				pool->active = 1;
		} else if (!value && pool->active) {
			int mtu = netdev->mtu + IBMVETH_BUFF_OH;
			int i;
			/* Make sure there is a buffer pool with buffers that
			   can hold a packet of the size of the MTU */
			for (i = 0; i < IbmVethNumBufferPools; i++) {
				if (pool == &adapter->rx_buff_pool[i])
					continue;
				if (!adapter->rx_buff_pool[i].active)
					continue;
				if (mtu <= adapter->rx_buff_pool[i].buff_size)
					break;
			}

			if (i == IbmVethNumBufferPools) {
				ibmveth_error_printk("no active pool >= MTU\n");
				return -EPERM;
			}

			pool->active = 0;
			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			}
		}
	} else if (attr == &veth_num_attr) {
		if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT)
			return -EINVAL;
		else {
			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				pool->size = value;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else
				pool->size = value;
		}
	} else if (attr == &veth_size_attr) {
		if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE)
			return -EINVAL;
		else {
			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				pool->buff_size = value;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else
				pool->buff_size = value;
		}
	}

	/* kick the interrupt handler to allocate/deallocate pools */
	ibmveth_interrupt(netdev->irq, netdev);
	return count;
}
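
/*
 * Example usage (the paths are illustrative; the vio unit address
 * varies by system):
 *
 *   echo 1    > /sys/devices/vio/30000002/pool3/active
 *   echo 512  > /sys/devices/vio/30000002/pool3/num
 *   echo 9216 > /sys/devices/vio/30000002/pool3/size
 *
 * Each write bounces a running interface via ibmveth_close()/
 * ibmveth_open() so the new pool geometry takes effect immediately.
 */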

#define ATTR(_name, _mode)				\
	struct attribute veth_##_name##_attr = {	\
	.name = __stringify(_name), .mode = _mode,	\
	};

static ATTR(active, 0644);
static ATTR(num, 0644);
static ATTR(size, 0644);

static struct attribute * veth_pool_attrs[] = {
	&veth_active_attr,
	&veth_num_attr,
	&veth_size_attr,
	NULL,
};

static struct sysfs_ops veth_pool_ops = {
	.show   = veth_pool_show,
	.store  = veth_pool_store,
};

static struct kobj_type ktype_veth_pool = {
	.release        = NULL,
	.sysfs_ops      = &veth_pool_ops,
	.default_attrs  = veth_pool_attrs,
};

static struct vio_device_id ibmveth_device_table[] __devinitdata= {
	{ "network", "IBM,l-lan"},
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, ibmveth_device_table);
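
/*
 * The table above matches device tree nodes of type "network" that are
 * compatible with "IBM,l-lan", i.e. the virtual ethernet slots the
 * hypervisor exposes to the partition.
 */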

static struct vio_driver ibmveth_driver = {
	.id_table	= ibmveth_device_table,
	.probe		= ibmveth_probe,
	.remove		= ibmveth_remove,
	.driver		= {
		.name	= ibmveth_driver_name,
		.owner	= THIS_MODULE,
	}
};

static int __init ibmveth_module_init(void)
{
	ibmveth_printk("%s: %s %s\n", ibmveth_driver_name, ibmveth_driver_string, ibmveth_driver_version);

	ibmveth_proc_register_driver();

	return vio_register_driver(&ibmveth_driver);
}

static void __exit ibmveth_module_exit(void)
{
	vio_unregister_driver(&ibmveth_driver);
	ibmveth_proc_unregister_driver();
}

module_init(ibmveth_module_init);
module_exit(ibmveth_module_exit);