drivers/net/ibmveth.c
1 /**************************************************************************/
2 /*                                                                        */
3 /* IBM eServer i/pSeries Virtual Ethernet Device Driver                   */
4 /* Copyright (C) 2003 IBM Corp.                                           */
5 /*  Originally written by Dave Larson (larson1@us.ibm.com)                */
6 /*  Maintained by Santiago Leon (santil@us.ibm.com)                       */
7 /*                                                                        */
8 /*  This program is free software; you can redistribute it and/or modify  */
9 /*  it under the terms of the GNU General Public License as published by  */
10 /*  the Free Software Foundation; either version 2 of the License, or     */
11 /*  (at your option) any later version.                                   */
12 /*                                                                        */
13 /*  This program is distributed in the hope that it will be useful,       */
14 /*  but WITHOUT ANY WARRANTY; without even the implied warranty of        */
15 /*  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         */
16 /*  GNU General Public License for more details.                          */
17 /*                                                                        */
18 /*  You should have received a copy of the GNU General Public License     */
19 /*  along with this program; if not, write to the Free Software           */
20 /*  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  */
21 /*                                                                   USA  */
22 /*                                                                        */
23 /* This module contains the implementation of a virtual ethernet device   */
24 /* for use with IBM i/pSeries LPAR Linux.  It utilizes the logical LAN    */
25 /* option of the RS/6000 Platform Architecture to interface with virtual  */
26 /* ethernet NICs that are presented to the partition by the hypervisor.   */
27 /*                                                                        */
28 /**************************************************************************/
29 /*
30   TODO:
31   - remove frag processing code - no longer needed
32   - add support for sysfs
33   - possibly remove procfs support
34 */
35
36 #include <linux/module.h>
37 #include <linux/types.h>
38 #include <linux/errno.h>
39 #include <linux/ioport.h>
40 #include <linux/dma-mapping.h>
41 #include <linux/kernel.h>
42 #include <linux/netdevice.h>
43 #include <linux/etherdevice.h>
44 #include <linux/skbuff.h>
45 #include <linux/init.h>
46 #include <linux/delay.h>
47 #include <linux/mm.h>
48 #include <linux/ethtool.h>
49 #include <linux/proc_fs.h>
50 #include <asm/semaphore.h>
51 #include <asm/hvcall.h>
52 #include <asm/atomic.h>
53 #include <asm/vio.h>
54 #include <asm/uaccess.h>
55 #include <linux/seq_file.h>
56
57 #include "ibmveth.h"
58
59 #undef DEBUG
60
61 #define ibmveth_printk(fmt, args...) \
62   printk(KERN_DEBUG "%s: " fmt, __FILE__, ## args)
63
64 #define ibmveth_error_printk(fmt, args...) \
65   printk(KERN_ERR "(%s:%3.3d ua:%x) ERROR: " fmt, __FILE__, __LINE__ , adapter->vdev->unit_address, ## args)
66
67 #ifdef DEBUG
68 #define ibmveth_debug_printk_no_adapter(fmt, args...) \
69   printk(KERN_DEBUG "(%s:%3.3d): " fmt, __FILE__, __LINE__ , ## args)
70 #define ibmveth_debug_printk(fmt, args...) \
71   printk(KERN_DEBUG "(%s:%3.3d ua:%x): " fmt, __FILE__, __LINE__ , adapter->vdev->unit_address, ## args)
72 #define ibmveth_assert(expr) \
73   if(!(expr)) {                                   \
74     printk(KERN_DEBUG "assertion failed (%s:%3.3d ua:%x): %s\n", __FILE__, __LINE__, adapter->vdev->unit_address, #expr); \
75     BUG(); \
76   }
77 #else
78 #define ibmveth_debug_printk_no_adapter(fmt, args...)
79 #define ibmveth_debug_printk(fmt, args...)
80 #define ibmveth_assert(expr)
81 #endif
82
83 static int ibmveth_open(struct net_device *dev);
84 static int ibmveth_close(struct net_device *dev);
85 static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
86 static int ibmveth_poll(struct net_device *dev, int *budget);
87 static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *dev);
88 static struct net_device_stats *ibmveth_get_stats(struct net_device *dev);
89 static void ibmveth_set_multicast_list(struct net_device *dev);
90 static int ibmveth_change_mtu(struct net_device *dev, int new_mtu);
91 static void ibmveth_proc_register_driver(void);
92 static void ibmveth_proc_unregister_driver(void);
93 static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter);
94 static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter);
95 static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
96 static inline void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
97 static struct kobj_type ktype_veth_pool;
98
99 #ifdef CONFIG_PROC_FS
100 #define IBMVETH_PROC_DIR "net/ibmveth"
101 static struct proc_dir_entry *ibmveth_proc_dir;
102 #endif
103
104 static const char ibmveth_driver_name[] = "ibmveth";
105 static const char ibmveth_driver_string[] = "IBM i/pSeries Virtual Ethernet Driver";
106 #define ibmveth_driver_version "1.03"
107
108 MODULE_AUTHOR("Santiago Leon <santil@us.ibm.com>");
109 MODULE_DESCRIPTION("IBM i/pSeries Virtual Ethernet Driver");
110 MODULE_LICENSE("GPL");
111 MODULE_VERSION(ibmveth_driver_version);
112
113 /* simple methods of getting data from the current rxq entry */
114 static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
115 {
116         return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].toggle == adapter->rx_queue.toggle);
117 }
118
119 static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
120 {
121         return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].valid);
122 }
123
124 static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
125 {
126         return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].offset);
127 }
128
129 static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
130 {
131         return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);
132 }
133
134 /* setup the initial settings for a buffer pool */
135 static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, u32 pool_index, u32 pool_size, u32 buff_size, u32 pool_active)
136 {
137         pool->size = pool_size;
138         pool->index = pool_index;
139         pool->buff_size = buff_size;
140         pool->threshold = pool_size / 2;
141         pool->active = pool_active;
142 }
143
144 /* allocate and set up a buffer pool - called during open */
145 static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
146 {
147         int i;
148
149         pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);
150
151         if(!pool->free_map) {
152                 return -1;
153         }
154
155         pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
156         if(!pool->dma_addr) {
157                 kfree(pool->free_map);
158                 pool->free_map = NULL;
159                 return -1;
160         }
161
162         pool->skbuff = kmalloc(sizeof(void*) * pool->size, GFP_KERNEL);
163
164         if(!pool->skbuff) {
165                 kfree(pool->dma_addr);
166                 pool->dma_addr = NULL;
167
168                 kfree(pool->free_map);
169                 pool->free_map = NULL;
170                 return -1;
171         }
172
173         memset(pool->skbuff, 0, sizeof(void*) * pool->size);
174         memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size);
175
176         for(i = 0; i < pool->size; ++i) {
177                 pool->free_map[i] = i;
178         }
179
180         atomic_set(&pool->available, 0);
181         pool->producer_index = 0;
182         pool->consumer_index = 0;
183
184         return 0;
185 }
186
187 /* replenish the buffers for a pool.  note that we don't need to
188  * skb_reserve these since they are used for incoming...
189  */
190 static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struct ibmveth_buff_pool *pool)
191 {
192         u32 i;
193         u32 count = pool->size - atomic_read(&pool->available);
194         u32 buffers_added = 0;
195
196         mb();
197
198         for(i = 0; i < count; ++i) {
199                 struct sk_buff *skb;
200                 unsigned int free_index, index;
201                 u64 correlator;
202                 union ibmveth_buf_desc desc;
203                 unsigned long lpar_rc;
204                 dma_addr_t dma_addr;
205
206                 skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
207
208                 if(!skb) {
209                         ibmveth_debug_printk("replenish: unable to allocate skb\n");
210                         adapter->replenish_no_mem++;
211                         break;
212                 }
213
214                 free_index = pool->consumer_index;
215                 pool->consumer_index = (pool->consumer_index + 1) % pool->size;
216                 index = pool->free_map[free_index];
217
218                 ibmveth_assert(index != IBM_VETH_INVALID_MAP);
219                 ibmveth_assert(pool->skbuff[index] == NULL);
220
221                 dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
222                                 pool->buff_size, DMA_FROM_DEVICE);
223
224                 pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
225                 pool->dma_addr[index] = dma_addr;
226                 pool->skbuff[index] = skb;
227
228                 correlator = ((u64)pool->index << 32) | index;
229                 *(u64*)skb->data = correlator;
230
231                 desc.desc = 0;
232                 desc.fields.valid = 1;
233                 desc.fields.length = pool->buff_size;
234                 desc.fields.address = dma_addr;
235
236                 lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
237
238                 if(lpar_rc != H_SUCCESS) {
239                         pool->free_map[free_index] = index;
240                         pool->skbuff[index] = NULL;
241                         if (pool->consumer_index == 0)
242                                 pool->consumer_index = pool->size - 1;
243                         else
244                                 pool->consumer_index--;
245                         dma_unmap_single(&adapter->vdev->dev,
246                                         pool->dma_addr[index], pool->buff_size,
247                                         DMA_FROM_DEVICE);
248                         dev_kfree_skb_any(skb);
249                         adapter->replenish_add_buff_failure++;
250                         break;
251                 } else {
252                         buffers_added++;
253                         adapter->replenish_add_buff_success++;
254                 }
255         }
256
257         mb();
258         atomic_add(buffers_added, &(pool->available));
259 }
260
261 /* replenish routine */
262 static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
263 {
264         int i;
265
266         adapter->replenish_task_cycles++;
267
268         for(i = 0; i < IbmVethNumBufferPools; i++)
269                 if(adapter->rx_buff_pool[i].active)
270                         ibmveth_replenish_buffer_pool(adapter,
271                                                      &adapter->rx_buff_pool[i]);
272
273         adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);
274 }
275
276 /* empty and free a buffer pool - also used to do cleanup in error paths */
277 static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, struct ibmveth_buff_pool *pool)
278 {
279         int i;
280
281         kfree(pool->free_map);
282         pool->free_map = NULL;
283
284         if(pool->skbuff && pool->dma_addr) {
285                 for(i = 0; i < pool->size; ++i) {
286                         struct sk_buff *skb = pool->skbuff[i];
287                         if(skb) {
288                                 dma_unmap_single(&adapter->vdev->dev,
289                                                  pool->dma_addr[i],
290                                                  pool->buff_size,
291                                                  DMA_FROM_DEVICE);
292                                 dev_kfree_skb_any(skb);
293                                 pool->skbuff[i] = NULL;
294                         }
295                 }
296         }
297
298         if(pool->dma_addr) {
299                 kfree(pool->dma_addr);
300                 pool->dma_addr = NULL;
301         }
302
303         if(pool->skbuff) {
304                 kfree(pool->skbuff);
305                 pool->skbuff = NULL;
306         }
307 }
308
309 /* remove a buffer from a pool */
310 static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter, u64 correlator)
311 {
312         unsigned int pool  = correlator >> 32;
313         unsigned int index = correlator & 0xffffffffUL;
314         unsigned int free_index;
315         struct sk_buff *skb;
316
317         ibmveth_assert(pool < IbmVethNumBufferPools);
318         ibmveth_assert(index < adapter->rx_buff_pool[pool].size);
319
320         skb = adapter->rx_buff_pool[pool].skbuff[index];
321
322         ibmveth_assert(skb != NULL);
323
324         adapter->rx_buff_pool[pool].skbuff[index] = NULL;
325
326         dma_unmap_single(&adapter->vdev->dev,
327                          adapter->rx_buff_pool[pool].dma_addr[index],
328                          adapter->rx_buff_pool[pool].buff_size,
329                          DMA_FROM_DEVICE);
330
331         free_index = adapter->rx_buff_pool[pool].producer_index;
332         adapter->rx_buff_pool[pool].producer_index
333                 = (adapter->rx_buff_pool[pool].producer_index + 1)
334                 % adapter->rx_buff_pool[pool].size;
335         adapter->rx_buff_pool[pool].free_map[free_index] = index;
336
337         mb();
338
339         atomic_dec(&(adapter->rx_buff_pool[pool].available));
340 }
341
342 /* get the current buffer on the rx queue */
343 static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter)
344 {
345         u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
346         unsigned int pool = correlator >> 32;
347         unsigned int index = correlator & 0xffffffffUL;
348
349         ibmveth_assert(pool < IbmVethNumBufferPools);
350         ibmveth_assert(index < adapter->rx_buff_pool[pool].size);
351
352         return adapter->rx_buff_pool[pool].skbuff[index];
353 }
354
355 /* recycle the current buffer on the rx queue */
356 static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
357 {
358         u32 q_index = adapter->rx_queue.index;
359         u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
360         unsigned int pool = correlator >> 32;
361         unsigned int index = correlator & 0xffffffffUL;
362         union ibmveth_buf_desc desc;
363         unsigned long lpar_rc;
364
365         ibmveth_assert(pool < IbmVethNumBufferPools);
366         ibmveth_assert(index < adapter->rx_buff_pool[pool].size);
367
368         if(!adapter->rx_buff_pool[pool].active) {
369                 ibmveth_rxq_harvest_buffer(adapter);
370                 ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
371                 return;
372         }
373
374         desc.desc = 0;
375         desc.fields.valid = 1;
376         desc.fields.length = adapter->rx_buff_pool[pool].buff_size;
377         desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];
378
379         lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
380
381         if(lpar_rc != H_SUCCESS) {
382                 ibmveth_debug_printk("h_add_logical_lan_buffer failed during recycle rc=%ld", lpar_rc);
383                 ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
384         }
385
386         if(++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
387                 adapter->rx_queue.index = 0;
388                 adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
389         }
390 }
391
392 static inline void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
393 {
394         ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
395
396         if(++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
397                 adapter->rx_queue.index = 0;
398                 adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
399         }
400 }
401
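/* tear down everything set up by open: unmap and free the buffer list
 * and filter list pages, the receive queue, and any active buffer pools
 */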
402 static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
403 {
404         int i;
405
406         if(adapter->buffer_list_addr != NULL) {
407                 if(!dma_mapping_error(adapter->buffer_list_dma)) {
408                         dma_unmap_single(&adapter->vdev->dev,
409                                         adapter->buffer_list_dma, 4096,
410                                         DMA_BIDIRECTIONAL);
411                         adapter->buffer_list_dma = DMA_ERROR_CODE;
412                 }
413                 free_page((unsigned long)adapter->buffer_list_addr);
414                 adapter->buffer_list_addr = NULL;
415         }
416
417         if(adapter->filter_list_addr != NULL) {
418                 if(!dma_mapping_error(adapter->filter_list_dma)) {
419                         dma_unmap_single(&adapter->vdev->dev,
420                                         adapter->filter_list_dma, 4096,
421                                         DMA_BIDIRECTIONAL);
422                         adapter->filter_list_dma = DMA_ERROR_CODE;
423                 }
424                 free_page((unsigned long)adapter->filter_list_addr);
425                 adapter->filter_list_addr = NULL;
426         }
427
428         if(adapter->rx_queue.queue_addr != NULL) {
429                 if(!dma_mapping_error(adapter->rx_queue.queue_dma)) {
430                         dma_unmap_single(&adapter->vdev->dev,
431                                         adapter->rx_queue.queue_dma,
432                                         adapter->rx_queue.queue_len,
433                                         DMA_BIDIRECTIONAL);
434                         adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
435                 }
436                 kfree(adapter->rx_queue.queue_addr);
437                 adapter->rx_queue.queue_addr = NULL;
438         }
439
440         for(i = 0; i<IbmVethNumBufferPools; i++)
441                 if (adapter->rx_buff_pool[i].active)
442                         ibmveth_free_buffer_pool(adapter,
443                                                  &adapter->rx_buff_pool[i]);
444 }
445
446 static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
447         union ibmveth_buf_desc rxq_desc, u64 mac_address)
448 {
449         int rc, try_again = 1;
450
451         /* After a kexec the adapter will still be open, so our attempt to
452          * open it will fail. So if we get a failure we free the adapter and
453          * try again, but only once. */
454 retry:
455         rc = h_register_logical_lan(adapter->vdev->unit_address,
456                                     adapter->buffer_list_dma, rxq_desc.desc,
457                                     adapter->filter_list_dma, mac_address);
458
459         if (rc != H_SUCCESS && try_again) {
460                 do {
461                         rc = h_free_logical_lan(adapter->vdev->unit_address);
462                 } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));
463
464                 try_again = 0;
465                 goto retry;
466         }
467
468         return rc;
469 }
470
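/* open the device: allocate the buffer list, filter list and receive queue,
 * map them for DMA, register the logical LAN with the hypervisor, allocate
 * the active buffer pools and request the adapter interrupt
 */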
471 static int ibmveth_open(struct net_device *netdev)
472 {
473         struct ibmveth_adapter *adapter = netdev->priv;
474         u64 mac_address = 0;
475         int rxq_entries = 1;
476         unsigned long lpar_rc;
477         int rc;
478         union ibmveth_buf_desc rxq_desc;
479         int i;
480
481         ibmveth_debug_printk("open starting\n");
482
483         for(i = 0; i<IbmVethNumBufferPools; i++)
484                 rxq_entries += adapter->rx_buff_pool[i].size;
485
486         adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
487         adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
488
489         if(!adapter->buffer_list_addr || !adapter->filter_list_addr) {
490                 ibmveth_error_printk("unable to allocate filter or buffer list pages\n");
491                 ibmveth_cleanup(adapter);
492                 return -ENOMEM;
493         }
494
495         adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) * rxq_entries;
496         adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len, GFP_KERNEL);
497
498         if(!adapter->rx_queue.queue_addr) {
499                 ibmveth_error_printk("unable to allocate rx queue pages\n");
500                 ibmveth_cleanup(adapter);
501                 return -ENOMEM;
502         }
503
504         adapter->buffer_list_dma = dma_map_single(&adapter->vdev->dev,
505                         adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
506         adapter->filter_list_dma = dma_map_single(&adapter->vdev->dev,
507                         adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
508         adapter->rx_queue.queue_dma = dma_map_single(&adapter->vdev->dev,
509                         adapter->rx_queue.queue_addr,
510                         adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);
511
512         if((dma_mapping_error(adapter->buffer_list_dma) ) ||
513            (dma_mapping_error(adapter->filter_list_dma)) ||
514            (dma_mapping_error(adapter->rx_queue.queue_dma))) {
515                 ibmveth_error_printk("unable to map filter or buffer list pages\n");
516                 ibmveth_cleanup(adapter);
517                 return -ENOMEM;
518         }
519
520         adapter->rx_queue.index = 0;
521         adapter->rx_queue.num_slots = rxq_entries;
522         adapter->rx_queue.toggle = 1;
523
524         memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
525         mac_address = mac_address >> 16;
526
527         rxq_desc.desc = 0;
528         rxq_desc.fields.valid = 1;
529         rxq_desc.fields.length = adapter->rx_queue.queue_len;
530         rxq_desc.fields.address = adapter->rx_queue.queue_dma;
531
532         ibmveth_debug_printk("buffer list @ 0x%p\n", adapter->buffer_list_addr);
533         ibmveth_debug_printk("filter list @ 0x%p\n", adapter->filter_list_addr);
534         ibmveth_debug_printk("receive q   @ 0x%p\n", adapter->rx_queue.queue_addr);
535
536         h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
537
538         lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);
539
540         if(lpar_rc != H_SUCCESS) {
541                 ibmveth_error_printk("h_register_logical_lan failed with %ld\n", lpar_rc);
542                 ibmveth_error_printk("buffer TCE:0x%lx filter TCE:0x%lx rxq desc:0x%lx MAC:0x%lx\n",
543                                      adapter->buffer_list_dma,
544                                      adapter->filter_list_dma,
545                                      rxq_desc.desc,
546                                      mac_address);
547                 ibmveth_cleanup(adapter);
548                 return -ENONET;
549         }
550
551         for(i = 0; i<IbmVethNumBufferPools; i++) {
552                 if(!adapter->rx_buff_pool[i].active)
553                         continue;
554                 if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
555                         ibmveth_error_printk("unable to alloc pool\n");
556                         adapter->rx_buff_pool[i].active = 0;
557                         ibmveth_cleanup(adapter);
558                         return -ENOMEM ;
559                 }
560         }
561
562         ibmveth_debug_printk("registering irq 0x%x\n", netdev->irq);
563         if((rc = request_irq(netdev->irq, &ibmveth_interrupt, 0, netdev->name, netdev)) != 0) {
564                 ibmveth_error_printk("unable to request irq 0x%x, rc %d\n", netdev->irq, rc);
565                 do {
566                         rc = h_free_logical_lan(adapter->vdev->unit_address);
567                 } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));
568
569                 ibmveth_cleanup(adapter);
570                 return rc;
571         }
572
573         ibmveth_debug_printk("initial replenish cycle\n");
574         ibmveth_interrupt(netdev->irq, netdev);
575
576         netif_start_queue(netdev);
577
578         ibmveth_debug_printk("open complete\n");
579
580         return 0;
581 }
582
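/* close the device: stop the queue, free the interrupt, free the logical
 * LAN in the hypervisor and release all adapter resources
 */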
583 static int ibmveth_close(struct net_device *netdev)
584 {
585         struct ibmveth_adapter *adapter = netdev->priv;
586         long lpar_rc;
587
588         ibmveth_debug_printk("close starting\n");
589
590         if (!adapter->pool_config)
591                 netif_stop_queue(netdev);
592
593         free_irq(netdev->irq, netdev);
594
595         do {
596                 lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
597         } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));
598
599         if(lpar_rc != H_SUCCESS)
600         {
601                 ibmveth_error_printk("h_free_logical_lan failed with %lx, continuing with close\n",
602                                      lpar_rc);
603         }
604
605         adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);
606
607         ibmveth_cleanup(adapter);
608
609         ibmveth_debug_printk("close complete\n");
610
611         return 0;
612 }
613
614 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) {
615         cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE);
616         cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | ADVERTISED_FIBRE);
617         cmd->speed = SPEED_1000;
618         cmd->duplex = DUPLEX_FULL;
619         cmd->port = PORT_FIBRE;
620         cmd->phy_address = 0;
621         cmd->transceiver = XCVR_INTERNAL;
622         cmd->autoneg = AUTONEG_ENABLE;
623         cmd->maxtxpkt = 0;
624         cmd->maxrxpkt = 1;
625         return 0;
626 }
627
628 static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info) {
629         strncpy(info->driver, ibmveth_driver_name, sizeof(info->driver) - 1);
630         strncpy(info->version, ibmveth_driver_version, sizeof(info->version) - 1);
631 }
632
633 static u32 netdev_get_link(struct net_device *dev) {
634         return 1;
635 }
636
637 static const struct ethtool_ops netdev_ethtool_ops = {
638         .get_drvinfo            = netdev_get_drvinfo,
639         .get_settings           = netdev_get_settings,
640         .get_link               = netdev_get_link,
641         .get_sg                 = ethtool_op_get_sg,
642         .get_tx_csum            = ethtool_op_get_tx_csum,
643 };
644
645 static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
646 {
647         return -EOPNOTSUPP;
648 }
649
650 #define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))
651
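/* transmit a frame: map the skb data (and any page fragments) for DMA,
 * pass up to six descriptors to the hypervisor with h_send_logical_lan,
 * then unmap the buffers and free the skb
 */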
652 static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
653 {
654         struct ibmveth_adapter *adapter = netdev->priv;
655         union ibmveth_buf_desc desc[IbmVethMaxSendFrags];
656         unsigned long lpar_rc;
657         int nfrags = 0, curfrag;
658         unsigned long correlator;
659         unsigned long flags;
660         unsigned int retry_count;
661         unsigned int tx_dropped = 0;
662         unsigned int tx_bytes = 0;
663         unsigned int tx_packets = 0;
664         unsigned int tx_send_failed = 0;
665         unsigned int tx_map_failed = 0;
666
667
668         if ((skb_shinfo(skb)->nr_frags + 1) > IbmVethMaxSendFrags) {
669                 tx_dropped++;
670                 goto out;
671         }
672
673         memset(&desc, 0, sizeof(desc));
674
675         /* nfrags = number of frags after the initial fragment */
676         nfrags = skb_shinfo(skb)->nr_frags;
677
678         if(nfrags)
679                 adapter->tx_multidesc_send++;
680
681         /* map the initial fragment */
682         desc[0].fields.length  = nfrags ? skb->len - skb->data_len : skb->len;
683         desc[0].fields.address = dma_map_single(&adapter->vdev->dev, skb->data,
684                                         desc[0].fields.length, DMA_TO_DEVICE);
685         desc[0].fields.valid   = 1;
686
687         if(dma_mapping_error(desc[0].fields.address)) {
688                 ibmveth_error_printk("tx: unable to map initial fragment\n");
689                 tx_map_failed++;
690                 tx_dropped++;
691                 goto out;
692         }
693
694         curfrag = nfrags;
695
696         /* map fragments past the initial portion if there are any */
697         while(curfrag--) {
698                 skb_frag_t *frag = &skb_shinfo(skb)->frags[curfrag];
699                 desc[curfrag+1].fields.address
700                         = dma_map_single(&adapter->vdev->dev,
701                                 page_address(frag->page) + frag->page_offset,
702                                 frag->size, DMA_TO_DEVICE);
703                 desc[curfrag+1].fields.length = frag->size;
704                 desc[curfrag+1].fields.valid  = 1;
705
706                 if(dma_mapping_error(desc[curfrag+1].fields.address)) {
707                         ibmveth_error_printk("tx: unable to map fragment %d\n", curfrag);
708                         tx_map_failed++;
709                         tx_dropped++;
710                         /* Free all the mappings we just created */
711                         while(curfrag < nfrags) {
712                                 dma_unmap_single(&adapter->vdev->dev,
713                                                  desc[curfrag+1].fields.address,
714                                                  desc[curfrag+1].fields.length,
715                                                  DMA_TO_DEVICE);
716                                 curfrag++;
717                         }
718                         goto out;
719                 }
720         }
721
722         /* send the frame. Arbitrarily set retry_count to 1024 */
723         correlator = 0;
724         retry_count = 1024;
725         do {
726                 lpar_rc = h_send_logical_lan(adapter->vdev->unit_address,
727                                              desc[0].desc,
728                                              desc[1].desc,
729                                              desc[2].desc,
730                                              desc[3].desc,
731                                              desc[4].desc,
732                                              desc[5].desc,
733                                              correlator,
734                                              &correlator);
735         } while ((lpar_rc == H_BUSY) && (retry_count--));
736
737         if(lpar_rc != H_SUCCESS && lpar_rc != H_DROPPED) {
738                 int i;
739                 ibmveth_error_printk("tx: h_send_logical_lan failed with rc=%ld\n", lpar_rc);
740                 for(i = 0; i < 6; i++) {
741                         ibmveth_error_printk("tx: desc[%i] valid=%d, len=%d, address=0x%x\n", i,
742                                              desc[i].fields.valid, desc[i].fields.length, desc[i].fields.address);
743                 }
744                 tx_send_failed++;
745                 tx_dropped++;
746         } else {
747                 tx_packets++;
748                 tx_bytes += skb->len;
749                 netdev->trans_start = jiffies;
750         }
751
752         do {
753                 dma_unmap_single(&adapter->vdev->dev,
754                                 desc[nfrags].fields.address,
755                                 desc[nfrags].fields.length, DMA_TO_DEVICE);
756         } while(--nfrags >= 0);
757
758 out:    spin_lock_irqsave(&adapter->stats_lock, flags);
759         adapter->stats.tx_dropped += tx_dropped;
760         adapter->stats.tx_bytes += tx_bytes;
761         adapter->stats.tx_packets += tx_packets;
762         adapter->tx_send_failed += tx_send_failed;
763         adapter->tx_map_failed += tx_map_failed;
764         spin_unlock_irqrestore(&adapter->stats_lock, flags);
765
766         dev_kfree_skb(skb);
767         return 0;
768 }
769
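/* NAPI poll routine: pass received frames up the stack, replenish the
 * buffer pools, and re-enable the virtual interrupt once the receive
 * queue is drained
 */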
770 static int ibmveth_poll(struct net_device *netdev, int *budget)
771 {
772         struct ibmveth_adapter *adapter = netdev->priv;
773         int max_frames_to_process = netdev->quota;
774         int frames_processed = 0;
775         int more_work = 1;
776         unsigned long lpar_rc;
777
778  restart_poll:
779         do {
780                 struct net_device *netdev = adapter->netdev;
781
782                 if(ibmveth_rxq_pending_buffer(adapter)) {
783                         struct sk_buff *skb;
784
785                         rmb();
786
787                         if(!ibmveth_rxq_buffer_valid(adapter)) {
788                                 wmb(); /* suggested by larson1 */
789                                 adapter->rx_invalid_buffer++;
790                                 ibmveth_debug_printk("recycling invalid buffer\n");
791                                 ibmveth_rxq_recycle_buffer(adapter);
792                         } else {
793                                 int length = ibmveth_rxq_frame_length(adapter);
794                                 int offset = ibmveth_rxq_frame_offset(adapter);
795                                 skb = ibmveth_rxq_get_buffer(adapter);
796
797                                 ibmveth_rxq_harvest_buffer(adapter);
798
799                                 skb_reserve(skb, offset);
800                                 skb_put(skb, length);
801                                 skb->protocol = eth_type_trans(skb, netdev);
802
803                                 netif_receive_skb(skb); /* send it up */
804
805                                 adapter->stats.rx_packets++;
806                                 adapter->stats.rx_bytes += length;
807                                 frames_processed++;
808                                 netdev->last_rx = jiffies;
809                         }
810                 } else {
811                         more_work = 0;
812                 }
813         } while(more_work && (frames_processed < max_frames_to_process));
814
815         ibmveth_replenish_task(adapter);
816
817         if(more_work) {
818                 /* more work to do - return that we are not done yet */
819                 netdev->quota -= frames_processed;
820                 *budget -= frames_processed;
821                 return 1;
822         }
823
824         /* we think we are done - reenable interrupts, then check once more to make sure we are done */
825         lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_ENABLE);
826
827         ibmveth_assert(lpar_rc == H_SUCCESS);
828
829         netif_rx_complete(netdev);
830
831         if(ibmveth_rxq_pending_buffer(adapter) && netif_rx_reschedule(netdev, frames_processed))
832         {
833                 lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
834                 ibmveth_assert(lpar_rc == H_SUCCESS);
835                 more_work = 1;
836                 goto restart_poll;
837         }
838
839         netdev->quota -= frames_processed;
840         *budget -= frames_processed;
841
842         /* we really are done */
843         return 0;
844 }
845
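/* adapter interrupt handler: disable further virtual interrupts and
 * schedule NAPI polling
 */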
846 static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
847 {
848         struct net_device *netdev = dev_instance;
849         struct ibmveth_adapter *adapter = netdev->priv;
850         unsigned long lpar_rc;
851
852         if(netif_rx_schedule_prep(netdev)) {
853                 lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
854                 ibmveth_assert(lpar_rc == H_SUCCESS);
855                 __netif_rx_schedule(netdev);
856         }
857         return IRQ_HANDLED;
858 }
859
860 static struct net_device_stats *ibmveth_get_stats(struct net_device *dev)
861 {
862         struct ibmveth_adapter *adapter = dev->priv;
863         return &adapter->stats;
864 }
865
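/* push the multicast filter state to the hypervisor: disable filtering in
 * promiscuous mode or when the list exceeds the filter size, otherwise
 * rebuild the filter table from the device's multicast list
 */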
866 static void ibmveth_set_multicast_list(struct net_device *netdev)
867 {
868         struct ibmveth_adapter *adapter = netdev->priv;
869         unsigned long lpar_rc;
870
871         if((netdev->flags & IFF_PROMISC) || (netdev->mc_count > adapter->mcastFilterSize)) {
872                 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
873                                            IbmVethMcastEnableRecv |
874                                            IbmVethMcastDisableFiltering,
875                                            0);
876                 if(lpar_rc != H_SUCCESS) {
877                         ibmveth_error_printk("h_multicast_ctrl rc=%ld when entering promisc mode\n", lpar_rc);
878                 }
879         } else {
880                 struct dev_mc_list *mclist = netdev->mc_list;
881                 int i;
882                 /* clear the filter table & disable filtering */
883                 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
884                                            IbmVethMcastEnableRecv |
885                                            IbmVethMcastDisableFiltering |
886                                            IbmVethMcastClearFilterTable,
887                                            0);
888                 if(lpar_rc != H_SUCCESS) {
889                         ibmveth_error_printk("h_multicast_ctrl rc=%ld when attempting to clear filter table\n", lpar_rc);
890                 }
891                 /* add the addresses to the filter table */
892                 for(i = 0; i < netdev->mc_count; ++i, mclist = mclist->next) {
893                         // add the multicast address to the filter table
894                         unsigned long mcast_addr = 0;
895                         memcpy(((char *)&mcast_addr)+2, mclist->dmi_addr, 6);
896                         lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
897                                                    IbmVethMcastAddFilter,
898                                                    mcast_addr);
899                         if(lpar_rc != H_SUCCESS) {
900                                 ibmveth_error_printk("h_multicast_ctrl rc=%ld when adding an entry to the filter table\n", lpar_rc);
901                         }
902                 }
903
904                 /* re-enable filtering */
905                 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
906                                            IbmVethMcastEnableFiltering,
907                                            0);
908                 if(lpar_rc != H_SUCCESS) {
909                         ibmveth_error_printk("h_multicast_ctrl rc=%ld when enabling filtering\n", lpar_rc);
910                 }
911         }
912 }
913
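/* a new MTU is only accepted if an active buffer pool is large enough to
 * hold a frame of that size plus the buffer overhead
 */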
914 static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
915 {
916         struct ibmveth_adapter *adapter = dev->priv;
917         int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
918         int i;
919
920         if (new_mtu < IBMVETH_MAX_MTU)
921                 return -EINVAL;
922
923         /* Look for an active buffer pool that can hold the new MTU */
924         for(i = 0; i<IbmVethNumBufferPools; i++) {
925                 if (!adapter->rx_buff_pool[i].active)
926                         continue;
927                 if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
928                         dev->mtu = new_mtu;
929                         return 0;
930                 }
931         }
932         return -EINVAL;
933 }
934
935 #ifdef CONFIG_NET_POLL_CONTROLLER
936 static void ibmveth_poll_controller(struct net_device *dev)
937 {
938         ibmveth_replenish_task(dev->priv);
939         ibmveth_interrupt(dev->irq, dev);
940 }
941 #endif
942
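/* probe: read the MAC address and multicast filter size from firmware
 * properties, allocate and initialize the net_device and the per-pool
 * sysfs objects, then register the network device
 */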
943 static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
944 {
945         int rc, i;
946         struct net_device *netdev;
947         struct ibmveth_adapter *adapter = NULL;
948
949         unsigned char *mac_addr_p;
950         unsigned int *mcastFilterSize_p;
951
952
953         ibmveth_debug_printk_no_adapter("entering ibmveth_probe for UA 0x%x\n",
954                                         dev->unit_address);
955
956         mac_addr_p = (unsigned char *) vio_get_attribute(dev, VETH_MAC_ADDR, 0);
957         if(!mac_addr_p) {
958                 printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find VETH_MAC_ADDR "
959                                 "attribute\n", __FILE__, __LINE__);
960                 return 0;
961         }
962
963         mcastFilterSize_p= (unsigned int *) vio_get_attribute(dev, VETH_MCAST_FILTER_SIZE, 0);
964         if(!mcastFilterSize_p) {
965                 printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find "
966                                 "VETH_MCAST_FILTER_SIZE attribute\n",
967                                 __FILE__, __LINE__);
968                 return 0;
969         }
970
971         netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));
972
973         if(!netdev)
974                 return -ENOMEM;
975
976         SET_MODULE_OWNER(netdev);
977
978         adapter = netdev->priv;
979         memset(adapter, 0, sizeof(*adapter));
980         dev->dev.driver_data = netdev;
981
982         adapter->vdev = dev;
983         adapter->netdev = netdev;
984         adapter->mcastFilterSize= *mcastFilterSize_p;
985         adapter->pool_config = 0;
986
987         /*      Some older boxes running PHYP non-natively have an OF that
988                 returns a 8-byte local-mac-address field (and the first
989                 2 bytes have to be ignored) while newer boxes' OF return
990                 a 6-byte field. Note that IEEE 1275 specifies that
991                 local-mac-address must be a 6-byte field.
992                 The RPA doc specifies that the first byte must be 10b, so
993                 we'll just look for it to solve this 8 vs. 6 byte field issue */
994
995         if ((*mac_addr_p & 0x3) != 0x02)
996                 mac_addr_p += 2;
997
998         adapter->mac_addr = 0;
999         memcpy(&adapter->mac_addr, mac_addr_p, 6);
1000
1001         netdev->irq = dev->irq;
1002         netdev->open               = ibmveth_open;
1003         netdev->poll               = ibmveth_poll;
1004         netdev->weight             = 16;
1005         netdev->stop               = ibmveth_close;
1006         netdev->hard_start_xmit    = ibmveth_start_xmit;
1007         netdev->get_stats          = ibmveth_get_stats;
1008         netdev->set_multicast_list = ibmveth_set_multicast_list;
1009         netdev->do_ioctl           = ibmveth_ioctl;
1010         netdev->ethtool_ops           = &netdev_ethtool_ops;
1011         netdev->change_mtu         = ibmveth_change_mtu;
1012         SET_NETDEV_DEV(netdev, &dev->dev);
1013 #ifdef CONFIG_NET_POLL_CONTROLLER
1014         netdev->poll_controller = ibmveth_poll_controller;
1015 #endif
1016         netdev->features |= NETIF_F_LLTX;
1017         spin_lock_init(&adapter->stats_lock);
1018
1019         memcpy(&netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);
1020
1021         for(i = 0; i<IbmVethNumBufferPools; i++) {
1022                 struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
1023                 ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
1024                                          pool_count[i], pool_size[i],
1025                                          pool_active[i]);
1026                 kobj->parent = &dev->dev.kobj;
1027                 sprintf(kobj->name, "pool%d", i);
1028                 kobj->ktype = &ktype_veth_pool;
1029                 kobject_register(kobj);
1030         }
1031
1032         ibmveth_debug_printk("adapter @ 0x%p\n", adapter);
1033
1034         adapter->buffer_list_dma = DMA_ERROR_CODE;
1035         adapter->filter_list_dma = DMA_ERROR_CODE;
1036         adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
1037
1038         ibmveth_debug_printk("registering netdev...\n");
1039
1040         rc = register_netdev(netdev);
1041
1042         if(rc) {
1043                 ibmveth_debug_printk("failed to register netdev rc=%d\n", rc);
1044                 free_netdev(netdev);
1045                 return rc;
1046         }
1047
1048         ibmveth_debug_printk("registered\n");
1049
1050         ibmveth_proc_register_adapter(adapter);
1051
1052         return 0;
1053 }
1054
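/* remove: unregister the buffer pool kobjects, the net_device and the
 * procfs entry, then free the net_device
 */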
1055 static int __devexit ibmveth_remove(struct vio_dev *dev)
1056 {
1057         struct net_device *netdev = dev->dev.driver_data;
1058         struct ibmveth_adapter *adapter = netdev->priv;
1059         int i;
1060
1061         for(i = 0; i<IbmVethNumBufferPools; i++)
1062                 kobject_unregister(&adapter->rx_buff_pool[i].kobj);
1063
1064         unregister_netdev(netdev);
1065
1066         ibmveth_proc_unregister_adapter(adapter);
1067
1068         free_netdev(netdev);
1069         return 0;
1070 }
1071
1072 #ifdef CONFIG_PROC_FS
1073 static void ibmveth_proc_register_driver(void)
1074 {
1075         ibmveth_proc_dir = proc_mkdir(IBMVETH_PROC_DIR, NULL);
1076         if (ibmveth_proc_dir) {
1077                 SET_MODULE_OWNER(ibmveth_proc_dir);
1078         }
1079 }
1080
1081 static void ibmveth_proc_unregister_driver(void)
1082 {
1083         remove_proc_entry(IBMVETH_PROC_DIR, NULL);
1084 }
1085
1086 static void *ibmveth_seq_start(struct seq_file *seq, loff_t *pos)
1087 {
1088         if (*pos == 0) {
1089                 return (void *)1;
1090         } else {
1091                 return NULL;
1092         }
1093 }
1094
1095 static void *ibmveth_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1096 {
1097         ++*pos;
1098         return NULL;
1099 }
1100
1101 static void ibmveth_seq_stop(struct seq_file *seq, void *v)
1102 {
1103 }
1104
1105 static int ibmveth_seq_show(struct seq_file *seq, void *v)
1106 {
1107         struct ibmveth_adapter *adapter = seq->private;
1108         char *current_mac = ((char*) &adapter->netdev->dev_addr);
1109         char *firmware_mac = ((char*) &adapter->mac_addr) ;
1110
1111         seq_printf(seq, "%s %s\n\n", ibmveth_driver_string, ibmveth_driver_version);
1112
1113         seq_printf(seq, "Unit Address:    0x%x\n", adapter->vdev->unit_address);
1114         seq_printf(seq, "Current MAC:     %02X:%02X:%02X:%02X:%02X:%02X\n",
1115                    current_mac[0], current_mac[1], current_mac[2],
1116                    current_mac[3], current_mac[4], current_mac[5]);
1117         seq_printf(seq, "Firmware MAC:    %02X:%02X:%02X:%02X:%02X:%02X\n",
1118                    firmware_mac[0], firmware_mac[1], firmware_mac[2],
1119                    firmware_mac[3], firmware_mac[4], firmware_mac[5]);
1120
1121         seq_printf(seq, "\nAdapter Statistics:\n");
1122         seq_printf(seq, "  TX:  skbuffs linearized:          %ld\n", adapter->tx_linearized);
1123         seq_printf(seq, "       multi-descriptor sends:      %ld\n", adapter->tx_multidesc_send);
1124         seq_printf(seq, "       skb_linearize failures:      %ld\n", adapter->tx_linearize_failed);
1125         seq_printf(seq, "       vio_map_single failures:     %ld\n", adapter->tx_map_failed);
1126         seq_printf(seq, "       send failures:               %ld\n", adapter->tx_send_failed);
1127         seq_printf(seq, "  RX:  replenish task cycles:       %ld\n", adapter->replenish_task_cycles);
1128         seq_printf(seq, "       alloc_skb_failures:          %ld\n", adapter->replenish_no_mem);
1129         seq_printf(seq, "       add buffer failures:         %ld\n", adapter->replenish_add_buff_failure);
1130         seq_printf(seq, "       invalid buffers:             %ld\n", adapter->rx_invalid_buffer);
1131         seq_printf(seq, "       no buffers:                  %ld\n", adapter->rx_no_buffer);
1132
1133         return 0;
1134 }
1135 static struct seq_operations ibmveth_seq_ops = {
1136         .start = ibmveth_seq_start,
1137         .next  = ibmveth_seq_next,
1138         .stop  = ibmveth_seq_stop,
1139         .show  = ibmveth_seq_show,
1140 };
1141
1142 static int ibmveth_proc_open(struct inode *inode, struct file *file)
1143 {
1144         struct seq_file *seq;
1145         struct proc_dir_entry *proc;
1146         int rc;
1147
1148         rc = seq_open(file, &ibmveth_seq_ops);
1149         if (!rc) {
1150                 /* recover the pointer buried in proc_dir_entry data */
1151                 seq = file->private_data;
1152                 proc = PDE(inode);
1153                 seq->private = proc->data;
1154         }
1155         return rc;
1156 }
1157
1158 static const struct file_operations ibmveth_proc_fops = {
1159         .owner   = THIS_MODULE,
1160         .open    = ibmveth_proc_open,
1161         .read    = seq_read,
1162         .llseek  = seq_lseek,
1163         .release = seq_release,
1164 };
1165
1166 static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
1167 {
1168         struct proc_dir_entry *entry;
1169         if (ibmveth_proc_dir) {
1170                 char u_addr[10];
1171                 sprintf(u_addr, "%x", adapter->vdev->unit_address);
1172                 entry = create_proc_entry(u_addr, S_IFREG, ibmveth_proc_dir);
1173                 if (!entry) {
1174                         ibmveth_error_printk("Cannot create adapter proc entry");
1175                 } else {
1176                         entry->data = (void *) adapter;
1177                         entry->proc_fops = &ibmveth_proc_fops;
1178                         SET_MODULE_OWNER(entry);
1179                 }
1180         }
1181         return;
1182 }
1183
1184 static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
1185 {
1186         if (ibmveth_proc_dir) {
1187                 char u_addr[10];
1188                 sprintf(u_addr, "%x", adapter->vdev->unit_address);
1189                 remove_proc_entry(u_addr, ibmveth_proc_dir);
1190         }
1191 }
1192
1193 #else /* CONFIG_PROC_FS */
1194 static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
1195 {
1196 }
1197
1198 static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
1199 {
1200 }
1201 static void ibmveth_proc_register_driver(void)
1202 {
1203 }
1204
1205 static void ibmveth_proc_unregister_driver(void)
1206 {
1207 }
1208 #endif /* CONFIG_PROC_FS */
1209
1210 static struct attribute veth_active_attr;
1211 static struct attribute veth_num_attr;
1212 static struct attribute veth_size_attr;
1213
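/* sysfs show: report a buffer pool's active flag, buffer count or buffer
 * size, depending on which attribute is being read
 */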
1214 static ssize_t veth_pool_show(struct kobject * kobj,
1215                               struct attribute * attr, char * buf)
1216 {
1217         struct ibmveth_buff_pool *pool = container_of(kobj,
1218                                                       struct ibmveth_buff_pool,
1219                                                       kobj);
1220
1221         if (attr == &veth_active_attr)
1222                 return sprintf(buf, "%d\n", pool->active);
1223         else if (attr == &veth_num_attr)
1224                 return sprintf(buf, "%d\n", pool->size);
1225         else if (attr == &veth_size_attr)
1226                 return sprintf(buf, "%d\n", pool->buff_size);
1227         return 0;
1228 }
1229
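/* sysfs store: activate, deactivate or resize a buffer pool; the device is
 * closed and reopened (with pool_config set) so the change takes effect
 */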
1230 static ssize_t veth_pool_store(struct kobject * kobj, struct attribute * attr,
1231 const char * buf, size_t count)
1232 {
1233         struct ibmveth_buff_pool *pool = container_of(kobj,
1234                                                       struct ibmveth_buff_pool,
1235                                                       kobj);
1236         struct net_device *netdev =
1237             container_of(kobj->parent, struct device, kobj)->driver_data;
1238         struct ibmveth_adapter *adapter = netdev->priv;
1239         long value = simple_strtol(buf, NULL, 10);
1240         long rc;
1241
1242         if (attr == &veth_active_attr) {
1243                 if (value && !pool->active) {
1244                         if(ibmveth_alloc_buffer_pool(pool)) {
1245                                 ibmveth_error_printk("unable to alloc pool\n");
1246                                 return -ENOMEM;
1247                         }
1248                         pool->active = 1;
1249                         adapter->pool_config = 1;
1250                         ibmveth_close(netdev);
1251                         adapter->pool_config = 0;
1252                         if ((rc = ibmveth_open(netdev)))
1253                                 return rc;
1254                 } else if (!value && pool->active) {
1255                         int mtu = netdev->mtu + IBMVETH_BUFF_OH;
1256                         int i;
1257                         /* Make sure there is a buffer pool with buffers that
1258                            can hold a packet of the size of the MTU */
1259                         for(i = 0; i<IbmVethNumBufferPools; i++) {
1260                                 if (pool == &adapter->rx_buff_pool[i])
1261                                         continue;
1262                                 if (!adapter->rx_buff_pool[i].active)
1263                                         continue;
1264                                 if (mtu < adapter->rx_buff_pool[i].buff_size) {
1265                                         pool->active = 0;
1266                                         h_free_logical_lan_buffer(adapter->
1267                                                                   vdev->
1268                                                                   unit_address,
1269                                                                   pool->
1270                                                                   buff_size);
1271                                 }
1272                         }
1273                         if (pool->active) {
1274                                 ibmveth_error_printk("no active pool >= MTU\n");
1275                                 return -EPERM;
1276                         }
1277                 }
1278         } else if (attr == &veth_num_attr) {
1279                 if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT)
1280                         return -EINVAL;
1281                 else {
1282                         adapter->pool_config = 1;
1283                         ibmveth_close(netdev);
1284                         adapter->pool_config = 0;
1285                         pool->size = value;
1286                         if ((rc = ibmveth_open(netdev)))
1287                                 return rc;
1288                 }
1289         } else if (attr == &veth_size_attr) {
1290                 if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE)
1291                         return -EINVAL;
1292                 else {
1293                         adapter->pool_config = 1;
1294                         ibmveth_close(netdev);
1295                         adapter->pool_config = 0;
1296                         pool->buff_size = value;
1297                         if ((rc = ibmveth_open(netdev)))
1298                                 return rc;
1299                 }
1300         }
1301
1302         /* kick the interrupt handler to allocate/deallocate pools */
1303         ibmveth_interrupt(netdev->irq, netdev);
1304         return count;
1305 }
1306
1307
1308 #define ATTR(_name, _mode)      \
1309         struct attribute veth_##_name##_attr = {               \
1310         .name = __stringify(_name), .mode = _mode, .owner = THIS_MODULE \
1311         };
1312
1313 static ATTR(active, 0644);
1314 static ATTR(num, 0644);
1315 static ATTR(size, 0644);
1316
1317 static struct attribute * veth_pool_attrs[] = {
1318         &veth_active_attr,
1319         &veth_num_attr,
1320         &veth_size_attr,
1321         NULL,
1322 };
1323
1324 static struct sysfs_ops veth_pool_ops = {
1325         .show   = veth_pool_show,
1326         .store  = veth_pool_store,
1327 };
1328
1329 static struct kobj_type ktype_veth_pool = {
1330         .release        = NULL,
1331         .sysfs_ops      = &veth_pool_ops,
1332         .default_attrs  = veth_pool_attrs,
1333 };
1334
1335
1336 static struct vio_device_id ibmveth_device_table[] __devinitdata= {
1337         { "network", "IBM,l-lan"},
1338         { "", "" }
1339 };
1340 MODULE_DEVICE_TABLE(vio, ibmveth_device_table);
1341
1342 static struct vio_driver ibmveth_driver = {
1343         .id_table       = ibmveth_device_table,
1344         .probe          = ibmveth_probe,
1345         .remove         = ibmveth_remove,
1346         .driver         = {
1347                 .name   = ibmveth_driver_name,
1348                 .owner  = THIS_MODULE,
1349         }
1350 };
1351
1352 static int __init ibmveth_module_init(void)
1353 {
1354         ibmveth_printk("%s: %s %s\n", ibmveth_driver_name, ibmveth_driver_string, ibmveth_driver_version);
1355
1356         ibmveth_proc_register_driver();
1357
1358         return vio_register_driver(&ibmveth_driver);
1359 }
1360
1361 static void __exit ibmveth_module_exit(void)
1362 {
1363         vio_unregister_driver(&ibmveth_driver);
1364         ibmveth_proc_unregister_driver();
1365 }
1366
1367 module_init(ibmveth_module_init);
1368 module_exit(ibmveth_module_exit);