/*
 *  linux/drivers/net/ehea/ehea_main.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *       Christoph Raisch <raisch@de.ibm.com>
 *       Jan-Bernd Themann <themann@de.ibm.com>
 *       Thomas Klein <tklein@de.ibm.com>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if.h>
#include <linux/list.h>
#include <linux/if_ether.h>
#include <net/ip.h>

#include "ehea.h"
#include "ehea_qmr.h"
#include "ehea_phyp.h"


MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
MODULE_DESCRIPTION("IBM eServer HEA Driver");
MODULE_VERSION(DRV_VERSION);


static int msg_level = -1;
static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
static int sq_entries = EHEA_DEF_ENTRIES_SQ;
static int use_mcs = 0;
static int num_tx_qps = EHEA_NUM_TX_QP;

module_param(msg_level, int, 0);
module_param(rq1_entries, int, 0);
module_param(rq2_entries, int, 0);
module_param(rq3_entries, int, 0);
module_param(sq_entries, int, 0);
module_param(use_mcs, int, 0);
module_param(num_tx_qps, int, 0);

MODULE_PARM_DESC(num_tx_qps, "Number of TX-QPS");
MODULE_PARM_DESC(msg_level, "msg_level");
MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
                 "[2^x - 1], x = [6..14]. Default = "
                 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3));
MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
                 "[2^x - 1], x = [6..14]. Default = "
                 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2));
MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
                 "[2^x - 1], x = [6..14]. Default = "
                 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1));
MODULE_PARM_DESC(sq_entries, "Number of entries for the Send Queue "
                 "[2^x - 1], x = [6..14]. Default = "
                 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ));
MODULE_PARM_DESC(use_mcs, "0:NAPI, 1:Multiple receive queues, Default = 0");
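
/*
 * Illustrative module load (example values, not from this file): queue
 * sizes must be of the form 2^x - 1 with x in [6..14], e.g.
 *
 *   modprobe ehea rq1_entries=16383 sq_entries=4095 use_mcs=1
 *
 * msg_level is assumed to follow the usual NETIF_MSG_* bit semantics.
 */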

void ehea_dump(void *adr, int len, char *msg)
{
        int x;
        unsigned char *deb = adr;
        for (x = 0; x < len; x += 16) {
                printk(DRV_NAME " %s adr=%p ofs=%04x %016lx %016lx\n", msg,
                          deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
                deb += 16;
        }
}

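/*
 * Query port counters from the hypervisor: the H_PORT_CB2 control block
 * is copied into a freshly zeroed page. rx_packets is accumulated from
 * the per-queue software counters instead, since receive completions are
 * counted in the driver.
 */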
static struct net_device_stats *ehea_get_stats(struct net_device *dev)
{
        struct ehea_port *port = netdev_priv(dev);
        struct net_device_stats *stats = &port->stats;
        struct hcp_ehea_port_cb2 *cb2;
        u64 hret, rx_packets;
        int i;

        memset(stats, 0, sizeof(*stats));

        cb2 = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (!cb2) {
                ehea_error("no mem for cb2");
                goto out;
        }

        hret = ehea_h_query_ehea_port(port->adapter->handle,
                                      port->logical_port_id,
                                      H_PORT_CB2, H_PORT_CB2_ALL, cb2);
        if (hret != H_SUCCESS) {
                ehea_error("query_ehea_port failed");
                goto out_herr;
        }

        if (netif_msg_hw(port))
                ehea_dump(cb2, sizeof(*cb2), "net_device_stats");

        rx_packets = 0;
        for (i = 0; i < port->num_def_qps; i++)
                rx_packets += port->port_res[i].rx_packets;

        stats->tx_packets = cb2->txucp + cb2->txmcp + cb2->txbcp;
        stats->multicast = cb2->rxmcp;
        stats->rx_errors = cb2->rxuerr;
        stats->rx_bytes = cb2->rxo;
        stats->tx_bytes = cb2->txo;
        stats->rx_packets = rx_packets;

out_herr:
        kfree(cb2);
out:
        return stats;
}

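/*
 * RQ1 is the low-latency receive queue: packet data arrives inline in the
 * CQE, so refilling only has to replace consumed skbs in the ring and
 * advance the adapter's available-WQE count ("doorbell").
 */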
static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
{
        struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
        struct net_device *dev = pr->port->netdev;
        int max_index_mask = pr->rq1_skba.len - 1;
        int i;

        if (!nr_of_wqes)
                return;

        for (i = 0; i < nr_of_wqes; i++) {
                if (!skb_arr_rq1[index]) {
                        skb_arr_rq1[index] = netdev_alloc_skb(dev,
                                                              EHEA_L_PKT_SIZE);
                        if (!skb_arr_rq1[index]) {
                                ehea_error("%s: no mem for skb/%d wqes filled",
                                           dev->name, i);
                                break;
                        }
                }
                index--;
                index &= max_index_mask;
        }
        /* Ring doorbell */
        ehea_update_rq1a(pr->qp, i);
}

static int ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
{
        int ret = 0;
        struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
        struct net_device *dev = pr->port->netdev;
        int i;

        for (i = 0; i < pr->rq1_skba.len; i++) {
                skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
                if (!skb_arr_rq1[i]) {
                        ehea_error("%s: no mem for skb/%d wqes filled",
                                   dev->name, i);
                        ret = -ENOMEM;
                        goto out;
                }
        }
        /* Ring doorbell */
        ehea_update_rq1a(pr->qp, nr_rq1a);
out:
        return ret;
}

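/*
 * Generic refill for RQ2/RQ3. The skb array length is a power of two, so
 * wrapping the ring index is a simple mask:
 *
 *   index = (index + 1) & (len - 1);   e.g. len = 16384 -> mask = 0x3fff
 *
 * os_skbs remembers WQEs that could not be posted on a previous call
 * (allocation failure) so they are retried here.
 */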
static int ehea_refill_rq_def(struct ehea_port_res *pr,
                              struct ehea_q_skb_arr *q_skba, int rq_nr,
                              int num_wqes, int wqe_type, int packet_size)
{
        struct net_device *dev = pr->port->netdev;
        struct ehea_qp *qp = pr->qp;
        struct sk_buff **skb_arr = q_skba->arr;
        struct ehea_rwqe *rwqe;
        int i, index, max_index_mask, fill_wqes;
        int ret = 0;

        fill_wqes = q_skba->os_skbs + num_wqes;

        if (!fill_wqes)
                return ret;

        index = q_skba->index;
        max_index_mask = q_skba->len - 1;
        for (i = 0; i < fill_wqes; i++) {
                struct sk_buff *skb = netdev_alloc_skb(dev, packet_size);
                if (!skb) {
                        ehea_error("%s: no mem for skb/%d wqes filled",
                                   pr->port->netdev->name, i);
                        q_skba->os_skbs = fill_wqes - i;
                        ret = -ENOMEM;
                        break;
                }
                skb_reserve(skb, NET_IP_ALIGN);

                skb_arr[index] = skb;

                rwqe = ehea_get_next_rwqe(qp, rq_nr);
                rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
                            | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
                rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
                rwqe->sg_list[0].vaddr = (u64)skb->data;
                rwqe->sg_list[0].len = packet_size;
                rwqe->data_segments = 1;

                index++;
                index &= max_index_mask;
        }
        q_skba->index = index;

        /* Ring doorbell */
        iosync();
        if (rq_nr == 2)
                ehea_update_rq2a(pr->qp, i);
        else
                ehea_update_rq3a(pr->qp, i);

        return ret;
}


static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes)
{
        return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
                                  nr_of_wqes, EHEA_RWQE2_TYPE,
                                  EHEA_RQ2_PKT_SIZE + NET_IP_ALIGN);
}


static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes)
{
        return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
                                  nr_of_wqes, EHEA_RWQE3_TYPE,
                                  EHEA_MAX_PACKET_SIZE + NET_IP_ALIGN);
}

static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
{
        *rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;
        if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)
                return 0;
        if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&
            (cqe->header_length == 0))
                return 0;
        return -EINVAL;
}

static inline void ehea_fill_skb(struct net_device *dev,
                                 struct sk_buff *skb, struct ehea_cqe *cqe)
{
        int length = cqe->num_bytes_transfered - 4;     /* remove CRC */

        skb_put(skb, length);
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        skb->protocol = eth_type_trans(skb, dev);
}

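/*
 * Fetch the skb posted for this CQE and prefetch the *next* slot: the
 * array is consumed in order, so warming skb_array[index + 1] and its
 * packet data hides cache misses on the following iteration.
 */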
static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
                                               int arr_len,
                                               struct ehea_cqe *cqe)
{
        int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
        struct sk_buff *skb;
        void *pref;
        int x;

        x = skb_index + 1;
        x &= (arr_len - 1);

        pref = skb_array[x];
        prefetchw(pref);
        prefetchw(pref + EHEA_CACHE_LINE);

        pref = (skb_array[x]->data);
        prefetch(pref);
        prefetch(pref + EHEA_CACHE_LINE);
        prefetch(pref + EHEA_CACHE_LINE * 2);
        prefetch(pref + EHEA_CACHE_LINE * 3);
        skb = skb_array[skb_index];
        skb_array[skb_index] = NULL;
        return skb;
}

static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array,
                                                  int arr_len, int wqe_index)
{
        struct sk_buff *skb;
        void *pref;
        int x;

        x = wqe_index + 1;
        x &= (arr_len - 1);

        pref = skb_array[x];
        prefetchw(pref);
        prefetchw(pref + EHEA_CACHE_LINE);

        pref = (skb_array[x]->data);
        prefetchw(pref);
        prefetchw(pref + EHEA_CACHE_LINE);

        skb = skb_array[wqe_index];
        skb_array[wqe_index] = NULL;
        return skb;
}

static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
                                 struct ehea_cqe *cqe, int *processed_rq2,
                                 int *processed_rq3)
{
        struct sk_buff *skb;

        if (cqe->status & EHEA_CQE_STAT_ERR_TCP)
                pr->p_stats.err_tcp_cksum++;
        if (cqe->status & EHEA_CQE_STAT_ERR_IP)
                pr->p_stats.err_ip_cksum++;
        if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
                pr->p_stats.err_frame_crc++;

        if (netif_msg_rx_err(pr->port)) {
                ehea_error("CQE Error for QP %d", pr->qp->init_attr.qp_nr);
                ehea_dump(cqe, sizeof(*cqe), "CQE");
        }

        if (rq == 2) {
                *processed_rq2 += 1;
                skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe);
                dev_kfree_skb(skb);
        } else if (rq == 3) {
                *processed_rq3 += 1;
                skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe);
                dev_kfree_skb(skb);
        }

        if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
                ehea_error("Critical receive error. Resetting port.");
                queue_work(pr->port->adapter->ehea_wq, &pr->port->reset_task);
                return 1;
        }

        return 0;
}

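/*
 * Receive path. The adapter spreads packets over three receive queues by
 * size: RQ1 completions carry the frame inline in the CQE (copied out at
 * offset 64 below), while RQ2 (mid-size) and RQ3 (large/jumbo) deliver
 * into the posted skbs. *budget follows the old-style NAPI contract and
 * is decremented by the number of packets processed.
 */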
static struct ehea_cqe *ehea_proc_rwqes(struct net_device *dev,
                                        struct ehea_port_res *pr,
                                        int *budget)
{
        struct ehea_port *port = pr->port;
        struct ehea_qp *qp = pr->qp;
        struct ehea_cqe *cqe;
        struct sk_buff *skb;
        struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
        struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr;
        struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr;
        int skb_arr_rq1_len = pr->rq1_skba.len;
        int skb_arr_rq2_len = pr->rq2_skba.len;
        int skb_arr_rq3_len = pr->rq3_skba.len;
        int processed, processed_rq1, processed_rq2, processed_rq3;
        int wqe_index, last_wqe_index, rq, my_quota, port_reset;

        processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
        last_wqe_index = 0;
        my_quota = min(*budget, dev->quota);

        cqe = ehea_poll_rq1(qp, &wqe_index);
        while ((my_quota > 0) && cqe) {
                ehea_inc_rq1(qp);
                processed_rq1++;
                processed++;
                my_quota--;
                if (netif_msg_rx_status(port))
                        ehea_dump(cqe, sizeof(*cqe), "CQE");

                last_wqe_index = wqe_index;
                rmb();
                if (!ehea_check_cqe(cqe, &rq)) {
                        if (rq == 1) {  /* LL RQ1 */
                                skb = get_skb_by_index_ll(skb_arr_rq1,
                                                          skb_arr_rq1_len,
                                                          wqe_index);
                                if (unlikely(!skb)) {
                                        if (netif_msg_rx_err(port))
                                                ehea_error("LL rq1: skb=NULL");

                                        skb = netdev_alloc_skb(port->netdev,
                                                               EHEA_L_PKT_SIZE);
                                        if (!skb)
                                                break;
                                }
                                skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
                                               cqe->num_bytes_transfered - 4);
                                ehea_fill_skb(dev, skb, cqe);
                        } else if (rq == 2) {  /* RQ2 */
                                skb = get_skb_by_index(skb_arr_rq2,
                                                       skb_arr_rq2_len, cqe);
                                if (unlikely(!skb)) {
                                        if (netif_msg_rx_err(port))
                                                ehea_error("rq2: skb=NULL");
                                        break;
                                }
                                ehea_fill_skb(port->netdev, skb, cqe);
                                processed_rq2++;
                        } else {  /* RQ3 */
                                skb = get_skb_by_index(skb_arr_rq3,
                                                       skb_arr_rq3_len, cqe);
                                if (unlikely(!skb)) {
                                        if (netif_msg_rx_err(port))
                                                ehea_error("rq3: skb=NULL");
                                        break;
                                }
                                ehea_fill_skb(port->netdev, skb, cqe);
                                processed_rq3++;
                        }

                        if (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)
                                vlan_hwaccel_receive_skb(skb, port->vgrp,
                                                         cqe->vlan_tag);
                        else
                                netif_receive_skb(skb);
                } else {
                        pr->p_stats.poll_receive_errors++;
                        port_reset = ehea_treat_poll_error(pr, rq, cqe,
                                                           &processed_rq2,
                                                           &processed_rq3);
                        if (port_reset)
                                break;
                }
                cqe = ehea_poll_rq1(qp, &wqe_index);
        }

        pr->rx_packets += processed;
        *budget -= processed;

        ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
        ehea_refill_rq2(pr, processed_rq2);
        ehea_refill_rq3(pr, processed_rq3);

        cqe = ehea_poll_rq1(qp, &wqe_index);
        return cqe;
}

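/*
 * Reap send completions: free transmitted skbs, credit the freed send
 * WQEs back to swqe_avail and restart the netif queue once the refill
 * threshold is reached again.
 */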
static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
{
        struct sk_buff *skb;
        struct ehea_cq *send_cq = pr->send_cq;
        struct ehea_cqe *cqe;
        int quota = my_quota;
        int cqe_counter = 0;
        int swqe_av = 0;
        int index;
        unsigned long flags;

        cqe = ehea_poll_cq(send_cq);
        while (cqe && (quota > 0)) {
                ehea_inc_cq(send_cq);

                cqe_counter++;
                rmb();
                if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
                        ehea_error("Send Completion Error: Resetting port");
                        if (netif_msg_tx_err(pr->port))
                                ehea_dump(cqe, sizeof(*cqe), "Send CQE");
                        queue_work(pr->port->adapter->ehea_wq,
                                   &pr->port->reset_task);
                        break;
                }

                if (netif_msg_tx_done(pr->port))
                        ehea_dump(cqe, sizeof(*cqe), "CQE");

                if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
                           == EHEA_SWQE2_TYPE)) {

                        index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
                        skb = pr->sq_skba.arr[index];
                        dev_kfree_skb(skb);
                        pr->sq_skba.arr[index] = NULL;
                }

                swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);
                quota--;

                cqe = ehea_poll_cq(send_cq);
        }

        ehea_update_feca(send_cq, cqe_counter);
        atomic_add(swqe_av, &pr->swqe_avail);

        spin_lock_irqsave(&pr->netif_queue, flags);

        if (pr->queue_stopped && (atomic_read(&pr->swqe_avail)
                                  >= pr->swqe_refill_th)) {
                netif_wake_queue(pr->port->netdev);
                pr->queue_stopped = 0;
        }
        spin_unlock_irqrestore(&pr->netif_queue, flags);

        return cqe;
}

#define EHEA_NAPI_POLL_NUM_BEFORE_IRQ 16

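/*
 * Old-style netdev->poll: returning 0 means the device is done and has
 * re-armed its completion events; returning 1 keeps it on the poll list.
 * After EHEA_NAPI_POLL_NUM_BEFORE_IRQ consecutive non-empty polls the
 * code forces the complete/re-arm path once, apparently to bound the
 * time spent in pure polling before falling back to interrupts.
 */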
static int ehea_poll(struct net_device *dev, int *budget)
{
        struct ehea_port_res *pr = dev->priv;
        struct ehea_cqe *cqe;
        struct ehea_cqe *cqe_skb = NULL;
        int force_irq, wqe_index;

        cqe = ehea_poll_rq1(pr->qp, &wqe_index);
        cqe_skb = ehea_poll_cq(pr->send_cq);

        force_irq = (pr->poll_counter > EHEA_NAPI_POLL_NUM_BEFORE_IRQ);

        if ((!cqe && !cqe_skb) || force_irq) {
                pr->poll_counter = 0;
                netif_rx_complete(dev);
                ehea_reset_cq_ep(pr->recv_cq);
                ehea_reset_cq_ep(pr->send_cq);
                ehea_reset_cq_n1(pr->recv_cq);
                ehea_reset_cq_n1(pr->send_cq);
                cqe = ehea_poll_rq1(pr->qp, &wqe_index);
                cqe_skb = ehea_poll_cq(pr->send_cq);

                if (!cqe && !cqe_skb)
                        return 0;

                if (!netif_rx_reschedule(dev, dev->quota))
                        return 0;
        }

        cqe = ehea_proc_rwqes(dev, pr, budget);
        cqe_skb = ehea_proc_cqes(pr, 300);

        if (cqe || cqe_skb)
                pr->poll_counter++;

        return 1;
}

static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
{
        struct ehea_port_res *pr = param;

        netif_rx_schedule(pr->d_netdev);

        return IRQ_HANDLED;
}

static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
{
        struct ehea_port *port = param;
        struct ehea_eqe *eqe;
        struct ehea_qp *qp;
        u32 qp_token;

        eqe = ehea_poll_eq(port->qp_eq);

        while (eqe) {
                qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
                ehea_error("QP aff_err: entry=0x%lx, token=0x%x",
                           eqe->entry, qp_token);

                qp = port->port_res[qp_token].qp;
                ehea_error_data(port->adapter, qp->fw_handle);
                eqe = ehea_poll_eq(port->qp_eq);
        }

        queue_work(port->adapter->ehea_wq, &port->reset_task);

        return IRQ_HANDLED;
}

static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter,
                                       int logical_port)
{
        int i;

        for (i = 0; i < EHEA_MAX_PORTS; i++)
                if (adapter->port[i])
                        if (adapter->port[i]->logical_port_id == logical_port)
                                return adapter->port[i];
        return NULL;
}

int ehea_sense_port_attr(struct ehea_port *port)
{
        int ret;
        u64 hret;
        struct hcp_ehea_port_cb0 *cb0;

        cb0 = kzalloc(PAGE_SIZE, GFP_ATOMIC);   /* May be called via */
        if (!cb0) {                             /* ehea_neq_tasklet() */
                ehea_error("no mem for cb0");
                ret = -ENOMEM;
                goto out;
        }

        hret = ehea_h_query_ehea_port(port->adapter->handle,
                                      port->logical_port_id, H_PORT_CB0,
                                      EHEA_BMASK_SET(H_PORT_CB0_ALL, 0xFFFF),
                                      cb0);
        if (hret != H_SUCCESS) {
                ret = -EIO;
                goto out_free;
        }

        /* MAC address */
        port->mac_addr = cb0->port_mac_addr << 16;

        if (!is_valid_ether_addr((u8 *)&port->mac_addr)) {
                ret = -EADDRNOTAVAIL;
                goto out_free;
        }

        /* Port speed */
        switch (cb0->port_speed) {
        case H_SPEED_10M_H:
                port->port_speed = EHEA_SPEED_10M;
                port->full_duplex = 0;
                break;
        case H_SPEED_10M_F:
                port->port_speed = EHEA_SPEED_10M;
                port->full_duplex = 1;
                break;
        case H_SPEED_100M_H:
                port->port_speed = EHEA_SPEED_100M;
                port->full_duplex = 0;
                break;
        case H_SPEED_100M_F:
                port->port_speed = EHEA_SPEED_100M;
                port->full_duplex = 1;
                break;
        case H_SPEED_1G_F:
                port->port_speed = EHEA_SPEED_1G;
                port->full_duplex = 1;
                break;
        case H_SPEED_10G_F:
                port->port_speed = EHEA_SPEED_10G;
                port->full_duplex = 1;
                break;
        default:
                port->port_speed = 0;
                port->full_duplex = 0;
                break;
        }

        port->autoneg = 1;
        port->num_mcs = cb0->num_default_qps;

        /* Number of default QPs */
        if (use_mcs)
                port->num_def_qps = cb0->num_default_qps;
        else
                port->num_def_qps = 1;

        if (!port->num_def_qps) {
                ret = -EINVAL;
                goto out_free;
        }

        port->num_tx_qps = num_tx_qps;

        if (port->num_def_qps >= port->num_tx_qps)
                port->num_add_tx_qps = 0;
        else
                port->num_add_tx_qps = port->num_tx_qps - port->num_def_qps;

        ret = 0;
out_free:
        if (ret || netif_msg_probe(port))
                ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr");
        kfree(cb0);
out:
        return ret;
}

int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
{
        struct hcp_ehea_port_cb4 *cb4;
        u64 hret;
        int ret = 0;

        cb4 = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (!cb4) {
                ehea_error("no mem for cb4");
                ret = -ENOMEM;
                goto out;
        }

        cb4->port_speed = port_speed;

        netif_carrier_off(port->netdev);

        hret = ehea_h_modify_ehea_port(port->adapter->handle,
                                       port->logical_port_id,
                                       H_PORT_CB4, H_PORT_CB4_SPEED, cb4);
        if (hret == H_SUCCESS) {
                port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0;

                hret = ehea_h_query_ehea_port(port->adapter->handle,
                                              port->logical_port_id,
                                              H_PORT_CB4, H_PORT_CB4_SPEED,
                                              cb4);
                if (hret == H_SUCCESS) {
                        switch (cb4->port_speed) {
                        case H_SPEED_10M_H:
                                port->port_speed = EHEA_SPEED_10M;
                                port->full_duplex = 0;
                                break;
                        case H_SPEED_10M_F:
                                port->port_speed = EHEA_SPEED_10M;
                                port->full_duplex = 1;
                                break;
                        case H_SPEED_100M_H:
                                port->port_speed = EHEA_SPEED_100M;
                                port->full_duplex = 0;
                                break;
                        case H_SPEED_100M_F:
                                port->port_speed = EHEA_SPEED_100M;
                                port->full_duplex = 1;
                                break;
                        case H_SPEED_1G_F:
                                port->port_speed = EHEA_SPEED_1G;
                                port->full_duplex = 1;
                                break;
                        case H_SPEED_10G_F:
                                port->port_speed = EHEA_SPEED_10G;
                                port->full_duplex = 1;
                                break;
                        default:
                                port->port_speed = 0;
                                port->full_duplex = 0;
                                break;
                        }
                } else {
                        ehea_error("Failed sensing port speed");
                        ret = -EIO;
                }
        } else {
                if (hret == H_AUTHORITY) {
                        ehea_info("Hypervisor denied setting port speed");
                        ret = -EPERM;
                } else {
                        ret = -EIO;
                        ehea_error("Failed setting port speed");
                }
        }
        netif_carrier_on(port->netdev);
        kfree(cb4);
out:
        return ret;
}

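/*
 * Decode one event queue entry from the adapter's notification EQ. The
 * event code selects between port state changes (link up/down, external
 * switch failover) and adapter/port malfunction events.
 */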
static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
{
        int ret;
        u8 ec;
        u8 portnum;
        struct ehea_port *port;

        ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
        portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
        port = ehea_get_port(adapter, portnum);

        switch (ec) {
        case EHEA_EC_PORTSTATE_CHG:     /* port state change */

                if (!port) {
                        ehea_error("unknown portnum %x", portnum);
                        break;
                }

                if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
                        if (!netif_carrier_ok(port->netdev)) {
                                ret = ehea_sense_port_attr(port);
                                if (ret) {
                                        ehea_error("failed resensing port "
                                                   "attributes");
                                        break;
                                }

                                if (netif_msg_link(port))
                                        ehea_info("%s: Logical port up: %dMbps "
                                                  "%s Duplex",
                                                  port->netdev->name,
                                                  port->port_speed,
                                                  port->full_duplex ==
                                                  1 ? "Full" : "Half");

                                netif_carrier_on(port->netdev);
                                netif_wake_queue(port->netdev);
                        }
                } else
                        if (netif_carrier_ok(port->netdev)) {
                                if (netif_msg_link(port))
                                        ehea_info("%s: Logical port down",
                                                  port->netdev->name);
                                netif_carrier_off(port->netdev);
                                netif_stop_queue(port->netdev);
                        }

                if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
                        if (netif_msg_link(port))
                                ehea_info("%s: Physical port up",
                                          port->netdev->name);
                } else {
                        if (netif_msg_link(port))
                                ehea_info("%s: Physical port down",
                                          port->netdev->name);
                }

                if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
                        ehea_info("External switch port is primary port");
                else
                        ehea_info("External switch port is backup port");

                break;
        case EHEA_EC_ADAPTER_MALFUNC:
                ehea_error("Adapter malfunction");
                break;
        case EHEA_EC_PORT_MALFUNC:
                if (!port) {    /* guard against events for unknown ports */
                        ehea_error("unknown portnum %x", portnum);
                        break;
                }
                ehea_info("Port malfunction: Device: %s", port->netdev->name);
                netif_carrier_off(port->netdev);
                netif_stop_queue(port->netdev);
                break;
        default:
                ehea_error("unknown event code %x, eqe=0x%lX", ec, eqe);
                break;
        }
}

static void ehea_neq_tasklet(unsigned long data)
{
        struct ehea_adapter *adapter = (struct ehea_adapter *)data;
        struct ehea_eqe *eqe;
        u64 event_mask;

        eqe = ehea_poll_eq(adapter->neq);
        ehea_debug("eqe=%p", eqe);

        while (eqe) {
                ehea_debug("*eqe=%lx", eqe->entry);
                ehea_parse_eqe(adapter, eqe->entry);
                eqe = ehea_poll_eq(adapter->neq);
                ehea_debug("next eqe=%p", eqe);
        }

        event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
                   | EHEA_BMASK_SET(NELR_ADAPTER_MALFUNC, 1)
                   | EHEA_BMASK_SET(NELR_PORT_MALFUNC, 1);

        ehea_h_reset_events(adapter->handle,
                            adapter->neq->fw_handle, event_mask);
}

static irqreturn_t ehea_interrupt_neq(int irq, void *param)
{
        struct ehea_adapter *adapter = param;
        tasklet_hi_schedule(&adapter->neq_tasklet);
        return IRQ_HANDLED;
}


static int ehea_fill_port_res(struct ehea_port_res *pr)
{
        int ret;
        struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;

        ret = ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1
                                     - init_attr->act_nr_rwqes_rq2
                                     - init_attr->act_nr_rwqes_rq3 - 1);

        ret |= ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);

        ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1);

        return ret;
}

static int ehea_reg_interrupts(struct net_device *dev)
{
        struct ehea_port *port = netdev_priv(dev);
        struct ehea_port_res *pr;
        int i, ret;


        snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff",
                 dev->name);

        ret = ibmebus_request_irq(NULL, port->qp_eq->attr.ist1,
                                  ehea_qp_aff_irq_handler,
                                  IRQF_DISABLED, port->int_aff_name, port);
        if (ret) {
                ehea_error("failed registering irq for qp_aff_irq_handler:"
                           "ist=%X", port->qp_eq->attr.ist1);
                goto out_free_qpeq;
        }

        if (netif_msg_ifup(port))
                ehea_info("irq_handle 0x%X for function qp_aff_irq_handler "
                          "registered", port->qp_eq->attr.ist1);


        for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
                pr = &port->port_res[i];
                snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
                         "%s-queue%d", dev->name, i);
                ret = ibmebus_request_irq(NULL, pr->eq->attr.ist1,
                                          ehea_recv_irq_handler,
                                          IRQF_DISABLED, pr->int_send_name,
                                          pr);
                if (ret) {
                        ehea_error("failed registering irq for ehea_queue "
                                   "port_res_nr:%d, ist=%X", i,
                                   pr->eq->attr.ist1);
                        goto out_free_req;
                }
                if (netif_msg_ifup(port))
                        ehea_info("irq_handle 0x%X for function ehea_queue_int "
                                  "%d registered", pr->eq->attr.ist1, i);
        }
out:
        return ret;


out_free_req:
        while (--i >= 0) {
                u32 ist = port->port_res[i].eq->attr.ist1;
                ibmebus_free_irq(NULL, ist, &port->port_res[i]);
        }

out_free_qpeq:
        ibmebus_free_irq(NULL, port->qp_eq->attr.ist1, port);
        i = port->num_def_qps;

        goto out;

}

static void ehea_free_interrupts(struct net_device *dev)
{
        struct ehea_port *port = netdev_priv(dev);
        struct ehea_port_res *pr;
        int i;

        /* send */

        for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
                pr = &port->port_res[i];
                ibmebus_free_irq(NULL, pr->eq->attr.ist1, pr);
                if (netif_msg_intr(port))
                        ehea_info("free send irq for res %d with handle 0x%X",
                                  i, pr->eq->attr.ist1);
        }

        /* associated events */
        ibmebus_free_irq(NULL, port->qp_eq->attr.ist1, port);
        if (netif_msg_intr(port))
                ehea_info("associated event interrupt for handle 0x%X freed",
                          port->qp_eq->attr.ist1);
}

static int ehea_configure_port(struct ehea_port *port)
{
        int ret, i;
        u64 hret, mask;
        struct hcp_ehea_port_cb0 *cb0;

        ret = -ENOMEM;
        cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (!cb0)
                goto out;

        cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1)
                     | EHEA_BMASK_SET(PXLY_RC_IP_CHKSUM, 1)
                     | EHEA_BMASK_SET(PXLY_RC_TCP_UDP_CHKSUM, 1)
                     | EHEA_BMASK_SET(PXLY_RC_VLAN_XTRACT, 1)
                     | EHEA_BMASK_SET(PXLY_RC_VLAN_TAG_FILTER,
                                      PXLY_RC_VLAN_FILTER)
                     | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1);

        for (i = 0; i < port->num_mcs; i++)
                if (use_mcs)
                        cb0->default_qpn_arr[i] =
                                port->port_res[i].qp->init_attr.qp_nr;
                else
                        cb0->default_qpn_arr[i] =
                                port->port_res[0].qp->init_attr.qp_nr;

        if (netif_msg_ifup(port))
                ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port");

        mask = EHEA_BMASK_SET(H_PORT_CB0_PRC, 1)
             | EHEA_BMASK_SET(H_PORT_CB0_DEFQPNARRAY, 1);

        hret = ehea_h_modify_ehea_port(port->adapter->handle,
                                       port->logical_port_id,
                                       H_PORT_CB0, mask, cb0);
        ret = -EIO;
        if (hret != H_SUCCESS)
                goto out_free;

        ret = 0;

out_free:
        kfree(cb0);
out:
        return ret;
}

int ehea_gen_smrs(struct ehea_port_res *pr)
{
        int ret;
        struct ehea_adapter *adapter = pr->port->adapter;

        ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr);
        if (ret)
                goto out;

        ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr);
        if (ret)
                goto out_free;

        return 0;

out_free:
        ehea_rem_mr(&pr->send_mr);
out:
        ehea_error("Generating SMRS failed");
        return -EIO;
}

int ehea_rem_smrs(struct ehea_port_res *pr)
{
        if ((ehea_rem_mr(&pr->send_mr))
            || (ehea_rem_mr(&pr->recv_mr)))
                return -EIO;
        else
                return 0;
}

static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
{
        int arr_size = sizeof(void *) * max_q_entries;

        q_skba->arr = vmalloc(arr_size);
        if (!q_skba->arr)
                return -ENOMEM;

        memset(q_skba->arr, 0, arr_size);

        q_skba->len = max_q_entries;
        q_skba->index = 0;
        q_skba->os_skbs = 0;

        return 0;
}

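/*
 * Set up one port resource: an event queue, receive and send completion
 * queues, the QP with its three receive queues, and the skb bookkeeping
 * arrays. The dummy netdev (d_netdev) exists so that each queue pair can
 * be scheduled as its own poll instance (see ehea_recv_irq_handler).
 */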
static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
                              struct port_res_cfg *pr_cfg, int queue_token)
{
        struct ehea_adapter *adapter = port->adapter;
        enum ehea_eq_type eq_type = EHEA_EQ;
        struct ehea_qp_init_attr *init_attr = NULL;
        int ret = -EIO;

        memset(pr, 0, sizeof(struct ehea_port_res));

        pr->port = port;
        spin_lock_init(&pr->xmit_lock);
        spin_lock_init(&pr->netif_queue);

        pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
        if (!pr->eq) {
                ehea_error("create_eq failed (eq)");
                goto out_free;
        }

        pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq,
                                     pr->eq->fw_handle,
                                     port->logical_port_id);
        if (!pr->recv_cq) {
                ehea_error("create_cq failed (cq_recv)");
                goto out_free;
        }

        pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq,
                                     pr->eq->fw_handle,
                                     port->logical_port_id);
        if (!pr->send_cq) {
                ehea_error("create_cq failed (cq_send)");
                goto out_free;
        }

        if (netif_msg_ifup(port))
                ehea_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d",
                          pr->send_cq->attr.act_nr_of_cqes,
                          pr->recv_cq->attr.act_nr_of_cqes);

        init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
        if (!init_attr) {
                ret = -ENOMEM;
                ehea_error("no mem for ehea_qp_init_attr");
                goto out_free;
        }

        init_attr->low_lat_rq1 = 1;
        init_attr->signalingtype = 1;   /* generate CQE if specified in WQE */
        init_attr->rq_count = 3;
        init_attr->qp_token = queue_token;
        init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq;
        init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1;
        init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2;
        init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3;
        init_attr->wqe_size_enc_sq = EHEA_SG_SQ;
        init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1;
        init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2;
        init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3;
        init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD;
        init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD;
        init_attr->port_nr = port->logical_port_id;
        init_attr->send_cq_handle = pr->send_cq->fw_handle;
        init_attr->recv_cq_handle = pr->recv_cq->fw_handle;
        init_attr->aff_eq_handle = port->qp_eq->fw_handle;

        pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
        if (!pr->qp) {
                ehea_error("create_qp failed");
                ret = -EIO;
                goto out_free;
        }

        if (netif_msg_ifup(port))
                ehea_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n "
                          "nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d", init_attr->qp_nr,
                          init_attr->act_nr_send_wqes,
                          init_attr->act_nr_rwqes_rq1,
                          init_attr->act_nr_rwqes_rq2,
                          init_attr->act_nr_rwqes_rq3);

        ret = ehea_init_q_skba(&pr->sq_skba, init_attr->act_nr_send_wqes + 1);
        ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1);
        ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1);
        ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1);
        if (ret)
                goto out_free;

        pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10;
        if (ehea_gen_smrs(pr) != 0) {
                ret = -EIO;
                goto out_free;
        }

        atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1);

        kfree(init_attr);
        init_attr = NULL;       /* out_free may still be taken below */

        pr->d_netdev = alloc_netdev(0, "", ether_setup);
        if (!pr->d_netdev) {
                ret = -ENOMEM;
                goto out_free;
        }
        pr->d_netdev->priv = pr;
        pr->d_netdev->weight = 64;
        pr->d_netdev->poll = ehea_poll;
        set_bit(__LINK_STATE_START, &pr->d_netdev->state);
        strcpy(pr->d_netdev->name, port->netdev->name);

        ret = 0;
        goto out;

out_free:
        kfree(init_attr);
        vfree(pr->sq_skba.arr);
        vfree(pr->rq1_skba.arr);
        vfree(pr->rq2_skba.arr);
        vfree(pr->rq3_skba.arr);
        ehea_destroy_qp(pr->qp);
        ehea_destroy_cq(pr->send_cq);
        ehea_destroy_cq(pr->recv_cq);
        ehea_destroy_eq(pr->eq);
out:
        return ret;
}

static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
{
        int ret, i;

        free_netdev(pr->d_netdev);

        ret = ehea_destroy_qp(pr->qp);

        if (!ret) {
                ehea_destroy_cq(pr->send_cq);
                ehea_destroy_cq(pr->recv_cq);
                ehea_destroy_eq(pr->eq);

                for (i = 0; i < pr->rq1_skba.len; i++)
                        if (pr->rq1_skba.arr[i])
                                dev_kfree_skb(pr->rq1_skba.arr[i]);

                for (i = 0; i < pr->rq2_skba.len; i++)
                        if (pr->rq2_skba.arr[i])
                                dev_kfree_skb(pr->rq2_skba.arr[i]);

                for (i = 0; i < pr->rq3_skba.len; i++)
                        if (pr->rq3_skba.arr[i])
                                dev_kfree_skb(pr->rq3_skba.arr[i]);

                for (i = 0; i < pr->sq_skba.len; i++)
                        if (pr->sq_skba.arr[i])
                                dev_kfree_skb(pr->sq_skba.arr[i]);

                vfree(pr->rq1_skba.arr);
                vfree(pr->rq2_skba.arr);
                vfree(pr->rq3_skba.arr);
                vfree(pr->sq_skba.arr);
                ret = ehea_rem_smrs(pr);
        }
        return ret;
}

/*
 * The write_* functions store information in the swqe which is used by
 * the hardware to calculate the IP/TCP/UDP checksum.
 */

static inline void write_ip_start_end(struct ehea_swqe *swqe,
                                      const struct sk_buff *skb)
{
        swqe->ip_start = skb_network_offset(skb);
        swqe->ip_end = (u8)(swqe->ip_start + ip_hdrlen(skb) - 1);
}

static inline void write_tcp_offset_end(struct ehea_swqe *swqe,
                                        const struct sk_buff *skb)
{
        swqe->tcp_offset =
                (u8)(swqe->ip_end + 1 + offsetof(struct tcphdr, check));

        swqe->tcp_end = (u16)skb->len - 1;
}

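/* Note: for UDP the adapter reuses the swqe's tcp_offset/tcp_end fields. */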
static inline void write_udp_offset_end(struct ehea_swqe *swqe,
                                        const struct sk_buff *skb)
{
        swqe->tcp_offset =
                (u8)(swqe->ip_end + 1 + offsetof(struct udphdr, check));

        swqe->tcp_end = (u16)skb->len - 1;
}


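/*
 * TSO send: only the ETH/IP/TCP headers are placed in the swqe immediate
 * data area; the remaining linear data is referenced by sg1entry so the
 * adapter can replicate headers per segment (mss = gso_size).
 */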
static void write_swqe2_TSO(struct sk_buff *skb,
                            struct ehea_swqe *swqe, u32 lkey)
{
        struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
        u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
        int skb_data_size = skb->len - skb->data_len;
        int headersize;
        u64 tmp_addr;

        /* Packet is TCP with TSO enabled */
        swqe->tx_control |= EHEA_SWQE_TSO;
        swqe->mss = skb_shinfo(skb)->gso_size;
        /* copy only eth/ip/tcp headers to immediate data and
         * the rest of skb->data to sg1entry
         */
        headersize = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);

        if (skb_data_size >= headersize) {
                /* copy immediate data */
                skb_copy_from_linear_data(skb, imm_data, headersize);
                swqe->immediate_data_length = headersize;

                if (skb_data_size > headersize) {
                        /* set sg1entry data */
                        sg1entry->l_key = lkey;
                        sg1entry->len = skb_data_size - headersize;

                        tmp_addr = (u64)(skb->data + headersize);
                        sg1entry->vaddr = tmp_addr;
                        swqe->descriptors++;
                }
        } else
                ehea_error("cannot handle fragmented headers");
}

static void write_swqe2_nonTSO(struct sk_buff *skb,
                               struct ehea_swqe *swqe, u32 lkey)
{
        int skb_data_size = skb->len - skb->data_len;
        u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
        struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
        u64 tmp_addr;

        /* Packet is any nonTSO type
         *
         * Copy as much as possible skb->data to immediate data and
         * the rest to sg1entry
         */
        if (skb_data_size >= SWQE2_MAX_IMM) {
                /* copy immediate data */
                skb_copy_from_linear_data(skb, imm_data, SWQE2_MAX_IMM);

                swqe->immediate_data_length = SWQE2_MAX_IMM;

                if (skb_data_size > SWQE2_MAX_IMM) {
                        /* copy sg1entry data */
                        sg1entry->l_key = lkey;
                        sg1entry->len = skb_data_size - SWQE2_MAX_IMM;
                        tmp_addr = (u64)(skb->data + SWQE2_MAX_IMM);
                        sg1entry->vaddr = tmp_addr;
                        swqe->descriptors++;
                }
        } else {
                skb_copy_from_linear_data(skb, imm_data, skb_data_size);
                swqe->immediate_data_length = skb_data_size;
        }
}

static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
                                    struct ehea_swqe *swqe, u32 lkey)
{
        struct ehea_vsgentry *sg_list, *sg1entry, *sgentry;
        skb_frag_t *frag;
        int nfrags, sg1entry_contains_frag_data, i;
        u64 tmp_addr;

        nfrags = skb_shinfo(skb)->nr_frags;
        sg1entry = &swqe->u.immdata_desc.sg_entry;
        sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list;
        swqe->descriptors = 0;
        sg1entry_contains_frag_data = 0;

        if ((dev->features & NETIF_F_TSO) && skb_shinfo(skb)->gso_size)
                write_swqe2_TSO(skb, swqe, lkey);
        else
                write_swqe2_nonTSO(skb, swqe, lkey);

        /* write descriptors */
        if (nfrags > 0) {
                if (swqe->descriptors == 0) {
                        /* sg1entry not yet used */
                        frag = &skb_shinfo(skb)->frags[0];

                        /* copy sg1entry data */
                        sg1entry->l_key = lkey;
                        sg1entry->len = frag->size;
                        tmp_addr =  (u64)(page_address(frag->page)
                                          + frag->page_offset);
                        sg1entry->vaddr = tmp_addr;
                        swqe->descriptors++;
                        sg1entry_contains_frag_data = 1;
                }

                for (i = sg1entry_contains_frag_data; i < nfrags; i++) {

                        frag = &skb_shinfo(skb)->frags[i];
                        sgentry = &sg_list[i - sg1entry_contains_frag_data];

                        sgentry->l_key = lkey;
                        sgentry->len = frag->size;

                        tmp_addr = (u64)(page_address(frag->page)
                                         + frag->page_offset);
                        sgentry->vaddr = tmp_addr;
                        swqe->descriptors++;
                }
        }
}

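/*
 * Broadcast registration helper: hcallid is H_REG_BCMC or H_DEREG_BCMC,
 * so the same function both registers and deregisters the port's MAC for
 * untagged and all-VLAN broadcast frames.
 */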
1392 static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
1393 {
1394         int ret = 0;
1395         u64 hret;
1396         u8 reg_type;
1397
1398         /* De/Register untagged packets */
1399         reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED;
1400         hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1401                                      port->logical_port_id,
1402                                      reg_type, port->mac_addr, 0, hcallid);
1403         if (hret != H_SUCCESS) {
1404                 ehea_error("reg_dereg_bcmc failed (tagged)");
1405                 ret = -EIO;
1406                 goto out_herr;
1407         }
1408
1409         /* De/Register VLAN packets */
1410         reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_VLANID_ALL;
1411         hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1412                                      port->logical_port_id,
1413                                      reg_type, port->mac_addr, 0, hcallid);
1414         if (hret != H_SUCCESS) {
1415                 ehea_error("reg_dereg_bcmc failed (vlan)");
1416                 ret = -EIO;
1417         }
1418 out_herr:
1419         return ret;
1420 }
1421
1422 static int ehea_set_mac_addr(struct net_device *dev, void *sa)
1423 {
1424         struct ehea_port *port = netdev_priv(dev);
1425         struct sockaddr *mac_addr = sa;
1426         struct hcp_ehea_port_cb0 *cb0;
1427         int ret;
1428         u64 hret;
1429
1430         if (!is_valid_ether_addr(mac_addr->sa_data)) {
1431                 ret = -EADDRNOTAVAIL;
1432                 goto out;
1433         }
1434
1435         cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
1436         if (!cb0) {
1437                 ehea_error("no mem for cb0");
1438                 ret = -ENOMEM;
1439                 goto out;
1440         }
1441
1442         memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN);
1443
1444         cb0->port_mac_addr = cb0->port_mac_addr >> 16;
1445
1446         hret = ehea_h_modify_ehea_port(port->adapter->handle,
1447                                        port->logical_port_id, H_PORT_CB0,
1448                                        EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0);
1449         if (hret != H_SUCCESS) {
1450                 ret = -EIO;
1451                 goto out_free;
1452         }
1453
1454         memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);
1455
1456         /* Deregister old MAC in pHYP */
1457         ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
1458         if (ret)
1459                 goto out_free;
1460
1461         port->mac_addr = cb0->port_mac_addr << 16;
1462
1463         /* Register new MAC in pHYP */
1464         ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
1465         if (ret)
1466                 goto out_free;
1467
1468         ret = 0;
1469 out_free:
1470         kfree(cb0);
1471 out:
1472         return ret;
1473 }
1474
1475 static void ehea_promiscuous_error(u64 hret, int enable)
1476 {
1477         if (hret == H_AUTHORITY)
1478                 ehea_info("Hypervisor denied %sabling promiscuous mode",
1479                           enable == 1 ? "en" : "dis");
1480         else
1481                 ehea_error("failed %sabling promiscuous mode",
1482                            enable == 1 ? "en" : "dis");
1483 }
1484
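/*
 * Enable or disable promiscuous mode by setting or clearing the port's
 * default unicast QP number (Pxs_DUCQPN in CB7): when set, frames that
 * match no registered MAC are apparently delivered to that QP as well.
 */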
1485 static void ehea_promiscuous(struct net_device *dev, int enable)
1486 {
1487         struct ehea_port *port = netdev_priv(dev);
1488         struct hcp_ehea_port_cb7 *cb7;
1489         u64 hret;
1490
1491         if ((enable && port->promisc) || (!enable && !port->promisc))
1492                 return;
1493
1494         cb7 = kzalloc(PAGE_SIZE, GFP_ATOMIC);
1495         if (!cb7) {
1496                 ehea_error("no mem for cb7");
1497                 goto out;
1498         }
1499
1500         /* Modify Pxs_DUCQPN in CB7 */
1501         cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0;
1502
1503         hret = ehea_h_modify_ehea_port(port->adapter->handle,
1504                                        port->logical_port_id,
1505                                        H_PORT_CB7, H_PORT_CB7_DUCQPN, cb7);
1506         if (hret) {
1507                 ehea_promiscuous_error(hret, enable);
1508                 goto out;
1509         }
1510
1511         port->promisc = enable;
1512 out:
1513         kfree(cb7);
1514         return;
1515 }
1516
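/*
 * Register or deregister one multicast MAC with the hypervisor, again
 * once untagged and once for all VLAN ids; the raw hcall status is
 * returned so callers can report failures.
 */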
1517 static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
1518                                      u32 hcallid)
1519 {
1520         u64 hret;
1521         u8 reg_type;
1522
1523         reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
1524                  | EHEA_BCMC_UNTAGGED;
1525
1526         hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1527                                      port->logical_port_id,
1528                                      reg_type, mc_mac_addr, 0, hcallid);
1529         if (hret)
1530                 goto out;
1531
1532         reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
1533                  | EHEA_BCMC_VLANID_ALL;
1534
1535         hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1536                                      port->logical_port_id,
1537                                      reg_type, mc_mac_addr, 0, hcallid);
1538 out:
1539         return hret;
1540 }
1541
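/*
 * Deregister and free every entry on the driver's private multicast
 * list.  Failures are reported but the walk continues, so the list is
 * always emptied.
 */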
1542 static int ehea_drop_multicast_list(struct net_device *dev)
1543 {
1544         struct ehea_port *port = netdev_priv(dev);
1545         struct ehea_mc_list *mc_entry = port->mc_list;
1546         struct list_head *pos;
1547         struct list_head *temp;
1548         int ret = 0;
1549         u64 hret;
1550
1551         list_for_each_safe(pos, temp, &(port->mc_list->list)) {
1552                 mc_entry = list_entry(pos, struct ehea_mc_list, list);
1553
1554                 hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
1555                                                  H_DEREG_BCMC);
1556                 if (hret) {
1557                         ehea_error("failed deregistering mcast MAC");
1558                         ret = -EIO;
1559                 }
1560
1561                 list_del(pos);
1562                 kfree(mc_entry);
1563         }
1564         return ret;
1565 }
1566
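/*
 * IFF_ALLMULTI is mapped to a BCMC registration with a zero MAC, which
 * the firmware presumably treats as a multicast catch-all; enabling it
 * drops the individual multicast registrations first.
 */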
1567 static void ehea_allmulti(struct net_device *dev, int enable)
1568 {
1569         struct ehea_port *port = netdev_priv(dev);
1570         u64 hret;
1571
1572         if (!port->allmulti) {
1573                 if (enable) {
1574                         /* Enable ALLMULTI */
1575                         ehea_drop_multicast_list(dev);
1576                         hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC);
1577                         if (!hret)
1578                                 port->allmulti = 1;
1579                         else
1580                                 ehea_error("failed enabling IFF_ALLMULTI");
1581                 }
        } else if (!enable) {
                /* Disable ALLMULTI */
                hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC);
                if (!hret)
                        port->allmulti = 0;
                else
                        ehea_error("failed disabling IFF_ALLMULTI");
        }
1591 }
1592
static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
1594 {
1595         struct ehea_mc_list *ehea_mcl_entry;
1596         u64 hret;
1597
1598         ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
1599         if (!ehea_mcl_entry) {
1600                 ehea_error("no mem for mcl_entry");
1601                 return;
1602         }
1603
1604         INIT_LIST_HEAD(&ehea_mcl_entry->list);
1605
1606         memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN);
1607
1608         hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr,
1609                                          H_REG_BCMC);
1610         if (!hret)
1611                 list_add(&ehea_mcl_entry->list, &port->mc_list->list);
1612         else {
1613                 ehea_error("failed registering mcast MAC");
1614                 kfree(ehea_mcl_entry);
1615         }
1616 }
1617
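/*
 * set_multicast_list hook: map IFF_PROMISC and IFF_ALLMULTI to the
 * helpers above, then mirror the kernel's multicast list into BCMC
 * registrations.  If the old list cannot be dropped we fall back to
 * ALLMULTI; lists longer than max_mc_mac are rejected with a hint to
 * use ALLMULTI instead.
 */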
1618 static void ehea_set_multicast_list(struct net_device *dev)
1619 {
1620         struct ehea_port *port = netdev_priv(dev);
1621         struct dev_mc_list *k_mcl_entry;
1622         int ret, i;
1623
1624         if (dev->flags & IFF_PROMISC) {
1625                 ehea_promiscuous(dev, 1);
1626                 return;
1627         }
1628         ehea_promiscuous(dev, 0);
1629
1630         if (dev->flags & IFF_ALLMULTI) {
1631                 ehea_allmulti(dev, 1);
1632                 return;
1633         }
1634         ehea_allmulti(dev, 0);
1635
1636         if (dev->mc_count) {
1637                 ret = ehea_drop_multicast_list(dev);
1638                 if (ret) {
1639                         /* Dropping the current multicast list failed.
1640                          * Enabling ALL_MULTI is the best we can do.
1641                          */
1642                         ehea_allmulti(dev, 1);
1643                 }
1644
1645                 if (dev->mc_count > port->adapter->max_mc_mac) {
1646                         ehea_info("Mcast registration limit reached (0x%lx). "
1647                                   "Use ALLMULTI!",
1648                                   port->adapter->max_mc_mac);
1649                         goto out;
1650                 }
1651
1652                 for (i = 0, k_mcl_entry = dev->mc_list;
1653                      i < dev->mc_count;
1654                      i++, k_mcl_entry = k_mcl_entry->next) {
1655                         ehea_add_multicast_entry(port, k_mcl_entry->dmi_addr);
1656                 }
1657         }
1658 out:
1659         return;
1660 }
1661
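/*
 * 68 bytes is the minimum MTU an IPv4 host must accept (RFC 791);
 * anything up to the eHEA jumbo maximum is allowed.
 */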
1662 static int ehea_change_mtu(struct net_device *dev, int new_mtu)
1663 {
1664         if ((new_mtu < 68) || (new_mtu > EHEA_MAX_PACKET_SIZE))
1665                 return -EINVAL;
1666         dev->mtu = new_mtu;
1667         return 0;
1668 }
1669
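/*
 * Build a type-2 SWQE: headers travel as immediate data, the payload
 * is referenced through scatter/gather descriptors filled in by
 * write_swqe2_data().  Checksum offload is requested where possible;
 * UDP fragments keep their precomputed checksum.
 */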
1670 static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
1671                        struct ehea_swqe *swqe, u32 lkey)
1672 {
1673         if (skb->protocol == htons(ETH_P_IP)) {
1674                 const struct iphdr *iph = ip_hdr(skb);
1675                 /* IPv4 */
1676                 swqe->tx_control |= EHEA_SWQE_CRC
1677                                  | EHEA_SWQE_IP_CHECKSUM
1678                                  | EHEA_SWQE_TCP_CHECKSUM
1679                                  | EHEA_SWQE_IMM_DATA_PRESENT
1680                                  | EHEA_SWQE_DESCRIPTORS_PRESENT;
1681
1682                 write_ip_start_end(swqe, skb);
1683
1684                 if (iph->protocol == IPPROTO_UDP) {
                        /* frag_off is big-endian, so convert the mask */
                        if (iph->frag_off & htons(IP_MF | IP_OFFSET))
1687                                 /* IP fragment, so don't change cs */
1688                                 swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM;
1689                         else
1690                                 write_udp_offset_end(swqe, skb);
1691
1692                 } else if (iph->protocol == IPPROTO_TCP) {
1693                         write_tcp_offset_end(swqe, skb);
1694                 }
1695
1696                 /* icmp (big data) and ip segmentation packets (all other ip
1697                    packets) do not require any special handling */
1698
1699         } else {
1700                 /* Other Ethernet Protocol */
1701                 swqe->tx_control |= EHEA_SWQE_CRC
1702                                  | EHEA_SWQE_IMM_DATA_PRESENT
1703                                  | EHEA_SWQE_DESCRIPTORS_PRESENT;
1704         }
1705
1706         write_swqe2_data(skb, dev, swqe, lkey);
1707 }
1708
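/*
 * Build a type-3 SWQE: the whole frame (linear data plus fragments) is
 * copied into the WQE's immediate-data area, so the skb can be freed
 * here instead of at TX completion time.
 */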
1709 static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
1710                        struct ehea_swqe *swqe)
1711 {
1712         int nfrags = skb_shinfo(skb)->nr_frags;
1713         u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0];
1714         skb_frag_t *frag;
1715         int i;
1716
1717         if (skb->protocol == htons(ETH_P_IP)) {
1718                 const struct iphdr *iph = ip_hdr(skb);
1719                 /* IPv4 */
1720                 write_ip_start_end(swqe, skb);
1721
1722                 if (iph->protocol == IPPROTO_TCP) {
1723                         swqe->tx_control |= EHEA_SWQE_CRC
1724                                          | EHEA_SWQE_IP_CHECKSUM
1725                                          | EHEA_SWQE_TCP_CHECKSUM
1726                                          | EHEA_SWQE_IMM_DATA_PRESENT;
1727
1728                         write_tcp_offset_end(swqe, skb);
1729
1730                 } else if (iph->protocol == IPPROTO_UDP) {
                        if (iph->frag_off & htons(IP_MF | IP_OFFSET))
1733                                 /* IP fragment, so don't change cs */
1734                                 swqe->tx_control |= EHEA_SWQE_CRC
1735                                                  | EHEA_SWQE_IMM_DATA_PRESENT;
1736                         else {
1737                                 swqe->tx_control |= EHEA_SWQE_CRC
1738                                                  | EHEA_SWQE_IP_CHECKSUM
1739                                                  | EHEA_SWQE_TCP_CHECKSUM
1740                                                  | EHEA_SWQE_IMM_DATA_PRESENT;
1741
1742                                 write_udp_offset_end(swqe, skb);
1743                         }
1744                 } else {
1745                         /* icmp (big data) and
1746                            ip segmentation packets (all other ip packets) */
1747                         swqe->tx_control |= EHEA_SWQE_CRC
1748                                          | EHEA_SWQE_IP_CHECKSUM
1749                                          | EHEA_SWQE_IMM_DATA_PRESENT;
1750                 }
1751         } else {
1752                 /* Other Ethernet Protocol */
1753                 swqe->tx_control |= EHEA_SWQE_CRC | EHEA_SWQE_IMM_DATA_PRESENT;
1754         }
1755         /* copy (immediate) data */
1756         if (nfrags == 0) {
1757                 /* data is in a single piece */
1758                 skb_copy_from_linear_data(skb, imm_data, skb->len);
1759         } else {
1760                 /* first copy data from the skb->data buffer ... */
1761                 skb_copy_from_linear_data(skb, imm_data,
1762                                           skb->len - skb->data_len);
1763                 imm_data += skb->len - skb->data_len;
1764
1765                 /* ... then copy data from the fragments */
1766                 for (i = 0; i < nfrags; i++) {
1767                         frag = &skb_shinfo(skb)->frags[i];
1768                         memcpy(imm_data,
1769                                page_address(frag->page) + frag->page_offset,
1770                                frag->size);
1771                         imm_data += frag->size;
1772                 }
1773         }
1774         swqe->immediate_data_length = skb->len;
1775         dev_kfree_skb(skb);
1776 }
1777
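/*
 * Select a TX queue: TCP flows are hashed over the port numbers and
 * the destination address so one flow sticks to one QP; everything
 * else uses QP 0.
 */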
static inline int ehea_hash_skb(struct sk_buff *skb, int num_qps)
{
        struct tcphdr *tcp;
        u32 tmp;

        if ((skb->protocol == htons(ETH_P_IP)) &&
            (ip_hdr(skb)->protocol == IPPROTO_TCP)) {
                tcp = (struct tcphdr *)(skb_network_header(skb)
                                        + (ip_hdr(skb)->ihl * 4));
                tmp = (tcp->source + (tcp->dest << 16)) % 31;
                tmp += ip_hdr(skb)->daddr % 31;
                return tmp % num_qps;
        } else
                return 0;
}
1793
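/*
 * hard_start_xmit (LLTX, hence the trylock): pick a QP, build a type-3
 * WQE for frames that fit into immediate data (signalling a completion
 * only every sig_comp_iv WQEs) or a type-2 WQE otherwise, post it, and
 * stop the queue when the send queue is about to run out of WQEs.
 */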
1794 static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
1795 {
1796         struct ehea_port *port = netdev_priv(dev);
1797         struct ehea_swqe *swqe;
1798         unsigned long flags;
1799         u32 lkey;
1800         int swqe_index;
1801         struct ehea_port_res *pr;
1802
1803         pr = &port->port_res[ehea_hash_skb(skb, port->num_tx_qps)];
1804
1805         if (!spin_trylock(&pr->xmit_lock))
1806                 return NETDEV_TX_BUSY;
1807
1808         if (pr->queue_stopped) {
1809                 spin_unlock(&pr->xmit_lock);
1810                 return NETDEV_TX_BUSY;
1811         }
1812
1813         swqe = ehea_get_swqe(pr->qp, &swqe_index);
1814         memset(swqe, 0, SWQE_HEADER_SIZE);
1815         atomic_dec(&pr->swqe_avail);
1816
1817         if (skb->len <= SWQE3_MAX_IMM) {
1818                 u32 sig_iv = port->sig_comp_iv;
1819                 u32 swqe_num = pr->swqe_id_counter;
1820                 ehea_xmit3(skb, dev, swqe);
1821                 swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE)
1822                         | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_num);
1823                 if (pr->swqe_ll_count >= (sig_iv - 1)) {
1824                         swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
1825                                                       sig_iv);
1826                         swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
1827                         pr->swqe_ll_count = 0;
1828                 } else
1829                         pr->swqe_ll_count += 1;
1830         } else {
1831                 swqe->wr_id =
1832                         EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
1833                       | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter)
1834                       | EHEA_BMASK_SET(EHEA_WR_ID_REFILL, 1)
1835                       | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index);
1836                 pr->sq_skba.arr[pr->sq_skba.index] = skb;
1837
1838                 pr->sq_skba.index++;
1839                 pr->sq_skba.index &= (pr->sq_skba.len - 1);
1840
1841                 lkey = pr->send_mr.lkey;
1842                 ehea_xmit2(skb, dev, swqe, lkey);
1843                 swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
1844         }
1845         pr->swqe_id_counter += 1;
1846
1847         if (port->vgrp && vlan_tx_tag_present(skb)) {
1848                 swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
1849                 swqe->vlan_tag = vlan_tx_tag_get(skb);
1850         }
1851
1852         if (netif_msg_tx_queued(port)) {
1853                 ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr);
1854                 ehea_dump(swqe, 512, "swqe");
1855         }
1856
1857         ehea_post_swqe(pr->qp, swqe);
1858         pr->tx_packets++;
1859
1860         if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
1861                 spin_lock_irqsave(&pr->netif_queue, flags);
1862                 if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
1863                         pr->p_stats.queue_stopped++;
1864                         netif_stop_queue(dev);
1865                         pr->queue_stopped = 1;
1866                 }
1867                 spin_unlock_irqrestore(&pr->netif_queue, flags);
1868         }
1869         dev->trans_start = jiffies;
1870         spin_unlock(&pr->xmit_lock);
1871
1872         return NETDEV_TX_OK;
1873 }
1874
1875 static void ehea_vlan_rx_register(struct net_device *dev,
1876                                   struct vlan_group *grp)
1877 {
1878         struct ehea_port *port = netdev_priv(dev);
1879         struct ehea_adapter *adapter = port->adapter;
1880         struct hcp_ehea_port_cb1 *cb1;
1881         u64 hret;
1882
1883         port->vgrp = grp;
1884
1885         cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
1886         if (!cb1) {
1887                 ehea_error("no mem for cb1");
1888                 goto out;
1889         }
1890
1891         if (grp)
1892                 memset(cb1->vlan_filter, 0, sizeof(cb1->vlan_filter));
1893         else
1894                 memset(cb1->vlan_filter, 0xFF, sizeof(cb1->vlan_filter));
1895
1896         hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
1897                                        H_PORT_CB1, H_PORT_CB1_ALL, cb1);
1898         if (hret != H_SUCCESS)
1899                 ehea_error("modify_ehea_port failed");
1900
1901         kfree(cb1);
1902 out:
1903         return;
1904 }
1905
1906 static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
1907 {
1908         struct ehea_port *port = netdev_priv(dev);
1909         struct ehea_adapter *adapter = port->adapter;
1910         struct hcp_ehea_port_cb1 *cb1;
1911         int index;
1912         u64 hret;
1913
1914         cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
1915         if (!cb1) {
1916                 ehea_error("no mem for cb1");
1917                 goto out;
1918         }
1919
1920         hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
1921                                       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
1922         if (hret != H_SUCCESS) {
1923                 ehea_error("query_ehea_port failed");
1924                 goto out;
1925         }
1926
1927         index = (vid / 64);
        /* shift must be done in 64 bits: the bit index may exceed 31 */
        cb1->vlan_filter[index] |= ((u64)1 << (vid & 0x3F));
1929
1930         hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
1931                                        H_PORT_CB1, H_PORT_CB1_ALL, cb1);
1932         if (hret != H_SUCCESS)
1933                 ehea_error("modify_ehea_port failed");
1934 out:
1935         kfree(cb1);
1936         return;
1937 }
1938
1939 static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
1940 {
1941         struct ehea_port *port = netdev_priv(dev);
1942         struct ehea_adapter *adapter = port->adapter;
1943         struct hcp_ehea_port_cb1 *cb1;
1944         int index;
1945         u64 hret;
1946
1947         vlan_group_set_device(port->vgrp, vid, NULL);
1948
1949         cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
1950         if (!cb1) {
1951                 ehea_error("no mem for cb1");
1952                 goto out;
1953         }
1954
1955         hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
1956                                       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
1957         if (hret != H_SUCCESS) {
1958                 ehea_error("query_ehea_port failed");
1959                 goto out;
1960         }
1961
1962         index = (vid / 64);
        cb1->vlan_filter[index] &= ~((u64)1 << (vid & 0x3F));
1964
1965         hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
1966                                        H_PORT_CB1, H_PORT_CB1_ALL, cb1);
1967         if (hret != H_SUCCESS)
1968                 ehea_error("modify_ehea_port failed");
1969 out:
1970         kfree(cb1);
1971         return;
1972 }
1973
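/*
 * Walk the QP through its state machine with query/modify hcall pairs:
 * INITIALIZED, then ENABLED, then RDY2SND, each transition verified by
 * a follow-up query of the full control block.
 */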
1974 int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
1975 {
1976         int ret = -EIO;
1977         u64 hret;
1978         u16 dummy16 = 0;
1979         u64 dummy64 = 0;
        struct hcp_modify_qp_cb0 *cb0;
1981
1982         cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
1983         if (!cb0) {
1984                 ret = -ENOMEM;
1985                 goto out;
1986         }
1987
1988         hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
1989                                     EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
1990         if (hret != H_SUCCESS) {
1991                 ehea_error("query_ehea_qp failed (1)");
1992                 goto out;
1993         }
1994
1995         cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED;
1996         hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
1997                                      EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
1998                                      &dummy64, &dummy64, &dummy16, &dummy16);
1999         if (hret != H_SUCCESS) {
2000                 ehea_error("modify_ehea_qp failed (1)");
2001                 goto out;
2002         }
2003
2004         hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2005                                     EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2006         if (hret != H_SUCCESS) {
2007                 ehea_error("query_ehea_qp failed (2)");
2008                 goto out;
2009         }
2010
2011         cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED;
2012         hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2013                                      EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2014                                      &dummy64, &dummy64, &dummy16, &dummy16);
2015         if (hret != H_SUCCESS) {
2016                 ehea_error("modify_ehea_qp failed (2)");
2017                 goto out;
2018         }
2019
2020         hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2021                                     EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2022         if (hret != H_SUCCESS) {
2023                 ehea_error("query_ehea_qp failed (3)");
2024                 goto out;
2025         }
2026
2027         cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND;
2028         hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2029                                      EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2030                                      &dummy64, &dummy64, &dummy16, &dummy16);
2031         if (hret != H_SUCCESS) {
2032                 ehea_error("modify_ehea_qp failed (3)");
2033                 goto out;
2034         }
2035
2036         hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2037                                     EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2038         if (hret != H_SUCCESS) {
2039                 ehea_error("query_ehea_qp failed (4)");
2040                 goto out;
2041         }
2042
2043         ret = 0;
2044 out:
2045         kfree(cb0);
2046         return ret;
2047 }
2048
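/*
 * Create the port's event queue and queue pairs: def_qps full-sized
 * RX/TX port resources plus add_tx_qps TX-only ones whose receive
 * queues are kept minimal.
 */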
2049 static int ehea_port_res_setup(struct ehea_port *port, int def_qps,
2050                                int add_tx_qps)
2051 {
2052         int ret, i;
2053         struct port_res_cfg pr_cfg, pr_cfg_small_rx;
2054         enum ehea_eq_type eq_type = EHEA_EQ;
2055
2056         port->qp_eq = ehea_create_eq(port->adapter, eq_type,
2057                                    EHEA_MAX_ENTRIES_EQ, 1);
2058         if (!port->qp_eq) {
2059                 ret = -EINVAL;
2060                 ehea_error("ehea_create_eq failed (qp_eq)");
2061                 goto out_kill_eq;
2062         }
2063
2064         pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries;
2065         pr_cfg.max_entries_scq = sq_entries * 2;
2066         pr_cfg.max_entries_sq = sq_entries;
2067         pr_cfg.max_entries_rq1 = rq1_entries;
2068         pr_cfg.max_entries_rq2 = rq2_entries;
2069         pr_cfg.max_entries_rq3 = rq3_entries;
2070
2071         pr_cfg_small_rx.max_entries_rcq = 1;
2072         pr_cfg_small_rx.max_entries_scq = sq_entries;
2073         pr_cfg_small_rx.max_entries_sq = sq_entries;
2074         pr_cfg_small_rx.max_entries_rq1 = 1;
2075         pr_cfg_small_rx.max_entries_rq2 = 1;
2076         pr_cfg_small_rx.max_entries_rq3 = 1;
2077
2078         for (i = 0; i < def_qps; i++) {
2079                 ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i);
2080                 if (ret)
2081                         goto out_clean_pr;
2082         }
2083         for (i = def_qps; i < def_qps + add_tx_qps; i++) {
2084                 ret = ehea_init_port_res(port, &port->port_res[i],
2085                                          &pr_cfg_small_rx, i);
2086                 if (ret)
2087                         goto out_clean_pr;
2088         }
2089
2090         return 0;
2091
2092 out_clean_pr:
2093         while (--i >= 0)
2094                 ehea_clean_portres(port, &port->port_res[i]);
2095
2096 out_kill_eq:
2097         ehea_destroy_eq(port->qp_eq);
2098         return ret;
2099 }
2100
2101 static int ehea_clean_all_portres(struct ehea_port *port)
2102 {
2103         int ret = 0;
2104         int i;
2105
        for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
2107                 ret |= ehea_clean_portres(port, &port->port_res[i]);
2108
2109         ret |= ehea_destroy_eq(port->qp_eq);
2110
2111         return ret;
2112 }
2113
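/*
 * Bring the port up: create the port resources, set the default QPs,
 * register the broadcast MAC, request the interrupts, activate all QPs
 * and pre-fill the receive queues.  Each failure path unwinds the
 * steps completed so far.
 */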
2114 static int ehea_up(struct net_device *dev)
2115 {
2116         int ret, i;
2117         struct ehea_port *port = netdev_priv(dev);
2119
2120         if (port->state == EHEA_PORT_UP)
2121                 return 0;
2122
2123         ret = ehea_port_res_setup(port, port->num_def_qps,
2124                                   port->num_add_tx_qps);
2125         if (ret) {
                ehea_error("port_res_setup failed");
2127                 goto out;
2128         }
2129
2130         /* Set default QP for this port */
2131         ret = ehea_configure_port(port);
2132         if (ret) {
2133                 ehea_error("ehea_configure_port failed. ret:%d", ret);
2134                 goto out_clean_pr;
2135         }
2136
        ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
        if (ret) {
                ret = -EIO;
                ehea_error("broadcast_reg_helper failed");
                goto out_clean_pr;
        }
2144
2145         ret = ehea_reg_interrupts(dev);
2146         if (ret) {
                ehea_error("reg_interrupts failed. ret:%d", ret);
2148                 goto out_dereg_bc;
2149         }
2150
        for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
2152                 ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
2153                 if (ret) {
2154                         ehea_error("activate_qp failed");
2155                         goto out_free_irqs;
2156                 }
2157         }
2158
        for (i = 0; i < port->num_def_qps; i++) {
                ret = ehea_fill_port_res(&port->port_res[i]);
                if (ret) {
                        ehea_error("fill_port_res failed");
2163                         goto out_free_irqs;
2164                 }
2165         }
2166
2167         ret = 0;
2168         port->state = EHEA_PORT_UP;
2169         goto out;
2170
2171 out_free_irqs:
2172         ehea_free_interrupts(dev);
2173
2174 out_dereg_bc:
2175         ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
2176
2177 out_clean_pr:
2178         ehea_clean_all_portres(port);
2179 out:
2180         return ret;
2181 }
2182
2183 static int ehea_open(struct net_device *dev)
2184 {
2185         int ret;
2186         struct ehea_port *port = netdev_priv(dev);
2187
2188         down(&port->port_lock);
2189
2190         if (netif_msg_ifup(port))
2191                 ehea_info("enabling port %s", dev->name);
2192
2193         ret = ehea_up(dev);
2194         if (!ret)
2195                 netif_start_queue(dev);
2196
2197         up(&port->port_lock);
2198
2199         return ret;
2200 }
2201
2202 static int ehea_down(struct net_device *dev)
2203 {
2204         int ret, i;
2205         struct ehea_port *port = netdev_priv(dev);
2206
2207         if (port->state == EHEA_PORT_DOWN)
2208                 return 0;
2209
2210         ehea_drop_multicast_list(dev);
2211         ehea_free_interrupts(dev);
2212
2213         for (i = 0; i < port->num_def_qps; i++)
2214                 while (test_bit(__LINK_STATE_RX_SCHED,
2215                                 &port->port_res[i].d_netdev->state))
2216                         msleep(1);
2217
2218         ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
2219         ret = ehea_clean_all_portres(port);
2220         port->state = EHEA_PORT_DOWN;
2221         return ret;
2222 }
2223
2224 static int ehea_stop(struct net_device *dev)
2225 {
2226         int ret;
2227         struct ehea_port *port = netdev_priv(dev);
2228
2229         if (netif_msg_ifdown(port))
2230                 ehea_info("disabling port %s", dev->name);
2231
2232         flush_workqueue(port->adapter->ehea_wq);
2233         down(&port->port_lock);
2234         netif_stop_queue(dev);
2235         ret = ehea_down(dev);
2236         up(&port->port_lock);
2237         return ret;
2238 }
2239
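/*
 * Reset worker, queued from the TX watchdog: quiesce the queue and
 * polling, take the port down and bring it back up again.
 */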
2240 static void ehea_reset_port(struct work_struct *work)
2241 {
2242         int ret;
2243         struct ehea_port *port =
2244                 container_of(work, struct ehea_port, reset_task);
2245         struct net_device *dev = port->netdev;
2246
2247         port->resets++;
2248         down(&port->port_lock);
2249         netif_stop_queue(dev);
2250         netif_poll_disable(dev);
2251
2252         ret = ehea_down(dev);
2253         if (ret)
2254                 ehea_error("ehea_down failed. not all resources are freed");
2255
2256         ret = ehea_up(dev);
2257         if (ret) {
2258                 ehea_error("Reset device %s failed: ret=%d", dev->name, ret);
2259                 goto out;
2260         }
2261
2262         if (netif_msg_timer(port))
                ehea_info("Device %s reset successfully", dev->name);
2264
2265         netif_poll_enable(dev);
2266         netif_wake_queue(dev);
2267 out:
2268         up(&port->port_lock);
2269         return;
2270 }
2271
2272 static void ehea_tx_watchdog(struct net_device *dev)
2273 {
2274         struct ehea_port *port = netdev_priv(dev);
2275
2276         if (netif_carrier_ok(dev))
2277                 queue_work(port->adapter->ehea_wq, &port->reset_task);
2278 }
2279
2280 int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
2281 {
2282         struct hcp_query_ehea *cb;
2283         u64 hret;
2284         int ret;
2285
2286         cb = kzalloc(PAGE_SIZE, GFP_KERNEL);
2287         if (!cb) {
2288                 ret = -ENOMEM;
2289                 goto out;
2290         }
2291
2292         hret = ehea_h_query_ehea(adapter->handle, cb);
2293
2294         if (hret != H_SUCCESS) {
2295                 ret = -EIO;
2296                 goto out_herr;
2297         }
2298
2299         adapter->max_mc_mac = cb->max_mc_mac - 1;
2300         ret = 0;
2301
2302 out_herr:
2303         kfree(cb);
2304 out:
2305         return ret;
2306 }
2307
2308 int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
2309 {
2310         struct hcp_ehea_port_cb4 *cb4;
2311         u64 hret;
2312         int ret = 0;
2313
2314         *jumbo = 0;
2315
2316         /* (Try to) enable *jumbo frames */
        cb4 = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (!cb4) {
                ehea_error("no mem for cb4");
                ret = -ENOMEM;
                goto out;
        }

        hret = ehea_h_query_ehea_port(port->adapter->handle,
                                      port->logical_port_id,
                                      H_PORT_CB4, H_PORT_CB4_JUMBO, cb4);
        if (hret == H_SUCCESS) {
                if (cb4->jumbo_frame)
                        *jumbo = 1;
                else {
                        cb4->jumbo_frame = 1;
                        hret = ehea_h_modify_ehea_port(port->adapter->handle,
                                                       port->logical_port_id,
                                                       H_PORT_CB4,
                                                       H_PORT_CB4_JUMBO, cb4);
                        if (hret == H_SUCCESS)
                                *jumbo = 1;
                }
        } else
                ret = -EINVAL;

        kfree(cb4);
2347 out:
2348         return ret;
2349 }
2350
2351 static ssize_t ehea_show_port_id(struct device *dev,
2352                                  struct device_attribute *attr, char *buf)
2353 {
2354         struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
2355         return sprintf(buf, "0x%X", port->logical_port_id);
2356 }
2357
2358 static DEVICE_ATTR(log_port_id, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_port_id,
2359                    NULL);
2360
2361 static void __devinit logical_port_release(struct device *dev)
2362 {
2363         struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
2364         of_node_put(port->ofdev.node);
2365 }
2366
2367 static struct device *ehea_register_port(struct ehea_port *port,
2368                                          struct device_node *dn)
2369 {
2370         int ret;
2371
2372         port->ofdev.node = of_node_get(dn);
2373         port->ofdev.dev.parent = &port->adapter->ebus_dev->ofdev.dev;
2374
2375         sprintf(port->ofdev.dev.bus_id, "port%d", port->logical_port_id);
2376         port->ofdev.dev.release = logical_port_release;
2377
2378         ret = of_device_register(&port->ofdev);
2379         if (ret) {
2380                 ehea_error("failed to register device. ret=%d", ret);
2381                 goto out;
2382         }
2383
2384         ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
2385         if (ret) {
2386                 ehea_error("failed to register attributes, ret=%d", ret);
2387                 goto out_unreg_of_dev;
2388         }
2389
2390         return &port->ofdev.dev;
2391
2392 out_unreg_of_dev:
2393         of_device_unregister(&port->ofdev);
2394 out:
2395         return NULL;
2396 }
2397
2398 static void ehea_unregister_port(struct ehea_port *port)
2399 {
2400         device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
2401         of_device_unregister(&port->ofdev);
2402 }
2403
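/*
 * Set up one net_device per logical port: sense the port attributes
 * from the hypervisor, register the OF child device with its sysfs
 * attribute, wire up the net_device operations and offload features,
 * and finally register the netdev.
 */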
2404 struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
2405                                          u32 logical_port_id,
2406                                          struct device_node *dn)
2407 {
2408         int ret;
2409         struct net_device *dev;
2410         struct ehea_port *port;
2411         struct device *port_dev;
2412         int jumbo;
2413
2414         /* allocate memory for the port structures */
2415         dev = alloc_etherdev(sizeof(struct ehea_port));
2416
2417         if (!dev) {
2418                 ehea_error("no mem for net_device");
2419                 ret = -ENOMEM;
2420                 goto out_err;
2421         }
2422
2423         port = netdev_priv(dev);
2424
2425         sema_init(&port->port_lock, 1);
2426         port->state = EHEA_PORT_DOWN;
2427         port->sig_comp_iv = sq_entries / 10;
2428
2429         port->adapter = adapter;
2430         port->netdev = dev;
2431         port->logical_port_id = logical_port_id;
2432
2433         port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT);
2434
2435         port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL);
2436         if (!port->mc_list) {
2437                 ret = -ENOMEM;
2438                 goto out_free_ethdev;
2439         }
2440
2441         INIT_LIST_HEAD(&port->mc_list->list);
2442
2443         ret = ehea_sense_port_attr(port);
2444         if (ret)
2445                 goto out_free_mc_list;
2446
2447         port_dev = ehea_register_port(port, dn);
2448         if (!port_dev)
2449                 goto out_free_mc_list;
2450
2451         SET_NETDEV_DEV(dev, port_dev);
2452
2453         /* initialize net_device structure */
2454         SET_MODULE_OWNER(dev);
2455
2456         memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);
2457
2458         dev->open = ehea_open;
2459         dev->poll = ehea_poll;
2460         dev->weight = 64;
2461         dev->stop = ehea_stop;
2462         dev->hard_start_xmit = ehea_start_xmit;
2463         dev->get_stats = ehea_get_stats;
2464         dev->set_multicast_list = ehea_set_multicast_list;
2465         dev->set_mac_address = ehea_set_mac_addr;
2466         dev->change_mtu = ehea_change_mtu;
2467         dev->vlan_rx_register = ehea_vlan_rx_register;
2468         dev->vlan_rx_add_vid = ehea_vlan_rx_add_vid;
2469         dev->vlan_rx_kill_vid = ehea_vlan_rx_kill_vid;
2470         dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
2471                       | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_TX
2472                       | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
2473                       | NETIF_F_LLTX;
2474         dev->tx_timeout = &ehea_tx_watchdog;
2475         dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
2476
2477         INIT_WORK(&port->reset_task, ehea_reset_port);
2478
2479         ehea_set_ethtool_ops(dev);
2480
2481         ret = register_netdev(dev);
2482         if (ret) {
2483                 ehea_error("register_netdev failed. ret=%d", ret);
2484                 goto out_unreg_port;
2485         }
2486
2487         ret = ehea_get_jumboframe_status(port, &jumbo);
2488         if (ret)
2489                 ehea_error("failed determining jumbo frame status for %s",
2490                            port->netdev->name);
2491
2492         ehea_info("%s: Jumbo frames are %sabled", dev->name,
2493                   jumbo == 1 ? "en" : "dis");
2494
2495         return port;
2496
2497 out_unreg_port:
2498         ehea_unregister_port(port);
2499
2500 out_free_mc_list:
2501         kfree(port->mc_list);
2502
2503 out_free_ethdev:
2504         free_netdev(dev);
2505
2506 out_err:
2507         ehea_error("setting up logical port with id=%d failed, ret=%d",
2508                    logical_port_id, ret);
2509         return NULL;
2510 }
2511
2512 static void ehea_shutdown_single_port(struct ehea_port *port)
2513 {
2514         unregister_netdev(port->netdev);
2515         ehea_unregister_port(port);
2516         kfree(port->mc_list);
2517         free_netdev(port->netdev);
2518 }
2519
2520 static int ehea_setup_ports(struct ehea_adapter *adapter)
2521 {
2522         struct device_node *lhea_dn;
2523         struct device_node *eth_dn = NULL;
2524
2525         u32 *dn_log_port_id;
2526         int port_setup_ok = 0;
2527         int i = 0;
2528
2529         lhea_dn = adapter->ebus_dev->ofdev.node;
2530         while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
2531
                dn_log_port_id = (u32 *)get_property(eth_dn, "ibm,hea-port-no",
                                                     NULL);
2534                 if (!dn_log_port_id) {
2535                         ehea_error("bad device node: eth_dn name=%s",
2536                                    eth_dn->full_name);
2537                         continue;
2538                 }
2539
2540                 adapter->port[i] = ehea_setup_single_port(adapter,
2541                                                           *dn_log_port_id,
2542                                                           eth_dn);
2543                 if (adapter->port[i])
2544                         ehea_info("%s -> logical port id #%d",
2545                                   adapter->port[i]->netdev->name,
2546                                   *dn_log_port_id);
2547                 i++;
        }
2549
        /* Check for successfully set up ports */
2551         for (i = 0; i < EHEA_MAX_PORTS; i++)
2552                 if (adapter->port[i])
2553                         port_setup_ok++;
2554
2555         if (port_setup_ok)
2556                 return 0;       /* At least some ports are setup correctly */
2557
2558         return -EINVAL;
2559 }
2560
2561 static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
2562                                            u32 logical_port_id)
2563 {
2564         struct device_node *lhea_dn;
2565         struct device_node *eth_dn = NULL;
2566         u32 *dn_log_port_id;
2567
2568         lhea_dn = adapter->ebus_dev->ofdev.node;
2569         while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
2570
                dn_log_port_id = (u32 *)get_property(eth_dn, "ibm,hea-port-no",
                                                     NULL);
2573                 if (dn_log_port_id)
2574                         if (*dn_log_port_id == logical_port_id)
2575                                 return eth_dn;
        }
2577
2578         return NULL;
2579 }
2580
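/*
 * sysfs hooks for hot-adding and removing logical ports.  The port id
 * is written in hex to the attribute file, e.g. (the exact path
 * depends on the ebus device name) something like:
 *
 *   echo 2 > /sys/bus/ibmebus/devices/lhea@23c00100/probe_port
 */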
2581 static ssize_t ehea_probe_port(struct device *dev,
2582                                struct device_attribute *attr,
2583                                const char *buf, size_t count)
2584 {
2585         struct ehea_adapter *adapter = dev->driver_data;
2586         struct ehea_port *port;
2587         struct device_node *eth_dn = NULL;
2588         int i;
2589
2590         u32 logical_port_id;
2591
2592         sscanf(buf, "%X", &logical_port_id);
2593
2594         port = ehea_get_port(adapter, logical_port_id);
2595
2596         if (port) {
2597                 ehea_info("adding port with logical port id=%d failed. port "
2598                           "already configured as %s.", logical_port_id,
2599                           port->netdev->name);
2600                 return -EINVAL;
2601         }
2602
2603         eth_dn = ehea_get_eth_dn(adapter, logical_port_id);
2604
2605         if (!eth_dn) {
2606                 ehea_info("no logical port with id %d found", logical_port_id);
2607                 return -EINVAL;
2608         }
2609
2610         port = ehea_setup_single_port(adapter, logical_port_id, eth_dn);
2611
2612         of_node_put(eth_dn);
2613
2614         if (port) {
                for (i = 0; i < EHEA_MAX_PORTS; i++)
2616                         if (!adapter->port[i]) {
2617                                 adapter->port[i] = port;
2618                                 break;
2619                         }
2620
2621                 ehea_info("added %s (logical port id=%d)", port->netdev->name,
2622                           logical_port_id);
2623         } else
2624                 return -EIO;
2625
2626         return (ssize_t) count;
2627 }
2628
2629 static ssize_t ehea_remove_port(struct device *dev,
2630                                 struct device_attribute *attr,
2631                                 const char *buf, size_t count)
2632 {
2633         struct ehea_adapter *adapter = dev->driver_data;
2634         struct ehea_port *port;
2635         int i;
2636         u32 logical_port_id;
2637
2638         sscanf(buf, "%X", &logical_port_id);
2639
2640         port = ehea_get_port(adapter, logical_port_id);
2641
2642         if (port) {
2643                 ehea_info("removed %s (logical port id=%d)", port->netdev->name,
2644                           logical_port_id);
2645
2646                 ehea_shutdown_single_port(port);
2647
                for (i = 0; i < EHEA_MAX_PORTS; i++)
2649                         if (adapter->port[i] == port) {
2650                                 adapter->port[i] = NULL;
2651                                 break;
2652                         }
2653         } else {
2654                 ehea_error("removing port with logical port id=%d failed. port "
2655                            "not configured.", logical_port_id);
2656                 return -EINVAL;
2657         }
2658
2659         return (ssize_t) count;
2660 }
2661
2662 static DEVICE_ATTR(probe_port, S_IWUSR, NULL, ehea_probe_port);
2663 static DEVICE_ATTR(remove_port, S_IWUSR, NULL, ehea_remove_port);
2664
2665 int ehea_create_device_sysfs(struct ibmebus_dev *dev)
2666 {
2667         int ret = device_create_file(&dev->ofdev.dev, &dev_attr_probe_port);
2668         if (ret)
2669                 goto out;
2670
2671         ret = device_create_file(&dev->ofdev.dev, &dev_attr_remove_port);
2672 out:
2673         return ret;
2674 }
2675
2676 void ehea_remove_device_sysfs(struct ibmebus_dev *dev)
2677 {
2678         device_remove_file(&dev->ofdev.dev, &dev_attr_probe_port);
2679         device_remove_file(&dev->ofdev.dev, &dev_attr_remove_port);
2680 }
2681
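/*
 * ibmebus probe: read the adapter handle from the device tree,
 * register a kernel memory region, set up the notification EQ with its
 * tasklet and IRQ, create the reset workqueue and set up all ports
 * found below the lhea node.
 */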
2682 static int __devinit ehea_probe_adapter(struct ibmebus_dev *dev,
2683                                         const struct of_device_id *id)
2684 {
2685         struct ehea_adapter *adapter;
2686         u64 *adapter_handle;
2687         int ret;
2688
2689         if (!dev || !dev->ofdev.node) {
2690                 ehea_error("Invalid ibmebus device probed");
2691                 return -EINVAL;
2692         }
2693
2694         adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2695         if (!adapter) {
2696                 ret = -ENOMEM;
2697                 dev_err(&dev->ofdev.dev, "no mem for ehea_adapter\n");
2698                 goto out;
2699         }
2700
2701         adapter->ebus_dev = dev;
2702
        adapter_handle = (u64 *)get_property(dev->ofdev.node, "ibm,hea-handle",
                                             NULL);
2705         if (adapter_handle)
2706                 adapter->handle = *adapter_handle;
2707
2708         if (!adapter->handle) {
2709                 dev_err(&dev->ofdev.dev, "failed getting handle for adapter"
2710                         " '%s'\n", dev->ofdev.node->full_name);
2711                 ret = -ENODEV;
2712                 goto out_free_ad;
2713         }
2714
2715         adapter->pd = EHEA_PD_ID;
2716
2717         dev->ofdev.dev.driver_data = adapter;
2718
2719         ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
2720         if (ret) {
2721                 dev_err(&dev->ofdev.dev, "reg_mr_adapter failed\n");
2722                 goto out_free_ad;
2723         }
2724
2725         /* initialize adapter and ports */
2726         /* get adapter properties */
2727         ret = ehea_sense_adapter_attr(adapter);
2728         if (ret) {
2729                 dev_err(&dev->ofdev.dev, "sense_adapter_attr failed: %d", ret);
2730                 goto out_free_res;
2731         }
2732
2733         adapter->neq = ehea_create_eq(adapter,
2734                                       EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1);
2735         if (!adapter->neq) {
2736                 ret = -EIO;
2737                 dev_err(&dev->ofdev.dev, "NEQ creation failed");
2738                 goto out_free_res;
2739         }
2740
2741         tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
2742                      (unsigned long)adapter);
2743
2744         ret = ibmebus_request_irq(NULL, adapter->neq->attr.ist1,
2745                                   ehea_interrupt_neq, IRQF_DISABLED,
2746                                   "ehea_neq", adapter);
2747         if (ret) {
2748                 dev_err(&dev->ofdev.dev, "requesting NEQ IRQ failed");
2749                 goto out_kill_eq;
2750         }
2751
2752         adapter->ehea_wq = create_workqueue("ehea_wq");
2753         if (!adapter->ehea_wq) {
2754                 ret = -EIO;
2755                 goto out_free_irq;
2756         }
2757
2758         ret = ehea_create_device_sysfs(dev);
2759         if (ret)
2760                 goto out_kill_wq;
2761
2762         ret = ehea_setup_ports(adapter);
2763         if (ret) {
2764                 dev_err(&dev->ofdev.dev, "setup_ports failed");
2765                 goto out_rem_dev_sysfs;
2766         }
2767
2768         ret = 0;
2769         goto out;
2770
2771 out_rem_dev_sysfs:
2772         ehea_remove_device_sysfs(dev);
2773
2774 out_kill_wq:
2775         destroy_workqueue(adapter->ehea_wq);
2776
2777 out_free_irq:
2778         ibmebus_free_irq(NULL, adapter->neq->attr.ist1, adapter);
2779
2780 out_kill_eq:
2781         ehea_destroy_eq(adapter->neq);
2782
2783 out_free_res:
2784         ehea_rem_mr(&adapter->mr);
2785
2786 out_free_ad:
2787         kfree(adapter);
2788 out:
2789         return ret;
2790 }
2791
2792 static int __devexit ehea_remove(struct ibmebus_dev *dev)
2793 {
2794         struct ehea_adapter *adapter = dev->ofdev.dev.driver_data;
2795         int i;
2796
2797         for (i = 0; i < EHEA_MAX_PORTS; i++)
2798                 if (adapter->port[i]) {
2799                         ehea_shutdown_single_port(adapter->port[i]);
2800                         adapter->port[i] = NULL;
2801                 }
2802
2803         ehea_remove_device_sysfs(dev);
2804
2805         destroy_workqueue(adapter->ehea_wq);
2806
2807         ibmebus_free_irq(NULL, adapter->neq->attr.ist1, adapter);
2808         tasklet_kill(&adapter->neq_tasklet);
2809
2810         ehea_destroy_eq(adapter->neq);
2811         ehea_rem_mr(&adapter->mr);
2812         kfree(adapter);
2813         return 0;
2814 }
2815
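/*
 * Validate the queue size module parameters against the hardware
 * limits; all four are checked so that a single "modprobe ehea ..."
 * attempt reports every out-of-range value at once.
 */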
2816 static int check_module_parm(void)
2817 {
2818         int ret = 0;
2819
2820         if ((rq1_entries < EHEA_MIN_ENTRIES_QP) ||
2821             (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) {
2822                 ehea_info("Bad parameter: rq1_entries");
2823                 ret = -EINVAL;
2824         }
2825         if ((rq2_entries < EHEA_MIN_ENTRIES_QP) ||
2826             (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) {
2827                 ehea_info("Bad parameter: rq2_entries");
2828                 ret = -EINVAL;
2829         }
2830         if ((rq3_entries < EHEA_MIN_ENTRIES_QP) ||
2831             (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) {
2832                 ehea_info("Bad parameter: rq3_entries");
2833                 ret = -EINVAL;
2834         }
2835         if ((sq_entries < EHEA_MIN_ENTRIES_QP) ||
2836             (sq_entries > EHEA_MAX_ENTRIES_SQ)) {
2837                 ehea_info("Bad parameter: sq_entries");
2838                 ret = -EINVAL;
2839         }
2840
2841         return ret;
2842 }
2843
2844 static struct of_device_id ehea_device_table[] = {
2845         {
2846                 .name = "lhea",
2847                 .compatible = "IBM,lhea",
2848         },
2849         {},
2850 };
2851
2852 static struct ibmebus_driver ehea_driver = {
2853         .name = "ehea",
2854         .id_table = ehea_device_table,
2855         .probe = ehea_probe_adapter,
2856         .remove = ehea_remove,
2857 };
2858
2859 int __init ehea_module_init(void)
2860 {
2861         int ret;
2862
2863         printk(KERN_INFO "IBM eHEA ethernet device driver (Release %s)\n",
2864                DRV_VERSION);
2865
2866         ret = check_module_parm();
2867         if (ret)
2868                 goto out;
2869         ret = ibmebus_register_driver(&ehea_driver);
2870         if (ret)
2871                 ehea_error("failed registering eHEA device driver on ebus");
2872
2873 out:
2874         return ret;
2875 }
2876
2877 static void __exit ehea_module_exit(void)
2878 {
2879         ibmebus_unregister_driver(&ehea_driver);
2880 }
2881
2882 module_init(ehea_module_init);
2883 module_exit(ehea_module_exit);