/*
 *  linux/drivers/net/ehea/ehea_main.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *       Christoph Raisch <raisch@de.ibm.com>
 *       Jan-Bernd Themann <themann@de.ibm.com>
 *       Thomas Klein <tklein@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if.h>
#include <linux/list.h>
#include <linux/if_ether.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

#include <net/ip.h>

#include "ehea.h"
#include "ehea_qmr.h"
#include "ehea_phyp.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
MODULE_DESCRIPTION("IBM eServer HEA Driver");
MODULE_VERSION(DRV_VERSION);
static int msg_level = -1;
static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
static int sq_entries = EHEA_DEF_ENTRIES_SQ;
static int use_mcs;
static int use_lro;
static int lro_max_aggr = EHEA_LRO_MAX_AGGR;
static int num_tx_qps = EHEA_NUM_TX_QP;
static int prop_carrier_state;
module_param(msg_level, int, 0);
module_param(rq1_entries, int, 0);
module_param(rq2_entries, int, 0);
module_param(rq3_entries, int, 0);
module_param(sq_entries, int, 0);
module_param(prop_carrier_state, int, 0);
module_param(use_mcs, int, 0);
module_param(use_lro, int, 0);
module_param(lro_max_aggr, int, 0);
module_param(num_tx_qps, int, 0);
MODULE_PARM_DESC(num_tx_qps, "Number of TX-QPS");
MODULE_PARM_DESC(msg_level, "msg_level");
MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical "
		 "port to stack. 1:yes, 0:no. Default = 0 ");
MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3) ")");
MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2) ")");
MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1) ")");
MODULE_PARM_DESC(sq_entries, " Number of entries for the Send Queue "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ) ")");
MODULE_PARM_DESC(use_mcs, " 0:NAPI, 1:Multiple receive queues, Default = 0 ");
MODULE_PARM_DESC(lro_max_aggr, " LRO: Max packets to be aggregated. Default = "
		 __MODULE_STRING(EHEA_LRO_MAX_AGGR));
MODULE_PARM_DESC(use_lro, " Large Receive Offload, 1: enable, 0: disable, "
		 "Default = 0");
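
/*
 * Note: the queue sizes must be of the form 2^x - 1, as the parameter
 * descriptions above state, because the skb ring arrays are allocated
 * one entry larger (a power of two) and indices wrap via a bitmask,
 * e.g. "index &= max_index_mask" in the refill functions below.
 */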
static int port_name_cnt;
static LIST_HEAD(adapter_list);
u64 ehea_driver_flags;
struct work_struct ehea_rereg_mr_task;

struct semaphore dlpar_mem_lock;

static int __devinit ehea_probe_adapter(struct of_device *dev,
					const struct of_device_id *id);

static int __devexit ehea_remove(struct of_device *dev);

static struct of_device_id ehea_device_table[] = {
	{
		.name = "lhea",
		.compatible = "IBM,lhea",
	},
	{},
};

static struct of_platform_driver ehea_driver = {
	.name = "ehea",
	.match_table = ehea_device_table,
	.probe = ehea_probe_adapter,
	.remove = ehea_remove,
};
void ehea_dump(void *adr, int len, char *msg)
{
	int x;
	unsigned char *deb = adr;
	for (x = 0; x < len; x += 16) {
		printk(DRV_NAME " %s adr=%p ofs=%04x %016lx %016lx\n", msg,
		       deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
		deb += 16;
	}
}
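
/*
 * ehea_get_stats() merges two sources: hardware counters queried from
 * the hypervisor via H_PORT_CB2 (octets, unicast errors, multicast) and
 * per-queue packet counters that the RX/TX paths maintain in software.
 */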
static struct net_device_stats *ehea_get_stats(struct net_device *dev)
	struct ehea_port *port = netdev_priv(dev);
	struct net_device_stats *stats = &port->stats;
	struct hcp_ehea_port_cb2 *cb2;
	u64 hret, rx_packets, tx_packets;

	memset(stats, 0, sizeof(*stats));

	cb2 = kzalloc(PAGE_SIZE, GFP_KERNEL);
		ehea_error("no mem for cb2");

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id,
				      H_PORT_CB2, H_PORT_CB2_ALL, cb2);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_port failed");

	if (netif_msg_hw(port))
		ehea_dump(cb2, sizeof(*cb2), "net_device_stats");

	for (i = 0; i < port->num_def_qps; i++)
		rx_packets += port->port_res[i].rx_packets;

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
		tx_packets += port->port_res[i].tx_packets;

	stats->tx_packets = tx_packets;
	stats->multicast = cb2->rxmcp;
	stats->rx_errors = cb2->rxuerr;
	stats->rx_bytes = cb2->rxo;
	stats->tx_bytes = cb2->txo;
	stats->rx_packets = rx_packets;
static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int max_index_mask = pr->rq1_skba.len - 1;
	int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes;

	pr->rq1_skba.os_skbs = 0;

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		pr->rq1_skba.index = index;
		pr->rq1_skba.os_skbs = fill_wqes;

	for (i = 0; i < fill_wqes; i++) {
		if (!skb_arr_rq1[index]) {
			skb_arr_rq1[index] = netdev_alloc_skb(dev,
							      EHEA_L_PKT_SIZE);
			if (!skb_arr_rq1[index]) {
				pr->rq1_skba.os_skbs = fill_wqes - i;
				ehea_error("%s: no mem for skb/%d wqes filled",

		index &= max_index_mask;

	ehea_update_rq1a(pr->qp, adder);
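
/*
 * The refill functions keep a deficit counter, q_skba->os_skbs
 * ("outstanding skbs"): WQEs that could not be filled -- because skb
 * allocation failed or a DLPAR memory operation is in flight
 * (__EHEA_STOP_XFER) -- are made up on the next refill call.
 */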
static int ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;

	for (i = 0; i < pr->rq1_skba.len; i++) {
		skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
		if (!skb_arr_rq1[i]) {
			ehea_error("%s: no mem for skb/%d wqes filled",

	ehea_update_rq1a(pr->qp, nr_rq1a);
static int ehea_refill_rq_def(struct ehea_port_res *pr,
			      struct ehea_q_skb_arr *q_skba, int rq_nr,
			      int num_wqes, int wqe_type, int packet_size)
	struct net_device *dev = pr->port->netdev;
	struct ehea_qp *qp = pr->qp;
	struct sk_buff **skb_arr = q_skba->arr;
	struct ehea_rwqe *rwqe;
	int i, index, max_index_mask, fill_wqes;

	fill_wqes = q_skba->os_skbs + num_wqes;

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		q_skba->os_skbs = fill_wqes;

	index = q_skba->index;
	max_index_mask = q_skba->len - 1;
	for (i = 0; i < fill_wqes; i++) {
		struct sk_buff *skb = netdev_alloc_skb(dev, packet_size);
			ehea_error("%s: no mem for skb/%d wqes filled",
				   pr->port->netdev->name, i);
			q_skba->os_skbs = fill_wqes - i;

		skb_reserve(skb, NET_IP_ALIGN);

		skb_arr[index] = skb;
		tmp_addr = ehea_map_vaddr(skb->data);
		if (tmp_addr == -1) {
			q_skba->os_skbs = fill_wqes - i;

		rwqe = ehea_get_next_rwqe(qp, rq_nr);
		rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
			    | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
		rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
		rwqe->sg_list[0].vaddr = tmp_addr;
		rwqe->sg_list[0].len = packet_size;
		rwqe->data_segments = 1;

		index &= max_index_mask;

	q_skba->index = index;

	ehea_update_rq2a(pr->qp, adder);

	ehea_update_rq3a(pr->qp, adder);

static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes)
	return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
				  nr_of_wqes, EHEA_RWQE2_TYPE,
				  EHEA_RQ2_PKT_SIZE + NET_IP_ALIGN);

static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes)
	return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
				  nr_of_wqes, EHEA_RWQE3_TYPE,
				  EHEA_MAX_PACKET_SIZE + NET_IP_ALIGN);
static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
	*rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;
	if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)
	if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&
	    (cqe->header_length == 0))

static inline void ehea_fill_skb(struct net_device *dev,
				 struct sk_buff *skb, struct ehea_cqe *cqe)
	int length = cqe->num_bytes_transfered - 4;	/* remove CRC */

	skb_put(skb, length);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->protocol = eth_type_trans(skb, dev);

static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
					       struct ehea_cqe *cqe)
	int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);

	prefetchw(pref + EHEA_CACHE_LINE);

	pref = (skb_array[x]->data);
	prefetch(pref + EHEA_CACHE_LINE);
	prefetch(pref + EHEA_CACHE_LINE * 2);
	prefetch(pref + EHEA_CACHE_LINE * 3);
	skb = skb_array[skb_index];
	skb_array[skb_index] = NULL;
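
/*
 * Both lookup helpers prefetch the next ring slot and the first cache
 * lines of the next skb's data before returning the current skb,
 * hiding memory latency in the RX hot path.
 */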
static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array,
						  int arr_len, int wqe_index)
	prefetchw(pref + EHEA_CACHE_LINE);

	pref = (skb_array[x]->data);
	prefetchw(pref + EHEA_CACHE_LINE);

	skb = skb_array[wqe_index];
	skb_array[wqe_index] = NULL;

static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
				 struct ehea_cqe *cqe, int *processed_rq2,
	if (cqe->status & EHEA_CQE_STAT_ERR_TCP)
		pr->p_stats.err_tcp_cksum++;
	if (cqe->status & EHEA_CQE_STAT_ERR_IP)
		pr->p_stats.err_ip_cksum++;
	if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
		pr->p_stats.err_frame_crc++;

		skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe);
	} else if (rq == 3) {
		skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe);

	if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
		if (netif_msg_rx_err(pr->port)) {
			ehea_error("Critical receive error for QP %d. "
				   "Resetting port.", pr->qp->init_attr.qp_nr);
			ehea_dump(cqe, sizeof(*cqe), "CQE");
		schedule_work(&pr->port->reset_task);

static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
		       void **tcph, u64 *hdr_flags, void *priv)
	struct ehea_cqe *cqe = priv;

	/* non tcp/udp packets */
	if (!cqe->header_length)

	skb_reset_network_header(skb);
	if (iph->protocol != IPPROTO_TCP)

	ip_len = ip_hdrlen(skb);
	skb_set_transport_header(skb, ip_len);
	*tcph = tcp_hdr(skb);

	/* check if ip header and tcp header are complete */
	if (iph->tot_len < ip_len + tcp_hdrlen(skb))

	*hdr_flags = LRO_IPV4 | LRO_TCP;
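
/*
 * ehea_proc_skb() hands a received skb up the stack via one of four
 * paths, depending on whether LRO is active and whether the CQE carries
 * an extracted VLAN tag: lro_vlan_hwaccel_receive_skb(),
 * lro_receive_skb(), vlan_hwaccel_receive_skb() or netif_receive_skb().
 */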
static void ehea_proc_skb(struct ehea_port_res *pr, struct ehea_cqe *cqe,
	int vlan_extracted = (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)

			lro_vlan_hwaccel_receive_skb(&pr->lro_mgr, skb,

			lro_receive_skb(&pr->lro_mgr, skb, cqe);

			vlan_hwaccel_receive_skb(skb, pr->port->vgrp,

			netif_receive_skb(skb);

static int ehea_proc_rwqes(struct net_device *dev,
			   struct ehea_port_res *pr,
	struct ehea_port *port = pr->port;
	struct ehea_qp *qp = pr->qp;
	struct ehea_cqe *cqe;
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr;
	struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr;
	int skb_arr_rq1_len = pr->rq1_skba.len;
	int skb_arr_rq2_len = pr->rq2_skba.len;
	int skb_arr_rq3_len = pr->rq3_skba.len;
	int processed, processed_rq1, processed_rq2, processed_rq3;
	int wqe_index, last_wqe_index, rq, port_reset;

	processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;

	cqe = ehea_poll_rq1(qp, &wqe_index);
	while ((processed < budget) && cqe) {
		if (netif_msg_rx_status(port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		last_wqe_index = wqe_index;
		if (!ehea_check_cqe(cqe, &rq)) {
				skb = get_skb_by_index_ll(skb_arr_rq1,
							  skb_arr_rq1_len,
							  wqe_index);
				if (unlikely(!skb)) {
					if (netif_msg_rx_err(port))
						ehea_error("LL rq1: skb=NULL");
					skb = netdev_alloc_skb(dev,
				skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
							cqe->num_bytes_transfered - 4);
				ehea_fill_skb(dev, skb, cqe);
			} else if (rq == 2) {
				skb = get_skb_by_index(skb_arr_rq2,
						       skb_arr_rq2_len, cqe);
				if (unlikely(!skb)) {
					if (netif_msg_rx_err(port))
						ehea_error("rq2: skb=NULL");
				ehea_fill_skb(dev, skb, cqe);

				skb = get_skb_by_index(skb_arr_rq3,
						       skb_arr_rq3_len, cqe);
				if (unlikely(!skb)) {
					if (netif_msg_rx_err(port))
						ehea_error("rq3: skb=NULL");
				ehea_fill_skb(dev, skb, cqe);

			ehea_proc_skb(pr, cqe, skb);
			dev->last_rx = jiffies;

			pr->p_stats.poll_receive_errors++;
			port_reset = ehea_treat_poll_error(pr, rq, cqe,
		cqe = ehea_poll_rq1(qp, &wqe_index);

		lro_flush_all(&pr->lro_mgr);

	pr->rx_packets += processed;

	ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
	ehea_refill_rq2(pr, processed_rq2);
	ehea_refill_rq3(pr, processed_rq3);
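
/*
 * TX completion processing: walk the send CQ, free the skbs of
 * SWQE2-type completions, and wake the netif queue once at least
 * swqe_refill_th send WQEs are available again.
 */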
static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
	struct ehea_cq *send_cq = pr->send_cq;
	struct ehea_cqe *cqe;
	int quota = my_quota;

	cqe = ehea_poll_cq(send_cq);
	while (cqe && (quota > 0)) {
		ehea_inc_cq(send_cq);

		if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
			ehea_error("Send Completion Error: Resetting port");
			if (netif_msg_tx_err(pr->port))
				ehea_dump(cqe, sizeof(*cqe), "Send CQE");
			schedule_work(&pr->port->reset_task);

		if (netif_msg_tx_done(pr->port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
			   == EHEA_SWQE2_TYPE)) {
			index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
			skb = pr->sq_skba.arr[index];
			pr->sq_skba.arr[index] = NULL;

		swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);

		cqe = ehea_poll_cq(send_cq);

	ehea_update_feca(send_cq, cqe_counter);
	atomic_add(swqe_av, &pr->swqe_avail);

	spin_lock_irqsave(&pr->netif_queue, flags);

	if (pr->queue_stopped && (atomic_read(&pr->swqe_avail)
				  >= pr->swqe_refill_th)) {
		netif_wake_queue(pr->port->netdev);
		pr->queue_stopped = 0;

	spin_unlock_irqrestore(&pr->netif_queue, flags);

#define EHEA_NAPI_POLL_NUM_BEFORE_IRQ 16
#define EHEA_POLL_MAX_CQES 65535
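
/*
 * NAPI poll: process send completions, then receive WQEs, until the
 * budget is exhausted. Before completing, CQ event generation is reset
 * and both queues are polled once more; if new work arrived in that
 * window the poll is rescheduled so no event is lost. After
 * EHEA_NAPI_POLL_NUM_BEFORE_IRQ consecutive polls, completion is forced
 * (force_irq) to drop back to interrupt mode.
 */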
static int ehea_poll(struct napi_struct *napi, int budget)
	struct ehea_port_res *pr = container_of(napi, struct ehea_port_res,
	struct net_device *dev = pr->port->netdev;
	struct ehea_cqe *cqe;
	struct ehea_cqe *cqe_skb = NULL;
	int force_irq, wqe_index;

	force_irq = (pr->poll_counter > EHEA_NAPI_POLL_NUM_BEFORE_IRQ);
	cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);

	rx += ehea_proc_rwqes(dev, pr, budget - rx);

	while ((rx != budget) || force_irq) {
		pr->poll_counter = 0;

		netif_rx_complete(dev, napi);
		ehea_reset_cq_ep(pr->recv_cq);
		ehea_reset_cq_ep(pr->send_cq);
		ehea_reset_cq_n1(pr->recv_cq);
		ehea_reset_cq_n1(pr->send_cq);
		cqe = ehea_poll_rq1(pr->qp, &wqe_index);
		cqe_skb = ehea_poll_cq(pr->send_cq);

		if (!cqe && !cqe_skb)

		if (!netif_rx_reschedule(dev, napi))

		cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
		rx += ehea_proc_rwqes(dev, pr, budget - rx);

#ifdef CONFIG_NET_POLL_CONTROLLER
static void ehea_netpoll(struct net_device *dev)
	struct ehea_port *port = netdev_priv(dev);

	for (i = 0; i < port->num_def_qps; i++)
		netif_rx_schedule(dev, &port->port_res[i].napi);

static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
	struct ehea_port_res *pr = param;

	netif_rx_schedule(pr->port->netdev, &pr->napi);

static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
	struct ehea_port *port = param;
	struct ehea_eqe *eqe;

	eqe = ehea_poll_eq(port->qp_eq);
		qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
		ehea_error("QP aff_err: entry=0x%lx, token=0x%x",
			   eqe->entry, qp_token);

		qp = port->port_res[qp_token].qp;
		ehea_error_data(port->adapter, qp->fw_handle);
		eqe = ehea_poll_eq(port->qp_eq);

	schedule_work(&port->reset_task);

static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter,
	for (i = 0; i < EHEA_MAX_PORTS; i++)
		if (adapter->port[i])
			if (adapter->port[i]->logical_port_id == logical_port)
				return adapter->port[i];
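
/*
 * Query H_PORT_CB0 to (re)sense port attributes: MAC address, link
 * speed/duplex and the number of default (receive) and additional TX
 * queue pairs. Also called from the event tasklet on link-up, hence
 * the GFP_ATOMIC allocation below.
 */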
int ehea_sense_port_attr(struct ehea_port *port)
	struct hcp_ehea_port_cb0 *cb0;

	/* may be called via ehea_neq_tasklet() */
	cb0 = kzalloc(PAGE_SIZE, GFP_ATOMIC);
		ehea_error("no mem for cb0");

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id, H_PORT_CB0,
				      EHEA_BMASK_SET(H_PORT_CB0_ALL, 0xFFFF),
	if (hret != H_SUCCESS) {

	port->mac_addr = cb0->port_mac_addr << 16;

	if (!is_valid_ether_addr((u8 *)&port->mac_addr)) {
		ret = -EADDRNOTAVAIL;

	switch (cb0->port_speed) {
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 0;

		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 1;

		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 0;

		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 1;

		port->port_speed = EHEA_SPEED_1G;
		port->full_duplex = 1;

		port->port_speed = EHEA_SPEED_10G;
		port->full_duplex = 1;

		port->port_speed = 0;
		port->full_duplex = 0;

	port->num_mcs = cb0->num_default_qps;

	/* Number of default QPs */
		port->num_def_qps = cb0->num_default_qps;
		port->num_def_qps = 1;

	if (!port->num_def_qps) {

	port->num_tx_qps = num_tx_qps;

	if (port->num_def_qps >= port->num_tx_qps)
		port->num_add_tx_qps = 0;
		port->num_add_tx_qps = port->num_tx_qps - port->num_def_qps;

	if (ret || netif_msg_probe(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr");

int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
	struct hcp_ehea_port_cb4 *cb4;

	cb4 = kzalloc(PAGE_SIZE, GFP_KERNEL);
		ehea_error("no mem for cb4");

	cb4->port_speed = port_speed;

	netif_carrier_off(port->netdev);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB4, H_PORT_CB4_SPEED, cb4);
	if (hret == H_SUCCESS) {
		port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0;

		hret = ehea_h_query_ehea_port(port->adapter->handle,
					      port->logical_port_id,
					      H_PORT_CB4, H_PORT_CB4_SPEED,
		if (hret == H_SUCCESS) {
			switch (cb4->port_speed) {
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 0;

				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 1;

				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 0;

				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 1;

				port->port_speed = EHEA_SPEED_1G;
				port->full_duplex = 1;

				port->port_speed = EHEA_SPEED_10G;
				port->full_duplex = 1;

				port->port_speed = 0;
				port->full_duplex = 0;

			ehea_error("Failed sensing port speed");

	if (hret == H_AUTHORITY) {
		ehea_info("Hypervisor denied setting port speed");

		ehea_error("Failed setting port speed");

	if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP))
		netif_carrier_on(port->netdev);
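
/*
 * Decode asynchronous event queue entries: logical and physical port
 * state changes propagate carrier and queue state to the stack,
 * adapter malfunctions are logged, and a port malfunction additionally
 * stops the queue.
 */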
static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
	struct ehea_port *port;

	ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
	portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
	port = ehea_get_port(adapter, portnum);

	case EHEA_EC_PORTSTATE_CHG:	/* port state change */
			ehea_error("unknown portnum %x", portnum);

		if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
			if (!netif_carrier_ok(port->netdev)) {
				ret = ehea_sense_port_attr(port);
					ehea_error("failed resensing port "

				if (netif_msg_link(port))
					ehea_info("%s: Logical port up: %dMbps "
						  1 ? "Full" : "Half");

				netif_carrier_on(port->netdev);
				netif_wake_queue(port->netdev);

			if (netif_carrier_ok(port->netdev)) {
				if (netif_msg_link(port))
					ehea_info("%s: Logical port down",
				netif_carrier_off(port->netdev);
				netif_stop_queue(port->netdev);

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
			port->phy_link = EHEA_PHY_LINK_UP;
			if (netif_msg_link(port))
				ehea_info("%s: Physical port up",
			if (prop_carrier_state)
				netif_carrier_on(port->netdev);

			port->phy_link = EHEA_PHY_LINK_DOWN;
			if (netif_msg_link(port))
				ehea_info("%s: Physical port down",
			if (prop_carrier_state)
				netif_carrier_off(port->netdev);

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
			ehea_info("External switch port is primary port");
			ehea_info("External switch port is backup port");

	case EHEA_EC_ADAPTER_MALFUNC:
		ehea_error("Adapter malfunction");

	case EHEA_EC_PORT_MALFUNC:
		ehea_info("Port malfunction: Device: %s", port->netdev->name);
		netif_carrier_off(port->netdev);
		netif_stop_queue(port->netdev);

		ehea_error("unknown event code %x, eqe=0x%lX", ec, eqe);

static void ehea_neq_tasklet(unsigned long data)
	struct ehea_adapter *adapter = (struct ehea_adapter *)data;
	struct ehea_eqe *eqe;

	eqe = ehea_poll_eq(adapter->neq);
	ehea_debug("eqe=%p", eqe);

		ehea_debug("*eqe=%lx", eqe->entry);
		ehea_parse_eqe(adapter, eqe->entry);
		eqe = ehea_poll_eq(adapter->neq);
		ehea_debug("next eqe=%p", eqe);

	event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
		   | EHEA_BMASK_SET(NELR_ADAPTER_MALFUNC, 1)
		   | EHEA_BMASK_SET(NELR_PORT_MALFUNC, 1);

	ehea_h_reset_events(adapter->handle,
			    adapter->neq->fw_handle, event_mask);

static irqreturn_t ehea_interrupt_neq(int irq, void *param)
	struct ehea_adapter *adapter = param;
	tasklet_hi_schedule(&adapter->neq_tasklet);

static int ehea_fill_port_res(struct ehea_port_res *pr)
	struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;

	ret = ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1
				     - init_attr->act_nr_rwqes_rq2
				     - init_attr->act_nr_rwqes_rq3 - 1);

	ret |= ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);

	ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1);
static int ehea_reg_interrupts(struct net_device *dev)
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;

	snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff",
		 dev->name);

	ret = ibmebus_request_irq(port->qp_eq->attr.ist1,
				  ehea_qp_aff_irq_handler,
				  IRQF_DISABLED, port->int_aff_name, port);
		ehea_error("failed registering irq for qp_aff_irq_handler:"
			   "ist=%X", port->qp_eq->attr.ist1);

	if (netif_msg_ifup(port))
		ehea_info("irq_handle 0x%X for function qp_aff_irq_handler "
			  "registered", port->qp_eq->attr.ist1);

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		pr = &port->port_res[i];
		snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
			 "%s-queue%d", dev->name, i);
		ret = ibmebus_request_irq(pr->eq->attr.ist1,
					  ehea_recv_irq_handler,
					  IRQF_DISABLED, pr->int_send_name,
			ehea_error("failed registering irq for ehea_queue "
				   "port_res_nr:%d, ist=%X", i,
				   pr->eq->attr.ist1);

		if (netif_msg_ifup(port))
			ehea_info("irq_handle 0x%X for function ehea_queue_int "
				  "%d registered", pr->eq->attr.ist1, i);

		u32 ist = port->port_res[i].eq->attr.ist1;
		ibmebus_free_irq(ist, &port->port_res[i]);

	ibmebus_free_irq(port->qp_eq->attr.ist1, port);
	i = port->num_def_qps;

static void ehea_free_interrupts(struct net_device *dev)
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		pr = &port->port_res[i];
		ibmebus_free_irq(pr->eq->attr.ist1, pr);
		if (netif_msg_intr(port))
			ehea_info("free send irq for res %d with handle 0x%X",
				  i, pr->eq->attr.ist1);

	/* associated events */
	ibmebus_free_irq(port->qp_eq->attr.ist1, port);
	if (netif_msg_intr(port))
		ehea_info("associated event interrupt for handle 0x%X freed",
			  port->qp_eq->attr.ist1);

static int ehea_configure_port(struct ehea_port *port)
	struct hcp_ehea_port_cb0 *cb0;

	cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);

	cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1)
		     | EHEA_BMASK_SET(PXLY_RC_IP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_TCP_UDP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_XTRACT, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_TAG_FILTER,
				      PXLY_RC_VLAN_FILTER)
		     | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1);

	for (i = 0; i < port->num_mcs; i++)
		if (use_mcs)
			cb0->default_qpn_arr[i] =
				port->port_res[i].qp->init_attr.qp_nr;
		else
			cb0->default_qpn_arr[i] =
				port->port_res[0].qp->init_attr.qp_nr;

	if (netif_msg_ifup(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port");

	mask = EHEA_BMASK_SET(H_PORT_CB0_PRC, 1)
	     | EHEA_BMASK_SET(H_PORT_CB0_DEFQPNARRAY, 1);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB0, mask, cb0);

	if (hret != H_SUCCESS)

int ehea_gen_smrs(struct ehea_port_res *pr)
	struct ehea_adapter *adapter = pr->port->adapter;

	ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr);

	ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr);

	ehea_rem_mr(&pr->send_mr);

	ehea_error("Generating SMRS failed\n");

int ehea_rem_smrs(struct ehea_port_res *pr)
	if ((ehea_rem_mr(&pr->send_mr))
	    || (ehea_rem_mr(&pr->recv_mr)))

static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
	int arr_size = sizeof(void *) * max_q_entries;

	q_skba->arr = vmalloc(arr_size);

	memset(q_skba->arr, 0, arr_size);

	q_skba->len = max_q_entries;

	q_skba->os_skbs = 0;

static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
			      struct port_res_cfg *pr_cfg, int queue_token)
	struct ehea_adapter *adapter = port->adapter;
	enum ehea_eq_type eq_type = EHEA_EQ;
	struct ehea_qp_init_attr *init_attr = NULL;

	memset(pr, 0, sizeof(struct ehea_port_res));

	spin_lock_init(&pr->xmit_lock);
	spin_lock_init(&pr->netif_queue);

	pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
		ehea_error("create_eq failed (eq)");

	pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq,
				     port->logical_port_id);
		ehea_error("create_cq failed (cq_recv)");

	pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq,
				     port->logical_port_id);
		ehea_error("create_cq failed (cq_send)");

	if (netif_msg_ifup(port))
		ehea_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d",
			  pr->send_cq->attr.act_nr_of_cqes,
			  pr->recv_cq->attr.act_nr_of_cqes);

	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
		ehea_error("no mem for ehea_qp_init_attr");

	init_attr->low_lat_rq1 = 1;
	init_attr->signalingtype = 1;	/* generate CQE if specified in WQE */
	init_attr->rq_count = 3;
	init_attr->qp_token = queue_token;
	init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq;
	init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1;
	init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2;
	init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3;
	init_attr->wqe_size_enc_sq = EHEA_SG_SQ;
	init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1;
	init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2;
	init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3;
	init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD;
	init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD;
	init_attr->port_nr = port->logical_port_id;
	init_attr->send_cq_handle = pr->send_cq->fw_handle;
	init_attr->recv_cq_handle = pr->recv_cq->fw_handle;
	init_attr->aff_eq_handle = port->qp_eq->fw_handle;

	pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
		ehea_error("create_qp failed");

	if (netif_msg_ifup(port))
		ehea_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n "
			  "nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d", init_attr->qp_nr,
			  init_attr->act_nr_send_wqes,
			  init_attr->act_nr_rwqes_rq1,
			  init_attr->act_nr_rwqes_rq2,
			  init_attr->act_nr_rwqes_rq3);

	ret = ehea_init_q_skba(&pr->sq_skba, init_attr->act_nr_send_wqes + 1);
	ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1);
	ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1);
	ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1);

	pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10;
	if (ehea_gen_smrs(pr) != 0) {

	atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1);

	netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64);

	pr->lro_mgr.max_aggr = pr->port->lro_max_aggr;
	pr->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS;
	pr->lro_mgr.lro_arr = pr->lro_desc;
	pr->lro_mgr.get_skb_header = get_skb_hdr;
	pr->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
	pr->lro_mgr.dev = port->netdev;
	pr->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
	pr->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;

	vfree(pr->sq_skba.arr);
	vfree(pr->rq1_skba.arr);
	vfree(pr->rq2_skba.arr);
	vfree(pr->rq3_skba.arr);
	ehea_destroy_qp(pr->qp);
	ehea_destroy_cq(pr->send_cq);
	ehea_destroy_cq(pr->recv_cq);
	ehea_destroy_eq(pr->eq);

static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
	ret = ehea_destroy_qp(pr->qp);

		ehea_destroy_cq(pr->send_cq);
		ehea_destroy_cq(pr->recv_cq);
		ehea_destroy_eq(pr->eq);

		for (i = 0; i < pr->rq1_skba.len; i++)
			if (pr->rq1_skba.arr[i])
				dev_kfree_skb(pr->rq1_skba.arr[i]);

		for (i = 0; i < pr->rq2_skba.len; i++)
			if (pr->rq2_skba.arr[i])
				dev_kfree_skb(pr->rq2_skba.arr[i]);

		for (i = 0; i < pr->rq3_skba.len; i++)
			if (pr->rq3_skba.arr[i])
				dev_kfree_skb(pr->rq3_skba.arr[i]);

		for (i = 0; i < pr->sq_skba.len; i++)
			if (pr->sq_skba.arr[i])
				dev_kfree_skb(pr->sq_skba.arr[i]);

		vfree(pr->rq1_skba.arr);
		vfree(pr->rq2_skba.arr);
		vfree(pr->rq3_skba.arr);
		vfree(pr->sq_skba.arr);
		ret = ehea_rem_smrs(pr);

/*
 * The write_* functions store information in swqe which is used by
 * the hardware to calculate the ip/tcp/udp checksum
 */

static inline void write_ip_start_end(struct ehea_swqe *swqe,
				      const struct sk_buff *skb)
	swqe->ip_start = skb_network_offset(skb);
	swqe->ip_end = (u8)(swqe->ip_start + ip_hdrlen(skb) - 1);

static inline void write_tcp_offset_end(struct ehea_swqe *swqe,
					const struct sk_buff *skb)
	swqe->tcp_offset =
		(u8)(swqe->ip_end + 1 + offsetof(struct tcphdr, check));

	swqe->tcp_end = (u16)skb->len - 1;
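
/*
 * Note: the swqe checksum fields are named after TCP but double as the
 * generic L4 fields -- the UDP variant below stores the offset of the
 * UDP checksum in tcp_offset as well.
 */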
static inline void write_udp_offset_end(struct ehea_swqe *swqe,
					const struct sk_buff *skb)
	swqe->tcp_offset =
		(u8)(swqe->ip_end + 1 + offsetof(struct udphdr, check));

	swqe->tcp_end = (u16)skb->len - 1;

static void write_swqe2_TSO(struct sk_buff *skb,
			    struct ehea_swqe *swqe, u32 lkey)
	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
	int skb_data_size = skb->len - skb->data_len;

	/* Packet is TCP with TSO enabled */
	swqe->tx_control |= EHEA_SWQE_TSO;
	swqe->mss = skb_shinfo(skb)->gso_size;
	/* copy only eth/ip/tcp headers to immediate data and
	 * the rest of skb->data to sg1entry
	 */
	headersize = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);

	skb_data_size = skb->len - skb->data_len;

	if (skb_data_size >= headersize) {
		/* copy immediate data */
		skb_copy_from_linear_data(skb, imm_data, headersize);
		swqe->immediate_data_length = headersize;

		if (skb_data_size > headersize) {
			/* set sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = skb_data_size - headersize;
			sg1entry->vaddr =
				ehea_map_vaddr(skb->data + headersize);
			swqe->descriptors++;

		ehea_error("cannot handle fragmented headers");

static void write_swqe2_nonTSO(struct sk_buff *skb,
			       struct ehea_swqe *swqe, u32 lkey)
	int skb_data_size = skb->len - skb->data_len;
	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;

	/* Packet is any nonTSO type
	 *
	 * Copy as much as possible skb->data to immediate data and
	 * the rest to sg1entry
	 */
	if (skb_data_size >= SWQE2_MAX_IMM) {
		/* copy immediate data */
		skb_copy_from_linear_data(skb, imm_data, SWQE2_MAX_IMM);

		swqe->immediate_data_length = SWQE2_MAX_IMM;

		if (skb_data_size > SWQE2_MAX_IMM) {
			/* copy sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = skb_data_size - SWQE2_MAX_IMM;
			sg1entry->vaddr =
				ehea_map_vaddr(skb->data + SWQE2_MAX_IMM);
			swqe->descriptors++;

		skb_copy_from_linear_data(skb, imm_data, skb_data_size);
		swqe->immediate_data_length = skb_data_size;
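
/*
 * SWQE2 layout: up to SWQE2_MAX_IMM bytes (or, for TSO, just the
 * eth/ip/tcp headers) travel as immediate data; the rest of the linear
 * skb data goes into sg1entry and page fragments into the sg_list, as
 * write_swqe2_data() below arranges.
 */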
static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
				    struct ehea_swqe *swqe, u32 lkey)
	struct ehea_vsgentry *sg_list, *sg1entry, *sgentry;
	int nfrags, sg1entry_contains_frag_data, i;

	nfrags = skb_shinfo(skb)->nr_frags;
	sg1entry = &swqe->u.immdata_desc.sg_entry;
	sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list;
	swqe->descriptors = 0;
	sg1entry_contains_frag_data = 0;

	if ((dev->features & NETIF_F_TSO) && skb_shinfo(skb)->gso_size)
		write_swqe2_TSO(skb, swqe, lkey);
		write_swqe2_nonTSO(skb, swqe, lkey);

	/* write descriptors */
		if (swqe->descriptors == 0) {
			/* sg1entry not yet used */
			frag = &skb_shinfo(skb)->frags[0];

			/* copy sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = frag->size;
			sg1entry->vaddr =
				ehea_map_vaddr(page_address(frag->page)
					       + frag->page_offset);
			swqe->descriptors++;
			sg1entry_contains_frag_data = 1;

		for (i = sg1entry_contains_frag_data; i < nfrags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
			sgentry = &sg_list[i - sg1entry_contains_frag_data];

			sgentry->l_key = lkey;
			sgentry->len = frag->size;
			sgentry->vaddr =
				ehea_map_vaddr(page_address(frag->page)
					       + frag->page_offset);
			swqe->descriptors++;

static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
	/* De/Register untagged packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		ehea_error("%sregistering bc address failed (tagged)",
			   hcallid == H_REG_BCMC ? "" : "de");

	/* De/Register VLAN packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_VLANID_ALL;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		ehea_error("%sregistering bc address failed (vlan)",
			   hcallid == H_REG_BCMC ? "" : "de");

static int ehea_set_mac_addr(struct net_device *dev, void *sa)
	struct ehea_port *port = netdev_priv(dev);
	struct sockaddr *mac_addr = sa;
	struct hcp_ehea_port_cb0 *cb0;

	if (!is_valid_ether_addr(mac_addr->sa_data)) {
		ret = -EADDRNOTAVAIL;

	cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
		ehea_error("no mem for cb0");

	memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN);

	cb0->port_mac_addr = cb0->port_mac_addr >> 16;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id, H_PORT_CB0,
				       EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0);
	if (hret != H_SUCCESS) {

	memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);

	/* Deregister old MAC in pHYP */
	ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);

	port->mac_addr = cb0->port_mac_addr << 16;

	/* Register new MAC in pHYP */
	ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);

static void ehea_promiscuous_error(u64 hret, int enable)
	if (hret == H_AUTHORITY)
		ehea_info("Hypervisor denied %sabling promiscuous mode",
			  enable == 1 ? "en" : "dis");
		ehea_error("failed %sabling promiscuous mode",
			   enable == 1 ? "en" : "dis");

static void ehea_promiscuous(struct net_device *dev, int enable)
	struct ehea_port *port = netdev_priv(dev);
	struct hcp_ehea_port_cb7 *cb7;

	if ((enable && port->promisc) || (!enable && !port->promisc))

	cb7 = kzalloc(PAGE_SIZE, GFP_ATOMIC);
		ehea_error("no mem for cb7");

	/* Modify Pxs_DUCQPN in CB7 */
	cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB7, H_PORT_CB7_DUCQPN, cb7);
		ehea_promiscuous_error(hret, enable);

	port->promisc = enable;

static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
	reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
		 | EHEA_BCMC_UNTAGGED;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);

	reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
		 | EHEA_BCMC_VLANID_ALL;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);

static int ehea_drop_multicast_list(struct net_device *dev)
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_mc_list *mc_entry = port->mc_list;
	struct list_head *pos;
	struct list_head *temp;

	list_for_each_safe(pos, temp, &(port->mc_list->list)) {
		mc_entry = list_entry(pos, struct ehea_mc_list, list);

		hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
			ehea_error("failed deregistering mcast MAC");

static void ehea_allmulti(struct net_device *dev, int enable)
	struct ehea_port *port = netdev_priv(dev);

	if (!port->allmulti) {
			/* Enable ALLMULTI */
			ehea_drop_multicast_list(dev);
			hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC);

				ehea_error("failed enabling IFF_ALLMULTI");

			/* Disable ALLMULTI */
			hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC);

				ehea_error("failed disabling IFF_ALLMULTI");

static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
	struct ehea_mc_list *ehea_mcl_entry;

	ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
	if (!ehea_mcl_entry) {
		ehea_error("no mem for mcl_entry");

	INIT_LIST_HEAD(&ehea_mcl_entry->list);

	memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN);

	hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr,
		list_add(&ehea_mcl_entry->list, &port->mc_list->list);
		ehea_error("failed registering mcast MAC");
		kfree(ehea_mcl_entry);

static void ehea_set_multicast_list(struct net_device *dev)
	struct ehea_port *port = netdev_priv(dev);
	struct dev_mc_list *k_mcl_entry;

	if (dev->flags & IFF_PROMISC) {
		ehea_promiscuous(dev, 1);

	ehea_promiscuous(dev, 0);

	if (dev->flags & IFF_ALLMULTI) {
		ehea_allmulti(dev, 1);

	ehea_allmulti(dev, 0);

	if (dev->mc_count) {
		ret = ehea_drop_multicast_list(dev);
			/* Dropping the current multicast list failed.
			 * Enabling ALL_MULTI is the best we can do.
			 */
			ehea_allmulti(dev, 1);

		if (dev->mc_count > port->adapter->max_mc_mac) {
			ehea_info("Mcast registration limit reached (0x%lx). "
				  port->adapter->max_mc_mac);

		for (i = 0, k_mcl_entry = dev->mc_list; i < dev->mc_count; i++,
		     k_mcl_entry = k_mcl_entry->next)
			ehea_add_multicast_entry(port, k_mcl_entry->dmi_addr);

static int ehea_change_mtu(struct net_device *dev, int new_mtu)
	if ((new_mtu < 68) || (new_mtu > EHEA_MAX_PACKET_SIZE))

static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
		       struct ehea_swqe *swqe, u32 lkey)
	if (skb->protocol == htons(ETH_P_IP)) {
		const struct iphdr *iph = ip_hdr(skb);

		swqe->tx_control |= EHEA_SWQE_CRC
				 | EHEA_SWQE_IP_CHECKSUM
				 | EHEA_SWQE_TCP_CHECKSUM
				 | EHEA_SWQE_IMM_DATA_PRESENT
				 | EHEA_SWQE_DESCRIPTORS_PRESENT;

		write_ip_start_end(swqe, skb);

		if (iph->protocol == IPPROTO_UDP) {
			if ((iph->frag_off & IP_MF)
			    || (iph->frag_off & IP_OFFSET))
				/* IP fragment, so don't change cs */
				swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM;

				write_udp_offset_end(swqe, skb);
		} else if (iph->protocol == IPPROTO_TCP) {
			write_tcp_offset_end(swqe, skb);

		/* icmp (big data) and ip segmentation packets (all other ip
		   packets) do not require any special handling */

		/* Other Ethernet Protocol */
		swqe->tx_control |= EHEA_SWQE_CRC
				 | EHEA_SWQE_IMM_DATA_PRESENT
				 | EHEA_SWQE_DESCRIPTORS_PRESENT;

	write_swqe2_data(skb, dev, swqe, lkey);

static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
		       struct ehea_swqe *swqe)
	int nfrags = skb_shinfo(skb)->nr_frags;
	u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0];

	if (skb->protocol == htons(ETH_P_IP)) {
		const struct iphdr *iph = ip_hdr(skb);

		write_ip_start_end(swqe, skb);

		if (iph->protocol == IPPROTO_TCP) {
			swqe->tx_control |= EHEA_SWQE_CRC
					 | EHEA_SWQE_IP_CHECKSUM
					 | EHEA_SWQE_TCP_CHECKSUM
					 | EHEA_SWQE_IMM_DATA_PRESENT;

			write_tcp_offset_end(swqe, skb);

		} else if (iph->protocol == IPPROTO_UDP) {
			if ((iph->frag_off & IP_MF)
			    || (iph->frag_off & IP_OFFSET))
				/* IP fragment, so don't change cs */
				swqe->tx_control |= EHEA_SWQE_CRC
						 | EHEA_SWQE_IMM_DATA_PRESENT;

				swqe->tx_control |= EHEA_SWQE_CRC
						 | EHEA_SWQE_IP_CHECKSUM
						 | EHEA_SWQE_TCP_CHECKSUM
						 | EHEA_SWQE_IMM_DATA_PRESENT;

			write_udp_offset_end(swqe, skb);

			/* icmp (big data) and
			   ip segmentation packets (all other ip packets) */
			swqe->tx_control |= EHEA_SWQE_CRC
					 | EHEA_SWQE_IP_CHECKSUM
					 | EHEA_SWQE_IMM_DATA_PRESENT;

		/* Other Ethernet Protocol */
		swqe->tx_control |= EHEA_SWQE_CRC | EHEA_SWQE_IMM_DATA_PRESENT;

	/* copy (immediate) data */
		/* data is in a single piece */
		skb_copy_from_linear_data(skb, imm_data, skb->len);

		/* first copy data from the skb->data buffer ... */
		skb_copy_from_linear_data(skb, imm_data,
					  skb->len - skb->data_len);
		imm_data += skb->len - skb->data_len;

		/* ... then copy data from the fragments */
		for (i = 0; i < nfrags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
			memcpy(imm_data,
			       page_address(frag->page) + frag->page_offset,
			       frag->size);
			imm_data += frag->size;

	swqe->immediate_data_length = skb->len;
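
/*
 * Simple flow hash used to pick a send queue: for TCP/IPv4, source and
 * destination port are mixed with the destination address so that all
 * packets of a flow map to the same QP (preserving ordering) while
 * flows spread across num_qps queues. All other traffic uses queue 0.
 */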
static inline int ehea_hash_skb(struct sk_buff *skb, int num_qps)
	if ((skb->protocol == htons(ETH_P_IP)) &&
	    (ip_hdr(skb)->protocol == IPPROTO_TCP)) {
		tcp = (struct tcphdr *)(skb_network_header(skb) +
					(ip_hdr(skb)->ihl * 4));
		tmp = (tcp->source + (tcp->dest << 16)) % 31;
		tmp += ip_hdr(skb)->daddr % 31;
		return tmp % num_qps;

static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_swqe *swqe;
	unsigned long flags;
	struct ehea_port_res *pr;

	pr = &port->port_res[ehea_hash_skb(skb, port->num_tx_qps)];

	if (!spin_trylock(&pr->xmit_lock))
		return NETDEV_TX_BUSY;

	if (pr->queue_stopped) {
		spin_unlock(&pr->xmit_lock);
		return NETDEV_TX_BUSY;

	swqe = ehea_get_swqe(pr->qp, &swqe_index);
	memset(swqe, 0, SWQE_HEADER_SIZE);
	atomic_dec(&pr->swqe_avail);

	if (skb->len <= SWQE3_MAX_IMM) {
		u32 sig_iv = port->sig_comp_iv;
		u32 swqe_num = pr->swqe_id_counter;
		ehea_xmit3(skb, dev, swqe);
		swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE)
			    | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_num);
		if (pr->swqe_ll_count >= (sig_iv - 1)) {
			swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
						      sig_iv);
			swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
			pr->swqe_ll_count = 0;

			pr->swqe_ll_count += 1;

		swqe->wr_id =
			EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
			| EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter)
			| EHEA_BMASK_SET(EHEA_WR_ID_REFILL, 1)
			| EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index);
		pr->sq_skba.arr[pr->sq_skba.index] = skb;

		pr->sq_skba.index++;
		pr->sq_skba.index &= (pr->sq_skba.len - 1);

		lkey = pr->send_mr.lkey;
		ehea_xmit2(skb, dev, swqe, lkey);
		swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;

	pr->swqe_id_counter += 1;

	if (port->vgrp && vlan_tx_tag_present(skb)) {
		swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
		swqe->vlan_tag = vlan_tx_tag_get(skb);

	if (netif_msg_tx_queued(port)) {
		ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr);
		ehea_dump(swqe, 512, "swqe");

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		netif_stop_queue(dev);
		swqe->tx_control |= EHEA_SWQE_PURGE;

	ehea_post_swqe(pr->qp, swqe);

	if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
		spin_lock_irqsave(&pr->netif_queue, flags);
		if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
			pr->p_stats.queue_stopped++;
			netif_stop_queue(dev);
			pr->queue_stopped = 1;

		spin_unlock_irqrestore(&pr->netif_queue, flags);

	dev->trans_start = jiffies;
	spin_unlock(&pr->xmit_lock);

	return NETDEV_TX_OK;
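
/*
 * VLAN filtering: CB1 carries the VLAN filter as an array of u64 words
 * (one bit per VLAN id, apparently 4096 bits in total); a vid selects
 * bit "vid & 0x3F" within its word, as the add/kill handlers below
 * show.
 */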
static void ehea_vlan_rx_register(struct net_device *dev,
				  struct vlan_group *grp)
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;

	cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
		ehea_error("no mem for cb1");

	memset(cb1->vlan_filter, 0, sizeof(cb1->vlan_filter));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS)
		ehea_error("modify_ehea_port failed");

static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;

	cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
		ehea_error("no mem for cb1");

	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_port failed");

	cb1->vlan_filter[index] |= ((u64)(0x8000000000000000 >> (vid & 0x3F)));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS)
		ehea_error("modify_ehea_port failed");

static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;

	vlan_group_set_device(port->vgrp, vid, NULL);

	cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
		ehea_error("no mem for cb1");

	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_port failed");

	cb1->vlan_filter[index] &= ~((u64)(0x8000000000000000 >> (vid & 0x3F)));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS)
		ehea_error("modify_ehea_port failed");
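
/*
 * Bring a queue pair to life by stepping it through the firmware state
 * machine: INITIALIZED, then ENABLED, then RDY2SND, verifying each
 * transition with a follow-up query.
 */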
int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
	struct hcp_modify_qp_cb0 *cb0;

	cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_qp failed (1)");

	cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		ehea_error("modify_ehea_qp failed (1)");

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_qp failed (2)");

	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		ehea_error("modify_ehea_qp failed (2)");

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_qp failed (3)");

	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		ehea_error("modify_ehea_qp failed (3)");

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		ehea_error("query_ehea_qp failed (4)");

static int ehea_port_res_setup(struct ehea_port *port, int def_qps,
	struct port_res_cfg pr_cfg, pr_cfg_small_rx;
	enum ehea_eq_type eq_type = EHEA_EQ;

	port->qp_eq = ehea_create_eq(port->adapter, eq_type,
				     EHEA_MAX_ENTRIES_EQ, 1);
		ehea_error("ehea_create_eq failed (qp_eq)");

	pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries;
	pr_cfg.max_entries_scq = sq_entries * 2;
	pr_cfg.max_entries_sq = sq_entries;
	pr_cfg.max_entries_rq1 = rq1_entries;
	pr_cfg.max_entries_rq2 = rq2_entries;
	pr_cfg.max_entries_rq3 = rq3_entries;

	pr_cfg_small_rx.max_entries_rcq = 1;
	pr_cfg_small_rx.max_entries_scq = sq_entries;
	pr_cfg_small_rx.max_entries_sq = sq_entries;
	pr_cfg_small_rx.max_entries_rq1 = 1;
	pr_cfg_small_rx.max_entries_rq2 = 1;
	pr_cfg_small_rx.max_entries_rq3 = 1;

	for (i = 0; i < def_qps; i++) {
		ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i);

	for (i = def_qps; i < def_qps + add_tx_qps; i++) {
		ret = ehea_init_port_res(port, &port->port_res[i],
					 &pr_cfg_small_rx, i);

		ehea_clean_portres(port, &port->port_res[i]);

	ehea_destroy_eq(port->qp_eq);

static int ehea_clean_all_portres(struct ehea_port *port)
	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
		ret |= ehea_clean_portres(port, &port->port_res[i]);

	ret |= ehea_destroy_eq(port->qp_eq);

static void ehea_remove_adapter_mr(struct ehea_adapter *adapter)
	if (adapter->active_ports)

	ehea_rem_mr(&adapter->mr);

static int ehea_add_adapter_mr(struct ehea_adapter *adapter)
	if (adapter->active_ports)

	return ehea_reg_kernel_mr(adapter, &adapter->mr);

static int ehea_up(struct net_device *dev)
	struct ehea_port *port = netdev_priv(dev);

	if (port->state == EHEA_PORT_UP)

	ret = ehea_port_res_setup(port, port->num_def_qps,
				  port->num_add_tx_qps);
		ehea_error("port_res_failed");

	/* Set default QP for this port */
	ret = ehea_configure_port(port);
		ehea_error("ehea_configure_port failed. ret:%d", ret);

	ret = ehea_reg_interrupts(dev);
		ehea_error("reg_interrupts failed. ret:%d", ret);

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
			ehea_error("activate_qp failed");

	for (i = 0; i < port->num_def_qps; i++) {
		ret = ehea_fill_port_res(&port->port_res[i]);
			ehea_error("out_free_irqs");

	port->state = EHEA_PORT_UP;

	ehea_free_interrupts(dev);

	ehea_clean_all_portres(port);

		ehea_info("Failed starting %s. ret=%i", dev->name, ret);

static void port_napi_disable(struct ehea_port *port)
	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
		napi_disable(&port->port_res[i].napi);

static void port_napi_enable(struct ehea_port *port)
	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
		napi_enable(&port->port_res[i].napi);

static int ehea_open(struct net_device *dev)
	struct ehea_port *port = netdev_priv(dev);

	down(&port->port_lock);

	if (netif_msg_ifup(port))
		ehea_info("enabling port %s", dev->name);

		port_napi_enable(port);
		netif_start_queue(dev);

	up(&port->port_lock);

static int ehea_down(struct net_device *dev)
	struct ehea_port *port = netdev_priv(dev);

	if (port->state == EHEA_PORT_DOWN)

	ehea_drop_multicast_list(dev);
	ehea_free_interrupts(dev);

	port->state = EHEA_PORT_DOWN;

	ret = ehea_clean_all_portres(port);
		ehea_info("Failed freeing resources for %s. ret=%i",

static int ehea_stop(struct net_device *dev)
	struct ehea_port *port = netdev_priv(dev);

	if (netif_msg_ifdown(port))
		ehea_info("disabling port %s", dev->name);

	flush_scheduled_work();
	down(&port->port_lock);
	netif_stop_queue(dev);
	port_napi_disable(port);
	ret = ehea_down(dev);
	up(&port->port_lock);

void ehea_purge_sq(struct ehea_qp *orig_qp)
	struct ehea_qp qp = *orig_qp;
	struct ehea_qp_init_attr *init_attr = &qp.init_attr;
	struct ehea_swqe *swqe;

	for (i = 0; i < init_attr->act_nr_send_wqes; i++) {
		swqe = ehea_get_swqe(&qp, &wqe_index);
		swqe->tx_control |= EHEA_SWQE_PURGE;

int ehea_stop_qps(struct net_device *dev)
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_modify_qp_cb0 *cb0;

	cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);

	for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		struct ehea_qp *qp = pr->qp;

		/* Purge send queue */

		/* Disable queue pair */
		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
		if (hret != H_SUCCESS) {
			ehea_error("query_ehea_qp failed (1)");

		cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
		cb0->qp_ctl_reg &= ~H_QP_CR_ENABLED;

		hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
					     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
					     &dummy64, &dummy16, &dummy16);
		if (hret != H_SUCCESS) {
			ehea_error("modify_ehea_qp failed (1)");

		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
		if (hret != H_SUCCESS) {
			ehea_error("query_ehea_qp failed (2)");

		/* deregister shared memory regions */
		dret = ehea_rem_smrs(pr);
			ehea_error("unreg shared memory region failed");

void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
	struct ehea_qp qp = *orig_qp;
	struct ehea_qp_init_attr *init_attr = &qp.init_attr;
	struct ehea_rwqe *rwqe;
	struct sk_buff **skba_rq2 = pr->rq2_skba.arr;
	struct sk_buff **skba_rq3 = pr->rq3_skba.arr;
	struct sk_buff *skb;
	u32 lkey = pr->recv_mr.lkey;

	for (i = 0; i < init_attr->act_nr_rwqes_rq2 + 1; i++) {
		rwqe = ehea_get_next_rwqe(&qp, 2);
		rwqe->sg_list[0].l_key = lkey;
		index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
		skb = skba_rq2[index];
			rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);

	for (i = 0; i < init_attr->act_nr_rwqes_rq3 + 1; i++) {
		rwqe = ehea_get_next_rwqe(&qp, 3);
		rwqe->sg_list[0].l_key = lkey;
		index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
		skb = skba_rq3[index];
			rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);

int ehea_restart_qps(struct net_device *dev)
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_modify_qp_cb0 *cb0;

	cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);

	for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		struct ehea_qp *qp = pr->qp;

		ret = ehea_gen_smrs(pr);
			ehea_error("creation of shared memory regions failed");

		ehea_update_rqs(qp, pr);

		/* Enable queue pair */
		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
		if (hret != H_SUCCESS) {
			ehea_error("query_ehea_qp failed (1)");

		cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
		cb0->qp_ctl_reg |= H_QP_CR_ENABLED;

		hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
					     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
					     &dummy64, &dummy16, &dummy16);
		if (hret != H_SUCCESS) {
			ehea_error("modify_ehea_qp failed (1)");

		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
		if (hret != H_SUCCESS) {
			ehea_error("query_ehea_qp failed (2)");

		/* refill entire queue */
		ehea_refill_rq1(pr, pr->rq1_skba.index, 0);
		ehea_refill_rq2(pr, 0);
		ehea_refill_rq3(pr, 0);
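
/*
 * Port reset worker, scheduled from the error paths (fatal CQE/EQE
 * errors, TX watchdog): stop the queue and NAPI, take the port down and
 * up again, then restore the multicast list and restart the queue.
 */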
static void ehea_reset_port(struct work_struct *work)
{
	int ret;
	struct ehea_port *port =
		container_of(work, struct ehea_port, reset_task);
	struct net_device *dev = port->netdev;

	port->resets++;
	down(&port->port_lock);
	netif_stop_queue(dev);
	port_napi_disable(port);

	ehea_down(dev);

	ret = ehea_up(dev);
	if (ret)
		goto out;

	ehea_set_multicast_list(dev);

	if (netif_msg_timer(port))
		ehea_info("Device %s reset successfully", dev->name);

	port_napi_enable(port);
	netif_wake_queue(dev);
out:
	up(&port->port_lock);
	return;
}

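/*
 * ehea_rereg_mrs - worker for the DLPAR memory-add path: stop all ports
 * and their QPs, unregister the old memory regions, rebuild the busmap,
 * register new MRs and restart the ports. Note that the error paths
 * return with dlpar_mem_lock still held, leaving the driver quiesced
 * (the "driver inoperable" case).
 */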
static void ehea_rereg_mrs(struct work_struct *work)
{
	int ret, i;
	struct ehea_adapter *adapter;

	down(&dlpar_mem_lock);
	ehea_info("LPAR memory enlarged - re-initializing driver");

	list_for_each_entry(adapter, &adapter_list, list)
		if (adapter->active_ports) {
			/* Shutdown all ports */
			for (i = 0; i < EHEA_MAX_PORTS; i++) {
				struct ehea_port *port = adapter->port[i];

				if (port) {
					struct net_device *dev = port->netdev;

					if (dev->flags & IFF_UP) {
						down(&port->port_lock);
						netif_stop_queue(dev);
						ret = ehea_stop_qps(dev);
						if (ret) {
							up(&port->port_lock);
							goto out;
						}
						port_napi_disable(port);
						up(&port->port_lock);
					}
				}
			}

			/* Unregister old memory region */
			ret = ehea_rem_mr(&adapter->mr);
			if (ret) {
				ehea_error("unregister MR failed - driver"
					   " inoperable!");
				goto out;
			}
		}

	ehea_destroy_busmap();
	ret = ehea_create_busmap();
	if (ret) {
		ehea_error("creating ehea busmap failed");
		goto out;
	}

	clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags);

	list_for_each_entry(adapter, &adapter_list, list)
		if (adapter->active_ports) {
			/* Register new memory region */
			ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
			if (ret) {
				ehea_error("register MR failed - driver"
					   " inoperable!");
				goto out;
			}

			/* Restart all ports */
			for (i = 0; i < EHEA_MAX_PORTS; i++) {
				struct ehea_port *port = adapter->port[i];

				if (port) {
					struct net_device *dev = port->netdev;

					if (dev->flags & IFF_UP) {
						down(&port->port_lock);
						port_napi_enable(port);
						ret = ehea_restart_qps(dev);
						if (!ret)
							netif_wake_queue(dev);
						up(&port->port_lock);
					}
				}
			}
		}
	up(&dlpar_mem_lock);
	ehea_info("re-initializing driver complete");
out:
	return;
}

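/*
 * ehea_tx_watchdog - net_device tx_timeout hook. Schedule a port reset
 * unless the carrier is down or transfers are globally stopped for
 * memory region re-registration.
 */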
static void ehea_tx_watchdog(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);

	if (netif_carrier_ok(dev) &&
	    !test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
		schedule_work(&port->reset_task);
}

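/*
 * ehea_sense_adapter_attr - query the adapter's properties from
 * firmware; only the multicast MAC limit is retained here.
 */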
int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
{
	struct hcp_query_ehea *cb;
	u64 hret;
	int ret;

	cb = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb)
		return -ENOMEM;

	hret = ehea_h_query_ehea(adapter->handle, cb);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out;
	}

	adapter->max_mc_mac = cb->max_mc_mac - 1;
	ret = 0;
out:
	kfree(cb);
	return ret;
}

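/*
 * ehea_get_jumboframe_status - query control block 4 of the port and,
 * if jumbo frames are not yet active, try to enable them. *jumbo
 * reports the resulting state (1 = enabled).
 */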
int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
{
	struct hcp_ehea_port_cb4 *cb4;
	u64 hret;
	int ret = 0;

	*jumbo = 0;

	/* (Try to) enable *jumbo frames */
	cb4 = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!cb4) {
		ehea_error("no mem for cb4");
		return -ENOMEM;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id,
				      H_PORT_CB4,
				      H_PORT_CB4_JUMBO, cb4);
	if (hret == H_SUCCESS) {
		if (cb4->jumbo_frame)
			*jumbo = 1;
		else {
			cb4->jumbo_frame = 1;
			hret = ehea_h_modify_ehea_port(port->adapter->handle,
						       port->logical_port_id,
						       H_PORT_CB4,
						       H_PORT_CB4_JUMBO,
						       cb4);
			if (hret == H_SUCCESS)
				*jumbo = 1;
		}
	} else
		ret = -EINVAL;

	kfree(cb4);
	return ret;
}

static ssize_t ehea_show_port_id(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
	return sprintf(buf, "%d", port->logical_port_id);
}

static DEVICE_ATTR(log_port_id, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_port_id,
		   NULL);

static void __devinit logical_port_release(struct device *dev)
{
	struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
	of_node_put(port->ofdev.node);
}

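/*
 * ehea_register_port - register the logical port as an of_device on the
 * ibmebus bus and expose its id via the log_port_id sysfs attribute.
 * Returns the struct device to hang the net_device off, or NULL on
 * failure.
 */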
static struct device *ehea_register_port(struct ehea_port *port,
					 struct device_node *dn)
{
	int ret;

	port->ofdev.node = of_node_get(dn);
	port->ofdev.dev.parent = &port->adapter->ofdev->dev;
	port->ofdev.dev.bus = &ibmebus_bus_type;

	sprintf(port->ofdev.dev.bus_id, "port%d", port_name_cnt++);
	port->ofdev.dev.release = logical_port_release;

	ret = of_device_register(&port->ofdev);
	if (ret) {
		ehea_error("failed to register device. ret=%d", ret);
		goto out;
	}

	ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
	if (ret) {
		ehea_error("failed to register attributes, ret=%d", ret);
		goto out_unreg_of_dev;
	}

	return &port->ofdev.dev;

out_unreg_of_dev:
	of_device_unregister(&port->ofdev);
out:
	return NULL;
}

static void ehea_unregister_port(struct ehea_port *port)
{
	device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
	of_device_unregister(&port->ofdev);
}

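/*
 * ehea_setup_single_port - allocate and bring up one logical port:
 * allocate the etherdev, sense the port attributes, register the port
 * on the bus, wire up the net_device operations and register the
 * netdev. Returns the new port, or NULL on failure.
 */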
struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
					 u32 logical_port_id,
					 struct device_node *dn)
{
	int ret;
	struct net_device *dev;
	struct ehea_port *port;
	struct device *port_dev;
	int jumbo;

	/* allocate memory for the port structures */
	dev = alloc_etherdev(sizeof(struct ehea_port));
	if (!dev) {
		ehea_error("no mem for net_device");
		ret = -ENOMEM;
		goto out_err;
	}

	port = netdev_priv(dev);

	sema_init(&port->port_lock, 1);
	port->state = EHEA_PORT_DOWN;
	port->sig_comp_iv = sq_entries / 10;

	port->adapter = adapter;
	port->netdev = dev;
	port->logical_port_id = logical_port_id;

	port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT);

	port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL);
	if (!port->mc_list) {
		ret = -ENOMEM;
		goto out_free_ethdev;
	}

	INIT_LIST_HEAD(&port->mc_list->list);

	ret = ehea_sense_port_attr(port);
	if (ret)
		goto out_free_mc_list;

	port_dev = ehea_register_port(port, dn);
	if (!port_dev)
		goto out_free_mc_list;

	SET_NETDEV_DEV(dev, port_dev);

	/* initialize net_device structure */
	memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);

	dev->open = ehea_open;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = ehea_netpoll;
#endif
	dev->stop = ehea_stop;
	dev->hard_start_xmit = ehea_start_xmit;
	dev->get_stats = ehea_get_stats;
	dev->set_multicast_list = ehea_set_multicast_list;
	dev->set_mac_address = ehea_set_mac_addr;
	dev->change_mtu = ehea_change_mtu;
	dev->vlan_rx_register = ehea_vlan_rx_register;
	dev->vlan_rx_add_vid = ehea_vlan_rx_add_vid;
	dev->vlan_rx_kill_vid = ehea_vlan_rx_kill_vid;
	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
		      | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_TX
		      | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
		      | NETIF_F_LLTX;
	dev->tx_timeout = &ehea_tx_watchdog;
	dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;

	INIT_WORK(&port->reset_task, ehea_reset_port);

	ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
	if (ret) {
		ret = -EIO;
		goto out_unreg_port;
	}

	ehea_set_ethtool_ops(dev);

	ret = register_netdev(dev);
	if (ret) {
		ehea_error("register_netdev failed. ret=%d", ret);
		goto out_dereg_bc;
	}

	port->lro_max_aggr = lro_max_aggr;

	ret = ehea_get_jumboframe_status(port, &jumbo);
	if (ret)
		ehea_error("failed determining jumbo frame status for %s",
			   port->netdev->name);

	ehea_info("%s: Jumbo frames are %sabled", dev->name,
		  jumbo == 1 ? "en" : "dis");

	adapter->active_ports++;

	return port;

out_dereg_bc:
	ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
out_unreg_port:
	ehea_unregister_port(port);
out_free_mc_list:
	kfree(port->mc_list);
out_free_ethdev:
	free_netdev(dev);
out_err:
	ehea_error("setting up logical port with id=%d failed, ret=%d",
		   logical_port_id, ret);
	return NULL;
}

static void ehea_shutdown_single_port(struct ehea_port *port)
{
	unregister_netdev(port->netdev);
	ehea_unregister_port(port);
	ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
	kfree(port->mc_list);
	free_netdev(port->netdev);
	port->adapter->active_ports--;
}

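/*
 * ehea_setup_ports - walk the lhea device node and set up one logical
 * port for every child that carries an "ibm,hea-port-no" property.
 */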
static int ehea_setup_ports(struct ehea_adapter *adapter)
{
	struct device_node *lhea_dn;
	struct device_node *eth_dn = NULL;
	const u32 *dn_log_port_id;
	int i = 0;

	lhea_dn = adapter->ofdev->node;
	while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
		dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
						 NULL);
		if (!dn_log_port_id) {
			ehea_error("bad device node: eth_dn name=%s",
				   eth_dn->full_name);
			continue;
		}

		if (ehea_add_adapter_mr(adapter)) {
			ehea_error("creating MR failed");
			of_node_put(eth_dn);
			return -EIO;
		}

		adapter->port[i] = ehea_setup_single_port(adapter,
							  *dn_log_port_id,
							  eth_dn);
		if (adapter->port[i])
			ehea_info("%s -> logical port id #%d",
				  adapter->port[i]->netdev->name,
				  *dn_log_port_id);
		else
			ehea_remove_adapter_mr(adapter);

		i++;
	}

	return 0;
}

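/*
 * ehea_get_eth_dn - find the child device node whose "ibm,hea-port-no"
 * property matches the given logical port id.
 */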
static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
					   u32 logical_port_id)
{
	struct device_node *lhea_dn;
	struct device_node *eth_dn = NULL;
	const u32 *dn_log_port_id;

	lhea_dn = adapter->ofdev->node;
	while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
		dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
						 NULL);
		if (dn_log_port_id)
			if (*dn_log_port_id == logical_port_id)
				return eth_dn;
	}

	return NULL;
}

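/*
 * ehea_probe_port - sysfs store handler: add the logical port whose id
 * is written to the probe_port attribute (DLPAR port add).
 */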
static ssize_t ehea_probe_port(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct ehea_adapter *adapter = dev->driver_data;
	struct ehea_port *port;
	struct device_node *eth_dn = NULL;
	int i;
	u32 logical_port_id;

	sscanf(buf, "%d", &logical_port_id);

	port = ehea_get_port(adapter, logical_port_id);

	if (port) {
		ehea_info("adding port with logical port id=%d failed. port "
			  "already configured as %s.", logical_port_id,
			  port->netdev->name);
		return -EINVAL;
	}

	eth_dn = ehea_get_eth_dn(adapter, logical_port_id);

	if (!eth_dn) {
		ehea_info("no logical port with id %d found", logical_port_id);
		return -EINVAL;
	}

	if (ehea_add_adapter_mr(adapter)) {
		ehea_error("creating MR failed");
		return -EIO;
	}

	port = ehea_setup_single_port(adapter, logical_port_id, eth_dn);

	of_node_put(eth_dn);

	if (port) {
		for (i = 0; i < EHEA_MAX_PORTS; i++)
			if (!adapter->port[i]) {
				adapter->port[i] = port;
				break;
			}

		ehea_info("added %s (logical port id=%d)", port->netdev->name,
			  logical_port_id);
	} else {
		ehea_remove_adapter_mr(adapter);
		return -EIO;
	}

	return (ssize_t) count;
}

static ssize_t ehea_remove_port(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct ehea_adapter *adapter = dev->driver_data;
	struct ehea_port *port;
	int i;
	u32 logical_port_id;

	sscanf(buf, "%d", &logical_port_id);

	port = ehea_get_port(adapter, logical_port_id);

	if (port) {
		ehea_info("removed %s (logical port id=%d)", port->netdev->name,
			  logical_port_id);

		ehea_shutdown_single_port(port);

		for (i = 0; i < EHEA_MAX_PORTS; i++)
			if (adapter->port[i] == port) {
				adapter->port[i] = NULL;
				break;
			}
	} else {
		ehea_error("removing port with logical port id=%d failed. port "
			   "not configured.", logical_port_id);
		return -EINVAL;
	}

	ehea_remove_adapter_mr(adapter);

	return (ssize_t) count;
}

static DEVICE_ATTR(probe_port, S_IWUSR, NULL, ehea_probe_port);
static DEVICE_ATTR(remove_port, S_IWUSR, NULL, ehea_remove_port);

int ehea_create_device_sysfs(struct of_device *dev)
{
	int ret = device_create_file(&dev->dev, &dev_attr_probe_port);
	if (ret)
		goto out;

	ret = device_create_file(&dev->dev, &dev_attr_remove_port);
out:
	return ret;
}

void ehea_remove_device_sysfs(struct of_device *dev)
{
	device_remove_file(&dev->dev, &dev_attr_probe_port);
	device_remove_file(&dev->dev, &dev_attr_remove_port);
}

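/*
 * ehea_probe_adapter - ibmebus probe routine: allocate the adapter,
 * fetch its firmware handle from the device tree, create the event
 * notification queue with its interrupt, add the sysfs attributes and
 * set up all ports.
 */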
static int __devinit ehea_probe_adapter(struct of_device *dev,
					const struct of_device_id *id)
{
	struct ehea_adapter *adapter;
	const u64 *adapter_handle;
	int ret;

	if (!dev || !dev->node) {
		ehea_error("Invalid ibmebus device probed");
		return -EINVAL;
	}

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		ret = -ENOMEM;
		dev_err(&dev->dev, "no mem for ehea_adapter\n");
		goto out;
	}

	list_add(&adapter->list, &adapter_list);
	adapter->ofdev = dev;

	adapter_handle = of_get_property(dev->node, "ibm,hea-handle",
					 NULL);
	if (adapter_handle)
		adapter->handle = *adapter_handle;

	if (!adapter->handle) {
		dev_err(&dev->dev, "failed getting handle for adapter"
			" '%s'\n", dev->node->full_name);
		ret = -ENODEV;
		goto out_free_ad;
	}

	adapter->pd = EHEA_PD_ID;
	dev->dev.driver_data = adapter;

	/* initialize adapter and ports */
	/* get adapter properties */
	ret = ehea_sense_adapter_attr(adapter);
	if (ret) {
		dev_err(&dev->dev, "sense_adapter_attr failed: %d\n", ret);
		goto out_free_ad;
	}

	adapter->neq = ehea_create_eq(adapter,
				      EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1);
	if (!adapter->neq) {
		ret = -EIO;
		dev_err(&dev->dev, "NEQ creation failed\n");
		goto out_free_ad;
	}

	tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
		     (unsigned long)adapter);

	ret = ibmebus_request_irq(adapter->neq->attr.ist1,
				  ehea_interrupt_neq, IRQF_DISABLED,
				  "ehea_neq", adapter);
	if (ret) {
		dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
		goto out_kill_eq;
	}

	ret = ehea_create_device_sysfs(dev);
	if (ret)
		goto out_free_irq;

	ret = ehea_setup_ports(adapter);
	if (ret) {
		dev_err(&dev->dev, "setup_ports failed\n");
		goto out_rem_dev_sysfs;
	}

	ret = 0;
	goto out;

out_rem_dev_sysfs:
	ehea_remove_device_sysfs(dev);
out_free_irq:
	ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
out_kill_eq:
	ehea_destroy_eq(adapter->neq);
out_free_ad:
	kfree(adapter);
out:
	return ret;
}

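/*
 * ehea_remove - ibmebus remove routine: tear down all ports and free
 * the adapter's event queue, interrupt and memory regions.
 */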
static int __devexit ehea_remove(struct of_device *dev)
{
	struct ehea_adapter *adapter = dev->dev.driver_data;
	int i;

	for (i = 0; i < EHEA_MAX_PORTS; i++)
		if (adapter->port[i]) {
			ehea_shutdown_single_port(adapter->port[i]);
			adapter->port[i] = NULL;
		}

	ehea_remove_device_sysfs(dev);

	flush_scheduled_work();

	ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
	tasklet_kill(&adapter->neq_tasklet);

	ehea_destroy_eq(adapter->neq);
	ehea_remove_adapter_mr(adapter);
	list_del(&adapter->list);
	kfree(adapter);

	return 0;
}

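/*
 * On reboot, unregister the driver so that all eHEA firmware resources
 * are freed before the machine restarts.
 */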
static int ehea_reboot_notifier(struct notifier_block *nb,
				unsigned long action, void *unused)
{
	if (action == SYS_RESTART) {
		ehea_info("Reboot: freeing all eHEA resources");
		ibmebus_unregister_driver(&ehea_driver);
	}
	return NOTIFY_DONE;
}

static struct notifier_block ehea_reboot_nb = {
	.notifier_call = ehea_reboot_notifier,
};

static int check_module_parm(void)
{
	int ret = 0;

	if ((rq1_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) {
		ehea_info("Bad parameter: rq1_entries");
		ret = -EINVAL;
	}
	if ((rq2_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) {
		ehea_info("Bad parameter: rq2_entries");
		ret = -EINVAL;
	}
	if ((rq3_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) {
		ehea_info("Bad parameter: rq3_entries");
		ret = -EINVAL;
	}
	if ((sq_entries < EHEA_MIN_ENTRIES_QP) ||
	    (sq_entries > EHEA_MAX_ENTRIES_SQ)) {
		ehea_info("Bad parameter: sq_entries");
		ret = -EINVAL;
	}

	return ret;
}

static ssize_t ehea_show_capabilities(struct device_driver *drv,
				      char *buf)
{
	return sprintf(buf, "%d", EHEA_CAPABILITIES);
}

static DRIVER_ATTR(capabilities, S_IRUSR | S_IRGRP | S_IROTH,
		   ehea_show_capabilities, NULL);

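/*
 * Module init: validate the module parameters, build the busmap,
 * register the reboot notifier and the ibmebus driver, and expose the
 * capabilities attribute.
 */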
int __init ehea_module_init(void)
{
	int ret;

	printk(KERN_INFO "IBM eHEA ethernet device driver (Release %s)\n",
	       DRV_VERSION);

	ehea_driver_flags = 0;
	INIT_WORK(&ehea_rereg_mr_task, ehea_rereg_mrs);
	sema_init(&dlpar_mem_lock, 1);

	ret = check_module_parm();
	if (ret)
		goto out;

	ret = ehea_create_busmap();
	if (ret)
		goto out;

	register_reboot_notifier(&ehea_reboot_nb);

	ret = ibmebus_register_driver(&ehea_driver);
	if (ret) {
		ehea_error("failed registering eHEA device driver on ebus");
		goto out;
	}

	ret = driver_create_file(&ehea_driver.driver,
				 &driver_attr_capabilities);
	if (ret) {
		ehea_error("failed to register capabilities attribute, ret=%d",
			   ret);
		unregister_reboot_notifier(&ehea_reboot_nb);
		ibmebus_unregister_driver(&ehea_driver);
		goto out;
	}
out:
	return ret;
}

static void __exit ehea_module_exit(void)
{
	flush_scheduled_work();
	driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
	ibmebus_unregister_driver(&ehea_driver);
	unregister_reboot_notifier(&ehea_reboot_nb);
	ehea_destroy_busmap();
}

module_init(ehea_module_init);
module_exit(ehea_module_exit);