/*
 * linux/drivers/s390/net/qeth_eddp.c
 *
 * Enhanced Device Driver Packing (EDDP) support for the qeth driver.
 *
 * Copyright 2004 IBM Corporation
 *
 *    Author(s): Thomas Spatzier <tspat@de.ibm.com>
 *
 */
#include <linux/errno.h>
#include <linux/ip.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/kernel.h>
#include <linux/tcp.h>
#include <net/tcp.h>
#include <linux/skbuff.h>

#include <net/ip.h>

#include "qeth.h"
#include "qeth_mpc.h"
#include "qeth_eddp.h"

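/*
 * Check how many empty output buffers, starting at next_buf_to_fill, are
 * needed to hold all elements of the given EDDP context.  Returns the
 * number of buffers required, or -EBUSY if a non-empty buffer is reached
 * before all elements fit.
 */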
int
qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *queue,
                                    struct qeth_eddp_context *ctx)
{
        int index = queue->next_buf_to_fill;
        int elements_needed = ctx->num_elements;
        int elements_in_buffer;
        int skbs_in_buffer;
        int buffers_needed = 0;

        QETH_DBF_TEXT(trace, 5, "eddpcbfc");
        while (elements_needed > 0) {
                buffers_needed++;
                if (atomic_read(&queue->bufs[index].state) !=
                                QETH_QDIO_BUF_EMPTY)
                        return -EBUSY;

                elements_in_buffer = QETH_MAX_BUFFER_ELEMENTS(queue->card) -
                                     queue->bufs[index].next_element_to_fill;
                skbs_in_buffer = elements_in_buffer / ctx->elements_per_skb;
                elements_needed -= skbs_in_buffer * ctx->elements_per_skb;
                index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
        }
        return buffers_needed;
}

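/*
 * Release all pages and element descriptors of a context and free the
 * context structure itself.
 */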
static void
qeth_eddp_free_context(struct qeth_eddp_context *ctx)
{
        int i;

        QETH_DBF_TEXT(trace, 5, "eddpfctx");
        for (i = 0; i < ctx->num_pages; ++i)
                free_page((unsigned long)ctx->pages[i]);
        kfree(ctx->pages);
        kfree(ctx->elements);
        kfree(ctx);
}


static inline void
qeth_eddp_get_context(struct qeth_eddp_context *ctx)
{
        atomic_inc(&ctx->refcnt);
}

void
qeth_eddp_put_context(struct qeth_eddp_context *ctx)
{
        if (atomic_dec_return(&ctx->refcnt) == 0)
                qeth_eddp_free_context(ctx);
}

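/*
 * Drop all context references attached to an output buffer; each context
 * is put and may be freed once its refcount reaches zero.
 */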
void
qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf)
{
        struct qeth_eddp_context_reference *ref;

        QETH_DBF_TEXT(trace, 6, "eddprctx");
        while (!list_empty(&buf->ctx_list)) {
                ref = list_entry(buf->ctx_list.next,
                                 struct qeth_eddp_context_reference, list);
                qeth_eddp_put_context(ref->ctx);
                list_del(&ref->list);
                kfree(ref);
        }
}

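/*
 * Attach a reference to @ctx to the buffer's ctx_list and take a reference
 * on the context.  Returns 0 on success or -ENOMEM.
 */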
static int
qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf,
                          struct qeth_eddp_context *ctx)
{
        struct qeth_eddp_context_reference *ref;

        QETH_DBF_TEXT(trace, 6, "eddprfcx");
        ref = kmalloc(sizeof(struct qeth_eddp_context_reference), GFP_ATOMIC);
        if (ref == NULL)
                return -ENOMEM;
        qeth_eddp_get_context(ctx);
        ref->ctx = ctx;
        list_add_tail(&ref->list, &buf->ctx_list);
        return 0;
}

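/*
 * Map the elements of an EDDP context into the output buffers of @queue,
 * starting at @index.  Buffers that become full are set to PRIMED so they
 * will be flushed; the number of buffers to flush is returned, or -EBUSY
 * if the first buffer is not empty.
 */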
int
qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
                      struct qeth_eddp_context *ctx,
                      int index)
{
        struct qeth_qdio_out_buffer *buf = NULL;
        struct qdio_buffer *buffer;
        int elements = ctx->num_elements;
        int element = 0;
        int flush_cnt = 0;
        int must_refcnt = 1;
        int i;

        QETH_DBF_TEXT(trace, 5, "eddpfibu");
        while (elements > 0) {
                buf = &queue->bufs[index];
                if (atomic_read(&buf->state) != QETH_QDIO_BUF_EMPTY) {
                        /* normally this should not happen since we checked
                         * for available elements in
                         * qeth_eddp_check_buffers_for_context
                         */
                        if (element == 0)
                                return -EBUSY;
                        else {
                                PRINT_WARN("could only partially fill eddp "
                                           "buffer!\n");
                                goto out;
                        }
                }
                /* check if the whole next skb fits into current buffer */
                if ((QETH_MAX_BUFFER_ELEMENTS(queue->card) -
                                        buf->next_element_to_fill)
                                < ctx->elements_per_skb) {
                        /* no -> go to next buffer */
                        atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
                        index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
                        flush_cnt++;
                        /* new buffer, so we have to add ctx to the buffer's
                         * ctx_list and increment ctx's refcnt */
                        must_refcnt = 1;
                        continue;
                }
                if (must_refcnt) {
                        must_refcnt = 0;
                        if (qeth_eddp_buf_ref_context(buf, ctx)) {
                                PRINT_WARN("no memory to create eddp context "
                                           "reference\n");
                                goto out_check;
                        }
                }
                buffer = buf->buffer;
                /* fill one skb into buffer */
                for (i = 0; i < ctx->elements_per_skb; ++i) {
                        if (ctx->elements[element].length != 0) {
                                buffer->element[buf->next_element_to_fill].
                                addr = ctx->elements[element].addr;
                                buffer->element[buf->next_element_to_fill].
                                length = ctx->elements[element].length;
                                buffer->element[buf->next_element_to_fill].
                                flags = ctx->elements[element].flags;
                                buf->next_element_to_fill++;
                        }
                        element++;
                        elements--;
                }
        }
out_check:
        if (!queue->do_pack) {
                QETH_DBF_TEXT(trace, 6, "fillbfnp");
                /* set state to PRIMED -> will be flushed */
                if (buf->next_element_to_fill > 0) {
                        atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
                        flush_cnt++;
                }
        } else {
                if (queue->card->options.performance_stats)
                        queue->card->perf_stats.skbs_sent_pack++;
                QETH_DBF_TEXT(trace, 6, "fillbfpa");
                if (buf->next_element_to_fill >=
                                QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
                        /*
                         * packed buffer is full -> set state PRIMED
                         * -> will be flushed
                         */
                        atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
                        flush_cnt++;
                }
        }
out:
        return flush_cnt;
}

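/*
 * Copy the qeth, MAC/VLAN, network and transport headers for the next
 * segment into the context pages and start a new buffer element for them.
 * If the complete packet does not fit into the current page, the headers
 * are placed at the start of the next page.
 */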
static void
qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
                              struct qeth_eddp_data *eddp, int data_len)
{
        u8 *page;
        int page_remainder;
        int page_offset;
        int pkt_len;
        struct qeth_eddp_element *element;

        QETH_DBF_TEXT(trace, 5, "eddpcrsh");
        page = ctx->pages[ctx->offset >> PAGE_SHIFT];
        page_offset = ctx->offset % PAGE_SIZE;
        element = &ctx->elements[ctx->num_elements];
        pkt_len = eddp->nhl + eddp->thl + data_len;
        /* FIXME: layer2 and VLAN !!! */
        if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2)
                pkt_len += ETH_HLEN;
        if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
                pkt_len += VLAN_HLEN;
        /* does the complete packet fit into the current page? */
        page_remainder = PAGE_SIZE - page_offset;
        if (page_remainder < (sizeof(struct qeth_hdr) + pkt_len)) {
                /* no -> go to start of next page */
                ctx->offset += page_remainder;
                page = ctx->pages[ctx->offset >> PAGE_SHIFT];
                page_offset = 0;
        }
        memcpy(page + page_offset, &eddp->qh, sizeof(struct qeth_hdr));
        element->addr = page + page_offset;
        element->length = sizeof(struct qeth_hdr);
        ctx->offset += sizeof(struct qeth_hdr);
        page_offset += sizeof(struct qeth_hdr);
        /* add mac header (?) */
        if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
                memcpy(page + page_offset, &eddp->mac, ETH_HLEN);
                element->length += ETH_HLEN;
                ctx->offset += ETH_HLEN;
                page_offset += ETH_HLEN;
        }
        /* add VLAN tag */
        if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
                memcpy(page + page_offset, &eddp->vlan, VLAN_HLEN);
                element->length += VLAN_HLEN;
                ctx->offset += VLAN_HLEN;
                page_offset += VLAN_HLEN;
        }
        /* add network header */
        memcpy(page + page_offset, (u8 *)&eddp->nh, eddp->nhl);
        element->length += eddp->nhl;
        eddp->nh_in_ctx = page + page_offset;
        ctx->offset += eddp->nhl;
        page_offset += eddp->nhl;
        /* add transport header */
        memcpy(page + page_offset, (u8 *)&eddp->th, eddp->thl);
        element->length += eddp->thl;
        eddp->th_in_ctx = page + page_offset;
        ctx->offset += eddp->thl;
}

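/*
 * Copy @len bytes of payload from the skb (linear data and page fragments)
 * to @dst while updating the running checksum in @hcsum.
 */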
static void
qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len,
                        __wsum *hcsum)
{
        struct skb_frag_struct *frag;
        int left_in_frag;
        int copy_len;
        u8 *src;

        QETH_DBF_TEXT(trace, 5, "eddpcdtc");
        if (skb_shinfo(eddp->skb)->nr_frags == 0) {
                skb_copy_from_linear_data_offset(eddp->skb, eddp->skb_offset,
                                                 dst, len);
                *hcsum = csum_partial(eddp->skb->data + eddp->skb_offset, len,
                                      *hcsum);
                eddp->skb_offset += len;
        } else {
                while (len > 0) {
                        if (eddp->frag < 0) {
                                /* we're in skb->data */
                                left_in_frag = (eddp->skb->len - eddp->skb->data_len)
                                                - eddp->skb_offset;
                                src = eddp->skb->data + eddp->skb_offset;
                        } else {
                                frag = &skb_shinfo(eddp->skb)->
                                        frags[eddp->frag];
                                left_in_frag = frag->size - eddp->frag_offset;
                                src = (u8 *)(
                                        (page_to_pfn(frag->page) << PAGE_SHIFT) +
                                        frag->page_offset + eddp->frag_offset);
                        }
                        if (left_in_frag <= 0) {
                                eddp->frag++;
                                eddp->frag_offset = 0;
                                continue;
                        }
                        copy_len = min(left_in_frag, len);
                        memcpy(dst, src, copy_len);
                        *hcsum = csum_partial(src, copy_len, *hcsum);
                        dst += copy_len;
                        eddp->frag_offset += copy_len;
                        eddp->skb_offset += copy_len;
                        len -= copy_len;
                }
        }
}

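/*
 * Copy the TCP payload of one segment into the context pages, creating a
 * new buffer element whenever a page boundary is crossed, and write the
 * finished TCP checksum into the header previously placed in the context.
 */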
static void
qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx,
                                  struct qeth_eddp_data *eddp, int data_len,
                                  __wsum hcsum)
{
        u8 *page;
        int page_remainder;
        int page_offset;
        struct qeth_eddp_element *element;
        int first_lap = 1;

        QETH_DBF_TEXT(trace, 5, "eddpcsdt");
        page = ctx->pages[ctx->offset >> PAGE_SHIFT];
        page_offset = ctx->offset % PAGE_SIZE;
        element = &ctx->elements[ctx->num_elements];
        while (data_len) {
                page_remainder = PAGE_SIZE - page_offset;
                if (page_remainder < data_len) {
                        qeth_eddp_copy_data_tcp(page + page_offset, eddp,
                                                page_remainder, &hcsum);
                        element->length += page_remainder;
                        if (first_lap)
                                element->flags = SBAL_FLAGS_FIRST_FRAG;
                        else
                                element->flags = SBAL_FLAGS_MIDDLE_FRAG;
                        ctx->num_elements++;
                        element++;
                        data_len -= page_remainder;
                        ctx->offset += page_remainder;
                        page = ctx->pages[ctx->offset >> PAGE_SHIFT];
                        page_offset = 0;
                        element->addr = page + page_offset;
                } else {
                        qeth_eddp_copy_data_tcp(page + page_offset, eddp,
                                                data_len, &hcsum);
                        element->length += data_len;
                        if (!first_lap)
                                element->flags = SBAL_FLAGS_LAST_FRAG;
                        ctx->num_elements++;
                        ctx->offset += data_len;
                        data_len = 0;
                }
                first_lap = 0;
        }
        ((struct tcphdr *)eddp->th_in_ctx)->check = csum_fold(hcsum);
}

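/*
 * Zero the TCP checksum field and return the partial checksum over the
 * IPv4 pseudo header and the TCP header of the current segment.
 */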
static __wsum
qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, int data_len)
{
        __wsum phcsum; /* pseudo header checksum */

        QETH_DBF_TEXT(trace, 5, "eddpckt4");
        eddp->th.tcp.h.check = 0;
        /* compute pseudo header checksum */
        phcsum = csum_tcpudp_nofold(eddp->nh.ip4.h.saddr, eddp->nh.ip4.h.daddr,
                                    eddp->thl + data_len, IPPROTO_TCP, 0);
        /* compute checksum of tcp header */
        return csum_partial((u8 *)&eddp->th, eddp->thl, phcsum);
}

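/*
 * Zero the TCP checksum field and return the partial checksum over the
 * IPv6 source address, destination address and protocol of the current
 * segment.
 */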
static __wsum
qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, int data_len)
{
        __be32 proto;
        __wsum phcsum; /* pseudo header checksum */

        QETH_DBF_TEXT(trace, 5, "eddpckt6");
        eddp->th.tcp.h.check = 0;
        /* compute pseudo header checksum */
        phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.saddr,
                              sizeof(struct in6_addr), 0);
        phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.daddr,
                              sizeof(struct in6_addr), phcsum);
        proto = htonl(IPPROTO_TCP);
        phcsum = csum_partial((u8 *)&proto, sizeof(u32), phcsum);
        return phcsum;
}

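/*
 * Allocate a qeth_eddp_data descriptor and copy the qeth, network and
 * transport headers of the original packet into it.  Returns NULL if the
 * allocation fails.
 */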
static struct qeth_eddp_data *
qeth_eddp_create_eddp_data(struct qeth_hdr *qh, u8 *nh, u8 nhl, u8 *th, u8 thl)
{
        struct qeth_eddp_data *eddp;

        QETH_DBF_TEXT(trace, 5, "eddpcrda");
        eddp = kzalloc(sizeof(struct qeth_eddp_data), GFP_ATOMIC);
        if (eddp) {
                eddp->nhl = nhl;
                eddp->thl = thl;
                memcpy(&eddp->qh, qh, sizeof(struct qeth_hdr));
                memcpy(&eddp->nh, nh, nhl);
                memcpy(&eddp->th, th, thl);
                eddp->frag = -1; /* initially we're in skb->data */
        }
        return eddp;
}

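/*
 * Walk the payload of the GSO skb in gso_size chunks; for each resulting
 * segment, adjust the qeth, IP and TCP headers (lengths, IP id, checksum,
 * sequence number, FIN/PSH on the last segment) and copy headers and data
 * into the context.
 */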
static void
__qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
                             struct qeth_eddp_data *eddp)
{
        struct tcphdr *tcph;
        int data_len;
        __wsum hcsum;

        QETH_DBF_TEXT(trace, 5, "eddpftcp");
        eddp->skb_offset = sizeof(struct qeth_hdr) + eddp->nhl + eddp->thl;
        if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
                eddp->skb_offset += sizeof(struct ethhdr);
#ifdef CONFIG_QETH_VLAN
                if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
                        eddp->skb_offset += VLAN_HLEN;
#endif /* CONFIG_QETH_VLAN */
        }
        tcph = tcp_hdr(eddp->skb);
        while (eddp->skb_offset < eddp->skb->len) {
                data_len = min((int)skb_shinfo(eddp->skb)->gso_size,
                               (int)(eddp->skb->len - eddp->skb_offset));
                /* prepare qdio hdr */
                if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
                        eddp->qh.hdr.l2.pkt_length = data_len + ETH_HLEN +
                                                     eddp->nhl + eddp->thl;
#ifdef CONFIG_QETH_VLAN
                        if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
                                eddp->qh.hdr.l2.pkt_length += VLAN_HLEN;
#endif /* CONFIG_QETH_VLAN */
                } else
                        eddp->qh.hdr.l3.length = data_len + eddp->nhl +
                                                 eddp->thl;
                /* prepare ip hdr */
                if (eddp->skb->protocol == htons(ETH_P_IP)) {
                        eddp->nh.ip4.h.tot_len = htons(data_len + eddp->nhl +
                                                 eddp->thl);
                        eddp->nh.ip4.h.check = 0;
                        eddp->nh.ip4.h.check =
                                ip_fast_csum((u8 *)&eddp->nh.ip4.h,
                                                eddp->nh.ip4.h.ihl);
                } else
                        eddp->nh.ip6.h.payload_len = htons(data_len + eddp->thl);
                /* prepare tcp hdr */
                if (data_len == (eddp->skb->len - eddp->skb_offset)) {
                        /* last segment -> set FIN and PSH flags */
                        eddp->th.tcp.h.fin = tcph->fin;
                        eddp->th.tcp.h.psh = tcph->psh;
                }
                if (eddp->skb->protocol == htons(ETH_P_IP))
                        hcsum = qeth_eddp_check_tcp4_hdr(eddp, data_len);
                else
                        hcsum = qeth_eddp_check_tcp6_hdr(eddp, data_len);
                /* fill the next segment into the context */
                qeth_eddp_create_segment_hdrs(ctx, eddp, data_len);
                qeth_eddp_create_segment_data_tcp(ctx, eddp, data_len, hcsum);
                if (eddp->skb_offset >= eddp->skb->len)
                        break;
                /* prepare headers for next round */
                if (eddp->skb->protocol == htons(ETH_P_IP))
                        eddp->nh.ip4.h.id = htons(ntohs(eddp->nh.ip4.h.id) + 1);
                eddp->th.tcp.h.seq = htonl(ntohl(eddp->th.tcp.h.seq) + data_len);
        }
}

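/*
 * Set up a qeth_eddp_data descriptor from the skb's headers (including the
 * MAC header and VLAN tag in layer 2 mode) and run the TCP segmentation
 * that fills the context.  Returns 0 on success or -ENOMEM.
 */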
static int
qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
                           struct sk_buff *skb, struct qeth_hdr *qhdr)
{
        struct qeth_eddp_data *eddp = NULL;

        QETH_DBF_TEXT(trace, 5, "eddpficx");
        /* create our segmentation headers and copy original headers */
        if (skb->protocol == htons(ETH_P_IP))
                eddp = qeth_eddp_create_eddp_data(qhdr,
                                                  skb_network_header(skb),
                                                  ip_hdrlen(skb),
                                                  skb_transport_header(skb),
                                                  tcp_hdrlen(skb));
        else
                eddp = qeth_eddp_create_eddp_data(qhdr,
                                                  skb_network_header(skb),
                                                  sizeof(struct ipv6hdr),
                                                  skb_transport_header(skb),
                                                  tcp_hdrlen(skb));

        if (eddp == NULL) {
                QETH_DBF_TEXT(trace, 2, "eddpfcnm");
                return -ENOMEM;
        }
        if (qhdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
                skb_set_mac_header(skb, sizeof(struct qeth_hdr));
                memcpy(&eddp->mac, eth_hdr(skb), ETH_HLEN);
#ifdef CONFIG_QETH_VLAN
                if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
                        eddp->vlan[0] = skb->protocol;
                        eddp->vlan[1] = htons(vlan_tx_tag_get(skb));
                }
#endif /* CONFIG_QETH_VLAN */
        }
        /* the next flags will only be set on the last segment */
        eddp->th.tcp.h.fin = 0;
        eddp->th.tcp.h.psh = 0;
        eddp->skb = skb;
        /* begin segmentation and fill context */
        __qeth_eddp_fill_context_tcp(ctx, eddp);
        kfree(eddp);
        return 0;
}

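/*
 * Work out how many context pages and buffer elements the GSO skb needs:
 * either several segments share one page, or each segment spans several
 * elements.
 */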
static void
qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb,
                         int hdr_len)
{
        int skbs_per_page;

        QETH_DBF_TEXT(trace, 5, "eddpcanp");
        /* can we put multiple skbs in one page? */
        skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->gso_size + hdr_len);
        if (skbs_per_page > 1) {
                ctx->num_pages = (skb_shinfo(skb)->gso_segs + 1) /
                                 skbs_per_page + 1;
                ctx->elements_per_skb = 1;
        } else {
                /* no -> how many elements per skb? */
                ctx->elements_per_skb = (skb_shinfo(skb)->gso_size + hdr_len +
                                     PAGE_SIZE) >> PAGE_SHIFT;
                ctx->num_pages = ctx->elements_per_skb *
                                 (skb_shinfo(skb)->gso_segs + 1);
        }
        ctx->num_elements = ctx->elements_per_skb *
                            (skb_shinfo(skb)->gso_segs + 1);
}

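/*
 * Allocate an EDDP context for the skb, size it via
 * qeth_eddp_calc_num_pages() and allocate the backing pages and element
 * array.  Returns NULL on any allocation failure or if a single segment
 * would need more elements than one buffer can hold.
 */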
static struct qeth_eddp_context *
qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb,
                                 int hdr_len)
{
        struct qeth_eddp_context *ctx = NULL;
        u8 *addr;
        int i;

        QETH_DBF_TEXT(trace, 5, "creddpcg");
        /* create the context and allocate pages */
        ctx = kzalloc(sizeof(struct qeth_eddp_context), GFP_ATOMIC);
        if (ctx == NULL) {
                QETH_DBF_TEXT(trace, 2, "ceddpcn1");
                return NULL;
        }
        ctx->type = QETH_LARGE_SEND_EDDP;
        qeth_eddp_calc_num_pages(ctx, skb, hdr_len);
        if (ctx->elements_per_skb > QETH_MAX_BUFFER_ELEMENTS(card)) {
                QETH_DBF_TEXT(trace, 2, "ceddpcis");
                kfree(ctx);
                return NULL;
        }
        ctx->pages = kcalloc(ctx->num_pages, sizeof(u8 *), GFP_ATOMIC);
        if (ctx->pages == NULL) {
                QETH_DBF_TEXT(trace, 2, "ceddpcn2");
                kfree(ctx);
                return NULL;
        }
        for (i = 0; i < ctx->num_pages; ++i) {
                addr = (u8 *)__get_free_page(GFP_ATOMIC);
                if (addr == NULL) {
                        QETH_DBF_TEXT(trace, 2, "ceddpcn3");
                        ctx->num_pages = i;
                        qeth_eddp_free_context(ctx);
                        return NULL;
                }
                memset(addr, 0, PAGE_SIZE);
                ctx->pages[i] = addr;
        }
        ctx->elements = kcalloc(ctx->num_elements,
                                sizeof(struct qeth_eddp_element), GFP_ATOMIC);
        if (ctx->elements == NULL) {
                QETH_DBF_TEXT(trace, 2, "ceddpcn4");
                qeth_eddp_free_context(ctx);
                return NULL;
        }
        /* reset num_elements; will be incremented again in fill_buffer to
         * reflect number of actually used elements */
        ctx->num_elements = 0;
        return ctx;
}

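/*
 * Create an EDDP context sized for the IPv4 or IPv6 TCP headers of the
 * skb, fill it with the segmented data and return it with an initial
 * reference, or NULL on failure.
 */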
static struct qeth_eddp_context *
qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb,
                             struct qeth_hdr *qhdr)
{
        struct qeth_eddp_context *ctx = NULL;

        QETH_DBF_TEXT(trace, 5, "creddpct");
        if (skb->protocol == htons(ETH_P_IP))
                ctx = qeth_eddp_create_context_generic(card, skb,
                                                       (sizeof(struct qeth_hdr) +
                                                        ip_hdrlen(skb) +
                                                        tcp_hdrlen(skb)));
        else if (skb->protocol == htons(ETH_P_IPV6))
                ctx = qeth_eddp_create_context_generic(card, skb,
                        sizeof(struct qeth_hdr) + sizeof(struct ipv6hdr) +
                        tcp_hdrlen(skb));
        else
                QETH_DBF_TEXT(trace, 2, "cetcpinv");

        if (ctx == NULL) {
                QETH_DBF_TEXT(trace, 2, "creddpnl");
                return NULL;
        }
        if (qeth_eddp_fill_context_tcp(ctx, skb, qhdr)) {
                QETH_DBF_TEXT(trace, 2, "ceddptfe");
                qeth_eddp_free_context(ctx);
                return NULL;
        }
        atomic_set(&ctx->refcnt, 1);
        return ctx;
}

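/*
 * Entry point for EDDP context creation: only TCP is supported, any other
 * socket protocol yields NULL.
 */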
struct qeth_eddp_context *
qeth_eddp_create_context(struct qeth_card *card, struct sk_buff *skb,
                         struct qeth_hdr *qhdr, unsigned char sk_protocol)
{
        QETH_DBF_TEXT(trace, 5, "creddpc");
        switch (sk_protocol) {
        case IPPROTO_TCP:
                return qeth_eddp_create_context_tcp(card, skb, qhdr);
        default:
                QETH_DBF_TEXT(trace, 2, "eddpinvp");
        }
        return NULL;
}