/*
 * linux/drivers/s390/net/qeth_eddp.c
 *
 * Enhanced Device Driver Packing (EDDP) support for the qeth driver.
 *
 * Copyright 2004 IBM Corporation
 *
 *    Author(s): Thomas Spatzier <tspat@de.ibm.com>
 *
 */
#include <linux/errno.h>
#include <linux/ip.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/kernel.h>
#include <linux/tcp.h>
#include <net/tcp.h>
#include <linux/skbuff.h>

#include <net/ip.h>

#include "qeth.h"
#include "qeth_mpc.h"
#include "qeth_eddp.h"

int
qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *queue,
                                    struct qeth_eddp_context *ctx)
{
        int index = queue->next_buf_to_fill;
        int elements_needed = ctx->num_elements;
        int elements_in_buffer;
        int skbs_in_buffer;
        int buffers_needed = 0;

        QETH_DBF_TEXT(trace, 5, "eddpcbfc");
        while (elements_needed > 0) {
                buffers_needed++;
                if (atomic_read(&queue->bufs[index].state) !=
                                QETH_QDIO_BUF_EMPTY)
                        return -EBUSY;

                elements_in_buffer = QETH_MAX_BUFFER_ELEMENTS(queue->card) -
                                     queue->bufs[index].next_element_to_fill;
                skbs_in_buffer = elements_in_buffer / ctx->elements_per_skb;
                elements_needed -= skbs_in_buffer * ctx->elements_per_skb;
                index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
        }
        return buffers_needed;
}
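
/*
 * Worked example of the counting above (illustrative numbers, not from
 * the original source): with QETH_MAX_BUFFER_ELEMENTS == 16, all
 * buffers empty and ctx->elements_per_skb == 3, each pass fits
 * 16 / 3 == 5 skbs and covers 15 of the needed elements; a context
 * with num_elements == 45 therefore reports three buffers needed.
 * A buffer that is already partially filled only contributes its
 * remaining elements.
 */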

static void
qeth_eddp_free_context(struct qeth_eddp_context *ctx)
{
        int i;

        QETH_DBF_TEXT(trace, 5, "eddpfctx");
        for (i = 0; i < ctx->num_pages; ++i)
                free_page((unsigned long)ctx->pages[i]);
        kfree(ctx->pages);
        kfree(ctx->elements);
        kfree(ctx);
}

static inline void
qeth_eddp_get_context(struct qeth_eddp_context *ctx)
{
        atomic_inc(&ctx->refcnt);
}

void
qeth_eddp_put_context(struct qeth_eddp_context *ctx)
{
        if (atomic_dec_return(&ctx->refcnt) == 0)
                qeth_eddp_free_context(ctx);
}
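
/*
 * Reference counting sketch: qeth_eddp_create_context_tcp() returns a
 * context with refcnt == 1 (the caller's reference).  Each outbound
 * buffer that carries parts of the context takes an additional
 * reference via qeth_eddp_buf_ref_context(); those are dropped again
 * in qeth_eddp_buf_release_contexts() when the buffer completes.  The
 * final qeth_eddp_put_context() frees the pages and the context.
 */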

void
qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf)
{
        struct qeth_eddp_context_reference *ref;

        QETH_DBF_TEXT(trace, 6, "eddprctx");
        while (!list_empty(&buf->ctx_list)) {
                ref = list_entry(buf->ctx_list.next,
                                 struct qeth_eddp_context_reference, list);
                qeth_eddp_put_context(ref->ctx);
                list_del(&ref->list);
                kfree(ref);
        }
}

static int
qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf,
                          struct qeth_eddp_context *ctx)
{
        struct qeth_eddp_context_reference *ref;

        QETH_DBF_TEXT(trace, 6, "eddprfcx");
        ref = kmalloc(sizeof(struct qeth_eddp_context_reference), GFP_ATOMIC);
        if (ref == NULL)
                return -ENOMEM;
        qeth_eddp_get_context(ctx);
        ref->ctx = ctx;
        list_add_tail(&ref->list, &buf->ctx_list);
        return 0;
}

int
qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
                      struct qeth_eddp_context *ctx,
                      int index)
{
        struct qeth_qdio_out_buffer *buf = NULL;
        struct qdio_buffer *buffer;
        int elements = ctx->num_elements;
        int element = 0;
        int flush_cnt = 0;
        int must_refcnt = 1;
        int i;

        QETH_DBF_TEXT(trace, 5, "eddpfibu");
        while (elements > 0) {
                buf = &queue->bufs[index];
                if (atomic_read(&buf->state) != QETH_QDIO_BUF_EMPTY) {
                        /* normally this should not happen, since we checked
                         * for available elements in
                         * qeth_eddp_check_buffers_for_context
                         */
                        if (element == 0)
                                return -EBUSY;
                        else {
                                PRINT_WARN("could only partially fill eddp "
                                           "buffer!\n");
                                goto out;
                        }
                }
                /* check if the whole next skb fits into the current buffer */
                if ((QETH_MAX_BUFFER_ELEMENTS(queue->card) -
                                        buf->next_element_to_fill)
                                < ctx->elements_per_skb) {
                        /* no -> go to the next buffer */
                        atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
                        index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
                        flush_cnt++;
                        /* new buffer, so we have to add ctx to the buffer's
                         * ctx_list and increment ctx's refcnt */
                        must_refcnt = 1;
                        continue;
                }
                if (must_refcnt) {
                        must_refcnt = 0;
                        if (qeth_eddp_buf_ref_context(buf, ctx)) {
                                PRINT_WARN("no memory to create eddp context "
                                           "reference\n");
                                goto out_check;
                        }
                }
                buffer = buf->buffer;
                /* fill one skb into the buffer */
                for (i = 0; i < ctx->elements_per_skb; ++i) {
                        buffer->element[buf->next_element_to_fill].addr =
                                ctx->elements[element].addr;
                        buffer->element[buf->next_element_to_fill].length =
                                ctx->elements[element].length;
                        buffer->element[buf->next_element_to_fill].flags =
                                ctx->elements[element].flags;
                        buf->next_element_to_fill++;
                        element++;
                        elements--;
                }
        }
out_check:
        if (!queue->do_pack) {
                QETH_DBF_TEXT(trace, 6, "fillbfnp");
                /* set state to PRIMED -> will be flushed */
                if (buf->next_element_to_fill > 0) {
                        atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
                        flush_cnt++;
                }
        } else {
                if (queue->card->options.performance_stats)
                        queue->card->perf_stats.skbs_sent_pack++;
                QETH_DBF_TEXT(trace, 6, "fillbfpa");
                if (buf->next_element_to_fill >=
                                QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
                        /*
                         * the packed buffer is full -> set state PRIMED
                         * -> will be flushed
                         */
                        atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
                        flush_cnt++;
                }
        }
out:
        return flush_cnt;
}
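
/*
 * Expected calling sequence (a sketch only; the actual caller lives in
 * qeth_main.c and may differ in detail):
 *
 *	ctx = qeth_eddp_create_context(card, skb, qhdr);
 *	if (ctx == NULL)
 *		return -ENOMEM;
 *	if (qeth_eddp_check_buffers_for_context(queue, ctx) >= 0) {
 *		flush_cnt = qeth_eddp_fill_buffer(queue, ctx,
 *						  queue->next_buf_to_fill);
 *		(then flush the primed buffers to the device)
 *	}
 *	qeth_eddp_put_context(ctx);
 */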

static void
qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
                              struct qeth_eddp_data *eddp, int data_len)
{
        u8 *page;
        int page_remainder;
        int page_offset;
        int pkt_len;
        struct qeth_eddp_element *element;

        QETH_DBF_TEXT(trace, 5, "eddpcrsh");
        page = ctx->pages[ctx->offset >> PAGE_SHIFT];
        page_offset = ctx->offset % PAGE_SIZE;
        element = &ctx->elements[ctx->num_elements];
        pkt_len = eddp->nhl + eddp->thl + data_len;
        /* FIXME: layer2 and VLAN !!! */
        if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2)
                pkt_len += ETH_HLEN;
        if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
                pkt_len += VLAN_HLEN;
        /* does the complete packet fit into the current page? */
        page_remainder = PAGE_SIZE - page_offset;
        if (page_remainder < (sizeof(struct qeth_hdr) + pkt_len)) {
                /* no -> go to the start of the next page */
                ctx->offset += page_remainder;
                page = ctx->pages[ctx->offset >> PAGE_SHIFT];
                page_offset = 0;
        }
        memcpy(page + page_offset, &eddp->qh, sizeof(struct qeth_hdr));
        element->addr = page + page_offset;
        element->length = sizeof(struct qeth_hdr);
        ctx->offset += sizeof(struct qeth_hdr);
        page_offset += sizeof(struct qeth_hdr);
        /* add the MAC header in layer2 mode */
        if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
                memcpy(page + page_offset, &eddp->mac, ETH_HLEN);
                element->length += ETH_HLEN;
                ctx->offset += ETH_HLEN;
                page_offset += ETH_HLEN;
        }
        /* add the VLAN tag */
        if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
                memcpy(page + page_offset, &eddp->vlan, VLAN_HLEN);
                element->length += VLAN_HLEN;
                ctx->offset += VLAN_HLEN;
                page_offset += VLAN_HLEN;
        }
        /* add the network header */
        memcpy(page + page_offset, (u8 *)&eddp->nh, eddp->nhl);
        element->length += eddp->nhl;
        eddp->nh_in_ctx = page + page_offset;
        ctx->offset += eddp->nhl;
        page_offset += eddp->nhl;
        /* add the transport header */
        memcpy(page + page_offset, (u8 *)&eddp->th, eddp->thl);
        element->length += eddp->thl;
        eddp->th_in_ctx = page + page_offset;
        ctx->offset += eddp->thl;
}
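
/*
 * Resulting layout of one segment's header element inside a context
 * page (the layer-2 and VLAN parts are only present when applicable):
 *
 *	+----------+---------+------+---------+---------+
 *	| qeth_hdr | eth hdr | vlan | net hdr | tcp hdr |
 *	+----------+---------+------+---------+---------+
 *
 * The element's length covers all copied headers; the segment payload
 * is appended by qeth_eddp_create_segment_data_tcp() starting at
 * ctx->offset, i.e. directly behind the transport header.
 */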

static void
qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len,
                        __wsum *hcsum)
{
        struct skb_frag_struct *frag;
        int left_in_frag;
        int copy_len;
        u8 *src;

        QETH_DBF_TEXT(trace, 5, "eddpcdtc");
        if (skb_shinfo(eddp->skb)->nr_frags == 0) {
                memcpy(dst, eddp->skb->data + eddp->skb_offset, len);
                *hcsum = csum_partial(eddp->skb->data + eddp->skb_offset, len,
                                      *hcsum);
                eddp->skb_offset += len;
        } else {
                while (len > 0) {
                        if (eddp->frag < 0) {
                                /* we're in skb->data */
                                left_in_frag = (eddp->skb->len -
                                                eddp->skb->data_len) -
                                                eddp->skb_offset;
                                src = eddp->skb->data + eddp->skb_offset;
                        } else {
                                frag = &skb_shinfo(eddp->skb)->
                                        frags[eddp->frag];
                                left_in_frag = frag->size - eddp->frag_offset;
                                /* note: this computes the frame address and
                                 * relies on the kernel's 1:1 mapping of
                                 * memory on s390 to dereference it */
                                src = (u8 *)(
                                        (page_to_pfn(frag->page) << PAGE_SHIFT) +
                                        frag->page_offset + eddp->frag_offset);
                        }
                        if (left_in_frag <= 0) {
                                eddp->frag++;
                                eddp->frag_offset = 0;
                                continue;
                        }
                        copy_len = min(left_in_frag, len);
                        memcpy(dst, src, copy_len);
                        *hcsum = csum_partial(src, copy_len, *hcsum);
                        dst += copy_len;
                        eddp->frag_offset += copy_len;
                        eddp->skb_offset += copy_len;
                        len -= copy_len;
                }
        }
}

static void
qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx,
                                  struct qeth_eddp_data *eddp, int data_len,
                                  __wsum hcsum)
{
        u8 *page;
        int page_remainder;
        int page_offset;
        struct qeth_eddp_element *element;
        int first_lap = 1;

        QETH_DBF_TEXT(trace, 5, "eddpcsdt");
        page = ctx->pages[ctx->offset >> PAGE_SHIFT];
        page_offset = ctx->offset % PAGE_SIZE;
        element = &ctx->elements[ctx->num_elements];
        while (data_len) {
                page_remainder = PAGE_SIZE - page_offset;
                if (page_remainder < data_len) {
                        qeth_eddp_copy_data_tcp(page + page_offset, eddp,
                                                page_remainder, &hcsum);
                        element->length += page_remainder;
                        if (first_lap)
                                element->flags = SBAL_FLAGS_FIRST_FRAG;
                        else
                                element->flags = SBAL_FLAGS_MIDDLE_FRAG;
                        ctx->num_elements++;
                        element++;
                        data_len -= page_remainder;
                        ctx->offset += page_remainder;
                        page = ctx->pages[ctx->offset >> PAGE_SHIFT];
                        page_offset = 0;
                        element->addr = page + page_offset;
                } else {
                        qeth_eddp_copy_data_tcp(page + page_offset, eddp,
                                                data_len, &hcsum);
                        element->length += data_len;
                        if (!first_lap)
                                element->flags = SBAL_FLAGS_LAST_FRAG;
                        ctx->num_elements++;
                        ctx->offset += data_len;
                        data_len = 0;
                }
                first_lap = 0;
        }
        ((struct tcphdr *)eddp->th_in_ctx)->check = csum_fold(hcsum);
}
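
/*
 * Fragment flag summary for the elements created above: a segment that
 * spills over one or more page boundaries is described by several
 * elements marked FIRST_FRAG, MIDDLE_FRAG, ..., LAST_FRAG; a segment
 * whose payload fits into the current page keeps flags == 0, i.e. a
 * single-fragment element that already contains the headers copied by
 * qeth_eddp_create_segment_hdrs().
 */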

static __wsum
qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, int data_len)
{
        __wsum phcsum; /* pseudo header checksum */

        QETH_DBF_TEXT(trace, 5, "eddpckt4");
        eddp->th.tcp.h.check = 0;
        /* compute the pseudo header checksum */
        phcsum = csum_tcpudp_nofold(eddp->nh.ip4.h.saddr, eddp->nh.ip4.h.daddr,
                                    eddp->thl + data_len, IPPROTO_TCP, 0);
        /* compute the checksum of the tcp header */
        return csum_partial((u8 *)&eddp->th, eddp->thl, phcsum);
}

static __wsum
qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, int data_len)
{
        __be32 proto;
        __wsum phcsum; /* pseudo header checksum */

        QETH_DBF_TEXT(trace, 5, "eddpckt6");
        eddp->th.tcp.h.check = 0;
        /* compute the pseudo header checksum */
        phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.saddr,
                              sizeof(struct in6_addr), 0);
        phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.daddr,
                              sizeof(struct in6_addr), phcsum);
        proto = htonl(IPPROTO_TCP);
        phcsum = csum_partial((u8 *)&proto, sizeof(u32), phcsum);
        return phcsum;
}
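
/*
 * Checksum flow: the unfolded sum returned by these helpers covers the
 * pseudo header (plus, for IPv4, the TCP header itself).  The payload
 * bytes are accumulated into it by qeth_eddp_copy_data_tcp() while
 * they are copied, and qeth_eddp_create_segment_data_tcp() finally
 * folds the result into the TCP header that was copied into the
 * context (eddp->th_in_ctx).
 */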

static struct qeth_eddp_data *
qeth_eddp_create_eddp_data(struct qeth_hdr *qh, u8 *nh, u8 nhl, u8 *th, u8 thl)
{
        struct qeth_eddp_data *eddp;

        QETH_DBF_TEXT(trace, 5, "eddpcrda");
        eddp = kzalloc(sizeof(struct qeth_eddp_data), GFP_ATOMIC);
        if (eddp) {
                eddp->nhl = nhl;
                eddp->thl = thl;
                memcpy(&eddp->qh, qh, sizeof(struct qeth_hdr));
                memcpy(&eddp->nh, nh, nhl);
                memcpy(&eddp->th, th, thl);
                eddp->frag = -1; /* initially we're in skb->data */
        }
        return eddp;
}

static void
__qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
                             struct qeth_eddp_data *eddp)
{
        struct tcphdr *tcph;
        int data_len;
        __wsum hcsum;

        QETH_DBF_TEXT(trace, 5, "eddpftcp");
        eddp->skb_offset = sizeof(struct qeth_hdr) + eddp->nhl + eddp->thl;
        if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
                eddp->skb_offset += sizeof(struct ethhdr);
#ifdef CONFIG_QETH_VLAN
                if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
                        eddp->skb_offset += VLAN_HLEN;
#endif /* CONFIG_QETH_VLAN */
        }
        tcph = eddp->skb->h.th;
        while (eddp->skb_offset < eddp->skb->len) {
                data_len = min((int)skb_shinfo(eddp->skb)->gso_size,
                               (int)(eddp->skb->len - eddp->skb_offset));
                /* prepare the qdio hdr */
                if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
                        eddp->qh.hdr.l2.pkt_length = data_len + ETH_HLEN +
                                                     eddp->nhl + eddp->thl -
                                                     sizeof(struct qeth_hdr);
#ifdef CONFIG_QETH_VLAN
                        if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
                                eddp->qh.hdr.l2.pkt_length += VLAN_HLEN;
#endif /* CONFIG_QETH_VLAN */
                } else
                        eddp->qh.hdr.l3.length = data_len + eddp->nhl +
                                                 eddp->thl;
                /* prepare the ip hdr */
                if (eddp->skb->protocol == htons(ETH_P_IP)) {
                        eddp->nh.ip4.h.tot_len = htons(data_len + eddp->nhl +
                                                       eddp->thl);
                        eddp->nh.ip4.h.check = 0;
                        eddp->nh.ip4.h.check =
                                ip_fast_csum((u8 *)&eddp->nh.ip4.h,
                                             eddp->nh.ip4.h.ihl);
                } else
                        eddp->nh.ip6.h.payload_len = htons(data_len + eddp->thl);
                /* prepare the tcp hdr */
                if (data_len == (eddp->skb->len - eddp->skb_offset)) {
                        /* last segment -> set FIN and PSH flags */
                        eddp->th.tcp.h.fin = tcph->fin;
                        eddp->th.tcp.h.psh = tcph->psh;
                }
                if (eddp->skb->protocol == htons(ETH_P_IP))
                        hcsum = qeth_eddp_check_tcp4_hdr(eddp, data_len);
                else
                        hcsum = qeth_eddp_check_tcp6_hdr(eddp, data_len);
                /* fill the next segment into the context */
                qeth_eddp_create_segment_hdrs(ctx, eddp, data_len);
                qeth_eddp_create_segment_data_tcp(ctx, eddp, data_len, hcsum);
                if (eddp->skb_offset >= eddp->skb->len)
                        break;
                /* prepare the headers for the next round */
                if (eddp->skb->protocol == htons(ETH_P_IP))
                        eddp->nh.ip4.h.id = htons(ntohs(eddp->nh.ip4.h.id) + 1);
                eddp->th.tcp.h.seq = htonl(ntohl(eddp->th.tcp.h.seq) + data_len);
        }
}
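
/*
 * Per-segment bookkeeping in the loop above: FIN/PSH are only taken
 * over from the original TCP header for the last segment, the IPv4
 * identification is incremented by one per segment, and the sequence
 * number advances by the amount of payload just emitted, much like the
 * software GSO path segments an oversized TCP skb.
 */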

static int
qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
                           struct sk_buff *skb, struct qeth_hdr *qhdr)
{
        struct qeth_eddp_data *eddp = NULL;

        QETH_DBF_TEXT(trace, 5, "eddpficx");
        /* create our segmentation headers and copy the original headers */
        if (skb->protocol == htons(ETH_P_IP))
                eddp = qeth_eddp_create_eddp_data(qhdr, (u8 *)skb->nh.iph,
                                skb->nh.iph->ihl*4,
                                (u8 *)skb->h.th, skb->h.th->doff*4);
        else
                eddp = qeth_eddp_create_eddp_data(qhdr, (u8 *)skb->nh.ipv6h,
                                sizeof(struct ipv6hdr),
                                (u8 *)skb->h.th, skb->h.th->doff*4);

        if (eddp == NULL) {
                QETH_DBF_TEXT(trace, 2, "eddpfcnm");
                return -ENOMEM;
        }
        if (qhdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
                skb->mac.raw = (skb->data) + sizeof(struct qeth_hdr);
                memcpy(&eddp->mac, eth_hdr(skb), ETH_HLEN);
#ifdef CONFIG_QETH_VLAN
                if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
                        eddp->vlan[0] = skb->protocol;
                        eddp->vlan[1] = htons(vlan_tx_tag_get(skb));
                }
#endif /* CONFIG_QETH_VLAN */
        }
        /* the following flags will only be set on the last segment */
        eddp->th.tcp.h.fin = 0;
        eddp->th.tcp.h.psh = 0;
        eddp->skb = skb;
        /* begin segmentation and fill the context */
        __qeth_eddp_fill_context_tcp(ctx, eddp);
        kfree(eddp);
        return 0;
}

static void
qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb,
                         int hdr_len)
{
        int skbs_per_page;

        QETH_DBF_TEXT(trace, 5, "eddpcanp");
        /* can we put multiple skbs into one page? */
        skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->gso_size + hdr_len);
        if (skbs_per_page > 1) {
                ctx->num_pages = (skb_shinfo(skb)->gso_segs + 1) /
                                 skbs_per_page + 1;
                ctx->elements_per_skb = 1;
        } else {
                /* no -> how many elements per skb? */
                ctx->elements_per_skb = (skb_shinfo(skb)->gso_size + hdr_len +
                                         PAGE_SIZE) >> PAGE_SHIFT;
                ctx->num_pages = ctx->elements_per_skb *
                                 (skb_shinfo(skb)->gso_segs + 1);
        }
        ctx->num_elements = ctx->elements_per_skb *
                            (skb_shinfo(skb)->gso_segs + 1);
}
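
/*
 * Worked example (illustrative numbers): with PAGE_SIZE == 4096,
 * gso_size == 1460 and hdr_len == 86 we get
 * skbs_per_page = 4096 / 1546 == 2, so each segment fits into a single
 * element (elements_per_skb == 1) and, for gso_segs == 9,
 * num_pages = (9 + 1) / 2 + 1 == 6.  With gso_size == 8192 instead,
 * elements_per_skb = (8192 + 86 + 4096) >> PAGE_SHIFT == 3 and
 * num_pages = 3 * (9 + 1) == 30.
 */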

static struct qeth_eddp_context *
qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb,
                                 int hdr_len)
{
        struct qeth_eddp_context *ctx = NULL;
        u8 *addr;
        int i;

        QETH_DBF_TEXT(trace, 5, "creddpcg");
        /* create the context and allocate the pages */
        ctx = kzalloc(sizeof(struct qeth_eddp_context), GFP_ATOMIC);
        if (ctx == NULL) {
                QETH_DBF_TEXT(trace, 2, "ceddpcn1");
                return NULL;
        }
        ctx->type = QETH_LARGE_SEND_EDDP;
        qeth_eddp_calc_num_pages(ctx, skb, hdr_len);
        if (ctx->elements_per_skb > QETH_MAX_BUFFER_ELEMENTS(card)) {
                QETH_DBF_TEXT(trace, 2, "ceddpcis");
                kfree(ctx);
                return NULL;
        }
        ctx->pages = kcalloc(ctx->num_pages, sizeof(u8 *), GFP_ATOMIC);
        if (ctx->pages == NULL) {
                QETH_DBF_TEXT(trace, 2, "ceddpcn2");
                kfree(ctx);
                return NULL;
        }
        for (i = 0; i < ctx->num_pages; ++i) {
                addr = (u8 *)__get_free_page(GFP_ATOMIC);
                if (addr == NULL) {
                        QETH_DBF_TEXT(trace, 2, "ceddpcn3");
                        ctx->num_pages = i;
                        qeth_eddp_free_context(ctx);
                        return NULL;
                }
                memset(addr, 0, PAGE_SIZE);
                ctx->pages[i] = addr;
        }
        ctx->elements = kcalloc(ctx->num_elements,
                                sizeof(struct qeth_eddp_element), GFP_ATOMIC);
        if (ctx->elements == NULL) {
                QETH_DBF_TEXT(trace, 2, "ceddpcn4");
                qeth_eddp_free_context(ctx);
                return NULL;
        }
        /* reset num_elements; it is incremented again while the segments
         * are created, so that it finally reflects the number of actually
         * used elements */
        ctx->num_elements = 0;
        return ctx;
}

static struct qeth_eddp_context *
qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb,
                             struct qeth_hdr *qhdr)
{
        struct qeth_eddp_context *ctx = NULL;

        QETH_DBF_TEXT(trace, 5, "creddpct");
        if (skb->protocol == htons(ETH_P_IP))
                ctx = qeth_eddp_create_context_generic(card, skb,
                        sizeof(struct qeth_hdr) + skb->nh.iph->ihl*4 +
                        skb->h.th->doff*4);
        else if (skb->protocol == htons(ETH_P_IPV6))
                ctx = qeth_eddp_create_context_generic(card, skb,
                        sizeof(struct qeth_hdr) + sizeof(struct ipv6hdr) +
                        skb->h.th->doff*4);
        else
                QETH_DBF_TEXT(trace, 2, "cetcpinv");

        if (ctx == NULL) {
                QETH_DBF_TEXT(trace, 2, "creddpnl");
                return NULL;
        }
        if (qeth_eddp_fill_context_tcp(ctx, skb, qhdr)) {
                QETH_DBF_TEXT(trace, 2, "ceddptfe");
                qeth_eddp_free_context(ctx);
                return NULL;
        }
        atomic_set(&ctx->refcnt, 1);
        return ctx;
}

struct qeth_eddp_context *
qeth_eddp_create_context(struct qeth_card *card, struct sk_buff *skb,
                         struct qeth_hdr *qhdr)
{
        QETH_DBF_TEXT(trace, 5, "creddpc");
        /* forwarded or bridged skbs may carry no socket; checking here
         * avoids a NULL dereference below */
        if (skb->sk == NULL) {
                QETH_DBF_TEXT(trace, 2, "eddpinvp");
                return NULL;
        }
        switch (skb->sk->sk_protocol) {
        case IPPROTO_TCP:
                return qeth_eddp_create_context_tcp(card, skb, qhdr);
        default:
                QETH_DBF_TEXT(trace, 2, "eddpinvp");
        }
        return NULL;
}