/*
 * linux/drivers/s390/net/qeth_eddp.c
 *
 * Enhanced Device Driver Packing (EDDP) support for the qeth driver.
 *
 * Copyright 2004 IBM Corporation
 *
 *    Author(s): Thomas Spatzier <tspat@de.ibm.com>
 *
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/ip.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/kernel.h>
#include <linux/tcp.h>
#include <net/tcp.h>
#include <linux/skbuff.h>

#include <net/ip.h>

#include "qeth.h"
#include "qeth_mpc.h"
#include "qeth_eddp.h"

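/*
 * Check whether there are enough empty buffers in the output queue,
 * starting at next_buf_to_fill, to hold all elements of the given
 * context.  Returns the number of buffers needed, or -EBUSY if a
 * required buffer is not empty.
 */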
int
qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *queue,
                                    struct qeth_eddp_context *ctx)
{
        int index = queue->next_buf_to_fill;
        int elements_needed = ctx->num_elements;
        int elements_in_buffer;
        int skbs_in_buffer;
        int buffers_needed = 0;

        QETH_DBF_TEXT(trace, 5, "eddpcbfc");
        while (elements_needed > 0) {
                buffers_needed++;
                if (atomic_read(&queue->bufs[index].state) !=
                                QETH_QDIO_BUF_EMPTY)
                        return -EBUSY;

                elements_in_buffer = QETH_MAX_BUFFER_ELEMENTS(queue->card) -
                                     queue->bufs[index].next_element_to_fill;
                skbs_in_buffer = elements_in_buffer / ctx->elements_per_skb;
                elements_needed -= skbs_in_buffer * ctx->elements_per_skb;
                index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
        }
        return buffers_needed;
}

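/* free all pages, the element list and the context itself */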
static inline void
qeth_eddp_free_context(struct qeth_eddp_context *ctx)
{
        int i;

        QETH_DBF_TEXT(trace, 5, "eddpfctx");
        for (i = 0; i < ctx->num_pages; ++i)
                free_page((unsigned long)ctx->pages[i]);
        kfree(ctx->pages);
        kfree(ctx->elements);
        kfree(ctx);
}

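/* take an additional reference on the context */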
static inline void
qeth_eddp_get_context(struct qeth_eddp_context *ctx)
{
        atomic_inc(&ctx->refcnt);
}

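/* drop a reference; the context is freed when the last one is gone */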
void
qeth_eddp_put_context(struct qeth_eddp_context *ctx)
{
        if (atomic_dec_return(&ctx->refcnt) == 0)
                qeth_eddp_free_context(ctx);
}

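/* drop all context references held by the given output buffer */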
void
qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf)
{
        struct qeth_eddp_context_reference *ref;

        QETH_DBF_TEXT(trace, 6, "eddprctx");
        while (!list_empty(&buf->ctx_list)) {
                ref = list_entry(buf->ctx_list.next,
                                 struct qeth_eddp_context_reference, list);
                qeth_eddp_put_context(ref->ctx);
                list_del(&ref->list);
                kfree(ref);
        }
}

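/*
 * Attach a context to an output buffer: take a reference and queue it
 * on the buffer's ctx_list.
 */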
static inline int
qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf,
                          struct qeth_eddp_context *ctx)
{
        struct qeth_eddp_context_reference *ref;

        QETH_DBF_TEXT(trace, 6, "eddprfcx");
        ref = kmalloc(sizeof(struct qeth_eddp_context_reference), GFP_ATOMIC);
        if (ref == NULL)
                return -ENOMEM;
        qeth_eddp_get_context(ctx);
        ref->ctx = ctx;
        list_add_tail(&ref->list, &buf->ctx_list);
        return 0;
}

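/*
 * Fill the context's elements into the output queue, beginning at
 * buffer 'index'; buffers that become full are set to PRIMED state.
 * Returns the number of buffers to be flushed, or -EBUSY if even the
 * first buffer is not empty.
 */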
int
qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
                      struct qeth_eddp_context *ctx,
                      int index)
{
        struct qeth_qdio_out_buffer *buf = NULL;
        struct qdio_buffer *buffer;
        int elements = ctx->num_elements;
        int element = 0;
        int flush_cnt = 0;
        int must_refcnt = 1;
        int i;

        QETH_DBF_TEXT(trace, 5, "eddpfibu");
        while (elements > 0) {
                buf = &queue->bufs[index];
                if (atomic_read(&buf->state) != QETH_QDIO_BUF_EMPTY) {
                        /* normally this should not happen since we checked
                         * for available elements in
                         * qeth_eddp_check_buffers_for_context
                         */
                        if (element == 0)
                                return -EBUSY;
                        else {
                                PRINT_WARN("could only partially fill eddp "
                                           "buffer!\n");
                                goto out;
                        }
                }
                /* check if the whole next skb fits into current buffer */
                if ((QETH_MAX_BUFFER_ELEMENTS(queue->card) -
                                        buf->next_element_to_fill)
                                < ctx->elements_per_skb) {
                        /* no -> go to next buffer */
                        atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
                        index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
                        flush_cnt++;
                        /* new buffer, so we have to add ctx to the buffer's
                         * ctx_list and increment ctx's refcnt */
                        must_refcnt = 1;
                        continue;
                }
                if (must_refcnt) {
                        must_refcnt = 0;
                        if (qeth_eddp_buf_ref_context(buf, ctx)) {
                                PRINT_WARN("no memory to create eddp context "
                                           "reference\n");
                                goto out_check;
                        }
                }
                buffer = buf->buffer;
                /* fill one skb into buffer */
                for (i = 0; i < ctx->elements_per_skb; ++i) {
                        buffer->element[buf->next_element_to_fill].addr =
                                ctx->elements[element].addr;
                        buffer->element[buf->next_element_to_fill].length =
                                ctx->elements[element].length;
                        buffer->element[buf->next_element_to_fill].flags =
                                ctx->elements[element].flags;
                        buf->next_element_to_fill++;
                        element++;
                        elements--;
                }
        }
out_check:
        if (!queue->do_pack) {
                QETH_DBF_TEXT(trace, 6, "fillbfnp");
                /* set state to PRIMED -> will be flushed */
                if (buf->next_element_to_fill > 0) {
                        atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
                        flush_cnt++;
                }
        } else {
#ifdef CONFIG_QETH_PERF_STATS
                queue->card->perf_stats.skbs_sent_pack++;
#endif
                QETH_DBF_TEXT(trace, 6, "fillbfpa");
                if (buf->next_element_to_fill >=
                                QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
                        /*
                         * packed buffer is full -> set state PRIMED
                         * -> will be flushed
                         */
                        atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
                        flush_cnt++;
                }
        }
out:
        return flush_cnt;
}

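/*
 * Create the header part of the next segment in the context's pages:
 * qeth header, optional MAC and VLAN headers, network and transport
 * header.  If the complete segment would not fit into the current
 * page, it is started on the next one.
 */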
static inline void
qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
                              struct qeth_eddp_data *eddp, int data_len)
{
        u8 *page;
        int page_remainder;
        int page_offset;
        int pkt_len;
        struct qeth_eddp_element *element;

        QETH_DBF_TEXT(trace, 5, "eddpcrsh");
        page = ctx->pages[ctx->offset >> PAGE_SHIFT];
        page_offset = ctx->offset % PAGE_SIZE;
        element = &ctx->elements[ctx->num_elements];
        pkt_len = eddp->nhl + eddp->thl + data_len;
        /* FIXME: layer2 and VLAN !!! */
        if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2)
                pkt_len += ETH_HLEN;
        if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
                pkt_len += VLAN_HLEN;
        /* does the complete packet fit into the current page? */
        page_remainder = PAGE_SIZE - page_offset;
        if (page_remainder < (sizeof(struct qeth_hdr) + pkt_len)) {
                /* no -> go to start of next page */
                ctx->offset += page_remainder;
                page = ctx->pages[ctx->offset >> PAGE_SHIFT];
                page_offset = 0;
        }
        memcpy(page + page_offset, &eddp->qh, sizeof(struct qeth_hdr));
        element->addr = page + page_offset;
        element->length = sizeof(struct qeth_hdr);
        ctx->offset += sizeof(struct qeth_hdr);
        page_offset += sizeof(struct qeth_hdr);
        /* add mac header in layer2 mode */
        if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
                memcpy(page + page_offset, &eddp->mac, ETH_HLEN);
                element->length += ETH_HLEN;
                ctx->offset += ETH_HLEN;
                page_offset += ETH_HLEN;
        }
        /* add VLAN tag */
        if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
                memcpy(page + page_offset, &eddp->vlan, VLAN_HLEN);
                element->length += VLAN_HLEN;
                ctx->offset += VLAN_HLEN;
                page_offset += VLAN_HLEN;
        }
        /* add network header */
        memcpy(page + page_offset, (u8 *)&eddp->nh, eddp->nhl);
        element->length += eddp->nhl;
        eddp->nh_in_ctx = page + page_offset;
        ctx->offset += eddp->nhl;
        page_offset += eddp->nhl;
        /* add transport header */
        memcpy(page + page_offset, (u8 *)&eddp->th, eddp->thl);
        element->length += eddp->thl;
        eddp->th_in_ctx = page + page_offset;
        ctx->offset += eddp->thl;
}

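/*
 * Copy 'len' bytes of payload from the original skb (linear part and
 * page fragments) to dst while updating the running checksum *hcsum.
 */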
static inline void
qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len,
                        u32 *hcsum)
{
        struct skb_frag_struct *frag;
        int left_in_frag;
        int copy_len;
        u8 *src;

        QETH_DBF_TEXT(trace, 5, "eddpcdtc");
        if (skb_shinfo(eddp->skb)->nr_frags == 0) {
                memcpy(dst, eddp->skb->data + eddp->skb_offset, len);
                *hcsum = csum_partial(eddp->skb->data + eddp->skb_offset, len,
                                      *hcsum);
                eddp->skb_offset += len;
        } else {
                while (len > 0) {
                        if (eddp->frag < 0) {
                                /* we're in skb->data */
                                left_in_frag = (eddp->skb->len - eddp->skb->data_len)
                                                - eddp->skb_offset;
                                src = eddp->skb->data + eddp->skb_offset;
                        } else {
                                frag = &skb_shinfo(eddp->skb)->
                                        frags[eddp->frag];
                                left_in_frag = frag->size - eddp->frag_offset;
                                src = (u8 *)(
                                        (page_to_pfn(frag->page) << PAGE_SHIFT) +
                                        frag->page_offset + eddp->frag_offset);
                        }
                        if (left_in_frag <= 0) {
                                eddp->frag++;
                                eddp->frag_offset = 0;
                                continue;
                        }
                        copy_len = min(left_in_frag, len);
                        memcpy(dst, src, copy_len);
                        *hcsum = csum_partial(src, copy_len, *hcsum);
                        dst += copy_len;
                        eddp->frag_offset += copy_len;
                        eddp->skb_offset += copy_len;
                        len -= copy_len;
                }
        }
}

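/*
 * Copy the payload of the current segment into the context pages,
 * creating a new element at each page boundary, and store the folded
 * checksum in the segment's TCP header.
 */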
static inline void
qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx,
                                  struct qeth_eddp_data *eddp, int data_len,
                                  u32 hcsum)
{
        u8 *page;
        int page_remainder;
        int page_offset;
        struct qeth_eddp_element *element;
        int first_lap = 1;

        QETH_DBF_TEXT(trace, 5, "eddpcsdt");
        page = ctx->pages[ctx->offset >> PAGE_SHIFT];
        page_offset = ctx->offset % PAGE_SIZE;
        element = &ctx->elements[ctx->num_elements];
        while (data_len) {
                page_remainder = PAGE_SIZE - page_offset;
                if (page_remainder < data_len) {
                        qeth_eddp_copy_data_tcp(page + page_offset, eddp,
                                                page_remainder, &hcsum);
                        element->length += page_remainder;
                        if (first_lap)
                                element->flags = SBAL_FLAGS_FIRST_FRAG;
                        else
                                element->flags = SBAL_FLAGS_MIDDLE_FRAG;
                        ctx->num_elements++;
                        element++;
                        data_len -= page_remainder;
                        ctx->offset += page_remainder;
                        page = ctx->pages[ctx->offset >> PAGE_SHIFT];
                        page_offset = 0;
                        element->addr = page + page_offset;
                } else {
                        qeth_eddp_copy_data_tcp(page + page_offset, eddp,
                                                data_len, &hcsum);
                        element->length += data_len;
                        if (!first_lap)
                                element->flags = SBAL_FLAGS_LAST_FRAG;
                        ctx->num_elements++;
                        ctx->offset += data_len;
                        data_len = 0;
                }
                first_lap = 0;
        }
        ((struct tcphdr *)eddp->th_in_ctx)->check = csum_fold(hcsum);
}

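/*
 * Return the checksum over the IPv4 pseudo header and the TCP header;
 * the payload is added to this sum while it is copied.
 */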
static inline u32
qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, int data_len)
{
        u32 phcsum; /* pseudo header checksum */

        QETH_DBF_TEXT(trace, 5, "eddpckt4");
        eddp->th.tcp.h.check = 0;
        /* compute pseudo header checksum */
        phcsum = csum_tcpudp_nofold(eddp->nh.ip4.h.saddr, eddp->nh.ip4.h.daddr,
                                    eddp->thl + data_len, IPPROTO_TCP, 0);
        /* compute checksum of tcp header */
        return csum_partial((u8 *)&eddp->th, eddp->thl, phcsum);
}

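/*
 * Return the checksum over the IPv6 pseudo header (addresses and
 * protocol).  Note that, unlike the IPv4 variant, neither the length
 * field nor the TCP header are summed here.
 */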
static inline u32
qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, int data_len)
{
        u32 proto;
        u32 phcsum; /* pseudo header checksum */

        QETH_DBF_TEXT(trace, 5, "eddpckt6");
        eddp->th.tcp.h.check = 0;
        /* compute pseudo header checksum */
        phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.saddr,
                              sizeof(struct in6_addr), 0);
        phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.daddr,
                              sizeof(struct in6_addr), phcsum);
        proto = htonl(IPPROTO_TCP);
        phcsum = csum_partial((u8 *)&proto, sizeof(u32), phcsum);
        return phcsum;
}

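/*
 * Allocate the per-skb segmentation scratch area and copy the original
 * qeth, network and transport headers into it as templates.
 */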
static inline struct qeth_eddp_data *
qeth_eddp_create_eddp_data(struct qeth_hdr *qh, u8 *nh, u8 nhl, u8 *th, u8 thl)
{
        struct qeth_eddp_data *eddp;

        QETH_DBF_TEXT(trace, 5, "eddpcrda");
        eddp = kzalloc(sizeof(struct qeth_eddp_data), GFP_ATOMIC);
        if (eddp) {
                eddp->nhl = nhl;
                eddp->thl = thl;
                memcpy(&eddp->qh, qh, sizeof(struct qeth_hdr));
                memcpy(&eddp->nh, nh, nhl);
                memcpy(&eddp->th, th, thl);
                eddp->frag = -1; /* initially we're in skb->data */
        }
        return eddp;
}

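/*
 * Segment the skb in chunks of tso_size bytes: for each chunk, adjust
 * the qeth/IP/TCP header templates (lengths, IP id, sequence number,
 * checksum, FIN/PSH on the last segment) and emit headers and payload
 * into the context.
 */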
static inline void
__qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
                             struct qeth_eddp_data *eddp)
{
        struct tcphdr *tcph;
        int data_len;
        u32 hcsum;

        QETH_DBF_TEXT(trace, 5, "eddpftcp");
        eddp->skb_offset = sizeof(struct qeth_hdr) + eddp->nhl + eddp->thl;
        if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
                eddp->skb_offset += sizeof(struct ethhdr);
#ifdef CONFIG_QETH_VLAN
                if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
                        eddp->skb_offset += VLAN_HLEN;
#endif /* CONFIG_QETH_VLAN */
        }
        tcph = eddp->skb->h.th;
        while (eddp->skb_offset < eddp->skb->len) {
                data_len = min((int)skb_shinfo(eddp->skb)->tso_size,
                               (int)(eddp->skb->len - eddp->skb_offset));
                /* prepare qdio hdr */
                if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
                        eddp->qh.hdr.l2.pkt_length = data_len + ETH_HLEN +
                                                     eddp->nhl + eddp->thl -
                                                     sizeof(struct qeth_hdr);
#ifdef CONFIG_QETH_VLAN
                        if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
                                eddp->qh.hdr.l2.pkt_length += VLAN_HLEN;
#endif /* CONFIG_QETH_VLAN */
                } else
                        eddp->qh.hdr.l3.length = data_len + eddp->nhl +
                                                 eddp->thl;
                /* prepare ip hdr */
                if (eddp->skb->protocol == htons(ETH_P_IP)) {
                        eddp->nh.ip4.h.tot_len = data_len + eddp->nhl +
                                                 eddp->thl;
                        eddp->nh.ip4.h.check = 0;
                        eddp->nh.ip4.h.check =
                                ip_fast_csum((u8 *)&eddp->nh.ip4.h,
                                                eddp->nh.ip4.h.ihl);
                } else
                        eddp->nh.ip6.h.payload_len = data_len + eddp->thl;
                /* prepare tcp hdr */
                if (data_len == (eddp->skb->len - eddp->skb_offset)) {
                        /* last segment -> set FIN and PSH flags */
                        eddp->th.tcp.h.fin = tcph->fin;
                        eddp->th.tcp.h.psh = tcph->psh;
                }
                if (eddp->skb->protocol == htons(ETH_P_IP))
                        hcsum = qeth_eddp_check_tcp4_hdr(eddp, data_len);
                else
                        hcsum = qeth_eddp_check_tcp6_hdr(eddp, data_len);
                /* fill the next segment into the context */
                qeth_eddp_create_segment_hdrs(ctx, eddp, data_len);
                qeth_eddp_create_segment_data_tcp(ctx, eddp, data_len, hcsum);
                if (eddp->skb_offset >= eddp->skb->len)
                        break;
                /* prepare headers for next round */
                if (eddp->skb->protocol == htons(ETH_P_IP))
                        eddp->nh.ip4.h.id++;
                eddp->th.tcp.h.seq += data_len;
        }
}

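/*
 * Prepare the segmentation data for the skb (original headers and, in
 * layer-2 mode, MAC header and VLAN tag) and fill the context.
 * Returns 0 on success or -ENOMEM.
 */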
static inline int
qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
                           struct sk_buff *skb, struct qeth_hdr *qhdr)
{
        struct qeth_eddp_data *eddp = NULL;

        QETH_DBF_TEXT(trace, 5, "eddpficx");
        /* create our segmentation headers and copy original headers */
        if (skb->protocol == htons(ETH_P_IP))
                eddp = qeth_eddp_create_eddp_data(qhdr, (u8 *)skb->nh.iph,
                                skb->nh.iph->ihl*4,
                                (u8 *)skb->h.th, skb->h.th->doff*4);
        else
                eddp = qeth_eddp_create_eddp_data(qhdr, (u8 *)skb->nh.ipv6h,
                                sizeof(struct ipv6hdr),
                                (u8 *)skb->h.th, skb->h.th->doff*4);

        if (eddp == NULL) {
                QETH_DBF_TEXT(trace, 2, "eddpfcnm");
                return -ENOMEM;
        }
        if (qhdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
                skb->mac.raw = (skb->data) + sizeof(struct qeth_hdr);
                memcpy(&eddp->mac, eth_hdr(skb), ETH_HLEN);
#ifdef CONFIG_QETH_VLAN
                if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
                        /* skb->protocol is already in network byte order */
                        eddp->vlan[0] = skb->protocol;
                        eddp->vlan[1] = htons(vlan_tx_tag_get(skb));
                }
#endif /* CONFIG_QETH_VLAN */
        }
        /* the next flags will only be set on the last segment */
        eddp->th.tcp.h.fin = 0;
        eddp->th.tcp.h.psh = 0;
        eddp->skb = skb;
        /* begin segmentation and fill context */
        __qeth_eddp_fill_context_tcp(ctx, eddp);
        kfree(eddp);
        return 0;
}

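/*
 * Calculate how many pages and buffer elements the segmented skb will
 * occupy; hdr_len is the header overhead of a single segment.
 */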
static inline void
qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb,
                         int hdr_len)
{
        int skbs_per_page;

        QETH_DBF_TEXT(trace, 5, "eddpcanp");
        /* can we put multiple skbs in one page? */
        skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->tso_size + hdr_len);
        if (skbs_per_page > 1) {
                ctx->num_pages = (skb_shinfo(skb)->tso_segs + 1) /
                                 skbs_per_page + 1;
                ctx->elements_per_skb = 1;
        } else {
                /* no -> how many elements per skb? */
                ctx->elements_per_skb = (skb_shinfo(skb)->tso_size + hdr_len +
                                     PAGE_SIZE) >> PAGE_SHIFT;
                ctx->num_pages = ctx->elements_per_skb *
                                 (skb_shinfo(skb)->tso_segs + 1);
        }
        ctx->num_elements = ctx->elements_per_skb *
                            (skb_shinfo(skb)->tso_segs + 1);
}

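/*
 * Allocate a context including page pool and element list, sized
 * according to the given skb.  Returns NULL on allocation failure or
 * if one segment would need more elements than a buffer can hold.
 */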
static inline struct qeth_eddp_context *
qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb,
                                 int hdr_len)
{
        struct qeth_eddp_context *ctx = NULL;
        u8 *addr;
        int i;

        QETH_DBF_TEXT(trace, 5, "creddpcg");
        /* create the context and allocate pages */
        ctx = kzalloc(sizeof(struct qeth_eddp_context), GFP_ATOMIC);
        if (ctx == NULL) {
                QETH_DBF_TEXT(trace, 2, "ceddpcn1");
                return NULL;
        }
        ctx->type = QETH_LARGE_SEND_EDDP;
        qeth_eddp_calc_num_pages(ctx, skb, hdr_len);
        if (ctx->elements_per_skb > QETH_MAX_BUFFER_ELEMENTS(card)) {
                QETH_DBF_TEXT(trace, 2, "ceddpcis");
                kfree(ctx);
                return NULL;
        }
        ctx->pages = kcalloc(ctx->num_pages, sizeof(u8 *), GFP_ATOMIC);
        if (ctx->pages == NULL) {
                QETH_DBF_TEXT(trace, 2, "ceddpcn2");
                kfree(ctx);
                return NULL;
        }
        for (i = 0; i < ctx->num_pages; ++i) {
                addr = (u8 *)__get_free_page(GFP_ATOMIC);
                if (addr == NULL) {
                        QETH_DBF_TEXT(trace, 2, "ceddpcn3");
                        ctx->num_pages = i;
                        qeth_eddp_free_context(ctx);
                        return NULL;
                }
                memset(addr, 0, PAGE_SIZE);
                ctx->pages[i] = addr;
        }
        ctx->elements = kcalloc(ctx->num_elements,
                                sizeof(struct qeth_eddp_element), GFP_ATOMIC);
        if (ctx->elements == NULL) {
                QETH_DBF_TEXT(trace, 2, "ceddpcn4");
                qeth_eddp_free_context(ctx);
                return NULL;
        }
        /* reset num_elements; will be incremented again while the context
         * is filled to reflect the number of actually used elements */
        ctx->num_elements = 0;
        return ctx;
}

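/*
 * Create and fill an EDDP context for a TCP skb (IPv4 or IPv6).
 * Returns the context holding an initial reference, or NULL on error.
 */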
static inline struct qeth_eddp_context *
qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb,
                             struct qeth_hdr *qhdr)
{
        struct qeth_eddp_context *ctx = NULL;

        QETH_DBF_TEXT(trace, 5, "creddpct");
        if (skb->protocol == htons(ETH_P_IP))
                ctx = qeth_eddp_create_context_generic(card, skb,
                        sizeof(struct qeth_hdr) + skb->nh.iph->ihl*4 +
                        skb->h.th->doff*4);
        else if (skb->protocol == htons(ETH_P_IPV6))
                ctx = qeth_eddp_create_context_generic(card, skb,
                        sizeof(struct qeth_hdr) + sizeof(struct ipv6hdr) +
                        skb->h.th->doff*4);
        else
                QETH_DBF_TEXT(trace, 2, "cetcpinv");

        if (ctx == NULL) {
                QETH_DBF_TEXT(trace, 2, "creddpnl");
                return NULL;
        }
        if (qeth_eddp_fill_context_tcp(ctx, skb, qhdr)) {
                QETH_DBF_TEXT(trace, 2, "ceddptfe");
                qeth_eddp_free_context(ctx);
                return NULL;
        }
        atomic_set(&ctx->refcnt, 1);
        return ctx;
}

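/*
 * Entry point for the driver: create an EDDP context for a large send
 * skb.  Only TCP is supported.
 */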
struct qeth_eddp_context *
qeth_eddp_create_context(struct qeth_card *card, struct sk_buff *skb,
                         struct qeth_hdr *qhdr)
{
        QETH_DBF_TEXT(trace, 5, "creddpc");
        switch (skb->sk->sk_protocol) {
        case IPPROTO_TCP:
                return qeth_eddp_create_context_tcp(card, skb, qhdr);
        default:
                QETH_DBF_TEXT(trace, 2, "eddpinvp");
        }
        return NULL;
}