/*
 *  drivers/s390/net/qeth_core_offl.c
 *
 *    Copyright IBM Corp. 2007
 *    Author(s): Thomas Spatzier <tspat@de.ibm.com>,
 *		 Frank Blaschka <frank.blaschka@de.ibm.com>
 */

#include <linux/errno.h>
#include <linux/ip.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/kernel.h>
#include <linux/tcp.h>
#include <linux/udp.h>		/* udp_hdr() in qeth_tx_csum() */
#include <net/tcp.h>
#include <linux/skbuff.h>

#include <net/ip.h>
#include <net/ip6_checksum.h>

#include "qeth_core.h"
#include "qeth_core_mpc.h"
#include "qeth_core_offl.h"

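/*
 * Check whether enough empty QDIO output buffers are available to hold
 * all elements of the given EDDP context, starting at the queue's
 * next_buf_to_fill position.  Returns the number of buffers required,
 * or -EBUSY if a buffer in that range is not empty.
 */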
int qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *queue,
		struct qeth_eddp_context *ctx)
{
	int index = queue->next_buf_to_fill;
	int elements_needed = ctx->num_elements;
	int elements_in_buffer;
	int skbs_in_buffer;
	int buffers_needed = 0;

	QETH_DBF_TEXT(TRACE, 5, "eddpcbfc");
	while (elements_needed > 0) {
		buffers_needed++;
		if (atomic_read(&queue->bufs[index].state) !=
				QETH_QDIO_BUF_EMPTY)
			return -EBUSY;

		elements_in_buffer = QETH_MAX_BUFFER_ELEMENTS(queue->card) -
				     queue->bufs[index].next_element_to_fill;
		skbs_in_buffer = elements_in_buffer / ctx->elements_per_skb;
		elements_needed -= skbs_in_buffer * ctx->elements_per_skb;
		index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
	}
	return buffers_needed;
}

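/*
 * EDDP contexts are reference counted: the code that creates a context
 * holds the initial reference, and every QDIO output buffer that
 * carries elements of the context takes another one.  The context and
 * its pages are freed when the last reference is dropped.
 */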
static void qeth_eddp_free_context(struct qeth_eddp_context *ctx)
{
	int i;

	QETH_DBF_TEXT(TRACE, 5, "eddpfctx");
	for (i = 0; i < ctx->num_pages; ++i)
		free_page((unsigned long)ctx->pages[i]);
	kfree(ctx->pages);
	kfree(ctx->elements);
	kfree(ctx);
}

static void qeth_eddp_get_context(struct qeth_eddp_context *ctx)
{
	atomic_inc(&ctx->refcnt);
}

void qeth_eddp_put_context(struct qeth_eddp_context *ctx)
{
	if (atomic_dec_return(&ctx->refcnt) == 0)
		qeth_eddp_free_context(ctx);
}
EXPORT_SYMBOL_GPL(qeth_eddp_put_context);

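/*
 * Each output buffer keeps a list of references to the contexts whose
 * data it carries.  When the buffer completes, all references are
 * dropped again, which may free the contexts.
 */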
void qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf)
{
	struct qeth_eddp_context_reference *ref;

	QETH_DBF_TEXT(TRACE, 6, "eddprctx");
	while (!list_empty(&buf->ctx_list)) {
		ref = list_entry(buf->ctx_list.next,
				 struct qeth_eddp_context_reference, list);
		qeth_eddp_put_context(ref->ctx);
		list_del(&ref->list);
		kfree(ref);
	}
}

static int qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf,
		struct qeth_eddp_context *ctx)
{
	struct qeth_eddp_context_reference *ref;

	QETH_DBF_TEXT(TRACE, 6, "eddprfcx");
	ref = kmalloc(sizeof(struct qeth_eddp_context_reference), GFP_ATOMIC);
	if (ref == NULL)
		return -ENOMEM;
	qeth_eddp_get_context(ctx);
	ref->ctx = ctx;
	list_add_tail(&ref->list, &buf->ctx_list);
	return 0;
}

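/*
 * Transfer the elements of an EDDP context into QDIO output buffers,
 * starting at buffer 'index'.  A segment (elements_per_skb consecutive
 * elements) is never split across buffers; a buffer that cannot hold
 * another complete segment is primed and the next one is used.  Returns
 * the number of buffers primed for flushing, or -EBUSY if not even the
 * first element could be placed.
 */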
int qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
		struct qeth_eddp_context *ctx, int index)
{
	struct qeth_qdio_out_buffer *buf = NULL;
	struct qdio_buffer *buffer;
	int elements = ctx->num_elements;
	int element = 0;
	int flush_cnt = 0;
	int must_refcnt = 1;
	int i;

	QETH_DBF_TEXT(TRACE, 5, "eddpfibu");
	while (elements > 0) {
		buf = &queue->bufs[index];
		if (atomic_read(&buf->state) != QETH_QDIO_BUF_EMPTY) {
			/* normally this should not happen since we checked
			 * for available elements in
			 * qeth_check_elements_for_context */
			if (element == 0)
				return -EBUSY;
			QETH_DBF_MESSAGE(2, "could only partially fill "
				"eddp buffer!\n");
			goto out;
		}
		/* check if the whole next skb fits into current buffer */
		if ((QETH_MAX_BUFFER_ELEMENTS(queue->card) -
					buf->next_element_to_fill)
				< ctx->elements_per_skb) {
			/* no -> go to next buffer */
			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
			index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
			flush_cnt++;
			/* new buffer, so we have to add ctx to the buffer's
			 * ctx_list and increment ctx's refcnt */
			must_refcnt = 1;
			continue;
		}
		if (must_refcnt) {
			must_refcnt = 0;
			if (qeth_eddp_buf_ref_context(buf, ctx)) {
				QETH_DBF_TEXT(TRACE, 2, "eddprcfe");
				goto out_check;
			}
		}
		buffer = buf->buffer;
		/* fill one skb into buffer */
		for (i = 0; i < ctx->elements_per_skb; ++i) {
			if (ctx->elements[element].length != 0) {
				buffer->element[buf->next_element_to_fill].
					addr = ctx->elements[element].addr;
				buffer->element[buf->next_element_to_fill].
					length = ctx->elements[element].length;
				buffer->element[buf->next_element_to_fill].
					flags = ctx->elements[element].flags;
				buf->next_element_to_fill++;
			}
			element++;
			elements--;
		}
	}
out_check:
	if (!queue->do_pack) {
		QETH_DBF_TEXT(TRACE, 6, "fillbfnp");
		/* set state to PRIMED -> will be flushed */
		if (buf->next_element_to_fill > 0) {
			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
			flush_cnt++;
		}
	} else {
		if (queue->card->options.performance_stats)
			queue->card->perf_stats.skbs_sent_pack++;
		QETH_DBF_TEXT(TRACE, 6, "fillbfpa");
		if (buf->next_element_to_fill >=
				QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
			/* packed buffer is full -> set state PRIMED
			 * -> will be flushed */
			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
			flush_cnt++;
		}
	}
out:
	return flush_cnt;
}

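/*
 * Write the QDIO header plus MAC/VLAN (layer-2 mode only), network and
 * transport headers of one segment into the context's page pool and
 * describe them with a single scatter element.  If the complete packet
 * would not fit into the rest of the current page, the headers are
 * placed at the start of the next page instead.
 */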
static void qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
		struct qeth_eddp_data *eddp, int data_len)
{
	u8 *page;
	int page_remainder;
	int page_offset;
	int pkt_len;
	struct qeth_eddp_element *element;

	QETH_DBF_TEXT(TRACE, 5, "eddpcrsh");
	page = ctx->pages[ctx->offset >> PAGE_SHIFT];
	page_offset = ctx->offset % PAGE_SIZE;
	element = &ctx->elements[ctx->num_elements];
	pkt_len = eddp->nhl + eddp->thl + data_len;
	/* FIXME: layer2 and VLAN !!! */
	if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2)
		pkt_len += ETH_HLEN;
	if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
		pkt_len += VLAN_HLEN;
	/* does the complete packet fit into the current page? */
	page_remainder = PAGE_SIZE - page_offset;
	if (page_remainder < (sizeof(struct qeth_hdr) + pkt_len)) {
		/* no -> go to start of next page */
		ctx->offset += page_remainder;
		page = ctx->pages[ctx->offset >> PAGE_SHIFT];
		page_offset = 0;
	}
	memcpy(page + page_offset, &eddp->qh, sizeof(struct qeth_hdr));
	element->addr = page + page_offset;
	element->length = sizeof(struct qeth_hdr);
	ctx->offset += sizeof(struct qeth_hdr);
	page_offset += sizeof(struct qeth_hdr);
	/* add mac header (layer2 mode only) */
	if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
		memcpy(page + page_offset, &eddp->mac, ETH_HLEN);
		element->length += ETH_HLEN;
		ctx->offset += ETH_HLEN;
		page_offset += ETH_HLEN;
	}
	/* add VLAN tag */
	if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
		memcpy(page + page_offset, &eddp->vlan, VLAN_HLEN);
		element->length += VLAN_HLEN;
		ctx->offset += VLAN_HLEN;
		page_offset += VLAN_HLEN;
	}
	/* add network header */
	memcpy(page + page_offset, (u8 *)&eddp->nh, eddp->nhl);
	element->length += eddp->nhl;
	eddp->nh_in_ctx = page + page_offset;
	ctx->offset += eddp->nhl;
	page_offset += eddp->nhl;
	/* add transport header */
	memcpy(page + page_offset, (u8 *)&eddp->th, eddp->thl);
	element->length += eddp->thl;
	eddp->th_in_ctx = page + page_offset;
	ctx->offset += eddp->thl;
}

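/*
 * Copy 'len' bytes of TCP payload from the original skb (linear data
 * and page fragments) to 'dst', folding the copied bytes into the
 * running checksum at *hcsum.  eddp->frag == -1 means we are still
 * copying from skb->data; afterwards eddp->frag indexes the current
 * page fragment.
 */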
static void qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp,
		int len, __wsum *hcsum)
{
	struct skb_frag_struct *frag;
	int left_in_frag;
	int copy_len;
	u8 *src;

	QETH_DBF_TEXT(TRACE, 5, "eddpcdtc");
	if (skb_shinfo(eddp->skb)->nr_frags == 0) {
		skb_copy_from_linear_data_offset(eddp->skb, eddp->skb_offset,
						 dst, len);
		*hcsum = csum_partial(eddp->skb->data + eddp->skb_offset, len,
				      *hcsum);
		eddp->skb_offset += len;
		return;
	}
	while (len > 0) {
		if (eddp->frag < 0) {
			/* we're still in skb->data */
			left_in_frag = (eddp->skb->len -
					eddp->skb->data_len) -
					eddp->skb_offset;
			src = eddp->skb->data + eddp->skb_offset;
		} else {
			frag = &skb_shinfo(eddp->skb)->frags[eddp->frag];
			left_in_frag = frag->size - eddp->frag_offset;
			src = (u8 *)((page_to_pfn(frag->page) <<
				PAGE_SHIFT) + frag->page_offset +
				eddp->frag_offset);
		}
		if (left_in_frag <= 0) {
			/* current fragment exhausted -> advance */
			eddp->frag++;
			eddp->frag_offset = 0;
			continue;
		}
		copy_len = min(left_in_frag, len);
		memcpy(dst, src, copy_len);
		*hcsum = csum_partial(src, copy_len, *hcsum);
		dst += copy_len;
		eddp->frag_offset += copy_len;
		eddp->skb_offset += copy_len;
		len -= copy_len;
	}
}

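/*
 * Copy one segment's payload into the context, spilling across page
 * boundaries as needed.  Elements that continue a segment are marked
 * with the SBAL first/middle/last fragment flags.  Finally the
 * accumulated checksum is folded into the TCP header previously written
 * to the context.
 */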
static void qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx,
		struct qeth_eddp_data *eddp, int data_len, __wsum hcsum)
{
	u8 *page;
	int page_remainder;
	int page_offset;
	struct qeth_eddp_element *element;
	int first_lap = 1;

	QETH_DBF_TEXT(TRACE, 5, "eddpcsdt");
	page = ctx->pages[ctx->offset >> PAGE_SHIFT];
	page_offset = ctx->offset % PAGE_SIZE;
	element = &ctx->elements[ctx->num_elements];
	while (data_len) {
		page_remainder = PAGE_SIZE - page_offset;
		if (page_remainder < data_len) {
			qeth_eddp_copy_data_tcp(page + page_offset, eddp,
						page_remainder, &hcsum);
			element->length += page_remainder;
			if (first_lap)
				element->flags = SBAL_FLAGS_FIRST_FRAG;
			else
				element->flags = SBAL_FLAGS_MIDDLE_FRAG;
			ctx->num_elements++;
			element++;
			data_len -= page_remainder;
			ctx->offset += page_remainder;
			page = ctx->pages[ctx->offset >> PAGE_SHIFT];
			page_offset = 0;
			element->addr = page + page_offset;
		} else {
			qeth_eddp_copy_data_tcp(page + page_offset, eddp,
						data_len, &hcsum);
			element->length += data_len;
			if (!first_lap)
				element->flags = SBAL_FLAGS_LAST_FRAG;
			ctx->num_elements++;
			ctx->offset += data_len;
			data_len = 0;
		}
		first_lap = 0;
	}
	/* write the folded checksum into the tcp header in the context */
	((struct tcphdr *)eddp->th_in_ctx)->check = csum_fold(hcsum);
}

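/*
 * The two helpers below compute the partial checksum over the pseudo
 * header and the TCP header of one segment; qeth_eddp_copy_data_tcp()
 * later folds the payload bytes into this running sum.
 */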
static __wsum qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp,
		int data_len)
{
	__wsum phcsum; /* pseudo header checksum */

	QETH_DBF_TEXT(TRACE, 5, "eddpckt4");
	eddp->th.tcp.h.check = 0;
	/* compute pseudo header checksum */
	phcsum = csum_tcpudp_nofold(eddp->nh.ip4.h.saddr, eddp->nh.ip4.h.daddr,
				    eddp->thl + data_len, IPPROTO_TCP, 0);
	/* compute checksum of tcp header */
	return csum_partial((u8 *)&eddp->th, eddp->thl, phcsum);
}

static __wsum qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp,
		int data_len)
{
	__be32 proto;
	__be32 pkt_len;
	__wsum phcsum; /* pseudo header checksum */

	QETH_DBF_TEXT(TRACE, 5, "eddpckt6");
	eddp->th.tcp.h.check = 0;
	/* compute pseudo header checksum */
	phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.saddr,
			      sizeof(struct in6_addr), 0);
	phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.daddr,
			      sizeof(struct in6_addr), phcsum);
	proto = htonl(IPPROTO_TCP);
	phcsum = csum_partial((u8 *)&proto, sizeof(u32), phcsum);
	/* fold in the upper-layer packet length as well, mirroring what
	 * csum_tcpudp_nofold() does on the IPv4 path */
	pkt_len = htonl(eddp->thl + data_len);
	phcsum = csum_partial((u8 *)&pkt_len, sizeof(u32), phcsum);
	/* compute checksum of tcp header */
	return csum_partial((u8 *)&eddp->th, eddp->thl, phcsum);
}

static struct qeth_eddp_data *qeth_eddp_create_eddp_data(struct qeth_hdr *qh,
		u8 *nh, u8 nhl, u8 *th, u8 thl)
{
	struct qeth_eddp_data *eddp;

	QETH_DBF_TEXT(TRACE, 5, "eddpcrda");
	eddp = kzalloc(sizeof(struct qeth_eddp_data), GFP_ATOMIC);
	if (eddp) {
		eddp->nhl = nhl;
		eddp->thl = thl;
		memcpy(&eddp->qh, qh, sizeof(struct qeth_hdr));
		memcpy(&eddp->nh, nh, nhl);
		memcpy(&eddp->th, th, thl);
		eddp->frag = -1; /* initially we're in skb->data */
	}
	return eddp;
}

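/*
 * Main segmentation loop: slice the skb's payload into gso_size chunks
 * and, for each chunk, patch the prepared QDIO/IP/TCP header templates
 * (length fields, IP id, TCP sequence number, FIN/PSH on the last
 * segment), compute the header checksums, and emit headers and data
 * into the context.
 */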
static void __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
		struct qeth_eddp_data *eddp)
{
	struct tcphdr *tcph;
	int data_len;
	__wsum hcsum;

	QETH_DBF_TEXT(TRACE, 5, "eddpftcp");
	eddp->skb_offset = sizeof(struct qeth_hdr) + eddp->nhl + eddp->thl;
	if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
		eddp->skb_offset += sizeof(struct ethhdr);
		if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
			eddp->skb_offset += VLAN_HLEN;
	}
	tcph = tcp_hdr(eddp->skb);
	while (eddp->skb_offset < eddp->skb->len) {
		data_len = min((int)skb_shinfo(eddp->skb)->gso_size,
			       (int)(eddp->skb->len - eddp->skb_offset));
		/* prepare qdio hdr */
		if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
			eddp->qh.hdr.l2.pkt_length = data_len + ETH_HLEN +
						     eddp->nhl + eddp->thl;
			if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
				eddp->qh.hdr.l2.pkt_length += VLAN_HLEN;
		} else
			eddp->qh.hdr.l3.length = data_len + eddp->nhl +
						 eddp->thl;
		/* prepare ip hdr */
		if (eddp->skb->protocol == htons(ETH_P_IP)) {
			eddp->nh.ip4.h.tot_len = htons(data_len + eddp->nhl +
						       eddp->thl);
			eddp->nh.ip4.h.check = 0;
			eddp->nh.ip4.h.check =
				ip_fast_csum((u8 *)&eddp->nh.ip4.h,
					     eddp->nh.ip4.h.ihl);
		} else
			eddp->nh.ip6.h.payload_len = htons(data_len +
							   eddp->thl);
		/* prepare tcp hdr */
		if (data_len == (eddp->skb->len - eddp->skb_offset)) {
			/* last segment -> set FIN and PSH flags */
			eddp->th.tcp.h.fin = tcph->fin;
			eddp->th.tcp.h.psh = tcph->psh;
		}
		if (eddp->skb->protocol == htons(ETH_P_IP))
			hcsum = qeth_eddp_check_tcp4_hdr(eddp, data_len);
		else
			hcsum = qeth_eddp_check_tcp6_hdr(eddp, data_len);
		/* fill the next segment into the context */
		qeth_eddp_create_segment_hdrs(ctx, eddp, data_len);
		qeth_eddp_create_segment_data_tcp(ctx, eddp, data_len, hcsum);
		if (eddp->skb_offset >= eddp->skb->len)
			break;
		/* prepare headers for next round */
		if (eddp->skb->protocol == htons(ETH_P_IP))
			eddp->nh.ip4.h.id = htons(ntohs(eddp->nh.ip4.h.id) + 1);
		eddp->th.tcp.h.seq = htonl(ntohl(eddp->th.tcp.h.seq) +
					   data_len);
	}
}

static int qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
		struct sk_buff *skb, struct qeth_hdr *qhdr)
{
	struct qeth_eddp_data *eddp = NULL;

	QETH_DBF_TEXT(TRACE, 5, "eddpficx");
	/* create our segmentation headers and copy original headers */
	if (skb->protocol == htons(ETH_P_IP))
		eddp = qeth_eddp_create_eddp_data(qhdr,
						  skb_network_header(skb),
						  ip_hdrlen(skb),
						  skb_transport_header(skb),
						  tcp_hdrlen(skb));
	else
		eddp = qeth_eddp_create_eddp_data(qhdr,
						  skb_network_header(skb),
						  sizeof(struct ipv6hdr),
						  skb_transport_header(skb),
						  tcp_hdrlen(skb));

	if (eddp == NULL) {
		QETH_DBF_TEXT(TRACE, 2, "eddpfcnm");
		return -ENOMEM;
	}
	if (qhdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
		skb_set_mac_header(skb, sizeof(struct qeth_hdr));
		memcpy(&eddp->mac, eth_hdr(skb), ETH_HLEN);
		if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
			eddp->vlan[0] = skb->protocol;
			eddp->vlan[1] = htons(vlan_tx_tag_get(skb));
		}
	}
	/* the next flags will only be set on the last segment */
	eddp->th.tcp.h.fin = 0;
	eddp->th.tcp.h.psh = 0;
	eddp->skb = skb;
	/* begin segmentation and fill context */
	__qeth_eddp_fill_context_tcp(ctx, eddp);
	kfree(eddp);
	return 0;
}

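/*
 * Work out how many pages and scatter elements the context needs.  If
 * several complete segments (gso_size payload plus all headers) fit
 * into one page, each segment uses a single element; otherwise each
 * segment spans elements_per_skb consecutive elements.  One segment
 * beyond gso_segs is reserved as slack in both calculations.
 */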
static void qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx,
		struct sk_buff *skb, int hdr_len)
{
	int skbs_per_page;

	QETH_DBF_TEXT(TRACE, 5, "eddpcanp");
	/* can we put multiple skbs in one page? */
	skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->gso_size + hdr_len);
	if (skbs_per_page > 1) {
		ctx->num_pages = (skb_shinfo(skb)->gso_segs + 1) /
				 skbs_per_page + 1;
		ctx->elements_per_skb = 1;
	} else {
		/* no -> how many elements per skb? */
		ctx->elements_per_skb = (skb_shinfo(skb)->gso_size + hdr_len +
					 PAGE_SIZE) >> PAGE_SHIFT;
		ctx->num_pages = ctx->elements_per_skb *
				 (skb_shinfo(skb)->gso_segs + 1);
	}
	ctx->num_elements = ctx->elements_per_skb *
			    (skb_shinfo(skb)->gso_segs + 1);
}

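/*
 * Allocate an EDDP context: the context structure itself, the zeroed
 * pages that will hold the generated headers and copied payload, and
 * the element array describing them.  Returns NULL on any allocation
 * failure, or if a single segment would need more elements than one
 * QDIO buffer can hold.
 */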
static struct qeth_eddp_context *qeth_eddp_create_context_generic(
		struct qeth_card *card, struct sk_buff *skb, int hdr_len)
{
	struct qeth_eddp_context *ctx = NULL;
	u8 *addr;
	int i;

	QETH_DBF_TEXT(TRACE, 5, "creddpcg");
	/* create the context and allocate pages */
	ctx = kzalloc(sizeof(struct qeth_eddp_context), GFP_ATOMIC);
	if (ctx == NULL) {
		QETH_DBF_TEXT(TRACE, 2, "ceddpcn1");
		return NULL;
	}
	ctx->type = QETH_LARGE_SEND_EDDP;
	qeth_eddp_calc_num_pages(ctx, skb, hdr_len);
	if (ctx->elements_per_skb > QETH_MAX_BUFFER_ELEMENTS(card)) {
		QETH_DBF_TEXT(TRACE, 2, "ceddpcis");
		kfree(ctx);
		return NULL;
	}
	ctx->pages = kcalloc(ctx->num_pages, sizeof(u8 *), GFP_ATOMIC);
	if (ctx->pages == NULL) {
		QETH_DBF_TEXT(TRACE, 2, "ceddpcn2");
		kfree(ctx);
		return NULL;
	}
	for (i = 0; i < ctx->num_pages; ++i) {
		addr = (u8 *)get_zeroed_page(GFP_ATOMIC);
		if (addr == NULL) {
			QETH_DBF_TEXT(TRACE, 2, "ceddpcn3");
			/* only free the pages allocated so far */
			ctx->num_pages = i;
			qeth_eddp_free_context(ctx);
			return NULL;
		}
		ctx->pages[i] = addr;
	}
	ctx->elements = kcalloc(ctx->num_elements,
				sizeof(struct qeth_eddp_element), GFP_ATOMIC);
	if (ctx->elements == NULL) {
		QETH_DBF_TEXT(TRACE, 2, "ceddpcn4");
		qeth_eddp_free_context(ctx);
		return NULL;
	}
	/* reset num_elements; will be incremented again in fill_buffer to
	 * reflect number of actually used elements */
	ctx->num_elements = 0;
	return ctx;
}

static struct qeth_eddp_context *qeth_eddp_create_context_tcp(
		struct qeth_card *card, struct sk_buff *skb,
		struct qeth_hdr *qhdr)
{
	struct qeth_eddp_context *ctx = NULL;

	QETH_DBF_TEXT(TRACE, 5, "creddpct");
	if (skb->protocol == htons(ETH_P_IP))
		ctx = qeth_eddp_create_context_generic(card, skb,
						(sizeof(struct qeth_hdr) +
						 ip_hdrlen(skb) +
						 tcp_hdrlen(skb)));
	else if (skb->protocol == htons(ETH_P_IPV6))
		ctx = qeth_eddp_create_context_generic(card, skb,
			sizeof(struct qeth_hdr) + sizeof(struct ipv6hdr) +
			tcp_hdrlen(skb));
	else
		QETH_DBF_TEXT(TRACE, 2, "cetcpinv");

	if (ctx == NULL) {
		QETH_DBF_TEXT(TRACE, 2, "creddpnl");
		return NULL;
	}
	if (qeth_eddp_fill_context_tcp(ctx, skb, qhdr)) {
		QETH_DBF_TEXT(TRACE, 2, "ceddptfe");
		qeth_eddp_free_context(ctx);
		return NULL;
	}
	atomic_set(&ctx->refcnt, 1);
	return ctx;
}

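/*
 * Entry point for EDDP: build a ready-to-send context for the given
 * skb.  Only TCP segmentation is supported; any other socket protocol
 * is rejected.
 */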
struct qeth_eddp_context *qeth_eddp_create_context(struct qeth_card *card,
		struct sk_buff *skb, struct qeth_hdr *qhdr,
		unsigned char sk_protocol)
{
	QETH_DBF_TEXT(TRACE, 5, "creddpc");
	switch (sk_protocol) {
	case IPPROTO_TCP:
		return qeth_eddp_create_context_tcp(card, skb, qhdr);
	default:
		QETH_DBF_TEXT(TRACE, 2, "eddpinvp");
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(qeth_eddp_create_context);

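/*
 * For hardware TSO the card does the segmentation itself; here we only
 * convert the qeth header into a TSO header, seed the TCP checksum with
 * the pseudo header checksum, and clear the IP length/checksum fields
 * that the hardware fills in.
 */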
void qeth_tso_fill_header(struct qeth_card *card, struct qeth_hdr *qhdr,
		struct sk_buff *skb)
{
	struct qeth_hdr_tso *hdr = (struct qeth_hdr_tso *)qhdr;
	struct tcphdr *tcph = tcp_hdr(skb);
	struct iphdr *iph = ip_hdr(skb);
	struct ipv6hdr *ip6h = ipv6_hdr(skb);

	QETH_DBF_TEXT(TRACE, 5, "tsofhdr");

	/* fix header to TSO values ... */
	hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
	/* set values which are fix for the first approach ... */
	hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
	hdr->ext.imb_hdr_no = 1;
	hdr->ext.hdr_type = 1;
	hdr->ext.hdr_version = 1;
	hdr->ext.hdr_len = 28;
	/* insert non-fix values */
	hdr->ext.mss = skb_shinfo(skb)->gso_size;
	hdr->ext.dg_hdr_len = (__u16)(iph->ihl * 4 + tcph->doff * 4);
	hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
				       sizeof(struct qeth_hdr_tso));

	if (skb->protocol == htons(ETH_P_IPV6)) {
		ip6h->payload_len = 0;
		tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					       0, IPPROTO_TCP, 0);
	} else {
		/* OSA wants us to set these values ... */
		tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
						 0, IPPROTO_TCP, 0);
		iph->tot_len = 0;
		iph->check = 0;
	}
}
EXPORT_SYMBOL_GPL(qeth_tso_fill_header);

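/*
 * Software checksum fallback for the transmit path: compute the TCP or
 * UDP checksum over the pseudo header and the complete transport
 * payload, for both IPv4 and IPv6.
 */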
void qeth_tx_csum(struct sk_buff *skb)
{
	int tlen;

	if (skb->protocol == htons(ETH_P_IP)) {
		tlen = ntohs(ip_hdr(skb)->tot_len) - (ip_hdr(skb)->ihl << 2);
		switch (ip_hdr(skb)->protocol) {
		case IPPROTO_TCP:
			tcp_hdr(skb)->check = 0;
			tcp_hdr(skb)->check = csum_tcpudp_magic(
				ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
				tlen, ip_hdr(skb)->protocol,
				skb_checksum(skb, skb_transport_offset(skb),
					tlen, 0));
			break;
		case IPPROTO_UDP:
			udp_hdr(skb)->check = 0;
			udp_hdr(skb)->check = csum_tcpudp_magic(
				ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
				tlen, ip_hdr(skb)->protocol,
				skb_checksum(skb, skb_transport_offset(skb),
					tlen, 0));
			break;
		}
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		switch (ipv6_hdr(skb)->nexthdr) {
		case IPPROTO_TCP:
			tcp_hdr(skb)->check = 0;
			tcp_hdr(skb)->check = csum_ipv6_magic(
				&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
				ipv6_hdr(skb)->payload_len,
				ipv6_hdr(skb)->nexthdr,
				skb_checksum(skb, skb_transport_offset(skb),
					ipv6_hdr(skb)->payload_len, 0));
			break;
		case IPPROTO_UDP:
			udp_hdr(skb)->check = 0;
			udp_hdr(skb)->check = csum_ipv6_magic(
				&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
				ipv6_hdr(skb)->payload_len,
				ipv6_hdr(skb)->nexthdr,
				skb_checksum(skb, skb_transport_offset(skb),
					ipv6_hdr(skb)->payload_len, 0));
			break;
		}
	}
}
EXPORT_SYMBOL_GPL(qeth_tx_csum);