/*
 *  drivers/s390/net/qeth_core_offl.c
 *
 *    Copyright IBM Corp. 2007
 *    Author(s): Thomas Spatzier <tspat@de.ibm.com>,
 *               Frank Blaschka <frank.blaschka@de.ibm.com>
 */

#include <linux/errno.h>
#include <linux/ip.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/kernel.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <net/tcp.h>
#include <linux/skbuff.h>

#include <net/ip.h>
#include <net/ip6_checksum.h>

#include "qeth_core.h"
#include "qeth_core_mpc.h"
#include "qeth_core_offl.h"

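/*
 * Walk the outbound queue starting at next_buf_to_fill and determine how
 * many QDIO buffers are needed to hold all elements of an EDDP context.
 * Returns the buffer count, or -EBUSY if a buffer on the way is not empty.
 */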
int qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *queue,
                struct qeth_eddp_context *ctx)
{
        int index = queue->next_buf_to_fill;
        int elements_needed = ctx->num_elements;
        int elements_in_buffer;
        int skbs_in_buffer;
        int buffers_needed = 0;

        QETH_DBF_TEXT(TRACE, 5, "eddpcbfc");
        while (elements_needed > 0) {
                buffers_needed++;
                if (atomic_read(&queue->bufs[index].state) !=
                                QETH_QDIO_BUF_EMPTY)
                        return -EBUSY;

                elements_in_buffer = QETH_MAX_BUFFER_ELEMENTS(queue->card) -
                                     queue->bufs[index].next_element_to_fill;
                skbs_in_buffer = elements_in_buffer / ctx->elements_per_skb;
                elements_needed -= skbs_in_buffer * ctx->elements_per_skb;
                index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
        }
        return buffers_needed;
}

static void qeth_eddp_free_context(struct qeth_eddp_context *ctx)
{
        int i;

        QETH_DBF_TEXT(TRACE, 5, "eddpfctx");
        for (i = 0; i < ctx->num_pages; ++i)
                free_page((unsigned long)ctx->pages[i]);
        kfree(ctx->pages);
        kfree(ctx->elements);
        kfree(ctx);
}

static void qeth_eddp_get_context(struct qeth_eddp_context *ctx)
{
        atomic_inc(&ctx->refcnt);
}

void qeth_eddp_put_context(struct qeth_eddp_context *ctx)
{
        if (atomic_dec_return(&ctx->refcnt) == 0)
                qeth_eddp_free_context(ctx);
}
EXPORT_SYMBOL_GPL(qeth_eddp_put_context);

void qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf)
{
        struct qeth_eddp_context_reference *ref;

        QETH_DBF_TEXT(TRACE, 6, "eddprctx");
        while (!list_empty(&buf->ctx_list)) {
                ref = list_entry(buf->ctx_list.next,
                                 struct qeth_eddp_context_reference, list);
                qeth_eddp_put_context(ref->ctx);
                list_del(&ref->list);
                kfree(ref);
        }
}

static int qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf,
                struct qeth_eddp_context *ctx)
{
        struct qeth_eddp_context_reference *ref;

        QETH_DBF_TEXT(TRACE, 6, "eddprfcx");
        ref = kmalloc(sizeof(struct qeth_eddp_context_reference), GFP_ATOMIC);
        if (ref == NULL)
                return -ENOMEM;
        qeth_eddp_get_context(ctx);
        ref->ctx = ctx;
        list_add_tail(&ref->list, &buf->ctx_list);
        return 0;
}

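/*
 * Copy the scatter/gather elements of an EDDP context into the QDIO output
 * buffers starting at 'index'.  Buffers that become full (or are finished in
 * non-packing mode) are set to PRIMED; the return value is the number of
 * buffers that are ready to be flushed to the device.
 */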
int qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
                struct qeth_eddp_context *ctx, int index)
{
        struct qeth_qdio_out_buffer *buf = NULL;
        struct qdio_buffer *buffer;
        int elements = ctx->num_elements;
        int element = 0;
        int flush_cnt = 0;
        int must_refcnt = 1;
        int i;

        QETH_DBF_TEXT(TRACE, 5, "eddpfibu");
        while (elements > 0) {
                buf = &queue->bufs[index];
                if (atomic_read(&buf->state) != QETH_QDIO_BUF_EMPTY) {
                        /* normally this should not happen since we checked for
                         * available elements in
                         * qeth_eddp_check_buffers_for_context() */
                        PRINT_WARN("could only partially fill eddp "
                                   "buffer!\n");
                        goto out;
                }
                /* check if the whole next skb fits into current buffer */
                if ((QETH_MAX_BUFFER_ELEMENTS(queue->card) -
                                        buf->next_element_to_fill)
                                < ctx->elements_per_skb) {
                        /* no -> go to next buffer */
                        atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
                        index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
                        flush_cnt++;
                        /* new buffer, so we have to add ctx to buffer's
                         * ctx_list and increment ctx's refcnt */
                        must_refcnt = 1;
                        continue;
                }
                if (must_refcnt) {
                        must_refcnt = 0;
                        if (qeth_eddp_buf_ref_context(buf, ctx)) {
                                PRINT_WARN("no memory to create eddp context "
                                           "reference\n");
                                goto out_check;
                        }
                }
                buffer = buf->buffer;
                /* fill one skb into buffer */
                for (i = 0; i < ctx->elements_per_skb; ++i) {
                        if (ctx->elements[element].length != 0) {
                                buffer->element[buf->next_element_to_fill].
                                        addr = ctx->elements[element].addr;
                                buffer->element[buf->next_element_to_fill].
                                        length = ctx->elements[element].length;
                                buffer->element[buf->next_element_to_fill].
                                        flags = ctx->elements[element].flags;
                                buf->next_element_to_fill++;
                        }
                        element++;
                        elements--;
                }
        }
out_check:
        if (!queue->do_pack) {
                QETH_DBF_TEXT(TRACE, 6, "fillbfnp");
                /* set state to PRIMED -> will be flushed */
                if (buf->next_element_to_fill > 0) {
                        atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
                        flush_cnt++;
                }
        } else {
                if (queue->card->options.performance_stats)
                        queue->card->perf_stats.skbs_sent_pack++;
                QETH_DBF_TEXT(TRACE, 6, "fillbfpa");
                if (buf->next_element_to_fill >=
                                QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
                        /*
                         * packed buffer is full -> set state PRIMED
                         * -> will be flushed
                         */
                        atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
                        flush_cnt++;
                }
        }
out:
        return flush_cnt;
}

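/*
 * Build the headers of one TSO segment inside the context pages: the qeth
 * header, an optional MAC header and VLAN tag, then the network and
 * transport headers.  The in-context copies of the IP and TCP headers are
 * remembered in nh_in_ctx/th_in_ctx so they can be fixed up later.
 */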
static void qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
                struct qeth_eddp_data *eddp, int data_len)
{
        u8 *page;
        int page_remainder;
        int page_offset;
        int pkt_len;
        struct qeth_eddp_element *element;

        QETH_DBF_TEXT(TRACE, 5, "eddpcrsh");
        page = ctx->pages[ctx->offset >> PAGE_SHIFT];
        page_offset = ctx->offset % PAGE_SIZE;
        element = &ctx->elements[ctx->num_elements];
        pkt_len = eddp->nhl + eddp->thl + data_len;
        /* FIXME: layer2 and VLAN !!! */
        if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2)
                pkt_len += ETH_HLEN;
        if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
                pkt_len += VLAN_HLEN;
        /* does complete packet fit in current page ? */
        page_remainder = PAGE_SIZE - page_offset;
        if (page_remainder < (sizeof(struct qeth_hdr) + pkt_len)) {
                /* no -> go to start of next page */
                ctx->offset += page_remainder;
                page = ctx->pages[ctx->offset >> PAGE_SHIFT];
                page_offset = 0;
        }
        memcpy(page + page_offset, &eddp->qh, sizeof(struct qeth_hdr));
        element->addr = page + page_offset;
        element->length = sizeof(struct qeth_hdr);
        ctx->offset += sizeof(struct qeth_hdr);
        page_offset += sizeof(struct qeth_hdr);
        /* add mac header */
        if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
                memcpy(page + page_offset, &eddp->mac, ETH_HLEN);
                element->length += ETH_HLEN;
                ctx->offset += ETH_HLEN;
                page_offset += ETH_HLEN;
        }
        /* add VLAN tag */
        if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
                memcpy(page + page_offset, &eddp->vlan, VLAN_HLEN);
                element->length += VLAN_HLEN;
                ctx->offset += VLAN_HLEN;
                page_offset += VLAN_HLEN;
        }
        /* add network header */
        memcpy(page + page_offset, (u8 *)&eddp->nh, eddp->nhl);
        element->length += eddp->nhl;
        eddp->nh_in_ctx = page + page_offset;
        ctx->offset += eddp->nhl;
        page_offset += eddp->nhl;
        /* add transport header */
        memcpy(page + page_offset, (u8 *)&eddp->th, eddp->thl);
        element->length += eddp->thl;
        eddp->th_in_ctx = page + page_offset;
        ctx->offset += eddp->thl;
}

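/*
 * Copy 'len' bytes of TCP payload from the original skb (linear data first,
 * then the page fragments) to 'dst', folding the copied bytes into the
 * running checksum '*hcsum' along the way.
 */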
static void qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp,
                int len, __wsum *hcsum)
{
        struct skb_frag_struct *frag;
        int left_in_frag;
        int copy_len;
        u8 *src;

        QETH_DBF_TEXT(TRACE, 5, "eddpcdtc");
        if (skb_shinfo(eddp->skb)->nr_frags == 0) {
                skb_copy_from_linear_data_offset(eddp->skb, eddp->skb_offset,
                                                 dst, len);
                *hcsum = csum_partial(eddp->skb->data + eddp->skb_offset, len,
                                      *hcsum);
                eddp->skb_offset += len;
        } else {
                while (len > 0) {
                        if (eddp->frag < 0) {
                                /* we're in skb->data */
                                left_in_frag = (eddp->skb->len -
                                                eddp->skb->data_len) -
                                                eddp->skb_offset;
                                src = eddp->skb->data + eddp->skb_offset;
                        } else {
                                frag = &skb_shinfo(eddp->skb)->frags[
                                        eddp->frag];
                                left_in_frag = frag->size - eddp->frag_offset;
                                src = (u8 *)((page_to_pfn(frag->page) <<
                                        PAGE_SHIFT) + frag->page_offset +
                                        eddp->frag_offset);
                        }
                        if (left_in_frag <= 0) {
                                eddp->frag++;
                                eddp->frag_offset = 0;
                                continue;
                        }
                        copy_len = min(left_in_frag, len);
                        memcpy(dst, src, copy_len);
                        *hcsum = csum_partial(src, copy_len, *hcsum);
                        dst += copy_len;
                        eddp->frag_offset += copy_len;
                        eddp->skb_offset += copy_len;
                        len -= copy_len;
                }
        }
}

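/*
 * Append the payload of one segment to the context, spilling over page
 * boundaries into additional buffer elements (FIRST/MIDDLE/LAST_FRAG flags)
 * as needed, and finally store the folded checksum in the TCP header that
 * was copied into the context.
 */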
static void qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx,
                struct qeth_eddp_data *eddp, int data_len, __wsum hcsum)
{
        u8 *page;
        int page_remainder;
        int page_offset;
        struct qeth_eddp_element *element;
        int first_lap = 1;

        QETH_DBF_TEXT(TRACE, 5, "eddpcsdt");
        page = ctx->pages[ctx->offset >> PAGE_SHIFT];
        page_offset = ctx->offset % PAGE_SIZE;
        element = &ctx->elements[ctx->num_elements];
        while (data_len) {
                page_remainder = PAGE_SIZE - page_offset;
                if (page_remainder < data_len) {
                        qeth_eddp_copy_data_tcp(page + page_offset, eddp,
                                                page_remainder, &hcsum);
                        element->length += page_remainder;
                        if (first_lap)
                                element->flags = SBAL_FLAGS_FIRST_FRAG;
                        else
                                element->flags = SBAL_FLAGS_MIDDLE_FRAG;
                        ctx->num_elements++;
                        element++;
                        data_len -= page_remainder;
                        ctx->offset += page_remainder;
                        page = ctx->pages[ctx->offset >> PAGE_SHIFT];
                        page_offset = 0;
                        element->addr = page + page_offset;
                } else {
                        qeth_eddp_copy_data_tcp(page + page_offset, eddp,
                                                data_len, &hcsum);
                        element->length += data_len;
                        if (!first_lap)
                                element->flags = SBAL_FLAGS_LAST_FRAG;
                        ctx->num_elements++;
                        ctx->offset += data_len;
                        data_len = 0;
                }
                first_lap = 0;
        }
        ((struct tcphdr *)eddp->th_in_ctx)->check = csum_fold(hcsum);
}

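/*
 * Per-segment checksum helpers: compute the TCP pseudo header checksum for
 * one segment (the IPv4 variant also folds in the TCP header here); the
 * payload bytes are added later in qeth_eddp_copy_data_tcp().
 */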
static __wsum qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp,
                int data_len)
{
        __wsum phcsum; /* pseudo header checksum */

        QETH_DBF_TEXT(TRACE, 5, "eddpckt4");
        eddp->th.tcp.h.check = 0;
        /* compute pseudo header checksum */
        phcsum = csum_tcpudp_nofold(eddp->nh.ip4.h.saddr, eddp->nh.ip4.h.daddr,
                                    eddp->thl + data_len, IPPROTO_TCP, 0);
        /* compute checksum of tcp header */
        return csum_partial((u8 *)&eddp->th, eddp->thl, phcsum);
}

static __wsum qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp,
                int data_len)
{
        __be32 proto;
        __wsum phcsum; /* pseudo header checksum */

        QETH_DBF_TEXT(TRACE, 5, "eddpckt6");
        eddp->th.tcp.h.check = 0;
        /* compute pseudo header checksum */
        phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.saddr,
                              sizeof(struct in6_addr), 0);
        phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.daddr,
                              sizeof(struct in6_addr), phcsum);
        proto = htonl(IPPROTO_TCP);
        phcsum = csum_partial((u8 *)&proto, sizeof(u32), phcsum);
        return phcsum;
}

static struct qeth_eddp_data *qeth_eddp_create_eddp_data(struct qeth_hdr *qh,
                u8 *nh, u8 nhl, u8 *th, u8 thl)
{
        struct qeth_eddp_data *eddp;

        QETH_DBF_TEXT(TRACE, 5, "eddpcrda");
        eddp = kzalloc(sizeof(struct qeth_eddp_data), GFP_ATOMIC);
        if (eddp) {
                eddp->nhl = nhl;
                eddp->thl = thl;
                memcpy(&eddp->qh, qh, sizeof(struct qeth_hdr));
                memcpy(&eddp->nh, nh, nhl);
                memcpy(&eddp->th, th, thl);
                eddp->frag = -1; /* initially we're in skb->data */
        }
        return eddp;
}

static void __qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
                struct qeth_eddp_data *eddp)
{
        struct tcphdr *tcph;
        int data_len;
        __wsum hcsum;

        QETH_DBF_TEXT(TRACE, 5, "eddpftcp");
        eddp->skb_offset = sizeof(struct qeth_hdr) + eddp->nhl + eddp->thl;
        if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
                eddp->skb_offset += sizeof(struct ethhdr);
                if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
                        eddp->skb_offset += VLAN_HLEN;
        }
        tcph = tcp_hdr(eddp->skb);
        while (eddp->skb_offset < eddp->skb->len) {
                data_len = min((int)skb_shinfo(eddp->skb)->gso_size,
                               (int)(eddp->skb->len - eddp->skb_offset));
                /* prepare qdio hdr */
                if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
                        eddp->qh.hdr.l2.pkt_length = data_len + ETH_HLEN +
                                                     eddp->nhl + eddp->thl;
                        if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
                                eddp->qh.hdr.l2.pkt_length += VLAN_HLEN;
                } else
                        eddp->qh.hdr.l3.length = data_len + eddp->nhl +
                                                 eddp->thl;
                /* prepare ip hdr */
                if (eddp->skb->protocol == htons(ETH_P_IP)) {
                        eddp->nh.ip4.h.tot_len = htons(data_len + eddp->nhl +
                                                       eddp->thl);
                        eddp->nh.ip4.h.check = 0;
                        eddp->nh.ip4.h.check =
                                ip_fast_csum((u8 *)&eddp->nh.ip4.h,
                                             eddp->nh.ip4.h.ihl);
                } else
                        eddp->nh.ip6.h.payload_len = htons(data_len +
                                                           eddp->thl);
                /* prepare tcp hdr */
                if (data_len == (eddp->skb->len - eddp->skb_offset)) {
                        /* last segment -> set FIN and PSH flags */
                        eddp->th.tcp.h.fin = tcph->fin;
                        eddp->th.tcp.h.psh = tcph->psh;
                }
                if (eddp->skb->protocol == htons(ETH_P_IP))
                        hcsum = qeth_eddp_check_tcp4_hdr(eddp, data_len);
                else
                        hcsum = qeth_eddp_check_tcp6_hdr(eddp, data_len);
                /* fill the next segment into the context */
                qeth_eddp_create_segment_hdrs(ctx, eddp, data_len);
                qeth_eddp_create_segment_data_tcp(ctx, eddp, data_len, hcsum);
                if (eddp->skb_offset >= eddp->skb->len)
                        break;
                /* prepare headers for next round */
                if (eddp->skb->protocol == htons(ETH_P_IP))
                        eddp->nh.ip4.h.id = htons(ntohs(eddp->nh.ip4.h.id) + 1);
                eddp->th.tcp.h.seq = htonl(ntohl(eddp->th.tcp.h.seq) +
                                           data_len);
        }
}

static int qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
                struct sk_buff *skb, struct qeth_hdr *qhdr)
{
        struct qeth_eddp_data *eddp = NULL;

        QETH_DBF_TEXT(TRACE, 5, "eddpficx");
        /* create our segmentation headers and copy original headers */
        if (skb->protocol == htons(ETH_P_IP))
                eddp = qeth_eddp_create_eddp_data(qhdr,
                                                  skb_network_header(skb),
                                                  ip_hdrlen(skb),
                                                  skb_transport_header(skb),
                                                  tcp_hdrlen(skb));
        else
                eddp = qeth_eddp_create_eddp_data(qhdr,
                                                  skb_network_header(skb),
                                                  sizeof(struct ipv6hdr),
                                                  skb_transport_header(skb),
                                                  tcp_hdrlen(skb));

        if (eddp == NULL) {
                QETH_DBF_TEXT(TRACE, 2, "eddpfcnm");
                return -ENOMEM;
        }
        if (qhdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
                skb_set_mac_header(skb, sizeof(struct qeth_hdr));
                memcpy(&eddp->mac, eth_hdr(skb), ETH_HLEN);
                if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
                        eddp->vlan[0] = skb->protocol;
                        eddp->vlan[1] = htons(vlan_tx_tag_get(skb));
                }
        }
        /* the following flags will only be set on the last segment */
        eddp->th.tcp.h.fin = 0;
        eddp->th.tcp.h.psh = 0;
        eddp->skb = skb;
        /* begin segmentation and fill context */
        __qeth_eddp_fill_context_tcp(ctx, eddp);
        kfree(eddp);
        return 0;
}

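/*
 * Estimate how many pages and buffer elements a context needs.  Example
 * (illustrative numbers, assuming a 4KB PAGE_SIZE): with gso_size 1460 and
 * hdr_len 60 a page holds two segments, so skbs_per_page = 2 and
 * elements_per_skb = 1; for gso_segs = 10 this gives
 * num_pages = (10 + 1) / 2 + 1 = 6 and num_elements = 11.
 */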
static void qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx,
                struct sk_buff *skb, int hdr_len)
{
        int skbs_per_page;

        QETH_DBF_TEXT(TRACE, 5, "eddpcanp");
        /* can we put multiple skbs in one page? */
        skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->gso_size + hdr_len);
        if (skbs_per_page > 1) {
                ctx->num_pages = (skb_shinfo(skb)->gso_segs + 1) /
                                 skbs_per_page + 1;
                ctx->elements_per_skb = 1;
        } else {
                /* no -> how many elements per skb? */
                ctx->elements_per_skb = (skb_shinfo(skb)->gso_size + hdr_len +
                                         PAGE_SIZE) >> PAGE_SHIFT;
                ctx->num_pages = ctx->elements_per_skb *
                                 (skb_shinfo(skb)->gso_segs + 1);
        }
        ctx->num_elements = ctx->elements_per_skb *
                            (skb_shinfo(skb)->gso_segs + 1);
}

static struct qeth_eddp_context *qeth_eddp_create_context_generic(
                struct qeth_card *card, struct sk_buff *skb, int hdr_len)
{
        struct qeth_eddp_context *ctx = NULL;
        u8 *addr;
        int i;

        QETH_DBF_TEXT(TRACE, 5, "creddpcg");
        /* create the context and allocate pages */
        ctx = kzalloc(sizeof(struct qeth_eddp_context), GFP_ATOMIC);
        if (ctx == NULL) {
                QETH_DBF_TEXT(TRACE, 2, "ceddpcn1");
                return NULL;
        }
        ctx->type = QETH_LARGE_SEND_EDDP;
        qeth_eddp_calc_num_pages(ctx, skb, hdr_len);
        if (ctx->elements_per_skb > QETH_MAX_BUFFER_ELEMENTS(card)) {
                QETH_DBF_TEXT(TRACE, 2, "ceddpcis");
                kfree(ctx);
                return NULL;
        }
        ctx->pages = kcalloc(ctx->num_pages, sizeof(u8 *), GFP_ATOMIC);
        if (ctx->pages == NULL) {
                QETH_DBF_TEXT(TRACE, 2, "ceddpcn2");
                kfree(ctx);
                return NULL;
        }
        for (i = 0; i < ctx->num_pages; ++i) {
                addr = (u8 *)get_zeroed_page(GFP_ATOMIC);
                if (addr == NULL) {
                        QETH_DBF_TEXT(TRACE, 2, "ceddpcn3");
                        ctx->num_pages = i;
                        qeth_eddp_free_context(ctx);
                        return NULL;
                }
                ctx->pages[i] = addr;
        }
        ctx->elements = kcalloc(ctx->num_elements,
                                sizeof(struct qeth_eddp_element), GFP_ATOMIC);
        if (ctx->elements == NULL) {
                QETH_DBF_TEXT(TRACE, 2, "ceddpcn4");
                qeth_eddp_free_context(ctx);
                return NULL;
        }
        /* reset num_elements; will be incremented again in fill_buffer to
         * reflect number of actually used elements */
        ctx->num_elements = 0;
        return ctx;
}

static struct qeth_eddp_context *qeth_eddp_create_context_tcp(
                struct qeth_card *card, struct sk_buff *skb,
                struct qeth_hdr *qhdr)
{
        struct qeth_eddp_context *ctx = NULL;

        QETH_DBF_TEXT(TRACE, 5, "creddpct");
        if (skb->protocol == htons(ETH_P_IP))
                ctx = qeth_eddp_create_context_generic(card, skb,
                                                (sizeof(struct qeth_hdr) +
                                                 ip_hdrlen(skb) +
                                                 tcp_hdrlen(skb)));
        else if (skb->protocol == htons(ETH_P_IPV6))
                ctx = qeth_eddp_create_context_generic(card, skb,
                        sizeof(struct qeth_hdr) + sizeof(struct ipv6hdr) +
                        tcp_hdrlen(skb));
        else
                QETH_DBF_TEXT(TRACE, 2, "cetcpinv");

        if (ctx == NULL) {
                QETH_DBF_TEXT(TRACE, 2, "creddpnl");
                return NULL;
        }
        if (qeth_eddp_fill_context_tcp(ctx, skb, qhdr)) {
                QETH_DBF_TEXT(TRACE, 2, "ceddptfe");
                qeth_eddp_free_context(ctx);
                return NULL;
        }
        atomic_set(&ctx->refcnt, 1);
        return ctx;
}

struct qeth_eddp_context *qeth_eddp_create_context(struct qeth_card *card,
                struct sk_buff *skb, struct qeth_hdr *qhdr,
                unsigned char sk_protocol)
{
        QETH_DBF_TEXT(TRACE, 5, "creddpc");
        switch (sk_protocol) {
        case IPPROTO_TCP:
                return qeth_eddp_create_context_tcp(card, skb, qhdr);
        default:
                QETH_DBF_TEXT(TRACE, 2, "eddpinvp");
        }
        return NULL;
}
EXPORT_SYMBOL_GPL(qeth_eddp_create_context);

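/*
 * Fill the qeth TSO extension header for a GSO skb: mark the header as
 * TYPE_TSO, store the MSS and header/payload lengths, and precompute the
 * pseudo header checksum the way the OSA adapter expects it.
 */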
void qeth_tso_fill_header(struct qeth_card *card, struct qeth_hdr *qhdr,
                struct sk_buff *skb)
{
        struct qeth_hdr_tso *hdr = (struct qeth_hdr_tso *)qhdr;
        struct tcphdr *tcph = tcp_hdr(skb);
        struct iphdr *iph = ip_hdr(skb);
        struct ipv6hdr *ip6h = ipv6_hdr(skb);

        QETH_DBF_TEXT(TRACE, 5, "tsofhdr");

        /* fix header to TSO values ... */
        hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
        /* set values which are fix for the first approach ... */
        hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
        hdr->ext.imb_hdr_no = 1;
        hdr->ext.hdr_type = 1;
        hdr->ext.hdr_version = 1;
        hdr->ext.hdr_len = 28;
        /* insert non-fix values */
        hdr->ext.mss = skb_shinfo(skb)->gso_size;
        hdr->ext.dg_hdr_len = (__u16)(iph->ihl * 4 + tcph->doff * 4);
        hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
                                       sizeof(struct qeth_hdr_tso));

        if (skb->protocol == htons(ETH_P_IPV6)) {
                ip6h->payload_len = 0;
                tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
                                               0, IPPROTO_TCP, 0);
        } else {
                /* OSA wants us to set these values ... */
                tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
                                                 0, IPPROTO_TCP, 0);
                iph->tot_len = 0;
                iph->check = 0;
        }
}
EXPORT_SYMBOL_GPL(qeth_tso_fill_header);

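/*
 * Software checksum fallback for TCP and UDP over IPv4 and IPv6: compute
 * the complete transport checksum over the packet before it is handed to
 * the hardware.
 */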
void qeth_tx_csum(struct sk_buff *skb)
{
        int tlen;

        if (skb->protocol == htons(ETH_P_IP)) {
                tlen = ntohs(ip_hdr(skb)->tot_len) - (ip_hdr(skb)->ihl << 2);
                switch (ip_hdr(skb)->protocol) {
                case IPPROTO_TCP:
                        tcp_hdr(skb)->check = 0;
                        tcp_hdr(skb)->check = csum_tcpudp_magic(
                                ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
                                tlen, ip_hdr(skb)->protocol,
                                skb_checksum(skb, skb_transport_offset(skb),
                                        tlen, 0));
                        break;
                case IPPROTO_UDP:
                        udp_hdr(skb)->check = 0;
                        udp_hdr(skb)->check = csum_tcpudp_magic(
                                ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
                                tlen, ip_hdr(skb)->protocol,
                                skb_checksum(skb, skb_transport_offset(skb),
                                        tlen, 0));
                        break;
                }
        } else if (skb->protocol == htons(ETH_P_IPV6)) {
                switch (ipv6_hdr(skb)->nexthdr) {
                case IPPROTO_TCP:
                        tcp_hdr(skb)->check = 0;
                        tcp_hdr(skb)->check = csum_ipv6_magic(
                                &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
                                ipv6_hdr(skb)->payload_len,
                                ipv6_hdr(skb)->nexthdr,
                                skb_checksum(skb, skb_transport_offset(skb),
                                        ipv6_hdr(skb)->payload_len, 0));
                        break;
                case IPPROTO_UDP:
                        udp_hdr(skb)->check = 0;
                        udp_hdr(skb)->check = csum_ipv6_magic(
                                &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
                                ipv6_hdr(skb)->payload_len,
                                ipv6_hdr(skb)->nexthdr,
                                skb_checksum(skb, skb_transport_offset(skb),
                                        ipv6_hdr(skb)->payload_len, 0));
                        break;
                }
        }
}
EXPORT_SYMBOL_GPL(qeth_tx_csum);