/*
 * linux/drivers/s390/net/qeth_eddp.c ($Revision: 1.13 $)
 *
 * Enhanced Device Driver Packing (EDDP) support for the qeth driver.
 *
 * Copyright 2004 IBM Corporation
 *
 * Author(s): Thomas Spatzier <tspat@de.ibm.com>
 *
 * $Revision: 1.13 $	 $Date: 2005/05/04 20:19:18 $
 *
 */
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/ip.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/kernel.h>
#include <linux/tcp.h>
#include <net/tcp.h>
#include <linux/skbuff.h>

#include <net/ip.h>

#include "qeth.h"
#include "qeth_mpc.h"
#include "qeth_eddp.h"
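
/*
 * Check whether enough empty output buffers are available to hold all
 * elements of the given context, starting at the queue's next buffer
 * to fill. Returns the number of buffers needed, or -EBUSY if a buffer
 * on the way is not empty.
 */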
int
qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *queue,
				    struct qeth_eddp_context *ctx)
{
	int index = queue->next_buf_to_fill;
	int elements_needed = ctx->num_elements;
	int elements_in_buffer;
	int skbs_in_buffer;
	int buffers_needed = 0;

	QETH_DBF_TEXT(trace, 5, "eddpcbfc");
	while (elements_needed > 0) {
		buffers_needed++;
		if (atomic_read(&queue->bufs[index].state) !=
				QETH_QDIO_BUF_EMPTY)
			return -EBUSY;
		elements_in_buffer = QETH_MAX_BUFFER_ELEMENTS(queue->card) -
				     queue->bufs[index].next_element_to_fill;
		skbs_in_buffer = elements_in_buffer / ctx->elements_per_skb;
		elements_needed -= skbs_in_buffer * ctx->elements_per_skb;
		index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
	}
	return buffers_needed;
}
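
/*
 * Free an EDDP context: release its data pages, the page and element
 * arrays, and the context structure itself.
 */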
static inline void
qeth_eddp_free_context(struct qeth_eddp_context *ctx)
{
	int i;

	QETH_DBF_TEXT(trace, 5, "eddpfctx");
	for (i = 0; i < ctx->num_pages; ++i)
		free_page((unsigned long)ctx->pages[i]);
	kfree(ctx->pages);
	if (ctx->elements != NULL)
		kfree(ctx->elements);
	kfree(ctx);
}
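
/*
 * Reference counting for contexts: a context may be referenced by
 * several output buffers and is freed when the last reference is put.
 */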
static inline void
qeth_eddp_get_context(struct qeth_eddp_context *ctx)
{
	atomic_inc(&ctx->refcnt);
}
void
qeth_eddp_put_context(struct qeth_eddp_context *ctx)
{
	if (atomic_dec_return(&ctx->refcnt) == 0)
		qeth_eddp_free_context(ctx);
}
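
/*
 * Drop all context references held by an output buffer: put each
 * referenced context and free the reference nodes.
 */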
void
qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf)
{
	struct qeth_eddp_context_reference *ref;

	QETH_DBF_TEXT(trace, 6, "eddprctx");
	while (!list_empty(&buf->ctx_list)) {
		ref = list_entry(buf->ctx_list.next,
				 struct qeth_eddp_context_reference, list);
		qeth_eddp_put_context(ref->ctx);
		list_del(&ref->list);
		kfree(ref);
	}
}
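
/*
 * Attach a reference to the given context to an output buffer and take
 * a reference on the context. Returns 0 on success, -ENOMEM otherwise.
 */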
static inline int
qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf,
			  struct qeth_eddp_context *ctx)
{
	struct qeth_eddp_context_reference *ref;

	QETH_DBF_TEXT(trace, 6, "eddprfcx");
	ref = kmalloc(sizeof(struct qeth_eddp_context_reference), GFP_ATOMIC);
	if (ref == NULL)
		return -ENOMEM;
	qeth_eddp_get_context(ctx);
	ref->ctx = ctx;
	list_add_tail(&ref->list, &buf->ctx_list);
	return 0;
}
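
/*
 * Map the elements of a context into the QDIO output buffers, starting
 * at the given buffer index. Only whole skbs are put into one buffer;
 * a buffer that cannot take another complete skb is primed so it will
 * be flushed. Returns the number of buffers primed, or -EBUSY if the
 * first buffer was not empty.
 */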
int
qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
		      struct qeth_eddp_context *ctx,
		      int index)
{
	struct qeth_qdio_out_buffer *buf = NULL;
	struct qdio_buffer *buffer;
	int elements = ctx->num_elements;
	int element = 0;
	int flush_cnt = 0;
	int must_refcnt = 1;
	int i;

	QETH_DBF_TEXT(trace, 5, "eddpfibu");
	while (elements > 0) {
		buf = &queue->bufs[index];
		if (atomic_read(&buf->state) != QETH_QDIO_BUF_EMPTY) {
			/* normally this should not happen, since we checked
			 * for available elements in
			 * qeth_eddp_check_buffers_for_context */
			if (element == 0)
				return -EBUSY;
			PRINT_WARN("could only partially fill eddp "
				   "buffer!\n");
			goto out;
		}
		/* check if the whole next skb fits into current buffer */
		if ((QETH_MAX_BUFFER_ELEMENTS(queue->card) -
					buf->next_element_to_fill)
				< ctx->elements_per_skb) {
			/* no -> go to next buffer */
			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
			index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
			flush_cnt++;
			/* new buffer, so we have to add ctx to the buffer's
			 * ctx_list and increment ctx's refcnt */
			must_refcnt = 1;
			continue;
		}
		if (must_refcnt) {
			must_refcnt = 0;
			if (qeth_eddp_buf_ref_context(buf, ctx)) {
				PRINT_WARN("no memory to create eddp context "
					   "reference\n");
				goto out_check;
			}
		}
		buffer = buf->buffer;
		/* fill one skb into buffer */
		for (i = 0; i < ctx->elements_per_skb; ++i) {
			buffer->element[buf->next_element_to_fill].addr =
				ctx->elements[element].addr;
			buffer->element[buf->next_element_to_fill].length =
				ctx->elements[element].length;
			buffer->element[buf->next_element_to_fill].flags =
				ctx->elements[element].flags;
			buf->next_element_to_fill++;
			element++;
			elements--;
		}
	}
out_check:
	if (!queue->do_pack) {
		QETH_DBF_TEXT(trace, 6, "fillbfnp");
		/* set state to PRIMED -> will be flushed */
		if (buf->next_element_to_fill > 0) {
			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
			flush_cnt++;
		}
	} else {
#ifdef CONFIG_QETH_PERF_STATS
		queue->card->perf_stats.skbs_sent_pack++;
#endif
		QETH_DBF_TEXT(trace, 6, "fillbfpa");
		if (buf->next_element_to_fill >=
				QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
			/* the packed buffer is full -> set state PRIMED
			 * -> will be flushed */
			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
			flush_cnt++;
		}
	}
out:
	return flush_cnt;
}
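
/*
 * Copy the headers of the next segment (qeth header, optional MAC and
 * VLAN headers, network and transport header) into the context pages
 * and describe them in the current buffer element. If the complete
 * segment does not fit into the current page, it starts on the next
 * page instead.
 */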
static inline void
qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
			      struct qeth_eddp_data *eddp, int data_len)
{
	u8 *page;
	int page_remainder;
	int page_offset;
	int pkt_len;
	struct qeth_eddp_element *element;

	QETH_DBF_TEXT(trace, 5, "eddpcrsh");
	page = ctx->pages[ctx->offset >> PAGE_SHIFT];
	page_offset = ctx->offset % PAGE_SIZE;
	element = &ctx->elements[ctx->num_elements];
	pkt_len = eddp->nhl + eddp->thl + data_len;
	/* FIXME: layer2 and VLAN !!! */
	if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2)
		pkt_len += ETH_HLEN;
	if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
		pkt_len += VLAN_HLEN;
	/* does the complete packet fit into the current page? */
	page_remainder = PAGE_SIZE - page_offset;
	if (page_remainder < (sizeof(struct qeth_hdr) + pkt_len)) {
		/* no -> go to start of next page */
		ctx->offset += page_remainder;
		page = ctx->pages[ctx->offset >> PAGE_SHIFT];
		page_offset = 0;
	}
	memcpy(page + page_offset, &eddp->qh, sizeof(struct qeth_hdr));
	element->addr = page + page_offset;
	element->length = sizeof(struct qeth_hdr);
	ctx->offset += sizeof(struct qeth_hdr);
	page_offset += sizeof(struct qeth_hdr);
	/* add mac header (?) */
	if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
		memcpy(page + page_offset, &eddp->mac, ETH_HLEN);
		element->length += ETH_HLEN;
		ctx->offset += ETH_HLEN;
		page_offset += ETH_HLEN;
	}
	/* add VLAN tag */
	if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
		memcpy(page + page_offset, &eddp->vlan, VLAN_HLEN);
		element->length += VLAN_HLEN;
		ctx->offset += VLAN_HLEN;
		page_offset += VLAN_HLEN;
	}
	/* add network header */
	memcpy(page + page_offset, (u8 *)&eddp->nh, eddp->nhl);
	element->length += eddp->nhl;
	eddp->nh_in_ctx = page + page_offset;
	ctx->offset += eddp->nhl;
	page_offset += eddp->nhl;
	/* add transport header */
	memcpy(page + page_offset, (u8 *)&eddp->th, eddp->thl);
	element->length += eddp->thl;
	eddp->th_in_ctx = page + page_offset;
	ctx->offset += eddp->thl;
}
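
/*
 * Copy len bytes of TCP payload from the original skb (linear data and
 * page fragments) to dst, folding the copied bytes into the running
 * checksum and advancing the copy position kept in the eddp data.
 */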
static inline void
qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len,
			u32 *hcsum)
{
	struct skb_frag_struct *frag;
	int left_in_frag;
	int copy_len;
	u8 *src;

	QETH_DBF_TEXT(trace, 5, "eddpcdtc");
	if (skb_shinfo(eddp->skb)->nr_frags == 0) {
		memcpy(dst, eddp->skb->data + eddp->skb_offset, len);
		*hcsum = csum_partial(eddp->skb->data + eddp->skb_offset, len,
				      *hcsum);
		eddp->skb_offset += len;
		return;
	}
	while (len > 0) {
		if (eddp->frag < 0) {
			/* we're in skb->data */
			left_in_frag = (eddp->skb->len - eddp->skb->data_len)
					- eddp->skb_offset;
			src = eddp->skb->data + eddp->skb_offset;
		} else {
			frag = &skb_shinfo(eddp->skb)->
				frags[eddp->frag];
			left_in_frag = frag->size - eddp->frag_offset;
			src = (u8 *)(
				(page_to_pfn(frag->page) << PAGE_SHIFT) +
				frag->page_offset + eddp->frag_offset);
		}
		if (left_in_frag <= 0) {
			eddp->frag++;
			eddp->frag_offset = 0;
			continue;
		}
		copy_len = min(left_in_frag, len);
		memcpy(dst, src, copy_len);
		*hcsum = csum_partial(src, copy_len, *hcsum);
		dst += copy_len;
		eddp->frag_offset += copy_len;
		eddp->skb_offset += copy_len;
		len -= copy_len;
	}
}
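
/*
 * Copy one segment's payload into the context pages, creating buffer
 * elements with the appropriate SBAL fragment flags, and write the
 * folded checksum into the TCP header already placed in the context.
 */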
static inline void
qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx,
				  struct qeth_eddp_data *eddp, int data_len,
				  u32 hcsum)
{
	u8 *page;
	int page_remainder;
	int page_offset;
	struct qeth_eddp_element *element;
	int first_lap = 1;

	QETH_DBF_TEXT(trace, 5, "eddpcsdt");
	page = ctx->pages[ctx->offset >> PAGE_SHIFT];
	page_offset = ctx->offset % PAGE_SIZE;
	element = &ctx->elements[ctx->num_elements];
	while (data_len) {
		page_remainder = PAGE_SIZE - page_offset;
		if (page_remainder < data_len) {
			qeth_eddp_copy_data_tcp(page + page_offset, eddp,
						page_remainder, &hcsum);
			element->length += page_remainder;
			if (first_lap)
				element->flags = SBAL_FLAGS_FIRST_FRAG;
			else
				element->flags = SBAL_FLAGS_MIDDLE_FRAG;
			ctx->num_elements++;
			element++;
			data_len -= page_remainder;
			ctx->offset += page_remainder;
			page = ctx->pages[ctx->offset >> PAGE_SHIFT];
			page_offset = 0;
			element->addr = page + page_offset;
		} else {
			qeth_eddp_copy_data_tcp(page + page_offset, eddp,
						data_len, &hcsum);
			element->length += data_len;
			if (!first_lap)
				element->flags = SBAL_FLAGS_LAST_FRAG;
			ctx->num_elements++;
			ctx->offset += data_len;
			data_len = 0;
		}
		first_lap = 0;
	}
	((struct tcphdr *)eddp->th_in_ctx)->check = csum_fold(hcsum);
}
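
/*
 * Compute the IPv4 pseudo header checksum and fold the TCP header into
 * it; the result is the running checksum continued over the payload.
 */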
static inline u32
qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, int data_len)
{
	u32 phcsum; /* pseudo header checksum */

	QETH_DBF_TEXT(trace, 5, "eddpckt4");
	eddp->th.tcp.h.check = 0;
	/* compute pseudo header checksum */
	phcsum = csum_tcpudp_nofold(eddp->nh.ip4.h.saddr, eddp->nh.ip4.h.daddr,
				    eddp->thl + data_len, IPPROTO_TCP, 0);
	/* compute checksum of tcp header */
	return csum_partial((u8 *)&eddp->th, eddp->thl, phcsum);
}
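
/*
 * Compute the IPv6 pseudo header checksum over the source and
 * destination address and the protocol; it is used as the starting
 * checksum for the segment.
 */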
static inline u32
qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, int data_len)
{
	u32 proto;
	u32 phcsum; /* pseudo header checksum */

	QETH_DBF_TEXT(trace, 5, "eddpckt6");
	eddp->th.tcp.h.check = 0;
	/* compute pseudo header checksum */
	phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.saddr,
			      sizeof(struct in6_addr), 0);
	phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.daddr,
			      sizeof(struct in6_addr), phcsum);
	proto = htonl(IPPROTO_TCP);
	phcsum = csum_partial((u8 *)&proto, sizeof(u32), phcsum);
	return phcsum;
}
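
/*
 * Allocate a qeth_eddp_data scratchpad and store copies of the qeth,
 * network and transport headers of the skb that is to be segmented.
 */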
static inline struct qeth_eddp_data *
qeth_eddp_create_eddp_data(struct qeth_hdr *qh, u8 *nh, u8 nhl, u8 *th, u8 thl)
{
	struct qeth_eddp_data *eddp;

	QETH_DBF_TEXT(trace, 5, "eddpcrda");
	eddp = kmalloc(sizeof(struct qeth_eddp_data), GFP_ATOMIC);
	if (eddp) {
		memset(eddp, 0, sizeof(struct qeth_eddp_data));
		eddp->nhl = nhl;
		eddp->thl = thl;
		memcpy(&eddp->qh, qh, sizeof(struct qeth_hdr));
		memcpy(&eddp->nh, nh, nhl);
		memcpy(&eddp->th, th, thl);
		eddp->frag = -1; /* initially we're in skb->data */
	}
	return eddp;
}
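
/*
 * Segment the skb: for each chunk of at most tso_size payload bytes,
 * rewrite the copied qeth/IP/TCP headers (lengths, IP checksum and id,
 * TCP sequence number, FIN/PSH only on the last segment) and emit the
 * segment headers and data into the context.
 */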
static inline void
__qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
			     struct qeth_eddp_data *eddp)
{
	struct tcphdr *tcph;
	int data_len;
	u32 hcsum;

	QETH_DBF_TEXT(trace, 5, "eddpftcp");
	eddp->skb_offset = sizeof(struct qeth_hdr) + eddp->nhl + eddp->thl;
	tcph = eddp->skb->h.th;
	while (eddp->skb_offset < eddp->skb->len) {
		data_len = min((int)skb_shinfo(eddp->skb)->tso_size,
			       (int)(eddp->skb->len - eddp->skb_offset));
		/* prepare qdio hdr */
		if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
			eddp->qh.hdr.l2.pkt_length = data_len + ETH_HLEN +
						     eddp->nhl + eddp->thl -
						     sizeof(struct qeth_hdr);
#ifdef CONFIG_QETH_VLAN
			if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
				eddp->qh.hdr.l2.pkt_length += VLAN_HLEN;
#endif /* CONFIG_QETH_VLAN */
		} else
			eddp->qh.hdr.l3.length = data_len + eddp->nhl +
						 eddp->thl;
		/* prepare ip hdr */
		if (eddp->skb->protocol == ETH_P_IP) {
			eddp->nh.ip4.h.tot_len = data_len + eddp->nhl +
						 eddp->thl;
			eddp->nh.ip4.h.check = 0;
			eddp->nh.ip4.h.check =
				ip_fast_csum((u8 *)&eddp->nh.ip4.h,
					     eddp->nh.ip4.h.ihl);
		} else
			eddp->nh.ip6.h.payload_len = data_len + eddp->thl;
		/* prepare tcp hdr */
		if (data_len == (eddp->skb->len - eddp->skb_offset)) {
			/* last segment -> set FIN and PSH flags */
			eddp->th.tcp.h.fin = tcph->fin;
			eddp->th.tcp.h.psh = tcph->psh;
		}
		if (eddp->skb->protocol == ETH_P_IP)
			hcsum = qeth_eddp_check_tcp4_hdr(eddp, data_len);
		else
			hcsum = qeth_eddp_check_tcp6_hdr(eddp, data_len);
		/* fill the next segment into the context */
		qeth_eddp_create_segment_hdrs(ctx, eddp, data_len);
		qeth_eddp_create_segment_data_tcp(ctx, eddp, data_len, hcsum);
		if (eddp->skb_offset >= eddp->skb->len)
			break;
		/* prepare headers for next round */
		if (eddp->skb->protocol == ETH_P_IP)
			eddp->nh.ip4.h.id++;
		eddp->th.tcp.h.seq += data_len;
	}
}
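
/*
 * Set up the eddp data from the skb headers (including the MAC header
 * and VLAN tag in layer 2 mode) and run the TCP segmentation into the
 * context. Returns 0 on success, -ENOMEM otherwise.
 */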
static inline int
qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
			   struct sk_buff *skb, struct qeth_hdr *qhdr)
{
	struct qeth_eddp_data *eddp = NULL;

	QETH_DBF_TEXT(trace, 5, "eddpficx");
	/* create our segmentation headers and copy original headers */
	if (skb->protocol == ETH_P_IP)
		eddp = qeth_eddp_create_eddp_data(qhdr, (u8 *)skb->nh.iph,
				skb->nh.iph->ihl*4,
				(u8 *)skb->h.th, skb->h.th->doff*4);
	else
		eddp = qeth_eddp_create_eddp_data(qhdr, (u8 *)skb->nh.ipv6h,
				sizeof(struct ipv6hdr),
				(u8 *)skb->h.th, skb->h.th->doff*4);
	if (eddp == NULL) {
		QETH_DBF_TEXT(trace, 2, "eddpfcnm");
		return -ENOMEM;
	}
	if (qhdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
		memcpy(&eddp->mac, eth_hdr(skb), ETH_HLEN);
#ifdef CONFIG_QETH_VLAN
		if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
			eddp->vlan[0] = __constant_htons(skb->protocol);
			eddp->vlan[1] = htons(vlan_tx_tag_get(skb));
		}
#endif /* CONFIG_QETH_VLAN */
	}
	/* FIN and PSH are only set on the last segment */
	eddp->th.tcp.h.fin = 0;
	eddp->th.tcp.h.psh = 0;
	/* begin segmentation and fill context */
	__qeth_eddp_fill_context_tcp(ctx, eddp);
	kfree(eddp);
	return 0;
}
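
/*
 * Estimate how many pages and buffer elements the segmented skb will
 * need: either several segments fit into one page, or one segment
 * spans several elements/pages.
 */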
static inline void
qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb,
			 int hdr_len)
{
	int skbs_per_page;

	QETH_DBF_TEXT(trace, 5, "eddpcanp");
	/* can we put multiple skbs in one page? */
	skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->tso_size + hdr_len);
	if (skbs_per_page > 1) {
		ctx->num_pages = (skb_shinfo(skb)->tso_segs + 1) /
				 skbs_per_page + 1;
		ctx->elements_per_skb = 1;
	} else {
		/* no -> how many elements per skb? */
		ctx->elements_per_skb = (skb_shinfo(skb)->tso_size + hdr_len +
					 PAGE_SIZE) >> PAGE_SHIFT;
		ctx->num_pages = ctx->elements_per_skb *
				 (skb_shinfo(skb)->tso_segs + 1);
	}
	ctx->num_elements = ctx->elements_per_skb *
			    (skb_shinfo(skb)->tso_segs + 1);
}
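
/*
 * Allocate an EDDP context together with its data pages and element
 * array. Returns NULL if an allocation fails or if a single segment
 * would need more elements than one buffer can hold.
 */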
static inline struct qeth_eddp_context *
qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb,
				 int hdr_len)
{
	struct qeth_eddp_context *ctx = NULL;
	u8 *addr;
	int i;

	QETH_DBF_TEXT(trace, 5, "creddpcg");
	/* create the context and allocate pages */
	ctx = kmalloc(sizeof(struct qeth_eddp_context), GFP_ATOMIC);
	if (ctx == NULL) {
		QETH_DBF_TEXT(trace, 2, "ceddpcn1");
		return NULL;
	}
	memset(ctx, 0, sizeof(struct qeth_eddp_context));
	ctx->type = QETH_LARGE_SEND_EDDP;
	qeth_eddp_calc_num_pages(ctx, skb, hdr_len);
	if (ctx->elements_per_skb > QETH_MAX_BUFFER_ELEMENTS(card)) {
		QETH_DBF_TEXT(trace, 2, "ceddpcis");
		kfree(ctx);
		return NULL;
	}
	ctx->pages = kmalloc(ctx->num_pages * sizeof(u8 *), GFP_ATOMIC);
	if (ctx->pages == NULL) {
		QETH_DBF_TEXT(trace, 2, "ceddpcn2");
		kfree(ctx);
		return NULL;
	}
	memset(ctx->pages, 0, ctx->num_pages * sizeof(u8 *));
	for (i = 0; i < ctx->num_pages; ++i) {
		addr = (u8 *)__get_free_page(GFP_ATOMIC);
		if (addr == NULL) {
			QETH_DBF_TEXT(trace, 2, "ceddpcn3");
			ctx->num_pages = i;
			qeth_eddp_free_context(ctx);
			return NULL;
		}
		memset(addr, 0, PAGE_SIZE);
		ctx->pages[i] = addr;
	}
	ctx->elements = kmalloc(ctx->num_elements *
				sizeof(struct qeth_eddp_element), GFP_ATOMIC);
	if (ctx->elements == NULL) {
		QETH_DBF_TEXT(trace, 2, "ceddpcn4");
		qeth_eddp_free_context(ctx);
		return NULL;
	}
	memset(ctx->elements, 0,
	       ctx->num_elements * sizeof(struct qeth_eddp_element));
	/* reset num_elements; will be incremented again in fill_buffer to
	 * reflect the number of actually used elements */
	ctx->num_elements = 0;
	return ctx;
}
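
/*
 * Create and fill an EDDP context for a TCP skb (IPv4 or IPv6); the
 * header length passed down covers the qeth, IP and TCP headers.
 */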
static inline struct qeth_eddp_context *
qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb,
			     struct qeth_hdr *qhdr)
{
	struct qeth_eddp_context *ctx = NULL;

	QETH_DBF_TEXT(trace, 5, "creddpct");
	if (skb->protocol == ETH_P_IP)
		ctx = qeth_eddp_create_context_generic(card, skb,
			sizeof(struct qeth_hdr) + skb->nh.iph->ihl*4 +
			skb->h.th->doff*4);
	else if (skb->protocol == ETH_P_IPV6)
		ctx = qeth_eddp_create_context_generic(card, skb,
			sizeof(struct qeth_hdr) + sizeof(struct ipv6hdr) +
			skb->h.th->doff*4);
	else
		QETH_DBF_TEXT(trace, 2, "cetcpinv");

	if (ctx == NULL) {
		QETH_DBF_TEXT(trace, 2, "creddpnl");
		return NULL;
	}
	if (qeth_eddp_fill_context_tcp(ctx, skb, qhdr)) {
		QETH_DBF_TEXT(trace, 2, "ceddptfe");
		qeth_eddp_free_context(ctx);
		return NULL;
	}
	atomic_set(&ctx->refcnt, 1);
	return ctx;
}
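
/*
 * Entry point for EDDP: create a segmentation context for the given
 * skb. Only TCP sockets are handled; anything else yields NULL.
 */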
struct qeth_eddp_context *
qeth_eddp_create_context(struct qeth_card *card, struct sk_buff *skb,
			 struct qeth_hdr *qhdr)
{
	QETH_DBF_TEXT(trace, 5, "creddpc");
	switch (skb->sk->sk_protocol) {
	case IPPROTO_TCP:
		return qeth_eddp_create_context_tcp(card, skb, qhdr);
	default:
		QETH_DBF_TEXT(trace, 2, "eddpinvp");
	}
	return NULL;
}