/*
 * linux/drivers/s390/net/qeth_eddp.c
 *
 * Enhanced Device Driver Packing (EDDP) support for the qeth driver.
 *
 * Copyright 2004 IBM Corporation
 *
 *    Author(s): Thomas Spatzier <tspat@de.ibm.com>
 *
 */
#include <linux/errno.h>
#include <linux/ip.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/kernel.h>
#include <linux/tcp.h>
#include <net/tcp.h>
#include <linux/skbuff.h>

#include <net/ip.h>

#include "qeth.h"
#include "qeth_mpc.h"
#include "qeth_eddp.h"
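
/*
 * Rough call flow (a sketch of how the pieces below fit together): a large
 * TSO skb is turned into a segmentation context by
 * qeth_eddp_create_context(); qeth_eddp_check_buffers_for_context() then
 * tells the send path whether enough empty QDIO buffers are available; the
 * prepared elements are copied into the queue by qeth_eddp_fill_buffer();
 * and qeth_eddp_buf_release_contexts() drops the per-buffer references once
 * transmission completes, after which the last qeth_eddp_put_context()
 * frees the context.
 */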
int
qeth_eddp_check_buffers_for_context(struct qeth_qdio_out_q *queue,
				    struct qeth_eddp_context *ctx)
{
	int index = queue->next_buf_to_fill;
	int elements_needed = ctx->num_elements;
	int elements_in_buffer;
	int skbs_in_buffer;
	int buffers_needed = 0;

	QETH_DBF_TEXT(trace, 5, "eddpcbfc");
	while (elements_needed > 0) {
		buffers_needed++;
		if (atomic_read(&queue->bufs[index].state) !=
				QETH_QDIO_BUF_EMPTY)
			return -EBUSY;

		elements_in_buffer = QETH_MAX_BUFFER_ELEMENTS(queue->card) -
				     queue->bufs[index].next_element_to_fill;
		skbs_in_buffer = elements_in_buffer / ctx->elements_per_skb;
		elements_needed -= skbs_in_buffer * ctx->elements_per_skb;
		index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
	}
	return buffers_needed;
}
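
/*
 * Minimal usage sketch (the caller shown here is assumed, not part of this
 * file):
 *
 *	buffers_needed = qeth_eddp_check_buffers_for_context(queue, ctx);
 *	if (buffers_needed < 0)
 *		return -EBUSY;	(queue too full, retry later)
 *
 * A negative return means a non-empty buffer was hit before all of
 * ctx->num_elements could be placed; otherwise the number of buffers the
 * context will occupy is returned.
 */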
static inline void
qeth_eddp_free_context(struct qeth_eddp_context *ctx)
{
	int i;

	QETH_DBF_TEXT(trace, 5, "eddpfctx");
	for (i = 0; i < ctx->num_pages; ++i)
		free_page((unsigned long)ctx->pages[i]);
	kfree(ctx->pages);
	kfree(ctx->elements);
	kfree(ctx);
}
static inline void
qeth_eddp_get_context(struct qeth_eddp_context *ctx)
{
	atomic_inc(&ctx->refcnt);
}
void
qeth_eddp_put_context(struct qeth_eddp_context *ctx)
{
	if (atomic_dec_return(&ctx->refcnt) == 0)
		qeth_eddp_free_context(ctx);
}
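
/*
 * Reference counting sketch: qeth_eddp_create_context_tcp() below leaves a
 * fresh context with refcnt == 1 (the sender's reference). Every buffer the
 * context is filled into takes an additional reference via
 * qeth_eddp_buf_ref_context(); the final qeth_eddp_put_context() releases
 * all pages and the element table through qeth_eddp_free_context().
 */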
void
qeth_eddp_buf_release_contexts(struct qeth_qdio_out_buffer *buf)
{
	struct qeth_eddp_context_reference *ref;

	QETH_DBF_TEXT(trace, 6, "eddprctx");
	while (!list_empty(&buf->ctx_list)) {
		ref = list_entry(buf->ctx_list.next,
				 struct qeth_eddp_context_reference, list);
		qeth_eddp_put_context(ref->ctx);
		list_del(&ref->list);
		kfree(ref);
	}
}
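
/*
 * Each qeth_qdio_out_buffer keeps a ctx_list of
 * qeth_eddp_context_reference entries, so a context stays alive until
 * every buffer carrying parts of it has been released. The helper below
 * adds such a reference (a sketch of the intended pairing with the release
 * loop above).
 */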
static inline int
qeth_eddp_buf_ref_context(struct qeth_qdio_out_buffer *buf,
			  struct qeth_eddp_context *ctx)
{
	struct qeth_eddp_context_reference *ref;

	QETH_DBF_TEXT(trace, 6, "eddprfcx");
	ref = kmalloc(sizeof(struct qeth_eddp_context_reference), GFP_ATOMIC);
	if (ref == NULL)
		return -ENOMEM;
	qeth_eddp_get_context(ctx);
	ref->ctx = ctx;
	list_add_tail(&ref->list, &buf->ctx_list);
	return 0;
}
int
qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
		      struct qeth_eddp_context *ctx,
		      int index)
{
	struct qeth_qdio_out_buffer *buf = NULL;
	struct qdio_buffer *buffer;
	int elements = ctx->num_elements;
	int element = 0;
	int flush_cnt = 0;
	int must_refcnt = 1;
	int i;

	QETH_DBF_TEXT(trace, 5, "eddpfibu");
	while (elements > 0) {
		buf = &queue->bufs[index];
		if (atomic_read(&buf->state) != QETH_QDIO_BUF_EMPTY) {
			/* normally this should not happen since we checked for
			 * available elements in qeth_check_elements_for_context
			 */
			if (element == 0)
				return -EBUSY;
			else {
				PRINT_WARN("could only partially fill eddp "
					   "buffer!\n");
				goto out;
			}
		}
		/* check if the whole next skb fits into current buffer */
		if ((QETH_MAX_BUFFER_ELEMENTS(queue->card) -
					buf->next_element_to_fill)
				< ctx->elements_per_skb) {
			/* no -> go to next buffer */
			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
			index = (index + 1) % QDIO_MAX_BUFFERS_PER_Q;
			flush_cnt++;
			/* new buffer, so we have to add ctx to the buffer's
			 * ctx_list and increment ctx's refcnt */
			must_refcnt = 1;
			continue;
		}
		if (must_refcnt) {
			must_refcnt = 0;
			if (qeth_eddp_buf_ref_context(buf, ctx)) {
				PRINT_WARN("no memory to create eddp context "
					   "reference\n");
				goto out_check;
			}
		}
		buffer = buf->buffer;
		/* fill one skb into buffer */
		for (i = 0; i < ctx->elements_per_skb; ++i) {
			buffer->element[buf->next_element_to_fill].addr =
				ctx->elements[element].addr;
			buffer->element[buf->next_element_to_fill].length =
				ctx->elements[element].length;
			buffer->element[buf->next_element_to_fill].flags =
				ctx->elements[element].flags;
			buf->next_element_to_fill++;
			element++;
			elements--;
		}
	}
out_check:
	if (!queue->do_pack) {
		QETH_DBF_TEXT(trace, 6, "fillbfnp");
		/* set state to PRIMED -> will be flushed */
		if (buf->next_element_to_fill > 0) {
			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
			flush_cnt++;
		}
	} else {
		if (queue->card->options.performance_stats)
			queue->card->perf_stats.skbs_sent_pack++;
		QETH_DBF_TEXT(trace, 6, "fillbfpa");
		if (buf->next_element_to_fill >=
				QETH_MAX_BUFFER_ELEMENTS(queue->card)) {
			/* packed buffer is full -> set state PRIMED
			 * -> will be flushed */
			atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
			flush_cnt++;
		}
	}
out:
	return flush_cnt;
}
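
/*
 * Return value sketch: qeth_eddp_fill_buffer() hands back the number of
 * buffers it primed (i.e. how many should be flushed to the hardware), or
 * -EBUSY if not even a single element could be placed. With packing
 * enabled, a partially filled buffer is left open so later skbs can still
 * be packed into it.
 */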
static inline void
qeth_eddp_create_segment_hdrs(struct qeth_eddp_context *ctx,
			      struct qeth_eddp_data *eddp, int data_len)
{
	u8 *page;
	int page_remainder;
	int page_offset;
	int pkt_len;
	struct qeth_eddp_element *element;

	QETH_DBF_TEXT(trace, 5, "eddpcrsh");
	page = ctx->pages[ctx->offset >> PAGE_SHIFT];
	page_offset = ctx->offset % PAGE_SIZE;
	element = &ctx->elements[ctx->num_elements];
	pkt_len = eddp->nhl + eddp->thl + data_len;
	/* FIXME: layer2 and VLAN !!! */
	if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2)
		pkt_len += ETH_HLEN;
	if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
		pkt_len += VLAN_HLEN;
	/* does the complete packet fit into the current page? */
	page_remainder = PAGE_SIZE - page_offset;
	if (page_remainder < (sizeof(struct qeth_hdr) + pkt_len)) {
		/* no -> go to start of next page */
		ctx->offset += page_remainder;
		page = ctx->pages[ctx->offset >> PAGE_SHIFT];
		page_offset = 0;
	}
	memcpy(page + page_offset, &eddp->qh, sizeof(struct qeth_hdr));
	element->addr = page + page_offset;
	element->length = sizeof(struct qeth_hdr);
	ctx->offset += sizeof(struct qeth_hdr);
	page_offset += sizeof(struct qeth_hdr);
	/* add mac header (layer2 mode only) */
	if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
		memcpy(page + page_offset, &eddp->mac, ETH_HLEN);
		element->length += ETH_HLEN;
		ctx->offset += ETH_HLEN;
		page_offset += ETH_HLEN;
	}
	/* add VLAN tag if present */
	if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
		memcpy(page + page_offset, &eddp->vlan, VLAN_HLEN);
		element->length += VLAN_HLEN;
		ctx->offset += VLAN_HLEN;
		page_offset += VLAN_HLEN;
	}
	/* add network header */
	memcpy(page + page_offset, (u8 *)&eddp->nh, eddp->nhl);
	element->length += eddp->nhl;
	/* keep a pointer to the copied ip hdr for later fix-ups */
	eddp->nh_in_ctx = page + page_offset;
	ctx->offset += eddp->nhl;
	page_offset += eddp->nhl;
	/* add transport header */
	memcpy(page + page_offset, (u8 *)&eddp->th, eddp->thl);
	element->length += eddp->thl;
	/* keep a pointer to the copied tcp hdr for the checksum fix-up */
	eddp->th_in_ctx = page + page_offset;
	ctx->offset += eddp->thl;
}
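
/*
 * Per-segment layout inside the context pages (sketch; the eth and vlan
 * parts are present only in layer2 mode):
 *
 *	+----------+-----------+------+---------+---------+---------+
 *	| qeth_hdr | eth (opt) | vlan | net hdr | tcp hdr | payload |
 *	+----------+-----------+------+---------+---------+---------+
 *
 * All headers of one segment share a single element on one page; if they
 * would cross a page boundary, the segment starts on a fresh page.
 */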
static inline void
qeth_eddp_copy_data_tcp(char *dst, struct qeth_eddp_data *eddp, int len,
			u32 *hcsum)
{
	struct skb_frag_struct *frag;
	int left_in_frag;
	int copy_len;
	u8 *src;

	QETH_DBF_TEXT(trace, 5, "eddpcdtc");
	if (skb_shinfo(eddp->skb)->nr_frags == 0) {
		memcpy(dst, eddp->skb->data + eddp->skb_offset, len);
		*hcsum = csum_partial(eddp->skb->data + eddp->skb_offset, len,
				      *hcsum);
		eddp->skb_offset += len;
	} else {
		while (len > 0) {
			if (eddp->frag < 0) {
				/* we're in skb->data */
				left_in_frag = (eddp->skb->len -
						eddp->skb->data_len) -
						eddp->skb_offset;
				src = eddp->skb->data + eddp->skb_offset;
			} else {
				frag = &skb_shinfo(eddp->skb)->
					frags[eddp->frag];
				left_in_frag = frag->size - eddp->frag_offset;
				src = (u8 *)(
					(page_to_pfn(frag->page) << PAGE_SHIFT) +
					frag->page_offset + eddp->frag_offset);
			}
			if (left_in_frag <= 0) {
				eddp->frag++;
				eddp->frag_offset = 0;
				continue;
			}
			copy_len = min(left_in_frag, len);
			memcpy(dst, src, copy_len);
			*hcsum = csum_partial(src, copy_len, *hcsum);
			dst += copy_len;
			eddp->frag_offset += copy_len;
			eddp->skb_offset += copy_len;
			len -= copy_len;
		}
	}
}
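
/*
 * Copy-state sketch: eddp->frag == -1 means the copy position is still in
 * the linear part of the skb (skb->data); once that is exhausted, the loop
 * above walks skb_shinfo(skb)->frags[]. The running checksum *hcsum is
 * extended with csum_partial() over every chunk copied, so the caller ends
 * up with a checksum covering the whole segment payload.
 */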
static inline void
qeth_eddp_create_segment_data_tcp(struct qeth_eddp_context *ctx,
				  struct qeth_eddp_data *eddp, int data_len,
				  u32 hcsum)
{
	u8 *page;
	int page_remainder;
	int page_offset;
	struct qeth_eddp_element *element;
	int first_lap = 1;

	QETH_DBF_TEXT(trace, 5, "eddpcsdt");
	page = ctx->pages[ctx->offset >> PAGE_SHIFT];
	page_offset = ctx->offset % PAGE_SIZE;
	element = &ctx->elements[ctx->num_elements];
	while (data_len) {
		page_remainder = PAGE_SIZE - page_offset;
		if (page_remainder < data_len) {
			qeth_eddp_copy_data_tcp(page + page_offset, eddp,
						page_remainder, &hcsum);
			element->length += page_remainder;
			if (first_lap)
				element->flags = SBAL_FLAGS_FIRST_FRAG;
			else
				element->flags = SBAL_FLAGS_MIDDLE_FRAG;
			ctx->num_elements++;
			element++;
			data_len -= page_remainder;
			ctx->offset += page_remainder;
			page = ctx->pages[ctx->offset >> PAGE_SHIFT];
			page_offset = 0;
			element->addr = page + page_offset;
		} else {
			qeth_eddp_copy_data_tcp(page + page_offset, eddp,
						data_len, &hcsum);
			element->length += data_len;
			if (!first_lap)
				element->flags = SBAL_FLAGS_LAST_FRAG;
			ctx->num_elements++;
			ctx->offset += data_len;
			data_len = 0;
		}
		first_lap = 0;
	}
	/* write the final tcp checksum into the copied tcp header */
	((struct tcphdr *)eddp->th_in_ctx)->check = csum_fold(hcsum);
}
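
/*
 * SBAL flag sketch: payload that spans several pages is chained for the
 * hardware with SBAL_FLAGS_FIRST_FRAG / SBAL_FLAGS_MIDDLE_FRAG /
 * SBAL_FLAGS_LAST_FRAG on the involved elements; a segment that fits into
 * the current page leaves the element's flags untouched. The folded
 * checksum is patched into the TCP header that
 * qeth_eddp_create_segment_hdrs() placed at eddp->th_in_ctx.
 */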
static inline u32
qeth_eddp_check_tcp4_hdr(struct qeth_eddp_data *eddp, int data_len)
{
	u32 phcsum; /* pseudo header checksum */

	QETH_DBF_TEXT(trace, 5, "eddpckt4");
	eddp->th.tcp.h.check = 0;
	/* compute pseudo header checksum */
	phcsum = csum_tcpudp_nofold(eddp->nh.ip4.h.saddr, eddp->nh.ip4.h.daddr,
				    eddp->thl + data_len, IPPROTO_TCP, 0);
	/* compute checksum of tcp header */
	return csum_partial((u8 *)&eddp->th, eddp->thl, phcsum);
}
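
/*
 * Checksum pipeline sketch for one IPv4 segment (as used by
 * __qeth_eddp_fill_context_tcp() below):
 *
 *	hcsum = qeth_eddp_check_tcp4_hdr(eddp, data_len);
 *	qeth_eddp_create_segment_data_tcp(ctx, eddp, data_len, hcsum);
 *
 * The first call covers the pseudo header plus the TCP header; the second
 * extends the sum over the payload while copying it and stores
 * csum_fold(hcsum) in the copied TCP header.
 */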
static inline u32
qeth_eddp_check_tcp6_hdr(struct qeth_eddp_data *eddp, int data_len)
{
	u32 proto;
	u32 phcsum; /* pseudo header checksum */

	QETH_DBF_TEXT(trace, 5, "eddpckt6");
	eddp->th.tcp.h.check = 0;
	/* compute pseudo header checksum */
	phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.saddr,
			      sizeof(struct in6_addr), 0);
	phcsum = csum_partial((u8 *)&eddp->nh.ip6.h.daddr,
			      sizeof(struct in6_addr), phcsum);
	proto = htonl(IPPROTO_TCP);
	phcsum = csum_partial((u8 *)&proto, sizeof(u32), phcsum);
	return phcsum;
}
static inline struct qeth_eddp_data *
qeth_eddp_create_eddp_data(struct qeth_hdr *qh, u8 *nh, u8 nhl, u8 *th, u8 thl)
{
	struct qeth_eddp_data *eddp;

	QETH_DBF_TEXT(trace, 5, "eddpcrda");
	eddp = kzalloc(sizeof(struct qeth_eddp_data), GFP_ATOMIC);
	if (eddp) {
		eddp->nhl = nhl;
		eddp->thl = thl;
		memcpy(&eddp->qh, qh, sizeof(struct qeth_hdr));
		memcpy(&eddp->nh, nh, nhl);
		memcpy(&eddp->th, th, thl);
		eddp->frag = -1; /* initially we're in skb->data */
	}
	return eddp;
}
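
/*
 * struct qeth_eddp_data acts as a scratchpad: it holds private copies of
 * the qeth, network and transport headers taken from the original skb.
 * __qeth_eddp_fill_context_tcp() below mutates these copies (lengths, IP
 * id, TCP seq, FIN/PSH) once per generated segment before they are copied
 * into the context pages.
 */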
static inline void
__qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
			     struct qeth_eddp_data *eddp)
{
	struct tcphdr *tcph;
	int data_len;
	u32 hcsum;

	QETH_DBF_TEXT(trace, 5, "eddpftcp");
	eddp->skb_offset = sizeof(struct qeth_hdr) + eddp->nhl + eddp->thl;
	if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
		eddp->skb_offset += sizeof(struct ethhdr);
#ifdef CONFIG_QETH_VLAN
		if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
			eddp->skb_offset += VLAN_HLEN;
#endif /* CONFIG_QETH_VLAN */
	}
	tcph = eddp->skb->h.th;
	while (eddp->skb_offset < eddp->skb->len) {
		data_len = min((int)skb_shinfo(eddp->skb)->gso_size,
			       (int)(eddp->skb->len - eddp->skb_offset));
		/* prepare qdio hdr */
		if (eddp->qh.hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
			eddp->qh.hdr.l2.pkt_length = data_len + ETH_HLEN +
						     eddp->nhl + eddp->thl -
						     sizeof(struct qeth_hdr);
#ifdef CONFIG_QETH_VLAN
			if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q))
				eddp->qh.hdr.l2.pkt_length += VLAN_HLEN;
#endif /* CONFIG_QETH_VLAN */
		} else
			eddp->qh.hdr.l3.length = data_len + eddp->nhl +
						 eddp->thl;
		/* prepare ip hdr */
		if (eddp->skb->protocol == ETH_P_IP) {
			eddp->nh.ip4.h.tot_len = data_len + eddp->nhl +
						 eddp->thl;
			eddp->nh.ip4.h.check = 0;
			eddp->nh.ip4.h.check =
				ip_fast_csum((u8 *)&eddp->nh.ip4.h,
					     eddp->nh.ip4.h.ihl);
		} else
			eddp->nh.ip6.h.payload_len = data_len + eddp->thl;
		/* prepare tcp hdr */
		if (data_len == (eddp->skb->len - eddp->skb_offset)) {
			/* last segment -> set FIN and PSH flags */
			eddp->th.tcp.h.fin = tcph->fin;
			eddp->th.tcp.h.psh = tcph->psh;
		}
		if (eddp->skb->protocol == ETH_P_IP)
			hcsum = qeth_eddp_check_tcp4_hdr(eddp, data_len);
		else
			hcsum = qeth_eddp_check_tcp6_hdr(eddp, data_len);
		/* fill the next segment into the context */
		qeth_eddp_create_segment_hdrs(ctx, eddp, data_len);
		qeth_eddp_create_segment_data_tcp(ctx, eddp, data_len, hcsum);
		if (eddp->skb_offset >= eddp->skb->len)
			break;
		/* prepare headers for next round */
		if (eddp->skb->protocol == ETH_P_IP)
			eddp->nh.ip4.h.id++;
		eddp->th.tcp.h.seq += data_len;
	}
}
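
/*
 * Per-round header fix-ups done above (summary): the qdio and IP length
 * fields are set to the current segment's size, the IPv4 header checksum
 * is recomputed with ip_fast_csum(), FIN/PSH are copied from the original
 * TCP header only for the last segment, and for the next round the IPv4 id
 * is incremented and the TCP sequence number advanced by data_len.
 */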
static inline int
qeth_eddp_fill_context_tcp(struct qeth_eddp_context *ctx,
			   struct sk_buff *skb, struct qeth_hdr *qhdr)
{
	struct qeth_eddp_data *eddp = NULL;

	QETH_DBF_TEXT(trace, 5, "eddpficx");
	/* create our segmentation headers and copy original headers */
	if (skb->protocol == ETH_P_IP)
		eddp = qeth_eddp_create_eddp_data(qhdr, (u8 *)skb->nh.iph,
				skb->nh.iph->ihl*4,
				(u8 *)skb->h.th, skb->h.th->doff*4);
	else
		eddp = qeth_eddp_create_eddp_data(qhdr, (u8 *)skb->nh.ipv6h,
				sizeof(struct ipv6hdr),
				(u8 *)skb->h.th, skb->h.th->doff*4);

	if (eddp == NULL) {
		QETH_DBF_TEXT(trace, 2, "eddpfcnm");
		return -ENOMEM;
	}
	if (qhdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
		skb->mac.raw = (skb->data) + sizeof(struct qeth_hdr);
		memcpy(&eddp->mac, eth_hdr(skb), ETH_HLEN);
#ifdef CONFIG_QETH_VLAN
		if (eddp->mac.h_proto == __constant_htons(ETH_P_8021Q)) {
			eddp->vlan[0] = __constant_htons(skb->protocol);
			eddp->vlan[1] = htons(vlan_tx_tag_get(skb));
		}
#endif /* CONFIG_QETH_VLAN */
	}
	/* the next flags will only be set on the last segment */
	eddp->th.tcp.h.fin = 0;
	eddp->th.tcp.h.psh = 0;
	eddp->skb = skb;
	/* begin segmentation and fill context */
	__qeth_eddp_fill_context_tcp(ctx, eddp);
	kfree(eddp);
	return 0;
}
static inline void
qeth_eddp_calc_num_pages(struct qeth_eddp_context *ctx, struct sk_buff *skb,
			 int hdr_len)
{
	int skbs_per_page;

	QETH_DBF_TEXT(trace, 5, "eddpcanp");
	/* can we put multiple skbs in one page? */
	skbs_per_page = PAGE_SIZE / (skb_shinfo(skb)->gso_size + hdr_len);
	if (skbs_per_page > 1) {
		ctx->num_pages = (skb_shinfo(skb)->gso_segs + 1) /
				 skbs_per_page + 1;
		ctx->elements_per_skb = 1;
	} else {
		/* no -> how many elements per skb? */
		ctx->elements_per_skb = (skb_shinfo(skb)->gso_size + hdr_len +
					 PAGE_SIZE) >> PAGE_SHIFT;
		ctx->num_pages = ctx->elements_per_skb *
				 (skb_shinfo(skb)->gso_segs + 1);
	}
	ctx->num_elements = ctx->elements_per_skb *
			    (skb_shinfo(skb)->gso_segs + 1);
}
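
/*
 * Worked example (assuming PAGE_SIZE == 4096 and a 72-byte total header,
 * i.e. a 32-byte qeth_hdr plus 20-byte IPv4 and 20-byte TCP headers; these
 * sizes are illustrative): for gso_size == 1460, skbs_per_page =
 * 4096 / 1532 = 2, so each segment uses one element and num_pages =
 * (gso_segs + 1) / 2 + 1. For gso_size == 8192 the else branch applies:
 * elements_per_skb = (8192 + 72 + 4096) >> 12 = 3 pages per segment.
 */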
static inline struct qeth_eddp_context *
qeth_eddp_create_context_generic(struct qeth_card *card, struct sk_buff *skb,
				 int hdr_len)
{
	struct qeth_eddp_context *ctx = NULL;
	u8 *addr;
	int i;

	QETH_DBF_TEXT(trace, 5, "creddpcg");
	/* create the context and allocate pages */
	ctx = kzalloc(sizeof(struct qeth_eddp_context), GFP_ATOMIC);
	if (ctx == NULL) {
		QETH_DBF_TEXT(trace, 2, "ceddpcn1");
		return NULL;
	}
	ctx->type = QETH_LARGE_SEND_EDDP;
	qeth_eddp_calc_num_pages(ctx, skb, hdr_len);
	if (ctx->elements_per_skb > QETH_MAX_BUFFER_ELEMENTS(card)) {
		QETH_DBF_TEXT(trace, 2, "ceddpcis");
		kfree(ctx);
		return NULL;
	}
	ctx->pages = kcalloc(ctx->num_pages, sizeof(u8 *), GFP_ATOMIC);
	if (ctx->pages == NULL) {
		QETH_DBF_TEXT(trace, 2, "ceddpcn2");
		kfree(ctx);
		return NULL;
	}
	for (i = 0; i < ctx->num_pages; ++i) {
		addr = (u8 *)__get_free_page(GFP_ATOMIC);
		if (addr == NULL) {
			QETH_DBF_TEXT(trace, 2, "ceddpcn3");
			ctx->num_pages = i;
			qeth_eddp_free_context(ctx);
			return NULL;
		}
		memset(addr, 0, PAGE_SIZE);
		ctx->pages[i] = addr;
	}
	ctx->elements = kcalloc(ctx->num_elements,
				sizeof(struct qeth_eddp_element), GFP_ATOMIC);
	if (ctx->elements == NULL) {
		QETH_DBF_TEXT(trace, 2, "ceddpcn4");
		qeth_eddp_free_context(ctx);
		return NULL;
	}
	/* reset num_elements; will be incremented again in fill_buffer to
	 * reflect the number of elements actually used */
	ctx->num_elements = 0;
	return ctx;
}
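
/*
 * Note on the allocation scheme (sketch): num_elements is first used as
 * the allocation size for the element table and then reset to zero; the
 * segmentation code counts it up again so that it finally reflects only
 * the elements actually used, which is what
 * qeth_eddp_check_buffers_for_context() and qeth_eddp_fill_buffer() rely
 * on.
 */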
static inline struct qeth_eddp_context *
qeth_eddp_create_context_tcp(struct qeth_card *card, struct sk_buff *skb,
			     struct qeth_hdr *qhdr)
{
	struct qeth_eddp_context *ctx = NULL;

	QETH_DBF_TEXT(trace, 5, "creddpct");
	if (skb->protocol == ETH_P_IP)
		ctx = qeth_eddp_create_context_generic(card, skb,
			sizeof(struct qeth_hdr) + skb->nh.iph->ihl*4 +
			skb->h.th->doff*4);
	else if (skb->protocol == ETH_P_IPV6)
		ctx = qeth_eddp_create_context_generic(card, skb,
			sizeof(struct qeth_hdr) + sizeof(struct ipv6hdr) +
			skb->h.th->doff*4);
	else
		QETH_DBF_TEXT(trace, 2, "cetcpinv");

	if (ctx == NULL) {
		QETH_DBF_TEXT(trace, 2, "creddpnl");
		return NULL;
	}
	if (qeth_eddp_fill_context_tcp(ctx, skb, qhdr)) {
		QETH_DBF_TEXT(trace, 2, "ceddptfe");
		qeth_eddp_free_context(ctx);
		return NULL;
	}
	atomic_set(&ctx->refcnt, 1);
	return ctx;
}
struct qeth_eddp_context *
qeth_eddp_create_context(struct qeth_card *card, struct sk_buff *skb,
			 struct qeth_hdr *qhdr)
{
	QETH_DBF_TEXT(trace, 5, "creddpc");
	switch (skb->sk->sk_protocol) {
	case IPPROTO_TCP:
		return qeth_eddp_create_context_tcp(card, skb, qhdr);
	default:
		QETH_DBF_TEXT(trace, 2, "eddpinvp");
	}
	return NULL;
}
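
/*
 * Usage sketch for the entry point above (the surrounding caller code is
 * assumed, not part of this file):
 *
 *	ctx = qeth_eddp_create_context(card, skb, hdr);
 *	if (ctx == NULL)
 *		return -ENOMEM;
 *	if (qeth_eddp_check_buffers_for_context(queue, ctx) < 0)
 *		(wait for empty buffers and retry)
 *	flush_cnt = qeth_eddp_fill_buffer(queue, ctx,
 *					  queue->next_buf_to_fill);
 *	qeth_eddp_put_context(ctx);	(drop the sender's reference)
 */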