/*
 * WUSB Wire Adapter: WLP interface
 * Deal with TX (massaging data to transmit, handling it)
 *
 * Copyright (C) 2005-2006 Intel Corporation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 * Transmission engine. Get an skb, create from that a WLP transmit
 * context, add a WLP TX header (which we keep prefilled in the
 * device's instance), fill out the target-specific fields and
 * submit it for transmission.
 *
 * ROADMAP:
 *
 *   i1480u_tx_release(): called by i1480u_disconnect() to release
 *                        pending tx contexts.
 *
 *   i1480u_tx_cb(): callback for TX contexts (USB URBs)
 *     i1480u_tx_destroy()
 *
 *   i1480u_tx_timeout(): called for timeout handling from the
 *                        network stack.
 *
 *   i1480u_hard_start_xmit(): called for transmitting an skb from
 *                             the network stack. Will interact with the
 *                             WLP substack to verify and prepare the frame.
 *     i1480u_xmit_frame(): actual transmission on hardware
 *
 *        i1480u_tx_create()       Creates TX context
 *           i1480u_tx_create_1()    For packets in 1 fragment
 *           i1480u_tx_create_n()    For packets in >1 fragments
 *
 * TODO:
 *
 * - FIXME: rewrite using usb_sg_*(), add async support to
 *          usb_sg_*(). It might not make too much sense as most of
 *          the time the MTU will be smaller than one page...
 */
#include "i1480u-wlp.h"
#include <linux/uwb/debug.h>

enum {
	/* This is only for Next and Last TX packets */
	i1480u_MAX_PL_SIZE = i1480u_MAX_FRG_SIZE
		- sizeof(struct untd_hdr_rst),
};
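
/*
 * Note: this is the payload capacity of Next/Last fragments only; the
 * First fragment also carries a struct wlp_tx_hdr and uses the larger
 * struct untd_hdr_1st, so its capacity is computed separately in
 * i1480u_tx_create_n().
 */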
/** Free resources allocated to an i1480u tx context. */
static
void i1480u_tx_free(struct i1480u_tx *wtx)
{
	kfree(wtx->buf);		/* multi-fragment buffer, if any */
	if (wtx->skb)			/* single-fragment skb, if any */
		dev_kfree_skb_irq(wtx->skb);
	usb_free_urb(wtx->urb);
	kfree(wtx);
}
static
void i1480u_tx_destroy(struct i1480u *i1480u, struct i1480u_tx *wtx)
{
	unsigned long flags;

	spin_lock_irqsave(&i1480u->tx_list_lock, flags);	/* not active any more */
	list_del(&wtx->list_node);
	i1480u_tx_free(wtx);
	spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
}
static
void i1480u_tx_unlink_urbs(struct i1480u *i1480u)
{
	unsigned long flags;
	struct i1480u_tx *wtx, *next;

	spin_lock_irqsave(&i1480u->tx_list_lock, flags);
	list_for_each_entry_safe(wtx, next, &i1480u->tx_list, list_node)
		usb_unlink_urb(wtx->urb);
	spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
}
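
/*
 * Note: usb_unlink_urb() is the asynchronous cancel; it may return
 * before the URB is actually given back, and the completion callback
 * (i1480u_tx_cb()) still runs, with a -ECONNRESET/-ENOENT status, and
 * performs the actual cleanup. Waiting for the tx list to drain is
 * done in i1480u_tx_release().
 */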
/*
 * Callback for a completed tx USB URB.
 *
 * TODO:
 *
 * - FIXME: recover errors more gracefully
 * - FIXME: handle NAKs (I don't think they come here) for flow ctl
 */
static
void i1480u_tx_cb(struct urb *urb)
{
	struct i1480u_tx *wtx = urb->context;
	struct i1480u *i1480u = wtx->i1480u;
	struct net_device *net_dev = i1480u->net_dev;
	struct device *dev = &i1480u->usb_iface->dev;
	unsigned long flags;

	switch (urb->status) {
	case 0:
		spin_lock_irqsave(&i1480u->lock, flags);
		i1480u->stats.tx_packets++;
		i1480u->stats.tx_bytes += urb->actual_length;
		spin_unlock_irqrestore(&i1480u->lock, flags);
		break;
	case -ECONNRESET:	/* Not an error, but a controlled situation; */
	case -ENOENT:		/* (we killed the URB)...so, no broadcast */
		dev_dbg(dev, "notif endp: reset/noent %d\n", urb->status);
		netif_stop_queue(net_dev);
		break;
	case -ESHUTDOWN:	/* going away! */
		dev_dbg(dev, "notif endp: down %d\n", urb->status);
		netif_stop_queue(net_dev);
		break;
	default:
		dev_err(dev, "TX: unknown URB status %d\n", urb->status);
		if (edc_inc(&i1480u->tx_errors, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "TX: max acceptable errors exceeded."
				" Reset device.\n");
			netif_stop_queue(net_dev);
			i1480u_tx_unlink_urbs(i1480u);
			wlp_reset_all(&i1480u->wlp);
		}
		break;
	}
	i1480u_tx_destroy(i1480u, wtx);
	if (atomic_dec_return(&i1480u->tx_inflight.count)
	    <= i1480u->tx_inflight.threshold
	    && netif_queue_stopped(net_dev)
	    && i1480u->tx_inflight.threshold != 0) {
		if (d_test(2) && printk_ratelimit())
			d_printf(2, dev, "Restart queue.\n");
		netif_start_queue(net_dev);
		atomic_inc(&i1480u->tx_inflight.restart_count);
	}
}
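
/*
 * Flow control recap: the queue is stopped by i1480u_xmit_frame() when
 * tx_inflight.count reaches tx_inflight.max; it is restarted here, in
 * the URB completion path, once the count drains back down to
 * tx_inflight.threshold. A threshold of zero disables the restart (the
 * queue then only comes back through the watchdog/reset paths).
 */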
/*
 * Given a buffer that doesn't fit in a single fragment, create a
 * scatter/gather structure for delivery to the USB pipe.
 *
 * Implements functionality of i1480u_tx_create().
 *
 * @wtx:	tx descriptor
 * @skb:	skb holding the payload to transmit
 * @gfp_mask:	gfp allocation mask
 * @returns:	0 if ok, < 0 errno code on error.
 *
 * Sorry, TOO LONG a function, but breaking it up is kind of hard
 *
 * This will break the buffer in chunks smaller than
 * i1480u_MAX_FRG_SIZE (including the header) and add proper headers
 * to each:
 *
 *   1st header           \
 *   i1480 tx header      |  fragment 1
 *   fragment data        /
 *   nxt header           \  fragment 2
 *   fragment data        /
 *   ..
 *   ..
 *   last header          \  fragment 3
 *   last fragment data   /
 *
 * This does not fill the i1480 TX header, it is left up to the
 * caller to do that; you can get it from @wtx->wlp_tx_hdr.
 *
 * This function consumes the skb unless there is an error.
 */
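
/*
 * Worked example (illustrative only; the real i1480u_MAX_FRG_SIZE comes
 * from i1480u-wlp.h): if i1480u_MAX_FRG_SIZE were 512 bytes, a 1200 byte
 * skb would yield a first fragment carrying
 * 512 - sizeof(struct untd_hdr_1st) - sizeof(struct wlp_tx_hdr) payload
 * bytes, and the remainder would be split into
 * DIV_ROUND_UP(remainder, i1480u_MAX_PL_SIZE) untd_hdr_rst-prefixed
 * fragments, all packed back to back into the single wtx->buf bulk
 * buffer.
 */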
static
int i1480u_tx_create_n(struct i1480u_tx *wtx, struct sk_buff *skb,
		       gfp_t gfp_mask)
{
	int result;
	void *pl_itr, *buf_itr;
	size_t pl_size, pl_size_left, frgs, pl_size_1st, frg_pl_size = 0;
	struct untd_hdr_1st *untd_hdr_1st;
	struct wlp_tx_hdr *wlp_tx_hdr;
	struct untd_hdr_rst *untd_hdr_rst;

	wtx->skb = NULL;	/* multi-fragment: payload is copied into wtx->buf */
	pl_itr = skb->data;
	pl_size = skb->len;
	pl_size_left = pl_size;	/* payload size */
	/* First fragment; fits as much as i1480u_MAX_FRG_SIZE minus
	 * the 1st fragment header and the i1480 TX header. */
	pl_size_1st = i1480u_MAX_FRG_SIZE
		- sizeof(struct untd_hdr_1st) - sizeof(struct wlp_tx_hdr);
	BUG_ON(pl_size_1st > pl_size);
	pl_size_left -= pl_size_1st;
	/* The rest have a smaller header (no i1480 TX header). We
	 * need to break up the payload in blocks smaller than
	 * i1480u_MAX_PL_SIZE (payload excluding header). */
	frgs = (pl_size_left + i1480u_MAX_PL_SIZE - 1) / i1480u_MAX_PL_SIZE;
	/* Allocate space for the new buffer. In this new buffer we'll
	 * place the headers followed by the data fragment, headers,
	 * data fragments, etc. */
	wtx->buf_size = sizeof(*untd_hdr_1st)
		+ sizeof(*wlp_tx_hdr)
		+ frgs * sizeof(*untd_hdr_rst)
		+ pl_size;
	wtx->buf = kmalloc(wtx->buf_size, gfp_mask);
	if (wtx->buf == NULL) {
		result = -ENOMEM;
		goto error_buf_alloc;
	}
	buf_itr = wtx->buf;	/* We got the space, let's fill it up */
	/* Fill 1st fragment */
	untd_hdr_1st = buf_itr;
	buf_itr += sizeof(*untd_hdr_1st);
	untd_hdr_set_type(&untd_hdr_1st->hdr, i1480u_PKT_FRAG_1ST);
	untd_hdr_set_rx_tx(&untd_hdr_1st->hdr, 0);
	untd_hdr_1st->hdr.len = cpu_to_le16(pl_size + sizeof(*wlp_tx_hdr));
	untd_hdr_1st->fragment_len =
		cpu_to_le16(pl_size_1st + sizeof(*wlp_tx_hdr));
	memset(untd_hdr_1st->padding, 0, sizeof(untd_hdr_1st->padding));
	/* Set up i1480 header info */
	wlp_tx_hdr = wtx->wlp_tx_hdr = buf_itr;
	buf_itr += sizeof(*wlp_tx_hdr);
	/* Copy the first fragment */
	memcpy(buf_itr, pl_itr, pl_size_1st);
	pl_itr += pl_size_1st;
	buf_itr += pl_size_1st;
	/* Now do each remaining fragment */
	result = -EINVAL;
	while (pl_size_left > 0) {
		d_printf(5, NULL, "ITR HDR: pl_size_left %zu buf_itr %zu\n",
			 pl_size_left, buf_itr - wtx->buf);
		if (buf_itr + sizeof(*untd_hdr_rst) - wtx->buf
		    > wtx->buf_size) {
			printk(KERN_ERR "BUG: no space for header\n");
			goto error_bug;
		}
		d_printf(5, NULL, "ITR HDR 2: pl_size_left %zu buf_itr %zu\n",
			 pl_size_left, buf_itr - wtx->buf);
		untd_hdr_rst = buf_itr;
		buf_itr += sizeof(*untd_hdr_rst);
		if (pl_size_left > i1480u_MAX_PL_SIZE) {
			frg_pl_size = i1480u_MAX_PL_SIZE;
			untd_hdr_set_type(&untd_hdr_rst->hdr, i1480u_PKT_FRAG_NXT);
		} else {
			frg_pl_size = pl_size_left;
			untd_hdr_set_type(&untd_hdr_rst->hdr, i1480u_PKT_FRAG_LST);
		}
		d_printf(5, NULL,
			 "ITR PL: pl_size_left %zu buf_itr %zu frg_pl_size %zu\n",
			 pl_size_left, buf_itr - wtx->buf, frg_pl_size);
		untd_hdr_set_rx_tx(&untd_hdr_rst->hdr, 0);
		untd_hdr_rst->hdr.len = cpu_to_le16(frg_pl_size);
		untd_hdr_rst->padding = 0;
		if (buf_itr + frg_pl_size - wtx->buf
		    > wtx->buf_size) {
			printk(KERN_ERR "BUG: no space for payload\n");
			goto error_bug;
		}
		memcpy(buf_itr, pl_itr, frg_pl_size);
		buf_itr += frg_pl_size;
		pl_itr += frg_pl_size;
		pl_size_left -= frg_pl_size;
		d_printf(5, NULL,
			 "ITR PL 2: pl_size_left %zu buf_itr %zu frg_pl_size %zu\n",
			 pl_size_left, buf_itr - wtx->buf, frg_pl_size);
	}
	dev_kfree_skb_irq(skb);
	return 0;

error_bug:
	printk(KERN_ERR
	       "BUG: skb %u bytes\n"
	       "BUG: frg_pl_size %zd i1480u_MAX_FRG_SIZE %u\n"
	       "BUG: buf_itr %zu buf_size %zu pl_size_left %zu\n",
	       skb->len,
	       frg_pl_size, i1480u_MAX_FRG_SIZE,
	       buf_itr - wtx->buf, wtx->buf_size, pl_size_left);
	kfree(wtx->buf);
error_buf_alloc:
	return result;
}
/*
 * Given a buffer that fits in a single fragment, fill out a @wtx
 * struct for transmitting it down the USB pipe.
 *
 * Uses the fact that we have space reserved in front of the skbuff
 * for hardware headers :]
 *
 * This does not fill the i1480 TX header, it is left up to the
 * caller to do that; you can get it from @wtx->wlp_tx_hdr.
 *
 * @wtx:	tx descriptor
 * @skb:	skb holding the payload
 * @gfp_mask:	gfp allocation mask (unused; nothing is allocated here)
 *
 * This function does not consume the @skb.
 */
static
int i1480u_tx_create_1(struct i1480u_tx *wtx, struct sk_buff *skb,
		       gfp_t gfp_mask)
{
	struct untd_hdr_cmp *untd_hdr_cmp;
	struct wlp_tx_hdr *wlp_tx_hdr;

	wtx->buf = NULL;	/* single fragment: transmit straight from the skb */
	wtx->skb = skb;
	BUG_ON(skb_headroom(skb) < sizeof(*wlp_tx_hdr));
	wlp_tx_hdr = (void *) __skb_push(skb, sizeof(*wlp_tx_hdr));
	wtx->wlp_tx_hdr = wlp_tx_hdr;
	BUG_ON(skb_headroom(skb) < sizeof(*untd_hdr_cmp));
	untd_hdr_cmp = (void *) __skb_push(skb, sizeof(*untd_hdr_cmp));

	untd_hdr_set_type(&untd_hdr_cmp->hdr, i1480u_PKT_FRAG_CMP);
	untd_hdr_set_rx_tx(&untd_hdr_cmp->hdr, 0);
	untd_hdr_cmp->hdr.len = cpu_to_le16(skb->len - sizeof(*untd_hdr_cmp));
	untd_hdr_cmp->padding = 0;
	return 0;
}
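
/*
 * After i1480u_tx_create_1() the skb looks like this (both headers
 * prepended into the pre-reserved headroom, payload untouched):
 *
 *   skb->data -> [untd_hdr_cmp][wlp_tx_hdr][original frame]
 *
 * and untd_hdr_cmp->hdr.len covers the wlp_tx_hdr plus the frame.
 */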
/*
 * Given an skb to transmit, massage it to become palatable for the TX pipe.
 *
 * This will break the buffer in chunks smaller than
 * i1480u_MAX_FRG_SIZE and add proper headers to each:
 *
 *   1st header           \
 *   i1480 tx header      |  fragment 1
 *   fragment data        /
 *   nxt header           \  fragment 2
 *   fragment data        /
 *   ..
 *   ..
 *   last header          \  fragment 3
 *   last fragment data   /
 *
 * Each fragment will always be smaller than or equal to i1480u_MAX_FRG_SIZE.
 *
 * If the whole payload fits in a single fragment, then the
 * following is composed instead:
 *
 *   complete header      \
 *   i1480 tx header      |  single fragment
 *   fragment data        /
 *
 * We were going to use s/g support, but because the interface is
 * synchronous and in the end there is plenty of overhead to do it,
 * it didn't seem worth it for data that is going to be smaller than
 * one page.
 */
static
struct i1480u_tx *i1480u_tx_create(struct i1480u *i1480u,
				   struct sk_buff *skb, gfp_t gfp_mask)
{
	int result;
	struct usb_endpoint_descriptor *epd;
	int usb_pipe;
	unsigned long flags;
	struct i1480u_tx *wtx;
	const size_t pl_max_size =
		i1480u_MAX_FRG_SIZE - sizeof(struct untd_hdr_cmp)
		- sizeof(struct wlp_tx_hdr);

	wtx = kmalloc(sizeof(*wtx), gfp_mask);
	if (wtx == NULL)
		goto error_wtx_alloc;
	wtx->urb = usb_alloc_urb(0, gfp_mask);
	if (wtx->urb == NULL)
		goto error_urb_alloc;
	epd = &i1480u->usb_iface->cur_altsetting->endpoint[2].desc;
	usb_pipe = usb_sndbulkpipe(i1480u->usb_dev, epd->bEndpointAddress);
	/* Fits in a single complete packet or need to split? */
	if (skb->len > pl_max_size) {
		result = i1480u_tx_create_n(wtx, skb, gfp_mask);
		if (result < 0)
			goto error_create;
		usb_fill_bulk_urb(wtx->urb, i1480u->usb_dev, usb_pipe,
				  wtx->buf, wtx->buf_size, i1480u_tx_cb, wtx);
	} else {
		result = i1480u_tx_create_1(wtx, skb, gfp_mask);
		if (result < 0)
			goto error_create;
		usb_fill_bulk_urb(wtx->urb, i1480u->usb_dev, usb_pipe,
				  skb->data, skb->len, i1480u_tx_cb, wtx);
	}
	spin_lock_irqsave(&i1480u->tx_list_lock, flags);
	list_add(&wtx->list_node, &i1480u->tx_list);
	spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
	return wtx;

error_create:
	usb_free_urb(wtx->urb);
error_urb_alloc:
	kfree(wtx);
error_wtx_alloc:
	return NULL;
}
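
/*
 * Note: endpoint[2] of the current altsetting is assumed to be the
 * bulk-out data endpoint of this device; the pipe is rebuilt from its
 * descriptor on every TX rather than being cached at probe time.
 */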
/*
 * Actual fragmentation and transmission of frame
 *
 * @wlp:  WLP substack data structure
 * @skb:  To be transmitted
 * @dst:  Device address of destination
 * @returns: 0 on success, < 0 on failure
 *
 * This function can also be called directly (not just from
 * hard_start_xmit), so we also check here if the interface is up before
 * sending anything.
 */
int i1480u_xmit_frame(struct wlp *wlp, struct sk_buff *skb,
		      struct uwb_dev_addr *dst)
{
	int result = -ENXIO;
	struct i1480u *i1480u = container_of(wlp, struct i1480u, wlp);
	struct device *dev = &i1480u->usb_iface->dev;
	struct net_device *net_dev = i1480u->net_dev;
	struct i1480u_tx *wtx;
	struct wlp_tx_hdr *wlp_tx_hdr;
	static unsigned char dev_bcast[2] = { 0xff, 0xff };
#if 0
	int lockup = 50;
#endif

	d_fnstart(6, dev, "(skb %p (%u), net_dev %p)\n", skb, skb->len,
		  net_dev);
	BUG_ON(i1480u->wlp.rc == NULL);
	if ((net_dev->flags & IFF_UP) == 0)
		goto out;
	result = -EBUSY;
	if (atomic_read(&i1480u->tx_inflight.count) >= i1480u->tx_inflight.max) {
		if (d_test(2) && printk_ratelimit())
			d_printf(2, dev, "Max frames in flight, "
				 "stopping queue.\n");
		netif_stop_queue(net_dev);
		goto error_max_inflight;
	}
	result = -ENOMEM;
	wtx = i1480u_tx_create(i1480u, skb, GFP_ATOMIC);
	if (unlikely(wtx == NULL)) {
		if (printk_ratelimit())
			dev_err(dev, "TX: no memory for WLP TX URB, "
				"dropping packet (in flight %d)\n",
				atomic_read(&i1480u->tx_inflight.count));
		netif_stop_queue(net_dev);
		goto error_wtx_alloc;
	}
	wtx->i1480u = i1480u;
	/* Fill out the i1480 header; @i1480u->def_tx_hdr is read without
	 * locking. We do so because they are kind of orthogonal to
	 * each other (and thus not changed in an atomic batch).
	 * The ETH header is right after the WLP TX header. */
	wlp_tx_hdr = wtx->wlp_tx_hdr;
	*wlp_tx_hdr = i1480u->options.def_tx_hdr;
	wlp_tx_hdr->dstaddr = *dst;
	if (!memcmp(&wlp_tx_hdr->dstaddr, dev_bcast, sizeof(dev_bcast))
	    && (wlp_tx_hdr_delivery_id_type(wlp_tx_hdr) & WLP_DRP)) {
		/* Broadcast message directed to DRP host. Send as best
		 * effort on PCA. */
		wlp_tx_hdr_set_delivery_id_type(wlp_tx_hdr,
					i1480u->options.pca_base_priority);
	}
#if 0
	dev_info(dev, "TX delivering skb -> USB, %u bytes\n", skb->len);
	dump_bytes(dev, skb->data, skb->len > 72 ? 72 : skb->len);
#endif
#if 0
	/* Simulates a device lockup after every 'lockup' packets */
	if (lockup && ((i1480u->stats.tx_packets + 1) % lockup) == 0) {
		/* Simulate a dropped transmit interrupt */
		net_dev->trans_start = jiffies;
		netif_stop_queue(net_dev);
		dev_err(dev, "Simulate lockup at %ld\n", jiffies);
		return result;
	}
#endif
	result = usb_submit_urb(wtx->urb, GFP_ATOMIC);	/* Go baby */
	if (result < 0) {
		dev_err(dev, "TX: cannot submit URB: %d\n", result);
		/* We leave the freeing of skb to the calling function */
		wtx->skb = NULL;
		goto error_tx_urb_submit;
	}
	atomic_inc(&i1480u->tx_inflight.count);
	net_dev->trans_start = jiffies;
	d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len,
		net_dev, result);
	return result;

error_tx_urb_submit:
	i1480u_tx_destroy(i1480u, wtx);
error_wtx_alloc:
error_max_inflight:
out:
	d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len,
		net_dev, result);
	return result;
}
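
/*
 * skb ownership on the error path above: i1480u_tx_create_n() has
 * already consumed the skb (it copied the payload into wtx->buf), while
 * i1480u_tx_create_1() has not; clearing wtx->skb before
 * i1480u_tx_destroy() keeps the context teardown from freeing an skb
 * that the caller (i1480u_hard_start_xmit()) will free itself.
 */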
/*
 * Transmit an skb. Called when an skbuff has to be transmitted.
 *
 * The skb is first passed to the WLP substack to ensure this is a valid
 * frame. If valid, the device address of the destination will be filled in
 * and the WLP header prepended to the skb. If this step fails we fake
 * sending the frame; if we returned an error the network stack would just
 * keep trying.
 *
 * Broadcast frames inside a WSS need special treatment as multicast is
 * not supported. A broadcast frame is sent as unicast to each member of the
 * WSS - this is done by the WLP substack when it finds a broadcast frame.
 * So, we test if the WLP substack took over the skb and only transmit it
 * if it has not (been taken over).
 *
 * @net_dev->xmit_lock is held
 */
int i1480u_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
{
	int result = -ENXIO;
	struct i1480u *i1480u = netdev_priv(net_dev);
	struct device *dev = &i1480u->usb_iface->dev;
	struct uwb_dev_addr dst;

	d_fnstart(6, dev, "(skb %p (%u), net_dev %p)\n", skb, skb->len,
		  net_dev);
	BUG_ON(i1480u->wlp.rc == NULL);
	if ((net_dev->flags & IFF_UP) == 0)
		goto error;
	result = wlp_prepare_tx_frame(dev, &i1480u->wlp, skb, &dst);
	if (result < 0) {
		dev_err(dev, "WLP verification of TX frame failed (%d). "
			"Dropping packet.\n", result);
		goto error;
	} else if (result == 1) {
		/* WLP took over the skb (e.g. broadcast expansion); it will
		 * set trans_start when it actually transmits the frame. */
		d_printf(6, dev, "WLP will transmit frame.\n");
		goto out;
	}
	d_printf(6, dev, "Transmitting frame.\n");
	result = i1480u_xmit_frame(&i1480u->wlp, skb, &dst);
	if (result < 0) {
		dev_err(dev, "Frame TX failed (%d).\n", result);
		goto error;
	}
	d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len,
		net_dev, result);
	return NETDEV_TX_OK;
error:
	dev_kfree_skb_any(skb);
	i1480u->stats.tx_dropped++;
out:
	d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len,
		net_dev, result);
	return NETDEV_TX_OK;
}
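
/*
 * Note that both the error and success paths return NETDEV_TX_OK: a
 * dropped frame is accounted in tx_dropped and the skb consumed, since
 * returning NETDEV_TX_BUSY here would just make the stack requeue and
 * retry a frame we already know we cannot send.
 */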
/*
 * Called when a packet transmission doesn't complete in a reasonable period.
 * Device reset may sleep - do it outside of interrupt context (delayed).
 */
void i1480u_tx_timeout(struct net_device *net_dev)
{
	struct i1480u *i1480u = netdev_priv(net_dev);

	wlp_reset_all(&i1480u->wlp);
}
void i1480u_tx_release(struct i1480u *i1480u)
{
	unsigned long flags;
	struct i1480u_tx *wtx, *next;
	int count = 0, empty;

	spin_lock_irqsave(&i1480u->tx_list_lock, flags);
	list_for_each_entry_safe(wtx, next, &i1480u->tx_list, list_node) {
		count++;
		usb_unlink_urb(wtx->urb);
	}
	spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
	count = count * 10;	/* wait 200ms per unlinked urb (intervals of 20ms) */
	/*
	 * We don't like this solution too much (dirty as it is), but
	 * it is cheaper than putting a refcount on each i1480u_tx and
	 * waiting for all of them to go away...
	 *
	 * Called when no more packets can be added to tx_list, so we
	 * can wait for it to be empty.
	 */
	while (count-- > 0) {
		spin_lock_irqsave(&i1480u->tx_list_lock, flags);
		empty = list_empty(&i1480u->tx_list);
		spin_unlock_irqrestore(&i1480u->tx_list_lock, flags);
		if (empty)
			break;
		msleep(20);
	}
}