drivers/usb/wusbcore/wa-xfer.c
1 /*
2  * WUSB Wire Adapter
 3  * Data transfer and URB enqueuing
4  *
5  * Copyright (C) 2005-2006 Intel Corporation
6  * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7  *
8  * This program is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU General Public License version
10  * 2 as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20  * 02110-1301, USA.
21  *
22  *
 23  * How transfers work: get a buffer, break it up into segments (segment
24  * size is a multiple of the maxpacket size). For each segment issue a
25  * segment request (struct wa_xfer_*), then send the data buffer if
26  * out or nothing if in (all over the DTO endpoint).
27  *
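 *      For example (illustrative numbers only, not from any particular
 *      device): an 8000 byte outbound URB on an rpipe with a 3584 byte
 *      segment size is split into three segments (3584 + 3584 + 832
 *      bytes); each one gets its own wa_xfer_* request sent over the
 *      DTO endpoint, followed by its chunk of the data buffer.
 *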
28  * For each submitted segment request, a notification will come over
29  * the NEP endpoint and a transfer result (struct xfer_result) will
30  * arrive in the DTI URB. Read it, get the xfer ID, see if there is
31  * data coming (inbound transfer), schedule a read and handle it.
32  *
33  * Sounds simple, it is a pain to implement.
34  *
35  *
36  * ENTRY POINTS
37  *
38  *   FIXME
39  *
40  * LIFE CYCLE / STATE DIAGRAM
41  *
42  *   FIXME
43  *
44  * THIS CODE IS DISGUSTING
45  *
46  *   Warned you are; it's my second try and still not happy with it.
47  *
48  * NOTES:
49  *
50  *   - No iso
51  *
52  *   - Supports DMA xfers, control, bulk and maybe interrupt
53  *
54  *   - Does not recycle unused rpipes
55  *
56  *     An rpipe is assigned to an endpoint the first time it is used,
57  *     and then it's there, assigned, until the endpoint is disabled
 58  *     (destroyed [{h,d}wahc_op_ep_disable()]). The assignment of the
59  *     rpipe to the endpoint is done under the wa->rpipe_sem semaphore
60  *     (should be a mutex).
61  *
 62  *     There are two ways this could be done:
63  *
 64  *     (a) set up a timer every time an rpipe's use count drops to 1
65  *         (which means unused) or when a transfer ends. Reset the
66  *         timer when a xfer is queued. If the timer expires, release
67  *         the rpipe [see rpipe_ep_disable()].
68  *
69  *     (b) when looking for free rpipes to attach [rpipe_get_by_ep()],
 70  *         when none are found, go over the list, check their endpoint
 71  *         and their activity record (if there is no last-xfer-done-ts in
 72  *         the last x seconds), and take it
73  *
 74  *     However, due to the fact that we have a set of limited
 75  *     resources (max-segments-at-the-same-time per xfer,
 76  *     xfers-per-rpipe, blocks-per-rpipe, rpipes-per-host), in the end
 77  *     we are going to have to rebuild all this based on a scheduler,
 78  *     where we have a list of transactions to do and, based on the
 79  *     availability of the different required components (blocks,
 80  *     rpipes, segment slots, etc), we go scheduling them. Painful.
81  */
82 #include <linux/init.h>
83 #include <linux/spinlock.h>
84 #include <linux/hash.h>
85 #include "wa-hc.h"
86 #include "wusbhc.h"
87
88 #undef D_LOCAL
89 #define D_LOCAL 0 /* 0 disabled, > 0 different levels... */
90 #include <linux/uwb/debug.h>
91
92 enum {
93         WA_SEGS_MAX = 255,
94 };
95
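/*
 * Rough segment state progression as used in this file (informational
 * summary only, not enforced anywhere): segments are READY after
 * __wa_xfer_setup_segs(), become DELAYED if the rpipe has no room or
 * SUBMITTED once their URBs go out, move to PENDING when the request
 * URB (or, for outbound data, the DTO URB) completes, to DTI_PENDING
 * while inbound data is being read, and finally to DONE, ERROR or
 * ABORTED.
 */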
96 enum wa_seg_status {
97         WA_SEG_NOTREADY,
98         WA_SEG_READY,
99         WA_SEG_DELAYED,
100         WA_SEG_SUBMITTED,
101         WA_SEG_PENDING,
102         WA_SEG_DTI_PENDING,
103         WA_SEG_DONE,
104         WA_SEG_ERROR,
105         WA_SEG_ABORTED,
106 };
107
108 static void wa_xfer_delayed_run(struct wa_rpipe *);
109
110 /*
111  * Life cycle governed by 'struct urb' (the refcount of the struct is
112  * that of the 'struct urb' and usb_free_urb() would free the whole
113  * struct).
114  */
115 struct wa_seg {
116         struct urb urb;
117         struct urb *dto_urb;            /* for data output? */
118         struct list_head list_node;     /* for rpipe->req_list */
119         struct wa_xfer *xfer;           /* out xfer */
120         u8 index;                       /* which segment we are */
121         enum wa_seg_status status;
122         ssize_t result;                 /* bytes xfered or error */
123         struct wa_xfer_hdr xfer_hdr;
124         u8 xfer_extra[];                /* xtra space for xfer_hdr_ctl */
125 };
126
127 static void wa_seg_init(struct wa_seg *seg)
128 {
 129         /* usb_init_urb() repeats a lot of work we don't need, so we just init the kref here */
130         kref_init(&seg->urb.kref);
131 }
132
133 /*
134  * Protected by xfer->lock
135  *
136  */
137 struct wa_xfer {
138         struct kref refcnt;
139         struct list_head list_node;
140         spinlock_t lock;
141         u32 id;
142
143         struct wahc *wa;                /* Wire adapter we are plugged to */
144         struct usb_host_endpoint *ep;
145         struct urb *urb;                /* URB we are transfering for */
146         struct wa_seg **seg;            /* transfer segments */
147         u8 segs, segs_submitted, segs_done;
148         unsigned is_inbound:1;
149         unsigned is_dma:1;
150         size_t seg_size;
151         int result;
152
153         gfp_t gfp;                      /* allocation mask */
154
155         struct wusb_dev *wusb_dev;      /* for activity timestamps */
156 };
157
158 static inline void wa_xfer_init(struct wa_xfer *xfer)
159 {
160         kref_init(&xfer->refcnt);
161         INIT_LIST_HEAD(&xfer->list_node);
162         spin_lock_init(&xfer->lock);
163 }
164
165 /*
 166  * Destroy a transfer structure
167  *
168  * Note that the xfer->seg[index] thingies follow the URB life cycle,
169  * so we need to put them, not free them.
170  */
171 static void wa_xfer_destroy(struct kref *_xfer)
172 {
173         struct wa_xfer *xfer = container_of(_xfer, struct wa_xfer, refcnt);
174         if (xfer->seg) {
175                 unsigned cnt;
176                 for (cnt = 0; cnt < xfer->segs; cnt++) {
 177                         if (!xfer->is_inbound)
178                                 usb_put_urb(xfer->seg[cnt]->dto_urb);
179                         usb_put_urb(&xfer->seg[cnt]->urb);
180                 }
181         }
182         kfree(xfer);
183         d_printf(2, NULL, "xfer %p destroyed\n", xfer);
184 }
185
186 static void wa_xfer_get(struct wa_xfer *xfer)
187 {
188         kref_get(&xfer->refcnt);
189 }
190
191 static void wa_xfer_put(struct wa_xfer *xfer)
192 {
193         d_fnstart(3, NULL, "(xfer %p) -- ref count bef put %d\n",
194                     xfer, atomic_read(&xfer->refcnt.refcount));
195         kref_put(&xfer->refcnt, wa_xfer_destroy);
196         d_fnend(3, NULL, "(xfer %p) = void\n", xfer);
197 }
198
199 /*
200  * xfer is referenced
201  *
202  * xfer->lock has to be unlocked
203  *
204  * We take xfer->lock for setting the result; this is a barrier
205  * against drivers/usb/core/hcd.c:unlink1() being called after we call
206  * usb_hcd_giveback_urb() and wa_urb_dequeue() trying to get a
207  * reference to the transfer.
208  */
209 static void wa_xfer_giveback(struct wa_xfer *xfer)
210 {
211         unsigned long flags;
212         d_fnstart(3, NULL, "(xfer %p)\n", xfer);
213         spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags);
214         list_del_init(&xfer->list_node);
215         spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags);
216         /* FIXME: segmentation broken -- kills DWA */
217         wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result);
218         wa_put(xfer->wa);
219         wa_xfer_put(xfer);
220         d_fnend(3, NULL, "(xfer %p) = void\n", xfer);
221 }
222
223 /*
224  * xfer is referenced
225  *
226  * xfer->lock has to be unlocked
227  */
228 static void wa_xfer_completion(struct wa_xfer *xfer)
229 {
230         d_fnstart(3, NULL, "(xfer %p)\n", xfer);
231         if (xfer->wusb_dev)
232                 wusb_dev_put(xfer->wusb_dev);
233         rpipe_put(xfer->ep->hcpriv);
234         wa_xfer_giveback(xfer);
235         d_fnend(3, NULL, "(xfer %p) = void\n", xfer);
236         return;
237 }
238
239 /*
240  * If transfer is done, wrap it up and return true
241  *
242  * xfer->lock has to be locked
243  */
244 static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
245 {
246         unsigned result, cnt;
247         struct wa_seg *seg;
248         struct urb *urb = xfer->urb;
249         unsigned found_short = 0;
250
251         d_fnstart(3, NULL, "(xfer %p)\n", xfer);
252         result = xfer->segs_done == xfer->segs_submitted;
253         if (result == 0)
254                 goto out;
255         urb->actual_length = 0;
256         for (cnt = 0; cnt < xfer->segs; cnt++) {
257                 seg = xfer->seg[cnt];
258                 switch (seg->status) {
259                 case WA_SEG_DONE:
260                         if (found_short && seg->result > 0) {
261                                 if (printk_ratelimit())
262                                         printk(KERN_ERR "xfer %p#%u: bad short "
263                                                "segments (%zu)\n", xfer, cnt,
264                                                seg->result);
265                                 urb->status = -EINVAL;
266                                 goto out;
267                         }
268                         urb->actual_length += seg->result;
269                         if (seg->result < xfer->seg_size
270                             && cnt != xfer->segs-1)
271                                 found_short = 1;
272                         d_printf(2, NULL, "xfer %p#%u: DONE short %d "
273                                  "result %zu urb->actual_length %d\n",
274                                  xfer, seg->index, found_short, seg->result,
275                                  urb->actual_length);
276                         break;
277                 case WA_SEG_ERROR:
278                         xfer->result = seg->result;
279                         d_printf(2, NULL, "xfer %p#%u: ERROR result %zu\n",
280                                  xfer, seg->index, seg->result);
281                         goto out;
282                 case WA_SEG_ABORTED:
283                         WARN_ON(urb->status != -ECONNRESET
284                                 && urb->status != -ENOENT);
285                         d_printf(2, NULL, "xfer %p#%u ABORTED: result %d\n",
286                                  xfer, seg->index, urb->status);
287                         xfer->result = urb->status;
288                         goto out;
289                 default:
290                         /* if (printk_ratelimit()) */
291                                 printk(KERN_ERR "xfer %p#%u: "
292                                        "is_done bad state %d\n",
293                                        xfer, cnt, seg->status);
294                         xfer->result = -EINVAL;
295                         WARN_ON(1);
296                         goto out;
297                 }
298         }
299         xfer->result = 0;
300 out:
301         d_fnend(3, NULL, "(xfer %p) = void\n", xfer);
302         return result;
303 }
304
305 /*
306  * Initialize a transfer's ID
307  *
308  * We need to use a sequential number; if we use the pointer or the
309  * hash of the pointer, it can repeat over sequential transfers and
310  * then it will confuse the HWA....wonder why in hell they put a 32
311  * bit handle in there then.
312  */
313 static void wa_xfer_id_init(struct wa_xfer *xfer)
314 {
315         xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
316 }
317
318 /*
319  * Return the xfer's ID associated with xfer
320  *
321  * Need to generate a
322  */
323 static u32 wa_xfer_id(struct wa_xfer *xfer)
324 {
325         return xfer->id;
326 }
327
328 /*
 329  * Search for a transfer by ID on the wire adapter's transfer list
 330  *
 331  * The ID is the sequential number assigned by wa_xfer_id_init(); on
 332  * success a reference to the transfer is taken.
 333  *
 334  * @returns NULL if not found.
335  */
336 static struct wa_xfer *wa_xfer_get_by_id(struct wahc *wa, u32 id)
337 {
338         unsigned long flags;
339         struct wa_xfer *xfer_itr;
340         spin_lock_irqsave(&wa->xfer_list_lock, flags);
341         list_for_each_entry(xfer_itr, &wa->xfer_list, list_node) {
342                 if (id == xfer_itr->id) {
343                         wa_xfer_get(xfer_itr);
344                         goto out;
345                 }
346         }
347         xfer_itr = NULL;
348 out:
349         spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
350         return xfer_itr;
351 }
352
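/*
 * The URB must stay the first member: __wa_xfer_abort_cb() drops the
 * last reference on it and the resulting kfree() of the URB frees the
 * whole buffer (see __wa_xfer_abort() below).
 */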
353 struct wa_xfer_abort_buffer {
354         struct urb urb;
355         struct wa_xfer_abort cmd;
356 };
357
358 static void __wa_xfer_abort_cb(struct urb *urb)
359 {
360         struct wa_xfer_abort_buffer *b = urb->context;
361         usb_put_urb(&b->urb);
362 }
363
364 /*
365  * Aborts an ongoing transaction
366  *
367  * Assumes the transfer is referenced and locked and in a submitted
368  * state (mainly that there is an endpoint/rpipe assigned).
369  *
 370  * The callback (see above) does nothing but free up the data by
371  * putting the URB. Because the URB is allocated at the head of the
372  * struct, the whole space we allocated is kfreed.
373  *
 374  * We'll get an 'aborted transaction' xfer result on DTI, which we'll
 375  * politely ignore because at this point the transaction has been
376  * marked as aborted already.
377  */
378 static void __wa_xfer_abort(struct wa_xfer *xfer)
379 {
380         int result;
381         struct device *dev = &xfer->wa->usb_iface->dev;
382         struct wa_xfer_abort_buffer *b;
383         struct wa_rpipe *rpipe = xfer->ep->hcpriv;
384
385         b = kmalloc(sizeof(*b), GFP_ATOMIC);
386         if (b == NULL)
387                 goto error_kmalloc;
388         b->cmd.bLength =  sizeof(b->cmd);
389         b->cmd.bRequestType = WA_XFER_ABORT;
390         b->cmd.wRPipe = rpipe->descr.wRPipeIndex;
391         b->cmd.dwTransferID = wa_xfer_id(xfer);
392
393         usb_init_urb(&b->urb);
394         usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev,
395                 usb_sndbulkpipe(xfer->wa->usb_dev,
396                                 xfer->wa->dto_epd->bEndpointAddress),
397                 &b->cmd, sizeof(b->cmd), __wa_xfer_abort_cb, b);
398         result = usb_submit_urb(&b->urb, GFP_ATOMIC);
399         if (result < 0)
400                 goto error_submit;
401         return;                         /* callback frees! */
402
403
404 error_submit:
405         if (printk_ratelimit())
406                 dev_err(dev, "xfer %p: Can't submit abort request: %d\n",
407                         xfer, result);
408         kfree(b);
409 error_kmalloc:
410         return;
411
412 }
413
414 /*
 415  * Compute the transfer's segment size, segment count and request size
416  * @returns < 0 on error, transfer segment request size if ok
417  */
418 static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
419                                      enum wa_xfer_type *pxfer_type)
420 {
421         ssize_t result;
422         struct device *dev = &xfer->wa->usb_iface->dev;
423         size_t maxpktsize;
424         struct urb *urb = xfer->urb;
425         struct wa_rpipe *rpipe = xfer->ep->hcpriv;
426
427         d_fnstart(3, dev, "(xfer %p [rpipe %p] urb %p)\n",
428                   xfer, rpipe, urb);
429         switch (rpipe->descr.bmAttribute & 0x3) {
430         case USB_ENDPOINT_XFER_CONTROL:
431                 *pxfer_type = WA_XFER_TYPE_CTL;
432                 result = sizeof(struct wa_xfer_ctl);
433                 break;
434         case USB_ENDPOINT_XFER_INT:
435         case USB_ENDPOINT_XFER_BULK:
436                 *pxfer_type = WA_XFER_TYPE_BI;
437                 result = sizeof(struct wa_xfer_bi);
438                 break;
439         case USB_ENDPOINT_XFER_ISOC:
440                 dev_err(dev, "FIXME: ISOC not implemented\n");
441                 result = -ENOSYS;
442                 goto error;
443         default:
444                 /* never happens */
445                 BUG();
446                 result = -EINVAL;       /* shut gcc up */
447         };
448         xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0;
449         xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;
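        /* One rpipe block is 2^(bRPipeBlockSize - 1) bytes; the segment
         * size is wBlocks of those, per the rpipe descriptor. */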
450         xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
451                 * 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
452         /* Compute the segment size and make sure it is a multiple of
453          * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of
454          * a check (FIXME) */
455         maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);
456         if (xfer->seg_size < maxpktsize) {
457                 dev_err(dev, "HW BUG? seg_size %zu smaller than maxpktsize "
458                         "%zu\n", xfer->seg_size, maxpktsize);
459                 result = -EINVAL;
460                 goto error;
461         }
462         xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
463         xfer->segs = (urb->transfer_buffer_length + xfer->seg_size - 1)
464                 / xfer->seg_size;
465         if (xfer->segs >= WA_SEGS_MAX) {
466                 dev_err(dev, "BUG? ops, number of segments %d bigger than %d\n",
467                         (int)(urb->transfer_buffer_length / xfer->seg_size),
468                         WA_SEGS_MAX);
469                 result = -EINVAL;
470                 goto error;
471         }
472         if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
473                 xfer->segs = 1;
474 error:
475         d_fnend(3, dev, "(xfer %p [rpipe %p] urb %p) = %d\n",
476                 xfer, rpipe, urb, (int)result);
477         return result;
478 }
479
480 /** Fill in the common request header and xfer-type specific data. */
481 static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
482                                  struct wa_xfer_hdr *xfer_hdr0,
483                                  enum wa_xfer_type xfer_type,
484                                  size_t xfer_hdr_size)
485 {
486         struct wa_rpipe *rpipe = xfer->ep->hcpriv;
487
488         xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
489         xfer_hdr0->bLength = xfer_hdr_size;
490         xfer_hdr0->bRequestType = xfer_type;
491         xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex;
492         xfer_hdr0->dwTransferID = wa_xfer_id(xfer);
493         xfer_hdr0->bTransferSegment = 0;
494         switch (xfer_type) {
495         case WA_XFER_TYPE_CTL: {
496                 struct wa_xfer_ctl *xfer_ctl =
497                         container_of(xfer_hdr0, struct wa_xfer_ctl, hdr);
498                 xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0;
499                 BUG_ON(xfer->urb->transfer_flags & URB_NO_SETUP_DMA_MAP
500                        && xfer->urb->setup_packet == NULL);
501                 memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet,
502                        sizeof(xfer_ctl->baSetupData));
503                 break;
504         }
505         case WA_XFER_TYPE_BI:
506                 break;
507         case WA_XFER_TYPE_ISO:
508                 printk(KERN_ERR "FIXME: ISOC not implemented\n");
509         default:
510                 BUG();
511         };
512 }
513
514 /*
515  * Callback for the OUT data phase of the segment request
516  *
517  * Check wa_seg_cb(); most comments also apply here because this
518  * function does almost the same thing and they work closely
519  * together.
520  *
 521  * If the seg request has failed but this DTO phase has succeeded,
522  * wa_seg_cb() has already failed the segment and moved the
523  * status to WA_SEG_ERROR, so this will go through 'case 0' and
524  * effectively do nothing.
525  */
526 static void wa_seg_dto_cb(struct urb *urb)
527 {
528         struct wa_seg *seg = urb->context;
529         struct wa_xfer *xfer = seg->xfer;
530         struct wahc *wa;
531         struct device *dev;
532         struct wa_rpipe *rpipe;
533         unsigned long flags;
534         unsigned rpipe_ready = 0;
535         u8 done = 0;
536
537         d_fnstart(3, NULL, "(urb %p [%d])\n", urb, urb->status);
538         switch (urb->status) {
539         case 0:
540                 spin_lock_irqsave(&xfer->lock, flags);
541                 wa = xfer->wa;
542                 dev = &wa->usb_iface->dev;
543                 d_printf(2, dev, "xfer %p#%u: data out done (%d bytes)\n",
544                            xfer, seg->index, urb->actual_length);
545                 if (seg->status < WA_SEG_PENDING)
546                         seg->status = WA_SEG_PENDING;
547                 seg->result = urb->actual_length;
548                 spin_unlock_irqrestore(&xfer->lock, flags);
549                 break;
550         case -ECONNRESET:       /* URB unlinked; no need to do anything */
 551         case -ENOENT:           /* as it was done by whoever unlinked us */
552                 break;
553         default:                /* Other errors ... */
554                 spin_lock_irqsave(&xfer->lock, flags);
555                 wa = xfer->wa;
556                 dev = &wa->usb_iface->dev;
557                 rpipe = xfer->ep->hcpriv;
558                 if (printk_ratelimit())
559                         dev_err(dev, "xfer %p#%u: data out error %d\n",
560                                 xfer, seg->index, urb->status);
561                 if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
562                             EDC_ERROR_TIMEFRAME)){
563                         dev_err(dev, "DTO: URB max acceptable errors "
564                                 "exceeded, resetting device\n");
565                         wa_reset_all(wa);
566                 }
567                 if (seg->status != WA_SEG_ERROR) {
568                         seg->status = WA_SEG_ERROR;
569                         seg->result = urb->status;
570                         xfer->segs_done++;
571                         __wa_xfer_abort(xfer);
572                         rpipe_ready = rpipe_avail_inc(rpipe);
573                         done = __wa_xfer_is_done(xfer);
574                 }
575                 spin_unlock_irqrestore(&xfer->lock, flags);
576                 if (done)
577                         wa_xfer_completion(xfer);
578                 if (rpipe_ready)
579                         wa_xfer_delayed_run(rpipe);
580         }
581         d_fnend(3, NULL, "(urb %p [%d]) = void\n", urb, urb->status);
582 }
583
584 /*
585  * Callback for the segment request
586  *
 587  * If successful, transition state (unless already transitioned or
588  * outbound transfer); otherwise, take a note of the error, mark this
589  * segment done and try completion.
590  *
 591  * Note we don't access seg->xfer until we are sure that the transfer
 592  * hasn't been cancelled (ECONNRESET, ENOENT), because in that case
 593  * seg->xfer could be already gone.
594  *
595  * We have to check before setting the status to WA_SEG_PENDING
596  * because sometimes the xfer result callback arrives before this
597  * callback (geeeeeeze), so it might happen that we are already in
598  * another state. As well, we don't set it if the transfer is inbound,
599  * as in that case, wa_seg_dto_cb will do it when the OUT data phase
600  * finishes.
601  */
602 static void wa_seg_cb(struct urb *urb)
603 {
604         struct wa_seg *seg = urb->context;
605         struct wa_xfer *xfer = seg->xfer;
606         struct wahc *wa;
607         struct device *dev;
608         struct wa_rpipe *rpipe;
609         unsigned long flags;
610         unsigned rpipe_ready;
611         u8 done = 0;
612
613         d_fnstart(3, NULL, "(urb %p [%d])\n", urb, urb->status);
614         switch (urb->status) {
615         case 0:
616                 spin_lock_irqsave(&xfer->lock, flags);
617                 wa = xfer->wa;
618                 dev = &wa->usb_iface->dev;
619                 d_printf(2, dev, "xfer %p#%u: request done\n",
620                            xfer, seg->index);
621                 if (xfer->is_inbound && seg->status < WA_SEG_PENDING)
622                         seg->status = WA_SEG_PENDING;
623                 spin_unlock_irqrestore(&xfer->lock, flags);
624                 break;
625         case -ECONNRESET:       /* URB unlinked; no need to do anything */
 626         case -ENOENT:           /* as it was done by whoever unlinked us */
627                 break;
628         default:                /* Other errors ... */
629                 spin_lock_irqsave(&xfer->lock, flags);
630                 wa = xfer->wa;
631                 dev = &wa->usb_iface->dev;
632                 rpipe = xfer->ep->hcpriv;
633                 if (printk_ratelimit())
634                         dev_err(dev, "xfer %p#%u: request error %d\n",
635                                 xfer, seg->index, urb->status);
636                 if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
637                             EDC_ERROR_TIMEFRAME)){
638                         dev_err(dev, "DTO: URB max acceptable errors "
639                                 "exceeded, resetting device\n");
640                         wa_reset_all(wa);
641                 }
642                 usb_unlink_urb(seg->dto_urb);
643                 seg->status = WA_SEG_ERROR;
644                 seg->result = urb->status;
645                 xfer->segs_done++;
646                 __wa_xfer_abort(xfer);
647                 rpipe_ready = rpipe_avail_inc(rpipe);
648                 done = __wa_xfer_is_done(xfer);
649                 spin_unlock_irqrestore(&xfer->lock, flags);
650                 if (done)
651                         wa_xfer_completion(xfer);
652                 if (rpipe_ready)
653                         wa_xfer_delayed_run(rpipe);
654         }
655         d_fnend(3, NULL, "(urb %p [%d]) = void\n", urb, urb->status);
656 }
657
658 /*
659  * Allocate the segs array and initialize each of them
660  *
661  * The segments are freed by wa_xfer_destroy() when the xfer use count
662  * drops to zero; however, because each segment is given the same life
663  * cycle as the USB URB it contains, it is actually freed by
664  * usb_put_urb() on the contained USB URB (twisted, eh?).
665  */
666 static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
667 {
668         int result, cnt;
669         size_t alloc_size = sizeof(*xfer->seg[0])
670                 - sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;
671         struct usb_device *usb_dev = xfer->wa->usb_dev;
672         const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd;
673         struct wa_seg *seg;
674         size_t buf_itr, buf_size, buf_itr_size;
675
676         result = -ENOMEM;
677         xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC);
678         if (xfer->seg == NULL)
679                 goto error_segs_kzalloc;
680         buf_itr = 0;
681         buf_size = xfer->urb->transfer_buffer_length;
682         for (cnt = 0; cnt < xfer->segs; cnt++) {
683                 seg = xfer->seg[cnt] = kzalloc(alloc_size, GFP_ATOMIC);
684                 if (seg == NULL)
685                         goto error_seg_kzalloc;
686                 wa_seg_init(seg);
687                 seg->xfer = xfer;
688                 seg->index = cnt;
689                 usb_fill_bulk_urb(&seg->urb, usb_dev,
690                                   usb_sndbulkpipe(usb_dev,
691                                                   dto_epd->bEndpointAddress),
692                                   &seg->xfer_hdr, xfer_hdr_size,
693                                   wa_seg_cb, seg);
694                 buf_itr_size = buf_size > xfer->seg_size ?
695                         xfer->seg_size : buf_size;
696                 if (xfer->is_inbound == 0 && buf_size > 0) {
697                         seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC);
698                         if (seg->dto_urb == NULL)
699                                 goto error_dto_alloc;
700                         usb_fill_bulk_urb(
701                                 seg->dto_urb, usb_dev,
702                                 usb_sndbulkpipe(usb_dev,
703                                                 dto_epd->bEndpointAddress),
704                                 NULL, 0, wa_seg_dto_cb, seg);
705                         if (xfer->is_dma) {
706                                 seg->dto_urb->transfer_dma =
707                                         xfer->urb->transfer_dma + buf_itr;
708                                 seg->dto_urb->transfer_flags |=
709                                         URB_NO_TRANSFER_DMA_MAP;
710                         } else
711                                 seg->dto_urb->transfer_buffer =
712                                         xfer->urb->transfer_buffer + buf_itr;
713                         seg->dto_urb->transfer_buffer_length = buf_itr_size;
714                 }
715                 seg->status = WA_SEG_READY;
716                 buf_itr += buf_itr_size;
717                 buf_size -= buf_itr_size;
718         }
719         return 0;
720
721 error_dto_alloc:
722         kfree(xfer->seg[cnt]);
723         cnt--;
724 error_seg_kzalloc:
 725         /* use the fact that cnt is left at where it failed */
726         for (; cnt > 0; cnt--) {
727                 if (xfer->is_inbound == 0)
728                         kfree(xfer->seg[cnt]->dto_urb);
729                 kfree(xfer->seg[cnt]);
730         }
731 error_segs_kzalloc:
732         return result;
733 }
734
735 /*
736  * Allocates all the stuff needed to submit a transfer
737  *
738  * Breaks the whole data buffer in a list of segments, each one has a
739  * structure allocated to it and linked in xfer->seg[index]
740  *
741  * FIXME: merge setup_segs() and the last part of this function, no
742  *        need to do two for loops when we could run everything in a
743  *        single one
744  */
745 static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)
746 {
747         int result;
748         struct device *dev = &xfer->wa->usb_iface->dev;
749         enum wa_xfer_type xfer_type = 0; /* shut up GCC */
750         size_t xfer_hdr_size, cnt, transfer_size;
751         struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr;
752
753         d_fnstart(3, dev, "(xfer %p [rpipe %p] urb %p)\n",
754                   xfer, xfer->ep->hcpriv, urb);
755
756         result = __wa_xfer_setup_sizes(xfer, &xfer_type);
757         if (result < 0)
758                 goto error_setup_sizes;
759         xfer_hdr_size = result;
760         result = __wa_xfer_setup_segs(xfer, xfer_hdr_size);
761         if (result < 0) {
762                 dev_err(dev, "xfer %p: Failed to allocate %d segments: %d\n",
763                         xfer, xfer->segs, result);
764                 goto error_setup_segs;
765         }
766         /* Fill the first header */
767         xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
768         wa_xfer_id_init(xfer);
769         __wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size);
770
 771         /* Fill remaining headers */
772         xfer_hdr = xfer_hdr0;
773         transfer_size = urb->transfer_buffer_length;
 774         xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ?
 775                 cpu_to_le32(xfer->seg_size) : cpu_to_le32(transfer_size);
776         transfer_size -=  xfer->seg_size;
777         for (cnt = 1; cnt < xfer->segs; cnt++) {
778                 xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
779                 memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
780                 xfer_hdr->bTransferSegment = cnt;
781                 xfer_hdr->dwTransferLength = transfer_size > xfer->seg_size ?
782                         cpu_to_le32(xfer->seg_size)
783                         : cpu_to_le32(transfer_size);
784                 xfer->seg[cnt]->status = WA_SEG_READY;
785                 transfer_size -=  xfer->seg_size;
786         }
787         xfer_hdr->bTransferSegment |= 0x80;     /* this is the last segment */
788         result = 0;
789 error_setup_segs:
790 error_setup_sizes:
791         d_fnend(3, dev, "(xfer %p [rpipe %p] urb %p) = %d\n",
792                 xfer, xfer->ep->hcpriv, urb, result);
793         return result;
794 }
795
796 /*
 797  * Submit a segment's request URB and, for outbound data, its DTO URB
 798  *
799  * rpipe->seg_lock is held!
800  */
801 static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
802                            struct wa_seg *seg)
803 {
804         int result;
805         result = usb_submit_urb(&seg->urb, GFP_ATOMIC);
806         if (result < 0) {
807                 printk(KERN_ERR "xfer %p#%u: REQ submit failed: %d\n",
808                        xfer, seg->index, result);
809                 goto error_seg_submit;
810         }
811         if (seg->dto_urb) {
812                 result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
813                 if (result < 0) {
814                         printk(KERN_ERR "xfer %p#%u: DTO submit failed: %d\n",
815                                xfer, seg->index, result);
816                         goto error_dto_submit;
817                 }
818         }
819         seg->status = WA_SEG_SUBMITTED;
820         rpipe_avail_dec(rpipe);
821         return 0;
822
823 error_dto_submit:
824         usb_unlink_urb(&seg->urb);
825 error_seg_submit:
826         seg->status = WA_SEG_ERROR;
827         seg->result = result;
828         return result;
829 }
830
831 /*
 832  * Execute more queued request segments until the maximum concurrent allowed is reached
833  *
834  * The ugly unlock/lock sequence on the error path is needed as the
 835  * xfer->lock normally nests the seg_lock and not vice versa.
836  *
837  */
838 static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
839 {
840         int result;
841         struct device *dev = &rpipe->wa->usb_iface->dev;
842         struct wa_seg *seg;
843         struct wa_xfer *xfer;
844         unsigned long flags;
845
846         d_fnstart(1, dev, "(rpipe #%d) %d segments available\n",
847                   le16_to_cpu(rpipe->descr.wRPipeIndex),
848                   atomic_read(&rpipe->segs_available));
849         spin_lock_irqsave(&rpipe->seg_lock, flags);
850         while (atomic_read(&rpipe->segs_available) > 0
851               && !list_empty(&rpipe->seg_list)) {
852                 seg = list_entry(rpipe->seg_list.next, struct wa_seg,
853                                  list_node);
854                 list_del(&seg->list_node);
855                 xfer = seg->xfer;
856                 result = __wa_seg_submit(rpipe, xfer, seg);
857                 d_printf(1, dev, "xfer %p#%u submitted from delayed "
858                          "[%d segments available] %d\n",
859                          xfer, seg->index,
860                          atomic_read(&rpipe->segs_available), result);
861                 if (unlikely(result < 0)) {
862                         spin_unlock_irqrestore(&rpipe->seg_lock, flags);
863                         spin_lock_irqsave(&xfer->lock, flags);
864                         __wa_xfer_abort(xfer);
865                         xfer->segs_done++;
866                         spin_unlock_irqrestore(&xfer->lock, flags);
867                         spin_lock_irqsave(&rpipe->seg_lock, flags);
868                 }
869         }
870         spin_unlock_irqrestore(&rpipe->seg_lock, flags);
871         d_fnend(1, dev, "(rpipe #%d) = void, %d segments available\n",
872                 le16_to_cpu(rpipe->descr.wRPipeIndex),
873                 atomic_read(&rpipe->segs_available));
874
875 }
876
877 /*
878  *
879  * xfer->lock is taken
880  *
 881  * On a submit failure we just stop submitting and return an error;
882  * wa_urb_enqueue_b() will execute the completion path
883  */
884 static int __wa_xfer_submit(struct wa_xfer *xfer)
885 {
886         int result;
887         struct wahc *wa = xfer->wa;
888         struct device *dev = &wa->usb_iface->dev;
889         unsigned cnt;
890         struct wa_seg *seg;
891         unsigned long flags;
892         struct wa_rpipe *rpipe = xfer->ep->hcpriv;
893         size_t maxrequests = le16_to_cpu(rpipe->descr.wRequests);
894         u8 available;
895         u8 empty;
896
897         d_fnstart(3, dev, "(xfer %p [rpipe %p])\n",
898                   xfer, xfer->ep->hcpriv);
899
900         spin_lock_irqsave(&wa->xfer_list_lock, flags);
901         list_add_tail(&xfer->list_node, &wa->xfer_list);
902         spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
903
904         BUG_ON(atomic_read(&rpipe->segs_available) > maxrequests);
905         result = 0;
906         spin_lock_irqsave(&rpipe->seg_lock, flags);
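        /*
         * Submit each segment while the rpipe has room and nothing is
         * already queued on it; otherwise park the segment on
         * rpipe->seg_list for wa_xfer_delayed_run() to pick up later.
         */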
907         for (cnt = 0; cnt < xfer->segs; cnt++) {
908                 available = atomic_read(&rpipe->segs_available);
909                 empty = list_empty(&rpipe->seg_list);
910                 seg = xfer->seg[cnt];
911                 d_printf(2, dev, "xfer %p#%u: available %u empty %u (%s)\n",
912                          xfer, cnt, available, empty,
913                          available == 0 || !empty ? "delayed" : "submitted");
914                 if (available == 0 || !empty) {
915                         d_printf(1, dev, "xfer %p#%u: delayed\n", xfer, cnt);
916                         seg->status = WA_SEG_DELAYED;
917                         list_add_tail(&seg->list_node, &rpipe->seg_list);
918                 } else {
919                         result = __wa_seg_submit(rpipe, xfer, seg);
920                         if (result < 0)
921                                 goto error_seg_submit;
922                 }
923                 xfer->segs_submitted++;
924         }
925         spin_unlock_irqrestore(&rpipe->seg_lock, flags);
926         d_fnend(3, dev, "(xfer %p [rpipe %p]) = void\n", xfer,
927                 xfer->ep->hcpriv);
928         return result;
929
930 error_seg_submit:
931         __wa_xfer_abort(xfer);
932         spin_unlock_irqrestore(&rpipe->seg_lock, flags);
933         d_fnend(3, dev, "(xfer %p [rpipe %p]) = void\n", xfer,
934                 xfer->ep->hcpriv);
935         return result;
936 }
937
938 /*
 939  * Second part of enqueuing a URB/transfer
940  *
941  * Assumes this comes from wa_urb_enqueue() [maybe through
942  * wa_urb_enqueue_run()]. At this point:
943  *
944  * xfer->wa     filled and refcounted
945  * xfer->ep     filled with rpipe refcounted if
946  *              delayed == 0
947  * xfer->urb    filled and refcounted (this is the case when called
948  *              from wa_urb_enqueue() as we come from usb_submit_urb()
949  *              and when called by wa_urb_enqueue_run(), as we took an
950  *              extra ref dropped by _run() after we return).
951  * xfer->gfp    filled
952  *
953  * If we fail at __wa_xfer_submit(), then we just check if we are done
954  * and if so, we run the completion procedure. However, if we are not
955  * yet done, we do nothing and wait for the completion handlers from
956  * the submitted URBs or from the xfer-result path to kick in. If xfer
957  * result never kicks in, the xfer will timeout from the USB code and
958  * dequeue() will be called.
959  */
960 static void wa_urb_enqueue_b(struct wa_xfer *xfer)
961 {
962         int result;
963         unsigned long flags;
964         struct urb *urb = xfer->urb;
965         struct wahc *wa = xfer->wa;
966         struct wusbhc *wusbhc = wa->wusb;
967         struct device *dev = &wa->usb_iface->dev;
968         struct wusb_dev *wusb_dev;
969         unsigned done;
970
971         d_fnstart(3, dev, "(wa %p urb %p)\n", wa, urb);
972         result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp);
973         if (result < 0)
974                 goto error_rpipe_get;
975         result = -ENODEV;
976         /* FIXME: segmentation broken -- kills DWA */
977         mutex_lock(&wusbhc->mutex);             /* get a WUSB dev */
978         if (urb->dev == NULL)
979                 goto error_dev_gone;
980         wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
981         if (wusb_dev == NULL) {
982                 mutex_unlock(&wusbhc->mutex);
983                 goto error_dev_gone;
984         }
985         mutex_unlock(&wusbhc->mutex);
986
987         spin_lock_irqsave(&xfer->lock, flags);
988         xfer->wusb_dev = wusb_dev;
989         result = urb->status;
990         if (urb->status != -EINPROGRESS)
991                 goto error_dequeued;
992
993         result = __wa_xfer_setup(xfer, urb);
994         if (result < 0)
995                 goto error_xfer_setup;
996         result = __wa_xfer_submit(xfer);
997         if (result < 0)
998                 goto error_xfer_submit;
999         spin_unlock_irqrestore(&xfer->lock, flags);
1000         d_fnend(3, dev, "(wa %p urb %p) = void\n", wa, urb);
1001         return;
1002
 1003         /* this is basically wa_xfer_completion() broken up. wa_xfer_giveback()
 1004          * does a wa_xfer_put() that will call wa_xfer_destroy() and clean
 1005          * up / undo setup().
 1006          */
1007 error_xfer_setup:
1008 error_dequeued:
1009         spin_unlock_irqrestore(&xfer->lock, flags);
1010         /* FIXME: segmentation broken, kills DWA */
1011         if (wusb_dev)
1012                 wusb_dev_put(wusb_dev);
1013 error_dev_gone:
1014         rpipe_put(xfer->ep->hcpriv);
1015 error_rpipe_get:
1016         xfer->result = result;
1017         wa_xfer_giveback(xfer);
1018         d_fnend(3, dev, "(wa %p urb %p) = (void) %d\n", wa, urb, result);
1019         return;
1020
1021 error_xfer_submit:
1022         done = __wa_xfer_is_done(xfer);
1023         xfer->result = result;
1024         spin_unlock_irqrestore(&xfer->lock, flags);
1025         if (done)
1026                 wa_xfer_completion(xfer);
1027         d_fnend(3, dev, "(wa %p urb %p) = (void) %d\n", wa, urb, result);
1028         return;
1029 }
1030
1031 /*
1032  * Execute the delayed transfers in the Wire Adapter @wa
1033  *
1034  * We need to be careful here, as dequeue() could be called in the
1035  * middle.  That's why we do the whole thing under the
1036  * wa->xfer_list_lock. If dequeue() jumps in, it first locks urb->lock
 1037  * and then checks the list; since we would be acquiring in inverse
1038  * order, we just drop the lock once we have the xfer and reacquire it
1039  * later.
1040  */
1041 void wa_urb_enqueue_run(struct work_struct *ws)
1042 {
1043         struct wahc *wa = container_of(ws, struct wahc, xfer_work);
1044         struct device *dev = &wa->usb_iface->dev;
1045         struct wa_xfer *xfer, *next;
1046         struct urb *urb;
1047
1048         d_fnstart(3, dev, "(wa %p)\n", wa);
1049         spin_lock_irq(&wa->xfer_list_lock);
1050         list_for_each_entry_safe(xfer, next, &wa->xfer_delayed_list,
1051                                  list_node) {
1052                 list_del_init(&xfer->list_node);
1053                 spin_unlock_irq(&wa->xfer_list_lock);
1054
1055                 urb = xfer->urb;
1056                 wa_urb_enqueue_b(xfer);
1057                 usb_put_urb(urb);       /* taken when queuing */
1058
1059                 spin_lock_irq(&wa->xfer_list_lock);
1060         }
1061         spin_unlock_irq(&wa->xfer_list_lock);
1062         d_fnend(3, dev, "(wa %p) = void\n", wa);
1063 }
1064 EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);
1065
1066 /*
1067  * Submit a transfer to the Wire Adapter in a delayed way
1068  *
 1069  * The process of enqueuing involves possible sleeps [see enqueue_b(),
 1070  * for the rpipe_get() and the mutex_lock()]. If we are in an atomic
 1071  * section, we defer the enqueue_b() call; otherwise we call it directly.
1072  *
1073  * @urb: We own a reference to it done by the HCI Linux USB stack that
1074  *       will be given up by calling usb_hcd_giveback_urb() or by
1075  *       returning error from this function -> ergo we don't have to
1076  *       refcount it.
1077  */
1078 int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
1079                    struct urb *urb, gfp_t gfp)
1080 {
1081         int result;
1082         struct device *dev = &wa->usb_iface->dev;
1083         struct wa_xfer *xfer;
1084         unsigned long my_flags;
1085         unsigned cant_sleep = irqs_disabled() | in_atomic();
1086
1087         d_fnstart(3, dev, "(wa %p ep %p urb %p [%d] gfp 0x%x)\n",
1088                   wa, ep, urb, urb->transfer_buffer_length, gfp);
1089
1090         if (urb->transfer_buffer == NULL
1091             && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
1092             && urb->transfer_buffer_length != 0) {
1093                 dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb);
1094                 dump_stack();
1095         }
1096
1097         result = -ENOMEM;
1098         xfer = kzalloc(sizeof(*xfer), gfp);
1099         if (xfer == NULL)
1100                 goto error_kmalloc;
1101
1102         result = -ENOENT;
1103         if (urb->status != -EINPROGRESS)        /* cancelled */
1104                 goto error_dequeued;            /* before starting? */
1105         wa_xfer_init(xfer);
1106         xfer->wa = wa_get(wa);
1107         xfer->urb = urb;
1108         xfer->gfp = gfp;
1109         xfer->ep = ep;
1110         urb->hcpriv = xfer;
1111         d_printf(2, dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n",
1112                  xfer, urb, urb->pipe, urb->transfer_buffer_length,
1113                  urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma",
1114                  urb->pipe & USB_DIR_IN ? "inbound" : "outbound",
1115                  cant_sleep ? "deferred" : "inline");
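        /*
         * If we cannot sleep, defer: take a reference on the URB, queue
         * the xfer on the delayed list and let wa_urb_enqueue_run() (the
         * wusbd work item) finish the enqueue.
         */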
1116         if (cant_sleep) {
1117                 usb_get_urb(urb);
1118                 spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
1119                 list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
1120                 spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
1121                 queue_work(wusbd, &wa->xfer_work);
1122         } else {
1123                 wa_urb_enqueue_b(xfer);
1124         }
1125         d_fnend(3, dev, "(wa %p ep %p urb %p [%d] gfp 0x%x) = 0\n",
1126                 wa, ep, urb, urb->transfer_buffer_length, gfp);
1127         return 0;
1128
1129 error_dequeued:
1130         kfree(xfer);
1131 error_kmalloc:
1132         d_fnend(3, dev, "(wa %p ep %p urb %p [%d] gfp 0x%x) = %d\n",
1133                 wa, ep, urb, urb->transfer_buffer_length, gfp, result);
1134         return result;
1135 }
1136 EXPORT_SYMBOL_GPL(wa_urb_enqueue);
1137
1138 /*
1139  * Dequeue a URB and make sure uwb_hcd_giveback_urb() [completion
1140  * handler] is called.
1141  *
1142  * Until a transfer goes successfully through wa_urb_enqueue() it
1143  * needs to be dequeued with completion calling; when stuck in delayed
1144  * or before wa_xfer_setup() is called, we need to do completion.
1145  *
 1146  *  not setup  If there is no hcpriv yet, that means that enqueue()
 1147  *             still had no time to set the xfer up. Because
1148  *             urb->status should be other than -EINPROGRESS,
1149  *             enqueue() will catch that and bail out.
1150  *
1151  * If the transfer has gone through setup, we just need to clean it
1152  * up. If it has gone through submit(), we have to abort it [with an
1153  * asynch request] and then make sure we cancel each segment.
1154  *
1155  */
1156 int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
1157 {
1158         struct device *dev = &wa->usb_iface->dev;
1159         unsigned long flags, flags2;
1160         struct wa_xfer *xfer;
1161         struct wa_seg *seg;
1162         struct wa_rpipe *rpipe;
1163         unsigned cnt;
1164         unsigned rpipe_ready = 0;
1165
1166         d_fnstart(3, dev, "(wa %p, urb %p)\n", wa, urb);
1167
1168         d_printf(1, dev, "xfer %p urb %p: aborting\n", urb->hcpriv, urb);
1169         xfer = urb->hcpriv;
1170         if (xfer == NULL) {
 1171                 /* Nothing is set up yet; enqueue() will see urb->status !=
1172                  * -EINPROGRESS (by hcd layer) and bail out with
1173                  * error, no need to do completion
1174                  */
1175                 BUG_ON(urb->status == -EINPROGRESS);
1176                 goto out;
1177         }
1178         spin_lock_irqsave(&xfer->lock, flags);
1179         rpipe = xfer->ep->hcpriv;
1180         /* Check the delayed list -> if there, release and complete */
1181         spin_lock_irqsave(&wa->xfer_list_lock, flags2);
1182         if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
1183                 goto dequeue_delayed;
1184         spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
1185         if (xfer->seg == NULL)          /* still hasn't reached */
1186                 goto out_unlock;        /* setup(), enqueue_b() completes */
1187         /* Ok, the xfer is in flight already, it's been setup and submitted.*/
1188         __wa_xfer_abort(xfer);
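        /*
         * Informational summary of the per-segment handling below:
         * DELAYED segments are pulled off the rpipe queue, SUBMITTED
         * ones get their URBs unlinked, DTI_PENDING ones unlink the DTI
         * URB; in each of those cases, plus PENDING, the segment is
         * marked ABORTED, counted as done and its rpipe slot returned.
         */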
1189         for (cnt = 0; cnt < xfer->segs; cnt++) {
1190                 seg = xfer->seg[cnt];
1191                 switch (seg->status) {
1192                 case WA_SEG_NOTREADY:
1193                 case WA_SEG_READY:
1194                         printk(KERN_ERR "xfer %p#%u: dequeue bad state %u\n",
1195                                xfer, cnt, seg->status);
1196                         WARN_ON(1);
1197                         break;
1198                 case WA_SEG_DELAYED:
1199                         seg->status = WA_SEG_ABORTED;
1200                         spin_lock_irqsave(&rpipe->seg_lock, flags2);
1201                         list_del(&seg->list_node);
1202                         xfer->segs_done++;
1203                         rpipe_ready = rpipe_avail_inc(rpipe);
1204                         spin_unlock_irqrestore(&rpipe->seg_lock, flags2);
1205                         break;
1206                 case WA_SEG_SUBMITTED:
1207                         seg->status = WA_SEG_ABORTED;
1208                         usb_unlink_urb(&seg->urb);
1209                         if (xfer->is_inbound == 0)
1210                                 usb_unlink_urb(seg->dto_urb);
1211                         xfer->segs_done++;
1212                         rpipe_ready = rpipe_avail_inc(rpipe);
1213                         break;
1214                 case WA_SEG_PENDING:
1215                         seg->status = WA_SEG_ABORTED;
1216                         xfer->segs_done++;
1217                         rpipe_ready = rpipe_avail_inc(rpipe);
1218                         break;
1219                 case WA_SEG_DTI_PENDING:
1220                         usb_unlink_urb(wa->dti_urb);
1221                         seg->status = WA_SEG_ABORTED;
1222                         xfer->segs_done++;
1223                         rpipe_ready = rpipe_avail_inc(rpipe);
1224                         break;
1225                 case WA_SEG_DONE:
1226                 case WA_SEG_ERROR:
1227                 case WA_SEG_ABORTED:
1228                         break;
1229                 }
1230         }
1231         xfer->result = urb->status;     /* -ENOENT or -ECONNRESET */
1232         __wa_xfer_is_done(xfer);
1233         spin_unlock_irqrestore(&xfer->lock, flags);
1234         wa_xfer_completion(xfer);
1235         if (rpipe_ready)
1236                 wa_xfer_delayed_run(rpipe);
1237         d_fnend(3, dev, "(wa %p, urb %p) = 0\n", wa, urb);
1238         return 0;
1239
1240 out_unlock:
1241         spin_unlock_irqrestore(&xfer->lock, flags);
1242 out:
1243         d_fnend(3, dev, "(wa %p, urb %p) = 0\n", wa, urb);
1244         return 0;
1245
1246 dequeue_delayed:
1247         list_del_init(&xfer->list_node);
1248         spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
1249         xfer->result = urb->status;
1250         spin_unlock_irqrestore(&xfer->lock, flags);
1251         wa_xfer_giveback(xfer);
1252         usb_put_urb(urb);               /* we got a ref in enqueue() */
1253         d_fnend(3, dev, "(wa %p, urb %p) = 0\n", wa, urb);
1254         return 0;
1255 }
1256 EXPORT_SYMBOL_GPL(wa_urb_dequeue);
1257
1258 /*
1259  * Translation from WA status codes (WUSB1.0 Table 8.15) to errno
1260  * codes
1261  *
1262  * Positive errno values are internal inconsistencies and should be
1263  * flagged louder. Negative are to be passed up to the user in the
1264  * normal way.
1265  *
1266  * @status: USB WA status code -- high two bits are stripped.
1267  */
1268 static int wa_xfer_status_to_errno(u8 status)
1269 {
1270         int errno;
1271         u8 real_status = status;
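        /* Entries left as positive errnos mark status codes we never
         * expect to see from the device; they are warned about and
         * negated below before being returned. */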
1272         static int xlat[] = {
1273                 [WA_XFER_STATUS_SUCCESS] =              0,
1274                 [WA_XFER_STATUS_HALTED] =               -EPIPE,
1275                 [WA_XFER_STATUS_DATA_BUFFER_ERROR] =    -ENOBUFS,
1276                 [WA_XFER_STATUS_BABBLE] =               -EOVERFLOW,
1277                 [WA_XFER_RESERVED] =                    EINVAL,
1278                 [WA_XFER_STATUS_NOT_FOUND] =            0,
1279                 [WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM,
1280                 [WA_XFER_STATUS_TRANSACTION_ERROR] =    -EILSEQ,
1281                 [WA_XFER_STATUS_ABORTED] =              -EINTR,
1282                 [WA_XFER_STATUS_RPIPE_NOT_READY] =      EINVAL,
1283                 [WA_XFER_INVALID_FORMAT] =              EINVAL,
1284                 [WA_XFER_UNEXPECTED_SEGMENT_NUMBER] =   EINVAL,
1285                 [WA_XFER_STATUS_RPIPE_TYPE_MISMATCH] =  EINVAL,
1286         };
1287         status &= 0x3f;
1288
1289         if (status == 0)
1290                 return 0;
1291         if (status >= ARRAY_SIZE(xlat)) {
1292                 if (printk_ratelimit())
1293                         printk(KERN_ERR "%s(): BUG? "
1294                                "Unknown WA transfer status 0x%02x\n",
1295                                __func__, real_status);
1296                 return -EINVAL;
1297         }
1298         errno = xlat[status];
1299         if (unlikely(errno > 0)) {
1300                 if (printk_ratelimit())
1301                         printk(KERN_ERR "%s(): BUG? "
1302                                "Inconsistent WA status: 0x%02x\n",
1303                                __func__, real_status);
1304                 errno = -errno;
1305         }
1306         return errno;
1307 }
1308
1309 /*
1310  * Process a xfer result completion message
1311  *
1312  * inbound transfers: need to schedule a DTI read
1313  *
 1314  * FIXME: this function needs to be broken up into parts
1315  */
1316 static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer)
1317 {
1318         int result;
1319         struct device *dev = &wa->usb_iface->dev;
1320         unsigned long flags;
1321         u8 seg_idx;
1322         struct wa_seg *seg;
1323         struct wa_rpipe *rpipe;
1324         struct wa_xfer_result *xfer_result = wa->xfer_result;
1325         u8 done = 0;
1326         u8 usb_status;
1327         unsigned rpipe_ready = 0;
1328
1329         d_fnstart(3, dev, "(wa %p xfer %p)\n", wa, xfer);
1330         spin_lock_irqsave(&xfer->lock, flags);
1331         seg_idx = xfer_result->bTransferSegment & 0x7f;
1332         if (unlikely(seg_idx >= xfer->segs))
1333                 goto error_bad_seg;
1334         seg = xfer->seg[seg_idx];
1335         rpipe = xfer->ep->hcpriv;
1336         usb_status = xfer_result->bTransferStatus;
1337         d_printf(2, dev, "xfer %p#%u: bTransferStatus 0x%02x (seg %u)\n",
1338                  xfer, seg_idx, usb_status, seg->status);
1339         if (seg->status == WA_SEG_ABORTED
1340             || seg->status == WA_SEG_ERROR)     /* already handled */
1341                 goto segment_aborted;
1342         if (seg->status == WA_SEG_SUBMITTED)    /* ops, got here */
1343                 seg->status = WA_SEG_PENDING;   /* before wa_seg{_dto}_cb() */
1344         if (seg->status != WA_SEG_PENDING) {
1345                 if (printk_ratelimit())
1346                         dev_err(dev, "xfer %p#%u: Bad segment state %u\n",
1347                                 xfer, seg_idx, seg->status);
1348                 seg->status = WA_SEG_PENDING;   /* workaround/"fix" it */
1349         }
1350         if (usb_status & 0x80) {
1351                 seg->result = wa_xfer_status_to_errno(usb_status);
1352                 dev_err(dev, "DTI: xfer %p#%u failed (0x%02x)\n",
1353                         xfer, seg->index, usb_status);
1354                 goto error_complete;
1355         }
1356         /* FIXME: we ignore warnings, tally them for stats */
1357         if (usb_status & 0x40)          /* Warning?... */
1358                 usb_status = 0;         /* ... pass */
1359         if (xfer->is_inbound) { /* IN data phase: read to buffer */
1360                 seg->status = WA_SEG_DTI_PENDING;
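                /* there is a single, shared BUF-IN URB; it must not be in flight here */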
1361                 BUG_ON(wa->buf_in_urb->status == -EINPROGRESS);
1362                 if (xfer->is_dma) {
1363                         wa->buf_in_urb->transfer_dma =
1364                                 xfer->urb->transfer_dma
1365                                 + seg_idx * xfer->seg_size;
1366                         wa->buf_in_urb->transfer_flags
1367                                 |= URB_NO_TRANSFER_DMA_MAP;
1368                 } else {
1369                         wa->buf_in_urb->transfer_buffer =
1370                                 xfer->urb->transfer_buffer
1371                                 + seg_idx * xfer->seg_size;
1372                         wa->buf_in_urb->transfer_flags
1373                                 &= ~URB_NO_TRANSFER_DMA_MAP;
1374                 }
1375                 wa->buf_in_urb->transfer_buffer_length =
1376                         le32_to_cpu(xfer_result->dwTransferLength);
1377                 wa->buf_in_urb->context = seg;
1378                 result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC);
1379                 if (result < 0)
1380                         goto error_submit_buf_in;
1381         } else {
1382                 /* OUT data phase, complete it -- */
1383                 seg->status = WA_SEG_DONE;
1384                 seg->result = le32_to_cpu(xfer_result->dwTransferLength);
1385                 xfer->segs_done++;
1386                 rpipe_ready = rpipe_avail_inc(rpipe);
1387                 done = __wa_xfer_is_done(xfer);
1388         }
1389         spin_unlock_irqrestore(&xfer->lock, flags);
1390         if (done)
1391                 wa_xfer_completion(xfer);
1392         if (rpipe_ready)
1393                 wa_xfer_delayed_run(rpipe);
1394         d_fnend(3, dev, "(wa %p xfer %p) = void\n", wa, xfer);
1395         return;
1396
1397
1398 error_submit_buf_in:
1399         if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
1400                 dev_err(dev, "DTI: URB max acceptable errors "
1401                         "exceeded, resetting device\n");
1402                 wa_reset_all(wa);
1403         }
1404         if (printk_ratelimit())
1405                 dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
1406                         xfer, seg_idx, result);
1407         seg->result = result;
1408 error_complete:
1409         seg->status = WA_SEG_ERROR;
1410         xfer->segs_done++;
1411         rpipe_ready = rpipe_avail_inc(rpipe);
1412         __wa_xfer_abort(xfer);
1413         done = __wa_xfer_is_done(xfer);
1414         spin_unlock_irqrestore(&xfer->lock, flags);
1415         if (done)
1416                 wa_xfer_completion(xfer);
1417         if (rpipe_ready)
1418                 wa_xfer_delayed_run(rpipe);
1419         d_fnend(3, dev, "(wa %p xfer %p) = void [segment/DTI-submit error]\n",
1420                 wa, xfer);
1421         return;
1422
1423
1424 error_bad_seg:
1425         spin_unlock_irqrestore(&xfer->lock, flags);
1426         wa_urb_dequeue(wa, xfer->urb);
1427         if (printk_ratelimit())
1428                 dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx);
1429         if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
1430                 dev_err(dev, "DTI: URB max acceptable errors "
1431                         "exceeded, resetting device\n");
1432                 wa_reset_all(wa);
1433         }
1434         d_fnend(3, dev, "(wa %p xfer %p) = void [bad seg]\n", wa, xfer);
1435         return;
1436
1437
1438 segment_aborted:
1439         /* nothing to do, as the aborter did the completion */
1440         spin_unlock_irqrestore(&xfer->lock, flags);
1441         d_fnend(3, dev, "(wa %p xfer %p) = void [segment aborted]\n",
1442                 wa, xfer);
1443         return;
1444
1445 }
1446
1447 /*
1448  * Callback for the IN data phase
1449  *
1450  * If successful, transition state; otherwise, note the error, mark
1451  * this segment done and try completion.
1452  *
1453  * Note we don't access the xfer until we are sure the transfer hasn't
1454  * been cancelled (ECONNRESET, ENOENT), in which case seg->xfer could
1455  * already be gone.
1456  */
1457 static void wa_buf_in_cb(struct urb *urb)
1458 {
1459         struct wa_seg *seg = urb->context;
1460         struct wa_xfer *xfer = seg->xfer;
1461         struct wahc *wa;
1462         struct device *dev;
1463         struct wa_rpipe *rpipe;
1464         unsigned rpipe_ready;
1465         unsigned long flags;
1466         u8 done = 0;
1467
1468         d_fnstart(3, NULL, "(urb %p [%d])\n", urb, urb->status);
1469         switch (urb->status) {
1470         case 0:
1471                 spin_lock_irqsave(&xfer->lock, flags);
1472                 wa = xfer->wa;
1473                 dev = &wa->usb_iface->dev;
1474                 rpipe = xfer->ep->hcpriv;
1475                 d_printf(2, dev, "xfer %p#%u: data in done (%zu bytes)\n",
1476                            xfer, seg->index, (size_t)urb->actual_length);
1477                 seg->status = WA_SEG_DONE;
1478                 seg->result = urb->actual_length;
1479                 xfer->segs_done++;
1480                 rpipe_ready = rpipe_avail_inc(rpipe);
1481                 done = __wa_xfer_is_done(xfer);
1482                 spin_unlock_irqrestore(&xfer->lock, flags);
1483                 if (done)
1484                         wa_xfer_completion(xfer);
1485                 if (rpipe_ready)
1486                         wa_xfer_delayed_run(rpipe);
1487                 break;
1488         case -ECONNRESET:       /* URB unlinked; no need to do anything */
1489         case -ENOENT:           /* as it was done by whoever unlinked us */
1490                 break;
1491         default:                /* Other errors ... */
1492                 spin_lock_irqsave(&xfer->lock, flags);
1493                 wa = xfer->wa;
1494                 dev = &wa->usb_iface->dev;
1495                 rpipe = xfer->ep->hcpriv;
1496                 if (printk_ratelimit())
1497                         dev_err(dev, "xfer %p#%u: data in error %d\n",
1498                                 xfer, seg->index, urb->status);
1499                 if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
1500                             EDC_ERROR_TIMEFRAME)) {
1501                         dev_err(dev, "DTI: URB max acceptable errors "
1502                                 "exceeded, resetting device\n");
1503                         wa_reset_all(wa);
1504                 }
1505                 seg->status = WA_SEG_ERROR;
1506                 seg->result = urb->status;
1507                 xfer->segs_done++;
1508                 rpipe_ready = rpipe_avail_inc(rpipe);
1509                 __wa_xfer_abort(xfer);
1510                 done = __wa_xfer_is_done(xfer);
1511                 spin_unlock_irqrestore(&xfer->lock, flags);
1512                 if (done)
1513                         wa_xfer_completion(xfer);
1514                 if (rpipe_ready)
1515                         wa_xfer_delayed_run(rpipe);
1516         }
1517         d_fnend(3, NULL, "(urb %p [%d]) = void\n", urb, urb->status);
1518 }
1519
1520 /*
1521  * Handle an incoming transfer result buffer
1522  *
1523  * Given a transfer result buffer, it completes the transfer (possibly
1524  * scheduling a buffer-in read) and then resubmits the DTI URB for a
1525  * new transfer result read.
1526  *
1527  *
1528  * The xfer_result DTI URB state machine
1529  *
1530  * States: OFF | RXR (Read-Xfer-Result) | RBI (Read-Buffer-In)
1531  *
1532  * We start in OFF mode, the first xfer_result notification [through
1533  * wa_handle_notif_xfer()] moves us to RXR by posting the DTI-URB to
1534  * read.
1535  *
1536  * We receive a buffer -- if it is not a xfer_result, we complain and
1537  * repost the DTI-URB. If it is a xfer_result then do the xfer seg
1538  * request accounting. If it is an IN segment, we move to RBI and post
1539  * a BUF-IN-URB to the right buffer. The BUF-IN-URB callback will
1540  * repost the DTI-URB and move to RXR state. If there was no IN
1541  * segment, it will repost the DTI-URB.
1542  *
1543  * We go back to OFF when we detect an ENOENT or ESHUTDOWN (or too many
1544  * errors) in the URBs.
1545  */
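/*
 * Rough sketch of the transitions described above (a reading aid only,
 * summarizing the paragraph above):
 *
 *   OFF --first notification [wa_handle_notif_xfer()]-------> RXR
 *   RXR --xfer_result for an IN segment----------------------> RBI
 *   RBI --wa_buf_in_cb() reposts the DTI-URB-----------------> RXR
 *   RXR --xfer_result with no IN segment: repost DTI-URB-----> RXR
 *   any --ENOENT/ESHUTDOWN or too many errors on the URBs----> OFF
 */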
1546 static void wa_xfer_result_cb(struct urb *urb)
1547 {
1548         int result;
1549         struct wahc *wa = urb->context;
1550         struct device *dev = &wa->usb_iface->dev;
1551         struct wa_xfer_result *xfer_result;
1552         u32 xfer_id;
1553         struct wa_xfer *xfer;
1554         u8 usb_status;
1555
1556         d_fnstart(3, dev, "(%p)\n", wa);
1557         BUG_ON(wa->dti_urb != urb);
1558         switch (wa->dti_urb->status) {
1559         case 0:
1560                 /* We have a xfer result buffer; check it */
1561                 d_printf(2, dev, "DTI: xfer result %d bytes at %p\n",
1562                            urb->actual_length, urb->transfer_buffer);
1563                 d_dump(3, dev, urb->transfer_buffer, urb->actual_length);
1564                 if (wa->dti_urb->actual_length != sizeof(*xfer_result)) {
1565                         dev_err(dev, "DTI Error: xfer result--bad size "
1566                                 "(%d bytes vs %zu needed)\n",
1567                                 urb->actual_length, sizeof(*xfer_result));
1568                         break;
1569                 }
1570                 xfer_result = wa->xfer_result;
1571                 if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
1572                         dev_err(dev, "DTI Error: xfer result--"
1573                                 "bad header length %u\n",
1574                                 xfer_result->hdr.bLength);
1575                         break;
1576                 }
1577                 if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) {
1578                         dev_err(dev, "DTI Error: xfer result--"
1579                                 "bad header type 0x%02x\n",
1580                                 xfer_result->hdr.bNotifyType);
1581                         break;
1582                 }
1583                 usb_status = xfer_result->bTransferStatus & 0x3f;
1584                 if (usb_status == WA_XFER_STATUS_ABORTED
1585                     || usb_status == WA_XFER_STATUS_NOT_FOUND)
1586                         /* taken care of already */
1587                         break;
1588                 xfer_id = xfer_result->dwTransferID;
1589                 xfer = wa_xfer_get_by_id(wa, xfer_id);
1590                 if (xfer == NULL) {
1591                         /* FIXME: transaction might have been cancelled */
1592                         dev_err(dev, "DTI Error: xfer result--"
1593                                 "unknown xfer 0x%08x (status 0x%02x)\n",
1594                                 xfer_id, usb_status);
1595                         break;
1596                 }
1597                 wa_xfer_result_chew(wa, xfer);
1598                 wa_xfer_put(xfer);
1599                 break;
1600         case -ENOENT:           /* (we killed the URB)...so, no broadcast */
1601         case -ESHUTDOWN:        /* going away! */
1602                 dev_dbg(dev, "DTI: going down! %d\n", urb->status);
1603                 goto out;
1604         default:
1605                 /* Unknown error */
1606                 if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS,
1607                             EDC_ERROR_TIMEFRAME)) {
1608                         dev_err(dev, "DTI: URB max acceptable errors "
1609                                 "exceeded, resetting device\n");
1610                         wa_reset_all(wa);
1611                         goto out;
1612                 }
1613                 if (printk_ratelimit())
1614                         dev_err(dev, "DTI: URB error %d\n", urb->status);
1615                 break;
1616         }
1617         /* Resubmit the DTI URB */
1618         result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
1619         if (result < 0) {
1620                 dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
1621                         "resetting\n", result);
1622                 wa_reset_all(wa);
1623         }
1624 out:
1625         d_fnend(3, dev, "(%p) = void\n", wa);
1626         return;
1627 }
1628
1629 /*
1630  * Transfer complete notification
1631  *
1632  * Called from the notif.c code. We get a notification on EP2 saying
1633  * that some endpoint has some transfer result data available. We are
1634  * about to read it.
1635  *
1636  * To speed things up, we always have a URB reading from the DTI
1637  * endpoint; we don't really set it up and start it until the first
1638  * notification arrives, which is what we do here.
1639  *
1640  * Follow up in wa_xfer_result_cb(), as that's where the whole state
1641  * machine starts.
1642  *
1643  * So here we just initialize the DTI URB for reading transfer result
1644  * notifications and also the buffer-in URB, for reading buffers. Then
1645  * we just submit the DTI URB.
1646  *
1647  * @wa shall be referenced
1648  */
1649 void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
1650 {
1651         int result;
1652         struct device *dev = &wa->usb_iface->dev;
1653         struct wa_notif_xfer *notif_xfer;
1654         const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;
1655
1656         d_fnstart(4, dev, "(%p, %p)\n", wa, notif_hdr);
1657         notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr);
1658         BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER);
1659
1660         if ((0x80 | notif_xfer->bEndpoint) != dti_epd->bEndpointAddress) {
1661                 /* FIXME: hardcoded limitation, adapt */
1662                 dev_err(dev, "BUG: DTI ep is %u, not %u (hack me)\n",
1663                         notif_xfer->bEndpoint, dti_epd->bEndpointAddress);
1664                 goto error;
1665         }
1666         if (wa->dti_urb != NULL)        /* DTI URB already started */
1667                 goto out;
1668
1669         wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
1670         if (wa->dti_urb == NULL) {
1671                 dev_err(dev, "Can't allocate DTI URB\n");
1672                 goto error_dti_urb_alloc;
1673         }
1674         usb_fill_bulk_urb(
1675                 wa->dti_urb, wa->usb_dev,
1676                 usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
1677                 wa->xfer_result, wa->xfer_result_size,
1678                 wa_xfer_result_cb, wa);
1679
1680         wa->buf_in_urb = usb_alloc_urb(0, GFP_KERNEL);
1681         if (wa->buf_in_urb == NULL) {
1682                 dev_err(dev, "Can't allocate BUF-IN URB\n");
1683                 goto error_buf_in_urb_alloc;
1684         }
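        /*
         * The BUF-IN URB is set up here with a NULL buffer and zero length;
         * wa_xfer_result_chew() points it at the proper segment buffer (or
         * DMA address) and length before each submission.
         */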
1685         usb_fill_bulk_urb(
1686                 wa->buf_in_urb, wa->usb_dev,
1687                 usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
1688                 NULL, 0, wa_buf_in_cb, wa);
1689         result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
1690         if (result < 0) {
1691                 dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
1692                         "resetting\n", result);
1693                 goto error_dti_urb_submit;
1694         }
1695 out:
1696         d_fnend(4, dev, "(%p, %p) = void\n", wa, notif_hdr);
1697         return;
1698
1699 error_dti_urb_submit:
1700         usb_put_urb(wa->buf_in_urb);
1701 error_buf_in_urb_alloc:
1702         usb_put_urb(wa->dti_urb);
1703         wa->dti_urb = NULL;
1704 error_dti_urb_alloc:
1705 error:
1706         wa_reset_all(wa);
1707         d_fnend(4, dev, "(%p, %p) = void\n", wa, notif_hdr);
1708         return;
1709 }