/*
 * Universal Host Controller Interface driver for USB.
 *
 * Maintainer: Alan Stern <stern@rowland.harvard.edu>
 *
 * (C) Copyright 1999 Linus Torvalds
 * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
 * (C) Copyright 1999 Randy Dunlap
 * (C) Copyright 1999 Georg Acher, acher@in.tum.de
 * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
 * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
 * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
 * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
 *               support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
 * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
 * (C) Copyright 2004-2007 Alan Stern, stern@rowland.harvard.edu
 */


/*
 * Technically, updating td->status here is a race, but it's not really a
 * problem. The worst that can happen is that we set the IOC bit again,
 * generating a spurious interrupt. We could fix this by creating another
 * QH and leaving the IOC bit always set, but then we would have to play
 * games with the FSBR code to make sure we get the correct order in all
 * the cases. I don't think it's worth the effort.
 */
static void uhci_set_next_interrupt(struct uhci_hcd *uhci)
{
        if (uhci->is_stopped)
                mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies);
        uhci->term_td->status |= cpu_to_le32(TD_CTRL_IOC);
}

static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
{
        uhci->term_td->status &= ~cpu_to_le32(TD_CTRL_IOC);
}


/*
 * Full-Speed Bandwidth Reclamation (FSBR).
 * We turn on FSBR whenever a queue that wants it is advancing,
 * and leave it on for a short time thereafter.
 */
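/* In hardware terms, FSBR works by looping the tail of the async
 * schedule back to the first full-speed QH (or, when there is none,
 * through the terminating skeleton QH), so the controller keeps
 * re-polling the full-speed queues for the rest of the frame instead
 * of idling once it reaches the end of the schedule. */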
static void uhci_fsbr_on(struct uhci_hcd *uhci)
{
        struct uhci_qh *fsbr_qh, *lqh, *tqh;

        uhci->fsbr_is_on = 1;
        lqh = list_entry(uhci->skel_async_qh->node.prev,
                        struct uhci_qh, node);

        /* Find the first FSBR QH.  Linear search through the list is
         * acceptable because normally FSBR gets turned on as soon as
         * one QH needs it. */
        fsbr_qh = NULL;
        list_for_each_entry_reverse(tqh, &uhci->skel_async_qh->node, node) {
                if (tqh->skel < SKEL_FSBR)
                        break;
                fsbr_qh = tqh;
        }

        /* No FSBR QH means we must insert the terminating skeleton QH */
        if (!fsbr_qh) {
                uhci->skel_term_qh->link = LINK_TO_QH(uhci->skel_term_qh);
                wmb();
                lqh->link = uhci->skel_term_qh->link;

        /* Otherwise loop the last QH to the first FSBR QH */
        } else
                lqh->link = LINK_TO_QH(fsbr_qh);
}

static void uhci_fsbr_off(struct uhci_hcd *uhci)
{
        struct uhci_qh *lqh;

        uhci->fsbr_is_on = 0;
        lqh = list_entry(uhci->skel_async_qh->node.prev,
                        struct uhci_qh, node);

        /* End the async list normally and unlink the terminating QH */
        lqh->link = uhci->skel_term_qh->link = UHCI_PTR_TERM;
}

static void uhci_add_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
        struct urb_priv *urbp = urb->hcpriv;

        if (!(urb->transfer_flags & URB_NO_FSBR))
                urbp->fsbr = 1;
}

static void uhci_urbp_wants_fsbr(struct uhci_hcd *uhci, struct urb_priv *urbp)
{
        if (urbp->fsbr) {
                uhci->fsbr_is_wanted = 1;
                if (!uhci->fsbr_is_on)
                        uhci_fsbr_on(uhci);
                else if (uhci->fsbr_expiring) {
                        uhci->fsbr_expiring = 0;
                        del_timer(&uhci->fsbr_timer);
                }
        }
}

static void uhci_fsbr_timeout(unsigned long _uhci)
{
        struct uhci_hcd *uhci = (struct uhci_hcd *) _uhci;
        unsigned long flags;

        spin_lock_irqsave(&uhci->lock, flags);
        if (uhci->fsbr_expiring) {
                uhci->fsbr_expiring = 0;
                uhci_fsbr_off(uhci);
        }
        spin_unlock_irqrestore(&uhci->lock, flags);
}


static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci)
{
        dma_addr_t dma_handle;
        struct uhci_td *td;

        td = dma_pool_alloc(uhci->td_pool, GFP_ATOMIC, &dma_handle);
        if (!td)
                return NULL;

        td->dma_handle = dma_handle;
        td->frame = -1;

        INIT_LIST_HEAD(&td->list);
        INIT_LIST_HEAD(&td->fl_list);

        return td;
}

static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
        if (!list_empty(&td->list))
                dev_warn(uhci_dev(uhci), "td %p still in list!\n", td);
        if (!list_empty(&td->fl_list))
                dev_warn(uhci_dev(uhci), "td %p still in fl_list!\n", td);

        dma_pool_free(uhci->td_pool, td, td->dma_handle);
}

static inline void uhci_fill_td(struct uhci_td *td, u32 status,
                u32 token, u32 buffer)
{
        td->status = cpu_to_le32(status);
        td->token = cpu_to_le32(token);
        td->buffer = cpu_to_le32(buffer);
}

static void uhci_add_td_to_urbp(struct uhci_td *td, struct urb_priv *urbp)
{
        list_add_tail(&td->list, &urbp->td_list);
}

static void uhci_remove_td_from_urbp(struct uhci_td *td)
{
        list_del_init(&td->list);
}

/*
 * We insert Isochronous URBs directly into the frame list at the beginning
 */
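/* Each frame-list slot heads a chain of TDs for that frame.  A new iso
 * TD goes at the end of its frame's chain: td->link is set first and a
 * wmb() is issued before the previous tail (or the frame-list entry
 * itself) is pointed at the new TD, so the controller never follows a
 * half-initialized link. */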
static inline void uhci_insert_td_in_frame_list(struct uhci_hcd *uhci,
                struct uhci_td *td, unsigned framenum)
{
        framenum &= (UHCI_NUMFRAMES - 1);

        td->frame = framenum;

        /* Is there a TD already mapped there? */
        if (uhci->frame_cpu[framenum]) {
                struct uhci_td *ftd, *ltd;

                ftd = uhci->frame_cpu[framenum];
                ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);

                list_add_tail(&td->fl_list, &ftd->fl_list);

                td->link = ltd->link;
                wmb();
                ltd->link = LINK_TO_TD(td);
        } else {
                td->link = uhci->frame[framenum];
                wmb();
                uhci->frame[framenum] = LINK_TO_TD(td);
                uhci->frame_cpu[framenum] = td;
        }
}

static inline void uhci_remove_td_from_frame_list(struct uhci_hcd *uhci,
                struct uhci_td *td)
{
        /* If it's not inserted, don't remove it */
        if (td->frame == -1) {
                WARN_ON(!list_empty(&td->fl_list));
                return;
        }

        if (uhci->frame_cpu[td->frame] == td) {
                if (list_empty(&td->fl_list)) {
                        uhci->frame[td->frame] = td->link;
                        uhci->frame_cpu[td->frame] = NULL;
                } else {
                        struct uhci_td *ntd;

                        ntd = list_entry(td->fl_list.next, struct uhci_td, fl_list);
                        uhci->frame[td->frame] = LINK_TO_TD(ntd);
                        uhci->frame_cpu[td->frame] = ntd;
                }
        } else {
                struct uhci_td *ptd;

                ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
                ptd->link = td->link;
        }

        list_del_init(&td->fl_list);
        td->frame = -1;
}

static inline void uhci_remove_tds_from_frame(struct uhci_hcd *uhci,
                unsigned int framenum)
{
        struct uhci_td *ftd, *ltd;

        framenum &= (UHCI_NUMFRAMES - 1);

        ftd = uhci->frame_cpu[framenum];
        if (ftd) {
                ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);
                uhci->frame[framenum] = ltd->link;
                uhci->frame_cpu[framenum] = NULL;

                while (!list_empty(&ftd->fl_list))
                        list_del_init(ftd->fl_list.prev);
        }
}

/*
 * Remove all the TDs for an Isochronous URB from the frame list
 */
static void uhci_unlink_isochronous_tds(struct uhci_hcd *uhci, struct urb *urb)
{
        struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
        struct uhci_td *td;

        list_for_each_entry(td, &urbp->td_list, list)
                uhci_remove_td_from_frame_list(uhci, td);
}

static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci,
                struct usb_device *udev, struct usb_host_endpoint *hep)
{
        dma_addr_t dma_handle;
        struct uhci_qh *qh;

        qh = dma_pool_alloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle);
        if (!qh)
                return NULL;

        memset(qh, 0, sizeof(*qh));
        qh->dma_handle = dma_handle;

        qh->element = UHCI_PTR_TERM;
        qh->link = UHCI_PTR_TERM;

        INIT_LIST_HEAD(&qh->queue);
        INIT_LIST_HEAD(&qh->node);

        if (udev) {             /* Normal QH */
                qh->type = hep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
                if (qh->type != USB_ENDPOINT_XFER_ISOC) {
                        qh->dummy_td = uhci_alloc_td(uhci);
                        if (!qh->dummy_td) {
                                dma_pool_free(uhci->qh_pool, qh, dma_handle);
                                return NULL;
                        }
                }
                qh->state = QH_STATE_IDLE;
                qh->hep = hep;
                qh->udev = udev;
                hep->hcpriv = qh;

                if (qh->type == USB_ENDPOINT_XFER_INT ||
                                qh->type == USB_ENDPOINT_XFER_ISOC)
                        qh->load = usb_calc_bus_time(udev->speed,
                                        usb_endpoint_dir_in(&hep->desc),
                                        qh->type == USB_ENDPOINT_XFER_ISOC,
                                        le16_to_cpu(hep->desc.wMaxPacketSize))
                                / 1000 + 1;

        } else {                /* Skeleton QH */
                qh->state = QH_STATE_ACTIVE;
                qh->type = -1;
        }
        return qh;
}

static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        WARN_ON(qh->state != QH_STATE_IDLE && qh->udev);
        if (!list_empty(&qh->queue))
                dev_warn(uhci_dev(uhci), "qh %p list not empty!\n", qh);

        list_del(&qh->node);
        if (qh->udev) {
                qh->hep->hcpriv = NULL;
                if (qh->dummy_td)
                        uhci_free_td(uhci, qh->dummy_td);
        }
        dma_pool_free(uhci->qh_pool, qh, qh->dma_handle);
}

/*
 * When a queue is stopped and a dequeued URB is given back, adjust
 * the previous TD link (if the URB isn't first on the queue) or
 * save its toggle value (if it is first and is currently executing).
 *
 * Returns 0 if the URB should not yet be given back, 1 otherwise.
 */
static int uhci_cleanup_queue(struct uhci_hcd *uhci, struct uhci_qh *qh,
                struct urb *urb)
{
        struct urb_priv *urbp = urb->hcpriv;
        struct uhci_td *td;
        int ret = 1;

        /* Isochronous pipes don't use toggles and their TD link pointers
         * get adjusted during uhci_urb_dequeue().  But since their queues
         * cannot truly be stopped, we have to watch out for dequeues
         * occurring after the nominal unlink frame. */
        if (qh->type == USB_ENDPOINT_XFER_ISOC) {
                ret = (uhci->frame_number + uhci->is_stopped !=
                                qh->unlink_frame);
                goto done;
        }

        /* If the URB isn't first on its queue, adjust the link pointer
         * of the last TD in the previous URB.  The toggle doesn't need
         * to be saved since this URB can't be executing yet. */
        if (qh->queue.next != &urbp->node) {
                struct urb_priv *purbp;
                struct uhci_td *ptd;

                purbp = list_entry(urbp->node.prev, struct urb_priv, node);
                WARN_ON(list_empty(&purbp->td_list));
                ptd = list_entry(purbp->td_list.prev, struct uhci_td,
                                list);
                td = list_entry(urbp->td_list.prev, struct uhci_td,
                                list);
                ptd->link = td->link;
                goto done;
        }

        /* If the QH element pointer is UHCI_PTR_TERM then the currently
         * executing URB has already been unlinked, so this one isn't it. */
        if (qh_element(qh) == UHCI_PTR_TERM)
                goto done;
        qh->element = UHCI_PTR_TERM;

        /* Control pipes don't have to worry about toggles */
        if (qh->type == USB_ENDPOINT_XFER_CONTROL)
                goto done;

        /* Save the next toggle value */
        WARN_ON(list_empty(&urbp->td_list));
        td = list_entry(urbp->td_list.next, struct uhci_td, list);
        qh->needs_fixup = 1;
        qh->initial_toggle = uhci_toggle(td_token(td));

done:
        return ret;
}

/*
 * Fix up the data toggles for URBs in a queue, when one of them
 * terminates early (short transfer, error, or dequeued).
 */
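/* For instance, if a bulk URB was laid out to end on toggle 1 but a
 * short packet made it end on toggle 0, the first TD of every later
 * URB on the queue now carries the wrong toggle, and each of those TDs
 * must have its toggle bit flipped to keep the sequence alternating. */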
static void uhci_fixup_toggles(struct uhci_qh *qh, int skip_first)
{
        struct urb_priv *urbp = NULL;
        struct uhci_td *td;
        unsigned int toggle = qh->initial_toggle;
        unsigned int pipe;

        /* Fixups for a short transfer start with the second URB in the
         * queue (the short URB is the first). */
        if (skip_first)
                urbp = list_entry(qh->queue.next, struct urb_priv, node);

        /* When starting with the first URB, if the QH element pointer is
         * still valid then we know the URB's toggles are okay. */
        else if (qh_element(qh) != UHCI_PTR_TERM)
                toggle = 2;

        /* Fix up the toggle for the URBs in the queue.  Normally this
         * loop won't run more than once: When an error or short transfer
         * occurs, the queue usually gets emptied. */
        urbp = list_prepare_entry(urbp, &qh->queue, node);
        list_for_each_entry_continue(urbp, &qh->queue, node) {

                /* If the first TD has the right toggle value, we don't
                 * need to change any toggles in this URB */
                td = list_entry(urbp->td_list.next, struct uhci_td, list);
                if (toggle > 1 || uhci_toggle(td_token(td)) == toggle) {
                        td = list_entry(urbp->td_list.prev, struct uhci_td,
                                        list);
                        toggle = uhci_toggle(td_token(td)) ^ 1;

                /* Otherwise all the toggles in the URB have to be switched */
                } else {
                        list_for_each_entry(td, &urbp->td_list, list) {
                                td->token ^= __constant_cpu_to_le32(
                                                        TD_TOKEN_TOGGLE);
                                toggle ^= 1;
                        }
                }
        }

        wmb();
        pipe = list_entry(qh->queue.next, struct urb_priv, node)->urb->pipe;
        usb_settoggle(qh->udev, usb_pipeendpoint(pipe),
                        usb_pipeout(pipe), toggle);
        qh->needs_fixup = 0;
}

/*
 * Link an Isochronous QH into its skeleton's list
 */
static inline void link_iso(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        list_add_tail(&qh->node, &uhci->skel_iso_qh->node);

        /* Isochronous QHs aren't linked by the hardware */
}

/*
 * Link a high-period interrupt QH into the schedule at the end of its
 * skeleton's list
 */
static void link_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        struct uhci_qh *pqh;

        list_add_tail(&qh->node, &uhci->skelqh[qh->skel]->node);

        pqh = list_entry(qh->node.prev, struct uhci_qh, node);
        qh->link = pqh->link;
        wmb();
        pqh->link = LINK_TO_QH(qh);
}

/*
 * Link a period-1 interrupt or async QH into the schedule at the
 * correct spot in the async skeleton's list, and update the FSBR link
 */
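/* The QHs on the async list are kept sorted by their skeleton index,
 * which is why the reverse search below can stop at the first QH whose
 * skel value is <= that of the QH being inserted. */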
static void link_async(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        struct uhci_qh *pqh, *lqh;
        __le32 link_to_new_qh;
        __le32 *extra_link = &link_to_new_qh;

        /* Find the predecessor QH for our new one and insert it in the list.
         * The list of QHs is expected to be short, so linear search won't
         * take too long. */
        list_for_each_entry_reverse(pqh, &uhci->skel_async_qh->node, node) {
                if (pqh->skel <= qh->skel)
                        break;
        }
        list_add(&qh->node, &pqh->node);
        qh->link = pqh->link;

        link_to_new_qh = LINK_TO_QH(qh);

        /* If this is now the first FSBR QH, take special action */
        if (uhci->fsbr_is_on && pqh->skel < SKEL_FSBR &&
                        qh->skel >= SKEL_FSBR) {
                lqh = list_entry(uhci->skel_async_qh->node.prev,
                                struct uhci_qh, node);

                /* If the new QH is also the last one, we must unlink
                 * the terminating skeleton QH and make the new QH point
                 * back to itself. */
                if (qh == lqh) {
                        qh->link = link_to_new_qh;
                        extra_link = &uhci->skel_term_qh->link;

                /* Otherwise the last QH must point to the new QH */
                } else
                        extra_link = &lqh->link;
        }

        /* Link it into the schedule */
        wmb();
        *extra_link = pqh->link = link_to_new_qh;
}

/*
 * Put a QH on the schedule in both hardware and software
 */
static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        WARN_ON(list_empty(&qh->queue));

        /* Set the element pointer if it isn't set already.
         * This isn't needed for Isochronous queues, but it doesn't hurt. */
        if (qh_element(qh) == UHCI_PTR_TERM) {
                struct urb_priv *urbp = list_entry(qh->queue.next,
                                struct urb_priv, node);
                struct uhci_td *td = list_entry(urbp->td_list.next,
                                struct uhci_td, list);

                qh->element = LINK_TO_TD(td);
        }

        /* Treat the queue as if it has just advanced */
        qh->wait_expired = 0;
        qh->advance_jiffies = jiffies;

        if (qh->state == QH_STATE_ACTIVE)
                return;
        qh->state = QH_STATE_ACTIVE;

        /* Move the QH from its old list to the correct spot in the appropriate
         * skeleton's list */
        if (qh == uhci->next_qh)
                uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
                                node);
        list_del(&qh->node);

        if (qh->skel == SKEL_ISO)
                link_iso(uhci, qh);
        else if (qh->skel < SKEL_ASYNC)
                link_interrupt(uhci, qh);
        else
                link_async(uhci, qh);
}

/*
 * Unlink a high-period interrupt QH from the schedule
 */
static void unlink_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        struct uhci_qh *pqh;

        pqh = list_entry(qh->node.prev, struct uhci_qh, node);
        pqh->link = qh->link;
        mb();
}

/*
 * Unlink a period-1 interrupt or async QH from the schedule
 */
static void unlink_async(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        struct uhci_qh *pqh, *lqh;
        __le32 link_to_next_qh = qh->link;

        pqh = list_entry(qh->node.prev, struct uhci_qh, node);

        /* If this is the first FSBR QH, take special action */
        if (uhci->fsbr_is_on && pqh->skel < SKEL_FSBR &&
                        qh->skel >= SKEL_FSBR) {
                lqh = list_entry(uhci->skel_async_qh->node.prev,
                                struct uhci_qh, node);

                /* If this QH is also the last one, we must link in
                 * the terminating skeleton QH. */
                if (qh == lqh) {
                        link_to_next_qh = LINK_TO_QH(uhci->skel_term_qh);
                        uhci->skel_term_qh->link = link_to_next_qh;
                        wmb();
                        qh->link = link_to_next_qh;

                /* Otherwise the last QH must point to the new first FSBR QH */
                } else
                        lqh->link = link_to_next_qh;
        }

        pqh->link = link_to_next_qh;
        mb();
}

/*
 * Take a QH off the hardware schedule
 */
static void uhci_unlink_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        if (qh->state == QH_STATE_UNLINKING)
                return;
        WARN_ON(qh->state != QH_STATE_ACTIVE || !qh->udev);
        qh->state = QH_STATE_UNLINKING;

        /* Unlink the QH from the schedule and record when we did it */
        if (qh->skel == SKEL_ISO)
                ;
        else if (qh->skel < SKEL_ASYNC)
                unlink_interrupt(uhci, qh);
        else
                unlink_async(uhci, qh);

        uhci_get_current_frame_number(uhci);
        qh->unlink_frame = uhci->frame_number;

        /* Force an interrupt so we know when the QH is fully unlinked */
        if (list_empty(&uhci->skel_unlink_qh->node))
                uhci_set_next_interrupt(uhci);

        /* Move the QH from its old list to the end of the unlinking list */
        if (qh == uhci->next_qh)
                uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
                                node);
        list_move_tail(&qh->node, &uhci->skel_unlink_qh->node);
}

/*
 * When we and the controller are through with a QH, it becomes IDLE.
 * This happens when a QH has been off the schedule (on the unlinking
 * list) for more than one frame, or when an error occurs while adding
 * the first URB onto a new QH.
 */
static void uhci_make_qh_idle(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        WARN_ON(qh->state == QH_STATE_ACTIVE);

        if (qh == uhci->next_qh)
                uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
                                node);
        list_move(&qh->node, &uhci->idle_qh_list);
        qh->state = QH_STATE_IDLE;

        /* Now that the QH is idle, its post_td isn't being used */
        if (qh->post_td) {
                uhci_free_td(uhci, qh->post_td);
                qh->post_td = NULL;
        }

        /* If anyone is waiting for a QH to become idle, wake them up */
        if (uhci->num_waiting)
                wake_up_all(&uhci->waitqh);
}

/*
 * Find the highest existing bandwidth load for a given phase and period.
 */
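/* E.g. with phase 1 and period 4 this scans load[1], load[5],
 * load[9], ... up to MAX_PHASE and returns the largest entry. */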
static int uhci_highest_load(struct uhci_hcd *uhci, int phase, int period)
{
        int highest_load = uhci->load[phase];

        for (phase += period; phase < MAX_PHASE; phase += period)
                highest_load = max_t(int, highest_load, uhci->load[phase]);
        return highest_load;
}

/*
 * Set qh->phase to the optimal phase for a periodic transfer and
 * check whether the bandwidth requirement is acceptable.
 */
static int uhci_check_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        int minimax_load;

        /* Find the optimal phase (unless it is already set) and get
         * its load value. */
        if (qh->phase >= 0)
                minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
        else {
                int phase, load;
                int max_phase = min_t(int, MAX_PHASE, qh->period);

                qh->phase = 0;
                minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
                for (phase = 1; phase < max_phase; ++phase) {
                        load = uhci_highest_load(uhci, phase, qh->period);
                        if (load < minimax_load) {
                                minimax_load = load;
                                qh->phase = phase;
                        }
                }
        }

        /* Maximum allowable periodic bandwidth is 90%, or 900 us per frame */
        if (minimax_load + qh->load > 900) {
                dev_dbg(uhci_dev(uhci), "bandwidth allocation failed: "
                                "period %d, phase %d, %d + %d us\n",
                                qh->period, qh->phase, minimax_load, qh->load);
                return -ENOSPC;
        }
        return 0;
}

/*
 * Reserve a periodic QH's bandwidth in the schedule
 */
static void uhci_reserve_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        int i;
        int load = qh->load;
        char *p = "??";

        for (i = qh->phase; i < MAX_PHASE; i += qh->period) {
                uhci->load[i] += load;
                uhci->total_load += load;
        }
        uhci_to_hcd(uhci)->self.bandwidth_allocated =
                        uhci->total_load / MAX_PHASE;
        switch (qh->type) {
        case USB_ENDPOINT_XFER_INT:
                ++uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
                p = "INT";
                break;
        case USB_ENDPOINT_XFER_ISOC:
                ++uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
                p = "ISO";
                break;
        }
        qh->bandwidth_reserved = 1;
        dev_dbg(uhci_dev(uhci),
                        "%s dev %d ep%02x-%s, period %d, phase %d, %d us\n",
                        "reserve", qh->udev->devnum,
                        qh->hep->desc.bEndpointAddress, p,
                        qh->period, qh->phase, load);
}

/*
 * Release a periodic QH's bandwidth reservation
 */
static void uhci_release_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        int i;
        int load = qh->load;
        char *p = "??";

        for (i = qh->phase; i < MAX_PHASE; i += qh->period) {
                uhci->load[i] -= load;
                uhci->total_load -= load;
        }
        uhci_to_hcd(uhci)->self.bandwidth_allocated =
                        uhci->total_load / MAX_PHASE;
        switch (qh->type) {
        case USB_ENDPOINT_XFER_INT:
                --uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
                p = "INT";
                break;
        case USB_ENDPOINT_XFER_ISOC:
                --uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
                p = "ISO";
                break;
        }
        qh->bandwidth_reserved = 0;
        dev_dbg(uhci_dev(uhci),
                        "%s dev %d ep%02x-%s, period %d, phase %d, %d us\n",
                        "release", qh->udev->devnum,
                        qh->hep->desc.bEndpointAddress, p,
                        qh->period, qh->phase, load);
}

static inline struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci,
                struct urb *urb)
{
        struct urb_priv *urbp;

        urbp = kmem_cache_zalloc(uhci_up_cachep, GFP_ATOMIC);
        if (!urbp)
                return NULL;

        urbp->urb = urb;
        urb->hcpriv = urbp;

        INIT_LIST_HEAD(&urbp->node);
        INIT_LIST_HEAD(&urbp->td_list);

        return urbp;
}

static void uhci_free_urb_priv(struct uhci_hcd *uhci,
                struct urb_priv *urbp)
{
        struct uhci_td *td, *tmp;

        if (!list_empty(&urbp->node))
                dev_warn(uhci_dev(uhci), "urb %p still on QH's list!\n",
                                urbp->urb);

        list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
                uhci_remove_td_from_urbp(td);
                uhci_free_td(uhci, td);
        }

        urbp->urb->hcpriv = NULL;
        kmem_cache_free(uhci_up_cachep, urbp);
}

/*
 * Map status to standard result codes
 *
 * <status> is (td_status(td) & 0xF60000), a.k.a.
 * uhci_status_bits(td_status(td)).
 * Note: <status> does not include the TD_CTRL_NAK bit.
 * <dir_out> is True for output TDs and False for input TDs.
 */
static int uhci_map_status(int status, int dir_out)
{
        if (!status)
                return 0;
        if (status & TD_CTRL_BITSTUFF)                  /* Bitstuff error */
                return -EPROTO;
        if (status & TD_CTRL_CRCTIMEO) {                /* CRC/Timeout */
                if (dir_out)
                        return -EPROTO;
                else
                        return -EILSEQ;
        }
        if (status & TD_CTRL_BABBLE)                    /* Babble */
                return -EOVERFLOW;
        if (status & TD_CTRL_DBUFERR)                   /* Buffer error */
                return -ENOSR;
        if (status & TD_CTRL_STALLED)                   /* Stalled */
                return -EPIPE;
        return 0;
}

/*
 * Control transfers
 */
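/* A control URB is built as a chain of TDs on the endpoint's queue:
 *
 *      [SETUP] -> [DATA0/1 packets ...] -> [STATUS] -> [new dummy]
 *
 * The SETUP stage reuses the queue's existing (inactive) dummy TD and
 * a fresh dummy is appended at the end, so the next URB can be linked
 * on later without stopping the controller. */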
static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
                struct uhci_qh *qh)
{
        struct uhci_td *td;
        unsigned long destination, status;
        int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize);
        int len = urb->transfer_buffer_length;
        dma_addr_t data = urb->transfer_dma;
        __le32 *plink;
        struct urb_priv *urbp = urb->hcpriv;
        int skel;

        /* The "pipe" thing contains the destination in bits 8--18 */
        destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;

        /* 3 errors, dummy TD remains inactive */
        status = uhci_maxerr(3);
        if (urb->dev->speed == USB_SPEED_LOW)
                status |= TD_CTRL_LS;

        /*
         * Build the TD for the control request setup packet
         */
        td = qh->dummy_td;
        uhci_add_td_to_urbp(td, urbp);
        uhci_fill_td(td, status, destination | uhci_explen(8),
                        urb->setup_dma);
        plink = &td->link;
        status |= TD_CTRL_ACTIVE;

        /*
         * If direction is "send", change the packet ID from SETUP (0x2D)
         * to OUT (0xE1).  Else change it from SETUP to IN (0x69) and
         * set Short Packet Detect (SPD) for all data packets.
         */
        if (usb_pipeout(urb->pipe))
                destination ^= (USB_PID_SETUP ^ USB_PID_OUT);
        else {
                destination ^= (USB_PID_SETUP ^ USB_PID_IN);
                status |= TD_CTRL_SPD;
        }

        /*
         * Build the DATA TDs
         */
        while (len > 0) {
                int pktsze = min(len, maxsze);

                td = uhci_alloc_td(uhci);
                if (!td)
                        goto nomem;
                *plink = LINK_TO_TD(td);

                /* Alternate Data0/1 (start with Data1) */
                destination ^= TD_TOKEN_TOGGLE;

                uhci_add_td_to_urbp(td, urbp);
                uhci_fill_td(td, status, destination | uhci_explen(pktsze),
                                data);
                plink = &td->link;

                data += pktsze;
                len -= pktsze;
        }

        /*
         * Build the final TD for control status
         */
        td = uhci_alloc_td(uhci);
        if (!td)
                goto nomem;
        *plink = LINK_TO_TD(td);

        /*
         * It's IN if the pipe is an output pipe or we're not expecting
         * data back.
         */
        destination &= ~TD_TOKEN_PID_MASK;
        if (usb_pipeout(urb->pipe) || !urb->transfer_buffer_length)
                destination |= USB_PID_IN;
        else
                destination |= USB_PID_OUT;

        destination |= TD_TOKEN_TOGGLE;         /* End in Data1 */

        status &= ~TD_CTRL_SPD;

        uhci_add_td_to_urbp(td, urbp);
        uhci_fill_td(td, status | TD_CTRL_IOC,
                        destination | uhci_explen(0), 0);
        plink = &td->link;

        /*
         * Build the new dummy TD and activate the old one
         */
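        /* The old dummy (now the SETUP TD) is activated only after the
         * wmb() below.  Until that single status write the controller
         * stalls at the inactive dummy, so it sees either the complete
         * new chain or nothing at all. */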
        td = uhci_alloc_td(uhci);
        if (!td)
                goto nomem;
        *plink = LINK_TO_TD(td);

        uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0);
        wmb();
        qh->dummy_td->status |= __constant_cpu_to_le32(TD_CTRL_ACTIVE);
        qh->dummy_td = td;

        /* Low-speed transfers get a different queue, and won't hog the bus.
         * Also, some devices enumerate better without FSBR; the easiest way
         * to do that is to put URBs on the low-speed queue while the device
         * isn't in the CONFIGURED state. */
        if (urb->dev->speed == USB_SPEED_LOW ||
                        urb->dev->state != USB_STATE_CONFIGURED)
                skel = SKEL_LS_CONTROL;
        else {
                skel = SKEL_FS_CONTROL;
                uhci_add_fsbr(uhci, urb);
        }
        if (qh->state != QH_STATE_ACTIVE)
                qh->skel = skel;

        urb->actual_length = -8;        /* Account for the SETUP packet */
        return 0;

nomem:
        /* Remove the dummy TD from the td_list so it doesn't get freed */
        uhci_remove_td_from_urbp(qh->dummy_td);
        return -ENOMEM;
}

/*
 * Common submit for bulk and interrupt
 */
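/* As with control transfers, the first TD of a new URB reuses the
 * queue's inactive dummy TD and a new dummy is appended afterward;
 * activating the old dummy last (after a wmb()) makes the submission
 * atomic from the controller's point of view. */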
static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
                struct uhci_qh *qh)
{
        struct uhci_td *td;
        unsigned long destination, status;
        int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize);
        int len = urb->transfer_buffer_length;
        dma_addr_t data = urb->transfer_dma;
        __le32 *plink;
        struct urb_priv *urbp = urb->hcpriv;
        unsigned int toggle;

        if (len < 0)
                return -EINVAL;

        /* The "pipe" thing contains the destination in bits 8--18 */
        destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
        toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
                         usb_pipeout(urb->pipe));

        /* 3 errors, dummy TD remains inactive */
        status = uhci_maxerr(3);
        if (urb->dev->speed == USB_SPEED_LOW)
                status |= TD_CTRL_LS;
        if (usb_pipein(urb->pipe))
                status |= TD_CTRL_SPD;

        /*
         * Build the DATA TDs
         */
        plink = NULL;
        td = qh->dummy_td;
        do {    /* Allow zero length packets */
                int pktsze = maxsze;

                if (len <= pktsze) {            /* The last packet */
                        pktsze = len;
                        if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
                                status &= ~TD_CTRL_SPD;
                }

                if (plink) {
                        td = uhci_alloc_td(uhci);
                        if (!td)
                                goto nomem;
                        *plink = LINK_TO_TD(td);
                }
                uhci_add_td_to_urbp(td, urbp);
                uhci_fill_td(td, status,
                                destination | uhci_explen(pktsze) |
                                        (toggle << TD_TOKEN_TOGGLE_SHIFT),
                                data);
                plink = &td->link;
                status |= TD_CTRL_ACTIVE;

                data += pktsze;
                len -= maxsze;
                toggle ^= 1;
        } while (len > 0);

        /*
         * URB_ZERO_PACKET means adding a 0-length packet, if the direction
         * is OUT and transfer_buffer_length was an exact multiple of maxsze,
         * hence (len = transfer_length - N * maxsze) == 0. However, if
         * transfer_length == 0, the zero packet was already prepared above.
         */
        if ((urb->transfer_flags & URB_ZERO_PACKET) &&
                        usb_pipeout(urb->pipe) && len == 0 &&
                        urb->transfer_buffer_length > 0) {
                td = uhci_alloc_td(uhci);
                if (!td)
                        goto nomem;
                *plink = LINK_TO_TD(td);

                uhci_add_td_to_urbp(td, urbp);
                uhci_fill_td(td, status,
                                destination | uhci_explen(0) |
                                        (toggle << TD_TOKEN_TOGGLE_SHIFT),
                                data);
                plink = &td->link;

                toggle ^= 1;
        }

        /* Set the interrupt-on-completion flag on the last packet.
         * A more-or-less typical 4 KB URB (= size of one memory page)
         * will require about 3 ms to transfer; that's a little on the
         * fast side but not enough to justify delaying an interrupt
         * more than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT
         * flag setting. */
        td->status |= __constant_cpu_to_le32(TD_CTRL_IOC);

        /*
         * Build the new dummy TD and activate the old one
         */
        td = uhci_alloc_td(uhci);
        if (!td)
                goto nomem;
        *plink = LINK_TO_TD(td);

        uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0);
        wmb();
        qh->dummy_td->status |= __constant_cpu_to_le32(TD_CTRL_ACTIVE);
        qh->dummy_td = td;

        usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
                        usb_pipeout(urb->pipe), toggle);
        return 0;

nomem:
        /* Remove the dummy TD from the td_list so it doesn't get freed */
        uhci_remove_td_from_urbp(qh->dummy_td);
        return -ENOMEM;
}

static int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb,
                struct uhci_qh *qh)
{
        int ret;

        /* Can't have low-speed bulk transfers */
        if (urb->dev->speed == USB_SPEED_LOW)
                return -EINVAL;

        if (qh->state != QH_STATE_ACTIVE)
                qh->skel = SKEL_BULK;
        ret = uhci_submit_common(uhci, urb, qh);
        if (ret == 0)
                uhci_add_fsbr(uhci, urb);
        return ret;
}

static int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb,
                struct uhci_qh *qh)
{
        int ret;

        /* USB 1.1 interrupt transfers only involve one packet per interval.
         * Drivers can submit URBs of any length, but longer ones will need
         * multiple intervals to complete.
         */

        if (!qh->bandwidth_reserved) {
                int exponent;

                /* Figure out which power-of-two queue to use */
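                /* E.g. urb->interval == 10 rounds down to period 8
                 * (exponent 3), since 1 << 3 <= 10 < 1 << 4. */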
                for (exponent = 7; exponent >= 0; --exponent) {
                        if ((1 << exponent) <= urb->interval)
                                break;
                }
                if (exponent < 0)
                        return -EINVAL;
                qh->period = 1 << exponent;
                qh->skel = SKEL_INDEX(exponent);

                /* For now, interrupt phase is fixed by the layout
                 * of the QH lists. */
                qh->phase = (qh->period / 2) & (MAX_PHASE - 1);
                ret = uhci_check_bandwidth(uhci, qh);
                if (ret)
                        return ret;
        } else if (qh->period > urb->interval)
                return -EINVAL;         /* Can't decrease the period */

        ret = uhci_submit_common(uhci, urb, qh);
        if (ret == 0) {
                urb->interval = qh->period;
                if (!qh->bandwidth_reserved)
                        uhci_reserve_bandwidth(uhci, qh);
        }
        return ret;
}

/*
 * Fix up the data structures following a short transfer
 */
static int uhci_fixup_short_transfer(struct uhci_hcd *uhci,
                struct uhci_qh *qh, struct urb_priv *urbp)
{
        struct uhci_td *td;
        struct list_head *tmp;
        int ret;

        td = list_entry(urbp->td_list.prev, struct uhci_td, list);
        if (qh->type == USB_ENDPOINT_XFER_CONTROL) {

                /* When a control transfer is short, we have to restart
                 * the queue at the status stage transaction, which is
                 * the last TD. */
                WARN_ON(list_empty(&urbp->td_list));
                qh->element = LINK_TO_TD(td);
                tmp = td->list.prev;
                ret = -EINPROGRESS;

        } else {

                /* When a bulk/interrupt transfer is short, we have to
                 * fix up the toggles of the following URBs on the queue
                 * before restarting the queue at the next URB. */
                qh->initial_toggle = uhci_toggle(td_token(qh->post_td)) ^ 1;
                uhci_fixup_toggles(qh, 1);

                if (list_empty(&urbp->td_list))
                        td = qh->post_td;
                qh->element = td->link;
                tmp = urbp->td_list.prev;
                ret = 0;
        }

        /* Remove all the TDs we skipped over, from tmp back to the start */
        while (tmp != &urbp->td_list) {
                td = list_entry(tmp, struct uhci_td, list);
                tmp = tmp->prev;

                uhci_remove_td_from_urbp(td);
                uhci_free_td(uhci, td);
        }
        return ret;
}

/*
 * Common result for control, bulk, and interrupt
 */
static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
{
        struct urb_priv *urbp = urb->hcpriv;
        struct uhci_qh *qh = urbp->qh;
        struct uhci_td *td, *tmp;
        unsigned status;
        int ret = 0;

        list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
                unsigned int ctrlstat;
                int len;

                ctrlstat = td_status(td);
                status = uhci_status_bits(ctrlstat);
                if (status & TD_CTRL_ACTIVE)
                        return -EINPROGRESS;

                len = uhci_actual_length(ctrlstat);
                urb->actual_length += len;

                if (status) {
                        ret = uhci_map_status(status,
                                        uhci_packetout(td_token(td)));
                        if ((debug == 1 && ret != -EPIPE) || debug > 1) {
                                /* Some debugging code */
                                dev_dbg(&urb->dev->dev,
                                                "%s: failed with status %x\n",
                                                __FUNCTION__, status);

                                if (debug > 1 && errbuf) {
                                        /* Print the chain for debugging */
                                        uhci_show_qh(urbp->qh, errbuf,
                                                        ERRBUF_LEN, 0);
                                        lprintk(errbuf);
                                }
                        }

                } else if (len < uhci_expected_length(td_token(td))) {

                        /* We received a short packet */
                        if (urb->transfer_flags & URB_SHORT_NOT_OK)
                                ret = -EREMOTEIO;

                        /* Fixup needed only if this isn't the URB's last TD */
                        else if (&td->list != urbp->td_list.prev)
                                ret = 1;
                }

                uhci_remove_td_from_urbp(td);
                if (qh->post_td)
                        uhci_free_td(uhci, qh->post_td);
                qh->post_td = td;

                if (ret != 0)
                        goto err;
        }
        return ret;

err:
        if (ret < 0) {
                /* In case a control transfer gets an error
                 * during the setup stage */
                urb->actual_length = max(urb->actual_length, 0);

                /* Note that the queue has stopped and save
                 * the next toggle value */
                qh->element = UHCI_PTR_TERM;
                qh->is_stopped = 1;
                qh->needs_fixup = (qh->type != USB_ENDPOINT_XFER_CONTROL);
                qh->initial_toggle = uhci_toggle(td_token(td)) ^
                                (ret == -EREMOTEIO);

        } else          /* Short packet received */
                ret = uhci_fixup_short_transfer(uhci, qh, urbp);
        return ret;
}

/*
 * Isochronous transfers
 */
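/* Isochronous URBs never go through the QH's element pointer.  Each
 * packet gets its own TD, linked directly into the frame-list slot for
 * the frame in which it should run (uhci_insert_td_in_frame_list); the
 * QH only collects the TDs for bookkeeping and bandwidth accounting. */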
static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
                struct uhci_qh *qh)
{
        struct uhci_td *td = NULL;      /* Since urb->number_of_packets > 0 */
        int i, frame;
        unsigned long destination, status;
        struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;

        /* Values must not be too big (could overflow below) */
        if (urb->interval >= UHCI_NUMFRAMES ||
                        urb->number_of_packets >= UHCI_NUMFRAMES)
                return -EFBIG;

        /* Check the period and figure out the starting frame number */
        if (!qh->bandwidth_reserved) {
                qh->period = urb->interval;
                if (urb->transfer_flags & URB_ISO_ASAP) {
                        qh->phase = -1;         /* Find the best phase */
                        i = uhci_check_bandwidth(uhci, qh);
                        if (i)
                                return i;

                        /* Allow a little time to allocate the TDs */
                        uhci_get_current_frame_number(uhci);
                        frame = uhci->frame_number + 10;

                        /* Move forward to the first frame having the
                         * correct phase */
1297                         urb->start_frame = frame + ((qh->phase - frame) &
1298                                         (qh->period - 1));
1299                 } else {
1300                         i = urb->start_frame - uhci->last_iso_frame;
1301                         if (i <= 0 || i >= UHCI_NUMFRAMES)
1302                                 return -EINVAL;
1303                         qh->phase = urb->start_frame & (qh->period - 1);
1304                         i = uhci_check_bandwidth(uhci, qh);
1305                         if (i)
1306                                 return i;
1307                 }
1308
1309         } else if (qh->period != urb->interval) {
1310                 return -EINVAL;         /* Can't change the period */
1311
1312         } else {        /* Pick up where the last URB leaves off */
1313                 if (list_empty(&qh->queue)) {
1314                         frame = qh->iso_frame;
1315                 } else {
1316                         struct urb *lurb;
1317
1318                         lurb = list_entry(qh->queue.prev,
1319                                         struct urb_priv, node)->urb;
1320                         frame = lurb->start_frame +
1321                                         lurb->number_of_packets *
1322                                         lurb->interval;
1323                 }
1324                 if (urb->transfer_flags & URB_ISO_ASAP)
1325                         urb->start_frame = frame;
1326                 else if (urb->start_frame != frame)
1327                         return -EINVAL;
1328         }
1329
1330         /* Make sure we won't have to go too far into the future */
1331         if (uhci_frame_before_eq(uhci->last_iso_frame + UHCI_NUMFRAMES,
1332                         urb->start_frame + urb->number_of_packets *
1333                                 urb->interval))
1334                 return -EFBIG;
1335
1336         status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
1337         destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
1338
1339         for (i = 0; i < urb->number_of_packets; i++) {
1340                 td = uhci_alloc_td(uhci);
1341                 if (!td)
1342                         return -ENOMEM;
1343
1344                 uhci_add_td_to_urbp(td, urbp);
1345                 uhci_fill_td(td, status, destination |
1346                                 uhci_explen(urb->iso_frame_desc[i].length),
1347                                 urb->transfer_dma +
1348                                         urb->iso_frame_desc[i].offset);
1349         }
1350
1351         /* Set the interrupt-on-completion flag on the last packet. */
1352         td->status |= __constant_cpu_to_le32(TD_CTRL_IOC);
1353
1354         /* Add the TDs to the frame list */
1355         frame = urb->start_frame;
1356         list_for_each_entry(td, &urbp->td_list, list) {
1357                 uhci_insert_td_in_frame_list(uhci, td, frame);
1358                 frame += qh->period;
1359         }
1360
1361         if (list_empty(&qh->queue)) {
1362                 qh->iso_packet_desc = &urb->iso_frame_desc[0];
1363                 qh->iso_frame = urb->start_frame;
1364                 qh->iso_status = 0;
1365         }
1366
1367         qh->skel = SKEL_ISO;
1368         if (!qh->bandwidth_reserved)
1369                 uhci_reserve_bandwidth(uhci, qh);
1370         return 0;
1371 }

static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
{
        struct uhci_td *td, *tmp;
        struct urb_priv *urbp = urb->hcpriv;
        struct uhci_qh *qh = urbp->qh;

        list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
                unsigned int ctrlstat;
                int status;
                int actlength;

                if (uhci_frame_before_eq(uhci->cur_iso_frame, qh->iso_frame))
                        return -EINPROGRESS;

                uhci_remove_tds_from_frame(uhci, qh->iso_frame);

                ctrlstat = td_status(td);
                if (ctrlstat & TD_CTRL_ACTIVE) {
                        status = -EXDEV;        /* TD was added too late? */
                } else {
                        status = uhci_map_status(uhci_status_bits(ctrlstat),
                                        usb_pipeout(urb->pipe));
                        actlength = uhci_actual_length(ctrlstat);

                        urb->actual_length += actlength;
                        qh->iso_packet_desc->actual_length = actlength;
                        qh->iso_packet_desc->status = status;
                }

                if (status) {
                        urb->error_count++;
                        qh->iso_status = status;
                }

                uhci_remove_td_from_urbp(td);
                uhci_free_td(uhci, td);
                qh->iso_frame += qh->period;
                ++qh->iso_packet_desc;
        }
        return qh->iso_status;
}
1415 static int uhci_urb_enqueue(struct usb_hcd *hcd,
1416                 struct usb_host_endpoint *hep,
1417                 struct urb *urb, gfp_t mem_flags)
1418 {
1419         int ret;
1420         struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1421         unsigned long flags;
1422         struct urb_priv *urbp;
1423         struct uhci_qh *qh;
1424
1425         spin_lock_irqsave(&uhci->lock, flags);
1426
1427         ret = urb->status;
1428         if (ret != -EINPROGRESS)                /* URB already unlinked! */
1429                 goto done;
1430
1431         ret = -ENOMEM;
1432         urbp = uhci_alloc_urb_priv(uhci, urb);
1433         if (!urbp)
1434                 goto done;
1435
1436         if (hep->hcpriv)
1437                 qh = (struct uhci_qh *) hep->hcpriv;
1438         else {
1439                 qh = uhci_alloc_qh(uhci, urb->dev, hep);
1440                 if (!qh)
1441                         goto err_no_qh;
1442         }
1443         urbp->qh = qh;
1444
1445         switch (qh->type) {
1446         case USB_ENDPOINT_XFER_CONTROL:
1447                 ret = uhci_submit_control(uhci, urb, qh);
1448                 break;
1449         case USB_ENDPOINT_XFER_BULK:
1450                 ret = uhci_submit_bulk(uhci, urb, qh);
1451                 break;
1452         case USB_ENDPOINT_XFER_INT:
1453                 ret = uhci_submit_interrupt(uhci, urb, qh);
1454                 break;
1455         case USB_ENDPOINT_XFER_ISOC:
1456                 urb->error_count = 0;
1457                 ret = uhci_submit_isochronous(uhci, urb, qh);
1458                 break;
1459         }
1460         if (ret != 0)
1461                 goto err_submit_failed;
1462
1463         /* Add this URB to the QH */
1464         urbp->qh = qh;
1465         list_add_tail(&urbp->node, &qh->queue);
1466
1467         /* If the new URB is the first and only one on this QH then either
1468          * the QH is new and idle or else it's unlinked and waiting to
1469          * become idle, so we can activate it right away.  But only if the
1470          * queue isn't stopped. */
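             /* (The URB was just added at the tail, so finding it at the
              * head means it must be the only entry in the queue.) */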
1471         if (qh->queue.next == &urbp->node && !qh->is_stopped) {
1472                 uhci_activate_qh(uhci, qh);
1473                 uhci_urbp_wants_fsbr(uhci, urbp);
1474         }
1475         goto done;
1476
1477 err_submit_failed:
1478         if (qh->state == QH_STATE_IDLE)
1479                 uhci_make_qh_idle(uhci, qh);    /* Reclaim unused QH */
1480
1481 err_no_qh:
1482         uhci_free_urb_priv(uhci, urbp);
1483
1484 done:
1485         spin_unlock_irqrestore(&uhci->lock, flags);
1486         return ret;
1487 }
1488
1489 static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
1490 {
1491         struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1492         unsigned long flags;
1493         struct urb_priv *urbp;
1494         struct uhci_qh *qh;
1495
1496         spin_lock_irqsave(&uhci->lock, flags);
1497         urbp = urb->hcpriv;
1498         if (!urbp)                      /* URB was never linked! */
1499                 goto done;
1500         qh = urbp->qh;
1501
1502         /* Remove Isochronous TDs from the frame list ASAP */
1503         if (qh->type == USB_ENDPOINT_XFER_ISOC) {
1504                 uhci_unlink_isochronous_tds(uhci, urb);
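                     /* The barrier orders the TD unlinks above ahead of
                      * the frame-number read below, so the unlink time we
                      * record can't predate the actual removal. */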
1505                 mb();
1506
1507                 /* If the URB has already started, update the QH unlink time */
1508                 uhci_get_current_frame_number(uhci);
1509                 if (uhci_frame_before_eq(urb->start_frame, uhci->frame_number))
1510                         qh->unlink_frame = uhci->frame_number;
1511         }
1512
1513         uhci_unlink_qh(uhci, qh);
1514
1515 done:
1516         spin_unlock_irqrestore(&uhci->lock, flags);
1517         return 0;
1518 }
1519
1520 /*
1521  * Finish unlinking an URB and give it back
1522  */
1523 static void uhci_giveback_urb(struct uhci_hcd *uhci, struct uhci_qh *qh,
1524                 struct urb *urb)
1525 __releases(uhci->lock)
1526 __acquires(uhci->lock)
1527 {
1528         struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
1529
1530         /* When giving back the first URB in an Isochronous queue,
1531          * reinitialize the QH's iso-related members for the next URB. */
1532         if (qh->type == USB_ENDPOINT_XFER_ISOC &&
1533                         urbp->node.prev == &qh->queue &&
1534                         urbp->node.next != &qh->queue) {
1535                 struct urb *nurb = list_entry(urbp->node.next,
1536                                 struct urb_priv, node)->urb;
1537
1538                 qh->iso_packet_desc = &nurb->iso_frame_desc[0];
1539                 qh->iso_frame = nurb->start_frame;
1540                 qh->iso_status = 0;
1541         }
1542
1543         /* Take the URB off the QH's queue.  If the queue is now empty,
1544          * no TDs remain, so it's safe to fix up the endpoint toggle. */
1545         list_del_init(&urbp->node);
1546         if (list_empty(&qh->queue) && qh->needs_fixup) {
1547                 usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
1548                                 usb_pipeout(urb->pipe), qh->initial_toggle);
1549                 qh->needs_fixup = 0;
1550         }
1551
1552         uhci_free_urb_priv(uhci, urbp);
1553
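             /* The completion handler runs without uhci->lock held; it
              * may even resubmit URBs and re-enter this driver, which is
              * why the lock must be dropped around the giveback (see the
              * __releases/__acquires annotations above). */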
1554         spin_unlock(&uhci->lock);
1555         usb_hcd_giveback_urb(uhci_to_hcd(uhci), urb);
1556         spin_lock(&uhci->lock);
1557
1558         /* If the queue is now empty, we can unlink the QH and give up its
1559          * reserved bandwidth. */
1560         if (list_empty(&qh->queue)) {
1561                 uhci_unlink_qh(uhci, qh);
1562                 if (qh->bandwidth_reserved)
1563                         uhci_release_bandwidth(uhci, qh);
1564         }
1565 }
1566
1567 /*
1568  * Scan the URBs in a QH's queue
1569  */
1570 #define QH_FINISHED_UNLINKING(qh)                       \
1571                 (qh->state == QH_STATE_UNLINKING &&     \
1572                 uhci->frame_number + uhci->is_stopped != qh->unlink_frame)
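     /* A QH that was marked UNLINKING during frame qh->unlink_frame is
      * fully unlinked once the controller has moved past that frame.
      * When the controller is stopped, uhci->is_stopped is nonzero
      * (presumably a value larger than any frame number), so the sum can
      * never equal qh->unlink_frame and the unlink completes immediately.
      * Note that the macro uses the local variable "uhci" from the
      * caller's scope. */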
1573
1574 static void uhci_scan_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
1575 {
1576         struct urb_priv *urbp;
1577         struct urb *urb;
1578         int status;
1579
1580         while (!list_empty(&qh->queue)) {
1581                 urbp = list_entry(qh->queue.next, struct urb_priv, node);
1582                 urb = urbp->urb;
1583
1584                 if (qh->type == USB_ENDPOINT_XFER_ISOC)
1585                         status = uhci_result_isochronous(uhci, urb);
1586                 else
1587                         status = uhci_result_common(uhci, urb);
1588                 if (status == -EINPROGRESS)
1589                         break;
1590
1591                 spin_lock(&urb->lock);
1592                 if (urb->status == -EINPROGRESS)        /* Not dequeued */
1593                         urb->status = status;
1594                 else
1595                         status = ECONNRESET;            /* Not -ECONNRESET */
1596                 spin_unlock(&urb->lock);
1597
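                     /* A positive ECONNRESET can't be confused with any
                      * real completion status, so it serves here purely
                      * as an in-band "this URB was dequeued" marker. */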
1598                 /* Dequeued but completed URBs can't be given back unless
1599                  * the QH is stopped or has finished unlinking. */
1600                 if (status == ECONNRESET) {
1601                         if (QH_FINISHED_UNLINKING(qh))
1602                                 qh->is_stopped = 1;
1603                         else if (!qh->is_stopped)
1604                                 return;
1605                 }
1606
1607                 uhci_giveback_urb(uhci, qh, urb);
1608                 if (status < 0 && qh->type != USB_ENDPOINT_XFER_ISOC)
1609                         break;
1610         }
1611
1612         /* If the QH is neither stopped nor finished unlinking (normal case),
1613          * our work here is done. */
1614         if (QH_FINISHED_UNLINKING(qh))
1615                 qh->is_stopped = 1;
1616         else if (!qh->is_stopped)
1617                 return;
1618
1619         /* Otherwise give back each of the dequeued URBs */
1620 restart:
1621         list_for_each_entry(urbp, &qh->queue, node) {
1622                 urb = urbp->urb;
1623                 if (urb->status != -EINPROGRESS) {
1624
1625                         /* Fix up the TD links and save the toggles for
1626                          * non-Isochronous queues.  For Isochronous queues,
1627                          * test for too-recent dequeues. */
1628                         if (!uhci_cleanup_queue(uhci, qh, urb)) {
1629                                 qh->is_stopped = 0;
1630                                 return;
1631                         }
1632                         uhci_giveback_urb(uhci, qh, urb);
1633                         goto restart;
1634                 }
1635         }
1636         qh->is_stopped = 0;
1637
1638         /* There are no more dequeued URBs.  If there are still URBs on the
1639          * queue, the QH can now be re-activated. */
1640         if (!list_empty(&qh->queue)) {
1641                 if (qh->needs_fixup)
1642                         uhci_fixup_toggles(qh, 0);
1643
1644                 /* If the first URB on the queue wants FSBR but its time
1645                  * limit has expired, set the next TD to interrupt on
1646                  * completion before reactivating the QH. */
1647                 urbp = list_entry(qh->queue.next, struct urb_priv, node);
1648                 if (urbp->fsbr && qh->wait_expired) {
1649                         struct uhci_td *td = list_entry(urbp->td_list.next,
1650                                         struct uhci_td, list);
1651
1652                         td->status |= __cpu_to_le32(TD_CTRL_IOC);
1653                 }
1654
1655                 uhci_activate_qh(uhci, qh);
1656         }
1657
1658         /* The queue is empty.  The QH can become idle if it is fully
1659          * unlinked. */
1660         else if (QH_FINISHED_UNLINKING(qh))
1661                 uhci_make_qh_idle(uhci, qh);
1662 }
1663
1664 /*
1665  * Check for queues that have made some forward progress.
1666  * Returns 0 if the queue is not Isochronous, is ACTIVE, and
1667  * has not advanced since last examined; 1 otherwise.
1668  *
1669  * Early Intel controllers have a bug which causes qh->element sometimes
1670  * not to advance when a TD completes successfully.  The queue remains
1671  * stuck on the inactive completed TD.  We detect such cases and advance
1672  * the element pointer by hand.
1673  */
1674 static int uhci_advance_check(struct uhci_hcd *uhci, struct uhci_qh *qh)
1675 {
1676         struct urb_priv *urbp = NULL;
1677         struct uhci_td *td;
1678         int ret = 1;
1679         unsigned status;
1680
1681         if (qh->type == USB_ENDPOINT_XFER_ISOC)
1682                 goto done;
1683
1684         /* Treat an UNLINKING queue as though it hasn't advanced.
1685          * This is okay because reactivation will treat it as though
1686          * it has advanced, and if it is going to become IDLE then
1687          * this doesn't matter anyway.  Furthermore it's possible
1688          * for an UNLINKING queue not to have any URBs at all, or
1689          * for its first URB not to have any TDs (if it was dequeued
1690          * just as it completed).  So it's not easy in any case to
1691          * test whether such queues have advanced. */
1692         if (qh->state != QH_STATE_ACTIVE) {
1693                 urbp = NULL;
1694                 status = 0;
1695
1696         } else {
1697                 urbp = list_entry(qh->queue.next, struct urb_priv, node);
1698                 td = list_entry(urbp->td_list.next, struct uhci_td, list);
1699                 status = td_status(td);
1700                 if (!(status & TD_CTRL_ACTIVE)) {
1701
1702                         /* We're okay, the queue has advanced */
1703                         qh->wait_expired = 0;
1704                         qh->advance_jiffies = jiffies;
1705                         goto done;
1706                 }
1707                 ret = 0;
1708         }
1709
1710         /* The queue hasn't advanced; check for timeout */
1711         if (qh->wait_expired)
1712                 goto done;
1713
1714         if (time_after(jiffies, qh->advance_jiffies + QH_WAIT_TIMEOUT)) {
1715
1716                 /* Detect the Intel bug and work around it */
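                     /* qh->post_td is the TD most recently completed and
                      * processed.  If the controller's element pointer
                      * still references it, the queue is stuck; advancing
                      * by hand means copying that TD's own link pointer
                      * into the QH's element field. */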
1717                 if (qh->post_td && qh_element(qh) == LINK_TO_TD(qh->post_td)) {
1718                         qh->element = qh->post_td->link;
1719                         qh->advance_jiffies = jiffies;
1720                         ret = 1;
1721                         goto done;
1722                 }
1723
1724                 qh->wait_expired = 1;
1725
1726                 /* If the current URB wants FSBR, unlink it temporarily
1727                  * so that we can safely set the next TD to interrupt on
1728                  * completion.  That way we'll know as soon as the queue
1729                  * starts moving again. */
1730                 if (urbp && urbp->fsbr && !(status & TD_CTRL_IOC))
1731                         uhci_unlink_qh(uhci, qh);
1732
1733         } else {
1734                 /* Unmoving but not-yet-expired queues keep FSBR alive */
1735                 if (urbp)
1736                         uhci_urbp_wants_fsbr(uhci, urbp);
1737         }
1738
1739 done:
1740         return ret;
1741 }
1742
1743 /*
1744  * Process events in the schedule, but only in one thread at a time
1745  */
1746 static void uhci_scan_schedule(struct uhci_hcd *uhci)
1747 {
1748         int i;
1749         struct uhci_qh *qh;
1750
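             /* scan_in_progress and need_rescan rely on uhci->lock, which
              * callers of this routine are presumably required to hold.
              * A scan requested while one is already running isn't lost:
              * it is recorded in need_rescan and the running scan loops
              * back via the "rescan" label below. */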
1751         /* Don't allow re-entrant calls */
1752         if (uhci->scan_in_progress) {
1753                 uhci->need_rescan = 1;
1754                 return;
1755         }
1756         uhci->scan_in_progress = 1;
1757 rescan:
1758         uhci->need_rescan = 0;
1759         uhci->fsbr_is_wanted = 0;
1760
1761         uhci_clear_next_interrupt(uhci);
1762         uhci_get_current_frame_number(uhci);
1763         uhci->cur_iso_frame = uhci->frame_number;
1764
1765         /* Go through all the QH queues and process the URBs in each one */
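             /* next_qh lives in the uhci_hcd structure rather than on the
              * stack because giving back an URB can unlink other QHs; the
              * unlink paths elsewhere in this file can then advance
              * next_qh, so this loop never follows a pointer to a QH that
              * has been taken off its list. */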
1766         for (i = 0; i < UHCI_NUM_SKELQH - 1; ++i) {
1767                 uhci->next_qh = list_entry(uhci->skelqh[i]->node.next,
1768                                 struct uhci_qh, node);
1769                 while ((qh = uhci->next_qh) != uhci->skelqh[i]) {
1770                         uhci->next_qh = list_entry(qh->node.next,
1771                                         struct uhci_qh, node);
1772
1773                         if (uhci_advance_check(uhci, qh)) {
1774                                 uhci_scan_qh(uhci, qh);
1775                                 if (qh->state == QH_STATE_ACTIVE) {
1776                                 uhci_urbp_wants_fsbr(uhci, list_entry(
1777                                         qh->queue.next, struct urb_priv, node));
1778                                 }
1779                         }
1780                 }
1781         }
1782
1783         uhci->last_iso_frame = uhci->cur_iso_frame;
1784         if (uhci->need_rescan)
1785                 goto rescan;
1786         uhci->scan_in_progress = 0;
1787
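             /* FSBR is on but no queue asked for it during this scan.
              * Rather than switching it off at once, arm fsbr_timer so it
              * is turned off only after FSBR_OFF_DELAY; presumably the
              * timer is cancelled if some queue wants FSBR again before
              * it fires. */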
1788         if (uhci->fsbr_is_on && !uhci->fsbr_is_wanted &&
1789                         !uhci->fsbr_expiring) {
1790                 uhci->fsbr_expiring = 1;
1791                 mod_timer(&uhci->fsbr_timer, jiffies + FSBR_OFF_DELAY);
1792         }
1793
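             /* QHs waiting on the unlink list need another scan in the
              * near future to finish reclaiming them, so request an
              * interrupt; otherwise the extra interrupt can be avoided. */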
1794         if (list_empty(&uhci->skel_unlink_qh->node))
1795                 uhci_clear_next_interrupt(uhci);
1796         else
1797                 uhci_set_next_interrupt(uhci);
1798 }