2 * Universal Host Controller Interface driver for USB.
4 * Maintainer: Alan Stern <stern@rowland.harvard.edu>
6 * (C) Copyright 1999 Linus Torvalds
7 * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
8 * (C) Copyright 1999 Randy Dunlap
9 * (C) Copyright 1999 Georg Acher, acher@in.tum.de
10 * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
11 * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
12 * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
13 * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
14 * support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
15 * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
16 * (C) Copyright 2004 Alan Stern, stern@rowland.harvard.edu
19 static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb);
20 static void uhci_unlink_generic(struct uhci_hcd *uhci, struct urb *urb);
21 static void uhci_remove_pending_urbps(struct uhci_hcd *uhci);
22 static void uhci_free_pending_qhs(struct uhci_hcd *uhci);
23 static void uhci_free_pending_tds(struct uhci_hcd *uhci);
26 * Technically, updating td->status here is a race, but it's not really a
27 * problem. The worst that can happen is that we set the IOC bit again,
28 * generating a spurious interrupt. We could fix this by creating another
29 * QH and leaving the IOC bit always set, but then we would have to play
30 * games with the FSBR code to make sure we get the correct order in all
31 * the cases. I don't think it's worth the effort.
33 static inline void uhci_set_next_interrupt(struct uhci_hcd *uhci)
36 mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies);
37 uhci->term_td->status |= cpu_to_le32(TD_CTRL_IOC);
40 static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
42 uhci->term_td->status &= ~cpu_to_le32(TD_CTRL_IOC);
45 static inline void uhci_moveto_complete(struct uhci_hcd *uhci,
46 struct urb_priv *urbp)
48 list_move_tail(&urbp->urb_list, &uhci->complete_list);
51 static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci)
53 dma_addr_t dma_handle;
56 td = dma_pool_alloc(uhci->td_pool, GFP_ATOMIC, &dma_handle);
60 td->dma_handle = dma_handle;
62 td->link = UHCI_PTR_TERM;
67 INIT_LIST_HEAD(&td->list);
68 INIT_LIST_HEAD(&td->remove_list);
69 INIT_LIST_HEAD(&td->fl_list);
74 static inline void uhci_fill_td(struct uhci_td *td, u32 status,
75 u32 token, u32 buffer)
77 td->status = cpu_to_le32(status);
78 td->token = cpu_to_le32(token);
79 td->buffer = cpu_to_le32(buffer);
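/*
 * Illustrative sketch (not part of the original driver): how the submit paths
 * below typically compose the three TD words before calling uhci_fill_td().
 * The identifiers token, toggle, len and buffer_dma are placeholders; all
 * other names are used elsewhere in this file.
 */
#if 0
	u32 status = uhci_maxerr(3) | TD_CTRL_ACTIVE | TD_CTRL_SPD;	/* full speed, IN => SPD */
	u32 token = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_IN |
			uhci_explen(len - 1) |
			(toggle << TD_TOKEN_TOGGLE_SHIFT);
	uhci_fill_td(td, status, token, buffer_dma);	/* stored little-endian by uhci_fill_td() */
#endif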
83 * We insert Isochronous URBs directly into the frame list at the beginning
85 static void uhci_insert_td_frame_list(struct uhci_hcd *uhci, struct uhci_td *td, unsigned framenum)
87 framenum &= (UHCI_NUMFRAMES - 1);
91 /* Is there a TD already mapped there? */
92 if (uhci->frame_cpu[framenum]) {
93 struct uhci_td *ftd, *ltd;
95 ftd = uhci->frame_cpu[framenum];
96 ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);
98 list_add_tail(&td->fl_list, &ftd->fl_list);
100 td->link = ltd->link;
102 ltd->link = cpu_to_le32(td->dma_handle);
104 td->link = uhci->frame[framenum];
106 uhci->frame[framenum] = cpu_to_le32(td->dma_handle);
107 uhci->frame_cpu[framenum] = td;
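/*
 * Illustrative note (not from the original source): UHCI_NUMFRAMES is the size
 * of the hardware frame list (1024 frames on UHCI), so the mask at the top of
 * this function simply wraps the requested frame number into the schedule.
 */
#if 0
	/* e.g. with UHCI_NUMFRAMES == 1024, asking for frame 1030 links the TD
	 * into frame-list slot 6 */
	uhci_insert_td_frame_list(uhci, td, 1030);
#endif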
111 static void uhci_remove_td(struct uhci_hcd *uhci, struct uhci_td *td)
113 /* If it's not inserted, don't remove it */
114 if (td->frame == -1 && list_empty(&td->fl_list))
117 if (td->frame != -1 && uhci->frame_cpu[td->frame] == td) {
118 if (list_empty(&td->fl_list)) {
119 uhci->frame[td->frame] = td->link;
120 uhci->frame_cpu[td->frame] = NULL;
124 ntd = list_entry(td->fl_list.next, struct uhci_td, fl_list);
125 uhci->frame[td->frame] = cpu_to_le32(ntd->dma_handle);
126 uhci->frame_cpu[td->frame] = ntd;
131 ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
132 ptd->link = td->link;
136 td->link = UHCI_PTR_TERM;
138 list_del_init(&td->fl_list);
143 * Inserts a td list into qh.
145 static void uhci_insert_tds_in_qh(struct uhci_qh *qh, struct urb *urb, __le32 breadth)
147 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
151 /* Ordering isn't important here since the QH hasn't been */
152 /* inserted into the schedule yet */
153 plink = &qh->element;
154 list_for_each_entry(td, &urbp->td_list, list) {
155 *plink = cpu_to_le32(td->dma_handle) | breadth;
158 *plink = UHCI_PTR_TERM;
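/*
 * Illustrative sketch (not from the original source): after the loop above,
 * the QH's element pointer and the TD link pointers form one breadth-first
 * chain:
 *
 *	qh->element --> td0 --> td1 --> ... --> tdN --> UHCI_PTR_TERM
 *
 * With UHCI_PTR_BREADTH (Vf bit clear) the controller executes one TD from
 * this queue and then moves on to the next QH in the schedule; the FSBR
 * timeout path later in this file ORs UHCI_PTR_DEPTH into selected links
 * instead.
 */
#if 0
	uhci_insert_tds_in_qh(qh, urb, UHCI_PTR_BREADTH);	/* as done by the submit paths below */
#endif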
161 static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
163 if (!list_empty(&td->list))
164 dev_warn(uhci_dev(uhci), "td %p still in list!\n", td);
165 if (!list_empty(&td->remove_list))
166 dev_warn(uhci_dev(uhci), "td %p still in remove_list!\n", td);
167 if (!list_empty(&td->fl_list))
168 dev_warn(uhci_dev(uhci), "td %p still in fl_list!\n", td);
170 dma_pool_free(uhci->td_pool, td, td->dma_handle);
173 static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci)
175 dma_addr_t dma_handle;
178 qh = dma_pool_alloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle);
182 qh->dma_handle = dma_handle;
184 qh->element = UHCI_PTR_TERM;
185 qh->link = UHCI_PTR_TERM;
189 INIT_LIST_HEAD(&qh->list);
190 INIT_LIST_HEAD(&qh->remove_list);
195 static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
197 if (!list_empty(&qh->list))
198 dev_warn(uhci_dev(uhci), "qh %p list not empty!\n", qh);
199 if (!list_empty(&qh->remove_list))
200 dev_warn(uhci_dev(uhci), "qh %p still in remove_list!\n", qh);
202 dma_pool_free(uhci->qh_pool, qh, qh->dma_handle);
206 * Append this urb's qh after the last qh in skelqh->list
208 * Note that urb_priv.queue_list doesn't have a separate queue head;
209 * it's a ring with every element "live".
211 static void uhci_insert_qh(struct uhci_hcd *uhci, struct uhci_qh *skelqh, struct urb *urb)
213 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
214 struct urb_priv *turbp;
217 /* Grab the last QH */
218 lqh = list_entry(skelqh->list.prev, struct uhci_qh, list);
220 /* Point to the next skelqh */
221 urbp->qh->link = lqh->link;
222 wmb(); /* Ordering is important */
225 * Patch QHs for previous endpoint's queued URBs? HC goes
226 * here next, not to the next skelqh it now points to.
228 * lqh --> td ... --> qh ... --> td --> qh ... --> td
231 * +<----------------+-----------------+
233 * newqh --> td ... --> td
238 * The HC could see (and use!) any of these as we write them.
240 lqh->link = cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH;
242 list_for_each_entry(turbp, &lqh->urbp->queue_list, queue_list)
243 turbp->qh->link = lqh->link;
246 list_add_tail(&urbp->qh->list, &skelqh->list);
250 * Start removal of QH from schedule; it finishes next frame.
251 * TDs should be unlinked before this is called.
253 static void uhci_remove_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
262 * Only go through the hoops if it's actually linked in
264 if (!list_empty(&qh->list)) {
266 /* If our queue is nonempty, make the next URB the head */
267 if (!list_empty(&qh->urbp->queue_list)) {
268 struct urb_priv *nurbp;
270 nurbp = list_entry(qh->urbp->queue_list.next,
271 struct urb_priv, queue_list);
273 list_add(&nurbp->qh->list, &qh->list);
274 newlink = cpu_to_le32(nurbp->qh->dma_handle) | UHCI_PTR_QH;
278 /* Fix up the previous QH's queue to link to either
279 * the new head of this queue or the start of the
280 * next endpoint's queue. */
281 pqh = list_entry(qh->list.prev, struct uhci_qh, list);
284 struct urb_priv *turbp;
286 list_for_each_entry(turbp, &pqh->urbp->queue_list,
288 turbp->qh->link = newlink;
292 /* Leave qh->link in case the HC is on the QH now; it will */
293 /* continue the rest of the schedule */
294 qh->element = UHCI_PTR_TERM;
296 list_del_init(&qh->list);
299 list_del_init(&qh->urbp->queue_list);
302 uhci_get_current_frame_number(uhci);
303 if (uhci->frame_number + uhci->is_stopped != uhci->qh_remove_age) {
304 uhci_free_pending_qhs(uhci);
305 uhci->qh_remove_age = uhci->frame_number;
308 /* Check to see if the remove list is empty. Set the IOC bit */
309 /* to force an interrupt so we can remove the QH */
310 if (list_empty(&uhci->qh_remove_list))
311 uhci_set_next_interrupt(uhci);
313 list_add(&qh->remove_list, &uhci->qh_remove_list);
316 static int uhci_fixup_toggle(struct urb *urb, unsigned int toggle)
318 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
321 list_for_each_entry(td, &urbp->td_list, list) {
323 td->token |= cpu_to_le32(TD_TOKEN_TOGGLE);
325 td->token &= ~cpu_to_le32(TD_TOKEN_TOGGLE);
333 /* This function will append one URB's QH to another URB's QH. This is for */
334 /* queuing interrupt, control or bulk transfers */
335 static void uhci_append_queued_urb(struct uhci_hcd *uhci, struct urb *eurb, struct urb *urb)
337 struct urb_priv *eurbp, *urbp, *furbp, *lurbp;
338 struct uhci_td *lltd;
340 eurbp = eurb->hcpriv;
343 /* Find the first URB in the queue */
346 list_for_each_entry(furbp, &eurbp->queue_list, queue_list)
351 lurbp = list_entry(furbp->queue_list.prev, struct urb_priv, queue_list);
353 lltd = list_entry(lurbp->td_list.prev, struct uhci_td, list);
355 /* Control transfers always start with toggle 0 */
356 if (!usb_pipecontrol(urb->pipe))
357 usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
358 usb_pipeout(urb->pipe),
359 uhci_fixup_toggle(urb,
360 uhci_toggle(td_token(lltd)) ^ 1));
362 /* All qh's in the queue need to link to the next queue */
363 urbp->qh->link = eurbp->qh->link;
365 wmb(); /* Make sure we flush everything */
367 lltd->link = cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH;
369 list_add_tail(&urbp->queue_list, &furbp->queue_list);
374 static void uhci_delete_queued_urb(struct uhci_hcd *uhci, struct urb *urb)
376 struct urb_priv *urbp, *nurbp, *purbp, *turbp;
377 struct uhci_td *pltd;
382 if (list_empty(&urbp->queue_list))
385 nurbp = list_entry(urbp->queue_list.next, struct urb_priv, queue_list);
388 * Fix up the toggle for the following URBs in the queue.
389 * Only needed for bulk and interrupt: control and isochronous
390 * endpoints don't propagate toggles between messages.
392 if (usb_pipebulk(urb->pipe) || usb_pipeint(urb->pipe)) {
394 /* We just set the toggle in uhci_unlink_generic */
395 toggle = usb_gettoggle(urb->dev,
396 usb_pipeendpoint(urb->pipe),
397 usb_pipeout(urb->pipe));
399 /* If we're in the middle of the queue, grab the */
400 /* toggle from the TD previous to us */
401 purbp = list_entry(urbp->queue_list.prev,
402 struct urb_priv, queue_list);
403 pltd = list_entry(purbp->td_list.prev,
404 struct uhci_td, list);
405 toggle = uhci_toggle(td_token(pltd)) ^ 1;
408 list_for_each_entry(turbp, &urbp->queue_list, queue_list) {
411 toggle = uhci_fixup_toggle(turbp->urb, toggle);
414 usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
415 usb_pipeout(urb->pipe), toggle);
419 /* We're somewhere in the middle (or end). The case where
420 * we're at the head is handled in uhci_remove_qh(). */
421 purbp = list_entry(urbp->queue_list.prev, struct urb_priv,
424 pltd = list_entry(purbp->td_list.prev, struct uhci_td, list);
426 pltd->link = cpu_to_le32(nurbp->qh->dma_handle) | UHCI_PTR_QH;
428 /* The next URB happens to be the beginning, so */
429 /* we're the last, end the chain */
430 pltd->link = UHCI_PTR_TERM;
433 /* urbp->queue_list is handled in uhci_remove_qh() */
436 static struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci, struct urb *urb)
438 struct urb_priv *urbp;
440 urbp = kmem_cache_alloc(uhci_up_cachep, SLAB_ATOMIC);
444 memset((void *)urbp, 0, sizeof(*urbp));
446 urbp->fsbrtime = jiffies;
449 INIT_LIST_HEAD(&urbp->td_list);
450 INIT_LIST_HEAD(&urbp->queue_list);
451 INIT_LIST_HEAD(&urbp->urb_list);
453 list_add_tail(&urbp->urb_list, &uhci->urb_list);
460 static void uhci_add_td_to_urb(struct urb *urb, struct uhci_td *td)
462 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
464 list_add_tail(&td->list, &urbp->td_list);
467 static void uhci_remove_td_from_urb(struct uhci_td *td)
469 if (list_empty(&td->list))
472 list_del_init(&td->list);
475 static void uhci_destroy_urb_priv(struct uhci_hcd *uhci, struct urb *urb)
477 struct uhci_td *td, *tmp;
478 struct urb_priv *urbp;
480 urbp = (struct urb_priv *)urb->hcpriv;
484 if (!list_empty(&urbp->urb_list))
485 dev_warn(uhci_dev(uhci), "urb %p still on uhci->urb_list "
486 "or uhci->remove_list!\n", urb);
488 uhci_get_current_frame_number(uhci);
489 if (uhci->frame_number + uhci->is_stopped != uhci->td_remove_age) {
490 uhci_free_pending_tds(uhci);
491 uhci->td_remove_age = uhci->frame_number;
494 /* Check to see if the remove list is empty. Set the IOC bit */
495 /* to force an interrupt so we can remove the TDs */
496 if (list_empty(&uhci->td_remove_list))
497 uhci_set_next_interrupt(uhci);
499 list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
500 uhci_remove_td_from_urb(td);
501 uhci_remove_td(uhci, td);
502 list_add(&td->remove_list, &uhci->td_remove_list);
506 kmem_cache_free(uhci_up_cachep, urbp);
509 static void uhci_inc_fsbr(struct uhci_hcd *uhci, struct urb *urb)
511 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
513 if ((!(urb->transfer_flags & URB_NO_FSBR)) && !urbp->fsbr) {
515 if (!uhci->fsbr++ && !uhci->fsbrtimeout)
516 uhci->skel_term_qh->link = cpu_to_le32(uhci->skel_fs_control_qh->dma_handle) | UHCI_PTR_QH;
520 static void uhci_dec_fsbr(struct uhci_hcd *uhci, struct urb *urb)
522 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
524 if ((!(urb->transfer_flags & URB_NO_FSBR)) && urbp->fsbr) {
527 uhci->fsbrtimeout = jiffies + FSBR_DELAY;
532 * Map status to standard result codes
534 * <status> is (td_status(td) & 0xF60000), a.k.a.
535 * uhci_status_bits(td_status(td)).
536 * Note: <status> does not include the TD_CTRL_NAK bit.
537 * <dir_out> is True for output TDs and False for input TDs.
539 static int uhci_map_status(int status, int dir_out)
543 if (status & TD_CTRL_BITSTUFF) /* Bitstuff error */
545 if (status & TD_CTRL_CRCTIMEO) { /* CRC/Timeout */
551 if (status & TD_CTRL_BABBLE) /* Babble */
553 if (status & TD_CTRL_DBUFERR) /* Buffer error */
555 if (status & TD_CTRL_STALLED) /* Stalled */
557 WARN_ON(status & TD_CTRL_ACTIVE); /* Active */
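/*
 * Illustrative usage example (not from the original source): this is how the
 * result handlers below feed a completed TD's status word into
 * uhci_map_status(). A stalled TD (TD_CTRL_STALLED) comes back as -EPIPE,
 * which is why the debug code below treats -EPIPE specially.
 */
#if 0
	int ret = uhci_map_status(uhci_status_bits(td_status(td)),
			uhci_packetout(td_token(td)));
#endif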
564 static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
566 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
568 struct uhci_qh *qh, *skelqh;
569 unsigned long destination, status;
570 int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
571 int len = urb->transfer_buffer_length;
572 dma_addr_t data = urb->transfer_dma;
574 /* The "pipe" thing contains the destination in bits 8--18 */
575 destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;
578 status = TD_CTRL_ACTIVE | uhci_maxerr(3);
579 if (urb->dev->speed == USB_SPEED_LOW)
580 status |= TD_CTRL_LS;
583 * Build the TD for the control request setup packet
585 td = uhci_alloc_td(uhci);
589 uhci_add_td_to_urb(urb, td);
590 uhci_fill_td(td, status, destination | uhci_explen(7),
594 * If direction is "send", change the packet ID from SETUP (0x2D)
595 * to OUT (0xE1). Else change it from SETUP to IN (0x69) and
596 * set Short Packet Detect (SPD) for all data packets.
598 if (usb_pipeout(urb->pipe))
599 destination ^= (USB_PID_SETUP ^ USB_PID_OUT);
601 destination ^= (USB_PID_SETUP ^ USB_PID_IN);
602 status |= TD_CTRL_SPD;
606 * Build the DATA TDs
614 td = uhci_alloc_td(uhci);
618 /* Alternate Data0/1 (start with Data1) */
619 destination ^= TD_TOKEN_TOGGLE;
621 uhci_add_td_to_urb(urb, td);
622 uhci_fill_td(td, status, destination | uhci_explen(pktsze - 1),
630 * Build the final TD for control status
632 td = uhci_alloc_td(uhci);
637 * It's IN if the pipe is an output pipe or we're not expecting data back.
640 destination &= ~TD_TOKEN_PID_MASK;
641 if (usb_pipeout(urb->pipe) || !urb->transfer_buffer_length)
642 destination |= USB_PID_IN;
644 destination |= USB_PID_OUT;
646 destination |= TD_TOKEN_TOGGLE; /* End in Data1 */
648 status &= ~TD_CTRL_SPD;
650 uhci_add_td_to_urb(urb, td);
651 uhci_fill_td(td, status | TD_CTRL_IOC,
652 destination | uhci_explen(UHCI_NULL_DATA_SIZE), 0);
654 qh = uhci_alloc_qh(uhci);
661 uhci_insert_tds_in_qh(qh, urb, UHCI_PTR_BREADTH);
663 /* Low-speed transfers get a different queue, and won't hog the bus.
664 * Also, some devices enumerate better without FSBR; the easiest way
665 * to do that is to put URBs on the low-speed queue while the device
666 * is in the DEFAULT state. */
667 if (urb->dev->speed == USB_SPEED_LOW ||
668 urb->dev->state == USB_STATE_DEFAULT)
669 skelqh = uhci->skel_ls_control_qh;
671 skelqh = uhci->skel_fs_control_qh;
672 uhci_inc_fsbr(uhci, urb);
676 uhci_append_queued_urb(uhci, eurb, urb);
678 uhci_insert_qh(uhci, skelqh, urb);
684 * If control-IN transfer was short, the status packet wasn't sent.
685 * This routine changes the element pointer in the QH to point at the
686 * status TD. It's safe to do this even while the QH is live, because
687 * the hardware only updates the element pointer following a successful
688 * transfer. The inactive TD for the short packet won't cause an update,
689 * so the pointer won't get overwritten. The next time the controller
690 * sees this QH, it will send the status packet.
692 static int usb_control_retrigger_status(struct uhci_hcd *uhci, struct urb *urb)
694 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
697 urbp->short_control_packet = 1;
699 td = list_entry(urbp->td_list.prev, struct uhci_td, list);
700 urbp->qh->element = cpu_to_le32(td->dma_handle);
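/*
 * Illustrative note (not from the original source): for a control URB the TD
 * chain built by uhci_submit_control() is laid out on urbp->td_list as
 *
 *	[SETUP td] --> [DATA td] ... [DATA td] --> [STATUS td]
 *
 * so td_list.prev above is always the status-stage TD, and pointing
 * qh->element at it makes the controller (re)execute just the status packet.
 * status_td below is a placeholder name.
 */
#if 0
	struct uhci_td *status_td = list_entry(urbp->td_list.prev,
			struct uhci_td, list);
#endif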
706 static int uhci_result_control(struct uhci_hcd *uhci, struct urb *urb)
708 struct list_head *tmp, *head;
709 struct urb_priv *urbp = urb->hcpriv;
714 if (list_empty(&urbp->td_list))
717 head = &urbp->td_list;
719 if (urbp->short_control_packet) {
725 td = list_entry(tmp, struct uhci_td, list);
727 /* The first TD is the SETUP stage; check the status, but skip the count */
729 status = uhci_status_bits(td_status(td));
730 if (status & TD_CTRL_ACTIVE)
736 urb->actual_length = 0;
738 /* The rest of the TDs (except the last) are data */
740 while (tmp != head && tmp->next != head) {
741 unsigned int ctrlstat;
743 td = list_entry(tmp, struct uhci_td, list);
746 ctrlstat = td_status(td);
747 status = uhci_status_bits(ctrlstat);
748 if (status & TD_CTRL_ACTIVE)
751 urb->actual_length += uhci_actual_length(ctrlstat);
756 /* Check to see if we received a short packet */
757 if (uhci_actual_length(ctrlstat) <
758 uhci_expected_length(td_token(td))) {
759 if (urb->transfer_flags & URB_SHORT_NOT_OK) {
764 if (uhci_packetid(td_token(td)) == USB_PID_IN)
765 return usb_control_retrigger_status(uhci, urb);
772 td = list_entry(tmp, struct uhci_td, list);
774 /* Control status stage */
775 status = td_status(td);
777 #ifdef I_HAVE_BUGGY_APC_BACKUPS
778 /* APC BackUPS Pro kludge */
779 /* It tries to send all of the descriptor instead of the amount we requested */
781 if (status & TD_CTRL_IOC && /* IOC is masked out by uhci_status_bits */
782 status & TD_CTRL_ACTIVE &&
783 status & TD_CTRL_NAK)
787 status = uhci_status_bits(status);
788 if (status & TD_CTRL_ACTIVE)
797 ret = uhci_map_status(status, uhci_packetout(td_token(td)));
800 if ((debug == 1 && ret != -EPIPE) || debug > 1) {
801 /* Some debugging code */
802 dev_dbg(uhci_dev(uhci), "%s: failed with status %x\n",
803 __FUNCTION__, status);
806 /* Print the chain for debugging purposes */
807 uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);
817 * Common submit for bulk and interrupt
819 static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb, struct uhci_qh *skelqh)
823 unsigned long destination, status;
824 int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
825 int len = urb->transfer_buffer_length;
826 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
827 dma_addr_t data = urb->transfer_dma;
832 /* The "pipe" thing contains the destination in bits 8--18 */
833 destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
835 status = uhci_maxerr(3) | TD_CTRL_ACTIVE;
836 if (urb->dev->speed == USB_SPEED_LOW)
837 status |= TD_CTRL_LS;
838 if (usb_pipein(urb->pipe))
839 status |= TD_CTRL_SPD;
842 * Build the DATA TDs
844 do { /* Allow zero length packets */
849 if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
850 status &= ~TD_CTRL_SPD;
853 td = uhci_alloc_td(uhci);
857 uhci_add_td_to_urb(urb, td);
858 uhci_fill_td(td, status, destination | uhci_explen(pktsze - 1) |
859 (usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
860 usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT),
866 usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe),
867 usb_pipeout(urb->pipe));
871 * URB_ZERO_PACKET means adding a 0-length packet, if direction
872 * is OUT and the transfer_length was an exact multiple of maxsze,
873 * hence (len = transfer_length - N * maxsze) == 0
874 * however, if transfer_length == 0, the zero packet was already prepared above.
877 if (usb_pipeout(urb->pipe) && (urb->transfer_flags & URB_ZERO_PACKET) &&
878 !len && urb->transfer_buffer_length) {
879 td = uhci_alloc_td(uhci);
883 uhci_add_td_to_urb(urb, td);
884 uhci_fill_td(td, status, destination | uhci_explen(UHCI_NULL_DATA_SIZE) |
885 (usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
886 usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT),
889 usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe),
890 usb_pipeout(urb->pipe));
893 /* Set the interrupt-on-completion flag on the last packet.
894 * A more-or-less typical 4 KB URB (= size of one memory page)
895 * will require about 3 ms to transfer; that's a little on the
896 * fast side but not enough to justify delaying an interrupt
897 * more than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT flag setting.
899 td->status |= cpu_to_le32(TD_CTRL_IOC);
901 qh = uhci_alloc_qh(uhci);
908 /* Always breadth first */
909 uhci_insert_tds_in_qh(qh, urb, UHCI_PTR_BREADTH);
912 uhci_append_queued_urb(uhci, eurb, urb);
914 uhci_insert_qh(uhci, skelqh, urb);
920 * Common result for bulk and interrupt
922 static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
924 struct urb_priv *urbp = urb->hcpriv;
926 unsigned int status = 0;
929 urb->actual_length = 0;
931 list_for_each_entry(td, &urbp->td_list, list) {
932 unsigned int ctrlstat = td_status(td);
934 status = uhci_status_bits(ctrlstat);
935 if (status & TD_CTRL_ACTIVE)
938 urb->actual_length += uhci_actual_length(ctrlstat);
943 if (uhci_actual_length(ctrlstat) <
944 uhci_expected_length(td_token(td))) {
945 if (urb->transfer_flags & URB_SHORT_NOT_OK) {
956 ret = uhci_map_status(status, uhci_packetout(td_token(td)));
960 * Enable this chunk of code if you want to see some more debugging.
961 * But be careful: it has a tendency to starve out khubd and prevent
962 * disconnects from happening successfully if you have a slow debug
963 * log interface (like a serial console).
966 if ((debug == 1 && ret != -EPIPE) || debug > 1) {
967 /* Some debugging code */
968 dev_dbg(uhci_dev(uhci), "%s: failed with status %x\n",
969 __FUNCTION__, status);
972 /* Print the chain for debugging purposes */
973 uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);
982 static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
986 /* Can't have low-speed bulk transfers */
987 if (urb->dev->speed == USB_SPEED_LOW)
990 ret = uhci_submit_common(uhci, urb, eurb, uhci->skel_bulk_qh);
991 if (ret == -EINPROGRESS)
992 uhci_inc_fsbr(uhci, urb);
997 static inline int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
999 /* USB 1.1 interrupt transfers only involve one packet per interval;
1000 * that's the uhci_submit_common() "breadth first" policy. Drivers
1001 * can submit urbs of any length, but longer ones might need many
1002 * intervals to complete.
1004 return uhci_submit_common(uhci, urb, eurb, uhci->skelqh[__interval_to_skel(urb->interval)]);
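/*
 * Illustrative sketch (assumption, not from the original source): the skeleton
 * index comes from __interval_to_skel(), defined alongside the skeleton QHs,
 * which rounds the requested interval down to one of the power-of-two
 * interrupt queues (1, 2, 4, ... 128 ms). ret and skelqh below are
 * placeholder names.
 */
#if 0
	/* e.g. urb->interval == 10 would select the 8 ms skeleton QH */
	struct uhci_qh *skelqh = uhci->skelqh[__interval_to_skel(urb->interval)];
	int ret = uhci_submit_common(uhci, urb, eurb, skelqh);
#endif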
1008 * Isochronous transfers
1010 static int isochronous_find_limits(struct uhci_hcd *uhci, struct urb *urb, unsigned int *start, unsigned int *end)
1012 struct urb *last_urb = NULL;
1013 struct urb_priv *up;
1016 list_for_each_entry(up, &uhci->urb_list, urb_list) {
1017 struct urb *u = up->urb;
1019 /* Look for pending URBs with an identical pipe handle */
1020 if ((urb->pipe == u->pipe) && (urb->dev == u->dev) &&
1021 (u->status == -EINPROGRESS) && (u != urb)) {
1023 *start = u->start_frame;
1029 *end = (last_urb->start_frame + last_urb->number_of_packets *
1030 last_urb->interval) & (UHCI_NUMFRAMES-1);
1033 ret = -1; /* no previous urb found */
1038 static int isochronous_find_start(struct uhci_hcd *uhci, struct urb *urb)
1041 unsigned int start = 0, end = 0;
1043 if (urb->number_of_packets > 900) /* 900? Why? */
1046 limits = isochronous_find_limits(uhci, urb, &start, &end);
1048 if (urb->transfer_flags & URB_ISO_ASAP) {
1050 uhci_get_current_frame_number(uhci);
1051 urb->start_frame = (uhci->frame_number + 10)
1052 & (UHCI_NUMFRAMES - 1);
1054 urb->start_frame = end;
1056 urb->start_frame &= (UHCI_NUMFRAMES - 1);
1057 /* FIXME: Sanity check */
1064 * Isochronous transfers
1066 static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb)
1070 int status, destination;
1072 status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
1073 destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
1075 ret = isochronous_find_start(uhci, urb);
1079 frame = urb->start_frame;
1080 for (i = 0; i < urb->number_of_packets; i++, frame += urb->interval) {
1081 if (!urb->iso_frame_desc[i].length)
1084 td = uhci_alloc_td(uhci);
1088 uhci_add_td_to_urb(urb, td);
1089 uhci_fill_td(td, status, destination | uhci_explen(urb->iso_frame_desc[i].length - 1),
1090 urb->transfer_dma + urb->iso_frame_desc[i].offset);
1092 if (i + 1 >= urb->number_of_packets)
1093 td->status |= cpu_to_le32(TD_CTRL_IOC);
1095 uhci_insert_td_frame_list(uhci, td, frame);
1098 return -EINPROGRESS;
1101 static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
1104 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
1108 urb->actual_length = 0;
1111 list_for_each_entry(td, &urbp->td_list, list) {
1113 unsigned int ctrlstat = td_status(td);
1115 if (ctrlstat & TD_CTRL_ACTIVE)
1116 return -EINPROGRESS;
1118 actlength = uhci_actual_length(ctrlstat);
1119 urb->iso_frame_desc[i].actual_length = actlength;
1120 urb->actual_length += actlength;
1122 status = uhci_map_status(uhci_status_bits(ctrlstat),
1123 usb_pipeout(urb->pipe));
1124 urb->iso_frame_desc[i].status = status;
1136 static struct urb *uhci_find_urb_ep(struct uhci_hcd *uhci, struct urb *urb)
1138 struct urb_priv *up;
1140 /* We don't match Isoc transfers since they are special */
1141 if (usb_pipeisoc(urb->pipe))
1144 list_for_each_entry(up, &uhci->urb_list, urb_list) {
1145 struct urb *u = up->urb;
1147 if (u->dev == urb->dev && u->status == -EINPROGRESS) {
1148 /* For control, ignore the direction */
1149 if (usb_pipecontrol(urb->pipe) &&
1150 (u->pipe & ~USB_DIR_IN) == (urb->pipe & ~USB_DIR_IN))
1152 else if (u->pipe == urb->pipe)
1160 static int uhci_urb_enqueue(struct usb_hcd *hcd,
1161 struct usb_host_endpoint *ep,
1162 struct urb *urb, gfp_t mem_flags)
1165 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1166 unsigned long flags;
1170 spin_lock_irqsave(&uhci->lock, flags);
1173 if (ret != -EINPROGRESS) /* URB already unlinked! */
1176 eurb = uhci_find_urb_ep(uhci, urb);
1178 if (!uhci_alloc_urb_priv(uhci, urb)) {
1183 switch (usb_pipetype(urb->pipe)) {
1185 ret = uhci_submit_control(uhci, urb, eurb);
1187 case PIPE_INTERRUPT:
1189 bustime = usb_check_bandwidth(urb->dev, urb);
1193 ret = uhci_submit_interrupt(uhci, urb, eurb);
1194 if (ret == -EINPROGRESS)
1195 usb_claim_bandwidth(urb->dev, urb, bustime, 0);
1197 } else { /* inherit from parent */
1198 urb->bandwidth = eurb->bandwidth;
1199 ret = uhci_submit_interrupt(uhci, urb, eurb);
1203 ret = uhci_submit_bulk(uhci, urb, eurb);
1205 case PIPE_ISOCHRONOUS:
1206 bustime = usb_check_bandwidth(urb->dev, urb);
1212 ret = uhci_submit_isochronous(uhci, urb);
1213 if (ret == -EINPROGRESS)
1214 usb_claim_bandwidth(urb->dev, urb, bustime, 1);
1218 if (ret != -EINPROGRESS) {
1219 /* Submit failed, so delete it from the urb_list */
1220 struct urb_priv *urbp = urb->hcpriv;
1222 list_del_init(&urbp->urb_list);
1223 uhci_destroy_urb_priv(uhci, urb);
1228 spin_unlock_irqrestore(&uhci->lock, flags);
1233 * Return the result of a transfer
1235 static void uhci_transfer_result(struct uhci_hcd *uhci, struct urb *urb)
1237 int ret = -EINPROGRESS;
1238 struct urb_priv *urbp;
1240 spin_lock(&urb->lock);
1242 urbp = (struct urb_priv *)urb->hcpriv;
1244 if (urb->status != -EINPROGRESS) /* URB already dequeued */
1247 switch (usb_pipetype(urb->pipe)) {
1249 ret = uhci_result_control(uhci, urb);
1252 case PIPE_INTERRUPT:
1253 ret = uhci_result_common(uhci, urb);
1255 case PIPE_ISOCHRONOUS:
1256 ret = uhci_result_isochronous(uhci, urb);
1260 if (ret == -EINPROGRESS)
1264 switch (usb_pipetype(urb->pipe)) {
1267 case PIPE_ISOCHRONOUS:
1268 /* Release bandwidth for Interrupt or Isoc. transfers */
1270 usb_release_bandwidth(urb->dev, urb, 1);
1271 uhci_unlink_generic(uhci, urb);
1273 case PIPE_INTERRUPT:
1274 /* Release bandwidth for Interrupt or Isoc. transfers */
1275 /* Make sure we don't release if we have a queued URB */
1276 if (list_empty(&urbp->queue_list) && urb->bandwidth)
1277 usb_release_bandwidth(urb->dev, urb, 0);
1279 /* bandwidth was passed on to queued URB, */
1280 /* so don't let usb_unlink_urb() release it */
1282 uhci_unlink_generic(uhci, urb);
1285 dev_info(uhci_dev(uhci), "%s: unknown pipe type %d "
1287 __FUNCTION__, usb_pipetype(urb->pipe), urb);
1290 /* Move it from uhci->urb_list to uhci->complete_list */
1291 uhci_moveto_complete(uhci, urbp);
1294 spin_unlock(&urb->lock);
1297 static void uhci_unlink_generic(struct uhci_hcd *uhci, struct urb *urb)
1299 struct list_head *head;
1301 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
1304 uhci_dec_fsbr(uhci, urb); /* Safe since it checks */
1307 * Now we need to find out what the last successful toggle was
1308 * so we can update the local data toggle for the next transfer
1310 * There are 2 ways the last successfully completed TD is found:
1312 * 1) The TD is NOT active and the actual length < expected length
1313 * 2) The TD is NOT active and it's the last TD in the chain
1315 * and a third way the first uncompleted TD is found:
1317 * 3) The TD is active and the previous TD is NOT active
1319 * Control and Isochronous ignore the toggle, so this is safe
1322 * FIXME: The toggle fixups won't be 100% reliable until we
1323 * change over to using a single queue for each endpoint and
1324 * stop the queue before unlinking.
1326 head = &urbp->td_list;
1327 list_for_each_entry(td, head, list) {
1328 unsigned int ctrlstat = td_status(td);
1330 if (!(ctrlstat & TD_CTRL_ACTIVE) &&
1331 (uhci_actual_length(ctrlstat) <
1332 uhci_expected_length(td_token(td)) ||
1333 td->list.next == head))
1334 usb_settoggle(urb->dev, uhci_endpoint(td_token(td)),
1335 uhci_packetout(td_token(td)),
1336 uhci_toggle(td_token(td)) ^ 1);
1337 else if ((ctrlstat & TD_CTRL_ACTIVE) && !prevactive)
1338 usb_settoggle(urb->dev, uhci_endpoint(td_token(td)),
1339 uhci_packetout(td_token(td)),
1340 uhci_toggle(td_token(td)));
1342 prevactive = ctrlstat & TD_CTRL_ACTIVE;
1345 uhci_delete_queued_urb(uhci, urb);
1347 /* The interrupt loop will reclaim the QH's */
1348 uhci_remove_qh(uhci, urbp->qh);
1352 static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
1354 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1355 unsigned long flags;
1356 struct urb_priv *urbp;
1358 spin_lock_irqsave(&uhci->lock, flags);
1360 if (!urbp) /* URB was never linked! */
1362 list_del_init(&urbp->urb_list);
1364 uhci_unlink_generic(uhci, urb);
1366 uhci_get_current_frame_number(uhci);
1367 if (uhci->frame_number + uhci->is_stopped != uhci->urb_remove_age) {
1368 uhci_remove_pending_urbps(uhci);
1369 uhci->urb_remove_age = uhci->frame_number;
1372 /* If we're the first, set the next interrupt bit */
1373 if (list_empty(&uhci->urb_remove_list))
1374 uhci_set_next_interrupt(uhci);
1375 list_add_tail(&urbp->urb_list, &uhci->urb_remove_list);
1378 spin_unlock_irqrestore(&uhci->lock, flags);
1382 static int uhci_fsbr_timeout(struct uhci_hcd *uhci, struct urb *urb)
1384 struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
1385 struct list_head *head;
1389 uhci_dec_fsbr(uhci, urb);
1391 urbp->fsbr_timeout = 1;
1394 * Ideally we would want to fix qh->element as well, but it's
1395 * read/write by the HC, so that can introduce a race. It's not
1396 * really worth the hassle
1399 head = &urbp->td_list;
1400 list_for_each_entry(td, head, list) {
1402 * Make sure we don't do the last one (since it'll have the
1403 * TERM bit set); also, only switch every DEPTH_INTERVAL-th TD
1404 * to depth-first so the queue doesn't hog the bandwidth.
1406 if (td->list.next != head && (count % DEPTH_INTERVAL) ==
1407 (DEPTH_INTERVAL - 1))
1408 td->link |= UHCI_PTR_DEPTH;
1416 static void uhci_free_pending_qhs(struct uhci_hcd *uhci)
1418 struct uhci_qh *qh, *tmp;
1420 list_for_each_entry_safe(qh, tmp, &uhci->qh_remove_list, remove_list) {
1421 list_del_init(&qh->remove_list);
1423 uhci_free_qh(uhci, qh);
1427 static void uhci_free_pending_tds(struct uhci_hcd *uhci)
1429 struct uhci_td *td, *tmp;
1431 list_for_each_entry_safe(td, tmp, &uhci->td_remove_list, remove_list) {
1432 list_del_init(&td->remove_list);
1434 uhci_free_td(uhci, td);
1439 uhci_finish_urb(struct usb_hcd *hcd, struct urb *urb, struct pt_regs *regs)
1440 __releases(uhci->lock)
1441 __acquires(uhci->lock)
1443 struct uhci_hcd *uhci = hcd_to_uhci(hcd);
1445 uhci_destroy_urb_priv(uhci, urb);
1447 spin_unlock(&uhci->lock);
1448 usb_hcd_giveback_urb(hcd, urb, regs);
1449 spin_lock(&uhci->lock);
1452 static void uhci_finish_completion(struct uhci_hcd *uhci, struct pt_regs *regs)
1454 struct urb_priv *urbp, *tmp;
1456 list_for_each_entry_safe(urbp, tmp, &uhci->complete_list, urb_list) {
1457 struct urb *urb = urbp->urb;
1459 list_del_init(&urbp->urb_list);
1460 uhci_finish_urb(uhci_to_hcd(uhci), urb, regs);
1464 static void uhci_remove_pending_urbps(struct uhci_hcd *uhci)
1467 /* Splice the urb_remove_list onto the end of the complete_list */
1468 list_splice_init(&uhci->urb_remove_list, uhci->complete_list.prev);
1471 /* Process events in the schedule, but only in one thread at a time */
1472 static void uhci_scan_schedule(struct uhci_hcd *uhci, struct pt_regs *regs)
1474 struct urb_priv *urbp, *tmp;
1476 /* Don't allow re-entrant calls */
1477 if (uhci->scan_in_progress) {
1478 uhci->need_rescan = 1;
1481 uhci->scan_in_progress = 1;
1483 uhci->need_rescan = 0;
1485 uhci_clear_next_interrupt(uhci);
1486 uhci_get_current_frame_number(uhci);
1488 if (uhci->frame_number + uhci->is_stopped != uhci->qh_remove_age)
1489 uhci_free_pending_qhs(uhci);
1490 if (uhci->frame_number + uhci->is_stopped != uhci->td_remove_age)
1491 uhci_free_pending_tds(uhci);
1492 if (uhci->frame_number + uhci->is_stopped != uhci->urb_remove_age)
1493 uhci_remove_pending_urbps(uhci);
1495 /* Walk the list of pending URBs to see which ones completed
1496 * (must be _safe because uhci_transfer_result() dequeues URBs) */
1497 list_for_each_entry_safe(urbp, tmp, &uhci->urb_list, urb_list) {
1498 struct urb *urb = urbp->urb;
1500 /* Checks the status and does all of the magic necessary */
1501 uhci_transfer_result(uhci, urb);
1503 uhci_finish_completion(uhci, regs);
1505 /* If the controller is stopped, we can finish these off right now */
1506 if (uhci->is_stopped) {
1507 uhci_free_pending_qhs(uhci);
1508 uhci_free_pending_tds(uhci);
1509 uhci_remove_pending_urbps(uhci);
1512 if (uhci->need_rescan)
1514 uhci->scan_in_progress = 0;
1516 if (list_empty(&uhci->urb_remove_list) &&
1517 list_empty(&uhci->td_remove_list) &&
1518 list_empty(&uhci->qh_remove_list))
1519 uhci_clear_next_interrupt(uhci);
1521 uhci_set_next_interrupt(uhci);
1523 /* Wake up anyone waiting for an URB to complete */
1524 wake_up_all(&uhci->waitqh);
1527 static void check_fsbr(struct uhci_hcd *uhci)
1529 struct urb_priv *up;
1531 list_for_each_entry(up, &uhci->urb_list, urb_list) {
1532 struct urb *u = up->urb;
1534 spin_lock(&u->lock);
1536 /* Check if the FSBR timed out */
1537 if (up->fsbr && !up->fsbr_timeout && time_after_eq(jiffies, up->fsbrtime + IDLE_TIMEOUT))
1538 uhci_fsbr_timeout(uhci, u);
1540 spin_unlock(&u->lock);
1543 /* Really disable FSBR */
1544 if (!uhci->fsbr && uhci->fsbrtimeout && time_after_eq(jiffies, uhci->fsbrtimeout)) {
1545 uhci->fsbrtimeout = 0;
1546 uhci->skel_term_qh->link = UHCI_PTR_TERM;
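/*
 * Illustrative summary (not from the original source): FSBR works by making
 * the terminating skeleton QH loop back to the full-speed control queue, so
 * the controller keeps re-walking the full-speed queues for the rest of each
 * frame. uhci_inc_fsbr() enables the loop and check_fsbr() breaks it again
 * once FSBR_DELAY has passed after the last uhci_dec_fsbr():
 */
#if 0
	uhci->skel_term_qh->link = cpu_to_le32(uhci->skel_fs_control_qh->dma_handle) | UHCI_PTR_QH;	/* enable */
	uhci->skel_term_qh->link = UHCI_PTR_TERM;							/* disable */
#endif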