/*
 * Universal Host Controller Interface driver for USB.
 *
 * Maintainer: Alan Stern <stern@rowland.harvard.edu>
 *
 * (C) Copyright 1999 Linus Torvalds
 * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
 * (C) Copyright 1999 Randy Dunlap
 * (C) Copyright 1999 Georg Acher, acher@in.tum.de
 * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
 * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
 * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
 * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
 *               support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
 * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
 * (C) Copyright 2004 Alan Stern, stern@rowland.harvard.edu
 */
static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb);
static void uhci_unlink_generic(struct uhci_hcd *uhci, struct urb *urb);
static void uhci_remove_pending_urbps(struct uhci_hcd *uhci);
static void uhci_free_pending_qhs(struct uhci_hcd *uhci);
static void uhci_free_pending_tds(struct uhci_hcd *uhci);
/*
 * Technically, updating td->status here is a race, but it's not really a
 * problem. The worst that can happen is that we set the IOC bit again,
 * generating a spurious interrupt. We could fix this by creating another
 * QH and leaving the IOC bit always set, but then we would have to play
 * games with the FSBR code to make sure we get the correct order in all
 * the cases. I don't think it's worth the effort.
 */
static inline void uhci_set_next_interrupt(struct uhci_hcd *uhci)
{
	if (uhci->is_stopped)
		mod_timer(&uhci->stall_timer, jiffies);
	uhci->term_td->status |= cpu_to_le32(TD_CTRL_IOC);
}
static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
{
	uhci->term_td->status &= ~cpu_to_le32(TD_CTRL_IOC);
}
static inline void uhci_moveto_complete(struct uhci_hcd *uhci,
		struct urb_priv *urbp)
{
	list_move_tail(&urbp->urb_list, &uhci->complete_list);
}
static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci, struct usb_device *dev)
{
	dma_addr_t dma_handle;
	struct uhci_td *td;

	td = dma_pool_alloc(uhci->td_pool, GFP_ATOMIC, &dma_handle);
	if (!td)
		return NULL;

	td->dma_handle = dma_handle;
	td->link = UHCI_PTR_TERM;
	td->frame = -1;		/* Not yet in the frame list */
	td->dev = dev;

	INIT_LIST_HEAD(&td->list);
	INIT_LIST_HEAD(&td->remove_list);
	INIT_LIST_HEAD(&td->fl_list);

	usb_get_dev(dev);	/* Matched by usb_put_dev() in uhci_free_td() */

	return td;
}
static inline void uhci_fill_td(struct uhci_td *td, u32 status,
		u32 token, u32 buffer)
{
	td->status = cpu_to_le32(status);
	td->token = cpu_to_le32(token);
	td->buffer = cpu_to_le32(buffer);
}
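/*
 * Illustrative sketch only: the real callers below always build the
 * token from urb->pipe, but a SETUP token for device 2, endpoint 0
 * would be filled in roughly like
 *
 *	uhci_fill_td(td, TD_CTRL_ACTIVE | uhci_maxerr(3),
 *		USB_PID_SETUP | (2 << 8) | uhci_explen(7), setup_dma);
 *
 * with the device address in token bits 8-14, the endpoint in bits
 * 15-18, and explen holding the data length minus one ("setup_dma" is
 * a hypothetical DMA handle here).
 */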
/*
 * We insert isochronous URBs directly into the frame list at the beginning.
 */
static void uhci_insert_td_frame_list(struct uhci_hcd *uhci, struct uhci_td *td, unsigned framenum)
{
	framenum &= (UHCI_NUMFRAMES - 1);

	td->frame = framenum;

	/* Is there a TD already mapped there? */
	if (uhci->fl->frame_cpu[framenum]) {
		struct uhci_td *ftd, *ltd;

		ftd = uhci->fl->frame_cpu[framenum];
		ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);

		list_add_tail(&td->fl_list, &ftd->fl_list);

		td->link = ltd->link;
		wmb();
		ltd->link = cpu_to_le32(td->dma_handle);
	} else {
		td->link = uhci->fl->frame[framenum];
		wmb();
		uhci->fl->frame[framenum] = cpu_to_le32(td->dma_handle);
		uhci->fl->frame_cpu[framenum] = td;
	}
}
static void uhci_remove_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
	/* If it's not inserted, don't remove it */
	if (td->frame == -1 && list_empty(&td->fl_list))
		return;

	if (td->frame != -1 && uhci->fl->frame_cpu[td->frame] == td) {
		if (list_empty(&td->fl_list)) {
			uhci->fl->frame[td->frame] = td->link;
			uhci->fl->frame_cpu[td->frame] = NULL;
		} else {
			struct uhci_td *ntd;

			ntd = list_entry(td->fl_list.next, struct uhci_td, fl_list);
			uhci->fl->frame[td->frame] = cpu_to_le32(ntd->dma_handle);
			uhci->fl->frame_cpu[td->frame] = ntd;
		}
	} else {
		struct uhci_td *ptd;

		ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
		ptd->link = td->link;
	}

	wmb();
	td->link = UHCI_PTR_TERM;

	list_del_init(&td->fl_list);
	td->frame = -1;
}
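/*
 * Note that removing a TD from the frame list only unlinks it; the TD
 * must not be freed until the controller is known to have moved past
 * it. That delay is what the td_remove_list/td_remove_age machinery
 * in uhci_destroy_urb_priv() below provides.
 */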
/*
 * Insert a list of TDs into a QH.
 */
static void uhci_insert_tds_in_qh(struct uhci_qh *qh, struct urb *urb, __le32 breadth)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct uhci_td *td;
	__le32 *plink;

	/* Ordering isn't important here yet since the QH hasn't been */
	/* inserted into the schedule yet */
	plink = &qh->element;
	list_for_each_entry(td, &urbp->td_list, list) {
		*plink = cpu_to_le32(td->dma_handle) | breadth;
		plink = &td->link;
	}
	*plink = UHCI_PTR_TERM;
}
static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
	if (!list_empty(&td->list))
		dev_warn(uhci_dev(uhci), "td %p still in list!\n", td);
	if (!list_empty(&td->remove_list))
		dev_warn(uhci_dev(uhci), "td %p still in remove_list!\n", td);
	if (!list_empty(&td->fl_list))
		dev_warn(uhci_dev(uhci), "td %p still in fl_list!\n", td);

	if (td->dev)
		usb_put_dev(td->dev);

	dma_pool_free(uhci->td_pool, td, td->dma_handle);
}
static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci, struct usb_device *dev)
{
	dma_addr_t dma_handle;
	struct uhci_qh *qh;

	qh = dma_pool_alloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle);
	if (!qh)
		return NULL;

	qh->dma_handle = dma_handle;
	qh->element = UHCI_PTR_TERM;
	qh->link = UHCI_PTR_TERM;
	qh->dev = dev;
	qh->urbp = NULL;

	INIT_LIST_HEAD(&qh->list);
	INIT_LIST_HEAD(&qh->remove_list);

	usb_get_dev(dev);	/* Matched by usb_put_dev() in uhci_free_qh() */

	return qh;
}
static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	if (!list_empty(&qh->list))
		dev_warn(uhci_dev(uhci), "qh %p list not empty!\n", qh);
	if (!list_empty(&qh->remove_list))
		dev_warn(uhci_dev(uhci), "qh %p still in remove_list!\n", qh);

	if (qh->dev)
		usb_put_dev(qh->dev);

	dma_pool_free(uhci->qh_pool, qh, qh->dma_handle);
}
/*
 * Append this urb's qh after the last qh in skelqh->list
 *
 * Note that urb_priv.queue_list doesn't have a separate queue head;
 * it's a ring with every element "live".
 */
static void uhci_insert_qh(struct uhci_hcd *uhci, struct uhci_qh *skelqh, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct urb_priv *turbp;
	struct uhci_qh *lqh;

	/* Grab the last QH */
	lqh = list_entry(skelqh->list.prev, struct uhci_qh, list);

	/* Point to the next skelqh */
	urbp->qh->link = lqh->link;
	wmb();				/* Ordering is important */

	/*
	 * Patch QHs for previous endpoint's queued URBs?  HC goes
	 * here next, not to the next skelqh it now points to.
	 *
	 *    lqh --> td ... --> qh ... --> td --> qh ... --> td
	 *     |                 |                 |
	 *     v                 v                 v
	 *     +<----------------+-----------------+
	 *     |
	 *     v
	 *    newqh --> td ... --> td
	 *     |
	 *     v
	 *    ...
	 *
	 * The HC could see (and use!) any of these as we write them.
	 */
	lqh->link = cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH;
	if (lqh->urbp) {
		list_for_each_entry(turbp, &lqh->urbp->queue_list, queue_list)
			turbp->qh->link = lqh->link;
	}

	list_add_tail(&urbp->qh->list, &skelqh->list);
}
/*
 * Start removal of QH from schedule; it finishes next frame.
 * TDs should be unlinked before this is called.
 */
static void uhci_remove_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;
	__le32 newlink;

	if (!qh)
		return;

	/*
	 * Only go through the hoops if it's actually linked in
	 */
	if (!list_empty(&qh->list)) {

		/* If our queue is nonempty, make the next URB the head */
		if (!list_empty(&qh->urbp->queue_list)) {
			struct urb_priv *nurbp;

			nurbp = list_entry(qh->urbp->queue_list.next,
					struct urb_priv, queue_list);
			nurbp->queued = 0;
			list_add(&nurbp->qh->list, &qh->list);
			newlink = cpu_to_le32(nurbp->qh->dma_handle) | UHCI_PTR_QH;
		} else
			newlink = qh->link;

		/* Fix up the previous QH's queue to link to either
		 * the new head of this queue or the start of the
		 * next endpoint's queue. */
		pqh = list_entry(qh->list.prev, struct uhci_qh, list);
		pqh->link = newlink;
		if (pqh->urbp) {
			struct urb_priv *turbp;

			list_for_each_entry(turbp, &pqh->urbp->queue_list,
					queue_list)
				turbp->qh->link = newlink;
		}
		wmb();

		/* Leave qh->link in case the HC is on the QH now, it will */
		/* continue the rest of the schedule */
		qh->element = UHCI_PTR_TERM;

		list_del_init(&qh->list);
	}

	list_del_init(&qh->urbp->queue_list);
	qh->urbp = NULL;

	uhci_get_current_frame_number(uhci);
	if (uhci->frame_number + uhci->is_stopped != uhci->qh_remove_age) {
		uhci_free_pending_qhs(uhci);
		uhci->qh_remove_age = uhci->frame_number;
	}

	/* Check to see if the remove list is empty. Set the IOC bit */
	/* to force an interrupt so we can remove the QH */
	if (list_empty(&uhci->qh_remove_list))
		uhci_set_next_interrupt(uhci);

	list_add(&qh->remove_list, &uhci->qh_remove_list);
}
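/*
 * The same one-frame grace period applies to QHs: the QH is only
 * unlinked here, and uhci_free_pending_qhs() frees it once the frame
 * counter shows the HC can no longer be following it.
 */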
static int uhci_fixup_toggle(struct urb *urb, unsigned int toggle)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct uhci_td *td;

	list_for_each_entry(td, &urbp->td_list, list) {
		if (toggle)
			td->token |= cpu_to_le32(TD_TOKEN_TOGGLE);
		else
			td->token &= ~cpu_to_le32(TD_TOKEN_TOGGLE);

		toggle ^= 1;
	}

	return toggle;
}
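/*
 * Worked example: for a chain of three TDs and an initial toggle of 1,
 * uhci_fixup_toggle() rewrites the tokens to DATA1, DATA0, DATA1 and
 * returns 0, the toggle the next transfer on this endpoint should
 * start with.
 */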
/* This function will append one URB's QH to another URB's QH. This is for */
/* queuing interrupt, control or bulk transfers */
static void uhci_append_queued_urb(struct uhci_hcd *uhci, struct urb *eurb, struct urb *urb)
{
	struct urb_priv *eurbp, *urbp, *furbp, *lurbp;
	struct uhci_td *lltd;

	eurbp = eurb->hcpriv;
	urbp = urb->hcpriv;

	/* Find the first URB in the queue */
	furbp = eurbp;
	if (eurbp->queued)
		list_for_each_entry(furbp, &eurbp->queue_list, queue_list)
			if (!furbp->queued)
				break;

	lurbp = list_entry(furbp->queue_list.prev, struct urb_priv, queue_list);

	lltd = list_entry(lurbp->td_list.prev, struct uhci_td, list);

	/* Control transfers always start with toggle 0 */
	if (!usb_pipecontrol(urb->pipe))
		usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
				usb_pipeout(urb->pipe),
				uhci_fixup_toggle(urb,
					uhci_toggle(td_token(lltd)) ^ 1));

	/* All qh's in the queue need to link to the next queue */
	urbp->qh->link = eurbp->qh->link;

	wmb();			/* Make sure we flush everything */

	lltd->link = cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH;

	list_add_tail(&urbp->queue_list, &furbp->queue_list);

	urbp->queued = 1;
}
static void uhci_delete_queued_urb(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp, *nurbp, *purbp, *turbp;
	struct uhci_td *pltd;
	unsigned int toggle;

	urbp = urb->hcpriv;

	if (list_empty(&urbp->queue_list))
		return;

	nurbp = list_entry(urbp->queue_list.next, struct urb_priv, queue_list);

	/*
	 * Fix up the toggle for the following URBs in the queue.
	 * Only needed for bulk and interrupt: control and isochronous
	 * endpoints don't propagate toggles between messages.
	 */
	if (usb_pipebulk(urb->pipe) || usb_pipeint(urb->pipe)) {
		if (!urbp->queued)
			/* We just set the toggle in uhci_unlink_generic */
			toggle = usb_gettoggle(urb->dev,
					usb_pipeendpoint(urb->pipe),
					usb_pipeout(urb->pipe));
		else {
			/* If we're in the middle of the queue, grab the */
			/* toggle from the TD previous to us */
			purbp = list_entry(urbp->queue_list.prev,
					struct urb_priv, queue_list);
			pltd = list_entry(purbp->td_list.prev,
					struct uhci_td, list);
			toggle = uhci_toggle(td_token(pltd)) ^ 1;
		}

		list_for_each_entry(turbp, &urbp->queue_list, queue_list) {
			if (!turbp->queued)
				break;
			toggle = uhci_fixup_toggle(turbp->urb, toggle);
		}

		usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
				usb_pipeout(urb->pipe), toggle);
	}

	if (urbp->queued) {
		/* We're somewhere in the middle (or end). The case where
		 * we're at the head is handled in uhci_remove_qh(). */
		purbp = list_entry(urbp->queue_list.prev, struct urb_priv,
				queue_list);

		pltd = list_entry(purbp->td_list.prev, struct uhci_td, list);
		if (nurbp->queued)
			pltd->link = cpu_to_le32(nurbp->qh->dma_handle) | UHCI_PTR_QH;
		else
			/* The next URB happens to be the beginning, so */
			/* we're the last, end the chain */
			pltd->link = UHCI_PTR_TERM;
	}

	/* urbp->queue_list is handled in uhci_remove_qh() */
}
static struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp;

	urbp = kmem_cache_alloc(uhci_up_cachep, SLAB_ATOMIC);
	if (!urbp)
		return NULL;

	memset((void *)urbp, 0, sizeof(*urbp));

	urbp->inserttime = jiffies;
	urbp->fsbrtime = jiffies;
	urbp->urb = urb;

	INIT_LIST_HEAD(&urbp->td_list);
	INIT_LIST_HEAD(&urbp->queue_list);
	INIT_LIST_HEAD(&urbp->urb_list);

	list_add_tail(&urbp->urb_list, &uhci->urb_list);

	urb->hcpriv = urbp;

	return urbp;
}
static void uhci_add_td_to_urb(struct urb *urb, struct uhci_td *td)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;

	td->urb = urb;

	list_add_tail(&td->list, &urbp->td_list);
}
static void uhci_remove_td_from_urb(struct uhci_td *td)
{
	if (list_empty(&td->list))
		return;

	list_del_init(&td->list);

	td->urb = NULL;
}
static void uhci_destroy_urb_priv(struct uhci_hcd *uhci, struct urb *urb)
{
	struct uhci_td *td, *tmp;
	struct urb_priv *urbp;

	urbp = (struct urb_priv *)urb->hcpriv;
	if (!urbp)
		return;

	if (!list_empty(&urbp->urb_list))
		dev_warn(uhci_dev(uhci), "urb %p still on uhci->urb_list "
				"or uhci->remove_list!\n", urb);

	uhci_get_current_frame_number(uhci);
	if (uhci->frame_number + uhci->is_stopped != uhci->td_remove_age) {
		uhci_free_pending_tds(uhci);
		uhci->td_remove_age = uhci->frame_number;
	}

	/* Check to see if the remove list is empty. Set the IOC bit */
	/* to force an interrupt so we can remove the TDs */
	if (list_empty(&uhci->td_remove_list))
		uhci_set_next_interrupt(uhci);

	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
		uhci_remove_td_from_urb(td);
		uhci_remove_td(uhci, td);
		list_add(&td->remove_list, &uhci->td_remove_list);
	}

	urb->hcpriv = NULL;
	kmem_cache_free(uhci_up_cachep, urbp);
}
static void uhci_inc_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;

	if ((!(urb->transfer_flags & URB_NO_FSBR)) && !urbp->fsbr) {
		urbp->fsbr = 1;
		if (!uhci->fsbr++ && !uhci->fsbrtimeout)
			uhci->skel_term_qh->link = cpu_to_le32(uhci->skel_fs_control_qh->dma_handle) | UHCI_PTR_QH;
	}
}
static void uhci_dec_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;

	if ((!(urb->transfer_flags & URB_NO_FSBR)) && urbp->fsbr) {
		urbp->fsbr = 0;
		if (!--uhci->fsbr)
			uhci->fsbrtimeout = jiffies + FSBR_DELAY;
	}
}
/*
 * Map status to standard result codes
 *
 * <status> is (td_status(td) & 0xF60000), a.k.a.
 * uhci_status_bits(td_status(td)).
 * Note: <status> does not include the TD_CTRL_NAK bit.
 * <dir_out> is True for output TDs and False for input TDs.
 */
static int uhci_map_status(int status, int dir_out)
{
	if (!status)
		return 0;
	if (status & TD_CTRL_BITSTUFF)			/* Bitstuff error */
		return -EPROTO;
	if (status & TD_CTRL_CRCTIMEO) {		/* CRC/Timeout */
		if (dir_out)
			return -EPROTO;
		else
			return -EILSEQ;
	}
	if (status & TD_CTRL_BABBLE)			/* Babble */
		return -EOVERFLOW;
	if (status & TD_CTRL_DBUFERR)			/* Buffer error */
		return -ENOSR;
	if (status & TD_CTRL_STALLED)			/* Stalled */
		return -EPIPE;
	WARN_ON(status & TD_CTRL_ACTIVE);		/* Active */

	return 0;
}
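/*
 * For example, a TD that the device STALLed comes back with
 * TD_CTRL_STALLED set in its status word, so uhci_map_status()
 * returns -EPIPE and that becomes the URB's completion status.
 */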
/*
 * Control transfers
 */
static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct uhci_td *td;
	struct uhci_qh *qh, *skelqh;
	unsigned long destination, status;
	int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
	int len = urb->transfer_buffer_length;
	dma_addr_t data = urb->transfer_dma;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;

	/* 3 errors */
	status = TD_CTRL_ACTIVE | uhci_maxerr(3);
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;

	/*
	 * Build the TD for the control request setup packet
	 */
	td = uhci_alloc_td(uhci, urb->dev);
	if (!td)
		return -ENOMEM;

	uhci_add_td_to_urb(urb, td);
	uhci_fill_td(td, status, destination | uhci_explen(7),
			urb->setup_dma);

	/*
	 * If direction is "send", change the packet ID from SETUP (0x2D)
	 * to OUT (0xE1). Else change it from SETUP to IN (0x69) and
	 * set Short Packet Detect (SPD) for all data packets.
	 */
	if (usb_pipeout(urb->pipe))
		destination ^= (USB_PID_SETUP ^ USB_PID_OUT);
	else {
		destination ^= (USB_PID_SETUP ^ USB_PID_IN);
		status |= TD_CTRL_SPD;
	}

	/*
	 * Build the DATA TD's
	 */
	while (len > 0) {
		int pktsze = len;

		if (pktsze > maxsze)
			pktsze = maxsze;

		td = uhci_alloc_td(uhci, urb->dev);
		if (!td)
			return -ENOMEM;

		/* Alternate Data0/1 (start with Data1) */
		destination ^= TD_TOKEN_TOGGLE;

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination | uhci_explen(pktsze - 1),
				data);

		data += pktsze;
		len -= pktsze;
	}

	/*
	 * Build the final TD for control status
	 */
	td = uhci_alloc_td(uhci, urb->dev);
	if (!td)
		return -ENOMEM;

	/*
	 * It's IN if the pipe is an output pipe or we're not expecting
	 * data back.
	 */
	destination &= ~TD_TOKEN_PID_MASK;
	if (usb_pipeout(urb->pipe) || !urb->transfer_buffer_length)
		destination |= USB_PID_IN;
	else
		destination |= USB_PID_OUT;

	destination |= TD_TOKEN_TOGGLE;		/* End in Data1 */

	status &= ~TD_CTRL_SPD;

	uhci_add_td_to_urb(urb, td);
	uhci_fill_td(td, status | TD_CTRL_IOC,
			destination | uhci_explen(UHCI_NULL_DATA_SIZE), 0);

	qh = uhci_alloc_qh(uhci, urb->dev);
	if (!qh)
		return -ENOMEM;

	urbp->qh = qh;
	qh->urbp = urbp;

	uhci_insert_tds_in_qh(qh, urb, UHCI_PTR_BREADTH);

	/* Low-speed transfers get a different queue, and won't hog the bus.
	 * Also, some devices enumerate better without FSBR; the easiest way
	 * to do that is to put URBs on the low-speed queue while the device
	 * is in the DEFAULT state. */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->state == USB_STATE_DEFAULT)
		skelqh = uhci->skel_ls_control_qh;
	else {
		skelqh = uhci->skel_fs_control_qh;
		uhci_inc_fsbr(uhci, urb);
	}

	if (eurb)
		uhci_append_queued_urb(uhci, eurb, urb);
	else
		uhci_insert_qh(uhci, skelqh, urb);

	return -EINPROGRESS;
}
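/*
 * The TD chain built above for a control transfer is therefore
 *
 *	[SETUP, 8 bytes] -> [DATA0/DATA1 ...] -> [STATUS, opposite direction]
 *
 * with only the status TD carrying IOC, so a single interrupt fires
 * when the whole transfer completes.
 */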
/*
 * If control-IN transfer was short, the status packet wasn't sent.
 * This routine changes the element pointer in the QH to point at the
 * status TD. It's safe to do this even while the QH is live, because
 * the hardware only updates the element pointer following a successful
 * transfer. The inactive TD for the short packet won't cause an update,
 * so the pointer won't get overwritten. The next time the controller
 * sees this QH, it will send the status packet.
 */
static int usb_control_retrigger_status(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct uhci_td *td;

	urbp->short_control_packet = 1;

	/* The status TD is the last one in the chain */
	td = list_entry(urbp->td_list.prev, struct uhci_td, list);
	urbp->qh->element = cpu_to_le32(td->dma_handle);

	return -EINPROGRESS;
}
static int uhci_result_control(struct uhci_hcd *uhci, struct urb *urb)
{
	struct list_head *tmp, *head;
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_td *td;
	unsigned int status;
	int ret = 0;

	if (list_empty(&urbp->td_list))
		return -EINVAL;

	head = &urbp->td_list;

	if (urbp->short_control_packet) {
		tmp = head->prev;
		goto status_stage;
	}

	tmp = head->next;
	td = list_entry(tmp, struct uhci_td, list);

	/* The first TD is the SETUP stage, check the status, but skip */
	/* the count */
	status = uhci_status_bits(td_status(td));
	if (status & TD_CTRL_ACTIVE)
		return -EINPROGRESS;

	if (status)
		goto td_error;

	urb->actual_length = 0;

	/* The rest of the TD's (but the last) are data */
	tmp = tmp->next;
	while (tmp != head && tmp->next != head) {
		unsigned int ctrlstat;

		td = list_entry(tmp, struct uhci_td, list);
		tmp = tmp->next;

		ctrlstat = td_status(td);
		status = uhci_status_bits(ctrlstat);
		if (status & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		urb->actual_length += uhci_actual_length(ctrlstat);

		if (status)
			goto td_error;

		/* Check to see if we received a short packet */
		if (uhci_actual_length(ctrlstat) <
				uhci_expected_length(td_token(td))) {
			if (urb->transfer_flags & URB_SHORT_NOT_OK) {
				ret = -EREMOTEIO;
				goto err;
			}

			if (uhci_packetid(td_token(td)) == USB_PID_IN)
				return usb_control_retrigger_status(uhci, urb);
			else
				return 0;
		}
	}

status_stage:
	td = list_entry(tmp, struct uhci_td, list);

	/* Control status stage */
	status = td_status(td);

#ifdef I_HAVE_BUGGY_APC_BACKUPS
	/* APC BackUPS Pro kludge */
	/* It tries to send all of the descriptor instead of the amount */
	/* we requested */
	if (status & TD_CTRL_IOC &&	/* IOC is masked out by uhci_status_bits */
			status & TD_CTRL_ACTIVE &&
			status & TD_CTRL_NAK)
		return 0;
#endif

	status = uhci_status_bits(status);
	if (status & TD_CTRL_ACTIVE)
		return -EINPROGRESS;

	if (status)
		goto td_error;

	return 0;

td_error:
	ret = uhci_map_status(status, uhci_packetout(td_token(td)));

err:
	if ((debug == 1 && ret != -EPIPE) || debug > 1) {
		/* Some debugging code */
		dev_dbg(uhci_dev(uhci), "%s: failed with status %x\n",
				__FUNCTION__, status);

		if (errbuf) {
			/* Print the chain for debugging purposes */
			uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);

			lprintk(errbuf);
		}
	}

	return ret;
}
/*
 * Common submit for bulk and interrupt
 */
static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb, struct uhci_qh *skelqh)
{
	struct uhci_td *td;
	struct uhci_qh *qh;
	unsigned long destination, status;
	int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
	int len = urb->transfer_buffer_length;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	dma_addr_t data = urb->transfer_dma;

	if (len < 0)
		return -EINVAL;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);

	status = uhci_maxerr(3) | TD_CTRL_ACTIVE;
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;
	if (usb_pipein(urb->pipe))
		status |= TD_CTRL_SPD;

	/*
	 * Build the DATA TD's
	 */
	do {	/* Allow zero length packets */
		int pktsze = maxsze;

		if (pktsze >= len) {
			pktsze = len;
			if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
				status &= ~TD_CTRL_SPD;
		}

		td = uhci_alloc_td(uhci, urb->dev);
		if (!td)
			return -ENOMEM;

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination | uhci_explen(pktsze - 1) |
			(usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			 usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT),
			data);

		data += pktsze;
		len -= maxsze;

		usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe));
	} while (len > 0);

	/*
	 * URB_ZERO_PACKET means adding a 0-length packet, if direction
	 * is OUT and the transfer_length was an exact multiple of maxsze,
	 * hence (len = transfer_length - N * maxsze) == 0;
	 * however, if transfer_length == 0, the zero packet was already
	 * prepared above.
	 */
	if (usb_pipeout(urb->pipe) && (urb->transfer_flags & URB_ZERO_PACKET) &&
			!len && urb->transfer_buffer_length) {
		td = uhci_alloc_td(uhci, urb->dev);
		if (!td)
			return -ENOMEM;

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination | uhci_explen(UHCI_NULL_DATA_SIZE) |
			(usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			 usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT),
			data);

		usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe));
	}

	/* Set the interrupt-on-completion flag on the last packet.
	 * A more-or-less typical 4 KB URB (= size of one memory page)
	 * will require about 3 ms to transfer; that's a little on the
	 * fast side but not enough to justify delaying an interrupt
	 * more than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT
	 * flag setting. */
	td->status |= cpu_to_le32(TD_CTRL_IOC);

	qh = uhci_alloc_qh(uhci, urb->dev);
	if (!qh)
		return -ENOMEM;

	urbp->qh = qh;
	qh->urbp = urbp;

	/* Always breadth first */
	uhci_insert_tds_in_qh(qh, urb, UHCI_PTR_BREADTH);

	if (eurb)
		uhci_append_queued_urb(uhci, eurb, urb);
	else
		uhci_insert_qh(uhci, skelqh, urb);

	return -EINPROGRESS;
}
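/*
 * Unlike the control path above, there is no setup or status stage
 * here: the chain is plain DATA TDs with alternating toggles, plus the
 * optional zero-length terminating packet built for URB_ZERO_PACKET.
 */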
/*
 * Common result for bulk and interrupt
 */
static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_td *td;
	unsigned int status = 0;
	int ret = 0;

	urb->actual_length = 0;

	list_for_each_entry(td, &urbp->td_list, list) {
		unsigned int ctrlstat = td_status(td);

		status = uhci_status_bits(ctrlstat);
		if (status & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		urb->actual_length += uhci_actual_length(ctrlstat);

		if (status)
			goto td_error;

		if (uhci_actual_length(ctrlstat) <
				uhci_expected_length(td_token(td))) {
			if (urb->transfer_flags & URB_SHORT_NOT_OK) {
				ret = -EREMOTEIO;
				goto err;
			} else
				return 0;
		}
	}

	return 0;

td_error:
	ret = uhci_map_status(status, uhci_packetout(td_token(td)));

err:
	/*
	 * Enable this chunk of code if you want to see some more debugging.
	 * But be careful, it has the tendency to starve out khubd and prevent
	 * disconnects from happening successfully if you have a slow debug
	 * log interface (like a serial console).
	 */
#if 0
	if ((debug == 1 && ret != -EPIPE) || debug > 1) {
		/* Some debugging code */
		dev_dbg(uhci_dev(uhci), "%s: failed with status %x\n",
				__FUNCTION__, status);

		if (errbuf) {
			/* Print the chain for debugging purposes */
			uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);

			lprintk(errbuf);
		}
	}
#endif

	return ret;
}
static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
{
	int ret;

	/* Can't have low-speed bulk transfers */
	if (urb->dev->speed == USB_SPEED_LOW)
		return -EINVAL;

	ret = uhci_submit_common(uhci, urb, eurb, uhci->skel_bulk_qh);
	if (ret == -EINPROGRESS)
		uhci_inc_fsbr(uhci, urb);

	return ret;
}
static inline int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
{
	/* USB 1.1 interrupt transfers only involve one packet per interval;
	 * that's the uhci_submit_common() "breadth first" policy. Drivers
	 * can submit urbs of any length, but longer ones might need many
	 * intervals to complete.
	 */
	return uhci_submit_common(uhci, urb, eurb, uhci->skelqh[__interval_to_skel(urb->interval)]);
}
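/*
 * __interval_to_skel() maps the URB's interval onto one of the
 * periodic skeleton QHs. As a rough example (assuming the usual
 * power-of-two skeleton spacing), an URB with urb->interval == 10
 * would be rounded down onto the 8 ms skeleton queue.
 */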
/*
 * Isochronous transfers
 */
static int isochronous_find_limits(struct uhci_hcd *uhci, struct urb *urb, unsigned int *start, unsigned int *end)
{
	struct urb *last_urb = NULL;
	struct urb_priv *up;
	int ret = 0;

	list_for_each_entry(up, &uhci->urb_list, urb_list) {
		struct urb *u = up->urb;

		/* look for pending URB's with identical pipe handle */
		if ((urb->pipe == u->pipe) && (urb->dev == u->dev) &&
		    (u->status == -EINPROGRESS) && (u != urb)) {
			if (!last_urb)
				*start = u->start_frame;
			last_urb = u;
		}
	}

	if (last_urb) {
		*end = (last_urb->start_frame + last_urb->number_of_packets *
				last_urb->interval) & (UHCI_NUMFRAMES - 1);
		ret = 0;
	} else
		ret = -1;	/* no previous urb found */

	return ret;
}
static int isochronous_find_start(struct uhci_hcd *uhci, struct urb *urb)
{
	int limits;
	unsigned int start = 0, end = 0;

	if (urb->number_of_packets > 900)	/* 900? Why? */
		return -EFBIG;

	limits = isochronous_find_limits(uhci, urb, &start, &end);

	if (urb->transfer_flags & URB_ISO_ASAP) {
		if (limits) {
			uhci_get_current_frame_number(uhci);
			urb->start_frame = (uhci->frame_number + 10)
					& (UHCI_NUMFRAMES - 1);
		} else
			urb->start_frame = end;
	} else {
		urb->start_frame &= (UHCI_NUMFRAMES - 1);
		/* FIXME: Sanity check */
	}

	return 0;
}
/*
 * Isochronous transfers
 */
static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb)
{
	struct uhci_td *td;
	int i, ret, frame;
	int status, destination;

	status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);

	ret = isochronous_find_start(uhci, urb);
	if (ret)
		return ret;

	frame = urb->start_frame;
	for (i = 0; i < urb->number_of_packets; i++, frame += urb->interval) {
		if (!urb->iso_frame_desc[i].length)
			continue;

		td = uhci_alloc_td(uhci, urb->dev);
		if (!td)
			return -ENOMEM;

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination | uhci_explen(urb->iso_frame_desc[i].length - 1),
			urb->transfer_dma + urb->iso_frame_desc[i].offset);

		if (i + 1 >= urb->number_of_packets)
			td->status |= cpu_to_le32(TD_CTRL_IOC);

		uhci_insert_td_frame_list(uhci, td, frame);
	}

	return -EINPROGRESS;
}
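/*
 * Note that isochronous TDs never go through a QH: each one is linked
 * straight into the frame list, one frame every urb->interval. A
 * 10-packet URB with interval 1, for instance, occupies 10 consecutive
 * frames starting at urb->start_frame.
 */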
static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
{
	struct uhci_td *td;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	int status;
	int i, ret = 0;

	urb->actual_length = 0;

	i = 0;
	list_for_each_entry(td, &urbp->td_list, list) {
		int actlength;
		unsigned int ctrlstat = td_status(td);

		if (ctrlstat & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		actlength = uhci_actual_length(ctrlstat);
		urb->iso_frame_desc[i].actual_length = actlength;
		urb->actual_length += actlength;

		status = uhci_map_status(uhci_status_bits(ctrlstat),
				usb_pipeout(urb->pipe));
		urb->iso_frame_desc[i].status = status;
		if (status) {
			urb->error_count++;
			ret = status;
		}

		i++;
	}

	return ret;
}
static struct urb *uhci_find_urb_ep(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *up;

	/* We don't match Isoc transfers since they are special */
	if (usb_pipeisoc(urb->pipe))
		return NULL;

	list_for_each_entry(up, &uhci->urb_list, urb_list) {
		struct urb *u = up->urb;

		if (u->dev == urb->dev && u->status == -EINPROGRESS) {
			/* For control, ignore the direction */
			if (usb_pipecontrol(urb->pipe) &&
			    (u->pipe & ~USB_DIR_IN) == (urb->pipe & ~USB_DIR_IN))
				return u;
			else if (u->pipe == urb->pipe)
				return u;
		}
	}

	return NULL;
}
static int uhci_urb_enqueue(struct usb_hcd *hcd,
		struct usb_host_endpoint *ep,
		struct urb *urb, int mem_flags)
{
	int ret;
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct urb *eurb;
	int bustime;

	spin_lock_irqsave(&uhci->lock, flags);

	ret = urb->status;
	if (ret != -EINPROGRESS)		/* URB already unlinked! */
		goto out;

	eurb = uhci_find_urb_ep(uhci, urb);

	if (!uhci_alloc_urb_priv(uhci, urb)) {
		ret = -ENOMEM;
		goto out;
	}

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		ret = uhci_submit_control(uhci, urb, eurb);
		break;
	case PIPE_INTERRUPT:
		if (!eurb) {
			bustime = usb_check_bandwidth(urb->dev, urb);
			if (bustime < 0)
				ret = bustime;
			else {
				ret = uhci_submit_interrupt(uhci, urb, eurb);
				if (ret == -EINPROGRESS)
					usb_claim_bandwidth(urb->dev, urb, bustime, 0);
			}
		} else {	/* inherit from parent */
			urb->bandwidth = eurb->bandwidth;
			ret = uhci_submit_interrupt(uhci, urb, eurb);
		}
		break;
	case PIPE_BULK:
		ret = uhci_submit_bulk(uhci, urb, eurb);
		break;
	case PIPE_ISOCHRONOUS:
		bustime = usb_check_bandwidth(urb->dev, urb);
		if (bustime < 0) {
			ret = bustime;
			break;
		}

		ret = uhci_submit_isochronous(uhci, urb);
		if (ret == -EINPROGRESS)
			usb_claim_bandwidth(urb->dev, urb, bustime, 1);
		break;
	}

	if (ret != -EINPROGRESS) {
		/* Submit failed, so delete it from the urb_list */
		struct urb_priv *urbp = urb->hcpriv;

		list_del_init(&urbp->urb_list);
		uhci_destroy_urb_priv(uhci, urb);
	} else
		ret = 0;

out:
	spin_unlock_irqrestore(&uhci->lock, flags);
	return ret;
}
/*
 * Return the result of a transfer
 */
static void uhci_transfer_result(struct uhci_hcd *uhci, struct urb *urb)
{
	int ret = -EINPROGRESS;
	struct urb_priv *urbp;

	spin_lock(&urb->lock);

	urbp = (struct urb_priv *)urb->hcpriv;

	if (urb->status != -EINPROGRESS)	/* URB already dequeued */
		goto out;

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		ret = uhci_result_control(uhci, urb);
		break;
	case PIPE_BULK:
	case PIPE_INTERRUPT:
		ret = uhci_result_common(uhci, urb);
		break;
	case PIPE_ISOCHRONOUS:
		ret = uhci_result_isochronous(uhci, urb);
		break;
	}

	if (ret == -EINPROGRESS)
		goto out;
	urb->status = ret;

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
	case PIPE_BULK:
	case PIPE_ISOCHRONOUS:
		/* Release bandwidth for Interrupt or Isoc. transfers */
		if (urb->bandwidth)
			usb_release_bandwidth(urb->dev, urb, 1);
		uhci_unlink_generic(uhci, urb);
		break;
	case PIPE_INTERRUPT:
		/* Release bandwidth for Interrupt or Isoc. transfers */
		/* Make sure we don't release if we have a queued URB */
		if (list_empty(&urbp->queue_list) && urb->bandwidth)
			usb_release_bandwidth(urb->dev, urb, 0);
		else
			/* bandwidth was passed on to queued URB, */
			/* so don't let usb_unlink_urb() release it */
			urb->bandwidth = 0;
		uhci_unlink_generic(uhci, urb);
		break;
	default:
		dev_info(uhci_dev(uhci), "%s: unknown pipe type %d "
				"for urb %p\n",
				__FUNCTION__, usb_pipetype(urb->pipe), urb);
	}

	/* Move it from uhci->urb_list to uhci->complete_list */
	uhci_moveto_complete(uhci, urbp);

out:
	spin_unlock(&urb->lock);
}
static void uhci_unlink_generic(struct uhci_hcd *uhci, struct urb *urb)
{
	struct list_head *head;
	struct uhci_td *td;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	int prevactive = 0;

	uhci_dec_fsbr(uhci, urb);	/* Safe since it checks */

	/*
	 * Now we need to find out what the last successful toggle was
	 * so we can update the local data toggle for the next transfer
	 *
	 * There are 2 ways the last successful completed TD is found:
	 *
	 * 1) The TD is NOT active and the actual length < expected length
	 * 2) The TD is NOT active and it's the last TD in the chain
	 *
	 * and a third way the first uncompleted TD is found:
	 *
	 * 3) The TD is active and the previous TD is NOT active
	 *
	 * Control and Isochronous ignore the toggle, so this is safe
	 * for all types.
	 *
	 * FIXME: The toggle fixups won't be 100% reliable until we
	 * change over to using a single queue for each endpoint and
	 * stop the queue before unlinking.
	 */
	head = &urbp->td_list;
	list_for_each_entry(td, head, list) {
		unsigned int ctrlstat = td_status(td);

		if (!(ctrlstat & TD_CTRL_ACTIVE) &&
				(uhci_actual_length(ctrlstat) <
				 uhci_expected_length(td_token(td)) ||
				td->list.next == head))
			usb_settoggle(urb->dev, uhci_endpoint(td_token(td)),
				uhci_packetout(td_token(td)),
				uhci_toggle(td_token(td)) ^ 1);
		else if ((ctrlstat & TD_CTRL_ACTIVE) && !prevactive)
			usb_settoggle(urb->dev, uhci_endpoint(td_token(td)),
				uhci_packetout(td_token(td)),
				uhci_toggle(td_token(td)));

		prevactive = ctrlstat & TD_CTRL_ACTIVE;
	}

	uhci_delete_queued_urb(uhci, urb);

	/* The interrupt loop will reclaim the QH's */
	uhci_remove_qh(uhci, urbp->qh);
	urbp->qh = NULL;
}
static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct urb_priv *urbp;

	spin_lock_irqsave(&uhci->lock, flags);
	urbp = urb->hcpriv;
	if (!urbp)			/* URB was never linked! */
		goto done;
	list_del_init(&urbp->urb_list);

	uhci_unlink_generic(uhci, urb);

	uhci_get_current_frame_number(uhci);
	if (uhci->frame_number + uhci->is_stopped != uhci->urb_remove_age) {
		uhci_remove_pending_urbps(uhci);
		uhci->urb_remove_age = uhci->frame_number;
	}

	/* If we're the first, set the next interrupt bit */
	if (list_empty(&uhci->urb_remove_list))
		uhci_set_next_interrupt(uhci);
	list_add_tail(&urbp->urb_list, &uhci->urb_remove_list);

done:
	spin_unlock_irqrestore(&uhci->lock, flags);
	return 0;
}
static int uhci_fsbr_timeout(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct list_head *head;
	struct uhci_td *td;
	int count = 0;

	uhci_dec_fsbr(uhci, urb);

	urbp->fsbr_timeout = 1;

	/*
	 * Ideally we would want to fix qh->element as well, but it's
	 * read/write by the HC, so that can introduce a race. It's not
	 * really worth the hassle.
	 */

	head = &urbp->td_list;
	list_for_each_entry(td, head, list) {
		/*
		 * Make sure we don't do the last one (since it'll have the
		 * TERM bit set) as well as we skip every so many TD's to
		 * make sure it doesn't hog the bandwidth
		 */
		if (td->list.next != head && (count % DEPTH_INTERVAL) ==
				(DEPTH_INTERVAL - 1))
			td->link |= UHCI_PTR_DEPTH;

		count++;
	}

	return 0;
}
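/*
 * Example (assuming DEPTH_INTERVAL is 5): TDs 4, 9, 14, ... are
 * switched to depth-first links, so after an FSBR timeout the HC still
 * advances several TDs per schedule pass without hogging the bus the
 * way full-speed bandwidth reclamation would.
 */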
static void uhci_free_pending_qhs(struct uhci_hcd *uhci)
{
	struct uhci_qh *qh, *tmp;

	list_for_each_entry_safe(qh, tmp, &uhci->qh_remove_list, remove_list) {
		list_del_init(&qh->remove_list);

		uhci_free_qh(uhci, qh);
	}
}
static void uhci_free_pending_tds(struct uhci_hcd *uhci)
{
	struct uhci_td *td, *tmp;

	list_for_each_entry_safe(td, tmp, &uhci->td_remove_list, remove_list) {
		list_del_init(&td->remove_list);

		uhci_free_td(uhci, td);
	}
}
static void
uhci_finish_urb(struct usb_hcd *hcd, struct urb *urb, struct pt_regs *regs)
__releases(uhci->lock)
__acquires(uhci->lock)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);

	uhci_destroy_urb_priv(uhci, urb);

	spin_unlock(&uhci->lock);
	usb_hcd_giveback_urb(hcd, urb, regs);
	spin_lock(&uhci->lock);
}
static void uhci_finish_completion(struct uhci_hcd *uhci, struct pt_regs *regs)
{
	struct urb_priv *urbp, *tmp;

	list_for_each_entry_safe(urbp, tmp, &uhci->complete_list, urb_list) {
		struct urb *urb = urbp->urb;

		list_del_init(&urbp->urb_list);
		uhci_finish_urb(uhci_to_hcd(uhci), urb, regs);
	}
}
static void uhci_remove_pending_urbps(struct uhci_hcd *uhci)
{
	/* Splice the urb_remove_list onto the end of the complete_list */
	list_splice_init(&uhci->urb_remove_list, uhci->complete_list.prev);
}
/* Process events in the schedule, but only in one thread at a time */
static void uhci_scan_schedule(struct uhci_hcd *uhci, struct pt_regs *regs)
{
	struct urb_priv *urbp, *tmp;

	/* Don't allow re-entrant calls */
	if (uhci->scan_in_progress) {
		uhci->need_rescan = 1;
		return;
	}
	uhci->scan_in_progress = 1;
rescan:
	uhci->need_rescan = 0;

	uhci_clear_next_interrupt(uhci);
	uhci_get_current_frame_number(uhci);

	if (uhci->frame_number + uhci->is_stopped != uhci->qh_remove_age)
		uhci_free_pending_qhs(uhci);
	if (uhci->frame_number + uhci->is_stopped != uhci->td_remove_age)
		uhci_free_pending_tds(uhci);
	if (uhci->frame_number + uhci->is_stopped != uhci->urb_remove_age)
		uhci_remove_pending_urbps(uhci);

	/* Walk the list of pending URBs to see which ones completed
	 * (must be _safe because uhci_transfer_result() dequeues URBs) */
	list_for_each_entry_safe(urbp, tmp, &uhci->urb_list, urb_list) {
		struct urb *urb = urbp->urb;

		/* Checks the status and does all of the magic necessary */
		uhci_transfer_result(uhci, urb);
	}
	uhci_finish_completion(uhci, regs);

	/* If the controller is stopped, we can finish these off right now */
	if (uhci->is_stopped) {
		uhci_free_pending_qhs(uhci);
		uhci_free_pending_tds(uhci);
		uhci_remove_pending_urbps(uhci);
	}

	if (uhci->need_rescan)
		goto rescan;
	uhci->scan_in_progress = 0;

	if (list_empty(&uhci->urb_remove_list) &&
	    list_empty(&uhci->td_remove_list) &&
	    list_empty(&uhci->qh_remove_list))
		uhci_clear_next_interrupt(uhci);
	else
		uhci_set_next_interrupt(uhci);

	/* Wake up anyone waiting for an URB to complete */
	wake_up_all(&uhci->waitqh);
}
static void check_fsbr(struct uhci_hcd *uhci)
{
	struct urb_priv *up;

	list_for_each_entry(up, &uhci->urb_list, urb_list) {
		struct urb *u = up->urb;

		spin_lock(&u->lock);

		/* Check if the FSBR timed out */
		if (up->fsbr && !up->fsbr_timeout && time_after_eq(jiffies, up->fsbrtime + IDLE_TIMEOUT))
			uhci_fsbr_timeout(uhci, u);

		spin_unlock(&u->lock);
	}

	/* Really disable FSBR */
	if (!uhci->fsbr && uhci->fsbrtimeout && time_after_eq(jiffies, uhci->fsbrtimeout)) {
		uhci->fsbrtimeout = 0;
		uhci->skel_term_qh->link = UHCI_PTR_TERM;
	}
}