/*
 * Universal Host Controller Interface driver for USB.
 *
 * Maintainer: Alan Stern <stern@rowland.harvard.edu>
 *
 * (C) Copyright 1999 Linus Torvalds
 * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
 * (C) Copyright 1999 Randy Dunlap
 * (C) Copyright 1999 Georg Acher, acher@in.tum.de
 * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
 * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
 * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
 * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
 *               support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
 * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
 * (C) Copyright 2004-2007 Alan Stern, stern@rowland.harvard.edu
 */

/*
 * Technically, updating td->status here is a race, but it's not really a
 * problem.  The worst that can happen is that we set the IOC bit again,
 * generating a spurious interrupt.  We could fix this by creating another
 * QH and leaving the IOC bit always set, but then we would have to play
 * games with the FSBR code to make sure we get the correct order in all
 * the cases.  I don't think it's worth the effort.
 */
static void uhci_set_next_interrupt(struct uhci_hcd *uhci)
{
	if (uhci->is_stopped)
		mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies);
	uhci->term_td->status |= cpu_to_le32(TD_CTRL_IOC);
}

static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
{
	uhci->term_td->status &= ~cpu_to_le32(TD_CTRL_IOC);
}

/*
 * Full-Speed Bandwidth Reclamation (FSBR).
 * We turn on FSBR whenever a queue that wants it is advancing,
 * and leave it on for a short time thereafter.
 */
static void uhci_fsbr_on(struct uhci_hcd *uhci)
{
	struct uhci_qh *lqh;

	/* The terminating skeleton QH always points back to the first
	 * FSBR QH.  Make the last async QH point to the terminating
	 * skeleton QH. */
	uhci->fsbr_is_on = 1;
	lqh = list_entry(uhci->skel_async_qh->node.prev,
			struct uhci_qh, node);
	lqh->link = LINK_TO_QH(uhci->skel_term_qh);
}

static void uhci_fsbr_off(struct uhci_hcd *uhci)
{
	struct uhci_qh *lqh;

	/* Remove the link from the last async QH to the terminating
	 * skeleton QH. */
	uhci->fsbr_is_on = 0;
	lqh = list_entry(uhci->skel_async_qh->node.prev,
			struct uhci_qh, node);
	lqh->link = UHCI_PTR_TERM;
}
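
/*
 * Illustration (not part of the driver logic): with FSBR on, the tail of
 * the async schedule loops back so the controller keeps polling for the
 * rest of each frame instead of going idle:
 *
 *	... -> last async QH -> skel_term_qh -> first FSBR QH -> ...
 *
 * uhci_fsbr_off() terminates the last async QH's link pointer, breaking
 * the loop so the controller idles once it reaches the end of the
 * schedule.
 */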
static void uhci_add_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;

	if (!(urb->transfer_flags & URB_NO_FSBR))
		urbp->fsbr = 1;
}

static void uhci_urbp_wants_fsbr(struct uhci_hcd *uhci, struct urb_priv *urbp)
{
	if (urbp->fsbr) {
		uhci->fsbr_is_wanted = 1;
		if (!uhci->fsbr_is_on)
			uhci_fsbr_on(uhci);
		else if (uhci->fsbr_expiring) {
			uhci->fsbr_expiring = 0;
			del_timer(&uhci->fsbr_timer);
		}
	}
}

static void uhci_fsbr_timeout(unsigned long _uhci)
{
	struct uhci_hcd *uhci = (struct uhci_hcd *) _uhci;
	unsigned long flags;

	spin_lock_irqsave(&uhci->lock, flags);
	if (uhci->fsbr_expiring) {
		uhci->fsbr_expiring = 0;
		uhci_fsbr_off(uhci);
	}
	spin_unlock_irqrestore(&uhci->lock, flags);
}

static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci)
{
	dma_addr_t dma_handle;
	struct uhci_td *td;

	td = dma_pool_alloc(uhci->td_pool, GFP_ATOMIC, &dma_handle);
	if (!td)
		return NULL;

	td->dma_handle = dma_handle;
	td->frame = -1;

	INIT_LIST_HEAD(&td->list);
	INIT_LIST_HEAD(&td->fl_list);

	return td;
}

static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
	if (!list_empty(&td->list))
		dev_WARN(uhci_dev(uhci), "td %p still in list!\n", td);
	if (!list_empty(&td->fl_list))
		dev_WARN(uhci_dev(uhci), "td %p still in fl_list!\n", td);

	dma_pool_free(uhci->td_pool, td, td->dma_handle);
}

static inline void uhci_fill_td(struct uhci_td *td, u32 status,
		u32 token, u32 buffer)
{
	td->status = cpu_to_le32(status);
	td->token = cpu_to_le32(token);
	td->buffer = cpu_to_le32(buffer);
}

static void uhci_add_td_to_urbp(struct uhci_td *td, struct urb_priv *urbp)
{
	list_add_tail(&td->list, &urbp->td_list);
}

static void uhci_remove_td_from_urbp(struct uhci_td *td)
{
	list_del_init(&td->list);
}

/*
 * We insert Isochronous URBs directly into the frame list at the beginning
 */
static inline void uhci_insert_td_in_frame_list(struct uhci_hcd *uhci,
		struct uhci_td *td, unsigned framenum)
{
	framenum &= (UHCI_NUMFRAMES - 1);

	td->frame = framenum;

	/* Is there a TD already mapped there? */
	if (uhci->frame_cpu[framenum]) {
		struct uhci_td *ftd, *ltd;

		ftd = uhci->frame_cpu[framenum];
		ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);

		list_add_tail(&td->fl_list, &ftd->fl_list);

		td->link = ltd->link;
		wmb();
		ltd->link = LINK_TO_TD(td);
	} else {
		td->link = uhci->frame[framenum];
		wmb();
		uhci->frame[framenum] = LINK_TO_TD(td);
		uhci->frame_cpu[framenum] = td;
	}
}
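
/*
 * Example of the resulting linkage (illustrative only): if frame N
 * already holds TDs A and B, inserting C yields
 *
 *	frame[N] -> A -> B -> C -> (whatever B linked to before)
 *
 * New TDs always go at the end of the frame's chain, behind the TDs of
 * any URBs previously scheduled for the same frame.
 */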
static inline void uhci_remove_td_from_frame_list(struct uhci_hcd *uhci,
		struct uhci_td *td)
{
	/* If it's not inserted, don't remove it */
	if (td->frame == -1) {
		WARN_ON(!list_empty(&td->fl_list));
		return;
	}

	if (uhci->frame_cpu[td->frame] == td) {
		if (list_empty(&td->fl_list)) {
			uhci->frame[td->frame] = td->link;
			uhci->frame_cpu[td->frame] = NULL;
		} else {
			struct uhci_td *ntd;

			ntd = list_entry(td->fl_list.next, struct uhci_td, fl_list);
			uhci->frame[td->frame] = LINK_TO_TD(ntd);
			uhci->frame_cpu[td->frame] = ntd;
		}
	} else {
		struct uhci_td *ptd;

		ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
		ptd->link = td->link;
	}

	list_del_init(&td->fl_list);
	td->frame = -1;
}

static inline void uhci_remove_tds_from_frame(struct uhci_hcd *uhci,
		unsigned int framenum)
{
	struct uhci_td *ftd, *ltd;

	framenum &= (UHCI_NUMFRAMES - 1);

	ftd = uhci->frame_cpu[framenum];
	if (ftd) {
		ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);
		uhci->frame[framenum] = ltd->link;
		uhci->frame_cpu[framenum] = NULL;

		while (!list_empty(&ftd->fl_list))
			list_del_init(ftd->fl_list.prev);
	}
}

/*
 * Remove all the TDs for an Isochronous URB from the frame list
 */
static void uhci_unlink_isochronous_tds(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
	struct uhci_td *td;

	list_for_each_entry(td, &urbp->td_list, list)
		uhci_remove_td_from_frame_list(uhci, td);
}

static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci,
		struct usb_device *udev, struct usb_host_endpoint *hep)
{
	dma_addr_t dma_handle;
	struct uhci_qh *qh;

	qh = dma_pool_alloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle);
	if (!qh)
		return NULL;

	memset(qh, 0, sizeof(*qh));
	qh->dma_handle = dma_handle;

	qh->element = UHCI_PTR_TERM;
	qh->link = UHCI_PTR_TERM;

	INIT_LIST_HEAD(&qh->queue);
	INIT_LIST_HEAD(&qh->node);

	if (udev) {		/* Normal QH */
		qh->type = hep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
		if (qh->type != USB_ENDPOINT_XFER_ISOC) {
			qh->dummy_td = uhci_alloc_td(uhci);
			if (!qh->dummy_td) {
				dma_pool_free(uhci->qh_pool, qh, dma_handle);
				return NULL;
			}
		}
		qh->state = QH_STATE_IDLE;
		qh->hep = hep;
		qh->udev = udev;
		hep->hcpriv = qh;

		if (qh->type == USB_ENDPOINT_XFER_INT ||
				qh->type == USB_ENDPOINT_XFER_ISOC)
			qh->load = usb_calc_bus_time(udev->speed,
					usb_endpoint_dir_in(&hep->desc),
					qh->type == USB_ENDPOINT_XFER_ISOC,
					le16_to_cpu(hep->desc.wMaxPacketSize))
				/ 1000 + 1;

	} else {		/* Skeleton QH */
		qh->state = QH_STATE_ACTIVE;
		qh->type = -1;
	}
	return qh;
}

static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	WARN_ON(qh->state != QH_STATE_IDLE && qh->udev);
	if (!list_empty(&qh->queue))
		dev_WARN(uhci_dev(uhci), "qh %p list not empty!\n", qh);

	list_del(&qh->node);
	if (qh->udev) {
		qh->hep->hcpriv = NULL;
		if (qh->dummy_td)
			uhci_free_td(uhci, qh->dummy_td);
	}
	dma_pool_free(uhci->qh_pool, qh, qh->dma_handle);
}

/*
 * When a queue is stopped and a dequeued URB is given back, adjust
 * the previous TD link (if the URB isn't first on the queue) or
 * save its toggle value (if it is first and is currently executing).
 *
 * Returns 0 if the URB should not yet be given back, 1 otherwise.
 */
static int uhci_cleanup_queue(struct uhci_hcd *uhci, struct uhci_qh *qh,
		struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_td *td;
	int ret = 1;

	/* Isochronous pipes don't use toggles and their TD link pointers
	 * get adjusted during uhci_urb_dequeue().  But since their queues
	 * cannot truly be stopped, we have to watch out for dequeues
	 * occurring after the nominal unlink frame. */
	if (qh->type == USB_ENDPOINT_XFER_ISOC) {
		ret = (uhci->frame_number + uhci->is_stopped !=
				qh->unlink_frame);
		goto done;
	}

	/* If the URB isn't first on its queue, adjust the link pointer
	 * of the last TD in the previous URB.  The toggle doesn't need
	 * to be saved since this URB can't be executing yet. */
	if (qh->queue.next != &urbp->node) {
		struct urb_priv *purbp;
		struct uhci_td *ptd;

		purbp = list_entry(urbp->node.prev, struct urb_priv, node);
		WARN_ON(list_empty(&purbp->td_list));
		ptd = list_entry(purbp->td_list.prev, struct uhci_td,
				list);
		td = list_entry(urbp->td_list.prev, struct uhci_td,
				list);
		ptd->link = td->link;
		goto done;
	}

	/* If the QH element pointer is UHCI_PTR_TERM then the currently
	 * executing URB has already been unlinked, so this one isn't it. */
	if (qh_element(qh) == UHCI_PTR_TERM)
		goto done;
	qh->element = UHCI_PTR_TERM;

	/* Control pipes don't have to worry about toggles */
	if (qh->type == USB_ENDPOINT_XFER_CONTROL)
		goto done;

	/* Save the next toggle value */
	WARN_ON(list_empty(&urbp->td_list));
	td = list_entry(urbp->td_list.next, struct uhci_td, list);
	qh->needs_fixup = 1;
	qh->initial_toggle = uhci_toggle(td_token(td));

done:
	return ret;
}

/*
 * Fix up the data toggles for URBs in a queue, when one of them
 * terminates early (short transfer, error, or dequeued).
 */
static void uhci_fixup_toggles(struct uhci_qh *qh, int skip_first)
{
	struct urb_priv *urbp = NULL;
	struct uhci_td *td;
	unsigned int toggle = qh->initial_toggle;
	unsigned int pipe;

	/* Fixups for a short transfer start with the second URB in the
	 * queue (the short URB is the first). */
	if (skip_first)
		urbp = list_entry(qh->queue.next, struct urb_priv, node);

	/* When starting with the first URB, if the QH element pointer is
	 * still valid then we know the URB's toggles are okay. */
	else if (qh_element(qh) != UHCI_PTR_TERM)
		toggle = 2;

	/* Fix up the toggle for the URBs in the queue.  Normally this
	 * loop won't run more than once: When an error or short transfer
	 * occurs, the queue usually gets emptied. */
	urbp = list_prepare_entry(urbp, &qh->queue, node);
	list_for_each_entry_continue(urbp, &qh->queue, node) {

		/* If the first TD has the right toggle value, we don't
		 * need to change any toggles in this URB */
		td = list_entry(urbp->td_list.next, struct uhci_td, list);
		if (toggle > 1 || uhci_toggle(td_token(td)) == toggle) {
			td = list_entry(urbp->td_list.prev, struct uhci_td,
					list);
			toggle = uhci_toggle(td_token(td)) ^ 1;

		/* Otherwise all the toggles in the URB have to be switched */
		} else {
			list_for_each_entry(td, &urbp->td_list, list) {
				td->token ^= __constant_cpu_to_le32(
							TD_TOKEN_TOGGLE);
				toggle ^= 1;
			}
		}
	}

	wmb();
	pipe = list_entry(qh->queue.next, struct urb_priv, node)->urb->pipe;
	usb_settoggle(qh->udev, usb_pipeendpoint(pipe),
			usb_pipeout(pipe), toggle);
	qh->needs_fixup = 0;
}
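
/*
 * A note on the XOR trick used above (illustrative): the DATA0/DATA1 bit
 * occupies a single position (TD_TOKEN_TOGGLE) in the token, so
 *
 *	td->token ^= __constant_cpu_to_le32(TD_TOKEN_TOGGLE);
 *
 * flips a TD between DATA0 and DATA1 without disturbing the device
 * address, endpoint, PID, or length fields.
 */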
/*
 * Link an Isochronous QH into its skeleton's list
 */
static inline void link_iso(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	list_add_tail(&qh->node, &uhci->skel_iso_qh->node);

	/* Isochronous QHs aren't linked by the hardware */
}

/*
 * Link a high-period interrupt QH into the schedule at the end of its
 * skeleton's list
 */
static void link_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;

	list_add_tail(&qh->node, &uhci->skelqh[qh->skel]->node);

	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
	qh->link = pqh->link;
	wmb();
	pqh->link = LINK_TO_QH(qh);
}

/*
 * Link a period-1 interrupt or async QH into the schedule at the
 * correct spot in the async skeleton's list, and update the FSBR link
 */
static void link_async(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;
	__le32 link_to_new_qh;

	/* Find the predecessor QH for our new one and insert it in the list.
	 * The list of QHs is expected to be short, so linear search won't
	 * take too long. */
	list_for_each_entry_reverse(pqh, &uhci->skel_async_qh->node, node) {
		if (pqh->skel <= qh->skel)
			break;
	}
	list_add(&qh->node, &pqh->node);

	/* Link it into the schedule */
	qh->link = pqh->link;
	wmb();
	link_to_new_qh = LINK_TO_QH(qh);
	pqh->link = link_to_new_qh;

	/* If this is now the first FSBR QH, link the terminating skeleton
	 * QH to it. */
	if (pqh->skel < SKEL_FSBR && qh->skel >= SKEL_FSBR)
		uhci->skel_term_qh->link = link_to_new_qh;
}

/*
 * Put a QH on the schedule in both hardware and software
 */
static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	WARN_ON(list_empty(&qh->queue));

	/* Set the element pointer if it isn't set already.
	 * This isn't needed for Isochronous queues, but it doesn't hurt. */
	if (qh_element(qh) == UHCI_PTR_TERM) {
		struct urb_priv *urbp = list_entry(qh->queue.next,
				struct urb_priv, node);
		struct uhci_td *td = list_entry(urbp->td_list.next,
				struct uhci_td, list);

		qh->element = LINK_TO_TD(td);
	}

	/* Treat the queue as if it has just advanced */
	qh->wait_expired = 0;
	qh->advance_jiffies = jiffies;

	if (qh->state == QH_STATE_ACTIVE)
		return;
	qh->state = QH_STATE_ACTIVE;

	/* Move the QH from its old list to the correct spot in the appropriate
	 * skeleton's list */
	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_del(&qh->node);

	if (qh->skel == SKEL_ISO)
		link_iso(uhci, qh);
	else if (qh->skel < SKEL_ASYNC)
		link_interrupt(uhci, qh);
	else
		link_async(uhci, qh);
}

/*
 * Unlink a high-period interrupt QH from the schedule
 */
static void unlink_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;

	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
	pqh->link = qh->link;
	mb();
}

/*
 * Unlink a period-1 interrupt or async QH from the schedule
 */
static void unlink_async(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;
	__le32 link_to_next_qh = qh->link;

	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
	pqh->link = link_to_next_qh;

	/* If this was the old first FSBR QH, link the terminating skeleton
	 * QH to the next (new first FSBR) QH. */
	if (pqh->skel < SKEL_FSBR && qh->skel >= SKEL_FSBR)
		uhci->skel_term_qh->link = link_to_next_qh;
	mb();
}

/*
 * Take a QH off the hardware schedule
 */
static void uhci_unlink_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	if (qh->state == QH_STATE_UNLINKING)
		return;
	WARN_ON(qh->state != QH_STATE_ACTIVE || !qh->udev);
	qh->state = QH_STATE_UNLINKING;

	/* Unlink the QH from the schedule and record when we did it */
	if (qh->skel == SKEL_ISO)
		;
	else if (qh->skel < SKEL_ASYNC)
		unlink_interrupt(uhci, qh);
	else
		unlink_async(uhci, qh);

	uhci_get_current_frame_number(uhci);
	qh->unlink_frame = uhci->frame_number;

	/* Force an interrupt so we know when the QH is fully unlinked */
	if (list_empty(&uhci->skel_unlink_qh->node))
		uhci_set_next_interrupt(uhci);

	/* Move the QH from its old list to the end of the unlinking list */
	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_move_tail(&qh->node, &uhci->skel_unlink_qh->node);
}

/*
 * When we and the controller are through with a QH, it becomes IDLE.
 * This happens when a QH has been off the schedule (on the unlinking
 * list) for more than one frame, or when an error occurs while adding
 * the first URB onto a new QH.
 */
static void uhci_make_qh_idle(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	WARN_ON(qh->state == QH_STATE_ACTIVE);

	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_move(&qh->node, &uhci->idle_qh_list);
	qh->state = QH_STATE_IDLE;

	/* Now that the QH is idle, its post_td isn't being used */
	if (qh->post_td) {
		uhci_free_td(uhci, qh->post_td);
		qh->post_td = NULL;
	}

	/* If anyone is waiting for a QH to become idle, wake them up */
	if (uhci->num_waiting)
		wake_up_all(&uhci->waitqh);
}

/*
 * Find the highest existing bandwidth load for a given phase and period.
 */
static int uhci_highest_load(struct uhci_hcd *uhci, int phase, int period)
{
	int highest_load = uhci->load[phase];

	for (phase += period; phase < MAX_PHASE; phase += period)
		highest_load = max_t(int, highest_load, uhci->load[phase]);
	return highest_load;
}
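
/*
 * Worked example (illustrative): with MAX_PHASE == 32, a QH with period 8
 * and phase 3 occupies frames 3, 11, 19, and 27 of each 32-frame window,
 * so uhci_highest_load(uhci, 3, 8) returns
 *
 *	max(load[3], load[11], load[19], load[27])
 *
 * i.e. the worst-case usage among the frames this QH would share.
 */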
/*
 * Set qh->phase to the optimal phase for a periodic transfer and
 * check whether the bandwidth requirement is acceptable.
 */
static int uhci_check_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	int minimax_load;

	/* Find the optimal phase (unless it is already set) and get
	 * its load value. */
	if (qh->phase >= 0)
		minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
	else {
		int phase, load;
		int max_phase = min_t(int, MAX_PHASE, qh->period);

		qh->phase = 0;
		minimax_load = uhci_highest_load(uhci, qh->phase, qh->period);
		for (phase = 1; phase < max_phase; ++phase) {
			load = uhci_highest_load(uhci, phase, qh->period);
			if (load < minimax_load) {
				minimax_load = load;
				qh->phase = phase;
			}
		}
	}

	/* Maximum allowable periodic bandwidth is 90%, or 900 us per frame */
	if (minimax_load + qh->load > 900) {
		dev_dbg(uhci_dev(uhci), "bandwidth allocation failed: "
				"period %d, phase %d, %d + %d us\n",
				qh->period, qh->phase, minimax_load, qh->load);
		return -ENOSPC;
	}
	return 0;
}
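
/*
 * Worked example (illustrative): a full-speed frame lasts 1000 us, and
 * the periodic budget is 90% of that, i.e. 900 us.  If the busiest frame
 * in the chosen phase already carries 850 us and the new QH's load is
 * 60 us, then 850 + 60 > 900 and the request fails with -ENOSPC; a 40 us
 * load would be accepted.
 */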
/*
 * Reserve a periodic QH's bandwidth in the schedule
 */
static void uhci_reserve_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	int i;
	int load = qh->load;
	char *p = "??";

	for (i = qh->phase; i < MAX_PHASE; i += qh->period) {
		uhci->load[i] += load;
		uhci->total_load += load;
	}
	uhci_to_hcd(uhci)->self.bandwidth_allocated =
			uhci->total_load / MAX_PHASE;
	switch (qh->type) {
	case USB_ENDPOINT_XFER_INT:
		++uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
		p = "INT";
		break;
	case USB_ENDPOINT_XFER_ISOC:
		++uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
		p = "ISO";
		break;
	}
	qh->bandwidth_reserved = 1;
	dev_dbg(uhci_dev(uhci),
			"%s dev %d ep%02x-%s, period %d, phase %d, %d us\n",
			"reserve", qh->udev->devnum,
			qh->hep->desc.bEndpointAddress, p,
			qh->period, qh->phase, load);
}

/*
 * Release a periodic QH's bandwidth reservation
 */
static void uhci_release_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	int i;
	int load = qh->load;
	char *p = "??";

	for (i = qh->phase; i < MAX_PHASE; i += qh->period) {
		uhci->load[i] -= load;
		uhci->total_load -= load;
	}
	uhci_to_hcd(uhci)->self.bandwidth_allocated =
			uhci->total_load / MAX_PHASE;
	switch (qh->type) {
	case USB_ENDPOINT_XFER_INT:
		--uhci_to_hcd(uhci)->self.bandwidth_int_reqs;
		p = "INT";
		break;
	case USB_ENDPOINT_XFER_ISOC:
		--uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs;
		p = "ISO";
		break;
	}
	qh->bandwidth_reserved = 0;
	dev_dbg(uhci_dev(uhci),
			"%s dev %d ep%02x-%s, period %d, phase %d, %d us\n",
			"release", qh->udev->devnum,
			qh->hep->desc.bEndpointAddress, p,
			qh->period, qh->phase, load);
}

static inline struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci,
		struct urb *urb)
{
	struct urb_priv *urbp;

	urbp = kmem_cache_zalloc(uhci_up_cachep, GFP_ATOMIC);
	if (!urbp)
		return NULL;

	urbp->urb = urb;
	urb->hcpriv = urbp;

	INIT_LIST_HEAD(&urbp->node);
	INIT_LIST_HEAD(&urbp->td_list);

	return urbp;
}

static void uhci_free_urb_priv(struct uhci_hcd *uhci,
		struct urb_priv *urbp)
{
	struct uhci_td *td, *tmp;

	if (!list_empty(&urbp->node))
		dev_WARN(uhci_dev(uhci), "urb %p still on QH's list!\n",
				urbp->urb);

	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
		uhci_remove_td_from_urbp(td);
		uhci_free_td(uhci, td);
	}

	kmem_cache_free(uhci_up_cachep, urbp);
}

/*
 * Map status to standard result codes
 *
 * <status> is (td_status(td) & 0xF60000), a.k.a.
 * uhci_status_bits(td_status(td)).
 * Note: <status> does not include the TD_CTRL_NAK bit.
 * <dir_out> is True for output TDs and False for input TDs.
 */
static int uhci_map_status(int status, int dir_out)
{
	if (!status)
		return 0;
	if (status & TD_CTRL_BITSTUFF)			/* Bitstuff error */
		return -EPROTO;
	if (status & TD_CTRL_CRCTIMEO) {		/* CRC/Timeout */
		if (dir_out)
			return -EPROTO;
		else
			return -EILSEQ;
	}
	if (status & TD_CTRL_BABBLE)			/* Babble */
		return -EOVERFLOW;
	if (status & TD_CTRL_DBUFERR)			/* Buffer error */
		return -ENOSR;
	if (status & TD_CTRL_STALLED)			/* Stalled */
		return -EPIPE;
	return 0;
}
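
/*
 * Usage sketch (illustrative): for an IN TD that failed with a CRC
 * error, a caller would do something like
 *
 *	ret = uhci_map_status(uhci_status_bits(td_status(td)),
 *			uhci_packetout(td_token(td)));
 *
 * and get back -EILSEQ; the same error on an OUT TD maps to -EPROTO,
 * since a CRC/timeout on output indicates a protocol-level failure.
 */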
/*
 * Control transfers
 */
static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td;
	unsigned long destination, status;
	int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize);
	int len = urb->transfer_buffer_length;
	dma_addr_t data = urb->transfer_dma;
	__le32 *plink;
	struct urb_priv *urbp = urb->hcpriv;
	int skel;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;

	/* 3 errors, dummy TD remains inactive */
	status = uhci_maxerr(3);
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;

	/*
	 * Build the TD for the control request setup packet
	 */
	td = qh->dummy_td;
	uhci_add_td_to_urbp(td, urbp);
	uhci_fill_td(td, status, destination | uhci_explen(8),
			urb->setup_dma);
	plink = &td->link;
	status |= TD_CTRL_ACTIVE;

	/*
	 * If direction is "send", change the packet ID from SETUP (0x2D)
	 * to OUT (0xE1).  Else change it from SETUP to IN (0x69) and
	 * set Short Packet Detect (SPD) for all data packets.
	 *
	 * 0-length transfers always get treated as "send".
	 */
	if (usb_pipeout(urb->pipe) || len == 0)
		destination ^= (USB_PID_SETUP ^ USB_PID_OUT);
	else {
		destination ^= (USB_PID_SETUP ^ USB_PID_IN);
		status |= TD_CTRL_SPD;
	}

	/*
	 * Build the DATA TDs
	 */
	while (len > 0) {
		int pktsze = maxsze;

		if (len <= pktsze) {		/* The last data packet */
			pktsze = len;
			status &= ~TD_CTRL_SPD;
		}

		td = uhci_alloc_td(uhci);
		if (!td)
			goto nomem;
		*plink = LINK_TO_TD(td);

		/* Alternate Data0/1 (start with Data1) */
		destination ^= TD_TOKEN_TOGGLE;

		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(td, status, destination | uhci_explen(pktsze),
				data);
		plink = &td->link;

		data += pktsze;
		len -= pktsze;
	}

	/*
	 * Build the final TD for control status
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		goto nomem;
	*plink = LINK_TO_TD(td);

	/* Change direction for the status transaction */
	destination ^= (USB_PID_IN ^ USB_PID_OUT);
	destination |= TD_TOKEN_TOGGLE;		/* End in Data1 */

	uhci_add_td_to_urbp(td, urbp);
	uhci_fill_td(td, status | TD_CTRL_IOC,
			destination | uhci_explen(0), 0);
	plink = &td->link;

	/*
	 * Build the new dummy TD and activate the old one
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		goto nomem;
	*plink = LINK_TO_TD(td);

	uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0);
	wmb();
	qh->dummy_td->status |= __constant_cpu_to_le32(TD_CTRL_ACTIVE);
	qh->dummy_td = td;

	/* Low-speed transfers get a different queue, and won't hog the bus.
	 * Also, some devices enumerate better without FSBR; the easiest way
	 * to do that is to put URBs on the low-speed queue while the device
	 * isn't in the CONFIGURED state. */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->state != USB_STATE_CONFIGURED)
		skel = SKEL_LS_CONTROL;
	else {
		skel = SKEL_FS_CONTROL;
		uhci_add_fsbr(uhci, urb);
	}
	if (qh->state != QH_STATE_ACTIVE)
		qh->skel = skel;

	urb->actual_length = -8;	/* Account for the SETUP packet */
	return 0;

nomem:
	/* Remove the dummy TD from the td_list so it doesn't get freed */
	uhci_remove_td_from_urbp(qh->dummy_td);
	return -ENOMEM;
}
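
/*
 * Shape of the queue built above (illustrative): for a control read
 * spanning two data packets, the TD chain hanging off the QH ends up as
 *
 *	[SETUP, DATA0] -> [IN data, DATA1] -> [IN data, DATA0]
 *		-> [OUT status, DATA1, IOC] -> [new dummy, inactive]
 *
 * The QH's old dummy TD became the SETUP TD, and the whole transfer only
 * starts once that TD's Active bit is set at the very end.
 */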
/*
 * Common submit for bulk and interrupt
 */
static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td;
	unsigned long destination, status;
	int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize);
	int len = urb->transfer_buffer_length;
	dma_addr_t data = urb->transfer_dma;
	__le32 *plink;
	struct urb_priv *urbp = urb->hcpriv;
	unsigned int toggle;

	if (len < 0)
		return -EINVAL;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
	toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe));

	/* 3 errors, dummy TD remains inactive */
	status = uhci_maxerr(3);
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;
	if (usb_pipein(urb->pipe))
		status |= TD_CTRL_SPD;

	/*
	 * Build the DATA TDs
	 */
	plink = NULL;
	td = qh->dummy_td;
	do {	/* Allow zero length packets */
		int pktsze = maxsze;

		if (len <= pktsze) {		/* The last packet */
			pktsze = len;
			if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
				status &= ~TD_CTRL_SPD;
		}

		if (plink) {
			td = uhci_alloc_td(uhci);
			if (!td)
				goto nomem;
			*plink = LINK_TO_TD(td);
		}
		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(td, status,
				destination | uhci_explen(pktsze) |
					(toggle << TD_TOKEN_TOGGLE_SHIFT),
				data);
		plink = &td->link;
		status |= TD_CTRL_ACTIVE;

		data += pktsze;
		len -= maxsze;
		toggle ^= 1;
	} while (len > 0);

	/*
	 * URB_ZERO_PACKET means adding a 0-length packet, if direction
	 * is OUT and the transfer_length was an exact multiple of maxsze,
	 * hence (len = transfer_length - N * maxsze) == 0
	 * however, if transfer_length == 0, the zero packet was already
	 * prepared above.
	 */
	if ((urb->transfer_flags & URB_ZERO_PACKET) &&
			usb_pipeout(urb->pipe) && len == 0 &&
			urb->transfer_buffer_length > 0) {
		td = uhci_alloc_td(uhci);
		if (!td)
			goto nomem;
		*plink = LINK_TO_TD(td);

		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(td, status,
				destination | uhci_explen(0) |
					(toggle << TD_TOKEN_TOGGLE_SHIFT),
				data);
		plink = &td->link;

		toggle ^= 1;
	}

	/* Set the interrupt-on-completion flag on the last packet.
	 * A more-or-less typical 4 KB URB (= size of one memory page)
	 * will require about 3 ms to transfer; that's a little on the
	 * fast side but not enough to justify delaying an interrupt
	 * more than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT
	 * flag setting. */
	td->status |= __constant_cpu_to_le32(TD_CTRL_IOC);

	/*
	 * Build the new dummy TD and activate the old one
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		goto nomem;
	*plink = LINK_TO_TD(td);

	uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0);
	wmb();
	qh->dummy_td->status |= __constant_cpu_to_le32(TD_CTRL_ACTIVE);
	qh->dummy_td = td;

	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe), toggle);
	return 0;

nomem:
	/* Remove the dummy TD from the td_list so it doesn't get freed */
	uhci_remove_td_from_urbp(qh->dummy_td);
	return -ENOMEM;
}
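
/*
 * Note on the dummy-TD handoff used above (a design sketch, not extra
 * logic): the chain is built while its first TD -- the QH's old dummy --
 * is still inactive, so the controller never sees a partially built
 * queue.  Only after the new dummy is linked in and wmb() has ordered
 * the writes does setting the old dummy's Active bit hand the entire
 * chain to the hardware in one step.
 */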
static int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	int ret;

	/* Can't have low-speed bulk transfers */
	if (urb->dev->speed == USB_SPEED_LOW)
		return -EINVAL;

	if (qh->state != QH_STATE_ACTIVE)
		qh->skel = SKEL_BULK;
	ret = uhci_submit_common(uhci, urb, qh);
	if (ret == 0)
		uhci_add_fsbr(uhci, urb);
	return ret;
}

static int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	int ret;

	/* USB 1.1 interrupt transfers only involve one packet per interval.
	 * Drivers can submit URBs of any length, but longer ones will need
	 * multiple intervals to complete.
	 */

	if (!qh->bandwidth_reserved) {
		int exponent;

		/* Figure out which power-of-two queue to use */
		for (exponent = 7; exponent >= 0; --exponent) {
			if ((1 << exponent) <= urb->interval)
				break;
		}
		if (exponent < 0)
			return -EINVAL;

		/* If the slot is full, try a lower period */
		do {
			qh->period = 1 << exponent;
			qh->skel = SKEL_INDEX(exponent);

			/* For now, interrupt phase is fixed by the layout
			 * of the QH lists.
			 */
			qh->phase = (qh->period / 2) & (MAX_PHASE - 1);
			ret = uhci_check_bandwidth(uhci, qh);
		} while (ret != 0 && --exponent >= 0);
		if (ret)
			return ret;
	} else if (qh->period > urb->interval)
		return -EINVAL;		/* Can't decrease the period */

	ret = uhci_submit_common(uhci, urb, qh);
	if (ret == 0) {
		urb->interval = qh->period;
		if (!qh->bandwidth_reserved)
			uhci_reserve_bandwidth(uhci, qh);
	}
	return ret;
}
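
/*
 * Worked example (illustrative): an endpoint asking for interval 10
 * matches exponent 3 (1 << 3 == 8 <= 10), so the URB goes on the
 * period-8 skeleton queue and urb->interval is rounded down to 8.  If
 * the period-8 phases are all too loaded, exponents 2, 1, and 0
 * (periods 4, 2, and 1) are tried in turn before giving up.
 */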
/*
 * Fix up the data structures following a short transfer
 */
static int uhci_fixup_short_transfer(struct uhci_hcd *uhci,
		struct uhci_qh *qh, struct urb_priv *urbp)
{
	struct uhci_td *td;
	struct list_head *tmp;
	int ret;

	td = list_entry(urbp->td_list.prev, struct uhci_td, list);
	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {

		/* When a control transfer is short, we have to restart
		 * the queue at the status stage transaction, which is
		 * the last TD. */
		WARN_ON(list_empty(&urbp->td_list));
		qh->element = LINK_TO_TD(td);
		tmp = td->list.prev;
		ret = -EINPROGRESS;

	} else {

		/* When a bulk/interrupt transfer is short, we have to
		 * fix up the toggles of the following URBs on the queue
		 * before restarting the queue at the next URB. */
		qh->initial_toggle = uhci_toggle(td_token(qh->post_td)) ^ 1;
		uhci_fixup_toggles(qh, 1);

		if (list_empty(&urbp->td_list))
			td = qh->post_td;
		qh->element = td->link;
		tmp = urbp->td_list.prev;
		ret = 0;
	}

	/* Remove all the TDs we skipped over, from tmp back to the start */
	while (tmp != &urbp->td_list) {
		td = list_entry(tmp, struct uhci_td, list);
		tmp = tmp->prev;

		uhci_remove_td_from_urbp(td);
		uhci_free_td(uhci, td);
	}
	return ret;
}

/*
 * Common result for control, bulk, and interrupt
 */
static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_qh *qh = urbp->qh;
	struct uhci_td *td, *tmp;
	unsigned status;
	int ret = 0;

	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
		unsigned int ctrlstat;
		int len;

		ctrlstat = td_status(td);
		status = uhci_status_bits(ctrlstat);
		if (status & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		len = uhci_actual_length(ctrlstat);
		urb->actual_length += len;

		if (status) {
			ret = uhci_map_status(status,
					uhci_packetout(td_token(td)));
			if ((debug == 1 && ret != -EPIPE) || debug > 1) {
				/* Some debugging code */
				dev_dbg(&urb->dev->dev,
						"%s: failed with status %x\n",
						__func__, status);

				if (debug > 1 && errbuf) {
					/* Print the chain for debugging */
					uhci_show_qh(uhci, urbp->qh, errbuf,
							ERRBUF_LEN, 0);
					lprintk(errbuf);
				}
			}

		/* Did we receive a short packet? */
		} else if (len < uhci_expected_length(td_token(td))) {

			/* For control transfers, go to the status TD if
			 * this isn't already the last data TD */
			if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
				if (td->list.next != urbp->td_list.prev)
					ret = 1;
			}

			/* For bulk and interrupt, this may be an error */
			else if (urb->transfer_flags & URB_SHORT_NOT_OK)
				ret = -EREMOTEIO;

			/* Fixup needed only if this isn't the URB's last TD */
			else if (&td->list != urbp->td_list.prev)
				ret = 1;
		}

		uhci_remove_td_from_urbp(td);
		if (qh->post_td)
			uhci_free_td(uhci, qh->post_td);
		qh->post_td = td;

		if (ret != 0)
			goto err;
	}
	return ret;

err:
	if (ret < 0) {
		/* Note that the queue has stopped and save
		 * the next toggle value */
		qh->element = UHCI_PTR_TERM;
		qh->is_stopped = 1;
		qh->needs_fixup = (qh->type != USB_ENDPOINT_XFER_CONTROL);
		qh->initial_toggle = uhci_toggle(td_token(td)) ^
				(ret == -EREMOTEIO);

	} else		/* Short packet received */
		ret = uhci_fixup_short_transfer(uhci, qh, urbp);
	return ret;
}

/*
 * Isochronous transfers
 */
static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td = NULL;	/* Since urb->number_of_packets > 0 */
	int i, frame;
	unsigned long destination, status;
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;

	/* Values must not be too big (could overflow below) */
	if (urb->interval >= UHCI_NUMFRAMES ||
			urb->number_of_packets >= UHCI_NUMFRAMES)
		return -EFBIG;

	/* Check the period and figure out the starting frame number */
	if (!qh->bandwidth_reserved) {
		qh->period = urb->interval;
		if (urb->transfer_flags & URB_ISO_ASAP) {
			qh->phase = -1;		/* Find the best phase */
			i = uhci_check_bandwidth(uhci, qh);
			if (i)
				return i;

			/* Allow a little time to allocate the TDs */
			uhci_get_current_frame_number(uhci);
			frame = uhci->frame_number + 10;

			/* Move forward to the first frame having the
			 * correct phase */
			urb->start_frame = frame + ((qh->phase - frame) &
					(qh->period - 1));
		} else {
			i = urb->start_frame - uhci->last_iso_frame;
			if (i <= 0 || i >= UHCI_NUMFRAMES)
				return -EINVAL;
			qh->phase = urb->start_frame & (qh->period - 1);
			i = uhci_check_bandwidth(uhci, qh);
			if (i)
				return i;
		}

	} else if (qh->period != urb->interval) {
		return -EINVAL;		/* Can't change the period */

	} else {
		/* Find the next unused frame */
		if (list_empty(&qh->queue)) {
			frame = qh->iso_frame;
		} else {
			struct urb *lurb;

			lurb = list_entry(qh->queue.prev,
					struct urb_priv, node)->urb;
			frame = lurb->start_frame +
					lurb->number_of_packets *
					lurb->interval;
		}
		if (urb->transfer_flags & URB_ISO_ASAP) {
			/* Skip some frames if necessary to ensure
			 * the start frame is in the future.
			 */
			uhci_get_current_frame_number(uhci);
			if (uhci_frame_before_eq(frame, uhci->frame_number)) {
				frame = uhci->frame_number + 1;
				frame += ((qh->phase - frame) &
					(qh->period - 1));
			}
		}	/* Otherwise pick up where the last URB leaves off */
		urb->start_frame = frame;
	}

	/* Make sure we won't have to go too far into the future */
	if (uhci_frame_before_eq(uhci->last_iso_frame + UHCI_NUMFRAMES,
			urb->start_frame + urb->number_of_packets *
				urb->interval))
		return -EFBIG;

	status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);

	for (i = 0; i < urb->number_of_packets; i++) {
		td = uhci_alloc_td(uhci);
		if (!td)
			return -ENOMEM;

		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(td, status, destination |
				uhci_explen(urb->iso_frame_desc[i].length),
				urb->transfer_dma +
					urb->iso_frame_desc[i].offset);
	}

	/* Set the interrupt-on-completion flag on the last packet. */
	td->status |= __constant_cpu_to_le32(TD_CTRL_IOC);

	/* Add the TDs to the frame list */
	frame = urb->start_frame;
	list_for_each_entry(td, &urbp->td_list, list) {
		uhci_insert_td_in_frame_list(uhci, td, frame);
		frame += qh->period;
	}

	if (list_empty(&qh->queue)) {
		qh->iso_packet_desc = &urb->iso_frame_desc[0];
		qh->iso_frame = urb->start_frame;
	}

	qh->skel = SKEL_ISO;
	if (!qh->bandwidth_reserved)
		uhci_reserve_bandwidth(uhci, qh);
	return 0;
}
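
/*
 * Worked example of the phase alignment above (illustrative): with
 * period 8, phase 5, and a provisional start of frame 1002,
 *
 *	start_frame = 1002 + ((5 - 1002) & 7) = 1002 + 3 = 1005
 *
 * and 1005 & 7 == 5: the first frame at or after the proposed one with
 * the right phase.  This trick relies on the period being a power of 2.
 */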
static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
{
	struct uhci_td *td, *tmp;
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_qh *qh = urbp->qh;

	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
		unsigned int ctrlstat;
		int status;
		int actlength;

		if (uhci_frame_before_eq(uhci->cur_iso_frame, qh->iso_frame))
			return -EINPROGRESS;

		uhci_remove_tds_from_frame(uhci, qh->iso_frame);

		ctrlstat = td_status(td);
		if (ctrlstat & TD_CTRL_ACTIVE) {
			status = -EXDEV;	/* TD was added too late? */
		} else {
			status = uhci_map_status(uhci_status_bits(ctrlstat),
					usb_pipeout(urb->pipe));
			actlength = uhci_actual_length(ctrlstat);

			urb->actual_length += actlength;
			qh->iso_packet_desc->actual_length = actlength;
			qh->iso_packet_desc->status = status;
		}
		if (status)
			urb->error_count++;

		uhci_remove_td_from_urbp(td);
		uhci_free_td(uhci, td);
		qh->iso_frame += qh->period;
		++qh->iso_packet_desc;
	}
	return 0;
}

static int uhci_urb_enqueue(struct usb_hcd *hcd,
		struct urb *urb, gfp_t mem_flags)
{
	int ret;
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct urb_priv *urbp;
	struct uhci_qh *qh;

	spin_lock_irqsave(&uhci->lock, flags);

	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	if (ret)
		goto done_not_linked;

	ret = -ENOMEM;
	urbp = uhci_alloc_urb_priv(uhci, urb);
	if (!urbp)
		goto done;

	if (urb->ep->hcpriv)
		qh = urb->ep->hcpriv;
	else {
		qh = uhci_alloc_qh(uhci, urb->dev, urb->ep);
		if (!qh)
			goto err_no_qh;
	}
	urbp->qh = qh;

	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		ret = uhci_submit_control(uhci, urb, qh);
		break;
	case USB_ENDPOINT_XFER_BULK:
		ret = uhci_submit_bulk(uhci, urb, qh);
		break;
	case USB_ENDPOINT_XFER_INT:
		ret = uhci_submit_interrupt(uhci, urb, qh);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		urb->error_count = 0;
		ret = uhci_submit_isochronous(uhci, urb, qh);
		break;
	}
	if (ret != 0)
		goto err_submit_failed;

	/* Add this URB to the QH */
	list_add_tail(&urbp->node, &qh->queue);

	/* If the new URB is the first and only one on this QH then either
	 * the QH is new and idle or else it's unlinked and waiting to
	 * become idle, so we can activate it right away.  But only if the
	 * queue isn't stopped. */
	if (qh->queue.next == &urbp->node && !qh->is_stopped) {
		uhci_activate_qh(uhci, qh);
		uhci_urbp_wants_fsbr(uhci, urbp);
	}
	goto done;

err_submit_failed:
	if (qh->state == QH_STATE_IDLE)
		uhci_make_qh_idle(uhci, qh);	/* Reclaim unused QH */
err_no_qh:
	uhci_free_urb_priv(uhci, urbp);
done:
	if (ret)
		usb_hcd_unlink_urb_from_ep(hcd, urb);
done_not_linked:
	spin_unlock_irqrestore(&uhci->lock, flags);
	return ret;
}

static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct uhci_qh *qh;
	int rc;

	spin_lock_irqsave(&uhci->lock, flags);
	rc = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (rc)
		goto done;

	qh = ((struct urb_priv *) urb->hcpriv)->qh;

	/* Remove Isochronous TDs from the frame list ASAP */
	if (qh->type == USB_ENDPOINT_XFER_ISOC) {
		uhci_unlink_isochronous_tds(uhci, urb);
		mb();

		/* If the URB has already started, update the QH unlink time */
		uhci_get_current_frame_number(uhci);
		if (uhci_frame_before_eq(urb->start_frame, uhci->frame_number))
			qh->unlink_frame = uhci->frame_number;
	}

	uhci_unlink_qh(uhci, qh);

done:
	spin_unlock_irqrestore(&uhci->lock, flags);
	return rc;
}

/*
 * Finish unlinking an URB and give it back
 */
static void uhci_giveback_urb(struct uhci_hcd *uhci, struct uhci_qh *qh,
		struct urb *urb, int status)
__releases(uhci->lock)
__acquires(uhci->lock)
{
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;

	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {

		/* urb->actual_length < 0 means the setup transaction didn't
		 * complete successfully.  Either it failed or the URB was
		 * unlinked first.  Regardless, don't confuse people with a
		 * negative length. */
		urb->actual_length = max(urb->actual_length, 0);
	}

	/* When giving back the first URB in an Isochronous queue,
	 * reinitialize the QH's iso-related members for the next URB. */
	else if (qh->type == USB_ENDPOINT_XFER_ISOC &&
			urbp->node.prev == &qh->queue &&
			urbp->node.next != &qh->queue) {
		struct urb *nurb = list_entry(urbp->node.next,
				struct urb_priv, node)->urb;

		qh->iso_packet_desc = &nurb->iso_frame_desc[0];
		qh->iso_frame = nurb->start_frame;
	}

	/* Take the URB off the QH's queue.  If the queue is now empty,
	 * this is a perfect time for a toggle fixup. */
	list_del_init(&urbp->node);
	if (list_empty(&qh->queue) && qh->needs_fixup) {
		usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
				usb_pipeout(urb->pipe), qh->initial_toggle);
		qh->needs_fixup = 0;
	}

	uhci_free_urb_priv(uhci, urbp);
	usb_hcd_unlink_urb_from_ep(uhci_to_hcd(uhci), urb);

	spin_unlock(&uhci->lock);
	usb_hcd_giveback_urb(uhci_to_hcd(uhci), urb, status);
	spin_lock(&uhci->lock);

	/* If the queue is now empty, we can unlink the QH and give up its
	 * reserved bandwidth. */
	if (list_empty(&qh->queue)) {
		uhci_unlink_qh(uhci, qh);
		if (qh->bandwidth_reserved)
			uhci_release_bandwidth(uhci, qh);
	}
}

/*
 * Scan the URBs in a QH's queue
 */
#define QH_FINISHED_UNLINKING(qh)			\
		(qh->state == QH_STATE_UNLINKING &&	\
		 uhci->frame_number + uhci->is_stopped != qh->unlink_frame)
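
/*
 * Reading the macro above (illustrative): a QH unlinked during frame N
 * may still be in use by the controller until frame N ends, so it has
 * "finished unlinking" only once frame_number has moved past
 * unlink_frame.  When the controller is stopped, is_stopped is nonzero
 * and the test succeeds immediately, since the hardware can no longer
 * be touching the QH.
 */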
static void uhci_scan_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct urb_priv *urbp;
	struct urb *urb;
	int status;

	while (!list_empty(&qh->queue)) {
		urbp = list_entry(qh->queue.next, struct urb_priv, node);
		urb = urbp->urb;

		if (qh->type == USB_ENDPOINT_XFER_ISOC)
			status = uhci_result_isochronous(uhci, urb);
		else
			status = uhci_result_common(uhci, urb);
		if (status == -EINPROGRESS)
			break;

		/* Dequeued but completed URBs can't be given back unless
		 * the QH is stopped or has finished unlinking. */
		if (urb->unlinked) {
			if (QH_FINISHED_UNLINKING(qh))
				qh->is_stopped = 1;
			else if (!qh->is_stopped)
				return;
		}

		uhci_giveback_urb(uhci, qh, urb, status);
		if (status < 0)
			break;
	}

	/* If the QH is neither stopped nor finished unlinking (normal case),
	 * our work here is done. */
	if (QH_FINISHED_UNLINKING(qh))
		qh->is_stopped = 1;
	else if (!qh->is_stopped)
		return;

	/* Otherwise give back each of the dequeued URBs */
restart:
	list_for_each_entry(urbp, &qh->queue, node) {
		urb = urbp->urb;
		if (urb->unlinked) {

			/* Fix up the TD links and save the toggles for
			 * non-Isochronous queues.  For Isochronous queues,
			 * test for too-recent dequeues. */
			if (!uhci_cleanup_queue(uhci, qh, urb)) {
				qh->is_stopped = 0;
				return;
			}
			uhci_giveback_urb(uhci, qh, urb, 0);
			goto restart;
		}
	}
	qh->is_stopped = 0;

	/* There are no more dequeued URBs.  If there are still URBs on the
	 * queue, the QH can now be re-activated. */
	if (!list_empty(&qh->queue)) {
		if (qh->needs_fixup)
			uhci_fixup_toggles(qh, 0);

		/* If the first URB on the queue wants FSBR but its time
		 * limit has expired, set the next TD to interrupt on
		 * completion before reactivating the QH. */
		urbp = list_entry(qh->queue.next, struct urb_priv, node);
		if (urbp->fsbr && qh->wait_expired) {
			struct uhci_td *td = list_entry(urbp->td_list.next,
					struct uhci_td, list);

			td->status |= __cpu_to_le32(TD_CTRL_IOC);
		}

		uhci_activate_qh(uhci, qh);
	}

	/* The queue is empty.  The QH can become idle if it is fully
	 * unlinked. */
	else if (QH_FINISHED_UNLINKING(qh))
		uhci_make_qh_idle(uhci, qh);
}

/*
 * Check for queues that have made some forward progress.
 * Returns 0 if the queue is not Isochronous, is ACTIVE, and
 * has not advanced since last examined; 1 otherwise.
 *
 * Early Intel controllers have a bug which causes qh->element sometimes
 * not to advance when a TD completes successfully.  The queue remains
 * stuck on the inactive completed TD.  We detect such cases and advance
 * the element pointer by hand.
 */
static int uhci_advance_check(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct urb_priv *urbp = NULL;
	struct uhci_td *td;
	int ret = 1;
	unsigned status;

	if (qh->type == USB_ENDPOINT_XFER_ISOC)
		goto done;

	/* Treat an UNLINKING queue as though it hasn't advanced.
	 * This is okay because reactivation will treat it as though
	 * it has advanced, and if it is going to become IDLE then
	 * this doesn't matter anyway.  Furthermore it's possible
	 * for an UNLINKING queue not to have any URBs at all, or
	 * for its first URB not to have any TDs (if it was dequeued
	 * just as it completed).  So it's not easy in any case to
	 * test whether such queues have advanced. */
	if (qh->state != QH_STATE_ACTIVE) {
		urbp = NULL;
		status = 0;

	} else {
		urbp = list_entry(qh->queue.next, struct urb_priv, node);
		td = list_entry(urbp->td_list.next, struct uhci_td, list);
		status = td_status(td);
		if (!(status & TD_CTRL_ACTIVE)) {

			/* We're okay, the queue has advanced */
			qh->wait_expired = 0;
			qh->advance_jiffies = jiffies;
			goto done;
		}
		ret = uhci->is_stopped;
	}

	/* The queue hasn't advanced; check for timeout */
	if (qh->wait_expired)
		goto done;

	if (time_after(jiffies, qh->advance_jiffies + QH_WAIT_TIMEOUT)) {

		/* Detect the Intel bug and work around it */
		if (qh->post_td && qh_element(qh) == LINK_TO_TD(qh->post_td)) {
			qh->element = qh->post_td->link;
			qh->advance_jiffies = jiffies;
			ret = 1;
			goto done;
		}

		qh->wait_expired = 1;

		/* If the current URB wants FSBR, unlink it temporarily
		 * so that we can safely set the next TD to interrupt on
		 * completion.  That way we'll know as soon as the queue
		 * starts moving again. */
		if (urbp && urbp->fsbr && !(status & TD_CTRL_IOC))
			uhci_unlink_qh(uhci, qh);

	} else {
		/* Unmoving but not-yet-expired queues keep FSBR alive */
		if (urbp)
			uhci_urbp_wants_fsbr(uhci, urbp);
	}

done:
	return ret;
}

/*
 * Process events in the schedule, but only in one thread at a time
 */
static void uhci_scan_schedule(struct uhci_hcd *uhci)
{
	int i;
	struct uhci_qh *qh;

	/* Don't allow re-entrant calls */
	if (uhci->scan_in_progress) {
		uhci->need_rescan = 1;
		return;
	}
	uhci->scan_in_progress = 1;
rescan:
	uhci->need_rescan = 0;
	uhci->fsbr_is_wanted = 0;

	uhci_clear_next_interrupt(uhci);
	uhci_get_current_frame_number(uhci);
	uhci->cur_iso_frame = uhci->frame_number;

	/* Go through all the QH queues and process the URBs in each one */
	for (i = 0; i < UHCI_NUM_SKELQH - 1; ++i) {
		uhci->next_qh = list_entry(uhci->skelqh[i]->node.next,
				struct uhci_qh, node);
		while ((qh = uhci->next_qh) != uhci->skelqh[i]) {
			uhci->next_qh = list_entry(qh->node.next,
					struct uhci_qh, node);

			if (uhci_advance_check(uhci, qh)) {
				uhci_scan_qh(uhci, qh);
				if (qh->state == QH_STATE_ACTIVE) {
					uhci_urbp_wants_fsbr(uhci,
	list_entry(qh->queue.next, struct urb_priv, node));
				}
			}
		}
	}

	uhci->last_iso_frame = uhci->cur_iso_frame;
	if (uhci->need_rescan)
		goto rescan;
	uhci->scan_in_progress = 0;

	if (uhci->fsbr_is_on && !uhci->fsbr_is_wanted &&
			!uhci->fsbr_expiring) {
		uhci->fsbr_expiring = 1;
		mod_timer(&uhci->fsbr_timer, jiffies + FSBR_OFF_DELAY);
	}

	if (list_empty(&uhci->skel_unlink_qh->node))
		uhci_clear_next_interrupt(uhci);
	else
		uhci_set_next_interrupt(uhci);
}