/*
 * amd5536.c -- AMD 5536 UDC high/full speed USB device controller
 *
 * Copyright (C) 2005-2007 AMD (http://www.amd.com)
 * Author: Thomas Dahlmann
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

/*
 * The AMD5536 UDC is part of the x86 southbridge AMD Geode CS5536.
 * It is a USB Highspeed DMA capable USB device controller. Beside ep0 it
 * provides 4 IN and 4 OUT endpoints (bulk or interrupt type).
 *
 * Make sure that UDC is assigned to port 4 by BIOS settings (port can also
 * be used as host port) and UOC bits PAD_EN and APU are set (should be done
 * by BIOS init).
 *
 * UDC DMA requires 32-bit aligned buffers so DMA with gadget ether does not
 * work without updating NET_IP_ALIGN. Or PIO mode (module param "use_dma=0")
 * can be used with gadget ether.
 */
 
/* #define UDC_VERBOSE */

#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
#define UDC_DRIVER_VERSION_STRING	"01.00.0206 - $Revision: #3 $"
 
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/ioctl.h>
#include <linux/fs.h>
#include <linux/dmapool.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/irq.h>

#include <asm/byteorder.h>
#include <asm/system.h>
#include <asm/unaligned.h>

/* gadget stack */
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

/* udc specific */
#include "amd5536udc.h"
 
static void udc_tasklet_disconnect(unsigned long);
static void empty_req_queue(struct udc_ep *);
static int udc_probe(struct udc *dev);
static void udc_basic_init(struct udc *dev);
static void udc_setup_endpoints(struct udc *dev);
static void udc_soft_reset(struct udc *dev);
static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep);
static void udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq);
static int udc_free_dma_chain(struct udc *dev, struct udc_request *req);
static int udc_create_dma_chain(struct udc_ep *ep, struct udc_request *req,
				unsigned long buf_len, gfp_t gfp_flags);
static int udc_remote_wakeup(struct udc *dev);
static int udc_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static void udc_pci_remove(struct pci_dev *pdev);

static const char mod_desc[] = UDC_MOD_DESCRIPTION;
static const char name[] = "amd5536udc";
 
/* structure to hold endpoint function pointers */
static const struct usb_ep_ops udc_ep_ops;

/* received setup data */
static union udc_setup_data setup_data;

/* pointer to device object */
static struct udc *udc;

/* irq spin lock for soft reset */
static DEFINE_SPINLOCK(udc_irq_spinlock);
/* stall spin lock */
static DEFINE_SPINLOCK(udc_stall_spinlock);
 
/*
 * slave mode: pending bytes in rx fifo after nyet,
 * used if EPIN irq came but no req was available
 */
static unsigned int udc_rxfifo_pending;
 
/* count soft resets after suspend to avoid loop */
static int soft_reset_occured;
static int soft_reset_after_usbreset_occured;

static struct timer_list udc_timer;
static int stop_timer;
 
/* set_rde -- Is used to control enabling of RX DMA. Problem is
 * that UDC has only one bit (RDE) to enable/disable RX DMA for
 * all OUT endpoints. So we have to handle race conditions like
 * when OUT data reaches the fifo but no request was queued yet.
 * This cannot be solved by leaving RX DMA disabled until a
 * request gets queued because there may be other OUT packets
 * in the FIFO (important for not blocking control traffic).
 * The value of set_rde controls the corresponding timer.
 *
 * set_rde -1 == not used, means it is allowed to be set to 0 or 1
 * set_rde  0 == do not touch RDE, do not start the RDE timer
 * set_rde  1 == timer function will look whether FIFO has data
 * set_rde  2 == set by timer function to enable RX DMA on next call
 */
static int set_rde = -1;
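
/*
 * Illustrative timeline (sketch): OUT data arrives in the FIFO before a
 * request is queued. With set_rde == 1 the timer function polls the FIFO;
 * once it finds data it bumps set_rde to 2 and re-arms itself, and on the
 * following expiry it finally sets RDE so RX DMA can drain the FIFO into
 * the request queued meanwhile. udc_queue() shortcuts this by forcing the
 * timer to expire with set_rde = 0 and setting RDE itself.
 */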
 
static DECLARE_COMPLETION(on_exit);
static struct timer_list udc_pollstall_timer;
static int stop_pollstall_timer;
static DECLARE_COMPLETION(on_pollstall_exit);

/* tasklet for usb disconnect */
static DECLARE_TASKLET(disconnect_tasklet, udc_tasklet_disconnect,
		(unsigned long) &udc);
 
/* endpoint names used for print */
static const char ep0_string[] = "ep0in";
static const char *ep_string[] = {
	ep0_string,
	"ep1in-int", "ep2in-bulk", "ep3in-bulk", "ep4in-bulk", "ep5in-bulk",
	"ep6in-bulk", "ep7in-bulk", "ep8in-bulk", "ep9in-bulk", "ep10in-bulk",
	"ep11in-bulk", "ep12in-bulk", "ep13in-bulk", "ep14in-bulk",
	"ep15in-bulk", "ep0out", "ep1out-bulk", "ep2out-bulk", "ep3out-bulk",
	"ep4out-bulk", "ep5out-bulk", "ep6out-bulk", "ep7out-bulk",
	"ep8out-bulk", "ep9out-bulk", "ep10out-bulk", "ep11out-bulk",
	"ep12out-bulk", "ep13out-bulk", "ep14out-bulk", "ep15out-bulk"
};
 
static int use_dma = 1;
/* packet per buffer dma */
static int use_dma_ppb = 1;
/* with per descr. update */
static int use_dma_ppb_du;
/* buffer fill mode */
static int use_dma_bufferfill_mode;
/* full speed only mode */
static int use_fullspeed;
/* tx buffer size for high speed */
static unsigned long hs_tx_buf = UDC_EPIN_BUFF_SIZE;
 
/* module parameters */
module_param(use_dma, bool, S_IRUGO);
MODULE_PARM_DESC(use_dma, "true for DMA");
module_param(use_dma_ppb, bool, S_IRUGO);
MODULE_PARM_DESC(use_dma_ppb, "true for DMA in packet per buffer mode");
module_param(use_dma_ppb_du, bool, S_IRUGO);
MODULE_PARM_DESC(use_dma_ppb_du,
	"true for DMA in packet per buffer mode with descriptor update");
module_param(use_fullspeed, bool, S_IRUGO);
MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");
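
/*
 * Example (illustrative): as noted in the header comment, gadget ether
 * buffers are not 32-bit aligned unless NET_IP_ALIGN is updated, so the
 * driver can instead be loaded in PIO mode:
 *
 *	modprobe amd5536udc use_dma=0
 */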
 
/*---------------------------------------------------------------------------*/
/* Prints UDC device registers and endpoint irq registers */
static void print_regs(struct udc *dev)
{
	DBG(dev, "------- Device registers -------\n");
	DBG(dev, "dev config     = %08x\n", readl(&dev->regs->cfg));
	DBG(dev, "dev control    = %08x\n", readl(&dev->regs->ctl));
	DBG(dev, "dev status     = %08x\n", readl(&dev->regs->sts));
	DBG(dev, "dev int's      = %08x\n", readl(&dev->regs->irqsts));
	DBG(dev, "dev intmask    = %08x\n", readl(&dev->regs->irqmsk));
	DBG(dev, "dev ep int's   = %08x\n", readl(&dev->regs->ep_irqsts));
	DBG(dev, "dev ep intmask = %08x\n", readl(&dev->regs->ep_irqmsk));
	DBG(dev, "USE DMA        = %d\n", use_dma);
	if (use_dma && use_dma_ppb && !use_dma_ppb_du) {
		DBG(dev, "DMA mode       = PPBNDU (packet per buffer "
			"WITHOUT desc. update)\n");
		dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBNDU");
	} else if (use_dma && use_dma_ppb && use_dma_ppb_du) {
		DBG(dev, "DMA mode       = PPBDU (packet per buffer "
			"WITH desc. update)\n");
		dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBDU");
	}
	if (use_dma && use_dma_bufferfill_mode) {
		DBG(dev, "DMA mode       = BF (buffer fill mode)\n");
		dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "BF");
	}
	if (!use_dma)
		dev_info(&dev->pdev->dev, "FIFO mode\n");
	DBG(dev, "-------------------------------------------------------\n");
}
 
/* Masks unused interrupts */
static int udc_mask_unused_interrupts(struct udc *dev)
{
	u32 tmp;

	/* mask all dev interrupts */
	tmp =	AMD_BIT(UDC_DEVINT_SVC) |
		AMD_BIT(UDC_DEVINT_ENUM) |
		AMD_BIT(UDC_DEVINT_US) |
		AMD_BIT(UDC_DEVINT_UR) |
		AMD_BIT(UDC_DEVINT_ES) |
		AMD_BIT(UDC_DEVINT_SI) |
		AMD_BIT(UDC_DEVINT_SOF)|
		AMD_BIT(UDC_DEVINT_SC);
	writel(tmp, &dev->regs->irqmsk);

	/* mask all ep interrupts */
	writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqmsk);

	return 0;
}
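
/*
 * Reader's note: AMD_BIT(), AMD_UNMASK_BIT(), AMD_ADDBITS() and
 * AMD_GETBITS() are bitfield helper macros from amd5536udc.h. In the mask
 * registers a set bit disables an interrupt source, so masking is
 * "tmp |= AMD_BIT(x)" and unmasking is "tmp &= AMD_UNMASK_BIT(x)".
 */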
 
/* Enables endpoint 0 interrupts */
static int udc_enable_ep0_interrupts(struct udc *dev)
{
	u32 tmp;

	DBG(dev, "udc_enable_ep0_interrupts()\n");

	/* read irq mask */
	tmp = readl(&dev->regs->ep_irqmsk);
	/* enable ep0 irq's */
	tmp &= AMD_UNMASK_BIT(UDC_EPINT_IN_EP0)
		& AMD_UNMASK_BIT(UDC_EPINT_OUT_EP0);
	writel(tmp, &dev->regs->ep_irqmsk);

	return 0;
}
 
/* Enables device interrupts for SET_INTF and SET_CONFIG */
static int udc_enable_dev_setup_interrupts(struct udc *dev)
{
	u32 tmp;

	DBG(dev, "enable device interrupts for setup data\n");

	/* read irq mask */
	tmp = readl(&dev->regs->irqmsk);

	/* enable SET_INTERFACE, SET_CONFIG and other needed irq's */
	tmp &= AMD_UNMASK_BIT(UDC_DEVINT_SI)
		& AMD_UNMASK_BIT(UDC_DEVINT_SC)
		& AMD_UNMASK_BIT(UDC_DEVINT_UR)
		& AMD_UNMASK_BIT(UDC_DEVINT_SVC)
		& AMD_UNMASK_BIT(UDC_DEVINT_ENUM);
	writel(tmp, &dev->regs->irqmsk);

	return 0;
}
 
/* Calculates fifo start of endpoint based on preceding endpoints */
static int udc_set_txfifo_addr(struct udc_ep *ep)
{
	struct udc	*dev;
	u32 tmp;
	unsigned int i;

	if (!ep || !(ep->in))
		return -EINVAL;

	dev = ep->dev;
	ep->txfifo = dev->txfifo;

	/* traverse ep's */
	for (i = 0; i < ep->num; i++) {
		if (dev->ep[i].regs) {
			/* read fifo size */
			tmp = readl(&dev->ep[i].regs->bufin_framenum);
			tmp = AMD_GETBITS(tmp, UDC_EPIN_BUFF_SIZE);
			ep->txfifo += tmp;
		}
	}
	return 0;
}
 
/* CNAK pending field: bit0 = ep0in, bit16 = ep0out */
static u32 cnak_pending;

static void UDC_QUEUE_CNAK(struct udc_ep *ep, unsigned num)
{
	if (readl(&ep->regs->ctl) & AMD_BIT(UDC_EPCTL_NAK)) {
		DBG(ep->dev, "NAK could not be cleared for ep%d\n", num);
		cnak_pending |= 1 << (num);
		ep->naking = 1;
	} else
		cnak_pending = cnak_pending & (~(1 << (num)));
}
 
/* Enables endpoint, is called by gadget driver */
static int
udc_ep_enable(struct usb_ep *usbep, const struct usb_endpoint_descriptor *desc)
{
	struct udc_ep		*ep;
	struct udc		*dev;
	u32			tmp;
	unsigned long		iflags;
	u8			udc_csr_epix;

	if (!usbep
			|| usbep->name == ep0_string
			|| !desc
			|| desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;

	ep = container_of(usbep, struct udc_ep, ep);
	dev = ep->dev;

	DBG(dev, "udc_ep_enable() ep %d\n", ep->num);

	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	spin_lock_irqsave(&dev->lock, iflags);
	ep->desc = desc;

	ep->halted = 0;

	/* set traffic type */
	tmp = readl(&dev->ep[ep->num].regs->ctl);
	tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_EPCTL_ET);
	writel(tmp, &dev->ep[ep->num].regs->ctl);

	/* set max packet size */
	tmp = readl(&dev->ep[ep->num].regs->bufout_maxpkt);
	tmp = AMD_ADDBITS(tmp, desc->wMaxPacketSize, UDC_EP_MAX_PKT_SIZE);
	ep->ep.maxpacket = desc->wMaxPacketSize;
	writel(tmp, &dev->ep[ep->num].regs->bufout_maxpkt);

	/* IN ep */
	if (ep->in) {

		/* ep ix in UDC CSR register space */
		udc_csr_epix = ep->num;

		/* set buffer size (tx fifo entries) */
		tmp = readl(&dev->ep[ep->num].regs->bufin_framenum);
		/* double buffering: fifo size = 2 x max packet size */
		tmp = AMD_ADDBITS(
				tmp,
				desc->wMaxPacketSize * UDC_EPIN_BUFF_SIZE_MULT
					/ UDC_DWORD_BYTES,
				UDC_EPIN_BUFF_SIZE);
		writel(tmp, &dev->ep[ep->num].regs->bufin_framenum);

		/* calc. tx fifo base addr */
		udc_set_txfifo_addr(ep);

		/* flush fifo */
		tmp = readl(&ep->regs->ctl);
		tmp |= AMD_BIT(UDC_EPCTL_F);
		writel(tmp, &ep->regs->ctl);

	/* OUT ep */
	} else {
		/* ep ix in UDC CSR register space */
		udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;

		/* set max packet size UDC CSR	*/
		tmp = readl(&dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);
		tmp = AMD_ADDBITS(tmp, desc->wMaxPacketSize,
					UDC_CSR_NE_MAX_PKT);
		writel(tmp, &dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);

		if (use_dma && !ep->in) {
			/* alloc and init BNA dummy request */
			ep->bna_dummy_req = udc_alloc_bna_dummy(ep);
			ep->bna_occurred = 0;
		}

		if (ep->num != UDC_EP0OUT_IX)
			dev->data_ep_enabled = 1;
	}

	/* set ep values */
	tmp = readl(&dev->csr->ne[udc_csr_epix]);
	/* max packet */
	tmp = AMD_ADDBITS(tmp, desc->wMaxPacketSize, UDC_CSR_NE_MAX_PKT);
	/* ep number */
	tmp = AMD_ADDBITS(tmp, desc->bEndpointAddress, UDC_CSR_NE_NUM);
	/* ep direction */
	tmp = AMD_ADDBITS(tmp, ep->in, UDC_CSR_NE_DIR);
	/* ep type */
	tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_CSR_NE_TYPE);
	/* ep config */
	tmp = AMD_ADDBITS(tmp, ep->dev->cur_config, UDC_CSR_NE_CFG);
	/* ep interface */
	tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf, UDC_CSR_NE_INTF);
	/* ep alt setting */
	tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt, UDC_CSR_NE_ALT);
	/* write reg */
	writel(tmp, &dev->csr->ne[udc_csr_epix]);

	/* enable ep irq */
	tmp = readl(&dev->regs->ep_irqmsk);
	tmp &= AMD_UNMASK_BIT(ep->num);
	writel(tmp, &dev->regs->ep_irqmsk);

	/*
	 * clear NAK by writing CNAK
	 * avoid BNA for OUT DMA, don't clear NAK until DMA desc. written
	 */
	if (!use_dma || ep->in) {
		tmp = readl(&ep->regs->ctl);
		tmp |= AMD_BIT(UDC_EPCTL_CNAK);
		writel(tmp, &ep->regs->ctl);
		ep->naking = 0;
		UDC_QUEUE_CNAK(ep, ep->num);
	}
	tmp = desc->bEndpointAddress;
	DBG(dev, "%s enabled\n", usbep->name);

	spin_unlock_irqrestore(&dev->lock, iflags);
	return 0;
}
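
/*
 * Usage sketch (illustrative, not part of the original file): a gadget
 * driver typically reaches udc_ep_enable() through the gadget API, e.g.
 *
 *	struct usb_ep *ep = usb_ep_autoconfig(gadget, &bulk_in_desc);
 *	usb_ep_enable(ep, &bulk_in_desc);
 *
 * where bulk_in_desc is the gadget driver's endpoint descriptor.
 */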
 
/* Resets endpoint */
static void ep_init(struct udc_regs __iomem *regs, struct udc_ep *ep)
{
	u32		tmp;

	VDBG(ep->dev, "ep-%d reset\n", ep->num);
	ep->desc = NULL;
	ep->ep.ops = &udc_ep_ops;
	INIT_LIST_HEAD(&ep->queue);

	ep->ep.maxpacket = (u16) ~0;
	/* set NAK */
	tmp = readl(&ep->regs->ctl);
	tmp |= AMD_BIT(UDC_EPCTL_SNAK);
	writel(tmp, &ep->regs->ctl);
	ep->naking = 1;

	/* disable interrupt */
	tmp = readl(&regs->ep_irqmsk);
	tmp |= AMD_BIT(ep->num);
	writel(tmp, &regs->ep_irqmsk);

	if (ep->in) {
		/* unset P and IN bit of potential former DMA */
		tmp = readl(&ep->regs->ctl);
		tmp &= AMD_UNMASK_BIT(UDC_EPCTL_P);
		writel(tmp, &ep->regs->ctl);

		tmp = readl(&ep->regs->sts);
		tmp |= AMD_BIT(UDC_EPSTS_IN);
		writel(tmp, &ep->regs->sts);

		/* flush the fifo */
		tmp = readl(&ep->regs->ctl);
		tmp |= AMD_BIT(UDC_EPCTL_F);
		writel(tmp, &ep->regs->ctl);

	}
	/* reset desc pointer */
	writel(0, &ep->regs->desptr);
}
 
/* Disables endpoint, is called by gadget driver */
static int udc_ep_disable(struct usb_ep *usbep)
{
	struct udc_ep	*ep = NULL;
	unsigned long	iflags;

	if (!usbep)
		return -EINVAL;

	ep = container_of(usbep, struct udc_ep, ep);
	if (usbep->name == ep0_string || !ep->desc)
		return -EINVAL;

	DBG(ep->dev, "Disable ep-%d\n", ep->num);

	spin_lock_irqsave(&ep->dev->lock, iflags);
	udc_free_request(&ep->ep, &ep->bna_dummy_req->req);
	empty_req_queue(ep);
	ep_init(ep->dev->regs, ep);
	spin_unlock_irqrestore(&ep->dev->lock, iflags);

	return 0;
}
 
/* Allocates request packet, called by gadget driver */
static struct usb_request *
udc_alloc_request(struct usb_ep *usbep, gfp_t gfp)
{
	struct udc_request	*req;
	struct udc_data_dma	*dma_desc;
	struct udc_ep		*ep;

	if (!usbep)
		return NULL;

	ep = container_of(usbep, struct udc_ep, ep);

	VDBG(ep->dev, "udc_alloc_req(): ep%d\n", ep->num);
	req = kzalloc(sizeof(struct udc_request), gfp);
	if (!req)
		return NULL;

	req->req.dma = DMA_DONT_USE;
	INIT_LIST_HEAD(&req->queue);

	if (ep->dma) {
		/* ep0 in requests are allocated from data pool here */
		dma_desc = pci_pool_alloc(ep->dev->data_requests, gfp,
						&req->td_phys);
		if (!dma_desc) {
			kfree(req);
			return NULL;
		}

		VDBG(ep->dev, "udc_alloc_req: req = %p dma_desc = %p, "
				"td_phys = %lx\n",
				req, dma_desc,
				(unsigned long)req->td_phys);
		/* prevent from using desc. - set HOST BUSY */
		dma_desc->status = AMD_ADDBITS(dma_desc->status,
						UDC_DMA_STP_STS_BS_HOST_BUSY,
						UDC_DMA_STP_STS_BS);
		dma_desc->bufptr = __constant_cpu_to_le32(DMA_DONT_USE);
		req->td_data = dma_desc;
		req->td_data_last = NULL;
		req->chain_len = 1;
	}

	return &req->req;
}
 
/* Frees request packet, called by gadget driver */
static void
udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq)
{
	struct udc_ep		*ep;
	struct udc_request	*req;

	if (!usbep || !usbreq)
		return;

	ep = container_of(usbep, struct udc_ep, ep);
	req = container_of(usbreq, struct udc_request, req);
	VDBG(ep->dev, "free_req req=%p\n", req);
	BUG_ON(!list_empty(&req->queue));
	if (req->td_data) {
		VDBG(ep->dev, "req->td_data=%p\n", req->td_data);

		/* free dma chain if created */
		if (req->chain_len > 1) {
			udc_free_dma_chain(ep->dev, req);
		}

		pci_pool_free(ep->dev->data_requests, req->td_data,
							req->td_phys);
	}
	kfree(req);
}
 
/* Init BNA dummy descriptor for HOST BUSY and pointing to itself */
static void udc_init_bna_dummy(struct udc_request *req)
{
	if (req) {
		/* set last bit */
		req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);
		/* set next pointer to itself */
		req->td_data->next = req->td_phys;
		/* set HOST BUSY */
		req->td_data->status
			= AMD_ADDBITS(req->td_data->status,
					UDC_DMA_STP_STS_BS_DMA_DONE,
					UDC_DMA_STP_STS_BS);
#ifdef UDC_VERBOSE
		pr_debug("bna desc = %p, sts = %08x\n",
			req->td_data, req->td_data->status);
#endif
	}
}
 
/* Allocate BNA dummy descriptor */
static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep)
{
	struct udc_request *req = NULL;
	struct usb_request *_req = NULL;

	/* alloc the dummy request */
	_req = udc_alloc_request(&ep->ep, GFP_ATOMIC);
	if (_req) {
		req = container_of(_req, struct udc_request, req);
		ep->bna_dummy_req = req;
		udc_init_bna_dummy(req);
	}
	return req;
}
 
/* Write data to TX fifo for IN packets */
static void
udc_txfifo_write(struct udc_ep *ep, struct usb_request *req)
{
	u8			*req_buf;
	u32			*buf;
	int			i, j;
	unsigned		bytes = 0;
	unsigned		remaining = 0;

	if (!req || !ep)
		return;

	req_buf = req->buf + req->actual;
	remaining = req->length - req->actual;

	buf = (u32 *) req_buf;

	bytes = ep->ep.maxpacket;
	if (bytes > remaining)
		bytes = remaining;

	/* dwords first */
	for (i = 0; i < bytes / UDC_DWORD_BYTES; i++) {
		writel(*(buf + i), ep->txfifo);
	}

	/* remaining bytes must be written by byte access */
	for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
		writeb((u8)(*(buf + i) >> (j << UDC_BITS_PER_BYTE_SHIFT)),
							ep->txfifo);
	}

	/* dummy write confirm */
	writel(0, &ep->regs->confirm);
}
 
/* Read dwords from RX fifo for OUT transfers */
static int udc_rxfifo_read_dwords(struct udc *dev, u32 *buf, int dwords)
{
	int i;

	VDBG(dev, "udc_read_dwords(): %d dwords\n", dwords);

	for (i = 0; i < dwords; i++) {
		*(buf + i) = readl(dev->rxfifo);
	}
	return 0;
}
 
/* Read bytes from RX fifo for OUT transfers */
static int udc_rxfifo_read_bytes(struct udc *dev, u8 *buf, int bytes)
{
	u32 tmp;
	int i, j;

	VDBG(dev, "udc_read_bytes(): %d bytes\n", bytes);

	/* dwords first */
	for (i = 0; i < bytes / UDC_DWORD_BYTES; i++) {
		*((u32 *)(buf + (i<<2))) = readl(dev->rxfifo);
	}

	/* remaining bytes must be read by byte access */
	if (bytes % UDC_DWORD_BYTES) {
		tmp = readl(dev->rxfifo);
		for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
			*(buf + (i<<2) + j) = (u8)(tmp & UDC_BYTE_MASK);
			tmp = tmp >> UDC_BITS_PER_BYTE;
		}
	}
	return 0;
}
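
/*
 * Worked example (for illustration): a 6 byte read pops one full dword
 * (4 bytes) from the fifo and then one more dword of which only the low
 * two bytes are kept, each extracted with UDC_BYTE_MASK and shifted down
 * by UDC_BITS_PER_BYTE per iteration.
 */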
 
/* Read data from RX fifo for OUT transfers */
static int
udc_rxfifo_read(struct udc_ep *ep, struct udc_request *req)
{
	u8		*buf;
	unsigned	buf_space;
	unsigned	bytes = 0;
	unsigned	finished = 0;

	/* received number bytes */
	bytes = readl(&ep->regs->sts);
	bytes = AMD_GETBITS(bytes, UDC_EPSTS_RX_PKT_SIZE);

	/* if unknown do not read fifo beyond buffer */
	buf_space = req->req.length - req->req.actual;
	buf = req->req.buf + req->req.actual;
	if (bytes > buf_space) {
		if ((buf_space % ep->ep.maxpacket) != 0) {
			DBG(ep->dev,
				"%s: rx %d bytes, rx-buf space = %d bytes\n",
				ep->ep.name, bytes, buf_space);
			req->req.status = -EOVERFLOW;
		}
		bytes = buf_space;
	}
	req->req.actual += bytes;

	/* last packet ? */
	if (((bytes % ep->ep.maxpacket) != 0) || (!bytes)
		|| ((req->req.actual == req->req.length) && !req->req.zero))
		finished = 1;

	/* read rx fifo bytes */
	VDBG(ep->dev, "ep %s: rxfifo read %d bytes\n", ep->ep.name, bytes);
	udc_rxfifo_read_bytes(ep->dev, buf, bytes);

	return finished;
}
 
/* create/re-init a DMA descriptor or a DMA descriptor chain */
static int prep_dma(struct udc_ep *ep, struct udc_request *req, gfp_t gfp)
{
	int	retval = 0;
	u32	tmp;

	VDBG(ep->dev, "prep_dma\n");
	VDBG(ep->dev, "prep_dma ep%d req->td_data=%p\n",
			ep->num, req->td_data);

	/* set buffer pointer */
	req->td_data->bufptr = req->req.dma;

	/* set last bit */
	req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);

	/* build/re-init dma chain if maxpkt scatter mode, not for EP0 */
	if (use_dma_ppb) {

		retval = udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
		if (retval != 0) {
			if (retval == -ENOMEM)
				DBG(ep->dev, "Out of DMA memory\n");
			return retval;
		}
		if (ep->in) {
			if (req->req.length == ep->ep.maxpacket) {
				/* write tx bytes */
				req->td_data->status =
					AMD_ADDBITS(req->td_data->status,
						ep->ep.maxpacket,
						UDC_DMA_IN_STS_TXBYTES);
			}
		}
	}

	if (ep->in) {
		VDBG(ep->dev, "IN: use_dma_ppb=%d req->req.len=%d "
				"maxpacket=%d ep%d\n",
				use_dma_ppb, req->req.length,
				ep->ep.maxpacket, ep->num);
		/*
		 * if bytes < max packet then tx bytes must
		 * be written in packet per buffer mode
		 */
		if (!use_dma_ppb || req->req.length < ep->ep.maxpacket
				|| ep->num == UDC_EP0OUT_IX
				|| ep->num == UDC_EP0IN_IX) {
			/* write tx bytes */
			req->td_data->status =
				AMD_ADDBITS(req->td_data->status,
						req->req.length,
						UDC_DMA_IN_STS_TXBYTES);
			/* reset frame num */
			req->td_data->status =
				AMD_ADDBITS(req->td_data->status,
						0,
						UDC_DMA_IN_STS_FRAMENUM);
		}
		/* set HOST BUSY */
		req->td_data->status =
			AMD_ADDBITS(req->td_data->status,
				UDC_DMA_STP_STS_BS_HOST_BUSY,
				UDC_DMA_STP_STS_BS);
	} else {
		VDBG(ep->dev, "OUT set host ready\n");
		/* set HOST READY */
		req->td_data->status =
			AMD_ADDBITS(req->td_data->status,
				UDC_DMA_STP_STS_BS_HOST_READY,
				UDC_DMA_STP_STS_BS);

		/* clear NAK by writing CNAK */
		if (ep->naking) {
			tmp = readl(&ep->regs->ctl);
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &ep->regs->ctl);
			ep->naking = 0;
			UDC_QUEUE_CNAK(ep, ep->num);
		}
	}

	return retval;
}
 
/* Completes request packet ... caller MUST hold lock */
static void
complete_req(struct udc_ep *ep, struct udc_request *req, int sts)
__releases(ep->dev->lock)
__acquires(ep->dev->lock)
{
	struct udc	*dev;
	unsigned	halted;

	VDBG(ep->dev, "complete_req(): ep%d\n", ep->num);

	dev = ep->dev;
	/* unmap DMA */
	if (req->dma_mapping) {
		if (ep->in)
			pci_unmap_single(dev->pdev,
					req->req.dma,
					req->req.length,
					PCI_DMA_TODEVICE);
		else
			pci_unmap_single(dev->pdev,
					req->req.dma,
					req->req.length,
					PCI_DMA_FROMDEVICE);
		req->dma_mapping = 0;
		req->req.dma = DMA_DONT_USE;
	}

	halted = ep->halted;
	ep->halted = 1;

	/* set new status if pending */
	if (req->req.status == -EINPROGRESS)
		req->req.status = sts;

	/* remove from ep queue */
	list_del_init(&req->queue);

	VDBG(ep->dev, "req %p => complete %d bytes at %s with sts %d\n",
		&req->req, req->req.length, ep->ep.name, sts);

	spin_unlock(&dev->lock);
	req->req.complete(&ep->ep, &req->req);
	spin_lock(&dev->lock);
	ep->halted = halted;
}
 
/* frees pci pool descriptors of a DMA chain */
static int udc_free_dma_chain(struct udc *dev, struct udc_request *req)
{
	int ret_val = 0;
	struct udc_data_dma	*td;
	struct udc_data_dma	*td_last = NULL;
	unsigned int i;

	DBG(dev, "free chain req = %p\n", req);

	/* do not free first desc., will be done by free for request */
	td_last = req->td_data;
	td = phys_to_virt(td_last->next);

	for (i = 1; i < req->chain_len; i++) {

		pci_pool_free(dev->data_requests, td,
				(dma_addr_t) td_last->next);
		td_last = td;
		td = phys_to_virt(td_last->next);
	}

	return ret_val;
}
 
/* Iterates to the end of a DMA chain and returns last descriptor */
static struct udc_data_dma *udc_get_last_dma_desc(struct udc_request *req)
{
	struct udc_data_dma	*td;

	td = req->td_data;
	while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L))) {
		td = phys_to_virt(td->next);
	}

	return td;
}
 
/* Iterates to the end of a DMA chain and counts bytes received */
static u32 udc_get_ppbdu_rxbytes(struct udc_request *req)
{
	struct udc_data_dma	*td;
	u32 count;

	td = req->td_data;
	/* received number bytes */
	count = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_RXBYTES);

	while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L))) {
		td = phys_to_virt(td->next);
		/* received number bytes */
		if (td) {
			count += AMD_GETBITS(td->status,
				UDC_DMA_OUT_STS_RXBYTES);
		}
	}

	return count;
}
 
/* Creates or re-inits a DMA chain */
static int udc_create_dma_chain(
	struct udc_ep *ep,
	struct udc_request *req,
	unsigned long buf_len, gfp_t gfp_flags
)
{
	unsigned long bytes = req->req.length;
	unsigned int i;
	dma_addr_t dma_addr;
	struct udc_data_dma	*td = NULL;
	struct udc_data_dma	*last = NULL;
	unsigned long txbytes;
	unsigned create_new_chain = 0;
	unsigned len;

	VDBG(ep->dev, "udc_create_dma_chain: bytes=%ld buf_len=%ld\n",
			bytes, buf_len);
	dma_addr = DMA_DONT_USE;

	/* unset L bit in first desc for OUT */
	if (!ep->in) {
		req->td_data->status &= AMD_CLEAR_BIT(UDC_DMA_IN_STS_L);
	}

	/* alloc only new desc's if not already available */
	len = req->req.length / ep->ep.maxpacket;
	if (req->req.length % ep->ep.maxpacket) {
		len++;
	}

	if (len > req->chain_len) {
		/* shorter chain already allocated before */
		if (req->chain_len > 1) {
			udc_free_dma_chain(ep->dev, req);
		}
		req->chain_len = len;
		create_new_chain = 1;
	}

	/* gen. required number of descriptors and buffers */
	for (i = buf_len; i < bytes; i += buf_len) {
		/* create or determine next desc. */
		if (create_new_chain) {

			td = pci_pool_alloc(ep->dev->data_requests,
					gfp_flags, &dma_addr);
			if (!td)
				return -ENOMEM;

			td->status = 0;
		} else if (i == buf_len) {
			/* first td */
			td = (struct udc_data_dma *) phys_to_virt(
						req->td_data->next);
			td->status = 0;
		} else {
			td = (struct udc_data_dma *) phys_to_virt(last->next);
			td->status = 0;
		}

		if (td)
			td->bufptr = req->req.dma + i; /* assign buffer */
		else
			break;

		/* short packet ? */
		if ((bytes - i) >= buf_len) {
			txbytes = buf_len;
		} else {
			/* short packet */
			txbytes = bytes - i;
		}

		/* link td and assign tx bytes */
		if (i == buf_len) {
			if (create_new_chain) {
				req->td_data->next = dma_addr;
			} else {
				/* req->td_data->next = virt_to_phys(td); */
			}
			/* write tx bytes */
			if (ep->in) {
				/* first desc */
				req->td_data->status =
					AMD_ADDBITS(req->td_data->status,
							ep->ep.maxpacket,
							UDC_DMA_IN_STS_TXBYTES);
				/* second desc */
				td->status = AMD_ADDBITS(td->status,
							txbytes,
							UDC_DMA_IN_STS_TXBYTES);
			}
		} else {
			if (create_new_chain) {
				last->next = dma_addr;
			} else {
				/* last->next = virt_to_phys(td); */
			}
			if (ep->in) {
				/* write tx bytes */
				td->status = AMD_ADDBITS(td->status,
							txbytes,
							UDC_DMA_IN_STS_TXBYTES);
			}
		}
		last = td;
	}
	/* set last bit */
	if (td) {
		td->status |= AMD_BIT(UDC_DMA_IN_STS_L);
		/* last desc. points to itself */
		req->td_data_last = td;
	}

	return 0;
}
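
/*
 * Chain layout sketch (illustrative): the first descriptor is always
 * req->td_data; additional descriptors come from the pci pool and are
 * linked through ->next (physical addresses), each ->bufptr advanced by
 * buf_len into the request buffer. Only the final descriptor carries the
 * L bit and is remembered in req->td_data_last:
 *
 *	td_data -> td -> td -> ... -> td (L bit set, td_data_last)
 */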
 
/* Enabling RX DMA */
static void udc_set_rde(struct udc *dev)
{
	u32 tmp;

	VDBG(dev, "udc_set_rde()\n");
	/* stop RDE timer */
	if (timer_pending(&udc_timer)) {
		set_rde = 0;
		mod_timer(&udc_timer, jiffies - 1);
	}
	/* set RDE */
	tmp = readl(&dev->regs->ctl);
	tmp |= AMD_BIT(UDC_DEVCTL_RDE);
	writel(tmp, &dev->regs->ctl);
}
 
/* Queues a request packet, called by gadget driver */
static int
udc_queue(struct usb_ep *usbep, struct usb_request *usbreq, gfp_t gfp)
{
	int			retval = 0;
	u8			open_rxfifo = 0;
	unsigned long		iflags;
	struct udc_ep		*ep;
	struct udc_request	*req;
	struct udc		*dev;
	u32			tmp;

	/* check the inputs */
	req = container_of(usbreq, struct udc_request, req);

	if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf
			|| !list_empty(&req->queue))
		return -EINVAL;

	ep = container_of(usbep, struct udc_ep, ep);
	if (!ep->desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
		return -EINVAL;

	VDBG(ep->dev, "udc_queue(): ep%d-in=%d\n", ep->num, ep->in);
	dev = ep->dev;

	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	/* map dma (usually done before) */
	if (ep->dma && usbreq->length != 0
			&& (usbreq->dma == DMA_DONT_USE || usbreq->dma == 0)) {
		VDBG(dev, "DMA map req %p\n", req);
		if (ep->in)
			usbreq->dma = pci_map_single(dev->pdev,
						usbreq->buf,
						usbreq->length,
						PCI_DMA_TODEVICE);
		else
			usbreq->dma = pci_map_single(dev->pdev,
						usbreq->buf,
						usbreq->length,
						PCI_DMA_FROMDEVICE);
		req->dma_mapping = 1;
	}

	VDBG(dev, "%s queue req %p, len %d req->td_data=%p buf %p\n",
			usbep->name, usbreq, usbreq->length,
			req->td_data, usbreq->buf);

	spin_lock_irqsave(&dev->lock, iflags);
	usbreq->actual = 0;
	usbreq->status = -EINPROGRESS;
	req->dma_done = 0;

	/* on empty queue just do first transfer */
	if (list_empty(&ep->queue)) {
		/* zlp */
		if (usbreq->length == 0) {
			/* IN zlp's are handled by hardware */
			complete_req(ep, req, 0);
			VDBG(dev, "%s: zlp\n", ep->ep.name);
			/*
			 * if set_config or set_intf is waiting for ack by zlp
			 * then set CSR_DONE
			 */
			if (dev->set_cfg_not_acked) {
				tmp = readl(&dev->regs->ctl);
				tmp |= AMD_BIT(UDC_DEVCTL_CSR_DONE);
				writel(tmp, &dev->regs->ctl);
				dev->set_cfg_not_acked = 0;
			}
			/* setup command is ACK'ed now by zlp */
			if (dev->waiting_zlp_ack_ep0in) {
				/* clear NAK by writing CNAK in EP0_IN */
				tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
				tmp |= AMD_BIT(UDC_EPCTL_CNAK);
				writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
				dev->ep[UDC_EP0IN_IX].naking = 0;
				UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX],
							UDC_EP0IN_IX);
				dev->waiting_zlp_ack_ep0in = 0;
			}
			goto finished;
		}
		if (ep->dma) {
			retval = prep_dma(ep, req, gfp);
			if (retval != 0)
				goto finished;
			/* write desc pointer to enable DMA */
			if (ep->in) {
				/* set HOST READY */
				req->td_data->status =
					AMD_ADDBITS(req->td_data->status,
						UDC_DMA_IN_STS_BS_HOST_READY,
						UDC_DMA_IN_STS_BS);
			}

			/* disabled rx dma while descriptor update */
			if (!ep->in) {
				/* stop RDE timer */
				if (timer_pending(&udc_timer)) {
					set_rde = 0;
					mod_timer(&udc_timer, jiffies - 1);
				}
				/* clear RDE */
				tmp = readl(&dev->regs->ctl);
				tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
				writel(tmp, &dev->regs->ctl);
				open_rxfifo = 1;

				/*
				 * if BNA occurred then let BNA dummy desc.
				 * point to current desc.
				 */
				if (ep->bna_occurred) {
					VDBG(dev, "copy to BNA dummy desc.\n");
					memcpy(ep->bna_dummy_req->td_data,
						req->td_data,
						sizeof(struct udc_data_dma));
				}
			}
			/* write desc pointer */
			writel(req->td_phys, &ep->regs->desptr);

			/* clear NAK by writing CNAK */
			if (ep->naking) {
				tmp = readl(&ep->regs->ctl);
				tmp |= AMD_BIT(UDC_EPCTL_CNAK);
				writel(tmp, &ep->regs->ctl);
				ep->naking = 0;
				UDC_QUEUE_CNAK(ep, ep->num);
			}

			if (ep->in) {
				/* enable ep irq */
				tmp = readl(&dev->regs->ep_irqmsk);
				tmp &= AMD_UNMASK_BIT(ep->num);
				writel(tmp, &dev->regs->ep_irqmsk);
			}
		}

	} else if (ep->dma) {

		/*
		 * prep_dma not used for OUT ep's, this is not possible
		 * for PPB modes, because of chain creation reasons
		 */
		if (ep->in) {
			retval = prep_dma(ep, req, gfp);
			if (retval != 0)
				goto finished;
		}
	}
	VDBG(dev, "list_add\n");
	/* add request to ep queue */
	if (req) {

		list_add_tail(&req->queue, &ep->queue);

		/* open rxfifo if out data queued */
		if (open_rxfifo) {
			/* enable DMA */
			req->dma_going = 1;
			udc_set_rde(dev);
			if (ep->num != UDC_EP0OUT_IX)
				dev->data_ep_queued = 1;
		}
		/* stop OUT naking */
		if (!ep->in) {
			if (!use_dma && udc_rxfifo_pending) {
				DBG(dev, "udc_queue(): pending bytes in "
					"rxfifo after nyet\n");
				/* read pending bytes after nyet */
				if (udc_rxfifo_read(ep, req)) {
					/* finish */
					complete_req(ep, req, 0);
				}
				udc_rxfifo_pending = 0;

			}
		}
	}

finished:
	spin_unlock_irqrestore(&dev->lock, iflags);
	return retval;
}
 
/* Empty request queue of an endpoint; caller holds spinlock */
static void empty_req_queue(struct udc_ep *ep)
{
	struct udc_request	*req;

	ep->halted = 1;
	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next,
			struct udc_request,
			queue);
		complete_req(ep, req, -ESHUTDOWN);
	}
}
 
/* Dequeues a request packet, called by gadget driver */
static int udc_dequeue(struct usb_ep *usbep, struct usb_request *usbreq)
{
	struct udc_ep		*ep;
	struct udc_request	*req;
	unsigned		halted;
	unsigned long		iflags;

	ep = container_of(usbep, struct udc_ep, ep);
	if (!usbep || !usbreq || (!ep->desc && (ep->num != 0
				&& ep->num != UDC_EP0OUT_IX)))
		return -EINVAL;

	req = container_of(usbreq, struct udc_request, req);

	spin_lock_irqsave(&ep->dev->lock, iflags);
	halted = ep->halted;
	ep->halted = 1;
	/* request in processing or next one */
	if (ep->queue.next == &req->queue) {
		if (ep->dma && req->dma_going) {
			if (ep->in)
				ep->cancel_transfer = 1;
			else {
				u32 tmp;
				u32 dma_sts;
				/* stop potential receive DMA */
				tmp = readl(&udc->regs->ctl);
				writel(tmp & AMD_UNMASK_BIT(UDC_DEVCTL_RDE),
							&udc->regs->ctl);
				/*
				 * Cancel transfer later in ISR
				 * if descriptor was touched.
				 */
				dma_sts = AMD_GETBITS(req->td_data->status,
							UDC_DMA_OUT_STS_BS);
				if (dma_sts != UDC_DMA_OUT_STS_BS_HOST_READY)
					ep->cancel_transfer = 1;
				else {
					udc_init_bna_dummy(ep->req);
					writel(ep->bna_dummy_req->td_phys,
						&ep->regs->desptr);
				}
				writel(tmp, &udc->regs->ctl);
			}
		}
	}
	complete_req(ep, req, -ECONNRESET);
	ep->halted = halted;

	spin_unlock_irqrestore(&ep->dev->lock, iflags);
	return 0;
}
 
/* Halt or clear halt of endpoint */
static int
udc_set_halt(struct usb_ep *usbep, int halt)
{
	struct udc_ep	*ep;
	u32 tmp;
	unsigned long iflags;
	int retval = 0;

	if (!usbep)
		return -EINVAL;

	pr_debug("set_halt %s: halt=%d\n", usbep->name, halt);

	ep = container_of(usbep, struct udc_ep, ep);
	if (!ep->desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
		return -EINVAL;
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	spin_lock_irqsave(&udc_stall_spinlock, iflags);
	/* halt or clear halt */
	if (halt) {
		if (ep->num == 0)
			ep->dev->stall_ep0in = 1;
		else {
			/*
			 * set STALL
			 * rxfifo empty not taken into account
			 */
			tmp = readl(&ep->regs->ctl);
			tmp |= AMD_BIT(UDC_EPCTL_S);
			writel(tmp, &ep->regs->ctl);
			ep->halted = 1;

			/* setup poll timer */
			if (!timer_pending(&udc_pollstall_timer)) {
				udc_pollstall_timer.expires = jiffies +
					HZ * UDC_POLLSTALL_TIMER_USECONDS
					/ (1000 * 1000);
				if (!stop_pollstall_timer) {
					DBG(ep->dev, "start polltimer\n");
					add_timer(&udc_pollstall_timer);
				}
			}
		}
	} else {
		/* ep is halted by set_halt() before */
		if (ep->halted) {
			tmp = readl(&ep->regs->ctl);
			/* clear stall bit */
			tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
			/* clear NAK by writing CNAK */
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &ep->regs->ctl);
			ep->halted = 0;
			UDC_QUEUE_CNAK(ep, ep->num);
		}
	}
	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
	return retval;
}
 
/* gadget interface */
static const struct usb_ep_ops udc_ep_ops = {
	.enable		= udc_ep_enable,
	.disable	= udc_ep_disable,

	.alloc_request	= udc_alloc_request,
	.free_request	= udc_free_request,

	.queue		= udc_queue,
	.dequeue	= udc_dequeue,

	.set_halt	= udc_set_halt,
	/* fifo ops not implemented */
};

/*-------------------------------------------------------------------------*/
 
/* Get frame counter (not implemented) */
static int udc_get_frame(struct usb_gadget *gadget)
{
	return -EOPNOTSUPP;
}

/* Remote wakeup gadget interface */
static int udc_wakeup(struct usb_gadget *gadget)
{
	struct udc	*dev;

	if (!gadget)
		return -EINVAL;
	dev = container_of(gadget, struct udc, gadget);
	udc_remote_wakeup(dev);

	return 0;
}

/* gadget operations */
static const struct usb_gadget_ops udc_ops = {
	.wakeup		= udc_wakeup,
	.get_frame	= udc_get_frame,
};
 
/* Setups endpoint parameters, adds endpoints to linked list */
static void make_ep_lists(struct udc *dev)
{
	/* make gadget ep lists */
	INIT_LIST_HEAD(&dev->gadget.ep_list);
	list_add_tail(&dev->ep[UDC_EPIN_STATUS_IX].ep.ep_list,
						&dev->gadget.ep_list);
	list_add_tail(&dev->ep[UDC_EPIN_IX].ep.ep_list,
						&dev->gadget.ep_list);
	list_add_tail(&dev->ep[UDC_EPOUT_IX].ep.ep_list,
						&dev->gadget.ep_list);

	/* fifo config */
	dev->ep[UDC_EPIN_STATUS_IX].fifo_depth = UDC_EPIN_SMALLINT_BUFF_SIZE;
	if (dev->gadget.speed == USB_SPEED_FULL)
		dev->ep[UDC_EPIN_IX].fifo_depth = UDC_FS_EPIN_BUFF_SIZE;
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		dev->ep[UDC_EPIN_IX].fifo_depth = hs_tx_buf;
	dev->ep[UDC_EPOUT_IX].fifo_depth = UDC_RXFIFO_SIZE;
}
 
/* init registers at driver load time */
static int startup_registers(struct udc *dev)
{
	u32 tmp;

	/* init controller by soft reset */
	udc_soft_reset(dev);

	/* mask not needed interrupts */
	udc_mask_unused_interrupts(dev);

	/* put into initial config */
	udc_basic_init(dev);
	/* link up all endpoints */
	udc_setup_endpoints(dev);

	/* program speed */
	tmp = readl(&dev->regs->cfg);
	if (use_fullspeed) {
		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
	} else {
		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_HS, UDC_DEVCFG_SPD);
	}
	writel(tmp, &dev->regs->cfg);

	return 0;
}
 
/* Inits UDC context */
static void udc_basic_init(struct udc *dev)
{
	u32	tmp;

	DBG(dev, "udc_basic_init()\n");

	dev->gadget.speed = USB_SPEED_UNKNOWN;

	/* stop RDE timer */
	if (timer_pending(&udc_timer)) {
		set_rde = 0;
		mod_timer(&udc_timer, jiffies - 1);
	}
	/* stop poll stall timer */
	if (timer_pending(&udc_pollstall_timer)) {
		mod_timer(&udc_pollstall_timer, jiffies - 1);
	}
	/* disable DMA */
	tmp = readl(&dev->regs->ctl);
	tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
	tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_TDE);
	writel(tmp, &dev->regs->ctl);

	/* enable dynamic CSR programming */
	tmp = readl(&dev->regs->cfg);
	tmp |= AMD_BIT(UDC_DEVCFG_CSR_PRG);
	/* set self powered */
	tmp |= AMD_BIT(UDC_DEVCFG_SP);
	/* set remote wakeupable */
	tmp |= AMD_BIT(UDC_DEVCFG_RWKP);
	writel(tmp, &dev->regs->cfg);

	make_ep_lists(dev);

	dev->data_ep_enabled = 0;
	dev->data_ep_queued = 0;
}
 
/* Sets initial endpoint parameters */
static void udc_setup_endpoints(struct udc *dev)
{
	struct udc_ep	*ep;
	u32	tmp;
	u32	reg;

	DBG(dev, "udc_setup_endpoints()\n");

	/* read enum speed */
	tmp = readl(&dev->regs->sts);
	tmp = AMD_GETBITS(tmp, UDC_DEVSTS_ENUM_SPEED);
	if (tmp == UDC_DEVSTS_ENUM_SPEED_HIGH) {
		dev->gadget.speed = USB_SPEED_HIGH;
	} else if (tmp == UDC_DEVSTS_ENUM_SPEED_FULL) {
		dev->gadget.speed = USB_SPEED_FULL;
	}

	/* set basic ep parameters */
	for (tmp = 0; tmp < UDC_EP_NUM; tmp++) {
		ep = &dev->ep[tmp];
		ep->dev = dev;
		ep->ep.name = ep_string[tmp];
		ep->num = tmp;
		/* txfifo size is calculated at enable time */
		ep->txfifo = dev->txfifo;

		/* fifo size */
		if (tmp < UDC_EPIN_NUM) {
			ep->fifo_depth = UDC_TXFIFO_SIZE;
			ep->in = 1;
		} else {
			ep->fifo_depth = UDC_RXFIFO_SIZE;
			ep->in = 0;
		}
		ep->regs = &dev->ep_regs[tmp];
		/*
		 * ep will be reset only if ep was not enabled before to avoid
		 * disabling ep interrupts when ENUM interrupt occurs but ep is
		 * not enabled by gadget driver
		 */
		if (!ep->desc) {
			ep_init(dev->regs, ep);
		}

		if (use_dma) {
			/*
			 * ep->dma is not really used, just to indicate that
			 * DMA is active: remove this
			 * dma regs = dev control regs
			 */
			ep->dma = &dev->regs->ctl;

			/* nak OUT endpoints until enable - not for ep0 */
			if (tmp != UDC_EP0IN_IX && tmp != UDC_EP0OUT_IX
						&& tmp > UDC_EPIN_NUM) {
				/* set NAK */
				reg = readl(&dev->ep[tmp].regs->ctl);
				reg |= AMD_BIT(UDC_EPCTL_SNAK);
				writel(reg, &dev->ep[tmp].regs->ctl);
				dev->ep[tmp].naking = 1;
			}
		}
	}
	/* EP0 max packet */
	if (dev->gadget.speed == USB_SPEED_FULL) {
		dev->ep[UDC_EP0IN_IX].ep.maxpacket = UDC_FS_EP0IN_MAX_PKT_SIZE;
		dev->ep[UDC_EP0OUT_IX].ep.maxpacket =
						UDC_FS_EP0OUT_MAX_PKT_SIZE;
	} else if (dev->gadget.speed == USB_SPEED_HIGH) {
		dev->ep[UDC_EP0IN_IX].ep.maxpacket = UDC_EP0IN_MAX_PKT_SIZE;
		dev->ep[UDC_EP0OUT_IX].ep.maxpacket = UDC_EP0OUT_MAX_PKT_SIZE;
	}

	/*
	 * with suspend bug workaround, ep0 params for gadget driver
	 * are set at gadget driver bind() call
	 */
	dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
	dev->ep[UDC_EP0IN_IX].halted = 0;
	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);

	/* init cfg/alt/int */
	dev->cur_config = 0;
	dev->cur_intf = 0;
	dev->cur_alt = 0;
}
 
/* Bringup after Connect event, initial bringup to be ready for ep0 events */
static void usb_connect(struct udc *dev)
{

	dev_info(&dev->pdev->dev, "USB Connect\n");

	dev->connected = 1;

	/* put into initial config */
	udc_basic_init(dev);

	/* enable device setup interrupts */
	udc_enable_dev_setup_interrupts(dev);
}
 
/*
 * Calls gadget with disconnect event and resets the UDC and makes
 * initial bringup to be ready for ep0 events
 */
static void usb_disconnect(struct udc *dev)
{

	dev_info(&dev->pdev->dev, "USB Disconnect\n");

	dev->connected = 0;

	/* mask interrupts */
	udc_mask_unused_interrupts(dev);

	/* REVISIT there doesn't seem to be a point to having this
	 * talk to a tasklet ... do it directly, we already hold
	 * the spinlock needed to process the disconnect.
	 */
	tasklet_schedule(&disconnect_tasklet);
}
 
/* Tasklet for disconnect to be outside of interrupt context */
static void udc_tasklet_disconnect(unsigned long par)
{
	struct udc *dev = (struct udc *)(*((struct udc **) par));
	u32 tmp;

	DBG(dev, "Tasklet disconnect\n");
	spin_lock_irq(&dev->lock);

	if (dev->driver) {
		spin_unlock(&dev->lock);
		dev->driver->disconnect(&dev->gadget);
		spin_lock(&dev->lock);

		/* empty queues */
		for (tmp = 0; tmp < UDC_EP_NUM; tmp++) {
			empty_req_queue(&dev->ep[tmp]);
		}

	}

	/* disable ep0 */
	ep_init(dev->regs,
			&dev->ep[UDC_EP0IN_IX]);

	if (!soft_reset_occured) {
		/* init controller by soft reset */
		udc_soft_reset(dev);
		soft_reset_occured++;
	}

	/* re-enable dev interrupts */
	udc_enable_dev_setup_interrupts(dev);
	/* back to full speed ? */
	if (use_fullspeed) {
		tmp = readl(&dev->regs->cfg);
		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
		writel(tmp, &dev->regs->cfg);
	}

	spin_unlock_irq(&dev->lock);
}
 
/* Reset the UDC core */
static void udc_soft_reset(struct udc *dev)
{
	unsigned long	flags;

	DBG(dev, "Soft reset\n");
	/*
	 * reset possible waiting interrupts, because int.
	 * status is lost after soft reset,
	 * ep int. status reset
	 */
	writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqsts);
	/* device int. status reset */
	writel(UDC_DEV_MSK_DISABLE, &dev->regs->irqsts);

	spin_lock_irqsave(&udc_irq_spinlock, flags);
	writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
	readl(&dev->regs->cfg);
	spin_unlock_irqrestore(&udc_irq_spinlock, flags);
}
 
/* RDE timer callback to set RDE bit */
static void udc_timer_function(unsigned long v)
{
	u32 tmp;

	spin_lock_irq(&udc_irq_spinlock);

	if (set_rde > 0) {
		/*
		 * open the fifo if fifo was filled on last timer call
		 * conditionally
		 */
		if (set_rde > 1) {
			/* set RDE to receive setup data */
			tmp = readl(&udc->regs->ctl);
			tmp |= AMD_BIT(UDC_DEVCTL_RDE);
			writel(tmp, &udc->regs->ctl);
			set_rde = -1;
		} else if (readl(&udc->regs->sts)
				& AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) {
			/*
			 * if fifo empty setup polling, do not just
			 * open the fifo
			 */
			udc_timer.expires = jiffies + HZ/UDC_RDE_TIMER_DIV;
			if (!stop_timer) {
				add_timer(&udc_timer);
			}
		} else {
			/*
			 * fifo contains data now, setup timer for opening
			 * the fifo when timer expires to be able to receive
			 * setup packets, when data packets gets queued by
			 * gadget layer then timer will forced to expire with
			 * set_rde=0 (RDE is set in udc_queue())
			 */
			set_rde++;
			/* debug: lhadmot_timer_start = 221070 */
			udc_timer.expires = jiffies + HZ*UDC_RDE_TIMER_SECONDS;
			if (!stop_timer) {
				add_timer(&udc_timer);
			}
		}

	} else
		set_rde = -1; /* RDE was set by udc_queue() */
	spin_unlock_irq(&udc_irq_spinlock);
	if (stop_timer)
		complete(&on_exit);
}
 
/* Handle halt state, used in stall poll timer */
static void udc_handle_halt_state(struct udc_ep *ep)
{
	u32 tmp;
	/* set stall as long not halted */
	if (ep->halted == 1) {
		tmp = readl(&ep->regs->ctl);
		/* STALL cleared ? */
		if (!(tmp & AMD_BIT(UDC_EPCTL_S))) {
			/*
			 * FIXME: MSC spec requires that stall remains
			 * even on receiving of CLEAR_FEATURE HALT. So
			 * we would set STALL again here to be compliant.
			 * But with current mass storage drivers this does
			 * not work (would produce endless host retries).
			 * So we clear halt on CLEAR_FEATURE.
			 *
			DBG(ep->dev, "ep %d: set STALL again\n", ep->num);
			tmp |= AMD_BIT(UDC_EPCTL_S);
			writel(tmp, &ep->regs->ctl);*/

			/* clear NAK by writing CNAK */
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &ep->regs->ctl);
			ep->halted = 0;
			UDC_QUEUE_CNAK(ep, ep->num);
		}
	}
}
 
1804 /* Stall timer callback to poll S bit and set it again after */
 
1805 static void udc_pollstall_timer_function(unsigned long v)
 
1810         spin_lock_irq(&udc_stall_spinlock);
 
1812          * only one IN and OUT endpoints are handled
 
1815         ep = &udc->ep[UDC_EPIN_IX];
 
1816         udc_handle_halt_state(ep);
 
1819         /* OUT poll stall */
 
1820         ep = &udc->ep[UDC_EPOUT_IX];
 
1821         udc_handle_halt_state(ep);
 
1825         /* setup timer again when still halted */
 
1826         if (!stop_pollstall_timer && halted) {
 
1827                 udc_pollstall_timer.expires = jiffies +
 
1828                                         HZ * UDC_POLLSTALL_TIMER_USECONDS
 
1830                 add_timer(&udc_pollstall_timer);
 
1832         spin_unlock_irq(&udc_stall_spinlock);
 
1834         if (stop_pollstall_timer)
 
1835                 complete(&on_pollstall_exit);
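 
/*
 * Sketch (assumed shape of the bookkeeping elided above): "halted" stays
 * set while either of the two handled endpoints is still halted, which is
 * what keeps this poll timer re-arming.
 */
halted = udc->ep[UDC_EPIN_IX].halted | udc->ep[UDC_EPOUT_IX].halted;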
 
1838 /* Inits endpoint 0 so that SETUP packets are processed */
 
1839 static void activate_control_endpoints(struct udc *dev)
 
1843         DBG(dev, "activate_control_endpoints\n");
 
1846         tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
 
1847         tmp |= AMD_BIT(UDC_EPCTL_F);
 
1848         writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
 
1850         /* set ep0 directions */
 
1851         dev->ep[UDC_EP0IN_IX].in = 1;
 
1852         dev->ep[UDC_EP0OUT_IX].in = 0;
 
1854         /* set buffer size (tx fifo entries) of EP0_IN */
 
1855         tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);
 
1856         if (dev->gadget.speed == USB_SPEED_FULL)
 
1857                 tmp = AMD_ADDBITS(tmp, UDC_FS_EPIN0_BUFF_SIZE,
 
1858                                         UDC_EPIN_BUFF_SIZE);
 
1859         else if (dev->gadget.speed == USB_SPEED_HIGH)
 
1860                 tmp = AMD_ADDBITS(tmp, UDC_EPIN0_BUFF_SIZE,
 
1861                                         UDC_EPIN_BUFF_SIZE);
 
1862         writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);
 
1864         /* set max packet size of EP0_IN */
 
1865         tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);
 
1866         if (dev->gadget.speed == USB_SPEED_FULL)
 
1867                 tmp = AMD_ADDBITS(tmp, UDC_FS_EP0IN_MAX_PKT_SIZE,
 
1868                                         UDC_EP_MAX_PKT_SIZE);
 
1869         else if (dev->gadget.speed == USB_SPEED_HIGH)
 
1870                 tmp = AMD_ADDBITS(tmp, UDC_EP0IN_MAX_PKT_SIZE,
 
1871                                 UDC_EP_MAX_PKT_SIZE);
 
1872         writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);
 
1874         /* set max packet size of EP0_OUT */
 
1875         tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);
 
1876         if (dev->gadget.speed == USB_SPEED_FULL)
 
1877                 tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
 
1878                                         UDC_EP_MAX_PKT_SIZE);
 
1879         else if (dev->gadget.speed == USB_SPEED_HIGH)
 
1880                 tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
 
1881                                         UDC_EP_MAX_PKT_SIZE);
 
1882         writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);
 
1884         /* set max packet size of EP0 in UDC CSR */
 
1885         tmp = readl(&dev->csr->ne[0]);
 
1886         if (dev->gadget.speed == USB_SPEED_FULL)
 
1887                 tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
 
1888                                         UDC_CSR_NE_MAX_PKT);
 
1889         else if (dev->gadget.speed == USB_SPEED_HIGH)
 
1890                 tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
 
1891                                         UDC_CSR_NE_MAX_PKT);
 
1892         writel(tmp, &dev->csr->ne[0]);
 
1895                 dev->ep[UDC_EP0OUT_IX].td->status |=
 
1896                         AMD_BIT(UDC_DMA_OUT_STS_L);
 
1897                 /* write dma desc address */
 
1898                 writel(dev->ep[UDC_EP0OUT_IX].td_stp_dma,
 
1899                         &dev->ep[UDC_EP0OUT_IX].regs->subptr);
 
1900                 writel(dev->ep[UDC_EP0OUT_IX].td_phys,
 
1901                         &dev->ep[UDC_EP0OUT_IX].regs->desptr);
 
1902                 /* stop RDE timer */
 
1903                 if (timer_pending(&udc_timer)) {
 
1905                         mod_timer(&udc_timer, jiffies - 1);
 
1907                 /* stop pollstall timer */
 
1908                 if (timer_pending(&udc_pollstall_timer)) {
 
1909                         mod_timer(&udc_pollstall_timer, jiffies - 1);
 
1912                 tmp = readl(&dev->regs->ctl);
 
1913                 tmp |= AMD_BIT(UDC_DEVCTL_MODE)
 
1914                                 | AMD_BIT(UDC_DEVCTL_RDE)
 
1915                                 | AMD_BIT(UDC_DEVCTL_TDE);
 
1916                 if (use_dma_bufferfill_mode) {
 
1917                         tmp |= AMD_BIT(UDC_DEVCTL_BF);
 
1918                 } else if (use_dma_ppb_du) {
 
1919                         tmp |= AMD_BIT(UDC_DEVCTL_DU);
 
1921                 writel(tmp, &dev->regs->ctl);
 
1924         /* clear NAK by writing CNAK for EP0IN */
 
1925         tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
 
1926         tmp |= AMD_BIT(UDC_EPCTL_CNAK);
 
1927         writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
 
1928         dev->ep[UDC_EP0IN_IX].naking = 0;
 
1929         UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);
 
1931         /* clear NAK by writing CNAK for EP0OUT */
 
1932         tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
 
1933         tmp |= AMD_BIT(UDC_EPCTL_CNAK);
 
1934         writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
 
1935         dev->ep[UDC_EP0OUT_IX].naking = 0;
 
1936         UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
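 
/*
 * Sketch of the bitfield helpers from amd5536udc.h used above. The
 * SKETCH_ names and the <FIELD>_MASK / <FIELD>_OFS expansion are
 * assumptions of this sketch, not the verbatim header definitions:
 */
#define SKETCH_BIT(pos)                 (1 << (pos))
#define SKETCH_GETBITS(val, F)          (((val) & F##_MASK) >> F##_OFS)
#define SKETCH_ADDBITS(val, bits, F) \
        (((val) & ~F##_MASK) | (((bits) << F##_OFS) & F##_MASK))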
 
1939 /* Make endpoint 0 ready for control traffic */
 
1940 static int setup_ep0(struct udc *dev)
 
1942         activate_control_endpoints(dev);
 
1943         /* enable ep0 interrupts */
 
1944         udc_enable_ep0_interrupts(dev);
 
1945         /* enable device setup interrupts */
 
1946         udc_enable_dev_setup_interrupts(dev);
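 
/*
 * Sketch: the unmask idiom these two interrupt helpers are assumed to
 * apply to &dev->regs->irqmsk / &dev->regs->ep_irqmsk (it matches the
 * explicit irqmsk manipulation in udc_dev_isr() below); clearing a mask
 * bit enables the interrupt.
 */
tmp = readl(&dev->regs->irqmsk);
tmp &= AMD_UNMASK_BIT(UDC_DEVINT_US);   /* unmask = enable */
writel(tmp, &dev->regs->irqmsk);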
 
1951 /* Called by gadget driver to register itself */
 
1952 int usb_gadget_register_driver(struct usb_gadget_driver *driver)
 
1954         struct udc              *dev = udc;
 
1958         if (!driver || !driver->bind || !driver->setup
 
1959                         || driver->speed != USB_SPEED_HIGH)
 
1966         driver->driver.bus = NULL;
 
1967         dev->driver = driver;
 
1968         dev->gadget.dev.driver = &driver->driver;
 
1970         retval = driver->bind(&dev->gadget);
 
1972         /* Some gadget drivers use both ep0 directions.
 
1973          * NOTE: to gadget driver, ep0 is just one endpoint...
 
1975         dev->ep[UDC_EP0OUT_IX].ep.driver_data =
 
1976                 dev->ep[UDC_EP0IN_IX].ep.driver_data;
 
1979                 DBG(dev, "binding to %s returning %d\n",
 
1980                                 driver->driver.name, retval);
 
1982                 dev->gadget.dev.driver = NULL;
 
1986         /* get ready for ep0 traffic */
 
1990         tmp = readl(&dev->regs->ctl);
 
1991         tmp = tmp & AMD_CLEAR_BIT(UDC_DEVCTL_SD);
 
1992         writel(tmp, &dev->regs->ctl);
 
1998 EXPORT_SYMBOL(usb_gadget_register_driver);
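 
/*
 * Usage sketch (hypothetical gadget driver, not part of this file): the
 * minimum a caller must provide to pass the checks above; the my_* names
 * are illustrative only.
 */
static int my_bind(struct usb_gadget *gadget);
static int my_setup(struct usb_gadget *gadget,
                const struct usb_ctrlrequest *ctrl);
static void my_unbind(struct usb_gadget *gadget);
static void my_disconnect(struct usb_gadget *gadget);

static struct usb_gadget_driver my_gadget_driver = {
        .speed          = USB_SPEED_HIGH,       /* anything else is rejected */
        .bind           = my_bind,              /* required */
        .setup          = my_setup,             /* required */
        .unbind         = my_unbind,            /* required for unregister */
        .disconnect     = my_disconnect,
        .driver         = { .name = "my_gadget" },
};

static int __init my_gadget_init(void)
{
        return usb_gadget_register_driver(&my_gadget_driver);
}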
 
2000 /* shutdown requests and disconnect from gadget */
 
2001 static void
2002 shutdown(struct udc *dev, struct usb_gadget_driver *driver)
 
2003 __releases(dev->lock)
 
2004 __acquires(dev->lock)
 
2008         /* empty queues and init hardware */
 
2009         udc_basic_init(dev);
 
2010         for (tmp = 0; tmp < UDC_EP_NUM; tmp++) {
 
2011                 empty_req_queue(&dev->ep[tmp]);
 
2014         if (dev->gadget.speed != USB_SPEED_UNKNOWN) {
 
2015                 spin_unlock(&dev->lock);
 
2016                 driver->disconnect(&dev->gadget);
 
2017                 spin_lock(&dev->lock);
 
2020         udc_setup_endpoints(dev);
 
2023 /* Called by gadget driver to unregister itself */
 
2024 int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
 
2026         struct udc      *dev = udc;
 
2027         unsigned long   flags;
 
2032         if (!driver || driver != dev->driver || !driver->unbind)
 
2035         spin_lock_irqsave(&dev->lock, flags);
 
2036         udc_mask_unused_interrupts(dev);
 
2037         shutdown(dev, driver);
 
2038         spin_unlock_irqrestore(&dev->lock, flags);
 
2040         driver->unbind(&dev->gadget);
 
2044         tmp = readl(&dev->regs->ctl);
 
2045         tmp |= AMD_BIT(UDC_DEVCTL_SD);
 
2046         writel(tmp, &dev->regs->ctl);
 
2049         DBG(dev, "%s: unregistered\n", driver->driver.name);
 
2053 EXPORT_SYMBOL(usb_gadget_unregister_driver);
 
2056 /* Clear pending NAK bits */
 
2057 static void udc_process_cnak_queue(struct udc *dev)
 
2063         DBG(dev, "CNAK pending queue processing\n");
 
2064         for (tmp = 0; tmp < UDC_EPIN_NUM_USED; tmp++) {
 
2065                 if (cnak_pending & (1 << tmp)) {
 
2066                         DBG(dev, "CNAK pending for ep%d\n", tmp);
 
2067                         /* clear NAK by writing CNAK */
 
2068                         reg = readl(&dev->ep[tmp].regs->ctl);
 
2069                         reg |= AMD_BIT(UDC_EPCTL_CNAK);
 
2070                         writel(reg, &dev->ep[tmp].regs->ctl);
 
2071                         dev->ep[tmp].naking = 0;
 
2072                         UDC_QUEUE_CNAK(&dev->ep[tmp], dev->ep[tmp].num);
 
2075         /* ...  and ep0out */
 
2076         if (cnak_pending & (1 << UDC_EP0OUT_IX)) {
 
2077                 DBG(dev, "CNAK pending for ep%d\n", UDC_EP0OUT_IX);
 
2078                 /* clear NAK by writing CNAK */
 
2079                 reg = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
 
2080                 reg |= AMD_BIT(UDC_EPCTL_CNAK);
 
2081                 writel(reg, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
 
2082                 dev->ep[UDC_EP0OUT_IX].naking = 0;
 
2083                 UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX],
 
2084                                 dev->ep[UDC_EP0OUT_IX].num);
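 
/*
 * UDC_QUEUE_CNAK (amd5536udc.h) records endpoints whose CNAK write did
 * not take effect so this function can retry them. A plausible shape,
 * assuming the cnak_pending bitmask used above (sketch only):
 */
#define SKETCH_QUEUE_CNAK(ep, num) \
        do { \
                if ((ep)->naking) \
                        cnak_pending |= 1 << (num); \
        } while (0)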
 
2088 /* Enable RX DMA after a setup packet */
 
2089 static void udc_ep0_set_rde(struct udc *dev)
 
2093                  * only enable RXDMA when no data endpoint is enabled
2094                  * or data is already queued
 
2096                 if (!dev->data_ep_enabled || dev->data_ep_queued) {
 
2100                          * set up timer for enabling RDE (so RXFIFO DMA
 
2101                          * is not enabled too early for data endpoints)
 
2103                         if (set_rde != 0 && !timer_pending(&udc_timer)) {
 
2105                                         jiffies + HZ/UDC_RDE_TIMER_DIV;
 
2108                                         add_timer(&udc_timer);
 
2116 /* Interrupt handler for data OUT traffic */
 
2117 static irqreturn_t udc_data_out_isr(struct udc *dev, int ep_ix)
 
2119         irqreturn_t             ret_val = IRQ_NONE;
 
2122         struct udc_request      *req;
 
2124         struct udc_data_dma     *td = NULL;
 
2127         VDBG(dev, "ep%d irq\n", ep_ix);
 
2128         ep = &dev->ep[ep_ix];
 
2130         tmp = readl(&ep->regs->sts);
 
2133                 if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
 
2134                         DBG(dev, "BNA ep%dout occurred - DESPTR = %x\n",
 
2135                                         ep->num, readl(&ep->regs->desptr));
 
2137                         writel(tmp | AMD_BIT(UDC_EPSTS_BNA), &ep->regs->sts);
 
2138                         if (!ep->cancel_transfer)
 
2139                                 ep->bna_occurred = 1;
 
2141                                 ep->cancel_transfer = 0;
 
2142                         ret_val = IRQ_HANDLED;
 
2147         if (tmp & AMD_BIT(UDC_EPSTS_HE)) {
 
2148                 dev_err(&dev->pdev->dev, "HE ep%dout occurred\n", ep->num);
 
2151                 writel(tmp | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
 
2152                 ret_val = IRQ_HANDLED;
 
2156         if (!list_empty(&ep->queue)) {
 
2159                 req = list_entry(ep->queue.next,
 
2160                         struct udc_request, queue);
 
2163                 udc_rxfifo_pending = 1;
 
2165         VDBG(dev, "req = %p\n", req);
 
2170                 if (req && udc_rxfifo_read(ep, req)) {
 
2171                         ret_val = IRQ_HANDLED;
 
2174                         complete_req(ep, req, 0);
 
2176                         if (!list_empty(&ep->queue) && !ep->halted) {
 
2177                                 req = list_entry(ep->queue.next,
 
2178                                         struct udc_request, queue);
 
2184         } else if (!ep->cancel_transfer && req != NULL) {
 
2185                 ret_val = IRQ_HANDLED;
 
2187                 /* check for DMA done */
 
2189                         dma_done = AMD_GETBITS(req->td_data->status,
 
2190                                                 UDC_DMA_OUT_STS_BS);
 
2191                 /* packet per buffer mode - rx bytes */
 
2194                          * if BNA occurred then recover desc. from
 
2197                         if (ep->bna_occurred) {
 
2198                                 VDBG(dev, "Recover desc. from BNA dummy\n");
 
2199                                 memcpy(req->td_data, ep->bna_dummy_req->td_data,
 
2200                                                 sizeof(struct udc_data_dma));
 
2201                                 ep->bna_occurred = 0;
 
2202                                 udc_init_bna_dummy(ep->req);
 
2204                         td = udc_get_last_dma_desc(req);
 
2205                         dma_done = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_BS);
 
2207                 if (dma_done == UDC_DMA_OUT_STS_BS_DMA_DONE) {
 
2208                         /* buffer fill mode - rx bytes */
 
2210                                 /* number of received bytes */
 
2211                                 count = AMD_GETBITS(req->td_data->status,
 
2212                                                 UDC_DMA_OUT_STS_RXBYTES);
 
2213                                 VDBG(dev, "rx bytes=%u\n", count);
 
2214                         /* packet per buffer mode - rx bytes */
 
2216                                 VDBG(dev, "req->td_data=%p\n", req->td_data);
 
2217                                 VDBG(dev, "last desc = %p\n", td);
 
2218                                 /* number of received bytes */
 
2219                                 if (use_dma_ppb_du) {
 
2220                                         /* every desc. counts bytes */
 
2221                                         count = udc_get_ppbdu_rxbytes(req);
 
2223                                         /* last desc. counts bytes */
 
2224                                         count = AMD_GETBITS(td->status,
 
2225                                                 UDC_DMA_OUT_STS_RXBYTES);
 
2226                                         if (!count && req->req.length
 
2227                                                 == UDC_DMA_MAXPACKET) {
 
2229                                                  * on 64k packets the RXBYTES
 
2230                                                  * field is zero
 
2232                                                 count = UDC_DMA_MAXPACKET;
 
2235                                 VDBG(dev, "last desc rx bytes=%u\n", count);
 
2238                         tmp = req->req.length - req->req.actual;
 
2240                                 if ((tmp % ep->ep.maxpacket) != 0) {
 
2241                                         DBG(dev, "%s: rx %db, space=%db\n",
 
2242                                                 ep->ep.name, count, tmp);
 
2243                                         req->req.status = -EOVERFLOW;
 
2247                         req->req.actual += count;
 
2249                         /* complete request */
 
2250                         complete_req(ep, req, 0);
 
2253                         if (!list_empty(&ep->queue) && !ep->halted) {
 
2254                                 req = list_entry(ep->queue.next,
 
2258                                  * DMA may be already started by udc_queue()
 
2259                                  * called by the gadget driver's completion
 
2260                                  * routine. This happens when queue
 
2261                                  * holds one request only.
 
2263                                 if (req->dma_going == 0) {
 
2265                                         if (prep_dma(ep, req, GFP_ATOMIC) != 0)
 
2267                                         /* write desc pointer */
 
2268                                         writel(req->td_phys,
 
2276                                  * implant BNA dummy descriptor to allow
 
2277                                  * RXFIFO opening by RDE
 
2279                                 if (ep->bna_dummy_req) {
 
2280                                         /* write desc pointer */
 
2281                                         writel(ep->bna_dummy_req->td_phys,
 
2283                                         ep->bna_occurred = 0;
 
2287                                  * schedule timer for setting RDE if queue
 
2288                                  * remains empty, to allow ep0 packets to pass
 
2292                                                 && !timer_pending(&udc_timer)) {
 
2295                                                 + HZ*UDC_RDE_TIMER_SECONDS;
 
2298                                                 add_timer(&udc_timer);
 
2301                                 if (ep->num != UDC_EP0OUT_IX)
 
2302                                         dev->data_ep_queued = 0;
 
2307                         * RX DMA must be re-enabled for each desc in PPBDU mode
 
2308                         * and must be enabled for PPBNDU mode in case of BNA
 
2313         } else if (ep->cancel_transfer) {
 
2314                 ret_val = IRQ_HANDLED;
 
2315                 ep->cancel_transfer = 0;
 
2318         /* check pending CNAKS */
 
2320                 /* CNAk processing when rxfifo empty only */
 
2321                 if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) {
 
2322                         udc_process_cnak_queue(dev);
 
2326         /* clear OUT bits in ep status */
 
2327         writel(UDC_EPSTS_OUT_CLEAR, &ep->regs->sts);
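 
/*
 * Sketch of what udc_get_ppbdu_rxbytes() (defined earlier in this file)
 * has to compute for the PPB-DU case above: every descriptor in the chain
 * counts its own bytes, so the RXBYTES fields are summed up to and
 * including the descriptor carrying the L (last) bit. Traversing the
 * chain via phys_to_virt(td->next) is an assumption of this sketch.
 */
static u32 ppbdu_rxbytes_sketch(struct udc_request *req)
{
        struct udc_data_dma *td = req->td_data;
        u32 count = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_RXBYTES);

        while (td && !(td->status & AMD_BIT(UDC_DMA_OUT_STS_L))) {
                td = phys_to_virt(td->next);
                count += AMD_GETBITS(td->status, UDC_DMA_OUT_STS_RXBYTES);
        }
        return count;
}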
 
2332 /* Interrupt handler for data IN traffic */
 
2333 static irqreturn_t udc_data_in_isr(struct udc *dev, int ep_ix)
 
2335         irqreturn_t ret_val = IRQ_NONE;
 
2339         struct udc_request *req;
 
2340         struct udc_data_dma *td;
 
2344         ep = &dev->ep[ep_ix];
 
2346         epsts = readl(&ep->regs->sts);
 
2349                 if (epsts & AMD_BIT(UDC_EPSTS_BNA)) {
 
2350                         dev_err(&dev->pdev->dev,
 
2351                                 "BNA ep%din occurred - DESPTR = %08lx\n",
 
2353                                 (unsigned long) readl(&ep->regs->desptr));
 
2356                         writel(epsts, &ep->regs->sts);
 
2357                         ret_val = IRQ_HANDLED;
 
2362         if (epsts & AMD_BIT(UDC_EPSTS_HE)) {
 
2363                 dev_err(&dev->pdev->dev,
 
2364                         "HE ep%din occurred - DESPTR = %08lx\n",
 
2365                         ep->num, (unsigned long) readl(&ep->regs->desptr));
 
2368                 writel(epsts | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
 
2369                 ret_val = IRQ_HANDLED;
 
2373         /* DMA completion */
 
2374         if (epsts & AMD_BIT(UDC_EPSTS_TDC)) {
 
2375                 VDBG(dev, "TDC set- completion\n");
 
2376                 ret_val = IRQ_HANDLED;
 
2377                 if (!ep->cancel_transfer && !list_empty(&ep->queue)) {
 
2378                         req = list_entry(ep->queue.next,
 
2379                                         struct udc_request, queue);
 
2382                                  * length bytes transferred
 
2383                                  * check dma done of last desc. in PPBDU mode
 
2385                                 if (use_dma_ppb_du) {
 
2386                                         td = udc_get_last_dma_desc(req);
 
2389                                                         AMD_GETBITS(td->status,
 
2391                                                 /* don't care DMA done */
 
2396                                         /* assume all bytes transferred */
 
2397                                         req->req.actual = req->req.length;
 
2400                                 if (req->req.actual == req->req.length) {
 
2402                                         complete_req(ep, req, 0);
 
2404                                         /* further request available ? */
 
2405                                         if (list_empty(&ep->queue)) {
 
2406                                                 /* disable interrupt */
 
2408                                                         &dev->regs->ep_irqmsk);
 
2409                                                 tmp |= AMD_BIT(ep->num);
 
2411                                                         &dev->regs->ep_irqmsk);
 
2417                 ep->cancel_transfer = 0;
 
2421          * status reg has IN bit set and TDC not set (if TDC was handled,
 
2422          * IN must not be handled; UDC defect?)
 
2424         if ((epsts & AMD_BIT(UDC_EPSTS_IN))
 
2425                         && !(epsts & AMD_BIT(UDC_EPSTS_TDC))) {
 
2426                 ret_val = IRQ_HANDLED;
 
2427                 if (!list_empty(&ep->queue)) {
 
2429                         req = list_entry(ep->queue.next,
 
2430                                         struct udc_request, queue);
 
2434                                 udc_txfifo_write(ep, &req->req);
 
2435                                 len = req->req.length - req->req.actual;
 
2436                                 if (len > ep->ep.maxpacket)
 
2437                                         len = ep->ep.maxpacket;
 
2438                                 req->req.actual += len;
 
2439                                 if (req->req.actual == req->req.length
 
2440                                         || (len != ep->ep.maxpacket)) {
 
2442                                         complete_req(ep, req, 0);
 
2445                         } else if (req && !req->dma_going) {
 
2446                                 VDBG(dev, "IN DMA : req=%p req->td_data=%p\n",
 
2453                                          * unset L bit of first desc.
 
2456                                         if (use_dma_ppb && req->req.length >
 
2458                                                 req->td_data->status &=
 
2463                                         /* write desc pointer */
 
2464                                         writel(req->td_phys, &ep->regs->desptr);
 
2466                                         /* set HOST READY */
 
2467                                         req->td_data->status =
 
2469                                                 req->td_data->status,
 
2470                                                 UDC_DMA_IN_STS_BS_HOST_READY,
 
2473                                         /* set poll demand bit */
 
2474                                         tmp = readl(&ep->regs->ctl);
 
2475                                         tmp |= AMD_BIT(UDC_EPCTL_P);
 
2476                                         writel(tmp, &ep->regs->ctl);
 
2482         /* clear status bits */
 
2483         writel(epsts, &ep->regs->sts);
 
2490 /* Interrupt handler for Control OUT traffic */
 
2491 static irqreturn_t udc_control_out_isr(struct udc *dev)
 
2492 __releases(dev->lock)
 
2493 __acquires(dev->lock)
 
2495         irqreturn_t ret_val = IRQ_NONE;
 
2497         int setup_supported;
 
2501         struct udc_ep   *ep_tmp;
 
2503         ep = &dev->ep[UDC_EP0OUT_IX];
 
2506         writel(AMD_BIT(UDC_EPINT_OUT_EP0), &dev->regs->ep_irqsts);
 
2508         tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
 
2509         /* check BNA and clear if set */
 
2510         if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
 
2511                 VDBG(dev, "ep0: BNA set\n");
 
2512                 writel(AMD_BIT(UDC_EPSTS_BNA),
 
2513                         &dev->ep[UDC_EP0OUT_IX].regs->sts);
 
2514                 ep->bna_occurred = 1;
 
2515                 ret_val = IRQ_HANDLED;
 
2519         /* type of data: SETUP or DATA 0 bytes */
 
2520         tmp = AMD_GETBITS(tmp, UDC_EPSTS_OUT);
 
2521         VDBG(dev, "data_typ = %x\n", tmp);
 
2524         if (tmp == UDC_EPSTS_OUT_SETUP) {
 
2525                 ret_val = IRQ_HANDLED;
 
2527                 ep->dev->stall_ep0in = 0;
 
2528                 dev->waiting_zlp_ack_ep0in = 0;
 
2530                 /* set NAK for EP0_IN */
 
2531                 tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
 
2532                 tmp |= AMD_BIT(UDC_EPCTL_SNAK);
 
2533                 writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
 
2534                 dev->ep[UDC_EP0IN_IX].naking = 1;
 
2535                 /* get setup data */
 
2538                         /* clear OUT bits in ep status */
 
2539                         writel(UDC_EPSTS_OUT_CLEAR,
 
2540                                 &dev->ep[UDC_EP0OUT_IX].regs->sts);
 
2542                         setup_data.data[0] =
 
2543                                 dev->ep[UDC_EP0OUT_IX].td_stp->data12;
 
2544                         setup_data.data[1] =
 
2545                                 dev->ep[UDC_EP0OUT_IX].td_stp->data34;
 
2546                         /* set HOST READY */
 
2547                         dev->ep[UDC_EP0OUT_IX].td_stp->status =
 
2548                                         UDC_DMA_STP_STS_BS_HOST_READY;
 
2551                         udc_rxfifo_read_dwords(dev, setup_data.data, 2);
 
2554                 /* determine direction of control data */
 
2555                 if ((setup_data.request.bRequestType & USB_DIR_IN) != 0) {
 
2556                         dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
 
2558                         udc_ep0_set_rde(dev);
 
2561                         dev->gadget.ep0 = &dev->ep[UDC_EP0OUT_IX].ep;
 
2563                          * implant BNA dummy descriptor to allow RXFIFO opening
 
2566                         if (ep->bna_dummy_req) {
 
2567                                 /* write desc pointer */
 
2568                                 writel(ep->bna_dummy_req->td_phys,
 
2569                                         &dev->ep[UDC_EP0OUT_IX].regs->desptr);
 
2570                                 ep->bna_occurred = 0;
 
2574                         dev->ep[UDC_EP0OUT_IX].naking = 1;
 
2576                          * set up timer for enabling RDE (so RXFIFO DMA
 
2577                          * is not enabled too early for data)
 
2580                         if (!timer_pending(&udc_timer)) {
 
2581                                 udc_timer.expires = jiffies +
 
2582                                                         HZ/UDC_RDE_TIMER_DIV;
 
2584                                         add_timer(&udc_timer);
 
2590                  * mass storage reset must be processed here because
 
2591                  * next packet may be a CLEAR_FEATURE HALT which would not
 
2592                  * clear the stall bit when no STALL handshake was received
 
2593                  * before (autostall can cause this)
 
2595                 if (setup_data.data[0] == UDC_MSCRES_DWORD0
 
2596                                 && setup_data.data[1] == UDC_MSCRES_DWORD1) {
 
2597                         DBG(dev, "MSC Reset\n");
 
2600                          * only one IN and one OUT endpoint are handled
 
2602                         ep_tmp = &udc->ep[UDC_EPIN_IX];
 
2603                         udc_set_halt(&ep_tmp->ep, 0);
 
2604                         ep_tmp = &udc->ep[UDC_EPOUT_IX];
 
2605                         udc_set_halt(&ep_tmp->ep, 0);
 
2608                 /* call gadget with setup data received */
 
2609                 spin_unlock(&dev->lock);
 
2610                 setup_supported = dev->driver->setup(&dev->gadget,
 
2611                                                 &setup_data.request);
 
2612                 spin_lock(&dev->lock);
 
2614                 tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
 
2615                 /* ep0 in returns data (not zlp) on IN phase */
 
2616                 if (setup_supported >= 0 && setup_supported <
 
2617                                 UDC_EP0IN_MAXPACKET) {
 
2618                         /* clear NAK by writing CNAK in EP0_IN */
 
2619                         tmp |= AMD_BIT(UDC_EPCTL_CNAK);
 
2620                         writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
 
2621                         dev->ep[UDC_EP0IN_IX].naking = 0;
 
2622                         UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);
 
2624                 /* if unsupported request then stall */
 
2625                 } else if (setup_supported < 0) {
 
2626                         tmp |= AMD_BIT(UDC_EPCTL_S);
 
2627                         writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
 
2629                         dev->waiting_zlp_ack_ep0in = 1;
 
2632                 /* clear NAK by writing CNAK in EP0_OUT */
 
2634                         tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
 
2635                         tmp |= AMD_BIT(UDC_EPCTL_CNAK);
 
2636                         writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
 
2637                         dev->ep[UDC_EP0OUT_IX].naking = 0;
 
2638                         UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
 
2642                         /* clear OUT bits in ep status */
 
2643                         writel(UDC_EPSTS_OUT_CLEAR,
 
2644                                 &dev->ep[UDC_EP0OUT_IX].regs->sts);
 
2647         /* data packet 0 bytes */
 
2648         } else if (tmp == UDC_EPSTS_OUT_DATA) {
 
2649                 /* clear OUT bits in ep status */
 
2650                 writel(UDC_EPSTS_OUT_CLEAR, &dev->ep[UDC_EP0OUT_IX].regs->sts);
 
2652                 /* get setup data: only 0 packet */
 
2654                         /* no req if 0 packet, just reactivate */
 
2655                         if (list_empty(&dev->ep[UDC_EP0OUT_IX].queue)) {
 
2658                                 /* set HOST READY */
 
2659                                 dev->ep[UDC_EP0OUT_IX].td->status =
 
2661                                         dev->ep[UDC_EP0OUT_IX].td->status,
 
2662                                         UDC_DMA_OUT_STS_BS_HOST_READY,
 
2663                                         UDC_DMA_OUT_STS_BS);
 
2665                                 udc_ep0_set_rde(dev);
 
2666                                 ret_val = IRQ_HANDLED;
 
2670                                 ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
 
2671                                 /* re-program desc. pointer for possible ZLPs */
 
2672                                 writel(dev->ep[UDC_EP0OUT_IX].td_phys,
 
2673                                         &dev->ep[UDC_EP0OUT_IX].regs->desptr);
 
2675                                 udc_ep0_set_rde(dev);
 
2679                         /* number of received bytes */
 
2680                         count = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
 
2681                         count = AMD_GETBITS(count, UDC_EPSTS_RX_PKT_SIZE);
 
2682                         /* out data for fifo mode not working */
 
2685                         /* 0 packet or real data ? */
 
2687                                 ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
 
2689                                 /* dummy read confirm */
 
2690                                 readl(&dev->ep[UDC_EP0OUT_IX].regs->confirm);
 
2691                                 ret_val = IRQ_HANDLED;
 
2696         /* check pending CNAKS */
 
2698                 /* CNAk processing when rxfifo empty only */
 
2699                 if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) {
 
2700                         udc_process_cnak_queue(dev);
 
2708 /* Interrupt handler for Control IN traffic */
 
2709 static irqreturn_t udc_control_in_isr(struct udc *dev)
 
2711         irqreturn_t ret_val = IRQ_NONE;
 
2714         struct udc_request *req;
 
2717         ep = &dev->ep[UDC_EP0IN_IX];
 
2720         writel(AMD_BIT(UDC_EPINT_IN_EP0), &dev->regs->ep_irqsts);
 
2722         tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->sts);
 
2723         /* DMA completion */
 
2724         if (tmp & AMD_BIT(UDC_EPSTS_TDC)) {
 
2725                 VDBG(dev, "isr: TDC clear\n");
 
2726                 ret_val = IRQ_HANDLED;
 
2729                 writel(AMD_BIT(UDC_EPSTS_TDC),
 
2730                                 &dev->ep[UDC_EP0IN_IX].regs->sts);
 
2732         /* status reg has IN bit set ? */
 
2733         } else if (tmp & AMD_BIT(UDC_EPSTS_IN)) {
 
2734                 ret_val = IRQ_HANDLED;
 
2738                         writel(AMD_BIT(UDC_EPSTS_IN),
 
2739                                 &dev->ep[UDC_EP0IN_IX].regs->sts);
 
2741                 if (dev->stall_ep0in) {
 
2742                         DBG(dev, "stall ep0in\n");
 
2744                         tmp = readl(&ep->regs->ctl);
 
2745                         tmp |= AMD_BIT(UDC_EPCTL_S);
 
2746                         writel(tmp, &ep->regs->ctl);
 
2748                         if (!list_empty(&ep->queue)) {
 
2750                                 req = list_entry(ep->queue.next,
 
2751                                                 struct udc_request, queue);
 
2754                                         /* write desc pointer */
 
2755                                         writel(req->td_phys, &ep->regs->desptr);
 
2756                                         /* set HOST READY */
 
2757                                         req->td_data->status =
 
2759                                                 req->td_data->status,
 
2760                                                 UDC_DMA_STP_STS_BS_HOST_READY,
 
2761                                                 UDC_DMA_STP_STS_BS);
 
2763                                         /* set poll demand bit */
 
2765                                         readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
 
2766                                         tmp |= AMD_BIT(UDC_EPCTL_P);
 
2768                                         &dev->ep[UDC_EP0IN_IX].regs->ctl);
 
2770                                         /* all bytes will be transferred */
 
2771                                         req->req.actual = req->req.length;
 
2774                                         complete_req(ep, req, 0);
 
2778                                         udc_txfifo_write(ep, &req->req);
 
2780                                         /* length bytes transferred */
 
2781                                         len = req->req.length - req->req.actual;
 
2782                                         if (len > ep->ep.maxpacket)
 
2783                                                 len = ep->ep.maxpacket;
 
2785                                         req->req.actual += len;
 
2786                                         if (req->req.actual == req->req.length
 
2787                                                 || (len != ep->ep.maxpacket)) {
 
2789                                                 complete_req(ep, req, 0);
 
2796                 dev->stall_ep0in = 0;
 
2799                         writel(AMD_BIT(UDC_EPSTS_IN),
 
2800                                 &dev->ep[UDC_EP0IN_IX].regs->sts);
 
2808 /* Interrupt handler for global device events */
 
2809 static irqreturn_t udc_dev_isr(struct udc *dev, u32 dev_irq)
 
2810 __releases(dev->lock)
 
2811 __acquires(dev->lock)
 
2813         irqreturn_t ret_val = IRQ_NONE;
 
2820         /* SET_CONFIG irq ? */
 
2821         if (dev_irq & AMD_BIT(UDC_DEVINT_SC)) {
 
2822                 ret_val = IRQ_HANDLED;
 
2824                 /* read config value */
 
2825                 tmp = readl(&dev->regs->sts);
 
2826                 cfg = AMD_GETBITS(tmp, UDC_DEVSTS_CFG);
 
2827                 DBG(dev, "SET_CONFIG interrupt: config=%d\n", cfg);
 
2828                 dev->cur_config = cfg;
 
2829                 dev->set_cfg_not_acked = 1;
 
2831                 /* make usb request for gadget driver */
 
2832                 memset(&setup_data, 0, sizeof(union udc_setup_data));
 
2833                 setup_data.request.bRequest = USB_REQ_SET_CONFIGURATION;
 
2834                 setup_data.request.wValue = dev->cur_config;
 
2836                 /* program the NE registers */
 
2837                 for (i = 0; i < UDC_EP_NUM; i++) {
 
2841                                 /* ep ix in UDC CSR register space */
 
2842                                 udc_csr_epix = ep->num;
 
2847                                 /* ep ix in UDC CSR register space */
 
2848                                 udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
 
2851                         tmp = readl(&dev->csr->ne[udc_csr_epix]);
 
2853                         tmp = AMD_ADDBITS(tmp, ep->dev->cur_config,
 
2856                         writel(tmp, &dev->csr->ne[udc_csr_epix]);
 
2858                         /* clear stall bits */
 
2860                         tmp = readl(&ep->regs->ctl);
 
2861                         tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
 
2862                         writel(tmp, &ep->regs->ctl);
 
2864                 /* call gadget zero with setup data received */
 
2865                 spin_unlock(&dev->lock);
 
2866                 tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
 
2867                 spin_lock(&dev->lock);
 
2869         } /* SET_INTERFACE ? */
 
2870         if (dev_irq & AMD_BIT(UDC_DEVINT_SI)) {
 
2871                 ret_val = IRQ_HANDLED;
 
2873                 dev->set_cfg_not_acked = 1;
 
2874                 /* read interface and alt setting values */
 
2875                 tmp = readl(&dev->regs->sts);
 
2876                 dev->cur_alt = AMD_GETBITS(tmp, UDC_DEVSTS_ALT);
 
2877                 dev->cur_intf = AMD_GETBITS(tmp, UDC_DEVSTS_INTF);
 
2879                 /* make usb request for gadget driver */
 
2880                 memset(&setup_data, 0, sizeof(union udc_setup_data));
 
2881                 setup_data.request.bRequest = USB_REQ_SET_INTERFACE;
 
2882                 setup_data.request.bRequestType = USB_RECIP_INTERFACE;
 
2883                 setup_data.request.wValue = dev->cur_alt;
 
2884                 setup_data.request.wIndex = dev->cur_intf;
 
2886                 DBG(dev, "SET_INTERFACE interrupt: alt=%d intf=%d\n",
 
2887                                 dev->cur_alt, dev->cur_intf);
 
2889                 /* program the NE registers */
 
2890                 for (i = 0; i < UDC_EP_NUM; i++) {
 
2894                                 /* ep ix in UDC CSR register space */
 
2895                                 udc_csr_epix = ep->num;
 
2900                                 /* ep ix in UDC CSR register space */
 
2901                                 udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
 
2906                         tmp = readl(&dev->csr->ne[udc_csr_epix]);
 
2908                         tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf,
 
2912                         tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt,
 
2915                         writel(tmp, &dev->csr->ne[udc_csr_epix]);
 
2917                         /* clear stall bits */
 
2919                         tmp = readl(&ep->regs->ctl);
 
2920                         tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
 
2921                         writel(tmp, &ep->regs->ctl);
 
2924                 /* call gadget zero with setup data received */
 
2925                 spin_unlock(&dev->lock);
 
2926                 tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
 
2927                 spin_lock(&dev->lock);
 
2930         if (dev_irq & AMD_BIT(UDC_DEVINT_UR)) {
 
2931                 DBG(dev, "USB Reset interrupt\n");
 
2932                 ret_val = IRQ_HANDLED;
 
2934                 /* allow soft reset when suspend occurs */
 
2935                 soft_reset_occured = 0;
 
2937                 dev->waiting_zlp_ack_ep0in = 0;
 
2938                 dev->set_cfg_not_acked = 0;
 
2940                 /* mask not needed interrupts */
 
2941                 udc_mask_unused_interrupts(dev);
 
2943                 /* call gadget to resume and reset configs etc. */
 
2944                 spin_unlock(&dev->lock);
 
2945                 if (dev->sys_suspended && dev->driver->resume) {
 
2946                         dev->driver->resume(&dev->gadget);
 
2947                         dev->sys_suspended = 0;
 
2949                 dev->driver->disconnect(&dev->gadget);
 
2950                 spin_lock(&dev->lock);
 
2952                 /* disable ep0 to empty req queue */
 
2953                 empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
 
2954                 ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);
 
2956                 /* soft reset when rxfifo not empty */
 
2957                 tmp = readl(&dev->regs->sts);
 
2958                 if (!(tmp & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
 
2959                                 && !soft_reset_after_usbreset_occured) {
 
2960                         udc_soft_reset(dev);
 
2961                         soft_reset_after_usbreset_occured++;
 
2965                  * DMA reset to kill potential old DMA hw hang,
 
2966                  * POLL bit is already reset by ep_init() through
 
2969                 DBG(dev, "DMA machine reset\n");
 
2970                 tmp = readl(&dev->regs->cfg);
 
2971                 writel(tmp | AMD_BIT(UDC_DEVCFG_DMARST), &dev->regs->cfg);
 
2972                 writel(tmp, &dev->regs->cfg);
 
2974                 /* put into initial config */
 
2975                 udc_basic_init(dev);
 
2977                 /* enable device setup interrupts */
 
2978                 udc_enable_dev_setup_interrupts(dev);
 
2980                 /* enable suspend interrupt */
 
2981                 tmp = readl(&dev->regs->irqmsk);
 
2982                 tmp &= AMD_UNMASK_BIT(UDC_DEVINT_US);
 
2983                 writel(tmp, &dev->regs->irqmsk);
 
2986         if (dev_irq & AMD_BIT(UDC_DEVINT_US)) {
 
2987                 DBG(dev, "USB Suspend interrupt\n");
 
2988                 ret_val = IRQ_HANDLED;
 
2989                 if (dev->driver->suspend) {
 
2990                         spin_unlock(&dev->lock);
 
2991                         dev->sys_suspended = 1;
 
2992                         dev->driver->suspend(&dev->gadget);
 
2993                         spin_lock(&dev->lock);
 
2996         if (dev_irq & AMD_BIT(UDC_DEVINT_ENUM)) {
 
2997                 DBG(dev, "ENUM interrupt\n");
 
2998                 ret_val = IRQ_HANDLED;
 
2999                 soft_reset_after_usbreset_occured = 0;
 
3001                 /* disable ep0 to empty req queue */
 
3002                 empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
 
3003                 ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);
 
3005                 /* link up all endpoints */
 
3006                 udc_setup_endpoints(dev);
 
3007                 if (dev->gadget.speed == USB_SPEED_HIGH) {
 
3008                         dev_info(&dev->pdev->dev, "Connect: speed = %s\n",
 
3010                 } else if (dev->gadget.speed == USB_SPEED_FULL) {
 
3011                         dev_info(&dev->pdev->dev, "Connect: speed = %s\n",
 
3016                 activate_control_endpoints(dev);
 
3018                 /* enable ep0 interrupts */
 
3019                 udc_enable_ep0_interrupts(dev);
 
3021         /* session valid change interrupt */
 
3022         if (dev_irq & AMD_BIT(UDC_DEVINT_SVC)) {
 
3023                 DBG(dev, "USB SVC interrupt\n");
 
3024                 ret_val = IRQ_HANDLED;
 
3026                 /* check that session is not valid to detect disconnect */
 
3027                 tmp = readl(&dev->regs->sts);
 
3028                 if (!(tmp & AMD_BIT(UDC_DEVSTS_SESSVLD))) {
 
3029                         /* disable suspend interrupt */
 
3030                         tmp = readl(&dev->regs->irqmsk);
 
3031                         tmp |= AMD_BIT(UDC_DEVINT_US);
 
3032                         writel(tmp, &dev->regs->irqmsk);
 
3033                         DBG(dev, "USB Disconnect (session valid low)\n");
 
3034                         /* cleanup on disconnect */
 
3035                         usb_disconnect(udc);
 
3043 /* Interrupt Service Routine, see Linux Kernel Doc for parameters */
 
3044 static irqreturn_t udc_irq(int irq, void *pdev)
 
3046         struct udc *dev = pdev;
 
3050         irqreturn_t ret_val = IRQ_NONE;
 
3052         spin_lock(&dev->lock);
 
3054         /* check for ep irq */
 
3055         reg = readl(&dev->regs->ep_irqsts);
 
3057                 if (reg & AMD_BIT(UDC_EPINT_OUT_EP0))
 
3058                         ret_val |= udc_control_out_isr(dev);
 
3059                 if (reg & AMD_BIT(UDC_EPINT_IN_EP0))
 
3060                         ret_val |= udc_control_in_isr(dev);
 
3066                 for (i = 1; i < UDC_EP_NUM; i++) {
 
3067                         ep_irq = 1 << i;
 
3068                         if (!(reg & ep_irq) || i == UDC_EPINT_OUT_EP0)
 
3071                         /* clear irq status */
 
3072                         writel(ep_irq, &dev->regs->ep_irqsts);
 
3074                         /* irq for out ep ? */
 
3075                         if (i > UDC_EPIN_NUM)
 
3076                                 ret_val |= udc_data_out_isr(dev, i);
 
3078                                 ret_val |= udc_data_in_isr(dev, i);
 
3084         /* check for dev irq */
 
3085         reg = readl(&dev->regs->irqsts);
 
3088                 writel(reg, &dev->regs->irqsts);
 
3089                 ret_val |= udc_dev_isr(dev, reg);
 
3093         spin_unlock(&dev->lock);
 
3097 /* Tears down device */
 
3098 static void gadget_release(struct device *pdev)
 
3100         struct amd5536udc *dev = dev_get_drvdata(pdev);
 
3104 /* Cleanup on device remove */
 
3105 static void udc_remove(struct udc *dev)
 
3109         if (timer_pending(&udc_timer))
 
3110                 wait_for_completion(&on_exit);
 
3111         if (udc_timer.data)
 
3112                 del_timer_sync(&udc_timer);
 
3113         /* remove pollstall timer */
 
3114         stop_pollstall_timer++;
 
3115         if (timer_pending(&udc_pollstall_timer))
 
3116                 wait_for_completion(&on_pollstall_exit);
 
3117         if (udc_pollstall_timer.data)
 
3118                 del_timer_sync(&udc_pollstall_timer);
 
3122 /* Reset all pci context */
 
3123 static void udc_pci_remove(struct pci_dev *pdev)
 
3127         dev = pci_get_drvdata(pdev);
 
3129         /* gadget driver must not be registered */
 
3130         BUG_ON(dev->driver != NULL);
 
3132         /* dma pool cleanup */
 
3133         if (dev->data_requests)
 
3134                 pci_pool_destroy(dev->data_requests);
 
3136         if (dev->stp_requests) {
 
3137                 /* cleanup DMA desc's for ep0in */
 
3138                 pci_pool_free(dev->stp_requests,
 
3139                         dev->ep[UDC_EP0OUT_IX].td_stp,
 
3140                         dev->ep[UDC_EP0OUT_IX].td_stp_dma);
 
3141                 pci_pool_free(dev->stp_requests,
 
3142                         dev->ep[UDC_EP0OUT_IX].td,
 
3143                         dev->ep[UDC_EP0OUT_IX].td_phys);
 
3145                 pci_pool_destroy(dev->stp_requests);
 
3148         /* reset controller */
 
3149         writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
 
3150         if (dev->irq_registered)
 
3151                 free_irq(pdev->irq, dev);
 
3154         if (dev->mem_region)
 
3155                 release_mem_region(pci_resource_start(pdev, 0),
 
3156                                 pci_resource_len(pdev, 0));
 
3158                 pci_disable_device(pdev);
 
3160         device_unregister(&dev->gadget.dev);
 
3161         pci_set_drvdata(pdev, NULL);
 
3166 /* create dma pools on init */
 
3167 static int init_dma_pools(struct udc *dev)
 
3169         struct udc_stp_dma      *td_stp;
 
3170         struct udc_data_dma     *td_data;
 
3173         /* consistent DMA mode setting ? */
 
3175                 use_dma_bufferfill_mode = 0;
 
3178                 use_dma_bufferfill_mode = 1;
 
3182         dev->data_requests = dma_pool_create("data_requests", NULL,
 
3183                 sizeof(struct udc_data_dma), 0, 0);
 
3184         if (!dev->data_requests) {
 
3185                 DBG(dev, "can't get request data pool\n");
 
3190         /* EP0 in dma regs = dev control regs */
 
3191         dev->ep[UDC_EP0IN_IX].dma = &dev->regs->ctl;
 
3193         /* dma desc for setup data */
 
3194         dev->stp_requests = dma_pool_create("setup requests", NULL,
 
3195                 sizeof(struct udc_stp_dma), 0, 0);
 
3196         if (!dev->stp_requests) {
 
3197                 DBG(dev, "can't get stp request pool\n");
 
3202         td_stp = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
 
3203                                 &dev->ep[UDC_EP0OUT_IX].td_stp_dma);
 
3204         if (td_stp == NULL) {
 
3208         dev->ep[UDC_EP0OUT_IX].td_stp = td_stp;
 
3210         /* data: 0 packets !? */
 
3211         td_data = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
 
3212                                 &dev->ep[UDC_EP0OUT_IX].td_phys);
 
3213         if (td_data == NULL) {
 
3217         dev->ep[UDC_EP0OUT_IX].td = td_data;
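 
/*
 * Sketch of the generic dma_pool lifecycle used above and torn down in
 * udc_pci_remove() (pool/handle names are illustrative; the remove path
 * uses the older pci_pool_free()/pci_pool_destroy() wrappers):
 */
static int dma_pool_lifecycle_sketch(void)
{
        struct dma_pool *pool;
        dma_addr_t handle;
        void *vaddr;

        pool = dma_pool_create("example", NULL,
                               sizeof(struct udc_stp_dma), 0, 0);
        if (!pool)
                return -ENOMEM;
        vaddr = dma_pool_alloc(pool, GFP_KERNEL, &handle);
        if (!vaddr) {
                dma_pool_destroy(pool);
                return -ENOMEM;
        }
        /* ... hand "handle" to the hardware, use "vaddr" from the CPU ... */
        dma_pool_free(pool, vaddr, handle);
        dma_pool_destroy(pool);
        return 0;
}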
 
3224 /* Called by pci bus driver to init pci context */
 
3225 static int udc_pci_probe(
 
3226         struct pci_dev *pdev,
 
3227         const struct pci_device_id *id
 
3231         unsigned long           resource;
 
3237                 dev_dbg(&pdev->dev, "already probed\n");
 
3242         dev = kzalloc(sizeof(struct udc), GFP_KERNEL);
 
3249         if (pci_enable_device(pdev) < 0) {
 
3255         /* PCI resource allocation */
 
3256         resource = pci_resource_start(pdev, 0);
 
3257         len = pci_resource_len(pdev, 0);
 
3259         if (!request_mem_region(resource, len, name)) {
 
3260                 dev_dbg(&pdev->dev, "pci device used already\n");
 
3264         dev->mem_region = 1;
 
3266         dev->virt_addr = ioremap_nocache(resource, len);
 
3267         if (dev->virt_addr == NULL) {
 
3268                 dev_dbg(&pdev->dev, "start address cannot be mapped\n");
 
3274                 dev_err(&dev->pdev->dev, "irq not set\n");
 
3279         if (request_irq(pdev->irq, udc_irq, IRQF_SHARED, name, dev) != 0) {
 
3280                 dev_dbg(&dev->pdev->dev, "request_irq(%d) fail\n", pdev->irq);
 
3284         dev->irq_registered = 1;
 
3286         pci_set_drvdata(pdev, dev);
 
3288         /* chip revision for HS AMD5536 */
 
3289         dev->chiprev = pdev->revision;
 
3291         pci_set_master(pdev);
 
3292         pci_try_set_mwi(pdev);
 
3294         /* init dma pools */
 
3296                 retval = init_dma_pools(dev);
 
3301         dev->phys_addr = resource;
 
3302         dev->irq = pdev->irq;
 
3304         dev->gadget.dev.parent = &pdev->dev;
 
3305         dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
 
3307         /* general probing */
 
3308         if (udc_probe(dev) == 0)
 
3313                 udc_pci_remove(pdev);
 
3318 static int udc_probe(struct udc *dev)
 
3324         /* mark timer as not initialized */
 
3325         udc_timer.data = 0;
 
3326         udc_pollstall_timer.data = 0;
 
3328         /* device struct setup */
 
3329         spin_lock_init(&dev->lock);
 
3330         dev->gadget.ops = &udc_ops;
 
3332         strcpy(dev->gadget.dev.bus_id, "gadget");
 
3333         dev->gadget.dev.release = gadget_release;
 
3334         dev->gadget.name = name;
 
3336         dev->gadget.is_dualspeed = 1;
 
3338         /* udc csr registers base */
 
3339         dev->csr = dev->virt_addr + UDC_CSR_ADDR;
 
3340         /* dev registers base */
 
3341         dev->regs = dev->virt_addr + UDC_DEVCFG_ADDR;
 
3342         /* ep registers base */
 
3343         dev->ep_regs = dev->virt_addr + UDC_EPREGS_ADDR;
 
3345         dev->rxfifo = (u32 __iomem *)(dev->virt_addr + UDC_RXFIFO_ADDR);
 
3346         dev->txfifo = (u32 __iomem *)(dev->virt_addr + UDC_TXFIFO_ADDR);
 
3348         /* init registers, interrupts, ... */
 
3349         startup_registers(dev);
 
3351         dev_info(&dev->pdev->dev, "%s\n", mod_desc);
 
3353         snprintf(tmp, sizeof tmp, "%d", dev->irq);
 
3354         dev_info(&dev->pdev->dev,
 
3355                 "irq %s, pci mem %08lx, chip rev %02x (Geode5536 %s)\n",
 
3356                 tmp, dev->phys_addr, dev->chiprev,
 
3357                 (dev->chiprev == UDC_HSA0_REV) ? "A0" : "B1");
 
3358         strcpy(tmp, UDC_DRIVER_VERSION_STRING);
 
3359         if (dev->chiprev == UDC_HSA0_REV) {
 
3360                 dev_err(&dev->pdev->dev, "chip revision is A0; too old\n");
 
3364         dev_info(&dev->pdev->dev,
 
3365                 "driver version: %s (for Geode5536 B1)\n", tmp);
 
3368         retval = device_register(&dev->gadget.dev);
 
3373         init_timer(&udc_timer);
 
3374         udc_timer.function = udc_timer_function;
 
3375         udc_timer.data = 1;
 
3376         /* timer pollstall init */
 
3377         init_timer(&udc_pollstall_timer);
 
3378         udc_pollstall_timer.function = udc_pollstall_timer_function;
 
3379         udc_pollstall_timer.data = 1;
 
3382         reg = readl(&dev->regs->ctl);
 
3383         reg |= AMD_BIT(UDC_DEVCTL_SD);
 
3384         writel(reg, &dev->regs->ctl);
 
3386         /* print dev register info */
 
3395 /* Initiates a remote wakeup */
 
3396 static int udc_remote_wakeup(struct udc *dev)
 
3398         unsigned long flags;
 
3401         DBG(dev, "UDC initiates remote wakeup\n");
 
3403         spin_lock_irqsave(&dev->lock, flags);
 
3405         tmp = readl(&dev->regs->ctl);
 
3406         tmp |= AMD_BIT(UDC_DEVCTL_RES);
 
3407         writel(tmp, &dev->regs->ctl);
 
3408         tmp &= AMD_CLEAR_BIT(UDC_DEVCTL_RES);
 
3409         writel(tmp, &dev->regs->ctl);
 
3411         spin_unlock_irqrestore(&dev->lock, flags);
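 
/*
 * Gadget-side trigger (sketch): a function driver calls
 * usb_gadget_wakeup(), which dispatches through the gadget ops (assumed
 * to be wired to this function via udc_ops, defined earlier in this
 * file) rather than calling udc_remote_wakeup() directly.
 */
static int my_wakeup_sketch(struct usb_gadget *gadget)
{
        return usb_gadget_wakeup(gadget);       /* -> ops->wakeup */
}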
 
3415 /* PCI device parameters */
 
3416 static const struct pci_device_id pci_id[] = {
 
3418                 PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x2096),
 
3419                 .class =        (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
 
3420                 .class_mask =   0xffffffff,
 
3424 MODULE_DEVICE_TABLE(pci, pci_id);
 
3427 static struct pci_driver udc_pci_driver = {
 
3428         .name =         (char *) name,
 
3429         .id_table =     pci_id,
 
3430         .probe =        udc_pci_probe,
 
3431         .remove =       udc_pci_remove,
 
3435 static int __init init(void)
 
3437         return pci_register_driver(&udc_pci_driver);
 
3440 module_init(init);
 
3442 static void __exit cleanup(void)
 
3444         pci_unregister_driver(&udc_pci_driver);
 
3446 module_exit(cleanup);
 
3448 MODULE_DESCRIPTION(UDC_MOD_DESCRIPTION);
 
3449 MODULE_AUTHOR("Thomas Dahlmann");
 
3450 MODULE_LICENSE("GPL");