/*
 * amd5536.c -- AMD 5536 UDC high/full speed USB device controller
 *
 * Copyright (C) 2005-2007 AMD (http://www.amd.com)
 * Author: Thomas Dahlmann
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * The AMD5536 UDC is part of the x86 southbridge AMD Geode CS5536.
 * It is a high-speed, DMA-capable USB device controller. Besides ep0 it
 * provides 4 IN and 4 OUT endpoints (bulk or interrupt type).
 *
 * Make sure that UDC is assigned to port 4 by BIOS settings (the port can
 * also be used as a host port) and UOC bits PAD_EN and APU are set (should
 * be done by BIOS init).
 *
 * UDC DMA requires 32-bit aligned buffers, so DMA with gadget ether does
 * not work without updating NET_IP_ALIGN; alternatively, PIO mode (module
 * param "use_dma=0") can be used with gadget ether.
 */
 
/* #define UDC_VERBOSE */

#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
#define UDC_DRIVER_VERSION_STRING	"01.00.0206 - $Revision: #3 $"
 
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/ioctl.h>
#include <linux/dmapool.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/irq.h>

#include <asm/byteorder.h>
#include <asm/system.h>
#include <asm/unaligned.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

#include "amd5536udc.h"
 
static void udc_tasklet_disconnect(unsigned long);
static void empty_req_queue(struct udc_ep *);
static int udc_probe(struct udc *dev);
static void udc_basic_init(struct udc *dev);
static void udc_setup_endpoints(struct udc *dev);
static void udc_soft_reset(struct udc *dev);
static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep);
static void udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq);
static int udc_free_dma_chain(struct udc *dev, struct udc_request *req);
static int udc_create_dma_chain(struct udc_ep *ep, struct udc_request *req,
				unsigned long buf_len, gfp_t gfp_flags);
static int udc_remote_wakeup(struct udc *dev);
static int udc_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static void udc_pci_remove(struct pci_dev *pdev);

static const char mod_desc[] = UDC_MOD_DESCRIPTION;
static const char name[] = "amd5536udc";
 
/* structure to hold endpoint function pointers */
static const struct usb_ep_ops udc_ep_ops;

/* received setup data */
static union udc_setup_data setup_data;

/* pointer to device object */
static struct udc *udc;

/* irq spin lock for soft reset */
static DEFINE_SPINLOCK(udc_irq_spinlock);
/* stall spin lock */
static DEFINE_SPINLOCK(udc_stall_spinlock);
 
/*
 * slave mode: pending bytes in rx fifo after nyet,
 * used if EPIN irq came but no req was available
 */
static unsigned int udc_rxfifo_pending;

/* count soft resets after suspend to avoid loop */
static int soft_reset_occured;
static int soft_reset_after_usbreset_occured;
 
static struct timer_list udc_timer;
static int stop_timer;

/*
 * set_rde -- is used to control enabling of RX DMA. Problem is
 * that UDC has only one bit (RDE) to enable/disable RX DMA for
 * all OUT endpoints. So we have to handle race conditions like
 * when OUT data reaches the fifo but no request was queued yet.
 * This cannot be solved by leaving RX DMA disabled until a
 * request gets queued because there may be other OUT packets
 * in the FIFO (important for not blocking control traffic).
 * The value of set_rde controls the corresponding timer.
 *
 * set_rde -1 == not used, means it is allowed to be set to 0 or 1
 * set_rde  0 == do not touch RDE, do not start the RDE timer
 * set_rde  1 == timer function will look whether FIFO has data
 * set_rde  2 == set by timer function to enable RX DMA on next call
 */
static int set_rde = -1;
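/*
 * Sketch of the timer interplay (cf. udc_timer_function() below): while
 * set_rde > 0 the timer either sets RDE as soon as the RX FIFO holds data
 * or re-arms itself to keep polling an empty FIFO; once data packets get
 * queued by the gadget layer, the timer is forced to expire with
 * set_rde = 0 and RDE is set in udc_queue() itself.
 */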
 
static DECLARE_COMPLETION(on_exit);
static struct timer_list udc_pollstall_timer;
static int stop_pollstall_timer;
static DECLARE_COMPLETION(on_pollstall_exit);

/* tasklet for usb disconnect */
static DECLARE_TASKLET(disconnect_tasklet, udc_tasklet_disconnect,
		(unsigned long) &udc);
 
/* endpoint names used for print */
static const char ep0_string[] = "ep0in";
static const char *ep_string[] = {
	ep0_string,
	"ep1in-int", "ep2in-bulk", "ep3in-bulk", "ep4in-bulk", "ep5in-bulk",
	"ep6in-bulk", "ep7in-bulk", "ep8in-bulk", "ep9in-bulk", "ep10in-bulk",
	"ep11in-bulk", "ep12in-bulk", "ep13in-bulk", "ep14in-bulk",
	"ep15in-bulk", "ep0out", "ep1out-bulk", "ep2out-bulk", "ep3out-bulk",
	"ep4out-bulk", "ep5out-bulk", "ep6out-bulk", "ep7out-bulk",
	"ep8out-bulk", "ep9out-bulk", "ep10out-bulk", "ep11out-bulk",
	"ep12out-bulk", "ep13out-bulk", "ep14out-bulk", "ep15out-bulk"
};
 
static int use_dma = 1;
/* packet per buffer dma */
static int use_dma_ppb = 1;
/* with per descr. update */
static int use_dma_ppb_du;
/* buffer fill mode */
static int use_dma_bufferfill_mode;
/* full speed only mode */
static int use_fullspeed;
/* tx buffer size for high speed */
static unsigned long hs_tx_buf = UDC_EPIN_BUFF_SIZE;
 
/* module parameters */
module_param(use_dma, bool, S_IRUGO);
MODULE_PARM_DESC(use_dma, "true for DMA");
module_param(use_dma_ppb, bool, S_IRUGO);
MODULE_PARM_DESC(use_dma_ppb, "true for DMA in packet per buffer mode");
module_param(use_dma_ppb_du, bool, S_IRUGO);
MODULE_PARM_DESC(use_dma_ppb_du,
	"true for DMA in packet per buffer mode with descriptor update");
module_param(use_fullspeed, bool, S_IRUGO);
MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");
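/*
 * Example (hypothetical invocation): as noted in the header comment, PIO
 * mode can be forced for gadget ether by loading the driver with DMA
 * disabled:
 *
 *	modprobe amd5536udc use_dma=0
 */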
 
/*---------------------------------------------------------------------------*/
/* Prints UDC device registers and endpoint irq registers */
static void print_regs(struct udc *dev)
{
	DBG(dev, "------- Device registers -------\n");
	DBG(dev, "dev config     = %08x\n", readl(&dev->regs->cfg));
	DBG(dev, "dev control    = %08x\n", readl(&dev->regs->ctl));
	DBG(dev, "dev status     = %08x\n", readl(&dev->regs->sts));
	DBG(dev, "dev int's      = %08x\n", readl(&dev->regs->irqsts));
	DBG(dev, "dev intmask    = %08x\n", readl(&dev->regs->irqmsk));
	DBG(dev, "dev ep int's   = %08x\n", readl(&dev->regs->ep_irqsts));
	DBG(dev, "dev ep intmask = %08x\n", readl(&dev->regs->ep_irqmsk));

	DBG(dev, "USE DMA        = %d\n", use_dma);
	if (use_dma && use_dma_ppb && !use_dma_ppb_du) {
		DBG(dev, "DMA mode       = PPBNDU (packet per buffer "
			"WITHOUT desc. update)\n");
		dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBNDU");
	} else if (use_dma && use_dma_ppb && use_dma_ppb_du) {
		DBG(dev, "DMA mode       = PPBDU (packet per buffer "
			"WITH desc. update)\n");
		dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBDU");
	}
	if (use_dma && use_dma_bufferfill_mode) {
		DBG(dev, "DMA mode       = BF (buffer fill mode)\n");
		dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "BF");
	}
	if (!use_dma)
		dev_info(&dev->pdev->dev, "FIFO mode\n");
	DBG(dev, "-------------------------------------------------------\n");
}
 
/* Masks unused interrupts */
static int udc_mask_unused_interrupts(struct udc *dev)
{
	u32 tmp;

	/* mask all dev interrupts */
	tmp =	AMD_BIT(UDC_DEVINT_SVC) |
		AMD_BIT(UDC_DEVINT_ENUM) |
		AMD_BIT(UDC_DEVINT_US) |
		AMD_BIT(UDC_DEVINT_UR) |
		AMD_BIT(UDC_DEVINT_ES) |
		AMD_BIT(UDC_DEVINT_SI) |
		AMD_BIT(UDC_DEVINT_SOF) |
		AMD_BIT(UDC_DEVINT_SC);
	writel(tmp, &dev->regs->irqmsk);

	/* mask all ep interrupts */
	writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqmsk);

	return 0;
}
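/*
 * Note on the register helpers used throughout (see amd5536udc.h):
 * AMD_BIT() builds a single-bit mask, e.g. AMD_BIT(UDC_DEVINT_SC) is
 * (1 << UDC_DEVINT_SC), and AMD_UNMASK_BIT() is its complement for
 * clearing a bit; AMD_ADDBITS()/AMD_GETBITS() insert into and extract
 * from multi-bit register fields.
 */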
 
/* Enables endpoint 0 interrupts */
static int udc_enable_ep0_interrupts(struct udc *dev)
{
	u32 tmp;

	DBG(dev, "udc_enable_ep0_interrupts()\n");

	tmp = readl(&dev->regs->ep_irqmsk);
	/* enable ep0 irq's */
	tmp &= AMD_UNMASK_BIT(UDC_EPINT_IN_EP0)
		& AMD_UNMASK_BIT(UDC_EPINT_OUT_EP0);
	writel(tmp, &dev->regs->ep_irqmsk);

	return 0;
}
 
/* Enables device interrupts for SET_INTF and SET_CONFIG */
static int udc_enable_dev_setup_interrupts(struct udc *dev)
{
	u32 tmp;

	DBG(dev, "enable device interrupts for setup data\n");

	tmp = readl(&dev->regs->irqmsk);

	/* enable SET_INTERFACE, SET_CONFIG and other needed irq's */
	tmp &= AMD_UNMASK_BIT(UDC_DEVINT_SI)
		& AMD_UNMASK_BIT(UDC_DEVINT_SC)
		& AMD_UNMASK_BIT(UDC_DEVINT_UR)
		& AMD_UNMASK_BIT(UDC_DEVINT_SVC)
		& AMD_UNMASK_BIT(UDC_DEVINT_ENUM);
	writel(tmp, &dev->regs->irqmsk);

	return 0;
}
 
/* Calculates fifo start of endpoint based on preceding endpoints */
static int udc_set_txfifo_addr(struct udc_ep *ep)
{
	struct udc	*dev;
	u32 tmp;
	int i;

	if (!ep || !(ep->in))
		return -EINVAL;

	dev = ep->dev;
	ep->txfifo = dev->txfifo;

	/* traverse ep's */
	for (i = 0; i < ep->num; i++) {
		if (dev->ep[i].regs) {
			/* read fifo size */
			tmp = readl(&dev->ep[i].regs->bufin_framenum);
			tmp = AMD_GETBITS(tmp, UDC_EPIN_BUFF_SIZE);
			ep->txfifo += tmp;
		}
	}
	return 0;
}
 
/* CNAK pending field: bit0 = ep0in, bit16 = ep0out */
static u32 cnak_pending;

static void UDC_QUEUE_CNAK(struct udc_ep *ep, unsigned num)
{
	if (readl(&ep->regs->ctl) & AMD_BIT(UDC_EPCTL_NAK)) {
		DBG(ep->dev, "NAK could not be cleared for ep%d\n", num);
		cnak_pending |= 1 << (num);
		ep->naking = 1;
	} else
		cnak_pending = cnak_pending & (~(1 << (num)));
}
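/*
 * Endpoints recorded in cnak_pending still have their NAK bit set; the
 * CNAK write is retried later (from interrupt handling) until the
 * controller actually clears the NAK for that endpoint, instead of the
 * request being lost here.
 */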
 
/* Enables endpoint, is called by gadget driver */
static int
udc_ep_enable(struct usb_ep *usbep, const struct usb_endpoint_descriptor *desc)
{
	struct udc_ep		*ep;
	struct udc		*dev;
	u32			tmp;
	unsigned long		iflags;
	u8			udc_csr_epix;
	unsigned		maxpacket;

	if (!usbep
			|| usbep->name == ep0_string
			|| !desc
			|| desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;

	ep = container_of(usbep, struct udc_ep, ep);
	dev = ep->dev;

	DBG(dev, "udc_ep_enable() ep %d\n", ep->num);

	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	spin_lock_irqsave(&dev->lock, iflags);
	ep->desc = desc;

	ep->halted = 0;

	/* set traffic type */
	tmp = readl(&dev->ep[ep->num].regs->ctl);
	tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_EPCTL_ET);
	writel(tmp, &dev->ep[ep->num].regs->ctl);

	/* set max packet size */
	maxpacket = le16_to_cpu(desc->wMaxPacketSize);
	tmp = readl(&dev->ep[ep->num].regs->bufout_maxpkt);
	tmp = AMD_ADDBITS(tmp, maxpacket, UDC_EP_MAX_PKT_SIZE);
	ep->ep.maxpacket = maxpacket;
	writel(tmp, &dev->ep[ep->num].regs->bufout_maxpkt);

	/* IN ep */
	if (ep->in) {

		/* ep ix in UDC CSR register space */
		udc_csr_epix = ep->num;

		/* set buffer size (tx fifo entries) */
		tmp = readl(&dev->ep[ep->num].regs->bufin_framenum);
		/* double buffering: fifo size = 2 x max packet size */
		tmp = AMD_ADDBITS(
				tmp,
				maxpacket * UDC_EPIN_BUFF_SIZE_MULT
					/ UDC_DWORD_BYTES,
				UDC_EPIN_BUFF_SIZE);
		writel(tmp, &dev->ep[ep->num].regs->bufin_framenum);

		/* calc. tx fifo base addr */
		udc_set_txfifo_addr(ep);

		/* flush fifo */
		tmp = readl(&ep->regs->ctl);
		tmp |= AMD_BIT(UDC_EPCTL_F);
		writel(tmp, &ep->regs->ctl);

	/* OUT ep */
	} else {
		/* ep ix in UDC CSR register space */
		udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;

		/* set max packet size UDC CSR  */
		tmp = readl(&dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);
		tmp = AMD_ADDBITS(tmp, maxpacket,
					UDC_CSR_NE_MAX_PKT);
		writel(tmp, &dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);

		if (use_dma && !ep->in) {
			/* alloc and init BNA dummy request */
			ep->bna_dummy_req = udc_alloc_bna_dummy(ep);
			ep->bna_occurred = 0;
		}

		if (ep->num != UDC_EP0OUT_IX)
			dev->data_ep_enabled = 1;
	}

	/* set ep values */
	tmp = readl(&dev->csr->ne[udc_csr_epix]);
	/* max packet */
	tmp = AMD_ADDBITS(tmp, maxpacket, UDC_CSR_NE_MAX_PKT);
	/* ep number */
	tmp = AMD_ADDBITS(tmp, desc->bEndpointAddress, UDC_CSR_NE_NUM);
	/* ep direction */
	tmp = AMD_ADDBITS(tmp, ep->in, UDC_CSR_NE_DIR);
	/* ep type */
	tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_CSR_NE_TYPE);
	/* ep config */
	tmp = AMD_ADDBITS(tmp, ep->dev->cur_config, UDC_CSR_NE_CFG);
	/* ep interface */
	tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf, UDC_CSR_NE_INTF);
	/* ep alt */
	tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt, UDC_CSR_NE_ALT);
	/* write reg */
	writel(tmp, &dev->csr->ne[udc_csr_epix]);

	/* enable ep irq */
	tmp = readl(&dev->regs->ep_irqmsk);
	tmp &= AMD_UNMASK_BIT(ep->num);
	writel(tmp, &dev->regs->ep_irqmsk);

	/*
	 * clear NAK by writing CNAK
	 * avoid BNA for OUT DMA, don't clear NAK until DMA desc. written
	 */
	if (!use_dma || ep->in) {
		tmp = readl(&ep->regs->ctl);
		tmp |= AMD_BIT(UDC_EPCTL_CNAK);
		writel(tmp, &ep->regs->ctl);
		ep->naking = 0;
		UDC_QUEUE_CNAK(ep, ep->num);
	}
	tmp = desc->bEndpointAddress;
	DBG(dev, "%s enabled\n", usbep->name);

	spin_unlock_irqrestore(&dev->lock, iflags);
	return 0;
}
 
/* Resets endpoint */
static void ep_init(struct udc_regs __iomem *regs, struct udc_ep *ep)
{
	u32		tmp;

	VDBG(ep->dev, "ep-%d reset\n", ep->num);
	ep->desc = NULL;
	ep->ep.ops = &udc_ep_ops;
	INIT_LIST_HEAD(&ep->queue);

	ep->ep.maxpacket = (u16) ~0;
	/* set NAK */
	tmp = readl(&ep->regs->ctl);
	tmp |= AMD_BIT(UDC_EPCTL_SNAK);
	writel(tmp, &ep->regs->ctl);
	ep->naking = 1;

	/* disable interrupt */
	tmp = readl(&regs->ep_irqmsk);
	tmp |= AMD_BIT(ep->num);
	writel(tmp, &regs->ep_irqmsk);

	if (ep->in) {
		/* unset P and IN bit of potential former DMA */
		tmp = readl(&ep->regs->ctl);
		tmp &= AMD_UNMASK_BIT(UDC_EPCTL_P);
		writel(tmp, &ep->regs->ctl);

		tmp = readl(&ep->regs->sts);
		tmp |= AMD_BIT(UDC_EPSTS_IN);
		writel(tmp, &ep->regs->sts);

		/* flush the fifo */
		tmp = readl(&ep->regs->ctl);
		tmp |= AMD_BIT(UDC_EPCTL_F);
		writel(tmp, &ep->regs->ctl);

	}
	/* reset desc pointer */
	writel(0, &ep->regs->desptr);
}
 
/* Disables endpoint, is called by gadget driver */
static int udc_ep_disable(struct usb_ep *usbep)
{
	struct udc_ep	*ep = NULL;
	unsigned long	iflags;

	if (!usbep)
		return -EINVAL;

	ep = container_of(usbep, struct udc_ep, ep);
	if (usbep->name == ep0_string || !ep->desc)
		return -EINVAL;

	DBG(ep->dev, "Disable ep-%d\n", ep->num);

	spin_lock_irqsave(&ep->dev->lock, iflags);
	udc_free_request(&ep->ep, &ep->bna_dummy_req->req);
	empty_req_queue(ep);
	ep_init(ep->dev->regs, ep);
	spin_unlock_irqrestore(&ep->dev->lock, iflags);

	return 0;
}
 
/* Allocates request packet, called by gadget driver */
static struct usb_request *
udc_alloc_request(struct usb_ep *usbep, gfp_t gfp)
{
	struct udc_request	*req;
	struct udc_data_dma	*dma_desc;
	struct udc_ep		*ep;

	if (!usbep)
		return NULL;

	ep = container_of(usbep, struct udc_ep, ep);

	VDBG(ep->dev, "udc_alloc_req(): ep%d\n", ep->num);
	req = kzalloc(sizeof(struct udc_request), gfp);
	if (!req)
		return NULL;

	req->req.dma = DMA_DONT_USE;
	INIT_LIST_HEAD(&req->queue);

	if (ep->dma) {
		/* ep0 in requests are allocated from data pool here */
		dma_desc = pci_pool_alloc(ep->dev->data_requests, gfp,
						&req->td_phys);
		if (!dma_desc) {
			kfree(req);
			return NULL;
		}

		VDBG(ep->dev, "udc_alloc_req: req = %p dma_desc = %p, "
				"td_phys = %lx\n",
				req, dma_desc,
				(unsigned long)req->td_phys);
		/* prevent from using desc. - set HOST BUSY */
		dma_desc->status = AMD_ADDBITS(dma_desc->status,
						UDC_DMA_STP_STS_BS_HOST_BUSY,
						UDC_DMA_STP_STS_BS);
		dma_desc->bufptr = __constant_cpu_to_le32(DMA_DONT_USE);
		req->td_data = dma_desc;
		req->td_data_last = NULL;
		req->chain_len = 1;
	}

	return &req->req;
}
 
/* Frees request packet, called by gadget driver */
static void
udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq)
{
	struct udc_ep		*ep;
	struct udc_request	*req;

	if (!usbep || !usbreq)
		return;

	ep = container_of(usbep, struct udc_ep, ep);
	req = container_of(usbreq, struct udc_request, req);
	VDBG(ep->dev, "free_req req=%p\n", req);
	BUG_ON(!list_empty(&req->queue));
	if (req->td_data) {
		VDBG(ep->dev, "req->td_data=%p\n", req->td_data);

		/* free dma chain if created */
		if (req->chain_len > 1) {
			udc_free_dma_chain(ep->dev, req);
		}

		pci_pool_free(ep->dev->data_requests, req->td_data,
							req->td_phys);
	}
	kfree(req);
}
 
/* Init BNA dummy descriptor for HOST BUSY and pointing to itself */
static void udc_init_bna_dummy(struct udc_request *req)
{
	if (req) {
		/* set last bit */
		req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);
		/* set next pointer to itself */
		req->td_data->next = req->td_phys;
		/* set HOST BUSY */
		req->td_data->status
			= AMD_ADDBITS(req->td_data->status,
					UDC_DMA_STP_STS_BS_DMA_DONE,
					UDC_DMA_STP_STS_BS);
		pr_debug("bna desc = %p, sts = %08x\n",
			req->td_data, req->td_data->status);
	}
}
 
/* Allocate BNA dummy descriptor */
static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep)
{
	struct udc_request *req = NULL;
	struct usb_request *_req = NULL;

	/* alloc the dummy request */
	_req = udc_alloc_request(&ep->ep, GFP_ATOMIC);
	if (_req) {
		req = container_of(_req, struct udc_request, req);
		ep->bna_dummy_req = req;
		udc_init_bna_dummy(req);
	}
	return req;
}
 
/* Write data to TX fifo for IN packets */
static void
udc_txfifo_write(struct udc_ep *ep, struct usb_request *req)
{
	u8			*req_buf;
	u32			*buf;
	int			i, j;
	unsigned		bytes = 0;
	unsigned		remaining = 0;

	if (!req || !ep)
		return;

	req_buf = req->buf + req->actual;
	remaining = req->length - req->actual;

	buf = (u32 *) req_buf;

	bytes = ep->ep.maxpacket;
	if (bytes > remaining)
		bytes = remaining;

	/* dwords first */
	for (i = 0; i < bytes / UDC_DWORD_BYTES; i++) {
		writel(*(buf + i), ep->txfifo);
	}

	/* remaining bytes must be written by byte access */
	for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
		writeb((u8)(*(buf + i) >> (j << UDC_BITS_PER_BYTE_SHIFT)),
							ep->txfifo);
	}

	/* dummy write confirm */
	writel(0, &ep->regs->confirm);
}
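/*
 * Note: the "dummy write confirm" above closes the packet; the UDC treats
 * the write to the confirm register as end-of-packet for the data just
 * written into the TX FIFO.
 */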
 
/* Read dwords from RX fifo for OUT transfers */
static int udc_rxfifo_read_dwords(struct udc *dev, u32 *buf, int dwords)
{
	int	i;

	VDBG(dev, "udc_read_dwords(): %d dwords\n", dwords);

	for (i = 0; i < dwords; i++) {
		*(buf + i) = readl(dev->rxfifo);
	}
	return 0;
}
 
/* Read bytes from RX fifo for OUT transfers */
static int udc_rxfifo_read_bytes(struct udc *dev, u8 *buf, int bytes)
{
	int i, j;
	u32 tmp;

	VDBG(dev, "udc_read_bytes(): %d bytes\n", bytes);

	/* dwords first */
	for (i = 0; i < bytes / UDC_DWORD_BYTES; i++) {
		*((u32 *)(buf + (i<<2))) = readl(dev->rxfifo);
	}

	/* remaining bytes must be read by byte access */
	if (bytes % UDC_DWORD_BYTES) {
		tmp = readl(dev->rxfifo);
		for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
			*(buf + (i<<2) + j) = (u8)(tmp & UDC_BYTE_MASK);
			tmp = tmp >> UDC_BITS_PER_BYTE;
		}
	}
	return 0;
}
 
/* Read data from RX fifo for OUT transfers */
static int
udc_rxfifo_read(struct udc_ep *ep, struct udc_request *req)
{
	u8 *buf;
	unsigned buf_space;
	unsigned bytes = 0;
	unsigned finished = 0;

	/* received number bytes */
	bytes = readl(&ep->regs->sts);
	bytes = AMD_GETBITS(bytes, UDC_EPSTS_RX_PKT_SIZE);

	buf_space = req->req.length - req->req.actual;
	buf = req->req.buf + req->req.actual;
	if (bytes > buf_space) {
		if ((buf_space % ep->ep.maxpacket) != 0) {
			DBG(ep->dev,
				"%s: rx %d bytes, rx-buf space = %d bytes\n",
				ep->ep.name, bytes, buf_space);
			req->req.status = -EOVERFLOW;
		}
		bytes = buf_space;
	}
	req->req.actual += bytes;

	/* last packet ? */
	if (((bytes % ep->ep.maxpacket) != 0) || (!bytes)
		|| ((req->req.actual == req->req.length) && !req->req.zero))
		finished = 1;

	/* read rx fifo bytes */
	VDBG(ep->dev, "ep %s: rxfifo read %d bytes\n", ep->ep.name, bytes);
	udc_rxfifo_read_bytes(ep->dev, buf, bytes);

	return finished;
}
 
/* create/re-init a DMA descriptor or a DMA descriptor chain */
static int prep_dma(struct udc_ep *ep, struct udc_request *req, gfp_t gfp)
{
	int	retval = 0;
	u32	tmp;

	VDBG(ep->dev, "prep_dma\n");
	VDBG(ep->dev, "prep_dma ep%d req->td_data=%p\n",
			ep->num, req->td_data);

	/* set buffer pointer */
	req->td_data->bufptr = req->req.dma;

	/* set last bit */
	req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);

	/* build/re-init dma chain if maxpkt scatter mode, not for EP0 */
	if (use_dma_ppb) {

		retval = udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
		if (retval != 0) {
			if (retval == -ENOMEM)
				DBG(ep->dev, "Out of DMA memory\n");
			return retval;
		}
		if (ep->in) {
			if (req->req.length == ep->ep.maxpacket) {
				/* write tx bytes */
				req->td_data->status =
					AMD_ADDBITS(req->td_data->status,
						ep->ep.maxpacket,
						UDC_DMA_IN_STS_TXBYTES);
			}
		}

	}

	if (ep->in) {
		VDBG(ep->dev, "IN: use_dma_ppb=%d req->req.len=%d "
				"maxpacket=%d ep%d\n",
				use_dma_ppb, req->req.length,
				ep->ep.maxpacket, ep->num);
		/*
		 * if bytes < max packet then tx bytes must
		 * be written in packet per buffer mode
		 */
		if (!use_dma_ppb || req->req.length < ep->ep.maxpacket
				|| ep->num == UDC_EP0OUT_IX
				|| ep->num == UDC_EP0IN_IX) {
			/* write tx bytes */
			req->td_data->status =
				AMD_ADDBITS(req->td_data->status,
						req->req.length,
						UDC_DMA_IN_STS_TXBYTES);
			/* reset frame num */
			req->td_data->status =
				AMD_ADDBITS(req->td_data->status,
						0,
						UDC_DMA_IN_STS_FRAMENUM);
		}
		/* set HOST BUSY */
		req->td_data->status =
			AMD_ADDBITS(req->td_data->status,
				UDC_DMA_STP_STS_BS_HOST_BUSY,
				UDC_DMA_STP_STS_BS);
	} else {
		VDBG(ep->dev, "OUT set host ready\n");
		/* set HOST READY */
		req->td_data->status =
			AMD_ADDBITS(req->td_data->status,
				UDC_DMA_STP_STS_BS_HOST_READY,
				UDC_DMA_STP_STS_BS);

		/* clear NAK by writing CNAK */
		if (ep->naking) {
			tmp = readl(&ep->regs->ctl);
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &ep->regs->ctl);
			ep->naking = 0;
			UDC_QUEUE_CNAK(ep, ep->num);
		}

	}

	return retval;
}
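/*
 * Buffer status (BS) semantics used above: HOST_READY hands a descriptor
 * over to the UDC DMA engine, HOST_BUSY keeps it owned by the driver. For
 * IN transfers prep_dma() leaves the descriptor HOST_BUSY; it is switched
 * to HOST_READY in udc_queue() right before the descriptor pointer is
 * written.
 */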
 
/* Completes request packet ... caller MUST hold lock */
static void
complete_req(struct udc_ep *ep, struct udc_request *req, int sts)
__releases(ep->dev->lock)
__acquires(ep->dev->lock)
{
	struct udc		*dev;
	unsigned		halted;

	VDBG(ep->dev, "complete_req(): ep%d\n", ep->num);

	dev = ep->dev;
	/* unmap DMA */
	if (req->dma_mapping) {
		if (ep->in)
			pci_unmap_single(dev->pdev,
					req->req.dma,
					req->req.length,
					PCI_DMA_TODEVICE);
		else
			pci_unmap_single(dev->pdev,
					req->req.dma,
					req->req.length,
					PCI_DMA_FROMDEVICE);
		req->dma_mapping = 0;
		req->req.dma = DMA_DONT_USE;
	}

	halted = ep->halted;
	ep->halted = 1;

	/* set new status if pending */
	if (req->req.status == -EINPROGRESS)
		req->req.status = sts;

	/* remove from ep queue */
	list_del_init(&req->queue);

	VDBG(ep->dev, "req %p => complete %d bytes at %s with sts %d\n",
		&req->req, req->req.length, ep->ep.name, sts);

	spin_unlock(&dev->lock);
	req->req.complete(&ep->ep, &req->req);
	spin_lock(&dev->lock);
	ep->halted = halted;
}
 
/* frees pci pool descriptors of a DMA chain */
static int udc_free_dma_chain(struct udc *dev, struct udc_request *req)
{
	int ret_val = 0;
	struct udc_data_dma	*td;
	struct udc_data_dma	*td_last = NULL;
	unsigned int i;

	DBG(dev, "free chain req = %p\n", req);

	/* do not free first desc., will be done by free for request */
	td_last = req->td_data;
	td = phys_to_virt(td_last->next);

	for (i = 1; i < req->chain_len; i++) {

		pci_pool_free(dev->data_requests, td,
				(dma_addr_t) td_last->next);
		td_last = td;
		td = phys_to_virt(td_last->next);
	}

	return ret_val;
}
 
/* Iterates to the end of a DMA chain and returns last descriptor */
static struct udc_data_dma *udc_get_last_dma_desc(struct udc_request *req)
{
	struct udc_data_dma	*td;

	td = req->td_data;
	while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L))) {
		td = phys_to_virt(td->next);
	}

	return td;
}
 
/* Iterates to the end of a DMA chain and counts bytes received */
static u32 udc_get_ppbdu_rxbytes(struct udc_request *req)
{
	struct udc_data_dma	*td;
	u32 count;

	td = req->td_data;
	/* received number bytes */
	count = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_RXBYTES);

	while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L))) {
		td = phys_to_virt(td->next);
		/* received number bytes */
		if (td) {
			count += AMD_GETBITS(td->status,
				UDC_DMA_OUT_STS_RXBYTES);
		}
	}

	return count;
}
 
/* Creates or re-inits a DMA chain */
static int udc_create_dma_chain(
	struct udc_ep *ep,
	struct udc_request *req,
	unsigned long buf_len, gfp_t gfp_flags
)
{
	unsigned long bytes = req->req.length;
	unsigned int i;
	dma_addr_t dma_addr;
	struct udc_data_dma	*td = NULL;
	struct udc_data_dma	*last = NULL;
	unsigned long txbytes;
	unsigned create_new_chain = 0;
	unsigned len;

	VDBG(ep->dev, "udc_create_dma_chain: bytes=%ld buf_len=%ld\n",
			bytes, buf_len);
	dma_addr = DMA_DONT_USE;

	/* unset L bit in first desc for OUT */
	if (!ep->in) {
		req->td_data->status &= AMD_CLEAR_BIT(UDC_DMA_IN_STS_L);
	}

	/* alloc only new desc's if not already available */
	len = req->req.length / ep->ep.maxpacket;
	if (req->req.length % ep->ep.maxpacket) {
		len++;
	}

	if (len > req->chain_len) {
		/* shorter chain already allocated before */
		if (req->chain_len > 1) {
			udc_free_dma_chain(ep->dev, req);
		}
		req->chain_len = len;
		create_new_chain = 1;
	}

	td = req->td_data;
	/* gen. required number of descriptors and buffers */
	for (i = buf_len; i < bytes; i += buf_len) {
		/* create or determine next desc. */
		if (create_new_chain) {

			td = pci_pool_alloc(ep->dev->data_requests,
					gfp_flags, &dma_addr);
			if (!td)
				return -ENOMEM;

			td->status = 0;
		} else if (i == buf_len) {
			/* first td */
			td = (struct udc_data_dma *) phys_to_virt(
						req->td_data->next);
			td->status = 0;
		} else {
			td = (struct udc_data_dma *) phys_to_virt(last->next);
			td->status = 0;
		}

		if (td)
			td->bufptr = req->req.dma + i; /* assign buffer */
		else
			break;

		/* short packet ? */
		if ((bytes - i) >= buf_len) {
			txbytes = buf_len;
		} else {
			/* short packet */
			txbytes = bytes - i;
		}

		/* link td and assign tx bytes */
		if (i == buf_len) {
			if (create_new_chain) {
				req->td_data->next = dma_addr;
			} else {
				/* req->td_data->next = virt_to_phys(td); */
			}
			/* write tx bytes */
			if (ep->in) {
				/* first desc */
				req->td_data->status =
					AMD_ADDBITS(req->td_data->status,
							ep->ep.maxpacket,
							UDC_DMA_IN_STS_TXBYTES);
				/* second desc */
				td->status = AMD_ADDBITS(td->status,
							txbytes,
							UDC_DMA_IN_STS_TXBYTES);
			}
		} else {
			if (create_new_chain) {
				last->next = dma_addr;
			} else {
				/* last->next = virt_to_phys(td); */
			}
			if (ep->in) {
				/* write tx bytes */
				td->status = AMD_ADDBITS(td->status,
							txbytes,
							UDC_DMA_IN_STS_TXBYTES);
			}
		}
		last = td;
	}
	/* set last bit */
	if (td) {
		td->status |= AMD_BIT(UDC_DMA_IN_STS_L);
		/* last desc. points to itself */
		req->td_data_last = td;
	}

	return 0;
}
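/*
 * Chain layout used by udc_create_dma_chain(): the first descriptor is the
 * request's own td_data; every further descriptor covers one buf_len
 * (maxpacket) slice of req->req.dma and is linked through ->next, with the
 * L (last) bit set only on the final descriptor.
 */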
 
/* Enabling RX DMA */
static void udc_set_rde(struct udc *dev)
{
	u32 tmp;

	VDBG(dev, "udc_set_rde()\n");
	/* stop RDE timer */
	if (timer_pending(&udc_timer)) {
		set_rde = 0;
		mod_timer(&udc_timer, jiffies - 1);
	}
	/* set RDE */
	tmp = readl(&dev->regs->ctl);
	tmp |= AMD_BIT(UDC_DEVCTL_RDE);
	writel(tmp, &dev->regs->ctl);
}
 
/* Queues a request packet, called by gadget driver */
static int
udc_queue(struct usb_ep *usbep, struct usb_request *usbreq, gfp_t gfp)
{
	int			retval = 0;
	u8			open_rxfifo = 0;
	unsigned long		iflags;
	struct udc_ep		*ep;
	struct udc_request	*req;
	struct udc		*dev;
	u32			tmp;

	/* check the inputs */
	req = container_of(usbreq, struct udc_request, req);

	if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf
			|| !list_empty(&req->queue))
		return -EINVAL;

	ep = container_of(usbep, struct udc_ep, ep);
	if (!ep->desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
		return -EINVAL;

	VDBG(ep->dev, "udc_queue(): ep%d-in=%d\n", ep->num, ep->in);
	dev = ep->dev;

	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	/* map dma (usually done before) */
	if (ep->dma && usbreq->length != 0
			&& (usbreq->dma == DMA_DONT_USE || usbreq->dma == 0)) {
		VDBG(dev, "DMA map req %p\n", req);
		if (ep->in)
			usbreq->dma = pci_map_single(dev->pdev,
						usbreq->buf,
						usbreq->length,
						PCI_DMA_TODEVICE);
		else
			usbreq->dma = pci_map_single(dev->pdev,
						usbreq->buf,
						usbreq->length,
						PCI_DMA_FROMDEVICE);
		req->dma_mapping = 1;
	}

	VDBG(dev, "%s queue req %p, len %d req->td_data=%p buf %p\n",
			usbep->name, usbreq, usbreq->length,
			req->td_data, usbreq->buf);

	spin_lock_irqsave(&dev->lock, iflags);
	usbreq->status = -EINPROGRESS;
	req->dma_done = 0;

	/* on empty queue just do first transfer */
	if (list_empty(&ep->queue)) {
		/* zlp */
		if (usbreq->length == 0) {
			/* IN zlp's are handled by hardware */
			complete_req(ep, req, 0);
			VDBG(dev, "%s: zlp\n", ep->ep.name);
			/*
			 * if set_config or set_intf is waiting for ack by zlp
			 * then set CSR_DONE
			 */
			if (dev->set_cfg_not_acked) {
				tmp = readl(&dev->regs->ctl);
				tmp |= AMD_BIT(UDC_DEVCTL_CSR_DONE);
				writel(tmp, &dev->regs->ctl);
				dev->set_cfg_not_acked = 0;
			}
			/* setup command is ACK'ed now by zlp */
			if (dev->waiting_zlp_ack_ep0in) {
				/* clear NAK by writing CNAK in EP0_IN */
				tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
				tmp |= AMD_BIT(UDC_EPCTL_CNAK);
				writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
				dev->ep[UDC_EP0IN_IX].naking = 0;
				UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX],
							UDC_EP0IN_IX);
				dev->waiting_zlp_ack_ep0in = 0;
			}
			goto finished;
		}
		if (ep->dma) {
			retval = prep_dma(ep, req, gfp);
			if (retval != 0)
				goto finished;
			/* write desc pointer to enable DMA */
			if (ep->in) {
				/* set HOST READY */
				req->td_data->status =
					AMD_ADDBITS(req->td_data->status,
						UDC_DMA_IN_STS_BS_HOST_READY,
						UDC_DMA_IN_STS_BS);
			}

			/* disabled rx dma while descriptor update */
			if (!ep->in) {
				/* stop RDE timer */
				if (timer_pending(&udc_timer)) {
					set_rde = 0;
					mod_timer(&udc_timer, jiffies - 1);
				}
				/* clear RDE */
				tmp = readl(&dev->regs->ctl);
				tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
				writel(tmp, &dev->regs->ctl);
				open_rxfifo = 1;

				/*
				 * if BNA occurred then let BNA dummy desc.
				 * point to current desc.
				 */
				if (ep->bna_occurred) {
					VDBG(dev, "copy to BNA dummy desc.\n");
					memcpy(ep->bna_dummy_req->td_data,
						req->td_data,
						sizeof(struct udc_data_dma));
				}
			}
			/* write desc pointer */
			writel(req->td_phys, &ep->regs->desptr);

			/* clear NAK by writing CNAK */
			if (ep->naking) {
				tmp = readl(&ep->regs->ctl);
				tmp |= AMD_BIT(UDC_EPCTL_CNAK);
				writel(tmp, &ep->regs->ctl);
				ep->naking = 0;
				UDC_QUEUE_CNAK(ep, ep->num);
			}

			if (ep->in) {
				/* enable ep irq */
				tmp = readl(&dev->regs->ep_irqmsk);
				tmp &= AMD_UNMASK_BIT(ep->num);
				writel(tmp, &dev->regs->ep_irqmsk);
			}
		}

	} else if (ep->dma) {
		/*
		 * prep_dma not used for OUT ep's, this is not possible
		 * for PPB modes, because of chain creation reasons
		 */
		if (ep->in) {
			retval = prep_dma(ep, req, gfp);
			if (retval != 0)
				goto finished;
		}
	}
	VDBG(dev, "list_add\n");
	/* add request to ep queue */
	if (req) {
		list_add_tail(&req->queue, &ep->queue);

		/* open rxfifo if out data queued */
		if (open_rxfifo) {
			/* enable DMA */
			req->dma_going = 1;
			udc_set_rde(dev);
			if (ep->num != UDC_EP0OUT_IX)
				dev->data_ep_queued = 1;
		}
		/* stop OUT naking */
		if (!ep->in) {
			if (!use_dma && udc_rxfifo_pending) {
				DBG(dev, "udc_queue(): pending bytes in "
					"rxfifo after nyet\n");
				/* read pending bytes after nyet */
				if (udc_rxfifo_read(ep, req)) {
					/* finish */
					complete_req(ep, req, 0);
				}
				udc_rxfifo_pending = 0;
			}
		}
	}

finished:
	spin_unlock_irqrestore(&dev->lock, iflags);
	return retval;
}
 
/* Empty request queue of an endpoint; caller holds spinlock */
static void empty_req_queue(struct udc_ep *ep)
{
	struct udc_request	*req;

	ep->halted = 1;
	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next,
			struct udc_request,
			queue);
		complete_req(ep, req, -ESHUTDOWN);
	}
}
 
/* Dequeues a request packet, called by gadget driver */
static int udc_dequeue(struct usb_ep *usbep, struct usb_request *usbreq)
{
	struct udc_ep		*ep;
	struct udc_request	*req;
	unsigned		halted;
	unsigned long		iflags;

	ep = container_of(usbep, struct udc_ep, ep);
	if (!usbep || !usbreq || (!ep->desc && (ep->num != 0
				&& ep->num != UDC_EP0OUT_IX)))
		return -EINVAL;

	req = container_of(usbreq, struct udc_request, req);

	spin_lock_irqsave(&ep->dev->lock, iflags);
	halted = ep->halted;
	ep->halted = 1;
	/* request in processing or next one */
	if (ep->queue.next == &req->queue) {
		if (ep->dma && req->dma_going) {
			if (ep->in)
				ep->cancel_transfer = 1;
			else {
				u32 tmp;
				u32 dma_sts;
				/* stop potential receive DMA */
				tmp = readl(&udc->regs->ctl);
				writel(tmp & AMD_UNMASK_BIT(UDC_DEVCTL_RDE),
							&udc->regs->ctl);
				/*
				 * Cancel transfer later in ISR
				 * if descriptor was touched.
				 */
				dma_sts = AMD_GETBITS(req->td_data->status,
							UDC_DMA_OUT_STS_BS);
				if (dma_sts != UDC_DMA_OUT_STS_BS_HOST_READY)
					ep->cancel_transfer = 1;
				else {
					udc_init_bna_dummy(ep->req);
					writel(ep->bna_dummy_req->td_phys,
						&ep->regs->desptr);
				}
				writel(tmp, &udc->regs->ctl);
			}
		}
	}
	complete_req(ep, req, -ECONNRESET);
	ep->halted = halted;

	spin_unlock_irqrestore(&ep->dev->lock, iflags);
	return 0;
}
 
/* Halt or clear halt of endpoint */
static int
udc_set_halt(struct usb_ep *usbep, int halt)
{
	struct udc_ep	*ep;
	u32 tmp;
	unsigned long iflags;
	int retval = 0;

	if (!usbep)
		return -EINVAL;

	pr_debug("set_halt %s: halt=%d\n", usbep->name, halt);

	ep = container_of(usbep, struct udc_ep, ep);
	if (!ep->desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
		return -EINVAL;
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	spin_lock_irqsave(&udc_stall_spinlock, iflags);
	/* halt or clear halt */
	if (halt) {
		if (ep->num == 0)
			ep->dev->stall_ep0in = 1;
		else {
			/*
			 * set STALL
			 * rxfifo empty not taken into account
			 */
			tmp = readl(&ep->regs->ctl);
			tmp |= AMD_BIT(UDC_EPCTL_S);
			writel(tmp, &ep->regs->ctl);
			ep->halted = 1;

			/* setup poll timer */
			if (!timer_pending(&udc_pollstall_timer)) {
				udc_pollstall_timer.expires = jiffies +
					HZ * UDC_POLLSTALL_TIMER_USECONDS
					/ (1000 * 1000);
				if (!stop_pollstall_timer) {
					DBG(ep->dev, "start polltimer\n");
					add_timer(&udc_pollstall_timer);
				}
			}
		}
	} else {
		/* ep is halted by set_halt() before */
		if (ep->halted) {
			tmp = readl(&ep->regs->ctl);
			/* clear stall bit */
			tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
			/* clear NAK by writing CNAK */
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &ep->regs->ctl);
			ep->halted = 0;
			UDC_QUEUE_CNAK(ep, ep->num);
		}
	}
	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
	return retval;
}
 
/* gadget interface */
static const struct usb_ep_ops udc_ep_ops = {
	.enable		= udc_ep_enable,
	.disable	= udc_ep_disable,

	.alloc_request	= udc_alloc_request,
	.free_request	= udc_free_request,

	.queue		= udc_queue,
	.dequeue	= udc_dequeue,

	.set_halt	= udc_set_halt,
	/* fifo ops not implemented */
};
 
/*-------------------------------------------------------------------------*/

/* Get frame counter (not implemented) */
static int udc_get_frame(struct usb_gadget *gadget)
{
	return -EOPNOTSUPP;
}

/* Remote wakeup gadget interface */
static int udc_wakeup(struct usb_gadget *gadget)
{
	struct udc		*dev;

	if (!gadget)
		return -EINVAL;
	dev = container_of(gadget, struct udc, gadget);
	udc_remote_wakeup(dev);

	return 0;
}

/* gadget operations */
static const struct usb_gadget_ops udc_ops = {
	.wakeup		= udc_wakeup,
	.get_frame	= udc_get_frame,
};
 
/* Setups endpoint parameters, adds endpoints to linked list */
static void make_ep_lists(struct udc *dev)
{
	/* make gadget ep lists */
	INIT_LIST_HEAD(&dev->gadget.ep_list);
	list_add_tail(&dev->ep[UDC_EPIN_STATUS_IX].ep.ep_list,
						&dev->gadget.ep_list);
	list_add_tail(&dev->ep[UDC_EPIN_IX].ep.ep_list,
						&dev->gadget.ep_list);
	list_add_tail(&dev->ep[UDC_EPOUT_IX].ep.ep_list,
						&dev->gadget.ep_list);

	dev->ep[UDC_EPIN_STATUS_IX].fifo_depth = UDC_EPIN_SMALLINT_BUFF_SIZE;
	if (dev->gadget.speed == USB_SPEED_FULL)
		dev->ep[UDC_EPIN_IX].fifo_depth = UDC_FS_EPIN_BUFF_SIZE;
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		dev->ep[UDC_EPIN_IX].fifo_depth = hs_tx_buf;
	dev->ep[UDC_EPOUT_IX].fifo_depth = UDC_RXFIFO_SIZE;
}
 
/* init registers at driver load time */
static int startup_registers(struct udc *dev)
{
	u32 tmp;

	/* init controller by soft reset */
	udc_soft_reset(dev);

	/* mask not needed interrupts */
	udc_mask_unused_interrupts(dev);

	/* put into initial config */
	udc_basic_init(dev);
	/* link up all endpoints */
	udc_setup_endpoints(dev);

	/* program speed */
	tmp = readl(&dev->regs->cfg);
	if (use_fullspeed) {
		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
	} else {
		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_HS, UDC_DEVCFG_SPD);
	}
	writel(tmp, &dev->regs->cfg);

	return 0;
}
 
/* Inits UDC context */
static void udc_basic_init(struct udc *dev)
{
	u32	tmp;

	DBG(dev, "udc_basic_init()\n");

	dev->gadget.speed = USB_SPEED_UNKNOWN;

	/* stop RDE timer */
	if (timer_pending(&udc_timer)) {
		set_rde = 0;
		mod_timer(&udc_timer, jiffies - 1);
	}
	/* stop poll stall timer */
	if (timer_pending(&udc_pollstall_timer)) {
		mod_timer(&udc_pollstall_timer, jiffies - 1);
	}
	/* disable DMA */
	tmp = readl(&dev->regs->ctl);
	tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
	tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_TDE);
	writel(tmp, &dev->regs->ctl);

	/* enable dynamic CSR programming */
	tmp = readl(&dev->regs->cfg);
	tmp |= AMD_BIT(UDC_DEVCFG_CSR_PRG);
	/* set self powered */
	tmp |= AMD_BIT(UDC_DEVCFG_SP);
	/* set remote wakeupable */
	tmp |= AMD_BIT(UDC_DEVCFG_RWKP);
	writel(tmp, &dev->regs->cfg);

	make_ep_lists(dev);

	dev->data_ep_enabled = 0;
	dev->data_ep_queued = 0;
}
 
/* Sets initial endpoint parameters */
static void udc_setup_endpoints(struct udc *dev)
{
	struct udc_ep	*ep;
	u32	tmp;
	u32	reg;

	DBG(dev, "udc_setup_endpoints()\n");

	/* read enum speed */
	tmp = readl(&dev->regs->sts);
	tmp = AMD_GETBITS(tmp, UDC_DEVSTS_ENUM_SPEED);
	if (tmp == UDC_DEVSTS_ENUM_SPEED_HIGH) {
		dev->gadget.speed = USB_SPEED_HIGH;
	} else if (tmp == UDC_DEVSTS_ENUM_SPEED_FULL) {
		dev->gadget.speed = USB_SPEED_FULL;
	}

	/* set basic ep parameters */
	for (tmp = 0; tmp < UDC_EP_NUM; tmp++) {
		ep = &dev->ep[tmp];
		ep->dev = dev;
		ep->ep.name = ep_string[tmp];
		ep->num = tmp;
		/* txfifo size is calculated at enable time */
		ep->txfifo = dev->txfifo;

		if (tmp < UDC_EPIN_NUM) {
			ep->fifo_depth = UDC_TXFIFO_SIZE;
			ep->in = 1;
		} else {
			ep->fifo_depth = UDC_RXFIFO_SIZE;
			ep->in = 0;
		}
		ep->regs = &dev->ep_regs[tmp];
		/*
		 * ep will be reset only if ep was not enabled before to avoid
		 * disabling ep interrupts when ENUM interrupt occurs but ep is
		 * not enabled by gadget driver
		 */
		if (!ep->desc) {
			ep_init(dev->regs, ep);
		}

		if (use_dma) {
			/*
			 * ep->dma is not really used, just to indicate that
			 * DMA is active: remove this
			 * dma regs = dev control regs
			 */
			ep->dma = &dev->regs->ctl;

			/* nak OUT endpoints until enable - not for ep0 */
			if (tmp != UDC_EP0IN_IX && tmp != UDC_EP0OUT_IX
						&& tmp > UDC_EPIN_NUM) {
				/* set NAK */
				reg = readl(&dev->ep[tmp].regs->ctl);
				reg |= AMD_BIT(UDC_EPCTL_SNAK);
				writel(reg, &dev->ep[tmp].regs->ctl);
				dev->ep[tmp].naking = 1;
			}
		}
	}
	/* EP0 max packet */
	if (dev->gadget.speed == USB_SPEED_FULL) {
		dev->ep[UDC_EP0IN_IX].ep.maxpacket = UDC_FS_EP0IN_MAX_PKT_SIZE;
		dev->ep[UDC_EP0OUT_IX].ep.maxpacket =
						UDC_FS_EP0OUT_MAX_PKT_SIZE;
	} else if (dev->gadget.speed == USB_SPEED_HIGH) {
		dev->ep[UDC_EP0IN_IX].ep.maxpacket = UDC_EP0IN_MAX_PKT_SIZE;
		dev->ep[UDC_EP0OUT_IX].ep.maxpacket = UDC_EP0OUT_MAX_PKT_SIZE;
	}

	/*
	 * with suspend bug workaround, ep0 params for gadget driver
	 * are set at gadget driver bind() call
	 */
	dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
	dev->ep[UDC_EP0IN_IX].halted = 0;
	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);

	/* init cfg/alt/int */
	dev->cur_config = 0;
	dev->cur_intf = 0;
	dev->cur_alt = 0;
}
 
/* Bringup after Connect event, initial bringup to be ready for ep0 events */
static void usb_connect(struct udc *dev)
{
	dev_info(&dev->pdev->dev, "USB Connect\n");

	dev->connected = 1;

	/* put into initial config */
	udc_basic_init(dev);

	/* enable device setup interrupts */
	udc_enable_dev_setup_interrupts(dev);
}
 
/*
 * Calls gadget with disconnect event and resets the UDC and makes
 * initial bringup to be ready for ep0 events
 */
static void usb_disconnect(struct udc *dev)
{
	dev_info(&dev->pdev->dev, "USB Disconnect\n");

	dev->connected = 0;

	/* mask interrupts */
	udc_mask_unused_interrupts(dev);

	/*
	 * REVISIT there doesn't seem to be a point to having this
	 * talk to a tasklet ... do it directly, we already hold
	 * the spinlock needed to process the disconnect.
	 */
	tasklet_schedule(&disconnect_tasklet);
}
 
/* Tasklet for disconnect to be outside of interrupt context */
static void udc_tasklet_disconnect(unsigned long par)
{
	struct udc *dev = (struct udc *)(*((struct udc **) par));
	u32 tmp;

	DBG(dev, "Tasklet disconnect\n");
	spin_lock_irq(&dev->lock);

	if (dev->driver) {
		spin_unlock(&dev->lock);
		dev->driver->disconnect(&dev->gadget);
		spin_lock(&dev->lock);

		/* empty queues */
		for (tmp = 0; tmp < UDC_EP_NUM; tmp++) {
			empty_req_queue(&dev->ep[tmp]);
		}
	}

	/* disable ep0 */
	ep_init(dev->regs,
			&dev->ep[UDC_EP0IN_IX]);

	if (!soft_reset_occured) {
		/* init controller by soft reset */
		udc_soft_reset(dev);
		soft_reset_occured++;
	}

	/* re-enable dev interrupts */
	udc_enable_dev_setup_interrupts(dev);
	/* back to full speed ? */
	if (use_fullspeed) {
		tmp = readl(&dev->regs->cfg);
		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
		writel(tmp, &dev->regs->cfg);
	}

	spin_unlock_irq(&dev->lock);
}
 
/* Reset the UDC core */
static void udc_soft_reset(struct udc *dev)
{
	unsigned long	flags;

	DBG(dev, "Soft reset\n");
	/*
	 * reset possible waiting interrupts, because int.
	 * status is lost after soft reset,
	 * ep int. status reset
	 */
	writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqsts);
	/* device int. status reset */
	writel(UDC_DEV_MSK_DISABLE, &dev->regs->irqsts);

	spin_lock_irqsave(&udc_irq_spinlock, flags);
	writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
	readl(&dev->regs->cfg);
	spin_unlock_irqrestore(&udc_irq_spinlock, flags);
}
 
/* RDE timer callback to set RDE bit */
static void udc_timer_function(unsigned long v)
{
	u32 tmp;

	spin_lock_irq(&udc_irq_spinlock);

	if (set_rde > 0) {
		/*
		 * open the fifo if fifo was filled on last timer call
		 * conditionally
		 */
		if (set_rde > 1) {
			/* set RDE to receive setup data */
			tmp = readl(&udc->regs->ctl);
			tmp |= AMD_BIT(UDC_DEVCTL_RDE);
			writel(tmp, &udc->regs->ctl);
			set_rde = -1;
		} else if (readl(&udc->regs->sts)
				& AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) {
			/*
			 * if fifo empty setup polling, do not just
			 * open the fifo
			 */
			udc_timer.expires = jiffies + HZ/UDC_RDE_TIMER_DIV;
			if (!stop_timer) {
				add_timer(&udc_timer);
			}
		} else {
			/*
			 * fifo contains data now, setup timer for opening
			 * the fifo when timer expires to be able to receive
			 * setup packets, when data packets get queued by
			 * the gadget layer then the timer will be forced to
			 * expire with set_rde=0 (RDE is set in udc_queue())
			 */
			set_rde++;
			/* debug: lhadmot_timer_start = 221070 */
			udc_timer.expires = jiffies + HZ*UDC_RDE_TIMER_SECONDS;
			if (!stop_timer) {
				add_timer(&udc_timer);
			}
		}

	} else
		set_rde = -1; /* RDE was set by udc_queue() */
	spin_unlock_irq(&udc_irq_spinlock);

	if (stop_timer)
		complete(&on_exit);
}
 
/* Handle halt state, used in stall poll timer */
static void udc_handle_halt_state(struct udc_ep *ep)
{
	u32 tmp;

	/* set stall as long not halted */
	if (ep->halted == 1) {
		tmp = readl(&ep->regs->ctl);
		/* STALL cleared ? */
		if (!(tmp & AMD_BIT(UDC_EPCTL_S))) {
			/*
			 * FIXME: MSC spec requires that stall remains
			 * even on receiving of CLEAR_FEATURE HALT. So
			 * we would set STALL again here to be compliant.
			 * But with current mass storage drivers this does
			 * not work (would produce endless host retries).
			 * So we clear halt on CLEAR_FEATURE.
			 *
			DBG(ep->dev, "ep %d: set STALL again\n", ep->num);
			tmp |= AMD_BIT(UDC_EPCTL_S);
			writel(tmp, &ep->regs->ctl);*/
			ep->halted = 0;
			/* clear NAK by writing CNAK */
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &ep->regs->ctl);
			ep->naking = 0;
			UDC_QUEUE_CNAK(ep, ep->num);
		}
	}
}
 
1805 /* Stall timer callback to poll S bit and set it again after */
 
1806 static void udc_pollstall_timer_function(unsigned long v)
 
1811         spin_lock_irq(&udc_stall_spinlock);
 
1813          * only one IN and OUT endpoints are handled
 
1816         ep = &udc->ep[UDC_EPIN_IX];
 
1817         udc_handle_halt_state(ep);
 
1820         /* OUT poll stall */
 
1821         ep = &udc->ep[UDC_EPOUT_IX];
 
1822         udc_handle_halt_state(ep);
 
1826         /* setup timer again when still halted */
 
1827         if (!stop_pollstall_timer && halted) {
 
1828                 udc_pollstall_timer.expires = jiffies +
 
1829                                         HZ * UDC_POLLSTALL_TIMER_USECONDS
 
1831                 add_timer(&udc_pollstall_timer);
 
1833         spin_unlock_irq(&udc_stall_spinlock);
 
1835         if (stop_pollstall_timer)
 
1836                 complete(&on_pollstall_exit);
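/*
 * Editor's sketch (not part of the driver): the stop-flag plus
 * completion handshake that lets the remove path wait for the final
 * pollstall timer run, as seen above. demo_* names are illustrative.
 */
#if 0	/* illustrative only, not compiled with the driver */
static struct timer_list demo_poll_timer;
static int demo_poll_stop;
static DECLARE_COMPLETION(demo_poll_exit);

static void demo_poll_fn(unsigned long data)
{
	if (!demo_poll_stop) {
		demo_poll_timer.expires = jiffies + HZ;
		add_timer(&demo_poll_timer);	/* keep polling */
	} else {
		complete(&demo_poll_exit);	/* unblock teardown */
	}
}

static void demo_poll_teardown(void)
{
	demo_poll_stop++;
	if (timer_pending(&demo_poll_timer))
		wait_for_completion(&demo_poll_exit);
	del_timer_sync(&demo_poll_timer);
}
#endif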
 
1839 /* Inits endpoint 0 so that SETUP packets are processed */
 
1840 static void activate_control_endpoints(struct udc *dev)
 
1844         DBG(dev, "activate_control_endpoints\n");
 
1847         tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
 
1848         tmp |= AMD_BIT(UDC_EPCTL_F);
 
1849         writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
 
1851         /* set ep0 directions */
 
1852         dev->ep[UDC_EP0IN_IX].in = 1;
 
1853         dev->ep[UDC_EP0OUT_IX].in = 0;
 
1855         /* set buffer size (tx fifo entries) of EP0_IN */
 
1856         tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);
 
1857         if (dev->gadget.speed == USB_SPEED_FULL)
 
1858                 tmp = AMD_ADDBITS(tmp, UDC_FS_EPIN0_BUFF_SIZE,
 
1859                                         UDC_EPIN_BUFF_SIZE);
 
1860         else if (dev->gadget.speed == USB_SPEED_HIGH)
 
1861                 tmp = AMD_ADDBITS(tmp, UDC_EPIN0_BUFF_SIZE,
 
1862                                         UDC_EPIN_BUFF_SIZE);
 
1863         writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);
 
1865         /* set max packet size of EP0_IN */
 
1866         tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);
 
1867         if (dev->gadget.speed == USB_SPEED_FULL)
 
1868                 tmp = AMD_ADDBITS(tmp, UDC_FS_EP0IN_MAX_PKT_SIZE,
 
1869                                         UDC_EP_MAX_PKT_SIZE);
 
1870         else if (dev->gadget.speed == USB_SPEED_HIGH)
 
1871                 tmp = AMD_ADDBITS(tmp, UDC_EP0IN_MAX_PKT_SIZE,
 
1872                                 UDC_EP_MAX_PKT_SIZE);
 
1873         writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);
 
1875         /* set max packet size of EP0_OUT */
 
1876         tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);
 
1877         if (dev->gadget.speed == USB_SPEED_FULL)
 
1878                 tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
 
1879                                         UDC_EP_MAX_PKT_SIZE);
 
1880         else if (dev->gadget.speed == USB_SPEED_HIGH)
 
1881                 tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
 
1882                                         UDC_EP_MAX_PKT_SIZE);
 
1883         writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);
 
1885         /* set max packet size of EP0 in UDC CSR */
 
1886         tmp = readl(&dev->csr->ne[0]);
 
1887         if (dev->gadget.speed == USB_SPEED_FULL)
 
1888                 tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
 
1889                                         UDC_CSR_NE_MAX_PKT);
 
1890         else if (dev->gadget.speed == USB_SPEED_HIGH)
 
1891                 tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
 
1892                                         UDC_CSR_NE_MAX_PKT);
 
1893         writel(tmp, &dev->csr->ne[0]);
 
1896                 dev->ep[UDC_EP0OUT_IX].td->status |=
 
1897                         AMD_BIT(UDC_DMA_OUT_STS_L);
 
1898                 /* write dma desc address */
 
1899                 writel(dev->ep[UDC_EP0OUT_IX].td_stp_dma,
 
1900                         &dev->ep[UDC_EP0OUT_IX].regs->subptr);
 
1901                 writel(dev->ep[UDC_EP0OUT_IX].td_phys,
 
1902                         &dev->ep[UDC_EP0OUT_IX].regs->desptr);
 
1903                 /* stop RDE timer */
 
1904                 if (timer_pending(&udc_timer)) {
 
1906                         mod_timer(&udc_timer, jiffies - 1);
 
1908                 /* stop pollstall timer */
 
1909                 if (timer_pending(&udc_pollstall_timer)) {
 
1910                         mod_timer(&udc_pollstall_timer, jiffies - 1);
 
1913                 tmp = readl(&dev->regs->ctl);
 
1914                 tmp |= AMD_BIT(UDC_DEVCTL_MODE)
 
1915                                 | AMD_BIT(UDC_DEVCTL_RDE)
 
1916                                 | AMD_BIT(UDC_DEVCTL_TDE);
 
1917                 if (use_dma_bufferfill_mode) {
 
1918                         tmp |= AMD_BIT(UDC_DEVCTL_BF);
 
1919                 } else if (use_dma_ppb_du) {
 
1920                         tmp |= AMD_BIT(UDC_DEVCTL_DU);
 
1922                 writel(tmp, &dev->regs->ctl);
 
1925         /* clear NAK by writing CNAK for EP0IN */
 
1926         tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
 
1927         tmp |= AMD_BIT(UDC_EPCTL_CNAK);
 
1928         writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
 
1929         dev->ep[UDC_EP0IN_IX].naking = 0;
 
1930         UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);
 
1932         /* clear NAK by writing CNAK for EP0OUT */
 
1933         tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
 
1934         tmp |= AMD_BIT(UDC_EPCTL_CNAK);
 
1935         writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
 
1936         dev->ep[UDC_EP0OUT_IX].naking = 0;
 
1937         UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
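/*
 * Editor's sketch (not part of the driver): assumed semantics of the
 * AMD_BIT/AMD_GETBITS/AMD_ADDBITS helpers used heavily above. The
 * authoritative definitions live in amd5536udc.h; this only shows the
 * usual mask-and-shift bitfield idiom they appear to implement, with
 * DEMO_* stand-in names and an explicit mask/offset pair.
 */
#if 0	/* illustrative only, not compiled with the driver */
#define DEMO_BIT(bitnum)		(1 << (bitnum))
#define DEMO_GETBITS(reg, mask, ofs)	(((reg) & (mask)) >> (ofs))
#define DEMO_ADDBITS(reg, val, mask, ofs) \
	(((reg) & ~(mask)) | (((val) << (ofs)) & (mask)))

/*
 * Typical read-modify-write of one field, leaving the rest of the
 * register untouched (cf. the buffer size setup above):
 *
 *	tmp = readl(reg);
 *	tmp = DEMO_ADDBITS(tmp, size, SIZE_MASK, SIZE_OFS);
 *	writel(tmp, reg);
 */
#endif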
 
1940 /* Make endpoint 0 ready for control traffic */
 
1941 static int setup_ep0(struct udc *dev)
 
1943         activate_control_endpoints(dev);
 
1944         /* enable ep0 interrupts */
 
1945         udc_enable_ep0_interrupts(dev);
 
1946         /* enable device setup interrupts */
 
1947         udc_enable_dev_setup_interrupts(dev);
 
1952 /* Called by gadget driver to register itself */
 
1953 int usb_gadget_register_driver(struct usb_gadget_driver *driver)
 
1955         struct udc              *dev = udc;
 
1959         if (!driver || !driver->bind || !driver->setup
 
1960                         || driver->speed != USB_SPEED_HIGH)
1961                 return -EINVAL;
 
1967         driver->driver.bus = NULL;
 
1968         dev->driver = driver;
 
1969         dev->gadget.dev.driver = &driver->driver;
 
1971         retval = driver->bind(&dev->gadget);
 
1973         /* Some gadget drivers use both ep0 directions.
 
1974          * NOTE: to gadget driver, ep0 is just one endpoint...
 
1976         dev->ep[UDC_EP0OUT_IX].ep.driver_data =
 
1977                 dev->ep[UDC_EP0IN_IX].ep.driver_data;
 
1980                 DBG(dev, "binding to %s returning %d\n",
 
1981                                 driver->driver.name, retval);
 
1983                 dev->gadget.dev.driver = NULL;
 
1987         /* get ready for ep0 traffic */
 
1991         tmp = readl(&dev->regs->ctl);
 
1992         tmp = tmp & AMD_CLEAR_BIT(UDC_DEVCTL_SD);
 
1993         writel(tmp, &dev->regs->ctl);
 
1999 EXPORT_SYMBOL(usb_gadget_register_driver);
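/*
 * Editor's sketch (not part of the driver): how a gadget driver of
 * this kernel generation would use the call exported above. The
 * demo_* callbacks are placeholders; a real gadget (ether, zero,
 * file_storage) supplies full bind/setup/disconnect logic.
 */
#if 0	/* illustrative only, not compiled with the driver */
static int demo_bind(struct usb_gadget *gadget)
{
	return 0;	/* allocate eps, descriptors, ... */
}

static int demo_setup(struct usb_gadget *gadget,
		const struct usb_ctrlrequest *ctrl)
{
	return -EOPNOTSUPP;	/* handle class/vendor requests here */
}

static void demo_disconnect(struct usb_gadget *gadget) { }
static void demo_unbind(struct usb_gadget *gadget) { }

static struct usb_gadget_driver demo_gadget_driver = {
	.speed		= USB_SPEED_HIGH,  /* required by the check above */
	.bind		= demo_bind,
	.setup		= demo_setup,
	.disconnect	= demo_disconnect,
	.unbind		= demo_unbind,
	.driver		= { .name = "demo_gadget" },
};

/* register:   usb_gadget_register_driver(&demo_gadget_driver);
 * unregister: usb_gadget_unregister_driver(&demo_gadget_driver);
 */
#endif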
 
2001 /* shutdown requests and disconnect from gadget */
 
2003 shutdown(struct udc *dev, struct usb_gadget_driver *driver)
 
2004 __releases(dev->lock)
 
2005 __acquires(dev->lock)
 
2009         /* empty queues and init hardware */
 
2010         udc_basic_init(dev);
 
2011         for (tmp = 0; tmp < UDC_EP_NUM; tmp++) {
 
2012                 empty_req_queue(&dev->ep[tmp]);
 
2015         if (dev->gadget.speed != USB_SPEED_UNKNOWN) {
 
2016                 spin_unlock(&dev->lock);
 
2017                 driver->disconnect(&dev->gadget);
 
2018                 spin_lock(&dev->lock);
 
2021         udc_setup_endpoints(dev);
 
2024 /* Called by gadget driver to unregister itself */
 
2025 int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
 
2027         struct udc      *dev = udc;
 
2028         unsigned long   flags;
 
2033         if (!driver || driver != dev->driver || !driver->unbind)
 
2036         spin_lock_irqsave(&dev->lock, flags);
 
2037         udc_mask_unused_interrupts(dev);
 
2038         shutdown(dev, driver);
 
2039         spin_unlock_irqrestore(&dev->lock, flags);
 
2041         driver->unbind(&dev->gadget);
 
2042         dev->gadget.dev.driver = NULL;
 
2046         tmp = readl(&dev->regs->ctl);
 
2047         tmp |= AMD_BIT(UDC_DEVCTL_SD);
 
2048         writel(tmp, &dev->regs->ctl);
 
2051         DBG(dev, "%s: unregistered\n", driver->driver.name);
 
2055 EXPORT_SYMBOL(usb_gadget_unregister_driver);
 
2058 /* Clear pending NAK bits */
 
2059 static void udc_process_cnak_queue(struct udc *dev)
 
2065         DBG(dev, "CNAK pending queue processing\n");
 
2066         for (tmp = 0; tmp < UDC_EPIN_NUM_USED; tmp++) {
 
2067                 if (cnak_pending & (1 << tmp)) {
 
2068                         DBG(dev, "CNAK pending for ep%d\n", tmp);
 
2069                         /* clear NAK by writing CNAK */
 
2070                         reg = readl(&dev->ep[tmp].regs->ctl);
 
2071                         reg |= AMD_BIT(UDC_EPCTL_CNAK);
 
2072                         writel(reg, &dev->ep[tmp].regs->ctl);
 
2073                         dev->ep[tmp].naking = 0;
 
2074                         UDC_QUEUE_CNAK(&dev->ep[tmp], dev->ep[tmp].num);
 
2077         /* ...  and ep0out */
 
2078         if (cnak_pending & (1 << UDC_EP0OUT_IX)) {
 
2079                 DBG(dev, "CNAK pending for ep%d\n", UDC_EP0OUT_IX);
 
2080                 /* clear NAK by writing CNAK */
 
2081                 reg = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
 
2082                 reg |= AMD_BIT(UDC_EPCTL_CNAK);
 
2083                 writel(reg, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
 
2084                 dev->ep[UDC_EP0OUT_IX].naking = 0;
 
2085                 UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX],
 
2086                                 dev->ep[UDC_EP0OUT_IX].num);
 
2090 /* Enabling RX DMA after setup packet */
 
2091 static void udc_ep0_set_rde(struct udc *dev)
 
2095                  * only enable RX DMA when no data endpoint is enabled
 
2098                 if (!dev->data_ep_enabled || dev->data_ep_queued) {
 
2102                          * set up timer for enabling RDE (so as not to enable
2103                          * RXFIFO DMA for data endpoints too early)
 
2105                         if (set_rde != 0 && !timer_pending(&udc_timer)) {
 
2107                                         jiffies + HZ/UDC_RDE_TIMER_DIV;
 
2110                                         add_timer(&udc_timer);
 
2118 /* Interrupt handler for data OUT traffic */
 
2119 static irqreturn_t udc_data_out_isr(struct udc *dev, int ep_ix)
 
2121         irqreturn_t             ret_val = IRQ_NONE;
 
2124         struct udc_request      *req;
 
2126         struct udc_data_dma     *td = NULL;
 
2129         VDBG(dev, "ep%d irq\n", ep_ix);
 
2130         ep = &dev->ep[ep_ix];
 
2132         tmp = readl(&ep->regs->sts);
 
2135                 if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
 
2136                         DBG(dev, "BNA ep%dout occurred - DESPTR = %x\n",
 
2137                                         ep->num, readl(&ep->regs->desptr));
 
2139                         writel(tmp | AMD_BIT(UDC_EPSTS_BNA), &ep->regs->sts);
 
2140                         if (!ep->cancel_transfer)
 
2141                                 ep->bna_occurred = 1;
 
2143                                 ep->cancel_transfer = 0;
 
2144                         ret_val = IRQ_HANDLED;
 
2149         if (tmp & AMD_BIT(UDC_EPSTS_HE)) {
 
2150                 dev_err(&dev->pdev->dev, "HE ep%dout occurred\n", ep->num);
 
2153                 writel(tmp | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
 
2154                 ret_val = IRQ_HANDLED;
 
2158         if (!list_empty(&ep->queue)) {
 
2161                 req = list_entry(ep->queue.next,
 
2162                         struct udc_request, queue);
 
2165                 udc_rxfifo_pending = 1;
 
2167         VDBG(dev, "req = %p\n", req);
 
2172                 if (req && udc_rxfifo_read(ep, req)) {
 
2173                         ret_val = IRQ_HANDLED;
 
2176                         complete_req(ep, req, 0);
 
2178                         if (!list_empty(&ep->queue) && !ep->halted) {
 
2179                                 req = list_entry(ep->queue.next,
 
2180                                         struct udc_request, queue);
 
2186         } else if (!ep->cancel_transfer && req != NULL) {
 
2187                 ret_val = IRQ_HANDLED;
 
2189                 /* check for DMA done */
 
2191                         dma_done = AMD_GETBITS(req->td_data->status,
 
2192                                                 UDC_DMA_OUT_STS_BS);
 
2193                 /* packet per buffer mode - rx bytes */
 
2196                          * if BNA occurred then recover desc. from
 
2199                         if (ep->bna_occurred) {
 
2200                                 VDBG(dev, "Recover desc. from BNA dummy\n");
 
2201                                 memcpy(req->td_data, ep->bna_dummy_req->td_data,
 
2202                                                 sizeof(struct udc_data_dma));
 
2203                                 ep->bna_occurred = 0;
 
2204                                 udc_init_bna_dummy(ep->req);
 
2206                         td = udc_get_last_dma_desc(req);
 
2207                         dma_done = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_BS);
 
2209                 if (dma_done == UDC_DMA_OUT_STS_BS_DMA_DONE) {
 
2210                         /* buffer fill mode - rx bytes */
 
2212                                 /* number of received bytes */
 
2213                                 count = AMD_GETBITS(req->td_data->status,
 
2214                                                 UDC_DMA_OUT_STS_RXBYTES);
 
2215                                 VDBG(dev, "rx bytes=%u\n", count);
 
2216                         /* packet per buffer mode - rx bytes */
 
2218                                 VDBG(dev, "req->td_data=%p\n", req->td_data);
 
2219                                 VDBG(dev, "last desc = %p\n", td);
 
2220                                 /* number of received bytes */
 
2221                                 if (use_dma_ppb_du) {
 
2222                                         /* every desc. counts bytes */
 
2223                                         count = udc_get_ppbdu_rxbytes(req);
 
2225                                         /* last desc. counts bytes */
 
2226                                         count = AMD_GETBITS(td->status,
 
2227                                                 UDC_DMA_OUT_STS_RXBYTES);
 
2228                                         if (!count && req->req.length
 
2229                                                 == UDC_DMA_MAXPACKET) {
 
2231                                                  * on 64k packets the RXBYTES field is zero
 
2234                                                 count = UDC_DMA_MAXPACKET;
 
2237                                 VDBG(dev, "last desc rx bytes=%u\n", count);
 
2240                         tmp = req->req.length - req->req.actual;
 
2242                                 if ((tmp % ep->ep.maxpacket) != 0) {
 
2243                                         DBG(dev, "%s: rx %db, space=%db\n",
 
2244                                                 ep->ep.name, count, tmp);
 
2245                                         req->req.status = -EOVERFLOW;
 
2249                         req->req.actual += count;
 
2251                         /* complete request */
 
2252                         complete_req(ep, req, 0);
 
2255                         if (!list_empty(&ep->queue) && !ep->halted) {
 
2256                                 req = list_entry(ep->queue.next,
 
2260                                  * DMA may already be started by udc_queue(),
2261                                  * called from the gadget driver's completion
2262                                  * routine. This happens when the queue
2263                                  * holds one request only.
 
2265                                 if (req->dma_going == 0) {
 
2267                                         if (prep_dma(ep, req, GFP_ATOMIC) != 0)
 
2269                                         /* write desc pointer */
 
2270                                         writel(req->td_phys,
 
2278                                  * implant BNA dummy descriptor to allow
 
2279                                  * RXFIFO opening by RDE
 
2281                                 if (ep->bna_dummy_req) {
 
2282                                         /* write desc pointer */
 
2283                                         writel(ep->bna_dummy_req->td_phys,
 
2285                                         ep->bna_occurred = 0;
 
2289                                  * schedule timer for setting RDE if the queue
2290                                  * remains empty, to allow ep0 packets to pass
2291                                  * through
 
2294                                                 && !timer_pending(&udc_timer)) {
 
2297                                                 + HZ*UDC_RDE_TIMER_SECONDS;
 
2300                                                 add_timer(&udc_timer);
 
2303                                 if (ep->num != UDC_EP0OUT_IX)
 
2304                                         dev->data_ep_queued = 0;
 
2309                         * RX DMA must be re-enabled for each desc in PPBDU mode
2310                         * and must be enabled in PPBNDU mode in case of BNA
 
2315         } else if (ep->cancel_transfer) {
 
2316                 ret_val = IRQ_HANDLED;
 
2317                 ep->cancel_transfer = 0;
 
2320         /* check pending CNAKS */
 
2322                 /* CNAK processing only when rxfifo is empty */
 
2323                 if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) {
 
2324                         udc_process_cnak_queue(dev);
 
2328         /* clear OUT bits in ep status */
 
2329         writel(UDC_EPSTS_OUT_CLEAR, &ep->regs->sts);
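/*
 * Editor's sketch (not part of the driver): the <linux/list.h> idiom
 * behind the request-queue handling above. Each request embeds a
 * list_head, and list_entry() maps the head-of-queue node back to its
 * containing struct. demo_* names are illustrative.
 */
#if 0	/* illustrative only, not compiled with the driver */
struct demo_request {
	struct usb_request	req;
	struct list_head	queue;		/* link in ep->queue */
};

static struct demo_request *demo_first_request(struct list_head *head)
{
	if (list_empty(head))
		return NULL;
	/* recover the container of the embedded list_head */
	return list_entry(head->next, struct demo_request, queue);
}
#endif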
 
2334 /* Interrupt handler for data IN traffic */
 
2335 static irqreturn_t udc_data_in_isr(struct udc *dev, int ep_ix)
 
2337         irqreturn_t ret_val = IRQ_NONE;
 
2341         struct udc_request *req;
 
2342         struct udc_data_dma *td;
 
2346         ep = &dev->ep[ep_ix];
 
2348         epsts = readl(&ep->regs->sts);
 
2351                 if (epsts & AMD_BIT(UDC_EPSTS_BNA)) {
 
2352                         dev_err(&dev->pdev->dev,
 
2353                                 "BNA ep%din occurred - DESPTR = %08lx\n",
 
2355                                 (unsigned long) readl(&ep->regs->desptr));
 
2358                         writel(epsts, &ep->regs->sts);
 
2359                         ret_val = IRQ_HANDLED;
 
2364         if (epsts & AMD_BIT(UDC_EPSTS_HE)) {
 
2365                 dev_err(&dev->pdev->dev,
 
2366                         "HE ep%din occurred - DESPTR = %08lx\n",
 
2367                         ep->num, (unsigned long) readl(&ep->regs->desptr));
 
2370                 writel(epsts | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
 
2371                 ret_val = IRQ_HANDLED;
 
2375         /* DMA completion */
 
2376         if (epsts & AMD_BIT(UDC_EPSTS_TDC)) {
 
2377                 VDBG(dev, "TDC set- completion\n");
 
2378                 ret_val = IRQ_HANDLED;
 
2379                 if (!ep->cancel_transfer && !list_empty(&ep->queue)) {
 
2380                         req = list_entry(ep->queue.next,
 
2381                                         struct udc_request, queue);
 
2384                                  * length bytes transferred
 
2385                                  * check dma done of last desc. in PPBDU mode
 
2387                                 if (use_dma_ppb_du) {
 
2388                                         td = udc_get_last_dma_desc(req);
 
2390                                         dma_done =
2391                                                 AMD_GETBITS(td->status,
2392                                                 UDC_DMA_IN_STS_BS);
 
2393                                                 /* don't care DMA done */
 
2398                                         /* assume all bytes transferred */
 
2399                                         req->req.actual = req->req.length;
 
2402                                 if (req->req.actual == req->req.length) {
 
2404                                         complete_req(ep, req, 0);
 
2406                                         /* further request available ? */
 
2407                                         if (list_empty(&ep->queue)) {
 
2408                                                 /* disable interrupt */
 
2410                                                         &dev->regs->ep_irqmsk);
 
2411                                                 tmp |= AMD_BIT(ep->num);
 
2413                                                         &dev->regs->ep_irqmsk);
 
2419                 ep->cancel_transfer = 0;
 
2423          * status reg has IN bit set and TDC not set: if TDC was handled,
2424          * IN must not be handled as well (UDC defect?)
 
2426         if ((epsts & AMD_BIT(UDC_EPSTS_IN))
 
2427                         && !(epsts & AMD_BIT(UDC_EPSTS_TDC))) {
 
2428                 ret_val = IRQ_HANDLED;
 
2429                 if (!list_empty(&ep->queue)) {
 
2431                         req = list_entry(ep->queue.next,
 
2432                                         struct udc_request, queue);
 
2436                                 udc_txfifo_write(ep, &req->req);
 
2437                                 len = req->req.length - req->req.actual;
2438                                 if (len > ep->ep.maxpacket)
2439                                         len = ep->ep.maxpacket;
2440                                 req->req.actual += len;
 
2441                                 if (req->req.actual == req->req.length
 
2442                                         || (len != ep->ep.maxpacket)) {
 
2444                                         complete_req(ep, req, 0);
 
2447                         } else if (req && !req->dma_going) {
 
2448                                 VDBG(dev, "IN DMA : req=%p req->td_data=%p\n",
 
2455                                          * unset L bit of first desc.
 
2458                                         if (use_dma_ppb && req->req.length >
 
2460                                                 req->td_data->status &=
 
2465                                         /* write desc pointer */
 
2466                                         writel(req->td_phys, &ep->regs->desptr);
 
2468                                         /* set HOST READY */
 
2469                                         req->td_data->status =
2470                                                 AMD_ADDBITS(
2471                                                 req->td_data->status,
2472                                                 UDC_DMA_IN_STS_BS_HOST_READY,
2473                                                 UDC_DMA_IN_STS_BS);
 
2475                                         /* set poll demand bit */
 
2476                                         tmp = readl(&ep->regs->ctl);
 
2477                                         tmp |= AMD_BIT(UDC_EPCTL_P);
 
2478                                         writel(tmp, &ep->regs->ctl);
 
2484         /* clear status bits */
 
2485         writel(epsts, &ep->regs->sts);
 
2492 /* Interrupt handler for Control OUT traffic */
 
2493 static irqreturn_t udc_control_out_isr(struct udc *dev)
 
2494 __releases(dev->lock)
 
2495 __acquires(dev->lock)
 
2497         irqreturn_t ret_val = IRQ_NONE;
 
2499         int setup_supported;
 
2503         struct udc_ep   *ep_tmp;
 
2505         ep = &dev->ep[UDC_EP0OUT_IX];
 
2508         writel(AMD_BIT(UDC_EPINT_OUT_EP0), &dev->regs->ep_irqsts);
 
2510         tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
 
2511         /* check BNA and clear if set */
 
2512         if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
 
2513                 VDBG(dev, "ep0: BNA set\n");
 
2514                 writel(AMD_BIT(UDC_EPSTS_BNA),
 
2515                         &dev->ep[UDC_EP0OUT_IX].regs->sts);
 
2516                 ep->bna_occurred = 1;
 
2517                 ret_val = IRQ_HANDLED;
 
2521         /* type of data: SETUP or DATA 0 bytes */
 
2522         tmp = AMD_GETBITS(tmp, UDC_EPSTS_OUT);
 
2523         VDBG(dev, "data_typ = %x\n", tmp);
 
2526         if (tmp == UDC_EPSTS_OUT_SETUP) {
 
2527                 ret_val = IRQ_HANDLED;
 
2529                 ep->dev->stall_ep0in = 0;
 
2530                 dev->waiting_zlp_ack_ep0in = 0;
 
2532                 /* set NAK for EP0_IN */
 
2533                 tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
 
2534                 tmp |= AMD_BIT(UDC_EPCTL_SNAK);
 
2535                 writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
 
2536                 dev->ep[UDC_EP0IN_IX].naking = 1;
 
2537                 /* get setup data */
 
2540                         /* clear OUT bits in ep status */
 
2541                         writel(UDC_EPSTS_OUT_CLEAR,
 
2542                                 &dev->ep[UDC_EP0OUT_IX].regs->sts);
 
2544                         setup_data.data[0] =
 
2545                                 dev->ep[UDC_EP0OUT_IX].td_stp->data12;
 
2546                         setup_data.data[1] =
 
2547                                 dev->ep[UDC_EP0OUT_IX].td_stp->data34;
 
2548                         /* set HOST READY */
 
2549                         dev->ep[UDC_EP0OUT_IX].td_stp->status =
 
2550                                         UDC_DMA_STP_STS_BS_HOST_READY;
 
2553                         udc_rxfifo_read_dwords(dev, setup_data.data, 2);
 
2556                 /* determine direction of control data */
 
2557                 if ((setup_data.request.bRequestType & USB_DIR_IN) != 0) {
 
2558                         dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
 
2560                         udc_ep0_set_rde(dev);
 
2563                         dev->gadget.ep0 = &dev->ep[UDC_EP0OUT_IX].ep;
 
2565                          * implant BNA dummy descriptor to allow RXFIFO opening
 
2568                         if (ep->bna_dummy_req) {
 
2569                                 /* write desc pointer */
 
2570                                 writel(ep->bna_dummy_req->td_phys,
 
2571                                         &dev->ep[UDC_EP0OUT_IX].regs->desptr);
 
2572                                 ep->bna_occurred = 0;
 
2576                         dev->ep[UDC_EP0OUT_IX].naking = 1;
 
2578                          * set up timer for enabling RDE (so as not to enable
2579                          * RXFIFO DMA for data too early)
 
2582                         if (!timer_pending(&udc_timer)) {
 
2583                                 udc_timer.expires = jiffies +
 
2584                                                         HZ/UDC_RDE_TIMER_DIV;
 
2586                                         add_timer(&udc_timer);
 
2592                  * mass storage reset must be processed here because
 
2593                  * next packet may be a CLEAR_FEATURE HALT which would not
 
2594                  * clear the stall bit when no STALL handshake was received
 
2595                  * before (autostall can cause this)
 
2597                 if (setup_data.data[0] == UDC_MSCRES_DWORD0
 
2598                                 && setup_data.data[1] == UDC_MSCRES_DWORD1) {
 
2599                         DBG(dev, "MSC Reset\n");
 
2602                          * only one IN and one OUT endpoint are handled
 
2604                         ep_tmp = &udc->ep[UDC_EPIN_IX];
 
2605                         udc_set_halt(&ep_tmp->ep, 0);
 
2606                         ep_tmp = &udc->ep[UDC_EPOUT_IX];
 
2607                         udc_set_halt(&ep_tmp->ep, 0);
 
2610                 /* call gadget with setup data received */
 
2611                 spin_unlock(&dev->lock);
 
2612                 setup_supported = dev->driver->setup(&dev->gadget,
 
2613                                                 &setup_data.request);
 
2614                 spin_lock(&dev->lock);
 
2616                 tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
 
2617                 /* ep0 in returns data (not zlp) on IN phase */
 
2618                 if (setup_supported >= 0 && setup_supported <
 
2619                                 UDC_EP0IN_MAXPACKET) {
 
2620                         /* clear NAK by writing CNAK in EP0_IN */
 
2621                         tmp |= AMD_BIT(UDC_EPCTL_CNAK);
 
2622                         writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
 
2623                         dev->ep[UDC_EP0IN_IX].naking = 0;
 
2624                         UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);
 
2626                 /* if unsupported request then stall */
 
2627                 } else if (setup_supported < 0) {
 
2628                         tmp |= AMD_BIT(UDC_EPCTL_S);
 
2629                         writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
 
2631                         dev->waiting_zlp_ack_ep0in = 1;
 
2634                 /* clear NAK by writing CNAK in EP0_OUT */
 
2636                         tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
 
2637                         tmp |= AMD_BIT(UDC_EPCTL_CNAK);
 
2638                         writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
 
2639                         dev->ep[UDC_EP0OUT_IX].naking = 0;
 
2640                         UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
 
2644                         /* clear OUT bits in ep status */
 
2645                         writel(UDC_EPSTS_OUT_CLEAR,
 
2646                                 &dev->ep[UDC_EP0OUT_IX].regs->sts);
 
2649         /* data packet 0 bytes */
 
2650         } else if (tmp == UDC_EPSTS_OUT_DATA) {
 
2651                 /* clear OUT bits in ep status */
 
2652                 writel(UDC_EPSTS_OUT_CLEAR, &dev->ep[UDC_EP0OUT_IX].regs->sts);
 
2654                 /* get setup data: only 0 packet */
 
2656                         /* no req if 0 packet, just reactivate */
 
2657                         if (list_empty(&dev->ep[UDC_EP0OUT_IX].queue)) {
 
2660                                 /* set HOST READY */
 
2661                                 dev->ep[UDC_EP0OUT_IX].td->status =
2662                                         AMD_ADDBITS(
 
2663                                         dev->ep[UDC_EP0OUT_IX].td->status,
 
2664                                         UDC_DMA_OUT_STS_BS_HOST_READY,
 
2665                                         UDC_DMA_OUT_STS_BS);
 
2667                                 udc_ep0_set_rde(dev);
 
2668                                 ret_val = IRQ_HANDLED;
 
2672                                 ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
 
2673                                 /* re-program desc. pointer for possible ZLPs */
 
2674                                 writel(dev->ep[UDC_EP0OUT_IX].td_phys,
 
2675                                         &dev->ep[UDC_EP0OUT_IX].regs->desptr);
 
2677                                 udc_ep0_set_rde(dev);
 
2681                         /* number of received bytes */
 
2682                         count = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
 
2683                         count = AMD_GETBITS(count, UDC_EPSTS_RX_PKT_SIZE);
 
2684                         /* out data for fifo mode not working */
 
2687                         /* 0 packet or real data ? */
 
2689                                 ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
 
2691                                 /* dummy read confirm */
 
2692                                 readl(&dev->ep[UDC_EP0OUT_IX].regs->confirm);
 
2693                                 ret_val = IRQ_HANDLED;
 
2698         /* check pending CNAKS */
 
2700                 /* CNAK processing only when rxfifo is empty */
 
2701                 if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) {
 
2702                         udc_process_cnak_queue(dev);
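/*
 * Editor's sketch (not part of the driver): setup_data is a union
 * declared in amd5536udc.h; presumably it overlays two raw 32-bit
 * words on a usb_ctrlrequest roughly as below, which is why copying
 * the descriptor's data12/data34 quadlets (or two rxfifo dwords)
 * yields a complete 8-byte SETUP packet.
 */
#if 0	/* illustrative only, not compiled with the driver */
union demo_setup_data {
	u32			data[2];	/* raw setup quadlets */
	struct usb_ctrlrequest	request;	/* decoded SETUP packet */
};
#endif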
 
2710 /* Interrupt handler for Control IN traffic */
 
2711 static irqreturn_t udc_control_in_isr(struct udc *dev)
 
2713         irqreturn_t ret_val = IRQ_NONE;
 
2716         struct udc_request *req;
 
2719         ep = &dev->ep[UDC_EP0IN_IX];
 
2722         writel(AMD_BIT(UDC_EPINT_IN_EP0), &dev->regs->ep_irqsts);
 
2724         tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->sts);
 
2725         /* DMA completion */
 
2726         if (tmp & AMD_BIT(UDC_EPSTS_TDC)) {
 
2727                 VDBG(dev, "isr: TDC clear\n");
 
2728                 ret_val = IRQ_HANDLED;
 
2731                 writel(AMD_BIT(UDC_EPSTS_TDC),
 
2732                                 &dev->ep[UDC_EP0IN_IX].regs->sts);
 
2734         /* status reg has IN bit set ? */
 
2735         } else if (tmp & AMD_BIT(UDC_EPSTS_IN)) {
 
2736                 ret_val = IRQ_HANDLED;
 
2740                         writel(AMD_BIT(UDC_EPSTS_IN),
 
2741                                 &dev->ep[UDC_EP0IN_IX].regs->sts);
 
2743                 if (dev->stall_ep0in) {
 
2744                         DBG(dev, "stall ep0in\n");
 
2746                         tmp = readl(&ep->regs->ctl);
 
2747                         tmp |= AMD_BIT(UDC_EPCTL_S);
 
2748                         writel(tmp, &ep->regs->ctl);
 
2750                         if (!list_empty(&ep->queue)) {
 
2752                                 req = list_entry(ep->queue.next,
 
2753                                                 struct udc_request, queue);
 
2756                                         /* write desc pointer */
 
2757                                         writel(req->td_phys, &ep->regs->desptr);
 
2758                                         /* set HOST READY */
 
2759                                         req->td_data->status =
2760                                                 AMD_ADDBITS(
 
2761                                                 req->td_data->status,
 
2762                                                 UDC_DMA_STP_STS_BS_HOST_READY,
 
2763                                                 UDC_DMA_STP_STS_BS);
 
2765                                         /* set poll demand bit */
 
2766                                         tmp =
2767                                         readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
 
2768                                         tmp |= AMD_BIT(UDC_EPCTL_P);
 
2769                                         writel(tmp,
2770                                         &dev->ep[UDC_EP0IN_IX].regs->ctl);
 
2772                                         /* all bytes will be transferred */
 
2773                                         req->req.actual = req->req.length;
 
2776                                         complete_req(ep, req, 0);
 
2780                                         udc_txfifo_write(ep, &req->req);
 
2782                                         /* length bytes transferred */
 
2783                                         len = req->req.length - req->req.actual;
 
2784                                         if (len > ep->ep.maxpacket)
 
2785                                                 len = ep->ep.maxpacket;
 
2787                                         req->req.actual += len;
 
2788                                         if (req->req.actual == req->req.length
 
2789                                                 || (len != ep->ep.maxpacket)) {
 
2791                                                 complete_req(ep, req, 0);
 
2798                 dev->stall_ep0in = 0;
 
2801                         writel(AMD_BIT(UDC_EPSTS_IN),
 
2802                                 &dev->ep[UDC_EP0IN_IX].regs->sts);
 
2810 /* Interrupt handler for global device events */
 
2811 static irqreturn_t udc_dev_isr(struct udc *dev, u32 dev_irq)
 
2812 __releases(dev->lock)
 
2813 __acquires(dev->lock)
 
2815         irqreturn_t ret_val = IRQ_NONE;
 
2822         /* SET_CONFIG irq ? */
 
2823         if (dev_irq & AMD_BIT(UDC_DEVINT_SC)) {
 
2824                 ret_val = IRQ_HANDLED;
 
2826                 /* read config value */
 
2827                 tmp = readl(&dev->regs->sts);
 
2828                 cfg = AMD_GETBITS(tmp, UDC_DEVSTS_CFG);
 
2829                 DBG(dev, "SET_CONFIG interrupt: config=%d\n", cfg);
 
2830                 dev->cur_config = cfg;
 
2831                 dev->set_cfg_not_acked = 1;
 
2833                 /* make usb request for gadget driver */
 
2834                 memset(&setup_data, 0 , sizeof(union udc_setup_data));
 
2835                 setup_data.request.bRequest = USB_REQ_SET_CONFIGURATION;
 
2836                 setup_data.request.wValue = cpu_to_le16(dev->cur_config);
 
2838                 /* program the NE registers */
 
2839                 for (i = 0; i < UDC_EP_NUM; i++) {
 
2843                                 /* ep ix in UDC CSR register space */
 
2844                                 udc_csr_epix = ep->num;
 
2849                                 /* ep ix in UDC CSR register space */
 
2850                                 udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
 
2853                         tmp = readl(&dev->csr->ne[udc_csr_epix]);
 
2855                         tmp = AMD_ADDBITS(tmp, ep->dev->cur_config,
 
2858                         writel(tmp, &dev->csr->ne[udc_csr_epix]);
 
2860                         /* clear stall bits */
 
2862                         tmp = readl(&ep->regs->ctl);
 
2863                         tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
 
2864                         writel(tmp, &ep->regs->ctl);
 
2866                 /* call gadget zero with setup data received */
 
2867                 spin_unlock(&dev->lock);
 
2868                 tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
 
2869                 spin_lock(&dev->lock);
 
2871         } /* SET_INTERFACE ? */
 
2872         if (dev_irq & AMD_BIT(UDC_DEVINT_SI)) {
 
2873                 ret_val = IRQ_HANDLED;
 
2875                 dev->set_cfg_not_acked = 1;
 
2876                 /* read interface and alt setting values */
 
2877                 tmp = readl(&dev->regs->sts);
 
2878                 dev->cur_alt = AMD_GETBITS(tmp, UDC_DEVSTS_ALT);
 
2879                 dev->cur_intf = AMD_GETBITS(tmp, UDC_DEVSTS_INTF);
 
2881                 /* make usb request for gadget driver */
 
2882                 memset(&setup_data, 0 , sizeof(union udc_setup_data));
 
2883                 setup_data.request.bRequest = USB_REQ_SET_INTERFACE;
 
2884                 setup_data.request.bRequestType = USB_RECIP_INTERFACE;
 
2885                 setup_data.request.wValue = cpu_to_le16(dev->cur_alt);
 
2886                 setup_data.request.wIndex = cpu_to_le16(dev->cur_intf);
 
2888                 DBG(dev, "SET_INTERFACE interrupt: alt=%d intf=%d\n",
 
2889                                 dev->cur_alt, dev->cur_intf);
 
2891                 /* program the NE registers */
 
2892                 for (i = 0; i < UDC_EP_NUM; i++) {
 
2896                                 /* ep ix in UDC CSR register space */
 
2897                                 udc_csr_epix = ep->num;
 
2902                                 /* ep ix in UDC CSR register space */
 
2903                                 udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
 
2908                         tmp = readl(&dev->csr->ne[udc_csr_epix]);
 
2910                         tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf,
 
2912                         /* tmp = AMD_ADDBITS(tmp, 2, UDC_CSR_NE_INTF); */
 
2914                         tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt,
 
2917                         writel(tmp, &dev->csr->ne[udc_csr_epix]);
 
2919                         /* clear stall bits */
 
2921                         tmp = readl(&ep->regs->ctl);
 
2922                         tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
 
2923                         writel(tmp, &ep->regs->ctl);
 
2926                 /* call gadget zero with setup data received */
 
2927                 spin_unlock(&dev->lock);
 
2928                 tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
 
2929                 spin_lock(&dev->lock);
 
2932         if (dev_irq & AMD_BIT(UDC_DEVINT_UR)) {
 
2933                 DBG(dev, "USB Reset interrupt\n");
 
2934                 ret_val = IRQ_HANDLED;
 
2936                 /* allow soft reset when suspend occurs */
 
2937                 soft_reset_occured = 0;
 
2939                 dev->waiting_zlp_ack_ep0in = 0;
 
2940                 dev->set_cfg_not_acked = 0;
 
2942                 /* mask not needed interrupts */
 
2943                 udc_mask_unused_interrupts(dev);
 
2945                 /* call gadget to resume and reset configs etc. */
 
2946                 spin_unlock(&dev->lock);
 
2947                 if (dev->sys_suspended && dev->driver->resume) {
 
2948                         dev->driver->resume(&dev->gadget);
 
2949                         dev->sys_suspended = 0;
 
2951                 dev->driver->disconnect(&dev->gadget);
 
2952                 spin_lock(&dev->lock);
 
2954                 /* disable ep0 to empty req queue */
 
2955                 empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
 
2956                 ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);
 
2958                 /* soft reset when rxfifo not empty */
 
2959                 tmp = readl(&dev->regs->sts);
 
2960                 if (!(tmp & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
 
2961                                 && !soft_reset_after_usbreset_occured) {
 
2962                         udc_soft_reset(dev);
 
2963                         soft_reset_after_usbreset_occured++;
 
2967                  * DMA reset to kill potential old DMA hw hang,
 
2968                  * POLL bit is already reset by ep_init() through
 
2971                 DBG(dev, "DMA machine reset\n");
 
2972                 tmp = readl(&dev->regs->cfg);
 
2973                 writel(tmp | AMD_BIT(UDC_DEVCFG_DMARST), &dev->regs->cfg);
 
2974                 writel(tmp, &dev->regs->cfg);
 
2976                 /* put into initial config */
 
2977                 udc_basic_init(dev);
 
2979                 /* enable device setup interrupts */
 
2980                 udc_enable_dev_setup_interrupts(dev);
 
2982                 /* enable suspend interrupt */
 
2983                 tmp = readl(&dev->regs->irqmsk);
 
2984                 tmp &= AMD_UNMASK_BIT(UDC_DEVINT_US);
 
2985                 writel(tmp, &dev->regs->irqmsk);
 
2988         if (dev_irq & AMD_BIT(UDC_DEVINT_US)) {
 
2989                 DBG(dev, "USB Suspend interrupt\n");
 
2990                 ret_val = IRQ_HANDLED;
 
2991                 if (dev->driver->suspend) {
 
2992                         spin_unlock(&dev->lock);
 
2993                         dev->sys_suspended = 1;
 
2994                         dev->driver->suspend(&dev->gadget);
 
2995                         spin_lock(&dev->lock);
 
2998         if (dev_irq & AMD_BIT(UDC_DEVINT_ENUM)) {
 
2999                 DBG(dev, "ENUM interrupt\n");
 
3000                 ret_val = IRQ_HANDLED;
 
3001                 soft_reset_after_usbreset_occured = 0;
 
3003                 /* disable ep0 to empty req queue */
 
3004                 empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
 
3005                 ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);
 
3007                 /* link up all endpoints */
 
3008                 udc_setup_endpoints(dev);
 
3009                 if (dev->gadget.speed == USB_SPEED_HIGH) {
 
3010                         dev_info(&dev->pdev->dev, "Connect: speed = %s\n",
 
3012                 } else if (dev->gadget.speed == USB_SPEED_FULL) {
 
3013                         dev_info(&dev->pdev->dev, "Connect: speed = %s\n",
 
3018                 activate_control_endpoints(dev);
 
3020                 /* enable ep0 interrupts */
 
3021                 udc_enable_ep0_interrupts(dev);
 
3023         /* session valid change interrupt */
 
3024         if (dev_irq & AMD_BIT(UDC_DEVINT_SVC)) {
 
3025                 DBG(dev, "USB SVC interrupt\n");
 
3026                 ret_val = IRQ_HANDLED;
 
3028                 /* check that session is not valid to detect disconnect */
 
3029                 tmp = readl(&dev->regs->sts);
 
3030                 if (!(tmp & AMD_BIT(UDC_DEVSTS_SESSVLD))) {
 
3031                         /* disable suspend interrupt */
 
3032                         tmp = readl(&dev->regs->irqmsk);
 
3033                         tmp |= AMD_BIT(UDC_DEVINT_US);
 
3034                         writel(tmp, &dev->regs->irqmsk);
 
3035                         DBG(dev, "USB Disconnect (session valid low)\n");
 
3036                         /* cleanup on disconnect */
 
3037                         usb_disconnect(udc);
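/*
 * Editor's sketch (not part of the driver): fabricating the
 * SET_CONFIGURATION request that udc_dev_isr() hands to the gadget's
 * setup() callback above; wValue is little-endian on the wire, hence
 * cpu_to_le16(). demo_* names are illustrative.
 */
#if 0	/* illustrative only, not compiled with the driver */
static void demo_fill_set_config(struct usb_ctrlrequest *r, u8 cfg)
{
	memset(r, 0, sizeof(*r));
	r->bRequestType = 0;	/* host-to-device, standard, device */
	r->bRequest = USB_REQ_SET_CONFIGURATION;
	r->wValue = cpu_to_le16(cfg);
	/* wIndex and wLength stay zero */
}
#endif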
 
3045 /* Interrupt Service Routine, see Linux Kernel Doc for parameters */
 
3046 static irqreturn_t udc_irq(int irq, void *pdev)
 
3048         struct udc *dev = pdev;
 
3052         irqreturn_t ret_val = IRQ_NONE;
 
3054         spin_lock(&dev->lock);
 
3056         /* check for ep irq */
 
3057         reg = readl(&dev->regs->ep_irqsts);
 
3059                 if (reg & AMD_BIT(UDC_EPINT_OUT_EP0))
 
3060                         ret_val |= udc_control_out_isr(dev);
 
3061                 if (reg & AMD_BIT(UDC_EPINT_IN_EP0))
 
3062                         ret_val |= udc_control_in_isr(dev);
 
3068                 for (i = 1; i < UDC_EP_NUM; i++) {
 
3070                         if (!(reg & ep_irq) || i == UDC_EPINT_OUT_EP0)
 
3073                         /* clear irq status */
 
3074                         writel(ep_irq, &dev->regs->ep_irqsts);
 
3076                         /* irq for out ep ? */
 
3077                         if (i > UDC_EPIN_NUM)
 
3078                                 ret_val |= udc_data_out_isr(dev, i);
 
3080                                 ret_val |= udc_data_in_isr(dev, i);
 
3086         /* check for dev irq */
 
3087         reg = readl(&dev->regs->irqsts);
 
3090                 writel(reg, &dev->regs->irqsts);
 
3091                 ret_val |= udc_dev_isr(dev, reg);
 
3095         spin_unlock(&dev->lock);
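/*
 * Editor's sketch (not part of the driver): the snapshot-and-ack
 * handling of a write-1-to-clear interrupt status register, as done
 * by udc_irq() above for irqsts. demo_* names are illustrative.
 */
#if 0	/* illustrative only, not compiled with the driver */
static void __iomem *demo_sts_reg;

static irqreturn_t demo_isr(int irq, void *cookie)
{
	u32 sts = readl(demo_sts_reg);	/* snapshot pending events */

	if (!sts)
		return IRQ_NONE;	/* not ours (IRQF_SHARED line) */
	writel(sts, demo_sts_reg);	/* ack exactly what was seen */
	/* ... dispatch on each bit set in sts ... */
	return IRQ_HANDLED;
}
#endif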
 
3099 /* Tears down device */
 
3100 static void gadget_release(struct device *pdev)
 
3102         struct amd5536udc *dev = dev_get_drvdata(pdev);
 
3106 /* Cleanup on device remove */
 
3107 static void udc_remove(struct udc *dev)
 
3111         if (timer_pending(&udc_timer))
 
3112                 wait_for_completion(&on_exit);
 
3113         if (udc_timer.data)
3114                 del_timer_sync(&udc_timer);
 
3115         /* remove pollstall timer */
 
3116         stop_pollstall_timer++;
 
3117         if (timer_pending(&udc_pollstall_timer))
 
3118                 wait_for_completion(&on_pollstall_exit);
 
3119         if (udc_pollstall_timer.data)
 
3120                 del_timer_sync(&udc_pollstall_timer);
 
3124 /* Reset all pci context */
 
3125 static void udc_pci_remove(struct pci_dev *pdev)
 
3129         dev = pci_get_drvdata(pdev);
 
3131         /* gadget driver must not be registered */
 
3132         BUG_ON(dev->driver != NULL);
 
3134         /* dma pool cleanup */
 
3135         if (dev->data_requests)
 
3136                 pci_pool_destroy(dev->data_requests);
 
3138         if (dev->stp_requests) {
 
3139                 /* cleanup DMA descriptors for ep0 */
 
3140                 pci_pool_free(dev->stp_requests,
 
3141                         dev->ep[UDC_EP0OUT_IX].td_stp,
 
3142                         dev->ep[UDC_EP0OUT_IX].td_stp_dma);
 
3143                 pci_pool_free(dev->stp_requests,
 
3144                         dev->ep[UDC_EP0OUT_IX].td,
 
3145                         dev->ep[UDC_EP0OUT_IX].td_phys);
 
3147                 pci_pool_destroy(dev->stp_requests);
 
3150         /* reset controller */
 
3151         writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
 
3152         if (dev->irq_registered)
 
3153                 free_irq(pdev->irq, dev);
 
3156         if (dev->mem_region)
 
3157                 release_mem_region(pci_resource_start(pdev, 0),
 
3158                                 pci_resource_len(pdev, 0));
 
3160                 pci_disable_device(pdev);
 
3162         device_unregister(&dev->gadget.dev);
 
3163         pci_set_drvdata(pdev, NULL);
 
3168 /* create dma pools on init */
 
3169 static int init_dma_pools(struct udc *dev)
 
3171         struct udc_stp_dma      *td_stp;
 
3172         struct udc_data_dma     *td_data;
 
3175         /* consistent DMA mode setting ? */
 
3177                 use_dma_bufferfill_mode = 0;
 
3180                 use_dma_bufferfill_mode = 1;
 
3184         dev->data_requests = dma_pool_create("data_requests", NULL,
 
3185                 sizeof(struct udc_data_dma), 0, 0);
 
3186         if (!dev->data_requests) {
 
3187                 DBG(dev, "can't get request data pool\n");
 
3192         /* EP0 in dma regs = dev control regs */
 
3193         dev->ep[UDC_EP0IN_IX].dma = &dev->regs->ctl;
 
3195         /* dma desc for setup data */
 
3196         dev->stp_requests = dma_pool_create("setup requests", NULL,
 
3197                 sizeof(struct udc_stp_dma), 0, 0);
 
3198         if (!dev->stp_requests) {
 
3199                 DBG(dev, "can't get stp request pool\n");
 
3204         td_stp = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
 
3205                                 &dev->ep[UDC_EP0OUT_IX].td_stp_dma);
 
3206         if (td_stp == NULL) {
 
3210         dev->ep[UDC_EP0OUT_IX].td_stp = td_stp;
 
3212         /* data: 0 packets !? */
 
3213         td_data = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
 
3214                                 &dev->ep[UDC_EP0OUT_IX].td_phys);
 
3215         if (td_data == NULL) {
 
3219         dev->ep[UDC_EP0OUT_IX].td = td_data;
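/*
 * Editor's sketch (not part of the driver): the dma_pool lifecycle
 * used by init_dma_pools() above and unwound in udc_pci_remove():
 * create a pool of fixed-size coherent blocks, allocate one to get
 * both a CPU pointer and a bus address, then free and destroy in
 * reverse order. demo names are illustrative.
 */
#if 0	/* illustrative only, not compiled with the driver */
static int demo_pool_lifecycle(void)
{
	struct dma_pool *pool;
	dma_addr_t dma;
	void *va;

	pool = dma_pool_create("demo", NULL, 32, 0, 0);
	if (!pool)
		return -ENOMEM;
	va = dma_pool_alloc(pool, GFP_KERNEL, &dma);
	if (va) {
		/* hand dma to the hardware, touch va from the CPU */
		dma_pool_free(pool, va, dma);
	}
	dma_pool_destroy(pool);
	return 0;
}
#endif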
 
3226 /* Called by pci bus driver to init pci context */
 
3227 static int udc_pci_probe(
 
3228         struct pci_dev *pdev,
 
3229         const struct pci_device_id *id
 
3233         unsigned long           resource;
 
3239                 dev_dbg(&pdev->dev, "already probed\n");
 
3244         dev = kzalloc(sizeof(struct udc), GFP_KERNEL);
 
3251         if (pci_enable_device(pdev) < 0) {
 
3259         /* PCI resource allocation */
 
3260         resource = pci_resource_start(pdev, 0);
 
3261         len = pci_resource_len(pdev, 0);
 
3263         if (!request_mem_region(resource, len, name)) {
 
3264                 dev_dbg(&pdev->dev, "pci device used already\n");
 
3270         dev->mem_region = 1;
 
3272         dev->virt_addr = ioremap_nocache(resource, len);
 
3273         if (dev->virt_addr == NULL) {
 
3274                 dev_dbg(&pdev->dev, "start address cannot be mapped\n");
 
3282                 dev_err(&dev->pdev->dev, "irq not set\n");
 
3289         if (request_irq(pdev->irq, udc_irq, IRQF_SHARED, name, dev) != 0) {
 
3290                 dev_dbg(&dev->pdev->dev, "request_irq(%d) fail\n", pdev->irq);
 
3296         dev->irq_registered = 1;
 
3298         pci_set_drvdata(pdev, dev);
 
3300         /* chip revision for Hs AMD5536 */
 
3301         dev->chiprev = pdev->revision;
 
3303         pci_set_master(pdev);
 
3304         pci_try_set_mwi(pdev);
 
3306         /* init dma pools */
 
3308                 retval = init_dma_pools(dev);
 
3313         dev->phys_addr = resource;
 
3314         dev->irq = pdev->irq;
 
3316         dev->gadget.dev.parent = &pdev->dev;
 
3317         dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
 
3319         /* general probing */
 
3320         if (udc_probe(dev) == 0)
 
3325                 udc_pci_remove(pdev);
 
3330 static int udc_probe(struct udc *dev)
 
3336         /* mark timer as not initialized */
 
3337         udc_timer.data = 0;
3338         udc_pollstall_timer.data = 0;
 
3340         /* device struct setup */
 
3341         spin_lock_init(&dev->lock);
 
3342         dev->gadget.ops = &udc_ops;
 
3344         dev_set_name(&dev->gadget.dev, "gadget");
 
3345         dev->gadget.dev.release = gadget_release;
 
3346         dev->gadget.name = name;
 
3348         dev->gadget.is_dualspeed = 1;
 
3350         /* udc csr registers base */
 
3351         dev->csr = dev->virt_addr + UDC_CSR_ADDR;
 
3352         /* dev registers base */
 
3353         dev->regs = dev->virt_addr + UDC_DEVCFG_ADDR;
 
3354         /* ep registers base */
 
3355         dev->ep_regs = dev->virt_addr + UDC_EPREGS_ADDR;
 
3357         dev->rxfifo = (u32 __iomem *)(dev->virt_addr + UDC_RXFIFO_ADDR);
 
3358         dev->txfifo = (u32 __iomem *)(dev->virt_addr + UDC_TXFIFO_ADDR);
 
3360         /* init registers, interrupts, ... */
 
3361         startup_registers(dev);
 
3363         dev_info(&dev->pdev->dev, "%s\n", mod_desc);
 
3365         snprintf(tmp, sizeof tmp, "%d", dev->irq);
 
3366         dev_info(&dev->pdev->dev,
 
3367                 "irq %s, pci mem %08lx, chip rev %02x (Geode5536 %s)\n",
 
3368                 tmp, dev->phys_addr, dev->chiprev,
 
3369                 (dev->chiprev == UDC_HSA0_REV) ? "A0" : "B1");
 
3370         strcpy(tmp, UDC_DRIVER_VERSION_STRING);
 
3371         if (dev->chiprev == UDC_HSA0_REV) {
 
3372                 dev_err(&dev->pdev->dev, "chip revision is A0; too old\n");
 
3376         dev_info(&dev->pdev->dev,
 
3377                 "driver version: %s (for Geode5536 B1)\n", tmp);
 
3380         retval = device_register(&dev->gadget.dev);
 
3385         init_timer(&udc_timer);
 
3386         udc_timer.function = udc_timer_function;
 
3388         /* timer pollstall init */
 
3389         init_timer(&udc_pollstall_timer);
 
3390         udc_pollstall_timer.function = udc_pollstall_timer_function;
 
3391         udc_pollstall_timer.data = 1;
 
3394         reg = readl(&dev->regs->ctl);
 
3395         reg |= AMD_BIT(UDC_DEVCTL_SD);
 
3396         writel(reg, &dev->regs->ctl);
 
3398         /* print dev register info */
 
3407 /* Initiates a remote wakeup */
 
3408 static int udc_remote_wakeup(struct udc *dev)
 
3410         unsigned long flags;
 
3413         DBG(dev, "UDC initiates remote wakeup\n");
 
3415         spin_lock_irqsave(&dev->lock, flags);
 
3417         tmp = readl(&dev->regs->ctl);
 
3418         tmp |= AMD_BIT(UDC_DEVCTL_RES);
 
3419         writel(tmp, &dev->regs->ctl);
 
3420         tmp &= AMD_CLEAR_BIT(UDC_DEVCTL_RES);
 
3421         writel(tmp, &dev->regs->ctl);
 
3423         spin_unlock_irqrestore(&dev->lock, flags);
 
3427 /* PCI device parameters */
 
3428 static const struct pci_device_id pci_id[] = {
 
3430                 PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x2096),
 
3431                 .class =        (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
 
3432                 .class_mask =   0xffffffff,
 
3436 MODULE_DEVICE_TABLE(pci, pci_id);
 
3439 static struct pci_driver udc_pci_driver = {
 
3440         .name =         (char *) name,
 
3442         .probe =        udc_pci_probe,
 
3443         .remove =       udc_pci_remove,
 
3447 static int __init init(void)
 
3449         return pci_register_driver(&udc_pci_driver);
 
3454 static void __exit cleanup(void)
 
3456         pci_unregister_driver(&udc_pci_driver);
 
3458 module_exit(cleanup);
 
3460 MODULE_DESCRIPTION(UDC_MOD_DESCRIPTION);
 
3461 MODULE_AUTHOR("Thomas Dahlmann");
 
3462 MODULE_LICENSE("GPL");