/*
 * amd5536.c -- AMD 5536 UDC high/full speed USB device controller
 *
 * Copyright (C) 2005-2007 AMD (http://www.amd.com)
 * Author: Thomas Dahlmann
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

/*
 * The AMD5536 UDC is part of the x86 southbridge AMD Geode CS5536.
 * It is a high-speed, DMA-capable USB device controller. Besides ep0, it
 * provides 4 IN and 4 OUT endpoints (bulk or interrupt type).
 *
 * Make sure that the UDC is assigned to port 4 by BIOS settings (the port
 * can also be used as a host port) and that the UOC bits PAD_EN and APU
 * are set (should be done by BIOS init).
 *
 * UDC DMA requires 32-bit aligned buffers, so DMA with gadget ether does
 * not work without updating NET_IP_ALIGN. Alternatively, PIO mode (module
 * param "use_dma=0") can be used with gadget ether.
 */
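
/*
 * For example (assuming the module is built as amd5536udc.ko), PIO mode
 * can be selected at module load time:
 *
 *	modprobe amd5536udc use_dma=0
 */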

/* debug control */
/* #define UDC_VERBOSE */

/* Driver strings */
#define UDC_MOD_DESCRIPTION             "AMD 5536 UDC - USB Device Controller"
#define UDC_DRIVER_VERSION_STRING       "01.00.0206 - $Revision: #3 $"

/* system */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/ioctl.h>
#include <linux/fs.h>
#include <linux/dmapool.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/irq.h>

#include <asm/byteorder.h>
#include <asm/system.h>
#include <asm/unaligned.h>

/* gadget stack */
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

/* udc specific */
#include "amd5536udc.h"


static void udc_tasklet_disconnect(unsigned long);
static void empty_req_queue(struct udc_ep *);
static int udc_probe(struct udc *dev);
static void udc_basic_init(struct udc *dev);
static void udc_setup_endpoints(struct udc *dev);
static void udc_soft_reset(struct udc *dev);
static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep);
static void udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq);
static int udc_free_dma_chain(struct udc *dev, struct udc_request *req);
static int udc_create_dma_chain(struct udc_ep *ep, struct udc_request *req,
                                unsigned long buf_len, gfp_t gfp_flags);
static int udc_remote_wakeup(struct udc *dev);
static int udc_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static void udc_pci_remove(struct pci_dev *pdev);

/* description */
static const char mod_desc[] = UDC_MOD_DESCRIPTION;
static const char name[] = "amd5536udc";

/* structure to hold endpoint function pointers */
static const struct usb_ep_ops udc_ep_ops;

/* received setup data */
static union udc_setup_data setup_data;

/* pointer to device object */
static struct udc *udc;

/* irq spin lock for soft reset */
static DEFINE_SPINLOCK(udc_irq_spinlock);
/* stall spin lock */
static DEFINE_SPINLOCK(udc_stall_spinlock);

/*
 * slave mode: pending bytes in rx fifo after nyet,
 * used if EPIN irq came but no req was available
 */
static unsigned int udc_rxfifo_pending;

/* count soft resets after suspend to avoid loop */
static int soft_reset_occured;
static int soft_reset_after_usbreset_occured;

/* timer */
static struct timer_list udc_timer;
static int stop_timer;

/* set_rde -- Is used to control enabling of RX DMA. The problem is
 * that the UDC has only one bit (RDE) to enable/disable RX DMA for
 * all OUT endpoints. So we have to handle race conditions like
 * when OUT data reaches the fifo but no request was queued yet.
 * This cannot be solved by leaving RX DMA disabled until a
 * request gets queued, because there may be other OUT packets
 * in the FIFO (important for not blocking control traffic).
 * The value of set_rde controls the corresponding timer.
 *
 * set_rde -1 == not used, means it is allowed to be set to 0 or 1
 * set_rde  0 == do not touch RDE, do not start the RDE timer
 * set_rde  1 == timer function will look whether FIFO has data
 * set_rde  2 == set by timer function to enable RX DMA on next call
 */
static int set_rde = -1;
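
/*
 * Example sequence (a sketch, based on the state values above): OUT data
 * reaches the FIFO before a request is queued -> the RDE timer is started
 * with set_rde = 1; the timer function sees data in the FIFO and sets
 * set_rde = 2 so that RX DMA gets enabled on its next invocation.
 */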

static DECLARE_COMPLETION(on_exit);
static struct timer_list udc_pollstall_timer;
static int stop_pollstall_timer;
static DECLARE_COMPLETION(on_pollstall_exit);

/* tasklet for usb disconnect */
static DECLARE_TASKLET(disconnect_tasklet, udc_tasklet_disconnect,
                (unsigned long) &udc);


/* endpoint names used for print */
static const char ep0_string[] = "ep0in";
static const char *ep_string[] = {
        ep0_string,
        "ep1in-int", "ep2in-bulk", "ep3in-bulk", "ep4in-bulk", "ep5in-bulk",
        "ep6in-bulk", "ep7in-bulk", "ep8in-bulk", "ep9in-bulk", "ep10in-bulk",
        "ep11in-bulk", "ep12in-bulk", "ep13in-bulk", "ep14in-bulk",
        "ep15in-bulk", "ep0out", "ep1out-bulk", "ep2out-bulk", "ep3out-bulk",
        "ep4out-bulk", "ep5out-bulk", "ep6out-bulk", "ep7out-bulk",
        "ep8out-bulk", "ep9out-bulk", "ep10out-bulk", "ep11out-bulk",
        "ep12out-bulk", "ep13out-bulk", "ep14out-bulk", "ep15out-bulk"
};

/* DMA usage flag */
static int use_dma = 1;
/* packet per buffer dma */
static int use_dma_ppb = 1;
/* with per descr. update */
static int use_dma_ppb_du;
/* buffer fill mode */
static int use_dma_bufferfill_mode;
/* full speed only mode */
static int use_fullspeed;
/* tx buffer size for high speed */
static unsigned long hs_tx_buf = UDC_EPIN_BUFF_SIZE;

/* module parameters */
module_param(use_dma, bool, S_IRUGO);
MODULE_PARM_DESC(use_dma, "true for DMA");
module_param(use_dma_ppb, bool, S_IRUGO);
MODULE_PARM_DESC(use_dma_ppb, "true for DMA in packet per buffer mode");
module_param(use_dma_ppb_du, bool, S_IRUGO);
MODULE_PARM_DESC(use_dma_ppb_du,
        "true for DMA in packet per buffer mode with descriptor update");
module_param(use_fullspeed, bool, S_IRUGO);
MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");
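
/*
 * Note: since the parameters are declared with S_IRUGO, their current
 * values can be read at runtime, e.g. under
 * /sys/module/amd5536udc/parameters/use_dma.
 */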

/*---------------------------------------------------------------------------*/
/* Prints UDC device registers and endpoint irq registers */
static void print_regs(struct udc *dev)
{
        DBG(dev, "------- Device registers -------\n");
        DBG(dev, "dev config     = %08x\n", readl(&dev->regs->cfg));
        DBG(dev, "dev control    = %08x\n", readl(&dev->regs->ctl));
        DBG(dev, "dev status     = %08x\n", readl(&dev->regs->sts));
        DBG(dev, "\n");
        DBG(dev, "dev int's      = %08x\n", readl(&dev->regs->irqsts));
        DBG(dev, "dev intmask    = %08x\n", readl(&dev->regs->irqmsk));
        DBG(dev, "\n");
        DBG(dev, "dev ep int's   = %08x\n", readl(&dev->regs->ep_irqsts));
        DBG(dev, "dev ep intmask = %08x\n", readl(&dev->regs->ep_irqmsk));
        DBG(dev, "\n");
        DBG(dev, "USE DMA        = %d\n", use_dma);
        if (use_dma && use_dma_ppb && !use_dma_ppb_du) {
                DBG(dev, "DMA mode       = PPBNDU (packet per buffer "
                        "WITHOUT desc. update)\n");
                dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBNDU");
        } else if (use_dma && use_dma_ppb && use_dma_ppb_du) {
                DBG(dev, "DMA mode       = PPBDU (packet per buffer "
                        "WITH desc. update)\n");
                dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBDU");
        }
        if (use_dma && use_dma_bufferfill_mode) {
                DBG(dev, "DMA mode       = BF (buffer fill mode)\n");
                dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "BF");
        }
        if (!use_dma) {
                dev_info(&dev->pdev->dev, "FIFO mode\n");
        }
        DBG(dev, "-------------------------------------------------------\n");
}

/* Masks unused interrupts */
static int udc_mask_unused_interrupts(struct udc *dev)
{
        u32 tmp;

        /* mask all dev interrupts */
        tmp =   AMD_BIT(UDC_DEVINT_SVC) |
                AMD_BIT(UDC_DEVINT_ENUM) |
                AMD_BIT(UDC_DEVINT_US) |
                AMD_BIT(UDC_DEVINT_UR) |
                AMD_BIT(UDC_DEVINT_ES) |
                AMD_BIT(UDC_DEVINT_SI) |
                AMD_BIT(UDC_DEVINT_SOF) |
                AMD_BIT(UDC_DEVINT_SC);
        writel(tmp, &dev->regs->irqmsk);

        /* mask all ep interrupts */
        writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqmsk);

        return 0;
}

/* Enables endpoint 0 interrupts */
static int udc_enable_ep0_interrupts(struct udc *dev)
{
        u32 tmp;

        DBG(dev, "udc_enable_ep0_interrupts()\n");

        /* read irq mask */
        tmp = readl(&dev->regs->ep_irqmsk);
        /* enable ep0 irq's */
        tmp &= AMD_UNMASK_BIT(UDC_EPINT_IN_EP0)
                & AMD_UNMASK_BIT(UDC_EPINT_OUT_EP0);
        writel(tmp, &dev->regs->ep_irqmsk);

        return 0;
}

/* Enables device interrupts for SET_INTF and SET_CONFIG */
static int udc_enable_dev_setup_interrupts(struct udc *dev)
{
        u32 tmp;

        DBG(dev, "enable device interrupts for setup data\n");

        /* read irq mask */
        tmp = readl(&dev->regs->irqmsk);

        /* enable SET_INTERFACE, SET_CONFIG and other needed irq's */
        tmp &= AMD_UNMASK_BIT(UDC_DEVINT_SI)
                & AMD_UNMASK_BIT(UDC_DEVINT_SC)
                & AMD_UNMASK_BIT(UDC_DEVINT_UR)
                & AMD_UNMASK_BIT(UDC_DEVINT_SVC)
                & AMD_UNMASK_BIT(UDC_DEVINT_ENUM);
        writel(tmp, &dev->regs->irqmsk);

        return 0;
}

/* Calculates fifo start of endpoint based on preceding endpoints */
static int udc_set_txfifo_addr(struct udc_ep *ep)
{
        struct udc      *dev;
        u32 tmp;
        int i;

        if (!ep || !(ep->in))
                return -EINVAL;

        dev = ep->dev;
        ep->txfifo = dev->txfifo;

        /* traverse ep's */
        for (i = 0; i < ep->num; i++) {
                if (dev->ep[i].regs) {
                        /* read fifo size */
                        tmp = readl(&dev->ep[i].regs->bufin_framenum);
                        tmp = AMD_GETBITS(tmp, UDC_EPIN_BUFF_SIZE);
                        ep->txfifo += tmp;
                }
        }
        return 0;
}

/* CNAK pending field: bit0 = ep0in, bit16 = ep0out */
static u32 cnak_pending;

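/*
 * Checks whether a preceding CNAK write actually cleared the NAK bit;
 * if not, the endpoint is recorded in cnak_pending so that the clear
 * can be retried later.
 */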
static void UDC_QUEUE_CNAK(struct udc_ep *ep, unsigned num)
{
        if (readl(&ep->regs->ctl) & AMD_BIT(UDC_EPCTL_NAK)) {
                DBG(ep->dev, "NAK could not be cleared for ep%d\n", num);
                cnak_pending |= 1 << (num);
                ep->naking = 1;
        } else
                cnak_pending = cnak_pending & (~(1 << (num)));
}


/* Enables endpoint, is called by gadget driver */
static int
udc_ep_enable(struct usb_ep *usbep, const struct usb_endpoint_descriptor *desc)
{
        struct udc_ep           *ep;
        struct udc              *dev;
        u32                     tmp;
        unsigned long           iflags;
        u8 udc_csr_epix;
        unsigned                maxpacket;

        if (!usbep
                        || usbep->name == ep0_string
                        || !desc
                        || desc->bDescriptorType != USB_DT_ENDPOINT)
                return -EINVAL;

        ep = container_of(usbep, struct udc_ep, ep);
        dev = ep->dev;

        DBG(dev, "udc_ep_enable() ep %d\n", ep->num);

        if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
                return -ESHUTDOWN;

        spin_lock_irqsave(&dev->lock, iflags);
        ep->desc = desc;

        ep->halted = 0;

        /* set traffic type */
        tmp = readl(&dev->ep[ep->num].regs->ctl);
        tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_EPCTL_ET);
        writel(tmp, &dev->ep[ep->num].regs->ctl);

        /* set max packet size */
        maxpacket = le16_to_cpu(desc->wMaxPacketSize);
        tmp = readl(&dev->ep[ep->num].regs->bufout_maxpkt);
        tmp = AMD_ADDBITS(tmp, maxpacket, UDC_EP_MAX_PKT_SIZE);
        ep->ep.maxpacket = maxpacket;
        writel(tmp, &dev->ep[ep->num].regs->bufout_maxpkt);

        /* IN ep */
        if (ep->in) {

                /* ep ix in UDC CSR register space */
                udc_csr_epix = ep->num;

                /* set buffer size (tx fifo entries) */
                tmp = readl(&dev->ep[ep->num].regs->bufin_framenum);
                /* double buffering: fifo size = 2 x max packet size */
                tmp = AMD_ADDBITS(
                                tmp,
                                maxpacket * UDC_EPIN_BUFF_SIZE_MULT
                                          / UDC_DWORD_BYTES,
                                UDC_EPIN_BUFF_SIZE);
                writel(tmp, &dev->ep[ep->num].regs->bufin_framenum);

                /* calc. tx fifo base addr */
                udc_set_txfifo_addr(ep);

                /* flush fifo */
                tmp = readl(&ep->regs->ctl);
                tmp |= AMD_BIT(UDC_EPCTL_F);
                writel(tmp, &ep->regs->ctl);

        /* OUT ep */
        } else {
                /* ep ix in UDC CSR register space */
                udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;

                /* set max packet size UDC CSR  */
                tmp = readl(&dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);
                tmp = AMD_ADDBITS(tmp, maxpacket,
                                        UDC_CSR_NE_MAX_PKT);
                writel(tmp, &dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);

                if (use_dma && !ep->in) {
                        /* alloc and init BNA dummy request */
                        ep->bna_dummy_req = udc_alloc_bna_dummy(ep);
                        ep->bna_occurred = 0;
                }

                if (ep->num != UDC_EP0OUT_IX)
                        dev->data_ep_enabled = 1;
        }

        /* set ep values */
        tmp = readl(&dev->csr->ne[udc_csr_epix]);
        /* max packet */
        tmp = AMD_ADDBITS(tmp, maxpacket, UDC_CSR_NE_MAX_PKT);
        /* ep number */
        tmp = AMD_ADDBITS(tmp, desc->bEndpointAddress, UDC_CSR_NE_NUM);
        /* ep direction */
        tmp = AMD_ADDBITS(tmp, ep->in, UDC_CSR_NE_DIR);
        /* ep type */
        tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_CSR_NE_TYPE);
        /* ep config */
        tmp = AMD_ADDBITS(tmp, ep->dev->cur_config, UDC_CSR_NE_CFG);
        /* ep interface */
        tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf, UDC_CSR_NE_INTF);
        /* ep alt */
        tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt, UDC_CSR_NE_ALT);
        /* write reg */
        writel(tmp, &dev->csr->ne[udc_csr_epix]);

        /* enable ep irq */
        tmp = readl(&dev->regs->ep_irqmsk);
        tmp &= AMD_UNMASK_BIT(ep->num);
        writel(tmp, &dev->regs->ep_irqmsk);

        /*
         * clear NAK by writing CNAK
         * avoid BNA for OUT DMA, don't clear NAK until DMA desc. written
         */
        if (!use_dma || ep->in) {
                tmp = readl(&ep->regs->ctl);
                tmp |= AMD_BIT(UDC_EPCTL_CNAK);
                writel(tmp, &ep->regs->ctl);
                ep->naking = 0;
                UDC_QUEUE_CNAK(ep, ep->num);
        }
        DBG(dev, "%s enabled\n", usbep->name);

        spin_unlock_irqrestore(&dev->lock, iflags);
        return 0;
}

/* Resets endpoint */
static void ep_init(struct udc_regs __iomem *regs, struct udc_ep *ep)
{
        u32             tmp;

        VDBG(ep->dev, "ep-%d reset\n", ep->num);
        ep->desc = NULL;
        ep->ep.ops = &udc_ep_ops;
        INIT_LIST_HEAD(&ep->queue);

        ep->ep.maxpacket = (u16) ~0;
        /* set NAK */
        tmp = readl(&ep->regs->ctl);
        tmp |= AMD_BIT(UDC_EPCTL_SNAK);
        writel(tmp, &ep->regs->ctl);
        ep->naking = 1;

        /* disable interrupt */
        tmp = readl(&regs->ep_irqmsk);
        tmp |= AMD_BIT(ep->num);
        writel(tmp, &regs->ep_irqmsk);

        if (ep->in) {
                /* unset P and IN bit of potential former DMA */
                tmp = readl(&ep->regs->ctl);
                tmp &= AMD_UNMASK_BIT(UDC_EPCTL_P);
                writel(tmp, &ep->regs->ctl);

                tmp = readl(&ep->regs->sts);
                tmp |= AMD_BIT(UDC_EPSTS_IN);
                writel(tmp, &ep->regs->sts);

                /* flush the fifo */
                tmp = readl(&ep->regs->ctl);
                tmp |= AMD_BIT(UDC_EPCTL_F);
                writel(tmp, &ep->regs->ctl);

        }
        /* reset desc pointer */
        writel(0, &ep->regs->desptr);
}

/* Disables endpoint, is called by gadget driver */
static int udc_ep_disable(struct usb_ep *usbep)
{
        struct udc_ep   *ep = NULL;
        unsigned long   iflags;

        if (!usbep)
                return -EINVAL;

        ep = container_of(usbep, struct udc_ep, ep);
        if (usbep->name == ep0_string || !ep->desc)
                return -EINVAL;

        DBG(ep->dev, "Disable ep-%d\n", ep->num);

        spin_lock_irqsave(&ep->dev->lock, iflags);
        udc_free_request(&ep->ep, &ep->bna_dummy_req->req);
        empty_req_queue(ep);
        ep_init(ep->dev->regs, ep);
        spin_unlock_irqrestore(&ep->dev->lock, iflags);

        return 0;
}

/* Allocates request packet, called by gadget driver */
static struct usb_request *
udc_alloc_request(struct usb_ep *usbep, gfp_t gfp)
{
        struct udc_request      *req;
        struct udc_data_dma     *dma_desc;
        struct udc_ep   *ep;

        if (!usbep)
                return NULL;

        ep = container_of(usbep, struct udc_ep, ep);

        VDBG(ep->dev, "udc_alloc_req(): ep%d\n", ep->num);
        req = kzalloc(sizeof(struct udc_request), gfp);
        if (!req)
                return NULL;

        req->req.dma = DMA_DONT_USE;
        INIT_LIST_HEAD(&req->queue);

        if (ep->dma) {
                /* ep0 in requests are allocated from data pool here */
                dma_desc = pci_pool_alloc(ep->dev->data_requests, gfp,
                                                &req->td_phys);
                if (!dma_desc) {
                        kfree(req);
                        return NULL;
                }

                VDBG(ep->dev, "udc_alloc_req: req = %p dma_desc = %p, "
                                "td_phys = %lx\n",
                                req, dma_desc,
                                (unsigned long)req->td_phys);
                /* prevent the descriptor from being used - set HOST BUSY */
                dma_desc->status = AMD_ADDBITS(dma_desc->status,
                                                UDC_DMA_STP_STS_BS_HOST_BUSY,
                                                UDC_DMA_STP_STS_BS);
                dma_desc->bufptr = __constant_cpu_to_le32(DMA_DONT_USE);
                req->td_data = dma_desc;
                req->td_data_last = NULL;
                req->chain_len = 1;
        }

        return &req->req;
}

/* Frees request packet, called by gadget driver */
static void
udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq)
{
        struct udc_ep   *ep;
        struct udc_request      *req;

        if (!usbep || !usbreq)
                return;

        ep = container_of(usbep, struct udc_ep, ep);
        req = container_of(usbreq, struct udc_request, req);
        VDBG(ep->dev, "free_req req=%p\n", req);
        BUG_ON(!list_empty(&req->queue));
        if (req->td_data) {
                VDBG(ep->dev, "req->td_data=%p\n", req->td_data);

                /* free dma chain if created */
                if (req->chain_len > 1) {
                        udc_free_dma_chain(ep->dev, req);
                }

                pci_pool_free(ep->dev->data_requests, req->td_data,
                                                        req->td_phys);
        }
        kfree(req);
}

/* Init BNA dummy descriptor for HOST BUSY and pointing to itself */
static void udc_init_bna_dummy(struct udc_request *req)
{
        if (req) {
                /* set last bit */
                req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);
                /* set next pointer to itself */
                req->td_data->next = req->td_phys;
                /* set buffer status to DMA_DONE */
                req->td_data->status
                        = AMD_ADDBITS(req->td_data->status,
                                        UDC_DMA_STP_STS_BS_DMA_DONE,
                                        UDC_DMA_STP_STS_BS);
#ifdef UDC_VERBOSE
                pr_debug("bna desc = %p, sts = %08x\n",
                        req->td_data, req->td_data->status);
#endif
        }
}

/* Allocate BNA dummy descriptor */
static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep)
{
        struct udc_request *req = NULL;
        struct usb_request *_req = NULL;

        /* alloc the dummy request */
        _req = udc_alloc_request(&ep->ep, GFP_ATOMIC);
        if (_req) {
                req = container_of(_req, struct udc_request, req);
                ep->bna_dummy_req = req;
                udc_init_bna_dummy(req);
        }
        return req;
}

/* Write data to TX fifo for IN packets */
static void
udc_txfifo_write(struct udc_ep *ep, struct usb_request *req)
{
        u8                      *req_buf;
        u32                     *buf;
        int                     i, j;
        unsigned                bytes = 0;
        unsigned                remaining = 0;

        if (!req || !ep)
                return;

        req_buf = req->buf + req->actual;
        prefetch(req_buf);
        remaining = req->length - req->actual;

        buf = (u32 *) req_buf;

        bytes = ep->ep.maxpacket;
        if (bytes > remaining)
                bytes = remaining;

        /* dwords first */
        for (i = 0; i < bytes / UDC_DWORD_BYTES; i++) {
                writel(*(buf + i), ep->txfifo);
        }

        /* remaining bytes must be written by byte access */
        for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
                writeb((u8)(*(buf + i) >> (j << UDC_BITS_PER_BYTE_SHIFT)),
                                                        ep->txfifo);
        }

        /* dummy write confirm */
        writel(0, &ep->regs->confirm);
}

/* Read dwords from RX fifo for OUT transfers */
static int udc_rxfifo_read_dwords(struct udc *dev, u32 *buf, int dwords)
{
        int i;

        VDBG(dev, "udc_read_dwords(): %d dwords\n", dwords);

        for (i = 0; i < dwords; i++) {
                *(buf + i) = readl(dev->rxfifo);
        }
        return 0;
}

/* Read bytes from RX fifo for OUT transfers */
static int udc_rxfifo_read_bytes(struct udc *dev, u8 *buf, int bytes)
{
        int i, j;
        u32 tmp;

        VDBG(dev, "udc_read_bytes(): %d bytes\n", bytes);

        /* dwords first */
        for (i = 0; i < bytes / UDC_DWORD_BYTES; i++) {
                *((u32 *)(buf + (i<<2))) = readl(dev->rxfifo);
        }

        /* remaining bytes must be read by byte access */
        if (bytes % UDC_DWORD_BYTES) {
                tmp = readl(dev->rxfifo);
                for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
                        *(buf + (i<<2) + j) = (u8)(tmp & UDC_BYTE_MASK);
                        tmp = tmp >> UDC_BITS_PER_BYTE;
                }
        }

        return 0;
}

/* Read data from RX fifo for OUT transfers */
static int
udc_rxfifo_read(struct udc_ep *ep, struct udc_request *req)
{
        u8 *buf;
        unsigned buf_space;
        unsigned bytes = 0;
        unsigned finished = 0;

        /* number of received bytes */
        bytes = readl(&ep->regs->sts);
        bytes = AMD_GETBITS(bytes, UDC_EPSTS_RX_PKT_SIZE);

        buf_space = req->req.length - req->req.actual;
        buf = req->req.buf + req->req.actual;
        if (bytes > buf_space) {
                if ((buf_space % ep->ep.maxpacket) != 0) {
                        DBG(ep->dev,
                                "%s: rx %d bytes, rx-buf space = %d bytes\n",
                                ep->ep.name, bytes, buf_space);
                        req->req.status = -EOVERFLOW;
                }
                bytes = buf_space;
        }
        req->req.actual += bytes;

        /* last packet ? */
        if (((bytes % ep->ep.maxpacket) != 0) || (!bytes)
                || ((req->req.actual == req->req.length) && !req->req.zero))
                finished = 1;

        /* read rx fifo bytes */
        VDBG(ep->dev, "ep %s: rxfifo read %d bytes\n", ep->ep.name, bytes);
        udc_rxfifo_read_bytes(ep->dev, buf, bytes);

        return finished;
}

/* create/re-init a DMA descriptor or a DMA descriptor chain */
static int prep_dma(struct udc_ep *ep, struct udc_request *req, gfp_t gfp)
{
        int     retval = 0;
        u32     tmp;

        VDBG(ep->dev, "prep_dma\n");
        VDBG(ep->dev, "prep_dma ep%d req->td_data=%p\n",
                        ep->num, req->td_data);

        /* set buffer pointer */
        req->td_data->bufptr = req->req.dma;

        /* set last bit */
        req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);

        /* build/re-init dma chain if maxpkt scatter mode, not for EP0 */
        if (use_dma_ppb) {

                retval = udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
                if (retval != 0) {
                        if (retval == -ENOMEM)
                                DBG(ep->dev, "Out of DMA memory\n");
                        return retval;
                }
                if (ep->in) {
                        if (req->req.length == ep->ep.maxpacket) {
                                /* write tx bytes */
                                req->td_data->status =
                                        AMD_ADDBITS(req->td_data->status,
                                                ep->ep.maxpacket,
                                                UDC_DMA_IN_STS_TXBYTES);

                        }
                }

        }

        if (ep->in) {
                VDBG(ep->dev, "IN: use_dma_ppb=%d req->req.len=%d "
                                "maxpacket=%d ep%d\n",
                                use_dma_ppb, req->req.length,
                                ep->ep.maxpacket, ep->num);
                /*
                 * if bytes < max packet then tx bytes must
                 * be written in packet per buffer mode
                 */
                if (!use_dma_ppb || req->req.length < ep->ep.maxpacket
                                || ep->num == UDC_EP0OUT_IX
                                || ep->num == UDC_EP0IN_IX) {
                        /* write tx bytes */
                        req->td_data->status =
                                AMD_ADDBITS(req->td_data->status,
                                                req->req.length,
                                                UDC_DMA_IN_STS_TXBYTES);
                        /* reset frame num */
                        req->td_data->status =
                                AMD_ADDBITS(req->td_data->status,
                                                0,
                                                UDC_DMA_IN_STS_FRAMENUM);
                }
                /* set HOST BUSY */
                req->td_data->status =
                        AMD_ADDBITS(req->td_data->status,
                                UDC_DMA_STP_STS_BS_HOST_BUSY,
                                UDC_DMA_STP_STS_BS);
        } else {
                VDBG(ep->dev, "OUT set host ready\n");
                /* set HOST READY */
                req->td_data->status =
                        AMD_ADDBITS(req->td_data->status,
                                UDC_DMA_STP_STS_BS_HOST_READY,
                                UDC_DMA_STP_STS_BS);

                /* clear NAK by writing CNAK */
                if (ep->naking) {
                        tmp = readl(&ep->regs->ctl);
                        tmp |= AMD_BIT(UDC_EPCTL_CNAK);
                        writel(tmp, &ep->regs->ctl);
                        ep->naking = 0;
                        UDC_QUEUE_CNAK(ep, ep->num);
                }

        }

        return retval;
}

/* Completes request packet ... caller MUST hold lock */
static void
complete_req(struct udc_ep *ep, struct udc_request *req, int sts)
__releases(ep->dev->lock)
__acquires(ep->dev->lock)
{
        struct udc              *dev;
        unsigned                halted;

        VDBG(ep->dev, "complete_req(): ep%d\n", ep->num);

        dev = ep->dev;
        /* unmap DMA */
        if (req->dma_mapping) {
                if (ep->in)
                        pci_unmap_single(dev->pdev,
                                        req->req.dma,
                                        req->req.length,
                                        PCI_DMA_TODEVICE);
                else
                        pci_unmap_single(dev->pdev,
                                        req->req.dma,
                                        req->req.length,
                                        PCI_DMA_FROMDEVICE);
                req->dma_mapping = 0;
                req->req.dma = DMA_DONT_USE;
        }

        halted = ep->halted;
        ep->halted = 1;

        /* set new status if pending */
        if (req->req.status == -EINPROGRESS)
                req->req.status = sts;

        /* remove from ep queue */
        list_del_init(&req->queue);

        VDBG(ep->dev, "req %p => complete %d bytes at %s with sts %d\n",
                &req->req, req->req.length, ep->ep.name, sts);

        spin_unlock(&dev->lock);
        req->req.complete(&ep->ep, &req->req);
        spin_lock(&dev->lock);
        ep->halted = halted;
}

/* frees pci pool descriptors of a DMA chain */
static int udc_free_dma_chain(struct udc *dev, struct udc_request *req)
{

        int ret_val = 0;
        struct udc_data_dma     *td;
        struct udc_data_dma     *td_last = NULL;
        unsigned int i;

        DBG(dev, "free chain req = %p\n", req);

        /* do not free first desc., will be done by free for request */
        td_last = req->td_data;
        td = phys_to_virt(td_last->next);

        for (i = 1; i < req->chain_len; i++) {

                pci_pool_free(dev->data_requests, td,
                                (dma_addr_t) td_last->next);
                td_last = td;
                td = phys_to_virt(td_last->next);
        }

        return ret_val;
}

/* Iterates to the end of a DMA chain and returns last descriptor */
static struct udc_data_dma *udc_get_last_dma_desc(struct udc_request *req)
{
        struct udc_data_dma     *td;

        td = req->td_data;
        while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L))) {
                td = phys_to_virt(td->next);
        }

        return td;

}

/* Iterates to the end of a DMA chain and counts bytes received */
static u32 udc_get_ppbdu_rxbytes(struct udc_request *req)
{
        struct udc_data_dma     *td;
        u32 count;

        td = req->td_data;
        /* number of received bytes */
        count = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_RXBYTES);

        while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L))) {
                td = phys_to_virt(td->next);
                /* number of received bytes */
                if (td) {
                        count += AMD_GETBITS(td->status,
                                UDC_DMA_OUT_STS_RXBYTES);
                }
        }

        return count;

}

/* Creates or re-inits a DMA chain */
static int udc_create_dma_chain(
        struct udc_ep *ep,
        struct udc_request *req,
        unsigned long buf_len, gfp_t gfp_flags
)
{
        unsigned long bytes = req->req.length;
        unsigned int i;
        dma_addr_t dma_addr;
        struct udc_data_dma     *td = NULL;
        struct udc_data_dma     *last = NULL;
        unsigned long txbytes;
        unsigned create_new_chain = 0;
        unsigned len;

        VDBG(ep->dev, "udc_create_dma_chain: bytes=%ld buf_len=%ld\n",
                        bytes, buf_len);
        dma_addr = DMA_DONT_USE;

        /* unset L bit in first desc for OUT */
        if (!ep->in) {
                req->td_data->status &= AMD_CLEAR_BIT(UDC_DMA_IN_STS_L);
        }

        /* alloc only new desc's if not already available */
        len = req->req.length / ep->ep.maxpacket;
        if (req->req.length % ep->ep.maxpacket) {
                len++;
        }

        if (len > req->chain_len) {
                /* shorter chain already allocated before */
                if (req->chain_len > 1) {
                        udc_free_dma_chain(ep->dev, req);
                }
                req->chain_len = len;
                create_new_chain = 1;
        }

        td = req->td_data;
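        /*
         * Note: the loop below starts at i = buf_len because the first
         * buf_len bytes are already covered by the first descriptor
         * (req->td_data), whose buffer pointer is set in prep_dma().
         */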
        /* gen. required number of descriptors and buffers */
        for (i = buf_len; i < bytes; i += buf_len) {
                /* create or determine next desc. */
                if (create_new_chain) {

                        td = pci_pool_alloc(ep->dev->data_requests,
                                        gfp_flags, &dma_addr);
                        if (!td)
                                return -ENOMEM;

                        td->status = 0;
                } else if (i == buf_len) {
                        /* first td */
                        td = (struct udc_data_dma *) phys_to_virt(
                                                req->td_data->next);
                        td->status = 0;
                } else {
                        td = (struct udc_data_dma *) phys_to_virt(last->next);
                        td->status = 0;
                }


                if (td)
                        td->bufptr = req->req.dma + i; /* assign buffer */
                else
                        break;

                /* short packet ? */
                if ((bytes - i) >= buf_len) {
                        txbytes = buf_len;
                } else {
                        /* short packet */
                        txbytes = bytes - i;
                }

                /* link td and assign tx bytes */
                if (i == buf_len) {
                        if (create_new_chain) {
                                req->td_data->next = dma_addr;
                        } else {
                                /* req->td_data->next = virt_to_phys(td); */
                        }
                        /* write tx bytes */
                        if (ep->in) {
                                /* first desc */
                                req->td_data->status =
                                        AMD_ADDBITS(req->td_data->status,
                                                        ep->ep.maxpacket,
                                                        UDC_DMA_IN_STS_TXBYTES);
                                /* second desc */
                                td->status = AMD_ADDBITS(td->status,
                                                        txbytes,
                                                        UDC_DMA_IN_STS_TXBYTES);
                        }
                } else {
                        if (create_new_chain) {
                                last->next = dma_addr;
                        } else {
                                /* last->next = virt_to_phys(td); */
                        }
                        if (ep->in) {
                                /* write tx bytes */
                                td->status = AMD_ADDBITS(td->status,
                                                        txbytes,
                                                        UDC_DMA_IN_STS_TXBYTES);
                        }
                }
                last = td;
        }
        /* set last bit */
        if (td) {
                td->status |= AMD_BIT(UDC_DMA_IN_STS_L);
                /* last desc. points to itself */
                req->td_data_last = td;
        }

        return 0;
}

/* Enabling RX DMA */
static void udc_set_rde(struct udc *dev)
{
        u32 tmp;

        VDBG(dev, "udc_set_rde()\n");
        /* stop RDE timer */
        if (timer_pending(&udc_timer)) {
                set_rde = 0;
                mod_timer(&udc_timer, jiffies - 1);
        }
        /* set RDE */
        tmp = readl(&dev->regs->ctl);
        tmp |= AMD_BIT(UDC_DEVCTL_RDE);
        writel(tmp, &dev->regs->ctl);
}

/* Queues a request packet, called by gadget driver */
static int
udc_queue(struct usb_ep *usbep, struct usb_request *usbreq, gfp_t gfp)
{
        int                     retval = 0;
        u8                      open_rxfifo = 0;
        unsigned long           iflags;
        struct udc_ep           *ep;
        struct udc_request      *req;
        struct udc              *dev;
        u32                     tmp;

        /* check the inputs */
        req = container_of(usbreq, struct udc_request, req);

        if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf
                        || !list_empty(&req->queue))
                return -EINVAL;

        ep = container_of(usbep, struct udc_ep, ep);
        if (!ep->desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
                return -EINVAL;

        VDBG(ep->dev, "udc_queue(): ep%d-in=%d\n", ep->num, ep->in);
        dev = ep->dev;

        if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
                return -ESHUTDOWN;

        /* map dma (usually done before) */
        if (ep->dma && usbreq->length != 0
                        && (usbreq->dma == DMA_DONT_USE || usbreq->dma == 0)) {
                VDBG(dev, "DMA map req %p\n", req);
                if (ep->in)
                        usbreq->dma = pci_map_single(dev->pdev,
                                                usbreq->buf,
                                                usbreq->length,
                                                PCI_DMA_TODEVICE);
                else
                        usbreq->dma = pci_map_single(dev->pdev,
                                                usbreq->buf,
                                                usbreq->length,
                                                PCI_DMA_FROMDEVICE);
                req->dma_mapping = 1;
        }

        VDBG(dev, "%s queue req %p, len %d req->td_data=%p buf %p\n",
                        usbep->name, usbreq, usbreq->length,
                        req->td_data, usbreq->buf);

        spin_lock_irqsave(&dev->lock, iflags);
        usbreq->actual = 0;
        usbreq->status = -EINPROGRESS;
        req->dma_done = 0;

        /* on empty queue just do first transfer */
        if (list_empty(&ep->queue)) {
                /* zlp */
                if (usbreq->length == 0) {
                        /* IN zlp's are handled by hardware */
                        complete_req(ep, req, 0);
                        VDBG(dev, "%s: zlp\n", ep->ep.name);
                        /*
                         * if set_config or set_intf is waiting for ack by zlp
                         * then set CSR_DONE
                         */
                        if (dev->set_cfg_not_acked) {
                                tmp = readl(&dev->regs->ctl);
                                tmp |= AMD_BIT(UDC_DEVCTL_CSR_DONE);
                                writel(tmp, &dev->regs->ctl);
                                dev->set_cfg_not_acked = 0;
                        }
                        /* setup command is ACK'ed now by zlp */
                        if (dev->waiting_zlp_ack_ep0in) {
                                /* clear NAK by writing CNAK in EP0_IN */
                                tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
                                tmp |= AMD_BIT(UDC_EPCTL_CNAK);
                                writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
                                dev->ep[UDC_EP0IN_IX].naking = 0;
                                UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX],
                                                        UDC_EP0IN_IX);
                                dev->waiting_zlp_ack_ep0in = 0;
                        }
                        goto finished;
                }
                if (ep->dma) {
                        retval = prep_dma(ep, req, gfp);
                        if (retval != 0)
                                goto finished;
                        /* write desc pointer to enable DMA */
                        if (ep->in) {
                                /* set HOST READY */
                                req->td_data->status =
                                        AMD_ADDBITS(req->td_data->status,
                                                UDC_DMA_IN_STS_BS_HOST_READY,
                                                UDC_DMA_IN_STS_BS);
                        }

                        /* disable rx dma while descriptor update */
                        if (!ep->in) {
                                /* stop RDE timer */
                                if (timer_pending(&udc_timer)) {
                                        set_rde = 0;
                                        mod_timer(&udc_timer, jiffies - 1);
                                }
                                /* clear RDE */
                                tmp = readl(&dev->regs->ctl);
                                tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
                                writel(tmp, &dev->regs->ctl);
                                open_rxfifo = 1;

                                /*
                                 * if BNA occurred then let BNA dummy desc.
                                 * point to current desc.
                                 */
                                if (ep->bna_occurred) {
                                        VDBG(dev, "copy to BNA dummy desc.\n");
                                        memcpy(ep->bna_dummy_req->td_data,
                                                req->td_data,
                                                sizeof(struct udc_data_dma));
                                }
                        }
                        /* write desc pointer */
                        writel(req->td_phys, &ep->regs->desptr);

                        /* clear NAK by writing CNAK */
                        if (ep->naking) {
                                tmp = readl(&ep->regs->ctl);
                                tmp |= AMD_BIT(UDC_EPCTL_CNAK);
                                writel(tmp, &ep->regs->ctl);
                                ep->naking = 0;
                                UDC_QUEUE_CNAK(ep, ep->num);
                        }

                        if (ep->in) {
                                /* enable ep irq */
                                tmp = readl(&dev->regs->ep_irqmsk);
                                tmp &= AMD_UNMASK_BIT(ep->num);
                                writel(tmp, &dev->regs->ep_irqmsk);
                        }
                }

        } else if (ep->dma) {

                /*
                 * prep_dma not used for OUT ep's, this is not possible
                 * for PPB modes, because of chain creation reasons
                 */
                if (ep->in) {
                        retval = prep_dma(ep, req, gfp);
                        if (retval != 0)
                                goto finished;
                }
        }
        VDBG(dev, "list_add\n");
        /* add request to ep queue */
        if (req) {

                list_add_tail(&req->queue, &ep->queue);

                /* open rxfifo if out data queued */
                if (open_rxfifo) {
                        /* enable DMA */
                        req->dma_going = 1;
                        udc_set_rde(dev);
                        if (ep->num != UDC_EP0OUT_IX)
                                dev->data_ep_queued = 1;
                }
                /* stop OUT naking */
                if (!ep->in) {
                        if (!use_dma && udc_rxfifo_pending) {
                                DBG(dev, "udc_queue(): pending bytes in "
                                        "rxfifo after nyet\n");
                                /*
                                 * read pending bytes after nyet:
                                 * referring to isr
                                 */
                                if (udc_rxfifo_read(ep, req)) {
                                        /* finish */
                                        complete_req(ep, req, 0);
                                }
                                udc_rxfifo_pending = 0;

                        }
                }
        }

finished:
        spin_unlock_irqrestore(&dev->lock, iflags);
        return retval;
}

/* Empty request queue of an endpoint; caller holds spinlock */
static void empty_req_queue(struct udc_ep *ep)
{
        struct udc_request      *req;

        ep->halted = 1;
        while (!list_empty(&ep->queue)) {
                req = list_entry(ep->queue.next,
                        struct udc_request,
                        queue);
                complete_req(ep, req, -ESHUTDOWN);
        }
}

/* Dequeues a request packet, called by gadget driver */
static int udc_dequeue(struct usb_ep *usbep, struct usb_request *usbreq)
{
        struct udc_ep           *ep;
        struct udc_request      *req;
        unsigned                halted;
        unsigned long           iflags;

        ep = container_of(usbep, struct udc_ep, ep);
        if (!usbep || !usbreq || (!ep->desc && (ep->num != 0
                                && ep->num != UDC_EP0OUT_IX)))
                return -EINVAL;

        req = container_of(usbreq, struct udc_request, req);

        spin_lock_irqsave(&ep->dev->lock, iflags);
        halted = ep->halted;
        ep->halted = 1;
        /* request in processing or next one */
        if (ep->queue.next == &req->queue) {
                if (ep->dma && req->dma_going) {
                        if (ep->in)
                                ep->cancel_transfer = 1;
                        else {
                                u32 tmp;
                                u32 dma_sts;
                                /* stop potential receive DMA */
                                tmp = readl(&udc->regs->ctl);
                                writel(tmp & AMD_UNMASK_BIT(UDC_DEVCTL_RDE),
                                                        &udc->regs->ctl);
                                /*
                                 * Cancel transfer later in ISR
                                 * if descriptor was touched.
                                 */
                                dma_sts = AMD_GETBITS(req->td_data->status,
                                                        UDC_DMA_OUT_STS_BS);
                                if (dma_sts != UDC_DMA_OUT_STS_BS_HOST_READY)
                                        ep->cancel_transfer = 1;
                                else {
                                        udc_init_bna_dummy(ep->req);
                                        writel(ep->bna_dummy_req->td_phys,
                                                &ep->regs->desptr);
                                }
                                writel(tmp, &udc->regs->ctl);
                        }
                }
        }
        complete_req(ep, req, -ECONNRESET);
        ep->halted = halted;

        spin_unlock_irqrestore(&ep->dev->lock, iflags);
        return 0;
}

/* Halt or clear halt of endpoint */
static int
udc_set_halt(struct usb_ep *usbep, int halt)
{
        struct udc_ep   *ep;
        u32 tmp;
        unsigned long iflags;
        int retval = 0;

        if (!usbep)
                return -EINVAL;

        pr_debug("set_halt %s: halt=%d\n", usbep->name, halt);

        ep = container_of(usbep, struct udc_ep, ep);
        if (!ep->desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
                return -EINVAL;
        if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
                return -ESHUTDOWN;

        spin_lock_irqsave(&udc_stall_spinlock, iflags);
        /* halt or clear halt */
        if (halt) {
                if (ep->num == 0)
                        ep->dev->stall_ep0in = 1;
                else {
                        /*
                         * set STALL
                         * rxfifo empty not taken into account
                         */
                        tmp = readl(&ep->regs->ctl);
                        tmp |= AMD_BIT(UDC_EPCTL_S);
                        writel(tmp, &ep->regs->ctl);
                        ep->halted = 1;

                        /* setup poll timer */
                        if (!timer_pending(&udc_pollstall_timer)) {
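                                /* convert UDC_POLLSTALL_TIMER_USECONDS
                                 * (microseconds) to jiffies */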
                                udc_pollstall_timer.expires = jiffies +
                                        HZ * UDC_POLLSTALL_TIMER_USECONDS
                                        / (1000 * 1000);
                                if (!stop_pollstall_timer) {
                                        DBG(ep->dev, "start polltimer\n");
                                        add_timer(&udc_pollstall_timer);
                                }
                        }
                }
        } else {
                /* ep is halted by set_halt() before */
                if (ep->halted) {
                        tmp = readl(&ep->regs->ctl);
                        /* clear stall bit */
                        tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
                        /* clear NAK by writing CNAK */
                        tmp |= AMD_BIT(UDC_EPCTL_CNAK);
                        writel(tmp, &ep->regs->ctl);
                        ep->halted = 0;
                        UDC_QUEUE_CNAK(ep, ep->num);
                }
        }
        spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
        return retval;
}

/* gadget interface */
static const struct usb_ep_ops udc_ep_ops = {
        .enable         = udc_ep_enable,
        .disable        = udc_ep_disable,

        .alloc_request  = udc_alloc_request,
        .free_request   = udc_free_request,

        .queue          = udc_queue,
        .dequeue        = udc_dequeue,

        .set_halt       = udc_set_halt,
        /* fifo ops not implemented */
};

/*-------------------------------------------------------------------------*/

/* Get frame counter (not implemented) */
static int udc_get_frame(struct usb_gadget *gadget)
{
        return -EOPNOTSUPP;
}

/* Remote wakeup gadget interface */
static int udc_wakeup(struct usb_gadget *gadget)
{
        struct udc              *dev;

        if (!gadget)
                return -EINVAL;
        dev = container_of(gadget, struct udc, gadget);
        udc_remote_wakeup(dev);

        return 0;
}

/* gadget operations */
static const struct usb_gadget_ops udc_ops = {
        .wakeup         = udc_wakeup,
        .get_frame      = udc_get_frame,
};

1443 /* Sets up endpoint parameters and adds endpoints to the gadget ep list */
1444 static void make_ep_lists(struct udc *dev)
1445 {
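        /*
         * Note: only three endpoints are exposed to the gadget layer
         * here: the interrupt-IN status ep plus one data IN and one
         * data OUT ep.
         */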
1446         /* make gadget ep lists */
1447         INIT_LIST_HEAD(&dev->gadget.ep_list);
1448         list_add_tail(&dev->ep[UDC_EPIN_STATUS_IX].ep.ep_list,
1449                                                 &dev->gadget.ep_list);
1450         list_add_tail(&dev->ep[UDC_EPIN_IX].ep.ep_list,
1451                                                 &dev->gadget.ep_list);
1452         list_add_tail(&dev->ep[UDC_EPOUT_IX].ep.ep_list,
1453                                                 &dev->gadget.ep_list);
1454
1455         /* fifo config */
1456         dev->ep[UDC_EPIN_STATUS_IX].fifo_depth = UDC_EPIN_SMALLINT_BUFF_SIZE;
1457         if (dev->gadget.speed == USB_SPEED_FULL)
1458                 dev->ep[UDC_EPIN_IX].fifo_depth = UDC_FS_EPIN_BUFF_SIZE;
1459         else if (dev->gadget.speed == USB_SPEED_HIGH)
1460                 dev->ep[UDC_EPIN_IX].fifo_depth = hs_tx_buf;
1461         dev->ep[UDC_EPOUT_IX].fifo_depth = UDC_RXFIFO_SIZE;
1462 }
1463
1464 /* init registers at driver load time */
1465 static int startup_registers(struct udc *dev)
1466 {
1467         u32 tmp;
1468
1469         /* init controller by soft reset */
1470         udc_soft_reset(dev);
1471
1472         /* mask interrupts that are not needed */
1473         udc_mask_unused_interrupts(dev);
1474
1475         /* put into initial config */
1476         udc_basic_init(dev);
1477         /* link up all endpoints */
1478         udc_setup_endpoints(dev);
1479
1480         /* program speed */
1481         tmp = readl(&dev->regs->cfg);
1482         if (use_fullspeed) {
1483                 tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
1484         } else {
1485                 tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_HS, UDC_DEVCFG_SPD);
1486         }
1487         writel(tmp, &dev->regs->cfg);
1488
1489         return 0;
1490 }
1491
1492 /* Inits UDC context */
1493 static void udc_basic_init(struct udc *dev)
1494 {
1495         u32     tmp;
1496
1497         DBG(dev, "udc_basic_init()\n");
1498
1499         dev->gadget.speed = USB_SPEED_UNKNOWN;
1500
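        /*
         * mod_timer() with an expiry in the past makes a pending timer
         * fire immediately; together with set_rde = 0 this effectively
         * cancels the RDE timer.
         */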
1501         /* stop RDE timer */
1502         if (timer_pending(&udc_timer)) {
1503                 set_rde = 0;
1504                 mod_timer(&udc_timer, jiffies - 1);
1505         }
1506         /* stop poll stall timer */
1507         if (timer_pending(&udc_pollstall_timer)) {
1508                 mod_timer(&udc_pollstall_timer, jiffies - 1);
1509         }
1510         /* disable DMA */
1511         tmp = readl(&dev->regs->ctl);
1512         tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
1513         tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_TDE);
1514         writel(tmp, &dev->regs->ctl);
1515
1516         /* enable dynamic CSR programming */
1517         tmp = readl(&dev->regs->cfg);
1518         tmp |= AMD_BIT(UDC_DEVCFG_CSR_PRG);
1519         /* set self powered */
1520         tmp |= AMD_BIT(UDC_DEVCFG_SP);
1521         /* set remote wakeup capable */
1522         tmp |= AMD_BIT(UDC_DEVCFG_RWKP);
1523         writel(tmp, &dev->regs->cfg);
1524
1525         make_ep_lists(dev);
1526
1527         dev->data_ep_enabled = 0;
1528         dev->data_ep_queued = 0;
1529 }
1530
1531 /* Sets initial endpoint parameters */
1532 static void udc_setup_endpoints(struct udc *dev)
1533 {
1534         struct udc_ep   *ep;
1535         u32     tmp;
1536         u32     reg;
1537
1538         DBG(dev, "udc_setup_endpoints()\n");
1539
1540         /* read enum speed */
1541         tmp = readl(&dev->regs->sts);
1542         tmp = AMD_GETBITS(tmp, UDC_DEVSTS_ENUM_SPEED);
1543         if (tmp == UDC_DEVSTS_ENUM_SPEED_HIGH) {
1544                 dev->gadget.speed = USB_SPEED_HIGH;
1545         } else if (tmp == UDC_DEVSTS_ENUM_SPEED_FULL) {
1546                 dev->gadget.speed = USB_SPEED_FULL;
1547         }
1548
1549         /* set basic ep parameters */
1550         for (tmp = 0; tmp < UDC_EP_NUM; tmp++) {
1551                 ep = &dev->ep[tmp];
1552                 ep->dev = dev;
1553                 ep->ep.name = ep_string[tmp];
1554                 ep->num = tmp;
1555                 /* txfifo size is calculated at enable time */
1556                 ep->txfifo = dev->txfifo;
1557
1558                 /* fifo size */
1559                 if (tmp < UDC_EPIN_NUM) {
1560                         ep->fifo_depth = UDC_TXFIFO_SIZE;
1561                         ep->in = 1;
1562                 } else {
1563                         ep->fifo_depth = UDC_RXFIFO_SIZE;
1564                         ep->in = 0;
1565
1566                 }
1567                 ep->regs = &dev->ep_regs[tmp];
1568                 /*
1569                  * Reset the ep only if it was not enabled before: this
1570                  * avoids disabling the interrupts of an ep that the gadget
1571                  * driver has enabled when an ENUM interrupt occurs.
1572                  */
1573                 if (!ep->desc) {
1574                         ep_init(dev->regs, ep);
1575                 }
1576
1577                 if (use_dma) {
1578                         /*
1579                          * ep->dma is not really used, it only indicates
1580                          * that DMA is active (REVISIT: remove this);
1581                          * the dma regs are the dev control regs
1582                          */
1583                         ep->dma = &dev->regs->ctl;
1584
1585                         /* NAK OUT endpoints until enabled - not for ep0 */
1586                         if (tmp != UDC_EP0IN_IX && tmp != UDC_EP0OUT_IX
1587                                                 && tmp > UDC_EPIN_NUM) {
1588                                 /* set NAK */
1589                                 reg = readl(&dev->ep[tmp].regs->ctl);
1590                                 reg |= AMD_BIT(UDC_EPCTL_SNAK);
1591                                 writel(reg, &dev->ep[tmp].regs->ctl);
1592                                 dev->ep[tmp].naking = 1;
1593
1594                         }
1595                 }
1596         }
1597         /* EP0 max packet */
1598         if (dev->gadget.speed == USB_SPEED_FULL) {
1599                 dev->ep[UDC_EP0IN_IX].ep.maxpacket = UDC_FS_EP0IN_MAX_PKT_SIZE;
1600                 dev->ep[UDC_EP0OUT_IX].ep.maxpacket =
1601                                                 UDC_FS_EP0OUT_MAX_PKT_SIZE;
1602         } else if (dev->gadget.speed == USB_SPEED_HIGH) {
1603                 dev->ep[UDC_EP0IN_IX].ep.maxpacket = UDC_EP0IN_MAX_PKT_SIZE;
1604                 dev->ep[UDC_EP0OUT_IX].ep.maxpacket = UDC_EP0OUT_MAX_PKT_SIZE;
1605         }
1606
1607         /*
1608          * with the suspend-bug workaround, the ep0 params for the
1609          * gadget driver are set at the gadget driver's bind() call
1610          */
1611         dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
1612         dev->ep[UDC_EP0IN_IX].halted = 0;
1613         INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
1614
1615         /* init cfg/alt/int */
1616         dev->cur_config = 0;
1617         dev->cur_intf = 0;
1618         dev->cur_alt = 0;
1619 }
1620
1621 /* Bring-up after a Connect event: initial setup to be ready for ep0 events */
1622 static void usb_connect(struct udc *dev)
1623 {
1624
1625         dev_info(&dev->pdev->dev, "USB Connect\n");
1626
1627         dev->connected = 1;
1628
1629         /* put into initial config */
1630         udc_basic_init(dev);
1631
1632         /* enable device setup interrupts */
1633         udc_enable_dev_setup_interrupts(dev);
1634 }
1635
1636 /*
1637  * Calls the gadget driver with a disconnect event, resets the UDC
1638  * and performs the initial bring-up to be ready for ep0 events
1639  */
1640 static void usb_disconnect(struct udc *dev)
1641 {
1642
1643         dev_info(&dev->pdev->dev, "USB Disconnect\n");
1644
1645         dev->connected = 0;
1646
1647         /* mask interrupts */
1648         udc_mask_unused_interrupts(dev);
1649
1650         /* REVISIT there doesn't seem to be a point to having this
1651          * talk to a tasklet ... do it directly, we already hold
1652          * the spinlock needed to process the disconnect.
1653          */
1654
1655         tasklet_schedule(&disconnect_tasklet);
1656 }
1657
1658 /* Tasklet for disconnect to be outside of interrupt context */
1659 static void udc_tasklet_disconnect(unsigned long par)
1660 {
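        /* par holds the address of a struct udc pointer (presumably &udc) */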
1661         struct udc *dev = (struct udc *)(*((struct udc **) par));
1662         u32 tmp;
1663
1664         DBG(dev, "Tasklet disconnect\n");
1665         spin_lock_irq(&dev->lock);
1666
1667         if (dev->driver) {
1668                 spin_unlock(&dev->lock);
1669                 dev->driver->disconnect(&dev->gadget);
1670                 spin_lock(&dev->lock);
1671
1672                 /* empty queues */
1673                 for (tmp = 0; tmp < UDC_EP_NUM; tmp++) {
1674                         empty_req_queue(&dev->ep[tmp]);
1675                 }
1676
1677         }
1678
1679         /* disable ep0 */
1680         ep_init(dev->regs,
1681                         &dev->ep[UDC_EP0IN_IX]);
1682
1683
1684         if (!soft_reset_occured) {
1685                 /* init controller by soft reset */
1686                 udc_soft_reset(dev);
1687                 soft_reset_occured++;
1688         }
1689
1690         /* re-enable dev interrupts */
1691         udc_enable_dev_setup_interrupts(dev);
1692         /* back to full speed ? */
1693         if (use_fullspeed) {
1694                 tmp = readl(&dev->regs->cfg);
1695                 tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
1696                 writel(tmp, &dev->regs->cfg);
1697         }
1698
1699         spin_unlock_irq(&dev->lock);
1700 }
1701
1702 /* Reset the UDC core */
1703 static void udc_soft_reset(struct udc *dev)
1704 {
1705         unsigned long   flags;
1706
1707         DBG(dev, "Soft reset\n");
1708         /*
1709          * clear possibly pending interrupts, because their
1710          * status is lost after soft reset anyway;
1711          * reset ep interrupt status first
1712          */
1713         writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqsts);
1714         /* device int. status reset */
1715         writel(UDC_DEV_MSK_DISABLE, &dev->regs->irqsts);
1716
1717         spin_lock_irqsave(&udc_irq_spinlock, flags);
1718         writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
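        /* dummy read to flush the posted write before dropping the lock */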
1719         readl(&dev->regs->cfg);
1720         spin_unlock_irqrestore(&udc_irq_spinlock, flags);
1721
1722 }
1723
1724 /* RDE timer callback to set RDE bit */
1725 static void udc_timer_function(unsigned long v)
1726 {
1727         u32 tmp;
1728
1729         spin_lock_irq(&udc_irq_spinlock);
1730
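        /*
         * set_rde encoding (interpretation from the code below and
         * its callers):
         *  <= 0: nothing to do, RDE was set elsewhere or the timer
         *        was cancelled by forcing its expiry
         *     1: timer armed while the rxfifo was empty
         *   > 1: the rxfifo was filled, open it on this expiry
         */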
1731         if (set_rde > 0) {
1732                 /*
1733                  * conditionally open the fifo: only if it was
1734                  * already filled on the last timer call
1735                  */
1736                 if (set_rde > 1) {
1737                         /* set RDE to receive setup data */
1738                         tmp = readl(&udc->regs->ctl);
1739                         tmp |= AMD_BIT(UDC_DEVCTL_RDE);
1740                         writel(tmp, &udc->regs->ctl);
1741                         set_rde = -1;
1742                 } else if (readl(&udc->regs->sts)
1743                                 & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) {
1744                         /*
1745                          * if the fifo is empty, keep polling instead
1746                          * of just opening the fifo
1747                          */
1748                         udc_timer.expires = jiffies + HZ/UDC_RDE_TIMER_DIV;
1749                         if (!stop_timer) {
1750                                 add_timer(&udc_timer);
1751                         }
1752                 } else {
1753                         /*
1754                          * fifo contains data now: set up the timer to open
1755                          * the fifo on expiry so that setup packets can be
1756                          * received; when data packets get queued by the
1757                          * gadget layer, the timer is forced to expire with
1758                          * set_rde=0 (RDE is set in udc_queue())
1759                          */
1760                         set_rde++;
1761                         /* debug: lhadmot_timer_start = 221070 */
1762                         udc_timer.expires = jiffies + HZ*UDC_RDE_TIMER_SECONDS;
1763                         if (!stop_timer) {
1764                                 add_timer(&udc_timer);
1765                         }
1766                 }
1767
1768         } else
1769                 set_rde = -1; /* RDE was set by udc_queue() */
1770         spin_unlock_irq(&udc_irq_spinlock);
1771         if (stop_timer)
1772                 complete(&on_exit);
1773
1774 }
1775
1776 /* Handle halt state, used in stall poll timer */
1777 static void udc_handle_halt_state(struct udc_ep *ep)
1778 {
1779         u32 tmp;
1780         /* as long as the ep is halted, watch whether STALL got cleared */
1781         if (ep->halted == 1) {
1782                 tmp = readl(&ep->regs->ctl);
1783                 /* STALL cleared ? */
1784                 if (!(tmp & AMD_BIT(UDC_EPCTL_S))) {
1785                         /*
1786                          * FIXME: MSC spec requires that stall remain
1787                          * even on receiving CLEAR_FEATURE HALT. So
1788                          * we would set STALL again here to be compliant.
1789                          * But with current mass storage drivers this does
1790                          * not work (would produce endless host retries).
1791                          * So we clear halt on CLEAR_FEATURE.
1792                          *
1793                         DBG(ep->dev, "ep %d: set STALL again\n", ep->num);
1794                         tmp |= AMD_BIT(UDC_EPCTL_S);
1795                         writel(tmp, &ep->regs->ctl);*/
1796
1797                         /* clear NAK by writing CNAK */
1798                         tmp |= AMD_BIT(UDC_EPCTL_CNAK);
1799                         writel(tmp, &ep->regs->ctl);
1800                         ep->halted = 0;
1801                         UDC_QUEUE_CNAK(ep, ep->num);
1802                 }
1803         }
1804 }
1805
1806 /* Stall poll timer callback: polls the S bit and handles the halt state */
1807 static void udc_pollstall_timer_function(unsigned long v)
1808 {
1809         struct udc_ep *ep;
1810         int halted = 0;
1811
1812         spin_lock_irq(&udc_stall_spinlock);
1813         /*
1814          * only one IN and one OUT endpoint are handled
1815          * IN poll stall
1816          */
1817         ep = &udc->ep[UDC_EPIN_IX];
1818         udc_handle_halt_state(ep);
1819         if (ep->halted)
1820                 halted = 1;
1821         /* OUT poll stall */
1822         ep = &udc->ep[UDC_EPOUT_IX];
1823         udc_handle_halt_state(ep);
1824         if (ep->halted)
1825                 halted = 1;
1826
1827         /* setup timer again when still halted */
1828         if (!stop_pollstall_timer && halted) {
1829                 udc_pollstall_timer.expires = jiffies +
1830                                         HZ * UDC_POLLSTALL_TIMER_USECONDS
1831                                         / (1000 * 1000);
1832                 add_timer(&udc_pollstall_timer);
1833         }
1834         spin_unlock_irq(&udc_stall_spinlock);
1835
1836         if (stop_pollstall_timer)
1837                 complete(&on_pollstall_exit);
1838 }
1839
1840 /* Inits endpoint 0 so that SETUP packets are processed */
1841 static void activate_control_endpoints(struct udc *dev)
1842 {
1843         u32 tmp;
1844
1845         DBG(dev, "activate_control_endpoints\n");
1846
1847         /* flush fifo */
1848         tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
1849         tmp |= AMD_BIT(UDC_EPCTL_F);
1850         writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
1851
1852         /* set ep0 directions */
1853         dev->ep[UDC_EP0IN_IX].in = 1;
1854         dev->ep[UDC_EP0OUT_IX].in = 0;
1855
1856         /* set buffer size (tx fifo entries) of EP0_IN */
1857         tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);
1858         if (dev->gadget.speed == USB_SPEED_FULL)
1859                 tmp = AMD_ADDBITS(tmp, UDC_FS_EPIN0_BUFF_SIZE,
1860                                         UDC_EPIN_BUFF_SIZE);
1861         else if (dev->gadget.speed == USB_SPEED_HIGH)
1862                 tmp = AMD_ADDBITS(tmp, UDC_EPIN0_BUFF_SIZE,
1863                                         UDC_EPIN_BUFF_SIZE);
1864         writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);
1865
1866         /* set max packet size of EP0_IN */
1867         tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);
1868         if (dev->gadget.speed == USB_SPEED_FULL)
1869                 tmp = AMD_ADDBITS(tmp, UDC_FS_EP0IN_MAX_PKT_SIZE,
1870                                         UDC_EP_MAX_PKT_SIZE);
1871         else if (dev->gadget.speed == USB_SPEED_HIGH)
1872                 tmp = AMD_ADDBITS(tmp, UDC_EP0IN_MAX_PKT_SIZE,
1873                                 UDC_EP_MAX_PKT_SIZE);
1874         writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);
1875
1876         /* set max packet size of EP0_OUT */
1877         tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);
1878         if (dev->gadget.speed == USB_SPEED_FULL)
1879                 tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
1880                                         UDC_EP_MAX_PKT_SIZE);
1881         else if (dev->gadget.speed == USB_SPEED_HIGH)
1882                 tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
1883                                         UDC_EP_MAX_PKT_SIZE);
1884         writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);
1885
1886         /* set max packet size of EP0 in UDC CSR */
1887         tmp = readl(&dev->csr->ne[0]);
1888         if (dev->gadget.speed == USB_SPEED_FULL)
1889                 tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
1890                                         UDC_CSR_NE_MAX_PKT);
1891         else if (dev->gadget.speed == USB_SPEED_HIGH)
1892                 tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
1893                                         UDC_CSR_NE_MAX_PKT);
1894         writel(tmp, &dev->csr->ne[0]);
1895
1896         if (use_dma) {
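                /* mark the OUT descriptor as the last one (L bit) */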
1897                 dev->ep[UDC_EP0OUT_IX].td->status |=
1898                         AMD_BIT(UDC_DMA_OUT_STS_L);
1899                 /* write dma desc address */
1900                 writel(dev->ep[UDC_EP0OUT_IX].td_stp_dma,
1901                         &dev->ep[UDC_EP0OUT_IX].regs->subptr);
1902                 writel(dev->ep[UDC_EP0OUT_IX].td_phys,
1903                         &dev->ep[UDC_EP0OUT_IX].regs->desptr);
1904                 /* stop RDE timer */
1905                 if (timer_pending(&udc_timer)) {
1906                         set_rde = 0;
1907                         mod_timer(&udc_timer, jiffies - 1);
1908                 }
1909                 /* stop pollstall timer */
1910                 if (timer_pending(&udc_pollstall_timer)) {
1911                         mod_timer(&udc_pollstall_timer, jiffies - 1);
1912                 }
1913                 /* enable DMA */
1914                 tmp = readl(&dev->regs->ctl);
1915                 tmp |= AMD_BIT(UDC_DEVCTL_MODE)
1916                                 | AMD_BIT(UDC_DEVCTL_RDE)
1917                                 | AMD_BIT(UDC_DEVCTL_TDE);
1918                 if (use_dma_bufferfill_mode) {
1919                         tmp |= AMD_BIT(UDC_DEVCTL_BF);
1920                 } else if (use_dma_ppb_du) {
1921                         tmp |= AMD_BIT(UDC_DEVCTL_DU);
1922                 }
1923                 writel(tmp, &dev->regs->ctl);
1924         }
1925
1926         /* clear NAK by writing CNAK for EP0IN */
1927         tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
1928         tmp |= AMD_BIT(UDC_EPCTL_CNAK);
1929         writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
1930         dev->ep[UDC_EP0IN_IX].naking = 0;
1931         UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);
1932
1933         /* clear NAK by writing CNAK for EP0OUT */
1934         tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
1935         tmp |= AMD_BIT(UDC_EPCTL_CNAK);
1936         writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
1937         dev->ep[UDC_EP0OUT_IX].naking = 0;
1938         UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
1939 }
1940
1941 /* Make endpoint 0 ready for control traffic */
1942 static int setup_ep0(struct udc *dev)
1943 {
1944         activate_control_endpoints(dev);
1945         /* enable ep0 interrupts */
1946         udc_enable_ep0_interrupts(dev);
1947         /* enable device setup interrupts */
1948         udc_enable_dev_setup_interrupts(dev);
1949
1950         return 0;
1951 }
1952
1953 /* Called by gadget driver to register itself */
1954 int usb_gadget_register_driver(struct usb_gadget_driver *driver)
1955 {
1956         struct udc              *dev = udc;
1957         int                     retval;
1958         u32 tmp;
1959
1960         if (!driver || !driver->bind || !driver->setup
1961                         || driver->speed != USB_SPEED_HIGH)
1962                 return -EINVAL;
1963         if (!dev)
1964                 return -ENODEV;
1965         if (dev->driver)
1966                 return -EBUSY;
1967
1968         driver->driver.bus = NULL;
1969         dev->driver = driver;
1970         dev->gadget.dev.driver = &driver->driver;
1971
1972         retval = driver->bind(&dev->gadget);
1973
1974         /* Some gadget drivers use both ep0 directions.
1975          * NOTE: to gadget driver, ep0 is just one endpoint...
1976          */
1977         dev->ep[UDC_EP0OUT_IX].ep.driver_data =
1978                 dev->ep[UDC_EP0IN_IX].ep.driver_data;
1979
1980         if (retval) {
1981                 DBG(dev, "binding to %s returning %d\n",
1982                                 driver->driver.name, retval);
1983                 dev->driver = NULL;
1984                 dev->gadget.dev.driver = NULL;
1985                 return retval;
1986         }
1987
1988         /* get ready for ep0 traffic */
1989         setup_ep0(dev);
1990
1991         /* clear SD (soft disconnect) */
1992         tmp = readl(&dev->regs->ctl);
1993         tmp = tmp & AMD_CLEAR_BIT(UDC_DEVCTL_SD);
1994         writel(tmp, &dev->regs->ctl);
1995
1996         usb_connect(dev);
1997
1998         return 0;
1999 }
2000 EXPORT_SYMBOL(usb_gadget_register_driver);
2001
2002 /* shutdown requests and disconnect from gadget */
2003 static void
2004 shutdown(struct udc *dev, struct usb_gadget_driver *driver)
2005 __releases(dev->lock)
2006 __acquires(dev->lock)
2007 {
2008         int tmp;
2009
2010         /* empty queues and init hardware */
2011         udc_basic_init(dev);
2012         for (tmp = 0; tmp < UDC_EP_NUM; tmp++) {
2013                 empty_req_queue(&dev->ep[tmp]);
2014         }
2015
2016         if (dev->gadget.speed != USB_SPEED_UNKNOWN) {
2017                 spin_unlock(&dev->lock);
2018                 driver->disconnect(&dev->gadget);
2019                 spin_lock(&dev->lock);
2020         }
2021         /* init */
2022         udc_setup_endpoints(dev);
2023 }
2024
2025 /* Called by gadget driver to unregister itself */
2026 int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
2027 {
2028         struct udc      *dev = udc;
2029         unsigned long   flags;
2030         u32 tmp;
2031
2032         if (!dev)
2033                 return -ENODEV;
2034         if (!driver || driver != dev->driver || !driver->unbind)
2035                 return -EINVAL;
2036
2037         spin_lock_irqsave(&dev->lock, flags);
2038         udc_mask_unused_interrupts(dev);
2039         shutdown(dev, driver);
2040         spin_unlock_irqrestore(&dev->lock, flags);
2041
2042         driver->unbind(&dev->gadget);
2043         dev->gadget.dev.driver = NULL;
2044         dev->driver = NULL;
2045
2046         /* set SD (soft disconnect) */
2047         tmp = readl(&dev->regs->ctl);
2048         tmp |= AMD_BIT(UDC_DEVCTL_SD);
2049         writel(tmp, &dev->regs->ctl);
2050
2051
2052         DBG(dev, "%s: unregistered\n", driver->driver.name);
2053
2054         return 0;
2055 }
2056 EXPORT_SYMBOL(usb_gadget_unregister_driver);
2057
2058
2059 /* Clear pending NAK bits */
2060 static void udc_process_cnak_queue(struct udc *dev)
2061 {
2062         u32 tmp;
2063         u32 reg;
2064
2065         /* check IN endpoints */
2066         DBG(dev, "CNAK pending queue processing\n");
2067         for (tmp = 0; tmp < UDC_EPIN_NUM_USED; tmp++) {
2068                 if (cnak_pending & (1 << tmp)) {
2069                         DBG(dev, "CNAK pending for ep%d\n", tmp);
2070                         /* clear NAK by writing CNAK */
2071                         reg = readl(&dev->ep[tmp].regs->ctl);
2072                         reg |= AMD_BIT(UDC_EPCTL_CNAK);
2073                         writel(reg, &dev->ep[tmp].regs->ctl);
2074                         dev->ep[tmp].naking = 0;
2075                         UDC_QUEUE_CNAK(&dev->ep[tmp], dev->ep[tmp].num);
2076                 }
2077         }
2078         /* ...  and ep0out */
2079         if (cnak_pending & (1 << UDC_EP0OUT_IX)) {
2080                 DBG(dev, "CNAK pending for ep%d\n", UDC_EP0OUT_IX);
2081                 /* clear NAK by writing CNAK */
2082                 reg = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
2083                 reg |= AMD_BIT(UDC_EPCTL_CNAK);
2084                 writel(reg, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
2085                 dev->ep[UDC_EP0OUT_IX].naking = 0;
2086                 UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX],
2087                                 dev->ep[UDC_EP0OUT_IX].num);
2088         }
2089 }
2090
2091 /* Enable RX DMA after a setup packet */
2092 static void udc_ep0_set_rde(struct udc *dev)
2093 {
2094         if (use_dma) {
2095                 /*
2096                  * only enable RXDMA when no data endpoint enabled
2097                  * or data is queued
2098                  */
2099                 if (!dev->data_ep_enabled || dev->data_ep_queued) {
2100                         udc_set_rde(dev);
2101                 } else {
2102                         /*
2103                          * set up timer for enabling RDE (so as not to
2104                          * enable RXFIFO DMA for data endpoints too early)
2105                          */
2106                         if (set_rde != 0 && !timer_pending(&udc_timer)) {
2107                                 udc_timer.expires =
2108                                         jiffies + HZ/UDC_RDE_TIMER_DIV;
2109                                 set_rde = 1;
2110                                 if (!stop_timer) {
2111                                         add_timer(&udc_timer);
2112                                 }
2113                         }
2114                 }
2115         }
2116 }
2117
2118
2119 /* Interrupt handler for data OUT traffic */
2120 static irqreturn_t udc_data_out_isr(struct udc *dev, int ep_ix)
2121 {
2122         irqreturn_t             ret_val = IRQ_NONE;
2123         u32                     tmp;
2124         struct udc_ep           *ep;
2125         struct udc_request      *req;
2126         unsigned int            count;
2127         struct udc_data_dma     *td = NULL;
2128         unsigned                dma_done;
2129
2130         VDBG(dev, "ep%d irq\n", ep_ix);
2131         ep = &dev->ep[ep_ix];
2132
2133         tmp = readl(&ep->regs->sts);
2134         if (use_dma) {
2135                 /* BNA (buffer not available) event ? */
2136                 if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
2137                         DBG(dev, "BNA ep%dout occurred - DESPTR = %x\n",
2138                                         ep->num, readl(&ep->regs->desptr));
2139                         /* clear BNA */
2140                         writel(tmp | AMD_BIT(UDC_EPSTS_BNA), &ep->regs->sts);
2141                         if (!ep->cancel_transfer)
2142                                 ep->bna_occurred = 1;
2143                         else
2144                                 ep->cancel_transfer = 0;
2145                         ret_val = IRQ_HANDLED;
2146                         goto finished;
2147                 }
2148         }
2149         /* HE (host error) event ? */
2150         if (tmp & AMD_BIT(UDC_EPSTS_HE)) {
2151                 dev_err(&dev->pdev->dev, "HE ep%dout occurred\n", ep->num);
2152
2153                 /* clear HE */
2154                 writel(tmp | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
2155                 ret_val = IRQ_HANDLED;
2156                 goto finished;
2157         }
2158
2159         if (!list_empty(&ep->queue)) {
2160
2161                 /* next request */
2162                 req = list_entry(ep->queue.next,
2163                         struct udc_request, queue);
2164         } else {
2165                 req = NULL;
2166                 udc_rxfifo_pending = 1;
2167         }
2168         VDBG(dev, "req = %p\n", req);
2169         /* fifo mode */
2170         if (!use_dma) {
2171
2172                 /* read fifo */
2173                 if (req && udc_rxfifo_read(ep, req)) {
2174                         ret_val = IRQ_HANDLED;
2175
2176                         /* finish */
2177                         complete_req(ep, req, 0);
2178                         /* next request */
2179                         if (!list_empty(&ep->queue) && !ep->halted) {
2180                                 req = list_entry(ep->queue.next,
2181                                         struct udc_request, queue);
2182                         } else
2183                                 req = NULL;
2184                 }
2185
2186         /* DMA */
2187         } else if (!ep->cancel_transfer && req != NULL) {
2188                 ret_val = IRQ_HANDLED;
2189
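                /*
                 * the BS (buffer status) field of the descriptor shows
                 * whether the controller has finished DMA for the buffer:
                 * buffer-fill mode checks the single descriptor,
                 * packet-per-buffer mode checks the last one of the chain
                 */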
2190                 /* check for DMA done */
2191                 if (!use_dma_ppb) {
2192                         dma_done = AMD_GETBITS(req->td_data->status,
2193                                                 UDC_DMA_OUT_STS_BS);
2194                 /* packet per buffer mode - rx bytes */
2195                 } else {
2196                         /*
2197                          * if BNA occurred then recover desc. from
2198                          * BNA dummy desc.
2199                          */
2200                         if (ep->bna_occurred) {
2201                                 VDBG(dev, "Recover desc. from BNA dummy\n");
2202                                 memcpy(req->td_data, ep->bna_dummy_req->td_data,
2203                                                 sizeof(struct udc_data_dma));
2204                                 ep->bna_occurred = 0;
2205                                 udc_init_bna_dummy(ep->req);
2206                         }
2207                         td = udc_get_last_dma_desc(req);
2208                         dma_done = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_BS);
2209                 }
2210                 if (dma_done == UDC_DMA_OUT_STS_BS_DMA_DONE) {
2211                         /* buffer fill mode - rx bytes */
2212                         if (!use_dma_ppb) {
2213                                 /* number of received bytes */
2214                                 count = AMD_GETBITS(req->td_data->status,
2215                                                 UDC_DMA_OUT_STS_RXBYTES);
2216                                 VDBG(dev, "rx bytes=%u\n", count);
2217                         /* packet per buffer mode - rx bytes */
2218                         } else {
2219                                 VDBG(dev, "req->td_data=%p\n", req->td_data);
2220                                 VDBG(dev, "last desc = %p\n", td);
2221                                 /* number of received bytes */
2222                                 if (use_dma_ppb_du) {
2223                                         /* every desc. counts bytes */
2224                                         count = udc_get_ppbdu_rxbytes(req);
2225                                 } else {
2226                                         /* last desc. counts bytes */
2227                                         count = AMD_GETBITS(td->status,
2228                                                 UDC_DMA_OUT_STS_RXBYTES);
2229                                         if (!count && req->req.length
2230                                                 == UDC_DMA_MAXPACKET) {
2231                                                 /*
2232                                                  * on 64k packets the RXBYTES
2233                                                  * field is zero
2234                                                  */
2235                                                 count = UDC_DMA_MAXPACKET;
2236                                         }
2237                                 }
2238                                 VDBG(dev, "last desc rx bytes=%u\n", count);
2239                         }
2240
2241                         tmp = req->req.length - req->req.actual;
2242                         if (count > tmp) {
2243                                 if ((tmp % ep->ep.maxpacket) != 0) {
2244                                         DBG(dev, "%s: rx %db, space=%db\n",
2245                                                 ep->ep.name, count, tmp);
2246                                         req->req.status = -EOVERFLOW;
2247                                 }
2248                                 count = tmp;
2249                         }
2250                         req->req.actual += count;
2251                         req->dma_going = 0;
2252                         /* complete request */
2253                         complete_req(ep, req, 0);
2254
2255                         /* next request */
2256                         if (!list_empty(&ep->queue) && !ep->halted) {
2257                                 req = list_entry(ep->queue.next,
2258                                         struct udc_request,
2259                                         queue);
2260                                 /*
2261                                  * DMA may already be started by udc_queue()
2262                                  * called by the gadget driver's completion
2263                                  * routine. This happens when the queue
2264                                  * holds only one request.
2265                                  */
2266                                 if (req->dma_going == 0) {
2267                                         /* next dma */
2268                                         if (prep_dma(ep, req, GFP_ATOMIC) != 0)
2269                                                 goto finished;
2270                                         /* write desc pointer */
2271                                         writel(req->td_phys,
2272                                                 &ep->regs->desptr);
2273                                         req->dma_going = 1;
2274                                         /* enable DMA */
2275                                         udc_set_rde(dev);
2276                                 }
2277                         } else {
2278                                 /*
2279                                  * implant BNA dummy descriptor to allow
2280                                  * RXFIFO opening by RDE
2281                                  */
2282                                 if (ep->bna_dummy_req) {
2283                                         /* write desc pointer */
2284                                         writel(ep->bna_dummy_req->td_phys,
2285                                                 &ep->regs->desptr);
2286                                         ep->bna_occurred = 0;
2287                                 }
2288
2289                                 /*
2290                                  * schedule timer for setting RDE if queue
2291                                  * remains empty, to allow ep0 packets to
2292                                  * pass through
2293                                  */
2294                                 if (set_rde != 0
2295                                                 && !timer_pending(&udc_timer)) {
2296                                         udc_timer.expires =
2297                                                 jiffies
2298                                                 + HZ*UDC_RDE_TIMER_SECONDS;
2299                                         set_rde = 1;
2300                                         if (!stop_timer) {
2301                                                 add_timer(&udc_timer);
2302                                         }
2303                                 }
2304                                 if (ep->num != UDC_EP0OUT_IX)
2305                                         dev->data_ep_queued = 0;
2306                         }
2307
2308                 } else {
2309                         /*
2310                          * RX DMA must be reenabled for each desc in PPBDU mode
2311                          * and must be enabled for PPBNDU mode in case of BNA
2312                          */
2313                         udc_set_rde(dev);
2314                 }
2315
2316         } else if (ep->cancel_transfer) {
2317                 ret_val = IRQ_HANDLED;
2318                 ep->cancel_transfer = 0;
2319         }
2320
2321         /* check pending CNAKS */
2322         if (cnak_pending) {
2323                 /* CNAK processing only when rxfifo is empty */
2324                 if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) {
2325                         udc_process_cnak_queue(dev);
2326                 }
2327         }
2328
2329         /* clear OUT bits in ep status */
2330         writel(UDC_EPSTS_OUT_CLEAR, &ep->regs->sts);
2331 finished:
2332         return ret_val;
2333 }
2334
2335 /* Interrupt handler for data IN traffic */
2336 static irqreturn_t udc_data_in_isr(struct udc *dev, int ep_ix)
2337 {
2338         irqreturn_t ret_val = IRQ_NONE;
2339         u32 tmp;
2340         u32 epsts;
2341         struct udc_ep *ep;
2342         struct udc_request *req;
2343         struct udc_data_dma *td;
2344         unsigned dma_done;
2345         unsigned len;
2346
2347         ep = &dev->ep[ep_ix];
2348
2349         epsts = readl(&ep->regs->sts);
2350         if (use_dma) {
2351                 /* BNA ? */
2352                 if (epsts & AMD_BIT(UDC_EPSTS_BNA)) {
2353                         dev_err(&dev->pdev->dev,
2354                                 "BNA ep%din occurred - DESPTR = %08lx\n",
2355                                 ep->num,
2356                                 (unsigned long) readl(&ep->regs->desptr));
2357
2358                         /* clear BNA */
2359                         writel(epsts, &ep->regs->sts);
2360                         ret_val = IRQ_HANDLED;
2361                         goto finished;
2362                 }
2363         }
2364         /* HE (host error) event ? */
2365         if (epsts & AMD_BIT(UDC_EPSTS_HE)) {
2366                 dev_err(&dev->pdev->dev,
2367                         "HE ep%din occurred - DESPTR = %08lx\n",
2368                         ep->num, (unsigned long) readl(&ep->regs->desptr));
2369
2370                 /* clear HE */
2371                 writel(epsts | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
2372                 ret_val = IRQ_HANDLED;
2373                 goto finished;
2374         }
2375
2376         /* DMA completion */
2377         if (epsts & AMD_BIT(UDC_EPSTS_TDC)) {
2378                 VDBG(dev, "TDC set - completion\n");
2379                 ret_val = IRQ_HANDLED;
2380                 if (!ep->cancel_transfer && !list_empty(&ep->queue)) {
2381                         req = list_entry(ep->queue.next,
2382                                         struct udc_request, queue);
2383                         if (req) {
2384                                 /*
2385                                  * length bytes transferred;
2386                                  * check dma done of last desc. in PPBDU mode
2387                                  */
2388                                 if (use_dma_ppb_du) {
2389                                         td = udc_get_last_dma_desc(req);
2390                                         if (td) {
2391                                                 dma_done =
2392                                                         AMD_GETBITS(td->status,
2393                                                         UDC_DMA_IN_STS_BS);
2394                                                 /* don't care about DMA done */
2395                                                 req->req.actual =
2396                                                         req->req.length;
2397                                         }
2398                                 } else {
2399                                         /* assume all bytes transferred */
2400                                         req->req.actual = req->req.length;
2401                                 }
2402
2403                                 if (req->req.actual == req->req.length) {
2404                                         /* complete req */
2405                                         complete_req(ep, req, 0);
2406                                         req->dma_going = 0;
2407                                         /* further request available ? */
2408                                         if (list_empty(&ep->queue)) {
2409                                                 /* disable interrupt */
2410                                                 tmp = readl(
2411                                                         &dev->regs->ep_irqmsk);
2412                                                 tmp |= AMD_BIT(ep->num);
2413                                                 writel(tmp,
2414                                                         &dev->regs->ep_irqmsk);
2415                                         }
2416
2417                                 }
2418                         }
2419                 }
2420                 ep->cancel_transfer = 0;
2421
2422         }
2423         /*
2424          * status reg has IN bit set and TDC not set: if TDC was handled,
2425          * IN must not be handled as well (UDC defect ?)
2426          */
2427         if ((epsts & AMD_BIT(UDC_EPSTS_IN))
2428                         && !(epsts & AMD_BIT(UDC_EPSTS_TDC))) {
2429                 ret_val = IRQ_HANDLED;
2430                 if (!list_empty(&ep->queue)) {
2431                         /* next request */
2432                         req = list_entry(ep->queue.next,
2433                                         struct udc_request, queue);
2434                         /* FIFO mode */
2435                         if (!use_dma) {
2436                                 /* write fifo */
2437                                 udc_txfifo_write(ep, &req->req);
2438                                 len = req->req.length - req->req.actual;
2439                                 if (len > ep->ep.maxpacket)
2440                                         len = ep->ep.maxpacket;
2441                                 req->req.actual += len;
2442                                 if (req->req.actual == req->req.length
2443                                         || (len != ep->ep.maxpacket)) {
2444                                         /* complete req */
2445                                         complete_req(ep, req, 0);
2446                                 }
2447                         /* DMA */
2448                         } else if (req && !req->dma_going) {
2449                                 VDBG(dev, "IN DMA : req=%p req->td_data=%p\n",
2450                                         req, req->td_data);
2451                                 if (req->td_data) {
2452
2453                                         req->dma_going = 1;
2454
2455                                         /*
2456                                          * unset L bit of first desc.
2457                                          * for chain
2458                                          */
2459                                         if (use_dma_ppb && req->req.length >
2460                                                         ep->ep.maxpacket) {
2461                                                 req->td_data->status &=
2462                                                         AMD_CLEAR_BIT(
2463                                                         UDC_DMA_IN_STS_L);
2464                                         }
2465
2466                                         /* write desc pointer */
2467                                         writel(req->td_phys, &ep->regs->desptr);
2468
2469                                         /* set HOST READY */
2470                                         req->td_data->status =
2471                                                 AMD_ADDBITS(
2472                                                 req->td_data->status,
2473                                                 UDC_DMA_IN_STS_BS_HOST_READY,
2474                                                 UDC_DMA_IN_STS_BS);
2475
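                                        /*
                                         * the poll demand (P) bit makes the
                                         * controller fetch the descriptor
                                         * and start the IN transfer
                                         */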
2476                                         /* set poll demand bit */
2477                                         tmp = readl(&ep->regs->ctl);
2478                                         tmp |= AMD_BIT(UDC_EPCTL_P);
2479                                         writel(tmp, &ep->regs->ctl);
2480                                 }
2481                         }
2482
2483                 }
2484         }
2485         /* clear status bits */
2486         writel(epsts, &ep->regs->sts);
2487
2488 finished:
2489         return ret_val;
2490
2491 }
2492
2493 /* Interrupt handler for Control OUT traffic */
2494 static irqreturn_t udc_control_out_isr(struct udc *dev)
2495 __releases(dev->lock)
2496 __acquires(dev->lock)
2497 {
2498         irqreturn_t ret_val = IRQ_NONE;
2499         u32 tmp;
2500         int setup_supported;
2501         u32 count;
2502         int set = 0;
2503         struct udc_ep   *ep;
2504         struct udc_ep   *ep_tmp;
2505
2506         ep = &dev->ep[UDC_EP0OUT_IX];
2507
2508         /* clear irq */
2509         writel(AMD_BIT(UDC_EPINT_OUT_EP0), &dev->regs->ep_irqsts);
2510
2511         tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
2512         /* check BNA and clear if set */
2513         if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
2514                 VDBG(dev, "ep0: BNA set\n");
2515                 writel(AMD_BIT(UDC_EPSTS_BNA),
2516                         &dev->ep[UDC_EP0OUT_IX].regs->sts);
2517                 ep->bna_occurred = 1;
2518                 ret_val = IRQ_HANDLED;
2519                 goto finished;
2520         }
2521
2522         /* type of data: SETUP or DATA 0 bytes */
2523         tmp = AMD_GETBITS(tmp, UDC_EPSTS_OUT);
2524         VDBG(dev, "data_typ = %x\n", tmp);
2525
2526         /* setup data */
2527         if (tmp == UDC_EPSTS_OUT_SETUP) {
2528                 ret_val = IRQ_HANDLED;
2529
2530                 ep->dev->stall_ep0in = 0;
2531                 dev->waiting_zlp_ack_ep0in = 0;
2532
2533                 /* set NAK for EP0_IN */
2534                 tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
2535                 tmp |= AMD_BIT(UDC_EPCTL_SNAK);
2536                 writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
2537                 dev->ep[UDC_EP0IN_IX].naking = 1;
2538                 /* get setup data */
2539                 if (use_dma) {
2540
2541                         /* clear OUT bits in ep status */
2542                         writel(UDC_EPSTS_OUT_CLEAR,
2543                                 &dev->ep[UDC_EP0OUT_IX].regs->sts);
2544
2545                         setup_data.data[0] =
2546                                 dev->ep[UDC_EP0OUT_IX].td_stp->data12;
2547                         setup_data.data[1] =
2548                                 dev->ep[UDC_EP0OUT_IX].td_stp->data34;
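                        /*
                         * HOST READY hands the setup descriptor back to
                         * the controller for the next SETUP packet
                         */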
2549                         /* set HOST READY */
2550                         dev->ep[UDC_EP0OUT_IX].td_stp->status =
2551                                         UDC_DMA_STP_STS_BS_HOST_READY;
2552                 } else {
2553                         /* read fifo */
2554                         udc_rxfifo_read_dwords(dev, setup_data.data, 2);
2555                 }
2556
2557                 /* determine direction of control data */
2558                 if ((setup_data.request.bRequestType & USB_DIR_IN) != 0) {
2559                         dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
2560                         /* enable RDE */
2561                         udc_ep0_set_rde(dev);
2562                         set = 0;
2563                 } else {
2564                         dev->gadget.ep0 = &dev->ep[UDC_EP0OUT_IX].ep;
2565                         /*
2566                          * implant BNA dummy descriptor to allow RXFIFO opening
2567                          * by RDE
2568                          */
2569                         if (ep->bna_dummy_req) {
2570                                 /* write desc pointer */
2571                                 writel(ep->bna_dummy_req->td_phys,
2572                                         &dev->ep[UDC_EP0OUT_IX].regs->desptr);
2573                                 ep->bna_occurred = 0;
2574                         }
2575
2576                         set = 1;
2577                         dev->ep[UDC_EP0OUT_IX].naking = 1;
2578                         /*
2579                          * set up timer for enabling RDE (so as not to
2580                          * enable RXFIFO DMA for data too early)
2581                          */
2582                         set_rde = 1;
2583                         if (!timer_pending(&udc_timer)) {
2584                                 udc_timer.expires = jiffies +
2585                                                         HZ/UDC_RDE_TIMER_DIV;
2586                                 if (!stop_timer) {
2587                                         add_timer(&udc_timer);
2588                                 }
2589                         }
2590                 }
2591
2592                 /*
2593                  * a mass storage reset must be processed here because the
2594                  * next packet may be a CLEAR_FEATURE HALT which would not
2595                  * clear the stall bit when no STALL handshake was received
2596                  * before (autostall can cause this)
2597                  */
2598                 if (setup_data.data[0] == UDC_MSCRES_DWORD0
2599                                 && setup_data.data[1] == UDC_MSCRES_DWORD1) {
2600                         DBG(dev, "MSC Reset\n");
2601                         /*
2602                          * clear stall bits;
2603                          * only one IN and one OUT endpoint are handled
2604                          */
2605                         ep_tmp = &udc->ep[UDC_EPIN_IX];
2606                         udc_set_halt(&ep_tmp->ep, 0);
2607                         ep_tmp = &udc->ep[UDC_EPOUT_IX];
2608                         udc_set_halt(&ep_tmp->ep, 0);
2609                 }
2610
2611                 /* call gadget with setup data received */
2612                 spin_unlock(&dev->lock);
2613                 setup_supported = dev->driver->setup(&dev->gadget,
2614                                                 &setup_data.request);
2615                 spin_lock(&dev->lock);
2616
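                /*
                 * setup() return value (interpretation): negative means
                 * the request is unsupported (stall ep0in below);
                 * 0..maxpacket-1 means the IN data phase fits in one
                 * packet (CNAK ep0in now); anything else leaves a ZLP
                 * ack outstanding (waiting_zlp_ack_ep0in)
                 */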
2617                 tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
2618                 /* ep0 in returns data (not zlp) on IN phase */
2619                 if (setup_supported >= 0 && setup_supported <
2620                                 UDC_EP0IN_MAXPACKET) {
2621                         /* clear NAK by writing CNAK in EP0_IN */
2622                         tmp |= AMD_BIT(UDC_EPCTL_CNAK);
2623                         writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
2624                         dev->ep[UDC_EP0IN_IX].naking = 0;
2625                         UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);
2626
2627                 /* if unsupported request then stall */
2628                 } else if (setup_supported < 0) {
2629                         tmp |= AMD_BIT(UDC_EPCTL_S);
2630                         writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
2631                 } else
2632                         dev->waiting_zlp_ack_ep0in = 1;
2633
2634
2635                 /* clear NAK by writing CNAK in EP0_OUT */
2636                 if (!set) {
2637                         tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
2638                         tmp |= AMD_BIT(UDC_EPCTL_CNAK);
2639                         writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
2640                         dev->ep[UDC_EP0OUT_IX].naking = 0;
2641                         UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
2642                 }
2643
2644                 if (!use_dma) {
2645                         /* clear OUT bits in ep status */
2646                         writel(UDC_EPSTS_OUT_CLEAR,
2647                                 &dev->ep[UDC_EP0OUT_IX].regs->sts);
2648                 }
2649
2650         /* data packet 0 bytes */
2651         } else if (tmp == UDC_EPSTS_OUT_DATA) {
2652                 /* clear OUT bits in ep status */
2653                 writel(UDC_EPSTS_OUT_CLEAR, &dev->ep[UDC_EP0OUT_IX].regs->sts);
2654
2655                 /* get setup data: only a zero-length (0 byte) packet */
2656                 if (use_dma) {
2657                         /* no req if 0 packet, just reactivate */
2658                         if (list_empty(&dev->ep[UDC_EP0OUT_IX].queue)) {
2659                                 VDBG(dev, "ZLP\n");
2660
2661                                 /* set HOST READY */
2662                                 dev->ep[UDC_EP0OUT_IX].td->status =
2663                                         AMD_ADDBITS(
2664                                         dev->ep[UDC_EP0OUT_IX].td->status,
2665                                         UDC_DMA_OUT_STS_BS_HOST_READY,
2666                                         UDC_DMA_OUT_STS_BS);
2667                                 /* enable RDE */
2668                                 udc_ep0_set_rde(dev);
2669                                 ret_val = IRQ_HANDLED;
2670
2671                         } else {
2672                                 /* control write */
2673                                 ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
2674                                 /* re-program desc. pointer for possible ZLPs */
2675                                 writel(dev->ep[UDC_EP0OUT_IX].td_phys,
2676                                         &dev->ep[UDC_EP0OUT_IX].regs->desptr);
2677                                 /* enable RDE */
2678                                 udc_ep0_set_rde(dev);
2679                         }
2680                 } else {
2681
2682                         /* number of received bytes */
2683                         count = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
2684                         count = AMD_GETBITS(count, UDC_EPSTS_RX_PKT_SIZE);
2685                         /* OUT data in fifo mode does not work, force 0 */
2686                         count = 0;
2687
2688                         /* 0 packet or real data ? */
2689                         if (count != 0) {
2690                                 ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
2691                         } else {
2692                                 /* dummy read of the confirm register */
2693                                 readl(&dev->ep[UDC_EP0OUT_IX].regs->confirm);
2694                                 ret_val = IRQ_HANDLED;
2695                         }
2696                 }
2697         }
2698
2699         /* check pending CNAKS */
2700         if (cnak_pending) {
2701                 /* CNAK processing only when rxfifo is empty */
2702                 if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) {
2703                         udc_process_cnak_queue(dev);
2704                 }
2705         }
2706
2707 finished:
2708         return ret_val;
2709 }
2710
2711 /* Interrupt handler for Control IN traffic */
2712 static irqreturn_t udc_control_in_isr(struct udc *dev)
2713 {
2714         irqreturn_t ret_val = IRQ_NONE;
2715         u32 tmp;
2716         struct udc_ep *ep;
2717         struct udc_request *req;
2718         unsigned len;
2719
2720         ep = &dev->ep[UDC_EP0IN_IX];
2721
2722         /* clear irq */
2723         writel(AMD_BIT(UDC_EPINT_IN_EP0), &dev->regs->ep_irqsts);
2724
2725         tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->sts);
2726         /* DMA completion */
2727         if (tmp & AMD_BIT(UDC_EPSTS_TDC)) {
2728                 VDBG(dev, "isr: TDC clear\n");
2729                 ret_val = IRQ_HANDLED;
2730
2731                 /* clear TDC bit */
2732                 writel(AMD_BIT(UDC_EPSTS_TDC),
2733                                 &dev->ep[UDC_EP0IN_IX].regs->sts);
2734
2735         /* status reg has IN bit set? */
2736         } else if (tmp & AMD_BIT(UDC_EPSTS_IN)) {
2737                 ret_val = IRQ_HANDLED;
2738
2739                 if (ep->dma) {
2740                         /* clear IN bit */
2741                         writel(AMD_BIT(UDC_EPSTS_IN),
2742                                 &dev->ep[UDC_EP0IN_IX].regs->sts);
2743                 }
2744                 if (dev->stall_ep0in) {
2745                         DBG(dev, "stall ep0in\n");
2746                         /* halt ep0in */
2747                         tmp = readl(&ep->regs->ctl);
2748                         tmp |= AMD_BIT(UDC_EPCTL_S);
2749                         writel(tmp, &ep->regs->ctl);
2750                 } else {
2751                         if (!list_empty(&ep->queue)) {
2752                                 /* next request */
2753                                 req = list_entry(ep->queue.next,
2754                                                 struct udc_request, queue);
2755
2756                                 if (ep->dma) {
2757                                         /* write desc pointer */
2758                                         writel(req->td_phys, &ep->regs->desptr);
2759                                         /* set HOST READY */
2760                                         req->td_data->status =
2761                                                 AMD_ADDBITS(
2762                                                 req->td_data->status,
2763                                                 UDC_DMA_STP_STS_BS_HOST_READY,
2764                                                 UDC_DMA_STP_STS_BS);
2765
2766                                         /* set poll demand bit */
2767                                         tmp =
2768                                         readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
2769                                         tmp |= AMD_BIT(UDC_EPCTL_P);
2770                                         writel(tmp,
2771                                         &dev->ep[UDC_EP0IN_IX].regs->ctl);
2772
2773                                         /* all bytes will be transferred */
2774                                         req->req.actual = req->req.length;
2775
2776                                         /* complete req */
2777                                         complete_req(ep, req, 0);
2778
2779                                 } else {
2780                                         /* write fifo */
2781                                         udc_txfifo_write(ep, &req->req);
2782
2783                                         /* length bytes transferred */
2784                                         len = req->req.length - req->req.actual;
2785                                         if (len > ep->ep.maxpacket)
2786                                                 len = ep->ep.maxpacket;
2787
2788                                         req->req.actual += len;
2789                                         if (req->req.actual == req->req.length
2790                                                 || (len != ep->ep.maxpacket)) {
2791                                                 /* complete req */
2792                                                 complete_req(ep, req, 0);
2793                                         }
2794                                 }
2795
2796                         }
2797                 }
2798                 ep->halted = 0;
2799                 dev->stall_ep0in = 0;
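                     /* PIO mode: ack the IN status bit only after handling */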
2800                 if (!ep->dma) {
2801                         /* clear IN bit */
2802                         writel(AMD_BIT(UDC_EPSTS_IN),
2803                                 &dev->ep[UDC_EP0IN_IX].regs->sts);
2804                 }
2805         }
2806
2807         return ret_val;
2808 }
2809
2810
2811 /* Interrupt handler for global device events */
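/*
 * The controller decodes SET_CONFIG and SET_INTERFACE itself; this handler
 * mirrors the new values into the endpoint NE registers and forwards a
 * synthesized setup request to the gadget driver. The other events handled
 * here are USB reset, suspend, enumeration done and session valid change.
 */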
2812 static irqreturn_t udc_dev_isr(struct udc *dev, u32 dev_irq)
2813 __releases(dev->lock)
2814 __acquires(dev->lock)
2815 {
2816         irqreturn_t ret_val = IRQ_NONE;
2817         u32 tmp;
2818         u32 cfg;
2819         struct udc_ep *ep;
2820         u16 i;
2821         u8 udc_csr_epix;
2822
2823         /* SET_CONFIG irq ? */
2824         if (dev_irq & AMD_BIT(UDC_DEVINT_SC)) {
2825                 ret_val = IRQ_HANDLED;
2826
2827                 /* read config value */
2828                 tmp = readl(&dev->regs->sts);
2829                 cfg = AMD_GETBITS(tmp, UDC_DEVSTS_CFG);
2830                 DBG(dev, "SET_CONFIG interrupt: config=%d\n", cfg);
2831                 dev->cur_config = cfg;
2832                 dev->set_cfg_not_acked = 1;
2833
2834                 /* make usb request for gadget driver */
2835                 memset(&setup_data, 0, sizeof(union udc_setup_data));
2836                 setup_data.request.bRequest = USB_REQ_SET_CONFIGURATION;
2837                 setup_data.request.wValue = cpu_to_le16(dev->cur_config);
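                     /*
                      * the controller has already accepted the new
                      * configuration; this synthesized request only
                      * informs the gadget driver
                      */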
2838
2839                 /* program the NE registers */
2840                 for (i = 0; i < UDC_EP_NUM; i++) {
2841                         ep = &dev->ep[i];
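                             /*
                              * NE index: IN eps map 1:1, OUT ep numbers
                              * carry an offset of UDC_CSR_EP_OUT_IX_OFS
                              * in dev->ep[]
                              */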
2842                         if (ep->in) {
2843
2844                                 /* ep ix in UDC CSR register space */
2845                                 udc_csr_epix = ep->num;
2846
2847
2848                         /* OUT ep */
2849                         } else {
2850                                 /* ep ix in UDC CSR register space */
2851                                 udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
2852                         }
2853
2854                         tmp = readl(&dev->csr->ne[udc_csr_epix]);
2855                         /* ep cfg */
2856                         tmp = AMD_ADDBITS(tmp, ep->dev->cur_config,
2857                                                 UDC_CSR_NE_CFG);
2858                         /* write reg */
2859                         writel(tmp, &dev->csr->ne[udc_csr_epix]);
2860
2861                         /* clear stall bits */
2862                         ep->halted = 0;
2863                         tmp = readl(&ep->regs->ctl);
2864                         tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
2865                         writel(tmp, &ep->regs->ctl);
2866                 }
2867                 /* forward the synthesized setup request to the gadget driver */
2868                 spin_unlock(&dev->lock);
2869                 tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
2870                 spin_lock(&dev->lock);
2871
2872         } /* SET_INTERFACE ? */
2873         if (dev_irq & AMD_BIT(UDC_DEVINT_SI)) {
2874                 ret_val = IRQ_HANDLED;
2875
2876                 dev->set_cfg_not_acked = 1;
2877                 /* read interface and alt setting values */
2878                 tmp = readl(&dev->regs->sts);
2879                 dev->cur_alt = AMD_GETBITS(tmp, UDC_DEVSTS_ALT);
2880                 dev->cur_intf = AMD_GETBITS(tmp, UDC_DEVSTS_INTF);
2881
2882                 /* make usb request for gadget driver */
2883                 memset(&setup_data, 0, sizeof(union udc_setup_data));
2884                 setup_data.request.bRequest = USB_REQ_SET_INTERFACE;
2885                 setup_data.request.bRequestType = USB_RECIP_INTERFACE;
2886                 setup_data.request.wValue = cpu_to_le16(dev->cur_alt);
2887                 setup_data.request.wIndex = cpu_to_le16(dev->cur_intf);
2888
2889                 DBG(dev, "SET_INTERFACE interrupt: alt=%d intf=%d\n",
2890                                 dev->cur_alt, dev->cur_intf);
2891
2892                 /* program the NE registers */
2893                 for (i = 0; i < UDC_EP_NUM; i++) {
2894                         ep = &dev->ep[i];
2895                         if (ep->in) {
2896
2897                                 /* ep ix in UDC CSR register space */
2898                                 udc_csr_epix = ep->num;
2899
2900
2901                         /* OUT ep */
2902                         } else {
2903                                 /* ep ix in UDC CSR register space */
2904                                 udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
2905                         }
2906
2907                         /* set ep values in UDC CSR reg */
2909                         tmp = readl(&dev->csr->ne[udc_csr_epix]);
2910                         /* ep interface */
2911                         tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf,
2912                                                 UDC_CSR_NE_INTF);
2914                         /* ep alt */
2915                         tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt,
2916                                                 UDC_CSR_NE_ALT);
2917                         /* write reg */
2918                         writel(tmp, &dev->csr->ne[udc_csr_epix]);
2919
2920                         /* clear stall bits */
2921                         ep->halted = 0;
2922                         tmp = readl(&ep->regs->ctl);
2923                         tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
2924                         writel(tmp, &ep->regs->ctl);
2925                 }
2926
2927                 /* forward the synthesized setup request to the gadget driver */
2928                 spin_unlock(&dev->lock);
2929                 tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
2930                 spin_lock(&dev->lock);
2931
2932         } /* USB reset */
2933         if (dev_irq & AMD_BIT(UDC_DEVINT_UR)) {
2934                 DBG(dev, "USB Reset interrupt\n");
2935                 ret_val = IRQ_HANDLED;
2936
2937                 /* allow soft reset when suspend occurs */
2938                 soft_reset_occured = 0;
2939
2940                 dev->waiting_zlp_ack_ep0in = 0;
2941                 dev->set_cfg_not_acked = 0;
2942
2943                 /* mask unneeded interrupts */
2944                 udc_mask_unused_interrupts(dev);
2945
2946                 /* call gadget to resume and reset configs etc. */
2947                 spin_unlock(&dev->lock);
2948                 if (dev->sys_suspended && dev->driver->resume) {
2949                         dev->driver->resume(&dev->gadget);
2950                         dev->sys_suspended = 0;
2951                 }
2952                 dev->driver->disconnect(&dev->gadget);
2953                 spin_lock(&dev->lock);
2954
2955                 /* disable ep0 to empty req queue */
2956                 empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
2957                 ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);
2958
2959                 /* soft reset when rxfifo not empty */
2960                 tmp = readl(&dev->regs->sts);
2961                 if (!(tmp & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
2962                                 && !soft_reset_after_usbreset_occured) {
2963                         udc_soft_reset(dev);
2964                         soft_reset_after_usbreset_occured++;
2965                 }
2966
2967                 /*
2968                  * DMA reset to kill potential old DMA hw hang,
2969                  * POLL bit is already reset by ep_init() through
2970                  * disconnect()
2971                  */
2972                 DBG(dev, "DMA machine reset\n");
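                     /* pulse DMARST: set the bit, then restore old cfg */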
2973                 tmp = readl(&dev->regs->cfg);
2974                 writel(tmp | AMD_BIT(UDC_DEVCFG_DMARST), &dev->regs->cfg);
2975                 writel(tmp, &dev->regs->cfg);
2976
2977                 /* put into initial config */
2978                 udc_basic_init(dev);
2979
2980                 /* enable device setup interrupts */
2981                 udc_enable_dev_setup_interrupts(dev);
2982
2983                 /* enable suspend interrupt */
2984                 tmp = readl(&dev->regs->irqmsk);
2985                 tmp &= AMD_UNMASK_BIT(UDC_DEVINT_US);
2986                 writel(tmp, &dev->regs->irqmsk);
2987
2988         } /* USB suspend */
2989         if (dev_irq & AMD_BIT(UDC_DEVINT_US)) {
2990                 DBG(dev, "USB Suspend interrupt\n");
2991                 ret_val = IRQ_HANDLED;
2992                 if (dev->driver->suspend) {
2993                         spin_unlock(&dev->lock);
2994                         dev->sys_suspended = 1;
2995                         dev->driver->suspend(&dev->gadget);
2996                         spin_lock(&dev->lock);
2997                 }
2998         } /* new speed ? */
2999         if (dev_irq & AMD_BIT(UDC_DEVINT_ENUM)) {
3000                 DBG(dev, "ENUM interrupt\n");
3001                 ret_val = IRQ_HANDLED;
3002                 soft_reset_after_usbreset_occured = 0;
3003
3004                 /* disable ep0 to empty req queue */
3005                 empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
3006                 ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);
3007
3008                 /* link up all endpoints */
3009                 udc_setup_endpoints(dev);
3010                 if (dev->gadget.speed == USB_SPEED_HIGH) {
3011                         dev_info(&dev->pdev->dev, "Connect: speed = %s\n",
3012                                 "high");
3013                 } else if (dev->gadget.speed == USB_SPEED_FULL) {
3014                         dev_info(&dev->pdev->dev, "Connect: speed = %s\n",
3015                                 "full");
3016                 }
3017
3018                 /* init ep 0 */
3019                 activate_control_endpoints(dev);
3020
3021                 /* enable ep0 interrupts */
3022                 udc_enable_ep0_interrupts(dev);
3023         }
3024         /* session valid change interrupt */
3025         if (dev_irq & AMD_BIT(UDC_DEVINT_SVC)) {
3026                 DBG(dev, "USB SVC interrupt\n");
3027                 ret_val = IRQ_HANDLED;
3028
3029                 /* session no longer valid indicates a disconnect */
3030                 tmp = readl(&dev->regs->sts);
3031                 if (!(tmp & AMD_BIT(UDC_DEVSTS_SESSVLD))) {
3032                         /* disable suspend interrupt */
3033                         tmp = readl(&dev->regs->irqmsk);
3034                         tmp |= AMD_BIT(UDC_DEVINT_US);
3035                         writel(tmp, &dev->regs->irqmsk);
3036                         DBG(dev, "USB Disconnect (session valid low)\n");
3037                         /* cleanup on disconnect */
3038                         usb_disconnect(udc);
3039                 }
3040
3041         }
3042
3043         return ret_val;
3044 }
3045
3046 /* Interrupt Service Routine, see Linux Kernel Doc for parameters */
3047 static irqreturn_t udc_irq(int irq, void *pdev)
3048 {
3049         struct udc *dev = pdev;
3050         u32 reg;
3051         u16 i;
3052         u32 ep_irq;
3053         irqreturn_t ret_val = IRQ_NONE;
3054
3055         spin_lock(&dev->lock);
3056
3057         /* check for ep irq */
3058         reg = readl(&dev->regs->ep_irqsts);
3059         if (reg) {
3060                 if (reg & AMD_BIT(UDC_EPINT_OUT_EP0))
3061                         ret_val |= udc_control_out_isr(dev);
3062                 if (reg & AMD_BIT(UDC_EPINT_IN_EP0))
3063                         ret_val |= udc_control_in_isr(dev);
3064
3065                 /*
3066                  * data endpoints: each owns one bit in ep_irqsts;
3067                  * iterate them all, ep0 was handled above
3068                  */
3069                 for (i = 1; i < UDC_EP_NUM; i++) {
3070                         ep_irq = 1 << i;
3071                         if (!(reg & ep_irq) || i == UDC_EPINT_OUT_EP0)
3072                                 continue;
3073
3074                         /* clear irq status */
3075                         writel(ep_irq, &dev->regs->ep_irqsts);
3076
3077                         /* irq for out ep ? */
3078                         if (i > UDC_EPIN_NUM)
3079                                 ret_val |= udc_data_out_isr(dev, i);
3080                         else
3081                                 ret_val |= udc_data_in_isr(dev, i);
3082                 }
3083
3084         }
3085
3086
3087         /* check for dev irq */
3088         reg = readl(&dev->regs->irqsts);
3089         if (reg) {
3090                 /* clear irq */
3091                 writel(reg, &dev->regs->irqsts);
3092                 ret_val |= udc_dev_isr(dev, reg);
3093         }
3094
3095
3096         spin_unlock(&dev->lock);
3097         return ret_val;
3098 }
3099
3100 /* Frees the device struct when the gadget device is released */
3101 static void gadget_release(struct device *pdev)
3102 {
3103         struct udc *dev = dev_get_drvdata(pdev);
3104         kfree(dev);
3105 }
3106
3107 /* Cleanup on device remove */
3108 static void udc_remove(struct udc *dev)
3109 {
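             /*
              * teardown handshake: raise the stop flag, wait for a
              * running timer callback to signal completion, then kill
              * the timer; .data != 0 means it was set up in udc_probe()
              */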
3110         /* remove timer */
3111         stop_timer++;
3112         if (timer_pending(&udc_timer))
3113                 wait_for_completion(&on_exit);
3114         if (udc_timer.data)
3115                 del_timer_sync(&udc_timer);
3116         /* remove pollstall timer */
3117         stop_pollstall_timer++;
3118         if (timer_pending(&udc_pollstall_timer))
3119                 wait_for_completion(&on_pollstall_exit);
3120         if (udc_pollstall_timer.data)
3121                 del_timer_sync(&udc_pollstall_timer);
3122         udc = NULL;
3123 }
3124
3125 /* Reset all pci context */
3126 static void udc_pci_remove(struct pci_dev *pdev)
3127 {
3128         struct udc              *dev;
3129
3130         dev = pci_get_drvdata(pdev);
3131
3132         /* gadget driver must not be registered */
3133         BUG_ON(dev->driver != NULL);
3134
3135         /* dma pool cleanup */
3136         if (dev->data_requests)
3137                 pci_pool_destroy(dev->data_requests);
3138
3139         if (dev->stp_requests) {
3140                 /* cleanup DMA descriptors for ep0out */
3141                 pci_pool_free(dev->stp_requests,
3142                         dev->ep[UDC_EP0OUT_IX].td_stp,
3143                         dev->ep[UDC_EP0OUT_IX].td_stp_dma);
3144                 pci_pool_free(dev->stp_requests,
3145                         dev->ep[UDC_EP0OUT_IX].td,
3146                         dev->ep[UDC_EP0OUT_IX].td_phys);
3147
3148                 pci_pool_destroy(dev->stp_requests);
3149         }
3150
3151         /* reset controller, if its registers were mapped */
3152         if (dev->regs)
                     writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
3153         if (dev->irq_registered)
3154                 free_irq(pdev->irq, dev);
3155         if (dev->virt_addr)
3156                 iounmap(dev->virt_addr);
3157         if (dev->mem_region)
3158                 release_mem_region(pci_resource_start(pdev, 0),
3159                                 pci_resource_len(pdev, 0));
3160         if (dev->active)
3161                 pci_disable_device(pdev);
3162
3163         device_unregister(&dev->gadget.dev);
3164         pci_set_drvdata(pdev, NULL);
3165
3166         udc_remove(dev);
3167 }
3168
3169 /* create dma pools on init */
3170 static int init_dma_pools(struct udc *dev)
3171 {
3172         struct udc_stp_dma      *td_stp;
3173         struct udc_data_dma     *td_data;
3174         int retval;
3175
3176         /* enforce a consistent DMA mode setting */
3177         if (use_dma_ppb) {
3178                 use_dma_bufferfill_mode = 0;
3179         } else {
3180                 use_dma_ppb_du = 0;
3181                 use_dma_bufferfill_mode = 1;
3182         }
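             /*
              * the modes are mutually exclusive: packet-per-buffer
              * (ppb, optionally with descriptor update) or buffer-fill
              */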
3183
3184         /* DMA setup */
3185         dev->data_requests = dma_pool_create("data_requests", NULL,
3186                 sizeof(struct udc_data_dma), 0, 0);
3187         if (!dev->data_requests) {
3188                 DBG(dev, "can't get request data pool\n");
3189                 retval = -ENOMEM;
3190                 goto finished;
3191         }
3192
3193         /* EP0 in dma regs = dev control regs */
3194         dev->ep[UDC_EP0IN_IX].dma = &dev->regs->ctl;
3195
3196         /* dma desc for setup data */
3197         dev->stp_requests = dma_pool_create("setup requests", NULL,
3198                 sizeof(struct udc_stp_dma), 0, 0);
3199         if (!dev->stp_requests) {
3200                 DBG(dev, "can't get stp request pool\n");
3201                 retval = -ENOMEM;
3202                 goto finished;
3203         }
3204         /* setup */
3205         td_stp = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
3206                                 &dev->ep[UDC_EP0OUT_IX].td_stp_dma);
3207         if (td_stp == NULL) {
3208                 retval = -ENOMEM;
3209                 goto finished;
3210         }
3211         dev->ep[UDC_EP0OUT_IX].td_stp = td_stp;
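             /* this descriptor receives the 8-byte SETUP packet via DMA */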
3212
3213         /* dma desc for ep0out data, e.g. 0-byte packets */
3214         td_data = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
3215                                 &dev->ep[UDC_EP0OUT_IX].td_phys);
3216         if (td_data == NULL) {
3217                 retval = -ENOMEM;
3218                 goto finished;
3219         }
3220         dev->ep[UDC_EP0OUT_IX].td = td_data;
3221         return 0;
3222
3223 finished:
3224         return retval;
3225 }
3226
3227 /* Called by pci bus driver to init pci context */
3228 static int udc_pci_probe(
3229         struct pci_dev *pdev,
3230         const struct pci_device_id *id
3231 )
3232 {
3233         struct udc              *dev;
3234         unsigned long           resource;
3235         unsigned long           len;
3236         int                     retval = 0;
3237
3238         /* one udc only */
3239         if (udc) {
3240                 dev_dbg(&pdev->dev, "already probed\n");
3241                 return -EBUSY;
3242         }
3243
3244         /* init */
3245         dev = kzalloc(sizeof(struct udc), GFP_KERNEL);
3246         if (!dev) {
3247                 retval = -ENOMEM;
3248                 goto finished;
3249         }
3250
3251         /* pci setup */
3252         if (pci_enable_device(pdev) < 0) {
3253                 kfree(dev);
3254                 dev = NULL;
3255                 retval = -ENODEV;
3256                 goto finished;
3257         }
3258         dev->active = 1;
3259
3260         /* PCI resource allocation */
3261         resource = pci_resource_start(pdev, 0);
3262         len = pci_resource_len(pdev, 0);
3263
3264         if (!request_mem_region(resource, len, name)) {
3265                 dev_dbg(&pdev->dev, "pci memory region already in use\n");
3266                 kfree(dev);
3267                 dev = NULL;
3268                 retval = -EBUSY;
3269                 goto finished;
3270         }
3271         dev->mem_region = 1;
3272
3273         dev->virt_addr = ioremap_nocache(resource, len);
3274         if (dev->virt_addr == NULL) {
3275                 dev_dbg(&pdev->dev, "start address cannot be mapped\n");
                     /* release the mem region held above before dropping dev */
                     release_mem_region(resource, len);
3276                 kfree(dev);
3277                 dev = NULL;
3278                 retval = -EFAULT;
3279                 goto finished;
3280         }
3281
             /* dev->pdev is not assigned yet, so use pdev for messages */
3282         if (!pdev->irq) {
3283                 dev_err(&pdev->dev, "irq not set\n");
                     /* unwind the mapping and mem region held above */
                     iounmap(dev->virt_addr);
                     release_mem_region(resource, len);
3284                 kfree(dev);
3285                 dev = NULL;
3286                 retval = -ENODEV;
3287                 goto finished;
3288         }
3289
3290         if (request_irq(pdev->irq, udc_irq, IRQF_SHARED, name, dev) != 0) {
3291                 dev_dbg(&pdev->dev, "request_irq(%d) fail\n", pdev->irq);
                     iounmap(dev->virt_addr);
                     release_mem_region(resource, len);
3292                 kfree(dev);
3293                 dev = NULL;
3294                 retval = -EBUSY;
3295                 goto finished;
3296         }
3297         dev->irq_registered = 1;
3298
3299         pci_set_drvdata(pdev, dev);
3300
3301         /* chip revision for HS AMD5536 */
3302         dev->chiprev = pdev->revision;
3303
3304         pci_set_master(pdev);
3305         pci_try_set_mwi(pdev);
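             /* bus mastering is required for DMA; MWI is only a hint */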
3306
3307         /* init dma pools */
3308         if (use_dma) {
3309                 retval = init_dma_pools(dev);
3310                 if (retval != 0)
3311                         goto finished;
3312         }
3313
3314         dev->phys_addr = resource;
3315         dev->irq = pdev->irq;
3316         dev->pdev = pdev;
3317         dev->gadget.dev.parent = &pdev->dev;
3318         dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
3319
3320         /* general probing */
3321         if (udc_probe(dev) == 0)
3322                 return 0;
3323
3324 finished:
3325         if (dev)
3326                 udc_pci_remove(pdev);
3327         return retval;
3328 }
3329
3330 /* general probe */
3331 static int udc_probe(struct udc *dev)
3332 {
3333         char            tmp[128];
3334         u32             reg;
3335         int             retval;
3336
3337         /* mark timer as not initialized */
3338         udc_timer.data = 0;
3339         udc_pollstall_timer.data = 0;
3340
3341         /* device struct setup */
3342         spin_lock_init(&dev->lock);
3343         dev->gadget.ops = &udc_ops;
3344
3345         dev_set_name(&dev->gadget.dev, "gadget");
3346         dev->gadget.dev.release = gadget_release;
3347         dev->gadget.name = name;
3349         dev->gadget.is_dualspeed = 1;
3350
3351         /* udc csr registers base */
3352         dev->csr = dev->virt_addr + UDC_CSR_ADDR;
3353         /* dev registers base */
3354         dev->regs = dev->virt_addr + UDC_DEVCFG_ADDR;
3355         /* ep registers base */
3356         dev->ep_regs = dev->virt_addr + UDC_EPREGS_ADDR;
3357         /* fifo base addresses */
3358         dev->rxfifo = (u32 __iomem *)(dev->virt_addr + UDC_RXFIFO_ADDR);
3359         dev->txfifo = (u32 __iomem *)(dev->virt_addr + UDC_TXFIFO_ADDR);
3360
3361         /* init registers, interrupts, ... */
3362         startup_registers(dev);
3363
3364         dev_info(&dev->pdev->dev, "%s\n", mod_desc);
3365
3366         snprintf(tmp, sizeof tmp, "%d", dev->irq);
3367         dev_info(&dev->pdev->dev,
3368                 "irq %s, pci mem %08lx, chip rev %02x(Geode5536 %s)\n",
3369                 tmp, dev->phys_addr, dev->chiprev,
3370                 (dev->chiprev == UDC_HSA0_REV) ? "A0" : "B1");
3371         strcpy(tmp, UDC_DRIVER_VERSION_STRING);
3372         if (dev->chiprev == UDC_HSA0_REV) {
3373                 dev_err(&dev->pdev->dev, "chip revision is A0; too old\n");
3374                 retval = -ENODEV;
3375                 goto finished;
3376         }
3377         dev_info(&dev->pdev->dev,
3378                 "driver version: %s(for Geode5536 B1)\n", tmp);
3379         udc = dev;
3380
3381         retval = device_register(&dev->gadget.dev);
3382         if (retval)
3383                 goto finished;
3384
3385         /* timer init */
3386         init_timer(&udc_timer);
3387         udc_timer.function = udc_timer_function;
3388         udc_timer.data = 1;
3389         /* timer pollstall init */
3390         init_timer(&udc_pollstall_timer);
3391         udc_pollstall_timer.function = udc_pollstall_timer_function;
3392         udc_pollstall_timer.data = 1;
3393
3394         /* set SD (soft disconnect) */
3395         reg = readl(&dev->regs->ctl);
3396         reg |= AMD_BIT(UDC_DEVCTL_SD);
3397         writel(reg, &dev->regs->ctl);
3398
3399         /* print dev register info */
3400         print_regs(dev);
3401
3402         return 0;
3403
3404 finished:
3405         return retval;
3406 }
3407
3408 /* Initiates a remote wakeup */
3409 static int udc_remote_wakeup(struct udc *dev)
3410 {
3411         unsigned long flags;
3412         u32 tmp;
3413
3414         DBG(dev, "UDC initiates remote wakeup\n");
3415
3416         spin_lock_irqsave(&dev->lock, flags);
3417
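             /* pulse the RES (resume) bit to signal wakeup to the host */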
3418         tmp = readl(&dev->regs->ctl);
3419         tmp |= AMD_BIT(UDC_DEVCTL_RES);
3420         writel(tmp, &dev->regs->ctl);
3421         tmp &= AMD_CLEAR_BIT(UDC_DEVCTL_RES);
3422         writel(tmp, &dev->regs->ctl);
3423
3424         spin_unlock_irqrestore(&dev->lock, flags);
3425         return 0;
3426 }
3427
3428 /* PCI device parameters */
3429 static const struct pci_device_id pci_id[] = {
3430         {
3431                 PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x2096),
3432                 .class =        (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
3433                 .class_mask =   0xffffffff,
3434         },
3435         {},
3436 };
3437 MODULE_DEVICE_TABLE(pci, pci_id);
3438
3439 /* PCI functions */
3440 static struct pci_driver udc_pci_driver = {
3441         .name =         (char *) name,
3442         .id_table =     pci_id,
3443         .probe =        udc_pci_probe,
3444         .remove =       udc_pci_remove,
3445 };
3446
3447 /* Inits driver */
3448 static int __init init(void)
3449 {
3450         return pci_register_driver(&udc_pci_driver);
3451 }
3452 module_init(init);
3453
3454 /* Cleans up driver */
3455 static void __exit cleanup(void)
3456 {
3457         pci_unregister_driver(&udc_pci_driver);
3458 }
3459 module_exit(cleanup);
3460
3461 MODULE_DESCRIPTION(UDC_MOD_DESCRIPTION);
3462 MODULE_AUTHOR("Thomas Dahlmann");
3463 MODULE_LICENSE("GPL");
3464