/*
 * drivers/ieee1394/ohci1394.c
 * (merge from master.kernel.org:/pub/scm/linux/kernel/git/aia21/ntfs-2.6)
 */
1 /*
2  * ohci1394.c - driver for OHCI 1394 boards
3  * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
4  *                        Gord Peters <GordPeters@smarttech.com>
5  *              2001      Ben Collins <bcollins@debian.org>
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 2 of the License, or
10  * (at your option) any later version.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program; if not, write to the Free Software Foundation,
19  * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20  */
21
22 /*
23  * Things known to be working:
24  * . Async Request Transmit
25  * . Async Response Receive
26  * . Async Request Receive
27  * . Async Response Transmit
28  * . Iso Receive
29  * . DMA mmap for iso receive
30  * . Config ROM generation
31  *
32  * Things implemented, but still in test phase:
33  * . Iso Transmit
34  * . Async Stream Packets Transmit (Receive done via Iso interface)
35  *
36  * Things not implemented:
37  * . DMA error recovery
38  *
39  * Known bugs:
40  * . devctl BUS_RESET arg confusion (reset type or root holdoff?)
41  *   added LONG_RESET_ROOT and SHORT_RESET_ROOT for root holdoff --kk
42  */
43
44 /*
45  * Acknowledgments:
46  *
47  * Adam J Richter <adam@yggdrasil.com>
48  *  . Use of pci_class to find device
49  *
50  * Emilie Chung <emilie.chung@axis.com>
51  *  . Tip on Async Request Filter
52  *
53  * Pascal Drolet <pascal.drolet@informission.ca>
54  *  . Various tips for optimization and functionnalities
55  *
56  * Robert Ficklin <rficklin@westengineering.com>
57  *  . Loop in irq_handler
58  *
59  * James Goodwin <jamesg@Filanet.com>
60  *  . Various tips on initialization, self-id reception, etc.
61  *
62  * Albrecht Dress <ad@mpifr-bonn.mpg.de>
63  *  . Apple PowerBook detection
64  *
65  * Daniel Kobras <daniel.kobras@student.uni-tuebingen.de>
66  *  . Reset the board properly before leaving + misc cleanups
67  *
68  * Leon van Stuivenberg <leonvs@iae.nl>
69  *  . Bug fixes
70  *
71  * Ben Collins <bcollins@debian.org>
72  *  . Working big-endian support
73  *  . Updated to 2.4.x module scheme (PCI aswell)
74  *  . Config ROM generation
75  *
76  * Manfred Weihs <weihs@ict.tuwien.ac.at>
77  *  . Reworked code for initiating bus resets
78  *    (long, short, with or without hold-off)
79  *
80  * Nandu Santhi <contactnandu@users.sourceforge.net>
81  *  . Added support for nVidia nForce2 onboard Firewire chipset
82  *
83  */
84
85 #include <linux/config.h>
86 #include <linux/kernel.h>
87 #include <linux/list.h>
88 #include <linux/slab.h>
89 #include <linux/interrupt.h>
90 #include <linux/wait.h>
91 #include <linux/errno.h>
92 #include <linux/module.h>
93 #include <linux/moduleparam.h>
94 #include <linux/pci.h>
95 #include <linux/fs.h>
96 #include <linux/poll.h>
97 #include <asm/byteorder.h>
98 #include <asm/atomic.h>
99 #include <asm/uaccess.h>
100 #include <linux/delay.h>
101 #include <linux/spinlock.h>
102
103 #include <asm/pgtable.h>
104 #include <asm/page.h>
105 #include <asm/irq.h>
106 #include <linux/sched.h>
107 #include <linux/types.h>
108 #include <linux/vmalloc.h>
109 #include <linux/init.h>
110
111 #ifdef CONFIG_PPC_PMAC
112 #include <asm/machdep.h>
113 #include <asm/pmac_feature.h>
114 #include <asm/prom.h>
115 #include <asm/pci-bridge.h>
116 #endif
117
118 #include "csr1212.h"
119 #include "ieee1394.h"
120 #include "ieee1394_types.h"
121 #include "hosts.h"
122 #include "dma.h"
123 #include "iso.h"
124 #include "ieee1394_core.h"
125 #include "highlevel.h"
126 #include "ohci1394.h"
127
128 #ifdef CONFIG_IEEE1394_VERBOSEDEBUG
129 #define OHCI1394_DEBUG
130 #endif
131
132 #ifdef DBGMSG
133 #undef DBGMSG
134 #endif
135
136 #ifdef OHCI1394_DEBUG
137 #define DBGMSG(fmt, args...) \
138 printk(KERN_INFO "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
139 #else
140 #define DBGMSG(fmt, args...)
141 #endif
142
143 #ifdef CONFIG_IEEE1394_OHCI_DMA_DEBUG
144 #define OHCI_DMA_ALLOC(fmt, args...) \
145         HPSB_ERR("%s(%s)alloc(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
146                 ++global_outstanding_dmas, ## args)
147 #define OHCI_DMA_FREE(fmt, args...) \
148         HPSB_ERR("%s(%s)free(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
149                 --global_outstanding_dmas, ## args)
150 static int global_outstanding_dmas = 0;
151 #else
152 #define OHCI_DMA_ALLOC(fmt, args...)
153 #define OHCI_DMA_FREE(fmt, args...)
154 #endif
155
156 /* print general (card independent) information */
157 #define PRINT_G(level, fmt, args...) \
158 printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
159
160 /* print card specific information */
161 #define PRINT(level, fmt, args...) \
162 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
163
/* Driver version banner; lives in __devinitdata, printed at probe time. */
static char version[] __devinitdata =
	"$Rev: 1299 $ Ben Collins <bcollins@debian.org>";

/* Module Parameters */
/* phys_dma: enable physical DMA (default = 1).  Runtime-writable via
 * sysfs (mode 0644); consumed elsewhere in this driver. */
static int phys_dma = 1;
module_param(phys_dma, int, 0644);
MODULE_PARM_DESC(phys_dma, "Enable physical dma (default = 1).");
171
172 static void dma_trm_tasklet(unsigned long data);
173 static void dma_trm_reset(struct dma_trm_ctx *d);
174
175 static int alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
176                              enum context_type type, int ctx, int num_desc,
177                              int buf_size, int split_buf_size, int context_base);
178 static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d);
179 static void free_dma_rcv_ctx(struct dma_rcv_ctx *d);
180
181 static int alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
182                              enum context_type type, int ctx, int num_desc,
183                              int context_base);
184
185 static void ohci1394_pci_remove(struct pci_dev *pdev);
186
187 #ifndef __LITTLE_ENDIAN
/* Number of header quadlets to byte-swap for each IEEE-1394 transaction
 * code (indexed by tcode).  A size of 0 marks tcodes with no defined
 * header layout; only compiled on big-endian hosts (see #ifndef above). */
static unsigned hdr_sizes[] =
{
	3,	/* TCODE_WRITEQ */
	4,	/* TCODE_WRITEB */
	3,	/* TCODE_WRITE_RESPONSE */
	0,	/* ??? */
	3,	/* TCODE_READQ */
	4,	/* TCODE_READB */
	3,	/* TCODE_READQ_RESPONSE */
	4,	/* TCODE_READB_RESPONSE */
	1,	/* TCODE_CYCLE_START (???) */
	4,	/* TCODE_LOCK_REQUEST */
	2,	/* TCODE_ISO_DATA */
	4,	/* TCODE_LOCK_RESPONSE */
};
203
204 /* Swap headers */
205 static inline void packet_swab(quadlet_t *data, int tcode)
206 {
207         size_t size = hdr_sizes[tcode];
208
209         if (tcode > TCODE_LOCK_RESPONSE || hdr_sizes[tcode] == 0)
210                 return;
211
212         while (size--)
213                 data[size] = swab32(data[size]);
214 }
215 #else
216 /* Don't waste cycles on same sex byte swaps */
217 #define packet_swab(w,x)
218 #endif /* !LITTLE_ENDIAN */
219
220 /***********************************
221  * IEEE-1394 functionality section *
222  ***********************************/
223
/* Read PHY register 'addr' through the OHCI PhyControl register.
 *
 * Writes the register address plus the read-request bit (0x00008000),
 * then busy-waits (up to OHCI_LOOP_COUNT ms) for the completion bit
 * (0x80000000) to appear.  The data byte is returned from bits 16-23
 * of PhyControl.  Serialized against set_phy_reg() via phy_reg_lock.
 * On timeout an error is logged and whatever was last read is returned.
 */
static u8 get_phy_reg(struct ti_ohci *ohci, u8 addr)
{
	int i;
	unsigned long flags;
	quadlet_t r;

	spin_lock_irqsave (&ohci->phy_reg_lock, flags);

	/* request a read of PHY register 'addr' */
	reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | 0x00008000);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		if (reg_read(ohci, OHCI1394_PhyControl) & 0x80000000)
			break;

		mdelay(1);
	}

	/* final read happens unconditionally; it holds the data byte */
	r = reg_read(ohci, OHCI1394_PhyControl);

	if (i >= OHCI_LOOP_COUNT)
		PRINT (KERN_ERR, "Get PHY Reg timeout [0x%08x/0x%08x/%d]",
		       r, r & 0x80000000, i);

	spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);

	return (r & 0x00ff0000) >> 16;
}
251
252 static void set_phy_reg(struct ti_ohci *ohci, u8 addr, u8 data)
253 {
254         int i;
255         unsigned long flags;
256         u32 r = 0;
257
258         spin_lock_irqsave (&ohci->phy_reg_lock, flags);
259
260         reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | data | 0x00004000);
261
262         for (i = 0; i < OHCI_LOOP_COUNT; i++) {
263                 r = reg_read(ohci, OHCI1394_PhyControl);
264                 if (!(r & 0x00004000))
265                         break;
266
267                 mdelay(1);
268         }
269
270         if (i == OHCI_LOOP_COUNT)
271                 PRINT (KERN_ERR, "Set PHY Reg timeout [0x%08x/0x%08x/%d]",
272                        r, r & 0x00004000, i);
273
274         spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);
275
276         return;
277 }
278
279 /* Or's our value into the current value */
280 static void set_phy_reg_mask(struct ti_ohci *ohci, u8 addr, u8 data)
281 {
282         u8 old;
283
284         old = get_phy_reg (ohci, addr);
285         old |= data;
286         set_phy_reg (ohci, addr, old);
287
288         return;
289 }
290
/* Parse the self-ID packets DMA'd into selfid_buf after a bus reset
 * and pass each valid one to the ieee1394 core.
 *
 * @phyid:  our own physical ID — used only for a debug message here
 * @isroot: root status after the reset — not used in this function
 *
 * On a reception error (error flag in SelfIDCount, or generation
 * mismatch with the buffer header) another bus reset is requested via
 * the PHY, up to OHCI1394_MAX_SELF_ID_ERRORS times.
 */
static void handle_selfid(struct ti_ohci *ohci, struct hpsb_host *host,
				int phyid, int isroot)
{
	quadlet_t *q = ohci->selfid_buf_cpu;
	quadlet_t self_id_count=reg_read(ohci, OHCI1394_SelfIDCount);
	size_t size;
	quadlet_t q0, q1;

	/* Check status of self-id reception */

	if (ohci->selfid_swap)
		q0 = le32_to_cpu(q[0]);
	else
		q0 = q[0];

	/* Bit 31 of SelfIDCount set, or a mismatch between its bits
	 * 16-23 and the same field of the buffer's first quadlet, is
	 * treated as a reception error. */
	if ((self_id_count & 0x80000000) ||
	    ((self_id_count & 0x00FF0000) != (q0 & 0x00FF0000))) {
		PRINT(KERN_ERR,
		      "Error in reception of SelfID packets [0x%08x/0x%08x] (count: %d)",
		      self_id_count, q0, ohci->self_id_errors);

		/* Tip by James Goodwin <jamesg@Filanet.com>:
		 * We had an error, generate another bus reset in response.  */
		if (ohci->self_id_errors<OHCI1394_MAX_SELF_ID_ERRORS) {
			/* 0x40 in PHY register 1 triggers the reset —
			 * see set_phy_reg_mask */
			set_phy_reg_mask (ohci, 1, 0x40);
			ohci->self_id_errors++;
		} else {
			PRINT(KERN_ERR,
			      "Too many errors on SelfID error reception, giving up!");
		}
		return;
	}

	/* SelfID Ok, reset error counter. */
	ohci->self_id_errors = 0;

	/* Quadlet count from SelfIDCount, minus the header quadlet we
	 * just validated; then step past that header. */
	size = ((self_id_count & 0x00001FFC) >> 2) - 1;
	q++;

	/* Each self-ID packet is one quadlet followed by its bitwise
	 * inverse; inconsistent pairs are logged and skipped. */
	while (size > 0) {
		if (ohci->selfid_swap) {
			q0 = le32_to_cpu(q[0]);
			q1 = le32_to_cpu(q[1]);
		} else {
			q0 = q[0];
			q1 = q[1];
		}

		if (q0 == ~q1) {
			DBGMSG ("SelfID packet 0x%x received", q0);
			hpsb_selfid_received(host, cpu_to_be32(q0));
			if (((q0 & 0x3f000000) >> 24) == phyid)
				DBGMSG ("SelfID for this node is 0x%08x", q0);
		} else {
			PRINT(KERN_ERR,
			      "SelfID is inconsistent [0x%08x/0x%08x]", q0, q1);
		}
		q += 2;
		size -= 2;
	}

	DBGMSG("SelfID complete");

	return;
}
356
357 static void ohci_soft_reset(struct ti_ohci *ohci) {
358         int i;
359
360         reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
361
362         for (i = 0; i < OHCI_LOOP_COUNT; i++) {
363                 if (!(reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_softReset))
364                         break;
365                 mdelay(1);
366         }
367         DBGMSG ("Soft reset finished");
368 }
369
370
/* Generate the dma receive prgs and start the context */
/* Build a circular chain of INPUT_MORE descriptors (one per receive
 * buffer), program the context's command pointer at the first one, and
 * start the context running.
 * @generate_irq: when non-zero, each descriptor also raises an
 * interrupt on completion (DMA_CTL_IRQ).
 */
static void initialize_dma_rcv_ctx(struct dma_rcv_ctx *d, int generate_irq)
{
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
	int i;

	/* Make sure the context is halted before rewriting its program. */
	ohci1394_stop_context(ohci, d->ctrlClear, NULL);

	for (i=0; i<d->num_desc; i++) {
		u32 c;

		c = DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE | DMA_CTL_BRANCH;
		if (generate_irq)
			c |= DMA_CTL_IRQ;

		d->prg_cpu[i]->control = cpu_to_le32(c | d->buf_size);

		/* End of descriptor list? */
		/* NOTE(review): the low bits of branchAddress appear to
		 * carry the descriptor count (Z): 1 to continue to the
		 * next descriptor, 0 on the last so the context stalls
		 * there — confirm against the OHCI 1394 spec. */
		if (i + 1 < d->num_desc) {
			d->prg_cpu[i]->branchAddress =
				cpu_to_le32((d->prg_bus[i+1] & 0xfffffff0) | 0x1);
		} else {
			d->prg_cpu[i]->branchAddress =
				cpu_to_le32((d->prg_bus[0] & 0xfffffff0));
		}

		d->prg_cpu[i]->address = cpu_to_le32(d->buf_bus[i]);
		d->prg_cpu[i]->status = cpu_to_le32(d->buf_size);
	}

	/* Restart software bookkeeping at the first buffer. */
	d->buf_ind = 0;
	d->buf_offset = 0;

	if (d->type == DMA_CTX_ISO) {
		/* Clear contextControl */
		reg_write(ohci, d->ctrlClear, 0xffffffff);

		/* Set bufferFill, isochHeader, multichannel for IR context */
		reg_write(ohci, d->ctrlSet, 0xd0000000);

		/* Set the context match register to match on all tags */
		reg_write(ohci, d->ctxtMatch, 0xf0000000);

		/* Clear the multi channel mask high and low registers */
		reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, 0xffffffff);

		/* Set up isoRecvIntMask to generate interrupts */
		reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << d->ctx);
	}

	/* Tell the controller where the first AR program is */
	reg_write(ohci, d->cmdPtr, d->prg_bus[0] | 0x1);

	/* Run context */
	reg_write(ohci, d->ctrlSet, 0x00008000);

	DBGMSG("Receive DMA ctx=%d initialized", d->ctx);
}
430
431 /* Initialize the dma transmit context */
432 static void initialize_dma_trm_ctx(struct dma_trm_ctx *d)
433 {
434         struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
435
436         /* Stop the context */
437         ohci1394_stop_context(ohci, d->ctrlClear, NULL);
438
439         d->prg_ind = 0;
440         d->sent_ind = 0;
441         d->free_prgs = d->num_desc;
442         d->branchAddrPtr = NULL;
443         INIT_LIST_HEAD(&d->fifo_list);
444         INIT_LIST_HEAD(&d->pending_list);
445
446         if (d->type == DMA_CTX_ISO) {
447                 /* enable interrupts */
448                 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << d->ctx);
449         }
450
451         DBGMSG("Transmit DMA ctx=%d initialized", d->ctx);
452 }
453
454 /* Count the number of available iso contexts */
455 static int get_nb_iso_ctx(struct ti_ohci *ohci, int reg)
456 {
457         int i,ctx=0;
458         u32 tmp;
459
460         reg_write(ohci, reg, 0xffffffff);
461         tmp = reg_read(ohci, reg);
462
463         DBGMSG("Iso contexts reg: %08x implemented: %08x", reg, tmp);
464
465         /* Count the number of contexts */
466         for (i=0; i<32; i++) {
467                 if (tmp & 1) ctx++;
468                 tmp >>= 1;
469         }
470         return ctx;
471 }
472
/* Global initialization */
/* Bring the controller to an operational state: program BusOptions and
 * the PHY, set up self-ID reception and the config ROM mapping,
 * initialize all AR/AT DMA contexts, unmask interrupts, enable the
 * link, activate connected PHY ports, and sanity-check the
 * EEPROM-derived max packet size.  The order of register writes below
 * is deliberate — do not reorder. */
static void ohci_initialize(struct ti_ohci *ohci)
{
	char irq_buf[16];
	quadlet_t buf;
	int num_ports, i;

	spin_lock_init(&ohci->phy_reg_lock);
	spin_lock_init(&ohci->event_lock);

	/* Put some defaults to these undefined bus options */
	buf = reg_read(ohci, OHCI1394_BusOptions);
	buf |=  0x60000000; /* Enable CMC and ISC */
	if (hpsb_disable_irm)
		buf &= ~0x80000000;
	else
		buf |=  0x80000000; /* Enable IRMC */
	buf &= ~0x00ff0000; /* XXX: Set cyc_clk_acc to zero for now */
	buf &= ~0x18000000; /* Disable PMC and BMC */
	reg_write(ohci, OHCI1394_BusOptions, buf);

	/* Set the bus number */
	reg_write(ohci, OHCI1394_NodeID, 0x0000ffc0);

	/* Enable posted writes */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_postedWriteEnable);

	/* Clear link control register */
	reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);

	/* Enable cycle timer and cycle master and set the IRM
	 * contender bit in our self ID packets if appropriate. */
	reg_write(ohci, OHCI1394_LinkControlSet,
		  OHCI1394_LinkControl_CycleTimerEnable |
		  OHCI1394_LinkControl_CycleMaster);
	i = get_phy_reg(ohci, 4) | PHY_04_LCTRL;
	if (hpsb_disable_irm)
		i &= ~PHY_04_CONTENDER;
	else
		i |= PHY_04_CONTENDER;
	set_phy_reg(ohci, 4, i);

	/* Set up self-id dma buffer */
	reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->selfid_buf_bus);

	/* enable self-id and phys */
	reg_write(ohci, OHCI1394_LinkControlSet, OHCI1394_LinkControl_RcvSelfID |
		  OHCI1394_LinkControl_RcvPhyPkt);

	/* Set the Config ROM mapping register */
	reg_write(ohci, OHCI1394_ConfigROMmap, ohci->csr_config_rom_bus);

	/* Now get our max packet size */
	/* Derived from BusOptions bits 12-15 (the max_rec field per the
	 * OHCI spec): size = 2^(max_rec+1) bytes. */
	ohci->max_packet_size =
		1<<(((reg_read(ohci, OHCI1394_BusOptions)>>12)&0xf)+1);

	/* Don't accept phy packets into AR request context */
	reg_write(ohci, OHCI1394_LinkControlClear, 0x00000400);

	/* Clear the interrupt mask */
	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);

	/* Clear the interrupt mask */
	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);

	/* Initialize AR dma */
	initialize_dma_rcv_ctx(&ohci->ar_req_context, 0);
	initialize_dma_rcv_ctx(&ohci->ar_resp_context, 0);

	/* Initialize AT dma */
	initialize_dma_trm_ctx(&ohci->at_req_context);
	initialize_dma_trm_ctx(&ohci->at_resp_context);

	/* Initialize IR Legacy DMA channel mask */
	ohci->ir_legacy_channels = 0;

	/*
	 * Accept AT requests from all nodes. This probably
	 * will have to be controlled from the subsystem
	 * on a per node basis.
	 */
	reg_write(ohci,OHCI1394_AsReqFilterHiSet, 0x80000000);

	/* Specify AT retries */
	reg_write(ohci, OHCI1394_ATRetries,
		  OHCI1394_MAX_AT_REQ_RETRIES |
		  (OHCI1394_MAX_AT_RESP_RETRIES<<4) |
		  (OHCI1394_MAX_PHYS_RESP_RETRIES<<8));

	/* We don't want hardware swapping */
	reg_write(ohci, OHCI1394_HCControlClear, OHCI1394_HCControl_noByteSwap);

	/* Enable interrupts */
	reg_write(ohci, OHCI1394_IntMaskSet,
		  OHCI1394_unrecoverableError |
		  OHCI1394_masterIntEnable |
		  OHCI1394_busReset |
		  OHCI1394_selfIDComplete |
		  OHCI1394_RSPkt |
		  OHCI1394_RQPkt |
		  OHCI1394_respTxComplete |
		  OHCI1394_reqTxComplete |
		  OHCI1394_isochRx |
		  OHCI1394_isochTx |
		  OHCI1394_cycleInconsistent);

	/* Enable link */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable);

	buf = reg_read(ohci, OHCI1394_Version);
#ifndef __sparc__
	sprintf (irq_buf, "%d", ohci->dev->irq);
#else
	sprintf (irq_buf, "%s", __irq_itoa(ohci->dev->irq));
#endif
	PRINT(KERN_INFO, "OHCI-1394 %d.%d (PCI): IRQ=[%s]  "
	      "MMIO=[%lx-%lx]  Max Packet=[%d]",
	      ((((buf) >> 16) & 0xf) + (((buf) >> 20) & 0xf) * 10),
	      ((((buf) >> 4) & 0xf) + ((buf) & 0xf) * 10), irq_buf,
	      pci_resource_start(ohci->dev, 0),
	      pci_resource_start(ohci->dev, 0) + OHCI1394_REGISTER_SIZE - 1,
	      ohci->max_packet_size);

	/* Check all of our ports to make sure that if anything is
	 * connected, we enable that port. */
	num_ports = get_phy_reg(ohci, 2) & 0xf;
	for (i = 0; i < num_ports; i++) {
		unsigned int status;

		set_phy_reg(ohci, 7, i);
		status = get_phy_reg(ohci, 8);

		/* NOTE(review): appears to clear a port-disable bit when
		 * the 0x20 status bit is set — confirm against the PHY
		 * register map. */
		if (status & 0x20)
			set_phy_reg(ohci, 8, status & ~1);
	}

	/* Serial EEPROM Sanity check. */
	if ((ohci->max_packet_size < 512) ||
	    (ohci->max_packet_size > 4096)) {
		/* Serial EEPROM contents are suspect, set a sane max packet
		 * size and print the raw contents for bug reports if verbose
		 * debug is enabled. */
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
		int i;
#endif

		PRINT(KERN_DEBUG, "Serial EEPROM has suspicious values, "
		      "attempting to setting max_packet_size to 512 bytes");
		reg_write(ohci, OHCI1394_BusOptions,
			  (reg_read(ohci, OHCI1394_BusOptions) & 0xf007) | 0x8002);
		ohci->max_packet_size = 512;
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
		PRINT(KERN_DEBUG, "    EEPROM Present: %d",
		      (reg_read(ohci, OHCI1394_Version) >> 24) & 0x1);
		reg_write(ohci, OHCI1394_GUID_ROM, 0x80000000);

		/* Wait (bounded) for the GUID ROM read to complete. */
		for (i = 0;
		     ((i < 1000) &&
		      (reg_read(ohci, OHCI1394_GUID_ROM) & 0x80000000)); i++)
			udelay(10);

		for (i = 0; i < 0x20; i++) {
			reg_write(ohci, OHCI1394_GUID_ROM, 0x02000000);
			PRINT(KERN_DEBUG, "    EEPROM %02x: %02x", i,
			      (reg_read(ohci, OHCI1394_GUID_ROM) >> 16) & 0xff);
		}
#endif
	}
}
644
645 /*
646  * Insert a packet in the DMA fifo and generate the DMA prg
647  * FIXME: rewrite the program in order to accept packets crossing
648  *        page boundaries.
649  *        check also that a single dma descriptor doesn't cross a
650  *        page boundary.
651  */
652 static void insert_packet(struct ti_ohci *ohci,
653                           struct dma_trm_ctx *d, struct hpsb_packet *packet)
654 {
655         u32 cycleTimer;
656         int idx = d->prg_ind;
657
658         DBGMSG("Inserting packet for node " NODE_BUS_FMT
659                ", tlabel=%d, tcode=0x%x, speed=%d",
660                NODE_BUS_ARGS(ohci->host, packet->node_id), packet->tlabel,
661                packet->tcode, packet->speed_code);
662
663         d->prg_cpu[idx]->begin.address = 0;
664         d->prg_cpu[idx]->begin.branchAddress = 0;
665
666         if (d->type == DMA_CTX_ASYNC_RESP) {
667                 /*
668                  * For response packets, we need to put a timeout value in
669                  * the 16 lower bits of the status... let's try 1 sec timeout
670                  */
671                 cycleTimer = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
672                 d->prg_cpu[idx]->begin.status = cpu_to_le32(
673                         (((((cycleTimer>>25)&0x7)+1)&0x7)<<13) |
674                         ((cycleTimer&0x01fff000)>>12));
675
676                 DBGMSG("cycleTimer: %08x timeStamp: %08x",
677                        cycleTimer, d->prg_cpu[idx]->begin.status);
678         } else 
679                 d->prg_cpu[idx]->begin.status = 0;
680
681         if ( (packet->type == hpsb_async) || (packet->type == hpsb_raw) ) {
682
683                 if (packet->type == hpsb_raw) {
684                         d->prg_cpu[idx]->data[0] = cpu_to_le32(OHCI1394_TCODE_PHY<<4);
685                         d->prg_cpu[idx]->data[1] = cpu_to_le32(packet->header[0]);
686                         d->prg_cpu[idx]->data[2] = cpu_to_le32(packet->header[1]);
687                 } else {
688                         d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
689                                 (packet->header[0] & 0xFFFF);
690
691                         if (packet->tcode == TCODE_ISO_DATA) {
692                                 /* Sending an async stream packet */
693                                 d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
694                         } else {
695                                 /* Sending a normal async request or response */
696                                 d->prg_cpu[idx]->data[1] =
697                                         (packet->header[1] & 0xFFFF) |
698                                         (packet->header[0] & 0xFFFF0000);
699                                 d->prg_cpu[idx]->data[2] = packet->header[2];
700                                 d->prg_cpu[idx]->data[3] = packet->header[3];
701                         }
702                         packet_swab(d->prg_cpu[idx]->data, packet->tcode);
703                 }
704
705                 if (packet->data_size) { /* block transmit */
706                         if (packet->tcode == TCODE_STREAM_DATA){
707                                 d->prg_cpu[idx]->begin.control =
708                                         cpu_to_le32(DMA_CTL_OUTPUT_MORE |
709                                                     DMA_CTL_IMMEDIATE | 0x8);
710                         } else {
711                                 d->prg_cpu[idx]->begin.control =
712                                         cpu_to_le32(DMA_CTL_OUTPUT_MORE |
713                                                     DMA_CTL_IMMEDIATE | 0x10);
714                         }
715                         d->prg_cpu[idx]->end.control =
716                                 cpu_to_le32(DMA_CTL_OUTPUT_LAST |
717                                             DMA_CTL_IRQ |
718                                             DMA_CTL_BRANCH |
719                                             packet->data_size);
720                         /*
721                          * Check that the packet data buffer
722                          * does not cross a page boundary.
723                          *
724                          * XXX Fix this some day. eth1394 seems to trigger
725                          * it, but ignoring it doesn't seem to cause a
726                          * problem.
727                          */
728 #if 0
729                         if (cross_bound((unsigned long)packet->data,
730                                         packet->data_size)>0) {
731                                 /* FIXME: do something about it */
732                                 PRINT(KERN_ERR,
733                                       "%s: packet data addr: %p size %Zd bytes "
734                                       "cross page boundary", __FUNCTION__,
735                                       packet->data, packet->data_size);
736                         }
737 #endif
738                         d->prg_cpu[idx]->end.address = cpu_to_le32(
739                                 pci_map_single(ohci->dev, packet->data,
740                                                packet->data_size,
741                                                PCI_DMA_TODEVICE));
742                         OHCI_DMA_ALLOC("single, block transmit packet");
743
744                         d->prg_cpu[idx]->end.branchAddress = 0;
745                         d->prg_cpu[idx]->end.status = 0;
746                         if (d->branchAddrPtr)
747                                 *(d->branchAddrPtr) =
748                                         cpu_to_le32(d->prg_bus[idx] | 0x3);
749                         d->branchAddrPtr =
750                                 &(d->prg_cpu[idx]->end.branchAddress);
751                 } else { /* quadlet transmit */
752                         if (packet->type == hpsb_raw)
753                                 d->prg_cpu[idx]->begin.control =
754                                         cpu_to_le32(DMA_CTL_OUTPUT_LAST |
755                                                     DMA_CTL_IMMEDIATE |
756                                                     DMA_CTL_IRQ |
757                                                     DMA_CTL_BRANCH |
758                                                     (packet->header_size + 4));
759                         else
760                                 d->prg_cpu[idx]->begin.control =
761                                         cpu_to_le32(DMA_CTL_OUTPUT_LAST |
762                                                     DMA_CTL_IMMEDIATE |
763                                                     DMA_CTL_IRQ |
764                                                     DMA_CTL_BRANCH |
765                                                     packet->header_size);
766
767                         if (d->branchAddrPtr)
768                                 *(d->branchAddrPtr) =
769                                         cpu_to_le32(d->prg_bus[idx] | 0x2);
770                         d->branchAddrPtr =
771                                 &(d->prg_cpu[idx]->begin.branchAddress);
772                 }
773
774         } else { /* iso packet */
775                 d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
776                         (packet->header[0] & 0xFFFF);
777                 d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
778                 packet_swab(d->prg_cpu[idx]->data, packet->tcode);
779
780                 d->prg_cpu[idx]->begin.control =
781                         cpu_to_le32(DMA_CTL_OUTPUT_MORE |
782                                     DMA_CTL_IMMEDIATE | 0x8);
783                 d->prg_cpu[idx]->end.control =
784                         cpu_to_le32(DMA_CTL_OUTPUT_LAST |
785                                     DMA_CTL_UPDATE |
786                                     DMA_CTL_IRQ |
787                                     DMA_CTL_BRANCH |
788                                     packet->data_size);
789                 d->prg_cpu[idx]->end.address = cpu_to_le32(
790                                 pci_map_single(ohci->dev, packet->data,
791                                 packet->data_size, PCI_DMA_TODEVICE));
792                 OHCI_DMA_ALLOC("single, iso transmit packet");
793
794                 d->prg_cpu[idx]->end.branchAddress = 0;
795                 d->prg_cpu[idx]->end.status = 0;
796                 DBGMSG("Iso xmit context info: header[%08x %08x]\n"
797                        "                       begin=%08x %08x %08x %08x\n"
798                        "                             %08x %08x %08x %08x\n"
799                        "                       end  =%08x %08x %08x %08x",
800                        d->prg_cpu[idx]->data[0], d->prg_cpu[idx]->data[1],
801                        d->prg_cpu[idx]->begin.control,
802                        d->prg_cpu[idx]->begin.address,
803                        d->prg_cpu[idx]->begin.branchAddress,
804                        d->prg_cpu[idx]->begin.status,
805                        d->prg_cpu[idx]->data[0],
806                        d->prg_cpu[idx]->data[1],
807                        d->prg_cpu[idx]->data[2],
808                        d->prg_cpu[idx]->data[3],
809                        d->prg_cpu[idx]->end.control,
810                        d->prg_cpu[idx]->end.address,
811                        d->prg_cpu[idx]->end.branchAddress,
812                        d->prg_cpu[idx]->end.status);
813                 if (d->branchAddrPtr)
814                         *(d->branchAddrPtr) = cpu_to_le32(d->prg_bus[idx] | 0x3);
815                 d->branchAddrPtr = &(d->prg_cpu[idx]->end.branchAddress);
816         }
817         d->free_prgs--;
818
819         /* queue the packet in the appropriate context queue */
820         list_add_tail(&packet->driver_list, &d->fifo_list);
821         d->prg_ind = (d->prg_ind + 1) % d->num_desc;
822 }
823
824 /*
825  * This function fills the FIFO with the (eventual) pending packets
826  * and runs or wakes up the DMA prg if necessary.
827  *
828  * The function MUST be called with the d->lock held.
829  */
static void dma_trm_flush(struct ti_ohci *ohci, struct dma_trm_ctx *d)
{
	struct hpsb_packet *packet, *ptmp;
	int idx = d->prg_ind;	/* program slot the first inserted packet will occupy */
	int z = 0;		/* Z (descriptor count) for the first packet; 0 = nothing inserted */

	/* insert the packets into the dma fifo */
	list_for_each_entry_safe(packet, ptmp, &d->pending_list, driver_list) {
		if (!d->free_prgs)
			break;

		/* For the first packet only */
		if (!z)
			z = (packet->data_size) ? 3 : 2;

		/* Insert the packet */
		list_del_init(&packet->driver_list);
		insert_packet(ohci, d, packet);
	}

	/* Nothing must have been done, either no free_prgs or no packets */
	if (z == 0)
		return;

	/* Is the context running ? (should be unless it is
	   the first packet to be sent in this context)
	   0x8000 is the context RUN bit. */
	if (!(reg_read(ohci, d->ctrlSet) & 0x8000)) {
		u32 nodeId = reg_read(ohci, OHCI1394_NodeID);

		DBGMSG("Starting transmit DMA ctx=%d",d->ctx);
		/* point the context at the first descriptor, Z in the low bits */
		reg_write(ohci, d->cmdPtr, d->prg_bus[idx] | z);

		/* Check that the node id is valid, and not 63 */
		if (!(nodeId & 0x80000000) || (nodeId & 0x3f) == 63)
			PRINT(KERN_ERR, "Running dma failed because Node ID is not valid");
		else
			reg_write(ohci, d->ctrlSet, 0x8000);
	} else {
		/* Wake up the dma context if necessary
		   (0x400 appears to be the context active bit -- TODO confirm
		   against the OHCI spec) */
		if (!(reg_read(ohci, d->ctrlSet) & 0x400))
			DBGMSG("Waking transmit DMA ctx=%d",d->ctx);

		/* do this always, to avoid race condition
		   (0x1000 is the context WAKE bit) */
		reg_write(ohci, d->ctrlSet, 0x1000);
	}

	return;
}
878
879 /* Transmission of an async or iso packet */
static int ohci_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
{
	struct ti_ohci *ohci = host->hostdata;
	struct dma_trm_ctx *d;
	unsigned long flags;

	/* refuse payloads larger than the controller advertises */
	if (packet->data_size > ohci->max_packet_size) {
		PRINT(KERN_ERR,
		      "Transmit packet size %Zd is too big",
		      packet->data_size);
		return -EOVERFLOW;
	}

	/* Decide whether we have an iso, a request, or a response packet */
	if (packet->type == hpsb_raw)
		d = &ohci->at_req_context;
	else if ((packet->tcode == TCODE_ISO_DATA) && (packet->type == hpsb_iso)) {
		/* The legacy IT DMA context is initialized on first
		 * use.  However, the alloc cannot be run from
		 * interrupt context, so we bail out if that is the
		 * case. I don't see anyone sending ISO packets from
		 * interrupt context anyway... */

		if (ohci->it_legacy_context.ohci == NULL) {
			if (in_interrupt()) {
				PRINT(KERN_ERR,
				      "legacy IT context cannot be initialized during interrupt");
				return -EINVAL;
			}

			if (alloc_dma_trm_ctx(ohci, &ohci->it_legacy_context,
					      DMA_CTX_ISO, 0, IT_NUM_DESC,
					      OHCI1394_IsoXmitContextBase) < 0) {
				PRINT(KERN_ERR,
				      "error initializing legacy IT context");
				return -ENOMEM;
			}

			initialize_dma_trm_ctx(&ohci->it_legacy_context);
		}

		d = &ohci->it_legacy_context;
	} else if ((packet->tcode & 0x02) && (packet->tcode != TCODE_ISO_DATA))
		/* tcode bit 1 set (and not iso data) indicates a response */
		d = &ohci->at_resp_context;
	else
		d = &ohci->at_req_context;

	spin_lock_irqsave(&d->lock,flags);

	/* queue the packet and push pending packets toward the hardware */
	list_add_tail(&packet->driver_list, &d->pending_list);

	dma_trm_flush(ohci, d);

	spin_unlock_irqrestore(&d->lock,flags);

	return 0;
}
937
/*
 * hpsb_host driver hook for miscellaneous host controls: bus reset
 * variants, cycle timer read/write, cycle master enable/disable,
 * cancellation of outstanding AT requests, and the legacy ISO channel
 * (un)listen interface.  Returns 0, a command-specific value
 * (GET_CYCLE_COUNTER), -1 for an unknown reset arg, or a negative errno.
 */
static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
{
	struct ti_ohci *ohci = host->hostdata;
	int retval = 0;
	unsigned long flags;
	int phy_reg;

	switch (cmd) {
	case RESET_BUS:
		/* PHY register 1 carries RHB (0x80, root hold-off) and
		 * IBR (0x40, long bus reset); PHY register 5 carries
		 * ISBR (0x40, short/arbitrated bus reset). */
		switch (arg) {
		case SHORT_RESET:
			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 1, phy_reg); /* set IBR */
			break;
		case SHORT_RESET_NO_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			if (phy_reg & 0x80) {
				phy_reg &= ~0x80;
				set_phy_reg(ohci, 1, phy_reg); /* clear RHB */
			}

			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET_NO_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg &= ~0x80;
			phy_reg |= 0x40;
			set_phy_reg(ohci, 1, phy_reg); /* clear RHB, set IBR */
			break;
		case SHORT_RESET_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			if (!(phy_reg & 0x80)) {
				phy_reg |= 0x80;
				set_phy_reg(ohci, 1, phy_reg); /* set RHB */
			}

			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg |= 0xc0;
			set_phy_reg(ohci, 1, phy_reg); /* set RHB and IBR */
			break;
		default:
			retval = -1;
		}
		break;

	case GET_CYCLE_COUNTER:
		/* raw 32-bit cycle timer value is returned as retval */
		retval = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		break;

	case SET_CYCLE_COUNTER:
		reg_write(ohci, OHCI1394_IsochronousCycleTimer, arg);
		break;

	case SET_BUS_ID:
		/* not supported by this driver */
		PRINT(KERN_ERR, "devctl command SET_BUS_ID err");
		break;

	case ACT_CYCLE_MASTER:
		if (arg) {
			/* check if we are root and other nodes are present */
			u32 nodeId = reg_read(ohci, OHCI1394_NodeID);
			if ((nodeId & (1<<30)) && (nodeId & 0x3f)) {
				/*
				 * enable cycleTimer, cycleMaster
				 */
				DBGMSG("Cycle master enabled");
				reg_write(ohci, OHCI1394_LinkControlSet,
					  OHCI1394_LinkControl_CycleTimerEnable |
					  OHCI1394_LinkControl_CycleMaster);
			}
		} else {
			/* disable cycleTimer, cycleMaster, cycleSource */
			reg_write(ohci, OHCI1394_LinkControlClear,
				  OHCI1394_LinkControl_CycleTimerEnable |
				  OHCI1394_LinkControl_CycleMaster |
				  OHCI1394_LinkControl_CycleSource);
		}
		break;

	case CANCEL_REQUESTS:
		/* flush both AT contexts; completed/aborted packets are
		 * returned to the stack by dma_trm_reset */
		DBGMSG("Cancel request received");
		dma_trm_reset(&ohci->at_req_context);
		dma_trm_reset(&ohci->at_resp_context);
		break;

	case ISO_LISTEN_CHANNEL:
	{
		u64 mask;
		struct dma_rcv_ctx *d = &ohci->ir_legacy_context;
		int ir_legacy_active;

		if (arg<0 || arg>63) {
			PRINT(KERN_ERR,
			      "%s: IS0 listen channel %d is out of range",
			      __FUNCTION__, arg);
			return -EFAULT;
		}

		mask = (u64)0x1<<arg;

		spin_lock_irqsave(&ohci->IR_channel_lock, flags);

		if (ohci->ISO_channel_usage & mask) {
			PRINT(KERN_ERR,
			      "%s: IS0 listen channel %d is already used",
			      __FUNCTION__, arg);
			spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
			return -EFAULT;
		}

		/* remember whether the legacy IR context was already live
		 * before we claim this channel */
		ir_legacy_active = ohci->ir_legacy_channels;

		ohci->ISO_channel_usage |= mask;
		ohci->ir_legacy_channels |= mask;

		/* NOTE(review): the lock is dropped here and re-taken below;
		 * a concurrent unlisten in that window could observe
		 * inconsistent state -- TODO confirm callers serialize this */
		spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);

		if (!ir_legacy_active) {
			/* first listened channel: claim an IR DMA context */
			if (ohci1394_register_iso_tasklet(ohci,
					  &ohci->ir_legacy_tasklet) < 0) {
				PRINT(KERN_ERR, "No IR DMA context available");
				return -EBUSY;
			}

			/* the IR context can be assigned to any DMA context
			 * by ohci1394_register_iso_tasklet */
			d->ctx = ohci->ir_legacy_tasklet.context;
			/* IR context registers are spaced 32 bytes apart */
			d->ctrlSet = OHCI1394_IsoRcvContextControlSet +
				32*d->ctx;
			d->ctrlClear = OHCI1394_IsoRcvContextControlClear +
				32*d->ctx;
			d->cmdPtr = OHCI1394_IsoRcvCommandPtr + 32*d->ctx;
			d->ctxtMatch = OHCI1394_IsoRcvContextMatch + 32*d->ctx;

			initialize_dma_rcv_ctx(&ohci->ir_legacy_context, 1);

			if (printk_ratelimit())
				PRINT(KERN_ERR, "IR legacy activated");
		}

		spin_lock_irqsave(&ohci->IR_channel_lock, flags);

		/* enable reception of this channel in the multi-channel mask */
		if (arg>31)
			reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet,
				  1<<(arg-32));
		else
			reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet,
				  1<<arg);

		spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
		DBGMSG("Listening enabled on channel %d", arg);
		break;
	}
	case ISO_UNLISTEN_CHANNEL:
	{
		u64 mask;

		if (arg<0 || arg>63) {
			PRINT(KERN_ERR,
			      "%s: IS0 unlisten channel %d is out of range",
			      __FUNCTION__, arg);
			return -EFAULT;
		}

		mask = (u64)0x1<<arg;

		spin_lock_irqsave(&ohci->IR_channel_lock, flags);

		if (!(ohci->ISO_channel_usage & mask)) {
			PRINT(KERN_ERR,
			      "%s: IS0 unlisten channel %d is not used",
			      __FUNCTION__, arg);
			spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
			return -EFAULT;
		}

		ohci->ISO_channel_usage &= ~mask;
		ohci->ir_legacy_channels &= ~mask;

		/* disable reception of this channel in the multi-channel mask */
		if (arg>31)
			reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear,
				  1<<(arg-32));
		else
			reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear,
				  1<<arg);

		spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
		DBGMSG("Listening disabled on channel %d", arg);

		/* last channel gone: stop the legacy receive context
		 * (checked outside the lock -- see NOTE above) */
		if (ohci->ir_legacy_channels == 0) {
			stop_dma_rcv_ctx(&ohci->ir_legacy_context);
			DBGMSG("ISO legacy receive context stopped");
		}

		break;
	}
	default:
		PRINT_G(KERN_ERR, "ohci_devctl cmd %d not implemented yet",
			cmd);
		break;
	}
	return retval;
}
1154
1155 /***********************************
1156  * rawiso ISO reception            *
1157  ***********************************/
1158
1159 /*
1160   We use either buffer-fill or packet-per-buffer DMA mode. The DMA
1161   buffer is split into "blocks" (regions described by one DMA
1162   descriptor). Each block must be one page or less in size, and
1163   must not cross a page boundary.
1164
1165   There is one little wrinkle with buffer-fill mode: a packet that
1166   starts in the final block may wrap around into the first block. But
1167   the user API expects all packets to be contiguous. Our solution is
1168   to keep the very last page of the DMA buffer in reserve - if a
1169   packet spans the gap, we copy its tail into this page.
1170 */
1171
/*
 * Per-context state for rawiso reception.  One instance is allocated in
 * ohci_iso_recv_init() and hangs off iso->hostdata.  The DMA buffer is
 * carved into equal "blocks" (see the comment block above), each backed
 * by one OHCI descriptor in 'prog'.
 */
struct ohci_iso_recv {
	struct ti_ohci *ohci;

	struct ohci1394_iso_tasklet task;
	int task_active;	/* nonzero once the tasklet is registered */

	enum { BUFFER_FILL_MODE = 0,
	       PACKET_PER_BUFFER_MODE = 1 } dma_mode;

	/* memory and PCI mapping for the DMA descriptors */
	struct dma_prog_region prog;
	struct dma_cmd *block; /* = (struct dma_cmd*) prog.virt */

	/* how many DMA blocks fit in the buffer */
	unsigned int nblocks;

	/* stride of DMA blocks */
	unsigned int buf_stride;

	/* number of blocks to batch between interrupts */
	int block_irq_interval;

	/* block that DMA will finish next */
	int block_dma;

	/* (buffer-fill only) block that the reader will release next */
	int block_reader;

	/* (buffer-fill only) bytes of buffer the reader has released,
	   less than one block */
	int released_bytes;

	/* (buffer-fill only) buffer offset at which the next packet will appear */
	int dma_offset;

	/* OHCI DMA context control registers (absolute offsets, computed
	   from the context number in ohci_iso_recv_init) */
	u32 ContextControlSet;
	u32 ContextControlClear;
	u32 CommandPtr;
	u32 ContextMatch;
};
1213
/* forward declarations for the rawiso receive implementation below */
static void ohci_iso_recv_task(unsigned long data);
static void ohci_iso_recv_stop(struct hpsb_iso *iso);
static void ohci_iso_recv_shutdown(struct hpsb_iso *iso);
static int  ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync);
static void ohci_iso_recv_program(struct hpsb_iso *iso);
1219
/*
 * Allocate and configure the rawiso receive state for 'iso': choose the
 * DMA mode, size the block ring, allocate the descriptor program, claim
 * an IR context, and write the DMA program.  Returns 0 or a negative
 * errno; on failure everything allocated so far is torn down via
 * ohci_iso_recv_shutdown().
 */
static int ohci_iso_recv_init(struct hpsb_iso *iso)
{
	struct ti_ohci *ohci = iso->host->hostdata;
	struct ohci_iso_recv *recv;
	int ctx;
	int ret = -ENOMEM;

	/* NOTE(review): SLAB_KERNEL is the legacy alias for GFP_KERNEL */
	recv = kmalloc(sizeof(*recv), SLAB_KERNEL);
	if (!recv)
		return -ENOMEM;

	/* initialize enough state that ohci_iso_recv_shutdown() is safe
	   to call from the err path below */
	iso->hostdata = recv;
	recv->ohci = ohci;
	recv->task_active = 0;
	dma_prog_region_init(&recv->prog);
	recv->block = NULL;

	/* use buffer-fill mode, unless irq_interval is 1
	   (note: multichannel requires buffer-fill) */

	if (((iso->irq_interval == 1 && iso->dma_mode == HPSB_ISO_DMA_OLD_ABI) ||
	     iso->dma_mode == HPSB_ISO_DMA_PACKET_PER_BUFFER) && iso->channel != -1) {
		recv->dma_mode = PACKET_PER_BUFFER_MODE;
	} else {
		recv->dma_mode = BUFFER_FILL_MODE;
	}

	/* set nblocks, buf_stride, block_irq_interval */

	if (recv->dma_mode == BUFFER_FILL_MODE) {
		recv->buf_stride = PAGE_SIZE;

		/* one block per page of data in the DMA buffer, minus the final guard page */
		recv->nblocks = iso->buf_size/PAGE_SIZE - 1;
		if (recv->nblocks < 3) {
			DBGMSG("ohci_iso_recv_init: DMA buffer too small");
			goto err;
		}

		/* iso->irq_interval is in packets - translate that to blocks */
		if (iso->irq_interval == 1)
			recv->block_irq_interval = 1;
		else
			recv->block_irq_interval = iso->irq_interval *
							((recv->nblocks+1)/iso->buf_packets);
		/* clamp to at most a quarter of the ring, at least 1 */
		if (recv->block_irq_interval*4 > recv->nblocks)
			recv->block_irq_interval = recv->nblocks/4;
		if (recv->block_irq_interval < 1)
			recv->block_irq_interval = 1;

	} else {
		int max_packet_size;

		recv->nblocks = iso->buf_packets;
		recv->block_irq_interval = iso->irq_interval;
		if (recv->block_irq_interval * 4 > iso->buf_packets)
			recv->block_irq_interval = iso->buf_packets / 4;
		/* (misindented, but this is the body of the if above) */
		if (recv->block_irq_interval < 1)
		recv->block_irq_interval = 1;

		/* choose a buffer stride */
		/* must be a power of 2, and <= PAGE_SIZE */

		max_packet_size = iso->buf_size / iso->buf_packets;

		/* round up to the next power of two >= max_packet_size */
		for (recv->buf_stride = 8; recv->buf_stride < max_packet_size;
		    recv->buf_stride *= 2);

		if (recv->buf_stride*iso->buf_packets > iso->buf_size ||
		   recv->buf_stride > PAGE_SIZE) {
			/* this shouldn't happen, but anyway... */
			DBGMSG("ohci_iso_recv_init: problem choosing a buffer stride");
			goto err;
		}
	}

	recv->block_reader = 0;
	recv->released_bytes = 0;
	recv->block_dma = 0;
	recv->dma_offset = 0;

	/* size of DMA program = one descriptor per block */
	if (dma_prog_region_alloc(&recv->prog,
				 sizeof(struct dma_cmd) * recv->nblocks,
				 recv->ohci->dev))
		goto err;

	recv->block = (struct dma_cmd*) recv->prog.kvirt;

	/* channel == -1 selects multichannel reception */
	ohci1394_init_iso_tasklet(&recv->task,
				  iso->channel == -1 ? OHCI_ISO_MULTICHANNEL_RECEIVE :
						       OHCI_ISO_RECEIVE,
				  ohci_iso_recv_task, (unsigned long) iso);

	if (ohci1394_register_iso_tasklet(recv->ohci, &recv->task) < 0) {
		ret = -EBUSY;
		goto err;
	}

	recv->task_active = 1;

	/* recv context registers are spaced 32 bytes apart */
	ctx = recv->task.context;
	recv->ContextControlSet = OHCI1394_IsoRcvContextControlSet + 32 * ctx;
	recv->ContextControlClear = OHCI1394_IsoRcvContextControlClear + 32 * ctx;
	recv->CommandPtr = OHCI1394_IsoRcvCommandPtr + 32 * ctx;
	recv->ContextMatch = OHCI1394_IsoRcvContextMatch + 32 * ctx;

	if (iso->channel == -1) {
		/* clear multi-channel selection mask */
		reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, 0xFFFFFFFF);
		reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, 0xFFFFFFFF);
	}

	/* write the DMA program */
	ohci_iso_recv_program(iso);

	DBGMSG("ohci_iso_recv_init: %s mode, DMA buffer is %lu pages"
	       " (%u bytes), using %u blocks, buf_stride %u, block_irq_interval %d",
	       recv->dma_mode == BUFFER_FILL_MODE ?
	       "buffer-fill" : "packet-per-buffer",
	       iso->buf_size/PAGE_SIZE, iso->buf_size,
	       recv->nblocks, recv->buf_stride, recv->block_irq_interval);

	return 0;

err:
	ohci_iso_recv_shutdown(iso);
	return ret;
}
1350
/* Stop reception: mask this context's IR interrupt, then halt its DMA. */
static void ohci_iso_recv_stop(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;

	/* disable interrupts */
	reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << recv->task.context);

	/* halt DMA */
	ohci1394_stop_context(recv->ohci, recv->ContextControlClear, NULL);
}
1361
/*
 * Tear down everything ohci_iso_recv_init() set up.  Safe to call on a
 * partially-initialized context (used as the init error path): the DMA
 * is only stopped and the tasklet only unregistered if task_active is
 * set, and dma_prog_region_free()/kfree() handle the rest.
 */
static void ohci_iso_recv_shutdown(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;

	if (recv->task_active) {
		ohci_iso_recv_stop(iso);
		ohci1394_unregister_iso_tasklet(recv->ohci, &recv->task);
		recv->task_active = 0;
	}

	dma_prog_region_free(&recv->prog);
	kfree(recv);
	iso->hostdata = NULL;
}
1376
1377 /* set up a "gapped" ring buffer DMA program */
static void ohci_iso_recv_program(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	int blk;

	/* address of 'branch' field in previous DMA descriptor */
	u32 *prev_branch = NULL;

	/* build one INPUT_MORE/INPUT_LAST descriptor per block, each
	 * chained to the next via its branchAddress field */
	for (blk = 0; blk < recv->nblocks; blk++) {
		u32 control;

		/* the DMA descriptor */
		struct dma_cmd *cmd = &recv->block[blk];

		/* offset of the DMA descriptor relative to the DMA prog buffer */
		unsigned long prog_offset = blk * sizeof(struct dma_cmd);

		/* offset of this packet's data within the DMA buffer */
		unsigned long buf_offset = blk * recv->buf_stride;

		if (recv->dma_mode == BUFFER_FILL_MODE) {
			control = 2 << 28; /* INPUT_MORE */
		} else {
			control = 3 << 28; /* INPUT_LAST */
		}

		control |= 8 << 24; /* s = 1, update xferStatus and resCount */

		/* interrupt on last block, and at intervals */
		if (blk == recv->nblocks-1 || (blk % recv->block_irq_interval) == 0) {
			control |= 3 << 20; /* want interrupt */
		}

		control |= 3 << 18; /* enable branch to address */
		control |= recv->buf_stride; /* reqCount = block size */

		/* descriptors are little-endian per the OHCI spec */
		cmd->control = cpu_to_le32(control);
		cmd->address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, buf_offset));
		cmd->branchAddress = 0; /* filled in on next loop */
		cmd->status = cpu_to_le32(recv->buf_stride); /* resCount starts full */

		/* link the previous descriptor to this one (Z=1) */
		if (prev_branch) {
			*prev_branch = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog, prog_offset) | 1);
		}

		prev_branch = &cmd->branchAddress;
	}

	/* the final descriptor's branch address and Z should be left at 0 */
}
1429
1430 /* listen or unlisten to a specific channel (multi-channel mode only) */
1431 static void ohci_iso_recv_change_channel(struct hpsb_iso *iso, unsigned char channel, int listen)
1432 {
1433         struct ohci_iso_recv *recv = iso->hostdata;
1434         int reg, i;
1435
1436         if (channel < 32) {
1437                 reg = listen ? OHCI1394_IRMultiChanMaskLoSet : OHCI1394_IRMultiChanMaskLoClear;
1438                 i = channel;
1439         } else {
1440                 reg = listen ? OHCI1394_IRMultiChanMaskHiSet : OHCI1394_IRMultiChanMaskHiClear;
1441                 i = channel - 32;
1442         }
1443
1444         reg_write(recv->ohci, reg, (1 << i));
1445
1446         /* issue a dummy read to force all PCI writes to be posted immediately */
1447         mb();
1448         reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
1449 }
1450
1451 static void ohci_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
1452 {
1453         struct ohci_iso_recv *recv = iso->hostdata;
1454         int i;
1455
1456         for (i = 0; i < 64; i++) {
1457                 if (mask & (1ULL << i)) {
1458                         if (i < 32)
1459                                 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoSet, (1 << i));
1460                         else
1461                                 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiSet, (1 << (i-32)));
1462                 } else {
1463                         if (i < 32)
1464                                 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, (1 << i));
1465                         else
1466                                 reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, (1 << (i-32)));
1467                 }
1468         }
1469
1470         /* issue a dummy read to force all PCI writes to be posted immediately */
1471         mb();
1472         reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
1473 }
1474
/*
 * Arm and start the IR DMA context: program ContextControl and
 * ContextMatch (tags, channel, optional cycle match and sync field),
 * point CommandPtr at the first descriptor, unmask the context's
 * interrupt and set RUN.  Returns 0, or -1 if the context failed to
 * start.  cycle/sync of -1 mean "don't match".
 */
static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	struct ti_ohci *ohci = recv->ohci;
	u32 command, contextMatch;

	/* clear all ContextControl bits before reprogramming */
	reg_write(recv->ohci, recv->ContextControlClear, 0xFFFFFFFF);
	wmb();

	/* always keep ISO headers */
	command = (1 << 30);

	/* bit 31 selects buffer-fill mode */
	if (recv->dma_mode == BUFFER_FILL_MODE)
		command |= (1 << 31);

	reg_write(recv->ohci, recv->ContextControlSet, command);

	/* match on specified tags */
	contextMatch = tag_mask << 28;

	if (iso->channel == -1) {
		/* enable multichannel reception */
		reg_write(recv->ohci, recv->ContextControlSet, (1 << 28));
	} else {
		/* listen on channel */
		contextMatch |= iso->channel;
	}

	if (cycle != -1) {
		u32 seconds;

		/* enable cycleMatch */
		reg_write(recv->ohci, recv->ContextControlSet, (1 << 29));

		/* set starting cycle */
		cycle &= 0x1FFF;

		/* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
		   just snarf them from the current time */
		seconds = reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer) >> 25;

		/* advance one second to give some extra time for DMA to start */
		seconds += 1;

		cycle |= (seconds & 3) << 13;

		contextMatch |= cycle << 12;
	}

	if (sync != -1) {
		/* set sync flag on first DMA descriptor */
		struct dma_cmd *cmd = &recv->block[recv->block_dma];
		cmd->control |= cpu_to_le32(DMA_CTL_WAIT);

		/* match sync field */
		contextMatch |= (sync&0xf)<<8;
	}

	reg_write(recv->ohci, recv->ContextMatch, contextMatch);

	/* address of first descriptor block */
	command = dma_prog_region_offset_to_bus(&recv->prog,
						recv->block_dma * sizeof(struct dma_cmd));
	command |= 1; /* Z=1 */

	reg_write(recv->ohci, recv->CommandPtr, command);

	/* enable interrupts */
	reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskSet, 1 << recv->task.context);

	wmb();

	/* run (0x8000 is the RUN bit) */
	reg_write(recv->ohci, recv->ContextControlSet, 0x8000);

	/* issue a dummy read of the cycle timer register to force
	   all PCI writes to be posted immediately */
	mb();
	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);

	/* check RUN */
	if (!(reg_read(recv->ohci, recv->ContextControlSet) & 0x8000)) {
		PRINT(KERN_ERR,
		      "Error starting IR DMA (ContextControl 0x%08x)\n",
		      reg_read(recv->ohci, recv->ContextControlSet));
		return -1;
	}

	return 0;
}
1565
1566 static void ohci_iso_recv_release_block(struct ohci_iso_recv *recv, int block)
1567 {
1568         /* re-use the DMA descriptor for the block */
1569         /* by linking the previous descriptor to it */
1570
1571         int next_i = block;
1572         int prev_i = (next_i == 0) ? (recv->nblocks - 1) : (next_i - 1);
1573
1574         struct dma_cmd *next = &recv->block[next_i];
1575         struct dma_cmd *prev = &recv->block[prev_i];
1576         
1577         /* ignore out-of-range requests */
1578         if ((block < 0) || (block > recv->nblocks))
1579                 return;
1580
1581         /* 'next' becomes the new end of the DMA chain,
1582            so disable branch and enable interrupt */
1583         next->branchAddress = 0;
1584         next->control |= cpu_to_le32(3 << 20);
1585         next->status = cpu_to_le32(recv->buf_stride);
1586
1587         /* link prev to next */
1588         prev->branchAddress = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog,
1589                                                                         sizeof(struct dma_cmd) * next_i)
1590                                           | 1); /* Z=1 */
1591
1592         /* disable interrupt on previous DMA descriptor, except at intervals */
1593         if ((prev_i % recv->block_irq_interval) == 0) {
1594                 prev->control |= cpu_to_le32(3 << 20); /* enable interrupt */
1595         } else {
1596                 prev->control &= cpu_to_le32(~(3<<20)); /* disable interrupt */
1597         }
1598         wmb();
1599
1600         /* wake up DMA in case it fell asleep */
1601         reg_write(recv->ohci, recv->ContextControlSet, (1 << 12));
1602 }
1603
1604 static void ohci_iso_recv_bufferfill_release(struct ohci_iso_recv *recv,
1605                                              struct hpsb_iso_packet_info *info)
1606 {
1607         /* release the memory where the packet was */
1608         recv->released_bytes += info->total_len;
1609
1610         /* have we released enough memory for one block? */
1611         while (recv->released_bytes > recv->buf_stride) {
1612                 ohci_iso_recv_release_block(recv, recv->block_reader);
1613                 recv->block_reader = (recv->block_reader + 1) % recv->nblocks;
1614                 recv->released_bytes -= recv->buf_stride;
1615         }
1616 }
1617
1618 static inline void ohci_iso_recv_release(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
1619 {
1620         struct ohci_iso_recv *recv = iso->hostdata;
1621         if (recv->dma_mode == BUFFER_FILL_MODE) {
1622                 ohci_iso_recv_bufferfill_release(recv, info);
1623         } else {
1624                 ohci_iso_recv_release_block(recv, info - iso->infos);
1625         }
1626 }
1627
/* parse all packets from blocks that have been fully received */
static void ohci_iso_recv_bufferfill_parse(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
{
        int wake = 0;
        int runaway = 0;
        struct ti_ohci *ohci = recv->ohci;  /* referenced by the PRINT macro */

        while (1) {
                /* we expect the next parsable packet to begin at recv->dma_offset */
                /* note: packet layout is as shown in section 10.6.1.1 of the OHCI spec */

                unsigned int offset;
                unsigned short len, cycle, total_len;
                unsigned char channel, tag, sy;

                unsigned char *p = iso->data_buf.kvirt;

                unsigned int this_block = recv->dma_offset/recv->buf_stride;

                /* don't loop indefinitely */
                if (runaway++ > 100000) {
                        atomic_inc(&iso->overflows);
                        PRINT(KERN_ERR,
                              "IR DMA error - Runaway during buffer parsing!\n");
                        break;
                }

                /* stop parsing once we arrive at block_dma (i.e. don't get ahead of DMA) */
                if (this_block == recv->block_dma)
                        break;

                wake = 1;

                /* parse data length, tag, channel, and sy */

                /* note: we keep our own local copies of 'len' and 'offset'
                   so the user can't mess with them by poking in the mmap area */

                /* 16-bit dataLength lives in bytes 2-3 of the header quadlet
                   (assembled little-endian from the raw buffer) */
                len = p[recv->dma_offset+2] | (p[recv->dma_offset+3] << 8);

                if (len > 4096) {
                        /* report only - parsing continues with the bogus value,
                           which will likely desynchronize the offset bookkeeping */
                        PRINT(KERN_ERR,
                              "IR DMA error - bogus 'len' value %u\n", len);
                }

                channel = p[recv->dma_offset+1] & 0x3F;
                tag = p[recv->dma_offset+1] >> 6;
                sy = p[recv->dma_offset+0] & 0xF;

                /* advance to data payload */
                recv->dma_offset += 4;

                /* check for wrap-around */
                if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
                        recv->dma_offset -= recv->buf_stride*recv->nblocks;
                }

                /* dma_offset now points to the first byte of the data payload */
                offset = recv->dma_offset;

                /* advance to xferStatus/timeStamp */
                recv->dma_offset += len;

                total_len = len + 8; /* 8 bytes header+trailer in OHCI packet */
                /* payload is padded to 4 bytes */
                if (len % 4) {
                        recv->dma_offset += 4 - (len%4);
                        total_len += 4 - (len%4);
                }

                /* check for wrap-around */
                if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
                        /* uh oh, the packet data wraps from the last
                           to the first DMA block - make the packet
                           contiguous by copying its "tail" into the
                           guard page */

                        int guard_off = recv->buf_stride*recv->nblocks;
                        int tail_len = len - (guard_off - offset);

                        /* copy only when the tail is a sane size; assumes the
                           buffer really has a guard page of at least buf_stride
                           bytes past guard_off - TODO confirm against the
                           allocation in ohci_iso_recv_init */
                        if (tail_len > 0  && tail_len < recv->buf_stride) {
                                memcpy(iso->data_buf.kvirt + guard_off,
                                       iso->data_buf.kvirt,
                                       tail_len);
                        }

                        recv->dma_offset -= recv->buf_stride*recv->nblocks;
                }

                /* parse timestamp */
                cycle = p[recv->dma_offset+0] | (p[recv->dma_offset+1]<<8);
                cycle &= 0x1FFF;

                /* advance to next packet */
                recv->dma_offset += 4;

                /* check for wrap-around */
                if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
                        recv->dma_offset -= recv->buf_stride*recv->nblocks;
                }

                hpsb_iso_packet_received(iso, offset, len, total_len, cycle, channel, tag, sy);
        }

        if (wake)
                hpsb_iso_wake(iso);
}
1735
1736 static void ohci_iso_recv_bufferfill_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1737 {
1738         int loop;
1739         struct ti_ohci *ohci = recv->ohci;
1740
1741         /* loop over all blocks */
1742         for (loop = 0; loop < recv->nblocks; loop++) {
1743
1744                 /* check block_dma to see if it's done */
1745                 struct dma_cmd *im = &recv->block[recv->block_dma];
1746
1747                 /* check the DMA descriptor for new writes to xferStatus */
1748                 u16 xferstatus = le32_to_cpu(im->status) >> 16;
1749
1750                 /* rescount is the number of bytes *remaining to be written* in the block */
1751                 u16 rescount = le32_to_cpu(im->status) & 0xFFFF;
1752
1753                 unsigned char event = xferstatus & 0x1F;
1754
1755                 if (!event) {
1756                         /* nothing has happened to this block yet */
1757                         break;
1758                 }
1759
1760                 if (event != 0x11) {
1761                         atomic_inc(&iso->overflows);
1762                         PRINT(KERN_ERR,
1763                               "IR DMA error - OHCI error code 0x%02x\n", event);
1764                 }
1765
1766                 if (rescount != 0) {
1767                         /* the card is still writing to this block;
1768                            we can't touch it until it's done */
1769                         break;
1770                 }
1771
1772                 /* OK, the block is finished... */
1773
1774                 /* sync our view of the block */
1775                 dma_region_sync_for_cpu(&iso->data_buf, recv->block_dma*recv->buf_stride, recv->buf_stride);
1776
1777                 /* reset the DMA descriptor */
1778                 im->status = recv->buf_stride;
1779
1780                 /* advance block_dma */
1781                 recv->block_dma = (recv->block_dma + 1) % recv->nblocks;
1782
1783                 if ((recv->block_dma+1) % recv->nblocks == recv->block_reader) {
1784                         atomic_inc(&iso->overflows);
1785                         DBGMSG("ISO reception overflow - "
1786                                "ran out of DMA blocks");
1787                 }
1788         }
1789
1790         /* parse any packets that have arrived */
1791         ohci_iso_recv_bufferfill_parse(iso, recv);
1792 }
1793
1794 static void ohci_iso_recv_packetperbuf_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
1795 {
1796         int count;
1797         int wake = 0;
1798         struct ti_ohci *ohci = recv->ohci;
1799
1800         /* loop over the entire buffer */
1801         for (count = 0; count < recv->nblocks; count++) {
1802                 u32 packet_len = 0;
1803
1804                 /* pointer to the DMA descriptor */
1805                 struct dma_cmd *il = ((struct dma_cmd*) recv->prog.kvirt) + iso->pkt_dma;
1806
1807                 /* check the DMA descriptor for new writes to xferStatus */
1808                 u16 xferstatus = le32_to_cpu(il->status) >> 16;
1809                 u16 rescount = le32_to_cpu(il->status) & 0xFFFF;
1810
1811                 unsigned char event = xferstatus & 0x1F;
1812
1813                 if (!event) {
1814                         /* this packet hasn't come in yet; we are done for now */
1815                         goto out;
1816                 }
1817
1818                 if (event == 0x11) {
1819                         /* packet received successfully! */
1820
1821                         /* rescount is the number of bytes *remaining* in the packet buffer,
1822                            after the packet was written */
1823                         packet_len = recv->buf_stride - rescount;
1824
1825                 } else if (event == 0x02) {
1826                         PRINT(KERN_ERR, "IR DMA error - packet too long for buffer\n");
1827                 } else if (event) {
1828                         PRINT(KERN_ERR, "IR DMA error - OHCI error code 0x%02x\n", event);
1829                 }
1830
1831                 /* sync our view of the buffer */
1832                 dma_region_sync_for_cpu(&iso->data_buf, iso->pkt_dma * recv->buf_stride, recv->buf_stride);
1833
1834                 /* record the per-packet info */
1835                 {
1836                         /* iso header is 8 bytes ahead of the data payload */
1837                         unsigned char *hdr;
1838
1839                         unsigned int offset;
1840                         unsigned short cycle;
1841                         unsigned char channel, tag, sy;
1842
1843                         offset = iso->pkt_dma * recv->buf_stride;
1844                         hdr = iso->data_buf.kvirt + offset;
1845
1846                         /* skip iso header */
1847                         offset += 8;
1848                         packet_len -= 8;
1849
1850                         cycle = (hdr[0] | (hdr[1] << 8)) & 0x1FFF;
1851                         channel = hdr[5] & 0x3F;
1852                         tag = hdr[5] >> 6;
1853                         sy = hdr[4] & 0xF;
1854
1855                         hpsb_iso_packet_received(iso, offset, packet_len,
1856                                         recv->buf_stride, cycle, channel, tag, sy);
1857                 }
1858
1859                 /* reset the DMA descriptor */
1860                 il->status = recv->buf_stride;
1861
1862                 wake = 1;
1863                 recv->block_dma = iso->pkt_dma;
1864         }
1865
1866 out:
1867         if (wake)
1868                 hpsb_iso_wake(iso);
1869 }
1870
1871 static void ohci_iso_recv_task(unsigned long data)
1872 {
1873         struct hpsb_iso *iso = (struct hpsb_iso*) data;
1874         struct ohci_iso_recv *recv = iso->hostdata;
1875
1876         if (recv->dma_mode == BUFFER_FILL_MODE)
1877                 ohci_iso_recv_bufferfill_task(iso, recv);
1878         else
1879                 ohci_iso_recv_packetperbuf_task(iso, recv);
1880 }
1881
1882 /***********************************
1883  * rawiso ISO transmission         *
1884  ***********************************/
1885
/* per-context state for rawiso transmission */
struct ohci_iso_xmit {
        struct ti_ohci *ohci;             /* owning host controller */
        struct dma_prog_region prog;      /* DMA program: one iso_xmit_cmd per packet */
        struct ohci1394_iso_tasklet task; /* bottom half servicing xmit completions */
        int task_active;                  /* nonzero once 'task' is registered */

        /* per-context register offsets (contexts are 16 bytes apart) */
        u32 ContextControlSet;
        u32 ContextControlClear;
        u32 CommandPtr;
};
1896
/* transmission DMA program:
   one OUTPUT_MORE_IMMEDIATE for the IT header
   one OUTPUT_LAST for the buffer data */

struct iso_xmit_cmd {
        struct dma_cmd output_more_immediate; /* immediate descriptor carrying the header */
        u8 iso_hdr[8];    /* IT packet header, filled in by ohci_iso_xmit_queue() */
        u32 unused[2];    /* padding out the descriptor's immediate-data area */
        struct dma_cmd output_last; /* points at the payload in the user buffer */
};

/* forward declarations for the rawiso xmit interface */
static int ohci_iso_xmit_init(struct hpsb_iso *iso);
static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle);
static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso);
static void ohci_iso_xmit_task(unsigned long data);
1912
1913 static int ohci_iso_xmit_init(struct hpsb_iso *iso)
1914 {
1915         struct ohci_iso_xmit *xmit;
1916         unsigned int prog_size;
1917         int ctx;
1918         int ret = -ENOMEM;
1919
1920         xmit = kmalloc(sizeof(*xmit), SLAB_KERNEL);
1921         if (!xmit)
1922                 return -ENOMEM;
1923
1924         iso->hostdata = xmit;
1925         xmit->ohci = iso->host->hostdata;
1926         xmit->task_active = 0;
1927
1928         dma_prog_region_init(&xmit->prog);
1929
1930         prog_size = sizeof(struct iso_xmit_cmd) * iso->buf_packets;
1931
1932         if (dma_prog_region_alloc(&xmit->prog, prog_size, xmit->ohci->dev))
1933                 goto err;
1934
1935         ohci1394_init_iso_tasklet(&xmit->task, OHCI_ISO_TRANSMIT,
1936                                   ohci_iso_xmit_task, (unsigned long) iso);
1937
1938         if (ohci1394_register_iso_tasklet(xmit->ohci, &xmit->task) < 0) {
1939                 ret = -EBUSY;
1940                 goto err;
1941         }
1942
1943         xmit->task_active = 1;
1944
1945         /* xmit context registers are spaced 16 bytes apart */
1946         ctx = xmit->task.context;
1947         xmit->ContextControlSet = OHCI1394_IsoXmitContextControlSet + 16 * ctx;
1948         xmit->ContextControlClear = OHCI1394_IsoXmitContextControlClear + 16 * ctx;
1949         xmit->CommandPtr = OHCI1394_IsoXmitCommandPtr + 16 * ctx;
1950
1951         return 0;
1952
1953 err:
1954         ohci_iso_xmit_shutdown(iso);
1955         return ret;
1956 }
1957
1958 static void ohci_iso_xmit_stop(struct hpsb_iso *iso)
1959 {
1960         struct ohci_iso_xmit *xmit = iso->hostdata;
1961         struct ti_ohci *ohci = xmit->ohci;
1962
1963         /* disable interrupts */
1964         reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskClear, 1 << xmit->task.context);
1965
1966         /* halt DMA */
1967         if (ohci1394_stop_context(xmit->ohci, xmit->ContextControlClear, NULL)) {
1968                 /* XXX the DMA context will lock up if you try to send too much data! */
1969                 PRINT(KERN_ERR,
1970                       "you probably exceeded the OHCI card's bandwidth limit - "
1971                       "reload the module and reduce xmit bandwidth");
1972         }
1973 }
1974
1975 static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso)
1976 {
1977         struct ohci_iso_xmit *xmit = iso->hostdata;
1978
1979         if (xmit->task_active) {
1980                 ohci_iso_xmit_stop(iso);
1981                 ohci1394_unregister_iso_tasklet(xmit->ohci, &xmit->task);
1982                 xmit->task_active = 0;
1983         }
1984
1985         dma_prog_region_free(&xmit->prog);
1986         kfree(xmit);
1987         iso->hostdata = NULL;
1988 }
1989
/* tasklet body: reap completed IT descriptors, report each sent packet
 * (with its timestamp) to the iso subsystem, and reset the descriptors */
static void ohci_iso_xmit_task(unsigned long data)
{
        struct hpsb_iso *iso = (struct hpsb_iso*) data;
        struct ohci_iso_xmit *xmit = iso->hostdata;
        struct ti_ohci *ohci = xmit->ohci;  /* referenced by the PRINT macro */
        int wake = 0;
        int count;

        /* check the whole buffer if necessary, starting at pkt_dma */
        for (count = 0; count < iso->buf_packets; count++) {
                int cycle;

                /* DMA descriptor */
                struct iso_xmit_cmd *cmd = dma_region_i(&xmit->prog, struct iso_xmit_cmd, iso->pkt_dma);

                /* check for new writes to xferStatus */
                u16 xferstatus = le32_to_cpu(cmd->output_last.status) >> 16;
                u8  event = xferstatus & 0x1F;

                if (!event) {
                        /* packet hasn't been sent yet; we are done for now */
                        break;
                }

                if (event != 0x11)
                        PRINT(KERN_ERR,
                              "IT DMA error - OHCI error code 0x%02x\n", event);

                /* at least one packet went out, so wake up the writer */
                wake = 1;

                /* parse cycle */
                cycle = le32_to_cpu(cmd->output_last.status) & 0x1FFF;

                /* tell the subsystem the packet has gone out
                 * NOTE(review): iso->pkt_dma is presumably advanced inside
                 * hpsb_iso_packet_sent(), otherwise this loop would inspect
                 * the same descriptor every iteration - confirm in ieee1394
                 * core */
                hpsb_iso_packet_sent(iso, cycle, event != 0x11);

                /* reset the DMA descriptor for next time */
                cmd->output_last.status = 0;
        }

        if (wake)
                hpsb_iso_wake(iso);
}
2034
/* queue one packet for transmission: build its two-descriptor DMA entry
 * (header + payload) and splice it onto the end of the running chain.
 * Returns 0 on success, -EINVAL if the payload crosses a page boundary. */
static int ohci_iso_xmit_queue(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
{
        struct ohci_iso_xmit *xmit = iso->hostdata;
        struct ti_ohci *ohci = xmit->ohci;  /* referenced by the PRINT macro */

        int next_i, prev_i;
        struct iso_xmit_cmd *next, *prev;

        unsigned int offset;
        unsigned short len;
        unsigned char tag, sy;

        /* check that the packet doesn't cross a page boundary
           (we could allow this if we added OUTPUT_MORE descriptor support) */
        if (cross_bound(info->offset, info->len)) {
                PRINT(KERN_ERR,
                      "rawiso xmit: packet %u crosses a page boundary",
                      iso->first_packet);
                return -EINVAL;
        }

        /* local copies so later writes through 'info' can't affect us */
        offset = info->offset;
        len = info->len;
        tag = info->tag;
        sy = info->sy;

        /* sync up the card's view of the buffer */
        dma_region_sync_for_device(&iso->data_buf, offset, len);

        /* append first_packet to the DMA chain */
        /* by linking the previous descriptor to it */
        /* (next will become the new end of the DMA chain) */

        next_i = iso->first_packet;
        prev_i = (next_i == 0) ? (iso->buf_packets - 1) : (next_i - 1);

        next = dma_region_i(&xmit->prog, struct iso_xmit_cmd, next_i);
        prev = dma_region_i(&xmit->prog, struct iso_xmit_cmd, prev_i);

        /* set up the OUTPUT_MORE_IMMEDIATE descriptor
           (0x02000008: immediate descriptor, 8 bytes of immediate data) */
        memset(next, 0, sizeof(struct iso_xmit_cmd));
        next->output_more_immediate.control = cpu_to_le32(0x02000008);

        /* ISO packet header is embedded in the OUTPUT_MORE_IMMEDIATE */

        /* tcode = 0xA, and sy */
        next->iso_hdr[0] = 0xA0 | (sy & 0xF);

        /* tag and channel number */
        next->iso_hdr[1] = (tag << 6) | (iso->channel & 0x3F);

        /* transmission speed */
        next->iso_hdr[2] = iso->speed & 0x7;

        /* payload size */
        next->iso_hdr[6] = len & 0xFF;
        next->iso_hdr[7] = len >> 8;

        /* set up the OUTPUT_LAST */
        next->output_last.control = cpu_to_le32(1 << 28);
        next->output_last.control |= cpu_to_le32(1 << 27); /* update timeStamp */
        next->output_last.control |= cpu_to_le32(3 << 20); /* want interrupt */
        next->output_last.control |= cpu_to_le32(3 << 18); /* enable branch */
        next->output_last.control |= cpu_to_le32(len);

        /* payload bus address */
        next->output_last.address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, offset));

        /* leave branchAddress at zero for now */

        /* re-write the previous DMA descriptor to chain to this one */

        /* set prev branch address to point to next (Z=3) */
        prev->output_last.branchAddress = cpu_to_le32(
                dma_prog_region_offset_to_bus(&xmit->prog, sizeof(struct iso_xmit_cmd) * next_i) | 3);

        /* disable interrupt, unless required by the IRQ interval */
        if (prev_i % iso->irq_interval) {
                prev->output_last.control &= cpu_to_le32(~(3 << 20)); /* no interrupt */
        } else {
                prev->output_last.control |= cpu_to_le32(3 << 20); /* enable interrupt */
        }

        /* make sure all descriptor writes are visible before waking DMA */
        wmb();

        /* wake DMA in case it is sleeping */
        reg_write(xmit->ohci, xmit->ContextControlSet, 1 << 12);

        /* issue a dummy read of the cycle timer to force all PCI
           writes to be posted immediately */
        mb();
        reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer);

        return 0;
}
2130
/* start the transmit context, optionally waiting for a cycle match.
 * 'cycle' is the bus cycle (mod 8000) to begin on, or -1 to start at
 * once. Returns 0 on success, -1 if the context fails to go active. */
static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle)
{
        struct ohci_iso_xmit *xmit = iso->hostdata;
        struct ti_ohci *ohci = xmit->ohci;  /* referenced by the PRINT macro */

        /* clear out the control register */
        reg_write(xmit->ohci, xmit->ContextControlClear, 0xFFFFFFFF);
        wmb();

        /* address and length of first descriptor block (Z=3) */
        reg_write(xmit->ohci, xmit->CommandPtr,
                  dma_prog_region_offset_to_bus(&xmit->prog, iso->pkt_dma * sizeof(struct iso_xmit_cmd)) | 3);

        /* cycle match */
        if (cycle != -1) {
                u32 start = cycle & 0x1FFF;

                /* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
                   just snarf them from the current time */
                u32 seconds = reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer) >> 25;

                /* advance one second to give some extra time for DMA to start */
                seconds += 1;

                start |= (seconds & 3) << 13;

                /* 0x80000000 = cycleMatchEnable; match value in bits 16-30 */
                reg_write(xmit->ohci, xmit->ContextControlSet, 0x80000000 | (start << 16));
        }

        /* enable interrupts */
        reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskSet, 1 << xmit->task.context);

        /* run (0x8000 = RUN bit) */
        reg_write(xmit->ohci, xmit->ContextControlSet, 0x8000);
        mb();

        /* wait 100 usec to give the card time to go active */
        udelay(100);

        /* check the RUN bit */
        if (!(reg_read(xmit->ohci, xmit->ContextControlSet) & 0x8000)) {
                PRINT(KERN_ERR, "Error starting IT DMA (ContextControl 0x%08x)\n",
                      reg_read(xmit->ohci, xmit->ContextControlSet));
                return -1;
        }

        return 0;
}
2179
2180 static int ohci_isoctl(struct hpsb_iso *iso, enum isoctl_cmd cmd, unsigned long arg)
2181 {
2182
2183         switch(cmd) {
2184         case XMIT_INIT:
2185                 return ohci_iso_xmit_init(iso);
2186         case XMIT_START:
2187                 return ohci_iso_xmit_start(iso, arg);
2188         case XMIT_STOP:
2189                 ohci_iso_xmit_stop(iso);
2190                 return 0;
2191         case XMIT_QUEUE:
2192                 return ohci_iso_xmit_queue(iso, (struct hpsb_iso_packet_info*) arg);
2193         case XMIT_SHUTDOWN:
2194                 ohci_iso_xmit_shutdown(iso);
2195                 return 0;
2196
2197         case RECV_INIT:
2198                 return ohci_iso_recv_init(iso);
2199         case RECV_START: {
2200                 int *args = (int*) arg;
2201                 return ohci_iso_recv_start(iso, args[0], args[1], args[2]);
2202         }
2203         case RECV_STOP:
2204                 ohci_iso_recv_stop(iso);
2205                 return 0;
2206         case RECV_RELEASE:
2207                 ohci_iso_recv_release(iso, (struct hpsb_iso_packet_info*) arg);
2208                 return 0;
2209         case RECV_FLUSH:
2210                 ohci_iso_recv_task((unsigned long) iso);
2211                 return 0;
2212         case RECV_SHUTDOWN:
2213                 ohci_iso_recv_shutdown(iso);
2214                 return 0;
2215         case RECV_LISTEN_CHANNEL:
2216                 ohci_iso_recv_change_channel(iso, arg, 1);
2217                 return 0;
2218         case RECV_UNLISTEN_CHANNEL:
2219                 ohci_iso_recv_change_channel(iso, arg, 0);
2220                 return 0;
2221         case RECV_SET_CHANNEL_MASK:
2222                 ohci_iso_recv_set_channel_mask(iso, *((u64*) arg));
2223                 return 0;
2224
2225         default:
2226                 PRINT_G(KERN_ERR, "ohci_isoctl cmd %d not implemented yet",
2227                         cmd);
2228                 break;
2229         }
2230         return -EINVAL;
2231 }
2232
2233 /***************************************
2234  * IEEE-1394 functionality section END *
2235  ***************************************/
2236
2237
2238 /********************************************************
2239  * Global stuff (interrupt handler, init/shutdown code) *
2240  ********************************************************/
2241
/* abort an async transmit context: stop the DMA, reset its bookkeeping,
 * and complete every queued packet with ACKX_ABORTED */
static void dma_trm_reset(struct dma_trm_ctx *d)
{
        unsigned long flags;
        LIST_HEAD(packet_list);
        struct ti_ohci *ohci = d->ohci;  /* referenced by the PRINT macro */
        struct hpsb_packet *packet, *ptmp;

        ohci1394_stop_context(ohci, d->ctrlClear, NULL);

        /* Lock the context, reset it and release it. Move the packets
         * that were pending in the context to packet_list and free
         * them after releasing the lock. */

        spin_lock_irqsave(&d->lock, flags);

        /* drain both queues into the local list under the lock */
        list_splice(&d->fifo_list, &packet_list);
        list_splice(&d->pending_list, &packet_list);
        INIT_LIST_HEAD(&d->fifo_list);
        INIT_LIST_HEAD(&d->pending_list);

        /* reset the DMA program state to 'empty' */
        d->branchAddrPtr = NULL;
        d->sent_ind = d->prg_ind;
        d->free_prgs = d->num_desc;

        spin_unlock_irqrestore(&d->lock, flags);

        if (list_empty(&packet_list))
                return;

        PRINT(KERN_INFO, "AT dma reset ctx=%d, aborting transmission", d->ctx);

        /* Now process subsystem callbacks for the packets from this
         * context.  Done outside the lock since hpsb_packet_sent may
         * re-enter the driver. */
        list_for_each_entry_safe(packet, ptmp, &packet_list, driver_list) {
                list_del_init(&packet->driver_list);
                hpsb_packet_sent(ohci->host, packet, ACKX_ABORTED);
        }
}
2280
2281 static void ohci_schedule_iso_tasklets(struct ti_ohci *ohci,
2282                                        quadlet_t rx_event,
2283                                        quadlet_t tx_event)
2284 {
2285         struct ohci1394_iso_tasklet *t;
2286         unsigned long mask;
2287
2288         spin_lock(&ohci->iso_tasklet_list_lock);
2289
2290         list_for_each_entry(t, &ohci->iso_tasklet_list, link) {
2291                 mask = 1 << t->context;
2292
2293                 if (t->type == OHCI_ISO_TRANSMIT && tx_event & mask)
2294                         tasklet_schedule(&t->tasklet);
2295                 else if (rx_event & mask)
2296                         tasklet_schedule(&t->tasklet);
2297         }
2298
2299         spin_unlock(&ohci->iso_tasklet_list_lock);
2300
2301 }
2302
/*
 * Primary interrupt handler for the OHCI-1394 controller.
 *
 * Strategy: read and acknowledge all pending event bits up front (except
 * busReset, which cannot be cleared during the self-ID phase and is
 * therefore cleared later, from the selfIDComplete path), then test each
 * event bit in turn and clear it from the local 'event' copy once
 * handled.  Whatever remains set at the end is logged as an unhandled
 * interrupt.
 *
 * Returns IRQ_NONE for a spurious interrupt (shared line) or an ejected
 * CardBus card, IRQ_HANDLED otherwise.
 */
static irqreturn_t ohci_irq_handler(int irq, void *dev_id,
                             struct pt_regs *regs_are_unused)
{
        quadlet_t event, node_id;
        struct ti_ohci *ohci = (struct ti_ohci *)dev_id;
        struct hpsb_host *host = ohci->host;
        int phyid = -1, isroot = 0;
        unsigned long flags;

        /* Read and clear the interrupt event register.  Don't clear
         * the busReset event, though. This is done when we get the
         * selfIDComplete interrupt. */
        spin_lock_irqsave(&ohci->event_lock, flags);
        event = reg_read(ohci, OHCI1394_IntEventClear);
        reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
        spin_unlock_irqrestore(&ohci->event_lock, flags);

        /* No bits set: not our interrupt (the IRQ line may be shared). */
        if (!event)
                return IRQ_NONE;

        /* If event is ~(u32)0 cardbus card was ejected.  In this case
         * we just return, and clean up in the ohci1394_pci_remove
         * function. */
        if (event == ~(u32) 0) {
                DBGMSG("Device removed.");
                return IRQ_NONE;
        }

        DBGMSG("IntEvent: %08x", event);

        if (event & OHCI1394_unrecoverableError) {
                int ctx;
                PRINT(KERN_ERR, "Unrecoverable error!");

                /* 0x800 below is the ContextControl 'dead' bit (per the
                 * OHCI 1394 spec); report every context that died. */
                if (reg_read(ohci, OHCI1394_AsReqTrContextControlSet) & 0x800)
                        PRINT(KERN_ERR, "Async Req Tx Context died: "
                                "ctrl[%08x] cmdptr[%08x]",
                                reg_read(ohci, OHCI1394_AsReqTrContextControlSet),
                                reg_read(ohci, OHCI1394_AsReqTrCommandPtr));

                if (reg_read(ohci, OHCI1394_AsRspTrContextControlSet) & 0x800)
                        PRINT(KERN_ERR, "Async Rsp Tx Context died: "
                                "ctrl[%08x] cmdptr[%08x]",
                                reg_read(ohci, OHCI1394_AsRspTrContextControlSet),
                                reg_read(ohci, OHCI1394_AsRspTrCommandPtr));

                if (reg_read(ohci, OHCI1394_AsReqRcvContextControlSet) & 0x800)
                        PRINT(KERN_ERR, "Async Req Rcv Context died: "
                                "ctrl[%08x] cmdptr[%08x]",
                                reg_read(ohci, OHCI1394_AsReqRcvContextControlSet),
                                reg_read(ohci, OHCI1394_AsReqRcvCommandPtr));

                if (reg_read(ohci, OHCI1394_AsRspRcvContextControlSet) & 0x800)
                        PRINT(KERN_ERR, "Async Rsp Rcv Context died: "
                                "ctrl[%08x] cmdptr[%08x]",
                                reg_read(ohci, OHCI1394_AsRspRcvContextControlSet),
                                reg_read(ohci, OHCI1394_AsRspRcvCommandPtr));

                /* Iso transmit context register sets are 16 bytes apart. */
                for (ctx = 0; ctx < ohci->nb_iso_xmit_ctx; ctx++) {
                        if (reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)) & 0x800)
                                PRINT(KERN_ERR, "Iso Xmit %d Context died: "
                                        "ctrl[%08x] cmdptr[%08x]", ctx,
                                        reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)),
                                        reg_read(ohci, OHCI1394_IsoXmitCommandPtr + (16 * ctx)));
                }

                /* Iso receive context register sets are 32 bytes apart. */
                for (ctx = 0; ctx < ohci->nb_iso_rcv_ctx; ctx++) {
                        if (reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)) & 0x800)
                                PRINT(KERN_ERR, "Iso Recv %d Context died: "
                                        "ctrl[%08x] cmdptr[%08x] match[%08x]", ctx,
                                        reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)),
                                        reg_read(ohci, OHCI1394_IsoRcvCommandPtr + (32 * ctx)),
                                        reg_read(ohci, OHCI1394_IsoRcvContextMatch + (32 * ctx)));
                }

                event &= ~OHCI1394_unrecoverableError;
        }

        if (event & OHCI1394_cycleInconsistent) {
                /* We subscribe to the cycleInconsistent event only to
                 * clear the corresponding event bit... otherwise,
                 * isochronous cycleMatch DMA won't work. */
                DBGMSG("OHCI1394_cycleInconsistent");
                event &= ~OHCI1394_cycleInconsistent;
        }

        if (event & OHCI1394_busReset) {
                /* The busReset event bit can't be cleared during the
                 * selfID phase, so we disable busReset interrupts, to
                 * avoid burying the cpu in interrupt requests. */
                spin_lock_irqsave(&ohci->event_lock, flags);
                reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);

                if (ohci->check_busreset) {
                        int loop_count = 0;

                        udelay(10);

                        /* Repeatedly try to clear busReset until the
                         * hardware stops re-asserting it; the lock is
                         * dropped around each udelay(). */
                        while (reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
                                reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);

                                spin_unlock_irqrestore(&ohci->event_lock, flags);
                                udelay(10);
                                spin_lock_irqsave(&ohci->event_lock, flags);

                                /* The loop counter check is to prevent the driver
                                 * from remaining in this state forever. For the
                                 * initial bus reset, the loop continues for ever
                                 * and the system hangs, until some device is plugged-in
                                 * or out manually into a port! The forced reset seems
                                 * to solve this problem. This mainly effects nForce2. */
                                if (loop_count > 10000) {
                                        ohci_devctl(host, RESET_BUS, LONG_RESET);
                                        DBGMSG("Detected bus-reset loop. Forced a bus reset!");
                                        loop_count = 0;
                                }

                                loop_count++;
                        }
                }
                spin_unlock_irqrestore(&ohci->event_lock, flags);
                if (!host->in_bus_reset) {
                        DBGMSG("irq_handler: Bus reset requested");

                        /* Subsystem call */
                        hpsb_bus_reset(ohci->host);
                }
                event &= ~OHCI1394_busReset;
        }

        if (event & OHCI1394_reqTxComplete) {
                struct dma_trm_ctx *d = &ohci->at_req_context;
                DBGMSG("Got reqTxComplete interrupt "
                       "status=0x%08X", reg_read(ohci, d->ctrlSet));
                if (reg_read(ohci, d->ctrlSet) & 0x800)
                        ohci1394_stop_context(ohci, d->ctrlClear,
                                              "reqTxComplete");
                else
                        /* Called synchronously here instead of scheduling
                         * d->task (see the commented-out alternative). */
                        dma_trm_tasklet((unsigned long)d);
                        //tasklet_schedule(&d->task);
                event &= ~OHCI1394_reqTxComplete;
        }
        if (event & OHCI1394_respTxComplete) {
                struct dma_trm_ctx *d = &ohci->at_resp_context;
                DBGMSG("Got respTxComplete interrupt "
                       "status=0x%08X", reg_read(ohci, d->ctrlSet));
                if (reg_read(ohci, d->ctrlSet) & 0x800)
                        ohci1394_stop_context(ohci, d->ctrlClear,
                                              "respTxComplete");
                else
                        tasklet_schedule(&d->task);
                event &= ~OHCI1394_respTxComplete;
        }
        if (event & OHCI1394_RQPkt) {
                struct dma_rcv_ctx *d = &ohci->ar_req_context;
                DBGMSG("Got RQPkt interrupt status=0x%08X",
                       reg_read(ohci, d->ctrlSet));
                if (reg_read(ohci, d->ctrlSet) & 0x800)
                        ohci1394_stop_context(ohci, d->ctrlClear, "RQPkt");
                else
                        tasklet_schedule(&d->task);
                event &= ~OHCI1394_RQPkt;
        }
        if (event & OHCI1394_RSPkt) {
                struct dma_rcv_ctx *d = &ohci->ar_resp_context;
                DBGMSG("Got RSPkt interrupt status=0x%08X",
                       reg_read(ohci, d->ctrlSet));
                if (reg_read(ohci, d->ctrlSet) & 0x800)
                        ohci1394_stop_context(ohci, d->ctrlClear, "RSPkt");
                else
                        tasklet_schedule(&d->task);
                event &= ~OHCI1394_RSPkt;
        }
        if (event & OHCI1394_isochRx) {
                quadlet_t rx_event;

                /* Ack the per-context iso-receive bits, then fan out to
                 * the registered iso tasklets. */
                rx_event = reg_read(ohci, OHCI1394_IsoRecvIntEventSet);
                reg_write(ohci, OHCI1394_IsoRecvIntEventClear, rx_event);
                ohci_schedule_iso_tasklets(ohci, rx_event, 0);
                event &= ~OHCI1394_isochRx;
        }
        if (event & OHCI1394_isochTx) {
                quadlet_t tx_event;

                tx_event = reg_read(ohci, OHCI1394_IsoXmitIntEventSet);
                reg_write(ohci, OHCI1394_IsoXmitIntEventClear, tx_event);
                ohci_schedule_iso_tasklets(ohci, 0, tx_event);
                event &= ~OHCI1394_isochTx;
        }
        if (event & OHCI1394_selfIDComplete) {
                if (host->in_bus_reset) {
                        node_id = reg_read(ohci, OHCI1394_NodeID);

                        /* Bit 31 of NodeID is the idValid flag; without it
                         * the phy id / root bits below are meaningless. */
                        if (!(node_id & 0x80000000)) {
                                PRINT(KERN_ERR,
                                      "SelfID received, but NodeID invalid "
                                      "(probably new bus reset occurred): %08X",
                                      node_id);
                                goto selfid_not_valid;
                        }

                        /* Low 6 bits: physical ID; bit 30: root node. */
                        phyid =  node_id & 0x0000003f;
                        isroot = (node_id & 0x40000000) != 0;

                        DBGMSG("SelfID interrupt received "
                              "(phyid %d, %s)", phyid,
                              (isroot ? "root" : "not root"));

                        handle_selfid(ohci, host, phyid, isroot);

                        /* Clear the bus reset event and re-enable the
                         * busReset interrupt.  */
                        spin_lock_irqsave(&ohci->event_lock, flags);
                        reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
                        reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
                        spin_unlock_irqrestore(&ohci->event_lock, flags);

                        /* Accept Physical requests from all nodes. */
                        reg_write(ohci,OHCI1394_AsReqFilterHiSet, 0xffffffff);
                        reg_write(ohci,OHCI1394_AsReqFilterLoSet, 0xffffffff);

                        /* Turn on phys dma reception.
                         *
                         * TODO: Enable some sort of filtering management.
                         */
                        if (phys_dma) {
                                reg_write(ohci,OHCI1394_PhyReqFilterHiSet, 0xffffffff);
                                reg_write(ohci,OHCI1394_PhyReqFilterLoSet, 0xffffffff);
                                reg_write(ohci,OHCI1394_PhyUpperBound, 0xffff0000);
                        } else {
                                reg_write(ohci,OHCI1394_PhyReqFilterHiSet, 0x00000000);
                                reg_write(ohci,OHCI1394_PhyReqFilterLoSet, 0x00000000);
                        }

                        DBGMSG("PhyReqFilter=%08x%08x",
                               reg_read(ohci,OHCI1394_PhyReqFilterHiSet),
                               reg_read(ohci,OHCI1394_PhyReqFilterLoSet));

                        hpsb_selfid_complete(host, phyid, isroot);
                } else
                        PRINT(KERN_ERR,
                              "SelfID received outside of bus reset sequence");

selfid_not_valid:
                event &= ~OHCI1394_selfIDComplete;
        }

        /* Make sure we handle everything, just in case we accidentally
         * enabled an interrupt that we didn't write a handler for.  */
        if (event)
                PRINT(KERN_ERR, "Unhandled interrupt(s) 0x%08x",
                      event);

        return IRQ_HANDLED;
}
2558
2559 /* Put the buffer back into the dma context */
2560 static void insert_dma_buffer(struct dma_rcv_ctx *d, int idx)
2561 {
2562         struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2563         DBGMSG("Inserting dma buf ctx=%d idx=%d", d->ctx, idx);
2564
2565         d->prg_cpu[idx]->status = cpu_to_le32(d->buf_size);
2566         d->prg_cpu[idx]->branchAddress &= le32_to_cpu(0xfffffff0);
2567         idx = (idx + d->num_desc - 1 ) % d->num_desc;
2568         d->prg_cpu[idx]->branchAddress |= le32_to_cpu(0x00000001);
2569
2570         /* To avoid a race, ensure 1394 interface hardware sees the inserted
2571          * context program descriptors before it sees the wakeup bit set. */
2572         wmb();
2573         
2574         /* wake up the dma context if necessary */
2575         if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
2576                 PRINT(KERN_INFO,
2577                       "Waking dma ctx=%d ... processing is probably too slow",
2578                       d->ctx);
2579         }
2580
2581         /* do this always, to avoid race condition */
2582         reg_write(ohci, d->ctrlSet, 0x1000);
2583 }
2584
/*
 * Conditionally byte-swap a little-endian quadlet read from a DMA buffer.
 * 'noswap' is nonzero for controllers that deliver data already in host
 * order (ohci->no_swap_incoming at the call sites).
 *
 * Fix: both macro arguments are now parenthesized so that expression
 * arguments (e.g. "a + b") expand with the intended precedence.
 */
#define cond_le32_to_cpu(data, noswap) \
        ((noswap) ? (data) : le32_to_cpu(data))
2587
/* Packet size in bytes, indexed by IEEE-1394 transaction code (tcode).
 * As interpreted by packet_length(): 0 means the size is variable and
 * must be read from the packet's data_length field; a value that leaves
 * the final length below 4 (here -1) is treated as an error by the
 * caller.  Presumably the fixed values are header+trailer sizes for
 * quadlet/response packets -- confirm against the IEEE-1394 tcode table. */
static const int TCODE_SIZE[16] = {20, 0, 16, -1, 16, 20, 20, 0,
                            -1, 0, -1, 0, -1, -1, 16, -1};
2590
2591 /*
2592  * Determine the length of a packet in the buffer
2593  * Optimization suggested by Pascal Drolet <pascal.drolet@informission.ca>
2594  */
2595 static __inline__ int packet_length(struct dma_rcv_ctx *d, int idx, quadlet_t *buf_ptr,
2596                          int offset, unsigned char tcode, int noswap)
2597 {
2598         int length = -1;
2599
2600         if (d->type == DMA_CTX_ASYNC_REQ || d->type == DMA_CTX_ASYNC_RESP) {
2601                 length = TCODE_SIZE[tcode];
2602                 if (length == 0) {
2603                         if (offset + 12 >= d->buf_size) {
2604                                 length = (cond_le32_to_cpu(d->buf_cpu[(idx + 1) % d->num_desc]
2605                                                 [3 - ((d->buf_size - offset) >> 2)], noswap) >> 16);
2606                         } else {
2607                                 length = (cond_le32_to_cpu(buf_ptr[3], noswap) >> 16);
2608                         }
2609                         length += 20;
2610                 }
2611         } else if (d->type == DMA_CTX_ISO) {
2612                 /* Assumption: buffer fill mode with header/trailer */
2613                 length = (cond_le32_to_cpu(buf_ptr[0], noswap) >> 16) + 8;
2614         }
2615
2616         if (length > 0 && length % 4)
2617                 length += 4 - (length % 4);
2618
2619         return length;
2620 }
2621
/*
 * Tasklet that processes dma receive buffers.
 *
 * Drains complete packets from the receive buffer ring, starting at the
 * saved position (d->buf_ind, d->buf_offset).  A packet that crosses one
 * or more buffer boundaries is reassembled into the bounce buffer d->spb;
 * fully consumed buffers are handed back to the controller via
 * insert_dma_buffer().  Every non-PHY packet is (optionally) byte-swapped
 * and delivered to the ieee1394 core with hpsb_packet_received().
 * Runs entirely under d->lock.
 */
static void dma_rcv_tasklet (unsigned long data)
{
        struct dma_rcv_ctx *d = (struct dma_rcv_ctx*)data;
        struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
        unsigned int split_left, idx, offset, rescount;
        unsigned char tcode;
        int length, bytes_left, ack;
        unsigned long flags;
        quadlet_t *buf_ptr;
        char *split_ptr;
        char msg[256];

        spin_lock_irqsave(&d->lock, flags);

        /* Resume where the previous run left off. */
        idx = d->buf_ind;
        offset = d->buf_offset;
        buf_ptr = d->buf_cpu[idx] + offset/4;

        /* resCount (low 16 bits of the descriptor status) is the number
         * of bytes the controller has NOT yet written into this buffer,
         * so buf_size - rescount - offset is what we may parse. */
        rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
        bytes_left = d->buf_size - rescount - offset;

        while (bytes_left > 0) {
                /* tcode lives in bits 7..4 of the first quadlet. */
                tcode = (cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming) >> 4) & 0xf;

                /* packet_length() will return < 4 for an error */
                length = packet_length(d, idx, buf_ptr, offset, tcode, ohci->no_swap_incoming);

                if (length < 4) { /* something is wrong */
                        sprintf(msg,"Unexpected tcode 0x%x(0x%08x) in AR ctx=%d, length=%d",
                                tcode, cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming),
                                d->ctx, length);
                        ohci1394_stop_context(ohci, d->ctrlClear, msg);
                        spin_unlock_irqrestore(&d->lock, flags);
                        return;
                }

                /* The first case is where we have a packet that crosses
                 * over more than one descriptor. The next case is where
                 * it's all in the first descriptor.  */
                if ((offset + length) > d->buf_size) {
                        DBGMSG("Split packet rcv'd");
                        if (length > d->split_buf_size) {
                                ohci1394_stop_context(ohci, d->ctrlClear,
                                             "Split packet size exceeded");
                                d->buf_ind = idx;
                                d->buf_offset = offset;
                                spin_unlock_irqrestore(&d->lock, flags);
                                return;
                        }

                        if (le32_to_cpu(d->prg_cpu[(idx+1)%d->num_desc]->status)
                            == d->buf_size) {
                                /* Other part of packet not written yet.
                                 * this should never happen I think
                                 * anyway we'll get it on the next call.  */
                                PRINT(KERN_INFO,
                                      "Got only half a packet!");
                                d->buf_ind = idx;
                                d->buf_offset = offset;
                                spin_unlock_irqrestore(&d->lock, flags);
                                return;
                        }

                        /* Copy the tail of this buffer into the bounce
                         * buffer, then whole buffers, then the head of
                         * the final buffer, recycling each buffer as it
                         * is fully consumed. */
                        split_left = length;
                        split_ptr = (char *)d->spb;
                        memcpy(split_ptr,buf_ptr,d->buf_size-offset);
                        split_left -= d->buf_size-offset;
                        split_ptr += d->buf_size-offset;
                        insert_dma_buffer(d, idx);
                        idx = (idx+1) % d->num_desc;
                        buf_ptr = d->buf_cpu[idx];
                        offset=0;

                        while (split_left >= d->buf_size) {
                                memcpy(split_ptr,buf_ptr,d->buf_size);
                                split_ptr += d->buf_size;
                                split_left -= d->buf_size;
                                insert_dma_buffer(d, idx);
                                idx = (idx+1) % d->num_desc;
                                buf_ptr = d->buf_cpu[idx];
                        }

                        if (split_left > 0) {
                                memcpy(split_ptr, buf_ptr, split_left);
                                offset = split_left;
                                buf_ptr += offset/4;
                        }
                } else {
                        DBGMSG("Single packet rcv'd");
                        /* Packet fits entirely in the current buffer. */
                        memcpy(d->spb, buf_ptr, length);
                        offset += length;
                        buf_ptr += length/4;
                        if (offset==d->buf_size) {
                                insert_dma_buffer(d, idx);
                                idx = (idx+1) % d->num_desc;
                                buf_ptr = d->buf_cpu[idx];
                                offset=0;
                        }
                }

                /* We get one phy packet to the async descriptor for each
                 * bus reset. We always ignore it.  */
                if (tcode != OHCI1394_TCODE_PHY) {
                        if (!ohci->no_swap_incoming)
                                packet_swab(d->spb, tcode);
                        DBGMSG("Packet received from node"
                                " %d ack=0x%02X spd=%d tcode=0x%X"
                                " length=%d ctx=%d tlabel=%d",
                                (d->spb[1]>>16)&0x3f,
                                (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f,
                                (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>21)&0x3,
                                tcode, length, d->ctx,
                                (cond_le32_to_cpu(d->spb[0], ohci->no_swap_incoming)>>10)&0x3f);

                        /* 'ack' flag is set iff the 5-bit ack/event field
                         * in the trailing status quadlet equals 0x11 --
                         * see the OHCI ack/evt encoding for the meaning. */
                        ack = (((cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f)
                                == 0x11) ? 1 : 0;

                        /* length-4: strip the trailing status quadlet. */
                        hpsb_packet_received(ohci->host, d->spb,
                                             length-4, ack);
                }
#ifdef OHCI1394_DEBUG
                else
                        PRINT (KERN_DEBUG, "Got phy packet ctx=%d ... discarded",
                               d->ctx);
#endif

                /* Re-read resCount: the controller may have delivered
                 * more data while we were processing. */
                rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;

                bytes_left = d->buf_size - rescount - offset;

        }

        /* Save the resume position for the next invocation. */
        d->buf_ind = idx;
        d->buf_offset = offset;

        spin_unlock_irqrestore(&d->lock, flags);
}
2760
/*
 * Bottom half that processes sent packets.
 *
 * Walks d->fifo_list in order.  For each packet whose descriptor status
 * has been written back by the controller (xferStatus == 0 means not yet
 * sent, so the walk stops there), translates the OHCI completion code
 * into an hpsb ack value, reports it via hpsb_packet_sent(), unmaps the
 * payload, and recycles the program descriptor.  Finally calls
 * dma_trm_flush() -- presumably to move queued packets onto the
 * now-free descriptors (defined elsewhere in this file).
 * Runs entirely under d->lock.
 */
static void dma_trm_tasklet (unsigned long data)
{
        struct dma_trm_ctx *d = (struct dma_trm_ctx*)data;
        struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
        struct hpsb_packet *packet, *ptmp;
        unsigned long flags;
        u32 status, ack;
        size_t datasize;

        spin_lock_irqsave(&d->lock, flags);

        list_for_each_entry_safe(packet, ptmp, &d->fifo_list, driver_list) {
                /* xferStatus is the upper 16 bits of the status quadlet.
                 * Packets with a payload use the 'end' descriptor of the
                 * program; all others use the 'begin' descriptor. */
                datasize = packet->data_size;
                if (datasize && packet->type != hpsb_raw)
                        status = le32_to_cpu(
                                d->prg_cpu[d->sent_ind]->end.status) >> 16;
                else
                        status = le32_to_cpu(
                                d->prg_cpu[d->sent_ind]->begin.status) >> 16;

                if (status == 0)
                        /* this packet hasn't been sent yet*/
                        break;

#ifdef OHCI1394_DEBUG
                if (datasize)
                        /* tcode 0xa identifies a stream packet. */
                        if (((le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf) == 0xa)
                                DBGMSG("Stream packet sent to channel %d tcode=0x%X "
                                       "ack=0x%X spd=%d dataLength=%d ctx=%d",
                                       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>8)&0x3f,
                                       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
                                       status&0x1f, (status>>5)&0x3,
                                       le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16,
                                       d->ctx);
                        else
                                DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
                                       "%d ack=0x%X spd=%d dataLength=%d ctx=%d",
                                       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16)&0x3f,
                                       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
                                       (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>10)&0x3f,
                                       status&0x1f, (status>>5)&0x3,
                                       le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3])>>16,
                                       d->ctx);
                else
                        DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
                               "%d ack=0x%X spd=%d data=0x%08X ctx=%d",
                                (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])
                                        >>16)&0x3f,
                                (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
                                        >>4)&0xf,
                                (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
                                        >>10)&0x3f,
                                status&0x1f, (status>>5)&0x3,
                                le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3]),
                                d->ctx);
#endif

                /* Bit 4 set: the low nibble is the raw 1394 ack code.
                 * Otherwise the low 5 bits are an OHCI evt_* error code
                 * that we map onto an ACKX_* value below. */
                if (status & 0x10) {
                        ack = status & 0xf;
                } else {
                        switch (status & 0x1f) {
                        case EVT_NO_STATUS: /* that should never happen */
                        case EVT_RESERVED_A: /* that should never happen */
                        case EVT_LONG_PACKET: /* that should never happen */
                                PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
                                ack = ACKX_SEND_ERROR;
                                break;
                        case EVT_MISSING_ACK:
                                ack = ACKX_TIMEOUT;
                                break;
                        case EVT_UNDERRUN:
                                ack = ACKX_SEND_ERROR;
                                break;
                        case EVT_OVERRUN: /* that should never happen */
                                PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
                                ack = ACKX_SEND_ERROR;
                                break;
                        case EVT_DESCRIPTOR_READ:
                        case EVT_DATA_READ:
                        case EVT_DATA_WRITE:
                                ack = ACKX_SEND_ERROR;
                                break;
                        case EVT_BUS_RESET: /* that should never happen */
                                PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
                                ack = ACKX_SEND_ERROR;
                                break;
                        case EVT_TIMEOUT:
                                ack = ACKX_TIMEOUT;
                                break;
                        case EVT_TCODE_ERR:
                                ack = ACKX_SEND_ERROR;
                                break;
                        case EVT_RESERVED_B: /* that should never happen */
                        case EVT_RESERVED_C: /* that should never happen */
                                PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
                                ack = ACKX_SEND_ERROR;
                                break;
                        case EVT_UNKNOWN:
                        case EVT_FLUSHED:
                                ack = ACKX_SEND_ERROR;
                                break;
                        default:
                                PRINT(KERN_ERR, "Unhandled OHCI evt_* error 0x%x", status & 0x1f);
                                ack = ACKX_SEND_ERROR;
                                BUG();
                        }
                }

                list_del_init(&packet->driver_list);
                hpsb_packet_sent(ohci->host, packet, ack);

                if (datasize) {
                        /* NOTE(review): reading end.address back from the
                         * little-endian descriptor arguably wants
                         * le32_to_cpu(), not cpu_to_le32(); both are
                         * byte-swaps so the result is identical -- confirm. */
                        pci_unmap_single(ohci->dev,
                                         cpu_to_le32(d->prg_cpu[d->sent_ind]->end.address),
                                         datasize, PCI_DMA_TODEVICE);
                        OHCI_DMA_FREE("single Xmit data packet");
                }

                /* Recycle this descriptor slot. */
                d->sent_ind = (d->sent_ind+1)%d->num_desc;
                d->free_prgs++;
        }

        dma_trm_flush(ohci, d);

        spin_unlock_irqrestore(&d->lock, flags);
}
2888
2889 static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d)
2890 {
2891         if (d->ctrlClear) {
2892                 ohci1394_stop_context(d->ohci, d->ctrlClear, NULL);
2893
2894                 if (d->type == DMA_CTX_ISO) {
2895                         /* disable interrupts */
2896                         reg_write(d->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << d->ctx);
2897                         ohci1394_unregister_iso_tasklet(d->ohci, &d->ohci->ir_legacy_tasklet);
2898                 } else {
2899                         tasklet_kill(&d->task);
2900                 }
2901         }
2902 }
2903
2904
2905 static void free_dma_rcv_ctx(struct dma_rcv_ctx *d)
2906 {
2907         int i;
2908         struct ti_ohci *ohci = d->ohci;
2909
2910         if (ohci == NULL)
2911                 return;
2912
2913         DBGMSG("Freeing dma_rcv_ctx %d", d->ctx);
2914
2915         if (d->buf_cpu) {
2916                 for (i=0; i<d->num_desc; i++)
2917                         if (d->buf_cpu[i] && d->buf_bus[i]) {
2918                                 pci_free_consistent(
2919                                         ohci->dev, d->buf_size,
2920                                         d->buf_cpu[i], d->buf_bus[i]);
2921                                 OHCI_DMA_FREE("consistent dma_rcv buf[%d]", i);
2922                         }
2923                 kfree(d->buf_cpu);
2924                 kfree(d->buf_bus);
2925         }
2926         if (d->prg_cpu) {
2927                 for (i=0; i<d->num_desc; i++)
2928                         if (d->prg_cpu[i] && d->prg_bus[i]) {
2929                                 pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
2930                                 OHCI_DMA_FREE("consistent dma_rcv prg[%d]", i);
2931                         }
2932                 pci_pool_destroy(d->prg_pool);
2933                 OHCI_DMA_FREE("dma_rcv prg pool");
2934                 kfree(d->prg_cpu);
2935                 kfree(d->prg_bus);
2936         }
2937         kfree(d->spb);
2938
2939         /* Mark this context as freed. */
2940         d->ohci = NULL;
2941 }
2942
2943 static int
2944 alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
2945                   enum context_type type, int ctx, int num_desc,
2946                   int buf_size, int split_buf_size, int context_base)
2947 {
2948         int i, len;
2949         static int num_allocs;
2950         static char pool_name[20];
2951
2952         d->ohci = ohci;
2953         d->type = type;
2954         d->ctx = ctx;
2955
2956         d->num_desc = num_desc;
2957         d->buf_size = buf_size;
2958         d->split_buf_size = split_buf_size;
2959
2960         d->ctrlSet = 0;
2961         d->ctrlClear = 0;
2962         d->cmdPtr = 0;
2963
2964         d->buf_cpu = kmalloc(d->num_desc * sizeof(quadlet_t*), GFP_ATOMIC);
2965         d->buf_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_ATOMIC);
2966
2967         if (d->buf_cpu == NULL || d->buf_bus == NULL) {
2968                 PRINT(KERN_ERR, "Failed to allocate dma buffer");
2969                 free_dma_rcv_ctx(d);
2970                 return -ENOMEM;
2971         }
2972         memset(d->buf_cpu, 0, d->num_desc * sizeof(quadlet_t*));
2973         memset(d->buf_bus, 0, d->num_desc * sizeof(dma_addr_t));
2974
2975         d->prg_cpu = kmalloc(d->num_desc * sizeof(struct dma_cmd*),
2976                                 GFP_ATOMIC);
2977         d->prg_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_ATOMIC);
2978
2979         if (d->prg_cpu == NULL || d->prg_bus == NULL) {
2980                 PRINT(KERN_ERR, "Failed to allocate dma prg");
2981                 free_dma_rcv_ctx(d);
2982                 return -ENOMEM;
2983         }
2984         memset(d->prg_cpu, 0, d->num_desc * sizeof(struct dma_cmd*));
2985         memset(d->prg_bus, 0, d->num_desc * sizeof(dma_addr_t));
2986
2987         d->spb = kmalloc(d->split_buf_size, GFP_ATOMIC);
2988
2989         if (d->spb == NULL) {
2990                 PRINT(KERN_ERR, "Failed to allocate split buffer");
2991                 free_dma_rcv_ctx(d);
2992                 return -ENOMEM;
2993         }
2994         
2995         len = sprintf(pool_name, "ohci1394_rcv_prg");
2996         sprintf(pool_name+len, "%d", num_allocs);
2997         d->prg_pool = pci_pool_create(pool_name, ohci->dev,
2998                                 sizeof(struct dma_cmd), 4, 0);
2999         if(d->prg_pool == NULL)
3000         {
3001                 PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
3002                 free_dma_rcv_ctx(d);
3003                 return -ENOMEM;
3004         }
3005         num_allocs++;
3006
3007         OHCI_DMA_ALLOC("dma_rcv prg pool");
3008
3009         for (i=0; i<d->num_desc; i++) {
3010                 d->buf_cpu[i] = pci_alloc_consistent(ohci->dev,
3011                                                      d->buf_size,
3012                                                      d->buf_bus+i);
3013                 OHCI_DMA_ALLOC("consistent dma_rcv buf[%d]", i);
3014
3015                 if (d->buf_cpu[i] != NULL) {
3016                         memset(d->buf_cpu[i], 0, d->buf_size);
3017                 } else {
3018                         PRINT(KERN_ERR,
3019                               "Failed to allocate dma buffer");
3020                         free_dma_rcv_ctx(d);
3021                         return -ENOMEM;
3022                 }
3023
3024                 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i);
3025                 OHCI_DMA_ALLOC("pool dma_rcv prg[%d]", i);
3026
3027                 if (d->prg_cpu[i] != NULL) {
3028                         memset(d->prg_cpu[i], 0, sizeof(struct dma_cmd));
3029                 } else {
3030                         PRINT(KERN_ERR,
3031                               "Failed to allocate dma prg");
3032                         free_dma_rcv_ctx(d);
3033                         return -ENOMEM;
3034                 }
3035         }
3036
3037         spin_lock_init(&d->lock);
3038
3039         if (type == DMA_CTX_ISO) {
3040                 ohci1394_init_iso_tasklet(&ohci->ir_legacy_tasklet,
3041                                           OHCI_ISO_MULTICHANNEL_RECEIVE,
3042                                           dma_rcv_tasklet, (unsigned long) d);
3043         } else {
3044                 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
3045                 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
3046                 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
3047
3048                 tasklet_init (&d->task, dma_rcv_tasklet, (unsigned long) d);
3049         }
3050
3051         return 0;
3052 }
3053
3054 static void free_dma_trm_ctx(struct dma_trm_ctx *d)
3055 {
3056         int i;
3057         struct ti_ohci *ohci = d->ohci;
3058
3059         if (ohci == NULL)
3060                 return;
3061
3062         DBGMSG("Freeing dma_trm_ctx %d", d->ctx);
3063
3064         if (d->prg_cpu) {
3065                 for (i=0; i<d->num_desc; i++)
3066                         if (d->prg_cpu[i] && d->prg_bus[i]) {
3067                                 pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
3068                                 OHCI_DMA_FREE("pool dma_trm prg[%d]", i);
3069                         }
3070                 pci_pool_destroy(d->prg_pool);
3071                 OHCI_DMA_FREE("dma_trm prg pool");
3072                 kfree(d->prg_cpu);
3073                 kfree(d->prg_bus);
3074         }
3075
3076         /* Mark this context as freed. */
3077         d->ohci = NULL;
3078 }
3079
3080 static int
3081 alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
3082                   enum context_type type, int ctx, int num_desc,
3083                   int context_base)
3084 {
3085         int i, len;
3086         static char pool_name[20];
3087         static int num_allocs=0;
3088
3089         d->ohci = ohci;
3090         d->type = type;
3091         d->ctx = ctx;
3092         d->num_desc = num_desc;
3093         d->ctrlSet = 0;
3094         d->ctrlClear = 0;
3095         d->cmdPtr = 0;
3096
3097         d->prg_cpu = kmalloc(d->num_desc * sizeof(struct at_dma_prg*),
3098                              GFP_KERNEL);
3099         d->prg_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_KERNEL);
3100
3101         if (d->prg_cpu == NULL || d->prg_bus == NULL) {
3102                 PRINT(KERN_ERR, "Failed to allocate at dma prg");
3103                 free_dma_trm_ctx(d);
3104                 return -ENOMEM;
3105         }
3106         memset(d->prg_cpu, 0, d->num_desc * sizeof(struct at_dma_prg*));
3107         memset(d->prg_bus, 0, d->num_desc * sizeof(dma_addr_t));
3108
3109         len = sprintf(pool_name, "ohci1394_trm_prg");
3110         sprintf(pool_name+len, "%d", num_allocs);
3111         d->prg_pool = pci_pool_create(pool_name, ohci->dev,
3112                                 sizeof(struct at_dma_prg), 4, 0);
3113         if (d->prg_pool == NULL) {
3114                 PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
3115                 free_dma_trm_ctx(d);
3116                 return -ENOMEM;
3117         }
3118         num_allocs++;
3119
3120         OHCI_DMA_ALLOC("dma_rcv prg pool");
3121
3122         for (i = 0; i < d->num_desc; i++) {
3123                 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i);
3124                 OHCI_DMA_ALLOC("pool dma_trm prg[%d]", i);
3125
3126                 if (d->prg_cpu[i] != NULL) {
3127                         memset(d->prg_cpu[i], 0, sizeof(struct at_dma_prg));
3128                 } else {
3129                         PRINT(KERN_ERR,
3130                               "Failed to allocate at dma prg");
3131                         free_dma_trm_ctx(d);
3132                         return -ENOMEM;
3133                 }
3134         }
3135
3136         spin_lock_init(&d->lock);
3137
3138         /* initialize tasklet */
3139         if (type == DMA_CTX_ISO) {
3140                 ohci1394_init_iso_tasklet(&ohci->it_legacy_tasklet, OHCI_ISO_TRANSMIT,
3141                                           dma_trm_tasklet, (unsigned long) d);
3142                 if (ohci1394_register_iso_tasklet(ohci,
3143                                                   &ohci->it_legacy_tasklet) < 0) {
3144                         PRINT(KERN_ERR, "No IT DMA context available");
3145                         free_dma_trm_ctx(d);
3146                         return -EBUSY;
3147                 }
3148
3149                 /* IT can be assigned to any context by register_iso_tasklet */
3150                 d->ctx = ohci->it_legacy_tasklet.context;
3151                 d->ctrlSet = OHCI1394_IsoXmitContextControlSet + 16 * d->ctx;
3152                 d->ctrlClear = OHCI1394_IsoXmitContextControlClear + 16 * d->ctx;
3153                 d->cmdPtr = OHCI1394_IsoXmitCommandPtr + 16 * d->ctx;
3154         } else {
3155                 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
3156                 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
3157                 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
3158                 tasklet_init (&d->task, dma_trm_tasklet, (unsigned long)d);
3159         }
3160
3161         return 0;
3162 }
3163
/* Install a new config ROM image: load the ROM header and bus options
 * into the controller registers and mirror the full image into the
 * DMA-visible ROM buffer.  Invoked by the hpsb core through
 * ohci1394_driver.set_hw_config_rom. */
static void ohci_set_hw_config_rom(struct hpsb_host *host, quadlet_t *config_rom)
{
	struct ti_ohci *ohci = host->hostdata;

	/* config_rom[] is big-endian; the register interface takes
	 * host-endian values, hence the be32_to_cpu conversions. */
	reg_write(ohci, OHCI1394_ConfigROMhdr, be32_to_cpu(config_rom[0]));
	reg_write(ohci, OHCI1394_BusOptions, be32_to_cpu(config_rom[2]));

	memcpy(ohci->csr_config_rom_cpu, config_rom, OHCI_CONFIG_ROM_LEN);
}
3173
3174
/* Perform a hardware compare-and-swap on one of the controller's CSR
 * registers; the low two bits of @reg select which register.  Writes
 * @data/@compare, kicks off the operation, then polls for completion
 * (bit 31 of CSRControl) for up to OHCI_LOOP_COUNT milliseconds.
 * Returns the resulting CSRData value.
 * NOTE(review): on timeout this silently returns whatever CSRData
 * holds — the caller cannot distinguish a timed-out swap. */
static quadlet_t ohci_hw_csr_reg(struct hpsb_host *host, int reg,
                                 quadlet_t data, quadlet_t compare)
{
	struct ti_ohci *ohci = host->hostdata;
	int i;

	reg_write(ohci, OHCI1394_CSRData, data);
	reg_write(ohci, OHCI1394_CSRCompareData, compare);
	reg_write(ohci, OHCI1394_CSRControl, reg & 0x3);

	/* Busy-wait, 1 ms per iteration, until the done bit is set. */
	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
			break;

		mdelay(1);
	}

	return reg_read(ohci, OHCI1394_CSRData);
}
3194
/* Host-driver operations exported to the ieee1394 (hpsb) core; these
 * callbacks are how the stack drives this controller. */
static struct hpsb_host_driver ohci1394_driver = {
	.owner =		THIS_MODULE,
	.name =			OHCI1394_DRIVER_NAME,
	.set_hw_config_rom =	ohci_set_hw_config_rom,
	.transmit_packet =	ohci_transmit,
	.devctl =		ohci_devctl,
	.isoctl =		ohci_isoctl,
	.hw_csr_reg =		ohci_hw_csr_reg,
};
3204
3205 \f
3206
3207 /***********************************
3208  * PCI Driver Interface functions  *
3209  ***********************************/
3210
/* Probe-failure helper: logs the message, tears down whatever was
 * initialized so far (ohci1394_pci_remove consults ohci->init_state to
 * know how much to undo), and returns @err from the ENCLOSING function.
 * Only meaningful inside ohci1394_pci_probe, which #undefs it at the
 * end; note the hidden control flow (the `return` inside the macro). */
#define FAIL(err, fmt, args...)			\
do {						\
	PRINT_G(KERN_ERR, fmt , ## args);	\
	ohci1394_pci_remove(dev);		\
	return err;				\
} while (0)
3217
/*
 * ohci1394_pci_probe - bring up one OHCI-1394 controller
 *
 * Initialization proceeds in stages; ohci->init_state is advanced after
 * each stage so that the FAIL() macro (which calls ohci1394_pci_remove)
 * can unwind exactly the work done so far.  Returns 0 on success or a
 * negative errno via FAIL().
 */
static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
                                        const struct pci_device_id *ent)
{
	static int version_printed = 0;

	struct hpsb_host *host;
	struct ti_ohci *ohci;	/* shortcut to currently handled device */
	unsigned long ohci_base;

	/* Print the driver banner only on the first probed device. */
	if (version_printed++ == 0)
		PRINT_G(KERN_INFO, "%s", version);

	if (pci_enable_device(dev))
		FAIL(-ENXIO, "Failed to enable OHCI hardware");
	pci_set_master(dev);

	host = hpsb_alloc_host(&ohci1394_driver, sizeof(struct ti_ohci), &dev->dev);
	if (!host) FAIL(-ENOMEM, "Failed to allocate host structure");

	ohci = host->hostdata;
	ohci->dev = dev;
	ohci->host = host;
	ohci->init_state = OHCI_INIT_ALLOC_HOST;
	host->pdev = dev;
	pci_set_drvdata(dev, ohci);

	/* We don't want hardware swapping */
	pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);

	/* Some oddball Apple controllers do not order the selfid
	 * properly, so we make up for it here.  */
#ifndef __LITTLE_ENDIAN
	/* XXX: Need a better way to check this. I'm wondering if we can
	 * read the values of the OHCI1394_PCI_HCI_Control and the
	 * noByteSwapData registers to see if they were not cleared to
	 * zero. Should this work? Obviously it's not defined what these
	 * registers will read when they aren't supported. Bleh! */
	if (dev->vendor == PCI_VENDOR_ID_APPLE &&
	    dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) {
		ohci->no_swap_incoming = 1;
		ohci->selfid_swap = 0;
	} else
		ohci->selfid_swap = 1;
#endif


#ifndef PCI_DEVICE_ID_NVIDIA_NFORCE2_FW
#define PCI_DEVICE_ID_NVIDIA_NFORCE2_FW 0x006e
#endif

	/* These chipsets require a bit of extra care when checking after
	 * a busreset.  */
	if ((dev->vendor == PCI_VENDOR_ID_APPLE &&
	     dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) ||
	    (dev->vendor ==  PCI_VENDOR_ID_NVIDIA &&
	     dev->device == PCI_DEVICE_ID_NVIDIA_NFORCE2_FW))
		ohci->check_busreset = 1;

	/* We hardwire the MMIO length, since some CardBus adaptors
	 * fail to report the right length.  Anyway, the ohci spec
	 * clearly says it's 2kb, so this shouldn't be a problem. */
	ohci_base = pci_resource_start(dev, 0);
	if (pci_resource_len(dev, 0) != OHCI1394_REGISTER_SIZE)
		PRINT(KERN_WARNING, "Unexpected PCI resource length of %lx!",
		      pci_resource_len(dev, 0));

	/* Seems PCMCIA handles this internally. Not sure why. Seems
	 * pretty bogus to force a driver to special case this.  */
#ifndef PCMCIA
	if (!request_mem_region (ohci_base, OHCI1394_REGISTER_SIZE, OHCI1394_DRIVER_NAME))
		FAIL(-ENOMEM, "MMIO resource (0x%lx - 0x%lx) unavailable",
		     ohci_base, ohci_base + OHCI1394_REGISTER_SIZE);
#endif
	ohci->init_state = OHCI_INIT_HAVE_MEM_REGION;

	ohci->registers = ioremap(ohci_base, OHCI1394_REGISTER_SIZE);
	if (ohci->registers == NULL)
		FAIL(-ENXIO, "Failed to remap registers - card not accessible");
	ohci->init_state = OHCI_INIT_HAVE_IOMAPPING;
	DBGMSG("Remapped memory spaces reg 0x%p", ohci->registers);

	/* csr_config rom allocation */
	ohci->csr_config_rom_cpu =
		pci_alloc_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
				     &ohci->csr_config_rom_bus);
	OHCI_DMA_ALLOC("consistent csr_config_rom");
	if (ohci->csr_config_rom_cpu == NULL)
		FAIL(-ENOMEM, "Failed to allocate buffer config rom");
	ohci->init_state = OHCI_INIT_HAVE_CONFIG_ROM_BUFFER;

	/* self-id dma buffer allocation */
	ohci->selfid_buf_cpu =
		pci_alloc_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
		      &ohci->selfid_buf_bus);
	OHCI_DMA_ALLOC("consistent selfid_buf");

	if (ohci->selfid_buf_cpu == NULL)
		FAIL(-ENOMEM, "Failed to allocate DMA buffer for self-id packets");
	ohci->init_state = OHCI_INIT_HAVE_SELFID_BUFFER;

	if ((unsigned long)ohci->selfid_buf_cpu & 0x1fff)
		PRINT(KERN_INFO, "SelfID buffer %p is not aligned on "
		      "8Kb boundary... may cause problems on some CXD3222 chip",
		      ohci->selfid_buf_cpu);

	/* No self-id errors at startup */
	ohci->self_id_errors = 0;

	ohci->init_state = OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE;
	/* AR DMA request context allocation */
	if (alloc_dma_rcv_ctx(ohci, &ohci->ar_req_context,
			      DMA_CTX_ASYNC_REQ, 0, AR_REQ_NUM_DESC,
			      AR_REQ_BUF_SIZE, AR_REQ_SPLIT_BUF_SIZE,
			      OHCI1394_AsReqRcvContextBase) < 0)
		FAIL(-ENOMEM, "Failed to allocate AR Req context");

	/* AR DMA response context allocation */
	if (alloc_dma_rcv_ctx(ohci, &ohci->ar_resp_context,
			      DMA_CTX_ASYNC_RESP, 0, AR_RESP_NUM_DESC,
			      AR_RESP_BUF_SIZE, AR_RESP_SPLIT_BUF_SIZE,
			      OHCI1394_AsRspRcvContextBase) < 0)
		FAIL(-ENOMEM, "Failed to allocate AR Resp context");

	/* AT DMA request context */
	if (alloc_dma_trm_ctx(ohci, &ohci->at_req_context,
			      DMA_CTX_ASYNC_REQ, 0, AT_REQ_NUM_DESC,
			      OHCI1394_AsReqTrContextBase) < 0)
		FAIL(-ENOMEM, "Failed to allocate AT Req context");

	/* AT DMA response context */
	if (alloc_dma_trm_ctx(ohci, &ohci->at_resp_context,
			      DMA_CTX_ASYNC_RESP, 1, AT_RESP_NUM_DESC,
			      OHCI1394_AsRspTrContextBase) < 0)
		FAIL(-ENOMEM, "Failed to allocate AT Resp context");

	/* Start off with a soft reset, to clear everything to a sane
	 * state. */
	ohci_soft_reset(ohci);

	/* Now enable LPS, which we need in order to start accessing
	 * most of the registers.  In fact, on some cards (ALI M5251),
	 * accessing registers in the SClk domain without LPS enabled
	 * will lock up the machine.  Wait 50msec to make sure we have
	 * full link enabled.  */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);

	/* Disable and clear interrupts */
	reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);

	mdelay(50);

	/* Determine the number of available IR and IT contexts. */
	ohci->nb_iso_rcv_ctx =
		get_nb_iso_ctx(ohci, OHCI1394_IsoRecvIntMaskSet);
	DBGMSG("%d iso receive contexts available",
	       ohci->nb_iso_rcv_ctx);

	ohci->nb_iso_xmit_ctx =
		get_nb_iso_ctx(ohci, OHCI1394_IsoXmitIntMaskSet);
	DBGMSG("%d iso transmit contexts available",
	       ohci->nb_iso_xmit_ctx);

	/* Set the usage bits for non-existent contexts so they can't
	 * be allocated */
	ohci->ir_ctx_usage = ~0 << ohci->nb_iso_rcv_ctx;
	ohci->it_ctx_usage = ~0 << ohci->nb_iso_xmit_ctx;

	INIT_LIST_HEAD(&ohci->iso_tasklet_list);
	spin_lock_init(&ohci->iso_tasklet_list_lock);
	ohci->ISO_channel_usage = 0;
	spin_lock_init(&ohci->IR_channel_lock);

	/* Allocate the IR DMA context right here so we don't have
	 * to do it in interrupt path - note that this doesn't
	 * waste much memory and avoids the jugglery required to
	 * allocate it in IRQ path. */
	if (alloc_dma_rcv_ctx(ohci, &ohci->ir_legacy_context,
			      DMA_CTX_ISO, 0, IR_NUM_DESC,
			      IR_BUF_SIZE, IR_SPLIT_BUF_SIZE,
			      OHCI1394_IsoRcvContextBase) < 0) {
		FAIL(-ENOMEM, "Cannot allocate IR Legacy DMA context");
	}

	/* We hopefully don't have to pre-allocate IT DMA like we did
	 * for IR DMA above. Allocate it on-demand and mark inactive. */
	ohci->it_legacy_context.ohci = NULL;

	if (request_irq(dev->irq, ohci_irq_handler, SA_SHIRQ,
			 OHCI1394_DRIVER_NAME, ohci))
		FAIL(-ENOMEM, "Failed to allocate shared interrupt %d", dev->irq);

	ohci->init_state = OHCI_INIT_HAVE_IRQ;
	ohci_initialize(ohci);

	/* Set certain csr values */
	host->csr.guid_hi = reg_read(ohci, OHCI1394_GUIDHi);
	host->csr.guid_lo = reg_read(ohci, OHCI1394_GUIDLo);
	host->csr.cyc_clk_acc = 100;  /* how do we determine clk accuracy? */
	host->csr.max_rec = (reg_read(ohci, OHCI1394_BusOptions) >> 12) & 0xf;
	host->csr.lnk_spd = reg_read(ohci, OHCI1394_BusOptions) & 0x7;

	/* Tell the highlevel this host is ready */
	if (hpsb_add_host(host))
		FAIL(-ENOMEM, "Failed to register host with highlevel");

	ohci->init_state = OHCI_INIT_DONE;

	return 0;
#undef FAIL
}
3429
3430 static void ohci1394_pci_remove(struct pci_dev *pdev)
3431 {
3432         struct ti_ohci *ohci;
3433         struct device *dev;
3434
3435         ohci = pci_get_drvdata(pdev);
3436         if (!ohci)
3437                 return;
3438
3439         dev = get_device(&ohci->host->device);
3440
3441         switch (ohci->init_state) {
3442         case OHCI_INIT_DONE:
3443                 hpsb_remove_host(ohci->host);
3444
3445                 /* Clear out BUS Options */
3446                 reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
3447                 reg_write(ohci, OHCI1394_BusOptions,
3448                           (reg_read(ohci, OHCI1394_BusOptions) & 0x0000f007) |
3449                           0x00ff0000);
3450                 memset(ohci->csr_config_rom_cpu, 0, OHCI_CONFIG_ROM_LEN);
3451
3452         case OHCI_INIT_HAVE_IRQ:
3453                 /* Clear interrupt registers */
3454                 reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3455                 reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3456                 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
3457                 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
3458                 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
3459                 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
3460
3461                 /* Disable IRM Contender */
3462                 set_phy_reg(ohci, 4, ~0xc0 & get_phy_reg(ohci, 4));
3463
3464                 /* Clear link control register */
3465                 reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
3466
3467                 /* Let all other nodes know to ignore us */
3468                 ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
3469
3470                 /* Soft reset before we start - this disables
3471                  * interrupts and clears linkEnable and LPS. */
3472                 ohci_soft_reset(ohci);
3473                 free_irq(ohci->dev->irq, ohci);
3474
3475         case OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE:
3476                 /* The ohci_soft_reset() stops all DMA contexts, so we
3477                  * dont need to do this.  */
3478                 /* Free AR dma */
3479                 free_dma_rcv_ctx(&ohci->ar_req_context);
3480                 free_dma_rcv_ctx(&ohci->ar_resp_context);
3481
3482                 /* Free AT dma */
3483                 free_dma_trm_ctx(&ohci->at_req_context);
3484                 free_dma_trm_ctx(&ohci->at_resp_context);
3485
3486                 /* Free IR dma */
3487                 free_dma_rcv_ctx(&ohci->ir_legacy_context);
3488
3489                 /* Free IT dma */
3490                 free_dma_trm_ctx(&ohci->it_legacy_context);
3491
3492                 /* Free IR legacy dma */
3493                 free_dma_rcv_ctx(&ohci->ir_legacy_context);
3494
3495
3496         case OHCI_INIT_HAVE_SELFID_BUFFER:
3497                 pci_free_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
3498                                     ohci->selfid_buf_cpu,
3499                                     ohci->selfid_buf_bus);
3500                 OHCI_DMA_FREE("consistent selfid_buf");
3501
3502         case OHCI_INIT_HAVE_CONFIG_ROM_BUFFER:
3503                 pci_free_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
3504                                     ohci->csr_config_rom_cpu,
3505                                     ohci->csr_config_rom_bus);
3506                 OHCI_DMA_FREE("consistent csr_config_rom");
3507
3508         case OHCI_INIT_HAVE_IOMAPPING:
3509                 iounmap(ohci->registers);
3510
3511         case OHCI_INIT_HAVE_MEM_REGION:
3512 #ifndef PCMCIA
3513                 release_mem_region(pci_resource_start(ohci->dev, 0),
3514                                    OHCI1394_REGISTER_SIZE);
3515 #endif
3516
3517 #ifdef CONFIG_PPC_PMAC
3518         /* On UniNorth, power down the cable and turn off the chip
3519          * clock when the module is removed to save power on
3520          * laptops. Turning it back ON is done by the arch code when
3521          * pci_enable_device() is called */
3522         {
3523                 struct device_node* of_node;
3524
3525                 of_node = pci_device_to_OF_node(ohci->dev);
3526                 if (of_node) {
3527                         pmac_call_feature(PMAC_FTR_1394_ENABLE, of_node, 0, 0);
3528                         pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, of_node, 0, 0);
3529                 }
3530         }
3531 #endif /* CONFIG_PPC_PMAC */
3532
3533         case OHCI_INIT_ALLOC_HOST:
3534                 pci_set_drvdata(ohci->dev, NULL);
3535         }
3536
3537         if (dev)
3538                 put_device(dev);
3539 }
3540
3541
/*
 * ohci1394_pci_resume - PM resume hook
 *
 * On PowerMac hardware, ask the platform code to power the FireWire
 * cell back up before re-enabling the PCI device.
 *
 * Fix: the return value of pci_enable_device() was silently discarded;
 * propagate it so the PM core sees a failed resume.
 */
static int ohci1394_pci_resume (struct pci_dev *pdev)
{
#ifdef CONFIG_PPC_PMAC
	if (_machine == _MACH_Pmac) {
		struct device_node *of_node;

		/* Re-enable 1394 */
		of_node = pci_device_to_OF_node (pdev);
		if (of_node)
			pmac_call_feature (PMAC_FTR_1394_ENABLE, of_node, 0, 1);
	}
#endif /* CONFIG_PPC_PMAC */

	return pci_enable_device(pdev);
}
3559
3560
/* PM suspend hook.  On PowerMac hardware, ask the platform code to
 * power down the FireWire cell; on everything else this is a no-op.
 * NOTE(review): no pci_save_state()/controller quiesce is done here —
 * presumably handled elsewhere or simply not supported; confirm before
 * relying on suspend on non-PMac systems.  Always returns 0. */
static int ohci1394_pci_suspend (struct pci_dev *pdev, pm_message_t state)
{
#ifdef CONFIG_PPC_PMAC
	if (_machine == _MACH_Pmac) {
		struct device_node *of_node;

		/* Disable 1394 */
		of_node = pci_device_to_OF_node (pdev);
		if (of_node)
			pmac_call_feature(PMAC_FTR_1394_ENABLE, of_node, 0, 0);
	}
#endif

	return 0;
}
3576
3577
/* PCI class code for OHCI-programmed FireWire controllers
 * (serial-bus/FireWire base class with the OHCI programming interface). */
#define PCI_CLASS_FIREWIRE_OHCI     ((PCI_CLASS_SERIAL_FIREWIRE << 8) | 0x10)

/* Match any vendor/device advertising the OHCI FireWire class code;
 * the driver binds by class, not by explicit device IDs. */
static struct pci_device_id ohci1394_pci_tbl[] = {
	{
		.class =	PCI_CLASS_FIREWIRE_OHCI,
		.class_mask =	PCI_ANY_ID,
		.vendor =	PCI_ANY_ID,
		.device =	PCI_ANY_ID,
		.subvendor =	PCI_ANY_ID,
		.subdevice =	PCI_ANY_ID,
	},
	{ 0, },	/* terminator */
};

MODULE_DEVICE_TABLE(pci, ohci1394_pci_tbl);
3593
/* PCI driver glue: probe/remove and power-management entry points
 * registered with the PCI core in ohci1394_init(). */
static struct pci_driver ohci1394_pci_driver = {
	.name =		OHCI1394_DRIVER_NAME,
	.id_table =	ohci1394_pci_tbl,
	.probe =	ohci1394_pci_probe,
	.remove =	ohci1394_pci_remove,
	.resume =	ohci1394_pci_resume,
	.suspend =	ohci1394_pci_suspend,
};
3602
3603 \f
3604
3605 /***********************************
3606  * OHCI1394 Video Interface        *
3607  ***********************************/
3608
3609 /* essentially the only purpose of this code is to allow another
3610    module to hook into ohci's interrupt handler */
3611
3612 int ohci1394_stop_context(struct ti_ohci *ohci, int reg, char *msg)
3613 {
3614         int i=0;
3615
3616         /* stop the channel program if it's still running */
3617         reg_write(ohci, reg, 0x8000);
3618
3619         /* Wait until it effectively stops */
3620         while (reg_read(ohci, reg) & 0x400) {
3621                 i++;
3622                 if (i>5000) {
3623                         PRINT(KERN_ERR,
3624                               "Runaway loop while stopping context: %s...", msg ? msg : "");
3625                         return 1;
3626                 }
3627
3628                 mb();
3629                 udelay(10);
3630         }
3631         if (msg) PRINT(KERN_ERR, "%s: dma prg stopped", msg);
3632         return 0;
3633 }
3634
3635 void ohci1394_init_iso_tasklet(struct ohci1394_iso_tasklet *tasklet, int type,
3636                                void (*func)(unsigned long), unsigned long data)
3637 {
3638         tasklet_init(&tasklet->tasklet, func, data);
3639         tasklet->type = type;
3640         /* We init the tasklet->link field, so we can list_del() it
3641          * without worrying whether it was added to the list or not. */
3642         INIT_LIST_HEAD(&tasklet->link);
3643 }
3644
3645 int ohci1394_register_iso_tasklet(struct ti_ohci *ohci,
3646                                   struct ohci1394_iso_tasklet *tasklet)
3647 {
3648         unsigned long flags, *usage;
3649         int n, i, r = -EBUSY;
3650
3651         if (tasklet->type == OHCI_ISO_TRANSMIT) {
3652                 n = ohci->nb_iso_xmit_ctx;
3653                 usage = &ohci->it_ctx_usage;
3654         }
3655         else {
3656                 n = ohci->nb_iso_rcv_ctx;
3657                 usage = &ohci->ir_ctx_usage;
3658
3659                 /* only one receive context can be multichannel (OHCI sec 10.4.1) */
3660                 if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
3661                         if (test_and_set_bit(0, &ohci->ir_multichannel_used)) {
3662                                 return r;
3663                         }
3664                 }
3665         }
3666
3667         spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
3668
3669         for (i = 0; i < n; i++)
3670                 if (!test_and_set_bit(i, usage)) {
3671                         tasklet->context = i;
3672                         list_add_tail(&tasklet->link, &ohci->iso_tasklet_list);
3673                         r = 0;
3674                         break;
3675                 }
3676
3677         spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
3678
3679         return r;
3680 }
3681
/* Release an iso context claimed by ohci1394_register_iso_tasklet():
 * kill the tasklet, clear its usage bit (and the single multichannel
 * slot if applicable) and unlink it from the controller's list. */
void ohci1394_unregister_iso_tasklet(struct ti_ohci *ohci,
                                     struct ohci1394_iso_tasklet *tasklet)
{
	unsigned long flags;

	/* Ensure the tasklet body is not running and cannot be
	 * rescheduled before its context slot is recycled. */
	tasklet_kill(&tasklet->tasklet);

	spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);

	if (tasklet->type == OHCI_ISO_TRANSMIT)
		clear_bit(tasklet->context, &ohci->it_ctx_usage);
	else {
		clear_bit(tasklet->context, &ohci->ir_ctx_usage);

		/* Also free the one-per-controller multichannel slot. */
		if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
			clear_bit(0, &ohci->ir_multichannel_used);
		}
	}

	/* link was INIT_LIST_HEAD'd at init time, so this is safe even
	 * for a tasklet that was never on the list. */
	list_del(&tasklet->link);

	spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
}
3705
3706 EXPORT_SYMBOL(ohci1394_stop_context);
3707 EXPORT_SYMBOL(ohci1394_init_iso_tasklet);
3708 EXPORT_SYMBOL(ohci1394_register_iso_tasklet);
3709 EXPORT_SYMBOL(ohci1394_unregister_iso_tasklet);
3710
3711
3712 /***********************************
3713  * General module initialization   *
3714  ***********************************/
3715
3716 MODULE_AUTHOR("Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>");
3717 MODULE_DESCRIPTION("Driver for PCI OHCI IEEE-1394 controllers");
3718 MODULE_LICENSE("GPL");
3719
/* Module exit: unregister the PCI driver; the PCI core then invokes
 * ohci1394_pci_remove() for every bound device. */
static void __exit ohci1394_cleanup (void)
{
	pci_unregister_driver(&ohci1394_pci_driver);
}
3724
/* Module init: register the PCI driver; probing of matching devices is
 * driven by the PCI core via ohci1394_pci_probe(). */
static int __init ohci1394_init(void)
{
	return pci_register_driver(&ohci1394_pci_driver);
}
3729
3730 module_init(ohci1394_init);
3731 module_exit(ohci1394_cleanup);