1 /*
2  * User-space DMA and UIO based Redrapids Pocket Change CardBus driver
3  *
4  * Copyright 2008 Vijay Kumar <vijaykumar@bravegnu.org>
5  *
6  * Licensed under GPL version 2 only.
7  */
8
9 #include <linux/device.h>
10 #include <linux/module.h>
11 #include <linux/pci.h>
12 #include <linux/uio_driver.h>
13 #include <linux/spinlock.h>
14 #include <linux/cdev.h>
15 #include <linux/delay.h>
16 #include <linux/sysfs.h>
17 #include <linux/poll.h>
18 #include <linux/idr.h>
19 #include <linux/interrupt.h>
20 #include <linux/init.h>
21 #include <linux/ioctl.h>
22 #include <linux/io.h>
23
24 #include "poch.h"
25
26 #include <asm/cacheflush.h>
27
28 #ifndef PCI_VENDOR_ID_RRAPIDS
29 #define PCI_VENDOR_ID_RRAPIDS 0x17D2
30 #endif
31
32 #ifndef PCI_DEVICE_ID_RRAPIDS_POCKET_CHANGE
33 #define PCI_DEVICE_ID_RRAPIDS_POCKET_CHANGE 0x0351
34 #endif
35
36 #define POCH_NCHANNELS 2
37
38 #define MAX_POCH_CARDS 8
39 #define MAX_POCH_DEVICES (MAX_POCH_CARDS * POCH_NCHANNELS)
40
41 #define DRV_NAME "poch"
42 #define PFX      DRV_NAME ": "
43
44 /*
45  * BAR0 Bridge Register Definitions
46  */
47
48 #define BRIDGE_REV_REG                  0x0
49 #define BRIDGE_INT_MASK_REG             0x4
50 #define BRIDGE_INT_STAT_REG             0x8
51
52 #define BRIDGE_INT_ACTIVE               (0x1 << 31)
53 #define BRIDGE_INT_FPGA                 (0x1 << 2)
54 #define BRIDGE_INT_TEMP_FAIL            (0x1 << 1)
55 #define BRIDGE_INT_TEMP_WARN            (0x1 << 0)
56
57 #define BRIDGE_FPGA_RESET_REG           0xC
58
59 #define BRIDGE_CARD_POWER_REG           0x10
60 #define BRIDGE_CARD_POWER_EN            (0x1 << 0)
61 #define BRIDGE_CARD_POWER_PROG_DONE     (0x1 << 31)
62
63 #define BRIDGE_JTAG_REG                 0x14
64 #define BRIDGE_DMA_GO_REG               0x18
65 #define BRIDGE_STAT_0_REG               0x1C
66 #define BRIDGE_STAT_1_REG               0x20
67 #define BRIDGE_STAT_2_REG               0x24
68 #define BRIDGE_STAT_3_REG               0x28
69 #define BRIDGE_TEMP_STAT_REG            0x2C
70 #define BRIDGE_TEMP_THRESH_REG          0x30
71 #define BRIDGE_EEPROM_REVSEL_REG        0x34
72 #define BRIDGE_CIS_STRUCT_REG           0x100
73 #define BRIDGE_BOARDREV_REG             0x124
74
75 /*
76  * BAR1 FPGA Register Definitions
77  */
78
79 #define FPGA_IFACE_REV_REG              0x0
80 #define FPGA_RX_BLOCK_SIZE_REG          0x8
81 #define FPGA_TX_BLOCK_SIZE_REG          0xC
82 #define FPGA_RX_BLOCK_COUNT_REG         0x10
83 #define FPGA_TX_BLOCK_COUNT_REG         0x14
84 #define FPGA_RX_CURR_DMA_BLOCK_REG      0x18
85 #define FPGA_TX_CURR_DMA_BLOCK_REG      0x1C
86 #define FPGA_RX_GROUP_COUNT_REG         0x20
87 #define FPGA_TX_GROUP_COUNT_REG         0x24
88 #define FPGA_RX_CURR_GROUP_REG          0x28
89 #define FPGA_TX_CURR_GROUP_REG          0x2C
90 #define FPGA_RX_CURR_PCI_REG            0x38
91 #define FPGA_TX_CURR_PCI_REG            0x3C
92 #define FPGA_RX_GROUP0_START_REG        0x40
93 #define FPGA_TX_GROUP0_START_REG        0xC0
94 #define FPGA_DMA_DESC_1_REG             0x140
95 #define FPGA_DMA_DESC_2_REG             0x144
96 #define FPGA_DMA_DESC_3_REG             0x148
97 #define FPGA_DMA_DESC_4_REG             0x14C
98
99 #define FPGA_DMA_INT_STAT_REG           0x150
100 #define FPGA_DMA_INT_MASK_REG           0x154
101 #define FPGA_DMA_INT_RX         (1 << 0)
102 #define FPGA_DMA_INT_TX         (1 << 1)
103
104 #define FPGA_RX_GROUPS_PER_INT_REG      0x158
105 #define FPGA_TX_GROUPS_PER_INT_REG      0x15C
106 #define FPGA_DMA_ADR_PAGE_REG           0x160
107 #define FPGA_FPGA_REV_REG               0x200
108
109 #define FPGA_ADC_CLOCK_CTL_REG          0x204
110 #define FPGA_ADC_CLOCK_CTL_OSC_EN       (0x1 << 3)
111 #define FPGA_ADC_CLOCK_LOCAL_CLK        (0x1 | FPGA_ADC_CLOCK_CTL_OSC_EN)
112 #define FPGA_ADC_CLOCK_EXT_SAMP_CLK     0x0
113
114 #define FPGA_ADC_DAC_EN_REG             0x208
115 #define FPGA_ADC_DAC_EN_DAC_OFF         (0x1 << 1)
116 #define FPGA_ADC_DAC_EN_ADC_OFF         (0x1 << 0)
117
118 #define FPGA_INT_STAT_REG               0x20C
119 #define FPGA_INT_MASK_REG               0x210
120 #define FPGA_INT_PLL_UNLOCKED           (0x1 << 9)
121 #define FPGA_INT_DMA_CORE               (0x1 << 8)
122 #define FPGA_INT_TX_FF_EMPTY            (0x1 << 7)
123 #define FPGA_INT_RX_FF_EMPTY            (0x1 << 6)
124 #define FPGA_INT_TX_FF_OVRFLW           (0x1 << 3)
125 #define FPGA_INT_RX_FF_OVRFLW           (0x1 << 2)
126 #define FPGA_INT_TX_ACQ_DONE            (0x1 << 1)
127 #define FPGA_INT_RX_ACQ_DONE            (0x1)
128
129 #define FPGA_RX_ADC_CTL_REG             0x214
130 #define FPGA_RX_ADC_CTL_CONT_CAP        (0x0)
131 #define FPGA_RX_ADC_CTL_SNAP_CAP        (0x1)
132
133 #define FPGA_RX_ARM_REG                 0x21C
134
135 #define FPGA_DOM_REG                    0x224
136 #define FPGA_DOM_DCM_RESET              (0x1 << 5)
137 #define FPGA_DOM_SOFT_RESET             (0x1 << 4)
138 #define FPGA_DOM_DUAL_M_SG_DMA          (0x0)
139 #define FPGA_DOM_TARGET_ACCESS          (0x1)
140
141 #define FPGA_TX_CTL_REG                 0x228
142 #define FPGA_TX_CTL_FIFO_FLUSH          (0x1 << 9)
143 #define FPGA_TX_CTL_OUTPUT_ZERO         (0x0 << 2)
144 #define FPGA_TX_CTL_OUTPUT_CARDBUS      (0x1 << 2)
145 #define FPGA_TX_CTL_OUTPUT_ADC          (0x2 << 2)
146 #define FPGA_TX_CTL_OUTPUT_SNAPSHOT     (0x3 << 2)
147 #define FPGA_TX_CTL_LOOPBACK            (0x1 << 0)
148
149 #define FPGA_ENDIAN_MODE_REG            0x22C
150 #define FPGA_RX_FIFO_COUNT_REG          0x28C
151 #define FPGA_TX_ENABLE_REG              0x298
152 #define FPGA_TX_TRIGGER_REG             0x29C
153 #define FPGA_TX_DATAMEM_COUNT_REG       0x2A8
154 #define FPGA_CAP_FIFO_REG               0x300
155 #define FPGA_TX_SNAPSHOT_REG            0x8000
156
157 /*
158  * Channel Index Definitions
159  */
160
161 enum {
162         CHNO_RX_CHANNEL,
163         CHNO_TX_CHANNEL,
164 };
165
166 struct poch_dev;
167
168 enum channel_dir {
169         CHANNEL_DIR_RX,
170         CHANNEL_DIR_TX,
171 };
172
173 struct poch_group_info {
174         struct page *pg;
175         dma_addr_t dma_addr;
176         unsigned long user_offset;
177 };
178
179 struct channel_info {
180         unsigned int chno;
181
182         atomic_t sys_block_size;
183         atomic_t sys_group_size;
184         atomic_t sys_group_count;
185
186         enum channel_dir dir;
187
188         unsigned long block_size;
189         unsigned long group_size;
190         unsigned long group_count;
191
192         /* Contains the DMA address and VM offset of each group. */
193         struct poch_group_info *groups;
194
195         /* Contains the header and circular buffer exported to userspace. */
196         spinlock_t group_offsets_lock;
197         struct poch_cbuf_header *header;
198         struct page *header_pg;
199         unsigned long header_size;
200
201         /* Last group indicated as 'complete' to user space. */
202         unsigned int transfer;
203
204         wait_queue_head_t wq;
205
206         union {
207                 unsigned int data_available;
208                 unsigned int space_available;
209         };
210
211         void __iomem *bridge_iomem;
212         void __iomem *fpga_iomem;
213         spinlock_t *iomem_lock;
214
215         atomic_t free;
216         atomic_t inited;
217
218         /* Error counters */
219         struct poch_counters counters;
220         spinlock_t counters_lock;
221
222         struct device *dev;
223 };
224
225 struct poch_dev {
226         struct uio_info uio;
227         struct pci_dev *pci_dev;
228         unsigned int nchannels;
229         struct channel_info channels[POCH_NCHANNELS];
230         struct cdev cdev;
231
232         /* Counts the no. of channels that have been opened. On first
233          * open, the card is powered on. On last channel close, the
234          * card is powered off.
235          */
236         atomic_t usage;
237
238         void __iomem *bridge_iomem;
239         void __iomem *fpga_iomem;
240         spinlock_t iomem_lock;
241
242         struct device *dev;
243 };
244
245 static dev_t poch_first_dev;
246 static struct class *poch_cls;
247 static DEFINE_IDR(poch_ids);
248
249 static ssize_t store_block_size(struct device *dev,
250                                 struct device_attribute *attr,
251                                 const char *buf, size_t count)
252 {
253         struct channel_info *channel = dev_get_drvdata(dev);
254         unsigned long block_size;
255
256         sscanf(buf, "%lu", &block_size);
257         atomic_set(&channel->sys_block_size, block_size);
258
259         return count;
260 }
261 static DEVICE_ATTR(block_size, S_IWUSR|S_IWGRP, NULL, store_block_size);
262
263 static ssize_t store_group_size(struct device *dev,
264                                 struct device_attribute *attr,
265                                 const char *buf, size_t count)
266 {
267         struct channel_info *channel = dev_get_drvdata(dev);
268         unsigned long group_size;
269
270         sscanf(buf, "%lu", &group_size);
271         atomic_set(&channel->sys_group_size, group_size);
272
273         return count;
274 }
275 static DEVICE_ATTR(group_size, S_IWUSR|S_IWGRP, NULL, store_group_size);
276
277 static ssize_t store_group_count(struct device *dev,
278                                 struct device_attribute *attr,
279                                  const char *buf, size_t count)
280 {
281         struct channel_info *channel = dev_get_drvdata(dev);
282         unsigned long group_count;
283
284         sscanf(buf, "%lu", &group_count);
285         atomic_set(&channel->sys_group_count, group_count);
286
287         return count;
288 }
289 static DEVICE_ATTR(group_count, S_IWUSR|S_IWGRP, NULL, store_group_count);
290
291 static ssize_t show_direction(struct device *dev,
292                               struct device_attribute *attr, char *buf)
293 {
294         struct channel_info *channel = dev_get_drvdata(dev);
295         int len;
296
297         len = sprintf(buf, "%s\n", (channel->dir ? "tx" : "rx"));
298         return len;
299 }
300 static DEVICE_ATTR(dir, S_IRUSR|S_IRGRP, show_direction, NULL);
301
302 static ssize_t show_mmap_size(struct device *dev,
303                               struct device_attribute *attr, char *buf)
304 {
305         struct channel_info *channel = dev_get_drvdata(dev);
306         int len;
307         unsigned long mmap_size;
308         unsigned long group_pages;
309         unsigned long header_pages;
310         unsigned long total_group_pages;
311
312         /* FIXME: We do not need to add 1 if group_size is a multiple
313            of PAGE_SIZE. */
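        /* Worked example (assuming 4 KiB pages): group_size = 65536 and
         * group_count = 32 give group_pages = 17 (the unconditional +1
         * below costs an extra page), total_group_pages = 544, and with
         * a one-page header mmap_size = (1 + 544) * 4096 = 2232320
         * bytes.
         */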
314         group_pages = (channel->group_size / PAGE_SIZE) + 1;
315         header_pages = (channel->header_size / PAGE_SIZE) + 1;
316         total_group_pages = group_pages * channel->group_count;
317
318         mmap_size = (header_pages + total_group_pages) * PAGE_SIZE;
319         len = sprintf(buf, "%lu\n", mmap_size);
320         return len;
321 }
322 static DEVICE_ATTR(mmap_size, S_IRUSR|S_IRGRP, show_mmap_size, NULL);
323
324 static struct device_attribute *poch_class_attrs[] = {
325         &dev_attr_block_size,
326         &dev_attr_group_size,
327         &dev_attr_group_count,
328         &dev_attr_dir,
329         &dev_attr_mmap_size,
330 };
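/*
 * A minimal user-space configuration sketch (illustrative only; the
 * sysfs path is an assumption -- with the "pocketchange" class created
 * below, channel 0 is expected to show up as
 * /sys/class/pocketchange/ch0). The values are latched by
 * channel_latch_attr() when the channel device is opened, so they must
 * be written before open():
 *
 *   static void set_attr(const char *path, const char *val)
 *   {
 *           int fd = open(path, O_WRONLY);
 *
 *           if (fd >= 0) {
 *                   write(fd, val, strlen(val));
 *                   close(fd);
 *           }
 *   }
 *
 *   set_attr("/sys/class/pocketchange/ch0/block_size", "1024");
 *   set_attr("/sys/class/pocketchange/ch0/group_size", "65536");
 *   set_attr("/sys/class/pocketchange/ch0/group_count", "32");
 */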
331
332 static void poch_channel_free_groups(struct channel_info *channel)
333 {
334         unsigned long i;
335
336         for (i = 0; i < channel->group_count; i++) {
337                 struct poch_group_info *group;
338                 unsigned int order;
339
340                 group = &channel->groups[i];
341                 order = get_order(channel->group_size);
342                 if (group->pg)
343                         __free_pages(group->pg, order);
344         }
345 }
346
347 static int poch_channel_alloc_groups(struct channel_info *channel)
348 {
349         unsigned long i;
350         unsigned long group_pages;
351         unsigned long header_pages;
352
353         group_pages = (channel->group_size / PAGE_SIZE) + 1;
354         header_pages = (channel->header_size / PAGE_SIZE) + 1;
355
356         for (i = 0; i < channel->group_count; i++) {
357                 struct poch_group_info *group;
358                 unsigned int order;
359                 gfp_t gfp_mask;
360
361                 group = &channel->groups[i];
362                 order = get_order(channel->group_size);
363
364                 /*
365                  * __GFP_COMP is required here since we are going to
366                  * perform non-linear mapping to userspace. For more
367                  * information read the vm_insert_page() function
368                  * comments.
369                  */
370
371                 gfp_mask = GFP_KERNEL | GFP_DMA32 | __GFP_ZERO | __GFP_COMP;
372                 group->pg = alloc_pages(gfp_mask, order);
373                 if (!group->pg) {
374                         poch_channel_free_groups(channel);
375                         return -ENOMEM;
376                 }
377
378                 /* FIXME: This is the physical address, not the bus
379                  * address!  This won't work on architectures that
380                  * have an IOMMU. Can we use pci_map_single() for
381                  * this?
382                  */
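                /* A possible direction (untested sketch): map each group
                 * through the PCI DMA API instead, e.g.
                 *
                 *   group->dma_addr = pci_map_single(pdev,
                 *                             page_address(group->pg),
                 *                             channel->group_size,
                 *                             PCI_DMA_BIDIRECTIONAL);
                 *
                 * where 'pdev' is the owning struct pci_dev (not currently
                 * passed to this function), with a matching
                 * pci_unmap_single() in poch_channel_free_groups().
                 */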
383                 group->dma_addr = page_to_pfn(group->pg) * PAGE_SIZE;
384                 group->user_offset =
385                         (header_pages + (i * group_pages)) * PAGE_SIZE;
386
387                 printk(KERN_INFO PFX "%lu: user_offset: 0x%lx dma: 0x%llx\n",
388                        i, group->user_offset, (unsigned long long)group->dma_addr);
389         }
390
391         return 0;
392 }
393
394 static void channel_latch_attr(struct channel_info *channel)
395 {
396         channel->group_count = atomic_read(&channel->sys_group_count);
397         channel->group_size = atomic_read(&channel->sys_group_size);
398         channel->block_size = atomic_read(&channel->sys_block_size);
399 }
400
401 /*
402  * Configure DMA group registers
403  */
404 static void channel_dma_init(struct channel_info *channel)
405 {
406         void __iomem *fpga = channel->fpga_iomem;
407         u32 group_regs_base;
408         u32 group_reg;
409         unsigned int page;
410         unsigned int group_in_page;
411         unsigned long i;
412         u32 block_size_reg;
413         u32 block_count_reg;
414         u32 group_count_reg;
415         u32 groups_per_int_reg;
416         u32 curr_pci_reg;
417
418         if (channel->chno == CHNO_RX_CHANNEL) {
419                 group_regs_base = FPGA_RX_GROUP0_START_REG;
420                 block_size_reg = FPGA_RX_BLOCK_SIZE_REG;
421                 block_count_reg = FPGA_RX_BLOCK_COUNT_REG;
422                 group_count_reg = FPGA_RX_GROUP_COUNT_REG;
423                 groups_per_int_reg = FPGA_RX_GROUPS_PER_INT_REG;
424                 curr_pci_reg = FPGA_RX_CURR_PCI_REG;
425         } else {
426                 group_regs_base = FPGA_TX_GROUP0_START_REG;
427                 block_size_reg = FPGA_TX_BLOCK_SIZE_REG;
428                 block_count_reg = FPGA_TX_BLOCK_COUNT_REG;
429                 group_count_reg = FPGA_TX_GROUP_COUNT_REG;
430                 groups_per_int_reg = FPGA_TX_GROUPS_PER_INT_REG;
431                 curr_pci_reg = FPGA_TX_CURR_PCI_REG;
432         }
433
434         printk(KERN_WARNING "block_size, group_size, group_count\n");
435         iowrite32(channel->block_size, fpga + block_size_reg);
436         iowrite32(channel->group_size / channel->block_size,
437                   fpga + block_count_reg);
438         iowrite32(channel->group_count, fpga + group_count_reg);
439         /* FIXME: Hardcoded groups per int. Get it from sysfs? */
440         iowrite32(1, fpga + groups_per_int_reg);
441
442         /* Unlock PCI address? Not defined in the data sheet, but used
443          * in the reference code by Redrapids.
444          */
445         iowrite32(0x1, fpga + curr_pci_reg);
446
447         /* The DMA address page register is shared between the RX and
448          * TX channels, so acquire lock.
449          */
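        /* The group DMA address registers are windowed: each page selected
         * through FPGA_DMA_ADR_PAGE_REG exposes 32 group slots. For
         * example, group 39 goes to page 1, slot 7 (39 / 32 == 1,
         * 39 % 32 == 7), i.e. register group_regs_base + 7 * 4.
         */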
450         spin_lock(channel->iomem_lock);
451         for (i = 0; i < channel->group_count; i++) {
452                 page = i / 32;
453                 group_in_page = i % 32;
454
455                 group_reg = group_regs_base + (group_in_page * 4);
456
457                 iowrite32(page, fpga + FPGA_DMA_ADR_PAGE_REG);
458                 iowrite32(channel->groups[i].dma_addr, fpga + group_reg);
459         }
460         for (i = 0; i < channel->group_count; i++) {
461                 page = i / 32;
462                 group_in_page = i % 32;
463
464                 group_reg = group_regs_base + (group_in_page * 4);
465
466                 iowrite32(page, fpga + FPGA_DMA_ADR_PAGE_REG);
467                 printk(KERN_INFO PFX "%lu: read dma_addr: 0x%x\n", i,
468                        ioread32(fpga + group_reg));
469         }
470         spin_unlock(channel->iomem_lock);
471
472 }
473
474 static int poch_channel_alloc_header(struct channel_info *channel)
475 {
476         struct poch_cbuf_header *header = channel->header;
477         unsigned long group_offset_size;
478         unsigned long tot_group_offsets_size;
479
480         /* Allocate memory to hold the header exported to userspace. */
481         group_offset_size = sizeof(header->group_offsets[0]);
482         tot_group_offsets_size = group_offset_size * channel->group_count;
483         channel->header_size = sizeof(*header) + tot_group_offsets_size;
484         channel->header_pg = alloc_pages(GFP_KERNEL | __GFP_ZERO,
485                                          get_order(channel->header_size));
486         if (!channel->header_pg)
487                 return -ENOMEM;
488
489         channel->header = page_address(channel->header_pg);
490
491         return 0;
492 }
493
494 static void poch_channel_free_header(struct channel_info *channel)
495 {
496         unsigned int order;
497
498         order = get_order(channel->header_size);
499         __free_pages(channel->header_pg, order);
500 }
501
502 static void poch_channel_init_header(struct channel_info *channel)
503 {
504         int i;
505         struct poch_group_info *groups;
506         s32 *group_offsets;
507
508         channel->header->group_size_bytes = channel->group_size;
509         channel->header->group_count = channel->group_count;
510
511         spin_lock_init(&channel->group_offsets_lock);
512
513         group_offsets = channel->header->group_offsets;
514         groups = channel->groups;
515
516         for (i = 0; i < channel->group_count; i++) {
517                 if (channel->dir == CHANNEL_DIR_RX)
518                         group_offsets[i] = -1;
519                 else
520                         group_offsets[i] = groups[i].user_offset;
521         }
522 }
523
524 static void __poch_channel_clear_counters(struct channel_info *channel)
525 {
526         channel->counters.pll_unlock = 0;
527         channel->counters.fifo_empty = 0;
528         channel->counters.fifo_overflow = 0;
529 }
530
531 static int poch_channel_init(struct channel_info *channel,
532                              struct poch_dev *poch_dev)
533 {
534         struct pci_dev *pdev = poch_dev->pci_dev;
535         struct device *dev = &pdev->dev;
536         unsigned long alloc_size;
537         int ret;
538
539         printk(KERN_WARNING "channel_latch_attr\n");
540
541         channel_latch_attr(channel);
542
543         channel->transfer = 0;
544
545         /* Allocate memory to hold group information. */
546         alloc_size = channel->group_count * sizeof(struct poch_group_info);
547         channel->groups = kzalloc(alloc_size, GFP_KERNEL);
548         if (!channel->groups) {
549                 dev_err(dev, "error allocating memory for group info\n");
550                 ret = -ENOMEM;
551                 goto out;
552         }
553
554         printk(KERN_WARNING "poch_channel_alloc_groups\n");
555
556         ret = poch_channel_alloc_groups(channel);
557         if (ret) {
558                 dev_err(dev, "error allocating groups of order %d\n",
559                         get_order(channel->group_size));
560                 goto out_free_group_info;
561         }
562
563         ret = poch_channel_alloc_header(channel);
564         if (ret) {
565                 dev_err(dev, "error allocating user space header\n");
566                 goto out_free_groups;
567         }
568
569         channel->fpga_iomem = poch_dev->fpga_iomem;
570         channel->bridge_iomem = poch_dev->bridge_iomem;
571         channel->iomem_lock = &poch_dev->iomem_lock;
572         spin_lock_init(&channel->counters_lock);
573
574         __poch_channel_clear_counters(channel);
575
576         printk(KERN_WARNING "poch_channel_init_header\n");
577
578         poch_channel_init_header(channel);
579
580         return 0;
581
582  out_free_groups:
583         poch_channel_free_groups(channel);
584  out_free_group_info:
585         kfree(channel->groups);
586  out:
587         return ret;
588 }
589
590 static int poch_wait_fpga_prog(void __iomem *bridge)
591 {
592         unsigned long total_wait;
593         const unsigned long wait_period = 100;
594         /* FIXME: Get the actual timeout */
595         const unsigned long prog_timeo = 10000; /* 10 Seconds */
596         u32 card_power;
597
598         printk(KERN_WARNING "poch_wait_fpg_prog\n");
599
600         printk(KERN_INFO PFX "programming fpga ...\n");
601         total_wait = 0;
602         while (1) {
603                 msleep(wait_period);
604                 total_wait += wait_period;
605
606                 card_power = ioread32(bridge + BRIDGE_CARD_POWER_REG);
607                 if (card_power & BRIDGE_CARD_POWER_PROG_DONE) {
608                         printk(KERN_INFO PFX "programming done\n");
609                         return 0;
610                 }
611                 if (total_wait > prog_timeo) {
612                         printk(KERN_ERR PFX
613                                "timed out while programming FPGA\n");
614                         return -EIO;
615                 }
616         }
617 }
618
619 static void poch_card_power_off(struct poch_dev *poch_dev)
620 {
621         void __iomem *bridge = poch_dev->bridge_iomem;
622         u32 card_power;
623
624         iowrite32(0, bridge + BRIDGE_INT_MASK_REG);
625         iowrite32(0, bridge + BRIDGE_DMA_GO_REG);
626
627         card_power = ioread32(bridge + BRIDGE_CARD_POWER_REG);
628         iowrite32(card_power & ~BRIDGE_CARD_POWER_EN,
629                   bridge + BRIDGE_CARD_POWER_REG);
630 }
631
632 enum clk_src {
633         CLK_SRC_ON_BOARD,
634         CLK_SRC_EXTERNAL
635 };
636
637 static void poch_card_clock_on(void __iomem *fpga)
638 {
639         /* FIXME: Get this data through sysfs? */
640         enum clk_src clk_src = CLK_SRC_ON_BOARD;
641
642         if (clk_src == CLK_SRC_ON_BOARD) {
643                 iowrite32(FPGA_ADC_CLOCK_LOCAL_CLK | FPGA_ADC_CLOCK_CTL_OSC_EN,
644                           fpga + FPGA_ADC_CLOCK_CTL_REG);
645         } else if (clk_src == CLK_SRC_EXTERNAL) {
646                 iowrite32(FPGA_ADC_CLOCK_EXT_SAMP_CLK,
647                           fpga + FPGA_ADC_CLOCK_CTL_REG);
648         }
649 }
650
651 static int poch_card_power_on(struct poch_dev *poch_dev)
652 {
653         void __iomem *bridge = poch_dev->bridge_iomem;
654         void __iomem *fpga = poch_dev->fpga_iomem;
655
656         iowrite32(BRIDGE_CARD_POWER_EN, bridge + BRIDGE_CARD_POWER_REG);
657
658         if (poch_wait_fpga_prog(bridge) != 0) {
659                 poch_card_power_off(poch_dev);
660                 return -EIO;
661         }
662
663         poch_card_clock_on(fpga);
664
665         /* Sync to new clock, reset state machines, set DMA mode. */
666         iowrite32(FPGA_DOM_DCM_RESET | FPGA_DOM_SOFT_RESET
667                   | FPGA_DOM_DUAL_M_SG_DMA, fpga + FPGA_DOM_REG);
668
669         /* FIXME: The time required for sync. needs to be tuned. */
670         msleep(1000);
671
672         return 0;
673 }
674
675 static void poch_channel_analog_on(struct channel_info *channel)
676 {
677         void __iomem *fpga = channel->fpga_iomem;
678         u32 adc_dac_en;
679
680         spin_lock(channel->iomem_lock);
681         adc_dac_en = ioread32(fpga + FPGA_ADC_DAC_EN_REG);
682         switch (channel->chno) {
683         case CHNO_RX_CHANNEL:
684                 iowrite32(adc_dac_en & ~FPGA_ADC_DAC_EN_ADC_OFF,
685                           fpga + FPGA_ADC_DAC_EN_REG);
686                 break;
687         case CHNO_TX_CHANNEL:
688                 iowrite32(adc_dac_en & ~FPGA_ADC_DAC_EN_DAC_OFF,
689                           fpga + FPGA_ADC_DAC_EN_REG);
690                 break;
691         }
692         spin_unlock(channel->iomem_lock);
693 }
694
695 static int poch_open(struct inode *inode, struct file *filp)
696 {
697         struct poch_dev *poch_dev;
698         struct channel_info *channel;
699         void __iomem *bridge;
700         void __iomem *fpga;
701         int chno;
702         int usage;
703         int ret;
704
705         poch_dev = container_of(inode->i_cdev, struct poch_dev, cdev);
706         bridge = poch_dev->bridge_iomem;
707         fpga = poch_dev->fpga_iomem;
708
709         chno = iminor(inode) % poch_dev->nchannels;
710         channel = &poch_dev->channels[chno];
711
712         if (!atomic_dec_and_test(&channel->free)) {
713                 atomic_inc(&channel->free);
714                 ret = -EBUSY;
715                 goto out;
716         }
717
718         usage = atomic_inc_return(&poch_dev->usage);
719
720         printk(KERN_WARNING "poch_card_power_on\n");
721
722         if (usage == 1) {
723                 ret = poch_card_power_on(poch_dev);
724                 if (ret)
725                         goto out_dec_usage;
726         }
727
728         printk(KERN_INFO "CardBus Bridge Revision: %x\n",
729                ioread32(bridge + BRIDGE_REV_REG));
730         printk(KERN_INFO "CardBus Interface Revision: %x\n",
731                ioread32(fpga + FPGA_IFACE_REV_REG));
732
733         channel->chno = chno;
734         filp->private_data = channel;
735
736         printk(KERN_WARNING "poch_channel_init\n");
737
738         ret = poch_channel_init(channel, poch_dev);
739         if (ret)
740                 goto out_power_off;
741
742         printk(KERN_WARNING "poch_channel_analog_on\n");
743
744         poch_channel_analog_on(channel);
745
746         printk(KERN_WARNING "channel_dma_init\n");
747
748         channel_dma_init(channel);
749
750         if (usage == 1) {
751                 printk(KERN_WARNING "setting up DMA\n");
752
753                 /* Initialize DMA Controller. */
754                 iowrite32(FPGA_CAP_FIFO_REG, bridge + BRIDGE_STAT_2_REG);
755                 iowrite32(FPGA_DMA_DESC_1_REG, bridge + BRIDGE_STAT_3_REG);
756
757                 ioread32(fpga + FPGA_DMA_INT_STAT_REG);
758                 ioread32(fpga + FPGA_INT_STAT_REG);
759                 ioread32(bridge + BRIDGE_INT_STAT_REG);
760
761                 /* Initialize interrupts. FIXME: Enable temperature
762                  * handling. We are enabling both Tx and Rx channel
763                  * interrupts here. Do we need to enable interrupts
764                  * only for the current channel? Anyway, we won't get
765                  * the interrupt unless the DMA is activated.
766                  */
767                 iowrite32(BRIDGE_INT_FPGA, bridge + BRIDGE_INT_MASK_REG);
768                 iowrite32(FPGA_INT_DMA_CORE
769                           | FPGA_INT_PLL_UNLOCKED
770                           | FPGA_INT_TX_FF_EMPTY
771                           | FPGA_INT_RX_FF_EMPTY
772                           | FPGA_INT_TX_FF_OVRFLW
773                           | FPGA_INT_RX_FF_OVRFLW,
774                           fpga + FPGA_INT_MASK_REG);
775                 iowrite32(FPGA_DMA_INT_RX | FPGA_DMA_INT_TX,
776                           fpga + FPGA_DMA_INT_MASK_REG);
777         }
778
779         if (channel->dir == CHANNEL_DIR_TX) {
780                 /* Flush TX FIFO and output data from cardbus. */
781                 iowrite32(FPGA_TX_CTL_FIFO_FLUSH
782                           | FPGA_TX_CTL_OUTPUT_CARDBUS,
783                           fpga + FPGA_TX_CTL_REG);
784         }
785
786         atomic_inc(&channel->inited);
787
788         return 0;
789
790  out_power_off:
791         if (usage == 1)
792                 poch_card_power_off(poch_dev);
793  out_dec_usage:
794         atomic_dec(&poch_dev->usage);
795         atomic_inc(&channel->free);
796  out:
797         return ret;
798 }
799
800 static int poch_release(struct inode *inode, struct file *filp)
801 {
802         struct channel_info *channel = filp->private_data;
803         struct poch_dev *poch_dev;
804         int usage;
805
806         poch_dev = container_of(inode->i_cdev, struct poch_dev, cdev);
807
808         usage = atomic_dec_return(&poch_dev->usage);
809         if (usage == 0) {
810                 printk(KERN_WARNING "poch_card_power_off\n");
811                 poch_card_power_off(poch_dev);
812         }
813
814         atomic_dec(&channel->inited);
815         poch_channel_free_header(channel);
816         poch_channel_free_groups(channel);
817         kfree(channel->groups);
818         atomic_inc(&channel->free);
819
820         return 0;
821 }
822
823 /*
824  * Map the header and the group buffers to user space.
825  */
826 static int poch_mmap(struct file *filp, struct vm_area_struct *vma)
827 {
828         struct channel_info *channel = filp->private_data;
829
830         unsigned long start;
831         unsigned long size;
832
833         unsigned long group_pages;
834         unsigned long header_pages;
835         unsigned long total_group_pages;
836
837         int pg_num;
838         struct page *pg;
839
840         int i;
841         int ret;
842
843         printk(KERN_WARNING "poch_mmap\n");
844
845         if (vma->vm_pgoff) {
846                 printk(KERN_WARNING PFX "page offset: %lu\n", vma->vm_pgoff);
847                 return -EINVAL;
848         }
849
850         group_pages = (channel->group_size / PAGE_SIZE) + 1;
851         header_pages = (channel->header_size / PAGE_SIZE) + 1;
852         total_group_pages = group_pages * channel->group_count;
853
854         size = vma->vm_end - vma->vm_start;
855         if (size != (header_pages + total_group_pages) * PAGE_SIZE) {
856                 printk(KERN_WARNING PFX "required %lu bytes\n", size);
857                 return -EINVAL;
858         }
859
860         start = vma->vm_start;
861
862         /* FIXME: Cleanup required on failure? */
863         pg = channel->header_pg;
864         for (pg_num = 0; pg_num < header_pages; pg_num++, pg++) {
865                 printk(KERN_DEBUG PFX "page_count: %d\n", page_count(pg));
866                 printk(KERN_DEBUG PFX "%d: header: 0x%lx\n", pg_num, start);
867                 ret = vm_insert_page(vma, start, pg);
868                 if (ret) {
869                         printk(KERN_DEBUG "vm_insert 1 failed at %lx\n", start);
870                         return ret;
871                 }
872                 start += PAGE_SIZE;
873         }
874
875         for (i = 0; i < channel->group_count; i++) {
876                 pg = channel->groups[i].pg;
877                 for (pg_num = 0; pg_num < group_pages; pg_num++, pg++) {
878                         printk(KERN_DEBUG PFX "%d: group %d: 0x%lx\n",
879                                pg_num, i, start);
880                         ret = vm_insert_page(vma, start, pg);
881                         if (ret) {
882                                 printk(KERN_DEBUG PFX
883                                        "vm_insert 2 failed at %d\n", pg_num);
884                                 return ret;
885                         }
886                         start += PAGE_SIZE;
887                 }
888         }
889
890         return 0;
891 }
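/*
 * A user-space mapping sketch (illustrative only; the device node name
 * is an assumption -- the driver only registers a "poch" chrdev region,
 * so the actual /dev entry depends on udev). The mapping begins with
 * struct poch_cbuf_header, followed by the group buffers at the offsets
 * published in group_offsets[], and must span exactly the size reported
 * by the mmap_size sysfs attribute with a zero page offset:
 *
 *   int fd = open("/dev/poch0", O_RDWR);
 *   void *base = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
 *                     MAP_SHARED, fd, 0);
 *   struct poch_cbuf_header *hdr = base;
 *   char *group_i = (char *)base + hdr->group_offsets[i];
 */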
892
893 /*
894  * Check whether there is some group that the user space has not
895  * consumed yet. When the user space consumes a group, it sets it to
896  * -1. Consuming means reading data in the case of RX and filling a
897  * buffer in the case of TX.
898  */
899 static int poch_channel_available(struct channel_info *channel)
900 {
901         int i;
902
903         spin_lock_irq(&channel->group_offsets_lock);
904
905         for (i = 0; i < channel->group_count; i++) {
906                 if (channel->dir == CHANNEL_DIR_RX
907                     && channel->header->group_offsets[i] == -1) {
908                         spin_unlock_irq(&channel->group_offsets_lock);
909                         return 1;
910                 }
911
912                 if (channel->dir == CHANNEL_DIR_TX
913                     && channel->header->group_offsets[i] != -1) {
914                         spin_unlock_irq(&channel->group_offsets_lock);
915                         return 1;
916                 }
917         }
918
919         spin_unlock_irq(&channel->group_offsets_lock);
920
921         return 0;
922 }
923
924 static unsigned int poch_poll(struct file *filp, poll_table *pt)
925 {
926         struct channel_info *channel = filp->private_data;
927         unsigned int ret = 0;
928
929         poll_wait(filp, &channel->wq, pt);
930
931         if (poch_channel_available(channel)) {
932                 if (channel->dir == CHANNEL_DIR_RX)
933                         ret = POLLIN | POLLRDNORM;
934                 else
935                         ret = POLLOUT | POLLWRNORM;
936         }
937
938         return ret;
939 }
940
941 static int poch_ioctl(struct inode *inode, struct file *filp,
942                       unsigned int cmd, unsigned long arg)
943 {
944         struct channel_info *channel = filp->private_data;
945         void __iomem *fpga = channel->fpga_iomem;
946         void __iomem *bridge = channel->bridge_iomem;
947         void __user *argp = (void __user *)arg;
948         struct vm_area_struct *vms;
949         struct poch_counters counters;
950         int ret;
951
952         switch (cmd) {
953         case POCH_IOC_TRANSFER_START:
954                 switch (channel->chno) {
955                 case CHNO_TX_CHANNEL:
956                         printk(KERN_INFO PFX "ioctl: Tx start\n");
957                         iowrite32(0x1, fpga + FPGA_TX_TRIGGER_REG);
958                         iowrite32(0x1, fpga + FPGA_TX_ENABLE_REG);
959
960                         /* FIXME: Does it make sense to do a DMA GO
961                          * twice, once in Tx and once in Rx.
962                          */
963                         iowrite32(0x1, bridge + BRIDGE_DMA_GO_REG);
964                         break;
965                 case CHNO_RX_CHANNEL:
966                         printk(KERN_INFO PFX "ioctl: Rx start\n");
967                         iowrite32(0x1, fpga + FPGA_RX_ARM_REG);
968                         iowrite32(0x1, bridge + BRIDGE_DMA_GO_REG);
969                         break;
970                 }
971                 break;
972         case POCH_IOC_TRANSFER_STOP:
973                 switch (channel->chno) {
974                 case CHNO_TX_CHANNEL:
975                         printk(KERN_INFO PFX "ioctl: Tx stop\n");
976                         iowrite32(0x0, fpga + FPGA_TX_ENABLE_REG);
977                         iowrite32(0x0, fpga + FPGA_TX_TRIGGER_REG);
978                         iowrite32(0x0, bridge + BRIDGE_DMA_GO_REG);
979                         break;
980                 case CHNO_RX_CHANNEL:
981                         printk(KERN_INFO PFX "ioctl: Rx stop\n");
982                         iowrite32(0x0, fpga + FPGA_RX_ARM_REG);
983                         iowrite32(0x0, bridge + BRIDGE_DMA_GO_REG);
984                         break;
985                 }
986                 break;
987         case POCH_IOC_GET_COUNTERS:
988                 if (!access_ok(VERIFY_WRITE, argp, sizeof(struct poch_counters)))
989                         return -EFAULT;
990
991                 spin_lock_irq(&channel->counters_lock);
992                 counters = channel->counters;
993                 __poch_channel_clear_counters(channel);
994                 spin_unlock_irq(&channel->counters_lock);
995
996                 ret = copy_to_user(argp, &counters,
997                                    sizeof(struct poch_counters));
998                 if (ret)
999                         return -EFAULT;
1000
1001                 break;
1002         case POCH_IOC_SYNC_GROUP_FOR_USER:
1003         case POCH_IOC_SYNC_GROUP_FOR_DEVICE:
1004                 vms = find_vma(current->mm, arg);
1005                 if (!vms)
1006                         /* Address not mapped. */
1007                         return -EINVAL;
1008                 if (vms->vm_file != filp)
1009                         /* Address mapped from different device/file. */
1010                         return -EINVAL;
1011
1012                 flush_cache_range(vms, arg, arg + channel->group_size);
1013                 break;
1014         }
1015         return 0;
1016 }
1017
1018 static struct file_operations poch_fops = {
1019         .owner = THIS_MODULE,
1020         .open = poch_open,
1021         .release = poch_release,
1022         .ioctl = poch_ioctl,
1023         .poll = poch_poll,
1024         .mmap = poch_mmap
1025 };
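/*
 * A user-space RX sketch tying the file operations together
 * (illustrative only; error handling omitted). POCH_IOC_TRANSFER_START
 * arms the channel and starts DMA, poll() blocks until a group
 * completes, and POCH_IOC_GET_COUNTERS copies out and clears the error
 * counters. fd, base and hdr are set up as in the mmap sketch above:
 *
 *   struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *   struct poch_counters cnt;
 *
 *   ioctl(fd, POCH_IOC_TRANSFER_START, 0);
 *   while (running) {
 *           poll(&pfd, 1, -1);
 *           process_ready_groups(hdr, base);
 *   }
 *   ioctl(fd, POCH_IOC_TRANSFER_STOP, 0);
 *   ioctl(fd, POCH_IOC_GET_COUNTERS, &cnt);
 *
 * process_ready_groups() is a hypothetical application helper that
 * walks hdr->group_offsets[] and consumes the completed groups.
 */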
1026
1027 static void poch_irq_dma(struct channel_info *channel)
1028 {
1029         u32 prev_transfer;
1030         u32 curr_transfer;
1031         long groups_done;
1032         unsigned long i, j;
1033         struct poch_group_info *groups;
1034         s32 *group_offsets;
1035         u32 curr_group_reg;
1036
1037         if (!atomic_read(&channel->inited))
1038                 return;
1039
1040         prev_transfer = channel->transfer;
1041
1042         if (channel->chno == CHNO_RX_CHANNEL)
1043                 curr_group_reg = FPGA_RX_CURR_GROUP_REG;
1044         else
1045                 curr_group_reg = FPGA_TX_CURR_GROUP_REG;
1046
1047         curr_transfer = ioread32(channel->fpga_iomem + curr_group_reg);
1048
1049         groups_done = curr_transfer - prev_transfer;
1050         /* Check wrap over, and handle it. */
1051         if (groups_done <= 0)
1052                 groups_done += channel->group_count;
1053
1054         group_offsets = channel->header->group_offsets;
1055         groups = channel->groups;
1056
1057         spin_lock(&channel->group_offsets_lock);
1058
1059         for (i = 0; i < groups_done; i++) {
1060                 j = (prev_transfer + i) % channel->group_count;
1061                 if (channel->dir == CHANNEL_DIR_RX)
1062                         group_offsets[j] = -1;
1063                 else
1064                         group_offsets[j] = groups[j].user_offset;
1065         }
1066
1067         spin_unlock(&channel->group_offsets_lock);
1068
1069         channel->transfer = curr_transfer;
1070
1071         wake_up_interruptible(&channel->wq);
1072 }
1073
1074 static irqreturn_t poch_irq_handler(int irq, void *p)
1075 {
1076         struct poch_dev *poch_dev = p;
1077         void __iomem *bridge = poch_dev->bridge_iomem;
1078         void __iomem *fpga = poch_dev->fpga_iomem;
1079         struct channel_info *channel_rx = &poch_dev->channels[CHNO_RX_CHANNEL];
1080         struct channel_info *channel_tx = &poch_dev->channels[CHNO_TX_CHANNEL];
1081         u32 bridge_stat;
1082         u32 fpga_stat;
1083         u32 dma_stat;
1084
1085         bridge_stat = ioread32(bridge + BRIDGE_INT_STAT_REG);
1086         fpga_stat = ioread32(fpga + FPGA_INT_STAT_REG);
1087         dma_stat = ioread32(fpga + FPGA_DMA_INT_STAT_REG);
1088
1089         ioread32(fpga + FPGA_DMA_INT_STAT_REG);
1090         ioread32(fpga + FPGA_INT_STAT_REG);
1091         ioread32(bridge + BRIDGE_INT_STAT_REG);
1092
1093         if (bridge_stat & BRIDGE_INT_FPGA) {
1094                 if (fpga_stat & FPGA_INT_DMA_CORE) {
1095                         if (dma_stat & FPGA_DMA_INT_RX)
1096                                 poch_irq_dma(channel_rx);
1097                         if (dma_stat & FPGA_DMA_INT_TX)
1098                                 poch_irq_dma(channel_tx);
1099                 }
1100                 if (fpga_stat & FPGA_INT_PLL_UNLOCKED) {
1101                         channel_tx->counters.pll_unlock++;
1102                         channel_rx->counters.pll_unlock++;
1103                         if (printk_ratelimit())
1104                                 printk(KERN_WARNING PFX "PLL unlocked\n");
1105                 }
1106                 if (fpga_stat & FPGA_INT_TX_FF_EMPTY)
1107                         channel_tx->counters.fifo_empty++;
1108                 if (fpga_stat & FPGA_INT_TX_FF_OVRFLW)
1109                         channel_tx->counters.fifo_overflow++;
1110                 if (fpga_stat & FPGA_INT_RX_FF_EMPTY)
1111                         channel_rx->counters.fifo_empty++;
1112                 if (fpga_stat & FPGA_INT_RX_FF_OVRFLW)
1113                         channel_rx->counters.fifo_overflow++;
1114
1115                 /*
1116                  * FIXME: These errors should be notified through the
1117                  * poll interface as POLLERR.
1118                  */
1119
1120                 /* Re-enable interrupts. */
1121                 iowrite32(BRIDGE_INT_FPGA, bridge + BRIDGE_INT_MASK_REG);
1122
1123                 return IRQ_HANDLED;
1124         }
1125
1126         return IRQ_NONE;
1127 }
1128
1129 static void poch_class_dev_unregister(struct poch_dev *poch_dev, int id)
1130 {
1131         int i, j;
1132         int nattrs;
1133         struct channel_info *channel;
1134         dev_t devno;
1135
1136         if (poch_dev->dev == NULL)
1137                 return;
1138
1139         for (i = 0; i < poch_dev->nchannels; i++) {
1140                 channel = &poch_dev->channels[i];
1141                 devno = poch_first_dev + (id * poch_dev->nchannels) + i;
1142
1143                 if (!channel->dev)
1144                         continue;
1145
1146                 nattrs = ARRAY_SIZE(poch_class_attrs);
1147                 for (j = 0; j < nattrs; j++)
1148                         device_remove_file(channel->dev, poch_class_attrs[j]);
1149
1150                 device_unregister(channel->dev);
1151         }
1152
1153         device_unregister(poch_dev->dev);
1154 }
1155
1156 static int __devinit poch_class_dev_register(struct poch_dev *poch_dev,
1157                                              int id)
1158 {
1159         struct device *dev = &poch_dev->pci_dev->dev;
1160         int i, j;
1161         int nattrs;
1162         int ret;
1163         struct channel_info *channel;
1164         dev_t devno;
1165
1166         poch_dev->dev = device_create(poch_cls, &poch_dev->pci_dev->dev,
1167                                       MKDEV(0, 0), NULL, "poch%d", id);
1168         if (IS_ERR(poch_dev->dev)) {
1169                 dev_err(dev, "error creating parent class device");
1170                 ret = PTR_ERR(poch_dev->dev);
1171                 poch_dev->dev = NULL;
1172                 return ret;
1173         }
1174
1175         for (i = 0; i < poch_dev->nchannels; i++) {
1176                 channel = &poch_dev->channels[i];
1177
1178                 devno = poch_first_dev + (id * poch_dev->nchannels) + i;
1179                 channel->dev = device_create(poch_cls, poch_dev->dev, devno,
1180                                              NULL, "ch%d", i);
1181                 if (IS_ERR(channel->dev)) {
1182                         dev_err(dev, "error creating channel class device");
1183                         ret = PTR_ERR(channel->dev);
1184                         channel->dev = NULL;
1185                         poch_class_dev_unregister(poch_dev, id);
1186                         return ret;
1187                 }
1188
1189                 dev_set_drvdata(channel->dev, channel);
1190                 nattrs = ARRAY_SIZE(poch_class_attrs);
1191                 for (j = 0; j < nattrs; j++) {
1192                         ret = device_create_file(channel->dev,
1193                                                  poch_class_attrs[j]);
1194                         if (ret) {
1195                                 dev_err(dev, "error creating attribute file\n");
1196                                 poch_class_dev_unregister(poch_dev, id);
1197                                 return ret;
1198                         }
1199                 }
1200         }
1201
1202         return 0;
1203 }
1204
1205 static int __devinit poch_pci_probe(struct pci_dev *pdev,
1206                                     const struct pci_device_id *pci_id)
1207 {
1208         struct device *dev = &pdev->dev;
1209         struct poch_dev *poch_dev;
1210         struct uio_info *uio;
1211         int ret;
1212         int id;
1213         int i;
1214
1215         poch_dev = kzalloc(sizeof(struct poch_dev), GFP_KERNEL);
1216         if (!poch_dev) {
1217                 dev_err(dev, "error allocating priv. data memory\n");
1218                 return -ENOMEM;
1219         }
1220
1221         poch_dev->pci_dev = pdev;
1222         uio = &poch_dev->uio;
1223
1224         pci_set_drvdata(pdev, poch_dev);
1225
1226         spin_lock_init(&poch_dev->iomem_lock);
1227
1228         poch_dev->nchannels = POCH_NCHANNELS;
1229         poch_dev->channels[CHNO_RX_CHANNEL].dir = CHANNEL_DIR_RX;
1230         poch_dev->channels[CHNO_TX_CHANNEL].dir = CHANNEL_DIR_TX;
1231
1232         for (i = 0; i < poch_dev->nchannels; i++) {
1233                 init_waitqueue_head(&poch_dev->channels[i].wq);
1234                 atomic_set(&poch_dev->channels[i].free, 1);
1235                 atomic_set(&poch_dev->channels[i].inited, 0);
1236         }
1237
1238         ret = pci_enable_device(pdev);
1239         if (ret) {
1240                 dev_err(dev, "error enabling device\n");
1241                 goto out_free;
1242         }
1243
1244         ret = pci_request_regions(pdev, "poch");
1245         if (ret) {
1246                 dev_err(dev, "error requesting resources\n");
1247                 goto out_disable;
1248         }
1249
1250         uio->mem[0].addr = pci_resource_start(pdev, 1);
1251         if (!uio->mem[0].addr) {
1252                 dev_err(dev, "invalid BAR1\n");
1253                 ret = -ENODEV;
1254                 goto out_release;
1255         }
1256
1257         uio->mem[0].size = pci_resource_len(pdev, 1);
1258         uio->mem[0].memtype = UIO_MEM_PHYS;
1259
1260         uio->name = "poch";
1261         uio->version = "0.0.1";
1262         uio->irq = -1;
1263         ret = uio_register_device(dev, uio);
1264         if (ret) {
1265                 dev_err(dev, "error register UIO device: %d\n", ret);
1266                 goto out_release;
1267         }
1268
1269         poch_dev->bridge_iomem = ioremap(pci_resource_start(pdev, 0),
1270                                          pci_resource_len(pdev, 0));
1271         if (poch_dev->bridge_iomem == NULL) {
1272                 dev_err(dev, "error mapping bridge (bar0) registers\n");
1273                 ret = -ENOMEM;
1274                 goto out_uio_unreg;
1275         }
1276
1277         poch_dev->fpga_iomem = ioremap(pci_resource_start(pdev, 1),
1278                                        pci_resource_len(pdev, 1));
1279         if (poch_dev->fpga_iomem == NULL) {
1280                 dev_err(dev, "error mapping fpga (bar1) registers\n");
1281                 ret = -ENOMEM;
1282                 goto out_bar0_unmap;
1283         }
1284
1285         ret = request_irq(pdev->irq, poch_irq_handler, IRQF_SHARED,
1286                           dev->bus_id, poch_dev);
1287         if (ret) {
1288                 dev_err(dev, "error requesting IRQ %u\n", pdev->irq);
1289                 ret = -ENOMEM;
1290                 goto out_bar1_unmap;
1291         }
1292
1293         if (!idr_pre_get(&poch_ids, GFP_KERNEL)) {
1294                 dev_err(dev, "error allocating memory ids\n");
1295                 ret = -ENOMEM;
1296                 goto out_free_irq;
1297         }
1298
1299         idr_get_new(&poch_ids, poch_dev, &id);
1300         if (id >= MAX_POCH_CARDS) {
1301                 dev_err(dev, "minors exhausted\n");
1302                 ret = -EBUSY;
1303                 goto out_free_irq;
1304         }
1305
1306         cdev_init(&poch_dev->cdev, &poch_fops);
1307         poch_dev->cdev.owner = THIS_MODULE;
1308         ret = cdev_add(&poch_dev->cdev,
1309                        poch_first_dev + (id * poch_dev->nchannels),
1310                        poch_dev->nchannels);
1311         if (ret) {
1312                 dev_err(dev, "error register character device\n");
1313                 goto out_idr_remove;
1314         }
1315
1316         ret = poch_class_dev_register(poch_dev, id);
1317         if (ret)
1318                 goto out_cdev_del;
1319
1320         return 0;
1321
1322  out_cdev_del:
1323         cdev_del(&poch_dev->cdev);
1324  out_idr_remove:
1325         idr_remove(&poch_ids, id);
1326  out_free_irq:
1327         free_irq(pdev->irq, poch_dev);
1328  out_bar1_unmap:
1329         iounmap(poch_dev->fpga_iomem);
1330  out_bar0_unmap:
1331         iounmap(poch_dev->bridge_iomem);
1332  out_uio_unreg:
1333         uio_unregister_device(uio);
1334  out_release:
1335         pci_release_regions(pdev);
1336  out_disable:
1337         pci_disable_device(pdev);
1338  out_free:
1339         kfree(poch_dev);
1340         return ret;
1341 }
1342
1343 /*
1344  * FIXME: Hot unplug is not yet handled.
1345  */
1346 static void poch_pci_remove(struct pci_dev *pdev)
1347 {
1348         struct poch_dev *poch_dev = pci_get_drvdata(pdev);
1349         struct uio_info *uio = &poch_dev->uio;
1350         unsigned int minor = MINOR(poch_dev->cdev.dev);
1351         unsigned int id = minor / poch_dev->nchannels;
1352
1354
1355         poch_class_dev_unregister(poch_dev, id);
1356         cdev_del(&poch_dev->cdev);
1357         idr_remove(&poch_ids, id);
1358         free_irq(pdev->irq, poch_dev);
1359         uio_unregister_device(uio);
1360         pci_release_regions(pdev);
1361         pci_disable_device(pdev);
1362         pci_set_drvdata(pdev, NULL);
1363         iounmap(uio->mem[0].internal_addr);
1364
1365         kfree(poch_dev);
1366 }
1367
1368 static const struct pci_device_id poch_pci_ids[] /* __devinitconst */ = {
1369         { PCI_DEVICE(PCI_VENDOR_ID_RRAPIDS,
1370                      PCI_DEVICE_ID_RRAPIDS_POCKET_CHANGE) },
1371         { 0, }
1372 };
1373
1374 static struct pci_driver poch_pci_driver = {
1375         .name = DRV_NAME,
1376         .id_table = poch_pci_ids,
1377         .probe = poch_pci_probe,
1378         .remove = poch_pci_remove,
1379 };
1380
1381 static int __init poch_init_module(void)
1382 {
1383         int ret = 0;
1384
1385         ret = alloc_chrdev_region(&poch_first_dev, 0,
1386                                   MAX_POCH_DEVICES, DRV_NAME);
1387         if (ret) {
1388                 printk(KERN_ERR PFX "error allocating device no.");
1389                 return ret;
1390         }
1391
1392         poch_cls = class_create(THIS_MODULE, "pocketchange");
1393         if (IS_ERR(poch_cls)) {
1394                 ret = PTR_ERR(poch_cls);
1395                 goto out_unreg_chrdev;
1396         }
1397
1398         ret = pci_register_driver(&poch_pci_driver);
1399         if (ret) {
1400                 printk(KERN_ERR PFX "error register PCI device");
1401                 goto out_class_destroy;
1402         }
1403
1404         return 0;
1405
1406  out_class_destroy:
1407         class_destroy(poch_cls);
1408
1409  out_unreg_chrdev:
1410         unregister_chrdev_region(poch_first_dev, MAX_POCH_DEVICES);
1411
1412         return ret;
1413 }
1414
1415 static void __exit poch_exit_module(void)
1416 {
1417         pci_unregister_driver(&poch_pci_driver);
1418         class_destroy(poch_cls);
1419         unregister_chrdev_region(poch_first_dev, MAX_POCH_DEVICES);
1420 }
1421
1422 module_init(poch_init_module);
1423 module_exit(poch_exit_module);
1424
1425 MODULE_LICENSE("GPL v2");