drivers/staging/poch/poch.c
1 /*
2  * User-space DMA and UIO based Redrapids Pocket Change CardBus driver
3  *
4  * Copyright 2008 Vijay Kumar <vijaykumar@bravegnu.org>
5  *
6  * Licensed under GPL version 2 only.
7  */
8
9 #include <linux/device.h>
10 #include <linux/module.h>
11 #include <linux/pci.h>
12 #include <linux/uio_driver.h>
13 #include <linux/spinlock.h>
14 #include <linux/cdev.h>
15 #include <linux/delay.h>
16 #include <linux/sysfs.h>
17 #include <linux/poll.h>
18 #include <linux/idr.h>
19 #include <linux/interrupt.h>
20 #include <linux/init.h>
21 #include <linux/ioctl.h>
22 #include <linux/io.h>
23
24 #include "poch.h"
25
26 #include <asm/cacheflush.h>
27
28 #ifndef PCI_VENDOR_ID_RRAPIDS
29 #define PCI_VENDOR_ID_RRAPIDS 0x17D2
30 #endif
31
32 #ifndef PCI_DEVICE_ID_RRAPIDS_POCKET_CHANGE
33 #define PCI_DEVICE_ID_RRAPIDS_POCKET_CHANGE 0x0351
34 #endif
35
36 #define POCH_NCHANNELS 2
37
38 #define MAX_POCH_CARDS 8
39 #define MAX_POCH_DEVICES (MAX_POCH_CARDS * POCH_NCHANNELS)
40
41 #define DRV_NAME "poch"
42 #define PFX      DRV_NAME ": "
43
44 /*
45  * BAR0 Bridge Register Definitions
46  */
47
48 #define BRIDGE_REV_REG                  0x0
49 #define BRIDGE_INT_MASK_REG             0x4
50 #define BRIDGE_INT_STAT_REG             0x8
51
52 #define BRIDGE_INT_ACTIVE               (0x1 << 31)
53 #define BRIDGE_INT_FPGA                 (0x1 << 2)
54 #define BRIDGE_INT_TEMP_FAIL            (0x1 << 1)
55 #define BRIDGE_INT_TEMP_WARN            (0x1 << 0)
56
57 #define BRIDGE_FPGA_RESET_REG           0xC
58
59 #define BRIDGE_CARD_POWER_REG           0x10
60 #define BRIDGE_CARD_POWER_EN            (0x1 << 0)
61 #define BRIDGE_CARD_POWER_PROG_DONE     (0x1 << 31)
62
63 #define BRIDGE_JTAG_REG                 0x14
64 #define BRIDGE_DMA_GO_REG               0x18
65 #define BRIDGE_STAT_0_REG               0x1C
66 #define BRIDGE_STAT_1_REG               0x20
67 #define BRIDGE_STAT_2_REG               0x24
68 #define BRIDGE_STAT_3_REG               0x28
69 #define BRIDGE_TEMP_STAT_REG            0x2C
70 #define BRIDGE_TEMP_THRESH_REG          0x30
71 #define BRIDGE_EEPROM_REVSEL_REG        0x34
72 #define BRIDGE_CIS_STRUCT_REG           0x100
73 #define BRIDGE_BOARDREV_REG             0x124
74
75 /*
76  * BAR1 FPGA Register Definitions
77  */
78
79 #define FPGA_IFACE_REV_REG              0x0
80 #define FPGA_RX_BLOCK_SIZE_REG          0x8
81 #define FPGA_TX_BLOCK_SIZE_REG          0xC
82 #define FPGA_RX_BLOCK_COUNT_REG         0x10
83 #define FPGA_TX_BLOCK_COUNT_REG         0x14
84 #define FPGA_RX_CURR_DMA_BLOCK_REG      0x18
85 #define FPGA_TX_CURR_DMA_BLOCK_REG      0x1C
86 #define FPGA_RX_GROUP_COUNT_REG         0x20
87 #define FPGA_TX_GROUP_COUNT_REG         0x24
88 #define FPGA_RX_CURR_GROUP_REG          0x28
89 #define FPGA_TX_CURR_GROUP_REG          0x2C
90 #define FPGA_RX_CURR_PCI_REG            0x38
91 #define FPGA_TX_CURR_PCI_REG            0x3C
92 #define FPGA_RX_GROUP0_START_REG        0x40
93 #define FPGA_TX_GROUP0_START_REG        0xC0
94 #define FPGA_DMA_DESC_1_REG             0x140
95 #define FPGA_DMA_DESC_2_REG             0x144
96 #define FPGA_DMA_DESC_3_REG             0x148
97 #define FPGA_DMA_DESC_4_REG             0x14C
98
99 #define FPGA_DMA_INT_STAT_REG           0x150
100 #define FPGA_DMA_INT_MASK_REG           0x154
101 #define FPGA_DMA_INT_RX         (1 << 0)
102 #define FPGA_DMA_INT_TX         (1 << 1)
103
104 #define FPGA_RX_GROUPS_PER_INT_REG      0x158
105 #define FPGA_TX_GROUPS_PER_INT_REG      0x15C
106 #define FPGA_DMA_ADR_PAGE_REG           0x160
107 #define FPGA_FPGA_REV_REG               0x200
108
109 #define FPGA_ADC_CLOCK_CTL_REG          0x204
110 #define FPGA_ADC_CLOCK_CTL_OSC_EN       (0x1 << 3)
111 #define FPGA_ADC_CLOCK_LOCAL_CLK        (0x1 | FPGA_ADC_CLOCK_CTL_OSC_EN)
112 #define FPGA_ADC_CLOCK_EXT_SAMP_CLK     0x0
113
114 #define FPGA_ADC_DAC_EN_REG             0x208
115 #define FPGA_ADC_DAC_EN_DAC_OFF         (0x1 << 1)
116 #define FPGA_ADC_DAC_EN_ADC_OFF         (0x1 << 0)
117
118 #define FPGA_INT_STAT_REG               0x20C
119 #define FPGA_INT_MASK_REG               0x210
120 #define FPGA_INT_PLL_UNLOCKED           (0x1 << 9)
121 #define FPGA_INT_DMA_CORE               (0x1 << 8)
122 #define FPGA_INT_TX_FF_EMPTY            (0x1 << 7)
123 #define FPGA_INT_RX_FF_EMPTY            (0x1 << 6)
124 #define FPGA_INT_TX_FF_OVRFLW           (0x1 << 3)
125 #define FPGA_INT_RX_FF_OVRFLW           (0x1 << 2)
126 #define FPGA_INT_TX_ACQ_DONE            (0x1 << 1)
127 #define FPGA_INT_RX_ACQ_DONE            (0x1)
128
129 #define FPGA_RX_CTL_REG                 0x214
130 #define FPGA_RX_CTL_FIFO_FLUSH          (0x1 << 9)
131 #define FPGA_RX_CTL_SYNTH_DATA          (0x1 << 8)
132 #define FPGA_RX_CTL_CONT_CAP            (0x0 << 1)
133 #define FPGA_RX_CTL_SNAP_CAP            (0x1 << 1)
134
135 #define FPGA_RX_ARM_REG                 0x21C
136
137 #define FPGA_DOM_REG                    0x224
138 #define FPGA_DOM_DCM_RESET              (0x1 << 5)
139 #define FPGA_DOM_SOFT_RESET             (0x1 << 4)
140 #define FPGA_DOM_DUAL_M_SG_DMA          (0x0)
141 #define FPGA_DOM_TARGET_ACCESS          (0x1)
142
143 #define FPGA_TX_CTL_REG                 0x228
144 #define FPGA_TX_CTL_FIFO_FLUSH          (0x1 << 9)
145 #define FPGA_TX_CTL_OUTPUT_ZERO         (0x0 << 2)
146 #define FPGA_TX_CTL_OUTPUT_CARDBUS      (0x1 << 2)
147 #define FPGA_TX_CTL_OUTPUT_ADC          (0x2 << 2)
148 #define FPGA_TX_CTL_OUTPUT_SNAPSHOT     (0x3 << 2)
149 #define FPGA_TX_CTL_LOOPBACK            (0x1 << 0)
150
151 #define FPGA_ENDIAN_MODE_REG            0x22C
152 #define FPGA_RX_FIFO_COUNT_REG          0x28C
153 #define FPGA_TX_ENABLE_REG              0x298
154 #define FPGA_TX_TRIGGER_REG             0x29C
155 #define FPGA_TX_DATAMEM_COUNT_REG       0x2A8
156 #define FPGA_CAP_FIFO_REG               0x300
157 #define FPGA_TX_SNAPSHOT_REG            0x8000
158
159 /*
160  * Channel Index Definitions
161  */
162
163 enum {
164         CHNO_RX_CHANNEL,
165         CHNO_TX_CHANNEL,
166 };
167
168 struct poch_dev;
169
170 enum channel_dir {
171         CHANNEL_DIR_RX,
172         CHANNEL_DIR_TX,
173 };
174
175 struct poch_group_info {
176         struct page *pg;
177         dma_addr_t dma_addr;
178         unsigned long user_offset;
179 };
180
181 struct channel_info {
182         unsigned int chno;
183
184         atomic_t sys_block_size;
185         atomic_t sys_group_size;
186         atomic_t sys_group_count;
187
188         enum channel_dir dir;
189
190         unsigned long block_size;
191         unsigned long group_size;
192         unsigned long group_count;
193
194         /* Contains the DMA address and VM offset of each group. */
195         struct poch_group_info *groups;
196
197         /* Contains the header and circular buffer exported to userspace. */
198         spinlock_t group_offsets_lock;
199         struct poch_cbuf_header *header;
200         struct page *header_pg;
201         unsigned long header_size;
202
203         /* Last group indicated as 'complete' to user space. */
204         unsigned int transfer;
205
206         wait_queue_head_t wq;
207
208         union {
209                 unsigned int data_available;
210                 unsigned int space_available;
211         };
212
213         void __iomem *bridge_iomem;
214         void __iomem *fpga_iomem;
215         spinlock_t *iomem_lock;
216
217         atomic_t free;
218         atomic_t inited;
219
220         /* Error counters */
221         struct poch_counters counters;
222         spinlock_t counters_lock;
223
224         struct device *dev;
225 };
226
227 struct poch_dev {
228         struct uio_info uio;
229         struct pci_dev *pci_dev;
230         unsigned int nchannels;
231         struct channel_info channels[POCH_NCHANNELS];
232         struct cdev cdev;
233
234         /* Counts the no. of channels that have been opened. On first
235          * open, the card is powered on. On last channel close, the
236          * card is powered off.
237          */
238         atomic_t usage;
239
240         void __iomem *bridge_iomem;
241         void __iomem *fpga_iomem;
242         spinlock_t iomem_lock;
243
244         struct device *dev;
245 };
246
247 static dev_t poch_first_dev;
248 static struct class *poch_cls;
249 static DEFINE_IDR(poch_ids);
250
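/*
 * Per-channel sysfs attributes. Userspace writes block_size, group_size
 * and group_count before opening the channel; the values are only
 * validated and latched into the channel by channel_latch_attr() at
 * open() time.
 */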
251 static ssize_t store_block_size(struct device *dev,
252                                 struct device_attribute *attr,
253                                 const char *buf, size_t count)
254 {
255         struct channel_info *channel = dev_get_drvdata(dev);
256         unsigned long block_size;
257
258         sscanf(buf, "%lu", &block_size);
259         atomic_set(&channel->sys_block_size, block_size);
260
261         return count;
262 }
263 static DEVICE_ATTR(block_size, S_IWUSR|S_IWGRP, NULL, store_block_size);
264
265 static ssize_t store_group_size(struct device *dev,
266                                 struct device_attribute *attr,
267                                 const char *buf, size_t count)
268 {
269         struct channel_info *channel = dev_get_drvdata(dev);
270         unsigned long group_size;
271
272         sscanf(buf, "%lu", &group_size);
273         atomic_set(&channel->sys_group_size, group_size);
274
275         return count;
276 }
277 static DEVICE_ATTR(group_size, S_IWUSR|S_IWGRP, NULL, store_group_size);
278
279 static ssize_t store_group_count(struct device *dev,
280                                 struct device_attribute *attr,
281                                  const char *buf, size_t count)
282 {
283         struct channel_info *channel = dev_get_drvdata(dev);
284         unsigned long group_count;
285
286         sscanf(buf, "%lu", &group_count);
287         atomic_set(&channel->sys_group_count, group_count);
288
289         return count;
290 }
291 static DEVICE_ATTR(group_count, S_IWUSR|S_IWGRP, NULL, store_group_count);
292
293 static ssize_t show_direction(struct device *dev,
294                               struct device_attribute *attr, char *buf)
295 {
296         struct channel_info *channel = dev_get_drvdata(dev);
297         int len;
298
299         len = sprintf(buf, "%s\n", (channel->dir ? "tx" : "rx"));
300         return len;
301 }
302 static DEVICE_ATTR(dir, S_IRUSR|S_IRGRP, show_direction, NULL);
303
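/* Number of pages needed to hold 'bytes'; same as DIV_ROUND_UP(bytes, PAGE_SIZE). */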
304 static unsigned long npages(unsigned long bytes)
305 {
306         if (bytes % PAGE_SIZE == 0)
307                 return bytes / PAGE_SIZE;
308         else
309                 return (bytes / PAGE_SIZE) + 1;
310 }
311
312 static ssize_t show_mmap_size(struct device *dev,
313                               struct device_attribute *attr, char *buf)
314 {
315         struct channel_info *channel = dev_get_drvdata(dev);
316         int len;
317         unsigned long mmap_size;
318         unsigned long group_pages;
319         unsigned long header_pages;
320         unsigned long total_group_pages;
321
322         group_pages = npages(channel->group_size);
323         header_pages = npages(channel->header_size);
324         total_group_pages = group_pages * channel->group_count;
325
326         mmap_size = (header_pages + total_group_pages) * PAGE_SIZE;
327         len = sprintf(buf, "%lu\n", mmap_size);
328         return len;
329 }
330 static DEVICE_ATTR(mmap_size, S_IRUSR|S_IRGRP, show_mmap_size, NULL);
331
332 static struct device_attribute *poch_class_attrs[] = {
333         &dev_attr_block_size,
334         &dev_attr_group_size,
335         &dev_attr_group_count,
336         &dev_attr_dir,
337         &dev_attr_mmap_size,
338 };
339
340 static void poch_channel_free_groups(struct channel_info *channel)
341 {
342         unsigned long i;
343
344         for (i = 0; i < channel->group_count; i++) {
345                 struct poch_group_info *group;
346                 unsigned int order;
347
348                 group = &channel->groups[i];
349                 order = get_order(channel->group_size);
350                 if (group->pg)
351                         __free_pages(group->pg, order);
352         }
353 }
354
355 static int poch_channel_alloc_groups(struct channel_info *channel)
356 {
357         unsigned long i;
358         unsigned long group_pages;
359         unsigned long header_pages;
360
361         group_pages = npages(channel->group_size);
362         header_pages = npages(channel->header_size);
363
364         for (i = 0; i < channel->group_count; i++) {
365                 struct poch_group_info *group;
366                 unsigned int order;
367                 gfp_t gfp_mask;
368
369                 group = &channel->groups[i];
370                 order = get_order(channel->group_size);
371
372                 /*
373                  * __GFP_COMP is required here since we are going to
374                  * perform non-linear mapping to userspace. For more
375                  * information read the vm_insert_page() function
376                  * comments.
377                  */
378
379                 gfp_mask = GFP_KERNEL | GFP_DMA32 | __GFP_ZERO | __GFP_COMP;
380                 group->pg = alloc_pages(gfp_mask, order);
381                 if (!group->pg) {
382                         poch_channel_free_groups(channel);
383                         return -ENOMEM;
384                 }
385
386                 /* FIXME: This is the physical address not the bus
387                  * address!  This won't work in architectures that
388                  * have an IOMMU. Can we use pci_map_single() for
389                  * this?
390                  */
391                 group->dma_addr = page_to_pfn(group->pg) * PAGE_SIZE;
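                /*
                 * Illustrative sketch only, not part of the original driver
                 * (it responds to the FIXME above): with the DMA mapping API
                 * the bus address could be obtained roughly as
                 *
                 *      group->dma_addr = dma_map_page(&pci_dev->dev, group->pg,
                 *                                     0, channel->group_size,
                 *                                     DMA_BIDIRECTIONAL);
                 *
                 * followed by a dma_mapping_error() check, which would also
                 * work on systems with an IOMMU. The pci_dev is not visible
                 * in this function, so it would have to be passed in, and the
                 * matching dma_unmap_page() would belong in
                 * poch_channel_free_groups().
                 */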
392                 group->user_offset =
393                         (header_pages + (i * group_pages)) * PAGE_SIZE;
394
395                 printk(KERN_INFO PFX "%lu: user_offset: 0x%lx\n", i,
396                        group->user_offset);
397         }
398
399         return 0;
400 }
401
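/*
 * Copy the sysfs-configured sizes into the channel and sanity-check them:
 * a non-zero group count, a non-zero block size that is a multiple of
 * 8 bytes (the hardware counts 64-bit transfers), and a group size that
 * is a non-zero multiple of the block size.
 */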
402 static int channel_latch_attr(struct channel_info *channel)
403 {
404         channel->group_count = atomic_read(&channel->sys_group_count);
405         channel->group_size = atomic_read(&channel->sys_group_size);
406         channel->block_size = atomic_read(&channel->sys_block_size);
407
408         if (channel->group_count == 0) {
409                 printk(KERN_ERR PFX "invalid group count %lu\n",
410                        channel->group_count);
411                 return -EINVAL;
412         }
413
414         if (channel->group_size == 0 ||
415             channel->group_size < channel->block_size) {
416                 printk(KERN_ERR PFX "invalid group size %lu\n",
417                        channel->group_size);
418                 return -EINVAL;
419         }
420
421         if (channel->block_size == 0 || (channel->block_size % 8) != 0) {
422                 printk(KERN_ERR PFX "invalid block size %lu\n",
423                        channel->block_size);
424                 return -EINVAL;
425         }
426
427         if (channel->group_size % channel->block_size != 0) {
428                 printk(KERN_ERR PFX
429                        "group size should be a multiple of block size\n");
430                 return -EINVAL;
431         }
432
433         return 0;
434 }
435
436 /*
437  * Configure DMA group registers
438  */
439 static void channel_dma_init(struct channel_info *channel)
440 {
441         void __iomem *fpga = channel->fpga_iomem;
442         u32 group_regs_base;
443         u32 group_reg;
444         unsigned int page;
445         unsigned int group_in_page;
446         unsigned long i;
447         u32 block_size_reg;
448         u32 block_count_reg;
449         u32 group_count_reg;
450         u32 groups_per_int_reg;
451         u32 curr_pci_reg;
452
453         if (channel->chno == CHNO_RX_CHANNEL) {
454                 group_regs_base = FPGA_RX_GROUP0_START_REG;
455                 block_size_reg = FPGA_RX_BLOCK_SIZE_REG;
456                 block_count_reg = FPGA_RX_BLOCK_COUNT_REG;
457                 group_count_reg = FPGA_RX_GROUP_COUNT_REG;
458                 groups_per_int_reg = FPGA_RX_GROUPS_PER_INT_REG;
459                 curr_pci_reg = FPGA_RX_CURR_PCI_REG;
460         } else {
461                 group_regs_base = FPGA_TX_GROUP0_START_REG;
462                 block_size_reg = FPGA_TX_BLOCK_SIZE_REG;
463                 block_count_reg = FPGA_TX_BLOCK_COUNT_REG;
464                 group_count_reg = FPGA_TX_GROUP_COUNT_REG;
465                 groups_per_int_reg = FPGA_TX_GROUPS_PER_INT_REG;
466                 curr_pci_reg = FPGA_TX_CURR_PCI_REG;
467         }
468
469         printk(KERN_WARNING "block_size, group_size, group_count\n");
470         /*
471          * Block size is represented in no. of 64 bit transfers.
472          */
473         iowrite32(channel->block_size / 8, fpga + block_size_reg);
474         iowrite32(channel->group_size / channel->block_size,
475                   fpga + block_count_reg);
476         iowrite32(channel->group_count, fpga + group_count_reg);
477         /* FIXME: Hardcoded groups per int. Get it from sysfs? */
478         iowrite32(1, fpga + groups_per_int_reg);
479
480         /* Unlock PCI address? Not defined in the data sheet, but used
481          * in the reference code by Redrapids.
482          */
483         iowrite32(0x1, fpga + curr_pci_reg);
484
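        /*
         * The FPGA exposes the group start-address registers in banks of 32:
         * FPGA_DMA_ADR_PAGE_REG selects the bank ("page"), and group i is
         * programmed at group_regs_base + (i % 32) * 4 within bank i / 32.
         */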
485         /* The DMA address page register is shared between the RX and
486          * TX channels, so acquire lock.
487          */
488         for (i = 0; i < channel->group_count; i++) {
489                 page = i / 32;
490                 group_in_page = i % 32;
491
492                 group_reg = group_regs_base + (group_in_page * 4);
493
494                 spin_lock(channel->iomem_lock);
495                 iowrite32(page, fpga + FPGA_DMA_ADR_PAGE_REG);
496                 iowrite32(channel->groups[i].dma_addr, fpga + group_reg);
497                 spin_unlock(channel->iomem_lock);
498         }
499
500         for (i = 0; i < channel->group_count; i++) {
501                 page = i / 32;
502                 group_in_page = i % 32;
503
504                 group_reg = group_regs_base + (group_in_page * 4);
505
506                 spin_lock(channel->iomem_lock);
507                 iowrite32(page, fpga + FPGA_DMA_ADR_PAGE_REG);
508                 printk(KERN_INFO PFX "%lu: read dma_addr: 0x%x\n", i,
509                        ioread32(fpga + group_reg));
510                 spin_unlock(channel->iomem_lock);
511         }
512
513 }
514
515 static int poch_channel_alloc_header(struct channel_info *channel)
516 {
517         struct poch_cbuf_header *header = channel->header;
518         unsigned long group_offset_size;
519         unsigned long tot_group_offsets_size;
520
521         /* Allocate memory to hold the header exported to userspace */
522         group_offset_size = sizeof(header->group_offsets[0]);
523         tot_group_offsets_size = group_offset_size * channel->group_count;
524         channel->header_size = sizeof(*header) + tot_group_offsets_size;
525         channel->header_pg = alloc_pages(GFP_KERNEL | __GFP_ZERO,
526                                          get_order(channel->header_size));
527         if (!channel->header_pg)
528                 return -ENOMEM;
529
530         channel->header = page_address(channel->header_pg);
531
532         return 0;
533 }
534
535 static void poch_channel_free_header(struct channel_info *channel)
536 {
537         unsigned int order;
538
539         order = get_order(channel->header_size);
540         __free_pages(channel->header_pg, order);
541 }
542
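/*
 * Fill in the header shared with userspace. For RX every group offset
 * starts out as -1 (no data available yet); for TX every offset points
 * at its group, i.e. all buffers start out free for userspace to fill.
 */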
543 static void poch_channel_init_header(struct channel_info *channel)
544 {
545         int i;
546         struct poch_group_info *groups;
547         s32 *group_offsets;
548
549         channel->header->group_size_bytes = channel->group_size;
550         channel->header->group_count = channel->group_count;
551
552         spin_lock_init(&channel->group_offsets_lock);
553
554         group_offsets = channel->header->group_offsets;
555         groups = channel->groups;
556
557         for (i = 0; i < channel->group_count; i++) {
558                 if (channel->dir == CHANNEL_DIR_RX)
559                         group_offsets[i] = -1;
560                 else
561                         group_offsets[i] = groups[i].user_offset;
562         }
563 }
564
565 static void __poch_channel_clear_counters(struct channel_info *channel)
566 {
567         channel->counters.pll_unlock = 0;
568         channel->counters.fifo_empty = 0;
569         channel->counters.fifo_overflow = 0;
570 }
571
572 static int poch_channel_init(struct channel_info *channel,
573                              struct poch_dev *poch_dev)
574 {
575         struct pci_dev *pdev = poch_dev->pci_dev;
576         struct device *dev = &pdev->dev;
577         unsigned long alloc_size;
578         int ret;
579
580         printk(KERN_WARNING "channel_latch_attr\n");
581
582         ret = channel_latch_attr(channel);
583         if (ret != 0)
584                 goto out;
585
586         channel->transfer = 0;
587
588         /* Allocate memory to hold group information. */
589         alloc_size = channel->group_count * sizeof(struct poch_group_info);
590         channel->groups = kzalloc(alloc_size, GFP_KERNEL);
591         if (!channel->groups) {
592                 dev_err(dev, "error allocating memory for group info\n");
593                 ret = -ENOMEM;
594                 goto out;
595         }
596
597         printk(KERN_WARNING "poch_channel_alloc_groups\n");
598
599         ret = poch_channel_alloc_groups(channel);
600         if (ret) {
601                 dev_err(dev, "error allocating groups of order %d\n",
602                         get_order(channel->group_size));
603                 goto out_free_group_info;
604         }
605
606         ret = poch_channel_alloc_header(channel);
607         if (ret) {
608                 dev_err(dev, "error allocating user space header\n");
609                 goto out_free_groups;
610         }
611
612         channel->fpga_iomem = poch_dev->fpga_iomem;
613         channel->bridge_iomem = poch_dev->bridge_iomem;
614         channel->iomem_lock = &poch_dev->iomem_lock;
615         spin_lock_init(&channel->counters_lock);
616
617         __poch_channel_clear_counters(channel);
618
619         printk(KERN_WARNING "poch_channel_init_header\n");
620
621         poch_channel_init_header(channel);
622
623         return 0;
624
625  out_free_groups:
626         poch_channel_free_groups(channel);
627  out_free_group_info:
628         kfree(channel->groups);
629  out:
630         return ret;
631 }
632
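/*
 * Poll BRIDGE_CARD_POWER_PROG_DONE every 100 ms until the FPGA reports
 * that its configuration has completed, giving up after roughly 10 seconds.
 */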
633 static int poch_wait_fpga_prog(void __iomem *bridge)
634 {
635         unsigned long total_wait;
636         const unsigned long wait_period = 100;
637         /* FIXME: Get the actual timeout */
638         const unsigned long prog_timeo = 10000; /* 10 Seconds */
639         u32 card_power;
640
641         printk(KERN_WARNING "poch_wait_fpga_prog\n");
642
643         printk(KERN_INFO PFX "programming fpga ...\n");
644         total_wait = 0;
645         while (1) {
646                 msleep(wait_period);
647                 total_wait += wait_period;
648
649                 card_power = ioread32(bridge + BRIDGE_CARD_POWER_REG);
650                 if (card_power & BRIDGE_CARD_POWER_PROG_DONE) {
651                         printk(KERN_INFO PFX "programming done\n");
652                         return 0;
653                 }
654                 if (total_wait > prog_timeo) {
655                         printk(KERN_ERR PFX
656                                "timed out while programming FPGA\n");
657                         return -EIO;
658                 }
659         }
660 }
661
662 static void poch_card_power_off(struct poch_dev *poch_dev)
663 {
664         void __iomem *bridge = poch_dev->bridge_iomem;
665         u32 card_power;
666
667         iowrite32(0, bridge + BRIDGE_INT_MASK_REG);
668         iowrite32(0, bridge + BRIDGE_DMA_GO_REG);
669
670         card_power = ioread32(bridge + BRIDGE_CARD_POWER_REG);
671         iowrite32(card_power & ~BRIDGE_CARD_POWER_EN,
672                   bridge + BRIDGE_CARD_POWER_REG);
673 }
674
675 enum clk_src {
676         CLK_SRC_ON_BOARD,
677         CLK_SRC_EXTERNAL
678 };
679
680 static void poch_card_clock_on(void __iomem *fpga)
681 {
682         /* FIXME: Get this data through sysfs? */
683         enum clk_src clk_src = CLK_SRC_ON_BOARD;
684
685         if (clk_src == CLK_SRC_ON_BOARD) {
686                 iowrite32(FPGA_ADC_CLOCK_LOCAL_CLK | FPGA_ADC_CLOCK_CTL_OSC_EN,
687                           fpga + FPGA_ADC_CLOCK_CTL_REG);
688         } else if (clk_src == CLK_SRC_EXTERNAL) {
689                 iowrite32(FPGA_ADC_CLOCK_EXT_SAMP_CLK,
690                           fpga + FPGA_ADC_CLOCK_CTL_REG);
691         }
692 }
693
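/*
 * Power-up sequence: enable card power, wait for the FPGA to finish
 * loading its configuration, turn on the sample clock, then reset the
 * DCM and state machines and select the dual-master scatter-gather DMA
 * mode of operation.
 */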
694 static int poch_card_power_on(struct poch_dev *poch_dev)
695 {
696         void __iomem *bridge = poch_dev->bridge_iomem;
697         void __iomem *fpga = poch_dev->fpga_iomem;
698
699         iowrite32(BRIDGE_CARD_POWER_EN, bridge + BRIDGE_CARD_POWER_REG);
700
701         if (poch_wait_fpga_prog(bridge) != 0) {
702                 poch_card_power_off(poch_dev);
703                 return -EIO;
704         }
705
706         poch_card_clock_on(fpga);
707
708         /* Sync to new clock, reset state machines, set DMA mode. */
709         iowrite32(FPGA_DOM_DCM_RESET | FPGA_DOM_SOFT_RESET
710                   | FPGA_DOM_DUAL_M_SG_DMA, fpga + FPGA_DOM_REG);
711
712         /* FIXME: The time required for sync. needs to be tuned. */
713         msleep(1000);
714
715         return 0;
716 }
717
718 static void poch_channel_analog_on(struct channel_info *channel)
719 {
720         void __iomem *fpga = channel->fpga_iomem;
721         u32 adc_dac_en;
722
723         spin_lock(channel->iomem_lock);
724         adc_dac_en = ioread32(fpga + FPGA_ADC_DAC_EN_REG);
725         switch (channel->chno) {
726         case CHNO_RX_CHANNEL:
727                 iowrite32(adc_dac_en & ~FPGA_ADC_DAC_EN_ADC_OFF,
728                           fpga + FPGA_ADC_DAC_EN_REG);
729                 break;
730         case CHNO_TX_CHANNEL:
731                 iowrite32(adc_dac_en & ~FPGA_ADC_DAC_EN_DAC_OFF,
732                           fpga + FPGA_ADC_DAC_EN_REG);
733                 break;
734         }
735         spin_unlock(channel->iomem_lock);
736 }
737
738 static int poch_open(struct inode *inode, struct file *filp)
739 {
740         struct poch_dev *poch_dev;
741         struct channel_info *channel;
742         void __iomem *bridge;
743         void __iomem *fpga;
744         int chno;
745         int usage;
746         int ret;
747
748         poch_dev = container_of(inode->i_cdev, struct poch_dev, cdev);
749         bridge = poch_dev->bridge_iomem;
750         fpga = poch_dev->fpga_iomem;
751
752         chno = iminor(inode) % poch_dev->nchannels;
753         channel = &poch_dev->channels[chno];
754
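        /* Enforce a single opener per channel: 'free' drops from 1 to 0 here. */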
755         if (!atomic_dec_and_test(&channel->free)) {
756                 atomic_inc(&channel->free);
757                 ret = -EBUSY;
758                 goto out;
759         }
760
761         usage = atomic_inc_return(&poch_dev->usage);
762
763         printk(KERN_WARNING "poch_card_power_on\n");
764
765         if (usage == 1) {
766                 ret = poch_card_power_on(poch_dev);
767                 if (ret)
768                         goto out_dec_usage;
769         }
770
771         printk(KERN_INFO "CardBus Bridge Revision: %x\n",
772                ioread32(bridge + BRIDGE_REV_REG));
773         printk(KERN_INFO "CardBus Interface Revision: %x\n",
774                ioread32(fpga + FPGA_IFACE_REV_REG));
775
776         channel->chno = chno;
777         filp->private_data = channel;
778
779         printk(KERN_WARNING "poch_channel_init\n");
780
781         ret = poch_channel_init(channel, poch_dev);
782         if (ret)
783                 goto out_power_off;
784
785         printk(KERN_WARNING "poch_channel_analog_on\n");
786
787         poch_channel_analog_on(channel);
788
789         printk(KERN_WARNING "channel_dma_init\n");
790
791         channel_dma_init(channel);
792
793         if (usage == 1) {
794                 printk(KERN_WARNING "setting up DMA\n");
795
796                 /* Initialize DMA Controller. */
797                 iowrite32(FPGA_CAP_FIFO_REG, bridge + BRIDGE_STAT_2_REG);
798                 iowrite32(FPGA_DMA_DESC_1_REG, bridge + BRIDGE_STAT_3_REG);
799
800                 ioread32(fpga + FPGA_DMA_INT_STAT_REG);
801                 ioread32(fpga + FPGA_INT_STAT_REG);
802                 ioread32(bridge + BRIDGE_INT_STAT_REG);
803
804                 /* Initialize interrupts. FIXME: Enable temperature
805                  * handling. We are enabling both Tx and Rx channel
806                  * interrupts here. Do we need to enable interrupts
807                  * only for the current channel? Anyway, we won't get
808                  * the interrupt unless the DMA is activated.
809                  */
810                 iowrite32(BRIDGE_INT_FPGA, bridge + BRIDGE_INT_MASK_REG);
811                 iowrite32(FPGA_INT_DMA_CORE
812                           | FPGA_INT_PLL_UNLOCKED
813                           | FPGA_INT_TX_FF_EMPTY
814                           | FPGA_INT_RX_FF_EMPTY
815                           | FPGA_INT_TX_FF_OVRFLW
816                           | FPGA_INT_RX_FF_OVRFLW,
817                           fpga + FPGA_INT_MASK_REG);
818                 iowrite32(FPGA_DMA_INT_RX | FPGA_DMA_INT_TX,
819                           fpga + FPGA_DMA_INT_MASK_REG);
820         }
821
822         if (channel->dir == CHANNEL_DIR_TX) {
823                 /* Flush TX FIFO and output data from cardbus. */
824                 iowrite32(FPGA_TX_CTL_FIFO_FLUSH
825                           | FPGA_TX_CTL_OUTPUT_CARDBUS,
826                           fpga + FPGA_TX_CTL_REG);
827         } else {
828                 /* Flush RX FIFO and output data to cardbus. */
829                 iowrite32(FPGA_RX_CTL_CONT_CAP
830                           | FPGA_RX_CTL_FIFO_FLUSH,
831                           fpga + FPGA_RX_CTL_REG);
832         }
833
834         atomic_inc(&channel->inited);
835
836         return 0;
837
838  out_power_off:
839         if (usage == 1)
840                 poch_card_power_off(poch_dev);
841  out_dec_usage:
842         atomic_dec(&poch_dev->usage);
843         atomic_inc(&channel->free);
844  out:
845         return ret;
846 }
847
848 static int poch_release(struct inode *inode, struct file *filp)
849 {
850         struct channel_info *channel = filp->private_data;
851         struct poch_dev *poch_dev;
852         int usage;
853
854         poch_dev = container_of(inode->i_cdev, struct poch_dev, cdev);
855
856         usage = atomic_dec_return(&poch_dev->usage);
857         if (usage == 0) {
858                 printk(KERN_WARNING "poch_card_power_off\n");
859                 poch_card_power_off(poch_dev);
860         }
861
862         atomic_dec(&channel->inited);
863         poch_channel_free_header(channel);
864         poch_channel_free_groups(channel);
865         kfree(channel->groups);
866         atomic_inc(&channel->free);
867
868         return 0;
869 }
870
871 /*
872  * Map the header and the group buffers to user space.
873  */
874 static int poch_mmap(struct file *filp, struct vm_area_struct *vma)
875 {
876         struct channel_info *channel = filp->private_data;
877
878         unsigned long start;
879         unsigned long size;
880
881         unsigned long group_pages;
882         unsigned long header_pages;
883         unsigned long total_group_pages;
884
885         int pg_num;
886         struct page *pg;
887
888         int i;
889         int ret;
890
891         printk(KERN_WARNING "poch_mmap\n");
892
893         if (vma->vm_pgoff) {
894                 printk(KERN_WARNING PFX "page offset: %lu\n", vma->vm_pgoff);
895                 return -EINVAL;
896         }
897
898         group_pages = npages(channel->group_size);
899         header_pages = npages(channel->header_size);
900         total_group_pages = group_pages * channel->group_count;
901
902         size = vma->vm_end - vma->vm_start;
903         if (size != (header_pages + total_group_pages) * PAGE_SIZE) {
904                 printk(KERN_WARNING PFX "unexpected mmap size %lu bytes\n", size);
905                 return -EINVAL;
906         }
907
908         start = vma->vm_start;
909
910         /* FIXME: Cleanup required on failure? */
911         pg = channel->header_pg;
912         for (pg_num = 0; pg_num < header_pages; pg_num++, pg++) {
913                 printk(KERN_DEBUG PFX "page_count: %d\n", page_count(pg));
914                 printk(KERN_DEBUG PFX "%d: header: 0x%lx\n", pg_num, start);
915                 ret = vm_insert_page(vma, start, pg);
916                 if (ret) {
917                         printk(KERN_DEBUG "vm_insert 1 failed at %lx\n", start);
918                         return ret;
919                 }
920                 start += PAGE_SIZE;
921         }
922
923         for (i = 0; i < channel->group_count; i++) {
924                 pg = channel->groups[i].pg;
925                 for (pg_num = 0; pg_num < group_pages; pg_num++, pg++) {
926                         printk(KERN_DEBUG PFX "%d: group %d: 0x%lx\n",
927                                pg_num, i, start);
928                         ret = vm_insert_page(vma, start, pg);
929                         if (ret) {
930                                 printk(KERN_DEBUG PFX
931                                        "vm_insert 2 failed at %d\n", pg_num);
932                                 return ret;
933                         }
934                         start += PAGE_SIZE;
935                 }
936         }
937
938         return 0;
939 }
940
941 /*
942  * Check whether there is some group that the user space has not
943  * consumed yet. When the user space consumes a group, it sets it to
944  * -1. Consuming could be reading data in case of RX and filling a
945  * buffer in case of TX.
946  */
947 static int poch_channel_available(struct channel_info *channel)
948 {
949         int i;
950
951         spin_lock_irq(&channel->group_offsets_lock);
952
953         for (i = 0; i < channel->group_count; i++) {
954                 if (channel->header->group_offsets[i] != -1) {
955                         spin_unlock_irq(&channel->group_offsets_lock);
956                         return 1;
957                 }
958         }
959
960         spin_unlock_irq(&channel->group_offsets_lock);
961
962         return 0;
963 }
964
965 static unsigned int poch_poll(struct file *filp, poll_table *pt)
966 {
967         struct channel_info *channel = filp->private_data;
968         unsigned int ret = 0;
969
970         poll_wait(filp, &channel->wq, pt);
971
972         if (poch_channel_available(channel)) {
973                 if (channel->dir == CHANNEL_DIR_RX)
974                         ret = POLLIN | POLLRDNORM;
975                 else
976                         ret = POLLOUT | POLLWRNORM;
977         }
978
979         return ret;
980 }
981
982 static int poch_ioctl(struct inode *inode, struct file *filp,
983                       unsigned int cmd, unsigned long arg)
984 {
985         struct channel_info *channel = filp->private_data;
986         void __iomem *fpga = channel->fpga_iomem;
987         void __iomem *bridge = channel->bridge_iomem;
988         void __user *argp = (void __user *)arg;
989         struct vm_area_struct *vms;
990         struct poch_counters counters;
991         int ret;
992
993         switch (cmd) {
994         case POCH_IOC_TRANSFER_START:
995                 switch (channel->chno) {
996                 case CHNO_TX_CHANNEL:
997                         printk(KERN_INFO PFX "ioctl: Tx start\n");
998                         iowrite32(0x1, fpga + FPGA_TX_TRIGGER_REG);
999                         iowrite32(0x1, fpga + FPGA_TX_ENABLE_REG);
1000
1001                         /* FIXME: Does it make sense to do a DMA GO
1002                          * twice, once in Tx and once in Rx.
1003                          */
1004                         iowrite32(0x1, bridge + BRIDGE_DMA_GO_REG);
1005                         break;
1006                 case CHNO_RX_CHANNEL:
1007                         printk(KERN_INFO PFX "ioctl: Rx start\n");
1008                         iowrite32(0x1, fpga + FPGA_RX_ARM_REG);
1009                         iowrite32(0x1, bridge + BRIDGE_DMA_GO_REG);
1010                         break;
1011                 }
1012                 break;
1013         case POCH_IOC_TRANSFER_STOP:
1014                 switch (channel->chno) {
1015                 case CHNO_TX_CHANNEL:
1016                         printk(KERN_INFO PFX "ioctl: Tx stop\n");
1017                         iowrite32(0x0, fpga + FPGA_TX_ENABLE_REG);
1018                         iowrite32(0x0, fpga + FPGA_TX_TRIGGER_REG);
1019                         iowrite32(0x0, bridge + BRIDGE_DMA_GO_REG);
1020                         break;
1021                 case CHNO_RX_CHANNEL:
1022                         printk(KERN_INFO PFX "ioctl: Rx stop\n");
1023                         iowrite32(0x0, fpga + FPGA_RX_ARM_REG);
1024                         iowrite32(0x0, bridge + BRIDGE_DMA_GO_REG);
1025                         break;
1026                 }
1027                 break;
1028         case POCH_IOC_GET_COUNTERS:
1029                 if (!access_ok(VERIFY_WRITE, argp, sizeof(struct poch_counters)))
1030                         return -EFAULT;
1031
1032                 spin_lock_irq(&channel->counters_lock);
1033                 counters = channel->counters;
1034                 __poch_channel_clear_counters(channel);
1035                 spin_unlock_irq(&channel->counters_lock);
1036
1037                 ret = copy_to_user(argp, &counters,
1038                                    sizeof(struct poch_counters));
1039                 if (ret)
1040                         return -EFAULT;
1041
1042                 break;
1043         case POCH_IOC_SYNC_GROUP_FOR_USER:
1044         case POCH_IOC_SYNC_GROUP_FOR_DEVICE:
1045                 vms = find_vma(current->mm, arg);
1046                 if (!vms)
1047                         /* Address not mapped. */
1048                         return -EINVAL;
1049                 if (vms->vm_file != filp)
1050                         /* Address mapped from different device/file. */
1051                         return -EINVAL;
1052
1053                 flush_cache_range(vms, arg, arg + channel->group_size);
1054                 break;
1055         }
1056         return 0;
1057 }
1058
1059 static const struct file_operations poch_fops = {
1060         .owner = THIS_MODULE,
1061         .open = poch_open,
1062         .release = poch_release,
1063         .ioctl = poch_ioctl,
1064         .poll = poch_poll,
1065         .mmap = poch_mmap
1066 };
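
/*
 * Rough userspace usage sketch (illustrative only: device node names and
 * sysfs paths depend on udev and are assumptions here, and error handling
 * is omitted). ch0 is the RX channel and ch1 the TX channel of a card.
 *
 *  1. Configure the channel through its sysfs attributes (e.g. under
 *     /sys/class/pocketchange/ch0/): write block_size, group_size and
 *     group_count, then read mmap_size.
 *  2. open() the channel's character device and mmap() mmap_size bytes;
 *     the mapping starts with the struct poch_cbuf_header, followed by
 *     the group buffers at the offsets published in that header.
 *  3. ioctl(fd, POCH_IOC_TRANSFER_START, 0) starts streaming; poll() wakes
 *     up when a group offset in the header becomes != -1; a consumed group
 *     is handed back by setting its offset to -1; POCH_IOC_TRANSFER_STOP
 *     ends the transfer.
 */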
1067
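/*
 * DMA completion handling: read the FPGA's current-group register, work
 * out how many groups completed since the last interrupt (allowing for
 * wrap-around), publish their user offsets in the shared header, and wake
 * up any poll()ers.
 */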
1068 static void poch_irq_dma(struct channel_info *channel)
1069 {
1070         u32 prev_transfer;
1071         u32 curr_transfer;
1072         long groups_done;
1073         unsigned long i, j;
1074         struct poch_group_info *groups;
1075         s32 *group_offsets;
1076         u32 curr_group_reg;
1077
1078         if (!atomic_read(&channel->inited))
1079                 return;
1080
1081         prev_transfer = channel->transfer;
1082
1083         if (channel->chno == CHNO_RX_CHANNEL)
1084                 curr_group_reg = FPGA_RX_CURR_GROUP_REG;
1085         else
1086                 curr_group_reg = FPGA_TX_CURR_GROUP_REG;
1087
1088         curr_transfer = ioread32(channel->fpga_iomem + curr_group_reg);
1089
1090         groups_done = curr_transfer - prev_transfer;
1091         /* Check wrap over, and handle it. */
1092         if (groups_done <= 0)
1093                 groups_done += channel->group_count;
1094
1095         group_offsets = channel->header->group_offsets;
1096         groups = channel->groups;
1097
1098         spin_lock(&channel->group_offsets_lock);
1099
1100         for (i = 0; i < groups_done; i++) {
1101                 j = (prev_transfer + i) % channel->group_count;
1102                 group_offsets[j] = groups[j].user_offset;
1103         }
1104
1105         spin_unlock(&channel->group_offsets_lock);
1106
1107         channel->transfer = curr_transfer;
1108
1109         wake_up_interruptible(&channel->wq);
1110 }
1111
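/*
 * Top-level interrupt handler: the bridge indicates an FPGA interrupt,
 * the FPGA status narrows it down to DMA completion on RX/TX and/or the
 * error conditions counted for POCH_IOC_GET_COUNTERS, and the bridge-level
 * FPGA interrupt is then re-enabled.
 */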
1112 static irqreturn_t poch_irq_handler(int irq, void *p)
1113 {
1114         struct poch_dev *poch_dev = p;
1115         void __iomem *bridge = poch_dev->bridge_iomem;
1116         void __iomem *fpga = poch_dev->fpga_iomem;
1117         struct channel_info *channel_rx = &poch_dev->channels[CHNO_RX_CHANNEL];
1118         struct channel_info *channel_tx = &poch_dev->channels[CHNO_TX_CHANNEL];
1119         u32 bridge_stat;
1120         u32 fpga_stat;
1121         u32 dma_stat;
1122
1123         bridge_stat = ioread32(bridge + BRIDGE_INT_STAT_REG);
1124         fpga_stat = ioread32(fpga + FPGA_INT_STAT_REG);
1125         dma_stat = ioread32(fpga + FPGA_DMA_INT_STAT_REG);
1126
1127         ioread32(fpga + FPGA_DMA_INT_STAT_REG);
1128         ioread32(fpga + FPGA_INT_STAT_REG);
1129         ioread32(bridge + BRIDGE_INT_STAT_REG);
1130
1131         if (bridge_stat & BRIDGE_INT_FPGA) {
1132                 if (fpga_stat & FPGA_INT_DMA_CORE) {
1133                         if (dma_stat & FPGA_DMA_INT_RX)
1134                                 poch_irq_dma(channel_rx);
1135                         if (dma_stat & FPGA_DMA_INT_TX)
1136                                 poch_irq_dma(channel_tx);
1137                 }
1138                 if (fpga_stat & FPGA_INT_PLL_UNLOCKED) {
1139                         channel_tx->counters.pll_unlock++;
1140                         channel_rx->counters.pll_unlock++;
1141                         if (printk_ratelimit())
1142                                 printk(KERN_WARNING PFX "PLL unlocked\n");
1143                 }
1144                 if (fpga_stat & FPGA_INT_TX_FF_EMPTY)
1145                         channel_tx->counters.fifo_empty++;
1146                 if (fpga_stat & FPGA_INT_TX_FF_OVRFLW)
1147                         channel_tx->counters.fifo_overflow++;
1148                 if (fpga_stat & FPGA_INT_RX_FF_EMPTY)
1149                         channel_rx->counters.fifo_empty++;
1150                 if (fpga_stat & FPGA_INT_RX_FF_OVRFLW)
1151                         channel_rx->counters.fifo_overflow++;
1152
1153                 /*
1154                  * FIXME: These errors should be notified through the
1155                  * poll interface as POLLERR.
1156                  */
1157
1158                 /* Re-enable interrupts. */
1159                 iowrite32(BRIDGE_INT_FPGA, bridge + BRIDGE_INT_MASK_REG);
1160
1161                 return IRQ_HANDLED;
1162         }
1163
1164         return IRQ_NONE;
1165 }
1166
1167 static void poch_class_dev_unregister(struct poch_dev *poch_dev, int id)
1168 {
1169         int i, j;
1170         int nattrs;
1171         struct channel_info *channel;
1172         dev_t devno;
1173
1174         if (poch_dev->dev == NULL)
1175                 return;
1176
1177         for (i = 0; i < poch_dev->nchannels; i++) {
1178                 channel = &poch_dev->channels[i];
1179                 devno = poch_first_dev + (id * poch_dev->nchannels) + i;
1180
1181                 if (!channel->dev)
1182                         continue;
1183
1184                 nattrs = ARRAY_SIZE(poch_class_attrs);
1185                 for (j = 0; j < nattrs; j++)
1186                         device_remove_file(channel->dev, poch_class_attrs[j]);
1187
1188                 device_unregister(channel->dev);
1189         }
1190
1191         device_unregister(poch_dev->dev);
1192 }
1193
1194 static int __devinit poch_class_dev_register(struct poch_dev *poch_dev,
1195                                              int id)
1196 {
1197         struct device *dev = &poch_dev->pci_dev->dev;
1198         int i, j;
1199         int nattrs;
1200         int ret;
1201         struct channel_info *channel;
1202         dev_t devno;
1203
1204         poch_dev->dev = device_create(poch_cls, &poch_dev->pci_dev->dev,
1205                                       MKDEV(0, 0), NULL, "poch%d", id);
1206         if (IS_ERR(poch_dev->dev)) {
1207                 dev_err(dev, "error creating parent class device\n");
1208                 ret = PTR_ERR(poch_dev->dev);
1209                 poch_dev->dev = NULL;
1210                 return ret;
1211         }
1212
1213         for (i = 0; i < poch_dev->nchannels; i++) {
1214                 channel = &poch_dev->channels[i];
1215
1216                 devno = poch_first_dev + (id * poch_dev->nchannels) + i;
1217                 channel->dev = device_create(poch_cls, poch_dev->dev, devno,
1218                                              NULL, "ch%d", i);
1219                 if (IS_ERR(channel->dev)) {
1220                         dev_err(dev, "error creating channel class device\n");
1221                         ret = PTR_ERR(channel->dev);
1222                         channel->dev = NULL;
1223                         poch_class_dev_unregister(poch_dev, id);
1224                         return ret;
1225                 }
1226
1227                 dev_set_drvdata(channel->dev, channel);
1228                 nattrs = ARRAY_SIZE(poch_class_attrs);
1229                 for (j = 0; j < nattrs; j++) {
1230                         ret = device_create_file(channel->dev,
1231                                                  poch_class_attrs[j]);
1232                         if (ret) {
1233                                 dev_err(dev, "error creating attribute file\n");
1234                                 poch_class_dev_unregister(poch_dev, id);
1235                                 return ret;
1236                         }
1237                 }
1238         }
1239
1240         return 0;
1241 }
1242
1243 static int __devinit poch_pci_probe(struct pci_dev *pdev,
1244                                     const struct pci_device_id *pci_id)
1245 {
1246         struct device *dev = &pdev->dev;
1247         struct poch_dev *poch_dev;
1248         struct uio_info *uio;
1249         int ret;
1250         int id;
1251         int i;
1252
1253         poch_dev = kzalloc(sizeof(struct poch_dev), GFP_KERNEL);
1254         if (!poch_dev) {
1255                 dev_err(dev, "error allocating priv. data memory\n");
1256                 return -ENOMEM;
1257         }
1258
1259         poch_dev->pci_dev = pdev;
1260         uio = &poch_dev->uio;
1261
1262         pci_set_drvdata(pdev, poch_dev);
1263
1264         spin_lock_init(&poch_dev->iomem_lock);
1265
1266         poch_dev->nchannels = POCH_NCHANNELS;
1267         poch_dev->channels[CHNO_RX_CHANNEL].dir = CHANNEL_DIR_RX;
1268         poch_dev->channels[CHNO_TX_CHANNEL].dir = CHANNEL_DIR_TX;
1269
1270         for (i = 0; i < poch_dev->nchannels; i++) {
1271                 init_waitqueue_head(&poch_dev->channels[i].wq);
1272                 atomic_set(&poch_dev->channels[i].free, 1);
1273                 atomic_set(&poch_dev->channels[i].inited, 0);
1274         }
1275
1276         ret = pci_enable_device(pdev);
1277         if (ret) {
1278                 dev_err(dev, "error enabling device\n");
1279                 goto out_free;
1280         }
1281
1282         ret = pci_request_regions(pdev, "poch");
1283         if (ret) {
1284                 dev_err(dev, "error requesting resources\n");
1285                 goto out_disable;
1286         }
1287
1288         uio->mem[0].addr = pci_resource_start(pdev, 1);
1289         if (!uio->mem[0].addr) {
1290                 dev_err(dev, "invalid BAR1\n");
1291                 ret = -ENODEV;
1292                 goto out_release;
1293         }
1294
1295         uio->mem[0].size = pci_resource_len(pdev, 1);
1296         uio->mem[0].memtype = UIO_MEM_PHYS;
1297
1298         uio->name = "poch";
1299         uio->version = "0.0.1";
1300         uio->irq = -1;
1301         ret = uio_register_device(dev, uio);
1302         if (ret) {
1303                 dev_err(dev, "error registering UIO device: %d\n", ret);
1304                 goto out_release;
1305         }
1306
1307         poch_dev->bridge_iomem = ioremap(pci_resource_start(pdev, 0),
1308                                          pci_resource_len(pdev, 0));
1309         if (poch_dev->bridge_iomem == NULL) {
1310                 dev_err(dev, "error mapping bridge (bar0) registers\n");
1311                 ret = -ENOMEM;
1312                 goto out_uio_unreg;
1313         }
1314
1315         poch_dev->fpga_iomem = ioremap(pci_resource_start(pdev, 1),
1316                                        pci_resource_len(pdev, 1));
1317         if (poch_dev->fpga_iomem == NULL) {
1318                 dev_err(dev, "error mapping fpga (bar1) registers\n");
1319                 ret = -ENOMEM;
1320                 goto out_bar0_unmap;
1321         }
1322
1323         ret = request_irq(pdev->irq, poch_irq_handler, IRQF_SHARED,
1324                           dev_name(dev), poch_dev);
1325         if (ret) {
1326                 dev_err(dev, "error requesting IRQ %u\n", pdev->irq);
1328                 goto out_bar1_unmap;
1329         }
1330
1331         if (!idr_pre_get(&poch_ids, GFP_KERNEL)) {
1332                 dev_err(dev, "error allocating memory ids\n");
1333                 ret = -ENOMEM;
1334                 goto out_free_irq;
1335         }
1336
1337         idr_get_new(&poch_ids, poch_dev, &id);
1338         if (id >= MAX_POCH_CARDS) {
1339                 dev_err(dev, "minors exhausted\n");
1340                 ret = -EBUSY;
1341                 goto out_idr_remove;
1342         }
1343
1344         cdev_init(&poch_dev->cdev, &poch_fops);
1345         poch_dev->cdev.owner = THIS_MODULE;
1346         ret = cdev_add(&poch_dev->cdev,
1347                        poch_first_dev + (id * poch_dev->nchannels),
1348                        poch_dev->nchannels);
1349         if (ret) {
1350                 dev_err(dev, "error registering character device\n");
1351                 goto out_idr_remove;
1352         }
1353
1354         ret = poch_class_dev_register(poch_dev, id);
1355         if (ret)
1356                 goto out_cdev_del;
1357
1358         return 0;
1359
1360  out_cdev_del:
1361         cdev_del(&poch_dev->cdev);
1362  out_idr_remove:
1363         idr_remove(&poch_ids, id);
1364  out_free_irq:
1365         free_irq(pdev->irq, poch_dev);
1366  out_bar1_unmap:
1367         iounmap(poch_dev->fpga_iomem);
1368  out_bar0_unmap:
1369         iounmap(poch_dev->bridge_iomem);
1370  out_uio_unreg:
1371         uio_unregister_device(uio);
1372  out_release:
1373         pci_release_regions(pdev);
1374  out_disable:
1375         pci_disable_device(pdev);
1376  out_free:
1377         kfree(poch_dev);
1378         return ret;
1379 }
1380
1381 /*
1382  * FIXME: We are yet to handle the hot unplug case.
1383  */
1384 static void poch_pci_remove(struct pci_dev *pdev)
1385 {
1386         struct poch_dev *poch_dev = pci_get_drvdata(pdev);
1387         struct uio_info *uio = &poch_dev->uio;
1388         unsigned int minor = MINOR(poch_dev->cdev.dev);
1389         unsigned int id = minor / poch_dev->nchannels;
1390
1391         poch_class_dev_unregister(poch_dev, id);
1392         cdev_del(&poch_dev->cdev);
1393         idr_remove(&poch_ids, id);
1394         free_irq(pdev->irq, poch_dev);
1395         iounmap(poch_dev->fpga_iomem);
1396         iounmap(poch_dev->bridge_iomem);
1397         uio_unregister_device(uio);
1398         pci_release_regions(pdev);
1399         pci_disable_device(pdev);
1400         pci_set_drvdata(pdev, NULL);
1402
1403         kfree(poch_dev);
1404 }
1405
1406 static const struct pci_device_id poch_pci_ids[] /* __devinitconst */ = {
1407         { PCI_DEVICE(PCI_VENDOR_ID_RRAPIDS,
1408                      PCI_DEVICE_ID_RRAPIDS_POCKET_CHANGE) },
1409         { 0, }
1410 };
1411
1412 static struct pci_driver poch_pci_driver = {
1413         .name = DRV_NAME,
1414         .id_table = poch_pci_ids,
1415         .probe = poch_pci_probe,
1416         .remove = poch_pci_remove,
1417 };
1418
1419 static int __init poch_init_module(void)
1420 {
1421         int ret = 0;
1422
1423         ret = alloc_chrdev_region(&poch_first_dev, 0,
1424                                   MAX_POCH_DEVICES, DRV_NAME);
1425         if (ret) {
1426                 printk(KERN_ERR PFX "error allocating device numbers\n");
1427                 return ret;
1428         }
1429
1430         poch_cls = class_create(THIS_MODULE, "pocketchange");
1431         if (IS_ERR(poch_cls)) {
1432                 ret = PTR_ERR(poch_cls);
1433                 goto out_unreg_chrdev;
1434         }
1435
1436         ret = pci_register_driver(&poch_pci_driver);
1437         if (ret) {
1438                 printk(KERN_ERR PFX "error registering PCI driver\n");
1439                 goto out_class_destroy;
1440         }
1441
1442         return 0;
1443
1444  out_class_destroy:
1445         class_destroy(poch_cls);
1446
1447  out_unreg_chrdev:
1448         unregister_chrdev_region(poch_first_dev, MAX_POCH_DEVICES);
1449
1450         return ret;
1451 }
1452
1453 static void __exit poch_exit_module(void)
1454 {
1455         pci_unregister_driver(&poch_pci_driver);
1456         class_destroy(poch_cls);
1457         unregister_chrdev_region(poch_first_dev, MAX_POCH_DEVICES);
1458 }
1459
1460 module_init(poch_init_module);
1461 module_exit(poch_exit_module);
1462
1463 MODULE_LICENSE("GPL v2");