Staging: poch: Fix user space protocol syncing
[linux-2.6] drivers/staging/poch/poch.c
1 /*
2  * User-space DMA and UIO based Redrapids Pocket Change CardBus driver
3  *
4  * Copyright 2008 Vijay Kumar <vijaykumar@bravegnu.org>
5  *
6  * Licensed under GPL version 2 only.
7  */
8
9 #include <linux/device.h>
10 #include <linux/module.h>
11 #include <linux/pci.h>
12 #include <linux/uio_driver.h>
13 #include <linux/spinlock.h>
14 #include <linux/cdev.h>
15 #include <linux/delay.h>
16 #include <linux/sysfs.h>
17 #include <linux/poll.h>
18 #include <linux/idr.h>
19 #include <linux/interrupt.h>
20 #include <linux/init.h>
21 #include <linux/ioctl.h>
22 #include <linux/io.h>
23
24 #include "poch.h"
25
26 #include <asm/cacheflush.h>
27
28 #ifndef PCI_VENDOR_ID_RRAPIDS
29 #define PCI_VENDOR_ID_RRAPIDS 0x17D2
30 #endif
31
32 #ifndef PCI_DEVICE_ID_RRAPIDS_POCKET_CHANGE
33 #define PCI_DEVICE_ID_RRAPIDS_POCKET_CHANGE 0x0351
34 #endif
35
36 #define POCH_NCHANNELS 2
37
38 #define MAX_POCH_CARDS 8
39 #define MAX_POCH_DEVICES (MAX_POCH_CARDS * POCH_NCHANNELS)
40
41 #define DRV_NAME "poch"
42 #define PFX      DRV_NAME ": "
43
44 /*
45  * BAR0 Bridge Register Definitions
46  */
47
48 #define BRIDGE_REV_REG                  0x0
49 #define BRIDGE_INT_MASK_REG             0x4
50 #define BRIDGE_INT_STAT_REG             0x8
51
52 #define BRIDGE_INT_ACTIVE               (0x1 << 31)
53 #define BRIDGE_INT_FPGA                 (0x1 << 2)
54 #define BRIDGE_INT_TEMP_FAIL            (0x1 << 1)
55 #define BRIDGE_INT_TEMP_WARN            (0x1 << 0)
56
57 #define BRIDGE_FPGA_RESET_REG           0xC
58
59 #define BRIDGE_CARD_POWER_REG           0x10
60 #define BRIDGE_CARD_POWER_EN            (0x1 << 0)
61 #define BRIDGE_CARD_POWER_PROG_DONE     (0x1 << 31)
62
63 #define BRIDGE_JTAG_REG                 0x14
64 #define BRIDGE_DMA_GO_REG               0x18
65 #define BRIDGE_STAT_0_REG               0x1C
66 #define BRIDGE_STAT_1_REG               0x20
67 #define BRIDGE_STAT_2_REG               0x24
68 #define BRIDGE_STAT_3_REG               0x28
69 #define BRIDGE_TEMP_STAT_REG            0x2C
70 #define BRIDGE_TEMP_THRESH_REG          0x30
71 #define BRIDGE_EEPROM_REVSEL_REG        0x34
72 #define BRIDGE_CIS_STRUCT_REG           0x100
73 #define BRIDGE_BOARDREV_REG             0x124
74
75 /*
76  * BAR1 FPGA Register Definitions
77  */
78
79 #define FPGA_IFACE_REV_REG              0x0
80 #define FPGA_RX_BLOCK_SIZE_REG          0x8
81 #define FPGA_TX_BLOCK_SIZE_REG          0xC
82 #define FPGA_RX_BLOCK_COUNT_REG         0x10
83 #define FPGA_TX_BLOCK_COUNT_REG         0x14
84 #define FPGA_RX_CURR_DMA_BLOCK_REG      0x18
85 #define FPGA_TX_CURR_DMA_BLOCK_REG      0x1C
86 #define FPGA_RX_GROUP_COUNT_REG         0x20
87 #define FPGA_TX_GROUP_COUNT_REG         0x24
88 #define FPGA_RX_CURR_GROUP_REG          0x28
89 #define FPGA_TX_CURR_GROUP_REG          0x2C
90 #define FPGA_RX_CURR_PCI_REG            0x38
91 #define FPGA_TX_CURR_PCI_REG            0x3C
92 #define FPGA_RX_GROUP0_START_REG        0x40
93 #define FPGA_TX_GROUP0_START_REG        0xC0
94 #define FPGA_DMA_DESC_1_REG             0x140
95 #define FPGA_DMA_DESC_2_REG             0x144
96 #define FPGA_DMA_DESC_3_REG             0x148
97 #define FPGA_DMA_DESC_4_REG             0x14C
98
99 #define FPGA_DMA_INT_STAT_REG           0x150
100 #define FPGA_DMA_INT_MASK_REG           0x154
101 #define FPGA_DMA_INT_RX         (1 << 0)
102 #define FPGA_DMA_INT_TX         (1 << 1)
103
104 #define FPGA_RX_GROUPS_PER_INT_REG      0x158
105 #define FPGA_TX_GROUPS_PER_INT_REG      0x15C
106 #define FPGA_DMA_ADR_PAGE_REG           0x160
107 #define FPGA_FPGA_REV_REG               0x200
108
109 #define FPGA_ADC_CLOCK_CTL_REG          0x204
110 #define FPGA_ADC_CLOCK_CTL_OSC_EN       (0x1 << 3)
111 #define FPGA_ADC_CLOCK_LOCAL_CLK        (0x1 | FPGA_ADC_CLOCK_CTL_OSC_EN)
112 #define FPGA_ADC_CLOCK_EXT_SAMP_CLK     0x0
113
114 #define FPGA_ADC_DAC_EN_REG             0x208
115 #define FPGA_ADC_DAC_EN_DAC_OFF         (0x1 << 1)
116 #define FPGA_ADC_DAC_EN_ADC_OFF         (0x1 << 0)
117
118 #define FPGA_INT_STAT_REG               0x20C
119 #define FPGA_INT_MASK_REG               0x210
120 #define FPGA_INT_PLL_UNLOCKED           (0x1 << 9)
121 #define FPGA_INT_DMA_CORE               (0x1 << 8)
122 #define FPGA_INT_TX_FF_EMPTY            (0x1 << 7)
123 #define FPGA_INT_RX_FF_EMPTY            (0x1 << 6)
124 #define FPGA_INT_TX_FF_OVRFLW           (0x1 << 3)
125 #define FPGA_INT_RX_FF_OVRFLW           (0x1 << 2)
126 #define FPGA_INT_TX_ACQ_DONE            (0x1 << 1)
127 #define FPGA_INT_RX_ACQ_DONE            (0x1)
128
129 #define FPGA_RX_CTL_REG                 0x214
130 #define FPGA_RX_CTL_FIFO_FLUSH          (0x1 << 9)
131 #define FPGA_RX_CTL_SYNTH_DATA          (0x1 << 8)
132 #define FPGA_RX_CTL_CONT_CAP            (0x0 << 1)
133 #define FPGA_RX_CTL_SNAP_CAP            (0x1 << 1)
134
135 #define FPGA_RX_ARM_REG                 0x21C
136
137 #define FPGA_DOM_REG                    0x224
138 #define FPGA_DOM_DCM_RESET              (0x1 << 5)
139 #define FPGA_DOM_SOFT_RESET             (0x1 << 4)
140 #define FPGA_DOM_DUAL_M_SG_DMA          (0x0)
141 #define FPGA_DOM_TARGET_ACCESS          (0x1)
142
143 #define FPGA_TX_CTL_REG                 0x228
144 #define FPGA_TX_CTL_FIFO_FLUSH          (0x1 << 9)
145 #define FPGA_TX_CTL_OUTPUT_ZERO         (0x0 << 2)
146 #define FPGA_TX_CTL_OUTPUT_CARDBUS      (0x1 << 2)
147 #define FPGA_TX_CTL_OUTPUT_ADC          (0x2 << 2)
148 #define FPGA_TX_CTL_OUTPUT_SNAPSHOT     (0x3 << 2)
149 #define FPGA_TX_CTL_LOOPBACK            (0x1 << 0)
150
151 #define FPGA_ENDIAN_MODE_REG            0x22C
152 #define FPGA_RX_FIFO_COUNT_REG          0x28C
153 #define FPGA_TX_ENABLE_REG              0x298
154 #define FPGA_TX_TRIGGER_REG             0x29C
155 #define FPGA_TX_DATAMEM_COUNT_REG       0x2A8
156 #define FPGA_CAP_FIFO_REG               0x300
157 #define FPGA_TX_SNAPSHOT_REG            0x8000
158
159 /*
160  * Channel Index Definitions
161  */
162
163 enum {
164         CHNO_RX_CHANNEL,
165         CHNO_TX_CHANNEL,
166 };
167
168 struct poch_dev;
169
170 enum channel_dir {
171         CHANNEL_DIR_RX,
172         CHANNEL_DIR_TX,
173 };
174
175 struct poch_group_info {
176         struct page *pg;
177         dma_addr_t dma_addr;
178         unsigned long user_offset;
179 };
180
181 struct channel_info {
182         unsigned int chno;
183
184         atomic_t sys_block_size;
185         atomic_t sys_group_size;
186         atomic_t sys_group_count;
187
188         enum channel_dir dir;
189
190         unsigned long block_size;
191         unsigned long group_size;
192         unsigned long group_count;
193
194         /* Contains the DMA address and VM offset of each group. */
195         struct poch_group_info *groups;
196
197         /* Contains the header and circular buffer exported to userspace. */
198         spinlock_t group_offsets_lock;
199         struct poch_cbuf_header *header;
200         struct page *header_pg;
201         unsigned long header_size;
202
203         /* Last group indicated as 'complete' to user space. */
204         unsigned int transfer;
205
206         wait_queue_head_t wq;
207
208         union {
209                 unsigned int data_available;
210                 unsigned int space_available;
211         };
212
213         void __iomem *bridge_iomem;
214         void __iomem *fpga_iomem;
215         spinlock_t *iomem_lock;
216
217         atomic_t free;
218         atomic_t inited;
219
220         /* Error counters */
221         struct poch_counters counters;
222         spinlock_t counters_lock;
223
224         struct device *dev;
225 };
226
227 struct poch_dev {
228         struct uio_info uio;
229         struct pci_dev *pci_dev;
230         unsigned int nchannels;
231         struct channel_info channels[POCH_NCHANNELS];
232         struct cdev cdev;
233
234         /* Counts the no. of channels that have been opened. On first
235          * open, the card is powered on. On last channel close, the
236          * card is powered off.
237          */
238         atomic_t usage;
239
240         void __iomem *bridge_iomem;
241         void __iomem *fpga_iomem;
242         spinlock_t iomem_lock;
243
244         struct device *dev;
245 };
246
247 static dev_t poch_first_dev;
248 static struct class *poch_cls;
249 static DEFINE_IDR(poch_ids);
250
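/*
 * Per-channel sysfs attributes. Values written to the writable
 * attributes are only latched into the channel by channel_latch_attr()
 * when the channel device is opened.
 */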
251 static ssize_t store_block_size(struct device *dev,
252                                 struct device_attribute *attr,
253                                 const char *buf, size_t count)
254 {
255         struct channel_info *channel = dev_get_drvdata(dev);
256         unsigned long block_size;
257
258         if (sscanf(buf, "%lu", &block_size) != 1)
                    return -EINVAL;
259         atomic_set(&channel->sys_block_size, block_size);
260
261         return count;
262 }
263 static DEVICE_ATTR(block_size, S_IWUSR|S_IWGRP, NULL, store_block_size);
264
265 static ssize_t store_group_size(struct device *dev,
266                                 struct device_attribute *attr,
267                                 const char *buf, size_t count)
268 {
269         struct channel_info *channel = dev_get_drvdata(dev);
270         unsigned long group_size;
271
272         if (sscanf(buf, "%lu", &group_size) != 1)
                    return -EINVAL;
273         atomic_set(&channel->sys_group_size, group_size);
274
275         return count;
276 }
277 static DEVICE_ATTR(group_size, S_IWUSR|S_IWGRP, NULL, store_group_size);
278
279 static ssize_t store_group_count(struct device *dev,
280                                 struct device_attribute *attr,
281                                  const char *buf, size_t count)
282 {
283         struct channel_info *channel = dev_get_drvdata(dev);
284         unsigned long group_count;
285
286         if (sscanf(buf, "%lu", &group_count) != 1)
                    return -EINVAL;
287         atomic_set(&channel->sys_group_count, group_count);
288
289         return count;
290 }
291 static DEVICE_ATTR(group_count, S_IWUSR|S_IWGRP, NULL, store_group_count);
292
293 static ssize_t show_direction(struct device *dev,
294                               struct device_attribute *attr, char *buf)
295 {
296         struct channel_info *channel = dev_get_drvdata(dev);
297         int len;
298
299         len = sprintf(buf, "%s\n", (channel->dir ? "tx" : "rx"));
300         return len;
301 }
302 static DEVICE_ATTR(dir, S_IRUSR|S_IRGRP, show_direction, NULL);
303
304 static unsigned long npages(unsigned long bytes)
305 {
306         if (bytes % PAGE_SIZE == 0)
307                 return bytes / PAGE_SIZE;
308         else
309                 return (bytes / PAGE_SIZE) + 1;
310 }
311
312 static ssize_t show_mmap_size(struct device *dev,
313                               struct device_attribute *attr, char *buf)
314 {
315         struct channel_info *channel = dev_get_drvdata(dev);
316         int len;
317         unsigned long mmap_size;
318         unsigned long group_pages;
319         unsigned long header_pages;
320         unsigned long total_group_pages;
321
322         group_pages = npages(channel->group_size);
323         header_pages = npages(channel->header_size);
324         total_group_pages = group_pages * channel->group_count;
325
326         mmap_size = (header_pages + total_group_pages) * PAGE_SIZE;
327         len = sprintf(buf, "%lu\n", mmap_size);
328         return len;
329 }
330 static DEVICE_ATTR(mmap_size, S_IRUSR|S_IRGRP, show_mmap_size, NULL);
331
332 static struct device_attribute *poch_class_attrs[] = {
333         &dev_attr_block_size,
334         &dev_attr_group_size,
335         &dev_attr_group_count,
336         &dev_attr_dir,
337         &dev_attr_mmap_size,
338 };
339
340 static void poch_channel_free_groups(struct channel_info *channel)
341 {
342         unsigned long i;
343
344         for (i = 0; i < channel->group_count; i++) {
345                 struct poch_group_info *group;
346                 unsigned int order;
347
348                 group = &channel->groups[i];
349                 order = get_order(channel->group_size);
350                 if (group->pg)
351                         __free_pages(group->pg, order);
352         }
353 }
354
355 static int poch_channel_alloc_groups(struct channel_info *channel)
356 {
357         unsigned long i;
358         unsigned long group_pages;
359         unsigned long header_pages;
360
361         group_pages = npages(channel->group_size);
362         header_pages = npages(channel->header_size);
363
364         for (i = 0; i < channel->group_count; i++) {
365                 struct poch_group_info *group;
366                 unsigned int order;
367                 gfp_t gfp_mask;
368
369                 group = &channel->groups[i];
370                 order = get_order(channel->group_size);
371
372                 /*
373                  * __GFP_COMP is required here since we are going to
374                  * perform non-linear mapping to userspace. For more
375                  * information read the vm_insert_page() function
376                  * comments.
377                  */
378
379                 gfp_mask = GFP_KERNEL | GFP_DMA32 | __GFP_ZERO | __GFP_COMP;
380                 group->pg = alloc_pages(gfp_mask, order);
381                 if (!group->pg) {
382                         poch_channel_free_groups(channel);
383                         return -ENOMEM;
384                 }
385
386                 /* FIXME: This is the physical address not the bus
387                  * address!  This won't work in architectures that
388                  * have an IOMMU. Can we use pci_map_single() for
389                  * this?
390                  */
391                 group->dma_addr = page_to_pfn(group->pg) * PAGE_SIZE;
392                 group->user_offset =
393                         (header_pages + (i * group_pages)) * PAGE_SIZE;
394
395                 printk(KERN_INFO PFX "%lu: user_offset: 0x%lx\n", i,
396                        group->user_offset);
397         }
398
399         return 0;
400 }
401
402 static int channel_latch_attr(struct channel_info *channel)
403 {
404         channel->group_count = atomic_read(&channel->sys_group_count);
405         channel->group_size = atomic_read(&channel->sys_group_size);
406         channel->block_size = atomic_read(&channel->sys_block_size);
407
408         if (channel->group_count == 0) {
409                 printk(KERN_ERR PFX "invalid group count %lu\n",
410                        channel->group_count);
411                 return -EINVAL;
412         }
413
414         if (channel->group_size == 0 ||
415             channel->group_size < channel->block_size) {
416                 printk(KERN_ERR PFX "invalid group size %lu\n",
417                        channel->group_size);
418                 return -EINVAL;
419         }
420
421         if (channel->block_size == 0 || (channel->block_size % 8) != 0) {
422                 printk(KERN_ERR PFX "invalid block size %lu\n",
423                        channel->block_size);
424                 return -EINVAL;
425         }
426
427         if (channel->group_size % channel->block_size != 0) {
428                 printk(KERN_ERR PFX
429                        "group size should be multiple of block size");
430                 return -EINVAL;
431         }
432
433         return 0;
434 }
435
436 /*
437  * Configure DMA group registers
438  */
439 static void channel_dma_init(struct channel_info *channel)
440 {
441         void __iomem *fpga = channel->fpga_iomem;
442         u32 group_regs_base;
443         u32 group_reg;
444         unsigned int page;
445         unsigned int group_in_page;
446         unsigned long i;
447         u32 block_size_reg;
448         u32 block_count_reg;
449         u32 group_count_reg;
450         u32 groups_per_int_reg;
451         u32 curr_pci_reg;
452
453         if (channel->chno == CHNO_RX_CHANNEL) {
454                 group_regs_base = FPGA_RX_GROUP0_START_REG;
455                 block_size_reg = FPGA_RX_BLOCK_SIZE_REG;
456                 block_count_reg = FPGA_RX_BLOCK_COUNT_REG;
457                 group_count_reg = FPGA_RX_GROUP_COUNT_REG;
458                 groups_per_int_reg = FPGA_RX_GROUPS_PER_INT_REG;
459                 curr_pci_reg = FPGA_RX_CURR_PCI_REG;
460         } else {
461                 group_regs_base = FPGA_TX_GROUP0_START_REG;
462                 block_size_reg = FPGA_TX_BLOCK_SIZE_REG;
463                 block_count_reg = FPGA_TX_BLOCK_COUNT_REG;
464                 group_count_reg = FPGA_TX_GROUP_COUNT_REG;
465                 groups_per_int_reg = FPGA_TX_GROUPS_PER_INT_REG;
466                 curr_pci_reg = FPGA_TX_CURR_PCI_REG;
467         }
468
469         printk(KERN_WARNING "block_size, group_size, group_count\n");
470         /*
471          * Block size is expressed as the number of 64-bit transfers.
472          */
473         iowrite32(channel->block_size / 8, fpga + block_size_reg);
474         iowrite32(channel->group_size / channel->block_size,
475                   fpga + block_count_reg);
476         iowrite32(channel->group_count, fpga + group_count_reg);
477         /* FIXME: Hardcoded groups per int. Get it from sysfs? */
478         iowrite32(1, fpga + groups_per_int_reg);
479
480         /* Unlock PCI address? Not defined in the data sheet, but used
481          * in the reference code by Redrapids.
482          */
483         iowrite32(0x1, fpga + curr_pci_reg);
484
485         /* The DMA address page register is shared between the RX and
486          * TX channels, so acquire lock.
487          */
488         spin_lock(channel->iomem_lock);
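            /*
             * The group start-address registers are banked: each DMA
             * address page exposes 32 of them. Select the page, then
             * program the group's DMA address within that page.
             */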
489         for (i = 0; i < channel->group_count; i++) {
490                 page = i / 32;
491                 group_in_page = i % 32;
492
493                 group_reg = group_regs_base + (group_in_page * 4);
494
495                 iowrite32(page, fpga + FPGA_DMA_ADR_PAGE_REG);
496                 iowrite32(channel->groups[i].dma_addr, fpga + group_reg);
497         }
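            /* Read the programmed addresses back, for debugging only. */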
498         for (i = 0; i < channel->group_count; i++) {
499                 page = i / 32;
500                 group_in_page = i % 32;
501
502                 group_reg = group_regs_base + (group_in_page * 4);
503
504                 iowrite32(page, fpga + FPGA_DMA_ADR_PAGE_REG);
505                 printk(KERN_INFO PFX "%lu: read dma_addr: 0x%x\n", i,
506                        ioread32(fpga + group_reg));
507         }
508         spin_unlock(channel->iomem_lock);
509
510 }
511
512 static int poch_channel_alloc_header(struct channel_info *channel)
513 {
514         struct poch_cbuf_header *header = channel->header;
515         unsigned long group_offset_size;
516         unsigned long tot_group_offsets_size;
517
518         /* Allocate memory to hold the header exported to userspace. */
519         group_offset_size = sizeof(header->group_offsets[0]);
520         tot_group_offsets_size = group_offset_size * channel->group_count;
521         channel->header_size = sizeof(*header) + tot_group_offsets_size;
522         channel->header_pg = alloc_pages(GFP_KERNEL | __GFP_ZERO,
523                                          get_order(channel->header_size));
524         if (!channel->header_pg)
525                 return -ENOMEM;
526
527         channel->header = page_address(channel->header_pg);
528
529         return 0;
530 }
531
532 static void poch_channel_free_header(struct channel_info *channel)
533 {
534         unsigned int order;
535
536         order = get_order(channel->header_size);
537         __free_pages(channel->header_pg, order);
538 }
539
540 static void poch_channel_init_header(struct channel_info *channel)
541 {
542         int i;
543         struct poch_group_info *groups;
544         s32 *group_offsets;
545
546         channel->header->group_size_bytes = channel->group_size;
547         channel->header->group_count = channel->group_count;
548
549         spin_lock_init(&channel->group_offsets_lock);
550
551         group_offsets = channel->header->group_offsets;
552         groups = channel->groups;
553
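            /*
             * RX groups start out empty (offset -1) and are published to
             * user space as data arrives. TX groups start out free, so
             * their offsets are exported immediately for filling.
             */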
554         for (i = 0; i < channel->group_count; i++) {
555                 if (channel->dir == CHANNEL_DIR_RX)
556                         group_offsets[i] = -1;
557                 else
558                         group_offsets[i] = groups[i].user_offset;
559         }
560 }
561
562 static void __poch_channel_clear_counters(struct channel_info *channel)
563 {
564         channel->counters.pll_unlock = 0;
565         channel->counters.fifo_empty = 0;
566         channel->counters.fifo_overflow = 0;
567 }
568
569 static int poch_channel_init(struct channel_info *channel,
570                              struct poch_dev *poch_dev)
571 {
572         struct pci_dev *pdev = poch_dev->pci_dev;
573         struct device *dev = &pdev->dev;
574         unsigned long alloc_size;
575         int ret;
576
577         printk(KERN_WARNING "channel_latch_attr\n");
578
579         ret = channel_latch_attr(channel);
580         if (ret != 0)
581                 goto out;
582
583         channel->transfer = 0;
584
585         /* Allocate memory to hold group information. */
586         alloc_size = channel->group_count * sizeof(struct poch_group_info);
587         channel->groups = kzalloc(alloc_size, GFP_KERNEL);
588         if (!channel->groups) {
589                 dev_err(dev, "error allocating memory for group info\n");
590                 ret = -ENOMEM;
591                 goto out;
592         }
593
594         printk(KERN_WARNING "poch_channel_alloc_groups\n");
595
596         ret = poch_channel_alloc_groups(channel);
597         if (ret) {
598                 dev_err(dev, "error allocating groups of order %d\n",
599                         get_order(channel->group_size));
600                 goto out_free_group_info;
601         }
602
603         ret = poch_channel_alloc_header(channel);
604         if (ret) {
605                 dev_err(dev, "error allocating user space header\n");
606                 goto out_free_groups;
607         }
608
609         channel->fpga_iomem = poch_dev->fpga_iomem;
610         channel->bridge_iomem = poch_dev->bridge_iomem;
611         channel->iomem_lock = &poch_dev->iomem_lock;
612         spin_lock_init(&channel->counters_lock);
613
614         __poch_channel_clear_counters(channel);
615
616         printk(KERN_WARNING "poch_channel_init_header\n");
617
618         poch_channel_init_header(channel);
619
620         return 0;
621
622  out_free_groups:
623         poch_channel_free_groups(channel);
624  out_free_group_info:
625         kfree(channel->groups);
626  out:
627         return ret;
628 }
629
630 static int poch_wait_fpga_prog(void __iomem *bridge)
631 {
632         unsigned long total_wait;
633         const unsigned long wait_period = 100;
634         /* FIXME: Get the actual timeout */
635         const unsigned long prog_timeo = 10000; /* 10 Seconds */
636         u32 card_power;
637
638         printk(KERN_WARNING "poch_wait_fpg_prog\n");
639
640         printk(KERN_INFO PFX "programming fpga ...\n");
641         total_wait = 0;
642         while (1) {
643                 msleep(wait_period);
644                 total_wait += wait_period;
645
646                 card_power = ioread32(bridge + BRIDGE_CARD_POWER_REG);
647                 if (card_power & BRIDGE_CARD_POWER_PROG_DONE) {
648                         printk(KERN_INFO PFX "programming done\n");
649                         return 0;
650                 }
651                 if (total_wait > prog_timeo) {
652                         printk(KERN_ERR PFX
653                                "timed out while programming FPGA\n");
654                         return -EIO;
655                 }
656         }
657 }
658
659 static void poch_card_power_off(struct poch_dev *poch_dev)
660 {
661         void __iomem *bridge = poch_dev->bridge_iomem;
662         u32 card_power;
663
664         iowrite32(0, bridge + BRIDGE_INT_MASK_REG);
665         iowrite32(0, bridge + BRIDGE_DMA_GO_REG);
666
667         card_power = ioread32(bridge + BRIDGE_CARD_POWER_REG);
668         iowrite32(card_power & ~BRIDGE_CARD_POWER_EN,
669                   bridge + BRIDGE_CARD_POWER_REG);
670 }
671
672 enum clk_src {
673         CLK_SRC_ON_BOARD,
674         CLK_SRC_EXTERNAL
675 };
676
677 static void poch_card_clock_on(void __iomem *fpga)
678 {
679         /* FIXME: Get this data through sysfs? */
680         enum clk_src clk_src = CLK_SRC_ON_BOARD;
681
682         if (clk_src == CLK_SRC_ON_BOARD) {
683                 iowrite32(FPGA_ADC_CLOCK_LOCAL_CLK | FPGA_ADC_CLOCK_CTL_OSC_EN,
684                           fpga + FPGA_ADC_CLOCK_CTL_REG);
685         } else if (clk_src == CLK_SRC_EXTERNAL) {
686                 iowrite32(FPGA_ADC_CLOCK_EXT_SAMP_CLK,
687                           fpga + FPGA_ADC_CLOCK_CTL_REG);
688         }
689 }
690
691 static int poch_card_power_on(struct poch_dev *poch_dev)
692 {
693         void __iomem *bridge = poch_dev->bridge_iomem;
694         void __iomem *fpga = poch_dev->fpga_iomem;
695
696         iowrite32(BRIDGE_CARD_POWER_EN, bridge + BRIDGE_CARD_POWER_REG);
697
698         if (poch_wait_fpga_prog(bridge) != 0) {
699                 poch_card_power_off(poch_dev);
700                 return -EIO;
701         }
702
703         poch_card_clock_on(fpga);
704
705         /* Sync to new clock, reset state machines, set DMA mode. */
706         iowrite32(FPGA_DOM_DCM_RESET | FPGA_DOM_SOFT_RESET
707                   | FPGA_DOM_DUAL_M_SG_DMA, fpga + FPGA_DOM_REG);
708
709         /* FIXME: The time required for sync. needs to be tuned. */
710         msleep(1000);
711
712         return 0;
713 }
714
715 static void poch_channel_analog_on(struct channel_info *channel)
716 {
717         void __iomem *fpga = channel->fpga_iomem;
718         u32 adc_dac_en;
719
720         spin_lock(channel->iomem_lock);
721         adc_dac_en = ioread32(fpga + FPGA_ADC_DAC_EN_REG);
722         switch (channel->chno) {
723         case CHNO_RX_CHANNEL:
724                 iowrite32(adc_dac_en & ~FPGA_ADC_DAC_EN_ADC_OFF,
725                           fpga + FPGA_ADC_DAC_EN_REG);
726                 break;
727         case CHNO_TX_CHANNEL:
728                 iowrite32(adc_dac_en & ~FPGA_ADC_DAC_EN_DAC_OFF,
729                           fpga + FPGA_ADC_DAC_EN_REG);
730                 break;
731         }
732         spin_unlock(channel->iomem_lock);
733 }
734
735 static int poch_open(struct inode *inode, struct file *filp)
736 {
737         struct poch_dev *poch_dev;
738         struct channel_info *channel;
739         void __iomem *bridge;
740         void __iomem *fpga;
741         int chno;
742         int usage;
743         int ret;
744
745         poch_dev = container_of(inode->i_cdev, struct poch_dev, cdev);
746         bridge = poch_dev->bridge_iomem;
747         fpga = poch_dev->fpga_iomem;
748
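            /* Minors are allocated per card in channel order; map the
             * minor back to a channel index.
             */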
749         chno = iminor(inode) % poch_dev->nchannels;
750         channel = &poch_dev->channels[chno];
751
752         if (!atomic_dec_and_test(&channel->free)) {
753                 atomic_inc(&channel->free);
754                 ret = -EBUSY;
755                 goto out;
756         }
757
758         usage = atomic_inc_return(&poch_dev->usage);
759
760         printk(KERN_WARNING "poch_card_power_on\n");
761
762         if (usage == 1) {
763                 ret = poch_card_power_on(poch_dev);
764                 if (ret)
765                         goto out_dec_usage;
766         }
767
768         printk(KERN_INFO "CardBus Bridge Revision: %x\n",
769                ioread32(bridge + BRIDGE_REV_REG));
770         printk(KERN_INFO "CardBus Interface Revision: %x\n",
771                ioread32(fpga + FPGA_IFACE_REV_REG));
772
773         channel->chno = chno;
774         filp->private_data = channel;
775
776         printk(KERN_WARNING "poch_channel_init\n");
777
778         ret = poch_channel_init(channel, poch_dev);
779         if (ret)
780                 goto out_power_off;
781
782         printk(KERN_WARNING "poch_channel_analog_on\n");
783
784         poch_channel_analog_on(channel);
785
786         printk(KERN_WARNING "channel_dma_init\n");
787
788         channel_dma_init(channel);
789
790         if (usage == 1) {
791                 printk(KERN_WARNING "setting up DMA\n");
792
793                 /* Initialize DMA Controller. */
794                 iowrite32(FPGA_CAP_FIFO_REG, bridge + BRIDGE_STAT_2_REG);
795                 iowrite32(FPGA_DMA_DESC_1_REG, bridge + BRIDGE_STAT_3_REG);
796
797                 ioread32(fpga + FPGA_DMA_INT_STAT_REG);
798                 ioread32(fpga + FPGA_INT_STAT_REG);
799                 ioread32(bridge + BRIDGE_INT_STAT_REG);
800
801                 /* Initialize interrupts. FIXME: Enable temperature
802                  * handling. We are enabling both Tx and Rx channel
803                  * interrupts here. Do we need to enable interrupts
804                  * only for the current channel? Anyway, we won't get
805                  * the interrupt unless the DMA is activated.
806                  */
807                 iowrite32(BRIDGE_INT_FPGA, bridge + BRIDGE_INT_MASK_REG);
808                 iowrite32(FPGA_INT_DMA_CORE
809                           | FPGA_INT_PLL_UNLOCKED
810                           | FPGA_INT_TX_FF_EMPTY
811                           | FPGA_INT_RX_FF_EMPTY
812                           | FPGA_INT_TX_FF_OVRFLW
813                           | FPGA_INT_RX_FF_OVRFLW,
814                           fpga + FPGA_INT_MASK_REG);
815                 iowrite32(FPGA_DMA_INT_RX | FPGA_DMA_INT_TX,
816                           fpga + FPGA_DMA_INT_MASK_REG);
817         }
818
819         if (channel->dir == CHANNEL_DIR_TX) {
820                 /* Flush TX FIFO and output data from cardbus. */
821                 iowrite32(FPGA_TX_CTL_FIFO_FLUSH
822                           | FPGA_TX_CTL_OUTPUT_CARDBUS,
823                           fpga + FPGA_TX_CTL_REG);
824         } else {
825                 /* Flush RX FIFO and capture data continuously. */
826                 iowrite32(FPGA_RX_CTL_CONT_CAP
827                           | FPGA_RX_CTL_FIFO_FLUSH,
828                           fpga + FPGA_RX_CTL_REG);
829         }
830
831         atomic_inc(&channel->inited);
832
833         return 0;
834
835  out_power_off:
836         if (usage == 1)
837                 poch_card_power_off(poch_dev);
838  out_dec_usage:
839         atomic_dec(&poch_dev->usage);
840         atomic_inc(&channel->free);
841  out:
842         return ret;
843 }
844
845 static int poch_release(struct inode *inode, struct file *filp)
846 {
847         struct channel_info *channel = filp->private_data;
848         struct poch_dev *poch_dev;
849         int usage;
850
851         poch_dev = container_of(inode->i_cdev, struct poch_dev, cdev);
852
853         usage = atomic_dec_return(&poch_dev->usage);
854         if (usage == 0) {
855                 printk(KERN_WARNING "poch_card_power_off\n");
856                 poch_card_power_off(poch_dev);
857         }
858
859         atomic_dec(&channel->inited);
860         poch_channel_free_header(channel);
861         poch_channel_free_groups(channel);
862         kfree(channel->groups);
863         atomic_inc(&channel->free);
864
865         return 0;
866 }
867
868 /*
869  * Map the header and the group buffers, to user space.
870  */
871 static int poch_mmap(struct file *filp, struct vm_area_struct *vma)
872 {
873         struct channel_info *channel = filp->private_data;
874
875         unsigned long start;
876         unsigned long size;
877
878         unsigned long group_pages;
879         unsigned long header_pages;
880         unsigned long total_group_pages;
881
882         int pg_num;
883         struct page *pg;
884
885         int i;
886         int ret;
887
888         printk(KERN_WARNING "poch_mmap\n");
889
890         if (vma->vm_pgoff) {
891                 printk(KERN_WARNING PFX "page offset: %lu\n", vma->vm_pgoff);
892                 return -EINVAL;
893         }
894
895         group_pages = npages(channel->group_size);
896         header_pages = npages(channel->header_size);
897         total_group_pages = group_pages * channel->group_count;
898
899         size = vma->vm_end - vma->vm_start;
900         if (size != (header_pages + total_group_pages) * PAGE_SIZE) {
901                 printk(KERN_WARNING PFX "invalid mmap size: %lu bytes\n", size);
902                 return -EINVAL;
903         }
904
905         start = vma->vm_start;
906
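            /*
             * Mapping layout: header pages first, followed by the pages
             * of each group, matching the user_offset values computed at
             * allocation time.
             */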
907         /* FIXME: Cleanup required on failure? */
908         pg = channel->header_pg;
909         for (pg_num = 0; pg_num < header_pages; pg_num++, pg++) {
910                 printk(KERN_DEBUG PFX "page_count: %d\n", page_count(pg));
911                 printk(KERN_DEBUG PFX "%d: header: 0x%lx\n", pg_num, start);
912                 ret = vm_insert_page(vma, start, pg);
913                 if (ret) {
914                         printk(KERN_DEBUG "vm_insert 1 failed at %lx\n", start);
915                         return ret;
916                 }
917                 start += PAGE_SIZE;
918         }
919
920         for (i = 0; i < channel->group_count; i++) {
921                 pg = channel->groups[i].pg;
922                 for (pg_num = 0; pg_num < group_pages; pg_num++, pg++) {
923                         printk(KERN_DEBUG PFX "%d: group %d: 0x%lx\n",
924                                pg_num, i, start);
925                         ret = vm_insert_page(vma, start, pg);
926                         if (ret) {
927                                 printk(KERN_DEBUG PFX
928                                        "vm_insert 2 failed at %d\n", pg_num);
929                                 return ret;
930                         }
931                         start += PAGE_SIZE;
932                 }
933         }
934
935         return 0;
936 }
937
938 /*
939  * Check whether there is some group that user space has not
940  * consumed yet. When user space consumes a group, it sets the
941  * group offset to -1. Consuming means reading data in the RX case
942  * and filling a buffer in the TX case.
943  */
944 static int poch_channel_available(struct channel_info *channel)
945 {
946         int i;
947
948         spin_lock_irq(&channel->group_offsets_lock);
949
950         for (i = 0; i < channel->group_count; i++) {
951                 if (channel->header->group_offsets[i] != -1) {
952                         spin_unlock_irq(&channel->group_offsets_lock);
953                         return 1;
954                 }
955         }
956
957         spin_unlock_irq(&channel->group_offsets_lock);
958
959         return 0;
960 }
961
962 static unsigned int poch_poll(struct file *filp, poll_table *pt)
963 {
964         struct channel_info *channel = filp->private_data;
965         unsigned int ret = 0;
966
967         poll_wait(filp, &channel->wq, pt);
968
969         if (poch_channel_available(channel)) {
970                 if (channel->dir == CHANNEL_DIR_RX)
971                         ret = POLLIN | POLLRDNORM;
972                 else
973                         ret = POLLOUT | POLLWRNORM;
974         }
975
976         return ret;
977 }
978
979 static int poch_ioctl(struct inode *inode, struct file *filp,
980                       unsigned int cmd, unsigned long arg)
981 {
982         struct channel_info *channel = filp->private_data;
983         void __iomem *fpga = channel->fpga_iomem;
984         void __iomem *bridge = channel->bridge_iomem;
985         void __user *argp = (void __user *)arg;
986         struct vm_area_struct *vms;
987         struct poch_counters counters;
988         int ret;
989
990         switch (cmd) {
991         case POCH_IOC_TRANSFER_START:
992                 switch (channel->chno) {
993                 case CHNO_TX_CHANNEL:
994                         printk(KERN_INFO PFX "ioctl: Tx start\n");
995                         iowrite32(0x1, fpga + FPGA_TX_TRIGGER_REG);
996                         iowrite32(0x1, fpga + FPGA_TX_ENABLE_REG);
997
998                         /* FIXME: Does it make sense to do a DMA GO
999                          * twice, once in Tx and once in Rx.
1000                          */
1001                         iowrite32(0x1, bridge + BRIDGE_DMA_GO_REG);
1002                         break;
1003                 case CHNO_RX_CHANNEL:
1004                         printk(KERN_INFO PFX "ioctl: Rx start\n");
1005                         iowrite32(0x1, fpga + FPGA_RX_ARM_REG);
1006                         iowrite32(0x1, bridge + BRIDGE_DMA_GO_REG);
1007                         break;
1008                 }
1009                 break;
1010         case POCH_IOC_TRANSFER_STOP:
1011                 switch (channel->chno) {
1012                 case CHNO_TX_CHANNEL:
1013                         printk(KERN_INFO PFX "ioctl: Tx stop\n");
1014                         iowrite32(0x0, fpga + FPGA_TX_ENABLE_REG);
1015                         iowrite32(0x0, fpga + FPGA_TX_TRIGGER_REG);
1016                         iowrite32(0x0, bridge + BRIDGE_DMA_GO_REG);
1017                         break;
1018                 case CHNO_RX_CHANNEL:
1019                         printk(KERN_INFO PFX "ioctl: Rx stop\n");
1020                         iowrite32(0x0, fpga + FPGA_RX_ARM_REG);
1021                         iowrite32(0x0, bridge + BRIDGE_DMA_GO_REG);
1022                         break;
1023                 }
1024                 break;
1025         case POCH_IOC_GET_COUNTERS:
1026                 if (!access_ok(VERIFY_WRITE, argp, sizeof(struct poch_counters)))
1027                         return -EFAULT;
1028
1029                 spin_lock_irq(&channel->counters_lock);
1030                 counters = channel->counters;
1031                 __poch_channel_clear_counters(channel);
1032                 spin_unlock_irq(&channel->counters_lock);
1033
1034                 ret = copy_to_user(argp, &counters,
1035                                    sizeof(struct poch_counters));
1036                 if (ret)
1037                         return -EFAULT;
1038
1039                 break;
1040         case POCH_IOC_SYNC_GROUP_FOR_USER:
1041         case POCH_IOC_SYNC_GROUP_FOR_DEVICE:
1042                 vms = find_vma(current->mm, arg);
1043                 if (!vms)
1044                         /* Address not mapped. */
1045                         return -EINVAL;
1046                 if (vms->vm_file != filp)
1047                         /* Address mapped from different device/file. */
1048                         return -EINVAL;
1049
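                     /*
                      * Both sync directions are implemented as a CPU cache
                      * flush over the group's user mapping.
                      */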
1050                 flush_cache_range(vms, arg, arg + channel->group_size);
1051                 break;
1052         }
1053         return 0;
1054 }
1055
1056 static const struct file_operations poch_fops = {
1057         .owner = THIS_MODULE,
1058         .open = poch_open,
1059         .release = poch_release,
1060         .ioctl = poch_ioctl,
1061         .poll = poch_poll,
1062         .mmap = poch_mmap
1063 };
1064
1065 static void poch_irq_dma(struct channel_info *channel)
1066 {
1067         u32 prev_transfer;
1068         u32 curr_transfer;
1069         long groups_done;
1070         unsigned long i, j;
1071         struct poch_group_info *groups;
1072         s32 *group_offsets;
1073         u32 curr_group_reg;
1074
1075         if (!atomic_read(&channel->inited))
1076                 return;
1077
1078         prev_transfer = channel->transfer;
1079
1080         if (channel->chno == CHNO_RX_CHANNEL)
1081                 curr_group_reg = FPGA_RX_CURR_GROUP_REG;
1082         else
1083                 curr_group_reg = FPGA_TX_CURR_GROUP_REG;
1084
1085         curr_transfer = ioread32(channel->fpga_iomem + curr_group_reg);
1086
1087         groups_done = curr_transfer - prev_transfer;
1088         /* Check wrap over, and handle it. */
1089         if (groups_done <= 0)
1090                 groups_done += channel->group_count;
1091
1092         group_offsets = channel->header->group_offsets;
1093         groups = channel->groups;
1094
1095         spin_lock(&channel->group_offsets_lock);
1096
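            /*
             * Publish the offset of each completed group in the shared
             * header, making it visible to user space.
             */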
1097         for (i = 0; i < groups_done; i++) {
1098                 j = (prev_transfer + i) % channel->group_count;
1099                 group_offsets[j] = groups[j].user_offset;
1100         }
1101
1102         spin_unlock(&channel->group_offsets_lock);
1103
1104         channel->transfer = curr_transfer;
1105
1106         wake_up_interruptible(&channel->wq);
1107 }
1108
1109 static irqreturn_t poch_irq_handler(int irq, void *p)
1110 {
1111         struct poch_dev *poch_dev = p;
1112         void __iomem *bridge = poch_dev->bridge_iomem;
1113         void __iomem *fpga = poch_dev->fpga_iomem;
1114         struct channel_info *channel_rx = &poch_dev->channels[CHNO_RX_CHANNEL];
1115         struct channel_info *channel_tx = &poch_dev->channels[CHNO_TX_CHANNEL];
1116         u32 bridge_stat;
1117         u32 fpga_stat;
1118         u32 dma_stat;
1119
1120         bridge_stat = ioread32(bridge + BRIDGE_INT_STAT_REG);
1121         fpga_stat = ioread32(fpga + FPGA_INT_STAT_REG);
1122         dma_stat = ioread32(fpga + FPGA_DMA_INT_STAT_REG);
1123
1124         ioread32(fpga + FPGA_DMA_INT_STAT_REG);
1125         ioread32(fpga + FPGA_INT_STAT_REG);
1126         ioread32(bridge + BRIDGE_INT_STAT_REG);
1127
1128         if (bridge_stat & BRIDGE_INT_FPGA) {
1129                 if (fpga_stat & FPGA_INT_DMA_CORE) {
1130                         if (dma_stat & FPGA_DMA_INT_RX)
1131                                 poch_irq_dma(channel_rx);
1132                         if (dma_stat & FPGA_DMA_INT_TX)
1133                                 poch_irq_dma(channel_tx);
1134                 }
1135                 if (fpga_stat & FPGA_INT_PLL_UNLOCKED) {
1136                         channel_tx->counters.pll_unlock++;
1137                         channel_rx->counters.pll_unlock++;
1138                         if (printk_ratelimit())
1139                                 printk(KERN_WARNING PFX "PLL unlocked\n");
1140                 }
1141                 if (fpga_stat & FPGA_INT_TX_FF_EMPTY)
1142                         channel_tx->counters.fifo_empty++;
1143                 if (fpga_stat & FPGA_INT_TX_FF_OVRFLW)
1144                         channel_tx->counters.fifo_overflow++;
1145                 if (fpga_stat & FPGA_INT_RX_FF_EMPTY)
1146                         channel_rx->counters.fifo_empty++;
1147                 if (fpga_stat & FPGA_INT_RX_FF_OVRFLW)
1148                         channel_rx->counters.fifo_overflow++;
1149
1150                 /*
1151                  * FIXME: These errors should be notified through the
1152                  * poll interface as POLLERR.
1153                  */
1154
1155                 /* Re-enable interrupts. */
1156                 iowrite32(BRIDGE_INT_FPGA, bridge + BRIDGE_INT_MASK_REG);
1157
1158                 return IRQ_HANDLED;
1159         }
1160
1161         return IRQ_NONE;
1162 }
1163
1164 static void poch_class_dev_unregister(struct poch_dev *poch_dev, int id)
1165 {
1166         int i, j;
1167         int nattrs;
1168         struct channel_info *channel;
1169         dev_t devno;
1170
1171         if (poch_dev->dev == NULL)
1172                 return;
1173
1174         for (i = 0; i < poch_dev->nchannels; i++) {
1175                 channel = &poch_dev->channels[i];
1176                 devno = poch_first_dev + (id * poch_dev->nchannels) + i;
1177
1178                 if (!channel->dev)
1179                         continue;
1180
1181                 nattrs = ARRAY_SIZE(poch_class_attrs);
1182                 for (j = 0; j < nattrs; j++)
1183                         device_remove_file(channel->dev, poch_class_attrs[j]);
1184
1185                 device_unregister(channel->dev);
1186         }
1187
1188         device_unregister(poch_dev->dev);
1189 }
1190
1191 static int __devinit poch_class_dev_register(struct poch_dev *poch_dev,
1192                                              int id)
1193 {
1194         struct device *dev = &poch_dev->pci_dev->dev;
1195         int i, j;
1196         int nattrs;
1197         int ret;
1198         struct channel_info *channel;
1199         dev_t devno;
1200
1201         poch_dev->dev = device_create(poch_cls, &poch_dev->pci_dev->dev,
1202                                       MKDEV(0, 0), NULL, "poch%d", id);
1203         if (IS_ERR(poch_dev->dev)) {
1204                 dev_err(dev, "error creating parent class device\n");
1205                 ret = PTR_ERR(poch_dev->dev);
1206                 poch_dev->dev = NULL;
1207                 return ret;
1208         }
1209
1210         for (i = 0; i < poch_dev->nchannels; i++) {
1211                 channel = &poch_dev->channels[i];
1212
1213                 devno = poch_first_dev + (id * poch_dev->nchannels) + i;
1214                 channel->dev = device_create(poch_cls, poch_dev->dev, devno,
1215                                              NULL, "ch%d", i);
1216                 if (IS_ERR(channel->dev)) {
1217                         dev_err(dev, "error creating channel class device\n");
1218                         ret = PTR_ERR(channel->dev);
1219                         channel->dev = NULL;
1220                         poch_class_dev_unregister(poch_dev, id);
1221                         return ret;
1222                 }
1223
1224                 dev_set_drvdata(channel->dev, channel);
1225                 nattrs = ARRAY_SIZE(poch_class_attrs);
1226                 for (j = 0; j < nattrs; j++) {
1227                         ret = device_create_file(channel->dev,
1228                                                  poch_class_attrs[j]);
1229                         if (ret) {
1230                                 dev_err(dev, "error creating attribute file\n");
1231                                 poch_class_dev_unregister(poch_dev, id);
1232                                 return ret;
1233                         }
1234                 }
1235         }
1236
1237         return 0;
1238 }
1239
1240 static int __devinit poch_pci_probe(struct pci_dev *pdev,
1241                                     const struct pci_device_id *pci_id)
1242 {
1243         struct device *dev = &pdev->dev;
1244         struct poch_dev *poch_dev;
1245         struct uio_info *uio;
1246         int ret;
1247         int id;
1248         int i;
1249
1250         poch_dev = kzalloc(sizeof(struct poch_dev), GFP_KERNEL);
1251         if (!poch_dev) {
1252                 dev_err(dev, "error allocating priv. data memory\n");
1253                 return -ENOMEM;
1254         }
1255
1256         poch_dev->pci_dev = pdev;
1257         uio = &poch_dev->uio;
1258
1259         pci_set_drvdata(pdev, poch_dev);
1260
1261         spin_lock_init(&poch_dev->iomem_lock);
1262
1263         poch_dev->nchannels = POCH_NCHANNELS;
1264         poch_dev->channels[CHNO_RX_CHANNEL].dir = CHANNEL_DIR_RX;
1265         poch_dev->channels[CHNO_TX_CHANNEL].dir = CHANNEL_DIR_TX;
1266
1267         for (i = 0; i < poch_dev->nchannels; i++) {
1268                 init_waitqueue_head(&poch_dev->channels[i].wq);
1269                 atomic_set(&poch_dev->channels[i].free, 1);
1270                 atomic_set(&poch_dev->channels[i].inited, 0);
1271         }
1272
1273         ret = pci_enable_device(pdev);
1274         if (ret) {
1275                 dev_err(dev, "error enabling device\n");
1276                 goto out_free;
1277         }
1278
1279         ret = pci_request_regions(pdev, "poch");
1280         if (ret) {
1281                 dev_err(dev, "error requesting resources\n");
1282                 goto out_disable;
1283         }
1284
1285         uio->mem[0].addr = pci_resource_start(pdev, 1);
1286         if (!uio->mem[0].addr) {
1287                 dev_err(dev, "invalid BAR1\n");
1288                 ret = -ENODEV;
1289                 goto out_release;
1290         }
1291
1292         uio->mem[0].size = pci_resource_len(pdev, 1);
1293         uio->mem[0].memtype = UIO_MEM_PHYS;
1294
1295         uio->name = "poch";
1296         uio->version = "0.0.1";
1297         uio->irq = -1;
1298         ret = uio_register_device(dev, uio);
1299         if (ret) {
1300                 dev_err(dev, "error register UIO device: %d\n", ret);
1301                 goto out_release;
1302         }
1303
1304         poch_dev->bridge_iomem = ioremap(pci_resource_start(pdev, 0),
1305                                          pci_resource_len(pdev, 0));
1306         if (poch_dev->bridge_iomem == NULL) {
1307                 dev_err(dev, "error mapping bridge (bar0) registers\n");
1308                 ret = -ENOMEM;
1309                 goto out_uio_unreg;
1310         }
1311
1312         poch_dev->fpga_iomem = ioremap(pci_resource_start(pdev, 1),
1313                                        pci_resource_len(pdev, 1));
1314         if (poch_dev->fpga_iomem == NULL) {
1315                 dev_err(dev, "error mapping fpga (bar1) registers\n");
1316                 ret = -ENOMEM;
1317                 goto out_bar0_unmap;
1318         }
1319
1320         ret = request_irq(pdev->irq, poch_irq_handler, IRQF_SHARED,
1321                           dev->bus_id, poch_dev);
1322         if (ret) {
1323                 dev_err(dev, "error requesting IRQ %u\n", pdev->irq);
1324                 ret = -ENOMEM;
1325                 goto out_bar1_unmap;
1326         }
1327
1328         if (!idr_pre_get(&poch_ids, GFP_KERNEL)) {
1329                 dev_err(dev, "error preallocating idr memory\n");
1330                 ret = -ENOMEM;
1331                 goto out_free_irq;
1332         }
1333
1334         idr_get_new(&poch_ids, poch_dev, &id);
1335         if (id >= MAX_POCH_CARDS) {
1336                 dev_err(dev, "minors exhausted\n");
1337                 ret = -EBUSY;
1338                 goto out_free_irq;
1339         }
1340
1341         cdev_init(&poch_dev->cdev, &poch_fops);
1342         poch_dev->cdev.owner = THIS_MODULE;
1343         ret = cdev_add(&poch_dev->cdev,
1344                        poch_first_dev + (id * poch_dev->nchannels),
1345                        poch_dev->nchannels);
1346         if (ret) {
1347                 dev_err(dev, "error registering character device\n");
1348                 goto out_idr_remove;
1349         }
1350
1351         ret = poch_class_dev_register(poch_dev, id);
1352         if (ret)
1353                 goto out_cdev_del;
1354
1355         return 0;
1356
1357  out_cdev_del:
1358         cdev_del(&poch_dev->cdev);
1359  out_idr_remove:
1360         idr_remove(&poch_ids, id);
1361  out_free_irq:
1362         free_irq(pdev->irq, poch_dev);
1363  out_bar1_unmap:
1364         iounmap(poch_dev->fpga_iomem);
1365  out_bar0_unmap:
1366         iounmap(poch_dev->bridge_iomem);
1367  out_uio_unreg:
1368         uio_unregister_device(uio);
1369  out_release:
1370         pci_release_regions(pdev);
1371  out_disable:
1372         pci_disable_device(pdev);
1373  out_free:
1374         kfree(poch_dev);
1375         return ret;
1376 }
1377
1378 /*
1379  * FIXME: We are yet to handle the hot unplug case.
1380  */
1381 static void poch_pci_remove(struct pci_dev *pdev)
1382 {
1383         struct poch_dev *poch_dev = pci_get_drvdata(pdev);
1384         struct uio_info *uio = &poch_dev->uio;
1385         unsigned int minor = MINOR(poch_dev->cdev.dev);
1386         unsigned int id = minor / poch_dev->nchannels;
1387
1388         poch_class_dev_unregister(poch_dev, id);
1389         cdev_del(&poch_dev->cdev);
1390         idr_remove(&poch_ids, id);
1391         free_irq(pdev->irq, poch_dev);
1392         iounmap(poch_dev->fpga_iomem);
1393         iounmap(poch_dev->bridge_iomem);
1394         uio_unregister_device(uio);
1395         pci_release_regions(pdev);
1396         pci_disable_device(pdev);
1397         pci_set_drvdata(pdev, NULL);
1398         iounmap(uio->mem[0].internal_addr);
1399
1400         kfree(poch_dev);
1401 }
1402
1403 static const struct pci_device_id poch_pci_ids[] /* __devinitconst */ = {
1404         { PCI_DEVICE(PCI_VENDOR_ID_RRAPIDS,
1405                      PCI_DEVICE_ID_RRAPIDS_POCKET_CHANGE) },
1406         { 0, }
1407 };
1408
1409 static struct pci_driver poch_pci_driver = {
1410         .name = DRV_NAME,
1411         .id_table = poch_pci_ids,
1412         .probe = poch_pci_probe,
1413         .remove = poch_pci_remove,
1414 };
1415
1416 static int __init poch_init_module(void)
1417 {
1418         int ret = 0;
1419
1420         ret = alloc_chrdev_region(&poch_first_dev, 0,
1421                                   MAX_POCH_DEVICES, DRV_NAME);
1422         if (ret) {
1423                 printk(KERN_ERR PFX "error allocating device numbers\n");
1424                 return ret;
1425         }
1426
1427         poch_cls = class_create(THIS_MODULE, "pocketchange");
1428         if (IS_ERR(poch_cls)) {
1429                 ret = PTR_ERR(poch_cls);
1430                 goto out_unreg_chrdev;
1431         }
1432
1433         ret = pci_register_driver(&poch_pci_driver);
1434         if (ret) {
1435                 printk(KERN_ERR PFX "error registering PCI driver\n");
1436                 goto out_class_destroy;
1437         }
1438
1439         return 0;
1440
1441  out_class_destroy:
1442         class_destroy(poch_cls);
1443
1444  out_unreg_chrdev:
1445         unregister_chrdev_region(poch_first_dev, MAX_POCH_DEVICES);
1446
1447         return ret;
1448 }
1449
1450 static void __exit poch_exit_module(void)
1451 {
1452         pci_unregister_driver(&poch_pci_driver);
1453         class_destroy(poch_cls);
1454         unregister_chrdev_region(poch_first_dev, MAX_POCH_DEVICES);
1455 }
1456
1457 module_init(poch_init_module);
1458 module_exit(poch_exit_module);
1459
1460 MODULE_LICENSE("GPL v2");
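MODULE_AUTHOR("Vijay Kumar <vijaykumar@bravegnu.org>");
MODULE_DESCRIPTION("User-space DMA and UIO based Redrapids Pocket Change CardBus driver");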