2 * Simple synchronous serial port driver for ETRAX FS.
4 * Copyright (c) 2005 Axis Communications AB
6 * Author: Mikael Starvik
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/types.h>
13 #include <linux/errno.h>
14 #include <linux/major.h>
15 #include <linux/sched.h>
16 #include <linux/slab.h>
17 #include <linux/interrupt.h>
18 #include <linux/poll.h>
19 #include <linux/init.h>
20 #include <linux/timer.h>
21 #include <linux/spinlock.h>
24 #include <asm/arch/dma.h>
25 #include <asm/arch/pinmux.h>
26 #include <asm/arch/hwregs/reg_rdwr.h>
27 #include <asm/arch/hwregs/sser_defs.h>
28 #include <asm/arch/hwregs/dma_defs.h>
29 #include <asm/arch/hwregs/dma.h>
30 #include <asm/arch/hwregs/intr_vect_defs.h>
31 #include <asm/arch/hwregs/intr_vect.h>
32 #include <asm/arch/hwregs/reg_map.h>
33 #include <asm/sync_serial.h>
/* The receiver is a bit tricky because of the continuous stream of data. */
37 /* Three DMA descriptors are linked together. Each DMA descriptor is */
38 /* responsible for port->bufchunk of a common buffer. */
40 /* +---------------------------------------------+ */
41 /* | +----------+ +----------+ +----------+ | */
42 /* +-> | Descr[0] |-->| Descr[1] |-->| Descr[2] |-+ */
43 /* +----------+ +----------+ +----------+ */
/* +-------------------------------------+ */
/* |                                     | */
/* +-------------------------------------+ */
/* |<- data_avail ->|                      */
/* readp            writep                 */
/* If the application keeps up the pace, readp will be right after writep. */
/* If the application can't keep the pace, we have to throw away data.     */
54 /* The idea is that readp should be ready with the data pointed out by */
55 /* Descr[i] when the DMA has filled in Descr[i+1]. */
/* Otherwise we will discard the rest of the data pointed out by Descr[i]  */
/* and set readp to the start of Descr[i+1].                               */
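/*                                                                        */
/* A sketch (not code from this driver) of how data_avail in the picture  */
/* above falls out of readp and writep, allowing for wrap-around; see     */
/* sync_data_avail() further down:                                        */
/*                                                                        */
/*     if (writep >= readp)                                               */
/*         data_avail = writep - readp;                                   */
/*     else                                                               */
/*         data_avail = in_buffer_size - (readp - writep);                */
/*                                                                        */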
60 #define SYNC_SERIAL_MAJOR 125
62 /* IN_BUFFER_SIZE should be a multiple of 6 to make sure that 24 bit */
63 /* words can be handled */
64 #define IN_BUFFER_SIZE 12288
65 #define IN_DESCR_SIZE 256
66 #define NUM_IN_DESCR (IN_BUFFER_SIZE/IN_DESCR_SIZE)
67 #define OUT_BUFFER_SIZE 4096
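/* The multiple-of-6 requirement above is only documented, not enforced.
 * A compile-time guard along these lines (not part of the original driver)
 * would catch a bad IN_BUFFER_SIZE at build time. */
#if (IN_BUFFER_SIZE % 6) || (IN_BUFFER_SIZE % IN_DESCR_SIZE)
#error "IN_BUFFER_SIZE must be a multiple of 6 and of IN_DESCR_SIZE"
#endif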
69 #define DEFAULT_FRAME_RATE 0
70 #define DEFAULT_WORD_RATE 7
72 /* NOTE: Enabling some debug will likely cause overrun or underrun,
* especially if manual mode is used.
82 typedef struct sync_port
84 reg_scope_instances regi_sser;
85 reg_scope_instances regi_dmain;
86 reg_scope_instances regi_dmaout;
88 char started; /* 1 if port has been started */
89 char port_nbr; /* Port 0 or 1 */
90 char busy; /* 1 if port is busy */
92 char enabled; /* 1 if port is enabled */
93 char use_dma; /* 1 if port uses dma */
100 volatile unsigned int out_count; /* Remaining bytes for current transfer */
101 unsigned char* outp; /* Current position in out_buffer */
102 volatile unsigned char* volatile readp; /* Next byte to be read by application */
103 volatile unsigned char* volatile writep; /* Next byte to be written by etrax */
104 unsigned int in_buffer_size;
105 unsigned int inbufchunk;
106 unsigned char out_buffer[OUT_BUFFER_SIZE] __attribute__ ((aligned(32)));
107 unsigned char in_buffer[IN_BUFFER_SIZE]__attribute__ ((aligned(32)));
108 unsigned char flip[IN_BUFFER_SIZE] __attribute__ ((aligned(32)));
109 struct dma_descr_data* next_rx_desc;
110 struct dma_descr_data* prev_rx_desc;
113 dma_descr_data in_descr[NUM_IN_DESCR] __attribute__ ((__aligned__(16)));
114 dma_descr_context in_context __attribute__ ((__aligned__(32)));
115 dma_descr_data out_descr __attribute__ ((__aligned__(16)));
116 dma_descr_context out_context __attribute__ ((__aligned__(32)));
117 wait_queue_head_t out_wait_q;
118 wait_queue_head_t in_wait_q;
123 static int etrax_sync_serial_init(void);
124 static void initialize_port(int portnbr);
125 static inline int sync_data_avail(struct sync_port *port);
127 static int sync_serial_open(struct inode *, struct file*);
128 static int sync_serial_release(struct inode*, struct file*);
129 static unsigned int sync_serial_poll(struct file *filp, poll_table *wait);
131 static int sync_serial_ioctl(struct inode*, struct file*,
132 unsigned int cmd, unsigned long arg);
133 static ssize_t sync_serial_write(struct file * file, const char * buf,
134 size_t count, loff_t *ppos);
135 static ssize_t sync_serial_read(struct file *file, char *buf,
136 size_t count, loff_t *ppos);
138 #if (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \
139 defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \
140 (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \
141 defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA))
145 static void send_word(sync_port* port);
146 static void start_dma(struct sync_port *port, const char* data, int count);
147 static void start_dma_in(sync_port* port);
149 static irqreturn_t tr_interrupt(int irq, void *dev_id, struct pt_regs * regs);
150 static irqreturn_t rx_interrupt(int irq, void *dev_id, struct pt_regs * regs);
153 #if (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \
154 !defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \
155 (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \
156 !defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA))
157 #define SYNC_SER_MANUAL
159 #ifdef SYNC_SER_MANUAL
160 static irqreturn_t manual_interrupt(int irq, void *dev_id, struct pt_regs * regs);
164 static struct sync_port ports[]=
167 .regi_sser = regi_sser0,
168 .regi_dmaout = regi_dma4,
169 .regi_dmain = regi_dma5,
170 #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)
177 .regi_sser = regi_sser1,
178 .regi_dmaout = regi_dma6,
179 .regi_dmain = regi_dma7,
180 #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA)
188 #define NUMBER_OF_PORTS (sizeof(ports)/sizeof(sync_port))
190 static struct file_operations sync_serial_fops = {
191 .owner = THIS_MODULE,
192 .write = sync_serial_write,
193 .read = sync_serial_read,
194 .poll = sync_serial_poll,
195 .ioctl = sync_serial_ioctl,
196 .open = sync_serial_open,
197 .release = sync_serial_release
200 static int __init etrax_sync_serial_init(void)
202 ports[0].enabled = 0;
203 ports[1].enabled = 0;
if (register_chrdev(SYNC_SERIAL_MAJOR, "sync serial", &sync_serial_fops) < 0)
207 printk("unable to get major for synchronous serial port\n");
211 /* Initialize Ports */
212 #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0)
213 if (crisv32_pinmux_alloc_fixed(pinmux_sser0))
215 printk("Unable to allocate pins for syncrhronous serial port 0\n");
218 ports[0].enabled = 1;
222 #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1)
223 if (crisv32_pinmux_alloc_fixed(pinmux_sser1))
225 printk("Unable to allocate pins for syncrhronous serial port 0\n");
228 ports[1].enabled = 1;
232 printk("ETRAX FS synchronous serial port driver\n");
236 static void __init initialize_port(int portnbr)
238 struct sync_port* port = &ports[portnbr];
239 reg_sser_rw_cfg cfg = {0};
240 reg_sser_rw_frm_cfg frm_cfg = {0};
241 reg_sser_rw_tr_cfg tr_cfg = {0};
242 reg_sser_rw_rec_cfg rec_cfg = {0};
244 DEBUG(printk("Init sync serial port %d\n", portnbr));
246 port->port_nbr = portnbr;
249 port->outp = port->out_buffer;
253 port->readp = port->flip;
254 port->writep = port->flip;
255 port->in_buffer_size = IN_BUFFER_SIZE;
256 port->inbufchunk = IN_DESCR_SIZE;
257 port->next_rx_desc = &port->in_descr[0];
258 port->prev_rx_desc = &port->in_descr[NUM_IN_DESCR-1];
259 port->prev_rx_desc->eol = 1;
261 init_waitqueue_head(&port->out_wait_q);
262 init_waitqueue_head(&port->in_wait_q);
264 spin_lock_init(&port->lock);
266 cfg.out_clk_src = regk_sser_intern_clk;
267 cfg.out_clk_pol = regk_sser_pos;
268 cfg.clk_od_mode = regk_sser_no;
269 cfg.clk_dir = regk_sser_out;
270 cfg.gate_clk = regk_sser_no;
271 cfg.base_freq = regk_sser_f29_493;
273 REG_WR(sser, port->regi_sser, rw_cfg, cfg);
275 frm_cfg.wordrate = DEFAULT_WORD_RATE;
276 frm_cfg.type = regk_sser_edge;
277 frm_cfg.frame_pin_dir = regk_sser_out;
278 frm_cfg.frame_pin_use = regk_sser_frm;
279 frm_cfg.status_pin_dir = regk_sser_in;
280 frm_cfg.status_pin_use = regk_sser_hold;
281 frm_cfg.out_on = regk_sser_tr;
282 frm_cfg.tr_delay = 1;
283 REG_WR(sser, port->regi_sser, rw_frm_cfg, frm_cfg);
285 tr_cfg.urun_stop = regk_sser_no;
286 tr_cfg.sample_size = 7;
287 tr_cfg.sh_dir = regk_sser_msbfirst;
288 tr_cfg.use_dma = port->use_dma ? regk_sser_yes : regk_sser_no;
289 tr_cfg.rate_ctrl = regk_sser_bulk;
290 tr_cfg.data_pin_use = regk_sser_dout;
291 tr_cfg.bulk_wspace = 1;
292 REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
294 rec_cfg.sample_size = 7;
295 rec_cfg.sh_dir = regk_sser_msbfirst;
296 rec_cfg.use_dma = port->use_dma ? regk_sser_yes : regk_sser_no;
297 rec_cfg.fifo_thr = regk_sser_inf;
298 REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
301 static inline int sync_data_avail(struct sync_port *port)
304 unsigned char *start;
307 start = (unsigned char*)port->readp; /* cast away volatile */
308 end = (unsigned char*)port->writep; /* cast away volatile */
/* writep may be ahead of or behind readp in the circular flip buffer;
 * the two cases below cover the unwrapped and the wrapped layout. */
317 avail = port->in_buffer_size - (start - end);
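/* Wrapped-case example: with in_buffer_size = 12288, readp 10000 bytes into
 * the flip buffer and writep 200 bytes in, avail = 12288 - (10000 - 200)
 * = 2488 bytes. */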
321 static inline int sync_data_avail_to_end(struct sync_port *port)
324 unsigned char *start;
327 start = (unsigned char*)port->readp; /* cast away volatile */
328 end = (unsigned char*)port->writep; /* cast away volatile */
/* Same two layouts as in sync_data_avail(), but only the bytes up to the
 * end of the flip buffer are counted here. */
337 avail = port->flip + port->in_buffer_size - start;
341 static int sync_serial_open(struct inode *inode, struct file *file)
343 int dev = MINOR(inode->i_rdev);
345 reg_dma_rw_cfg cfg = {.en = regk_dma_yes};
346 reg_dma_rw_intr_mask intr_mask = {.data = regk_dma_yes};
348 DEBUG(printk("Open sync serial port %d\n", dev));
350 if (dev < 0 || dev >= NUMBER_OF_PORTS || !ports[dev].enabled)
352 DEBUG(printk("Invalid minor %d\n", dev));
/* Allow opening this device twice (assuming one reader and one writer) */
359 DEBUG(printk("Device is busy.. \n"));
362 if (port->init_irqs) {
364 if (port == &ports[0]){
366 if(request_irq(DMA4_INTR_VECT,
369 "synchronous serial 0 dma tr",
371 printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ");
373 } else if(request_irq(DMA5_INTR_VECT,
376 "synchronous serial 1 dma rx",
378 free_irq(DMA4_INTR_VECT, &port[0]);
379 printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ");
381 } else if (crisv32_request_dma(SYNC_SER0_TX_DMA_NBR,
382 "synchronous serial 0 dma tr",
383 DMA_VERBOSE_ON_ERROR,
386 free_irq(DMA4_INTR_VECT, &port[0]);
387 free_irq(DMA5_INTR_VECT, &port[0]);
388 printk(KERN_CRIT "Can't allocate sync serial port 0 TX DMA channel");
390 } else if (crisv32_request_dma(SYNC_SER0_RX_DMA_NBR,
391 "synchronous serial 0 dma rec",
392 DMA_VERBOSE_ON_ERROR,
395 crisv32_free_dma(SYNC_SER0_TX_DMA_NBR);
396 free_irq(DMA4_INTR_VECT, &port[0]);
397 free_irq(DMA5_INTR_VECT, &port[0]);
printk(KERN_CRIT "Can't allocate sync serial port 0 RX DMA channel");
403 else if (port == &ports[1]){
405 if (request_irq(DMA6_INTR_VECT,
408 "synchronous serial 1 dma tr",
410 printk(KERN_CRIT "Can't allocate sync serial port 1 IRQ");
412 } else if (request_irq(DMA7_INTR_VECT,
415 "synchronous serial 1 dma rx",
417 free_irq(DMA6_INTR_VECT, &ports[1]);
printk(KERN_CRIT "Can't allocate sync serial port 1 IRQ");
420 } else if (crisv32_request_dma(SYNC_SER1_TX_DMA_NBR,
421 "synchronous serial 1 dma tr",
422 DMA_VERBOSE_ON_ERROR,
free_irq(DMA7_INTR_VECT, &ports[1]);
free_irq(DMA6_INTR_VECT, &ports[1]);
printk(KERN_CRIT "Can't allocate sync serial port 1 TX DMA channel");
429 } else if (crisv32_request_dma(SYNC_SER1_RX_DMA_NBR,
430 "synchronous serial 3 dma rec",
431 DMA_VERBOSE_ON_ERROR,
434 crisv32_free_dma(SYNC_SER1_TX_DMA_NBR);
435 free_irq(DMA6_INTR_VECT, &ports[1]);
436 free_irq(DMA7_INTR_VECT, &ports[1]);
printk(KERN_CRIT "Can't allocate sync serial port 1 RX DMA channel");
444 REG_WR(dma, port->regi_dmain, rw_cfg, cfg);
445 REG_WR(dma, port->regi_dmaout, rw_cfg, cfg);
446 /* Enable DMA IRQs */
447 REG_WR(dma, port->regi_dmain, rw_intr_mask, intr_mask);
448 REG_WR(dma, port->regi_dmaout, rw_intr_mask, intr_mask);
/* Set up wordsize = 1 for DMAs. */
450 DMA_WR_CMD (port->regi_dmain, regk_dma_set_w_size1);
451 DMA_WR_CMD (port->regi_dmaout, regk_dma_set_w_size1);
455 } else { /* !port->use_dma */
456 #ifdef SYNC_SER_MANUAL
457 if (port == &ports[0]) {
458 if (request_irq(SSER0_INTR_VECT,
461 "synchronous serial manual irq",
463 printk("Can't allocate sync serial manual irq");
466 } else if (port == &ports[1]) {
467 if (request_irq(SSER1_INTR_VECT,
470 "synchronous serial manual irq",
472 printk(KERN_CRIT "Can't allocate sync serial manual irq");
478 panic("sync_serial: Manual mode not supported.\n");
479 #endif /* SYNC_SER_MANUAL */
481 } /* port->init_irqs */
487 static int sync_serial_release(struct inode *inode, struct file *file)
489 int dev = MINOR(inode->i_rdev);
492 if (dev < 0 || dev >= NUMBER_OF_PORTS || !ports[dev].enabled)
494 DEBUG(printk("Invalid minor %d\n", dev));
505 static unsigned int sync_serial_poll(struct file *file, poll_table *wait)
507 int dev = MINOR(file->f_dentry->d_inode->i_rdev);
508 unsigned int mask = 0;
510 DEBUGPOLL( static unsigned int prev_mask = 0; );
513 poll_wait(file, &port->out_wait_q, wait);
514 poll_wait(file, &port->in_wait_q, wait);
515 /* Some room to write */
516 if (port->out_count < OUT_BUFFER_SIZE)
517 mask |= POLLOUT | POLLWRNORM;
518 /* At least an inbufchunk of data */
519 if (sync_data_avail(port) >= port->inbufchunk)
520 mask |= POLLIN | POLLRDNORM;
522 DEBUGPOLL(if (mask != prev_mask)
523 printk("sync_serial_poll: mask 0x%08X %s %s\n", mask,
524 mask&POLLOUT?"POLLOUT":"", mask&POLLIN?"POLLIN":"");
530 static int sync_serial_ioctl(struct inode *inode, struct file *file,
531 unsigned int cmd, unsigned long arg)
534 int dev = MINOR(file->f_dentry->d_inode->i_rdev);
536 reg_sser_rw_tr_cfg tr_cfg;
537 reg_sser_rw_rec_cfg rec_cfg;
538 reg_sser_rw_frm_cfg frm_cfg;
539 reg_sser_rw_cfg gen_cfg;
540 reg_sser_rw_intr_mask intr_mask;
542 if (dev < 0 || dev >= NUMBER_OF_PORTS || !ports[dev].enabled)
544 DEBUG(printk("Invalid minor %d\n", dev));
548 spin_lock_irq(&port->lock);
550 tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
551 rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
552 frm_cfg = REG_RD(sser, port->regi_sser, rw_frm_cfg);
553 gen_cfg = REG_RD(sser, port->regi_sser, rw_cfg);
554 intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
559 if (GET_SPEED(arg) == CODEC)
561 gen_cfg.base_freq = regk_sser_f32;
/* FREQ = 0 => 4 MHz => clk_div = 7 */
563 gen_cfg.clk_div = 6 + (1 << GET_FREQ(arg));
567 gen_cfg.base_freq = regk_sser_f29_493;
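/* All fixed-rate cases below use the same divider formula,
 * clk_div = 29493000 / (rate * 8) - 1; e.g. 115200 baud gives
 * 29493000 / (115200 * 8) - 1 = 31. */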
568 switch (GET_SPEED(arg))
571 gen_cfg.clk_div = 29493000 / (150 * 8) - 1;
574 gen_cfg.clk_div = 29493000 / (300 * 8) - 1;
577 gen_cfg.clk_div = 29493000 / (600 * 8) - 1;
580 gen_cfg.clk_div = 29493000 / (1200 * 8) - 1;
583 gen_cfg.clk_div = 29493000 / (2400 * 8) - 1;
586 gen_cfg.clk_div = 29493000 / (4800 * 8) - 1;
589 gen_cfg.clk_div = 29493000 / (9600 * 8) - 1;
592 gen_cfg.clk_div = 29493000 / (19200 * 8) - 1;
595 gen_cfg.clk_div = 29493000 / (28800 * 8) - 1;
598 gen_cfg.clk_div = 29493000 / (57600 * 8) - 1;
601 gen_cfg.clk_div = 29493000 / (115200 * 8) - 1;
604 gen_cfg.clk_div = 29493000 / (230400 * 8) - 1;
607 gen_cfg.clk_div = 29493000 / (460800 * 8) - 1;
610 gen_cfg.clk_div = 29493000 / (921600 * 8) - 1;
613 gen_cfg.base_freq = regk_sser_f100;
614 gen_cfg.clk_div = 100000000 / (3125000 * 8) - 1;
619 frm_cfg.wordrate = GET_WORD_RATE(arg);
628 gen_cfg.clk_dir = regk_sser_out;
633 gen_cfg.clk_dir = regk_sser_in;
638 gen_cfg.clk_dir = regk_sser_out;
643 gen_cfg.clk_dir = regk_sser_in;
648 gen_cfg.clk_dir = regk_sser_out;
653 gen_cfg.clk_dir = regk_sser_in;
656 spin_unlock_irq(&port->lock);
660 if (!port->use_dma || (arg == MASTER_OUTPUT || arg == SLAVE_OUTPUT))
661 intr_mask.rdav = regk_sser_yes;
664 if (arg & NORMAL_SYNC)
665 frm_cfg.tr_delay = 1;
666 else if (arg & EARLY_SYNC)
667 frm_cfg.tr_delay = 0;
669 tr_cfg.bulk_wspace = frm_cfg.tr_delay;
670 frm_cfg.early_wend = regk_sser_yes;
672 frm_cfg.type = regk_sser_edge;
673 else if (arg & WORD_SYNC)
674 frm_cfg.type = regk_sser_level;
675 else if (arg & EXTENDED_SYNC)
676 frm_cfg.early_wend = regk_sser_no;
679 frm_cfg.frame_pin_use = regk_sser_frm;
680 else if (arg & SYNC_OFF)
681 frm_cfg.frame_pin_use = regk_sser_gio0;
683 if (arg & WORD_SIZE_8)
684 rec_cfg.sample_size = tr_cfg.sample_size = 7;
685 else if (arg & WORD_SIZE_12)
686 rec_cfg.sample_size = tr_cfg.sample_size = 11;
687 else if (arg & WORD_SIZE_16)
688 rec_cfg.sample_size = tr_cfg.sample_size = 15;
689 else if (arg & WORD_SIZE_24)
690 rec_cfg.sample_size = tr_cfg.sample_size = 23;
691 else if (arg & WORD_SIZE_32)
692 rec_cfg.sample_size = tr_cfg.sample_size = 31;
694 if (arg & BIT_ORDER_MSB)
695 rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_msbfirst;
696 else if (arg & BIT_ORDER_LSB)
697 rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_lsbfirst;
699 if (arg & FLOW_CONTROL_ENABLE)
700 rec_cfg.fifo_thr = regk_sser_thr16;
701 else if (arg & FLOW_CONTROL_DISABLE)
702 rec_cfg.fifo_thr = regk_sser_inf;
704 if (arg & CLOCK_NOT_GATED)
705 gen_cfg.gate_clk = regk_sser_no;
706 else if (arg & CLOCK_GATED)
707 gen_cfg.gate_clk = regk_sser_yes;
711 /* NOTE!! negedge is considered NORMAL */
712 if (arg & CLOCK_NORMAL)
713 rec_cfg.clk_pol = regk_sser_neg;
714 else if (arg & CLOCK_INVERT)
715 rec_cfg.clk_pol = regk_sser_pos;
717 if (arg & FRAME_NORMAL)
718 frm_cfg.level = regk_sser_pos_hi;
719 else if (arg & FRAME_INVERT)
720 frm_cfg.level = regk_sser_neg_lo;
722 if (arg & STATUS_NORMAL)
723 gen_cfg.hold_pol = regk_sser_pos;
724 else if (arg & STATUS_INVERT)
725 gen_cfg.hold_pol = regk_sser_neg;
728 if (arg & CLOCK_NORMAL)
729 gen_cfg.out_clk_pol = regk_sser_neg;
730 else if (arg & CLOCK_INVERT)
731 gen_cfg.out_clk_pol = regk_sser_pos;
733 if (arg & FRAME_NORMAL)
734 frm_cfg.level = regk_sser_pos_hi;
735 else if (arg & FRAME_INVERT)
736 frm_cfg.level = regk_sser_neg_lo;
738 if (arg & STATUS_NORMAL)
739 gen_cfg.hold_pol = regk_sser_pos;
740 else if (arg & STATUS_INVERT)
741 gen_cfg.hold_pol = regk_sser_neg;
744 rec_cfg.fifo_thr = regk_sser_inf;
745 rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_msbfirst;
746 rec_cfg.sample_size = tr_cfg.sample_size = 7;
747 frm_cfg.frame_pin_use = regk_sser_frm;
748 frm_cfg.type = regk_sser_level;
749 frm_cfg.tr_delay = 1;
750 frm_cfg.level = regk_sser_neg_lo;
753 rec_cfg.clk_pol = regk_sser_neg;
754 gen_cfg.clk_dir = regk_sser_in;
760 gen_cfg.out_clk_pol = regk_sser_pos;
763 gen_cfg.clk_dir = regk_sser_out;
775 tr_cfg.tr_en = port->output;
776 rec_cfg.rec_en = port->input;
779 REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
780 REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
781 REG_WR(sser, port->regi_sser, rw_frm_cfg, frm_cfg);
782 REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
783 REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);
785 spin_unlock_irq(&port->lock);
789 static ssize_t sync_serial_write(struct file * file, const char * buf,
790 size_t count, loff_t *ppos)
792 int dev = MINOR(file->f_dentry->d_inode->i_rdev);
793 DECLARE_WAITQUEUE(wait, current);
796 unsigned long free_outp;
798 unsigned long out_buffer;
801 if (dev < 0 || dev >= NUMBER_OF_PORTS || !ports[dev].enabled)
803 DEBUG(printk("Invalid minor %d\n", dev));
808 DEBUGWRITE(printk("W d%d c %lu (%d/%d)\n", port->port_nbr, count, port->out_count, OUT_BUFFER_SIZE));
809 /* Space to end of buffer */
* Two layouts for the free region of out_buffer:
*  1) outp + out_count has not wrapped: write c bytes from there up to the
*     end of out_buffer, then possibly c1 bytes from the start of the buffer.
*  2) outp + out_count has wrapped: write the c bytes between the wrapped
*     position and outp.
820 /* Read variables that may be updated by interrupts */
821 spin_lock_irqsave(&port->lock, flags);
822 count = count > OUT_BUFFER_SIZE - port->out_count ? OUT_BUFFER_SIZE - port->out_count : count;
823 outp = (unsigned long)port->outp;
824 free_outp = outp + port->out_count;
825 spin_unlock_irqrestore(&port->lock, flags);
826 out_buffer = (unsigned long)port->out_buffer;
828 /* Find out where and how much to write */
829 if (free_outp >= out_buffer + OUT_BUFFER_SIZE)
830 free_outp -= OUT_BUFFER_SIZE;
831 if (free_outp >= outp)
832 c = out_buffer + OUT_BUFFER_SIZE - free_outp;
834 c = outp - free_outp;
838 // DEBUGWRITE(printk("w op %08lX fop %08lX c %lu\n", outp, free_outp, c));
839 if (copy_from_user((void*)free_outp, buf, c))
845 DEBUGWRITE(printk("w2 fi %lu c %lu c1 %lu\n", free_outp-out_buffer, c, c1));
846 if (copy_from_user((void*)out_buffer, buf, c1))
849 spin_lock_irqsave(&port->lock, flags);
850 port->out_count += count;
851 spin_unlock_irqrestore(&port->lock, flags);
853 /* Make sure transmitter/receiver is running */
856 reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
857 reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
858 reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
859 cfg.en = regk_sser_yes;
860 tr_cfg.tr_en = port->output;
861 rec_cfg.rec_en = port->input;
862 REG_WR(sser, port->regi_sser, rw_cfg, cfg);
863 REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
864 REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
868 if (file->f_flags & O_NONBLOCK) {
869 spin_lock_irqsave(&port->lock, flags);
870 if (!port->tr_running) {
871 if (!port->use_dma) {
872 reg_sser_rw_intr_mask intr_mask;
873 intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
874 /* Start sender by writing data */
876 /* and enable transmitter ready IRQ */
878 REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
start_dma(port, (const char *)port->outp, c);
883 spin_unlock_irqrestore(&port->lock, flags);
884 DEBUGWRITE(printk("w d%d c %lu NB\n",
885 port->port_nbr, count));
889 /* Sleep until all sent */
891 add_wait_queue(&port->out_wait_q, &wait);
892 set_current_state(TASK_INTERRUPTIBLE);
893 spin_lock_irqsave(&port->lock, flags);
894 if (!port->tr_running) {
895 if (!port->use_dma) {
896 reg_sser_rw_intr_mask intr_mask;
897 intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
898 /* Start sender by writing data */
900 /* and enable transmitter ready IRQ */
902 REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
904 start_dma(port, port->outp, c);
907 spin_unlock_irqrestore(&port->lock, flags);
909 set_current_state(TASK_RUNNING);
910 remove_wait_queue(&port->out_wait_q, &wait);
911 if (signal_pending(current))
915 DEBUGWRITE(printk("w d%d c %lu\n", port->port_nbr, count));
919 static ssize_t sync_serial_read(struct file * file, char * buf,
920 size_t count, loff_t *ppos)
922 int dev = MINOR(file->f_dentry->d_inode->i_rdev);
925 unsigned char* start;
929 if (dev < 0 || dev >= NUMBER_OF_PORTS || !ports[dev].enabled)
931 DEBUG(printk("Invalid minor %d\n", dev));
936 DEBUGREAD(printk("R%d c %d ri %lu wi %lu /%lu\n", dev, count, port->readp - port->flip, port->writep - port->flip, port->in_buffer_size));
940 reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
941 reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
942 reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
943 cfg.en = regk_sser_yes;
944 tr_cfg.tr_en = regk_sser_yes;
945 rec_cfg.rec_en = regk_sser_yes;
946 REG_WR(sser, port->regi_sser, rw_cfg, cfg);
947 REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
948 REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
953 /* Calculate number of available bytes */
/* Save the pointers so they cannot be modified by interrupts meanwhile */
955 spin_lock_irqsave(&port->lock, flags);
956 start = (unsigned char*)port->readp; /* cast away volatile */
957 end = (unsigned char*)port->writep; /* cast away volatile */
958 spin_unlock_irqrestore(&port->lock, flags);
959 while ((start == end) && !port->full) /* No data */
961 if (file->f_flags & O_NONBLOCK)
966 interruptible_sleep_on(&port->in_wait_q);
967 if (signal_pending(current))
971 spin_lock_irqsave(&port->lock, flags);
972 start = (unsigned char*)port->readp; /* cast away volatile */
973 end = (unsigned char*)port->writep; /* cast away volatile */
974 spin_unlock_irqrestore(&port->lock, flags);
977 /* Lazy read, never return wrapped data. */
979 avail = port->in_buffer_size;
980 else if (end > start)
983 avail = port->flip + port->in_buffer_size - start;
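/* Wrapped case: only the run from readp to the end of the flip buffer is
 * returned here; the remainder is picked up by the next read() call. */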
985 count = count > avail ? avail : count;
986 if (copy_to_user(buf, start, count))
988 /* Disable interrupts while updating readp */
989 spin_lock_irqsave(&port->lock, flags);
990 port->readp += count;
991 if (port->readp >= port->flip + port->in_buffer_size) /* Wrap? */
992 port->readp = port->flip;
994 spin_unlock_irqrestore(&port->lock, flags);
995 DEBUGREAD(printk("r %d\n", count));
999 static void send_word(sync_port* port)
1001 reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
1002 reg_sser_rw_tr_data tr_data = {0};
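/* sample_size is the word length minus one (7/11/15/23/31 for 8/12/16/24/32
 * bit words); each case below pulls the matching number of bytes from
 * out_buffer and wraps outp when it reaches the end of the buffer. */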
1004 switch(tr_cfg.sample_size)
1008 tr_data.data = *port->outp++;
1009 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1010 if (port->outp >= port->out_buffer + OUT_BUFFER_SIZE)
1011 port->outp = port->out_buffer;
1015 int data = (*port->outp++) << 8;
1016 data |= *port->outp++;
1018 tr_data.data = data;
1019 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1020 if (port->outp >= port->out_buffer + OUT_BUFFER_SIZE)
1021 port->outp = port->out_buffer;
1026 tr_data.data = *(unsigned short *)port->outp;
1027 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1029 if (port->outp >= port->out_buffer + OUT_BUFFER_SIZE)
1030 port->outp = port->out_buffer;
1034 tr_data.data = *(unsigned short *)port->outp;
1035 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1037 tr_data.data = *port->outp++;
1038 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1039 if (port->outp >= port->out_buffer + OUT_BUFFER_SIZE)
1040 port->outp = port->out_buffer;
1044 tr_data.data = *(unsigned short *)port->outp;
1045 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1047 tr_data.data = *(unsigned short *)port->outp;
1048 REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
1050 if (port->outp >= port->out_buffer + OUT_BUFFER_SIZE)
1051 port->outp = port->out_buffer;
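/* Transmission uses one DMA descriptor per burst: start_dma() maps a single
 * contiguous run of out_buffer (eol and intr set on the descriptor), and
 * tr_interrupt() below restarts the DMA with the next run until out_count
 * reaches zero. */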
1057 static void start_dma(struct sync_port* port, const char* data, int count)
1059 port->tr_running = 1;
1060 port->out_descr.buf = (char*)virt_to_phys((char*)data);
1061 port->out_descr.after = port->out_descr.buf + count;
1062 port->out_descr.eol = port->out_descr.intr = 1;
1064 port->out_context.saved_data = (dma_descr_data*)virt_to_phys(&port->out_descr);
1065 port->out_context.saved_data_buf = port->out_descr.buf;
1067 DMA_START_CONTEXT(port->regi_dmaout, virt_to_phys((char*)&port->out_context));
1068 DEBUGTXINT(printk("dma %08lX c %d\n", (unsigned long)data, count));
1071 static void start_dma_in(sync_port* port)
1075 port->writep = port->flip;
1077 if (port->writep > port->flip + port->in_buffer_size)
1079 panic("Offset too large in sync serial driver\n");
1082 buf = (char*)virt_to_phys(port->in_buffer);
1083 for (i = 0; i < NUM_IN_DESCR; i++) {
1084 port->in_descr[i].buf = buf;
1085 port->in_descr[i].after = buf + port->inbufchunk;
1086 port->in_descr[i].intr = 1;
1087 port->in_descr[i].next = (dma_descr_data*)virt_to_phys(&port->in_descr[i+1]);
1089 buf += port->inbufchunk;
1091 /* Link the last descriptor to the first */
1092 port->in_descr[i-1].next = (dma_descr_data*)virt_to_phys(&port->in_descr[0]);
1093 port->in_descr[i-1].eol = regk_sser_yes;
1094 port->next_rx_desc = &port->in_descr[0];
1095 port->prev_rx_desc = &port->in_descr[NUM_IN_DESCR - 1];
1096 port->in_context.saved_data = (dma_descr_data*)virt_to_phys(&port->in_descr[0]);
1097 port->in_context.saved_data_buf = port->in_descr[0].buf;
1098 DMA_START_CONTEXT(port->regi_dmain, virt_to_phys(&port->in_context));
1102 static irqreturn_t tr_interrupt(int irq, void *dev_id, struct pt_regs * regs)
1104 reg_dma_r_masked_intr masked;
1105 reg_dma_rw_ack_intr ack_intr = {.data = regk_dma_yes};
1107 struct dma_descr_data *descr;
1111 for (i = 0; i < NUMBER_OF_PORTS; i++)
1113 sync_port *port = &ports[i];
1114 if (!port->enabled || !port->use_dma )
1117 masked = REG_RD(dma, port->regi_dmaout, r_masked_intr);
1119 if (masked.data) /* IRQ active for the port? */
1123 REG_WR(dma, port->regi_dmaout, rw_ack_intr, ack_intr);
1124 descr = &port->out_descr;
1125 sentl = descr->after - descr->buf;
1126 port->out_count -= sentl;
1127 port->outp += sentl;
1128 if (port->outp >= port->out_buffer + OUT_BUFFER_SIZE)
1129 port->outp = port->out_buffer;
1130 if (port->out_count) {
1132 c = port->out_buffer + OUT_BUFFER_SIZE - port->outp;
1133 if (c > port->out_count)
1134 c = port->out_count;
1135 DEBUGTXINT(printk("tx_int DMAWRITE %i %i\n", sentl, c));
1136 start_dma(port, port->outp, c);
1138 DEBUGTXINT(printk("tx_int DMA stop %i\n", sentl));
1139 port->tr_running = 0;
1141 wake_up_interruptible(&port->out_wait_q); /* wake up the waiting process */
1144 return IRQ_RETVAL(found);
1145 } /* tr_interrupt */
1147 static irqreturn_t rx_interrupt(int irq, void *dev_id, struct pt_regs * regs)
1149 reg_dma_r_masked_intr masked;
1150 reg_dma_rw_ack_intr ack_intr = {.data = regk_dma_yes};
1155 for (i = 0; i < NUMBER_OF_PORTS; i++)
1157 sync_port *port = &ports[i];
1159 if (!port->enabled || !port->use_dma )
1162 masked = REG_RD(dma, port->regi_dmain, r_masked_intr);
1164 if (masked.data) /* Descriptor interrupt */
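/* rw_data tracks the descriptor the DMA is currently working on; every
 * descriptor from next_rx_desc up to (but not including) that one has been
 * filled and its chunk can be copied into the flip buffer. */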
1167 while (REG_RD(dma, port->regi_dmain, rw_data) !=
1168 virt_to_phys(port->next_rx_desc)) {
1170 if (port->writep + port->inbufchunk > port->flip + port->in_buffer_size) {
1171 int first_size = port->flip + port->in_buffer_size - port->writep;
1172 memcpy((char*)port->writep, phys_to_virt((unsigned)port->next_rx_desc->buf), first_size);
1173 memcpy(port->flip, phys_to_virt((unsigned)port->next_rx_desc->buf+first_size), port->inbufchunk - first_size);
1174 port->writep = port->flip + port->inbufchunk - first_size;
1176 memcpy((char*)port->writep,
1177 phys_to_virt((unsigned)port->next_rx_desc->buf),
1179 port->writep += port->inbufchunk;
1180 if (port->writep >= port->flip + port->in_buffer_size)
1181 port->writep = port->flip;
1183 if (port->writep == port->readp)
1188 port->next_rx_desc->eol = 0;
1189 port->prev_rx_desc->eol = 1;
port->prev_rx_desc = port->next_rx_desc;
1191 port->next_rx_desc = phys_to_virt((unsigned)port->next_rx_desc->next);
1192 wake_up_interruptible(&port->in_wait_q); /* wake up the waiting process */
1193 DMA_CONTINUE(port->regi_dmain);
1194 REG_WR(dma, port->regi_dmain, rw_ack_intr, ack_intr);
1199 return IRQ_RETVAL(found);
1200 } /* rx_interrupt */
1201 #endif /* SYNC_SER_DMA */
1203 #ifdef SYNC_SER_MANUAL
1204 static irqreturn_t manual_interrupt(int irq, void *dev_id, struct pt_regs * regs)
1208 reg_sser_r_masked_intr masked;
1210 for (i = 0; i < NUMBER_OF_PORTS; i++)
1212 sync_port* port = &ports[i];
1214 if (!port->enabled || port->use_dma)
1219 masked = REG_RD(sser, port->regi_sser, r_masked_intr);
1220 if (masked.rdav) /* Data received? */
1222 reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
1223 reg_sser_r_rec_data data = REG_RD(sser, port->regi_sser, r_rec_data);
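/* Mirror of send_word(): sample_size selects how the word in r_rec_data is
 * unpacked into bytes at writep. */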
1226 switch(rec_cfg.sample_size)
1229 *port->writep++ = data.data & 0xff;
1232 *port->writep = (data.data & 0x0ff0) >> 4;
1233 *(port->writep + 1) = data.data & 0x0f;
1237 *(unsigned short*)port->writep = data.data;
1241 *(unsigned int*)port->writep = data.data;
1245 *(unsigned int*)port->writep = data.data;
1250 if (port->writep >= port->flip + port->in_buffer_size) /* Wrap? */
1251 port->writep = port->flip;
1252 if (port->writep == port->readp) {
1253 /* receive buffer overrun, discard oldest data
1256 if (port->readp >= port->flip + port->in_buffer_size) /* Wrap? */
1257 port->readp = port->flip;
1259 if (sync_data_avail(port) >= port->inbufchunk)
1260 wake_up_interruptible(&port->in_wait_q); /* Wake up application */
1263 if (masked.trdy) /* Transmitter ready? */
1266 if (port->out_count > 0) /* More data to send */
1268 else /* transmission finished */
1270 reg_sser_rw_intr_mask intr_mask;
1271 intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
1273 REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
1274 wake_up_interruptible(&port->out_wait_q); /* Wake up application */
1278 return IRQ_RETVAL(found);
1282 module_init(etrax_sync_serial_init);