2 * Driver for high-speed SCC boards (those with DMA support)
3 * Copyright (C) 1997-2000 Klaus Kudielka
5 * S5SCC/DMA support by Janko Koleznik S52HI
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 #include <linux/module.h>
24 #include <linux/delay.h>
25 #include <linux/errno.h>
26 #include <linux/if_arp.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/kernel.h>
33 #include <linux/netdevice.h>
34 #include <linux/rtnetlink.h>
35 #include <linux/sockios.h>
36 #include <linux/workqueue.h>
37 #include <asm/atomic.h>
38 #include <asm/bitops.h>
42 #include <asm/uaccess.h>
47 /* Number of buffers per channel */
49 #define NUM_TX_BUF 2 /* NUM_TX_BUF >= 1 (min. 2 recommended) */
50 #define NUM_RX_BUF 6 /* NUM_RX_BUF >= 1 (min. 2 recommended) */
51 #define BUF_SIZE 1576 /* BUF_SIZE >= mtu + hard_header_len */
56 #define HW_PI { "Ottawa PI", 0x300, 0x20, 0x10, 8, \
57 0, 8, 1843200, 3686400 }
58 #define HW_PI2 { "Ottawa PI2", 0x300, 0x20, 0x10, 8, \
59 0, 8, 3686400, 7372800 }
60 #define HW_TWIN { "Gracilis PackeTwin", 0x200, 0x10, 0x10, 32, \
61 0, 4, 6144000, 6144000 }
62 #define HW_S5 { "S5SCC/DMA", 0x200, 0x10, 0x10, 32, \
63 0, 8, 4915200, 9830400 }
65 #define HARDWARE { HW_PI, HW_PI2, HW_TWIN, HW_S5 }
67 #define TMR_0_HZ 25600 /* Frequency of timer 0 */
75 #define MAX_NUM_DEVS 32
78 /* SCC chips supported */
84 #define CHIPNAMES { "Z8530", "Z85C30", "Z85230" }
89 /* 8530 registers relative to card base */
91 #define SCCB_DATA 0x01
93 #define SCCA_DATA 0x03
95 /* 8253/8254 registers relative to card base */
101 /* Additional PI/PI2 registers relative to card base */
102 #define PI_DREQ_MASK 0x04
104 /* Additional PackeTwin registers relative to card base */
105 #define TWIN_INT_REG 0x08
106 #define TWIN_CLR_TMR1 0x09
107 #define TWIN_CLR_TMR2 0x0a
108 #define TWIN_SPARE_1 0x0b
109 #define TWIN_DMA_CFG 0x08
110 #define TWIN_SERIAL_CFG 0x09
111 #define TWIN_DMA_CLR_FF 0x0a
112 #define TWIN_SPARE_2 0x0b
115 /* PackeTwin I/O register values */
118 #define TWIN_SCC_MSK 0x01
119 #define TWIN_TMR1_MSK 0x02
120 #define TWIN_TMR2_MSK 0x04
121 #define TWIN_INT_MSK 0x07
124 #define TWIN_DTRA_ON 0x01
125 #define TWIN_DTRB_ON 0x02
126 #define TWIN_EXTCLKA 0x04
127 #define TWIN_EXTCLKB 0x08
128 #define TWIN_LOOPA_ON 0x10
129 #define TWIN_LOOPB_ON 0x20
133 #define TWIN_DMA_HDX_T1 0x08
134 #define TWIN_DMA_HDX_R1 0x0a
135 #define TWIN_DMA_HDX_T3 0x14
136 #define TWIN_DMA_HDX_R3 0x16
137 #define TWIN_DMA_FDX_T3R1 0x1b
138 #define TWIN_DMA_FDX_T1R3 0x1d
157 #define SIOCGSCCPARAM SIOCDEVPRIVATE
158 #define SIOCSSCCPARAM (SIOCDEVPRIVATE+1)
164 int pclk_hz; /* frequency of BRG input (don't change) */
165 int brg_tc; /* BRG terminal count; BRG disabled if < 0 */
166 int nrzi; /* 0 (nrz), 1 (nrzi) */
167 int clocks; /* see dmascc_cfg documentation */
168 int txdelay; /* [1/TMR_0_HZ] */
169 int txtimeout; /* [1/HZ] */
170 int txtail; /* [1/TMR_0_HZ] */
171 int waittime; /* [1/TMR_0_HZ] */
172 int slottime; /* [1/TMR_0_HZ] */
173 int persist; /* 1 ... 256 */
174 int dma; /* -1 (disable), 0, 1, 3 */
175 int txpause; /* [1/TMR_0_HZ] */
176 int rtsoff; /* [1/TMR_0_HZ] */
177 int dcdon; /* [1/TMR_0_HZ] */
178 int dcdoff; /* [1/TMR_0_HZ] */
181 struct scc_hardware {
196 struct net_device *dev;
197 struct scc_info *info;
198 struct net_device_stats stats;
200 int card_base, scc_cmd, scc_data;
201 int tmr_cnt, tmr_ctrl, tmr_mode;
202 struct scc_param param;
203 char rx_buf[NUM_RX_BUF][BUF_SIZE];
204 int rx_len[NUM_RX_BUF];
206 struct work_struct rx_work;
207 int rx_head, rx_tail, rx_count;
209 char tx_buf[NUM_TX_BUF][BUF_SIZE];
210 int tx_len[NUM_TX_BUF];
212 int tx_head, tx_tail, tx_count;
214 unsigned long tx_start;
216 spinlock_t *register_lock; /* Per scc_info */
217 spinlock_t ring_lock;
223 struct net_device *dev[2];
224 struct scc_priv priv[2];
225 struct scc_info *next;
226 spinlock_t register_lock; /* Per device register lock */
230 /* Function declarations */
231 static int setup_adapter(int card_base, int type, int n) __init;
233 static void write_scc(struct scc_priv *priv, int reg, int val);
234 static void write_scc_data(struct scc_priv *priv, int val, int fast);
235 static int read_scc(struct scc_priv *priv, int reg);
236 static int read_scc_data(struct scc_priv *priv);
238 static int scc_open(struct net_device *dev);
239 static int scc_close(struct net_device *dev);
240 static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
241 static int scc_send_packet(struct sk_buff *skb, struct net_device *dev);
242 static struct net_device_stats *scc_get_stats(struct net_device *dev);
243 static int scc_set_mac_address(struct net_device *dev, void *sa);
245 static inline void tx_on(struct scc_priv *priv);
246 static inline void rx_on(struct scc_priv *priv);
247 static inline void rx_off(struct scc_priv *priv);
248 static void start_timer(struct scc_priv *priv, int t, int r15);
249 static inline unsigned char random(void);
251 static inline void z8530_isr(struct scc_info *info);
252 static irqreturn_t scc_isr(int irq, void *dev_id, struct pt_regs *regs);
253 static void rx_isr(struct scc_priv *priv);
254 static void special_condition(struct scc_priv *priv, int rc);
255 static void rx_bh(void *arg);
256 static void tx_isr(struct scc_priv *priv);
257 static void es_isr(struct scc_priv *priv);
258 static void tm_isr(struct scc_priv *priv);
261 /* Initialization variables */
263 static int io[MAX_NUM_DEVS] __initdata = { 0, };
265 /* Beware! hw[] is also used in cleanup_module(). */
266 static struct scc_hardware hw[NUM_TYPES] __initdata_or_module = HARDWARE;
267 static char ax25_broadcast[7] __initdata =
268 { 'Q' << 1, 'S' << 1, 'T' << 1, ' ' << 1, ' ' << 1, ' ' << 1,
270 static char ax25_test[7] __initdata =
271 { 'L' << 1, 'I' << 1, 'N' << 1, 'U' << 1, 'X' << 1, ' ' << 1,
275 /* Global variables */
277 static struct scc_info *first;
278 static unsigned long rand;
281 MODULE_AUTHOR("Klaus Kudielka");
282 MODULE_DESCRIPTION("Driver for high-speed SCC boards");
283 MODULE_PARM(io, "1-" __MODULE_STRING(MAX_NUM_DEVS) "i");
284 MODULE_LICENSE("GPL");
286 static void __exit dmascc_exit(void)
289 struct scc_info *info;
294 /* Unregister devices */
295 for (i = 0; i < 2; i++)
296 unregister_netdev(info->dev[i]);
299 if (info->priv[0].type == TYPE_TWIN)
300 outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
301 write_scc(&info->priv[0], R9, FHWRES);
302 release_region(info->dev[0]->base_addr,
303 hw[info->priv[0].type].io_size);
305 for (i = 0; i < 2; i++)
306 free_netdev(info->dev[i]);
315 void __init dmascc_setup(char *str, int *ints)
319 for (i = 0; i < MAX_NUM_DEVS && i < ints[0]; i++)
/*
 * Module/boot-time probe. For each supported board type, determines the
 * candidate I/O regions (user-specified via io[] or the hardware table
 * defaults), programs timers 0/1/2 of the on-board 8253/8254, then
 * measures how long timer 1 takes to count down: a delay of ~10 jiffies
 * (HZ/10 programmed) identifies a real adapter, which is then handed to
 * setup_adapter(). Returns 0 if at least one adapter was set up.
 *
 * NOTE(review): this view of the function is incomplete — the embedded
 * line numbering shows many statements (braces, assignments, the
 * request_region call, return paths) are missing. Do NOT treat this
 * block as compilable; it is annotated in place only.
 */
324 static int __init dmascc_init(void)
327 int base[MAX_NUM_DEVS], tcmd[MAX_NUM_DEVS], t0[MAX_NUM_DEVS],
330 unsigned long time, start[MAX_NUM_DEVS], delay[MAX_NUM_DEVS],
331 counting[MAX_NUM_DEVS];
333 /* Initialize random number generator */
335 /* Cards found = 0 */
337 /* Warning message */
339 printk(KERN_INFO "dmascc: autoprobing (dangerous)\n");
341 /* Run autodetection for each card type */
342 for (h = 0; h < NUM_TYPES; h++) {
345 /* User-specified I/O address regions */
346 for (i = 0; i < hw[h].num_devs; i++)
348 for (i = 0; i < MAX_NUM_DEVS && io[i]; i++) {
/* Map each user address back to a device slot index for this type */
350 hw[h].io_region) / hw[h].io_delta;
351 if (j >= 0 && j < hw[h].num_devs
353 j * hw[h].io_delta == io[i]) {
358 /* Default I/O address regions */
359 for (i = 0; i < hw[h].num_devs; i++) {
361 hw[h].io_region + i * hw[h].io_delta;
365 /* Check valid I/O address regions */
366 for (i = 0; i < hw[h].num_devs; i++)
369 (base[i], hw[h].io_size, "dmascc"))
/* Pre-compute timer register addresses relative to each base */
373 base[i] + hw[h].tmr_offset +
376 base[i] + hw[h].tmr_offset +
379 base[i] + hw[h].tmr_offset +
385 for (i = 0; i < hw[h].num_devs; i++)
387 /* Timer 0: LSB+MSB, Mode 3, TMR_0_HZ */
389 outb((hw[h].tmr_hz / TMR_0_HZ) & 0xFF,
391 outb((hw[h].tmr_hz / TMR_0_HZ) >> 8,
393 /* Timer 1: LSB+MSB, Mode 0, HZ/10 */
395 outb((TMR_0_HZ / HZ * 10) & 0xFF, t1[i]);
396 outb((TMR_0_HZ / HZ * 10) >> 8, t1[i]);
400 /* Timer 2: LSB+MSB, Mode 0 */
404 /* Wait until counter registers are loaded */
405 udelay(2000000 / TMR_0_HZ);
/* Poll for up to 13 jiffies while timer 1 counts down */
408 while (jiffies - time < 13) {
409 for (i = 0; i < hw[h].num_devs; i++)
410 if (base[i] && counting[i]) {
411 /* Read back Timer 1: latch; read LSB; read MSB */
414 inb(t1[i]) + (inb(t1[i]) << 8);
415 /* Also check whether counter did wrap */
417 || t_val > TMR_0_HZ / HZ * 10)
419 delay[i] = jiffies - start[i];
423 /* Evaluate measurements */
424 for (i = 0; i < hw[h].num_devs; i++)
/* ~10 jiffies (tolerance +/-1) means the timer is real hardware */
426 if ((delay[i] >= 9 && delay[i] <= 11) &&
427 /* Ok, we have found an adapter */
428 (setup_adapter(base[i], h, n) == 0))
431 release_region(base[i],
437 /* If any adapter was successfully initialized, return ok */
441 /* If no adapter found, return error */
442 printk(KERN_INFO "dmascc: no adapters found\n");
446 module_init(dmascc_init);
447 module_exit(dmascc_exit);
449 static void dev_setup(struct net_device *dev)
451 dev->type = ARPHRD_AX25;
452 dev->hard_header_len = AX25_MAX_HEADER_LEN;
454 dev->addr_len = AX25_ADDR_LEN;
455 dev->tx_queue_len = 64;
456 memcpy(dev->broadcast, ax25_broadcast, AX25_ADDR_LEN);
457 memcpy(dev->dev_addr, ax25_test, AX25_ADDR_LEN);
/*
 * Bring up one detected adapter: allocate scc_info plus two net_devices,
 * identify the SCC chip variant (Z8530 / Z85C30 / Z85230) via the WR7'
 * and TX-FIFO probes, auto-detect the IRQ with probe_irq_on/off() and a
 * one-shot timer interrupt, initialize per-channel scc_priv state and
 * net_device callbacks, and register both devices. Returns 0 on
 * success; error paths unwind registration/allocations in reverse.
 *
 * NOTE(review): this view is incomplete — error labels, returns, and
 * several assignments are missing (gaps in the embedded numbering).
 * Annotated in place only; do not treat as compilable.
 */
460 static int __init setup_adapter(int card_base, int type, int n)
463 struct scc_info *info;
464 struct net_device *dev;
465 struct scc_priv *priv;
468 int tmr_base = card_base + hw[type].tmr_offset;
469 int scc_base = card_base + hw[type].scc_offset;
470 char *chipnames[] = CHIPNAMES;
472 /* Allocate memory */
473 info = kmalloc(sizeof(struct scc_info), GFP_KERNEL | GFP_DMA);
475 printk(KERN_ERR "dmascc: "
476 "could not allocate memory for %s at %#3x\n",
477 hw[type].name, card_base);
481 /* Initialize what is necessary for write_scc and write_scc_data */
482 memset(info, 0, sizeof(struct scc_info));
484 info->dev[0] = alloc_netdev(0, "", dev_setup);
486 printk(KERN_ERR "dmascc: "
487 "could not allocate memory for %s at %#3x\n",
488 hw[type].name, card_base);
492 info->dev[1] = alloc_netdev(0, "", dev_setup);
494 printk(KERN_ERR "dmascc: "
495 "could not allocate memory for %s at %#3x\n",
496 hw[type].name, card_base);
499 spin_lock_init(&info->register_lock);
/* Channel A priv is enough for the chip-identification pokes below */
501 priv = &info->priv[0];
503 priv->card_base = card_base;
504 priv->scc_cmd = scc_base + SCCA_CMD;
505 priv->scc_data = scc_base + SCCA_DATA;
506 priv->register_lock = &info->register_lock;
/* Force hardware reset, master interrupt enable */
509 write_scc(priv, R9, FHWRES | MIE | NV);
511 /* Determine type of chip by enabling SDLC/HDLC enhancements */
512 write_scc(priv, R15, SHDLCE);
513 if (!read_scc(priv, R15)) {
514 /* WR7' not present. This is an ordinary Z8530 SCC. */
517 /* Put one character in TX FIFO */
518 write_scc_data(priv, 0, 0);
519 if (read_scc(priv, R0) & Tx_BUF_EMP) {
520 /* TX FIFO not full. This is a Z85230 ESCC with a 4-byte FIFO. */
523 /* TX FIFO full. This is a Z85C30 SCC with a 1-byte FIFO. */
527 write_scc(priv, R15, 0);
529 /* Start IRQ auto-detection */
530 irqs = probe_irq_on();
532 /* Enable interrupts */
533 if (type == TYPE_TWIN) {
534 outb(0, card_base + TWIN_DMA_CFG);
535 inb(card_base + TWIN_CLR_TMR1);
536 inb(card_base + TWIN_CLR_TMR2);
537 info->twin_serial_cfg = TWIN_EI;
538 outb(info->twin_serial_cfg, card_base + TWIN_SERIAL_CFG);
540 write_scc(priv, R15, CTSIE);
541 write_scc(priv, R0, RES_EXT_INT);
542 write_scc(priv, R1, EXT_INT_ENAB);
/* Trigger a one-shot timer-1 interrupt to identify the IRQ line */
546 outb(1, tmr_base + TMR_CNT1);
547 outb(0, tmr_base + TMR_CNT1);
549 /* Wait and detect IRQ */
551 while (jiffies - time < 2 + HZ / TMR_0_HZ);
552 irq = probe_irq_off(irqs);
554 /* Clear pending interrupt, disable interrupts */
555 if (type == TYPE_TWIN) {
556 inb(card_base + TWIN_CLR_TMR1);
558 write_scc(priv, R1, 0);
559 write_scc(priv, R15, 0);
560 write_scc(priv, R0, RES_EXT_INT);
565 "dmascc: could not find irq of %s at %#3x (irq=%d)\n",
566 hw[type].name, card_base, irq);
570 /* Set up data structures */
571 for (i = 0; i < 2; i++) {
573 priv = &info->priv[i];
579 spin_lock_init(&priv->ring_lock);
580 priv->register_lock = &info->register_lock;
581 priv->card_base = card_base;
582 priv->scc_cmd = scc_base + (i ? SCCB_CMD : SCCA_CMD);
583 priv->scc_data = scc_base + (i ? SCCB_DATA : SCCA_DATA);
584 priv->tmr_cnt = tmr_base + (i ? TMR_CNT2 : TMR_CNT1);
585 priv->tmr_ctrl = tmr_base + TMR_CTRL;
/* 8253 mode words: counter 2 vs counter 1, mode 0, LSB+MSB */
586 priv->tmr_mode = i ? 0xb0 : 0x70;
587 priv->param.pclk_hz = hw[type].pclk_hz;
588 priv->param.brg_tc = -1;
589 priv->param.clocks = TCTRxCP | RCRTxCP;
590 priv->param.persist = 256;
591 priv->param.dma = -1;
592 INIT_WORK(&priv->rx_work, rx_bh, priv);
594 sprintf(dev->name, "dmascc%i", 2 * n + i);
595 SET_MODULE_OWNER(dev);
596 dev->base_addr = card_base;
598 dev->open = scc_open;
599 dev->stop = scc_close;
600 dev->do_ioctl = scc_ioctl;
601 dev->hard_start_xmit = scc_send_packet;
602 dev->get_stats = scc_get_stats;
603 dev->hard_header = ax25_hard_header;
604 dev->rebuild_header = ax25_rebuild_header;
605 dev->set_mac_address = scc_set_mac_address;
607 if (register_netdev(info->dev[0])) {
608 printk(KERN_ERR "dmascc: could not register %s\n",
612 if (register_netdev(info->dev[1])) {
613 printk(KERN_ERR "dmascc: could not register %s\n",
621 printk(KERN_INFO "dmascc: found %s (%s) at %#3x, irq %d\n",
622 hw[type].name, chipnames[chip], card_base, irq);
/* Error unwind path (labels missing from this view) */
626 unregister_netdev(info->dev[0]);
628 if (info->priv[0].type == TYPE_TWIN)
629 outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
630 write_scc(&info->priv[0], R9, FHWRES);
631 free_netdev(info->dev[1]);
633 free_netdev(info->dev[0]);
641 /* Driver functions */
643 static void write_scc(struct scc_priv *priv, int reg, int val)
646 switch (priv->type) {
649 outb(reg, priv->scc_cmd);
650 outb(val, priv->scc_cmd);
654 outb_p(reg, priv->scc_cmd);
655 outb_p(val, priv->scc_cmd);
658 spin_lock_irqsave(priv->register_lock, flags);
659 outb_p(0, priv->card_base + PI_DREQ_MASK);
661 outb_p(reg, priv->scc_cmd);
662 outb_p(val, priv->scc_cmd);
663 outb(1, priv->card_base + PI_DREQ_MASK);
664 spin_unlock_irqrestore(priv->register_lock, flags);
670 static void write_scc_data(struct scc_priv *priv, int val, int fast)
673 switch (priv->type) {
675 outb(val, priv->scc_data);
678 outb_p(val, priv->scc_data);
682 outb_p(val, priv->scc_data);
684 spin_lock_irqsave(priv->register_lock, flags);
685 outb_p(0, priv->card_base + PI_DREQ_MASK);
686 outb_p(val, priv->scc_data);
687 outb(1, priv->card_base + PI_DREQ_MASK);
688 spin_unlock_irqrestore(priv->register_lock, flags);
695 static int read_scc(struct scc_priv *priv, int reg)
699 switch (priv->type) {
702 outb(reg, priv->scc_cmd);
703 return inb(priv->scc_cmd);
706 outb_p(reg, priv->scc_cmd);
707 return inb_p(priv->scc_cmd);
709 spin_lock_irqsave(priv->register_lock, flags);
710 outb_p(0, priv->card_base + PI_DREQ_MASK);
712 outb_p(reg, priv->scc_cmd);
713 rc = inb_p(priv->scc_cmd);
714 outb(1, priv->card_base + PI_DREQ_MASK);
715 spin_unlock_irqrestore(priv->register_lock, flags);
721 static int read_scc_data(struct scc_priv *priv)
725 switch (priv->type) {
727 return inb(priv->scc_data);
729 return inb_p(priv->scc_data);
731 spin_lock_irqsave(priv->register_lock, flags);
732 outb_p(0, priv->card_base + PI_DREQ_MASK);
733 rc = inb_p(priv->scc_data);
734 outb(1, priv->card_base + PI_DREQ_MASK);
735 spin_unlock_irqrestore(priv->register_lock, flags);
/*
 * net_device open callback: acquire the shared IRQ (first channel only)
 * and the channel's DMA channel if configured, reset ring-buffer state,
 * program the Z8530/Z85C30/Z85230 for SDLC operation (chip-variant
 * dependent WR7' setup), configure the baud-rate generator and clock
 * routing, enable DCD interrupts, and start the TX queue.
 *
 * NOTE(review): this view is incomplete — several statements (error
 * returns, irq_used increment, some register writes) are missing.
 * Annotated in place only; do not treat as compilable.
 */
741 static int scc_open(struct net_device *dev)
743 struct scc_priv *priv = dev->priv;
744 struct scc_info *info = priv->info;
745 int card_base = priv->card_base;
747 /* Request IRQ if not already used by other channel */
748 if (!info->irq_used) {
749 if (request_irq(dev->irq, scc_isr, 0, "dmascc", info)) {
755 /* Request DMA if required */
756 if (priv->param.dma >= 0) {
757 if (request_dma(priv->param.dma, "dmascc")) {
/* DMA unavailable: undo the IRQ claim before failing */
758 if (--info->irq_used == 0)
759 free_irq(dev->irq, info);
762 unsigned long flags = claim_dma_lock();
763 clear_dma_ff(priv->param.dma);
764 release_dma_lock(flags);
768 /* Initialize local variables */
771 priv->rx_head = priv->rx_tail = priv->rx_count = 0;
773 priv->tx_head = priv->tx_tail = priv->tx_count = 0;
/* Reset only this channel; keep master interrupt enable */
777 write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
778 /* X1 clock, SDLC mode */
779 write_scc(priv, R4, SDLC | X1CLK);
781 write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
782 /* 8 bit RX char, RX disable */
783 write_scc(priv, R3, Rx8);
784 /* 8 bit TX char, TX disable */
785 write_scc(priv, R5, Tx8);
786 /* SDLC address field */
787 write_scc(priv, R6, 0);
789 write_scc(priv, R7, FLAG);
790 switch (priv->chip) {
793 write_scc(priv, R15, SHDLCE);
795 write_scc(priv, R7, AUTOEOM);
796 write_scc(priv, R15, 0);
800 write_scc(priv, R15, SHDLCE);
801 /* The following bits are set (see 2.5.2.1):
802 - Automatic EOM reset
803 - Interrupt request if RX FIFO is half full
804 This bit should be ignored in DMA mode (according to the
805 documentation), but actually isn't. The receiver doesn't work if
806 it is set. Thus, we have to clear it in DMA mode.
807 - Interrupt/DMA request if TX FIFO is completely empty
808 a) If set, the ESCC behaves as if it had no TX FIFO (Z85C30
810 b) If cleared, DMA requests may follow each other very quickly,
811 filling up the TX FIFO.
812 Advantage: TX works even in case of high bus latency.
813 Disadvantage: Edge-triggered DMA request circuitry may miss
814 a request. No more data is delivered, resulting
815 in a TX FIFO underrun.
816 Both PI2 and S5SCC/DMA seem to work fine with TXFIFOE cleared.
817 The PackeTwin doesn't. I don't know about the PI, but let's
818 assume it behaves like the PI2.
820 if (priv->param.dma >= 0) {
821 if (priv->type == TYPE_TWIN)
822 write_scc(priv, R7, AUTOEOM | TXFIFOE);
824 write_scc(priv, R7, AUTOEOM);
826 write_scc(priv, R7, AUTOEOM | RXFIFOH);
828 write_scc(priv, R15, 0);
831 /* Preset CRC, NRZ(I) encoding */
832 write_scc(priv, R10, CRCPS | (priv->param.nrzi ? NRZI : NRZ));
834 /* Configure baud rate generator */
835 if (priv->param.brg_tc >= 0) {
836 /* Program BR generator */
837 write_scc(priv, R12, priv->param.brg_tc & 0xFF);
838 write_scc(priv, R13, (priv->param.brg_tc >> 8) & 0xFF);
839 /* BRG source = SYS CLK; enable BRG; DTR REQ function (required by
840 PackeTwin, not connected on the PI2); set DPLL source to BRG */
841 write_scc(priv, R14, SSBR | DTRREQ | BRSRC | BRENABL);
843 write_scc(priv, R14, SEARCH | DTRREQ | BRSRC | BRENABL);
845 /* Disable BR generator */
846 write_scc(priv, R14, DTRREQ | BRSRC);
849 /* Configure clocks */
850 if (priv->type == TYPE_TWIN) {
851 /* Disable external TX clock receiver */
852 outb((info->twin_serial_cfg &=
853 ~(priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
854 card_base + TWIN_SERIAL_CFG);
856 write_scc(priv, R11, priv->param.clocks);
857 if ((priv->type == TYPE_TWIN) && !(priv->param.clocks & TRxCOI)) {
858 /* Enable external TX clock receiver */
859 outb((info->twin_serial_cfg |=
860 (priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
861 card_base + TWIN_SERIAL_CFG);
864 /* Configure PackeTwin */
865 if (priv->type == TYPE_TWIN) {
866 /* Assert DTR, enable interrupts */
867 outb((info->twin_serial_cfg |= TWIN_EI |
868 (priv->channel ? TWIN_DTRB_ON : TWIN_DTRA_ON)),
869 card_base + TWIN_SERIAL_CFG);
872 /* Read current status */
873 priv->rr0 = read_scc(priv, R0);
874 /* Enable DCD interrupt */
875 write_scc(priv, R15, DCDIE);
877 netif_start_queue(dev);
883 static int scc_close(struct net_device *dev)
885 struct scc_priv *priv = dev->priv;
886 struct scc_info *info = priv->info;
887 int card_base = priv->card_base;
889 netif_stop_queue(dev);
891 if (priv->type == TYPE_TWIN) {
893 outb((info->twin_serial_cfg &=
894 (priv->channel ? ~TWIN_DTRB_ON : ~TWIN_DTRA_ON)),
895 card_base + TWIN_SERIAL_CFG);
898 /* Reset channel, free DMA and IRQ */
899 write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
900 if (priv->param.dma >= 0) {
901 if (priv->type == TYPE_TWIN)
902 outb(0, card_base + TWIN_DMA_CFG);
903 free_dma(priv->param.dma);
905 if (--info->irq_used == 0)
906 free_irq(dev->irq, info);
912 static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
914 struct scc_priv *priv = dev->priv;
919 (ifr->ifr_data, &priv->param,
920 sizeof(struct scc_param)))
924 if (!capable(CAP_NET_ADMIN))
926 if (netif_running(dev))
929 (&priv->param, ifr->ifr_data,
930 sizeof(struct scc_param)))
/*
 * hard_start_xmit: copy the AX.25 frame (minus the KISS command byte at
 * skb->data[0]) into the next free TX ring buffer, advance the ring
 * head under ring_lock, and if the transmitter is idle, assert RTS and
 * start the TX-delay timer. The queue stays stopped when the ring is
 * full; es_isr()/tm_isr() wake it again.
 *
 * NOTE(review): this view is incomplete — the skb free, tx_count
 * increment, and return are missing. Annotated in place only.
 */
939 static int scc_send_packet(struct sk_buff *skb, struct net_device *dev)
941 struct scc_priv *priv = dev->priv;
945 /* Temporarily stop the scheduler feeding us packets */
946 netif_stop_queue(dev);
948 /* Transfer data to DMA buffer */
/* skb->data[0] is the KISS command byte and is not transmitted */
950 memcpy(priv->tx_buf[i], skb->data + 1, skb->len - 1);
951 priv->tx_len[i] = skb->len - 1;
953 /* Clear interrupts while we touch our circular buffers */
955 spin_lock_irqsave(&priv->ring_lock, flags);
956 /* Move the ring buffer's head */
957 priv->tx_head = (i + 1) % NUM_TX_BUF;
960 /* If we just filled up the last buffer, leave queue stopped.
961 The higher layers must wait until we have a DMA buffer
962 to accept the data. */
963 if (priv->tx_count < NUM_TX_BUF)
964 netif_wake_queue(dev);
966 /* Set new TX state */
967 if (priv->state == IDLE) {
968 /* Assert RTS, start timer */
969 priv->state = TX_HEAD;
970 priv->tx_start = jiffies;
971 write_scc(priv, R5, TxCRC_ENAB | RTS | TxENAB | Tx8);
972 write_scc(priv, R15, 0);
973 start_timer(priv, priv->param.txdelay, 0);
976 /* Turn interrupts back on and free buffer */
977 spin_unlock_irqrestore(&priv->ring_lock, flags);
984 static struct net_device_stats *scc_get_stats(struct net_device *dev)
986 struct scc_priv *priv = dev->priv;
992 static int scc_set_mac_address(struct net_device *dev, void *sa)
994 memcpy(dev->dev_addr, ((struct sockaddr *) sa)->sa_data,
/*
 * Start transmitting the frame at tx_tail. In DMA mode the first n
 * bytes (3 on a Z85230's 4-byte FIFO, 1 otherwise) are written by hand
 * to prime the FIFO before the ISA DMA controller takes over; TX
 * underrun interrupts signal completion. In PIO mode TX interrupts
 * drive tx_isr() instead. A Z8530 additionally needs its EOM latch
 * reset by hand (no AUTOEOM).
 *
 * NOTE(review): this view is incomplete — the else-branch header and
 * some R1 writes are truncated. Annotated in place only.
 */
1000 static inline void tx_on(struct scc_priv *priv)
1003 unsigned long flags;
1005 if (priv->param.dma >= 0) {
1006 n = (priv->chip == Z85230) ? 3 : 1;
1007 /* Program DMA controller */
1008 flags = claim_dma_lock();
1009 set_dma_mode(priv->param.dma, DMA_MODE_WRITE);
1010 set_dma_addr(priv->param.dma,
1011 (int) priv->tx_buf[priv->tx_tail] + n);
1012 set_dma_count(priv->param.dma,
1013 priv->tx_len[priv->tx_tail] - n);
1014 release_dma_lock(flags);
1015 /* Enable TX underrun interrupt */
1016 write_scc(priv, R15, TxUIE);
1017 /* Configure DREQ */
1018 if (priv->type == TYPE_TWIN)
1019 outb((priv->param.dma ==
1020 1) ? TWIN_DMA_HDX_T1 : TWIN_DMA_HDX_T3,
1021 priv->card_base + TWIN_DMA_CFG);
1024 EXT_INT_ENAB | WT_FN_RDYFN |
1026 /* Write first byte(s) */
/* fast=1: register lock held, DREQ masked for the PI/PI2 */
1027 spin_lock_irqsave(priv->register_lock, flags);
1028 for (i = 0; i < n; i++)
1029 write_scc_data(priv,
1030 priv->tx_buf[priv->tx_tail][i], 1);
1031 enable_dma(priv->param.dma);
1032 spin_unlock_irqrestore(priv->register_lock, flags);
1034 write_scc(priv, R15, TxUIE);
1036 EXT_INT_ENAB | WT_FN_RDYFN | TxINT_ENAB);
1039 /* Reset EOM latch if we do not have the AUTOEOM feature */
1040 if (priv->chip == Z8530)
1041 write_scc(priv, R0, RES_EOM_L);
/*
 * Enable the receiver. Drains any stale characters from the RX FIFO
 * first. In DMA mode, programs the ISA DMA channel to fill the current
 * rx_buf and enables special-condition interrupts only (end-of-frame
 * handling happens in special_condition()); in PIO mode, enables an
 * interrupt per received character. Finally resets error latches and
 * turns the receiver on with CRC checking.
 *
 * NOTE(review): this view is incomplete — the PIO-branch header and
 * rx_ptr reset are truncated. Annotated in place only.
 */
1045 static inline void rx_on(struct scc_priv *priv)
1047 unsigned long flags;
1050 while (read_scc(priv, R0) & Rx_CH_AV)
1051 read_scc_data(priv);
1053 if (priv->param.dma >= 0) {
1054 /* Program DMA controller */
1055 flags = claim_dma_lock();
1056 set_dma_mode(priv->param.dma, DMA_MODE_READ);
1057 set_dma_addr(priv->param.dma,
1058 (int) priv->rx_buf[priv->rx_head]);
1059 set_dma_count(priv->param.dma, BUF_SIZE);
1060 release_dma_lock(flags);
1061 enable_dma(priv->param.dma);
1062 /* Configure PackeTwin DMA */
1063 if (priv->type == TYPE_TWIN) {
1064 outb((priv->param.dma ==
1065 1) ? TWIN_DMA_HDX_R1 : TWIN_DMA_HDX_R3,
1066 priv->card_base + TWIN_DMA_CFG);
1068 /* Sp. cond. intr. only, ext int enable, RX DMA enable */
1069 write_scc(priv, R1, EXT_INT_ENAB | INT_ERR_Rx |
1070 WT_RDY_RT | WT_FN_RDYFN | WT_RDY_ENAB);
1072 /* Reset current frame */
1074 /* Intr. on all Rx characters and Sp. cond., ext int enable */
1075 write_scc(priv, R1, EXT_INT_ENAB | INT_ALL_Rx | WT_RDY_RT |
1078 write_scc(priv, R0, ERR_RES);
1079 write_scc(priv, R3, RxENABLE | Rx8 | RxCRC_ENAB);
1083 static inline void rx_off(struct scc_priv *priv)
1085 /* Disable receiver */
1086 write_scc(priv, R3, Rx8);
1087 /* Disable DREQ / RX interrupt */
1088 if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
1089 outb(0, priv->card_base + TWIN_DMA_CFG);
1091 write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
1093 if (priv->param.dma >= 0)
1094 disable_dma(priv->param.dma);
1098 static void start_timer(struct scc_priv *priv, int t, int r15)
1100 unsigned long flags;
1102 outb(priv->tmr_mode, priv->tmr_ctrl);
1108 outb(t & 0xFF, priv->tmr_cnt);
1109 outb((t >> 8) & 0xFF, priv->tmr_cnt);
1110 if (priv->type != TYPE_TWIN) {
1111 write_scc(priv, R15, r15 | CTSIE);
1114 restore_flags(flags);
1119 static inline unsigned char random(void)
1121 /* See "Numerical Recipes in C", second edition, p. 284 */
1122 rand = rand * 1664525L + 1013904223L;
1123 return (unsigned char) (rand >> 24);
1126 static inline void z8530_isr(struct scc_info *info)
1130 while ((is = read_scc(&info->priv[0], R3)) && i--) {
1132 rx_isr(&info->priv[0]);
1133 } else if (is & CHATxIP) {
1134 tx_isr(&info->priv[0]);
1135 } else if (is & CHAEXT) {
1136 es_isr(&info->priv[0]);
1137 } else if (is & CHBRxIP) {
1138 rx_isr(&info->priv[1]);
1139 } else if (is & CHBTxIP) {
1140 tx_isr(&info->priv[1]);
1142 es_isr(&info->priv[1]);
1144 write_scc(&info->priv[0], R0, RES_H_IUS);
1148 printk(KERN_ERR "dmascc: stuck in ISR with RR3=0x%02x.\n",
1151 /* Ok, no interrupts pending from this 8530. The INT line should
/*
 * Top-level shared interrupt handler for one board (both channels).
 * On the PackeTwin, first demultiplexes the board's interrupt register
 * (SCC vs timer 1 vs timer 2) before calling z8530_isr()/tm_isr();
 * other boards go straight to z8530_isr(). Serialized by the per-board
 * register lock.
 *
 * NOTE(review): this view is incomplete — the loop mask, the plain
 * z8530_isr() call for non-TWIN boards, and the return are missing.
 * Annotated in place only.
 */
1156 static irqreturn_t scc_isr(int irq, void *dev_id, struct pt_regs *regs)
1158 struct scc_info *info = dev_id;
1160 spin_lock(info->priv[0].register_lock);
1161 /* At this point interrupts are enabled, and the interrupt under service
1162 is already acknowledged, but masked off.
1164 Interrupt processing: We loop until we know that the IRQ line is
1165 low. If another positive edge occurs afterwards during the ISR,
1166 another interrupt will be triggered by the interrupt controller
1167 as soon as the IRQ level is enabled again (see asm/irq.h).
1169 Bottom-half handlers will be processed after scc_isr(). This is
1170 important, since we only have small ringbuffers and want new data
1171 to be fetched/delivered immediately. */
1173 if (info->priv[0].type == TYPE_TWIN) {
1174 int is, card_base = info->priv[0].card_base;
/* TWIN_INT_REG is active-low, hence the inversion */
1175 while ((is = ~inb(card_base + TWIN_INT_REG)) &
1177 if (is & TWIN_SCC_MSK) {
1179 } else if (is & TWIN_TMR1_MSK) {
1180 inb(card_base + TWIN_CLR_TMR1);
1181 tm_isr(&info->priv[0]);
1183 inb(card_base + TWIN_CLR_TMR2);
1184 tm_isr(&info->priv[1]);
1189 spin_unlock(info->priv[0].register_lock);
1194 static void rx_isr(struct scc_priv *priv)
1196 if (priv->param.dma >= 0) {
1197 /* Check special condition and perform error reset. See 2.4.7.5. */
1198 special_condition(priv, read_scc(priv, R1));
1199 write_scc(priv, R0, ERR_RES);
1201 /* Check special condition for each character. Error reset not necessary.
1202 Same algorithm for SCC and ESCC. See 2.4.7.1 and 2.4.7.4. */
1204 while (read_scc(priv, R0) & Rx_CH_AV) {
1205 rc = read_scc(priv, R1);
1206 if (priv->rx_ptr < BUF_SIZE)
1207 priv->rx_buf[priv->rx_head][priv->
1209 read_scc_data(priv);
1212 read_scc_data(priv);
1214 special_condition(priv, rc);
/*
 * Evaluate an RR1 special-condition status byte: record receiver
 * overruns, and on end-of-frame compute the byte count (from DMA
 * residue or rx_ptr, minus the 2 CRC bytes), account errors (overrun /
 * CRC / ring-full), enqueue good frames for rx_bh(), and re-arm the
 * DMA controller for the next frame.
 *
 * NOTE(review): this view is incomplete — the overrun-set, minimum-
 * length check, ring-head advance, and rx_ptr/rx_over resets are
 * missing. Annotated in place only.
 */
1220 static void special_condition(struct scc_priv *priv, int rc)
1223 unsigned long flags;
1225 /* See Figure 2-15. Only overrun and EOF need to be checked. */
1228 /* Receiver overrun */
1230 if (priv->param.dma < 0)
1231 write_scc(priv, R0, ERR_RES);
1232 } else if (rc & END_FR) {
1233 /* End of frame. Get byte count */
1234 if (priv->param.dma >= 0) {
1235 flags = claim_dma_lock();
/* -2 drops the CRC bytes included in the DMA transfer */
1236 cb = BUF_SIZE - get_dma_residue(priv->param.dma) -
1238 release_dma_lock(flags);
1240 cb = priv->rx_ptr - 2;
1242 if (priv->rx_over) {
1243 /* We had an overrun */
1244 priv->stats.rx_errors++;
1245 if (priv->rx_over == 2)
1246 priv->stats.rx_length_errors++;
1248 priv->stats.rx_fifo_errors++;
1250 } else if (rc & CRC_ERR) {
1251 /* Count invalid CRC only if packet length >= minimum */
1253 priv->stats.rx_errors++;
1254 priv->stats.rx_crc_errors++;
1258 if (priv->rx_count < NUM_RX_BUF - 1) {
1259 /* Put good frame in FIFO */
1260 priv->rx_len[priv->rx_head] = cb;
1265 schedule_work(&priv->rx_work);
1267 priv->stats.rx_errors++;
1268 priv->stats.rx_over_errors++;
1272 /* Get ready for new frame */
1273 if (priv->param.dma >= 0) {
1274 flags = claim_dma_lock();
1275 set_dma_addr(priv->param.dma,
1276 (int) priv->rx_buf[priv->rx_head]);
1277 set_dma_count(priv->param.dma, BUF_SIZE);
1278 release_dma_lock(flags);
/*
 * Workqueue bottom half: drain completed frames from the RX ring into
 * freshly allocated skbs and hand them to the network stack. The KISS
 * data indicator (byte 0) is prepended before the payload copy. The
 * ring lock is dropped around the allocation/copy and re-taken to
 * advance the tail.
 *
 * NOTE(review): this view is incomplete — the allocation-failure
 * branch, data[0] assignment, netif_rx() call, and rx_count decrement
 * are missing. Annotated in place only.
 */
1286 static void rx_bh(void *arg)
1288 struct scc_priv *priv = arg;
1289 int i = priv->rx_tail;
1291 unsigned long flags;
1292 struct sk_buff *skb;
1293 unsigned char *data;
1295 spin_lock_irqsave(&priv->ring_lock, flags);
1296 while (priv->rx_count) {
1297 spin_unlock_irqrestore(&priv->ring_lock, flags);
1298 cb = priv->rx_len[i];
1299 /* Allocate buffer */
/* +1 for the KISS data indicator byte prepended below */
1300 skb = dev_alloc_skb(cb + 1);
1303 priv->stats.rx_dropped++;
1306 data = skb_put(skb, cb + 1);
1308 memcpy(&data[1], priv->rx_buf[i], cb);
1309 skb->protocol = ax25_type_trans(skb, priv->dev);
1311 priv->dev->last_rx = jiffies;
1312 priv->stats.rx_packets++;
1313 priv->stats.rx_bytes += cb;
1315 spin_lock_irqsave(&priv->ring_lock, flags);
1317 priv->rx_tail = i = (i + 1) % NUM_RX_BUF;
1320 spin_unlock_irqrestore(&priv->ring_lock, flags);
1324 static void tx_isr(struct scc_priv *priv)
1326 int i = priv->tx_tail, p = priv->tx_ptr;
1328 /* Suspend TX interrupts if we don't want to send anything.
1330 if (p == priv->tx_len[i]) {
1331 write_scc(priv, R0, RES_Tx_P);
1335 /* Write characters */
1336 while ((read_scc(priv, R0) & Tx_BUF_EMP) && p < priv->tx_len[i]) {
1337 write_scc_data(priv, priv->tx_buf[i][p++], 0);
1340 /* Reset EOM latch of Z8530 */
1341 if (!priv->tx_ptr && p && priv->chip == Z8530)
1342 write_scc(priv, R0, RES_EOM_L);
/*
 * External/status interrupt: handles TX underrun (end of a DMA or PIO
 * frame — account stats, advance the TX ring, decide whether to pause
 * before the next frame or drop RTS via the tail timer), DCD
 * transitions (start the DCD-on/off settling timers), and CTS
 * transitions (timer expiry on non-PackeTwin boards).
 *
 * NOTE(review): this view is incomplete — the underrun condition test,
 * res==0 success check, tx_count decrement, and the tm_isr() call for
 * CTS are missing. Annotated in place only.
 */
1348 static void es_isr(struct scc_priv *priv)
1350 int i, rr0, drr0, res;
1351 unsigned long flags;
1353 /* Read status, reset interrupt bit (open latches) */
1354 rr0 = read_scc(priv, R0);
1355 write_scc(priv, R0, RES_EXT_INT);
/* drr0 holds the bits that changed since the last snapshot */
1356 drr0 = priv->rr0 ^ rr0;
1359 /* Transmit underrun (2.4.9.6). We can't check the TxEOM flag, since
1360 it might have already been cleared again by AUTOEOM. */
1361 if (priv->state == TX_DATA) {
1362 /* Get remaining bytes */
1364 if (priv->param.dma >= 0) {
1365 disable_dma(priv->param.dma);
1366 flags = claim_dma_lock();
1367 res = get_dma_residue(priv->param.dma);
1368 release_dma_lock(flags);
1370 res = priv->tx_len[i] - priv->tx_ptr;
1373 /* Disable DREQ / TX interrupt */
1374 if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
1375 outb(0, priv->card_base + TWIN_DMA_CFG);
1377 write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
/* res != 0 means the frame was cut short: a real underrun */
1379 /* Update packet statistics */
1380 priv->stats.tx_errors++;
1381 priv->stats.tx_fifo_errors++;
1382 /* Other underrun interrupts may already be waiting */
1383 write_scc(priv, R0, RES_EXT_INT);
1384 write_scc(priv, R0, RES_EXT_INT);
1386 /* Update packet statistics */
1387 priv->stats.tx_packets++;
1388 priv->stats.tx_bytes += priv->tx_len[i];
1389 /* Remove frame from FIFO */
1390 priv->tx_tail = (i + 1) % NUM_TX_BUF;
1392 /* Inform upper layers */
1393 netif_wake_queue(priv->dev);
1396 write_scc(priv, R15, 0);
1397 if (priv->tx_count &&
1398 (jiffies - priv->tx_start) < priv->param.txtimeout) {
1399 priv->state = TX_PAUSE;
1400 start_timer(priv, priv->param.txpause, 0);
1402 priv->state = TX_TAIL;
1403 start_timer(priv, priv->param.txtail, 0);
1407 /* DCD transition */
1410 switch (priv->state) {
1413 priv->state = DCD_ON;
1414 write_scc(priv, R15, 0);
1415 start_timer(priv, priv->param.dcdon, 0);
1418 switch (priv->state) {
1421 priv->state = DCD_OFF;
1422 write_scc(priv, R15, 0);
1423 start_timer(priv, priv->param.dcdoff, 0);
1428 /* CTS transition */
/* Non-TWIN boards route the channel timer to /CTS: this is timer expiry */
1429 if ((drr0 & CTS) && (~rr0 & CTS) && priv->type != TYPE_TWIN)
/*
 * Channel-timer expiry: advances the p-persistent CSMA / TX state
 * machine. TX_HEAD -> start sending data (tx_on); TX_TAIL -> drop TX
 * and time RTS release; RTS_OFF/DCD_OFF -> re-enable RX and either go
 * back to receive or contend for the channel; TX_PAUSE -> start the
 * next queued frame or release; WAIT/DCD_ON -> sample DCD and apply
 * persistence/slottime backoff.
 *
 * NOTE(review): this view is incomplete — several case labels, breaks,
 * rx_on() calls, and the persistence comparison are missing. Annotated
 * in place only.
 */
1435 static void tm_isr(struct scc_priv *priv)
1437 switch (priv->state) {
1441 priv->state = TX_DATA;
1444 write_scc(priv, R5, TxCRC_ENAB | Tx8);
1445 priv->state = RTS_OFF;
1446 if (priv->type != TYPE_TWIN)
1447 write_scc(priv, R15, 0);
1448 start_timer(priv, priv->param.rtsoff, 0);
1451 write_scc(priv, R15, DCDIE);
1452 priv->rr0 = read_scc(priv, R0);
1453 if (priv->rr0 & DCD) {
/* Channel busy while we were transmitting: count a collision */
1454 priv->stats.collisions++;
1456 priv->state = RX_ON;
1459 start_timer(priv, priv->param.waittime, DCDIE);
1463 if (priv->tx_count) {
1464 priv->state = TX_HEAD;
1465 priv->tx_start = jiffies;
1467 TxCRC_ENAB | RTS | TxENAB | Tx8);
1468 write_scc(priv, R15, 0);
1469 start_timer(priv, priv->param.txdelay, 0);
1472 if (priv->type != TYPE_TWIN)
1473 write_scc(priv, R15, DCDIE);
1478 write_scc(priv, R15, DCDIE);
1479 priv->rr0 = read_scc(priv, R0);
1480 if (priv->rr0 & DCD) {
1482 priv->state = RX_ON;
/* p-persistence: transmit with probability persist/256, else back off */
1486 random() / priv->param.persist *
1487 priv->param.slottime, DCDIE);