/*
 * Driver for high-speed SCC boards (those with DMA support)
 * Copyright (C) 1997-2000 Klaus Kudielka
 *
 * S5SCC/DMA support by Janko Koleznik S52HI
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <asm/atomic.h>
#include <asm/bitops.h>
#include <asm/dma.h>		/* request_dma(), set_dma_mode(), ... */
#include <asm/io.h>		/* inb()/outb() */
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <net/ax25.h>		/* AX25_MAX_HEADER_LEN, ax25_type_trans(), ... */

#include "z8530.h"		/* Z8530 register and bit definitions (R0..R15, FHWRES, ...) */
/* Number of buffers per channel */

#define NUM_TX_BUF      2	/* NUM_TX_BUF >= 1 (min. 2 recommended) */
#define NUM_RX_BUF      6	/* NUM_RX_BUF >= 1 (min. 2 recommended) */
#define BUF_SIZE        1576	/* BUF_SIZE >= mtu + hard_header_len */


/* Cards supported (initializer order matches struct scc_hardware below) */

#define HW_PI           { "Ottawa PI", 0x300, 0x20, 0x10, 8, \
			    0, 8, 1843200, 3686400 }
#define HW_PI2          { "Ottawa PI2", 0x300, 0x20, 0x10, 8, \
			    0, 8, 3686400, 7372800 }
#define HW_TWIN         { "Gracilis PackeTwin", 0x200, 0x10, 0x10, 32, \
			    0, 4, 6144000, 6144000 }
#define HW_S5           { "S5SCC/DMA", 0x200, 0x10, 0x10, 32, \
			    0, 8, 4915200, 9830400 }

#define HARDWARE        { HW_PI, HW_PI2, HW_TWIN, HW_S5 }

#define TMR_0_HZ        25600	/* Frequency of timer 0 */

/* Card types (indices into HARDWARE) */
#define TYPE_PI         0
#define TYPE_PI2        1
#define TYPE_TWIN       2
#define TYPE_S5         3
#define NUM_TYPES       4

#define MAX_NUM_DEVS    32


/* SCC chips supported (indices into CHIPNAMES) */

#define Z8530           0
#define Z85C30          1
#define Z85230          2

#define CHIPNAMES       { "Z8530", "Z85C30", "Z85230" }
/* 8530 registers relative to card base */
#define SCCB_CMD        0x00
#define SCCB_DATA       0x01
#define SCCA_CMD        0x02
#define SCCA_DATA       0x03

/* 8253/8254 registers relative to card base */
#define TMR_CNT0        0x00
#define TMR_CNT1        0x01
#define TMR_CNT2        0x02
#define TMR_CTRL        0x03

/* Additional PI/PI2 registers relative to card base */
#define PI_DREQ_MASK    0x04

/* Additional PackeTwin registers relative to card base */
#define TWIN_INT_REG    0x08	/* read */
#define TWIN_CLR_TMR1   0x09	/* read */
#define TWIN_CLR_TMR2   0x0a	/* read */
#define TWIN_SPARE_1    0x0b	/* read */
#define TWIN_DMA_CFG    0x08	/* write */
#define TWIN_SERIAL_CFG 0x09	/* write */
#define TWIN_DMA_CLR_FF 0x0a	/* write */
#define TWIN_SPARE_2    0x0b	/* write */
/* PackeTwin I/O register values */

/* INT_REG */
#define TWIN_SCC_MSK       0x01
#define TWIN_TMR1_MSK      0x02
#define TWIN_TMR2_MSK      0x04
#define TWIN_INT_MSK       0x07

/* SERIAL_CFG */
#define TWIN_DTRA_ON       0x01
#define TWIN_DTRB_ON       0x02
#define TWIN_EXTCLKA       0x04
#define TWIN_EXTCLKB       0x08
#define TWIN_LOOPA_ON      0x10
#define TWIN_LOOPB_ON      0x20
#define TWIN_EI            0x80

/* DMA_CFG */
#define TWIN_DMA_HDX_T1    0x08
#define TWIN_DMA_HDX_R1    0x0a
#define TWIN_DMA_HDX_T3    0x14
#define TWIN_DMA_HDX_R3    0x16
#define TWIN_DMA_FDX_T3R1  0x1b
#define TWIN_DMA_FDX_T1R3  0x1d


/* Status values */

#define IDLE      0
#define TX_HEAD   1
#define TX_DATA   2
#define TX_PAUSE  3
#define TX_TAIL   4
#define RTS_OFF   5
#define WAIT      6
#define DCD_ON    7
#define RX_ON     8
#define DCD_OFF   9

/* Ioctls */

#define SIOCGSCCPARAM SIOCDEVPRIVATE
#define SIOCSSCCPARAM (SIOCDEVPRIVATE+1)

/* Data types */

struct scc_param {
	int pclk_hz;		/* frequency of BRG input (don't change) */
	int brg_tc;		/* BRG terminal count; BRG disabled if < 0 */
	int nrzi;		/* 0 (nrz), 1 (nrzi) */
	int clocks;		/* see dmascc_cfg documentation */
	int txdelay;		/* [1/TMR_0_HZ] */
	int txtimeout;		/* [1/HZ] */
	int txtail;		/* [1/TMR_0_HZ] */
	int waittime;		/* [1/TMR_0_HZ] */
	int slottime;		/* [1/TMR_0_HZ] */
	int persist;		/* 1 ... 256 */
	int dma;		/* -1 (disable), 0, 1, 3 */
	int txpause;		/* [1/TMR_0_HZ] */
	int rtsoff;		/* [1/TMR_0_HZ] */
	int dcdon;		/* [1/TMR_0_HZ] */
	int dcdoff;		/* [1/TMR_0_HZ] */
};
struct scc_hardware {
	char *name;
	int io_region;
	int io_delta;
	int io_size;
	int num_devs;
	int scc_offset;
	int tmr_offset;
	int tmr_hz;
	int pclk_hz;
};

struct scc_priv {
	int type;
	int chip;
	struct net_device *dev;
	struct scc_info *info;
	struct net_device_stats stats;
	int channel;
	int card_base, scc_cmd, scc_data;
	int tmr_cnt, tmr_ctrl, tmr_mode;
	struct scc_param param;
	char rx_buf[NUM_RX_BUF][BUF_SIZE];
	int rx_len[NUM_RX_BUF];
	int rx_ptr;
	struct work_struct rx_work;
	int rx_head, rx_tail, rx_count;
	int rx_over;
	char tx_buf[NUM_TX_BUF][BUF_SIZE];
	int tx_len[NUM_TX_BUF];
	int tx_ptr;
	int tx_head, tx_tail, tx_count;
	int state;
	unsigned long tx_start;
	int rr0;
	spinlock_t *register_lock;	/* Per scc_info */
	spinlock_t ring_lock;
};

struct scc_info {
	int irq_used;
	int twin_serial_cfg;
	struct net_device *dev[2];
	struct scc_priv priv[2];
	struct scc_info *next;
	spinlock_t register_lock;	/* Per device register lock */
};
/* Function declarations */
static int setup_adapter(int card_base, int type, int n) __init;

static void write_scc(struct scc_priv *priv, int reg, int val);
static void write_scc_data(struct scc_priv *priv, int val, int fast);
static int read_scc(struct scc_priv *priv, int reg);
static int read_scc_data(struct scc_priv *priv);

static int scc_open(struct net_device *dev);
static int scc_close(struct net_device *dev);
static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static int scc_send_packet(struct sk_buff *skb, struct net_device *dev);
static struct net_device_stats *scc_get_stats(struct net_device *dev);
static int scc_set_mac_address(struct net_device *dev, void *sa);

static inline void tx_on(struct scc_priv *priv);
static inline void rx_on(struct scc_priv *priv);
static inline void rx_off(struct scc_priv *priv);
static void start_timer(struct scc_priv *priv, int t, int r15);
static inline unsigned char random(void);

static inline void z8530_isr(struct scc_info *info);
static irqreturn_t scc_isr(int irq, void *dev_id, struct pt_regs *regs);
static void rx_isr(struct scc_priv *priv);
static void special_condition(struct scc_priv *priv, int rc);
static void rx_bh(void *arg);
static void tx_isr(struct scc_priv *priv);
static void es_isr(struct scc_priv *priv);
static void tm_isr(struct scc_priv *priv);
/* Initialization variables */

static int io[MAX_NUM_DEVS] __initdata = { 0, };

/* Beware! hw[] is also used in cleanup_module(). */
static struct scc_hardware hw[NUM_TYPES] __initdata_or_module = HARDWARE;
static char ax25_broadcast[7] __initdata =
    { 'Q' << 1, 'S' << 1, 'T' << 1, ' ' << 1, ' ' << 1, ' ' << 1, '0' << 1 };
static char ax25_test[7] __initdata =
    { 'L' << 1, 'I' << 1, 'N' << 1, 'U' << 1, 'X' << 1, ' ' << 1, '1' << 1 };

/* Global variables */

static struct scc_info *first;
static unsigned long rand;


MODULE_AUTHOR("Klaus Kudielka");
MODULE_DESCRIPTION("Driver for high-speed SCC boards");
MODULE_PARM(io, "1-" __MODULE_STRING(MAX_NUM_DEVS) "i");
MODULE_LICENSE("GPL");
static void __exit dmascc_exit(void)
{
	int i;
	struct scc_info *info;

	while (first) {
		info = first;
		/* Unregister devices */
		for (i = 0; i < 2; i++)
			unregister_netdev(info->dev[i]);
		/* Reset board */
		if (info->priv[0].type == TYPE_TWIN)
			outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
		write_scc(&info->priv[0], R9, FHWRES);
		release_region(info->dev[0]->base_addr,
			       hw[info->priv[0].type].io_size);
		for (i = 0; i < 2; i++)
			free_netdev(info->dev[i]);
		/* Free memory */
		first = info->next;
		kfree(info);
	}
}
static int __init dmascc_init(void)
{
	int h, i, j, n;
	int base[MAX_NUM_DEVS], tcmd[MAX_NUM_DEVS], t0[MAX_NUM_DEVS],
	    t1[MAX_NUM_DEVS];
	unsigned t_val;
	unsigned long time, start[MAX_NUM_DEVS], delay[MAX_NUM_DEVS],
	    counting[MAX_NUM_DEVS];

	/* Initialize random number generator */
	rand = jiffies;
	/* Cards found = 0 */
	n = 0;
	/* Warning message */
	if (!io[0])
		printk(KERN_INFO "dmascc: autoprobing (dangerous)\n");

	/* Run autodetection for each card type */
	for (h = 0; h < NUM_TYPES; h++) {

		if (io[0]) {
			/* User-specified I/O address regions */
			for (i = 0; i < hw[h].num_devs; i++)
				base[i] = 0;
			for (i = 0; i < MAX_NUM_DEVS && io[i]; i++) {
				j = (io[i] -
				     hw[h].io_region) / hw[h].io_delta;
				if (j >= 0 && j < hw[h].num_devs
				    && hw[h].io_region +
				    j * hw[h].io_delta == io[i])
					base[j] = io[i];
			}
		} else {
			/* Default I/O address regions */
			for (i = 0; i < hw[h].num_devs; i++)
				base[i] =
				    hw[h].io_region + i * hw[h].io_delta;
		}

		/* Check valid I/O address regions */
		for (i = 0; i < hw[h].num_devs; i++)
			if (base[i]) {
				if (!request_region
				    (base[i], hw[h].io_size, "dmascc"))
					base[i] = 0;
				else {
					tcmd[i] = base[i] + hw[h].tmr_offset + TMR_CTRL;
					t0[i] = base[i] + hw[h].tmr_offset + TMR_CNT0;
					t1[i] = base[i] + hw[h].tmr_offset + TMR_CNT1;
				}
			}

		/* Start timers */
		for (i = 0; i < hw[h].num_devs; i++)
			if (base[i]) {
				/* Timer 0: LSB+MSB, Mode 3, TMR_0_HZ */
				outb(0x36, tcmd[i]);
				outb((hw[h].tmr_hz / TMR_0_HZ) & 0xFF,
				     t0[i]);
				outb((hw[h].tmr_hz / TMR_0_HZ) >> 8,
				     t0[i]);
				/* Timer 1: LSB+MSB, Mode 0, HZ/10 */
				outb(0x70, tcmd[i]);
				outb((TMR_0_HZ / HZ * 10) & 0xFF, t1[i]);
				outb((TMR_0_HZ / HZ * 10) >> 8, t1[i]);
				start[i] = jiffies;
				delay[i] = 0;
				counting[i] = 1;
				/* Timer 2: LSB+MSB, Mode 0 */
				outb(0xb0, tcmd[i]);
			}
		time = jiffies;
		/* Wait until counter registers are loaded */
		udelay(2000000 / TMR_0_HZ);
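		/* Timer 0 divides the on-board clock down to TMR_0_HZ and, on
		   the supported cards, clocks timer 1, which has just been
		   loaded with TMR_0_HZ / HZ * 10 counts.  On real hardware it
		   should therefore reach zero after roughly 10 jiffies; the
		   loop below samples the counter until it stops or wraps and
		   records the elapsed time, and the evaluation step accepts
		   only boards measuring 9..11 jiffies. */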
		/* Timing loop */
		while (jiffies - time < 13) {
			for (i = 0; i < hw[h].num_devs; i++)
				if (base[i] && counting[i]) {
					/* Read back Timer 1: latch; read LSB; read MSB */
					outb(0x40, tcmd[i]);
					t_val = inb(t1[i]) + (inb(t1[i]) << 8);
					/* Also check whether counter did wrap */
					if (t_val == 0
					    || t_val > TMR_0_HZ / HZ * 10)
						counting[i] = 0;
					delay[i] = jiffies - start[i];
				}
		}

		/* Evaluate measurements */
		for (i = 0; i < hw[h].num_devs; i++)
			if (base[i]) {
				if ((delay[i] >= 9 && delay[i] <= 11) &&
				    /* Ok, we have found an adapter */
				    (setup_adapter(base[i], h, n) == 0))
					n++;
				else
					release_region(base[i],
						       hw[h].io_size);
			}
	}			/* NUM_TYPES */

	/* If any adapter was successfully initialized, return ok */
	if (n)
		return 0;

	/* If no adapter found, return error */
	printk(KERN_INFO "dmascc: no adapters found\n");
	return -EIO;
}

module_init(dmascc_init);
module_exit(dmascc_exit);
static void dev_setup(struct net_device *dev)
{
	dev->type = ARPHRD_AX25;
	dev->hard_header_len = AX25_MAX_HEADER_LEN;
	dev->mtu = 1500;
	dev->addr_len = AX25_ADDR_LEN;
	dev->tx_queue_len = 64;
	memcpy(dev->broadcast, ax25_broadcast, AX25_ADDR_LEN);
	memcpy(dev->dev_addr, ax25_test, AX25_ADDR_LEN);
}
static int __init setup_adapter(int card_base, int type, int n)
{
	int i, irq, chip;
	struct scc_info *info;
	struct net_device *dev;
	struct scc_priv *priv;
	unsigned long time;
	unsigned int irqs;
	int tmr_base = card_base + hw[type].tmr_offset;
	int scc_base = card_base + hw[type].scc_offset;
	char *chipnames[] = CHIPNAMES;

	/* Allocate memory */
	info = kmalloc(sizeof(struct scc_info), GFP_KERNEL | GFP_DMA);
	if (!info) {
		printk(KERN_ERR "dmascc: "
		       "could not allocate memory for %s at %#3x\n",
		       hw[type].name, card_base);
		goto out;
	}

	/* Initialize what is necessary for write_scc and write_scc_data */
	memset(info, 0, sizeof(struct scc_info));

	info->dev[0] = alloc_netdev(0, "", dev_setup);
	if (!info->dev[0]) {
		printk(KERN_ERR "dmascc: "
		       "could not allocate memory for %s at %#3x\n",
		       hw[type].name, card_base);
		goto out1;
	}

	info->dev[1] = alloc_netdev(0, "", dev_setup);
	if (!info->dev[1]) {
		printk(KERN_ERR "dmascc: "
		       "could not allocate memory for %s at %#3x\n",
		       hw[type].name, card_base);
		goto out2;
	}
	spin_lock_init(&info->register_lock);

	priv = &info->priv[0];
	priv->type = type;
	priv->card_base = card_base;
	priv->scc_cmd = scc_base + SCCA_CMD;
	priv->scc_data = scc_base + SCCA_DATA;
	priv->register_lock = &info->register_lock;
	/* Reset SCC */
	write_scc(priv, R9, FHWRES | MIE | NV);

	/* Determine type of chip by enabling SDLC/HDLC enhancements */
	write_scc(priv, R15, SHDLCE);
	if (!read_scc(priv, R15)) {
		/* WR7' not present. This is an ordinary Z8530 SCC. */
		chip = Z8530;
	} else {
		/* Put one character in TX FIFO */
		write_scc_data(priv, 0, 0);
		if (read_scc(priv, R0) & Tx_BUF_EMP) {
			/* TX FIFO not full. This is a Z85230 ESCC with a 4-byte FIFO. */
			chip = Z85230;
		} else {
			/* TX FIFO full. This is a Z85C30 SCC with a 1-byte FIFO. */
			chip = Z85C30;
		}
	}
	write_scc(priv, R15, 0);
	/* Start IRQ auto-detection */
	irqs = probe_irq_on();

	/* Enable interrupts */
	if (type == TYPE_TWIN) {
		outb(0, card_base + TWIN_DMA_CFG);
		inb(card_base + TWIN_CLR_TMR1);
		inb(card_base + TWIN_CLR_TMR2);
		info->twin_serial_cfg = TWIN_EI;
		outb(info->twin_serial_cfg, card_base + TWIN_SERIAL_CFG);
	} else {
		write_scc(priv, R15, CTSIE);
		write_scc(priv, R0, RES_EXT_INT);
		write_scc(priv, R1, EXT_INT_ENAB);
	}

	/* Start timer */
	outb(1, tmr_base + TMR_CNT1);
	outb(0, tmr_base + TMR_CNT1);

	/* Wait and detect IRQ */
	time = jiffies;
	while (jiffies - time < 2 + HZ / TMR_0_HZ);
	irq = probe_irq_off(irqs);

	/* Clear pending interrupt, disable interrupts */
	if (type == TYPE_TWIN) {
		inb(card_base + TWIN_CLR_TMR1);
	} else {
		write_scc(priv, R1, 0);
		write_scc(priv, R15, 0);
		write_scc(priv, R0, RES_EXT_INT);
	}

	if (irq <= 0) {
		printk(KERN_ERR
		       "dmascc: could not find irq of %s at %#3x (irq=%d)\n",
		       hw[type].name, card_base, irq);
		goto out3;
	}
	/* Set up data structures */
	for (i = 0; i < 2; i++) {
		dev = info->dev[i];
		priv = &info->priv[i];
		priv->type = type;
		priv->chip = chip;
		priv->dev = dev;
		priv->info = info;
		priv->channel = i;
		spin_lock_init(&priv->ring_lock);
		priv->register_lock = &info->register_lock;
		priv->card_base = card_base;
		priv->scc_cmd = scc_base + (i ? SCCB_CMD : SCCA_CMD);
		priv->scc_data = scc_base + (i ? SCCB_DATA : SCCA_DATA);
		priv->tmr_cnt = tmr_base + (i ? TMR_CNT2 : TMR_CNT1);
		priv->tmr_ctrl = tmr_base + TMR_CTRL;
		priv->tmr_mode = i ? 0xb0 : 0x70;
		priv->param.pclk_hz = hw[type].pclk_hz;
		priv->param.brg_tc = -1;
		priv->param.clocks = TCTRxCP | RCRTxCP;
		priv->param.persist = 256;
		priv->param.dma = -1;
		INIT_WORK(&priv->rx_work, rx_bh, priv);
		dev->priv = priv;
		sprintf(dev->name, "dmascc%i", 2 * n + i);
		SET_MODULE_OWNER(dev);
		dev->base_addr = card_base;
		dev->irq = irq;
		dev->open = scc_open;
		dev->stop = scc_close;
		dev->do_ioctl = scc_ioctl;
		dev->hard_start_xmit = scc_send_packet;
		dev->get_stats = scc_get_stats;
		dev->hard_header = ax25_hard_header;
		dev->rebuild_header = ax25_rebuild_header;
		dev->set_mac_address = scc_set_mac_address;
	}
	if (register_netdev(info->dev[0])) {
		printk(KERN_ERR "dmascc: could not register %s\n",
		       info->dev[0]->name);
		goto out3;
	}
	if (register_netdev(info->dev[1])) {
		printk(KERN_ERR "dmascc: could not register %s\n",
		       info->dev[1]->name);
		goto out4;
	}

	info->next = first;
	first = info;
	printk(KERN_INFO "dmascc: found %s (%s) at %#3x, irq %d\n",
	       hw[type].name, chipnames[chip], card_base, irq);
	return 0;

      out4:
	unregister_netdev(info->dev[0]);
      out3:
	if (info->priv[0].type == TYPE_TWIN)
		outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
	write_scc(&info->priv[0], R9, FHWRES);
	free_netdev(info->dev[1]);
      out2:
	free_netdev(info->dev[0]);
      out1:
	kfree(info);
      out:
	return -1;
}
/* Driver functions */
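/* Low-level SCC register access.  The S5SCC/DMA allows plain outb()/inb();
   the PackeTwin needs the slower outb_p()/inb_p(); the PI/PI2 (default case)
   must additionally drop the DMA request mask (PI_DREQ_MASK) around every
   access, so those paths take register_lock and toggle the mask. */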
static void write_scc(struct scc_priv *priv, int reg, int val)
{
	unsigned long flags;
	switch (priv->type) {
	case TYPE_S5:
		if (reg)
			outb(reg, priv->scc_cmd);
		outb(val, priv->scc_cmd);
		return;
	case TYPE_TWIN:
		if (reg)
			outb_p(reg, priv->scc_cmd);
		outb_p(val, priv->scc_cmd);
		return;
	default:
		spin_lock_irqsave(priv->register_lock, flags);
		outb_p(0, priv->card_base + PI_DREQ_MASK);
		if (reg)
			outb_p(reg, priv->scc_cmd);
		outb_p(val, priv->scc_cmd);
		outb(1, priv->card_base + PI_DREQ_MASK);
		spin_unlock_irqrestore(priv->register_lock, flags);
		return;
	}
}
static void write_scc_data(struct scc_priv *priv, int val, int fast)
{
	unsigned long flags;
	switch (priv->type) {
	case TYPE_S5:
		outb(val, priv->scc_data);
		return;
	case TYPE_TWIN:
		outb_p(val, priv->scc_data);
		return;
	default:
		if (fast)
			outb_p(val, priv->scc_data);
		else {
			spin_lock_irqsave(priv->register_lock, flags);
			outb_p(0, priv->card_base + PI_DREQ_MASK);
			outb_p(val, priv->scc_data);
			outb(1, priv->card_base + PI_DREQ_MASK);
			spin_unlock_irqrestore(priv->register_lock, flags);
		}
		return;
	}
}
static int read_scc(struct scc_priv *priv, int reg)
{
	int rc;
	unsigned long flags;
	switch (priv->type) {
	case TYPE_S5:
		if (reg)
			outb(reg, priv->scc_cmd);
		return inb(priv->scc_cmd);
	case TYPE_TWIN:
		if (reg)
			outb_p(reg, priv->scc_cmd);
		return inb_p(priv->scc_cmd);
	default:
		spin_lock_irqsave(priv->register_lock, flags);
		outb_p(0, priv->card_base + PI_DREQ_MASK);
		if (reg)
			outb_p(reg, priv->scc_cmd);
		rc = inb_p(priv->scc_cmd);
		outb(1, priv->card_base + PI_DREQ_MASK);
		spin_unlock_irqrestore(priv->register_lock, flags);
		return rc;
	}
}
static int read_scc_data(struct scc_priv *priv)
{
	int rc;
	unsigned long flags;
	switch (priv->type) {
	case TYPE_S5:
		return inb(priv->scc_data);
	case TYPE_TWIN:
		return inb_p(priv->scc_data);
	default:
		spin_lock_irqsave(priv->register_lock, flags);
		outb_p(0, priv->card_base + PI_DREQ_MASK);
		rc = inb_p(priv->scc_data);
		outb(1, priv->card_base + PI_DREQ_MASK);
		spin_unlock_irqrestore(priv->register_lock, flags);
		return rc;
	}
}
static int scc_open(struct net_device *dev)
{
	struct scc_priv *priv = dev->priv;
	struct scc_info *info = priv->info;
	int card_base = priv->card_base;

	/* Request IRQ if not already used by other channel */
	if (!info->irq_used) {
		if (request_irq(dev->irq, scc_isr, 0, "dmascc", info)) {
			return -EAGAIN;
		}
	}
	info->irq_used++;

	/* Request DMA if required */
	if (priv->param.dma >= 0) {
		if (request_dma(priv->param.dma, "dmascc")) {
			if (--info->irq_used == 0)
				free_irq(dev->irq, info);
			return -EAGAIN;
		} else {
			unsigned long flags = claim_dma_lock();
			clear_dma_ff(priv->param.dma);
			release_dma_lock(flags);
		}
	}

	/* Initialize local variables */
	priv->rx_ptr = 0;
	priv->rx_over = 0;
	priv->rx_head = priv->rx_tail = priv->rx_count = 0;
	priv->state = IDLE;
	priv->tx_head = priv->tx_tail = priv->tx_count = 0;
	priv->tx_ptr = 0;
	/* Reset channel */
	write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
	/* X1 clock, SDLC mode */
	write_scc(priv, R4, SDLC | X1CLK);
	write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
	/* 8 bit RX char, RX disable */
	write_scc(priv, R3, Rx8);
	/* 8 bit TX char, TX disable */
	write_scc(priv, R5, Tx8);
	/* SDLC address field */
	write_scc(priv, R6, 0);
	/* SDLC flag */
	write_scc(priv, R7, FLAG);
	switch (priv->chip) {
	case Z85C30:
		/* Select WR7' */
		write_scc(priv, R15, SHDLCE);
		/* Automatic EOM reset */
		write_scc(priv, R7, AUTOEOM);
		write_scc(priv, R15, 0);
		break;
	case Z85230:
		/* Select WR7' */
		write_scc(priv, R15, SHDLCE);
		/* The following bits are set (see 2.5.2.1):
		   - Automatic EOM reset
		   - Interrupt request if RX FIFO is half full
		   This bit should be ignored in DMA mode (according to the
		   documentation), but actually isn't. The receiver doesn't work if
		   it is set. Thus, we have to clear it in DMA mode.
		   - Interrupt/DMA request if TX FIFO is completely empty
		   a) If set, the ESCC behaves as if it had no TX FIFO (Z85C30
		   compatibility).
		   b) If cleared, DMA requests may follow each other very quickly,
		   filling up the TX FIFO.
		   Advantage: TX works even in case of high bus latency.
		   Disadvantage: Edge-triggered DMA request circuitry may miss
		   a request. No more data is delivered, resulting
		   in a TX FIFO underrun.
		   Both PI2 and S5SCC/DMA seem to work fine with TXFIFOE cleared.
		   The PackeTwin doesn't. I don't know about the PI, but let's
		   assume it behaves like the PI2.
		 */
		if (priv->param.dma >= 0) {
			if (priv->type == TYPE_TWIN)
				write_scc(priv, R7, AUTOEOM | TXFIFOE);
			else
				write_scc(priv, R7, AUTOEOM);
		} else {
			write_scc(priv, R7, AUTOEOM | RXFIFOH);
		}
		write_scc(priv, R15, 0);
		break;
	}
	/* Preset CRC, NRZ(I) encoding */
	write_scc(priv, R10, CRCPS | (priv->param.nrzi ? NRZI : NRZ));

	/* Configure baud rate generator */
	if (priv->param.brg_tc >= 0) {
		/* Program BR generator */
		write_scc(priv, R12, priv->param.brg_tc & 0xFF);
		write_scc(priv, R13, (priv->param.brg_tc >> 8) & 0xFF);
		/* BRG source = SYS CLK; enable BRG; DTR REQ function (required by
		   PackeTwin, not connected on the PI2); set DPLL source to BRG */
		write_scc(priv, R14, SSBR | DTRREQ | BRSRC | BRENABL);
		/* Enable DPLL */
		write_scc(priv, R14, SEARCH | DTRREQ | BRSRC | BRENABL);
	} else {
		/* Disable BR generator */
		write_scc(priv, R14, DTRREQ | BRSRC);
	}
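	/* Note: per the Zilog SCC documentation the BRG output runs at
	   pclk_hz / (2 * (brg_tc + 2)), and the DPLL used for RX clock
	   recovery expects 32 times the bit rate, so brg_tc must be chosen
	   accordingly (normally computed by the dmascc_cfg utility). */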
	/* Configure clocks */
	if (priv->type == TYPE_TWIN) {
		/* Disable external TX clock receiver */
		outb((info->twin_serial_cfg &=
		      ~(priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
		     card_base + TWIN_SERIAL_CFG);
	}
	write_scc(priv, R11, priv->param.clocks);
	if ((priv->type == TYPE_TWIN) && !(priv->param.clocks & TRxCOI)) {
		/* Enable external TX clock receiver */
		outb((info->twin_serial_cfg |=
		      (priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
		     card_base + TWIN_SERIAL_CFG);
	}

	/* Configure PackeTwin */
	if (priv->type == TYPE_TWIN) {
		/* Assert DTR, enable interrupts */
		outb((info->twin_serial_cfg |= TWIN_EI |
		      (priv->channel ? TWIN_DTRB_ON : TWIN_DTRA_ON)),
		     card_base + TWIN_SERIAL_CFG);
	}

	/* Read current status */
	priv->rr0 = read_scc(priv, R0);
	/* Enable DCD interrupt */
	write_scc(priv, R15, DCDIE);

	netif_start_queue(dev);

	return 0;
}
static int scc_close(struct net_device *dev)
{
	struct scc_priv *priv = dev->priv;
	struct scc_info *info = priv->info;
	int card_base = priv->card_base;

	netif_stop_queue(dev);

	if (priv->type == TYPE_TWIN) {
		/* Drop DTR */
		outb((info->twin_serial_cfg &=
		      (priv->channel ? ~TWIN_DTRB_ON : ~TWIN_DTRA_ON)),
		     card_base + TWIN_SERIAL_CFG);
	}

	/* Reset channel, free DMA and IRQ */
	write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
	if (priv->param.dma >= 0) {
		if (priv->type == TYPE_TWIN)
			outb(0, card_base + TWIN_DMA_CFG);
		free_dma(priv->param.dma);
	}
	if (--info->irq_used == 0)
		free_irq(dev->irq, info);

	return 0;
}
static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct scc_priv *priv = dev->priv;

	switch (cmd) {
	case SIOCGSCCPARAM:
		if (copy_to_user
		    (ifr->ifr_data, &priv->param,
		     sizeof(struct scc_param)))
			return -EFAULT;
		return 0;
	case SIOCSSCCPARAM:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (netif_running(dev))
			return -EAGAIN;
		if (copy_from_user
		    (&priv->param, ifr->ifr_data,
		     sizeof(struct scc_param)))
			return -EFAULT;
		return 0;
	default:
		return -EINVAL;
	}
}
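/* Transmit path: the frame (minus its leading KISS command byte) is copied
   into the tx_buf ring.  If the transmitter is idle, RTS is asserted and the
   txdelay timer started; tm_isr() then switches to TX_DATA and calls tx_on(),
   and es_isr() catches the TX underrun at the end of the frame to start the
   tail or pause timer. */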
static int scc_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct scc_priv *priv = dev->priv;
	unsigned long flags;
	int i;

	/* Temporarily stop the scheduler feeding us packets */
	netif_stop_queue(dev);

	/* Transfer data to DMA buffer */
	i = priv->tx_head;
	memcpy(priv->tx_buf[i], skb->data + 1, skb->len - 1);
	priv->tx_len[i] = skb->len - 1;

	/* Clear interrupts while we touch our circular buffers */

	spin_lock_irqsave(&priv->ring_lock, flags);
	/* Move the ring buffer's head */
	priv->tx_head = (i + 1) % NUM_TX_BUF;
	priv->tx_count++;

	/* If we just filled up the last buffer, leave queue stopped.
	   The higher layers must wait until we have a DMA buffer
	   to accept the data. */
	if (priv->tx_count < NUM_TX_BUF)
		netif_wake_queue(dev);

	/* Set new TX state */
	if (priv->state == IDLE) {
		/* Assert RTS, start timer */
		priv->state = TX_HEAD;
		priv->tx_start = jiffies;
		write_scc(priv, R5, TxCRC_ENAB | RTS | TxENAB | Tx8);
		write_scc(priv, R15, 0);
		start_timer(priv, priv->param.txdelay, 0);
	}

	/* Turn interrupts back on and free buffer */
	spin_unlock_irqrestore(&priv->ring_lock, flags);
	dev_kfree_skb(skb);

	return 0;
}
static struct net_device_stats *scc_get_stats(struct net_device *dev)
{
	struct scc_priv *priv = dev->priv;

	return &priv->stats;
}


static int scc_set_mac_address(struct net_device *dev, void *sa)
{
	memcpy(dev->dev_addr, ((struct sockaddr *) sa)->sa_data,
	       dev->addr_len);
	return 0;
}
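/* Start the transmitter.  In DMA mode the CPU writes the first byte(s) by
   hand to prime the TX FIFO and trigger the first DMA request: three bytes
   on the Z85230 (whose ESCC FIFO is four bytes deep), one on the older
   chips; the DMA controller is set up to deliver the rest of the frame. */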
static inline void tx_on(struct scc_priv *priv)
{
	int i, n;
	unsigned long flags;

	if (priv->param.dma >= 0) {
		n = (priv->chip == Z85230) ? 3 : 1;
		/* Program DMA controller */
		flags = claim_dma_lock();
		set_dma_mode(priv->param.dma, DMA_MODE_WRITE);
		set_dma_addr(priv->param.dma,
			     (int) priv->tx_buf[priv->tx_tail] + n);
		set_dma_count(priv->param.dma,
			      priv->tx_len[priv->tx_tail] - n);
		release_dma_lock(flags);
		/* Enable TX underrun interrupt */
		write_scc(priv, R15, TxUIE);
		/* Configure DREQ */
		if (priv->type == TYPE_TWIN)
			outb((priv->param.dma ==
			      1) ? TWIN_DMA_HDX_T1 : TWIN_DMA_HDX_T3,
			     priv->card_base + TWIN_DMA_CFG);
		else
			write_scc(priv, R1,
				  EXT_INT_ENAB | WT_FN_RDYFN |
				  WT_RDY_ENAB);
		/* Write first byte(s) */
		spin_lock_irqsave(priv->register_lock, flags);
		for (i = 0; i < n; i++)
			write_scc_data(priv,
				       priv->tx_buf[priv->tx_tail][i], 1);
		enable_dma(priv->param.dma);
		spin_unlock_irqrestore(priv->register_lock, flags);
	} else {
		write_scc(priv, R15, TxUIE);
		write_scc(priv, R1,
			  EXT_INT_ENAB | WT_FN_RDYFN | TxINT_ENAB);
		tx_isr(priv);
	}
	/* Reset EOM latch if we do not have the AUTOEOM feature */
	if (priv->chip == Z8530)
		write_scc(priv, R0, RES_EOM_L);
}
static inline void rx_on(struct scc_priv *priv)
{
	unsigned long flags;

	/* Clear RX FIFO */
	while (read_scc(priv, R0) & Rx_CH_AV)
		read_scc_data(priv);

	if (priv->param.dma >= 0) {
		/* Program DMA controller */
		flags = claim_dma_lock();
		set_dma_mode(priv->param.dma, DMA_MODE_READ);
		set_dma_addr(priv->param.dma,
			     (int) priv->rx_buf[priv->rx_head]);
		set_dma_count(priv->param.dma, BUF_SIZE);
		release_dma_lock(flags);
		enable_dma(priv->param.dma);
		/* Configure PackeTwin DMA */
		if (priv->type == TYPE_TWIN) {
			outb((priv->param.dma ==
			      1) ? TWIN_DMA_HDX_R1 : TWIN_DMA_HDX_R3,
			     priv->card_base + TWIN_DMA_CFG);
		}
		/* Sp. cond. intr. only, ext int enable, RX DMA enable */
		write_scc(priv, R1, EXT_INT_ENAB | INT_ERR_Rx |
			  WT_RDY_RT | WT_FN_RDYFN | WT_RDY_ENAB);
	} else {
		/* Reset current frame */
		priv->rx_ptr = 0;
		/* Intr. on all Rx characters and Sp. cond., ext int enable */
		write_scc(priv, R1, EXT_INT_ENAB | INT_ALL_Rx | WT_RDY_RT |
			  WT_FN_RDYFN);
	}
	write_scc(priv, R0, ERR_RES);
	write_scc(priv, R3, RxENABLE | Rx8 | RxCRC_ENAB);
}
static inline void rx_off(struct scc_priv *priv)
{
	/* Disable receiver */
	write_scc(priv, R3, Rx8);
	/* Disable DREQ / RX interrupt */
	if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
		outb(0, priv->card_base + TWIN_DMA_CFG);
	else
		write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);

	if (priv->param.dma >= 0)
		disable_dma(priv->param.dma);
}
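/* Load this channel's 8253 counter (timer 1 for channel A, timer 2 for
   channel B) with t ticks of TMR_0_HZ.  On the PI/PI2/S5 boards the timer
   output is routed to the SCC CTS input, so expiry shows up as a CTS
   external/status interrupt (hence CTSIE below); the PackeTwin instead has
   dedicated timer interrupt lines, handled in scc_isr(). */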
static void start_timer(struct scc_priv *priv, int t, int r15)
{
	unsigned long flags;

	outb(priv->tmr_mode, priv->tmr_ctrl);
	if (t == 0) {
		tm_isr(priv);
	} else if (t > 0) {
		save_flags(flags);
		cli();
		outb(t & 0xFF, priv->tmr_cnt);
		outb((t >> 8) & 0xFF, priv->tmr_cnt);
		if (priv->type != TYPE_TWIN) {
			write_scc(priv, R15, r15 | CTSIE);
			priv->rr0 |= CTS;
		}
		restore_flags(flags);
	}
}
static inline unsigned char random(void)
{
	/* See "Numerical Recipes in C", second edition, p. 284 */
	rand = rand * 1664525L + 1013904223L;
	return (unsigned char) (rand >> 24);
}
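/* Service all pending interrupts of one 8530.  RR3, readable on channel A
   only, reports pending RX/TX/external-status interrupts for both channels;
   dispatch in priority order and repeat until RR3 reads zero, with a loop
   bound as a safety net against a stuck interrupt source. */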
static inline void z8530_isr(struct scc_info *info)
{
	int is, i = 100;

	while ((is = read_scc(&info->priv[0], R3)) && i--) {
		if (is & CHARxIP) {
			rx_isr(&info->priv[0]);
		} else if (is & CHATxIP) {
			tx_isr(&info->priv[0]);
		} else if (is & CHAEXT) {
			es_isr(&info->priv[0]);
		} else if (is & CHBRxIP) {
			rx_isr(&info->priv[1]);
		} else if (is & CHBTxIP) {
			tx_isr(&info->priv[1]);
		} else {
			es_isr(&info->priv[1]);
		}
		/* Reset highest interrupt under service */
		write_scc(&info->priv[0], R0, RES_H_IUS);
	}
	if (i < 0)
		printk(KERN_ERR "dmascc: stuck in ISR with RR3=0x%02x.\n",
		       is);
	/* Ok, no interrupts pending from this 8530. The INT line should
	   be inactive now. */
}
static irqreturn_t scc_isr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct scc_info *info = dev_id;

	spin_lock(info->priv[0].register_lock);
	/* At this point interrupts are enabled, and the interrupt under service
	   is already acknowledged, but masked off.

	   Interrupt processing: We loop until we know that the IRQ line is
	   low. If another positive edge occurs afterwards during the ISR,
	   another interrupt will be triggered by the interrupt controller
	   as soon as the IRQ level is enabled again (see asm/irq.h).

	   Bottom-half handlers will be processed after scc_isr(). This is
	   important, since we only have small ringbuffers and want new data
	   to be fetched/delivered immediately. */

	if (info->priv[0].type == TYPE_TWIN) {
		int is, card_base = info->priv[0].card_base;
		while ((is = ~inb(card_base + TWIN_INT_REG)) &
		       TWIN_INT_MSK) {
			if (is & TWIN_SCC_MSK) {
				z8530_isr(info);
			} else if (is & TWIN_TMR1_MSK) {
				inb(card_base + TWIN_CLR_TMR1);
				tm_isr(&info->priv[0]);
			} else {
				inb(card_base + TWIN_CLR_TMR2);
				tm_isr(&info->priv[1]);
			}
		}
	} else
		z8530_isr(info);
	spin_unlock(info->priv[0].register_lock);
	return IRQ_HANDLED;
}
static void rx_isr(struct scc_priv *priv)
{
	if (priv->param.dma >= 0) {
		/* Check special condition and perform error reset. See 2.4.7.5. */
		special_condition(priv, read_scc(priv, R1));
		write_scc(priv, R0, ERR_RES);
	} else {
		/* Check special condition for each character. Error reset not necessary.
		   Same algorithm for SCC and ESCC. See 2.4.7.1 and 2.4.7.4. */
		int rc;
		while (read_scc(priv, R0) & Rx_CH_AV) {
			rc = read_scc(priv, R1);
			if (priv->rx_ptr < BUF_SIZE)
				priv->rx_buf[priv->rx_head][priv->rx_ptr++] =
				    read_scc_data(priv);
			else {
				/* Buffer full: drop the character */
				priv->rx_over = 2;
				read_scc_data(priv);
			}
			special_condition(priv, rc);
		}
	}
}
static void special_condition(struct scc_priv *priv, int rc)
{
	int cb;
	unsigned long flags;

	/* See Figure 2-15. Only overrun and EOF need to be checked. */

	if (rc & Rx_OVR) {
		/* Receiver overrun */
		priv->rx_over = 1;
		if (priv->param.dma < 0)
			write_scc(priv, R0, ERR_RES);
	} else if (rc & END_FR) {
		/* End of frame. Get byte count */
		if (priv->param.dma >= 0) {
			flags = claim_dma_lock();
			cb = BUF_SIZE - get_dma_residue(priv->param.dma) - 2;
			release_dma_lock(flags);
		} else {
			cb = priv->rx_ptr - 2;
		}
		if (priv->rx_over) {
			/* We had an overrun */
			priv->stats.rx_errors++;
			if (priv->rx_over == 2)
				priv->stats.rx_length_errors++;
			else
				priv->stats.rx_fifo_errors++;
			priv->rx_over = 0;
		} else if (rc & CRC_ERR) {
			/* Count invalid CRC only if packet length >= minimum */
			if (cb >= 15) {
				priv->stats.rx_errors++;
				priv->stats.rx_crc_errors++;
			}
		} else {
			if (cb >= 15) {
				if (priv->rx_count < NUM_RX_BUF - 1) {
					/* Put good frame in FIFO */
					priv->rx_len[priv->rx_head] = cb;
					priv->rx_head =
					    (priv->rx_head + 1) % NUM_RX_BUF;
					priv->rx_count++;
					schedule_work(&priv->rx_work);
				} else {
					priv->stats.rx_errors++;
					priv->stats.rx_over_errors++;
				}
			}
		}
		/* Get ready for new frame */
		if (priv->param.dma >= 0) {
			flags = claim_dma_lock();
			set_dma_addr(priv->param.dma,
				     (int) priv->rx_buf[priv->rx_head]);
			set_dma_count(priv->param.dma, BUF_SIZE);
			release_dma_lock(flags);
		} else {
			priv->rx_ptr = 0;
		}
	}
}
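/* Receive bottom half: runs from the shared workqueue (scheduled by
   special_condition()) and drains the rx_buf ring into sk_buffs, dropping
   ring_lock while copying so the ISR can keep filling other slots. */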
static void rx_bh(void *arg)
{
	struct scc_priv *priv = arg;
	int i = priv->rx_tail;
	int cb;
	unsigned long flags;
	struct sk_buff *skb;
	unsigned char *data;

	spin_lock_irqsave(&priv->ring_lock, flags);
	while (priv->rx_count) {
		spin_unlock_irqrestore(&priv->ring_lock, flags);
		cb = priv->rx_len[i];
		/* Allocate buffer */
		skb = dev_alloc_skb(cb + 1);
		if (skb == NULL) {
			/* Drop packet */
			priv->stats.rx_dropped++;
		} else {
			/* Fill buffer */
			data = skb_put(skb, cb + 1);
			data[0] = 0;	/* KISS command byte (data frame) */
			memcpy(&data[1], priv->rx_buf[i], cb);
			skb->protocol = ax25_type_trans(skb, priv->dev);
			netif_rx(skb);
			priv->dev->last_rx = jiffies;
			priv->stats.rx_packets++;
			priv->stats.rx_bytes += cb;
		}
		spin_lock_irqsave(&priv->ring_lock, flags);
		/* Move tail */
		priv->rx_tail = i = (i + 1) % NUM_RX_BUF;
		priv->rx_count--;
	}
	spin_unlock_irqrestore(&priv->ring_lock, flags);
}
static void tx_isr(struct scc_priv *priv)
{
	int i = priv->tx_tail, p = priv->tx_ptr;

	/* Suspend TX interrupts if we don't want to send anything. */
	if (p == priv->tx_len[i]) {
		write_scc(priv, R0, RES_Tx_P);
		return;
	}

	/* Write characters */
	while ((read_scc(priv, R0) & Tx_BUF_EMP) && p < priv->tx_len[i]) {
		write_scc_data(priv, priv->tx_buf[i][p++], 0);
	}

	/* Reset EOM latch of Z8530 */
	if (!priv->tx_ptr && p && priv->chip == Z8530)
		write_scc(priv, R0, RES_EOM_L);

	priv->tx_ptr = p;
}
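/* External/status interrupt: handles the TX underrun/EOM that marks the end
   of a transmitted frame (statistics, advancing the tx ring, scheduling the
   tail or pause timer), DCD transitions, and, on non-PackeTwin boards, the
   CTS edge used as the timer expiry signal (see start_timer()). */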
static void es_isr(struct scc_priv *priv)
{
	int i, rr0, drr0, res;
	unsigned long flags;

	/* Read status, reset interrupt bit (open latches) */
	rr0 = read_scc(priv, R0);
	write_scc(priv, R0, RES_EXT_INT);
	drr0 = priv->rr0 ^ rr0;
	priv->rr0 = rr0;

	/* Transmit underrun (2.4.9.6). We can't check the TxEOM flag, since
	   it might have already been cleared again by AUTOEOM. */
	if (priv->state == TX_DATA) {
		/* Get remaining bytes */
		i = priv->tx_tail;
		if (priv->param.dma >= 0) {
			disable_dma(priv->param.dma);
			flags = claim_dma_lock();
			res = get_dma_residue(priv->param.dma);
			release_dma_lock(flags);
		} else {
			res = priv->tx_len[i] - priv->tx_ptr;
			priv->tx_ptr = 0;
		}
		/* Disable DREQ / TX interrupt */
		if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
			outb(0, priv->card_base + TWIN_DMA_CFG);
		else
			write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
		if (res) {
			/* Update packet statistics */
			priv->stats.tx_errors++;
			priv->stats.tx_fifo_errors++;
			/* Other underrun interrupts may already be waiting */
			write_scc(priv, R0, RES_EXT_INT);
			write_scc(priv, R0, RES_EXT_INT);
		} else {
			/* Update packet statistics */
			priv->stats.tx_packets++;
			priv->stats.tx_bytes += priv->tx_len[i];
			/* Remove frame from FIFO */
			priv->tx_tail = (i + 1) % NUM_TX_BUF;
			priv->tx_count--;
			/* Inform upper layers */
			netif_wake_queue(priv->dev);
		}
		/* Switch state */
		write_scc(priv, R15, 0);
		if (priv->tx_count &&
		    (jiffies - priv->tx_start) < priv->param.txtimeout) {
			priv->state = TX_PAUSE;
			start_timer(priv, priv->param.txpause, 0);
		} else {
			priv->state = TX_TAIL;
			start_timer(priv, priv->param.txtail, 0);
		}
	}

	/* DCD transition */
	if (drr0 & DCD) {
		if (rr0 & DCD) {
			switch (priv->state) {
			case IDLE:
			case WAIT:
				priv->state = DCD_ON;
				write_scc(priv, R15, 0);
				start_timer(priv, priv->param.dcdon, 0);
			}
		} else {
			switch (priv->state) {
			case RX_ON:
				rx_off(priv);
				priv->state = DCD_OFF;
				write_scc(priv, R15, 0);
				start_timer(priv, priv->param.dcdoff, 0);
			}
		}
	}

	/* CTS transition */
	if ((drr0 & CTS) && (~rr0 & CTS) && priv->type != TYPE_TWIN)
		tm_isr(priv);
}
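/* Timer expiry: drives the half-duplex state machine.  TX_HEAD/TX_PAUSE
   expire into TX_DATA (transmitter started via tx_on()); TX_TAIL drops RTS
   and enters RTS_OFF; RTS_OFF and WAIT either key up again or fall back to
   receive; DCD_ON/DCD_OFF implement p-persistent CSMA, backing off for
   random() / param.persist slot times before re-checking the channel. */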
static void tm_isr(struct scc_priv *priv)
{
	switch (priv->state) {
	case TX_HEAD:
	case TX_PAUSE:
		tx_on(priv);
		priv->state = TX_DATA;
		break;
	case TX_TAIL:
		write_scc(priv, R5, TxCRC_ENAB | Tx8);
		priv->state = RTS_OFF;
		if (priv->type != TYPE_TWIN)
			write_scc(priv, R15, 0);
		start_timer(priv, priv->param.rtsoff, 0);
		break;
	case RTS_OFF:
		write_scc(priv, R15, DCDIE);
		priv->rr0 = read_scc(priv, R0);
		if (priv->rr0 & DCD) {
			priv->stats.collisions++;
			rx_on(priv);
			priv->state = RX_ON;
		} else {
			priv->state = WAIT;
			start_timer(priv, priv->param.waittime, DCDIE);
		}
		break;
	case WAIT:
		if (priv->tx_count) {
			priv->state = TX_HEAD;
			priv->tx_start = jiffies;
			write_scc(priv, R5,
				  TxCRC_ENAB | RTS | TxENAB | Tx8);
			write_scc(priv, R15, 0);
			start_timer(priv, priv->param.txdelay, 0);
		} else {
			priv->state = IDLE;
			if (priv->type != TYPE_TWIN)
				write_scc(priv, R15, DCDIE);
		}
		break;
	case DCD_ON:
	case DCD_OFF:
		write_scc(priv, R15, DCDIE);
		priv->rr0 = read_scc(priv, R0);
		if (priv->rr0 & DCD) {
			rx_on(priv);
			priv->state = RX_ON;
		} else {
			priv->state = DCD_OFF;
			start_timer(priv,
				    random() / priv->param.persist *
				    priv->param.slottime, DCDIE);