2 * Driver for high-speed SCC boards (those with DMA support)
3 * Copyright (C) 1997-2000 Klaus Kudielka
5 * S5SCC/DMA support by Janko Koleznik S52HI
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 #include <linux/module.h>
24 #include <linux/delay.h>
25 #include <linux/errno.h>
26 #include <linux/if_arp.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/kernel.h>
33 #include <linux/netdevice.h>
34 #include <linux/rtnetlink.h>
35 #include <linux/sockios.h>
36 #include <linux/workqueue.h>
37 #include <asm/atomic.h>
38 #include <asm/bitops.h>
42 #include <asm/uaccess.h>
47 /* Number of buffers per channel */
49 #define NUM_TX_BUF 2 /* NUM_TX_BUF >= 1 (min. 2 recommended) */
50 #define NUM_RX_BUF 6 /* NUM_RX_BUF >= 1 (min. 2 recommended) */
51 #define BUF_SIZE 1576 /* BUF_SIZE >= mtu + hard_header_len */
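/* Sizing note: each channel keeps its own TX and RX rings; NUM_TX_BUF >= 2
   lets the stack queue one frame while another is still being sent, and
   BUF_SIZE must hold a complete frame as handed down, i.e. at least
   dev->mtu plus dev->hard_header_len for the AX.25 header. */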
56 #define HW_PI { "Ottawa PI", 0x300, 0x20, 0x10, 8, \
57 0, 8, 1843200, 3686400 }
58 #define HW_PI2 { "Ottawa PI2", 0x300, 0x20, 0x10, 8, \
59 0, 8, 3686400, 7372800 }
60 #define HW_TWIN { "Gracilis PackeTwin", 0x200, 0x10, 0x10, 32, \
61 0, 4, 6144000, 6144000 }
62 #define HW_S5 { "S5SCC/DMA", 0x200, 0x10, 0x10, 32, \
63 0, 8, 4915200, 9830400 }
65 #define HARDWARE { HW_PI, HW_PI2, HW_TWIN, HW_S5 }
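/* The columns line up, in order, with the struct scc_hardware fields used
   by the probe code below: name, first I/O base, I/O step between cards,
   I/O region size, number of addresses probed, SCC register offset,
   8253/8254 timer offset, timer input clock (Hz) and SCC PCLK (Hz).
   The PackeTwin, for example, is probed at 0x200, 0x210, ... with a
   16-byte region per card and a 6.144 MHz clock for both timer and SCC. */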
67 #define TMR_0_HZ 25600 /* Frequency of timer 0 */
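/* Timer 0 is programmed as a divider from the card's timer clock down to
   this rate, e.g. 3686400 / 25600 = 144 on the PI2 and 6144000 / 25600 =
   240 on the PackeTwin.  All parameters marked [1/TMR_0_HZ] below are
   counted in ticks of this 25.6 kHz timebase, i.e. about 39 us per tick. */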
75 #define MAX_NUM_DEVS 32
78 /* SCC chips supported */
84 #define CHIPNAMES { "Z8530", "Z85C30", "Z85230" }
89 /* 8530 registers relative to card base */
91 #define SCCB_DATA 0x01
93 #define SCCA_DATA 0x03
95 /* 8253/8254 registers relative to card base */
101 /* Additional PI/PI2 registers relative to card base */
102 #define PI_DREQ_MASK 0x04
104 /* Additional PackeTwin registers relative to card base */
105 #define TWIN_INT_REG 0x08
106 #define TWIN_CLR_TMR1 0x09
107 #define TWIN_CLR_TMR2 0x0a
108 #define TWIN_SPARE_1 0x0b
109 #define TWIN_DMA_CFG 0x08
110 #define TWIN_SERIAL_CFG 0x09
111 #define TWIN_DMA_CLR_FF 0x0a
112 #define TWIN_SPARE_2 0x0b
115 /* PackeTwin I/O register values */
118 #define TWIN_SCC_MSK 0x01
119 #define TWIN_TMR1_MSK 0x02
120 #define TWIN_TMR2_MSK 0x04
121 #define TWIN_INT_MSK 0x07
124 #define TWIN_DTRA_ON 0x01
125 #define TWIN_DTRB_ON 0x02
126 #define TWIN_EXTCLKA 0x04
127 #define TWIN_EXTCLKB 0x08
128 #define TWIN_LOOPA_ON 0x10
129 #define TWIN_LOOPB_ON 0x20
133 #define TWIN_DMA_HDX_T1 0x08
134 #define TWIN_DMA_HDX_R1 0x0a
135 #define TWIN_DMA_HDX_T3 0x14
136 #define TWIN_DMA_HDX_R3 0x16
137 #define TWIN_DMA_FDX_T3R1 0x1b
138 #define TWIN_DMA_FDX_T1R3 0x1d
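/* Presumably: the TWIN_DMA_HDX_* values route the (half-duplex) DMA request
   to ISA DMA channel 1 (T1/R1) or channel 3 (T3/R3) for transmit or receive
   respectively, while the TWIN_DMA_FDX_* values select full-duplex operation
   with TX and RX on separate channels; tx_on() and rx_on() below choose
   T1/R1 vs. T3/R3 based on param.dma. */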
157 #define SIOCGSCCPARAM SIOCDEVPRIVATE
158 #define SIOCSSCCPARAM (SIOCDEVPRIVATE+1)
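/* The two private ioctls exchange a struct scc_param with user space via
   ifr_data.  Rough user-space sketch (hypothetical, for illustration):

       struct ifreq ifr;
       struct scc_param param;
       strcpy(ifr.ifr_name, "dmascc0");
       ifr.ifr_data = (void *) &param;
       ioctl(sk, SIOCGSCCPARAM, &ifr);      (read current parameters)
       param.txdelay = 256;                 (10 ms at TMR_0_HZ = 25600)
       ioctl(sk, SIOCSSCCPARAM, &ifr);      (write back)

   sk is any socket fd; SIOCSSCCPARAM needs CAP_NET_ADMIN and is refused
   while the interface is up (see scc_ioctl() below). */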
164 int pclk_hz; /* frequency of BRG input (don't change) */
165 int brg_tc; /* BRG terminal count; BRG disabled if < 0 */
166 int nrzi; /* 0 (nrz), 1 (nrzi) */
167 int clocks; /* see dmascc_cfg documentation */
168 int txdelay; /* [1/TMR_0_HZ] */
169 int txtimeout; /* [1/HZ] */
170 int txtail; /* [1/TMR_0_HZ] */
171 int waittime; /* [1/TMR_0_HZ] */
172 int slottime; /* [1/TMR_0_HZ] */
173 int persist; /* 1 ... 256 */
174 int dma; /* -1 (disable), 0, 1, 3 */
175 int txpause; /* [1/TMR_0_HZ] */
176 int rtsoff; /* [1/TMR_0_HZ] */
177 int dcdon; /* [1/TMR_0_HZ] */
178 int dcdoff; /* [1/TMR_0_HZ] */
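/* Units, for orientation: everything marked [1/TMR_0_HZ] is counted in
   ticks of the 25.6 kHz timebase, so txdelay = 256 corresponds to 10 ms;
   txtimeout is in jiffies.  persist = 256 means "always transmit" in the
   p-persistence scheme implemented in tm_isr(); smaller values transmit
   in a given slot with probability of roughly persist/256. */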
181 struct scc_hardware {
196 struct net_device *dev;
197 struct scc_info *info;
198 struct net_device_stats stats;
200 int card_base, scc_cmd, scc_data;
201 int tmr_cnt, tmr_ctrl, tmr_mode;
202 struct scc_param param;
203 char rx_buf[NUM_RX_BUF][BUF_SIZE];
204 int rx_len[NUM_RX_BUF];
206 struct work_struct rx_work;
207 int rx_head, rx_tail, rx_count;
209 char tx_buf[NUM_TX_BUF][BUF_SIZE];
210 int tx_len[NUM_TX_BUF];
212 int tx_head, tx_tail, tx_count;
214 unsigned long tx_start;
216 spinlock_t *register_lock; /* Per scc_info */
217 spinlock_t ring_lock;
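/* Ring buffer convention: *_head is the next buffer to be filled
   (scc_send_packet() for TX, the receiver for RX), *_tail the next one to
   be consumed (the TX interrupt path resp. rx_bh()), and *_count the
   number of buffers in use; ring_lock protects these indices against the
   interrupt handlers. */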
223 struct net_device *dev[2];
224 struct scc_priv priv[2];
225 struct scc_info *next;
226 spinlock_t register_lock; /* Per device register lock */
230 /* Function declarations */
231 static int setup_adapter(int card_base, int type, int n) __init;
233 static void write_scc(struct scc_priv *priv, int reg, int val);
234 static void write_scc_data(struct scc_priv *priv, int val, int fast);
235 static int read_scc(struct scc_priv *priv, int reg);
236 static int read_scc_data(struct scc_priv *priv);
238 static int scc_open(struct net_device *dev);
239 static int scc_close(struct net_device *dev);
240 static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
241 static int scc_send_packet(struct sk_buff *skb, struct net_device *dev);
242 static struct net_device_stats *scc_get_stats(struct net_device *dev);
243 static int scc_set_mac_address(struct net_device *dev, void *sa);
245 static inline void tx_on(struct scc_priv *priv);
246 static inline void rx_on(struct scc_priv *priv);
247 static inline void rx_off(struct scc_priv *priv);
248 static void start_timer(struct scc_priv *priv, int t, int r15);
249 static inline unsigned char random(void);
251 static inline void z8530_isr(struct scc_info *info);
252 static irqreturn_t scc_isr(int irq, void *dev_id);
253 static void rx_isr(struct scc_priv *priv);
254 static void special_condition(struct scc_priv *priv, int rc);
255 static void rx_bh(void *arg);
256 static void tx_isr(struct scc_priv *priv);
257 static void es_isr(struct scc_priv *priv);
258 static void tm_isr(struct scc_priv *priv);
261 /* Initialization variables */
263 static int io[MAX_NUM_DEVS] __initdata = { 0, };
265 /* Beware! hw[] is also used in dmascc_exit(). */
266 static struct scc_hardware hw[NUM_TYPES] __initdata_or_module = HARDWARE;
267 static char ax25_broadcast[7] __initdata =
268 { 'Q' << 1, 'S' << 1, 'T' << 1, ' ' << 1, ' ' << 1, ' ' << 1, '0' << 1 };
270 static char ax25_test[7] __initdata =
271 { 'L' << 1, 'I' << 1, 'N' << 1, 'U' << 1, 'X' << 1, ' ' << 1, '1' << 1 };
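/* AX.25 addresses are callsigns shifted left by one bit (bit 0 is the HDLC
   address-extension bit), hence the '<< 1': the broadcast address is "QST"
   with SSID 0, and the default interface address is "LINUX" with SSID 1,
   normally replaced by the operator's own callsign via SIOCSIFHWADDR
   (scc_set_mac_address() below). */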
275 /* Global variables */
277 static struct scc_info *first;
278 static unsigned long rand;
281 MODULE_AUTHOR("Klaus Kudielka");
282 MODULE_DESCRIPTION("Driver for high-speed SCC boards");
283 module_param_array(io, int, NULL, 0);
284 MODULE_LICENSE("GPL");
286 static void __exit dmascc_exit(void)
289 struct scc_info *info;
294 /* Unregister devices */
295 for (i = 0; i < 2; i++)
296 unregister_netdev(info->dev[i]);
299 if (info->priv[0].type == TYPE_TWIN)
300 outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
301 write_scc(&info->priv[0], R9, FHWRES);
302 release_region(info->dev[0]->base_addr,
303 hw[info->priv[0].type].io_size);
305 for (i = 0; i < 2; i++)
306 free_netdev(info->dev[i]);
314 static int __init dmascc_init(void)
317 int base[MAX_NUM_DEVS], tcmd[MAX_NUM_DEVS], t0[MAX_NUM_DEVS], t1[MAX_NUM_DEVS];
320 unsigned long time, start[MAX_NUM_DEVS], delay[MAX_NUM_DEVS],
321 counting[MAX_NUM_DEVS];
323 /* Initialize random number generator */
325 /* Cards found = 0 */
327 /* Warning message */
329 printk(KERN_INFO "dmascc: autoprobing (dangerous)\n");
331 /* Run autodetection for each card type */
332 for (h = 0; h < NUM_TYPES; h++) {
335 /* User-specified I/O address regions */
336 for (i = 0; i < hw[h].num_devs; i++)
338 for (i = 0; i < MAX_NUM_DEVS && io[i]; i++) {
340 j = (io[i] - hw[h].io_region) / hw[h].io_delta;
341 if (j >= 0 && j < hw[h].num_devs &&
343 hw[h].io_region + j * hw[h].io_delta == io[i]) {
348 /* Default I/O address regions */
349 for (i = 0; i < hw[h].num_devs; i++) {
351 hw[h].io_region + i * hw[h].io_delta;
355 /* Check valid I/O address regions */
356 for (i = 0; i < hw[h].num_devs; i++)
359 (base[i], hw[h].io_size, "dmascc"))
363 base[i] + hw[h].tmr_offset +
366 base[i] + hw[h].tmr_offset +
369 base[i] + hw[h].tmr_offset +
375 for (i = 0; i < hw[h].num_devs; i++)
377 /* Timer 0: LSB+MSB, Mode 3, TMR_0_HZ */
379 outb((hw[h].tmr_hz / TMR_0_HZ) & 0xFF,
381 outb((hw[h].tmr_hz / TMR_0_HZ) >> 8,
383 /* Timer 1: LSB+MSB, Mode 0, HZ/10 */
385 outb((TMR_0_HZ / HZ * 10) & 0xFF, t1[i]);
386 outb((TMR_0_HZ / HZ * 10) >> 8, t1[i]);
390 /* Timer 2: LSB+MSB, Mode 0 */
394 /* Wait until counter registers are loaded */
395 udelay(2000000 / TMR_0_HZ);
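/* Timer 1 was loaded with TMR_0_HZ / HZ * 10 counts, i.e. ten jiffies'
   worth of the 25.6 kHz timebase (0.1 s at HZ = 100), assuming the card
   cascades timer 0's output into timer 1.  On a real card it should
   therefore reach zero after roughly 10 jiffies, which is what the
   evaluation below accepts (delay between 9 and 11); anything else means
   no adapter at that address and its I/O region is released again. */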
398 while (jiffies - time < 13) {
399 for (i = 0; i < hw[h].num_devs; i++)
400 if (base[i] && counting[i]) {
401 /* Read back Timer 1: latch; read LSB; read MSB */
404 t_val = inb(t1[i]) + (inb(t1[i]) << 8);
405 /* Also check whether counter did wrap */
407 || t_val > TMR_0_HZ / HZ * 10)
409 delay[i] = jiffies - start[i];
413 /* Evaluate measurements */
414 for (i = 0; i < hw[h].num_devs; i++)
416 if ((delay[i] >= 9 && delay[i] <= 11) &&
417 /* Ok, we have found an adapter */
418 (setup_adapter(base[i], h, n) == 0))
421 release_region(base[i], hw[h].io_size);
427 /* If any adapter was successfully initialized, return ok */
431 /* If no adapter found, return error */
432 printk(KERN_INFO "dmascc: no adapters found\n");
436 module_init(dmascc_init);
437 module_exit(dmascc_exit);
439 static void __init dev_setup(struct net_device *dev)
441 dev->type = ARPHRD_AX25;
442 dev->hard_header_len = AX25_MAX_HEADER_LEN;
444 dev->addr_len = AX25_ADDR_LEN;
445 dev->tx_queue_len = 64;
446 memcpy(dev->broadcast, ax25_broadcast, AX25_ADDR_LEN);
447 memcpy(dev->dev_addr, ax25_test, AX25_ADDR_LEN);
450 static int __init setup_adapter(int card_base, int type, int n)
453 struct scc_info *info;
454 struct net_device *dev;
455 struct scc_priv *priv;
458 int tmr_base = card_base + hw[type].tmr_offset;
459 int scc_base = card_base + hw[type].scc_offset;
460 char *chipnames[] = CHIPNAMES;
462 /* Allocate memory */
463 info = kmalloc(sizeof(struct scc_info), GFP_KERNEL | GFP_DMA);
465 printk(KERN_ERR "dmascc: "
466 "could not allocate memory for %s at %#3x\n",
467 hw[type].name, card_base);
471 /* Initialize what is necessary for write_scc and write_scc_data */
472 memset(info, 0, sizeof(struct scc_info));
474 info->dev[0] = alloc_netdev(0, "", dev_setup);
476 printk(KERN_ERR "dmascc: "
477 "could not allocate memory for %s at %#3x\n",
478 hw[type].name, card_base);
482 info->dev[1] = alloc_netdev(0, "", dev_setup);
484 printk(KERN_ERR "dmascc: "
485 "could not allocate memory for %s at %#3x\n",
486 hw[type].name, card_base);
489 spin_lock_init(&info->register_lock);
491 priv = &info->priv[0];
493 priv->card_base = card_base;
494 priv->scc_cmd = scc_base + SCCA_CMD;
495 priv->scc_data = scc_base + SCCA_DATA;
496 priv->register_lock = &info->register_lock;
499 write_scc(priv, R9, FHWRES | MIE | NV);
501 /* Determine type of chip by enabling SDLC/HDLC enhancements */
502 write_scc(priv, R15, SHDLCE);
503 if (!read_scc(priv, R15)) {
504 /* WR7' not present. This is an ordinary Z8530 SCC. */
507 /* Put one character in TX FIFO */
508 write_scc_data(priv, 0, 0);
509 if (read_scc(priv, R0) & Tx_BUF_EMP) {
510 /* TX FIFO not full. This is a Z85230 ESCC with a 4-byte FIFO. */
513 /* TX FIFO full. This is a Z85C30 SCC with a 1-byte FIFO. */
517 write_scc(priv, R15, 0);
519 /* Start IRQ auto-detection */
520 irqs = probe_irq_on();
522 /* Enable interrupts */
523 if (type == TYPE_TWIN) {
524 outb(0, card_base + TWIN_DMA_CFG);
525 inb(card_base + TWIN_CLR_TMR1);
526 inb(card_base + TWIN_CLR_TMR2);
527 info->twin_serial_cfg = TWIN_EI;
528 outb(info->twin_serial_cfg, card_base + TWIN_SERIAL_CFG);
530 write_scc(priv, R15, CTSIE);
531 write_scc(priv, R0, RES_EXT_INT);
532 write_scc(priv, R1, EXT_INT_ENAB);
536 outb(1, tmr_base + TMR_CNT1);
537 outb(0, tmr_base + TMR_CNT1);
539 /* Wait and detect IRQ */
541 while (jiffies - time < 2 + HZ / TMR_0_HZ);
542 irq = probe_irq_off(irqs);
544 /* Clear pending interrupt, disable interrupts */
545 if (type == TYPE_TWIN) {
546 inb(card_base + TWIN_CLR_TMR1);
548 write_scc(priv, R1, 0);
549 write_scc(priv, R15, 0);
550 write_scc(priv, R0, RES_EXT_INT);
555 "dmascc: could not find irq of %s at %#3x (irq=%d)\n",
556 hw[type].name, card_base, irq);
560 /* Set up data structures */
561 for (i = 0; i < 2; i++) {
563 priv = &info->priv[i];
569 spin_lock_init(&priv->ring_lock);
570 priv->register_lock = &info->register_lock;
571 priv->card_base = card_base;
572 priv->scc_cmd = scc_base + (i ? SCCB_CMD : SCCA_CMD);
573 priv->scc_data = scc_base + (i ? SCCB_DATA : SCCA_DATA);
574 priv->tmr_cnt = tmr_base + (i ? TMR_CNT2 : TMR_CNT1);
575 priv->tmr_ctrl = tmr_base + TMR_CTRL;
576 priv->tmr_mode = i ? 0xb0 : 0x70;
577 priv->param.pclk_hz = hw[type].pclk_hz;
578 priv->param.brg_tc = -1;
579 priv->param.clocks = TCTRxCP | RCRTxCP;
580 priv->param.persist = 256;
581 priv->param.dma = -1;
582 INIT_WORK(&priv->rx_work, rx_bh, priv);
584 sprintf(dev->name, "dmascc%i", 2 * n + i);
585 dev->base_addr = card_base;
587 dev->open = scc_open;
588 dev->stop = scc_close;
589 dev->do_ioctl = scc_ioctl;
590 dev->hard_start_xmit = scc_send_packet;
591 dev->get_stats = scc_get_stats;
592 dev->hard_header = ax25_hard_header;
593 dev->rebuild_header = ax25_rebuild_header;
594 dev->set_mac_address = scc_set_mac_address;
596 if (register_netdev(info->dev[0])) {
597 printk(KERN_ERR "dmascc: could not register %s\n",
601 if (register_netdev(info->dev[1])) {
602 printk(KERN_ERR "dmascc: could not register %s\n",
610 printk(KERN_INFO "dmascc: found %s (%s) at %#3x, irq %d\n",
611 hw[type].name, chipnames[chip], card_base, irq);
615 unregister_netdev(info->dev[0]);
617 if (info->priv[0].type == TYPE_TWIN)
618 outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
619 write_scc(&info->priv[0], R9, FHWRES);
620 free_netdev(info->dev[1]);
622 free_netdev(info->dev[0]);
630 /* Driver functions */
632 static void write_scc(struct scc_priv *priv, int reg, int val)
635 switch (priv->type) {
638 outb(reg, priv->scc_cmd);
639 outb(val, priv->scc_cmd);
643 outb_p(reg, priv->scc_cmd);
644 outb_p(val, priv->scc_cmd);
647 spin_lock_irqsave(priv->register_lock, flags);
648 outb_p(0, priv->card_base + PI_DREQ_MASK);
650 outb_p(reg, priv->scc_cmd);
651 outb_p(val, priv->scc_cmd);
652 outb(1, priv->card_base + PI_DREQ_MASK);
653 spin_unlock_irqrestore(priv->register_lock, flags);
659 static void write_scc_data(struct scc_priv *priv, int val, int fast)
662 switch (priv->type) {
664 outb(val, priv->scc_data);
667 outb_p(val, priv->scc_data);
671 outb_p(val, priv->scc_data);
673 spin_lock_irqsave(priv->register_lock, flags);
674 outb_p(0, priv->card_base + PI_DREQ_MASK);
675 outb_p(val, priv->scc_data);
676 outb(1, priv->card_base + PI_DREQ_MASK);
677 spin_unlock_irqrestore(priv->register_lock, flags);
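/* The branch above that touches PI_DREQ_MASK brackets every programmed SCC
   access: the card's DMA request is gated off (outb(0, ...)) while the CPU
   talks to the chip and re-enabled afterwards, all under register_lock so
   the two channels cannot interleave their register accesses.  read_scc()
   and read_scc_data() below follow the same pattern. */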
684 static int read_scc(struct scc_priv *priv, int reg)
688 switch (priv->type) {
691 outb(reg, priv->scc_cmd);
692 return inb(priv->scc_cmd);
695 outb_p(reg, priv->scc_cmd);
696 return inb_p(priv->scc_cmd);
698 spin_lock_irqsave(priv->register_lock, flags);
699 outb_p(0, priv->card_base + PI_DREQ_MASK);
701 outb_p(reg, priv->scc_cmd);
702 rc = inb_p(priv->scc_cmd);
703 outb(1, priv->card_base + PI_DREQ_MASK);
704 spin_unlock_irqrestore(priv->register_lock, flags);
710 static int read_scc_data(struct scc_priv *priv)
714 switch (priv->type) {
716 return inb(priv->scc_data);
718 return inb_p(priv->scc_data);
720 spin_lock_irqsave(priv->register_lock, flags);
721 outb_p(0, priv->card_base + PI_DREQ_MASK);
722 rc = inb_p(priv->scc_data);
723 outb(1, priv->card_base + PI_DREQ_MASK);
724 spin_unlock_irqrestore(priv->register_lock, flags);
730 static int scc_open(struct net_device *dev)
732 struct scc_priv *priv = dev->priv;
733 struct scc_info *info = priv->info;
734 int card_base = priv->card_base;
736 /* Request IRQ if not already used by other channel */
737 if (!info->irq_used) {
738 if (request_irq(dev->irq, scc_isr, 0, "dmascc", info)) {
744 /* Request DMA if required */
745 if (priv->param.dma >= 0) {
746 if (request_dma(priv->param.dma, "dmascc")) {
747 if (--info->irq_used == 0)
748 free_irq(dev->irq, info);
751 unsigned long flags = claim_dma_lock();
752 clear_dma_ff(priv->param.dma);
753 release_dma_lock(flags);
757 /* Initialize local variables */
760 priv->rx_head = priv->rx_tail = priv->rx_count = 0;
762 priv->tx_head = priv->tx_tail = priv->tx_count = 0;
766 write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
767 /* X1 clock, SDLC mode */
768 write_scc(priv, R4, SDLC | X1CLK);
770 write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
771 /* 8 bit RX char, RX disable */
772 write_scc(priv, R3, Rx8);
773 /* 8 bit TX char, TX disable */
774 write_scc(priv, R5, Tx8);
775 /* SDLC address field */
776 write_scc(priv, R6, 0);
778 write_scc(priv, R7, FLAG);
779 switch (priv->chip) {
782 write_scc(priv, R15, SHDLCE);
784 write_scc(priv, R7, AUTOEOM);
785 write_scc(priv, R15, 0);
789 write_scc(priv, R15, SHDLCE);
790 /* The following bits are set (see 2.5.2.1):
791 - Automatic EOM reset
792 - Interrupt request if RX FIFO is half full
793 This bit should be ignored in DMA mode (according to the
794 documentation), but actually isn't. The receiver doesn't work if
795 it is set. Thus, we have to clear it in DMA mode.
796 - Interrupt/DMA request if TX FIFO is completely empty
798 a) If set, the ESCC behaves as if it had no TX FIFO (Z85C30 compatibility).
799 b) If cleared, DMA requests may follow each other very quickly,
800 filling up the TX FIFO.
801 Advantage: TX works even in case of high bus latency.
802 Disadvantage: Edge-triggered DMA request circuitry may miss
803 a request. No more data is delivered, resulting
804 in a TX FIFO underrun.
805 Both PI2 and S5SCC/DMA seem to work fine with TXFIFOE cleared.
806 The PackeTwin doesn't. I don't know about the PI, but let's
807 assume it behaves like the PI2.
809 if (priv->param.dma >= 0) {
810 if (priv->type == TYPE_TWIN)
811 write_scc(priv, R7, AUTOEOM | TXFIFOE);
813 write_scc(priv, R7, AUTOEOM);
815 write_scc(priv, R7, AUTOEOM | RXFIFOH);
817 write_scc(priv, R15, 0);
820 /* Preset CRC, NRZ(I) encoding */
821 write_scc(priv, R10, CRCPS | (priv->param.nrzi ? NRZI : NRZ));
823 /* Configure baud rate generator */
824 if (priv->param.brg_tc >= 0) {
825 /* Program BR generator */
826 write_scc(priv, R12, priv->param.brg_tc & 0xFF);
827 write_scc(priv, R13, (priv->param.brg_tc >> 8) & 0xFF);
828 /* BRG source = SYS CLK; enable BRG; DTR REQ function (required by
829 PackeTwin, not connected on the PI2); set DPLL source to BRG */
830 write_scc(priv, R14, SSBR | DTRREQ | BRSRC | BRENABL);
832 write_scc(priv, R14, SEARCH | DTRREQ | BRSRC | BRENABL);
834 /* Disable BR generator */
835 write_scc(priv, R14, DTRREQ | BRSRC);
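/* Illustrative only: the Z8530 BRG divides pclk_hz by 2 * (brg_tc + 2), so
   a rough way to pick the terminal count for a wanted BRG output rate is

       brg_tc = pclk_hz / (2 * rate) - 2;

   How that rate maps to the on-air bit rate depends on the clocks word
   (the DPLL used for RX, for instance, divides its source by 32). */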
838 /* Configure clocks */
839 if (priv->type == TYPE_TWIN) {
840 /* Disable external TX clock receiver */
841 outb((info->twin_serial_cfg &=
842 ~(priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
843 card_base + TWIN_SERIAL_CFG);
845 write_scc(priv, R11, priv->param.clocks);
846 if ((priv->type == TYPE_TWIN) && !(priv->param.clocks & TRxCOI)) {
847 /* Enable external TX clock receiver */
848 outb((info->twin_serial_cfg |=
849 (priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
850 card_base + TWIN_SERIAL_CFG);
853 /* Configure PackeTwin */
854 if (priv->type == TYPE_TWIN) {
855 /* Assert DTR, enable interrupts */
856 outb((info->twin_serial_cfg |= TWIN_EI |
857 (priv->channel ? TWIN_DTRB_ON : TWIN_DTRA_ON)),
858 card_base + TWIN_SERIAL_CFG);
861 /* Read current status */
862 priv->rr0 = read_scc(priv, R0);
863 /* Enable DCD interrupt */
864 write_scc(priv, R15, DCDIE);
866 netif_start_queue(dev);
872 static int scc_close(struct net_device *dev)
874 struct scc_priv *priv = dev->priv;
875 struct scc_info *info = priv->info;
876 int card_base = priv->card_base;
878 netif_stop_queue(dev);
880 if (priv->type == TYPE_TWIN) {
882 outb((info->twin_serial_cfg &=
883 (priv->channel ? ~TWIN_DTRB_ON : ~TWIN_DTRA_ON)),
884 card_base + TWIN_SERIAL_CFG);
887 /* Reset channel, free DMA and IRQ */
888 write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
889 if (priv->param.dma >= 0) {
890 if (priv->type == TYPE_TWIN)
891 outb(0, card_base + TWIN_DMA_CFG);
892 free_dma(priv->param.dma);
894 if (--info->irq_used == 0)
895 free_irq(dev->irq, info);
901 static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
903 struct scc_priv *priv = dev->priv;
908 (ifr->ifr_data, &priv->param,
909 sizeof(struct scc_param)))
913 if (!capable(CAP_NET_ADMIN))
915 if (netif_running(dev))
918 (&priv->param, ifr->ifr_data,
919 sizeof(struct scc_param)))
928 static int scc_send_packet(struct sk_buff *skb, struct net_device *dev)
930 struct scc_priv *priv = dev->priv;
934 /* Temporarily stop the scheduler feeding us packets */
935 netif_stop_queue(dev);
937 /* Transfer data to DMA buffer */
939 memcpy(priv->tx_buf[i], skb->data + 1, skb->len - 1);
940 priv->tx_len[i] = skb->len - 1;
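/* skb->data[0] is the KISS command/port byte prepended by the AX.25 stack;
   it is not transmitted, hence the +1 / -1 above.  rx_bh() reserves the
   same leading byte when passing frames back up. */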
942 /* Clear interrupts while we touch our circular buffers */
944 spin_lock_irqsave(&priv->ring_lock, flags);
945 /* Move the ring buffer's head */
946 priv->tx_head = (i + 1) % NUM_TX_BUF;
949 /* If we just filled up the last buffer, leave queue stopped.
950 The higher layers must wait until we have a DMA buffer
951 to accept the data. */
952 if (priv->tx_count < NUM_TX_BUF)
953 netif_wake_queue(dev);
955 /* Set new TX state */
956 if (priv->state == IDLE) {
957 /* Assert RTS, start timer */
958 priv->state = TX_HEAD;
959 priv->tx_start = jiffies;
960 write_scc(priv, R5, TxCRC_ENAB | RTS | TxENAB | Tx8);
961 write_scc(priv, R15, 0);
962 start_timer(priv, priv->param.txdelay, 0);
965 /* Turn interrupts back on and free buffer */
966 spin_unlock_irqrestore(&priv->ring_lock, flags);
973 static struct net_device_stats *scc_get_stats(struct net_device *dev)
975 struct scc_priv *priv = dev->priv;
981 static int scc_set_mac_address(struct net_device *dev, void *sa)
983 memcpy(dev->dev_addr, ((struct sockaddr *) sa)->sa_data,
989 static inline void tx_on(struct scc_priv *priv)
994 if (priv->param.dma >= 0) {
995 n = (priv->chip == Z85230) ? 3 : 1;
996 /* Program DMA controller */
997 flags = claim_dma_lock();
998 set_dma_mode(priv->param.dma, DMA_MODE_WRITE);
999 set_dma_addr(priv->param.dma,
1000 (int) priv->tx_buf[priv->tx_tail] + n);
1001 set_dma_count(priv->param.dma,
1002 priv->tx_len[priv->tx_tail] - n);
1003 release_dma_lock(flags);
1004 /* Enable TX underrun interrupt */
1005 write_scc(priv, R15, TxUIE);
1006 /* Configure DREQ */
1007 if (priv->type == TYPE_TWIN)
1008 outb((priv->param.dma ==
1009 1) ? TWIN_DMA_HDX_T1 : TWIN_DMA_HDX_T3,
1010 priv->card_base + TWIN_DMA_CFG);
1013 EXT_INT_ENAB | WT_FN_RDYFN |
1015 /* Write first byte(s) */
1016 spin_lock_irqsave(priv->register_lock, flags);
1017 for (i = 0; i < n; i++)
1018 write_scc_data(priv,
1019 priv->tx_buf[priv->tx_tail][i], 1);
1020 enable_dma(priv->param.dma);
1021 spin_unlock_irqrestore(priv->register_lock, flags);
1023 write_scc(priv, R15, TxUIE);
1025 EXT_INT_ENAB | WT_FN_RDYFN | TxINT_ENAB);
1028 /* Reset EOM latch if we do not have the AUTOEOM feature */
1029 if (priv->chip == Z8530)
1030 write_scc(priv, R0, RES_EOM_L);
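/* Note on the DMA path above: the CPU still writes the first n byte(s) by
   hand (one for a plain SCC, three for the Z85230 with its deeper TX FIFO).
   Filling the FIFO is what starts the frame and makes the chip raise
   further DMA requests for the rest of the buffer, which is why the DMA
   address and count are offset by n. */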
1034 static inline void rx_on(struct scc_priv *priv)
1036 unsigned long flags;
1039 while (read_scc(priv, R0) & Rx_CH_AV)
1040 read_scc_data(priv);
1042 if (priv->param.dma >= 0) {
1043 /* Program DMA controller */
1044 flags = claim_dma_lock();
1045 set_dma_mode(priv->param.dma, DMA_MODE_READ);
1046 set_dma_addr(priv->param.dma,
1047 (int) priv->rx_buf[priv->rx_head]);
1048 set_dma_count(priv->param.dma, BUF_SIZE);
1049 release_dma_lock(flags);
1050 enable_dma(priv->param.dma);
1051 /* Configure PackeTwin DMA */
1052 if (priv->type == TYPE_TWIN) {
1053 outb((priv->param.dma ==
1054 1) ? TWIN_DMA_HDX_R1 : TWIN_DMA_HDX_R3,
1055 priv->card_base + TWIN_DMA_CFG);
1057 /* Sp. cond. intr. only, ext int enable, RX DMA enable */
1058 write_scc(priv, R1, EXT_INT_ENAB | INT_ERR_Rx |
1059 WT_RDY_RT | WT_FN_RDYFN | WT_RDY_ENAB);
1061 /* Reset current frame */
1063 /* Intr. on all Rx characters and Sp. cond., ext int enable */
1064 write_scc(priv, R1, EXT_INT_ENAB | INT_ALL_Rx | WT_RDY_RT |
1067 write_scc(priv, R0, ERR_RES);
1068 write_scc(priv, R3, RxENABLE | Rx8 | RxCRC_ENAB);
1072 static inline void rx_off(struct scc_priv *priv)
1074 /* Disable receiver */
1075 write_scc(priv, R3, Rx8);
1076 /* Disable DREQ / RX interrupt */
1077 if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
1078 outb(0, priv->card_base + TWIN_DMA_CFG);
1080 write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
1082 if (priv->param.dma >= 0)
1083 disable_dma(priv->param.dma);
1087 static void start_timer(struct scc_priv *priv, int t, int r15)
1089 unsigned long flags;
1091 outb(priv->tmr_mode, priv->tmr_ctrl);
1097 outb(t & 0xFF, priv->tmr_cnt);
1098 outb((t >> 8) & 0xFF, priv->tmr_cnt);
1099 if (priv->type != TYPE_TWIN) {
1100 write_scc(priv, R15, r15 | CTSIE);
1103 restore_flags(flags);
1108 static inline unsigned char random(void)
1110 /* See "Numerical Recipes in C", second edition, p. 284 */
1111 rand = rand * 1664525L + 1013904223L;
1112 return (unsigned char) (rand >> 24);
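/* These are the constants of the "quick and dirty" generator from
   Numerical Recipes; only the high byte is returned because the low-order
   bits of a linear congruential generator are the least random. */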
1115 static inline void z8530_isr(struct scc_info *info)
1119 while ((is = read_scc(&info->priv[0], R3)) && i--) {
1121 rx_isr(&info->priv[0]);
1122 } else if (is & CHATxIP) {
1123 tx_isr(&info->priv[0]);
1124 } else if (is & CHAEXT) {
1125 es_isr(&info->priv[0]);
1126 } else if (is & CHBRxIP) {
1127 rx_isr(&info->priv[1]);
1128 } else if (is & CHBTxIP) {
1129 tx_isr(&info->priv[1]);
1131 es_isr(&info->priv[1]);
1133 write_scc(&info->priv[0], R0, RES_H_IUS);
1137 printk(KERN_ERR "dmascc: stuck in ISR with RR3=0x%02x.\n", is);
1140 /* Ok, no interrupts pending from this 8530. The INT line should be inactive now. */
1145 static irqreturn_t scc_isr(int irq, void *dev_id)
1147 struct scc_info *info = dev_id;
1149 spin_lock(info->priv[0].register_lock);
1150 /* At this point interrupts are enabled, and the interrupt under service
1151 is already acknowledged, but masked off.
1153 Interrupt processing: We loop until we know that the IRQ line is
1154 low. If another positive edge occurs afterwards during the ISR,
1155 another interrupt will be triggered by the interrupt controller
1156 as soon as the IRQ level is enabled again (see asm/irq.h).
1158 Bottom-half handlers will be processed after scc_isr(). This is
1159 important, since we only have small ringbuffers and want new data
1160 to be fetched/delivered immediately. */
1162 if (info->priv[0].type == TYPE_TWIN) {
1163 int is, card_base = info->priv[0].card_base;
1164 while ((is = ~inb(card_base + TWIN_INT_REG)) &
1166 if (is & TWIN_SCC_MSK) {
1168 } else if (is & TWIN_TMR1_MSK) {
1169 inb(card_base + TWIN_CLR_TMR1);
1170 tm_isr(&info->priv[0]);
1172 inb(card_base + TWIN_CLR_TMR2);
1173 tm_isr(&info->priv[1]);
1178 spin_unlock(info->priv[0].register_lock);
1183 static void rx_isr(struct scc_priv *priv)
1185 if (priv->param.dma >= 0) {
1186 /* Check special condition and perform error reset. See 2.4.7.5. */
1187 special_condition(priv, read_scc(priv, R1));
1188 write_scc(priv, R0, ERR_RES);
1190 /* Check special condition for each character. Error reset not necessary.
1191 Same algorithm for SCC and ESCC. See 2.4.7.1 and 2.4.7.4. */
1193 while (read_scc(priv, R0) & Rx_CH_AV) {
1194 rc = read_scc(priv, R1);
1195 if (priv->rx_ptr < BUF_SIZE)
1196 priv->rx_buf[priv->rx_head][priv->rx_ptr++] =
1198 read_scc_data(priv);
1201 read_scc_data(priv);
1203 special_condition(priv, rc);
1209 static void special_condition(struct scc_priv *priv, int rc)
1212 unsigned long flags;
1214 /* See Figure 2-15. Only overrun and EOF need to be checked. */
1217 /* Receiver overrun */
1219 if (priv->param.dma < 0)
1220 write_scc(priv, R0, ERR_RES);
1221 } else if (rc & END_FR) {
1222 /* End of frame. Get byte count */
1223 if (priv->param.dma >= 0) {
1224 flags = claim_dma_lock();
1225 cb = BUF_SIZE - get_dma_residue(priv->param.dma) - 2;
1227 release_dma_lock(flags);
1229 cb = priv->rx_ptr - 2;
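/* The two bytes subtracted here (and in the DMA residue calculation above)
   are the received CRC, which the SCC delivers with the data but which is
   not passed up the stack. */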
1231 if (priv->rx_over) {
1232 /* We had an overrun */
1233 priv->stats.rx_errors++;
1234 if (priv->rx_over == 2)
1235 priv->stats.rx_length_errors++;
1237 priv->stats.rx_fifo_errors++;
1239 } else if (rc & CRC_ERR) {
1240 /* Count invalid CRC only if packet length >= minimum */
1242 priv->stats.rx_errors++;
1243 priv->stats.rx_crc_errors++;
1247 if (priv->rx_count < NUM_RX_BUF - 1) {
1248 /* Put good frame in FIFO */
1249 priv->rx_len[priv->rx_head] = cb;
1254 schedule_work(&priv->rx_work);
1256 priv->stats.rx_errors++;
1257 priv->stats.rx_over_errors++;
1261 /* Get ready for new frame */
1262 if (priv->param.dma >= 0) {
1263 flags = claim_dma_lock();
1264 set_dma_addr(priv->param.dma,
1265 (int) priv->rx_buf[priv->rx_head]);
1266 set_dma_count(priv->param.dma, BUF_SIZE);
1267 release_dma_lock(flags);
1275 static void rx_bh(void *arg)
1277 struct scc_priv *priv = arg;
1278 int i = priv->rx_tail;
1280 unsigned long flags;
1281 struct sk_buff *skb;
1282 unsigned char *data;
1284 spin_lock_irqsave(&priv->ring_lock, flags);
1285 while (priv->rx_count) {
1286 spin_unlock_irqrestore(&priv->ring_lock, flags);
1287 cb = priv->rx_len[i];
1288 /* Allocate buffer */
1289 skb = dev_alloc_skb(cb + 1);
1292 priv->stats.rx_dropped++;
1295 data = skb_put(skb, cb + 1);
1297 memcpy(&data[1], priv->rx_buf[i], cb);
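/* data[0], the extra leading byte allocated above, carries the KISS
   frame-type byte the AX.25 stack expects; the received frame itself starts
   at data[1], mirroring the byte skipped in scc_send_packet(). */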
1298 skb->protocol = ax25_type_trans(skb, priv->dev);
1300 priv->dev->last_rx = jiffies;
1301 priv->stats.rx_packets++;
1302 priv->stats.rx_bytes += cb;
1304 spin_lock_irqsave(&priv->ring_lock, flags);
1306 priv->rx_tail = i = (i + 1) % NUM_RX_BUF;
1309 spin_unlock_irqrestore(&priv->ring_lock, flags);
1313 static void tx_isr(struct scc_priv *priv)
1315 int i = priv->tx_tail, p = priv->tx_ptr;
1317 /* Suspend TX interrupts if we don't want to send anything.
1319 if (p == priv->tx_len[i]) {
1320 write_scc(priv, R0, RES_Tx_P);
1324 /* Write characters */
1325 while ((read_scc(priv, R0) & Tx_BUF_EMP) && p < priv->tx_len[i]) {
1326 write_scc_data(priv, priv->tx_buf[i][p++], 0);
1329 /* Reset EOM latch of Z8530 */
1330 if (!priv->tx_ptr && p && priv->chip == Z8530)
1331 write_scc(priv, R0, RES_EOM_L);
1337 static void es_isr(struct scc_priv *priv)
1339 int i, rr0, drr0, res;
1340 unsigned long flags;
1342 /* Read status, reset interrupt bit (open latches) */
1343 rr0 = read_scc(priv, R0);
1344 write_scc(priv, R0, RES_EXT_INT);
1345 drr0 = priv->rr0 ^ rr0;
1348 /* Transmit underrun (2.4.9.6). We can't check the TxEOM flag, since
1349 it might have already been cleared again by AUTOEOM. */
1350 if (priv->state == TX_DATA) {
1351 /* Get remaining bytes */
1353 if (priv->param.dma >= 0) {
1354 disable_dma(priv->param.dma);
1355 flags = claim_dma_lock();
1356 res = get_dma_residue(priv->param.dma);
1357 release_dma_lock(flags);
1359 res = priv->tx_len[i] - priv->tx_ptr;
1362 /* Disable DREQ / TX interrupt */
1363 if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
1364 outb(0, priv->card_base + TWIN_DMA_CFG);
1366 write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
1368 /* Update packet statistics */
1369 priv->stats.tx_errors++;
1370 priv->stats.tx_fifo_errors++;
1371 /* Other underrun interrupts may already be waiting */
1372 write_scc(priv, R0, RES_EXT_INT);
1373 write_scc(priv, R0, RES_EXT_INT);
1375 /* Update packet statistics */
1376 priv->stats.tx_packets++;
1377 priv->stats.tx_bytes += priv->tx_len[i];
1378 /* Remove frame from FIFO */
1379 priv->tx_tail = (i + 1) % NUM_TX_BUF;
1381 /* Inform upper layers */
1382 netif_wake_queue(priv->dev);
1385 write_scc(priv, R15, 0);
1386 if (priv->tx_count &&
1387 (jiffies - priv->tx_start) < priv->param.txtimeout) {
1388 priv->state = TX_PAUSE;
1389 start_timer(priv, priv->param.txpause, 0);
1391 priv->state = TX_TAIL;
1392 start_timer(priv, priv->param.txtail, 0);
1396 /* DCD transition */
1399 switch (priv->state) {
1402 priv->state = DCD_ON;
1403 write_scc(priv, R15, 0);
1404 start_timer(priv, priv->param.dcdon, 0);
1407 switch (priv->state) {
1410 priv->state = DCD_OFF;
1411 write_scc(priv, R15, 0);
1412 start_timer(priv, priv->param.dcdoff, 0);
1417 /* CTS transition */
1418 if ((drr0 & CTS) && (~rr0 & CTS) && priv->type != TYPE_TWIN)
1424 static void tm_isr(struct scc_priv *priv)
1426 switch (priv->state) {
1430 priv->state = TX_DATA;
1433 write_scc(priv, R5, TxCRC_ENAB | Tx8);
1434 priv->state = RTS_OFF;
1435 if (priv->type != TYPE_TWIN)
1436 write_scc(priv, R15, 0);
1437 start_timer(priv, priv->param.rtsoff, 0);
1440 write_scc(priv, R15, DCDIE);
1441 priv->rr0 = read_scc(priv, R0);
1442 if (priv->rr0 & DCD) {
1443 priv->stats.collisions++;
1445 priv->state = RX_ON;
1448 start_timer(priv, priv->param.waittime, DCDIE);
1452 if (priv->tx_count) {
1453 priv->state = TX_HEAD;
1454 priv->tx_start = jiffies;
1456 TxCRC_ENAB | RTS | TxENAB | Tx8);
1457 write_scc(priv, R15, 0);
1458 start_timer(priv, priv->param.txdelay, 0);
1461 if (priv->type != TYPE_TWIN)
1462 write_scc(priv, R15, DCDIE);
1467 write_scc(priv, R15, DCDIE);
1468 priv->rr0 = read_scc(priv, R0);
1469 if (priv->rr0 & DCD) {
1471 priv->state = RX_ON;
1475 random() / priv->param.persist *
1476 priv->param.slottime, DCDIE);
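/* p-persistence in short: random() is uniform over 0..255, so the integer
   division random() / persist is 0 (no additional wait) with probability
   persist/256; otherwise the channel is checked again after that many
   slottimes, with DCDIE kept on so a DCD transition is still noticed. */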