1 /* $Id: plip.c,v 1.3.6.2 1997/04/16 15:07:56 phil Exp $ */
2 /* PLIP: A parallel port "network" driver for Linux. */
3 /* This driver is for a parallel port with a 5-bit cable (LapLink (R) cable). */
5 * Authors: Donald Becker <becker@scyld.com>
6 * Tommy Thorn <thorn@daimi.aau.dk>
7 * Tanabe Hiroyasu <hiro@sanpo.t.u-tokyo.ac.jp>
8 * Alan Cox <gw4pts@gw4pts.ampr.org>
9 * Peter Bauer <100136.3530@compuserve.com>
10 * Niibe Yutaka <gniibe@mri.co.jp>
11 * Nimrod Zimerman <zimerman@mailandnews.com>
14 * Modularization and ifreq/ifmap support by Alan Cox.
15 * Rewritten by Niibe Yutaka.
16 * parport-sharing awareness code by Philip Blundell.
17 * SMP locking by Niibe Yutaka.
18 * Support for parallel ports with no IRQ (poll mode),
19 * Modifications to use the parallel port API
24 * - Module initialization.
26 * - Make sure other end is OK, before sending a packet.
27 * - Fix immediate timer problem.
30 * - Changed {enable,disable}_irq handling to make it work
31 * with new ("stack") semantics.
33 * This program is free software; you can redistribute it and/or
34 * modify it under the terms of the GNU General Public License
35 * as published by the Free Software Foundation; either version
36 * 2 of the License, or (at your option) any later version.
40 * Original version and the name 'PLIP' from Donald Becker <becker@scyld.com>
41 * inspired by Russ Nelson's parallel port packet driver.
44 * Tanabe Hiroyasu had changed the protocol, and it was in Linux v1.0.
45 * Because of the necessity to communicate to DOS machines with the
46 * Crynwr packet driver, Peter Bauer changed the protocol again
47 * back to original protocol.
49 * This version follows original PLIP protocol.
50 * So, this PLIP can't communicate with the PLIP of Linux v1.0.
54 * To use with DOS box, please do (Turn on ARP switch):
55 * # ifconfig plip[0-2] arp
57 static const char version[] = "NET3 PLIP version 2.4-parport gniibe@mri.co.jp\n";
61 Ideas and protocols came from Russ Nelson's <nelson@crynwr.com>
62 "parallel.asm" parallel port packet driver.
64 The "Crynwr" parallel port standard specifies the following protocol:
65 Trigger by sending nibble '0x8' (this causes interrupt on other end)
70 Each octet is sent as <wait for rx. '0x1?'> <send 0x10+(octet&0x0F)>
71 <wait for rx. '0x0?'> <send 0x00+((octet>>4)&0x0F)>
73 The packet is encapsulated as if it were ethernet.
75 The cable used is a de facto standard parallel null cable -- sold as
76 a "LapLink" cable by various places. You'll need a 12-conductor cable to
77 make one yourself. The wiring is:
80 D0->ERROR 2 - 15 15 - 2
81 D1->SLCT 3 - 13 13 - 3
82 D2->PAPOUT 4 - 12 12 - 4
84 D4->BUSY 6 - 11 11 - 6
85 Do not connect the other pins. They are
87 STROBE is 1, FEED is 14, INIT is 16
88 extra grounds are 18,19,20,21,22,23,24
91 #include <linux/module.h>
92 #include <linux/kernel.h>
93 #include <linux/types.h>
94 #include <linux/fcntl.h>
95 #include <linux/interrupt.h>
96 #include <linux/string.h>
97 #include <linux/if_ether.h>
99 #include <linux/errno.h>
100 #include <linux/delay.h>
101 #include <linux/init.h>
102 #include <linux/netdevice.h>
103 #include <linux/etherdevice.h>
104 #include <linux/inetdevice.h>
105 #include <linux/skbuff.h>
106 #include <linux/if_plip.h>
107 #include <linux/workqueue.h>
108 #include <linux/spinlock.h>
109 #include <linux/parport.h>
110 #include <linux/bitops.h>
112 #include <net/neighbour.h>
114 #include <asm/system.h>
116 #include <asm/byteorder.h>
117 #include <asm/semaphore.h>
119 /* Maximum number of devices to support. */
122 /* Use 0 for production, 1 for verification, >2 for debug */
126 static const unsigned int net_debug = NET_DEBUG;
128 #define ENABLE(irq) if (irq != -1) enable_irq(irq)
129 #define DISABLE(irq) if (irq != -1) disable_irq(irq)
131 /* In micro second */
132 #define PLIP_DELAY_UNIT 1
134 /* Connection time out = PLIP_TRIGGER_WAIT * PLIP_DELAY_UNIT usec */
135 #define PLIP_TRIGGER_WAIT 500
137 /* Nibble time out = PLIP_NIBBLE_WAIT * PLIP_DELAY_UNIT usec */
138 #define PLIP_NIBBLE_WAIT 3000
141 static void plip_kick_bh(struct work_struct *work);
142 static void plip_bh(struct work_struct *work);
143 static void plip_timer_bh(struct work_struct *work);
145 /* Interrupt handler */
146 static void plip_interrupt(int irq, void *dev_id);
148 /* Functions for DEV methods */
149 static int plip_tx_packet(struct sk_buff *skb, struct net_device *dev);
150 static int plip_hard_header(struct sk_buff *skb, struct net_device *dev,
151 unsigned short type, void *daddr,
152 void *saddr, unsigned len);
153 static int plip_hard_header_cache(struct neighbour *neigh,
154 struct hh_cache *hh);
155 static int plip_open(struct net_device *dev);
156 static int plip_close(struct net_device *dev);
157 static struct net_device_stats *plip_get_stats(struct net_device *dev);
158 static int plip_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
159 static int plip_preempt(void *handle);
160 static void plip_wakeup(void *handle);
162 enum plip_connection_state {
170 enum plip_packet_state {
179 enum plip_nibble_state {
186 enum plip_packet_state state;
187 enum plip_nibble_state nibble;
190 #if defined(__LITTLE_ENDIAN)
193 #elif defined(__BIG_ENDIAN)
197 #error "Please fix the endianness defines in <asm/byteorder.h>"
203 unsigned char checksum;
209 struct net_device_stats enet_stats;
210 struct net_device *dev;
211 struct work_struct immediate;
212 struct delayed_work deferred;
213 struct delayed_work timer;
214 struct plip_local snd_data;
215 struct plip_local rcv_data;
216 struct pardevice *pardev;
217 unsigned long trigger;
218 unsigned long nibble;
219 enum plip_connection_state connection;
220 unsigned short timeout_count;
223 int should_relinquish;
224 int (*orig_hard_header)(struct sk_buff *skb, struct net_device *dev,
225 unsigned short type, void *daddr,
226 void *saddr, unsigned len);
227 int (*orig_hard_header_cache)(struct neighbour *neigh,
228 struct hh_cache *hh);
231 struct semaphore killed_timer_sem;
234 static inline void enable_parport_interrupts (struct net_device *dev)
238 struct parport *port =
239 ((struct net_local *)dev->priv)->pardev->port;
240 port->ops->enable_irq (port);
244 static inline void disable_parport_interrupts (struct net_device *dev)
248 struct parport *port =
249 ((struct net_local *)dev->priv)->pardev->port;
250 port->ops->disable_irq (port);
254 static inline void write_data (struct net_device *dev, unsigned char data)
256 struct parport *port =
257 ((struct net_local *)dev->priv)->pardev->port;
259 port->ops->write_data (port, data);
262 static inline unsigned char read_status (struct net_device *dev)
264 struct parport *port =
265 ((struct net_local *)dev->priv)->pardev->port;
267 return port->ops->read_status (port);
270 /* Entry point of PLIP driver.
271 Probe the hardware, and register/initialize the driver.
273 PLIP is rather weird, because of the way it interacts with the parport
274 system. It is _not_ initialised from Space.c. Instead, plip_init()
275 is called, and that function makes up a "struct net_device" for each port, and
280 plip_init_netdev(struct net_device *dev)
282 struct net_local *nl = netdev_priv(dev);
284 /* Then, override parts of it */
285 dev->hard_start_xmit = plip_tx_packet;
286 dev->open = plip_open;
287 dev->stop = plip_close;
288 dev->get_stats = plip_get_stats;
289 dev->do_ioctl = plip_ioctl;
290 dev->header_cache_update = NULL;
291 dev->tx_queue_len = 10;
292 dev->flags = IFF_POINTOPOINT|IFF_NOARP;
293 memset(dev->dev_addr, 0xfc, ETH_ALEN);
295 /* Set the private structure */
296 nl->orig_hard_header = dev->hard_header;
297 dev->hard_header = plip_hard_header;
299 nl->orig_hard_header_cache = dev->hard_header_cache;
300 dev->hard_header_cache = plip_hard_header_cache;
305 /* Initialize constants */
306 nl->trigger = PLIP_TRIGGER_WAIT;
307 nl->nibble = PLIP_NIBBLE_WAIT;
309 /* Initialize task queue structures */
310 INIT_WORK(&nl->immediate, plip_bh);
311 INIT_DELAYED_WORK(&nl->deferred, plip_kick_bh);
314 INIT_DELAYED_WORK(&nl->timer, plip_timer_bh);
316 spin_lock_init(&nl->lock);
319 /* Bottom half handler for the delayed request.
320 This routine is kicked by do_timer().
321 Request `plip_bh' to be invoked. */
323 plip_kick_bh(struct work_struct *work)
325 struct net_local *nl =
326 container_of(work, struct net_local, deferred.work);
329 schedule_work(&nl->immediate);
332 /* Forward declarations of internal routines */
333 static int plip_none(struct net_device *, struct net_local *,
334 struct plip_local *, struct plip_local *);
335 static int plip_receive_packet(struct net_device *, struct net_local *,
336 struct plip_local *, struct plip_local *);
337 static int plip_send_packet(struct net_device *, struct net_local *,
338 struct plip_local *, struct plip_local *);
339 static int plip_connection_close(struct net_device *, struct net_local *,
340 struct plip_local *, struct plip_local *);
341 static int plip_error(struct net_device *, struct net_local *,
342 struct plip_local *, struct plip_local *);
343 static int plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
344 struct plip_local *snd,
345 struct plip_local *rcv,
353 typedef int (*plip_func)(struct net_device *dev, struct net_local *nl,
354 struct plip_local *snd, struct plip_local *rcv);
356 static const plip_func connection_state_table[] =
361 plip_connection_close,
365 /* Bottom half handler of PLIP. */
367 plip_bh(struct work_struct *work)
369 struct net_local *nl = container_of(work, struct net_local, immediate);
370 struct plip_local *snd = &nl->snd_data;
371 struct plip_local *rcv = &nl->rcv_data;
376 f = connection_state_table[nl->connection];
377 if ((r = (*f)(nl->dev, nl, snd, rcv)) != OK
378 && (r = plip_bh_timeout_error(nl->dev, nl, snd, rcv, r)) != OK) {
380 schedule_delayed_work(&nl->deferred, 1);
385 plip_timer_bh(struct work_struct *work)
387 struct net_local *nl =
388 container_of(work, struct net_local, timer.work);
390 if (!(atomic_read (&nl->kill_timer))) {
391 plip_interrupt (-1, nl->dev);
393 schedule_delayed_work(&nl->timer, 1);
396 up (&nl->killed_timer_sem);
401 plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
402 struct plip_local *snd, struct plip_local *rcv,
407 * This is tricky. If we got here from the beginning of send (either
408 * with ERROR or HS_TIMEOUT) we have IRQ enabled. Otherwise it's
409 * already disabled. With the old variant of {enable,disable}_irq()
410 * extra disable_irq() was a no-op. Now it became mortal - it's
411 * unbalanced and thus we'll never re-enable IRQ (until rmmod plip,
412 * that is). So we have to treat HS_TIMEOUT and ERROR from send
416 spin_lock_irq(&nl->lock);
417 if (nl->connection == PLIP_CN_SEND) {
419 if (error != ERROR) { /* Timeout */
421 if ((error == HS_TIMEOUT
422 && nl->timeout_count <= 10)
423 || nl->timeout_count <= 3) {
424 spin_unlock_irq(&nl->lock);
425 /* Try again later */
428 c0 = read_status(dev);
429 printk(KERN_WARNING "%s: transmit timeout(%d,%02x)\n",
430 dev->name, snd->state, c0);
433 nl->enet_stats.tx_errors++;
434 nl->enet_stats.tx_aborted_errors++;
435 } else if (nl->connection == PLIP_CN_RECEIVE) {
436 if (rcv->state == PLIP_PK_TRIGGER) {
437 /* Transmission was interrupted. */
438 spin_unlock_irq(&nl->lock);
441 if (error != ERROR) { /* Timeout */
442 if (++nl->timeout_count <= 3) {
443 spin_unlock_irq(&nl->lock);
444 /* Try again later */
447 c0 = read_status(dev);
448 printk(KERN_WARNING "%s: receive timeout(%d,%02x)\n",
449 dev->name, rcv->state, c0);
451 nl->enet_stats.rx_dropped++;
453 rcv->state = PLIP_PK_DONE;
458 snd->state = PLIP_PK_DONE;
460 dev_kfree_skb(snd->skb);
463 spin_unlock_irq(&nl->lock);
464 if (error == HS_TIMEOUT) {
466 synchronize_irq(dev->irq);
468 disable_parport_interrupts (dev);
469 netif_stop_queue (dev);
470 nl->connection = PLIP_CN_ERROR;
471 write_data (dev, 0x00);
477 plip_none(struct net_device *dev, struct net_local *nl,
478 struct plip_local *snd, struct plip_local *rcv)
483 /* PLIP_RECEIVE --- receive a byte(two nibbles)
484 Returns OK on success, TIMEOUT on timeout */
486 plip_receive(unsigned short nibble_timeout, struct net_device *dev,
487 enum plip_nibble_state *ns_p, unsigned char *data_p)
489 unsigned char c0, c1;
496 c0 = read_status(dev);
497 udelay(PLIP_DELAY_UNIT);
498 if ((c0 & 0x80) == 0) {
499 c1 = read_status(dev);
506 *data_p = (c0 >> 3) & 0x0f;
507 write_data (dev, 0x10); /* send ACK */
513 c0 = read_status(dev);
514 udelay(PLIP_DELAY_UNIT);
516 c1 = read_status(dev);
523 *data_p |= (c0 << 1) & 0xf0;
524 write_data (dev, 0x00); /* send ACK */
525 *ns_p = PLIP_NB_BEGIN;
533 * Determine the packet's protocol ID. The rule here is that we
534 * assume 802.3 if the type field is short enough to be a length.
535 * This is normal practice and works for any 'now in use' protocol.
537 * PLIP is ethernet ish but the daddr might not be valid if unicast.
538 * PLIP fortunately has no bus architecture (its Point-to-point).
540 * We can't fix the daddr thing as that quirk (more bug) is embedded
541 * in far too many old systems not all even running Linux.
544 static __be16 plip_type_trans(struct sk_buff *skb, struct net_device *dev)
549 skb->mac.raw=skb->data;
550 skb_pull(skb,dev->hard_header_len);
555 if(memcmp(eth->h_dest,dev->broadcast, ETH_ALEN)==0)
556 skb->pkt_type=PACKET_BROADCAST;
558 skb->pkt_type=PACKET_MULTICAST;
562 * This ALLMULTI check should be redundant by 1.4
563 * so don't forget to remove it.
566 if (ntohs(eth->h_proto) >= 1536)
572 * This is a magic hack to spot IPX packets. Older Novell breaks
573 * the protocol design and runs IPX over 802.3 without an 802.2 LLC
574 * layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
575 * won't work for fault tolerant netware but does for the rest.
577 if (*(unsigned short *)rawp == 0xFFFF)
578 return htons(ETH_P_802_3);
583 return htons(ETH_P_802_2);
586 /* PLIP_RECEIVE_PACKET --- receive a packet */
/* Receive-side state machine: handshake the trigger, read the 16-bit length,
   allocate an skb, read the payload and checksum byte, then hand the packet
   to the network stack and close the connection (possibly switching straight
   to SEND if a transmit is pending).
   NOTE(review): many case labels / returns are missing from this dump. */
588 plip_receive_packet(struct net_device *dev, struct net_local *nl,
589 struct plip_local *snd, struct plip_local *rcv)
591 	unsigned short nibble_timeout = nl->nibble;
594 	switch (rcv->state) {
595 	case PLIP_PK_TRIGGER:
597 		/* Don't need to synchronize irq, as we can safely ignore it */
598 		disable_parport_interrupts (dev);
599 		write_data (dev, 0x01); /* send ACK */
601 			printk(KERN_DEBUG "%s: receive start\n", dev->name);
602 		rcv->state = PLIP_PK_LENGTH_LSB;
603 		rcv->nibble = PLIP_NB_BEGIN;
605 	case PLIP_PK_LENGTH_LSB:
/* If we were also trying to send, a failed receive here means collision:
   abandon the receive and go back to sending. */
606 		if (snd->state != PLIP_PK_DONE) {
607 			if (plip_receive(nl->trigger, dev,
608 					 &rcv->nibble, &rcv->length.b.lsb)) {
609 				/* collision, here dev->tbusy == 1 */
610 				rcv->state = PLIP_PK_DONE;
612 				nl->connection = PLIP_CN_SEND;
613 				schedule_delayed_work(&nl->deferred, 1);
614 				enable_parport_interrupts (dev);
619 			if (plip_receive(nibble_timeout, dev,
620 					 &rcv->nibble, &rcv->length.b.lsb))
623 		rcv->state = PLIP_PK_LENGTH_MSB;
625 	case PLIP_PK_LENGTH_MSB:
626 		if (plip_receive(nibble_timeout, dev,
627 				 &rcv->nibble, &rcv->length.b.msb))
/* Sanity-check the announced length against MTU + header; reject bogus sizes. */
629 		if (rcv->length.h > dev->mtu + dev->hard_header_len
630 		    || rcv->length.h < 8) {
631 			printk(KERN_WARNING "%s: bogus packet size %d.\n", dev->name, rcv->length.h);
634 		/* Malloc up new buffer. */
635 		rcv->skb = dev_alloc_skb(rcv->length.h + 2);
636 		if (rcv->skb == NULL) {
637 			printk(KERN_ERR "%s: Memory squeeze.\n", dev->name);
640 		skb_reserve(rcv->skb, 2);	/* Align IP on 16 byte boundaries */
641 		skb_put(rcv->skb,rcv->length.h);
643 		rcv->state = PLIP_PK_DATA;
648 		lbuf = rcv->skb->data;
/* Receive payload bytes one at a time, then fold them into the checksum. */
650 			if (plip_receive(nibble_timeout, dev,
651 					 &rcv->nibble, &lbuf[rcv->byte]))
653 		while (++rcv->byte < rcv->length.h);
655 			rcv->checksum += lbuf[--rcv->byte];
657 		rcv->state = PLIP_PK_CHECKSUM;
659 	case PLIP_PK_CHECKSUM:
660 		if (plip_receive(nibble_timeout, dev,
661 				 &rcv->nibble, &rcv->data))
/* Verify the additive checksum the sender transmitted. */
663 		if (rcv->data != rcv->checksum) {
664 			nl->enet_stats.rx_crc_errors++;
666 				printk(KERN_DEBUG "%s: checksum error\n", dev->name);
669 		rcv->state = PLIP_PK_DONE;
672 		/* Inform the upper layer for the arrival of a packet. */
673 		rcv->skb->protocol=plip_type_trans(rcv->skb, dev);
675 		dev->last_rx = jiffies;
676 		nl->enet_stats.rx_bytes += rcv->length.h;
677 		nl->enet_stats.rx_packets++;
680 			printk(KERN_DEBUG "%s: receive end\n", dev->name);
682 		/* Close the connection. */
683 		write_data (dev, 0x00);
684 		spin_lock_irq(&nl->lock);
/* If a transmit was queued while receiving, go straight to SEND state. */
685 		if (snd->state != PLIP_PK_DONE) {
686 			nl->connection = PLIP_CN_SEND;
687 			spin_unlock_irq(&nl->lock);
688 			schedule_work(&nl->immediate);
689 			enable_parport_interrupts (dev);
693 			nl->connection = PLIP_CN_NONE;
694 			spin_unlock_irq(&nl->lock);
695 			enable_parport_interrupts (dev);
703 /* PLIP_SEND --- send a byte (two nibbles)
704 Returns OK on success, TIMEOUT when timeout */
/* Send one byte as two nibbles: present the low nibble, raise bit 4 as a
   strobe, wait for the peer's ACK on BUSY, then repeat for the high nibble.
   Returns OK on success, TIMEOUT when the peer does not ACK in time.
   NOTE(review): the surrounding switch/timeout loops are missing from this
   dump. */
706 plip_send(unsigned short nibble_timeout, struct net_device *dev,
707 	  enum plip_nibble_state *ns_p, unsigned char data)
714 		write_data (dev, data & 0x0f);
/* Strobe: set bit 4 with the low nibble still on the data lines. */
718 		write_data (dev, 0x10 | (data & 0x0f));
721 			c0 = read_status(dev);
/* Peer ACKs by dropping BUSY (bit 7). */
722 			if ((c0 & 0x80) == 0)
726 			udelay(PLIP_DELAY_UNIT);
728 		write_data (dev, 0x10 | (data >> 4));
/* Drop the strobe, leaving the high nibble presented. */
732 		write_data (dev, (data >> 4));
735 			c0 = read_status(dev);
740 			udelay(PLIP_DELAY_UNIT);
742 		*ns_p = PLIP_NB_BEGIN;
748 /* PLIP_SEND_PACKET --- send a packet */
/* Transmit-side state machine: trigger the remote interrupt, detect
   collisions with an incoming receive, then send length, payload, and
   checksum, free the skb, and move to the CLOSING state.
   NOTE(review): many case labels / returns are missing from this dump. */
750 plip_send_packet(struct net_device *dev, struct net_local *nl,
751 struct plip_local *snd, struct plip_local *rcv)
753 	unsigned short nibble_timeout = nl->nibble;
/* Defensive check: the queued skb should always exist at this point. */
758 	if (snd->skb == NULL || (lbuf = snd->skb->data) == NULL) {
759 		printk(KERN_DEBUG "%s: send skb lost\n", dev->name);
760 		snd->state = PLIP_PK_DONE;
765 	switch (snd->state) {
766 	case PLIP_PK_TRIGGER:
/* Line must be idle (status reads 0x80) before we may trigger. */
767 		if ((read_status(dev) & 0xf8) != 0x80)
770 		/* Trigger remote rx interrupt. */
771 		write_data (dev, 0x08);
774 			udelay(PLIP_DELAY_UNIT);
775 			spin_lock_irq(&nl->lock);
/* Collision: the peer started sending first; yield to the receive path. */
776 			if (nl->connection == PLIP_CN_RECEIVE) {
777 				spin_unlock_irq(&nl->lock);
779 				nl->enet_stats.collisions++;
782 			c0 = read_status(dev);
784 				spin_unlock_irq(&nl->lock);
786 					synchronize_irq(dev->irq);
787 				if (nl->connection == PLIP_CN_RECEIVE) {
789 					   We don't need to enable irq,
790 					   as it is soon disabled. */
791 					/* Yes, we do. New variant of
792 					   {enable,disable}_irq *counts*
795 					nl->enet_stats.collisions++;
798 				disable_parport_interrupts (dev);
800 					printk(KERN_DEBUG "%s: send start\n", dev->name);
801 				snd->state = PLIP_PK_LENGTH_LSB;
802 				snd->nibble = PLIP_NB_BEGIN;
803 				nl->timeout_count = 0;
806 			spin_unlock_irq(&nl->lock);
/* Trigger failed: quiesce the data lines before retrying/erroring. */
808 		write_data (dev, 0x00);
813 	case PLIP_PK_LENGTH_LSB:
814 		if (plip_send(nibble_timeout, dev,
815 			      &snd->nibble, snd->length.b.lsb))
817 		snd->state = PLIP_PK_LENGTH_MSB;
819 	case PLIP_PK_LENGTH_MSB:
820 		if (plip_send(nibble_timeout, dev,
821 			      &snd->nibble, snd->length.b.msb))
823 		snd->state = PLIP_PK_DATA;
/* Send payload bytes, then fold them into the additive checksum. */
829 			if (plip_send(nibble_timeout, dev,
830 				      &snd->nibble, lbuf[snd->byte]))
832 		while (++snd->byte < snd->length.h);
834 			snd->checksum += lbuf[--snd->byte];
836 		snd->state = PLIP_PK_CHECKSUM;
838 	case PLIP_PK_CHECKSUM:
839 		if (plip_send(nibble_timeout, dev,
840 			      &snd->nibble, snd->checksum))
/* Packet fully sent: account it and release the skb. */
843 		nl->enet_stats.tx_bytes += snd->skb->len;
844 		dev_kfree_skb(snd->skb);
845 		nl->enet_stats.tx_packets++;
846 		snd->state = PLIP_PK_DONE;
849 		/* Close the connection */
850 		write_data (dev, 0x00);
853 			printk(KERN_DEBUG "%s: send end\n", dev->name);
854 		nl->connection = PLIP_CN_CLOSING;
856 		schedule_delayed_work(&nl->deferred, 1);
857 		enable_parport_interrupts (dev);
/* CLOSING-state handler: return to the idle state, restart the tx queue,
   and release the parallel port if another parport client asked for it
   (should_relinquish set by plip_preempt). */
865 plip_connection_close(struct net_device *dev, struct net_local *nl,
866 struct plip_local *snd, struct plip_local *rcv)
868 	spin_lock_irq(&nl->lock);
869 	if (nl->connection == PLIP_CN_CLOSING) {
870 		nl->connection = PLIP_CN_NONE;
871 		netif_wake_queue (dev);
873 	spin_unlock_irq(&nl->lock);
874 	if (nl->should_relinquish) {
/* Hand the port back now that the datagram is off the wire. */
875 		nl->should_relinquish = nl->port_owner = 0;
876 		parport_release(nl->pardev);
881 /* PLIP_ERROR --- wait till other end settled */
/* ERROR-state handler: wait until the other end settles (status reads the
   idle pattern 0x80), then reset the interface and resume; otherwise retry
   after one tick. */
883 plip_error(struct net_device *dev, struct net_local *nl,
884 struct plip_local *snd, struct plip_local *rcv)
886 	unsigned char status;
888 	status = read_status(dev);
889 	if ((status & 0xf8) == 0x80) {
891 			printk(KERN_DEBUG "%s: reset interface.\n", dev->name);
892 		nl->connection = PLIP_CN_NONE;
893 		nl->should_relinquish = 0;
894 		netif_start_queue (dev);
895 		enable_parport_interrupts (dev);
897 		netif_wake_queue (dev);
/* Line still not idle: poll again later. */
900 		schedule_delayed_work(&nl->deferred, 1);
906 /* Handle the parallel port interrupts. */
/* Parallel port interrupt handler (also called with irq == -1 from the
   poll-mode timer). A genuine trigger (status pattern 0xc0) starts a
   receive by scheduling the bottom half.
   NOTE(review): some case labels / early returns are missing from this dump. */
908 plip_interrupt(int irq, void *dev_id)
910 	struct net_device *dev = dev_id;
911 	struct net_local *nl;
912 	struct plip_local *rcv;
915 	nl = netdev_priv(dev);
918 	spin_lock_irq (&nl->lock);
920 	c0 = read_status(dev);
/* 0xc0 is the peer's trigger pattern; anything else is spurious. */
921 	if ((c0 & 0xf8) != 0xc0) {
922 		if ((dev->irq != -1) && (net_debug > 1))
923 			printk(KERN_DEBUG "%s: spurious interrupt\n", dev->name);
924 		spin_unlock_irq (&nl->lock);
929 		printk(KERN_DEBUG "%s: interrupt.\n", dev->name);
931 	switch (nl->connection) {
932 	case PLIP_CN_CLOSING:
933 		netif_wake_queue (dev);
/* Start a new receive: kick the bottom half in RECEIVE state. */
936 		rcv->state = PLIP_PK_TRIGGER;
937 		nl->connection = PLIP_CN_RECEIVE;
938 		nl->timeout_count = 0;
939 		schedule_work(&nl->immediate);
942 	case PLIP_CN_RECEIVE:
943 		/* May occur because there is race condition
944 		   around test and set of dev->interrupt.
945 		   Ignore this interrupt. */
949 		printk(KERN_ERR "%s: receive interrupt in error state\n", dev->name);
953 	spin_unlock_irq(&nl->lock);
/* hard_start_xmit method: claim the parport if needed, queue the skb for
   the send state machine, and schedule the bottom half.
   NOTE(review): some returns / skb bookkeeping lines are missing from this
   dump. */
957 plip_tx_packet(struct sk_buff *skb, struct net_device *dev)
959 	struct net_local *nl = netdev_priv(dev);
960 	struct plip_local *snd = &nl->snd_data;
962 	if (netif_queue_stopped(dev))
965 	/* We may need to grab the bus */
966 	if (!nl->port_owner) {
967 		if (parport_claim(nl->pardev))
/* Only one packet in flight: stop the queue until the send completes. */
972 	netif_stop_queue (dev);
974 	if (skb->len > dev->mtu + dev->hard_header_len) {
975 		printk(KERN_WARNING "%s: packet too big, %d.\n", dev->name, (int)skb->len);
976 		netif_start_queue (dev);
981 		printk(KERN_DEBUG "%s: send request\n", dev->name);
983 	spin_lock_irq(&nl->lock);
984 	dev->trans_start = jiffies;
986 	snd->length.h = skb->len;
987 	snd->state = PLIP_PK_TRIGGER;
/* If the line is idle, claim it for sending before the bh runs. */
988 	if (nl->connection == PLIP_CN_NONE) {
989 		nl->connection = PLIP_CN_SEND;
990 		nl->timeout_count = 0;
992 	schedule_work(&nl->immediate);
993 	spin_unlock_irq(&nl->lock);
/* Rewrite the ethernet header addresses for PLIP: source is our own
   dev_addr, destination is 0xfc 0xfc followed by the peer's 4-byte IPv4
   address taken from the outgoing header's daddr field. */
999 plip_rewrite_address(struct net_device *dev, struct ethhdr *eth)
1001 	struct in_device *in_dev;
1003 	if ((in_dev=dev->ip_ptr) != NULL) {
1004 		/* Any address will do - we take the first */
1005 		struct in_ifaddr *ifa=in_dev->ifa_list;
1007 			memcpy(eth->h_source, dev->dev_addr, 6);
1008 	memset(eth->h_dest, 0xfc, 2);
1009 	memcpy(eth->h_dest+2, &ifa->ifa_address, 4);
/* hard_header wrapper: build an ordinary ethernet header via the saved
   original method, then rewrite the MAC addresses for PLIP. */
1015 plip_hard_header(struct sk_buff *skb, struct net_device *dev,
1016 unsigned short type, void *daddr,
1017 void *saddr, unsigned len)
1019 	struct net_local *nl = netdev_priv(dev);
1022 	if ((ret = nl->orig_hard_header(skb, dev, type, daddr, saddr, len)) >= 0)
1023 		plip_rewrite_address (dev, (struct ethhdr *)skb->data);
/* hard_header_cache wrapper: fill the hh_cache via the original method,
   then rewrite the cached ethernet header's addresses for PLIP. */
1028 int plip_hard_header_cache(struct neighbour *neigh,
1029 struct hh_cache *hh)
1031 	struct net_local *nl = neigh->dev->priv;
1034 	if ((ret = nl->orig_hard_header_cache(neigh, hh)) == 0)
/* Locate the ethernet header inside the aligned hh_data buffer. */
1038 		eth = (struct ethhdr*)(((u8*)hh->hh_data) +
1039 HH_DATA_OFF(sizeof(*eth)));
1040 		plip_rewrite_address (neigh->dev, eth);
1046 /* Open/initialize the board. This is called (in the current kernel)
1047 sometime after booting when the 'ifconfig' program is run.
1049 This routine gets exclusive access to the parallel port by allocating
/* Open the interface: claim the parallel port, clear the line, enable (or
   poll-emulate) the rx interrupt, reset the state machines, and derive the
   fake MAC address from the interface's first IPv4 address.
   NOTE(review): the poll-mode conditional around the timer scheduling is
   missing from this dump. */
1053 plip_open(struct net_device *dev)
1055 	struct net_local *nl = netdev_priv(dev);
1056 	struct in_device *in_dev;
1059 	if (!nl->port_owner) {
1060 		if (parport_claim(nl->pardev)) return -EAGAIN;
1064 	nl->should_relinquish = 0;
1066 	/* Clear the data port. */
1067 	write_data (dev, 0x00);
1069 	/* Enable rx interrupt. */
1070 	enable_parport_interrupts (dev);
/* IRQ-less mode: start the polling timer instead of relying on interrupts. */
1073 		atomic_set (&nl->kill_timer, 0);
1074 		schedule_delayed_work(&nl->timer, 1);
1077 	/* Initialize the state machine. */
1078 	nl->rcv_data.state = nl->snd_data.state = PLIP_PK_DONE;
1079 	nl->rcv_data.skb = nl->snd_data.skb = NULL;
1080 	nl->connection = PLIP_CN_NONE;
1081 	nl->is_deferred = 0;
1083 	/* Fill in the MAC-level header.
1084 	   We used to abuse dev->broadcast to store the point-to-point
1085 	   MAC address, but we no longer do it. Instead, we fetch the
1086 	   interface address whenever it is needed, which is cheap enough
1087 	   because we use the hh_cache. Actually, abusing dev->broadcast
1088 	   didn't work, because when using plip_open the point-to-point
1089 	   address isn't yet known.
1090 	   PLIP doesn't have a real MAC address, but we need it to be
1091 	   DOS compatible, and to properly support taps (otherwise,
1092 	   when the device address isn't identical to the address of a
1093 	   received frame, the kernel incorrectly drops it). */
1095 	if ((in_dev=dev->ip_ptr) != NULL) {
1096 		/* Any address will do - we take the first. We already
1097 		   have the first two bytes filled with 0xfc, from
1099 		struct in_ifaddr *ifa=in_dev->ifa_list;
1101 			memcpy(dev->dev_addr+2, &ifa->ifa_local, 4);
1105 	netif_start_queue (dev);
1110 /* The inverse routine to plip_open (). */
/* Close the interface: stop the queue, kill the poll timer (waiting on the
   semaphore for it to really stop), quiesce the port, release it, and free
   any in-flight skbs.
   NOTE(review): conditionals guarding the timer shutdown and outb() lines
   are missing from this dump. */
1112 plip_close(struct net_device *dev)
1114 	struct net_local *nl = netdev_priv(dev);
1115 	struct plip_local *snd = &nl->snd_data;
1116 	struct plip_local *rcv = &nl->rcv_data;
1118 	netif_stop_queue (dev);
1120 		synchronize_irq(dev->irq);
/* Ask the poll timer to die and wait until plip_timer_bh confirms. */
1124 		init_MUTEX_LOCKED (&nl->killed_timer_sem);
1125 		atomic_set (&nl->kill_timer, 1);
1126 		down (&nl->killed_timer_sem);
1130 	outb(0x00, PAR_DATA(dev));
1132 	nl->is_deferred = 0;
1133 	nl->connection = PLIP_CN_NONE;
1134 	if (nl->port_owner) {
1135 		parport_release(nl->pardev);
/* Drop any half-sent / half-received packets. */
1139 	snd->state = PLIP_PK_DONE;
1141 		dev_kfree_skb(snd->skb);
1144 	rcv->state = PLIP_PK_DONE;
1146 		kfree_skb(rcv->skb);
1152 	outb(0x00, PAR_CONTROL(dev));
/* parport preempt callback: another client wants the port. Refuse (return
   nonzero path, lines not shown) while a datagram is on the wire, setting
   should_relinquish so we release in plip_connection_close; otherwise give
   the port up now. */
1158 plip_preempt(void *handle)
1160 	struct net_device *dev = (struct net_device *)handle;
1161 	struct net_local *nl = netdev_priv(dev);
1163 	/* Stand our ground if a datagram is on the wire */
1164 	if (nl->connection != PLIP_CN_NONE) {
1165 		nl->should_relinquish = 1;
1169 	nl->port_owner = 0;	/* Remember that we released the bus */
/* parport wakeup callback: the port has become available again. Reclaim it
   if the interface is up; complain if we somehow already own it. */
1174 plip_wakeup(void *handle)
1176 	struct net_device *dev = (struct net_device *)handle;
1177 	struct net_local *nl = netdev_priv(dev);
1179 	if (nl->port_owner) {
1180 		/* Why are we being woken up? */
1181 		printk(KERN_DEBUG "%s: why am I being woken up?\n", dev->name);
1182 		if (!parport_claim(nl->pardev))
1183 			/* bus_owner is already set (but why?) */
1184 			printk(KERN_DEBUG "%s: I'm broken.\n", dev->name);
1189 	if (!(dev->flags & IFF_UP))
1190 		/* Don't need the port when the interface is down */
1193 	if (!parport_claim(nl->pardev)) {
1195 		/* Clear the data port. */
1196 		write_data (dev, 0x00);
/* get_stats method: return the device's private statistics block. */
1202 static struct net_device_stats *
1203 plip_get_stats(struct net_device *dev)
1205 	struct net_local *nl = netdev_priv(dev);
1206 	struct net_device_stats *r = &nl->enet_stats;
/* SIOCDEVPLIP private ioctl: get or set the trigger/nibble timeouts.
   Setting requires CAP_NET_ADMIN. */
1212 plip_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1214 	struct net_local *nl = netdev_priv(dev);
1215 	struct plipconf *pc = (struct plipconf *) &rq->ifr_ifru;
1217 	if (cmd != SIOCDEVPLIP)
1221 	case PLIP_GET_TIMEOUT:
1222 		pc->trigger = nl->trigger;
1223 		pc->nibble  = nl->nibble;
1225 	case PLIP_SET_TIMEOUT:
1226 		if(!capable(CAP_NET_ADMIN))
1228 		nl->trigger = pc->trigger;
1229 		nl->nibble  = pc->nibble;
/* Module parameters: list of parport numbers to bind (-1 = unset), plus the
   device table filled in by plip_attach(). */
1237 static int parport[PLIP_MAX] = { [0 ... PLIP_MAX-1] = -1 };
1240 module_param_array(parport, int, NULL, 0);
1241 module_param(timid, int, 0);
1242 MODULE_PARM_DESC(parport, "List of parport device numbers to use by plip");
1244 static struct net_device *dev_plip[PLIP_MAX] = { NULL, };
/* Return 1 if 'a' appears in 'list' (terminated by -1 or PLIP_MAX entries). */
1247 plip_searchfor(int list[], int a)
1250 	for (i = 0; i < PLIP_MAX && list[i] != -1; i++) {
1251 		if (list[i] == a) return 1;
1256 /* plip_attach() is called (by the parport code) when a port is
1257 * available to use. */
/* parport attach callback: when a suitable port appears (either no explicit
   list was given, honoring 'timid', or the port is in the parport[] list),
   allocate an ether-style net_device, register a pardevice on the port,
   finish initialization and register the netdev.
   NOTE(review): several error-path lines are missing from this dump. */
1258 static void plip_attach (struct parport *port)
1261 	struct net_device *dev;
1262 	struct net_local *nl;
1263 	char name[IFNAMSIZ];
1265 	if ((parport[0] == -1 && (!timid || !port->devices)) ||
1266 	    plip_searchfor(parport, port->number)) {
1267 		if (unit == PLIP_MAX) {
1268 			printk(KERN_ERR "plip: too many devices\n");
1272 		sprintf(name, "plip%d", unit);
1273 		dev = alloc_etherdev(sizeof(struct net_local));
1275 			printk(KERN_ERR "plip: memory squeeze\n");
1279 		strcpy(dev->name, name);
1281 		SET_MODULE_OWNER(dev);
1282 		dev->irq = port->irq;
1283 		dev->base_addr = port->base;
1284 		if (port->irq == -1) {
1285 			printk(KERN_INFO "plip: %s has no IRQ. Using IRQ-less mode,"
1286 		                 "which is fairly inefficient!\n", port->name);
1289 		nl = netdev_priv(dev);
/* Register with parport, wiring in our preempt/wakeup/interrupt callbacks. */
1291 		nl->pardev = parport_register_device(port, name, plip_preempt,
1292 						 plip_wakeup, plip_interrupt,
1296 			printk(KERN_ERR "%s: parport_register failed\n", name);
1301 		plip_init_netdev(dev);
1303 		if (register_netdev(dev)) {
1304 			printk(KERN_ERR "%s: network register failed\n", name);
1305 			goto err_parport_unregister;
1308 		printk(KERN_INFO "%s", version);
1310 			printk(KERN_INFO "%s: Parallel port at %#3lx, "
1312 			       dev->name, dev->base_addr, dev->irq);
1314 			printk(KERN_INFO "%s: Parallel port at %#3lx, "
1316 			       dev->name, dev->base_addr);
1317 		dev_plip[unit++] = dev;
1321 err_parport_unregister:
1322 	parport_unregister_device(nl->pardev);
1328 /* plip_detach() is called (by the parport code) when a port is
1329 * no longer available to use. */
/* parport detach callback.
   NOTE(review): the body is not visible in this dump; in-tree plip keeps
   this a no-op (teardown happens in module cleanup) -- confirm. */
1330 static void plip_detach (struct parport *port)
/* parport driver registration record: connects attach/detach callbacks. */
1335 static struct parport_driver plip_driver = {
1337 	.attach = plip_attach,
1338 	.detach = plip_detach
/* Module exit: unregister from parport, then tear down every device we
   created (unregister netdev, release and unregister its pardevice). */
1341 static void __exit plip_cleanup_module (void)
1343 	struct net_device *dev;
1346 	parport_unregister_driver (&plip_driver);
1348 	for (i=0; i < PLIP_MAX; i++) {
1349 		if ((dev = dev_plip[i])) {
1350 			struct net_local *nl = netdev_priv(dev);
1351 			unregister_netdev(dev);
/* Release only applies if we still own the port (guard not shown in dump). */
1353 				parport_release(nl->pardev);
1354 			parport_unregister_device(nl->pardev);
/* Boot-time "plip=" option parsing (non-modular builds): accepts
   "parportN" entries (up to PLIP_MAX), "timid", and "plip=0"/"plip=" to
   disable the driver.
   NOTE(review): several lines (ints[] declaration, returns) are missing
   from this dump. */
1363 static int parport_ptr;
1365 static int __init plip_setup(char *str)
1369 	str = get_options(str, ARRAY_SIZE(ints), ints);
1372 	if (!strncmp(str, "parport", 7)) {
1373 		int n = simple_strtoul(str+7, NULL, 10);
1374 		if (parport_ptr < PLIP_MAX)
1375 			parport[parport_ptr++] = n;
1377 			printk(KERN_INFO "plip: too many ports, %s ignored.\n",
1379 	} else if (!strcmp(str, "timid")) {
1382 		if (ints[0] == 0 || ints[1] == 0) {
1383 			/* disable driver on "plip=" or "plip=0" */
1386 			printk(KERN_WARNING "warning: 'plip=0x%x' ignored\n",
1395 #endif /* !MODULE */
/* Module init: bail out if the driver was disabled (parport[0] == -2),
   warn if 'timid' conflicts with an explicit port list, then register with
   the parport subsystem (which calls plip_attach for each port). */
1397 static int __init plip_init (void)
1399 	if (parport[0] == -2)
1402 	if (parport[0] != -1 && timid) {
1403 		printk(KERN_WARNING "plip: warning, ignoring `timid' since specific ports given.\n");
1407 	if (parport_register_driver (&plip_driver)) {
1408 		printk (KERN_WARNING "plip: couldn't register driver\n");
/* Standard module entry/exit hookup and license declaration. */
1415 module_init(plip_init);
1416 module_exit(plip_cleanup_module);
1417 MODULE_LICENSE("GPL");
1421 * compile-command: "gcc -DMODULE -DMODVERSIONS -D__KERNEL__ -Wall -Wstrict-prototypes -O2 -g -fomit-frame-pointer -pipe -c plip.c"