2 * u_serial.c - utilities for USB gadget "serial port"/TTY support
4 * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
5 * Copyright (C) 2008 David Brownell
6 * Copyright (C) 2008 by Nokia Corporation
8 * This code also borrows from usbserial.c, which is
9 * Copyright (C) 1999 - 2002 Greg Kroah-Hartman (greg@kroah.com)
10 * Copyright (C) 2000 Peter Berger (pberger@brimson.com)
11 * Copyright (C) 2000 Al Borchers (alborchers@steinerpoint.com)
13 * This software is distributed under the terms of the GNU General
14 * Public License ("GPL") as published by the Free Software Foundation,
15 * either version 2 of that License or (at your option) any later version.
18 /* #define VERBOSE_DEBUG */
20 #include <linux/kernel.h>
21 #include <linux/interrupt.h>
22 #include <linux/device.h>
23 #include <linux/delay.h>
24 #include <linux/tty.h>
25 #include <linux/tty_flip.h>
31 * This component encapsulates the TTY layer glue needed to provide basic
32 * "serial port" functionality through the USB gadget stack. Each such
33 * port is exposed through a /dev/ttyGS* node.
35 * After initialization (gserial_setup), these TTY port devices stay
36 * available until they are removed (gserial_cleanup). Each one may be
37 * connected to a USB function (gserial_connect), or disconnected (with
38 * gserial_disconnect) when the USB host issues a config change event.
39 * Data can only flow when the port is connected to the host.
41 * A given TTY port can be made available in multiple configurations.
42 * For example, each one might expose a ttyGS0 node which provides a
43 * login application. In one case that might use CDC ACM interface 0,
44 * while another configuration might use interface 3 for that. The
45 * work to handle that (including descriptor management) is not part
46 * of this component.
48 * Configurations may expose more than one TTY port. For example, if
49 * ttyGS0 provides login service, then ttyGS1 might provide dialer access
50 * for a telephone or fax link. And ttyGS2 might be something that just
51 * needs a simple byte stream interface for some messaging protocol that
52 * is managed in userspace ... OBEX, PTP, and MTP have been mentioned.
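 *
 * As an illustrative sketch (not code from this driver; "my_func" and
 * its struct gserial member are hypothetical), a gadget driver would
 * typically drive that lifecycle roughly as:
 *
 *	status = gserial_setup(cdev->gadget, 1);	// creates ttyGS0
 *	...
 *	status = gserial_connect(&my_func->port, 0);	// host selected us
 *	...
 *	gserial_disconnect(&my_func->port);		// host switched away
 *	...
 *	gserial_cleanup();				// while unbinding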
55 #define PREFIX "ttyGS"
58 * gserial is the lifecycle interface, used by USB functions
59 * gs_port is the I/O nexus, used by the tty driver
60 * tty_struct links to the tty/filesystem framework
62 * gserial <---> gs_port ... links will be null when the USB link is
63 * inactive; managed by gserial_{connect,disconnect}(). each gserial
64 * instance can wrap its own USB control protocol.
65 * gserial->ioport == usb_ep->driver_data ... gs_port
66 * gs_port->port_usb ... gserial
68 * gs_port <---> tty_struct ... links will be null when the TTY file
69 * isn't opened; managed by gs_open()/gs_close()
70 * gs_port->port_tty ... tty_struct
71 * tty_struct->driver_data ... gs_port
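 *
 * As an illustrative invariant (implied by the code below, not checked
 * anywhere as code): while a port is both connected and open,
 *
 *	port->port_usb->ioport == port
 *	port->port_tty->driver_data == port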
74 /* RX and TX queues can buffer QUEUE_SIZE packets before they hit the
75 * next layer of buffering. For TX that's a circular buffer; for RX
76 * consider it a NOP. A third layer is provided by the TTY code.
79 #define WRITE_BUF_SIZE 8192 /* TX only */
90 * The port structure holds info for each port, one for each minor number
91 * (and thus for each /dev/ node).
94 spinlock_t port_lock; /* guard port_* access */
96 struct gserial *port_usb;
97 struct tty_struct *port_tty;
100 bool openclose; /* open/close in progress */
103 wait_queue_head_t close_wait; /* wait for last close */
105 struct list_head read_pool;
106 struct list_head read_queue;
108 struct tasklet_struct push;
110 struct list_head write_pool;
111 struct gs_buf port_write_buf;
112 wait_queue_head_t drain_wait; /* wait while writes drain */
114 /* REVISIT this state ... */
115 struct usb_cdc_line_coding port_line_coding; /* 8-N-1 etc */
118 /* increase N_PORTS if you need more */
120 static struct portmaster {
121 struct mutex lock; /* protect open/close */
122 struct gs_port *port;
124 static unsigned n_ports;
126 #define GS_CLOSE_TIMEOUT 15 /* seconds */
131 #define pr_vdebug(fmt, arg...) \
134 #define pr_vdebug(fmt, arg...) \
135 ({ if (0) pr_debug(fmt, ##arg); })
138 /*-------------------------------------------------------------------------*/
140 /* Circular Buffer */
145 * Allocate a circular buffer and all associated memory.
147 static int gs_buf_alloc(struct gs_buf *gb, unsigned size)
149 gb->buf_buf = kmalloc(size, GFP_KERNEL);
150 if (gb->buf_buf == NULL)
154 gb->buf_put = gb->buf_buf;
155 gb->buf_get = gb->buf_buf;
163 * Free the buffer and all associated memory.
165 static void gs_buf_free(struct gs_buf *gb)
174 * Clear out all data in the circular buffer.
176 static void gs_buf_clear(struct gs_buf *gb)
178 gb->buf_get = gb->buf_put;
179 /* equivalent to a get of all data available */
185 * Return the number of bytes of data written into the circular
188 static unsigned gs_buf_data_avail(struct gs_buf *gb)
190 return (gb->buf_size + gb->buf_put - gb->buf_get) % gb->buf_size;
196 * Return the number of bytes of space available in the circular
199 static unsigned gs_buf_space_avail(struct gs_buf *gb)
201 return (gb->buf_size + gb->buf_get - gb->buf_put - 1) % gb->buf_size;
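/*
 * Worked example of the index arithmetic above (illustrative numbers):
 * with buf_size = 8, buf_put at offset 2 and buf_get at offset 6,
 *
 *	data_avail  = (8 + 2 - 6) % 8 = 4	// bytes waiting
 *	space_avail = (8 + 6 - 2 - 1) % 8 = 3	// one slot stays unused
 *
 * so buf_put == buf_get always means "empty", never "full".
 */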
207 * Copy data from the given buffer and put it into the circular buffer.
208 * Restrict to the amount of space available.
210 * Return the number of bytes copied.
213 gs_buf_put(struct gs_buf *gb, const char *buf, unsigned count)
217 len = gs_buf_space_avail(gb);
224 len = gb->buf_buf + gb->buf_size - gb->buf_put;
226 memcpy(gb->buf_put, buf, len);
227 memcpy(gb->buf_buf, buf+len, count - len);
228 gb->buf_put = gb->buf_buf + count - len;
230 memcpy(gb->buf_put, buf, count);
232 gb->buf_put += count;
233 else /* count == len */
234 gb->buf_put = gb->buf_buf;
243 * Get data from the circular buffer and copy to the given buffer.
244 * Restrict to the amount of data available.
246 * Return the number of bytes copied.
249 gs_buf_get(struct gs_buf *gb, char *buf, unsigned count)
253 len = gs_buf_data_avail(gb);
260 len = gb->buf_buf + gb->buf_size - gb->buf_get;
262 memcpy(buf, gb->buf_get, len);
263 memcpy(buf+len, gb->buf_buf, count - len);
264 gb->buf_get = gb->buf_buf + count - len;
266 memcpy(buf, gb->buf_get, count);
268 gb->buf_get += count;
269 else /* count == len */
270 gb->buf_get = gb->buf_buf;
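/*
 * Wrap-around example for the two copy routines above (illustrative
 * numbers, assuming enough space/data is available): with buf_size = 8,
 * buf_put two bytes before the end of buf_buf, and count = 5,
 * gs_buf_put() copies len = 2 bytes to the tail, the remaining
 * 5 - 2 = 3 bytes to the start, and leaves buf_put = buf_buf + 3.
 * gs_buf_get() mirrors the same arithmetic for buf_get.
 */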
276 /*-------------------------------------------------------------------------*/
278 /* I/O glue between TTY (upper) and USB function (lower) driver layers */
283 * Allocate a usb_request and its buffer. Returns a pointer to the
284 * usb_request or NULL if there is an error.
287 gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
289 struct usb_request *req;
291 req = usb_ep_alloc_request(ep, kmalloc_flags);
295 req->buf = kmalloc(len, kmalloc_flags);
296 if (req->buf == NULL) {
297 usb_ep_free_request(ep, req);
308 * Free a usb_request and its buffer.
310 void gs_free_req(struct usb_ep *ep, struct usb_request *req)
313 usb_ep_free_request(ep, req);
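/*
 * Illustrative sketch of how a USB function driver might use the two
 * helpers above for an endpoint of its own (the "notify_ep" endpoint
 * and "my_notify_complete" handler are hypothetical):
 *
 *	struct usb_request *req;
 *
 *	req = gs_alloc_req(notify_ep, notify_ep->maxpacket, GFP_ATOMIC);
 *	if (!req)
 *		return -ENOMEM;
 *	req->complete = my_notify_complete;
 *	...
 *	gs_free_req(notify_ep, req);		// on teardown
 */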
319 * If there is data to send, a packet is built in the given
320 * buffer and the size is returned. If there is no data to
321 * send, 0 is returned.
323 * Called with port_lock held.
326 gs_send_packet(struct gs_port *port, char *packet, unsigned size)
330 len = gs_buf_data_avail(&port->port_write_buf);
334 size = gs_buf_get(&port->port_write_buf, packet, size);
341 * This function finds available write requests, calls
342 * gs_send_packet to fill these packets with data, and
343 * continues until either there are no more write requests
344 * available or no more data to send. This function is
345 * run whenever data arrives or write requests are available.
347 * Context: caller owns port_lock; port_usb is non-null.
349 static int gs_start_tx(struct gs_port *port)
351 __releases(&port->port_lock)
352 __acquires(&port->port_lock)
355 struct list_head *pool = &port->write_pool;
356 struct usb_ep *in = port->port_usb->in;
358 bool do_tty_wake = false;
360 while (!list_empty(pool)) {
361 struct usb_request *req;
364 req = list_entry(pool->next, struct usb_request, list);
365 len = gs_send_packet(port, req->buf, in->maxpacket);
367 wake_up_interruptible(&port->drain_wait);
373 list_del(&req->list);
375 pr_vdebug(PREFIX "%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n",
376 port->port_num, len, *((u8 *)req->buf),
377 *((u8 *)req->buf+1), *((u8 *)req->buf+2));
379 /* Drop lock while we call out of driver; completions
380 * could be issued while we do so. Disconnection may
381 * happen too; maybe immediately before we queue this!
383 * NOTE that we may keep sending data for a while after
384 * the TTY closed (port->port_tty is NULL).
386 spin_unlock(&port->port_lock);
387 status = usb_ep_queue(in, req, GFP_ATOMIC);
388 spin_lock(&port->port_lock);
391 pr_debug("%s: %s %s err %d\n",
392 __func__, "queue", in->name, status);
393 list_add(&req->list, pool);
397 /* abort immediately after disconnect */
402 if (do_tty_wake && port->port_tty)
403 tty_wakeup(port->port_tty);
408 * Context: caller owns port_lock, and port_usb is set
410 static unsigned gs_start_rx(struct gs_port *port)
412 __releases(&port->port_lock)
413 __acquires(&port->port_lock)
416 struct list_head *pool = &port->read_pool;
417 struct usb_ep *out = port->port_usb->out;
418 unsigned started = 0;
420 while (!list_empty(pool)) {
421 struct usb_request *req;
423 struct tty_struct *tty;
425 /* no more rx if closed */
426 tty = port->port_tty;
430 req = list_entry(pool->next, struct usb_request, list);
431 list_del(&req->list);
432 req->length = out->maxpacket;
434 /* drop lock while we call out; the controller driver
435 * may need to call us back (e.g. for disconnect)
437 spin_unlock(&port->port_lock);
438 status = usb_ep_queue(out, req, GFP_ATOMIC);
439 spin_lock(&port->port_lock);
442 pr_debug("%s: %s %s err %d\n",
443 __func__, "queue", out->name, status);
444 list_add(&req->list, pool);
449 /* abort immediately after disconnect */
457 * RX tasklet takes data out of the RX queue and hands it up to the TTY
458 * layer until it refuses to take any more data (or is throttled back).
459 * Then it issues reads for any further data.
461 * If the RX queue becomes full enough that no usb_request is queued,
462 * the OUT endpoint may begin NAKing as soon as its FIFO fills up.
463 * So QUEUE_SIZE packets plus however many the FIFO holds (usually two)
464 * can be buffered before the TTY layer's buffers (currently 64 KB).
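 *
 * As a rough worked example (assuming QUEUE_SIZE is 16 and 512 byte
 * high speed bulk packets): roughly (16 + 2) * 512 = 9216 bytes, i.e.
 * about 9 KB, can pile up in this glue layer before the OUT endpoint
 * starts NAKing.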
466 static void gs_rx_push(unsigned long _port)
468 struct gs_port *port = (void *)_port;
469 struct tty_struct *tty;
470 struct list_head *queue = &port->read_queue;
471 bool disconnect = false;
472 bool do_push = false;
474 /* hand any queued data to the tty */
475 spin_lock_irq(&port->port_lock);
476 tty = port->port_tty;
477 while (!list_empty(queue)) {
478 struct usb_request *req;
480 req = list_first_entry(queue, struct usb_request, list);
482 /* discard data if tty was closed */
486 /* leave data queued if tty was rx throttled */
487 if (test_bit(TTY_THROTTLED, &tty->flags))
490 switch (req->status) {
493 pr_vdebug(PREFIX "%d: shutdown\n", port->port_num);
497 /* presumably a transient fault */
498 pr_warning(PREFIX "%d: unexpected RX status %d\n",
499 port->port_num, req->status);
502 /* normal completion */
506 /* push data to (open) tty */
508 char *packet = req->buf;
509 unsigned size = req->actual;
513 /* we may have pushed part of this packet already... */
520 count = tty_insert_flip_string(tty, packet, size);
524 /* stop pushing; TTY layer can't handle more */
525 port->n_read += count;
526 pr_vdebug(PREFIX "%d: rx block %d/%d\n",
534 list_move(&req->list, &port->read_pool);
537 /* Push from tty to ldisc; this is immediate with low_latency, and
538 * may trigger callbacks to this driver ... so drop the spinlock.
540 if (tty && do_push) {
541 spin_unlock_irq(&port->port_lock);
542 tty_flip_buffer_push(tty);
543 wake_up_interruptible(&tty->read_wait);
544 spin_lock_irq(&port->port_lock);
546 /* tty may have been closed */
547 tty = port->port_tty;
551 /* We want our data queue to become empty ASAP, keeping data
552 * in the tty and ldisc (not here). If we couldn't push any
553 * this time around, there may be trouble unless there's an
554 * implicit tty_unthrottle() call on its way...
556 * REVISIT we should probably add a timer to keep the tasklet
557 * from starving ... but it's not clear that case ever happens.
559 if (!list_empty(queue) && tty) {
560 if (!test_bit(TTY_THROTTLED, &tty->flags)) {
562 tasklet_schedule(&port->push);
564 pr_warning(PREFIX "%d: RX not scheduled?\n",
569 /* If we're still connected, refill the USB RX queue. */
570 if (!disconnect && port->port_usb)
573 spin_unlock_irq(&port->port_lock);
576 static void gs_read_complete(struct usb_ep *ep, struct usb_request *req)
578 struct gs_port *port = ep->driver_data;
580 /* Queue all received data until the tty layer is ready for it. */
581 spin_lock(&port->port_lock);
582 list_add_tail(&req->list, &port->read_queue);
583 tasklet_schedule(&port->push);
584 spin_unlock(&port->port_lock);
587 static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
589 struct gs_port *port = ep->driver_data;
591 spin_lock(&port->port_lock);
592 list_add(&req->list, &port->write_pool);
594 switch (req->status) {
596 /* presumably a transient fault */
597 pr_warning("%s: unexpected %s status %d\n",
598 __func__, ep->name, req->status);
601 /* normal completion */
607 pr_vdebug("%s: %s shutdown\n", __func__, ep->name);
611 spin_unlock(&port->port_lock);
614 static void gs_free_requests(struct usb_ep *ep, struct list_head *head)
616 struct usb_request *req;
618 while (!list_empty(head)) {
619 req = list_entry(head->next, struct usb_request, list);
620 list_del(&req->list);
621 gs_free_req(ep, req);
625 static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head,
626 void (*fn)(struct usb_ep *, struct usb_request *))
629 struct usb_request *req;
631 /* Pre-allocate up to QUEUE_SIZE transfers, but if we can't
632 * do quite that many this time, don't fail ... we just won't
633 * be as speedy as we might otherwise be.
635 for (i = 0; i < QUEUE_SIZE; i++) {
636 req = gs_alloc_req(ep, ep->maxpacket, GFP_ATOMIC);
638 return list_empty(head) ? -ENOMEM : 0;
640 list_add_tail(&req->list, head);
646 * gs_start_io - start USB I/O streams
647 * @port: port to use; encapsulates the endpoints
648 * Context: holding port_lock; port_tty and port_usb are non-null
650 * We only start I/O when something is connected to both sides of
651 * this port. If nothing is listening on the host side, we may
652 * be pointlessly filling up our TX buffers and FIFO.
654 static int gs_start_io(struct gs_port *port)
656 struct list_head *head = &port->read_pool;
657 struct usb_ep *ep = port->port_usb->out;
661 /* Allocate RX and TX I/O buffers. We can't easily do this much
662 * earlier (with GFP_KERNEL) because the requests are coupled to
663 * endpoints, as are the packet sizes we'll be using. Different
664 * configurations may use different endpoints with a given port;
665 * and high speed vs full speed changes packet sizes too.
667 status = gs_alloc_requests(ep, head, gs_read_complete);
671 status = gs_alloc_requests(port->port_usb->in, &port->write_pool,
674 gs_free_requests(ep, head);
678 /* queue read requests */
680 started = gs_start_rx(port);
682 /* unblock any pending writes into our circular buffer */
684 tty_wakeup(port->port_tty);
686 gs_free_requests(ep, head);
687 gs_free_requests(port->port_usb->in, &port->write_pool);
694 /*-------------------------------------------------------------------------*/
699 * gs_open sets up the link between a gs_port and its associated TTY.
700 * That link is broken *only* by TTY close(), and all driver methods
701 * know that.
703 static int gs_open(struct tty_struct *tty, struct file *file)
705 int port_num = tty->index;
706 struct gs_port *port;
709 if (port_num < 0 || port_num >= n_ports)
713 mutex_lock(&ports[port_num].lock);
714 port = ports[port_num].port;
718 spin_lock_irq(&port->port_lock);
720 /* already open? Great. */
721 if (port->open_count) {
725 /* currently opening/closing? wait ... */
726 } else if (port->openclose) {
729 /* ... else we do the work */
732 port->openclose = true;
734 spin_unlock_irq(&port->port_lock);
736 mutex_unlock(&ports[port_num].lock);
743 /* must do the work */
746 /* wait for EAGAIN task to finish */
748 /* REVISIT could have a waitchannel here, if
749 * concurrent open performance is important
753 } while (status != -EAGAIN);
755 /* Do the "real open" */
756 spin_lock_irq(&port->port_lock);
758 /* allocate circular buffer on first open */
759 if (port->port_write_buf.buf_buf == NULL) {
761 spin_unlock_irq(&port->port_lock);
762 status = gs_buf_alloc(&port->port_write_buf, WRITE_BUF_SIZE);
763 spin_lock_irq(&port->port_lock);
766 pr_debug("gs_open: ttyGS%d (%p,%p) no buffer\n",
767 port->port_num, tty, file);
768 port->openclose = false;
769 goto exit_unlock_port;
773 /* REVISIT if REMOVED (ports[].port NULL), abort the open
774 * to let rmmod work faster (but this way isn't wrong).
777 /* REVISIT maybe wait for "carrier detect" */
779 tty->driver_data = port;
780 port->port_tty = tty;
782 port->open_count = 1;
783 port->openclose = false;
785 /* low_latency means ldiscs work in tasklet context, without
786 * needing a workqueue schedule ... easier to keep up.
788 tty->low_latency = 1;
790 /* if connected, start the I/O stream */
791 if (port->port_usb) {
792 struct gserial *gser = port->port_usb;
794 pr_debug("gs_open: start ttyGS%d\n", port->port_num);
801 pr_debug("gs_open: ttyGS%d (%p,%p)\n", port->port_num, tty, file);
806 spin_unlock_irq(&port->port_lock);
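/*
 * Illustrative userspace counterpart (not kernel code; needs <fcntl.h>,
 * <termios.h> and <unistd.h>, error handling omitted): opening the node
 * this driver creates for port 0 and switching it to a raw byte stream.
 *
 *	int fd = open("/dev/ttyGS0", O_RDWR | O_NOCTTY);
 *	struct termios t;
 *
 *	tcgetattr(fd, &t);
 *	cfmakeraw(&t);			// no echo or line editing
 *	tcsetattr(fd, TCSANOW, &t);
 *	write(fd, "hello\n", 6);	// reaches the host once connected
 */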
810 static int gs_writes_finished(struct gs_port *p)
814 /* return true on disconnect or empty buffer */
815 spin_lock_irq(&p->port_lock);
816 cond = (p->port_usb == NULL) || !gs_buf_data_avail(&p->port_write_buf);
817 spin_unlock_irq(&p->port_lock);
822 static void gs_close(struct tty_struct *tty, struct file *file)
824 struct gs_port *port = tty->driver_data;
825 struct gserial *gser;
827 spin_lock_irq(&port->port_lock);
829 if (port->open_count != 1) {
830 if (port->open_count == 0)
837 pr_debug("gs_close: ttyGS%d (%p,%p) ...\n", port->port_num, tty, file);
839 /* mark port as closing but in use; we can drop port lock
840 * and sleep if necessary
842 port->openclose = true;
843 port->open_count = 0;
845 gser = port->port_usb;
846 if (gser && gser->disconnect)
847 gser->disconnect(gser);
849 /* wait for circular write buffer to drain, disconnect, or at
850 * most GS_CLOSE_TIMEOUT seconds; then discard the rest
852 if (gs_buf_data_avail(&port->port_write_buf) > 0 && gser) {
853 spin_unlock_irq(&port->port_lock);
854 wait_event_interruptible_timeout(port->drain_wait,
855 gs_writes_finished(port),
856 GS_CLOSE_TIMEOUT * HZ);
857 spin_lock_irq(&port->port_lock);
858 gser = port->port_usb;
861 /* Iff we're disconnected, there can be no I/O in flight so it's
862 * ok to free the circular buffer; else just scrub it. And don't
863 * let the push tasklet fire again until we're re-opened.
866 gs_buf_free(&port->port_write_buf);
868 gs_buf_clear(&port->port_write_buf);
870 tty->driver_data = NULL;
871 port->port_tty = NULL;
873 port->openclose = false;
875 pr_debug("gs_close: ttyGS%d (%p,%p) done!\n",
876 port->port_num, tty, file);
878 wake_up_interruptible(&port->close_wait);
880 spin_unlock_irq(&port->port_lock);
883 static int gs_write(struct tty_struct *tty, const unsigned char *buf, int count)
885 struct gs_port *port = tty->driver_data;
889 pr_vdebug("gs_write: ttyGS%d (%p) writing %d bytes\n",
890 port->port_num, tty, count);
892 spin_lock_irqsave(&port->port_lock, flags);
894 count = gs_buf_put(&port->port_write_buf, buf, count);
895 /* treat count == 0 as flush_chars() */
897 status = gs_start_tx(port);
898 spin_unlock_irqrestore(&port->port_lock, flags);
903 static int gs_put_char(struct tty_struct *tty, unsigned char ch)
905 struct gs_port *port = tty->driver_data;
909 pr_vdebug("gs_put_char: (%d,%p) char=0x%x, called from %p\n",
910 port->port_num, tty, ch, __builtin_return_address(0));
912 spin_lock_irqsave(&port->port_lock, flags);
913 status = gs_buf_put(&port->port_write_buf, &ch, 1);
914 spin_unlock_irqrestore(&port->port_lock, flags);
919 static void gs_flush_chars(struct tty_struct *tty)
921 struct gs_port *port = tty->driver_data;
924 pr_vdebug("gs_flush_chars: (%d,%p)\n", port->port_num, tty);
926 spin_lock_irqsave(&port->port_lock, flags);
929 spin_unlock_irqrestore(&port->port_lock, flags);
932 static int gs_write_room(struct tty_struct *tty)
934 struct gs_port *port = tty->driver_data;
938 spin_lock_irqsave(&port->port_lock, flags);
940 room = gs_buf_space_avail(&port->port_write_buf);
941 spin_unlock_irqrestore(&port->port_lock, flags);
943 pr_vdebug("gs_write_room: (%d,%p) room=%d\n",
944 port->port_num, tty, room);
949 static int gs_chars_in_buffer(struct tty_struct *tty)
951 struct gs_port *port = tty->driver_data;
955 spin_lock_irqsave(&port->port_lock, flags);
956 chars = gs_buf_data_avail(&port->port_write_buf);
957 spin_unlock_irqrestore(&port->port_lock, flags);
959 pr_vdebug("gs_chars_in_buffer: (%d,%p) chars=%d\n",
960 port->port_num, tty, chars);
965 /* undo side effects of setting TTY_THROTTLED */
966 static void gs_unthrottle(struct tty_struct *tty)
968 struct gs_port *port = tty->driver_data;
971 spin_lock_irqsave(&port->port_lock, flags);
972 if (port->port_usb) {
973 /* Kickstart read queue processing. We don't do xon/xoff,
974 * rts/cts, or other handshaking with the host, but if the
975 * read queue backs up enough we'll be NAKing OUT packets.
977 tasklet_schedule(&port->push);
978 pr_vdebug(PREFIX "%d: unthrottle\n", port->port_num);
980 spin_unlock_irqrestore(&port->port_lock, flags);
983 static int gs_break_ctl(struct tty_struct *tty, int duration)
985 struct gs_port *port = tty->driver_data;
987 struct gserial *gser;
989 pr_vdebug("gs_break_ctl: ttyGS%d, send break (%d) \n",
990 port->port_num, duration);
992 spin_lock_irq(&port->port_lock);
993 gser = port->port_usb;
994 if (gser && gser->send_break)
995 status = gser->send_break(gser, duration);
996 spin_unlock_irq(&port->port_lock);
1001 static const struct tty_operations gs_tty_ops = {
1005 .put_char = gs_put_char,
1006 .flush_chars = gs_flush_chars,
1007 .write_room = gs_write_room,
1008 .chars_in_buffer = gs_chars_in_buffer,
1009 .unthrottle = gs_unthrottle,
1010 .break_ctl = gs_break_ctl,
1013 /*-------------------------------------------------------------------------*/
1015 static struct tty_driver *gs_tty_driver;
1018 gs_port_alloc(unsigned port_num, struct usb_cdc_line_coding *coding)
1020 struct gs_port *port;
1022 port = kzalloc(sizeof(struct gs_port), GFP_KERNEL);
1026 spin_lock_init(&port->port_lock);
1027 init_waitqueue_head(&port->close_wait);
1028 init_waitqueue_head(&port->drain_wait);
1030 tasklet_init(&port->push, gs_rx_push, (unsigned long) port);
1032 INIT_LIST_HEAD(&port->read_pool);
1033 INIT_LIST_HEAD(&port->read_queue);
1034 INIT_LIST_HEAD(&port->write_pool);
1036 port->port_num = port_num;
1037 port->port_line_coding = *coding;
1039 ports[port_num].port = port;
1045 * gserial_setup - initialize TTY driver for one or more ports
1046 * @g: gadget to associate with these ports
1047 * @count: how many ports to support
1048 * Context: may sleep
1050 * The TTY stack needs to know in advance how many devices it should
1051 * plan to manage. Use this call to set up the ports you will be
1052 * exporting through USB. Later, connect them to functions based
1053 * on what configuration is activated by the USB host; and disconnect
1054 * them as appropriate.
1056 * An example would be a two-configuration device in which both
1057 * configurations expose port 0, but through different functions.
1058 * One configuration could even expose port 1 while the other
1059 * one doesn't.
1061 * Returns negative errno or zero.
1063 int __init gserial_setup(struct usb_gadget *g, unsigned count)
1066 struct usb_cdc_line_coding coding;
1069 if (count == 0 || count > N_PORTS)
1072 gs_tty_driver = alloc_tty_driver(count);
1076 gs_tty_driver->owner = THIS_MODULE;
1077 gs_tty_driver->driver_name = "g_serial";
1078 gs_tty_driver->name = PREFIX;
1079 /* uses dynamically assigned dev_t values */
1081 gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
1082 gs_tty_driver->subtype = SERIAL_TYPE_NORMAL;
1083 gs_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
1084 gs_tty_driver->init_termios = tty_std_termios;
1086 /* 9600-8-N-1 ... matches defaults expected by "usbser.sys" on
1087 * MS-Windows. Otherwise, most of these flags shouldn't affect
1088 * anything unless we were to actually hook up to a serial line.
1090 gs_tty_driver->init_termios.c_cflag =
1091 B9600 | CS8 | CREAD | HUPCL | CLOCAL;
1092 gs_tty_driver->init_termios.c_ispeed = 9600;
1093 gs_tty_driver->init_termios.c_ospeed = 9600;
1095 coding.dwDTERate = __constant_cpu_to_le32(9600);
1096 coding.bCharFormat = USB_CDC_1_STOP_BITS;
1097 coding.bParityType = USB_CDC_NO_PARITY;
1098 coding.bDataBits = 8;
1100 tty_set_operations(gs_tty_driver, &gs_tty_ops);
1102 /* make devices be openable */
1103 for (i = 0; i < count; i++) {
1104 mutex_init(&ports[i].lock);
1105 status = gs_port_alloc(i, &coding);
1113 /* export the driver ... */
1114 status = tty_register_driver(gs_tty_driver);
1116 put_tty_driver(gs_tty_driver);
1117 pr_err("%s: cannot register, err %d\n",
1122 /* ... and sysfs class devices, so mdev/udev make /dev/ttyGS* */
1123 for (i = 0; i < count; i++) {
1124 struct device *tty_dev;
1126 tty_dev = tty_register_device(gs_tty_driver, i, &g->dev);
1127 if (IS_ERR(tty_dev))
1128 pr_warning("%s: no classdev for port %d, err %ld\n",
1129 __func__, i, PTR_ERR(tty_dev));
1132 pr_debug("%s: registered %d ttyGS* device%s\n", __func__,
1133 count, (count == 1) ? "" : "s");
1138 kfree(ports[count].port);
1139 put_tty_driver(gs_tty_driver);
1140 gs_tty_driver = NULL;
1144 static int gs_closed(struct gs_port *port)
1148 spin_lock_irq(&port->port_lock);
1149 cond = (port->open_count == 0) && !port->openclose;
1150 spin_unlock_irq(&port->port_lock);
1155 * gserial_cleanup - remove TTY-over-USB driver and devices
1156 * Context: may sleep
1158 * This is called to free all resources allocated by @gserial_setup().
1159 * Accordingly, it may need to wait until some open /dev/ files have
1160 * closed.
1162 * The caller must have issued @gserial_disconnect() for any ports
1163 * that had previously been connected, so that there is never any
1164 * I/O pending when it's called.
1166 void gserial_cleanup(void)
1169 struct gs_port *port;
1174 /* start sysfs and /dev/ttyGS* node removal */
1175 for (i = 0; i < n_ports; i++)
1176 tty_unregister_device(gs_tty_driver, i);
1178 for (i = 0; i < n_ports; i++) {
1179 /* prevent new opens */
1180 mutex_lock(&ports[i].lock);
1181 port = ports[i].port;
1182 ports[i].port = NULL;
1183 mutex_unlock(&ports[i].lock);
1185 tasklet_kill(&port->push);
1187 /* wait for old opens to finish */
1188 wait_event(port->close_wait, gs_closed(port));
1190 WARN_ON(port->port_usb != NULL);
1196 tty_unregister_driver(gs_tty_driver);
1197 gs_tty_driver = NULL;
1199 pr_debug("%s: cleaned up ttyGS* support\n", __func__);
1203 * gserial_connect - notify TTY I/O glue that USB link is active
1204 * @gser: the function, set up with endpoints and descriptors
1205 * @port_num: which port is active
1206 * Context: any (usually from irq)
1208 * This is called to activate endpoints and let the TTY layer know that
1209 * the connection is active ... not unlike "carrier detect". It won't
1210 * necessarily start I/O queues; unless the TTY is held open by any
1211 * task, there would be no point. However, the endpoints will be
1212 * activated so the USB host can perform I/O, subject to basic USB
1213 * hardware flow control.
1215 * Caller needs to have set up the endpoints and USB function in @dev
1216 * before calling this, as well as the appropriate (speed-specific)
1217 * endpoint descriptors, and also have set up the TTY driver by calling
1218 * @gserial_setup().
1220 * Returns negative errno or zero.
1221 * On success, ep->driver_data will be overwritten.
1223 int gserial_connect(struct gserial *gser, u8 port_num)
1225 struct gs_port *port;
1226 unsigned long flags;
1229 if (!gs_tty_driver || port_num >= n_ports)
1232 /* we "know" gserial_cleanup() hasn't been called */
1233 port = ports[port_num].port;
1235 /* activate the endpoints */
1236 status = usb_ep_enable(gser->in, gser->in_desc);
1239 gser->in->driver_data = port;
1241 status = usb_ep_enable(gser->out, gser->out_desc);
1244 gser->out->driver_data = port;
1246 /* then tell the tty glue that I/O can work */
1247 spin_lock_irqsave(&port->port_lock, flags);
1248 gser->ioport = port;
1249 port->port_usb = gser;
1251 /* REVISIT unclear how best to handle this state...
1252 * we don't really couple it with the Linux TTY.
1254 gser->port_line_coding = port->port_line_coding;
1256 /* REVISIT if waiting on "carrier detect", signal. */
1258 /* if it's already open, start I/O ... and notify the serial
1259 * protocol about open/close status (connect/disconnect).
1261 if (port->open_count) {
1262 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
1265 gser->connect(gser);
1267 if (gser->disconnect)
1268 gser->disconnect(gser);
1271 spin_unlock_irqrestore(&port->port_lock, flags);
1276 usb_ep_disable(gser->in);
1277 gser->in->driver_data = NULL;
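/*
 * Illustrative caller sketch (simplified from what a CDC ACM style
 * function does; "my_func", "to_my_func" and the port/port_num members
 * are hypothetical):
 *
 *	static int my_set_alt(struct usb_function *f, unsigned intf,
 *			unsigned alt)
 *	{
 *		struct my_func *fn = to_my_func(f);
 *
 *		// host activated our data interface: bring up ttyGS<n>
 *		return gserial_connect(&fn->port, fn->port_num);
 *	}
 */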
1282 * gserial_disconnect - notify TTY I/O glue that USB link is inactive
1283 * @gser: the function, on which gserial_connect() was called
1284 * Context: any (usually from irq)
1286 * This is called to deactivate endpoints and let the TTY layer know
1287 * that the connection went inactive ... not unlike "hangup".
1289 * On return, the state is as if gserial_connect() had never been called;
1290 * there is no active USB I/O on these endpoints.
1292 void gserial_disconnect(struct gserial *gser)
1294 struct gs_port *port = gser->ioport;
1295 unsigned long flags;
1300 /* tell the TTY glue not to do I/O here any more */
1301 spin_lock_irqsave(&port->port_lock, flags);
1303 /* REVISIT as above: how best to track this? */
1304 port->port_line_coding = gser->port_line_coding;
1306 port->port_usb = NULL;
1307 gser->ioport = NULL;
1308 if (port->open_count > 0 || port->openclose) {
1309 wake_up_interruptible(&port->drain_wait);
1311 tty_hangup(port->port_tty);
1313 spin_unlock_irqrestore(&port->port_lock, flags);
1315 /* disable endpoints, aborting down any active I/O */
1316 usb_ep_disable(gser->out);
1317 gser->out->driver_data = NULL;
1319 usb_ep_disable(gser->in);
1320 gser->in->driver_data = NULL;
1322 /* finally, free any unused/unusable I/O buffers */
1323 spin_lock_irqsave(&port->port_lock, flags);
1324 if (port->open_count == 0 && !port->openclose)
1325 gs_buf_free(&port->port_write_buf);
1326 gs_free_requests(gser->out, &port->read_pool);
1327 gs_free_requests(gser->out, &port->read_queue);
1328 gs_free_requests(gser->in, &port->write_pool);
1329 spin_unlock_irqrestore(&port->port_lock, flags);
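/*
 * Illustrative counterpart to the connect sketch above (same
 * hypothetical "my_func"): a function's disable() callback, run on
 * config changes and USB disconnect, just tears the link down.
 *
 *	static void my_disable(struct usb_function *f)
 *	{
 *		gserial_disconnect(&to_my_func(f)->port);
 *	}
 */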