/*
 *  linux/net/iucv/af_iucv.c
 *
 *  IUCV protocol stack for Linux on zSeries
 *
 *  Copyright 2006 IBM Corporation
 *
 *  Author(s):	Jennifer Hunt <jenhunt@us.ibm.com>
 */
#define KMSG_COMPONENT "af_iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/iucv.h>
#include <net/iucv/af_iucv.h>
#define VERSION "1.1"

static char iucv_userid[80];

static struct proto_ops iucv_sock_ops;

static struct proto iucv_proto = {
	.name		= "AF_IUCV",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct iucv_sock),
};
/* special AF_IUCV IPRM messages */
static const u8 iprm_shutdown[8] =
	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};

#define TRGCLS_SIZE	(sizeof(((struct iucv_message *) 0)->class))
/* macros to set/get socket control buffer at correct offset */
#define CB_TAG(skb)	((skb)->cb)		/* iucv message tag */
#define CB_TAG_LEN	(sizeof(((struct iucv_message *) 0)->tag))
#define CB_TRGCLS(skb)	((skb)->cb + CB_TAG_LEN) /* iucv msg target class */
#define CB_TRGCLS_LEN	(TRGCLS_SIZE)
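
/*
 * Resulting skb->cb layout (illustrative; tag and class are the 4-byte
 * fields of struct iucv_message):
 *	cb[0..3]	iucv message tag
 *	cb[4..7]	iucv message target class
 */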
#define __iucv_sock_wait(sk, condition, timeo, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
	long __timeo = timeo;						\
	ret = 0;							\
	while (!(condition)) {						\
		prepare_to_wait(sk->sk_sleep, &__wait, TASK_INTERRUPTIBLE); \
		if (!__timeo) {						\
			ret = -EAGAIN;					\
			break;						\
		}							\
		if (signal_pending(current)) {				\
			ret = sock_intr_errno(__timeo);			\
			break;						\
		}							\
		release_sock(sk);					\
		__timeo = schedule_timeout(__timeo);			\
		lock_sock(sk);						\
		ret = sock_error(sk);					\
		if (ret)						\
			break;						\
	}								\
	finish_wait(sk->sk_sleep, &__wait);				\
} while (0)

#define iucv_sock_wait(sk, condition, timeo)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__iucv_sock_wait(sk, condition, timeo, __ret);		\
	__ret;								\
})
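
/*
 * Typical use of the wait macros above (as in iucv_sock_connect() and
 * iucv_sock_sendmsg() below): sleep until a state or queue condition
 * holds, a timeout expires, or a signal arrives, e.g.
 *
 *	err = iucv_sock_wait(sk, iucv_below_msglim(sk),
 *			     sock_sndtimeo(sk, noblock));
 */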
static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);

/* Call Back functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
				 u8 ipuser[16]);
static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
static void iucv_callback_shutdown(struct iucv_path *, u8 ipuser[16]);
static struct iucv_sock_list iucv_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
	.autobind_name = ATOMIC_INIT(0)
};

static struct iucv_handler af_iucv_handler = {
	.path_pending	  = iucv_callback_connreq,
	.path_complete	  = iucv_callback_connack,
	.path_severed	  = iucv_callback_connrej,
	.message_pending  = iucv_callback_rx,
	.message_complete = iucv_callback_txdone,
	.path_quiesced	  = iucv_callback_shutdown,
};
static inline void high_nmcpy(unsigned char *dst, char *src)
{
	memcpy(dst, src, 8);
}

static inline void low_nmcpy(unsigned char *dst, char *src)
{
	memcpy(&dst[8], src, 8);
}
/**
 * iucv_msg_length() - Returns the length of an iucv message.
 * @msg:	Pointer to struct iucv_message, MUST NOT be NULL
 *
 * The function returns the length of the specified iucv message @msg of data
 * stored in a buffer and of data stored in the parameter list (PRMDATA).
 *
 * For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket
 * data:
 *	PRMDATA[0..6]	socket data (max 7 bytes);
 *	PRMDATA[7]	socket data length value (len is 0xff - PRMDATA[7])
 *
 * The socket data length is computed by subtracting the socket data length
 * value from 0xff.
 * If the socket data length is greater than 7, then PRMDATA can be used for
 * special notifications (see iucv_sock_shutdown); in that case, the function
 * returns 8.
 *
 * Use this function to allocate socket buffers to store iucv message data.
 */
static inline size_t iucv_msg_length(struct iucv_message *msg)
{
	size_t datalen;

	if (msg->flags & IUCV_IPRMDATA) {
		datalen = 0xff - msg->rmmsg[7];
		return (datalen < 8) ? datalen : 8;
	}
	return msg->length;
}
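
/*
 * Worked example (illustrative only): a 5-byte IPRM payload is sent with
 * PRMDATA[7] = 0xff - 5 = 0xfa, so this function computes 0xff - 0xfa = 5.
 * The special shutdown message iprm_shutdown carries PRMDATA[7] = 0x01,
 * which decodes to 0xff - 0x01 = 254 (> 7), so it is reported as length 8.
 */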
/**
 * iucv_sock_in_state() - check for specific states
 * @sk:		sock structure
 * @state:	first iucv sk state
 * @state2:	second iucv sk state
 *
 * Returns true if the socket is in either the first or the second state.
 */
static int iucv_sock_in_state(struct sock *sk, int state, int state2)
{
	return (sk->sk_state == state || sk->sk_state == state2);
}
/**
 * iucv_below_msglim() - function to check if messages can be sent
 * @sk:		sock structure
 *
 * Returns true if the send queue length is lower than the message limit.
 * Always returns true if the socket is not connected (no iucv path for
 * checking the message limit).
 */
static inline int iucv_below_msglim(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (sk->sk_state != IUCV_CONNECTED)
		return 1;

	return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
}
/**
 * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
 */
static void iucv_sock_wake_msglim(struct sock *sk)
{
	read_lock(&sk->sk_callback_lock);
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible_all(sk->sk_sleep);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	read_unlock(&sk->sk_callback_lock);
}
/* Timers */
static void iucv_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *)arg;

	bh_lock_sock(sk);
	sk->sk_err = ETIMEDOUT;
	sk->sk_state_change(sk);
	bh_unlock_sock(sk);

	iucv_sock_kill(sk);
	sock_put(sk);
}

static void iucv_sock_clear_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}
static struct sock *__iucv_get_sock_by_name(char *nm)
{
	struct sock *sk;
	struct hlist_node *node;

	sk_for_each(sk, node, &iucv_sk_list.head)
		if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
			return sk;

	return NULL;
}
static void iucv_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}
/* Cleanup Listen */
static void iucv_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	/* Close non-accepted connections */
	while ((sk = iucv_accept_dequeue(parent, NULL))) {
		iucv_sock_close(sk);
		iucv_sock_kill(sk);
	}

	parent->sk_state = IUCV_CLOSED;
	sock_set_flag(parent, SOCK_ZAPPED);
}
/* Kill socket (only if zapped and orphaned) */
static void iucv_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	iucv_sock_unlink(&iucv_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}
/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
	unsigned char user_data[16];
	struct iucv_sock *iucv = iucv_sk(sk);
	int err;
	unsigned long timeo;

	iucv_sock_clear_timer(sk);
	lock_sock(sk);

	switch (sk->sk_state) {
	case IUCV_LISTEN:
		iucv_sock_cleanup_listen(sk);
		break;

	case IUCV_CONNECTED:
	case IUCV_DISCONN:
		err = 0;

		sk->sk_state = IUCV_CLOSING;
		sk->sk_state_change(sk);

		if (!skb_queue_empty(&iucv->send_skb_q)) {
			if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
				timeo = sk->sk_lingertime;
			else
				timeo = IUCV_DISCONN_TIMEOUT;
			err = iucv_sock_wait(sk,
					iucv_sock_in_state(sk, IUCV_CLOSED, 0),
					timeo);
		}

	case IUCV_CLOSING:	/* fall through */
		sk->sk_state = IUCV_CLOSED;
		sk->sk_state_change(sk);

		if (iucv->path) {
			low_nmcpy(user_data, iucv->src_name);
			high_nmcpy(user_data, iucv->dst_name);
			ASCEBC(user_data, sizeof(user_data));
			err = iucv_path_sever(iucv->path, user_data);
			iucv_path_free(iucv->path);
			iucv->path = NULL;
		}

		sk->sk_err = ECONNRESET;
		sk->sk_state_change(sk);

		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);

		sock_set_flag(sk, SOCK_ZAPPED);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}

	release_sock(sk);
	iucv_sock_kill(sk);
}
static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
	if (parent)
		sk->sk_type = parent->sk_type;
}
static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&iucv_sk(sk)->accept_q);
	spin_lock_init(&iucv_sk(sk)->accept_q_lock);
	skb_queue_head_init(&iucv_sk(sk)->send_skb_q);
	INIT_LIST_HEAD(&iucv_sk(sk)->message_q.list);
	spin_lock_init(&iucv_sk(sk)->message_q.lock);
	skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
	iucv_sk(sk)->send_tag = 0;
	iucv_sk(sk)->flags = 0;
	iucv_sk(sk)->msglimit = IUCV_QUEUELEN_DEFAULT;
	iucv_sk(sk)->path = NULL;
	memset(&iucv_sk(sk)->src_user_id, 0, 32);

	sk->sk_destruct = iucv_sock_destruct;
	sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
	sk->sk_allocation = GFP_DMA;

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = IUCV_OPEN;

	setup_timer(&sk->sk_timer, iucv_sock_timeout, (unsigned long)sk);

	iucv_sock_link(&iucv_sk_list, sk);
	return sk;
}
/* Create an IUCV socket */
static int iucv_sock_create(struct net *net, struct socket *sock, int protocol)
{
	struct sock *sk;

	if (protocol && protocol != PF_IUCV)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
		sock->ops = &iucv_sock_ops;
		break;
	case SOCK_SEQPACKET:
		/* currently, proto ops can handle both sk types */
		sock->ops = &iucv_sock_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
	if (!sk)
		return -ENOMEM;

	iucv_sock_init(sk, NULL);

	return 0;
}
void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock_bh(&l->lock);
}

void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_del_node_init(sk);
	write_unlock_bh(&l->lock);
}
void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(parent);

	sock_hold(sk);
	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent = parent;
	parent->sk_ack_backlog++;
}
void iucv_accept_unlink(struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);

	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_del_init(&iucv_sk(sk)->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent->sk_ack_backlog--;
	iucv_sk(sk)->parent = NULL;
	sock_put(sk);
}
struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;
		lock_sock(sk);

		if (sk->sk_state == IUCV_CLOSED) {
			iucv_accept_unlink(sk);
			release_sock(sk);
			continue;
		}

		if (sk->sk_state == IUCV_CONNECTED ||
		    sk->sk_state == IUCV_SEVERED ||
		    !newsock) {
			iucv_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			if (sk->sk_state == IUCV_SEVERED)
				sk->sk_state = IUCV_DISCONN;

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}
	return NULL;
}
/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
			  int addr_len)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	int err;

	/* Verify the input sockaddr */
	if (!addr || addr->sa_family != AF_IUCV)
		return -EINVAL;

	lock_sock(sk);
	if (sk->sk_state != IUCV_OPEN) {
		err = -EBADFD;
		goto done;
	}

	write_lock_bh(&iucv_sk_list.lock);

	iucv = iucv_sk(sk);
	if (__iucv_get_sock_by_name(sa->siucv_name)) {
		err = -EADDRINUSE;
		goto done_unlock;
	}
	if (iucv->path) {
		err = 0;
		goto done_unlock;
	}

	/* Bind the socket */
	memcpy(iucv->src_name, sa->siucv_name, 8);

	/* Copy the user id */
	memcpy(iucv->src_user_id, iucv_userid, 8);
	sk->sk_state = IUCV_BOUND;
	err = 0;

done_unlock:
	/* Release the socket list lock */
	write_unlock_bh(&iucv_sk_list.lock);
done:
	release_sock(sk);
	return err;
}
/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	char query_buffer[80];
	char name[12];
	int err = 0;

	/* Set the userid and name */
	cpcmd("QUERY USERID", query_buffer, sizeof(query_buffer), &err);
	if (unlikely(err))
		return -EPROTO;

	memcpy(iucv->src_user_id, query_buffer, 8);

	write_lock_bh(&iucv_sk_list.lock);

	sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
	while (__iucv_get_sock_by_name(name)) {
		sprintf(name, "%08x",
			atomic_inc_return(&iucv_sk_list.autobind_name));
	}

	write_unlock_bh(&iucv_sk_list.lock);

	memcpy(&iucv->src_name, name, 8);

	return err;
}
/* Connect an unconnected socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
			     int alen, int flags)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	unsigned char user_data[16];
	int err;

	if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
		return -EINVAL;

	if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
		return -EBADFD;

	if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
		return -EINVAL;

	if (sk->sk_state == IUCV_OPEN) {
		err = iucv_sock_autobind(sk);
		if (unlikely(err))
			return err;
	}

	lock_sock(sk);

	/* Set the destination information */
	memcpy(iucv_sk(sk)->dst_user_id, sa->siucv_user_id, 8);
	memcpy(iucv_sk(sk)->dst_name, sa->siucv_name, 8);

	high_nmcpy(user_data, sa->siucv_name);
	low_nmcpy(user_data, iucv_sk(sk)->src_name);
	ASCEBC(user_data, sizeof(user_data));

	iucv = iucv_sk(sk);
	/* Create path. */
	iucv->path = iucv_path_alloc(iucv->msglimit,
				     IUCV_IPRMDATA, GFP_KERNEL);
	if (!iucv->path) {
		err = -ENOMEM;
		goto done;
	}
	err = iucv_path_connect(iucv->path, &af_iucv_handler,
				sa->siucv_user_id, NULL, user_data, sk);
	if (err) {
		iucv_path_free(iucv->path);
		iucv->path = NULL;
		switch (err) {
		case 0x0b:	/* Target communicator is not logged on */
			err = -ENETUNREACH;
			break;
		case 0x0d:	/* Max connections for this guest exceeded */
		case 0x0e:	/* Max connections for target guest exceeded */
			err = -EAGAIN;
			break;
		case 0x0f:	/* Missing IUCV authorization */
			err = -EACCES;
			break;
		default:
			err = -ECONNREFUSED;
			break;
		}
		goto done;
	}

	if (sk->sk_state != IUCV_CONNECTED) {
		err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
							    IUCV_DISCONN),
				     sock_sndtimeo(sk, flags & O_NONBLOCK));
	}

	if (sk->sk_state == IUCV_DISCONN)
		err = -ECONNREFUSED;

	if (err) {
		iucv_path_sever(iucv->path, NULL);
		iucv_path_free(iucv->path);
		iucv->path = NULL;
	}

done:
	release_sock(sk);
	return err;
}
/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sk->sk_state != IUCV_BOUND)
		goto done;

	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto done;

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = IUCV_LISTEN;
	err = 0;

done:
	release_sock(sk);
	return err;
}
/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
			    int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != IUCV_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	/* Wait for an incoming connection */
	add_wait_queue_exclusive(sk->sk_sleep, &wait);
	while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != IUCV_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk->sk_sleep, &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

done:
	release_sock(sk);
	return err;
}
static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
			     int *len, int peer)
{
	struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;

	addr->sa_family = AF_IUCV;
	*len = sizeof(struct sockaddr_iucv);

	if (peer) {
		memcpy(siucv->siucv_user_id, iucv_sk(sk)->dst_user_id, 8);
		memcpy(siucv->siucv_name, &iucv_sk(sk)->dst_name, 8);
	} else {
		memcpy(siucv->siucv_user_id, iucv_sk(sk)->src_user_id, 8);
		memcpy(siucv->siucv_name, iucv_sk(sk)->src_name, 8);
	}
	memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
	memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
	memset(siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

	return 0;
}
/**
 * iucv_send_iprm() - Send socket data in parameter list of an iucv message.
 * @path:	IUCV path
 * @msg:	Pointer to a struct iucv_message
 * @skb:	The socket data to send, skb->len MUST BE <= 7
 *
 * Send the socket data in the parameter list in the iucv message
 * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the parameter
 * list and the socket data len at index 7 (last byte).
 * See also iucv_msg_length().
 *
 * Returns the error code from the iucv_message_send() call.
 */
static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
			  struct sk_buff *skb)
{
	u8 prmdata[8];

	memcpy(prmdata, (void *) skb->data, skb->len);
	prmdata[7] = 0xff - (u8) skb->len;
	return iucv_message_send(path, msg, IUCV_IPRMDATA, 0,
				 (void *) prmdata, 8);
}
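
/*
 * Encoding example (illustrative only): sending the 3-byte payload "abc"
 * through iucv_send_iprm() yields
 *	prmdata = { 'a', 'b', 'c', ?, ?, ?, ?, 0xfc }
 * where bytes 3..6 are left uninitialized by the 3-byte memcpy and
 * 0xfc = 0xff - 3; the peer recovers the length in iucv_msg_length() above.
 */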
static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct iucv_message txmsg;
	struct cmsghdr *cmsg;
	int cmsg_done;
	long timeo;
	char user_id[9];
	char appl_id[9];
	int err;
	int noblock = msg->msg_flags & MSG_DONTWAIT;

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* SOCK_SEQPACKET: we do not support segmented records */
	if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR))
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		err = -EPIPE;
		goto out;
	}

	/* Return if the socket is not in connected state */
	if (sk->sk_state != IUCV_CONNECTED) {
		err = -ENOTCONN;
		goto out;
	}

	/* initialize defaults */
	cmsg_done = 0;	/* check for duplicate headers */
	txmsg.class = 0;

	/* iterate over control messages */
	for (cmsg = CMSG_FIRSTHDR(msg); cmsg;
	     cmsg = CMSG_NXTHDR(msg, cmsg)) {

		if (!CMSG_OK(msg, cmsg)) {
			err = -EINVAL;
			goto out;
		}

		if (cmsg->cmsg_level != SOL_IUCV)
			continue;

		if (cmsg->cmsg_type & cmsg_done) {
			err = -EINVAL;
			goto out;
		}
		cmsg_done |= cmsg->cmsg_type;

		switch (cmsg->cmsg_type) {
		case SCM_IUCV_TRGCLS:
			if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
				err = -EINVAL;
				goto out;
			}

			/* set iucv message target class */
			memcpy(&txmsg.class,
			       (void *) CMSG_DATA(cmsg), TRGCLS_SIZE);
			break;

		default:
			err = -EINVAL;
			goto out;
		}
	}

	/* allocate one skb for each iucv message:
	 * this is fine for SOCK_SEQPACKET (unless we want to support
	 * segmented records using the MSG_EOR flag), but
	 * for SOCK_STREAM we might want to improve it in future */
	skb = sock_alloc_send_skb(sk, len, noblock, &err);
	if (!skb)
		goto out;
	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		err = -EFAULT;
		goto fail;
	}

	/* wait if the number of outstanding messages for the iucv path
	 * has reached the message limit */
	timeo = sock_sndtimeo(sk, noblock);
	err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
	if (err)
		goto fail;

	/* return -ECONNRESET if the socket is no longer connected */
	if (sk->sk_state != IUCV_CONNECTED) {
		err = -ECONNRESET;
		goto fail;
	}

	/* increment and save iucv message tag for msg_completion cbk */
	txmsg.tag = iucv->send_tag++;
	memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
	skb_queue_tail(&iucv->send_skb_q, skb);

	if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
	    && skb->len <= 7) {
		err = iucv_send_iprm(iucv->path, &txmsg, skb);

		/* on success: there is no message_complete callback
		 * for an IPRMDATA msg; remove skb from send queue */
		if (err == 0) {
			skb_unlink(skb, &iucv->send_skb_q);
			kfree_skb(skb);
		}

		/* this error should never happen since the
		 * IUCV_IPRMDATA path flag is set... sever path */
		if (err == 0x15) {
			iucv_path_sever(iucv->path, NULL);
			skb_unlink(skb, &iucv->send_skb_q);
			err = -EPIPE;
			goto fail;
		}
	} else
		err = iucv_message_send(iucv->path, &txmsg, 0, 0,
					(void *) skb->data, skb->len);
	if (err) {
		if (err == 3) {
			user_id[8] = 0;
			memcpy(user_id, iucv->dst_user_id, 8);
			appl_id[8] = 0;
			memcpy(appl_id, iucv->dst_name, 8);
			pr_err("Application %s on z/VM guest %s"
			       " exceeds message limit\n",
			       appl_id, user_id);
			err = -EAGAIN;
		} else
			err = -EPIPE;
		skb_unlink(skb, &iucv->send_skb_q);
		goto fail;
	}

	release_sock(sk);
	return len;

fail:
	kfree_skb(skb);
out:
	release_sock(sk);
	return err;
}
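
/*
 * User-space sketch (illustrative only, not part of this module): the iucv
 * message target class can be supplied for a send via a SCM_IUCV_TRGCLS
 * control message, matching the cmsg parsing in iucv_sock_sendmsg() above:
 *
 *	char control[CMSG_SPACE(sizeof(u32))];
 *	struct msghdr m = { .msg_iov = &iov, .msg_iovlen = 1,
 *			    .msg_control = control,
 *			    .msg_controllen = sizeof(control) };
 *	struct cmsghdr *c = CMSG_FIRSTHDR(&m);
 *	u32 trgcls = 1;
 *
 *	c->cmsg_level = SOL_IUCV;
 *	c->cmsg_type  = SCM_IUCV_TRGCLS;
 *	c->cmsg_len   = CMSG_LEN(sizeof(trgcls));
 *	memcpy(CMSG_DATA(c), &trgcls, sizeof(trgcls));
 *	sendmsg(fd, &m, 0);
 */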
/* iucv_fragment_skb() - Fragment a single IUCV message into multiple skb's
 *
 * Locking: must be called with message_q.lock held
 */
static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
{
	int dataleft, size, copied = 0;
	struct sk_buff *nskb;

	dataleft = len;
	while (dataleft) {
		if (dataleft >= sk->sk_rcvbuf / 4)
			size = sk->sk_rcvbuf / 4;
		else
			size = dataleft;

		nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
		if (!nskb)
			return -ENOMEM;

		/* copy target class to control buffer of new skb */
		memcpy(CB_TRGCLS(nskb), CB_TRGCLS(skb), CB_TRGCLS_LEN);

		/* copy data fragment */
		memcpy(nskb->data, skb->data + copied, size);

		copied += size;
		dataleft -= size;

		skb_reset_transport_header(nskb);
		skb_reset_network_header(nskb);
		nskb->len = size;

		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb);
	}

	return 0;
}
/* iucv_process_message() - Receive a single outstanding IUCV message
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
				 struct iucv_path *path,
				 struct iucv_message *msg)
{
	int rc;
	unsigned int len;

	len = iucv_msg_length(msg);

	/* store msg target class in the second 4 bytes of skb ctrl buffer */
	/* Note: the first 4 bytes are reserved for msg tag */
	memcpy(CB_TRGCLS(skb), &msg->class, CB_TRGCLS_LEN);

	/* check for special IPRM messages (e.g. iucv_sock_shutdown) */
	if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
		if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) {
			skb->data = NULL;
			skb->len = 0;
		}
	} else {
		rc = iucv_message_receive(path, msg, msg->flags & IUCV_IPRMDATA,
					  skb->data, len, NULL);
		if (rc) {
			kfree_skb(skb);
			return;
		}
		/* we need to fragment iucv messages for SOCK_STREAM only;
		 * for SOCK_SEQPACKET, it is only relevant if we support
		 * record segmentation using MSG_EOR (see also recvmsg()) */
		if (sk->sk_type == SOCK_STREAM &&
		    skb->truesize >= sk->sk_rcvbuf / 4) {
			rc = iucv_fragment_skb(sk, skb, len);
			kfree_skb(skb);
			skb = NULL;
			if (rc) {
				iucv_path_sever(path, NULL);
				return;
			}
			skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
		} else {
			skb_reset_transport_header(skb);
			skb_reset_network_header(skb);
			skb->len = len;
		}
	}

	if (sock_queue_rcv_skb(sk, skb))
		skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
}
/* iucv_process_message_q() - Process outstanding IUCV messages
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message_q(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *p, *n;

	list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
		skb = alloc_skb(iucv_msg_length(&p->msg), GFP_ATOMIC | GFP_DMA);
		if (!skb)
			break;
		iucv_process_message(sk, skb, p->path, &p->msg);
		list_del(&p->list);
		kfree(p);
		if (!skb_queue_empty(&iucv->backlog_skb_q))
			break;
	}
}
static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned int copied, rlen;
	struct sk_buff *skb, *rskb, *cskb;
	int err = 0;

	if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
	    skb_queue_empty(&iucv->backlog_skb_q) &&
	    skb_queue_empty(&sk->sk_receive_queue) &&
	    list_empty(&iucv->message_q.list))
		return 0;

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	/* receive/dequeue next skb:
	 * the function understands MSG_PEEK and, thus, does not dequeue skb */
	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;
		return err;
	}

	rlen = skb->len;	/* real length of skb */
	copied = min_t(unsigned int, rlen, len);

	cskb = skb;
	if (memcpy_toiovec(msg->msg_iov, cskb->data, copied)) {
		if (!(flags & MSG_PEEK))
			skb_queue_head(&sk->sk_receive_queue, skb);
		return -EFAULT;
	}

	/* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (copied < rlen)
			msg->msg_flags |= MSG_TRUNC;
		/* each iucv message contains a complete record */
		msg->msg_flags |= MSG_EOR;
	}

	/* create control message to store iucv msg target class:
	 * get the trgcls from the control buffer of the skb due to
	 * fragmentation of original iucv message. */
	err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
		       CB_TRGCLS_LEN, CB_TRGCLS(skb));
	if (err) {
		if (!(flags & MSG_PEEK))
			skb_queue_head(&sk->sk_receive_queue, skb);
		return err;
	}

	/* Mark read part of skb as used */
	if (!(flags & MSG_PEEK)) {

		/* SOCK_STREAM: re-queue skb if it contains unreceived data */
		if (sk->sk_type == SOCK_STREAM) {
			skb_pull(skb, copied);
			if (skb->len) {
				skb_queue_head(&sk->sk_receive_queue, skb);
				goto done;
			}
		}

		kfree_skb(skb);

		/* Queue backlog skbs */
		rskb = skb_dequeue(&iucv->backlog_skb_q);
		while (rskb) {
			if (sock_queue_rcv_skb(sk, rskb)) {
				skb_queue_head(&iucv->backlog_skb_q,
					       rskb);
				break;
			} else {
				rskb = skb_dequeue(&iucv->backlog_skb_q);
			}
		}
		if (skb_queue_empty(&iucv->backlog_skb_q)) {
			spin_lock_bh(&iucv->message_q.lock);
			if (!list_empty(&iucv->message_q.list))
				iucv_process_message_q(sk);
			spin_unlock_bh(&iucv->message_q.lock);
		}
	} else
		skb_queue_head(&sk->sk_receive_queue, skb);

done:
	/* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
	if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
		copied = rlen;

	return err ? : copied;
}
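
/*
 * Receive-side sketch (illustrative only): the target class stored by
 * put_cmsg() above can be read back in user space with the standard cmsg
 * macros:
 *
 *	for (c = CMSG_FIRSTHDR(&m); c; c = CMSG_NXTHDR(&m, c))
 *		if (c->cmsg_level == SOL_IUCV &&
 *		    c->cmsg_type == SCM_IUCV_TRGCLS)
 *			memcpy(&trgcls, CMSG_DATA(c), sizeof(trgcls));
 */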
static inline unsigned int iucv_accept_poll(struct sock *parent)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;

		if (sk->sk_state == IUCV_CONNECTED)
			return POLLIN | POLLRDNORM;
	}

	return 0;
}
unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
			    poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask = 0;

	poll_wait(file, sk->sk_sleep, wait);

	if (sk->sk_state == IUCV_LISTEN)
		return iucv_accept_poll(sk);

	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR;

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP;

	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;

	if (sk->sk_state == IUCV_CLOSED)
		mask |= POLLHUP;

	if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED)
		mask |= POLLIN;

	if (sock_writeable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}
static int iucv_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_message txmsg;
	int err = 0;

	how++;

	if ((how & ~SHUTDOWN_MASK) || !how)
		return -EINVAL;

	lock_sock(sk);
	switch (sk->sk_state) {
	case IUCV_DISCONN:
	case IUCV_CLOSING:
	case IUCV_SEVERED:
	case IUCV_CLOSED:
		err = -ENOTCONN;
		goto fail;

	default:
		sk->sk_shutdown |= how;
		break;
	}

	if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
		txmsg.class = 0;
		txmsg.tag = 0;
		err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0,
					(void *) iprm_shutdown, 8);
		if (err) {
			switch (err) {
			case 1:
				err = -ENOTCONN;
				break;
			case 2:
				err = -ECONNRESET;
				break;
			default:
				err = -ENOTCONN;
				break;
			}
		}
	}

	if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
		err = iucv_path_quiesce(iucv_sk(sk)->path, NULL);
		if (err)
			err = -ENOTCONN;

		skb_queue_purge(&sk->sk_receive_queue);
	}

	/* Wake up anyone sleeping in poll */
	sk->sk_state_change(sk);

fail:
	release_sock(sk);
	return err;
}
static int iucv_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	if (!sk)
		return 0;

	iucv_sock_close(sk);

	/* Unregister with IUCV base support */
	if (iucv_sk(sk)->path) {
		iucv_path_sever(iucv_sk(sk)->path, NULL);
		iucv_path_free(iucv_sk(sk)->path);
		iucv_sk(sk)->path = NULL;
	}

	sock_orphan(sk);
	iucv_sock_kill(sk);
	return err;
}
/* getsockopt and setsockopt */
static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
				char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int val;
	int rc;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *) optval))
		return -EFAULT;

	rc = 0;

	lock_sock(sk);
	switch (optname) {
	case SO_IPRMDATA_MSG:
		if (val)
			iucv->flags |= IUCV_IPRMDATA;
		else
			iucv->flags &= ~IUCV_IPRMDATA;
		break;
	case SO_MSGLIMIT:
		switch (sk->sk_state) {
		case IUCV_OPEN:
		case IUCV_BOUND:
			if (val < 1 || val > (u16)(~0))
				rc = -EINVAL;
			else
				iucv->msglimit = val;
			break;
		default:
			rc = -EINVAL;
			break;
		}
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);

	return rc;
}
static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
				char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int val, len;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	len = min_t(unsigned int, len, sizeof(int));

	switch (optname) {
	case SO_IPRMDATA_MSG:
		val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0;
		break;
	case SO_MSGLIMIT:
		lock_sock(sk);
		val = (iucv->path != NULL) ? iucv->path->msglim	/* connected */
					   : iucv->msglimit;	/* default */
		release_sock(sk);
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
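
/*
 * Usage sketch (illustrative only): SO_MSGLIMIT can only be changed while
 * the socket is in IUCV_OPEN or IUCV_BOUND state (see the state switch in
 * iucv_sock_setsockopt() above), i.e. before connect() or listen():
 *
 *	int limit = 10;
 *	setsockopt(fd, SOL_IUCV, SO_MSGLIMIT, &limit, sizeof(limit));
 */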
/* Callback wrappers - called from iucv base support */
static int iucv_callback_connreq(struct iucv_path *path,
				 u8 ipvmid[8], u8 ipuser[16])
{
	unsigned char user_data[16];
	unsigned char nuser_data[16];
	unsigned char src_name[8];
	struct hlist_node *node;
	struct sock *sk, *nsk;
	struct iucv_sock *iucv, *niucv;
	int err;

	memcpy(src_name, ipuser, 8);
	EBCASC(src_name, 8);
	/* Find out if this path belongs to af_iucv. */
	read_lock(&iucv_sk_list.lock);
	iucv = NULL;
	sk = NULL;
	sk_for_each(sk, node, &iucv_sk_list.head)
		if (sk->sk_state == IUCV_LISTEN &&
		    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
			/*
			 * Found a listening socket with
			 * src_name == ipuser[0-7].
			 */
			iucv = iucv_sk(sk);
			break;
		}
	read_unlock(&iucv_sk_list.lock);
	if (!iucv)
		/* No socket found, not one of our paths. */
		return -EINVAL;

	bh_lock_sock(sk);

	/* Check if parent socket is listening */
	low_nmcpy(user_data, iucv->src_name);
	high_nmcpy(user_data, iucv->dst_name);
	ASCEBC(user_data, sizeof(user_data));
	if (sk->sk_state != IUCV_LISTEN) {
		err = iucv_path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Check for backlog size */
	if (sk_acceptq_is_full(sk)) {
		err = iucv_path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Create the new socket */
	nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
	if (!nsk) {
		err = iucv_path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);

	/* Set the new iucv_sock */
	memcpy(niucv->dst_name, ipuser + 8, 8);
	EBCASC(niucv->dst_name, 8);
	memcpy(niucv->dst_user_id, ipvmid, 8);
	memcpy(niucv->src_name, iucv->src_name, 8);
	memcpy(niucv->src_user_id, iucv->src_user_id, 8);

	/* Call iucv_accept */
	high_nmcpy(nuser_data, ipuser + 8);
	memcpy(nuser_data + 8, niucv->src_name, 8);
	ASCEBC(nuser_data + 8, 8);

	/* set message limit for path based on msglimit of accepting socket */
	niucv->msglimit = iucv->msglimit;
	path->msglim = iucv->msglimit;
	err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk);
	if (err) {
		err = iucv_path_sever(path, user_data);
		iucv_path_free(path);
		iucv_sock_kill(nsk);
		goto fail;
	}

	iucv_accept_enqueue(sk, nsk);

	/* Wake up accept */
	nsk->sk_state = IUCV_CONNECTED;
	sk->sk_data_ready(sk, 1);
	err = 0;
fail:
	bh_unlock_sock(sk);
	return 0;
}
static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	sk->sk_state = IUCV_CONNECTED;
	sk->sk_state_change(sk);
}
static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *save_msg;
	int len;

	if (sk->sk_shutdown & RCV_SHUTDOWN) {
		iucv_message_reject(path, msg);
		return;
	}

	spin_lock(&iucv->message_q.lock);

	if (!list_empty(&iucv->message_q.list) ||
	    !skb_queue_empty(&iucv->backlog_skb_q))
		goto save_message;

	len = atomic_read(&sk->sk_rmem_alloc);
	len += iucv_msg_length(msg) + sizeof(struct sk_buff);
	if (len > sk->sk_rcvbuf)
		goto save_message;

	skb = alloc_skb(iucv_msg_length(msg), GFP_ATOMIC | GFP_DMA);
	if (!skb)
		goto save_message;

	iucv_process_message(sk, skb, path, msg);
	goto out_unlock;

save_message:
	save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
	if (!save_msg)
		goto out_unlock;
	save_msg->path = path;
	save_msg->msg = *msg;

	list_add_tail(&save_msg->list, &iucv->message_q.list);

out_unlock:
	spin_unlock(&iucv->message_q.lock);
}
static void iucv_callback_txdone(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct sk_buff *this = NULL;
	struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
	struct sk_buff *list_skb = list->next;
	unsigned long flags;

	if (!skb_queue_empty(list)) {
		spin_lock_irqsave(&list->lock, flags);

		while (list_skb != (struct sk_buff *)list) {
			if (!memcmp(&msg->tag, CB_TAG(list_skb), CB_TAG_LEN)) {
				this = list_skb;
				break;
			}
			list_skb = list_skb->next;
		}
		if (this)
			__skb_unlink(this, list);

		spin_unlock_irqrestore(&list->lock, flags);

		if (this) {
			kfree_skb(this);
			/* wake up any process waiting for sending */
			iucv_sock_wake_msglim(sk);
		}
	}

	if (sk->sk_state == IUCV_CLOSING) {
		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
			sk->sk_state = IUCV_CLOSED;
			sk->sk_state_change(sk);
		}
	}
}
static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	if (!list_empty(&iucv_sk(sk)->accept_q))
		sk->sk_state = IUCV_SEVERED;
	else
		sk->sk_state = IUCV_DISCONN;

	sk->sk_state_change(sk);
}
/* called if the other communication side shuts down its RECV direction;
 * in turn, the callback sets SEND_SHUTDOWN to disable sending of data.
 */
static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	bh_lock_sock(sk);
	if (sk->sk_state != IUCV_CLOSED) {
		sk->sk_shutdown |= SEND_SHUTDOWN;
		sk->sk_state_change(sk);
	}
	bh_unlock_sock(sk);
}
static struct proto_ops iucv_sock_ops = {
	.family		= PF_IUCV,
	.owner		= THIS_MODULE,
	.release	= iucv_sock_release,
	.bind		= iucv_sock_bind,
	.connect	= iucv_sock_connect,
	.listen		= iucv_sock_listen,
	.accept		= iucv_sock_accept,
	.getname	= iucv_sock_getname,
	.sendmsg	= iucv_sock_sendmsg,
	.recvmsg	= iucv_sock_recvmsg,
	.poll		= iucv_sock_poll,
	.ioctl		= sock_no_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= iucv_sock_shutdown,
	.setsockopt	= iucv_sock_setsockopt,
	.getsockopt	= iucv_sock_getsockopt,
};

static struct net_proto_family iucv_sock_family_ops = {
	.family	= AF_IUCV,
	.owner	= THIS_MODULE,
	.create	= iucv_sock_create,
};
static int __init afiucv_init(void)
{
	int err;

	if (!MACHINE_IS_VM) {
		pr_err("The af_iucv module cannot be loaded"
		       " without z/VM\n");
		err = -EPROTONOSUPPORT;
		goto out;
	}
	cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
	if (unlikely(err)) {
		WARN_ON(err);
		err = -EPROTONOSUPPORT;
		goto out;
	}

	err = iucv_register(&af_iucv_handler, 0);
	if (err)
		goto out;
	err = proto_register(&iucv_proto, 0);
	if (err)
		goto out_iucv;
	err = sock_register(&iucv_sock_family_ops);
	if (err)
		goto out_proto;
	return 0;

out_proto:
	proto_unregister(&iucv_proto);
out_iucv:
	iucv_unregister(&af_iucv_handler, 0);
out:
	return err;
}
static void __exit afiucv_exit(void)
{
	sock_unregister(PF_IUCV);
	proto_unregister(&iucv_proto);
	iucv_unregister(&af_iucv_handler, 0);
}

module_init(afiucv_init);
module_exit(afiucv_exit);
MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IUCV);