2 * Server-side socket management
4 * Copyright (C) 1999 Marcus Meissner, Ove Kåven
6 * FIXME: we use read|write access in all cases. Shouldn't that depend
7 * on the access rights of the current handle?
18 #ifdef HAVE_SYS_ERRNO_H
19 # include <sys/errno.h>
22 #include <sys/types.h>
23 #ifdef HAVE_SYS_SOCKET_H
24 # include <sys/socket.h>
26 #include <sys/ioctl.h>
27 #ifdef HAVE_SYS_FILIO_H
28 # include <sys/filio.h>
41 /* To avoid conflicts with the Unix socket headers. Plus we only need a few
49 struct object obj; /* object header */
50 unsigned int state; /* status bits */
51 unsigned int mask; /* event mask */
52 unsigned int hmask; /* held (blocked) events */
53 unsigned int pmask; /* pending events */
54 unsigned int flags; /* socket flags */
55 struct event *event; /* event object */
56 int errors[FD_MAX_EVENTS]; /* event errors */
57 struct async_queue read_q; /* Queue for asynchronous reads */
58 struct async_queue write_q; /* Queue for asynchronous writes */
61 static void sock_dump( struct object *obj, int verbose );
62 static int sock_signaled( struct object *obj, struct thread *thread );
63 static int sock_get_poll_events( struct object *obj );
64 static void sock_poll_event( struct object *obj, int event );
65 static int sock_get_fd( struct object *obj );
66 static int sock_get_info( struct object *obj, struct get_file_info_reply *reply, int *flags );
67 static void sock_destroy( struct object *obj );
68 static int sock_get_error( int err );
69 static void sock_set_error(void);
71 static const struct object_ops sock_ops =
73 sizeof(struct sock), /* size */
75 add_queue, /* add_queue */
76 remove_queue, /* remove_queue */
77 sock_signaled, /* signaled */
78 no_satisfied, /* satisfied */
79 sock_get_poll_events, /* get_poll_events */
80 sock_poll_event, /* poll_event */
81 sock_get_fd, /* get_fd */
83 sock_get_info, /* get_file_info */
84 NULL, /* queue_async */
85 sock_destroy /* destroy */
88 static void sock_reselect( struct sock *sock )
90 int ev = sock_get_poll_events( &sock->obj );
94 fprintf(stderr,"sock_reselect(%d): new mask %x\n", sock->obj.fd, ev);
96 if (sock->obj.select == -1) {
97 /* previously unconnected socket, is this reselect supposed to connect it? */
98 if (!(sock->state & ~FD_WINE_NONBLOCKING)) return;
99 /* ok, it is, attach it to the wineserver's main poll loop */
100 add_select_user( &sock->obj );
102 /* update condition mask */
103 set_select_events( &sock->obj, ev );
105 /* check whether condition is satisfied already */
106 pfd.fd = sock->obj.fd;
111 sock_poll_event( &sock->obj, pfd.revents);
114 inline static int sock_error(int s)
116 unsigned int optval = 0, optlen;
118 optlen = sizeof(optval);
119 getsockopt(s, SOL_SOCKET, SO_ERROR, (void *) &optval, &optlen);
120 return optval ? sock_get_error(optval) : 0;
123 static void sock_poll_event( struct object *obj, int event )
125 struct sock *sock = (struct sock *)obj;
127 assert( sock->obj.ops == &sock_ops );
129 fprintf(stderr, "socket %d select event: %x\n", sock->obj.fd, event);
130 if (sock->state & FD_CONNECT)
135 /* we got connected */
136 sock->state |= FD_WINE_CONNECTED|FD_READ|FD_WRITE;
137 sock->state &= ~FD_CONNECT;
138 sock->pmask |= FD_CONNECT;
139 sock->errors[FD_CONNECT_BIT] = 0;
141 fprintf(stderr, "socket %d connection success\n", sock->obj.fd);
143 else if (event & (POLLERR|POLLHUP))
145 /* we didn't get connected? */
146 sock->state &= ~FD_CONNECT;
147 sock->pmask |= FD_CONNECT;
148 sock->errors[FD_CONNECT_BIT] = sock_error( sock->obj.fd );
150 fprintf(stderr, "socket %d connection failure\n", sock->obj.fd);
153 if (sock->state & FD_WINE_LISTENING)
158 /* incoming connection */
159 sock->pmask |= FD_ACCEPT;
160 sock->errors[FD_ACCEPT_BIT] = 0;
161 sock->hmask |= FD_ACCEPT;
163 else if (event & (POLLERR|POLLHUP))
165 /* failed incoming connection? */
166 sock->pmask |= FD_ACCEPT;
167 sock->errors[FD_ACCEPT_BIT] = sock_error( sock->obj.fd );
168 sock->hmask |= FD_ACCEPT;
172 /* normal data flow */
177 /* Linux 2.4 doesn't report POLLHUP if only one side of the socket
178 * has been closed, so we need to check for it explicitly here */
179 if (!recv( sock->obj.fd, &dummy, 1, MSG_PEEK )) event = POLLHUP;
183 sock->pmask |= FD_READ;
184 sock->hmask |= FD_READ;
185 sock->errors[FD_READ_BIT] = 0;
187 fprintf(stderr, "socket %d is readable\n", sock->obj.fd );
192 sock->pmask |= FD_WRITE;
193 sock->hmask |= FD_WRITE;
194 sock->errors[FD_WRITE_BIT] = 0;
196 fprintf(stderr, "socket %d is writable\n", sock->obj.fd);
200 sock->pmask |= FD_OOB;
201 sock->hmask |= FD_OOB;
202 sock->errors[FD_OOB_BIT] = 0;
204 fprintf(stderr, "socket %d got OOB data\n", sock->obj.fd);
206 if (((event & POLLERR) || ((event & (POLLIN|POLLHUP)) == POLLHUP))
207 && (sock->state & (FD_READ|FD_WRITE))) {
209 sock->errors[FD_CLOSE_BIT] = sock_error( sock->obj.fd );
210 sock->state &= ~(FD_WINE_CONNECTED|FD_READ|FD_WRITE);
211 sock->pmask |= FD_CLOSE;
213 fprintf(stderr, "socket %d aborted by error %d\n",
214 sock->obj.fd, sock->errors[FD_CLOSE_BIT]);
218 if (event & (POLLERR|POLLHUP))
219 set_select_events( &sock->obj, -1 );
221 sock_reselect( sock );
222 /* wake up anyone waiting for whatever just happened */
223 emask = sock->pmask & sock->mask;
224 if (debug_level && emask)
225 fprintf(stderr, "socket %d pending events: %x\n", sock->obj.fd, emask);
226 if (emask && sock->event) {
227 if (debug_level) fprintf(stderr, "signalling event ptr %p\n", sock->event);
228 set_event(sock->event);
231 /* if anyone is stupid enough to wait on the socket object itself,
232 * maybe we should wake them up too, just in case? */
233 wake_up( &sock->obj, 0 );
236 static void sock_dump( struct object *obj, int verbose )
238 struct sock *sock = (struct sock *)obj;
239 assert( obj->ops == &sock_ops );
240 printf( "Socket fd=%d, state=%x, mask=%x, pending=%x, held=%x\n",
241 sock->obj.fd, sock->state,
242 sock->mask, sock->pmask, sock->hmask );
245 static int sock_signaled( struct object *obj, struct thread *thread )
247 struct sock *sock = (struct sock *)obj;
248 assert( obj->ops == &sock_ops );
250 return check_select_events( sock->obj.fd, sock_get_poll_events( &sock->obj ) );
253 static int sock_get_poll_events( struct object *obj )
255 struct sock *sock = (struct sock *)obj;
256 unsigned int mask = sock->mask & sock->state & ~sock->hmask;
259 assert( obj->ops == &sock_ops );
261 if (sock->state & FD_CONNECT)
262 /* connecting, wait for writable */
264 if (sock->state & FD_WINE_LISTENING)
265 /* listening, wait for readable */
266 return (sock->hmask & FD_ACCEPT) ? 0 : POLLIN;
268 if (mask & FD_READ) ev |= POLLIN | POLLPRI;
269 if (mask & FD_WRITE) ev |= POLLOUT;
273 static int sock_get_fd( struct object *obj )
275 struct sock *sock = (struct sock *)obj;
276 assert( obj->ops == &sock_ops );
280 static int sock_get_info( struct object *obj, struct get_file_info_reply *reply, int *flags )
282 struct sock *sock = (struct sock*) obj;
283 assert ( obj->ops == &sock_ops );
287 reply->type = FILE_TYPE_PIPE;
289 reply->access_time = 0;
290 reply->write_time = 0;
291 reply->size_high = 0;
294 reply->index_high = 0;
295 reply->index_low = 0;
299 if (sock->flags & WSA_FLAG_OVERLAPPED) *flags |= FD_FLAG_OVERLAPPED;
300 return FD_TYPE_DEFAULT;
303 static void sock_destroy( struct object *obj )
305 struct sock *sock = (struct sock *)obj;
306 assert( obj->ops == &sock_ops );
308 /* FIXME: special socket shutdown stuff? */
310 if ( sock->flags & WSA_FLAG_OVERLAPPED )
312 destroy_async_queue ( &sock->read_q );
313 destroy_async_queue ( &sock->write_q );
318 /* if the service thread was waiting for the event object,
319 * we should now signal it, to let the service thread
320 * object detect that it is now orphaned... */
321 if (sock->mask & FD_WINE_SERVEVENT)
322 set_event( sock->event );
323 /* we're through with it */
324 release_object( sock->event );
328 /* create a new and unconnected socket */
329 static struct object *create_socket( int family, int type, int protocol, unsigned int flags )
334 sockfd = socket( family, type, protocol );
336 fprintf(stderr,"socket(%d,%d,%d)=%d\n",family,type,protocol,sockfd);
341 fcntl(sockfd, F_SETFL, O_NONBLOCK); /* make socket nonblocking */
342 if (!(sock = alloc_object( &sock_ops, -1 ))) return NULL;
343 sock->obj.fd = sockfd;
344 sock->state = (type != SOCK_STREAM) ? (FD_READ|FD_WRITE) : 0;
350 sock_reselect( sock );
352 if (sock->flags & WSA_FLAG_OVERLAPPED)
354 init_async_queue (&sock->read_q);
355 init_async_queue (&sock->write_q);
360 /* accept a socket (creates a new fd) */
361 static struct object *accept_socket( handle_t handle )
363 struct sock *acceptsock;
366 struct sockaddr saddr;
369 sock=(struct sock*)get_handle_obj(current->process,handle,
370 GENERIC_READ|GENERIC_WRITE|SYNCHRONIZE,&sock_ops);
373 /* Try to accept(2). We can't be safe that this an already connected socket
374 * or that accept() is allowed on it. In those cases we will get -1/errno
377 slen = sizeof(saddr);
378 acceptfd = accept(sock->obj.fd,&saddr,&slen);
381 release_object( sock );
384 if (!(acceptsock = alloc_object( &sock_ops, -1 )))
386 release_object( sock );
390 /* newly created socket gets the same properties of the listening socket */
391 fcntl(acceptfd, F_SETFL, O_NONBLOCK); /* make socket nonblocking */
392 acceptsock->obj.fd = acceptfd;
393 acceptsock->state = FD_WINE_CONNECTED|FD_READ|FD_WRITE;
394 if (sock->state & FD_WINE_NONBLOCKING)
395 acceptsock->state |= FD_WINE_NONBLOCKING;
396 acceptsock->mask = sock->mask;
397 acceptsock->hmask = 0;
398 acceptsock->pmask = 0;
399 acceptsock->event = NULL;
400 if (sock->event && !(sock->mask & FD_WINE_SERVEVENT))
401 acceptsock->event = (struct event *)grab_object( sock->event );
402 acceptsock->flags = sock->flags;
403 if ( acceptsock->flags & WSA_FLAG_OVERLAPPED )
405 init_async_queue ( &acceptsock->read_q );
406 init_async_queue ( &acceptsock->write_q );
409 sock_reselect( acceptsock );
411 sock->pmask &= ~FD_ACCEPT;
412 sock->hmask &= ~FD_ACCEPT;
413 sock_reselect( sock );
414 release_object( sock );
415 return &acceptsock->obj;
418 /* set the last error depending on errno */
419 static int sock_get_error( int err )
423 case EINTR: return WSAEINTR; break;
424 case EBADF: return WSAEBADF; break;
426 case EACCES: return WSAEACCES; break;
427 case EFAULT: return WSAEFAULT; break;
428 case EINVAL: return WSAEINVAL; break;
429 case EMFILE: return WSAEMFILE; break;
430 case EWOULDBLOCK: return WSAEWOULDBLOCK; break;
431 case EINPROGRESS: return WSAEINPROGRESS; break;
432 case EALREADY: return WSAEALREADY; break;
433 case ENOTSOCK: return WSAENOTSOCK; break;
434 case EDESTADDRREQ: return WSAEDESTADDRREQ; break;
435 case EMSGSIZE: return WSAEMSGSIZE; break;
436 case EPROTOTYPE: return WSAEPROTOTYPE; break;
437 case ENOPROTOOPT: return WSAENOPROTOOPT; break;
438 case EPROTONOSUPPORT: return WSAEPROTONOSUPPORT; break;
439 case ESOCKTNOSUPPORT: return WSAESOCKTNOSUPPORT; break;
440 case EOPNOTSUPP: return WSAEOPNOTSUPP; break;
441 case EPFNOSUPPORT: return WSAEPFNOSUPPORT; break;
442 case EAFNOSUPPORT: return WSAEAFNOSUPPORT; break;
443 case EADDRINUSE: return WSAEADDRINUSE; break;
444 case EADDRNOTAVAIL: return WSAEADDRNOTAVAIL; break;
445 case ENETDOWN: return WSAENETDOWN; break;
446 case ENETUNREACH: return WSAENETUNREACH; break;
447 case ENETRESET: return WSAENETRESET; break;
448 case ECONNABORTED: return WSAECONNABORTED; break;
450 case ECONNRESET: return WSAECONNRESET; break;
451 case ENOBUFS: return WSAENOBUFS; break;
452 case EISCONN: return WSAEISCONN; break;
453 case ENOTCONN: return WSAENOTCONN; break;
454 case ESHUTDOWN: return WSAESHUTDOWN; break;
455 case ETOOMANYREFS: return WSAETOOMANYREFS; break;
456 case ETIMEDOUT: return WSAETIMEDOUT; break;
457 case ECONNREFUSED: return WSAECONNREFUSED; break;
458 case ELOOP: return WSAELOOP; break;
459 case ENAMETOOLONG: return WSAENAMETOOLONG; break;
460 case EHOSTDOWN: return WSAEHOSTDOWN; break;
461 case EHOSTUNREACH: return WSAEHOSTUNREACH; break;
462 case ENOTEMPTY: return WSAENOTEMPTY; break;
464 case EPROCLIM: return WSAEPROCLIM; break;
467 case EUSERS: return WSAEUSERS; break;
470 case EDQUOT: return WSAEDQUOT; break;
473 case ESTALE: return WSAESTALE; break;
476 case EREMOTE: return WSAEREMOTE; break;
478 default: errno=err; perror("sock_set_error"); return ERROR_UNKNOWN; break;
482 /* set the last error depending on errno */
483 static void sock_set_error(void)
485 set_error( sock_get_error( errno ) );
488 /* create a socket */
489 DECL_HANDLER(create_socket)
494 if ((obj = create_socket( req->family, req->type, req->protocol, req->flags )) != NULL)
496 reply->handle = alloc_handle( current->process, obj, req->access, req->inherit );
497 release_object( obj );
501 /* accept a socket */
502 DECL_HANDLER(accept_socket)
507 if ((obj = accept_socket( req->lhandle )) != NULL)
509 reply->handle = alloc_handle( current->process, obj, req->access, req->inherit );
510 release_object( obj );
514 /* set socket event parameters */
515 DECL_HANDLER(set_socket_event)
518 struct event *oevent;
521 sock=(struct sock*)get_handle_obj(current->process,req->handle,GENERIC_READ|GENERIC_WRITE|SYNCHRONIZE,&sock_ops);
524 oevent = sock->event;
526 sock->mask = req->mask;
527 sock->event = get_event_obj( current->process, req->event, EVENT_MODIFY_STATE );
528 if (debug_level && sock->event) fprintf(stderr, "event ptr: %p\n", sock->event);
529 sock_reselect( sock );
531 sock->state |= FD_WINE_NONBLOCKING;
533 /* if a network event is pending, signal the event object
534 it is possible that FD_CONNECT or FD_ACCEPT network events has happened
535 before a WSAEventSelect() was done on it.
536 (when dealing with Asynchronous socket) */
537 if (sock->pmask & sock->mask)
538 set_event(sock->event);
542 if ((oevent != sock->event) && (omask & FD_WINE_SERVEVENT))
543 /* if the service thread was waiting for the old event object,
544 * we should now signal it, to let the service thread
545 * object detect that it is now orphaned... */
547 /* we're through with it */
548 release_object( oevent );
550 release_object( &sock->obj );
553 /* get socket event parameters */
554 DECL_HANDLER(get_socket_event)
558 sock=(struct sock*)get_handle_obj(current->process,req->handle,GENERIC_READ|GENERIC_WRITE|SYNCHRONIZE,&sock_ops);
564 set_error( WSAENOTSOCK );
567 reply->mask = sock->mask;
568 reply->pmask = sock->pmask;
569 reply->state = sock->state;
570 set_reply_data( sock->errors, min( get_reply_max_size(), sizeof(sock->errors) ));
574 handle_t s_event = req->s_event;
577 struct event *sevent = get_event_obj(current->process, req->s_event, 0);
578 if (sevent == sock->event) s_event = 0;
579 release_object( sevent );
585 struct event *cevent = get_event_obj(current->process, req->c_event, EVENT_MODIFY_STATE);
586 reset_event( cevent );
587 release_object( cevent );
590 sock_reselect( sock );
592 else set_error(WSAEINVAL);
594 release_object( &sock->obj );
597 /* re-enable pending socket events */
598 DECL_HANDLER(enable_socket_event)
602 sock=(struct sock*)get_handle_obj(current->process,req->handle,GENERIC_READ|GENERIC_WRITE|SYNCHRONIZE,&sock_ops);
605 sock->pmask &= ~req->mask; /* is this safe? */
606 sock->hmask &= ~req->mask;
607 sock->state |= req->sstate;
608 sock->state &= ~req->cstate;
609 sock_reselect( sock );
611 /* service trigger */
612 if (req->mask & FD_WINE_SERVEVENT)
614 sock->pmask |= FD_WINE_SERVEVENT;
616 if (debug_level) fprintf(stderr, "signalling service event ptr %p\n", sock->event);
617 set_event(sock->event);
621 release_object( &sock->obj );