/*
 * Server-side socket management
 *
 * Copyright (C) 1999 Marcus Meissner, Ove Kåven
 *
 * FIXME: we use read|write access in all cases. Shouldn't that depend
 * on the access of the current handle?
 */
#ifdef HAVE_SYS_ERRNO_H
# include <sys/errno.h>
#endif
#include <sys/types.h>
#ifdef HAVE_SYS_SOCKET_H
# include <sys/socket.h>
#endif
#include <sys/ioctl.h>
#ifdef HAVE_SYS_FILIO_H
# include <sys/filio.h>
#endif
struct sock
{
    struct object       obj;         /* object header */
    unsigned int        state;       /* status bits */
    unsigned int        mask;        /* event mask */
    unsigned int        hmask;       /* held (blocked) events */
    unsigned int        pmask;       /* pending events */
    struct event       *event;       /* event object */
    int                 errors[FD_MAX_EVENTS]; /* event errors */
};
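/* the errors array is indexed by the FD_*_BIT constants (FD_READ_BIT, FD_CONNECT_BIT, ...)
 * and holds the error code associated with the corresponding pending network event */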
static void sock_dump( struct object *obj, int verbose );
static int sock_signaled( struct object *obj, struct thread *thread );
static int sock_get_poll_events( struct object *obj );
static void sock_poll_event( struct object *obj, int event );
static int sock_get_fd( struct object *obj );
static void sock_destroy( struct object *obj );
static int sock_get_error( int err );
static void sock_set_error(void);
static const struct object_ops sock_ops =
{
    sizeof(struct sock),          /* size */
    sock_dump,                    /* dump */
    add_queue,                    /* add_queue */
    remove_queue,                 /* remove_queue */
    sock_signaled,                /* signaled */
    no_satisfied,                 /* satisfied */
    sock_get_poll_events,         /* get_poll_events */
    sock_poll_event,              /* poll_event */
    sock_get_fd,                  /* get_fd */
    no_flush,                     /* flush */
    no_get_file_info,             /* get_file_info */
    sock_destroy                  /* destroy */
};

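/* recompute the poll events this socket needs and re-register it with the
 * server's main select loop */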
static void sock_reselect( struct sock *sock )
{
    int ev = sock_get_poll_events( &sock->obj );
    struct pollfd pfd;

    if (debug_level)
        fprintf(stderr,"sock_reselect(%d): new mask %x\n", sock->obj.fd, ev);

    if (sock->obj.select == -1) {
        /* previously unconnected socket, is this reselect supposed to connect it? */
        if (!(sock->state & ~FD_WINE_NONBLOCKING)) return;
        /* ok, it is, attach it to the wineserver's main poll loop */
        add_select_user( &sock->obj );
    }
    /* update condition mask */
    set_select_events( &sock->obj, ev );

    /* check whether condition is satisfied already */
    pfd.fd = sock->obj.fd;
    pfd.events = ev;
    pfd.revents = 0;
    poll( &pfd, 1, 0 );
    if (pfd.revents)
        sock_poll_event( &sock->obj, pfd.revents );
}

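/* retrieve the pending error on a socket fd via SO_ERROR, mapped to a WSA
 * error code (0 if there is none) */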
inline static int sock_error(int s)
{
    unsigned int optval = 0, optlen;

    optlen = sizeof(optval);
    getsockopt(s, SOL_SOCKET, SO_ERROR, (void *) &optval, &optlen);
    return optval ? sock_get_error(optval) : 0;
}

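/* handle a poll() event reported for the socket fd: update the socket state,
 * record pending events and their error codes, and signal the associated event object */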
static void sock_poll_event( struct object *obj, int event )
{
    struct sock *sock = (struct sock *)obj;
    unsigned int emask;

    assert( sock->obj.ops == &sock_ops );
    if (debug_level)
        fprintf(stderr, "socket %d select event: %x\n", sock->obj.fd, event);
    if (sock->state & FD_CONNECT)
    {
        if (event & POLLOUT)
        {
            /* we got connected */
            sock->state |= FD_WINE_CONNECTED|FD_READ|FD_WRITE;
            sock->state &= ~FD_CONNECT;
            sock->pmask |= FD_CONNECT;
            sock->errors[FD_CONNECT_BIT] = 0;
            if (debug_level)
                fprintf(stderr, "socket %d connection success\n", sock->obj.fd);
        }
        else if (event & (POLLERR|POLLHUP))
        {
            /* we didn't get connected? */
            sock->state &= ~FD_CONNECT;
            sock->pmask |= FD_CONNECT;
            sock->errors[FD_CONNECT_BIT] = sock_error( sock->obj.fd );
            if (debug_level)
                fprintf(stderr, "socket %d connection failure\n", sock->obj.fd);
        }
    }
    else if (sock->state & FD_WINE_LISTENING)
    {
        if (event & POLLIN)
        {
            /* incoming connection */
            sock->pmask |= FD_ACCEPT;
            sock->errors[FD_ACCEPT_BIT] = 0;
            sock->hmask |= FD_ACCEPT;
        }
        else if (event & (POLLERR|POLLHUP))
        {
            /* failed incoming connection? */
            sock->pmask |= FD_ACCEPT;
            sock->errors[FD_ACCEPT_BIT] = sock_error( sock->obj.fd );
            sock->hmask |= FD_ACCEPT;
        }
    }
    else
    {
        /* normal data flow */
        if (event & POLLIN)
        {
            char dummy;

            /* Linux 2.4 doesn't report POLLHUP if only one side of the socket
             * has been closed, so we need to check for it explicitly here */
            if (!recv( sock->obj.fd, &dummy, 1, MSG_PEEK )) event = POLLHUP;
        }

        if (event & POLLIN)
        {
            sock->pmask |= FD_READ;
            sock->hmask |= FD_READ;
            sock->errors[FD_READ_BIT] = 0;
            if (debug_level)
                fprintf(stderr, "socket %d is readable\n", sock->obj.fd );
        }
        if (event & POLLOUT)
        {
            sock->pmask |= FD_WRITE;
            sock->hmask |= FD_WRITE;
            sock->errors[FD_WRITE_BIT] = 0;
            if (debug_level)
                fprintf(stderr, "socket %d is writable\n", sock->obj.fd);
        }
        if (event & POLLPRI)
        {
            sock->pmask |= FD_OOB;
            sock->hmask |= FD_OOB;
            sock->errors[FD_OOB_BIT] = 0;
            if (debug_level)
                fprintf(stderr, "socket %d got OOB data\n", sock->obj.fd);
        }
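        /* POLLERR, or POLLHUP with nothing left to read, means the connection
         * is gone: report FD_CLOSE and drop the read/write state bits */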
        if (((event & POLLERR) || ((event & (POLLIN|POLLHUP)) == POLLHUP))
            && (sock->state & (FD_READ|FD_WRITE))) {
            sock->errors[FD_CLOSE_BIT] = sock_error( sock->obj.fd );
            sock->state &= ~(FD_WINE_CONNECTED|FD_READ|FD_WRITE);
            sock->pmask |= FD_CLOSE;
            if (debug_level)
                fprintf(stderr, "socket %d aborted by error %d\n",
                        sock->obj.fd, sock->errors[FD_CLOSE_BIT]);
        }
    }

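    /* on error or hangup stop polling the fd altogether; otherwise re-register
     * it for whatever events are still wanted */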
    if (event & (POLLERR|POLLHUP))
        set_select_events( &sock->obj, -1 );
    else
        sock_reselect( sock );

    /* wake up anyone waiting for whatever just happened */
    emask = sock->pmask & sock->mask;
    if (debug_level && emask)
        fprintf(stderr, "socket %d pending events: %x\n", sock->obj.fd, emask);
    if (emask && sock->event) {
        if (debug_level) fprintf(stderr, "signalling event ptr %p\n", sock->event);
        set_event(sock->event);
    }

    /* if anyone is stupid enough to wait on the socket object itself,
     * maybe we should wake them up too, just in case? */
    wake_up( &sock->obj, 0 );
}

static void sock_dump( struct object *obj, int verbose )
{
    struct sock *sock = (struct sock *)obj;
    assert( obj->ops == &sock_ops );
    printf( "Socket fd=%d, state=%x, mask=%x, pending=%x, held=%x\n",
            sock->obj.fd, sock->state,
            sock->mask, sock->pmask, sock->hmask );
}

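/* a socket object is signaled when any of the poll events it currently waits
 * for is ready on its fd */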
static int sock_signaled( struct object *obj, struct thread *thread )
{
    struct sock *sock = (struct sock *)obj;
    assert( obj->ops == &sock_ops );

    return check_select_events( sock->obj.fd, sock_get_poll_events( &sock->obj ) );
}

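/* build the poll() event mask from the socket state and the events selected by
 * the application that are not currently held back */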
static int sock_get_poll_events( struct object *obj )
{
    struct sock *sock = (struct sock *)obj;
    unsigned int mask = sock->mask & sock->state & ~sock->hmask;
    int ev = 0;

    assert( obj->ops == &sock_ops );

    if (sock->state & FD_CONNECT)
        /* connecting, wait for writable */
        return POLLOUT;
    if (sock->state & FD_WINE_LISTENING)
        /* listening, wait for readable */
        return (sock->hmask & FD_ACCEPT) ? 0 : POLLIN;

    if (mask & FD_READ)  ev |= POLLIN | POLLPRI;
    if (mask & FD_WRITE) ev |= POLLOUT;
    return ev;
}

static int sock_get_fd( struct object *obj )
{
    struct sock *sock = (struct sock *)obj;
    assert( obj->ops == &sock_ops );
    return sock->obj.fd;
}

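/* last reference to the socket is gone: release the associated event object,
 * signalling it first if the service thread may be waiting on it */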
static void sock_destroy( struct object *obj )
{
    struct sock *sock = (struct sock *)obj;
    assert( obj->ops == &sock_ops );

    /* FIXME: special socket shutdown stuff? */
    if (sock->event)
    {
        /* if the service thread was waiting for the event object,
         * we should now signal it, to let the service thread
         * object detect that it is now orphaned... */
        if (sock->mask & FD_WINE_SERVEVENT)
            set_event( sock->event );
        /* we're through with it */
        release_object( sock->event );
    }
}

/* create a new and unconnected socket */
static struct object *create_socket( int family, int type, int protocol )
{
    struct sock *sock;
    int sockfd;
    sockfd = socket( family, type, protocol );
    if (debug_level)
        fprintf(stderr,"socket(%d,%d,%d)=%d\n",family,type,protocol,sockfd);
    if (sockfd == -1) { sock_set_error(); return NULL; }
    fcntl(sockfd, F_SETFL, O_NONBLOCK); /* make socket nonblocking */
    if (!(sock = alloc_object( &sock_ops, -1 ))) return NULL;
    sock->obj.fd = sockfd;
    sock->state = (type != SOCK_STREAM) ? (FD_READ|FD_WRITE) : 0;
    sock->mask = sock->hmask = sock->pmask = 0;
    sock->event = NULL;
    sock_reselect( sock );
    return &sock->obj;
}

/* accept a socket (creates a new fd) */
static struct object *accept_socket( handle_t handle )
{
    struct sock *acceptsock;
    struct sock *sock;
    int acceptfd;
    struct sockaddr saddr;
    int slen;

    sock=(struct sock*)get_handle_obj(current->process,handle,
                                      GENERIC_READ|GENERIC_WRITE|SYNCHRONIZE,&sock_ops);
    if (!sock) return NULL;

    /* Try to accept(2). We can't be sure that this is an already connected
     * socket or that accept() is allowed on it. In those cases we will get
     * -1/errno return. */
    slen = sizeof(saddr);
    acceptfd = accept(sock->obj.fd,&saddr,&slen);
    if (acceptfd == -1) {
        sock_set_error();
        release_object( sock );
        return NULL;
    }
    if (!(acceptsock = alloc_object( &sock_ops, -1 )))
    {
        release_object( sock );
        return NULL;
    }

    /* newly created socket gets the same properties as the listening socket */
    fcntl(acceptfd, F_SETFL, O_NONBLOCK); /* make socket nonblocking */
    acceptsock->obj.fd = acceptfd;
    acceptsock->state = FD_WINE_CONNECTED|FD_READ|FD_WRITE;
    if (sock->state & FD_WINE_NONBLOCKING)
        acceptsock->state |= FD_WINE_NONBLOCKING;
    acceptsock->mask  = sock->mask;
    acceptsock->hmask = 0;
    acceptsock->pmask = 0;
    acceptsock->event = NULL;
    if (sock->event && !(sock->mask & FD_WINE_SERVEVENT))
        acceptsock->event = (struct event *)grab_object( sock->event );

    sock_reselect( acceptsock );

    sock->pmask &= ~FD_ACCEPT;
    sock->hmask &= ~FD_ACCEPT;
    sock_reselect( sock );
    release_object( sock );
    return &acceptsock->obj;
}

/* map a Unix errno value to a WSA error code */
static int sock_get_error( int err )
{
    switch (err)
    {
        case EINTR: return WSAEINTR; break;
        case EBADF: return WSAEBADF; break;
        case EACCES: return WSAEACCES; break;
        case EFAULT: return WSAEFAULT; break;
        case EINVAL: return WSAEINVAL; break;
        case EMFILE: return WSAEMFILE; break;
        case EWOULDBLOCK: return WSAEWOULDBLOCK; break;
        case EINPROGRESS: return WSAEINPROGRESS; break;
        case EALREADY: return WSAEALREADY; break;
        case ENOTSOCK: return WSAENOTSOCK; break;
        case EDESTADDRREQ: return WSAEDESTADDRREQ; break;
        case EMSGSIZE: return WSAEMSGSIZE; break;
        case EPROTOTYPE: return WSAEPROTOTYPE; break;
        case ENOPROTOOPT: return WSAENOPROTOOPT; break;
        case EPROTONOSUPPORT: return WSAEPROTONOSUPPORT; break;
        case ESOCKTNOSUPPORT: return WSAESOCKTNOSUPPORT; break;
        case EOPNOTSUPP: return WSAEOPNOTSUPP; break;
        case EPFNOSUPPORT: return WSAEPFNOSUPPORT; break;
        case EAFNOSUPPORT: return WSAEAFNOSUPPORT; break;
        case EADDRINUSE: return WSAEADDRINUSE; break;
        case EADDRNOTAVAIL: return WSAEADDRNOTAVAIL; break;
        case ENETDOWN: return WSAENETDOWN; break;
        case ENETUNREACH: return WSAENETUNREACH; break;
        case ENETRESET: return WSAENETRESET; break;
        case ECONNABORTED: return WSAECONNABORTED; break;
        case ECONNRESET: return WSAECONNRESET; break;
        case ENOBUFS: return WSAENOBUFS; break;
        case EISCONN: return WSAEISCONN; break;
        case ENOTCONN: return WSAENOTCONN; break;
        case ESHUTDOWN: return WSAESHUTDOWN; break;
        case ETOOMANYREFS: return WSAETOOMANYREFS; break;
        case ETIMEDOUT: return WSAETIMEDOUT; break;
        case ECONNREFUSED: return WSAECONNREFUSED; break;
        case ELOOP: return WSAELOOP; break;
        case ENAMETOOLONG: return WSAENAMETOOLONG; break;
        case EHOSTDOWN: return WSAEHOSTDOWN; break;
        case EHOSTUNREACH: return WSAEHOSTUNREACH; break;
        case ENOTEMPTY: return WSAENOTEMPTY; break;
#ifdef EPROCLIM
        case EPROCLIM: return WSAEPROCLIM; break;
#endif
#ifdef EUSERS
        case EUSERS: return WSAEUSERS; break;
#endif
#ifdef EDQUOT
        case EDQUOT: return WSAEDQUOT; break;
#endif
#ifdef ESTALE
        case ESTALE: return WSAESTALE; break;
#endif
#ifdef EREMOTE
        case EREMOTE: return WSAEREMOTE; break;
#endif
        default: errno=err; perror("sock_set_error"); return ERROR_UNKNOWN; break;
    }
}

/* set the last error depending on errno */
static void sock_set_error(void)
{
    set_error( sock_get_error( errno ) );
}

/* create a socket */
DECL_HANDLER(create_socket)
{
    struct object *obj;
    if ((obj = create_socket( req->family, req->type, req->protocol )) != NULL) {
        req->handle = alloc_handle( current->process, obj, req->access, req->inherit );
        release_object( obj );
    }
}

/* accept a socket */
DECL_HANDLER(accept_socket)
{
    struct object *obj;
    if ((obj = accept_socket( req->lhandle )) != NULL) {
        req->handle = alloc_handle( current->process, obj, req->access, req->inherit );
        release_object( obj );
    }
}

/* set socket event parameters */
DECL_HANDLER(set_socket_event)
{
    struct sock *sock;
    struct event *oevent;
    unsigned int omask;

    sock=(struct sock*)get_handle_obj(current->process,req->handle,GENERIC_READ|GENERIC_WRITE|SYNCHRONIZE,&sock_ops);
    if (!sock) return;
    oevent = sock->event;
    omask  = sock->mask;
    sock->mask = req->mask;
    sock->event = get_event_obj( current->process, req->event, EVENT_MODIFY_STATE );
    if (debug_level && sock->event) fprintf(stderr, "event ptr: %p\n", sock->event);
    sock_reselect( sock );
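    /* selecting events on a socket implicitly switches it to non-blocking mode,
     * as WSAAsyncSelect()/WSAEventSelect() do on Windows */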
    sock->state |= FD_WINE_NONBLOCKING;

    /* if a network event is pending, signal the event object.
       It is possible that FD_CONNECT or FD_ACCEPT network events have happened
       before a WSAEventSelect() was done on it
       (when dealing with asynchronous sockets). */
    if (sock->pmask & sock->mask)
        set_event(sock->event);

    if (oevent)
    {
        if ((oevent != sock->event) && (omask & FD_WINE_SERVEVENT))
            /* if the service thread was waiting for the old event object,
             * we should now signal it, to let the service thread
             * object detect that it is now orphaned... */
            set_event( oevent );
        /* we're through with it */
        release_object( oevent );
    }
    release_object( &sock->obj );
}

/* get socket event parameters */
DECL_HANDLER(get_socket_event)
{
    size_t size;
    struct sock *sock;

    sock=(struct sock*)get_handle_obj(current->process,req->handle,GENERIC_READ|GENERIC_WRITE|SYNCHRONIZE,&sock_ops);
    if (!sock)
    {
        set_error(WSAENOTSOCK);
        return;
    }
    req->mask  = sock->mask;
    req->pmask = sock->pmask;
    req->state = sock->state;
    size = min( get_req_data_size(req), sizeof(sock->errors) );
    memcpy( get_req_data(req), sock->errors, size );
    set_req_data_size( req, size );
        struct event *sevent = get_event_obj(current->process, req->s_event, 0);
        if (sevent == sock->event)
            release_object( sevent );

        struct event *cevent = get_event_obj(current->process, req->c_event, EVENT_MODIFY_STATE);
        reset_event( cevent );
        release_object( cevent );

        sock_reselect( sock );
    else set_error(WSAEINVAL);

    release_object( &sock->obj );
}

/* re-enable pending socket events */
DECL_HANDLER(enable_socket_event)
{
    struct sock *sock;

    sock=(struct sock*)get_handle_obj(current->process,req->handle,GENERIC_READ|GENERIC_WRITE|SYNCHRONIZE,&sock_ops);
    if (!sock) return;

    sock->pmask &= ~req->mask; /* is this safe? */
    sock->hmask &= ~req->mask;
    sock->state |= req->sstate;
    sock->state &= ~req->cstate;
    sock_reselect( sock );

    /* service trigger */
    if (req->mask & FD_WINE_SERVEVENT)
    {
        sock->pmask |= FD_WINE_SERVEVENT;
        if (sock->event)
        {
            if (debug_level) fprintf(stderr, "signalling service event ptr %p\n", sock->event);
            set_event(sock->event);
        }
    }

    release_object( &sock->obj );
}