 * Server-side socket management
 * Copyright (C) 1999 Marcus Meissner, Ove Kåven
 * FIXME: we use read|write access in all cases. Shouldn't that depend
 * on the access of the current handle?
#ifdef HAVE_SYS_ERRNO_H
# include <sys/errno.h>
#endif
#include <sys/types.h>
#ifdef HAVE_SYS_SOCKET_H
# include <sys/socket.h>
#endif
#include <sys/ioctl.h>
#ifdef HAVE_SYS_FILIO_H
# include <sys/filio.h>
#endif
    struct object       obj;            /* object header */
    unsigned int        state;          /* status bits */
    unsigned int        mask;           /* event mask */
    unsigned int        hmask;          /* held (blocked) events */
    unsigned int        pmask;          /* pending events */
    struct event       *event;          /* event object */
    int                 errors[FD_MAX_EVENTS]; /* event errors */
static void sock_dump( struct object *obj, int verbose );
static int  sock_signaled( struct object *obj, struct thread *thread );
static int  sock_get_poll_events( struct object *obj );
static void sock_poll_event( struct object *obj, int event );
static int  sock_get_fd( struct object *obj );
static void sock_destroy( struct object *obj );
static int  sock_get_error( int err );
static void sock_set_error(void);
static const struct object_ops sock_ops =
    sizeof(struct sock),          /* size */
    add_queue,                    /* add_queue */
    remove_queue,                 /* remove_queue */
    sock_signaled,                /* signaled */
    no_satisfied,                 /* satisfied */
    sock_get_poll_events,         /* get_poll_events */
    sock_poll_event,              /* poll_event */
    sock_get_fd,                  /* get_fd */
    no_get_file_info,             /* get_file_info */
    sock_destroy                  /* destroy */
/* re-evaluate the poll events we are interested in for this socket
 * and update the main select loop accordingly */
static void sock_reselect( struct sock *sock )
    int ev = sock_get_poll_events( &sock->obj );

    fprintf(stderr, "sock_reselect(%d): new mask %x\n", sock->obj.fd, ev);

    if (sock->obj.select == -1) {
        /* previously unconnected socket, is this reselect supposed to connect it? */
        if (!sock->state) return;
        /* ok, it is, attach it to the wineserver's main poll loop */
        add_select_user( &sock->obj );
    }
    /* update condition mask */
    set_select_events( &sock->obj, ev );

    /* check whether condition is satisfied already */
    pfd.fd = sock->obj.fd;
    sock_poll_event( &sock->obj, pfd.revents );
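
/* Illustrative sketch, not part of the original server code: the zero-timeout
 * poll() idiom that sock_reselect() relies on above to catch conditions that
 * are already satisfied at the moment the event mask changes. The helper name
 * is hypothetical; it assumes <sys/poll.h> (or <poll.h>) is available. */
static int poll_events_already_pending( int fd, int events )
{
    struct pollfd pfd;

    pfd.fd      = fd;
    pfd.events  = events;
    pfd.revents = 0;
    /* a timeout of 0 makes poll() report current readiness without blocking */
    if (poll( &pfd, 1, 0 ) > 0) return pfd.revents;
    return 0;
}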
/* retrieve the pending error on a socket (SO_ERROR) and map it to a WSA error code */
inline static int sock_error(int s)
    unsigned int optval = 0, optlen;

    optlen = sizeof(optval);
    getsockopt(s, SOL_SOCKET, SO_ERROR, (void *) &optval, &optlen);
    return optval ? sock_get_error(optval) : 0;
static void sock_poll_event( struct object *obj, int event )
    struct sock *sock = (struct sock *)obj;

    assert( sock->obj.ops == &sock_ops );
    fprintf(stderr, "socket %d select event: %x\n", sock->obj.fd, event);

    if (sock->state & WS_FD_CONNECT)
        /* we got connected */
        sock->state |= WS_FD_CONNECTED|WS_FD_READ|WS_FD_WRITE;
        sock->state &= ~WS_FD_CONNECT;
        sock->pmask |= FD_CONNECT;
        sock->errors[FD_CONNECT_BIT] = 0;
        fprintf(stderr, "socket %d connection success\n", sock->obj.fd);
    else if (event & (POLLERR|POLLHUP))
        /* we didn't get connected? */
        sock->state &= ~WS_FD_CONNECT;
        sock->pmask |= FD_CONNECT;
        sock->errors[FD_CONNECT_BIT] = sock_error( sock->obj.fd );
        fprintf(stderr, "socket %d connection failure\n", sock->obj.fd);

    if (sock->state & WS_FD_LISTENING)
        /* incoming connection */
        sock->pmask |= FD_ACCEPT;
        sock->errors[FD_ACCEPT_BIT] = 0;
        sock->hmask |= FD_ACCEPT;
    else if (event & (POLLERR|POLLHUP))
        /* failed incoming connection? */
        sock->pmask |= FD_ACCEPT;
        sock->errors[FD_ACCEPT_BIT] = sock_error( sock->obj.fd );
        sock->hmask |= FD_ACCEPT;

    /* normal data flow */
    sock->pmask |= FD_READ;
    sock->hmask |= FD_READ;
    sock->errors[FD_READ_BIT] = 0;
    fprintf(stderr, "socket %d is readable\n", sock->obj.fd );

    sock->pmask |= FD_WRITE;
    sock->hmask |= FD_WRITE;
    sock->errors[FD_WRITE_BIT] = 0;
    fprintf(stderr, "socket %d is writable\n", sock->obj.fd);

    sock->pmask |= FD_OOB;
    sock->hmask |= FD_OOB;
    sock->errors[FD_OOB_BIT] = 0;
    fprintf(stderr, "socket %d got OOB data\n", sock->obj.fd);

    if (((event & POLLERR) || ((event & (POLLIN|POLLHUP)) == POLLHUP))
        && (sock->state & (WS_FD_READ|WS_FD_WRITE))) {
        sock->errors[FD_CLOSE_BIT] = sock_error( sock->obj.fd );
        sock->state &= ~(WS_FD_CONNECTED|WS_FD_READ|WS_FD_WRITE);
        sock->pmask |= FD_CLOSE;
        fprintf(stderr, "socket %d aborted by error %d\n",
                sock->obj.fd, sock->errors[FD_CLOSE_BIT]);
    }

    if (event & (POLLERR|POLLHUP))
        set_select_events( &sock->obj, -1 );
    sock_reselect( sock );

    /* wake up anyone waiting for whatever just happened */
    emask = sock->pmask & sock->mask;
    if (debug_level && emask)
        fprintf(stderr, "socket %d pending events: %x\n", sock->obj.fd, emask);
    if (emask && sock->event) {
        if (debug_level) fprintf(stderr, "signalling event ptr %p\n", sock->event);
        set_event(sock->event);
    }

    /* if anyone is stupid enough to wait on the socket object itself,
     * maybe we should wake them up too, just in case? */
    wake_up( &sock->obj, 0 );
static void sock_dump( struct object *obj, int verbose )
    struct sock *sock = (struct sock *)obj;
    assert( obj->ops == &sock_ops );
    printf( "Socket fd=%d, state=%x, mask=%x, pending=%x, held=%x\n",
            sock->obj.fd, sock->state,
            sock->mask, sock->pmask, sock->hmask );
static int sock_signaled( struct object *obj, struct thread *thread )
    struct sock *sock = (struct sock *)obj;
    assert( obj->ops == &sock_ops );

    return check_select_events( sock->obj.fd, sock_get_poll_events( &sock->obj ) );
static int sock_get_poll_events( struct object *obj )
    struct sock *sock = (struct sock *)obj;
    unsigned int mask = sock->mask & sock->state & ~sock->hmask;

    assert( obj->ops == &sock_ops );

    if (sock->state & WS_FD_CONNECT)
        /* connecting, wait for writable */
    if (sock->state & WS_FD_LISTENING)
        /* listening, wait for readable */
        return (sock->hmask & FD_ACCEPT) ? 0 : POLLIN;

    if (mask & FD_READ)  ev |= POLLIN | POLLPRI;
    if (mask & FD_WRITE) ev |= POLLOUT;
static int sock_get_fd( struct object *obj )
    struct sock *sock = (struct sock *)obj;
    assert( obj->ops == &sock_ops );
static void sock_destroy( struct object *obj )
    struct sock *sock = (struct sock *)obj;
    assert( obj->ops == &sock_ops );

    /* FIXME: special socket shutdown stuff? */

    /* if the service thread was waiting for the event object,
     * we should now signal it, to let the service thread
     * object detect that it is now orphaned... */
    if (sock->mask & WS_FD_SERVEVENT)
        set_event( sock->event );
    /* we're through with it */
    release_object( sock->event );
/* create a new and unconnected socket */
static struct object *create_socket( int family, int type, int protocol )
    sockfd = socket( family, type, protocol );
    fprintf(stderr, "socket(%d,%d,%d)=%d\n", family, type, protocol, sockfd);
    fcntl(sockfd, F_SETFL, O_NONBLOCK); /* make socket nonblocking */
    if (!(sock = alloc_object( &sock_ops, -1 ))) return NULL;
    sock->obj.fd = sockfd;
    sock->state  = (type != SOCK_STREAM) ? (WS_FD_READ|WS_FD_WRITE) : 0;
    sock_reselect( sock );
/* accept a socket (creates a new fd) */
static struct object *accept_socket( handle_t handle )
    struct sock *acceptsock;
    struct sockaddr saddr;

    sock = (struct sock *)get_handle_obj( current->process, handle,
                                          GENERIC_READ|GENERIC_WRITE|SYNCHRONIZE, &sock_ops );
    /* Try to accept(2). We can't be sure that this is an already connected
     * socket or that accept() is allowed on it. In those cases we will get
     * -1/errno
    slen = sizeof(saddr);
    acceptfd = accept( sock->obj.fd, &saddr, &slen );
        release_object( sock );
    if (!(acceptsock = alloc_object( &sock_ops, -1 )))
        release_object( sock );
    /* newly created socket gets the same properties as the listening socket */
    fcntl(acceptfd, F_SETFL, O_NONBLOCK); /* make socket nonblocking */
    acceptsock->obj.fd = acceptfd;
    acceptsock->state  = WS_FD_CONNECTED|WS_FD_READ|WS_FD_WRITE;
    if (sock->state & WS_FD_NONBLOCKING)
        acceptsock->state |= WS_FD_NONBLOCKING;
    acceptsock->mask  = sock->mask;
    acceptsock->hmask = 0;
    acceptsock->pmask = 0;
    acceptsock->event = NULL;
    if (sock->event && !(sock->mask & WS_FD_SERVEVENT))
        acceptsock->event = (struct event *)grab_object( sock->event );

    sock_reselect( acceptsock );

    sock->pmask &= ~FD_ACCEPT;
    sock->hmask &= ~FD_ACCEPT;
    sock_reselect( sock );
    release_object( sock );
    return &acceptsock->obj;
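
/* Illustrative sketch, not part of the original code: the plain accept(2)
 * pattern the function above builds on. With a nonblocking listener, accept()
 * returns -1 with errno set to EAGAIN/EWOULDBLOCK when no connection is
 * pending, and on most systems the accepted fd does not inherit O_NONBLOCK,
 * hence the extra fcntl(). The helper name is hypothetical; it assumes
 * <sys/socket.h>, <fcntl.h> and <errno.h> are included. */
static int accept_nonblocking( int listen_fd )
{
    struct sockaddr addr;
    socklen_t len = sizeof(addr);
    int fd = accept( listen_fd, &addr, &len );

    if (fd == -1) return -1;            /* check errno for EAGAIN/EWOULDBLOCK */
    fcntl( fd, F_SETFL, O_NONBLOCK );   /* keep the new socket nonblocking too */
    return fd;
}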
/* map a Unix errno value to the corresponding WSA error code */
static int sock_get_error( int err )
    case EINTR:             return WSAEINTR;
    case EBADF:             return WSAEBADF;
    case EACCES:            return WSAEACCES;
    case EFAULT:            return WSAEFAULT;
    case EINVAL:            return WSAEINVAL;
    case EMFILE:            return WSAEMFILE;
    case EWOULDBLOCK:       return WSAEWOULDBLOCK;
    case EINPROGRESS:       return WSAEINPROGRESS;
    case EALREADY:          return WSAEALREADY;
    case ENOTSOCK:          return WSAENOTSOCK;
    case EDESTADDRREQ:      return WSAEDESTADDRREQ;
    case EMSGSIZE:          return WSAEMSGSIZE;
    case EPROTOTYPE:        return WSAEPROTOTYPE;
    case ENOPROTOOPT:       return WSAENOPROTOOPT;
    case EPROTONOSUPPORT:   return WSAEPROTONOSUPPORT;
    case ESOCKTNOSUPPORT:   return WSAESOCKTNOSUPPORT;
    case EOPNOTSUPP:        return WSAEOPNOTSUPP;
    case EPFNOSUPPORT:      return WSAEPFNOSUPPORT;
    case EAFNOSUPPORT:      return WSAEAFNOSUPPORT;
    case EADDRINUSE:        return WSAEADDRINUSE;
    case EADDRNOTAVAIL:     return WSAEADDRNOTAVAIL;
    case ENETDOWN:          return WSAENETDOWN;
    case ENETUNREACH:       return WSAENETUNREACH;
    case ENETRESET:         return WSAENETRESET;
    case ECONNABORTED:      return WSAECONNABORTED;
    case ECONNRESET:        return WSAECONNRESET;
    case ENOBUFS:           return WSAENOBUFS;
    case EISCONN:           return WSAEISCONN;
    case ENOTCONN:          return WSAENOTCONN;
    case ESHUTDOWN:         return WSAESHUTDOWN;
    case ETOOMANYREFS:      return WSAETOOMANYREFS;
    case ETIMEDOUT:         return WSAETIMEDOUT;
    case ECONNREFUSED:      return WSAECONNREFUSED;
    case ELOOP:             return WSAELOOP;
    case ENAMETOOLONG:      return WSAENAMETOOLONG;
    case EHOSTDOWN:         return WSAEHOSTDOWN;
    case EHOSTUNREACH:      return WSAEHOSTUNREACH;
    case ENOTEMPTY:         return WSAENOTEMPTY;
    case EPROCLIM:          return WSAEPROCLIM;
    case EUSERS:            return WSAEUSERS;
    case EDQUOT:            return WSAEDQUOT;
    case ESTALE:            return WSAESTALE;
    case EREMOTE:           return WSAEREMOTE;
    default:
        errno = err;
        perror("sock_get_error");
        return ERROR_UNKNOWN;
/* set the last error depending on errno */
static void sock_set_error(void)
    set_error( sock_get_error( errno ) );
/* create a socket */
DECL_HANDLER(create_socket)
    if ((obj = create_socket( req->family, req->type, req->protocol )) != NULL)
    {
        req->handle = alloc_handle( current->process, obj, req->access, req->inherit );
        release_object( obj );
    }
/* accept a socket */
DECL_HANDLER(accept_socket)
    if ((obj = accept_socket( req->lhandle )) != NULL)
    {
        req->handle = alloc_handle( current->process, obj, req->access, req->inherit );
        release_object( obj );
    }
/* set socket event parameters */
DECL_HANDLER(set_socket_event)
    struct event *oevent;

    sock = (struct sock *)get_handle_obj( current->process, req->handle,
                                          GENERIC_READ|GENERIC_WRITE|SYNCHRONIZE, &sock_ops );
    oevent = sock->event;
    sock->mask  = req->mask;
    sock->event = get_event_obj( current->process, req->event, EVENT_MODIFY_STATE );
    if (debug_level && sock->event) fprintf(stderr, "event ptr: %p\n", sock->event);
    sock_reselect( sock );
        sock->state |= WS_FD_NONBLOCKING;
    /* if a network event is pending, signal the event object;
     * it is possible that an FD_CONNECT or FD_ACCEPT network event happened
     * before WSAEventSelect() was done on the socket (when dealing with
     * asynchronous sockets); see the client-side sketch after this handler */
    if (sock->pmask & sock->mask)
        set_event(sock->event);
    if ((oevent != sock->event) && (omask & WS_FD_SERVEVENT))
        /* if the service thread was waiting for the old event object,
         * we should now signal it, to let the service thread
         * object detect that it is now orphaned... */
    /* we're through with it */
    release_object( oevent );

    release_object( &sock->obj );
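
/* Illustrative client-side sketch, not part of the server: the Winsock call
 * sequence that ultimately drives the set_socket_event/get_socket_event
 * handlers. This is Windows-side code, shown for context only; the helper
 * name is hypothetical. */
#if 0   /* requires <winsock2.h>; not compiled as part of the server */
static int watch_socket( SOCKET s )
{
    WSANETWORKEVENTS ne;
    WSAEVENT ev = WSACreateEvent();

    if (ev == WSA_INVALID_EVENT) return -1;
    /* registers the event mask; the server stores it in sock->mask */
    if (WSAEventSelect( s, ev, FD_READ | FD_WRITE | FD_CONNECT | FD_CLOSE ))
        return -1;
    /* blocks until the server signals the event from sock_poll_event() */
    WSAWaitForMultipleEvents( 1, &ev, FALSE, WSA_INFINITE, FALSE );
    /* retrieves and clears the pending events/errors (the get_socket_event request) */
    if (WSAEnumNetworkEvents( s, ev, &ne )) return -1;
    return (int)ne.lNetworkEvents;
}
#endif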
/* get socket event parameters */
DECL_HANDLER(get_socket_event)
    sock = (struct sock *)get_handle_obj( current->process, req->handle,
                                          GENERIC_READ|GENERIC_WRITE|SYNCHRONIZE, &sock_ops );
        set_error(WSAENOTSOCK);
    req->mask  = sock->mask;
    req->pmask = sock->pmask;
    req->state = sock->state;
    size = min( get_req_data_size(req), sizeof(sock->errors) );
    memcpy( get_req_data(req), sock->errors, size );
    set_req_data_size( req, size );
        struct event *sevent = get_event_obj( current->process, req->s_event, 0 );
        if (sevent == sock->event)
        release_object( sevent );

        struct event *cevent = get_event_obj( current->process, req->c_event, EVENT_MODIFY_STATE );
        reset_event( cevent );
        release_object( cevent );

        sock_reselect( sock );
    else set_error(WSAEINVAL);

    release_object( &sock->obj );
/* re-enable pending socket events */
DECL_HANDLER(enable_socket_event)
    sock = (struct sock *)get_handle_obj( current->process, req->handle,
                                          GENERIC_READ|GENERIC_WRITE|SYNCHRONIZE, &sock_ops );
    sock->pmask &= ~req->mask; /* is this safe? */
    sock->hmask &= ~req->mask;
    sock->state |= req->sstate;
    sock->state &= ~req->cstate;
    sock_reselect( sock );

    /* service trigger */
    if (req->mask & WS_FD_SERVEVENT)
    {
        sock->pmask |= WS_FD_SERVEVENT;
        if (debug_level) fprintf(stderr, "signalling service event ptr %p\n", sock->event);
        set_event(sock->event);
    }

    release_object( &sock->obj );