Merge branch 'ab/config-based-hooks-base' into seen
[git] / compat / simple-ipc / ipc-win32.c
1 #include "cache.h"
2 #include "simple-ipc.h"
3 #include "strbuf.h"
4 #include "pkt-line.h"
5 #include "thread-utils.h"
6
7 #ifndef SUPPORTS_SIMPLE_IPC
8 /*
9  * This source file should only be compiled when Simple IPC is supported.
10  * See the top-level Makefile.
11  */
12 #error SUPPORTS_SIMPLE_IPC not defined
13 #endif
14
15 static int initialize_pipe_name(const char *path, wchar_t *wpath, size_t alloc)
16 {
17         int off = 0;
18         struct strbuf realpath = STRBUF_INIT;
19
20         if (!strbuf_realpath(&realpath, path, 0))
21                 return -1;
22
23         off = swprintf(wpath, alloc, L"\\\\.\\pipe\\");
24         if (xutftowcs(wpath + off, realpath.buf, alloc - off) < 0)
25                 return -1;
26
27         /* Handle drive prefix */
28         if (wpath[off] && wpath[off + 1] == L':') {
29                 wpath[off + 1] = L'_';
30                 off += 2;
31         }
32
33         for (; wpath[off]; off++)
34                 if (wpath[off] == L'/')
35                         wpath[off] = L'\\';
36
37         strbuf_release(&realpath);
38         return 0;
39 }
40
41 static enum ipc_active_state get_active_state(wchar_t *pipe_path)
42 {
43         if (WaitNamedPipeW(pipe_path, NMPWAIT_USE_DEFAULT_WAIT))
44                 return IPC_STATE__LISTENING;
45
46         if (GetLastError() == ERROR_SEM_TIMEOUT)
47                 return IPC_STATE__NOT_LISTENING;
48
49         if (GetLastError() == ERROR_FILE_NOT_FOUND)
50                 return IPC_STATE__PATH_NOT_FOUND;
51
52         return IPC_STATE__OTHER_ERROR;
53 }
54
55 enum ipc_active_state ipc_get_active_state(const char *path)
56 {
57         wchar_t pipe_path[MAX_PATH];
58
59         if (initialize_pipe_name(path, pipe_path, ARRAY_SIZE(pipe_path)) < 0)
60                 return IPC_STATE__INVALID_PATH;
61
62         return get_active_state(pipe_path);
63 }
64
65 #define WAIT_STEP_MS (50)
66
/*
 * Try to connect to the server pipe at `wpath`, retrying within
 * `timeout_ms` while the pipe does not yet exist (if
 * options->wait_if_not_found) or while all server instances are busy
 * (if options->wait_if_busy).  On success, returns
 * IPC_STATE__LISTENING and stores in `*pfd` a file descriptor that
 * owns the pipe handle.
 */
static enum ipc_active_state connect_to_server(
	const wchar_t *wpath,
	DWORD timeout_ms,
	const struct ipc_client_connect_options *options,
	int *pfd)
{
	DWORD t_start_ms, t_waited_ms;
	DWORD step_ms;
	HANDLE hPipe = INVALID_HANDLE_VALUE;
	DWORD mode = PIPE_READMODE_BYTE;
	DWORD gle;

	*pfd = -1;

	for (;;) {
		hPipe = CreateFileW(wpath, GENERIC_READ | GENERIC_WRITE,
				    0, NULL, OPEN_EXISTING, 0, NULL);
		if (hPipe != INVALID_HANDLE_VALUE)
			break;

		gle = GetLastError();

		switch (gle) {
		case ERROR_FILE_NOT_FOUND:
			/* No server has created the pipe (yet). */
			if (!options->wait_if_not_found)
				return IPC_STATE__PATH_NOT_FOUND;
			if (!timeout_ms)
				return IPC_STATE__PATH_NOT_FOUND;

			/* Poll in small steps until the pipe appears. */
			step_ms = (timeout_ms < WAIT_STEP_MS) ?
				timeout_ms : WAIT_STEP_MS;
			sleep_millisec(step_ms);

			timeout_ms -= step_ms;
			break; /* try again */

		case ERROR_PIPE_BUSY:
			/* All server pipe instances currently have clients. */
			if (!options->wait_if_busy)
				return IPC_STATE__NOT_LISTENING;
			if (!timeout_ms)
				return IPC_STATE__NOT_LISTENING;

			t_start_ms = (DWORD)(getnanotime() / 1000000);

			if (!WaitNamedPipeW(wpath, timeout_ms)) {
				if (GetLastError() == ERROR_SEM_TIMEOUT)
					return IPC_STATE__NOT_LISTENING;

				return IPC_STATE__OTHER_ERROR;
			}

			/*
			 * A pipe server instance became available.
			 * Race other client processes to connect to
			 * it.
			 *
			 * But first decrement our overall timeout so
			 * that we don't starve if we keep losing the
			 * race.  But also guard against special
			 * NMPWAIT_ values (0 and -1).
			 */
			t_waited_ms = (DWORD)(getnanotime() / 1000000) - t_start_ms;
			if (t_waited_ms < timeout_ms)
				timeout_ms -= t_waited_ms;
			else
				timeout_ms = 1;
			break; /* try again */

		default:
			return IPC_STATE__OTHER_ERROR;
		}
	}

	/* Byte mode: pkt-line does its own framing above this layer. */
	if (!SetNamedPipeHandleState(hPipe, &mode, NULL, NULL)) {
		CloseHandle(hPipe);
		return IPC_STATE__OTHER_ERROR;
	}

	*pfd = _open_osfhandle((intptr_t)hPipe, O_RDWR|O_BINARY);
	if (*pfd < 0) {
		CloseHandle(hPipe);
		return IPC_STATE__OTHER_ERROR;
	}

	/* fd now owns hPipe */

	return IPC_STATE__LISTENING;
}
155
156 /*
157  * The default connection timeout for Windows clients.
158  *
159  * This is not currently part of the ipc_ API (nor the config settings)
160  * because of differences between Windows and other platforms.
161  *
162  * This value was chosen at random.
163  */
164 #define WINDOWS_CONNECTION_TIMEOUT_MS (30000)
165
166 enum ipc_active_state ipc_client_try_connect(
167         const char *path,
168         const struct ipc_client_connect_options *options,
169         struct ipc_client_connection **p_connection)
170 {
171         wchar_t wpath[MAX_PATH];
172         enum ipc_active_state state = IPC_STATE__OTHER_ERROR;
173         int fd = -1;
174
175         *p_connection = NULL;
176
177         trace2_region_enter("ipc-client", "try-connect", NULL);
178         trace2_data_string("ipc-client", NULL, "try-connect/path", path);
179
180         if (initialize_pipe_name(path, wpath, ARRAY_SIZE(wpath)) < 0)
181                 state = IPC_STATE__INVALID_PATH;
182         else
183                 state = connect_to_server(wpath, WINDOWS_CONNECTION_TIMEOUT_MS,
184                                           options, &fd);
185
186         trace2_data_intmax("ipc-client", NULL, "try-connect/state",
187                            (intmax_t)state);
188         trace2_region_leave("ipc-client", "try-connect", NULL);
189
190         if (state == IPC_STATE__LISTENING) {
191                 (*p_connection) = xcalloc(1, sizeof(struct ipc_client_connection));
192                 (*p_connection)->fd = fd;
193         }
194
195         return state;
196 }
197
198 void ipc_client_close_connection(struct ipc_client_connection *connection)
199 {
200         if (!connection)
201                 return;
202
203         if (connection->fd != -1)
204                 close(connection->fd);
205
206         free(connection);
207 }
208
209 int ipc_client_send_command_to_connection(
210         struct ipc_client_connection *connection,
211         const char *message, size_t message_len,
212         struct strbuf *answer)
213 {
214         int ret = 0;
215
216         strbuf_setlen(answer, 0);
217
218         trace2_region_enter("ipc-client", "send-command", NULL);
219
220         if (write_packetized_from_buf_no_flush(message, message_len,
221                                                connection->fd) < 0 ||
222             packet_flush_gently(connection->fd) < 0) {
223                 ret = error(_("could not send IPC command"));
224                 goto done;
225         }
226
227         FlushFileBuffers((HANDLE)_get_osfhandle(connection->fd));
228
229         if (read_packetized_to_strbuf(
230                     connection->fd, answer,
231                     PACKET_READ_GENTLE_ON_EOF | PACKET_READ_GENTLE_ON_READ_ERROR) < 0) {
232                 ret = error(_("could not read IPC response"));
233                 goto done;
234         }
235
236 done:
237         trace2_region_leave("ipc-client", "send-command", NULL);
238         return ret;
239 }
240
241 int ipc_client_send_command(const char *path,
242                             const struct ipc_client_connect_options *options,
243                             const char *message, size_t message_len,
244                             struct strbuf *response)
245 {
246         int ret = -1;
247         enum ipc_active_state state;
248         struct ipc_client_connection *connection = NULL;
249
250         state = ipc_client_try_connect(path, options, &connection);
251
252         if (state != IPC_STATE__LISTENING)
253                 return ret;
254
255         ret = ipc_client_send_command_to_connection(connection,
256                                                     message, message_len,
257                                                     response);
258
259         ipc_client_close_connection(connection);
260
261         return ret;
262 }
263
264 /*
265  * Duplicate the given pipe handle and wrap it in a file descriptor so
266  * that we can use pkt-line on it.
267  */
268 static int dup_fd_from_pipe(const HANDLE pipe)
269 {
270         HANDLE process = GetCurrentProcess();
271         HANDLE handle;
272         int fd;
273
274         if (!DuplicateHandle(process, pipe, process, &handle, 0, FALSE,
275                              DUPLICATE_SAME_ACCESS)) {
276                 errno = err_win_to_posix(GetLastError());
277                 return -1;
278         }
279
280         fd = _open_osfhandle((intptr_t)handle, O_RDWR|O_BINARY);
281         if (fd < 0) {
282                 errno = err_win_to_posix(GetLastError());
283                 CloseHandle(handle);
284                 return -1;
285         }
286
287         /*
288          * `handle` is now owned by `fd` and will be automatically closed
289          * when the descriptor is closed.
290          */
291
292         return fd;
293 }
294
/*
 * Magic numbers used to annotate callback instance data.
 * These are used to help guard against accidentally passing the
 * wrong instance data across multiple levels of callbacks (which
 * is easy to do if there are `void*` arguments).
 */
enum magic {
	MAGIC_SERVER_REPLY_DATA,	/* struct ipc_server_reply_data */
	MAGIC_SERVER_THREAD_DATA,	/* struct ipc_server_thread_data */
	MAGIC_SERVER_DATA,		/* struct ipc_server_data */
};
306
/*
 * Instance data handed to the reply callback while servicing a single
 * client request; identifies the fd the response is written to.
 */
struct ipc_server_reply_data {
	enum magic magic;	/* always MAGIC_SERVER_REPLY_DATA */
	int fd;			/* fd wrapping the connected pipe handle */
	struct ipc_server_thread_data *server_thread_data;
};
312
/*
 * Per-worker-thread state.  Each worker owns its own instance of the
 * named pipe and services one client connection at a time.
 */
struct ipc_server_thread_data {
	enum magic magic;	/* always MAGIC_SERVER_THREAD_DATA */
	struct ipc_server_thread_data *next_thread;	/* singly-linked pool list */
	struct ipc_server_data *server_data;	/* back-pointer to the server */
	pthread_t pthread_id;
	HANDLE hPipe;		/* this thread's pipe instance */
};
320
321 /*
322  * On Windows, the conceptual "ipc-server" is implemented as a pool of
 * n identical/peer "server-thread" threads.  That is, there is no
324  * hierarchy of threads; and therefore no controller thread managing
325  * the pool.  Each thread has an independent handle to the named pipe,
326  * receives incoming connections, processes the client, and re-uses
327  * the pipe for the next client connection.
328  *
329  * Therefore, the "ipc-server" only needs to maintain a list of the
330  * spawned threads for eventual "join" purposes.
331  *
332  * A single "stop-event" is visible to all of the server threads to
333  * tell them to shutdown (when idle).
334  */
struct ipc_server_data {
	enum magic magic;	/* always MAGIC_SERVER_DATA */
	ipc_server_application_cb *application_cb;	/* request handler */
	void *application_data;	/* opaque context for application_cb */
	struct strbuf buf_path;	/* UTF-8 path (used in trace/error messages) */
	wchar_t wpath[MAX_PATH];	/* pipe name as wide chars */

	/* Manual-reset event signalled by ipc_server_stop_async(). */
	HANDLE hEventStopRequested;
	struct ipc_server_thread_data *thread_list;	/* worker pool */
	int is_stopped;		/* set by ipc_server_await() after joining */
};
346
/* Outcome of trying to accept a client on a pipe instance. */
enum connect_result {
	CR_CONNECTED = 0,	/* a client is now connected */
	CR_CONNECT_PENDING,	/* overlapped connect still in flight */
	CR_CONNECT_ERROR,	/* ConnectNamedPipe() failed */
	CR_WAIT_ERROR,		/* WaitForMultipleObjects() failed */
	CR_SHUTDOWN,		/* the stop-event was signalled */
};
354
/*
 * Start an overlapped (asynchronous) accept on this thread's pipe
 * instance.
 *
 * Note: in overlapped mode ConnectNamedPipe() is documented to return
 * FALSE and report the real status via GetLastError(); a TRUE return
 * here is an anomaly, so it falls through to the error path.
 */
static enum connect_result queue_overlapped_connect(
	struct ipc_server_thread_data *server_thread_data,
	OVERLAPPED *lpo)
{
	if (ConnectNamedPipe(server_thread_data->hPipe, lpo))
		goto failed;

	switch (GetLastError()) {
	case ERROR_IO_PENDING:
		/* The accept will complete asynchronously. */
		return CR_CONNECT_PENDING;

	case ERROR_PIPE_CONNECTED:
		/*
		 * A client connected between pipe creation and this
		 * call; signal the event ourselves so the caller's
		 * wait completes immediately.
		 */
		SetEvent(lpo->hEvent);
		return CR_CONNECTED;

	default:
		break;
	}

failed:
	error(_("ConnectNamedPipe failed for '%s' (%lu)"),
	      server_thread_data->server_data->buf_path.buf,
	      GetLastError());
	return CR_CONNECT_ERROR;
}
380
381 /*
382  * Use Windows Overlapped IO to wait for a connection or for our event
383  * to be signalled.
384  */
385 static enum connect_result wait_for_connection(
386         struct ipc_server_thread_data *server_thread_data,
387         OVERLAPPED *lpo)
388 {
389         enum connect_result r;
390         HANDLE waitHandles[2];
391         DWORD dwWaitResult;
392
393         r = queue_overlapped_connect(server_thread_data, lpo);
394         if (r != CR_CONNECT_PENDING)
395                 return r;
396
397         waitHandles[0] = server_thread_data->server_data->hEventStopRequested;
398         waitHandles[1] = lpo->hEvent;
399
400         dwWaitResult = WaitForMultipleObjects(2, waitHandles, FALSE, INFINITE);
401         switch (dwWaitResult) {
402         case WAIT_OBJECT_0 + 0:
403                 return CR_SHUTDOWN;
404
405         case WAIT_OBJECT_0 + 1:
406                 ResetEvent(lpo->hEvent);
407                 return CR_CONNECTED;
408
409         default:
410                 return CR_WAIT_ERROR;
411         }
412 }
413
414 /*
415  * Forward declare our reply callback function so that any compiler
416  * errors are reported when we actually define the function (in addition
417  * to any errors reported when we try to pass this callback function as
418  * a parameter in a function call).  The former are easier to understand.
419  */
420 static ipc_server_reply_cb do_io_reply_callback;
421
422 /*
423  * Relay application's response message to the client process.
424  * (We do not flush at this point because we allow the caller
425  * to chunk data to the client thru us.)
426  */
427 static int do_io_reply_callback(struct ipc_server_reply_data *reply_data,
428                        const char *response, size_t response_len)
429 {
430         if (reply_data->magic != MAGIC_SERVER_REPLY_DATA)
431                 BUG("reply_cb called with wrong instance data");
432
433         return write_packetized_from_buf_no_flush(response, response_len,
434                                                   reply_data->fd);
435 }
436
/*
 * Receive the request/command from the client and pass it to the
 * registered request-callback.  The request-callback will compose
 * a response and call our reply-callback to send it to the client.
 *
 * Simple-IPC only contains one round trip, so we flush and close
 * here after the response.
 */
static int do_io(struct ipc_server_thread_data *server_thread_data)
{
	struct strbuf buf = STRBUF_INIT;
	struct ipc_server_reply_data reply_data;
	int ret = 0;

	reply_data.magic = MAGIC_SERVER_REPLY_DATA;
	reply_data.server_thread_data = server_thread_data;

	/* Wrap a duplicate of the pipe handle in an fd for pkt-line IO. */
	reply_data.fd = dup_fd_from_pipe(server_thread_data->hPipe);
	if (reply_data.fd < 0)
		return error(_("could not create fd from pipe for '%s'"),
			     server_thread_data->server_data->buf_path.buf);

	ret = read_packetized_to_strbuf(
		reply_data.fd, &buf,
		PACKET_READ_GENTLE_ON_EOF | PACKET_READ_GENTLE_ON_READ_ERROR);
	if (ret >= 0) {
		ret = server_thread_data->server_data->application_cb(
			server_thread_data->server_data->application_data,
			buf.buf, buf.len, do_io_reply_callback, &reply_data);

		/* Terminate the response and push it out to the client. */
		packet_flush_gently(reply_data.fd);

		FlushFileBuffers((HANDLE)_get_osfhandle((reply_data.fd)));
	}
	else {
		/*
		 * The client probably disconnected/shutdown before it
		 * could send a well-formed message.  Ignore it.
		 */
	}

	strbuf_release(&buf);
	/* Closing the fd also closes the duplicated handle it owns. */
	close(reply_data.fd);

	return ret;
}
483
484 /*
485  * Handle IPC request and response with this connected client.  And reset
486  * the pipe to prepare for the next client.
487  */
488 static int use_connection(struct ipc_server_thread_data *server_thread_data)
489 {
490         int ret;
491
492         ret = do_io(server_thread_data);
493
494         FlushFileBuffers(server_thread_data->hPipe);
495         DisconnectNamedPipe(server_thread_data->hPipe);
496
497         return ret;
498 }
499
500 /*
501  * Thread proc for an IPC server worker thread.  It handles a series of
502  * connections from clients.  It cleans and reuses the hPipe between each
503  * client.
504  */
505 static void *server_thread_proc(void *_server_thread_data)
506 {
507         struct ipc_server_thread_data *server_thread_data = _server_thread_data;
508         HANDLE hEventConnected = INVALID_HANDLE_VALUE;
509         OVERLAPPED oConnect;
510         enum connect_result cr;
511         int ret;
512
513         assert(server_thread_data->hPipe != INVALID_HANDLE_VALUE);
514
515         trace2_thread_start("ipc-server");
516         trace2_data_string("ipc-server", NULL, "pipe",
517                            server_thread_data->server_data->buf_path.buf);
518
519         hEventConnected = CreateEventW(NULL, TRUE, FALSE, NULL);
520
521         memset(&oConnect, 0, sizeof(oConnect));
522         oConnect.hEvent = hEventConnected;
523
524         for (;;) {
525                 cr = wait_for_connection(server_thread_data, &oConnect);
526
527                 switch (cr) {
528                 case CR_SHUTDOWN:
529                         goto finished;
530
531                 case CR_CONNECTED:
532                         ret = use_connection(server_thread_data);
533                         if (ret == SIMPLE_IPC_QUIT) {
534                                 ipc_server_stop_async(
535                                         server_thread_data->server_data);
536                                 goto finished;
537                         }
538                         if (ret > 0) {
539                                 /*
540                                  * Ignore (transient) IO errors with this
541                                  * client and reset for the next client.
542                                  */
543                         }
544                         break;
545
546                 case CR_CONNECT_PENDING:
547                         /* By construction, this should not happen. */
548                         BUG("ipc-server[%s]: unexpeced CR_CONNECT_PENDING",
549                             server_thread_data->server_data->buf_path.buf);
550
551                 case CR_CONNECT_ERROR:
552                 case CR_WAIT_ERROR:
553                         /*
554                          * Ignore these theoretical errors.
555                          */
556                         DisconnectNamedPipe(server_thread_data->hPipe);
557                         break;
558
559                 default:
560                         BUG("unandled case after wait_for_connection");
561                 }
562         }
563
564 finished:
565         CloseHandle(server_thread_data->hPipe);
566         CloseHandle(hEventConnected);
567
568         trace2_thread_exit();
569         return NULL;
570 }
571
/*
 * Create a new instance of the named pipe at `wpath` for this server.
 * Only the first instance claims FILE_FLAG_FIRST_PIPE_INSTANCE, which
 * makes creation fail if a competing server already owns the path.
 * Returns INVALID_HANDLE_VALUE on failure.
 */
static HANDLE create_new_pipe(wchar_t *wpath, int is_first)
{
	HANDLE hPipe;
	DWORD dwOpenMode, dwPipeMode;
	LPSECURITY_ATTRIBUTES lpsa = NULL;

	/* Duplex, overlapped: all IO on the pipe is asynchronous. */
	dwOpenMode = PIPE_ACCESS_INBOUND | PIPE_ACCESS_OUTBOUND |
		FILE_FLAG_OVERLAPPED;

	/* Written as messages, but clients read it as a byte stream. */
	dwPipeMode = PIPE_TYPE_MESSAGE | PIPE_READMODE_BYTE | PIPE_WAIT |
		PIPE_REJECT_REMOTE_CLIENTS;

	if (is_first) {
		dwOpenMode |= FILE_FLAG_FIRST_PIPE_INSTANCE;

		/*
		 * On Windows, the first server pipe instance gets to
		 * set the ACL / Security Attributes on the named
		 * pipe; subsequent instances inherit and cannot
		 * change them.
		 *
		 * TODO Should we allow the application layer to
		 * specify security attributes, such as `LocalService`
		 * or `LocalSystem`, when we create the named pipe?
		 * This question is probably not important when the
		 * daemon is started by a foreground user process and
		 * only needs to talk to the current user, but may be
		 * if the daemon is run via the Control Panel as a
		 * System Service.
		 */
	}

	hPipe = CreateNamedPipeW(wpath, dwOpenMode, dwPipeMode,
				 PIPE_UNLIMITED_INSTANCES, 1024, 1024, 0, lpsa);

	return hPipe;
}
609
610 int ipc_server_run_async(struct ipc_server_data **returned_server_data,
611                          const char *path, const struct ipc_server_opts *opts,
612                          ipc_server_application_cb *application_cb,
613                          void *application_data)
614 {
615         struct ipc_server_data *server_data;
616         wchar_t wpath[MAX_PATH];
617         HANDLE hPipeFirst = INVALID_HANDLE_VALUE;
618         int k;
619         int ret = 0;
620         int nr_threads = opts->nr_threads;
621
622         *returned_server_data = NULL;
623
624         ret = initialize_pipe_name(path, wpath, ARRAY_SIZE(wpath));
625         if (ret < 0) {
626                 errno = EINVAL;
627                 return -1;
628         }
629
630         hPipeFirst = create_new_pipe(wpath, 1);
631         if (hPipeFirst == INVALID_HANDLE_VALUE) {
632                 errno = EADDRINUSE;
633                 return -2;
634         }
635
636         server_data = xcalloc(1, sizeof(*server_data));
637         server_data->magic = MAGIC_SERVER_DATA;
638         server_data->application_cb = application_cb;
639         server_data->application_data = application_data;
640         server_data->hEventStopRequested = CreateEvent(NULL, TRUE, FALSE, NULL);
641         strbuf_init(&server_data->buf_path, 0);
642         strbuf_addstr(&server_data->buf_path, path);
643         wcscpy(server_data->wpath, wpath);
644
645         if (nr_threads < 1)
646                 nr_threads = 1;
647
648         for (k = 0; k < nr_threads; k++) {
649                 struct ipc_server_thread_data *std;
650
651                 std = xcalloc(1, sizeof(*std));
652                 std->magic = MAGIC_SERVER_THREAD_DATA;
653                 std->server_data = server_data;
654                 std->hPipe = INVALID_HANDLE_VALUE;
655
656                 std->hPipe = (k == 0)
657                         ? hPipeFirst
658                         : create_new_pipe(server_data->wpath, 0);
659
660                 if (std->hPipe == INVALID_HANDLE_VALUE) {
661                         /*
662                          * If we've reached a pipe instance limit for
663                          * this path, just use fewer threads.
664                          */
665                         free(std);
666                         break;
667                 }
668
669                 if (pthread_create(&std->pthread_id, NULL,
670                                    server_thread_proc, std)) {
671                         /*
672                          * Likewise, if we're out of threads, just use
673                          * fewer threads than requested.
674                          *
675                          * However, we just give up if we can't even get
676                          * one thread.  This should not happen.
677                          */
678                         if (k == 0)
679                                 die(_("could not start thread[0] for '%s'"),
680                                     path);
681
682                         CloseHandle(std->hPipe);
683                         free(std);
684                         break;
685                 }
686
687                 std->next_thread = server_data->thread_list;
688                 server_data->thread_list = std;
689         }
690
691         *returned_server_data = server_data;
692         return 0;
693 }
694
/*
 * Request an asynchronous shutdown of the worker pool.  Always returns
 * 0; the actual thread join happens in ipc_server_await().
 */
int ipc_server_stop_async(struct ipc_server_data *server_data)
{
	if (!server_data)
		return 0;

	/*
	 * Gently tell all of the ipc_server threads to shutdown.
	 * This will be seen the next time they are idle (and waiting
	 * for a connection).
	 *
	 * We DO NOT attempt to force them to drop an active connection.
	 */
	SetEvent(server_data->hEventStopRequested);
	return 0;
}
710
711 int ipc_server_await(struct ipc_server_data *server_data)
712 {
713         DWORD dwWaitResult;
714
715         if (!server_data)
716                 return 0;
717
718         dwWaitResult = WaitForSingleObject(server_data->hEventStopRequested, INFINITE);
719         if (dwWaitResult != WAIT_OBJECT_0)
720                 return error(_("wait for hEvent failed for '%s'"),
721                              server_data->buf_path.buf);
722
723         while (server_data->thread_list) {
724                 struct ipc_server_thread_data *std = server_data->thread_list;
725
726                 pthread_join(std->pthread_id, NULL);
727
728                 server_data->thread_list = std->next_thread;
729                 free(std);
730         }
731
732         server_data->is_stopped = 1;
733
734         return 0;
735 }
736
737 void ipc_server_free(struct ipc_server_data *server_data)
738 {
739         if (!server_data)
740                 return;
741
742         if (!server_data->is_stopped)
743                 BUG("cannot free ipc-server while running for '%s'",
744                     server_data->buf_path.buf);
745
746         strbuf_release(&server_data->buf_path);
747
748         if (server_data->hEventStopRequested != INVALID_HANDLE_VALUE)
749                 CloseHandle(server_data->hEventStopRequested);
750
751         while (server_data->thread_list) {
752                 struct ipc_server_thread_data *std = server_data->thread_list;
753
754                 server_data->thread_list = std->next_thread;
755                 free(std);
756         }
757
758         free(server_data);
759 }