/*
 * compat/simple-ipc/ipc-win32.c
 *
 * Windows named-pipe implementation of the "simple-ipc" client/server API.
 */
1 #include "cache.h"
2 #include "simple-ipc.h"
3 #include "strbuf.h"
4 #include "pkt-line.h"
5 #include "thread-utils.h"
6
7 #ifndef GIT_WINDOWS_NATIVE
8 #error This file can only be compiled on Windows
9 #endif
10
11 static int initialize_pipe_name(const char *path, wchar_t *wpath, size_t alloc)
12 {
13         int off = 0;
14         struct strbuf realpath = STRBUF_INIT;
15
16         if (!strbuf_realpath(&realpath, path, 0))
17                 return -1;
18
19         off = swprintf(wpath, alloc, L"\\\\.\\pipe\\");
20         if (xutftowcs(wpath + off, realpath.buf, alloc - off) < 0)
21                 return -1;
22
23         /* Handle drive prefix */
24         if (wpath[off] && wpath[off + 1] == L':') {
25                 wpath[off + 1] = L'_';
26                 off += 2;
27         }
28
29         for (; wpath[off]; off++)
30                 if (wpath[off] == L'/')
31                         wpath[off] = L'\\';
32
33         strbuf_release(&realpath);
34         return 0;
35 }
36
37 static enum ipc_active_state get_active_state(wchar_t *pipe_path)
38 {
39         if (WaitNamedPipeW(pipe_path, NMPWAIT_USE_DEFAULT_WAIT))
40                 return IPC_STATE__LISTENING;
41
42         if (GetLastError() == ERROR_SEM_TIMEOUT)
43                 return IPC_STATE__NOT_LISTENING;
44
45         if (GetLastError() == ERROR_FILE_NOT_FOUND)
46                 return IPC_STATE__PATH_NOT_FOUND;
47
48         return IPC_STATE__OTHER_ERROR;
49 }
50
51 enum ipc_active_state ipc_get_active_state(const char *path)
52 {
53         wchar_t pipe_path[MAX_PATH];
54
55         if (initialize_pipe_name(path, pipe_path, ARRAY_SIZE(pipe_path)) < 0)
56                 return IPC_STATE__INVALID_PATH;
57
58         return get_active_state(pipe_path);
59 }
60
61 #define WAIT_STEP_MS (50)
62
/*
 * Connect to the named pipe `wpath`, retrying for up to `timeout_ms`
 * when the pipe does not exist yet or all server instances are busy
 * (each retry class gated by the corresponding `options` flag).
 *
 * On success, wraps the pipe HANDLE in a file descriptor (returned
 * via `*pfd`) and returns IPC_STATE__LISTENING.  On failure, returns
 * a state describing why and leaves `*pfd` set to -1.
 */
static enum ipc_active_state connect_to_server(
	const wchar_t *wpath,
	DWORD timeout_ms,
	const struct ipc_client_connect_options *options,
	int *pfd)
{
	DWORD t_start_ms, t_waited_ms;
	DWORD step_ms;
	HANDLE hPipe = INVALID_HANDLE_VALUE;
	DWORD mode = PIPE_READMODE_BYTE;
	DWORD gle;

	*pfd = -1;

	for (;;) {
		hPipe = CreateFileW(wpath, GENERIC_READ | GENERIC_WRITE,
				    0, NULL, OPEN_EXISTING, 0, NULL);
		if (hPipe != INVALID_HANDLE_VALUE)
			break;

		gle = GetLastError();

		switch (gle) {
		case ERROR_FILE_NOT_FOUND:
			/* No server has created the pipe (yet). */
			if (!options->wait_if_not_found)
				return IPC_STATE__PATH_NOT_FOUND;
			if (!timeout_ms)
				return IPC_STATE__PATH_NOT_FOUND;

			/*
			 * Sleep in small steps so we notice the pipe
			 * appearing without burning the whole timeout.
			 */
			step_ms = (timeout_ms < WAIT_STEP_MS) ?
				timeout_ms : WAIT_STEP_MS;
			sleep_millisec(step_ms);

			timeout_ms -= step_ms;
			break; /* try again */

		case ERROR_PIPE_BUSY:
			/* The pipe exists, but every instance is in use. */
			if (!options->wait_if_busy)
				return IPC_STATE__NOT_LISTENING;
			if (!timeout_ms)
				return IPC_STATE__NOT_LISTENING;

			t_start_ms = (DWORD)(getnanotime() / 1000000);

			if (!WaitNamedPipeW(wpath, timeout_ms)) {
				if (GetLastError() == ERROR_SEM_TIMEOUT)
					return IPC_STATE__NOT_LISTENING;

				return IPC_STATE__OTHER_ERROR;
			}

			/*
			 * A pipe server instance became available.
			 * Race other client processes to connect to
			 * it.
			 *
			 * But first decrement our overall timeout so
			 * that we don't starve if we keep losing the
			 * race.  But also guard against special
			 * NPMWAIT_ values (0 and -1).
			 */
			t_waited_ms = (DWORD)(getnanotime() / 1000000) - t_start_ms;
			if (t_waited_ms < timeout_ms)
				timeout_ms -= t_waited_ms;
			else
				timeout_ms = 1;
			break; /* try again */

		default:
			return IPC_STATE__OTHER_ERROR;
		}
	}

	/* Byte-oriented reads, to match the pkt-line framing we use. */
	if (!SetNamedPipeHandleState(hPipe, &mode, NULL, NULL)) {
		CloseHandle(hPipe);
		return IPC_STATE__OTHER_ERROR;
	}

	*pfd = _open_osfhandle((intptr_t)hPipe, O_RDWR|O_BINARY);
	if (*pfd < 0) {
		CloseHandle(hPipe);
		return IPC_STATE__OTHER_ERROR;
	}

	/* fd now owns hPipe */

	return IPC_STATE__LISTENING;
}
151
152 /*
153  * The default connection timeout for Windows clients.
154  *
155  * This is not currently part of the ipc_ API (nor the config settings)
156  * because of differences between Windows and other platforms.
157  *
158  * This value was chosen at random.
159  */
160 #define WINDOWS_CONNECTION_TIMEOUT_MS (30000)
161
162 enum ipc_active_state ipc_client_try_connect(
163         const char *path,
164         const struct ipc_client_connect_options *options,
165         struct ipc_client_connection **p_connection)
166 {
167         wchar_t wpath[MAX_PATH];
168         enum ipc_active_state state = IPC_STATE__OTHER_ERROR;
169         int fd = -1;
170
171         *p_connection = NULL;
172
173         trace2_region_enter("ipc-client", "try-connect", NULL);
174         trace2_data_string("ipc-client", NULL, "try-connect/path", path);
175
176         if (initialize_pipe_name(path, wpath, ARRAY_SIZE(wpath)) < 0)
177                 state = IPC_STATE__INVALID_PATH;
178         else
179                 state = connect_to_server(wpath, WINDOWS_CONNECTION_TIMEOUT_MS,
180                                           options, &fd);
181
182         trace2_data_intmax("ipc-client", NULL, "try-connect/state",
183                            (intmax_t)state);
184         trace2_region_leave("ipc-client", "try-connect", NULL);
185
186         if (state == IPC_STATE__LISTENING) {
187                 (*p_connection) = xcalloc(1, sizeof(struct ipc_client_connection));
188                 (*p_connection)->fd = fd;
189         }
190
191         return state;
192 }
193
194 void ipc_client_close_connection(struct ipc_client_connection *connection)
195 {
196         if (!connection)
197                 return;
198
199         if (connection->fd != -1)
200                 close(connection->fd);
201
202         free(connection);
203 }
204
205 int ipc_client_send_command_to_connection(
206         struct ipc_client_connection *connection,
207         const char *message, struct strbuf *answer)
208 {
209         int ret = 0;
210
211         strbuf_setlen(answer, 0);
212
213         trace2_region_enter("ipc-client", "send-command", NULL);
214
215         if (write_packetized_from_buf_no_flush(message, strlen(message),
216                                                connection->fd) < 0 ||
217             packet_flush_gently(connection->fd) < 0) {
218                 ret = error(_("could not send IPC command"));
219                 goto done;
220         }
221
222         FlushFileBuffers((HANDLE)_get_osfhandle(connection->fd));
223
224         if (read_packetized_to_strbuf(
225                     connection->fd, answer,
226                     PACKET_READ_GENTLE_ON_EOF | PACKET_READ_GENTLE_ON_READ_ERROR) < 0) {
227                 ret = error(_("could not read IPC response"));
228                 goto done;
229         }
230
231 done:
232         trace2_region_leave("ipc-client", "send-command", NULL);
233         return ret;
234 }
235
236 int ipc_client_send_command(const char *path,
237                             const struct ipc_client_connect_options *options,
238                             const char *message, struct strbuf *response)
239 {
240         int ret = -1;
241         enum ipc_active_state state;
242         struct ipc_client_connection *connection = NULL;
243
244         state = ipc_client_try_connect(path, options, &connection);
245
246         if (state != IPC_STATE__LISTENING)
247                 return ret;
248
249         ret = ipc_client_send_command_to_connection(connection, message, response);
250
251         ipc_client_close_connection(connection);
252
253         return ret;
254 }
255
256 /*
257  * Duplicate the given pipe handle and wrap it in a file descriptor so
258  * that we can use pkt-line on it.
259  */
260 static int dup_fd_from_pipe(const HANDLE pipe)
261 {
262         HANDLE process = GetCurrentProcess();
263         HANDLE handle;
264         int fd;
265
266         if (!DuplicateHandle(process, pipe, process, &handle, 0, FALSE,
267                              DUPLICATE_SAME_ACCESS)) {
268                 errno = err_win_to_posix(GetLastError());
269                 return -1;
270         }
271
272         fd = _open_osfhandle((intptr_t)handle, O_RDWR|O_BINARY);
273         if (fd < 0) {
274                 errno = err_win_to_posix(GetLastError());
275                 CloseHandle(handle);
276                 return -1;
277         }
278
279         /*
280          * `handle` is now owned by `fd` and will be automatically closed
281          * when the descriptor is closed.
282          */
283
284         return fd;
285 }
286
287 /*
288  * Magic numbers used to annotate callback instance data.
289  * These are used to help guard against accidentally passing the
290  * wrong instance data across multiple levels of callbacks (which
291  * is easy to do if there are `void*` arguments).
292  */
enum magic {
	MAGIC_SERVER_REPLY_DATA,	/* tags struct ipc_server_reply_data */
	MAGIC_SERVER_THREAD_DATA,	/* tags struct ipc_server_thread_data */
	MAGIC_SERVER_DATA,		/* tags struct ipc_server_data */
};
298
/*
 * Instance data handed to the reply callback while servicing a single
 * client request.
 */
struct ipc_server_reply_data {
	enum magic magic;	/* always MAGIC_SERVER_REPLY_DATA */
	int fd;			/* fd wrapping the pipe handle; pkt-line I/O */
	struct ipc_server_thread_data *server_thread_data;
};
304
/*
 * Per-worker-thread state.  Each worker owns one instance of the named
 * pipe and serves client connections on it, one at a time.
 */
struct ipc_server_thread_data {
	enum magic magic;	/* always MAGIC_SERVER_THREAD_DATA */
	struct ipc_server_thread_data *next_thread; /* link in server's thread_list */
	struct ipc_server_data *server_data;	/* back-pointer to owning server */
	pthread_t pthread_id;
	HANDLE hPipe;		/* this thread's named-pipe instance */
};
312
313 /*
314  * On Windows, the conceptual "ipc-server" is implemented as a pool of
315  * n idential/peer "server-thread" threads.  That is, there is no
316  * hierarchy of threads; and therefore no controller thread managing
317  * the pool.  Each thread has an independent handle to the named pipe,
318  * receives incoming connections, processes the client, and re-uses
319  * the pipe for the next client connection.
320  *
321  * Therefore, the "ipc-server" only needs to maintain a list of the
322  * spawned threads for eventual "join" purposes.
323  *
324  * A single "stop-event" is visible to all of the server threads to
325  * tell them to shutdown (when idle).
326  */
/*
 * State for one running ipc-server instance.  Allocated by
 * ipc_server_run_async() and released by ipc_server_free().
 */
struct ipc_server_data {
	enum magic magic;	/* always MAGIC_SERVER_DATA */
	ipc_server_application_cb *application_cb; /* handles each client request */
	void *application_data;	/* opaque context passed to application_cb */
	struct strbuf buf_path;	/* original UTF-8 path, for messages/tracing */
	wchar_t wpath[MAX_PATH];	/* full wide-char named-pipe name */

	HANDLE hEventStopRequested;	/* signalled by ipc_server_stop_async() */
	struct ipc_server_thread_data *thread_list; /* singly-linked worker list */
	int is_stopped;	/* set by ipc_server_await() once all threads joined */
};
338
/* Outcome of trying to accept a client connection on a server pipe. */
enum connect_result {
	CR_CONNECTED = 0,	/* a client is connected to the pipe */
	CR_CONNECT_PENDING,	/* overlapped connect is still in flight */
	CR_CONNECT_ERROR,	/* ConnectNamedPipe() failed */
	CR_WAIT_ERROR,		/* waiting on connect/stop events failed */
	CR_SHUTDOWN,		/* stop event was signalled; shut down */
};
346
347 static enum connect_result queue_overlapped_connect(
348         struct ipc_server_thread_data *server_thread_data,
349         OVERLAPPED *lpo)
350 {
351         if (ConnectNamedPipe(server_thread_data->hPipe, lpo))
352                 goto failed;
353
354         switch (GetLastError()) {
355         case ERROR_IO_PENDING:
356                 return CR_CONNECT_PENDING;
357
358         case ERROR_PIPE_CONNECTED:
359                 SetEvent(lpo->hEvent);
360                 return CR_CONNECTED;
361
362         default:
363                 break;
364         }
365
366 failed:
367         error(_("ConnectNamedPipe failed for '%s' (%lu)"),
368               server_thread_data->server_data->buf_path.buf,
369               GetLastError());
370         return CR_CONNECT_ERROR;
371 }
372
373 /*
374  * Use Windows Overlapped IO to wait for a connection or for our event
375  * to be signalled.
376  */
377 static enum connect_result wait_for_connection(
378         struct ipc_server_thread_data *server_thread_data,
379         OVERLAPPED *lpo)
380 {
381         enum connect_result r;
382         HANDLE waitHandles[2];
383         DWORD dwWaitResult;
384
385         r = queue_overlapped_connect(server_thread_data, lpo);
386         if (r != CR_CONNECT_PENDING)
387                 return r;
388
389         waitHandles[0] = server_thread_data->server_data->hEventStopRequested;
390         waitHandles[1] = lpo->hEvent;
391
392         dwWaitResult = WaitForMultipleObjects(2, waitHandles, FALSE, INFINITE);
393         switch (dwWaitResult) {
394         case WAIT_OBJECT_0 + 0:
395                 return CR_SHUTDOWN;
396
397         case WAIT_OBJECT_0 + 1:
398                 ResetEvent(lpo->hEvent);
399                 return CR_CONNECTED;
400
401         default:
402                 return CR_WAIT_ERROR;
403         }
404 }
405
406 /*
407  * Forward declare our reply callback function so that any compiler
408  * errors are reported when we actually define the function (in addition
409  * to any errors reported when we try to pass this callback function as
410  * a parameter in a function call).  The former are easier to understand.
411  */
412 static ipc_server_reply_cb do_io_reply_callback;
413
414 /*
415  * Relay application's response message to the client process.
416  * (We do not flush at this point because we allow the caller
417  * to chunk data to the client thru us.)
418  */
static int do_io_reply_callback(struct ipc_server_reply_data *reply_data,
		       const char *response, size_t response_len)
{
	/* Guard against being handed the wrong instance data. */
	if (reply_data->magic != MAGIC_SERVER_REPLY_DATA)
		BUG("reply_cb called with wrong instance data");

	/* No flush here; the application may reply in multiple chunks. */
	return write_packetized_from_buf_no_flush(response, response_len,
						  reply_data->fd);
}
428
429 /*
430  * Receive the request/command from the client and pass it to the
431  * registered request-callback.  The request-callback will compose
432  * a response and call our reply-callback to send it to the client.
433  *
434  * Simple-IPC only contains one round trip, so we flush and close
435  * here after the response.
436  */
437 static int do_io(struct ipc_server_thread_data *server_thread_data)
438 {
439         struct strbuf buf = STRBUF_INIT;
440         struct ipc_server_reply_data reply_data;
441         int ret = 0;
442
443         reply_data.magic = MAGIC_SERVER_REPLY_DATA;
444         reply_data.server_thread_data = server_thread_data;
445
446         reply_data.fd = dup_fd_from_pipe(server_thread_data->hPipe);
447         if (reply_data.fd < 0)
448                 return error(_("could not create fd from pipe for '%s'"),
449                              server_thread_data->server_data->buf_path.buf);
450
451         ret = read_packetized_to_strbuf(
452                 reply_data.fd, &buf,
453                 PACKET_READ_GENTLE_ON_EOF | PACKET_READ_GENTLE_ON_READ_ERROR);
454         if (ret >= 0) {
455                 ret = server_thread_data->server_data->application_cb(
456                         server_thread_data->server_data->application_data,
457                         buf.buf, do_io_reply_callback, &reply_data);
458
459                 packet_flush_gently(reply_data.fd);
460
461                 FlushFileBuffers((HANDLE)_get_osfhandle((reply_data.fd)));
462         }
463         else {
464                 /*
465                  * The client probably disconnected/shutdown before it
466                  * could send a well-formed message.  Ignore it.
467                  */
468         }
469
470         strbuf_release(&buf);
471         close(reply_data.fd);
472
473         return ret;
474 }
475
476 /*
477  * Handle IPC request and response with this connected client.  And reset
478  * the pipe to prepare for the next client.
479  */
480 static int use_connection(struct ipc_server_thread_data *server_thread_data)
481 {
482         int ret;
483
484         ret = do_io(server_thread_data);
485
486         FlushFileBuffers(server_thread_data->hPipe);
487         DisconnectNamedPipe(server_thread_data->hPipe);
488
489         return ret;
490 }
491
492 /*
493  * Thread proc for an IPC server worker thread.  It handles a series of
494  * connections from clients.  It cleans and reuses the hPipe between each
495  * client.
496  */
497 static void *server_thread_proc(void *_server_thread_data)
498 {
499         struct ipc_server_thread_data *server_thread_data = _server_thread_data;
500         HANDLE hEventConnected = INVALID_HANDLE_VALUE;
501         OVERLAPPED oConnect;
502         enum connect_result cr;
503         int ret;
504
505         assert(server_thread_data->hPipe != INVALID_HANDLE_VALUE);
506
507         trace2_thread_start("ipc-server");
508         trace2_data_string("ipc-server", NULL, "pipe",
509                            server_thread_data->server_data->buf_path.buf);
510
511         hEventConnected = CreateEventW(NULL, TRUE, FALSE, NULL);
512
513         memset(&oConnect, 0, sizeof(oConnect));
514         oConnect.hEvent = hEventConnected;
515
516         for (;;) {
517                 cr = wait_for_connection(server_thread_data, &oConnect);
518
519                 switch (cr) {
520                 case CR_SHUTDOWN:
521                         goto finished;
522
523                 case CR_CONNECTED:
524                         ret = use_connection(server_thread_data);
525                         if (ret == SIMPLE_IPC_QUIT) {
526                                 ipc_server_stop_async(
527                                         server_thread_data->server_data);
528                                 goto finished;
529                         }
530                         if (ret > 0) {
531                                 /*
532                                  * Ignore (transient) IO errors with this
533                                  * client and reset for the next client.
534                                  */
535                         }
536                         break;
537
538                 case CR_CONNECT_PENDING:
539                         /* By construction, this should not happen. */
540                         BUG("ipc-server[%s]: unexpeced CR_CONNECT_PENDING",
541                             server_thread_data->server_data->buf_path.buf);
542
543                 case CR_CONNECT_ERROR:
544                 case CR_WAIT_ERROR:
545                         /*
546                          * Ignore these theoretical errors.
547                          */
548                         DisconnectNamedPipe(server_thread_data->hPipe);
549                         break;
550
551                 default:
552                         BUG("unandled case after wait_for_connection");
553                 }
554         }
555
556 finished:
557         CloseHandle(server_thread_data->hPipe);
558         CloseHandle(hEventConnected);
559
560         trace2_thread_exit();
561         return NULL;
562 }
563
564 static HANDLE create_new_pipe(wchar_t *wpath, int is_first)
565 {
566         HANDLE hPipe;
567         DWORD dwOpenMode, dwPipeMode;
568         LPSECURITY_ATTRIBUTES lpsa = NULL;
569
570         dwOpenMode = PIPE_ACCESS_INBOUND | PIPE_ACCESS_OUTBOUND |
571                 FILE_FLAG_OVERLAPPED;
572
573         dwPipeMode = PIPE_TYPE_MESSAGE | PIPE_READMODE_BYTE | PIPE_WAIT |
574                 PIPE_REJECT_REMOTE_CLIENTS;
575
576         if (is_first) {
577                 dwOpenMode |= FILE_FLAG_FIRST_PIPE_INSTANCE;
578
579                 /*
580                  * On Windows, the first server pipe instance gets to
581                  * set the ACL / Security Attributes on the named
582                  * pipe; subsequent instances inherit and cannot
583                  * change them.
584                  *
585                  * TODO Should we allow the application layer to
586                  * specify security attributes, such as `LocalService`
587                  * or `LocalSystem`, when we create the named pipe?
588                  * This question is probably not important when the
589                  * daemon is started by a foreground user process and
590                  * only needs to talk to the current user, but may be
591                  * if the daemon is run via the Control Panel as a
592                  * System Service.
593                  */
594         }
595
596         hPipe = CreateNamedPipeW(wpath, dwOpenMode, dwPipeMode,
597                                  PIPE_UNLIMITED_INSTANCES, 1024, 1024, 0, lpsa);
598
599         return hPipe;
600 }
601
/*
 * Start an ipc-server on `path`: reserve the named pipe by creating the
 * first instance, then spawn a pool of worker threads (at least one),
 * each with its own pipe instance.
 *
 * Returns 0 on success with `*returned_server_data` set; -1 with
 * errno=EINVAL for an invalid path; -2 with errno=EADDRINUSE when the
 * pipe name is already taken (presumably by another server).
 */
int ipc_server_run_async(struct ipc_server_data **returned_server_data,
			 const char *path, const struct ipc_server_opts *opts,
			 ipc_server_application_cb *application_cb,
			 void *application_data)
{
	struct ipc_server_data *server_data;
	wchar_t wpath[MAX_PATH];
	HANDLE hPipeFirst = INVALID_HANDLE_VALUE;
	int k;
	int ret = 0;
	int nr_threads = opts->nr_threads;

	*returned_server_data = NULL;

	ret = initialize_pipe_name(path, wpath, ARRAY_SIZE(wpath));
	if (ret < 0) {
		errno = EINVAL;
		return -1;
	}

	/*
	 * Create the first instance up front (FILE_FLAG_FIRST_PIPE_INSTANCE)
	 * so that a failure here tells us the name is already in use.
	 */
	hPipeFirst = create_new_pipe(wpath, 1);
	if (hPipeFirst == INVALID_HANDLE_VALUE) {
		errno = EADDRINUSE;
		return -2;
	}

	server_data = xcalloc(1, sizeof(*server_data));
	server_data->magic = MAGIC_SERVER_DATA;
	server_data->application_cb = application_cb;
	server_data->application_data = application_data;
	server_data->hEventStopRequested = CreateEvent(NULL, TRUE, FALSE, NULL);
	strbuf_init(&server_data->buf_path, 0);
	strbuf_addstr(&server_data->buf_path, path);
	wcscpy(server_data->wpath, wpath);

	if (nr_threads < 1)
		nr_threads = 1;

	for (k = 0; k < nr_threads; k++) {
		struct ipc_server_thread_data *std;

		std = xcalloc(1, sizeof(*std));
		std->magic = MAGIC_SERVER_THREAD_DATA;
		std->server_data = server_data;
		std->hPipe = INVALID_HANDLE_VALUE;

		/* Thread 0 reuses the instance created above. */
		std->hPipe = (k == 0)
			? hPipeFirst
			: create_new_pipe(server_data->wpath, 0);

		if (std->hPipe == INVALID_HANDLE_VALUE) {
			/*
			 * If we've reached a pipe instance limit for
			 * this path, just use fewer threads.
			 */
			free(std);
			break;
		}

		if (pthread_create(&std->pthread_id, NULL,
				   server_thread_proc, std)) {
			/*
			 * Likewise, if we're out of threads, just use
			 * fewer threads than requested.
			 *
			 * However, we just give up if we can't even get
			 * one thread.  This should not happen.
			 */
			if (k == 0)
				die(_("could not start thread[0] for '%s'"),
				    path);

			CloseHandle(std->hPipe);
			free(std);
			break;
		}

		/* Thread started; the worker now owns std->hPipe. */
		std->next_thread = server_data->thread_list;
		server_data->thread_list = std;
	}

	*returned_server_data = server_data;
	return 0;
}
686
687 int ipc_server_stop_async(struct ipc_server_data *server_data)
688 {
689         if (!server_data)
690                 return 0;
691
692         /*
693          * Gently tell all of the ipc_server threads to shutdown.
694          * This will be seen the next time they are idle (and waiting
695          * for a connection).
696          *
697          * We DO NOT attempt to force them to drop an active connection.
698          */
699         SetEvent(server_data->hEventStopRequested);
700         return 0;
701 }
702
703 int ipc_server_await(struct ipc_server_data *server_data)
704 {
705         DWORD dwWaitResult;
706
707         if (!server_data)
708                 return 0;
709
710         dwWaitResult = WaitForSingleObject(server_data->hEventStopRequested, INFINITE);
711         if (dwWaitResult != WAIT_OBJECT_0)
712                 return error(_("wait for hEvent failed for '%s'"),
713                              server_data->buf_path.buf);
714
715         while (server_data->thread_list) {
716                 struct ipc_server_thread_data *std = server_data->thread_list;
717
718                 pthread_join(std->pthread_id, NULL);
719
720                 server_data->thread_list = std->next_thread;
721                 free(std);
722         }
723
724         server_data->is_stopped = 1;
725
726         return 0;
727 }
728
729 void ipc_server_free(struct ipc_server_data *server_data)
730 {
731         if (!server_data)
732                 return;
733
734         if (!server_data->is_stopped)
735                 BUG("cannot free ipc-server while running for '%s'",
736                     server_data->buf_path.buf);
737
738         strbuf_release(&server_data->buf_path);
739
740         if (server_data->hEventStopRequested != INVALID_HANDLE_VALUE)
741                 CloseHandle(server_data->hEventStopRequested);
742
743         while (server_data->thread_list) {
744                 struct ipc_server_thread_data *std = server_data->thread_list;
745
746                 server_data->thread_list = std->next_thread;
747                 free(std);
748         }
749
750         free(server_data);
751 }