programs/rpcss/np_server.c
/*
 * RPCSS named pipe server
 *
 * Copyright (C) 2002 Greg Turner
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <assert.h>

#include "ntstatus.h"
#include "rpcss.h"
#include "wine/debug.h"

WINE_DEFAULT_DEBUG_CHANNEL(ole);

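/* np_server_end is the server-side pipe instance currently being listened on,
   np_server_work_event is signalled each time a client connects, and
   server_live gates the accept loop in NPMainWorkThread below. */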
static HANDLE np_server_end;
static HANDLE np_server_work_event;
static CRITICAL_SECTION np_server_cs;
static LONG srv_thread_count;
static BOOL server_live;

LONG RPCSS_SrvThreadCount(void)
{
  return srv_thread_count;
}

BOOL RPCSS_UnBecomePipeServer(void)
{
  BOOL rslt = TRUE;
  DWORD wait_result;
  HANDLE master_mutex = RPCSS_GetMasterMutex();

  WINE_TRACE("\n");

  wait_result = WaitForSingleObject(master_mutex, MASTER_MUTEX_TIMEOUT);

  switch (wait_result) {
    case WAIT_ABANDONED: /* ? */
    case WAIT_OBJECT_0:
      /* we have ownership */
      break;
    case WAIT_FAILED:
    case WAIT_TIMEOUT:
    default:
      WINE_ERR("This should never happen: couldn't enter mutex.\n");
      /* this is totally unacceptable.  no graceful out exists */
      assert(FALSE);
  }

  /* now that we have the master mutex, we can safely stop
     listening on the pipe.  Before we proceed, we do a final
     check that it's OK to shut down to ensure atomicity */

  if (!RPCSS_ReadyToDie())
    rslt = FALSE;
  else {
    WINE_TRACE("shutting down pipe.\n");
    server_live = FALSE;
    if (!CloseHandle(np_server_end))
      WINE_WARN("Failed to close named pipe.\n");
    if (!CloseHandle(np_server_work_event))
      WINE_WARN("Failed to close the event handle.\n");
    DeleteCriticalSection(&np_server_cs);
  }

  if (!ReleaseMutex(master_mutex))
    WINE_ERR("Unable to leave master mutex!??\n");

  return rslt;
}

void RPCSS_ServerProcessRANMessage(PRPCSS_NP_MESSAGE pMsg, PRPCSS_NP_REPLY pReply)
{
  WINE_TRACE("\n");
  /* we do absolutely nothing, but on the server end,
     the lazy timeout is reset as a result of our connection. */
  RPCSS_SetMaxLazyTimeout(pMsg->message.ranmsg.timeout);
  RPCSS_SetLazyTimeRemaining(RPCSS_GetMaxLazyTimeout());
  pReply->as_uint = 0;
}

void RPCSS_ServerProcessREGISTEREPMessage(PRPCSS_NP_MESSAGE pMsg, PRPCSS_NP_REPLY pReply,
  char *vardata)
{
  WINE_TRACE("\n");

  RPCSS_RegisterRpcEndpoints(
    pMsg->message.registerepmsg.iface,
    pMsg->message.registerepmsg.object_count,
    pMsg->message.registerepmsg.binding_count,
    pMsg->message.registerepmsg.no_replace,
    vardata,
    pMsg->vardata_payload_size
  );

  /* no reply */
  pReply->as_uint = 0;
}

void RPCSS_ServerProcessUNREGISTEREPMessage(PRPCSS_NP_MESSAGE pMsg,
  PRPCSS_NP_REPLY pReply, char *vardata)
{
  WINE_TRACE("\n");

  RPCSS_UnregisterRpcEndpoints(
    pMsg->message.unregisterepmsg.iface,
    pMsg->message.unregisterepmsg.object_count,
    pMsg->message.unregisterepmsg.binding_count,
    vardata,
    pMsg->vardata_payload_size
  );

  /* no reply */
  pReply->as_uint = 0;
}

void RPCSS_ServerProcessRESOLVEEPMessage(PRPCSS_NP_MESSAGE pMsg,
  PRPCSS_NP_REPLY pReply, char *vardata)
{
  WINE_TRACE("\n");

  /* for now, the reply is placed into pReply->as_string, on success, by RPCSS_ResolveRpcEndpoints */
  ZeroMemory(pReply->as_string, MAX_RPCSS_NP_REPLY_STRING_LEN);
  RPCSS_ResolveRpcEndpoints(
    pMsg->message.resolveepmsg.iface,
    pMsg->message.resolveepmsg.object,
    vardata,
    pReply->as_string
  );
}

void RPCSS_ServerProcessMessage(PRPCSS_NP_MESSAGE pMsg, PRPCSS_NP_REPLY pReply, char *vardata)
{
  WINE_TRACE("\n");
  switch (pMsg->message_type) {
    case RPCSS_NP_MESSAGE_TYPEID_RANMSG:
      RPCSS_ServerProcessRANMessage(pMsg, pReply);
      break;
    case RPCSS_NP_MESSAGE_TYPEID_REGISTEREPMSG:
      RPCSS_ServerProcessREGISTEREPMessage(pMsg, pReply, vardata);
      break;
    case RPCSS_NP_MESSAGE_TYPEID_UNREGISTEREPMSG:
      RPCSS_ServerProcessUNREGISTEREPMessage(pMsg, pReply, vardata);
      break;
    case RPCSS_NP_MESSAGE_TYPEID_RESOLVEEPMSG:
      RPCSS_ServerProcessRESOLVEEPMessage(pMsg, pReply, vardata);
      break;
    default:
      WINE_ERR("Message type unknown!!  No action taken.\n");
  }
}

/* each message gets its own thread.  this is it. */
VOID HandlerThread(LPVOID lpvPipeHandle)
{
  RPCSS_NP_MESSAGE msg, vardata_payload_msg;
  char *c, *vardata = NULL;
  RPCSS_NP_REPLY reply;
  DWORD bytesread, written;
  BOOL success, had_payload = FALSE;
  HANDLE mypipe;

  mypipe = (HANDLE) lpvPipeHandle;

  WINE_TRACE("mypipe: %p\n", mypipe);

  success = ReadFile(
    mypipe,                   /* pipe handle */
    (char *) &msg,            /* message buffer */
    sizeof(RPCSS_NP_MESSAGE), /* message buffer size */
    &bytesread,               /* receives number of bytes read */
    NULL                      /* not overlapped */
  );

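  /* any variable-length data arrives as a series of follow-up messages of type
     RPCSS_NP_MESSAGE_TYPEID_VARDATAPAYLOADMSG, each carrying VARDATA_PAYLOAD_BYTES
     of payload; the loop below reassembles them into a single buffer. */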
  if (msg.vardata_payload_size) {
    had_payload = TRUE;
    /* this fudge space allows us not to worry about exceeding the buffer space
       on the last read */
    vardata = LocalAlloc(LPTR, (msg.vardata_payload_size) + VARDATA_PAYLOAD_BYTES);
    if (!vardata) {
      WINE_ERR("vardata memory allocation failure.\n");
      success = FALSE;
    } else {
      for ( c = vardata; (c - vardata) < msg.vardata_payload_size;
            c += VARDATA_PAYLOAD_BYTES) {
        success = ReadFile(
          mypipe,
          (char *) &vardata_payload_msg,
          sizeof(RPCSS_NP_MESSAGE),
          &bytesread,
          NULL
        );
        if ( (!success) || (bytesread != sizeof(RPCSS_NP_MESSAGE)) ||
             (vardata_payload_msg.message_type != RPCSS_NP_MESSAGE_TYPEID_VARDATAPAYLOADMSG) ) {
          WINE_ERR("vardata payload read failure! (s=%s,br=%ld,exp_br=%d,mt=%u,mt_exp=%u)\n",
            success ? "TRUE" : "FALSE", bytesread, (int)sizeof(RPCSS_NP_MESSAGE),
            vardata_payload_msg.message_type, RPCSS_NP_MESSAGE_TYPEID_VARDATAPAYLOADMSG);
          success = FALSE;
          break;
        }
        CopyMemory(c, vardata_payload_msg.message.vardatapayloadmsg.payload, VARDATA_PAYLOAD_BYTES);
        WINE_TRACE("payload read.\n");
      }
    }
  }

  if (success && (bytesread == sizeof(RPCSS_NP_MESSAGE))) {
    WINE_TRACE("read success.\n");
    /* process the message and send a reply, serializing requests. */
    EnterCriticalSection(&np_server_cs);
    WINE_TRACE("processing message.\n");
    RPCSS_ServerProcessMessage(&msg, &reply, vardata);
    LeaveCriticalSection(&np_server_cs);

    if (had_payload) LocalFree(vardata);

    WINE_TRACE("message processed, sending reply....\n");

    success = WriteFile(
      mypipe,                 /* pipe handle */
      (char *) &reply,        /* reply buffer */
      sizeof(RPCSS_NP_REPLY), /* reply buffer size */
      &written,               /* receives number of bytes written */
      NULL                    /* not overlapped */
    );

    if ( (!success) || (written != sizeof(RPCSS_NP_REPLY)) )
      WINE_WARN("Message reply failed. (success=%s, written=%ld, expected=%d)\n",
        success ? "TRUE" : "FALSE", written, (int)sizeof(RPCSS_NP_REPLY));
    else
      WINE_TRACE("Reply sent successfully.\n");
  } else
    WINE_WARN("Message receipt failed.\n");

  FlushFileBuffers(mypipe);
  DisconnectNamedPipe(mypipe);
  CloseHandle(mypipe);
  InterlockedDecrement(&srv_thread_count);
}

VOID NPMainWorkThread(LPVOID ignored)
{
  BOOL connected;
  HANDLE hthread, master_mutex = RPCSS_GetMasterMutex();
  DWORD threadid, wait_result;

  WINE_TRACE("\n");

  while (server_live) {
    connected = ConnectNamedPipe(np_server_end, NULL) ?
      TRUE : (GetLastError() == ERROR_PIPE_CONNECTED);

    if (connected) {
      /* is "work" the act of connecting pipes, or the act of serving
         requests successfully?  for now I will make it the former. */
      if (!SetEvent(np_server_work_event))
        WINE_WARN("failed to signal np_server_work_event.\n");

      /* Create a thread for this client.  */
      InterlockedIncrement(&srv_thread_count);
      hthread = CreateThread(
        NULL,                      /* no security attribute */
        0,                         /* default stack size */
        (LPTHREAD_START_ROUTINE) HandlerThread,
        (LPVOID) np_server_end,    /* thread parameter */
        0,                         /* not suspended */
        &threadid                  /* returns thread ID  (not used) */
      );

      if (hthread) {
        WINE_TRACE("Spawned handler thread: %p\n", hthread);
        CloseHandle(hthread);

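        /* the connected pipe instance now belongs to the handler thread,
           which will disconnect and close it, so a fresh instance must be
           created below before the next ConnectNamedPipe call. */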
        /* for safety's sake, hold the mutex while we switch the pipe */

        wait_result = WaitForSingleObject(master_mutex, MASTER_MUTEX_TIMEOUT);

        switch (wait_result) {
          case WAIT_ABANDONED: /* ? */
          case WAIT_OBJECT_0:
            /* we have ownership */
            break;
          case WAIT_FAILED:
          case WAIT_TIMEOUT:
          default:
            /* huh? */
            wait_result = WAIT_FAILED;
        }

        if (wait_result == WAIT_FAILED) {
          WINE_ERR("Couldn't enter master mutex.  Expect problems.\n");
        } else {
          /* now create a new named pipe instance to listen on */
          np_server_end = CreateNamedPipe(
            NAME_RPCSS_NAMED_PIPE,                                 /* pipe name */
            PIPE_ACCESS_DUPLEX,                                    /* pipe open mode */
            PIPE_TYPE_MESSAGE | PIPE_READMODE_MESSAGE | PIPE_WAIT, /* pipe-specific modes */
            PIPE_UNLIMITED_INSTANCES,                              /* maximum instances */
            sizeof(RPCSS_NP_REPLY),                                /* output buffer size */
            sizeof(RPCSS_NP_MESSAGE),                              /* input buffer size */
            2000,                                                  /* time-out interval */
            NULL                                                   /* SD */
          );

          if (np_server_end == INVALID_HANDLE_VALUE) {
            WINE_ERR("Failed to recreate named pipe!\n");
            /* not sure what to do? */
            assert(FALSE);
          }

          if (!ReleaseMutex(master_mutex))
            WINE_ERR("Uh oh.  Couldn't leave master mutex.  Expect deadlock.\n");
        }
      } else {
        WINE_ERR("Failed to spawn handler thread!\n");
        DisconnectNamedPipe(np_server_end);
        InterlockedDecrement(&srv_thread_count);
      }
    }
  }
  WINE_TRACE("Server thread shutdown.\n");
}

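/* Client-side helper: open the client end of the rpcss control pipe while
   holding the master mutex, retrying as long as the pipe is merely busy.
   Returns the open handle, or NULL if no server appears to be listening. */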
HANDLE RPCSS_NPConnect(void)
{
  HANDLE the_pipe = NULL;
  DWORD dwmode, wait_result;
  HANDLE master_mutex = RPCSS_GetMasterMutex();

  WINE_TRACE("\n");

  while (TRUE) {

    wait_result = WaitForSingleObject(master_mutex, MASTER_MUTEX_TIMEOUT);
    switch (wait_result) {
      case WAIT_ABANDONED:
      case WAIT_OBJECT_0:
        break;
      case WAIT_FAILED:
      case WAIT_TIMEOUT:
      default:
        WINE_ERR("This should never happen: couldn't enter mutex.\n");
        return NULL;
    }

    /* try to open the client side of the named pipe. */
    the_pipe = CreateFileA(
      NAME_RPCSS_NAMED_PIPE,           /* pipe name */
      GENERIC_READ | GENERIC_WRITE,    /* r/w access */
      0,                               /* no sharing */
      NULL,                            /* no security attributes */
      OPEN_EXISTING,                   /* open an existing pipe */
      0,                               /* default attributes */
      NULL                             /* no template file */
    );

    if (the_pipe != INVALID_HANDLE_VALUE)
      break;

    if (GetLastError() != ERROR_PIPE_BUSY) {
      WINE_WARN("Unable to open named pipe %s (assuming unavailable).\n",
        wine_dbgstr_a(NAME_RPCSS_NAMED_PIPE));
      the_pipe = NULL;
      break;
    }

    WINE_WARN("Named pipe busy (will wait)\n");

    if (!ReleaseMutex(master_mutex))
      WINE_ERR("Failed to release master mutex.  Expect deadlock.\n");

    /* wait for the named pipe.  We are only
       willing to wait 5 seconds.  It should be available /very/ soon. */
    if (! WaitNamedPipeA(NAME_RPCSS_NAMED_PIPE, MASTER_MUTEX_WAITNAMEDPIPE_TIMEOUT))
    {
      WINE_ERR("Named pipe unavailable after waiting.  Something is probably wrong.\n");
      return NULL;
    }

  }

  if (the_pipe) {
    dwmode = PIPE_READMODE_MESSAGE;
    /* SetNamedPipeHandleState not implemented ATM, but still seems to work somehow. */
    if (! SetNamedPipeHandleState(the_pipe, &dwmode, NULL, NULL))
      WINE_WARN("Failed to set pipe handle state\n");
  }

  if (!ReleaseMutex(master_mutex))
    WINE_ERR("Uh oh, failed to leave the RPC Master Mutex!\n");

  return the_pipe;
}

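/* Exchange one fixed-size message with the running rpcss server: write a
   single RPCSS_NP_MESSAGE, then block until the matching RPCSS_NP_REPLY is
   read back.  Any short read or write is treated as failure. */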
BOOL RPCSS_SendReceiveNPMsg(HANDLE np, PRPCSS_NP_MESSAGE msg, PRPCSS_NP_REPLY reply)
{
  DWORD count;

  WINE_TRACE("(np == %p, msg == %p, reply == %p)\n", np, msg, reply);

  if (! WriteFile(np, msg, sizeof(RPCSS_NP_MESSAGE), &count, NULL)) {
    WINE_ERR("write failed.\n");
    return FALSE;
  }

  if (count != sizeof(RPCSS_NP_MESSAGE)) {
    WINE_ERR("write count mismatch.\n");
    return FALSE;
  }

  if (! ReadFile(np, reply, sizeof(RPCSS_NP_REPLY), &count, NULL)) {
    WINE_ERR("read failed.\n");
    return FALSE;
  }

  if (count != sizeof(RPCSS_NP_REPLY)) {
    WINE_ERR("read count mismatch. got %ld, expected %u.\n", count, (unsigned int)sizeof(RPCSS_NP_REPLY));
    return FALSE;
  }

  /* message execution was successful */
  return TRUE;
}

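/* Decide whether this process becomes the pipe server.  If another rpcss is
   already listening, send it a RANMSG (which refreshes its lazy timeout) and
   return FALSE; otherwise create the named pipe, the work event and the
   server thread, and return TRUE. */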
BOOL RPCSS_BecomePipeServer(void)
{
  RPCSS_NP_MESSAGE msg;
  RPCSS_NP_REPLY reply;
  BOOL rslt = TRUE;
  HANDLE client_handle, hthread, master_mutex = RPCSS_GetMasterMutex();
  DWORD threadid, wait_result;

  WINE_TRACE("\n");

  wait_result = WaitForSingleObject(master_mutex, MASTER_MUTEX_TIMEOUT);

  switch (wait_result) {
    case WAIT_ABANDONED: /* ? */
    case WAIT_OBJECT_0:
      /* we have ownership */
      break;
    case WAIT_FAILED:
    case WAIT_TIMEOUT:
    default:
      WINE_ERR("Couldn't enter master mutex.\n");
      return FALSE;
  }

  /* now we have the master mutex.  during this time we will
   *
   *   o check if an rpcss already listens on the pipe.  If so,
   *     we will tell it we were invoked, which will cause the
   *     other end to update its timeouts.  After, we just return
   *     false.
   *
   *   o otherwise, we establish the pipe for ourselves and get
   *     ready to listen on it
   */

  if ((client_handle = RPCSS_NPConnect()) != NULL) {
    msg.message_type = RPCSS_NP_MESSAGE_TYPEID_RANMSG;
    msg.message.ranmsg.timeout = RPCSS_GetMaxLazyTimeout();
    msg.vardata_payload_size = 0;
    if (!RPCSS_SendReceiveNPMsg(client_handle, &msg, &reply))
      WINE_ERR("Something is amiss: RPCSS_SendReceiveNPMsg failed.\n");
    rslt = FALSE;
  }
  if (rslt) {
    np_server_work_event = CreateEventA(NULL, FALSE, FALSE, "RpcNpServerWorkEvent");
    if (np_server_work_event == NULL) {
      /* dunno what we can do then */
      WINE_ERR("Unable to create the np_server_work_event\n");
      assert(FALSE);
    }
    InitializeCriticalSection(&np_server_cs);

    np_server_end = CreateNamedPipe(
      NAME_RPCSS_NAMED_PIPE,                                   /* pipe name */
      PIPE_ACCESS_DUPLEX,                                      /* pipe open mode */
      PIPE_TYPE_MESSAGE | PIPE_READMODE_MESSAGE | PIPE_WAIT,   /* pipe-specific modes */
      PIPE_UNLIMITED_INSTANCES,                                /* maximum number of instances */
      sizeof(RPCSS_NP_REPLY),                                  /* output buffer size */
      sizeof(RPCSS_NP_MESSAGE),                                /* input buffer size */
      2000,                                                    /* time-out interval */
      NULL                                                     /* SD */
    );

    if (np_server_end == INVALID_HANDLE_VALUE) {
      WINE_ERR("Failed to create named pipe!\n");
      DeleteCriticalSection(&np_server_cs);
      if (!CloseHandle(np_server_work_event)) /* we will leak the handle... */
        WINE_WARN("Failed to close np_server_work_event handle!\n");
      np_server_work_event = NULL;
      np_server_end = NULL;
      rslt = FALSE;
    }
  }

  server_live = rslt;

  if (rslt) {
    /* OK, now spawn the (single) server thread */
    hthread = CreateThread(
      NULL,                      /* no security attribute */
      0,                         /* default stack size */
      (LPTHREAD_START_ROUTINE) NPMainWorkThread,
      NULL,                      /* thread parameter */
      0,                         /* not suspended */
      &threadid                  /* returns thread ID  (not used) */
    );
    if (hthread) {
      WINE_TRACE("Created server thread.\n");
      CloseHandle(hthread);
    } else {
      WINE_ERR("Serious error: unable to create server thread!\n");
      if (!CloseHandle(np_server_work_event)) /* we will leak the handle... */
        WINE_WARN("Failed to close np_server_work_event handle!\n");
      if (!CloseHandle(np_server_end)) /* we will leak the handle... */
        WINE_WARN("Unable to close named pipe handle!\n");
      DeleteCriticalSection(&np_server_cs);
      np_server_end = NULL;
      np_server_work_event = NULL;
      rslt = FALSE;
      server_live = FALSE;
    }
  }
  if (!ReleaseMutex(master_mutex))
    WINE_ERR("Unable to leave master mutex!??\n");

  return rslt;
}

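/* Poll for recent pipe activity: returns TRUE if a client connected (the
   work event was signalled) within the one-second wait, FALSE otherwise. */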
BOOL RPCSS_NPDoWork(void)
{
  DWORD waitresult = WaitForSingleObject(np_server_work_event, 1000);

  if (waitresult == WAIT_TIMEOUT)
    return FALSE;
  if (waitresult == WAIT_OBJECT_0)
    return TRUE;

  return FALSE;
}