/*
 * RPCSS named pipe server
 *
 * Copyright (C) 2002 Greg Turner
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#include <assert.h>

#include "rpcss.h"
#include "wine/debug.h"

WINE_DEFAULT_DEBUG_CHANNEL(ole);

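/* Module-level state shared by the server threads: the listening end of the
   named pipe, an event signalled whenever a client connects, a critical
   section serializing request processing, a count of live handler threads,
   and a flag indicating whether the server is (still) running. */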
static HANDLE np_server_end;
static HANDLE np_server_work_event;
static CRITICAL_SECTION np_server_cs;
static LONG srv_thread_count;
static BOOL server_live;

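/* Report how many client handler threads are currently alive.  The count is
   incremented when a handler thread is spawned and decremented when it
   exits, so callers can poll it to tell whether work is still in flight. */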
LONG RPCSS_SrvThreadCount(void)
{
  return srv_thread_count;
}

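/* Tear the pipe server back down: mark it dead, close the listening pipe
   and the work event, and delete the critical section created by
   RPCSS_BecomePipeServer. */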
BOOL RPCSS_UnBecomePipeServer(void)
{
  WINE_TRACE("\n");

  WINE_TRACE("shutting down pipe.\n");
  server_live = FALSE;
  if (!CloseHandle(np_server_end))
    WINE_WARN("Failed to close named pipe.\n");
  if (!CloseHandle(np_server_work_event))
    WINE_WARN("Failed to close the event handle.\n");
  DeleteCriticalSection(&np_server_cs);

  return TRUE;
}

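/* Handle a RANMSG notification, sent when another rpcss instance starts up
   while this one already owns the pipe.  Nothing is done with the message
   here; the reply is simply cleared. */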
static void RPCSS_ServerProcessRANMessage(PRPCSS_NP_MESSAGE pMsg, PRPCSS_NP_REPLY pReply)
{
  WINE_TRACE("\n");
  pReply->as_uint = 0;
}

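/* Handle a REGISTEREPMSG request: register the endpoints described in the
   message (the binding data itself travels in the variable-length payload).
   No meaningful reply is produced. */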
static void RPCSS_ServerProcessREGISTEREPMessage(PRPCSS_NP_MESSAGE pMsg, PRPCSS_NP_REPLY pReply,
  char *vardata)
{
  WINE_TRACE("\n");

  RPCSS_RegisterRpcEndpoints(
    pMsg->message.registerepmsg.iface,
    pMsg->message.registerepmsg.object_count,
    pMsg->message.registerepmsg.binding_count,
    pMsg->message.registerepmsg.no_replace,
    vardata,
    pMsg->vardata_payload_size
  );

  /* no reply */
  pReply->as_uint = 0;
}

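/* Handle an UNREGISTEREPMSG request: the mirror image of registration,
   removing the endpoints named in the message and its payload.  Again no
   meaningful reply is produced. */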
static void RPCSS_ServerProcessUNREGISTEREPMessage(PRPCSS_NP_MESSAGE pMsg,
  PRPCSS_NP_REPLY pReply, char *vardata)
{
  WINE_TRACE("\n");

  RPCSS_UnregisterRpcEndpoints(
    pMsg->message.unregisterepmsg.iface,
    pMsg->message.unregisterepmsg.object_count,
    pMsg->message.unregisterepmsg.binding_count,
    vardata,
    pMsg->vardata_payload_size
  );

  /* no reply */
  pReply->as_uint = 0;
}

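/* Handle a RESOLVEEPMSG request: look up the endpoint for the given
   interface/object pair and return it as a string in the reply. */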
static void RPCSS_ServerProcessRESOLVEEPMessage(PRPCSS_NP_MESSAGE pMsg,
  PRPCSS_NP_REPLY pReply, char *vardata)
{
  WINE_TRACE("\n");

  /* for now, the reply is placed into pReply->as_string, on success, by RPCSS_ResolveRpcEndpoints */
  ZeroMemory(pReply->as_string, MAX_RPCSS_NP_REPLY_STRING_LEN);
  RPCSS_ResolveRpcEndpoints(
    pMsg->message.resolveepmsg.iface,
    pMsg->message.resolveepmsg.object,
    vardata,
    pReply->as_string
  );
}

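/* Dispatch one request to the handler matching its message type.  Unknown
   message types are logged and otherwise ignored. */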
static void RPCSS_ServerProcessMessage(PRPCSS_NP_MESSAGE pMsg, PRPCSS_NP_REPLY pReply, char *vardata)
{
  WINE_TRACE("\n");
  switch (pMsg->message_type) {
    case RPCSS_NP_MESSAGE_TYPEID_RANMSG:
      RPCSS_ServerProcessRANMessage(pMsg, pReply);
      break;
    case RPCSS_NP_MESSAGE_TYPEID_REGISTEREPMSG:
      RPCSS_ServerProcessREGISTEREPMessage(pMsg, pReply, vardata);
      break;
    case RPCSS_NP_MESSAGE_TYPEID_UNREGISTEREPMSG:
      RPCSS_ServerProcessUNREGISTEREPMessage(pMsg, pReply, vardata);
      break;
    case RPCSS_NP_MESSAGE_TYPEID_RESOLVEEPMSG:
      RPCSS_ServerProcessRESOLVEEPMessage(pMsg, pReply, vardata);
      break;
    default:
      WINE_ERR("Message type unknown!!  No action taken.\n");
  }
}

/* Each client connection gets its own short-lived thread.  This is it: it
   reads one request (plus any variable-length payload), processes it while
   holding np_server_cs, writes one reply and then exits. */
static DWORD WINAPI HandlerThread(LPVOID lpvPipeHandle)
{
  RPCSS_NP_MESSAGE msg, vardata_payload_msg;
  char *c, *vardata = NULL;
  RPCSS_NP_REPLY reply;
  DWORD bytesread, written;
  BOOL success, had_payload = FALSE;
  HANDLE mypipe;

  mypipe = (HANDLE) lpvPipeHandle;

  WINE_TRACE("mypipe: %p\n", mypipe);

  success = ReadFile(
    mypipe,                   /* pipe handle */
    (char *) &msg,            /* message buffer */
    sizeof(RPCSS_NP_MESSAGE), /* message buffer size */
    &bytesread,               /* receives number of bytes read */
    NULL                      /* not overlapped */
  );

  /* only look at the payload size if the header read actually succeeded */
  if (success && msg.vardata_payload_size) {
    had_payload = TRUE;
    /* this fudge space allows us not to worry about exceeding the buffer space
       on the last read */
    vardata = LocalAlloc(LPTR, (msg.vardata_payload_size) + VARDATA_PAYLOAD_BYTES);
    if (!vardata) {
      WINE_ERR("vardata memory allocation failure.\n");
      success = FALSE;
    } else {
      for ( c = vardata; (c - vardata) < msg.vardata_payload_size;
            c += VARDATA_PAYLOAD_BYTES) {
        success = ReadFile(
          mypipe,
          (char *) &vardata_payload_msg,
          sizeof(RPCSS_NP_MESSAGE),
          &bytesread,
          NULL
        );
        if ( (!success) || (bytesread != sizeof(RPCSS_NP_MESSAGE)) ||
             (vardata_payload_msg.message_type != RPCSS_NP_MESSAGE_TYPEID_VARDATAPAYLOADMSG) ) {
          WINE_ERR("vardata payload read failure! (s=%s,br=%d,mt=%u,mt_exp=%u)\n",
            success ? "TRUE" : "FALSE", bytesread,
            vardata_payload_msg.message_type, RPCSS_NP_MESSAGE_TYPEID_VARDATAPAYLOADMSG);
          success = FALSE;
          break;
        }
        CopyMemory(c, vardata_payload_msg.message.vardatapayloadmsg.payload, VARDATA_PAYLOAD_BYTES);
        WINE_TRACE("payload read.\n");
      }
    }
  }

  if (success && (bytesread == sizeof(RPCSS_NP_MESSAGE))) {
    WINE_TRACE("read success.\n");
    /* process the message and send a reply, serializing requests. */
    EnterCriticalSection(&np_server_cs);
    WINE_TRACE("processing message.\n");
    RPCSS_ServerProcessMessage(&msg, &reply, vardata);
    LeaveCriticalSection(&np_server_cs);

    WINE_TRACE("message processed, sending reply....\n");

    success = WriteFile(
      mypipe,                 /* pipe handle */
      (char *) &reply,        /* reply buffer */
      sizeof(RPCSS_NP_REPLY), /* reply buffer size */
      &written,               /* receives number of bytes written */
      NULL                    /* not overlapped */
    );

    if ( (!success) || (written != sizeof(RPCSS_NP_REPLY)) )
      WINE_WARN("Message reply failed. (success=%d, written=%d)\n", success, written);
    else
      WINE_TRACE("Reply sent successfully.\n");
  } else
    WINE_WARN("Message receipt failed.\n");

  /* free the payload buffer on failure paths too, not just after a
     successful round trip */
  if (had_payload) LocalFree(vardata);

  FlushFileBuffers(mypipe);
  DisconnectNamedPipe(mypipe);
  CloseHandle(mypipe);
  InterlockedDecrement(&srv_thread_count);
  return 0;
}

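/* Main server loop: wait for a client to connect to the current listening
   pipe instance, hand that instance off to a new HandlerThread, then (while
   holding the master mutex) create a fresh pipe instance to listen on. */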
static DWORD WINAPI NPMainWorkThread(LPVOID ignored)
{
  BOOL connected;
  HANDLE hthread, master_mutex = RPCSS_GetMasterMutex();
  DWORD threadid, wait_result;

  WINE_TRACE("\n");

  while (server_live) {
    connected = ConnectNamedPipe(np_server_end, NULL) ?
      TRUE : (GetLastError() == ERROR_PIPE_CONNECTED);

    if (connected) {
      /* is "work" the act of connecting pipes, or the act of serving
         requests successfully?  for now I will make it the former. */
      if (!SetEvent(np_server_work_event))
        WINE_WARN("failed to signal np_server_work_event.\n");

      /* Create a thread for this client.  */
      InterlockedIncrement(&srv_thread_count);
      hthread = CreateThread(
        NULL,                      /* no security attribute */
        0,                         /* default stack size */
        HandlerThread,
        (LPVOID) np_server_end,    /* thread parameter */
        0,                         /* not suspended */
        &threadid                  /* returns thread ID  (not used) */
      );

      if (hthread) {
        WINE_TRACE("Spawned handler thread: %p\n", hthread);
        CloseHandle(hthread);

        /* for safety's sake, hold the mutex while we switch the pipe */

        wait_result = WaitForSingleObject(master_mutex, MASTER_MUTEX_TIMEOUT);

        switch (wait_result) {
          case WAIT_ABANDONED: /* ? */
          case WAIT_OBJECT_0:
            /* we have ownership */
            break;
          case WAIT_FAILED:
          case WAIT_TIMEOUT:
          default:
            /* huh? */
            wait_result = WAIT_FAILED;
        }

        if (wait_result == WAIT_FAILED) {
          WINE_ERR("Couldn't enter master mutex.  Expect problems.\n");
        } else {
          /* now create a new named pipe instance to listen on */
          np_server_end = CreateNamedPipe(
            NAME_RPCSS_NAMED_PIPE,                                 /* pipe name */
            PIPE_ACCESS_DUPLEX,                                    /* pipe open mode */
            PIPE_TYPE_MESSAGE | PIPE_READMODE_MESSAGE | PIPE_WAIT, /* pipe-specific modes */
            PIPE_UNLIMITED_INSTANCES,                              /* maximum instances */
            sizeof(RPCSS_NP_REPLY),                                /* output buffer size */
            sizeof(RPCSS_NP_MESSAGE),                              /* input buffer size */
            2000,                                                  /* time-out interval */
            NULL                                                   /* SD */
          );

          if (np_server_end == INVALID_HANDLE_VALUE) {
            WINE_ERR("Failed to recreate named pipe!\n");
            /* not sure what to do? */
            assert(FALSE);
          }

          if (!ReleaseMutex(master_mutex))
            WINE_ERR("Uh oh.  Couldn't leave master mutex.  Expect deadlock.\n");
        }
      } else {
        WINE_ERR("Failed to spawn handler thread!\n");
        DisconnectNamedPipe(np_server_end);
        InterlockedDecrement(&srv_thread_count);
      }
    }
  }
  WINE_TRACE("Server thread shutdown.\n");
  return 0;
}

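/* Try to open the client side of the rpcss named pipe, holding the master
   mutex across the attempt.  Returns a pipe handle on success, or
   INVALID_HANDLE_VALUE if no server answers. */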
static HANDLE RPCSS_NPConnect(void)
{
  HANDLE the_pipe;
  DWORD dwmode, wait_result;
  HANDLE master_mutex = RPCSS_GetMasterMutex();

  WINE_TRACE("\n");

  while (TRUE) {

    wait_result = WaitForSingleObject(master_mutex, MASTER_MUTEX_TIMEOUT);
    switch (wait_result) {
      case WAIT_ABANDONED:
      case WAIT_OBJECT_0:
        break;
      case WAIT_FAILED:
      case WAIT_TIMEOUT:
      default:
        WINE_ERR("This should never happen: couldn't enter mutex.\n");
        return INVALID_HANDLE_VALUE;
    }

    /* try to open the client side of the named pipe. */
    the_pipe = CreateFileA(
      NAME_RPCSS_NAMED_PIPE,           /* pipe name */
      GENERIC_READ | GENERIC_WRITE,    /* r/w access */
      0,                               /* no sharing */
      NULL,                            /* no security attributes */
      OPEN_EXISTING,                   /* open an existing pipe */
      0,                               /* default attributes */
      NULL                             /* no template file */
    );

    if (the_pipe != INVALID_HANDLE_VALUE)
      break;

    if (GetLastError() != ERROR_PIPE_BUSY) {
      WINE_WARN("Unable to open named pipe %s (assuming unavailable).\n",
        wine_dbgstr_a(NAME_RPCSS_NAMED_PIPE));
      break;
    }

    WINE_WARN("Named pipe busy (will wait)\n");

    if (!ReleaseMutex(master_mutex))
      WINE_ERR("Failed to release master mutex.  Expect deadlock.\n");

    /* wait for the named pipe.  We are only willing to wait
       5 seconds.  It should be available /very/ soon. */
    if (! WaitNamedPipeA(NAME_RPCSS_NAMED_PIPE, MASTER_MUTEX_WAITNAMEDPIPE_TIMEOUT))
    {
      WINE_ERR("Named pipe unavailable after waiting.  Something is probably wrong.\n");
      return INVALID_HANDLE_VALUE;
    }

  }

  if (the_pipe != INVALID_HANDLE_VALUE) {
    dwmode = PIPE_READMODE_MESSAGE;
    /* SetNamedPipeHandleState not implemented ATM, but still seems to work somehow. */
    if (! SetNamedPipeHandleState(the_pipe, &dwmode, NULL, NULL))
      WINE_WARN("Failed to set pipe handle state\n");
  }

  if (!ReleaseMutex(master_mutex))
    WINE_ERR("Uh oh, failed to leave the RPC Master Mutex!\n");

  return the_pipe;
}

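/* Perform one synchronous round trip on an already-connected pipe: write a
   single fixed-size message, then read a single fixed-size reply.  Returns
   TRUE only if both transfers complete in full. */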
static BOOL RPCSS_SendReceiveNPMsg(HANDLE np, PRPCSS_NP_MESSAGE msg, PRPCSS_NP_REPLY reply)
{
  DWORD count;

  WINE_TRACE("(np == %p, msg == %p, reply == %p)\n", np, msg, reply);

  if (! WriteFile(np, msg, sizeof(RPCSS_NP_MESSAGE), &count, NULL)) {
    WINE_ERR("write failed.\n");
    return FALSE;
  }

  if (count != sizeof(RPCSS_NP_MESSAGE)) {
    WINE_ERR("write count mismatch.\n");
    return FALSE;
  }

  if (! ReadFile(np, reply, sizeof(RPCSS_NP_REPLY), &count, NULL)) {
    WINE_ERR("read failed.\n");
    return FALSE;
  }

  if (count != sizeof(RPCSS_NP_REPLY)) {
    WINE_ERR("read count mismatch, got %d.\n", count);
    return FALSE;
  }

  /* message execution was successful */
  return TRUE;
}

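/* Try to become the single rpcss pipe server.  If another instance already
   owns the pipe, notify it that we ran and return FALSE; otherwise create
   the listening pipe, the work event and the critical section, spawn
   NPMainWorkThread, and return TRUE (FALSE if any of that fails). */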
BOOL RPCSS_BecomePipeServer(void)
{
  RPCSS_NP_MESSAGE msg;
  RPCSS_NP_REPLY reply;
  BOOL rslt = TRUE;
  HANDLE client_handle, hthread, master_mutex = RPCSS_GetMasterMutex();
  DWORD threadid, wait_result;

  WINE_TRACE("\n");

  wait_result = WaitForSingleObject(master_mutex, MASTER_MUTEX_TIMEOUT);

  switch (wait_result) {
    case WAIT_ABANDONED: /* ? */
    case WAIT_OBJECT_0:
      /* we have ownership */
      break;
    case WAIT_FAILED:
    case WAIT_TIMEOUT:
    default:
      WINE_ERR("Couldn't enter master mutex.\n");
      return FALSE;
  }

  /* now we have the master mutex.  during this time we will
   *
   *   o check if an rpcss already listens on the pipe.  If so,
   *     we will tell it we were invoked, which will cause the
   *     other end to update its timeouts.  After that, we just
   *     return FALSE.
   *
   *   o otherwise, we establish the pipe for ourselves and get
   *     ready to listen on it
   */

  if ((client_handle = RPCSS_NPConnect()) != INVALID_HANDLE_VALUE) {
    msg.message_type = RPCSS_NP_MESSAGE_TYPEID_RANMSG;
    msg.message.ranmsg.timeout = 1000;
    msg.vardata_payload_size = 0;
    if (!RPCSS_SendReceiveNPMsg(client_handle, &msg, &reply))
      WINE_ERR("Something is amiss: RPCSS_SendReceiveNPMsg failed.\n");
    CloseHandle(client_handle);
    rslt = FALSE;
  }
  if (rslt) {
    np_server_work_event = CreateEventA(NULL, FALSE, FALSE, "RpcNpServerWorkEvent");
    if (np_server_work_event == NULL) {
      /* dunno what we can do then */
      WINE_ERR("Unable to create the np_server_work_event\n");
      assert(FALSE);
    }
    InitializeCriticalSection(&np_server_cs);

    np_server_end = CreateNamedPipe(
      NAME_RPCSS_NAMED_PIPE,                                   /* pipe name */
      PIPE_ACCESS_DUPLEX,                                      /* pipe open mode */
      PIPE_TYPE_MESSAGE | PIPE_READMODE_MESSAGE | PIPE_WAIT,   /* pipe-specific modes */
      PIPE_UNLIMITED_INSTANCES,                                /* maximum number of instances */
      sizeof(RPCSS_NP_REPLY),                                  /* output buffer size */
      sizeof(RPCSS_NP_MESSAGE),                                /* input buffer size */
      2000,                                                    /* time-out interval */
      NULL                                                     /* SD */
    );

    if (np_server_end == INVALID_HANDLE_VALUE) {
      WINE_ERR("Failed to create named pipe!\n");
      DeleteCriticalSection(&np_server_cs);
      if (!CloseHandle(np_server_work_event)) /* we will leak the handle... */
        WINE_WARN("Failed to close np_server_work_event handle!\n");
      np_server_work_event = NULL;
      np_server_end = NULL;
      rslt = FALSE;
    }
  }

  server_live = rslt;

  if (rslt) {
    /* OK, now spawn the (single) server thread */
    hthread = CreateThread(
      NULL,                      /* no security attribute */
      0,                         /* default stack size */
      NPMainWorkThread,
      NULL,                      /* thread parameter */
      0,                         /* not suspended */
      &threadid                  /* returns thread ID  (not used) */
    );
    if (hthread) {
      WINE_TRACE("Created server thread.\n");
      CloseHandle(hthread);
    } else {
      WINE_ERR("Serious error: unable to create server thread!\n");
      if (!CloseHandle(np_server_work_event)) /* we will leak the handle... */
        WINE_WARN("Failed to close np_server_work_event handle!\n");
      if (!CloseHandle(np_server_end)) /* we will leak the handle... */
        WINE_WARN("Unable to close named pipe handle!\n");
      DeleteCriticalSection(&np_server_cs);
      np_server_end = NULL;
      np_server_work_event = NULL;
      rslt = FALSE;
      server_live = FALSE;
    }
  }
  if (!ReleaseMutex(master_mutex))
    WINE_ERR("Unable to leave master mutex!??\n");

  return rslt;
}

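/* Block until either the work event is signalled (a client connected to the
   pipe) or exit_handle becomes signalled.  Returns TRUE if it was the work
   event, FALSE otherwise. */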
BOOL RPCSS_NPDoWork(HANDLE exit_handle)
{
  HANDLE handles[2];
  DWORD waitresult;

  handles[0] = np_server_work_event;
  handles[1] = exit_handle;
  waitresult = WaitForMultipleObjects(2, handles, FALSE, INFINITE);

  if (waitresult == WAIT_OBJECT_0)
    return TRUE;

  return FALSE;
}