rpcss: Named pipe server implementation.
[wine] / programs / rpcss / np_server.c
1 /*
2  * RPCSS named pipe server
3  *
4  * Copyright (C) 2002 Greg Turner
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, write to the Free Software
18  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19  */
20
21 #include <assert.h>
22
23 #include "rpcss.h"
24 #include "wine/debug.h"
25
26 WINE_DEFAULT_DEBUG_CHANNEL(ole);
27
/* Server-side state shared by the accept loop and the handler threads. */
static HANDLE np_server_end;          /* listening end of the named pipe */
static HANDLE np_server_work_event;   /* signalled each time a client connects */
static CRITICAL_SECTION np_server_cs; /* serializes message processing */
static LONG srv_thread_count;         /* number of live HandlerThread instances */
static BOOL server_live;              /* accept loop keeps running while TRUE */
33
/* Return the number of handler threads currently servicing clients
 * (incremented when a client connects, decremented when its
 * HandlerThread finishes). */
LONG RPCSS_SrvThreadCount(void)
{
  return srv_thread_count;
}
38
39 BOOL RPCSS_UnBecomePipeServer(void)
40 {
41   BOOL rslt = TRUE;
42   DWORD wait_result;
43   HANDLE master_mutex = RPCSS_GetMasterMutex();
44
45   WINE_TRACE("\n");
46
47   wait_result = WaitForSingleObject(master_mutex, MASTER_MUTEX_TIMEOUT);
48
49   switch (wait_result) {
50     case WAIT_ABANDONED: /* ? */
51     case WAIT_OBJECT_0:
52       /* we have ownership */
53       break;
54     case WAIT_FAILED:
55     case WAIT_TIMEOUT:
56     default: 
57       WINE_ERR("This should never happen: couldn't enter mutex.\n");
58       /* this is totally unacceptable.  no graceful out exists */
59       assert(FALSE);
60   }
61
62   /* now that we have the master mutex, we can safely stop
63      listening on the pipe.  Before we proceed, we do a final
64      check that it's OK to shut down to ensure atomicity */
65
66   if (!RPCSS_ReadyToDie())
67     rslt = FALSE;
68   else {
69     WINE_TRACE("shutting down pipe.\n");
70     server_live = FALSE;
71     if (!CloseHandle(np_server_end))
72       WINE_WARN("Failed to close named pipe.\n");
73     if (!CloseHandle(np_server_work_event))
74       WINE_WARN("Failed to close the event handle.\n");
75     DeleteCriticalSection(&np_server_cs);
76   }
77
78   if (!ReleaseMutex(master_mutex))
79     WINE_ERR("Unable to leave master mutex!??\n");
80
81   return rslt;
82 }
83
84 static void RPCSS_ServerProcessRANMessage(PRPCSS_NP_MESSAGE pMsg, PRPCSS_NP_REPLY pReply)
85 {
86   WINE_TRACE("\n");
87   /* we do absolutely nothing, but on the server end,
88      the lazy timeout is reset as a result of our connection. */
89   RPCSS_SetMaxLazyTimeout(pMsg->message.ranmsg.timeout);
90   RPCSS_SetLazyTimeRemaining(RPCSS_GetMaxLazyTimeout());
91   pReply->as_uint = 0;
92 }
93
94 static void RPCSS_ServerProcessREGISTEREPMessage(PRPCSS_NP_MESSAGE pMsg, PRPCSS_NP_REPLY pReply,
95   char *vardata)
96 {
97   WINE_TRACE("\n");
98
99   RPCSS_RegisterRpcEndpoints(
100     pMsg->message.registerepmsg.iface, 
101     pMsg->message.registerepmsg.object_count, 
102     pMsg->message.registerepmsg.binding_count, 
103     pMsg->message.registerepmsg.no_replace, 
104     vardata, 
105     pMsg->vardata_payload_size
106   );
107
108   /* no reply */
109   pReply->as_uint = 0;
110 }
111
112 static void RPCSS_ServerProcessUNREGISTEREPMessage(PRPCSS_NP_MESSAGE pMsg,
113   PRPCSS_NP_REPLY pReply, char *vardata)
114 {
115   WINE_TRACE("\n");
116
117   RPCSS_UnregisterRpcEndpoints(
118     pMsg->message.unregisterepmsg.iface,
119     pMsg->message.unregisterepmsg.object_count,
120     pMsg->message.unregisterepmsg.binding_count,
121     vardata,
122     pMsg->vardata_payload_size
123   );
124
125   /* no reply */
126   pReply->as_uint = 0;
127 }
128
129 static void RPCSS_ServerProcessRESOLVEEPMessage(PRPCSS_NP_MESSAGE pMsg,
130   PRPCSS_NP_REPLY pReply, char *vardata)
131 {
132   WINE_TRACE("\n");
133
134   /* for now, reply is placed into *pReply.as_string, on success, by RPCSS_ResolveRpcEndpoints */
135   ZeroMemory(pReply->as_string, MAX_RPCSS_NP_REPLY_STRING_LEN);
136   RPCSS_ResolveRpcEndpoints(
137     pMsg->message.resolveepmsg.iface,
138     pMsg->message.resolveepmsg.object,
139     vardata,
140     pReply->as_string
141   );
142 }
143
144 static void RPCSS_ServerProcessMessage(PRPCSS_NP_MESSAGE pMsg, PRPCSS_NP_REPLY pReply, char *vardata)
145 {
146   WINE_TRACE("\n");
147   switch (pMsg->message_type) {
148     case RPCSS_NP_MESSAGE_TYPEID_RANMSG:
149       RPCSS_ServerProcessRANMessage(pMsg, pReply);
150       break;
151     case RPCSS_NP_MESSAGE_TYPEID_REGISTEREPMSG:
152       RPCSS_ServerProcessREGISTEREPMessage(pMsg, pReply, vardata);
153       break;
154     case RPCSS_NP_MESSAGE_TYPEID_UNREGISTEREPMSG:
155       RPCSS_ServerProcessUNREGISTEREPMessage(pMsg, pReply, vardata);
156       break;
157     case RPCSS_NP_MESSAGE_TYPEID_RESOLVEEPMSG:
158       RPCSS_ServerProcessRESOLVEEPMessage(pMsg, pReply, vardata);
159       break;
160     default:
161       WINE_ERR("Message type unknown!!  No action taken.\n");
162   }
163 }
164
165 /* each message gets its own thread.  this is it. */
166 static VOID HandlerThread(LPVOID lpvPipeHandle)
167 {
168   RPCSS_NP_MESSAGE msg, vardata_payload_msg;
169   char *c, *vardata = NULL;
170   RPCSS_NP_REPLY reply;
171   DWORD bytesread, written;
172   BOOL success, had_payload = FALSE;
173   HANDLE mypipe;
174
175   mypipe = (HANDLE) lpvPipeHandle;
176
177   WINE_TRACE("mypipe: %p\n", mypipe);
178
179   success = ReadFile(
180     mypipe,                   /* pipe handle */
181     (char *) &msg,            /* message buffer */
182     sizeof(RPCSS_NP_MESSAGE), /* message buffer size */
183     &bytesread,               /* receives number of bytes read */
184     NULL                      /* not overlapped */
185   );
186
187   if (msg.vardata_payload_size) {
188     had_payload = TRUE;
189     /* this fudge space allows us not to worry about exceeding the buffer space
190        on the last read */
191     vardata = LocalAlloc(LPTR, (msg.vardata_payload_size) + VARDATA_PAYLOAD_BYTES);
192     if (!vardata) {
193       WINE_ERR("vardata memory allocation failure.\n");
194       success = FALSE;
195     } else {
196       for ( c = vardata; (c - vardata) < msg.vardata_payload_size; 
197             c += VARDATA_PAYLOAD_BYTES) {
198         success = ReadFile(
199           mypipe,
200           (char *) &vardata_payload_msg,
201           sizeof(RPCSS_NP_MESSAGE),
202           &bytesread,
203           NULL
204         );
205         if ( (!success) || (bytesread != sizeof(RPCSS_NP_MESSAGE)) ||
206              (vardata_payload_msg.message_type != RPCSS_NP_MESSAGE_TYPEID_VARDATAPAYLOADMSG) ) {
207           WINE_ERR("vardata payload read failure! (s=%s,br=%ld,exp_br=%d,mt=%u,mt_exp=%u\n",
208             success ? "TRUE" : "FALSE", bytesread, sizeof(RPCSS_NP_MESSAGE), 
209             vardata_payload_msg.message_type, RPCSS_NP_MESSAGE_TYPEID_VARDATAPAYLOADMSG);
210           success = FALSE;
211           break;
212         }
213         CopyMemory(c, vardata_payload_msg.message.vardatapayloadmsg.payload, VARDATA_PAYLOAD_BYTES);
214         WINE_TRACE("payload read.\n");
215       }
216     }
217   }
218
219   if (success && (bytesread == sizeof(RPCSS_NP_MESSAGE))) {
220     WINE_TRACE("read success.\n");
221     /* process the message and send a reply, serializing requests. */
222     EnterCriticalSection(&np_server_cs);
223     WINE_TRACE("processing message.\n");
224     RPCSS_ServerProcessMessage(&msg, &reply, vardata);
225     LeaveCriticalSection(&np_server_cs);
226
227     if (had_payload) LocalFree(vardata);
228
229     WINE_TRACE("message processed, sending reply....\n");
230
231     success = WriteFile(
232       mypipe,                 /* pipe handle */
233       (char *) &reply,        /* reply buffer */
234       sizeof(RPCSS_NP_REPLY), /* reply buffer size */
235       &written,               /* receives number of bytes written */
236       NULL                    /* not overlapped */
237     );
238
239     if ( (!success) || (written != sizeof(RPCSS_NP_REPLY)) )
240       WINE_WARN("Message reply failed. (successs=%s, br=%ld, exp_br=%d)\n",
241         success ? "TRUE" : "FALSE", written, sizeof(RPCSS_NP_REPLY));
242     else
243       WINE_TRACE("Reply sent successfully.\n");
244   } else 
245     WINE_WARN("Message receipt failed.\n");
246
247   FlushFileBuffers(mypipe);
248   DisconnectNamedPipe(mypipe);
249   CloseHandle(mypipe);
250   InterlockedDecrement(&srv_thread_count);
251 }
252
253 static VOID NPMainWorkThread(LPVOID ignored)
254 {
255   BOOL connected;
256   HANDLE hthread, master_mutex = RPCSS_GetMasterMutex();
257   DWORD threadid, wait_result;
258
259   WINE_TRACE("\n");
260
261   while (server_live) {
262     connected = ConnectNamedPipe(np_server_end, NULL) ? 
263       TRUE : (GetLastError() == ERROR_PIPE_CONNECTED);
264
265     if (connected) {
266       /* is "work" the act of connecting pipes, or the act of serving
267          requests successfully?  for now I will make it the former. */
268       if (!SetEvent(np_server_work_event))
269         WINE_WARN("failed to signal np_server_work_event.\n");
270
271       /* Create a thread for this client.  */
272       InterlockedIncrement(&srv_thread_count);
273       hthread = CreateThread( 
274         NULL,                      /* no security attribute */ 
275         0,                         /* default stack size */
276         (LPTHREAD_START_ROUTINE) HandlerThread, 
277         (LPVOID) np_server_end,    /* thread parameter */
278         0,                         /* not suspended */
279         &threadid                  /* returns thread ID  (not used) */
280       );
281
282       if (hthread) {
283         WINE_TRACE("Spawned handler thread: %p\n", hthread);
284         CloseHandle(hthread);
285        
286         /* for safety's sake, hold the mutex while we switch the pipe */
287
288         wait_result = WaitForSingleObject(master_mutex, MASTER_MUTEX_TIMEOUT);
289
290         switch (wait_result) {
291           case WAIT_ABANDONED: /* ? */
292           case WAIT_OBJECT_0:
293             /* we have ownership */
294             break;
295           case WAIT_FAILED:
296           case WAIT_TIMEOUT:
297           default: 
298             /* huh? */
299             wait_result = WAIT_FAILED;
300         }
301
302         if (wait_result == WAIT_FAILED) {
303           WINE_ERR("Couldn't enter master mutex.  Expect prolems.\n");
304         } else {
305           /* now create a new named pipe instance to listen on */
306           np_server_end = CreateNamedPipe(
307             NAME_RPCSS_NAMED_PIPE,                                 /* pipe name */
308             PIPE_ACCESS_DUPLEX,                                    /* pipe open mode */
309             PIPE_TYPE_MESSAGE | PIPE_READMODE_MESSAGE | PIPE_WAIT, /* pipe-specific modes */
310             PIPE_UNLIMITED_INSTANCES,                              /* maximum instances */
311             sizeof(RPCSS_NP_REPLY),                                /* output buffer size */
312             sizeof(RPCSS_NP_MESSAGE),                              /* input buffer size */
313             2000,                                                  /* time-out interval */
314             NULL                                                   /* SD */
315           );
316
317           if (np_server_end == INVALID_HANDLE_VALUE) {
318             WINE_ERR("Failed to recreate named pipe!\n");
319             /* not sure what to do? */
320             assert(FALSE);
321           }
322   
323           if (!ReleaseMutex(master_mutex))
324             WINE_ERR("Uh oh.  Couldn't leave master mutex.  Expect deadlock.\n");
325         }
326       } else {
327         WINE_ERR("Failed to spawn handler thread!\n");
328         DisconnectNamedPipe(np_server_end);
329         InterlockedDecrement(&srv_thread_count);
330       }
331     }
332   }
333   WINE_TRACE("Server thread shutdown.\n");
334 }
335
336 static HANDLE RPCSS_NPConnect(void)
337 {
338   HANDLE the_pipe = NULL;
339   DWORD dwmode, wait_result;
340   HANDLE master_mutex = RPCSS_GetMasterMutex();
341   
342   WINE_TRACE("\n");
343
344   while (TRUE) {
345
346     wait_result = WaitForSingleObject(master_mutex, MASTER_MUTEX_TIMEOUT);
347     switch (wait_result) {
348       case WAIT_ABANDONED: 
349       case WAIT_OBJECT_0:
350         break;
351       case WAIT_FAILED:
352       case WAIT_TIMEOUT:
353       default: 
354         WINE_ERR("This should never happen: couldn't enter mutex.\n");
355         return NULL;
356     }
357
358     /* try to open the client side of the named pipe. */
359     the_pipe = CreateFileA(
360       NAME_RPCSS_NAMED_PIPE,           /* pipe name */
361       GENERIC_READ | GENERIC_WRITE,    /* r/w access */
362       0,                               /* no sharing */
363       NULL,                            /* no security attributes */
364       OPEN_EXISTING,                   /* open an existing pipe */
365       0,                               /* default attributes */
366       NULL                             /* no template file */
367     );
368
369     if (the_pipe != INVALID_HANDLE_VALUE)
370       break;
371
372     if (GetLastError() != ERROR_PIPE_BUSY) {
373       WINE_WARN("Unable to open named pipe %s (assuming unavailable).\n", 
374         wine_dbgstr_a(NAME_RPCSS_NAMED_PIPE));
375       the_pipe = NULL;
376       break;
377     }
378
379     WINE_WARN("Named pipe busy (will wait)\n");
380     
381     if (!ReleaseMutex(master_mutex))
382       WINE_ERR("Failed to release master mutex.  Expect deadlock.\n");
383
384     /* wait for the named pipe.  We are only 
385        willing to wait only 5 seconds.  It should be available /very/ soon. */
386     if (! WaitNamedPipeA(NAME_RPCSS_NAMED_PIPE, MASTER_MUTEX_WAITNAMEDPIPE_TIMEOUT))
387     {
388       WINE_ERR("Named pipe unavailable after waiting.  Something is probably wrong.\n");
389       return NULL;
390     }
391
392   }
393
394   if (the_pipe) {
395     dwmode = PIPE_READMODE_MESSAGE;
396     /* SetNamedPipeHandleState not implemented ATM, but still seems to work somehow. */
397     if (! SetNamedPipeHandleState(the_pipe, &dwmode, NULL, NULL))
398       WINE_WARN("Failed to set pipe handle state\n");
399   }
400
401   if (!ReleaseMutex(master_mutex))
402     WINE_ERR("Uh oh, failed to leave the RPC Master Mutex!\n");
403
404   return the_pipe;
405 }
406
407 static BOOL RPCSS_SendReceiveNPMsg(HANDLE np, PRPCSS_NP_MESSAGE msg, PRPCSS_NP_REPLY reply)
408 {
409   DWORD count;
410
411   WINE_TRACE("(np == %p, msg == %p, reply == %p)\n", np, msg, reply);
412
413   if (! WriteFile(np, msg, sizeof(RPCSS_NP_MESSAGE), &count, NULL)) {
414     WINE_ERR("write failed.\n");
415     return FALSE;
416   }
417
418   if (count != sizeof(RPCSS_NP_MESSAGE)) {
419     WINE_ERR("write count mismatch.\n");
420     return FALSE;
421   }
422
423   if (! ReadFile(np, reply, sizeof(RPCSS_NP_REPLY), &count, NULL)) {
424     WINE_ERR("read failed.\n");
425     return FALSE;
426   }
427
428   if (count != sizeof(RPCSS_NP_REPLY)) {
429     WINE_ERR("read count mismatch. got %ld, expected %u.\n", count, sizeof(RPCSS_NP_REPLY));
430     return FALSE;
431   }
432
433   /* message execution was successful */
434   return TRUE;
435 }
436
/* Attempt to become the (single) rpcss pipe server for this session.
 *
 * Under the master mutex:
 *   - if another rpcss already listens on the pipe, send it a RANMSG so
 *     it refreshes its lazy timeout, and return FALSE;
 *   - otherwise create the work event, the critical section and the
 *     named pipe, spawn NPMainWorkThread, and return TRUE.
 *
 * Returns TRUE when this process is now the pipe server, FALSE when
 * another server exists or setup failed.
 */
BOOL RPCSS_BecomePipeServer(void)
{
  RPCSS_NP_MESSAGE msg;
  RPCSS_NP_REPLY reply;
  BOOL rslt = TRUE;
  HANDLE client_handle, hthread, master_mutex = RPCSS_GetMasterMutex();
  DWORD threadid, wait_result;

  WINE_TRACE("\n");

  wait_result = WaitForSingleObject(master_mutex, MASTER_MUTEX_TIMEOUT);

  switch (wait_result) {
    case WAIT_ABANDONED: /* ? */
    case WAIT_OBJECT_0:
      /* we have ownership */
      break;
    case WAIT_FAILED:
    case WAIT_TIMEOUT:
    default: 
      WINE_ERR("Couldn't enter master mutex.\n");
      return FALSE;
  }

  /* now we have the master mutex.  during this time we will
   *
   *   o check if an rpcss already listens on the pipe.  If so,
   *     we will tell it we were invoked, which will cause the
   *     other end to update its timeouts.  After, we just return
   *     false.
   * 
   *   o otherwise, we establish the pipe for ourselves and get
   *     ready to listen on it
   */
  
  if ((client_handle = RPCSS_NPConnect()) != NULL) {
    /* another server is already up: poke it and bow out */
    msg.message_type = RPCSS_NP_MESSAGE_TYPEID_RANMSG;
    msg.message.ranmsg.timeout = RPCSS_GetMaxLazyTimeout();
    msg.vardata_payload_size = 0;
    if (!RPCSS_SendReceiveNPMsg(client_handle, &msg, &reply))
      WINE_ERR("Something is amiss: RPC_SendReceive failed.\n");
    rslt = FALSE;
  }
  if (rslt) {
    /* auto-reset event, initially unsignalled; named so it is shared
       across rpcss-related processes */
    np_server_work_event = CreateEventA(NULL, FALSE, FALSE, "RpcNpServerWorkEvent");
    if (np_server_work_event == NULL) {
      /* dunno what we can do then */
      WINE_ERR("Unable to create the np_server_work_event\n");
      assert(FALSE);
    }
    InitializeCriticalSection(&np_server_cs);

    np_server_end = CreateNamedPipe(
      NAME_RPCSS_NAMED_PIPE,                                   /* pipe name */
      PIPE_ACCESS_DUPLEX,                                      /* pipe open mode */
      PIPE_TYPE_MESSAGE | PIPE_READMODE_MESSAGE | PIPE_WAIT,   /* pipe-specific modes */
      PIPE_UNLIMITED_INSTANCES,                                /* maximum number of instances */
      sizeof(RPCSS_NP_REPLY),                                  /* output buffer size */
      sizeof(RPCSS_NP_MESSAGE),                                /* input buffer size */
      2000,                                                    /* time-out interval */
      NULL                                                     /* SD */
    );

    if (np_server_end == INVALID_HANDLE_VALUE) {
      /* unwind everything created above */
      WINE_ERR("Failed to create named pipe!\n");
      DeleteCriticalSection(&np_server_cs);
      if (!CloseHandle(np_server_work_event)) /* we will leak the handle... */
        WINE_WARN("Failed to close np_server_work_event handle!\n");
      np_server_work_event = NULL;
      np_server_end = NULL;
      rslt = FALSE;
    }
  }

  server_live = rslt;

  if (rslt) {
    /* OK, now spawn the (single) server thread */
    hthread = CreateThread( 
      NULL,                      /* no security attribute */ 
      0,                         /* default stack size */
      (LPTHREAD_START_ROUTINE) NPMainWorkThread,
      NULL,             /* thread parameter */
      0,                         /* not suspended */
      &threadid                  /* returns thread ID  (not used) */
    );
    if (hthread) {
      WINE_TRACE("Created server thread.\n");
      CloseHandle(hthread);
    } else {
      /* thread creation failed: unwind and report failure */
      WINE_ERR("Serious error: unable to create server thread!\n");
      if (!CloseHandle(np_server_work_event)) /* we will leak the handle... */
        WINE_WARN("Failed to close np_server_work_event handle!\n");
      if (!CloseHandle(np_server_end)) /* we will leak the handle... */
        WINE_WARN("Unable to close named pipe handle!\n");
      DeleteCriticalSection(&np_server_cs);
      np_server_end = NULL;
      np_server_work_event = NULL;
      rslt = FALSE;
      server_live = FALSE;
    }
  }
  if (!ReleaseMutex(master_mutex))
    WINE_ERR("Unable to leave master mutex!??\n");

  return rslt;
}
544
545 BOOL RPCSS_NPDoWork(void)
546
547   DWORD waitresult = WaitForSingleObject(np_server_work_event, 1000);
548  
549   if (waitresult == WAIT_TIMEOUT)
550     return FALSE;
551   if (waitresult == WAIT_OBJECT_0)
552     return TRUE;
553
554   return FALSE;
555 }