/*
 * RPCSS named pipe server
 *
 * Copyright (C) 2002 Greg Turner
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */
#include "rpcss.h"      /* program-local header assumed to declare the RPCSS_NP_* types,
                           NAME_RPCSS_NAMED_PIPE and the RPCSS_* helpers used below */
#include "wine/debug.h"

WINE_DEFAULT_DEBUG_CHANNEL(ole);

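/*
 * Overview: rpcss keeps exactly one listening instance of the named pipe
 * open at a time (np_server_end).  NPMainWorkThread waits for a client to
 * connect, hands the connected instance off to a per-client HandlerThread,
 * and then creates a fresh listening instance.  Request processing itself
 * is serialized through np_server_cs, and the inter-process master mutex
 * guards the pipe handoff as well as client-side connection attempts.
 */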
static HANDLE np_server_end;            /* the current listening pipe instance */
static HANDLE np_server_work_event;     /* signaled whenever a client connects */
static CRITICAL_SECTION np_server_cs;   /* serializes request processing */
static LONG srv_thread_count;           /* number of live handler threads */
static BOOL server_live;                /* FALSE once we start shutting down */

LONG RPCSS_SrvThreadCount(void)
{
  return srv_thread_count;
}

BOOL RPCSS_UnBecomePipeServer(void)
{
  WINE_TRACE("shutting down pipe.\n");

  server_live = FALSE;   /* stops NPMainWorkThread's accept loop */

  if (!CloseHandle(np_server_end))
    WINE_WARN("Failed to close named pipe.\n");
  if (!CloseHandle(np_server_work_event))
    WINE_WARN("Failed to close the event handle.\n");
  DeleteCriticalSection(&np_server_cs);

  return TRUE;
}

static void RPCSS_ServerProcessRANMessage(PRPCSS_NP_MESSAGE pMsg, PRPCSS_NP_REPLY pReply)
{
  WINE_TRACE("\n");
  /* A RANMSG tells us that another rpcss instance was launched while we
     already serve the pipe; pMsg->message.ranmsg.timeout carries the
     newcomer's requested lazy timeout, and receiving the message is the
     cue to refresh our shutdown timeout (see RPCSS_BecomePipeServer).
     Minimal sketch: the timeout bookkeeping is not reconstructed here,
     and the reply needs no data beyond the zeroes our caller filled in. */
}

static void RPCSS_ServerProcessREGISTEREPMessage(PRPCSS_NP_MESSAGE pMsg, PRPCSS_NP_REPLY pReply,
                                                 char *vardata)
{
  WINE_TRACE("\n");
  RPCSS_RegisterRpcEndpoints(
    pMsg->message.registerepmsg.iface,
    pMsg->message.registerepmsg.object_count,
    pMsg->message.registerepmsg.binding_count,
    pMsg->message.registerepmsg.no_replace,
    vardata,                   /* variable-length object/binding data */
    pMsg->vardata_payload_size
  );
  /* no reply data; the caller already zeroed *pReply */
}

static void RPCSS_ServerProcessUNREGISTEREPMessage(PRPCSS_NP_MESSAGE pMsg,
                                                   PRPCSS_NP_REPLY pReply, char *vardata)
{
  WINE_TRACE("\n");
  RPCSS_UnregisterRpcEndpoints(
    pMsg->message.unregisterepmsg.iface,
    pMsg->message.unregisterepmsg.object_count,
    pMsg->message.unregisterepmsg.binding_count,
    vardata,                   /* variable-length object/binding data */
    pMsg->vardata_payload_size
  );
  /* no reply data; the caller already zeroed *pReply */
}

static void RPCSS_ServerProcessRESOLVEEPMessage(PRPCSS_NP_MESSAGE pMsg,
                                                PRPCSS_NP_REPLY pReply, char *vardata)
{
  WINE_TRACE("\n");
  /* for now, the reply is placed into pReply->as_string, on success, by RPCSS_ResolveRpcEndpoints */
  ZeroMemory(pReply->as_string, MAX_RPCSS_NP_REPLY_STRING_LEN);
  RPCSS_ResolveRpcEndpoints(
    pMsg->message.resolveepmsg.iface,
    pMsg->message.resolveepmsg.object,
    vardata,              /* variable-length binding data */
    pReply->as_string     /* receives the resolved endpoint string */
  );
}

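/* Dispatch a single request to the matching handler above.  The caller
   (HandlerThread) holds np_server_cs around this call, so the handlers
   never run concurrently. */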
static void RPCSS_ServerProcessMessage(PRPCSS_NP_MESSAGE pMsg, PRPCSS_NP_REPLY pReply, char *vardata)
{
  ZeroMemory(pReply, sizeof(*pReply));
  switch (pMsg->message_type) {
    case RPCSS_NP_MESSAGE_TYPEID_RANMSG:
      RPCSS_ServerProcessRANMessage(pMsg, pReply);
      break;
    case RPCSS_NP_MESSAGE_TYPEID_REGISTEREPMSG:
      RPCSS_ServerProcessREGISTEREPMessage(pMsg, pReply, vardata);
      break;
    case RPCSS_NP_MESSAGE_TYPEID_UNREGISTEREPMSG:
      RPCSS_ServerProcessUNREGISTEREPMessage(pMsg, pReply, vardata);
      break;
    case RPCSS_NP_MESSAGE_TYPEID_RESOLVEEPMSG:
      RPCSS_ServerProcessRESOLVEEPMessage(pMsg, pReply, vardata);
      break;
    default:
      WINE_ERR("Message type unknown! No action taken.\n");
  }
}

/* each message gets its own thread.  this is it. */
static DWORD WINAPI HandlerThread(LPVOID lpvPipeHandle)
{
  RPCSS_NP_MESSAGE msg, vardata_payload_msg;
  char *c, *vardata = NULL;
  RPCSS_NP_REPLY reply;
  DWORD bytesread, written;
  BOOL success, had_payload = FALSE;
  HANDLE mypipe;

  mypipe = (HANDLE) lpvPipeHandle;

  WINE_TRACE("mypipe: %p\n", mypipe);

  /* read the fixed-size request header */
  success = ReadFile(
    mypipe,                   /* pipe handle */
    (char *) &msg,            /* message buffer */
    sizeof(RPCSS_NP_MESSAGE), /* message buffer size */
    &bytesread,               /* receives number of bytes read */
    NULL                      /* not overlapped */
  );

  if (success && msg.vardata_payload_size) {
    had_payload = TRUE;
    /* this fudge space allows us not to worry about exceeding the buffer
       space on the last read */
    vardata = LocalAlloc(LPTR, msg.vardata_payload_size + VARDATA_PAYLOAD_BYTES);
    if (!vardata) {
      WINE_ERR("vardata memory allocation failure.\n");
      success = FALSE;
    } else {
      /* reassemble the payload from a sequence of fixed-size payload messages */
      for ( c = vardata; (c - vardata) < msg.vardata_payload_size;
            c += VARDATA_PAYLOAD_BYTES ) {
        success = ReadFile(
          mypipe,
          (char *) &vardata_payload_msg,
          sizeof(RPCSS_NP_MESSAGE),
          &bytesread,
          NULL
        );
        if ( (!success) || (bytesread != sizeof(RPCSS_NP_MESSAGE)) ||
             (vardata_payload_msg.message_type != RPCSS_NP_MESSAGE_TYPEID_VARDATAPAYLOADMSG) ) {
          WINE_ERR("vardata payload read failure! (s=%s,br=%d,mt=%u,mt_exp=%u)\n",
            success ? "TRUE" : "FALSE", bytesread,
            vardata_payload_msg.message_type, RPCSS_NP_MESSAGE_TYPEID_VARDATAPAYLOADMSG);
          success = FALSE;
          break;
        }
        CopyMemory(c, vardata_payload_msg.message.vardatapayloadmsg.payload, VARDATA_PAYLOAD_BYTES);
        WINE_TRACE("payload read.\n");
      }
    }
  }

  if (success && (bytesread == sizeof(RPCSS_NP_MESSAGE))) {
    WINE_TRACE("read success.\n");
    /* process the message and send a reply, serializing requests. */
    EnterCriticalSection(&np_server_cs);
    WINE_TRACE("processing message.\n");
    RPCSS_ServerProcessMessage(&msg, &reply, vardata);
    LeaveCriticalSection(&np_server_cs);

    if (had_payload) LocalFree(vardata);

    WINE_TRACE("message processed, sending reply....\n");

    success = WriteFile(
      mypipe,                 /* pipe handle */
      (char *) &reply,        /* reply buffer */
      sizeof(RPCSS_NP_REPLY), /* reply buffer size */
      &written,               /* receives number of bytes written */
      NULL                    /* not overlapped */
    );

    if ( (!success) || (written != sizeof(RPCSS_NP_REPLY)) )
      WINE_WARN("Message reply failed. (success=%d, written=%d)\n", success, written);
    else
      WINE_TRACE("Reply sent successfully.\n");
  } else {
    WINE_WARN("Message receipt failed.\n");
    if (had_payload) LocalFree(vardata);
  }

  FlushFileBuffers(mypipe);
  DisconnectNamedPipe(mypipe);

  InterlockedDecrement(&srv_thread_count);
  return 0;
}

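/* The (single) accept loop: waits for a client on the current pipe
   instance, hands the connected instance to a new HandlerThread, and then
   creates a fresh listening instance while holding the master mutex. */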
static DWORD WINAPI NPMainWorkThread(LPVOID ignored)
{
  BOOL connected;
  HANDLE hthread, master_mutex = RPCSS_GetMasterMutex();
  DWORD threadid, wait_result;

  WINE_TRACE("\n");

  while (server_live) {
    connected = ConnectNamedPipe(np_server_end, NULL) ?
      TRUE : (GetLastError() == ERROR_PIPE_CONNECTED);

    if (connected) {
      /* is "work" the act of connecting pipes, or the act of serving
         requests successfully?  for now I will make it the former. */
      if (!SetEvent(np_server_work_event))
        WINE_WARN("failed to signal np_server_work_event.\n");

      /* Create a thread for this client. */
      InterlockedIncrement(&srv_thread_count);
      hthread = CreateThread(
        NULL,                   /* no security attribute */
        0,                      /* default stack size */
        HandlerThread,          /* thread function */
        (LPVOID) np_server_end, /* thread parameter: the connected pipe instance */
        0,                      /* not suspended */
        &threadid               /* returns thread ID (not used) */
      );

      if (hthread) {
        WINE_TRACE("Spawned handler thread: %p\n", hthread);
        CloseHandle(hthread);

        /* for safety's sake, hold the mutex while we switch the pipe */
        wait_result = WaitForSingleObject(master_mutex, MASTER_MUTEX_TIMEOUT);

        switch (wait_result) {
          case WAIT_ABANDONED: /* ? */
          case WAIT_OBJECT_0:
            /* we have ownership */
            break;
          case WAIT_FAILED:
          case WAIT_TIMEOUT:
          default:
            wait_result = WAIT_FAILED;
        }

        if (wait_result == WAIT_FAILED) {
          WINE_ERR("Couldn't enter master mutex. Expect problems.\n");
        } else {
          /* now create a new named pipe instance to listen on */
          np_server_end = CreateNamedPipe(
            NAME_RPCSS_NAMED_PIPE,                                 /* pipe name */
            PIPE_ACCESS_DUPLEX,                                    /* pipe open mode */
            PIPE_TYPE_MESSAGE | PIPE_READMODE_MESSAGE | PIPE_WAIT, /* pipe-specific modes */
            PIPE_UNLIMITED_INSTANCES,                              /* maximum instances */
            sizeof(RPCSS_NP_REPLY),                                /* output buffer size */
            sizeof(RPCSS_NP_MESSAGE),                              /* input buffer size */
            2000,                                                  /* time-out interval */
            NULL                                                   /* default security attributes */
          );

          if (np_server_end == INVALID_HANDLE_VALUE) {
            WINE_ERR("Failed to recreate named pipe!\n");
            /* not sure what to do? */
          }

          if (!ReleaseMutex(master_mutex))
            WINE_ERR("Uh oh. Couldn't leave master mutex. Expect deadlock.\n");
        }
      } else {
        WINE_ERR("Failed to spawn handler thread!\n");
        DisconnectNamedPipe(np_server_end);
        InterlockedDecrement(&srv_thread_count);
      }
    }
  }
  WINE_TRACE("Server thread shutdown.\n");
  return 0;
}

static HANDLE RPCSS_NPConnect(void)
{
  HANDLE the_pipe;
  DWORD dwmode, wait_result;
  HANDLE master_mutex = RPCSS_GetMasterMutex();

  WINE_TRACE("\n");

  while (TRUE) {
    wait_result = WaitForSingleObject(master_mutex, MASTER_MUTEX_TIMEOUT);
    switch (wait_result) {
      case WAIT_ABANDONED:
      case WAIT_OBJECT_0:
        break;
      case WAIT_FAILED:
      case WAIT_TIMEOUT:
      default:
        WINE_ERR("This should never happen: couldn't enter mutex.\n");
        return INVALID_HANDLE_VALUE;
    }

    /* try to open the client side of the named pipe. */
    the_pipe = CreateFileA(
      NAME_RPCSS_NAMED_PIPE,        /* pipe name */
      GENERIC_READ | GENERIC_WRITE, /* r/w access */
      0,                            /* no sharing */
      NULL,                         /* no security attributes */
      OPEN_EXISTING,                /* open an existing pipe */
      0,                            /* default attributes */
      NULL                          /* no template file */
    );

    if (the_pipe != INVALID_HANDLE_VALUE)
      break;

    if (GetLastError() != ERROR_PIPE_BUSY) {
      WINE_WARN("Unable to open named pipe %s (assuming unavailable).\n",
        wine_dbgstr_a(NAME_RPCSS_NAMED_PIPE));
      break;
    }

    WINE_WARN("Named pipe busy (will wait)\n");

    if (!ReleaseMutex(master_mutex))
      WINE_ERR("Failed to release master mutex. Expect deadlock.\n");

    /* wait for the named pipe.  We are only willing to wait 5 seconds;
       it should be available /very/ soon. */
    if (! WaitNamedPipeA(NAME_RPCSS_NAMED_PIPE, MASTER_MUTEX_WAITNAMEDPIPE_TIMEOUT)) {
      WINE_ERR("Named pipe unavailable after waiting. Something is probably wrong.\n");
      return INVALID_HANDLE_VALUE;
    }
  }

  if (the_pipe != INVALID_HANDLE_VALUE) {
    dwmode = PIPE_READMODE_MESSAGE;
    /* SetNamedPipeHandleState not implemented ATM, but still seems to work somehow. */
    if (! SetNamedPipeHandleState(the_pipe, &dwmode, NULL, NULL))
      WINE_WARN("Failed to set pipe handle state\n");
  }

  if (!ReleaseMutex(master_mutex))
    WINE_ERR("Uh oh, failed to leave the RPC Master Mutex!\n");

  return the_pipe;
}

static BOOL RPCSS_SendReceiveNPMsg(HANDLE np, PRPCSS_NP_MESSAGE msg, PRPCSS_NP_REPLY reply)
{
  DWORD count;

  WINE_TRACE("(np == %p, msg == %p, reply == %p)\n", np, msg, reply);

  if (! WriteFile(np, msg, sizeof(RPCSS_NP_MESSAGE), &count, NULL)) {
    WINE_ERR("write failed.\n");
    return FALSE;
  }
  if (count != sizeof(RPCSS_NP_MESSAGE)) {
    WINE_ERR("write count mismatch.\n");
    return FALSE;
  }
  if (! ReadFile(np, reply, sizeof(RPCSS_NP_REPLY), &count, NULL)) {
    WINE_ERR("read failed.\n");
    return FALSE;
  }
  if (count != sizeof(RPCSS_NP_REPLY)) {
    WINE_ERR("read count mismatch, got %d.\n", count);
    return FALSE;
  }

  /* message execution was successful */
  return TRUE;
}

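/* Entry point for pipe-server startup.  Returns TRUE if this process is now
   the pipe server; FALSE if another rpcss instance already serves the pipe
   (in which case it has been pinged with a RANMSG) or if setup failed. */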
BOOL RPCSS_BecomePipeServer(void)
{
  RPCSS_NP_MESSAGE msg;
  RPCSS_NP_REPLY reply;
  BOOL rslt = TRUE;
  HANDLE client_handle, hthread, master_mutex = RPCSS_GetMasterMutex();
  DWORD threadid, wait_result;

  WINE_TRACE("\n");

  wait_result = WaitForSingleObject(master_mutex, MASTER_MUTEX_TIMEOUT);

  switch (wait_result) {
    case WAIT_ABANDONED: /* ? */
    case WAIT_OBJECT_0:
      /* we have ownership */
      break;
    case WAIT_FAILED:
    case WAIT_TIMEOUT:
    default:
      WINE_ERR("Couldn't enter master mutex.\n");
      return FALSE;
  }

  /* now we have the master mutex.  during this time we will
   *
   * o check if an rpcss already listens on the pipe.  If so,
   *   we will tell it we were invoked, which will cause the
   *   other end to update its timeouts.  After that, we just
   *   return FALSE.
   *
   * o otherwise, we establish the pipe for ourselves and get
   *   ready to listen on it
   */

  if ((client_handle = RPCSS_NPConnect()) != INVALID_HANDLE_VALUE) {
    /* another rpcss already serves the pipe; ping it and bow out */
    ZeroMemory(&msg, sizeof(msg));
    msg.message_type = RPCSS_NP_MESSAGE_TYPEID_RANMSG;
    msg.message.ranmsg.timeout = 1000;
    msg.vardata_payload_size = 0;
    if (!RPCSS_SendReceiveNPMsg(client_handle, &msg, &reply))
      WINE_ERR("Something is amiss: RPCSS_SendReceiveNPMsg failed.\n");
    CloseHandle(client_handle);
    rslt = FALSE;
  }

  if (rslt) {
    np_server_work_event = CreateEventA(NULL, FALSE, FALSE, "RpcNpServerWorkEvent");
    if (np_server_work_event == NULL) {
      /* dunno what we can do then; give up */
      WINE_ERR("Unable to create the np_server_work_event\n");
      ExitProcess(1);
    }
    InitializeCriticalSection(&np_server_cs);

    np_server_end = CreateNamedPipe(
      NAME_RPCSS_NAMED_PIPE,                                 /* pipe name */
      PIPE_ACCESS_DUPLEX,                                    /* pipe open mode */
      PIPE_TYPE_MESSAGE | PIPE_READMODE_MESSAGE | PIPE_WAIT, /* pipe-specific modes */
      PIPE_UNLIMITED_INSTANCES,                              /* maximum number of instances */
      sizeof(RPCSS_NP_REPLY),                                /* output buffer size */
      sizeof(RPCSS_NP_MESSAGE),                              /* input buffer size */
      2000,                                                  /* time-out interval */
      NULL                                                   /* default security attributes */
    );

    if (np_server_end == INVALID_HANDLE_VALUE) {
      WINE_ERR("Failed to create named pipe!\n");
      DeleteCriticalSection(&np_server_cs);
      if (!CloseHandle(np_server_work_event)) /* we will leak the handle... */
        WINE_WARN("Failed to close np_server_work_event handle!\n");
      np_server_work_event = NULL;
      np_server_end = NULL;
      rslt = FALSE;
    }
  }

  server_live = rslt;

  if (rslt) {
    /* OK, now spawn the (single) server thread */
    hthread = CreateThread(
      NULL,             /* no security attribute */
      0,                /* default stack size */
      NPMainWorkThread, /* thread function */
      NULL,             /* thread parameter */
      0,                /* not suspended */
      &threadid         /* returns thread ID (not used) */
    );
    if (hthread) {
      WINE_TRACE("Created server thread.\n");
      CloseHandle(hthread);
    } else {
      WINE_ERR("Serious error: unable to create server thread!\n");
      if (!CloseHandle(np_server_work_event)) /* we will leak the handle... */
        WINE_WARN("Failed to close np_server_work_event handle!\n");
      if (!CloseHandle(np_server_end)) /* we will leak the handle... */
        WINE_WARN("Unable to close named pipe handle!\n");
      DeleteCriticalSection(&np_server_cs);
      np_server_end = NULL;
      np_server_work_event = NULL;
      rslt = server_live = FALSE;
    }
  }

  if (!ReleaseMutex(master_mutex))
    WINE_ERR("Unable to leave master mutex!??\n");

  return rslt;
}

BOOL RPCSS_NPDoWork(HANDLE exit_handle)
{
  HANDLE handles[2];
  DWORD waitresult;

  handles[0] = np_server_work_event;
  handles[1] = exit_handle;
  waitresult = WaitForMultipleObjects(2, handles, FALSE, INFINITE);

  if (waitresult == WAIT_OBJECT_0)
    return TRUE;
  return FALSE;
}