6 * Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
7 * Copyright (C) 2004 by Latchesar Ionkov <lucho@ionkov.net>
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to:
21 * Free Software Foundation
22 * 51 Franklin Street, Fifth Floor
23 * Boston, MA 02111-1301 USA
27 #include <linux/config.h>
28 #include <linux/module.h>
29 #include <linux/errno.h>
31 #include <linux/kthread.h>
32 #include <linux/idr.h>
37 #include "transport.h"
42 * dprintcond - print condition of session info
43 * @v9ses: session info structure
44 * @req: RPC request structure
/*
 * NOTE(review): only scattered lines of this function survive in this
 * fragment; the full body (return statement etc.) is not visible here.
 * What is visible: it logs the transport status and is used purely for
 * its debug side effect inside the wait condition in v9fs_recv() below.
 */
49 dprintcond(struct v9fs_session_info *v9ses, struct v9fs_rpcreq *req)
/* Emits the current transport status so a hung wait can be diagnosed. */
51 dprintk(DEBUG_MUX, "condition: %d, %p\n", v9ses->transport->status,
57 * xread - force read of a certain number of bytes
58 * @v9ses: session info structure
59 * @ptr: pointer to buffer
60 * @sz: number of bytes to read
62 * Chuck Cranor CS-533 project1
/*
 * NOTE(review): fragment — the loop that accumulates `rd` and the final
 * return are missing from this view. Visibly, it issues transport reads
 * for the remaining (sz - rd) bytes until the full count is satisfied,
 * logging any error returned by the transport's read op.
 */
65 static int xread(struct v9fs_session_info *v9ses, void *ptr, unsigned long sz)
/* Ask the transport for whatever remains; a negative ret is an error. */
70 ret = v9ses->transport->read(v9ses->transport, ptr, sz - rd);
72 dprintk(DEBUG_ERROR, "xread errno %d\n", ret);
82 * read_message - read a full 9P2000 fcall packet
83 * @v9ses: session info structure
84 * @rcall: fcall structure to read into
85 * @rcalllen: size of fcall buffer
/*
 * NOTE(review): fragment — error-return paths, the kfree of `data`, and
 * the final return are not visible here. The visible flow is:
 *   1) read the 4-byte little-endian size prefix of the 9P message,
 *   2) sanity-check it against the negotiated maxdata,
 *   3) read the remaining payload, 4) deserialize into @rcall.
 */
90 read_message(struct v9fs_session_info *v9ses,
91 struct v9fs_fcall *rcall, int rcalllen)
/* Step 1: pull in the fixed-size length header (buf is presumably 4 bytes). */
98 res = xread(v9ses, buf, sizeof(buf));
101 "Reading of count field failed returned: %d\n", res);
107 "Reading of count field failed returned: %d\n", res);
/* 9P wire format: size[4] is little-endian; assemble it byte by byte. */
111 size = buf[0] | (buf[1] << 8) | (buf[2] << 16) | (buf[3] << 24);
112 dprintk(DEBUG_MUX, "got a packet count: %d\n", size);
114 /* adjust for the four bytes of size */
/* Reject anything larger than the session's negotiated maximum. */
117 if (size > v9ses->maxdata) {
118 dprintk(DEBUG_ERROR, "packet too big: %d\n", size);
122 data = kmalloc(size, GFP_KERNEL);
124 eprintk(KERN_WARNING, "out of memory\n");
/* Step 3: read the rest of the packet body into the temporary buffer. */
128 res = xread(v9ses, data, size);
130 dprintk(DEBUG_ERROR, "Reading of fcall failed returned: %d\n",
136 /* we now have an in-memory string that is the reply.
137 * deserialize it. There is very little to go wrong at this point
138 * save for v9fs_alloc errors.
/* Step 4: decode the raw bytes into the caller-supplied fcall. */
140 res = v9fs_deserialize_fcall(v9ses, size, data, v9ses->maxdata,
152 * v9fs_recv - receive an RPC response for a particular tag
153 * @v9ses: session info structure
154 * @req: RPC request structure
/*
 * NOTE(review): fragment — the return paths (including what happens on
 * Disconnected) are missing here. Visible behavior: sleep interruptibly
 * until the receive thread fills req->rcall, the transport drops out of
 * Connected state, or a signal arrives; then unlink the request from
 * the pending list under muxlock. dprintcond() in the condition is a
 * debug-logging side effect (it presumably evaluates to 0/false so it
 * never satisfies the wait by itself — confirm against its full body).
 */
158 static int v9fs_recv(struct v9fs_session_info *v9ses, struct v9fs_rpcreq *req)
162 dprintk(DEBUG_MUX, "waiting for response: %d\n", req->tcall->tag);
163 ret = wait_event_interruptible(v9ses->read_wait,
164 ((v9ses->transport->status != Connected) ||
165 (req->rcall != 0) || dprintcond(v9ses, req)));
167 dprintk(DEBUG_MUX, "got it: rcall %p\n", req->rcall);
168 if (v9ses->transport->status == Disconnected)
/* Response (or failure) observed: remove ourselves from mux_fcalls. */
172 spin_lock(&v9ses->muxlock);
173 list_del(&req->next);
174 spin_unlock(&v9ses->muxlock);
181 * v9fs_send - send a 9P request
182 * @v9ses: session info structure
183 * @req: RPC request to send
/*
 * NOTE(review): fragment — NULL-check of `data`, kfree, and the return
 * statement are not visible. Visible flow: serialize req->tcall into a
 * freshly allocated buffer, register the request on mux_fcalls (so the
 * receive thread can match the reply by tag) BEFORE writing, then push
 * the bytes through the transport; a short write unregisters it again.
 */
187 static int v9fs_send(struct v9fs_session_info *v9ses, struct v9fs_rpcreq *req)
191 struct v9fs_fcall *tcall = req->tcall;
193 data = kmalloc(v9ses->maxdata + V9FS_IOHDRSZ, GFP_KERNEL);
/* Zeroing size makes the serializer recompute the on-wire length. */
197 tcall->size = 0; /* enforce size recalculation */
199 v9fs_serialize_fcall(v9ses, tcall, data,
200 v9ses->maxdata + V9FS_IOHDRSZ);
/* Register before write so the reply cannot race past an empty list. */
204 spin_lock(&v9ses->muxlock);
205 list_add(&req->next, &v9ses->mux_fcalls);
206 spin_unlock(&v9ses->muxlock);
208 dprintk(DEBUG_MUX, "sending message: tag %d size %d\n", tcall->tag,
210 ret = v9ses->transport->write(v9ses->transport, data, tcall->size);
/* Short or failed write: back out the registration done above. */
212 if (ret != tcall->size) {
213 spin_lock(&v9ses->muxlock);
214 list_del(&req->next);
217 spin_unlock(&v9ses->muxlock);
229 * v9fs_mux_rpc - send a request, receive a response
230 * @v9ses: session info structure
231 * @tcall: fcall to send
232 * @rcall: buffer to place response into
/*
 * NOTE(review): fragment — tag assignment to tcall, the req setup, the
 * signal-restore logic, and all return paths are missing in this view.
 * Visible contract: allocate a tag from the tidpool (TVERSION is the
 * only untagged/fixed-tag request), send, wait for the reply, translate
 * an RERROR reply into a negative errno, and release the tag.
 *
 * BUG(review): lines "¤t->sighand->siglock" are encoding-corrupted —
 * "&current" was HTML-entity-mangled ("&curren" -> U+00A4 '¤' + "t").
 * It must read &current->sighand->siglock. Cannot be fixed in a
 * comments-only edit; restore from the original source.
 *
 * NOTE(review): "fcall->params.rerror.errno" uses a struct field named
 * `errno`; that collides with the userspace errno macro in portable
 * code — presumably safe in this kernel context, but worth confirming
 * against the struct definition.
 */
237 v9fs_mux_rpc(struct v9fs_session_info *v9ses, struct v9fs_fcall *tcall,
238 struct v9fs_fcall **rcall)
241 struct v9fs_fcall *fcall = NULL;
242 struct v9fs_rpcreq req;
/* TVERSION negotiates the session and does not consume a tag. */
251 if (tcall->id != TVERSION) {
252 tid = v9fs_get_idpool(&v9ses->tidpool);
262 ret = v9fs_send(v9ses, &req);
/* Send failed: give the tag back before bailing out. */
265 if (tcall->id != TVERSION)
266 v9fs_put_idpool(tid, &v9ses->tidpool);
267 dprintk(DEBUG_MUX, "error %d\n", ret);
271 ret = v9fs_recv(v9ses, &req);
275 dprintk(DEBUG_MUX, "received: tag=%x, ret=%d\n", tcall->tag, ret);
/*
 * Interrupted by a signal: if the server is still up (and this is not
 * itself a TFLUSH), send a TFLUSH for our tag so the server discards
 * the orphaned request. SIGPENDING is cleared so the flush RPC itself
 * is not interrupted; presumably re-set afterwards under siglock.
 */
276 if (ret == -ERESTARTSYS) {
277 if (v9ses->transport->status != Disconnected
278 && tcall->id != TFLUSH) {
281 dprintk(DEBUG_MUX, "flushing the tag: %d\n",
283 clear_thread_flag(TIF_SIGPENDING);
284 v9fs_t_flush(v9ses, tcall->tag);
285 spin_lock_irqsave(¤t->sighand->siglock, flags);
287 spin_unlock_irqrestore(¤t->sighand->siglock,
289 dprintk(DEBUG_MUX, "flushing done\n");
/* RERROR: map the server's error string (or explicit errno) to -errno. */
299 if (fcall->id == RERROR) {
300 ret = v9fs_errstr2errno(fcall->params.rerror.error);
301 if (ret == 0) { /* string match failed */
302 if (fcall->params.rerror.errno)
303 ret = -(fcall->params.rerror.errno);
/* 9P pairs each T-message id with id+1 as its R-message. */
307 } else if (fcall->id != tcall->id + 1) {
309 "fcall mismatch: expected %d, got %d\n",
310 tcall->id + 1, fcall->id);
/* Done with the RPC either way: return the tag to the pool. */
316 if (tcall->id != TVERSION)
317 v9fs_put_idpool(tid, &v9ses->tidpool);
327 * v9fs_mux_cancel_requests - cancels all pending requests
329 * @v9ses: session info structure
330 * @err: error code to return to the requests
/*
 * NOTE(review): fragment — the loop body that actually stores @err into
 * each pending request is missing from this view. Visible behavior:
 * walk mux_fcalls under muxlock, then wake every sleeper on read_wait
 * so each blocked v9fs_recv() can observe the failure and return.
 */
332 void v9fs_mux_cancel_requests(struct v9fs_session_info *v9ses, int err)
334 struct v9fs_rpcreq *rptr;
335 struct v9fs_rpcreq *rreq;
337 dprintk(DEBUG_MUX, " %d\n", err);
338 spin_lock(&v9ses->muxlock);
/* _safe variant: entries are presumably unlinked/modified while walking. */
339 list_for_each_entry_safe(rreq, rptr, &v9ses->mux_fcalls, next) {
342 spin_unlock(&v9ses->muxlock);
/* Kick every waiter so they re-check their wait condition. */
343 wake_up_all(&v9ses->read_wait);
347 * v9fs_recvproc - kproc to handle demultiplexing responses
348 * @data: session info structure
/*
 * NOTE(review): fragment — the tag-match body (assigning req/rcall
 * ownership), the unmatched-reply kfree, and the loop exit paths are
 * missing from this view. Visible loop: allocate a reply buffer, block
 * in read_message() for one full fcall, match its tag against the
 * pending list under muxlock, special-case TFLUSH (the flushed oldtag's
 * request is also unlinked), then wake all waiters on read_wait.
 */
352 static int v9fs_recvproc(void *data)
354 struct v9fs_session_info *v9ses = (struct v9fs_session_info *)data;
355 struct v9fs_fcall *rcall = NULL;
356 struct v9fs_rpcreq *rptr;
357 struct v9fs_rpcreq *req;
358 struct v9fs_rpcreq *rreq;
/* SIGKILL is the only signal this kthread responds to. */
361 allow_signal(SIGKILL);
362 set_current_state(TASK_INTERRUPTIBLE);
/* Tell v9fs_mux_init() we are up before entering the service loop. */
363 complete(&v9ses->proccmpl);
364 while (!kthread_should_stop() && err >= 0) {
365 req = rptr = rreq = NULL;
/* Fresh buffer per reply; ownership presumably passes to the waiter. */
367 rcall = kmalloc(v9ses->maxdata + V9FS_IOHDRSZ, GFP_KERNEL);
369 eprintk(KERN_ERR, "no memory for buffers\n");
373 err = read_message(v9ses, rcall, v9ses->maxdata + V9FS_IOHDRSZ);
/* Find the pending request whose T-tag matches this reply's tag. */
378 spin_lock(&v9ses->muxlock);
379 list_for_each_entry_safe(rreq, rptr, &v9ses->mux_fcalls, next) {
380 if (rreq->tcall->tag == rcall->tag) {
/*
 * A TFLUSH reply also retires the request it was flushing:
 * locate the entry with the flushed oldtag and unlink it too.
 * NOTE(review): list_del(&rptr->next) unlinks the *next* entry
 * cursor rather than &treq->next — looks suspicious; confirm
 * against the complete original before relying on it.
 */
387 if (req && (req->tcall->id == TFLUSH)) {
388 struct v9fs_rpcreq *treq = NULL;
389 list_for_each_entry_safe(treq, rptr, &v9ses->mux_fcalls, next) {
390 if (treq->tcall->tag ==
391 req->tcall->params.tflush.oldtag) {
392 list_del(&rptr->next);
399 spin_unlock(&v9ses->muxlock);
/* Reply with no matching request: log and (presumably) drop it. */
403 "unexpected response: id %d tag %d\n",
404 rcall->id, rcall->tag);
/* Wake every v9fs_recv() sleeper so the matched one can collect. */
409 wake_up_all(&v9ses->read_wait);
410 set_current_state(TASK_INTERRUPTIBLE);
413 /* Inform all pending processes about the failure */
414 wake_up_all(&v9ses->read_wait);
/* If killed, release whoever is blocked in wait_for_completion(). */
416 if (signal_pending(current))
417 complete(&v9ses->proccmpl);
419 dprintk(DEBUG_MUX, "recvproc: end\n");
420 v9ses->recvproc = NULL;
426 * v9fs_mux_init - initialize multiplexer (spawn kproc)
427 * @v9ses: session info structure
428 * @dev_name: mount device information (to create unique kproc)
/*
 * NOTE(review): fragment — the declaration of procname[] and the final
 * return are missing from this view. Visible flow: initialize the
 * session's waitqueue/completions/lock/list, spawn the receive kthread
 * named after the mount device, and block until it signals readiness
 * via proccmpl. On kthread_create() failure the whole session is torn
 * down and -ECONNABORTED returned.
 */
432 int v9fs_mux_init(struct v9fs_session_info *v9ses, const char *dev_name)
/*
 * strncpy does not guarantee termination on truncation, so the last
 * byte is forced to NUL explicitly on the next line — this pairing is
 * safe as written.
 */
436 strncpy(procname, dev_name, sizeof(procname));
437 procname[sizeof(procname) - 1] = 0;
439 init_waitqueue_head(&v9ses->read_wait);
440 init_completion(&v9ses->fcread);
441 init_completion(&v9ses->proccmpl);
442 spin_lock_init(&v9ses->muxlock);
443 INIT_LIST_HEAD(&v9ses->mux_fcalls);
444 v9ses->recvproc = NULL;
445 v9ses->curfcall = NULL;
/* Created stopped; started explicitly with wake_up_process() below. */
447 v9ses->recvproc = kthread_create(v9fs_recvproc, v9ses,
448 "v9fs_recvproc %s", procname);
450 if (IS_ERR(v9ses->recvproc)) {
451 eprintk(KERN_ERR, "cannot create receiving thread\n");
452 v9fs_session_close(v9ses);
453 return -ECONNABORTED;
456 wake_up_process(v9ses->recvproc);
/* Do not return until the receiver has completed proccmpl (see recvproc). */
457 wait_for_completion(&v9ses->proccmpl);