/*
 *  request.c
 *
 *  Copyright (C) 2001 by Urban Widmark
 *
 *  Please add a note about your changes to smbfs in the ChangeLog file.
 */

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/net.h>

#include <linux/smb_fs.h>
#include <linux/smbno.h>
#include <linux/smb_mount.h>

#include "smb_debug.h"
#include "request.h"
#include "proto.h"

/* #define SMB_SLAB_DEBUG       (SLAB_RED_ZONE | SLAB_POISON) */
#define SMB_SLAB_DEBUG  0

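/* round up to a multiple of 4; used to align the trans2 parameter and data sections */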
#define ROUND_UP(x) (((x)+3) & ~3)

/* cache for request structures */
static kmem_cache_t *req_cachep;

static int smb_request_send_req(struct smb_request *req);

/*
  /proc/slabinfo:
  name, active, num, objsize, active_slabs, num_slabs, #pages
*/

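/*
 * Create the slab cache that request structures are allocated from.
 */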
int smb_init_request_cache(void)
{
        req_cachep = kmem_cache_create("smb_request",
                                       sizeof(struct smb_request), 0,
                                       SMB_SLAB_DEBUG | SLAB_HWCACHE_ALIGN,
                                       NULL, NULL);
        if (req_cachep == NULL)
                return -ENOMEM;

        return 0;
}

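/*
 * Tear down the request cache; complain if any structures were not freed.
 */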
void smb_destroy_request_cache(void)
{
        if (kmem_cache_destroy(req_cachep))
                printk(KERN_INFO "smb_destroy_request_cache: not all structures were freed\n");
}

/*
 * Allocate and initialise a request structure
 */
static struct smb_request *smb_do_alloc_request(struct smb_sb_info *server,
                                                int bufsize)
{
        struct smb_request *req;
        unsigned char *buf = NULL;

        req = kmem_cache_alloc(req_cachep, SLAB_KERNEL);
        VERBOSE("allocating request: %p\n", req);
        if (!req)
                goto out;

        if (bufsize > 0) {
                buf = smb_kmalloc(bufsize, GFP_NOFS);
                if (!buf) {
                        kmem_cache_free(req_cachep, req);
                        return NULL;
                }
        }

        memset(req, 0, sizeof(struct smb_request));
        req->rq_buffer = buf;
        req->rq_bufsize = bufsize;
        req->rq_server = server;
        init_waitqueue_head(&req->rq_wait);
        INIT_LIST_HEAD(&req->rq_queue);
        atomic_set(&req->rq_count, 1);

out:
        return req;
}

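/*
 * Allocate a request for this server. Allocation is only attempted while
 * fewer than MAX_REQUEST_HARD requests are outstanding; returns NULL on
 * failure (see the FIXME below).
 */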
struct smb_request *smb_alloc_request(struct smb_sb_info *server, int bufsize)
{
        struct smb_request *req = NULL;

        for (;;) {
                atomic_inc(&server->nr_requests);
                if (atomic_read(&server->nr_requests) <= MAX_REQUEST_HARD) {
                        req = smb_do_alloc_request(server, bufsize);
                        if (req != NULL)
                                break;
                }

#if 0
                /*
                 * Try to free up at least one request in order to stay
                 * below the hard limit
                 */
                if (nfs_try_to_free_pages(server))
                        continue;

                if (signalled() && (server->flags & NFS_MOUNT_INTR))
                        return ERR_PTR(-ERESTARTSYS);
                current->policy = SCHED_YIELD;
                schedule();
#else
                /* FIXME: we want something like nfs does above, but that
                   requires changes to all callers and can wait. */
                break;
#endif
        }
        return req;
}

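/*
 * Free a request and its buffers and drop the server's outstanding request
 * count. A static buffer (SMB_REQ_STATIC) is not freed here.
 */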
static void smb_free_request(struct smb_request *req)
{
        atomic_dec(&req->rq_server->nr_requests);
        if (req->rq_buffer && !(req->rq_flags & SMB_REQ_STATIC))
                smb_kfree(req->rq_buffer);
        if (req->rq_trans2buffer)
                smb_kfree(req->rq_trans2buffer);
        kmem_cache_free(req_cachep, req);
}

/*
 * What prevents an rget from racing with an rput? The reference count must
 * never drop to zero while the request is in use; only call rput once it is
 * safe for the request to be freed.
 */
static void smb_rget(struct smb_request *req)
{
        atomic_inc(&req->rq_count);
}
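
/*
 * Drop a reference. When the last reference is gone the request is removed
 * from any queue it is on and freed.
 */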
void smb_rput(struct smb_request *req)
{
        if (atomic_dec_and_test(&req->rq_count)) {
                list_del_init(&req->rq_queue);
                smb_free_request(req);
        }
}

/* setup to receive the data part of the SMB */
static int smb_setup_bcc(struct smb_request *req)
{
        int result = 0;
        req->rq_rlen = smb_len(req->rq_header) + 4 - req->rq_bytes_recvd;

        if (req->rq_rlen > req->rq_bufsize) {
                PARANOIA("Packet too large %d > %d\n",
                         req->rq_rlen, req->rq_bufsize);
                return -ENOBUFS;
        }

        req->rq_iov[0].iov_base = req->rq_buffer;
        req->rq_iov[0].iov_len  = req->rq_rlen;
        req->rq_iovlen = 1;

        return result;
}

/*
 * Prepare a "normal" request structure.
 */
static int smb_setup_request(struct smb_request *req)
{
        int len = smb_len(req->rq_header) + 4;
        req->rq_slen = len;

        /* if we expect a data part in the reply we set the iov's to read it */
        if (req->rq_resp_bcc)
                req->rq_setup_read = smb_setup_bcc;

        /* This tries to support re-using the same request */
        req->rq_bytes_sent = 0;
        req->rq_rcls = 0;
        req->rq_err = 0;
        req->rq_errno = 0;
        req->rq_fragment = 0;
        if (req->rq_trans2buffer)
                smb_kfree(req->rq_trans2buffer);

        return 0;
}

/*
 * Prepare a transaction2 request structure
 */
static int smb_setup_trans2request(struct smb_request *req)
{
        struct smb_sb_info *server = req->rq_server;
        int mparam, mdata;
        static unsigned char padding[4];

        /* I know the following is very ugly, but I want to build the
           smb packet as efficiently as possible. */

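        /*
         * Fixed layout of the outgoing trans2 packet: 15 parameter words,
         * the 4-byte-aligned offsets of the parameter and data sections,
         * and the resulting byte count (bcc).
         */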
        const int smb_parameters = 15;
        const int header = SMB_HEADER_LEN + 2 * smb_parameters + 2;
        const int oparam = ROUND_UP(header + 3);
        const int odata  = ROUND_UP(oparam + req->rq_lparm);
        const int bcc = (req->rq_data ? odata + req->rq_ldata :
                                        oparam + req->rq_lparm) - header;

        if ((bcc + oparam) > server->opt.max_xmit)
                return -ENOMEM;
        smb_setup_header(req, SMBtrans2, smb_parameters, bcc);

        /*
         * max parameters + max data + max setup == bufsize to make NT4 happy
         * and not abort the transfer or split into multiple responses. It also
         * makes smbfs happy as handling packets larger than the buffer size
         * is extra work.
         *
         * OS/2 is probably going to hate me for this ...
         */
        mparam = SMB_TRANS2_MAX_PARAM;
        mdata = req->rq_bufsize - mparam;

        mdata = server->opt.max_xmit - mparam - 100;
        if (mdata < 1024) {
                mdata = 1024;
                mparam = 20;
        }

#if 0
        /* NT/win2k has ~4k max_xmit, so with this we request more than it wants
           to return as one SMB. Useful for testing the fragmented trans2
           handling. */
        mdata = 8192;
#endif

        WSET(req->rq_header, smb_tpscnt, req->rq_lparm);
        WSET(req->rq_header, smb_tdscnt, req->rq_ldata);
        WSET(req->rq_header, smb_mprcnt, mparam);
        WSET(req->rq_header, smb_mdrcnt, mdata);
        WSET(req->rq_header, smb_msrcnt, 0);    /* max setup always 0 ? */
        WSET(req->rq_header, smb_flags, 0);
        DSET(req->rq_header, smb_timeout, 0);
        WSET(req->rq_header, smb_pscnt, req->rq_lparm);
        WSET(req->rq_header, smb_psoff, oparam - 4);
        WSET(req->rq_header, smb_dscnt, req->rq_ldata);
        WSET(req->rq_header, smb_dsoff, req->rq_data ? odata - 4 : 0);
        *(req->rq_header + smb_suwcnt) = 0x01;          /* setup count */
        *(req->rq_header + smb_suwcnt + 1) = 0x00;      /* reserved */
        WSET(req->rq_header, smb_setup0, req->rq_trans2_command);

        req->rq_iovlen = 2;
        req->rq_iov[0].iov_base = (void *) req->rq_header;
        req->rq_iov[0].iov_len = oparam;
        req->rq_iov[1].iov_base = (req->rq_parm==NULL) ? padding : req->rq_parm;
        req->rq_iov[1].iov_len = req->rq_lparm;
        req->rq_slen = oparam + req->rq_lparm;

        if (req->rq_data) {
                req->rq_iovlen += 2;
                req->rq_iov[2].iov_base = padding;
                req->rq_iov[2].iov_len = odata - oparam - req->rq_lparm;
                req->rq_iov[3].iov_base = req->rq_data;
                req->rq_iov[3].iov_len = req->rq_ldata;
                req->rq_slen = odata + req->rq_ldata;
        }

        /* always a data part for trans2 replies */
        req->rq_setup_read = smb_setup_bcc;

        return 0;
}

/*
 * Add a request and tell smbiod to process it
 */
int smb_add_request(struct smb_request *req)
{
        long timeleft;
        struct smb_sb_info *server = req->rq_server;
        int result = 0;

        smb_setup_request(req);
        if (req->rq_trans2_command) {
                if (req->rq_buffer == NULL) {
                        PARANOIA("trans2 attempted without response buffer!\n");
                        return -EIO;
                }
                result = smb_setup_trans2request(req);
        }
        if (result < 0)
                return result;

#ifdef SMB_DEBUG_PACKET_SIZE
        add_xmit_stats(req);
#endif

        /* add 'req' to the queue of requests */
        if (smb_lock_server_interruptible(server))
                return -EINTR;

        /*
         * Try to send the request directly from the calling process. If that
         * fails we queue the request and let smbiod send it later.
         */

        /* FIXME: each server has a limit on the number of parallel requests
           it will handle (10, 50 or so). We should not allow more requests
           than that to be active at the same time. */
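
        /* mids are 16-bit; wrap the counter well before it overflows */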
        if (server->mid > 0xf000)
                server->mid = 0;
        req->rq_mid = server->mid++;
        WSET(req->rq_header, smb_mid, req->rq_mid);

        result = 0;
        if (server->state == CONN_VALID) {
                if (list_empty(&server->xmitq))
                        result = smb_request_send_req(req);
                if (result < 0) {
                        /* Connection lost? */
                        server->conn_error = result;
                        server->state = CONN_INVALID;
                }
        }
        if (result != 1)
                list_add_tail(&req->rq_queue, &server->xmitq);
        smb_rget(req);

        if (server->state != CONN_VALID)
                smbiod_retry(server);

        smb_unlock_server(server);

        smbiod_wake_up();

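        /* wait up to 30 seconds for smbiod to mark the request as received */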
        timeleft = wait_event_interruptible_timeout(req->rq_wait,
                                    req->rq_flags & SMB_REQ_RECEIVED, 30*HZ);
        if (!timeleft || signal_pending(current)) {
                /*
                 * On timeout or on interrupt we want to try and remove the
                 * request from the recvq/xmitq.
                 */
                smb_lock_server(server);
                if (!(req->rq_flags & SMB_REQ_RECEIVED)) {
                        list_del_init(&req->rq_queue);
                        smb_rput(req);
                }
                smb_unlock_server(server);
        }

        if (!timeleft) {
                PARANOIA("request [%p, mid=%d] timed out!\n",
                         req, req->rq_mid);
                VERBOSE("smb_com:  %02x\n", *(req->rq_header + smb_com));
                VERBOSE("smb_rcls: %02x\n", *(req->rq_header + smb_rcls));
                VERBOSE("smb_flg:  %02x\n", *(req->rq_header + smb_flg));
                VERBOSE("smb_tid:  %04x\n", WVAL(req->rq_header, smb_tid));
                VERBOSE("smb_pid:  %04x\n", WVAL(req->rq_header, smb_pid));
                VERBOSE("smb_uid:  %04x\n", WVAL(req->rq_header, smb_uid));
                VERBOSE("smb_mid:  %04x\n", WVAL(req->rq_header, smb_mid));
                VERBOSE("smb_wct:  %02x\n", *(req->rq_header + smb_wct));

                req->rq_rcls = ERRSRV;
                req->rq_err  = ERRtimeout;

                /* Just in case it was "stuck" */
                smbiod_wake_up();
        }
        VERBOSE("woke up, rcls=%d\n", req->rq_rcls);

        if (req->rq_rcls != 0)
                req->rq_errno = smb_errno(req);
        if (signal_pending(current))
                req->rq_errno = -ERESTARTSYS;
        return req->rq_errno;
}

/*
 * Send a request and place it on the recvq if successfully sent.
 * Must be called with the server lock held.
 */
static int smb_request_send_req(struct smb_request *req)
{
        struct smb_sb_info *server = req->rq_server;
        int result;

        if (req->rq_bytes_sent == 0) {
                WSET(req->rq_header, smb_tid, server->opt.tid);
                WSET(req->rq_header, smb_pid, 1);
                WSET(req->rq_header, smb_uid, server->opt.server_uid);
        }

        result = smb_send_request(req);
        if (result < 0 && result != -EAGAIN)
                goto out;

        result = 0;
        if (!(req->rq_flags & SMB_REQ_TRANSMITTED))
                goto out;

        list_del_init(&req->rq_queue);
        list_add_tail(&req->rq_queue, &server->recvq);
        result = 1;
out:
        return result;
}

/*
 * Sends one request for this server. (smbiod)
 * Must be called with the server lock held.
 * Returns: <0 on error
 *           0 if no request could be completely sent
 *           1 if all data for one request was sent
 */
int smb_request_send_server(struct smb_sb_info *server)
{
        struct list_head *head;
        struct smb_request *req;
        int result;

        if (server->state != CONN_VALID)
                return 0;

        /* dequeue first request, if any */
        req = NULL;
        head = server->xmitq.next;
        if (head != &server->xmitq) {
                req = list_entry(head, struct smb_request, rq_queue);
        }
        if (!req)
                return 0;

        result = smb_request_send_req(req);
        if (result < 0) {
                server->conn_error = result;
                list_del_init(&req->rq_queue);
                list_add(&req->rq_queue, &server->xmitq);
                result = -EIO;
                goto out;
        }

out:
        return result;
}

/*
 * Try to find a request matching this "mid". Typically the first entry will
 * be the matching one.
 */
static struct smb_request *find_request(struct smb_sb_info *server, int mid)
{
        struct list_head *tmp;
        struct smb_request *req = NULL;

        list_for_each(tmp, &server->recvq) {
                req = list_entry(tmp, struct smb_request, rq_queue);
                if (req->rq_mid == mid) {
                        break;
                }
                req = NULL;
        }

        if (!req) {
                VERBOSE("received reply with mid %d but no request!\n",
                        WVAL(server->header, smb_mid));
                server->rstate = SMB_RECV_DROP;
        }

        return req;
}

/*
 * Called when we have read the smb header and believe this is a response.
 */
static int smb_init_request(struct smb_sb_info *server, struct smb_request *req)
{
        int hdrlen, wct;

        memcpy(req->rq_header, server->header, SMB_HEADER_LEN);

        wct = *(req->rq_header + smb_wct);
        if (wct > 20) {
                PARANOIA("wct too large, %d > 20\n", wct);
                server->rstate = SMB_RECV_DROP;
                return 0;
        }

        req->rq_resp_wct = wct;
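        /* SMB header plus wct 16-bit parameter words plus the 2-byte byte count field */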
        hdrlen = SMB_HEADER_LEN + wct*2 + 2;
        VERBOSE("header length: %d   smb_wct: %2d\n", hdrlen, wct);

        req->rq_bytes_recvd = SMB_HEADER_LEN;
        req->rq_rlen = hdrlen;
        req->rq_iov[0].iov_base = req->rq_header;
        req->rq_iov[0].iov_len  = hdrlen;
        req->rq_iovlen = 1;
        server->rstate = SMB_RECV_PARAM;

#ifdef SMB_DEBUG_PACKET_SIZE
        add_recv_stats(smb_len(server->header));
#endif
        return 0;
}

/*
 * Reads the SMB parameters
 */
static int smb_recv_param(struct smb_sb_info *server, struct smb_request *req)
{
        int result;

        result = smb_receive(server, req);
        if (result < 0)
                return result;
        if (req->rq_bytes_recvd < req->rq_rlen)
                return 0;

        VERBOSE("result: %d   smb_bcc:  %04x\n", result,
                WVAL(req->rq_header, SMB_HEADER_LEN +
                     (*(req->rq_header + smb_wct) * 2)));

        result = 0;
        req->rq_iov[0].iov_base = NULL;
        req->rq_rlen = 0;
        if (req->rq_callback)
                req->rq_callback(req);
        else if (req->rq_setup_read)
                result = req->rq_setup_read(req);
        if (result < 0) {
                server->rstate = SMB_RECV_DROP;
                return result;
        }

        server->rstate = req->rq_rlen > 0 ? SMB_RECV_DATA : SMB_RECV_END;

        req->rq_bytes_recvd = 0;        /* recvd out of the iov */

        VERBOSE("rlen: %d\n", req->rq_rlen);
        if (req->rq_rlen < 0) {
                PARANOIA("Parameters read beyond end of packet!\n");
                server->rstate = SMB_RECV_END;
                return -EIO;
        }
        return 0;
}

/*
 * Reads the SMB data
 */
static int smb_recv_data(struct smb_sb_info *server, struct smb_request *req)
{
        int result;

        result = smb_receive(server, req);
        if (result < 0)
                goto out;
        if (req->rq_bytes_recvd < req->rq_rlen)
                goto out;
        server->rstate = SMB_RECV_END;
out:
        VERBOSE("result: %d\n", result);
        return result;
}

/*
 * Receive a transaction2 response
 * Return: 0 if the response has been fully read
 *         1 if there are further "fragments" to read
 *        <0 if there is an error
 */
static int smb_recv_trans2(struct smb_sb_info *server, struct smb_request *req)
{
        unsigned char *inbuf;
        unsigned int parm_disp, parm_offset, parm_count, parm_tot;
        unsigned int data_disp, data_offset, data_count, data_tot;
        int hdrlen = SMB_HEADER_LEN + req->rq_resp_wct*2 - 2;

        VERBOSE("handling trans2\n");

        inbuf = req->rq_header;
        data_tot    = WVAL(inbuf, smb_tdrcnt);
        parm_tot    = WVAL(inbuf, smb_tprcnt);
        parm_disp   = WVAL(inbuf, smb_prdisp);
        parm_offset = WVAL(inbuf, smb_proff);
        parm_count  = WVAL(inbuf, smb_prcnt);
        data_disp   = WVAL(inbuf, smb_drdisp);
        data_offset = WVAL(inbuf, smb_droff);
        data_count  = WVAL(inbuf, smb_drcnt);

        /* Modify offset for the split header/buffer we use */
        if (data_count || data_offset) {
                if (unlikely(data_offset < hdrlen))
                        goto out_bad_data;
                else
                        data_offset -= hdrlen;
        }
        if (parm_count || parm_offset) {
                if (unlikely(parm_offset < hdrlen))
                        goto out_bad_parm;
                else
                        parm_offset -= hdrlen;
        }

        if (parm_count == parm_tot && data_count == data_tot) {
                /*
                 * This packet has all the trans2 data.
                 *
                 * We setup the request so that this will be the common
                 * case. It may be a server error to not return a
                 * response that fits.
                 */
                VERBOSE("single trans2 response  "
                        "dcnt=%u, pcnt=%u, doff=%u, poff=%u\n",
                        data_count, parm_count,
                        data_offset, parm_offset);
                req->rq_ldata = data_count;
                req->rq_lparm = parm_count;
                req->rq_data = req->rq_buffer + data_offset;
                req->rq_parm = req->rq_buffer + parm_offset;
                if (unlikely(parm_offset + parm_count > req->rq_rlen))
                        goto out_bad_parm;
                if (unlikely(data_offset + data_count > req->rq_rlen))
                        goto out_bad_data;
                return 0;
        }

        VERBOSE("multi trans2 response  "
                "frag=%d, dcnt=%u, pcnt=%u, doff=%u, poff=%u\n",
                req->rq_fragment,
                data_count, parm_count,
                data_offset, parm_offset);

        if (!req->rq_fragment) {
                int buf_len;

                /* We got the first trans2 fragment */
                req->rq_fragment = 1;
                req->rq_total_data = data_tot;
                req->rq_total_parm = parm_tot;
                req->rq_ldata = 0;
                req->rq_lparm = 0;

                buf_len = data_tot + parm_tot;
                if (buf_len > SMB_MAX_PACKET_SIZE)
                        goto out_too_long;

                req->rq_trans2bufsize = buf_len;
                req->rq_trans2buffer = smb_kmalloc(buf_len, GFP_NOFS);
                if (!req->rq_trans2buffer)
                        goto out_no_mem;
                memset(req->rq_trans2buffer, 0, buf_len);

                req->rq_parm = req->rq_trans2buffer;
                req->rq_data = req->rq_trans2buffer + parm_tot;
        } else if (unlikely(req->rq_total_data < data_tot ||
                            req->rq_total_parm < parm_tot))
                goto out_data_grew;

        if (unlikely(parm_disp + parm_count > req->rq_total_parm ||
                     parm_offset + parm_count > req->rq_rlen))
                goto out_bad_parm;
        if (unlikely(data_disp + data_count > req->rq_total_data ||
                     data_offset + data_count > req->rq_rlen))
                goto out_bad_data;

        inbuf = req->rq_buffer;
        memcpy(req->rq_parm + parm_disp, inbuf + parm_offset, parm_count);
        memcpy(req->rq_data + data_disp, inbuf + data_offset, data_count);

        req->rq_ldata += data_count;
        req->rq_lparm += parm_count;

        /*
         * Check whether we've received all of the data. Note that
         * we use the packet totals -- total lengths might shrink!
         */
        if (req->rq_ldata >= data_tot && req->rq_lparm >= parm_tot) {
                req->rq_ldata = data_tot;
                req->rq_lparm = parm_tot;
                return 0;
        }
        return 1;

out_too_long:
        printk(KERN_ERR "smb_trans2: data/param too long, data=%u, parm=%u\n",
                data_tot, parm_tot);
        goto out_EIO;
out_no_mem:
        printk(KERN_ERR "smb_trans2: couldn't allocate data area of %d bytes\n",
               req->rq_trans2bufsize);
        req->rq_errno = -ENOMEM;
        goto out;
out_data_grew:
        printk(KERN_ERR "smb_trans2: data/params grew!\n");
        goto out_EIO;
out_bad_parm:
        printk(KERN_ERR "smb_trans2: invalid parms, disp=%u, cnt=%u, tot=%u, ofs=%u\n",
               parm_disp, parm_count, parm_tot, parm_offset);
        goto out_EIO;
out_bad_data:
        printk(KERN_ERR "smb_trans2: invalid data, disp=%u, cnt=%u, tot=%u, ofs=%u\n",
               data_disp, data_count, data_tot, data_offset);
out_EIO:
        req->rq_errno = -EIO;
out:
        return req->rq_errno;
}

/*
 * State machine for receiving responses. We handle the fact that we can't
 * read the full response in one try by having states telling us how much we
 * have read.
 *
 * Must be called with the server lock held (only called from smbiod).
 *
 * Return: <0 on error
 */
int smb_request_recv(struct smb_sb_info *server)
{
        struct smb_request *req = NULL;
        int result = 0;

        if (smb_recv_available(server) <= 0)
                return 0;

        VERBOSE("state: %d\n", server->rstate);
        switch (server->rstate) {
        case SMB_RECV_DROP:
                result = smb_receive_drop(server);
                if (result < 0)
                        break;
                if (server->rstate == SMB_RECV_DROP)
                        break;
                server->rstate = SMB_RECV_START;
                /* fallthrough */
        case SMB_RECV_START:
                server->smb_read = 0;
                server->rstate = SMB_RECV_HEADER;
                /* fallthrough */
        case SMB_RECV_HEADER:
                result = smb_receive_header(server);
                if (result < 0)
                        break;
                if (server->rstate == SMB_RECV_HEADER)
                        break;
                if (! (*(server->header + smb_flg) & SMB_FLAGS_REPLY) ) {
                        server->rstate = SMB_RECV_REQUEST;
                        break;
                }
                if (server->rstate != SMB_RECV_HCOMPLETE)
                        break;
                /* fallthrough */
        case SMB_RECV_HCOMPLETE:
                req = find_request(server, WVAL(server->header, smb_mid));
                if (!req)
                        break;
                smb_init_request(server, req);
                req->rq_rcls = *(req->rq_header + smb_rcls);
                req->rq_err  = WVAL(req->rq_header, smb_err);
                if (server->rstate != SMB_RECV_PARAM)
                        break;
                /* fallthrough */
        case SMB_RECV_PARAM:
                if (!req)
                        req = find_request(server,WVAL(server->header,smb_mid));
                if (!req)
                        break;
                result = smb_recv_param(server, req);
                if (result < 0)
                        break;
                if (server->rstate != SMB_RECV_DATA)
                        break;
                /* fallthrough */
        case SMB_RECV_DATA:
                if (!req)
                        req = find_request(server,WVAL(server->header,smb_mid));
                if (!req)
                        break;
                result = smb_recv_data(server, req);
                if (result < 0)
                        break;
                break;

                /* We should never be called with any of these states */
        case SMB_RECV_END:
        case SMB_RECV_REQUEST:
                server->rstate = SMB_RECV_END;
                break;
        }

        if (result < 0) {
                /* We saw an error */
                return result;
        }

        if (server->rstate != SMB_RECV_END)
                return 0;

        result = 0;
        if (req->rq_trans2_command && req->rq_rcls == SUCCESS)
                result = smb_recv_trans2(server, req);

        /*
         * Response completely read. Drop any extra bytes sent by the server.
         * (Yes, servers sometimes add extra bytes to responses)
         */
        VERBOSE("smb_len: %d   smb_read: %d\n",
                server->smb_len, server->smb_read);
        if (server->smb_read < server->smb_len)
                smb_receive_drop(server);

        server->rstate = SMB_RECV_START;

        if (!result) {
                list_del_init(&req->rq_queue);
                req->rq_flags |= SMB_REQ_RECEIVED;
                smb_rput(req);
                wake_up_interruptible(&req->rq_wait);
        }
        return 0;
}