/*
 * Copyright (C) 2001 by Urban Widmark
 *
 * Please add a note about your changes to smbfs in the ChangeLog file.
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/smb_fs.h>
#include <linux/smbno.h>
#include <linux/smb_mount.h>

#include "smb_debug.h"
/* #define SMB_SLAB_DEBUG (SLAB_RED_ZONE | SLAB_POISON) */
#define SMB_SLAB_DEBUG 0

#define ROUND_UP(x) (((x)+3) & ~3)
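/*
 * ROUND_UP() rounds a byte offset up to the next multiple of four, e.g.
 * ROUND_UP(69) == 72 and ROUND_UP(72) == 72. It is used below to 4-byte
 * align the parameter and data sections of a trans2 request.
 */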
/* cache for request structures */
static kmem_cache_t *req_cachep;

static int smb_request_send_req(struct smb_request *req);
/*
 * /proc/slabinfo columns:
 * name, active, num, objsize, active_slabs, num_slabs, #pages
 */
int smb_init_request_cache(void)
{
        req_cachep = kmem_cache_create("smb_request",
                                       sizeof(struct smb_request), 0,
                                       SMB_SLAB_DEBUG | SLAB_HWCACHE_ALIGN,
                                       NULL, NULL);
        if (req_cachep == NULL)
                return -ENOMEM;

        return 0;
}

void smb_destroy_request_cache(void)
{
        if (kmem_cache_destroy(req_cachep))
                printk(KERN_INFO "smb_destroy_request_cache: not all structures were freed\n");
}
/*
 * Allocate and initialise a request structure
 */
static struct smb_request *smb_do_alloc_request(struct smb_sb_info *server,
                                                int bufsize)
{
        struct smb_request *req;
        unsigned char *buf = NULL;

        req = kmem_cache_alloc(req_cachep, SLAB_KERNEL);
        VERBOSE("allocating request: %p\n", req);
        if (!req)
                goto out;

        if (bufsize > 0) {
                buf = kmalloc(bufsize, GFP_NOFS);
                if (!buf) {
                        kmem_cache_free(req_cachep, req);
                        req = NULL;
                        goto out;
                }
        }

        memset(req, 0, sizeof(struct smb_request));
        req->rq_buffer = buf;
        req->rq_bufsize = bufsize;
        req->rq_server = server;
        init_waitqueue_head(&req->rq_wait);
        INIT_LIST_HEAD(&req->rq_queue);
        atomic_set(&req->rq_count, 1);

out:
        return req;
}
struct smb_request *smb_alloc_request(struct smb_sb_info *server, int bufsize)
{
        struct smb_request *req = NULL;

        for (;;) {
                atomic_inc(&server->nr_requests);
                if (atomic_read(&server->nr_requests) <= MAX_REQUEST_HARD) {
                        req = smb_do_alloc_request(server, bufsize);
                        if (req != NULL)
                                break;
                }

#if 0
                /*
                 * Try to free up at least one request in order to stay
                 * below the hard limit
                 */
                if (nfs_try_to_free_pages(server))
                        continue;

                if (signalled() && (server->flags & NFS_MOUNT_INTR))
                        return ERR_PTR(-ERESTARTSYS);
                current->policy = SCHED_YIELD;
                schedule();
#else
                /* FIXME: we want something like nfs does above, but that
                   requires changes to all callers and can wait. */
                break;
#endif
        }
        return req;
}
static void smb_free_request(struct smb_request *req)
{
        atomic_dec(&req->rq_server->nr_requests);
        if (req->rq_buffer && !(req->rq_flags & SMB_REQ_STATIC))
                kfree(req->rq_buffer);
        kfree(req->rq_trans2buffer);
        kmem_cache_free(req_cachep, req);
}
/*
 * What prevents an rget from racing with an rput? The count must never drop
 * to zero while the request is in use. Only rput when it is OK for the
 * request to be freed.
 */
static void smb_rget(struct smb_request *req)
{
        atomic_inc(&req->rq_count);
}

void smb_rput(struct smb_request *req)
{
        if (atomic_dec_and_test(&req->rq_count)) {
                list_del_init(&req->rq_queue);
                smb_free_request(req);
        }
}
/* setup to receive the data part of the SMB */
static int smb_setup_bcc(struct smb_request *req)
{
        req->rq_rlen = smb_len(req->rq_header) + 4 - req->rq_bytes_recvd;
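        /*
         * smb_len() reads the SMB length from the 4-byte NetBIOS-style
         * session header, so smb_len() + 4 is the size of the whole packet;
         * subtracting what has already been received (header plus parameter
         * words) leaves the byte count (bcc) portion still to be read into
         * rq_buffer.
         */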
        if (req->rq_rlen > req->rq_bufsize) {
                PARANOIA("Packet too large %d > %d\n",
                         req->rq_rlen, req->rq_bufsize);
                return -ENOBUFS;
        }

        req->rq_iov[0].iov_base = req->rq_buffer;
        req->rq_iov[0].iov_len = req->rq_rlen;

        return 0;
}
/*
 * Prepare a "normal" request structure.
 */
static int smb_setup_request(struct smb_request *req)
{
        int len = smb_len(req->rq_header) + 4;
        req->rq_slen = len;

        /* if we expect a data part in the reply we set the iovs to read it */
        if (req->rq_resp_bcc)
                req->rq_setup_read = smb_setup_bcc;

        /* This tries to support re-using the same request */
        req->rq_bytes_sent = 0;
        req->rq_fragment = 0;
        kfree(req->rq_trans2buffer);
        req->rq_trans2buffer = NULL;

        return 0;
}
/*
 * Prepare a transaction2 request structure
 */
static int smb_setup_trans2request(struct smb_request *req)
{
        struct smb_sb_info *server = req->rq_server;
        int mparam, mdata;
        static unsigned char padding[4];

        /* I know the following is very ugly, but I want to build the
           smb packet as efficiently as possible. */

        const int smb_parameters = 15;
        const int header = SMB_HEADER_LEN + 2 * smb_parameters + 2;
        const int oparam = ROUND_UP(header + 3);
        const int odata = ROUND_UP(oparam + req->rq_lparm);
        const int bcc = (req->rq_data ? odata + req->rq_ldata :
                                        oparam + req->rq_lparm) - header;
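        /*
         * Worked example (assuming SMB_HEADER_LEN is 37): header is
         * 37 + 2*15 + 2 = 69 bytes, so oparam = ROUND_UP(72) = 72. With,
         * say, rq_lparm = 12 the data section starts at odata =
         * ROUND_UP(84) = 84, and with rq_ldata = 100 the byte count becomes
         * bcc = (84 + 100) - 69 = 115.
         */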
        if ((bcc + oparam) > server->opt.max_xmit)
                return -ENOMEM;
        smb_setup_header(req, SMBtrans2, smb_parameters, bcc);

        /*
         * max parameters + max data + max setup == bufsize to make NT4 happy
         * and not abort the transfer or split into multiple responses. It also
         * makes smbfs happy as handling packets larger than the buffer size
         * is not supported.
         *
         * OS/2 is probably going to hate me for this ...
         */
        mparam = SMB_TRANS2_MAX_PARAM;
        mdata = req->rq_bufsize - mparam;

        mdata = server->opt.max_xmit - mparam - 100;
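        /*
         * mdata is limited both by our own receive buffer (rq_bufsize above)
         * and by the server's advertised max_xmit; the 100 bytes subtracted
         * here are, presumably, slack for the response header and parameter
         * block so the reply still fits in one max_xmit-sized packet.
         */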
        /* NT/win2k has ~4k max_xmit, so with this we request more than it
           wants to return as one SMB. Useful for testing the fragmented
           trans2 handling. */
        WSET(req->rq_header, smb_tpscnt, req->rq_lparm);
        WSET(req->rq_header, smb_tdscnt, req->rq_ldata);
        WSET(req->rq_header, smb_mprcnt, mparam);
        WSET(req->rq_header, smb_mdrcnt, mdata);
        WSET(req->rq_header, smb_msrcnt, 0);    /* max setup always 0 ? */
        WSET(req->rq_header, smb_flags, 0);
        DSET(req->rq_header, smb_timeout, 0);
        WSET(req->rq_header, smb_pscnt, req->rq_lparm);
        WSET(req->rq_header, smb_psoff, oparam - 4);
        WSET(req->rq_header, smb_dscnt, req->rq_ldata);
        WSET(req->rq_header, smb_dsoff, req->rq_data ? odata - 4 : 0);
        *(req->rq_header + smb_suwcnt) = 0x01;          /* setup count */
        *(req->rq_header + smb_suwcnt + 1) = 0x00;      /* reserved */
        WSET(req->rq_header, smb_setup0, req->rq_trans2_command);
        req->rq_iov[0].iov_base = (void *) req->rq_header;
        req->rq_iov[0].iov_len = oparam;
        req->rq_iov[1].iov_base = (req->rq_parm == NULL) ? padding : req->rq_parm;
        req->rq_iov[1].iov_len = req->rq_lparm;
        req->rq_slen = oparam + req->rq_lparm;

        if (req->rq_data) {
                req->rq_iov[2].iov_base = padding;
                req->rq_iov[2].iov_len = odata - oparam - req->rq_lparm;
                req->rq_iov[3].iov_base = req->rq_data;
                req->rq_iov[3].iov_len = req->rq_ldata;
                req->rq_slen = odata + req->rq_ldata;
        }
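        /*
         * Resulting wire layout of the request (offsets relative to the
         * start of the packet):
         *
         *   [ SMB header + trans2 words | pad | parameters | pad | data ]
         *     0                               ^oparam             ^odata
         *
         * iov[0] covers the header up to oparam, iov[1] the parameter
         * bytes, and, when a data section is present, iov[2] holds the
         * alignment padding and iov[3] the data itself.
         */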
        /* always a data part for trans2 replies */
        req->rq_setup_read = smb_setup_bcc;

        return 0;
}
/*
 * Add a request and tell smbiod to process it
 */
int smb_add_request(struct smb_request *req)
{
        long timeleft;
        struct smb_sb_info *server = req->rq_server;
        int result = 0;

        smb_setup_request(req);
        if (req->rq_trans2_command) {
                if (req->rq_buffer == NULL) {
                        PARANOIA("trans2 attempted without response buffer!\n");
                        return -EIO;
                }
                result = smb_setup_trans2request(req);
        }
        if (result < 0)
                return result;

#ifdef SMB_DEBUG_PACKET_SIZE
        add_xmit_stats(req);
#endif
        /* add 'req' to the queue of requests */
        if (smb_lock_server_interruptible(server))
                return -EINTR;

        /*
         * Try to send the request from the calling process. If that fails we
         * queue the request and let smbiod send it later.
         */

        /* FIXME: each server has a limit on the maximum number of parallel
           requests, something like 10 or 50. We should not allow more
           requests than that to be active at a time. */
        if (server->mid > 0xf000)
                server->mid = 0;
        req->rq_mid = server->mid++;
        WSET(req->rq_header, smb_mid, req->rq_mid);
        if (server->state == CONN_VALID) {
                if (list_empty(&server->xmitq))
                        result = smb_request_send_req(req);
                if (result < 0) {
                        /* Connection lost? */
                        server->conn_error = result;
                        server->state = CONN_INVALID;
                }
        }
        if (result != 1)
                list_add_tail(&req->rq_queue, &server->xmitq);
        smb_rget(req);

        if (server->state != CONN_VALID)
                smbiod_retry(server);

        smb_unlock_server(server);

        smbiod_wake_up();
        timeleft = wait_event_interruptible_timeout(req->rq_wait,
                                req->rq_flags & SMB_REQ_RECEIVED, 30*HZ);
        if (!timeleft || signal_pending(current)) {
                /*
                 * On timeout or on interrupt we want to try and remove the
                 * request from the recvq/xmitq.
                 * First check if the request is still part of a queue. (May
                 * have been removed by some error condition)
                 */
                smb_lock_server(server);
                if (!list_empty(&req->rq_queue)) {
                        list_del_init(&req->rq_queue);
                        smb_rput(req);
                }
                smb_unlock_server(server);
        }
        if (!timeleft) {
                PARANOIA("request [%p, mid=%d] timed out!\n",
                         req, req->rq_mid);
                VERBOSE("smb_com: %02x\n", *(req->rq_header + smb_com));
                VERBOSE("smb_rcls: %02x\n", *(req->rq_header + smb_rcls));
                VERBOSE("smb_flg: %02x\n", *(req->rq_header + smb_flg));
                VERBOSE("smb_tid: %04x\n", WVAL(req->rq_header, smb_tid));
                VERBOSE("smb_pid: %04x\n", WVAL(req->rq_header, smb_pid));
                VERBOSE("smb_uid: %04x\n", WVAL(req->rq_header, smb_uid));
                VERBOSE("smb_mid: %04x\n", WVAL(req->rq_header, smb_mid));
                VERBOSE("smb_wct: %02x\n", *(req->rq_header + smb_wct));

                req->rq_rcls = ERRSRV;
                req->rq_err = ERRtimeout;

                /* Just in case it was "stuck" */
                smbiod_wake_up();
        }
        VERBOSE("woke up, rcls=%d\n", req->rq_rcls);

        if (req->rq_rcls != 0)
                req->rq_errno = smb_errno(req);
        if (signal_pending(current))
                req->rq_errno = -ERESTARTSYS;
        return req->rq_errno;
}
/*
 * Send a request and place it on the recvq if successfully sent.
 * Must be called with the server lock held.
 */
static int smb_request_send_req(struct smb_request *req)
{
        struct smb_sb_info *server = req->rq_server;
        int result;

        if (req->rq_bytes_sent == 0) {
                WSET(req->rq_header, smb_tid, server->opt.tid);
                WSET(req->rq_header, smb_pid, 1);
                WSET(req->rq_header, smb_uid, server->opt.server_uid);
        }

        result = smb_send_request(req);
        if (result < 0 && result != -EAGAIN)
                goto out;

        result = 0;
        if (!(req->rq_flags & SMB_REQ_TRANSMITTED))
                goto out;

        result = 1;
        list_move_tail(&req->rq_queue, &server->recvq);
out:
        return result;
}
/*
 * Sends one request for this server. (smbiod)
 * Must be called with the server lock held.
 * Returns: <0 on error
 *           0 if no request could be completely sent
 *           1 if all data for one request was sent
 */
int smb_request_send_server(struct smb_sb_info *server)
{
        struct list_head *head;
        struct smb_request *req;
        int result;

        if (server->state != CONN_VALID)
                return 0;

        /* dequeue first request, if any */
        req = NULL;
        head = server->xmitq.next;
        if (head != &server->xmitq) {
                req = list_entry(head, struct smb_request, rq_queue);
        }
        if (!req)
                return 0;

        result = smb_request_send_req(req);
        if (result < 0) {
                server->conn_error = result;
                list_move(&req->rq_queue, &server->xmitq);
        }

        return result;
}
/*
 * Try to find a request matching this "mid". Typically the first entry will
 * be the matching one.
 */
static struct smb_request *find_request(struct smb_sb_info *server, int mid)
{
        struct list_head *tmp;
        struct smb_request *req = NULL;

        list_for_each(tmp, &server->recvq) {
                req = list_entry(tmp, struct smb_request, rq_queue);
                if (req->rq_mid == mid) {
                        break;
                }
                req = NULL;
        }

        if (!req) {
                VERBOSE("received reply with mid %d but no request!\n",
                        WVAL(server->header, smb_mid));
                server->rstate = SMB_RECV_DROP;
        }

        return req;
}
/*
 * Called when we have read the SMB header and believe this is a response.
 */
static int smb_init_request(struct smb_sb_info *server, struct smb_request *req)
{
        int hdrlen, wct;

        memcpy(req->rq_header, server->header, SMB_HEADER_LEN);

        wct = *(req->rq_header + smb_wct);
        if (wct > 20) {
                PARANOIA("wct too large, %d > 20\n", wct);
                server->rstate = SMB_RECV_DROP;
                return 0;
        }

        req->rq_resp_wct = wct;
        hdrlen = SMB_HEADER_LEN + wct*2 + 2;
        VERBOSE("header length: %d smb_wct: %2d\n", hdrlen, wct);
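        /*
         * hdrlen covers the fixed SMB header, the wct parameter words (two
         * bytes each) and the two-byte byte count (bcc) field, so the
         * parameter block and bcc are read into rq_header below before the
         * data portion is set up.
         */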
        req->rq_bytes_recvd = SMB_HEADER_LEN;
        req->rq_rlen = hdrlen;
        req->rq_iov[0].iov_base = req->rq_header;
        req->rq_iov[0].iov_len = hdrlen;
        server->rstate = SMB_RECV_PARAM;

#ifdef SMB_DEBUG_PACKET_SIZE
        add_recv_stats(smb_len(server->header));
#endif
        return 0;
}
/*
 * Reads the SMB parameters
 */
static int smb_recv_param(struct smb_sb_info *server, struct smb_request *req)
{
        int result;

        result = smb_receive(server, req);
        if (result < 0)
                return result;
        if (req->rq_bytes_recvd < req->rq_rlen)
                return 0;

        VERBOSE("result: %d smb_bcc: %04x\n", result,
                WVAL(req->rq_header, SMB_HEADER_LEN +
                     (*(req->rq_header + smb_wct) * 2)));

        result = 0;
        req->rq_iov[0].iov_base = NULL;
        req->rq_rlen = 0;
        if (req->rq_callback)
                req->rq_callback(req);
        else if (req->rq_setup_read)
                result = req->rq_setup_read(req);
        if (result < 0) {
                server->rstate = SMB_RECV_DROP;
                return result;
        }

        server->rstate = req->rq_rlen > 0 ? SMB_RECV_DATA : SMB_RECV_END;

        req->rq_bytes_recvd = 0;        /* recvd out of the iov */

        VERBOSE("rlen: %d\n", req->rq_rlen);
        if (req->rq_rlen < 0) {
                PARANOIA("Parameters read beyond end of packet!\n");
                server->rstate = SMB_RECV_END;
                return -EIO;
        }

        return 0;
}
static int smb_recv_data(struct smb_sb_info *server, struct smb_request *req)
{
        int result;

        result = smb_receive(server, req);
        if (result < 0)
                return result;
        if (req->rq_bytes_recvd < req->rq_rlen)
                return 0;
        server->rstate = SMB_RECV_END;

        VERBOSE("result: %d\n", result);

        return 0;
}
/*
 * Receive a transaction2 response
 * Return: 0 if the response has been fully read
 *         1 if there are further "fragments" to read
 *        <0 if there is an error
 */
static int smb_recv_trans2(struct smb_sb_info *server, struct smb_request *req)
{
        unsigned char *inbuf;
        unsigned int parm_disp, parm_offset, parm_count, parm_tot;
        unsigned int data_disp, data_offset, data_count, data_tot;
        int hdrlen = SMB_HEADER_LEN + req->rq_resp_wct*2 - 2;
        VERBOSE("handling trans2\n");

        inbuf = req->rq_header;
        data_tot    = WVAL(inbuf, smb_tdrcnt);
        parm_tot    = WVAL(inbuf, smb_tprcnt);
        parm_disp   = WVAL(inbuf, smb_prdisp);
        parm_offset = WVAL(inbuf, smb_proff);
        parm_count  = WVAL(inbuf, smb_prcnt);
        data_disp   = WVAL(inbuf, smb_drdisp);
        data_offset = WVAL(inbuf, smb_droff);
        data_count  = WVAL(inbuf, smb_drcnt);
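        /*
         * For each fragment the server reports total sizes (_tot), the
         * number of parameter/data bytes carried in this fragment (_count),
         * their offset from the start of the SMB (_offset) and where they
         * belong in the reassembled buffer (_disp). A response split in two
         * might e.g. carry data_tot=6000, data_count=4000, data_disp=0 in
         * the first fragment and data_count=2000, data_disp=4000 in the
         * second.
         */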
        /* Modify offset for the split header/buffer we use */
        if (data_count || data_offset) {
                if (unlikely(data_offset < hdrlen))
                        goto out_bad_data;
                else
                        data_offset -= hdrlen;
        }
        if (parm_count || parm_offset) {
                if (unlikely(parm_offset < hdrlen))
                        goto out_bad_parm;
                else
                        parm_offset -= hdrlen;
        }
        if (parm_count == parm_tot && data_count == data_tot) {
                /*
                 * This packet has all the trans2 data.
                 *
                 * We setup the request so that this will be the common
                 * case. It may be a server error to not return a
                 * response that fits.
                 */
                VERBOSE("single trans2 response "
                        "dcnt=%u, pcnt=%u, doff=%u, poff=%u\n",
                        data_count, parm_count,
                        data_offset, parm_offset);
                req->rq_ldata = data_count;
                req->rq_lparm = parm_count;
                req->rq_data = req->rq_buffer + data_offset;
                req->rq_parm = req->rq_buffer + parm_offset;
                if (unlikely(parm_offset + parm_count > req->rq_rlen))
                        goto out_bad_parm;
                if (unlikely(data_offset + data_count > req->rq_rlen))
                        goto out_bad_data;
                return 0;
        }
        VERBOSE("multi trans2 response "
                "frag=%d, dcnt=%u, pcnt=%u, doff=%u, poff=%u\n",
                req->rq_fragment,
                data_count, parm_count,
                data_offset, parm_offset);

        if (!req->rq_fragment) {
                int buf_len;

                /* We got the first trans2 fragment */
                req->rq_fragment = 1;
                req->rq_total_data = data_tot;
                req->rq_total_parm = parm_tot;
                req->rq_ldata = 0;
                req->rq_lparm = 0;

                buf_len = data_tot + parm_tot;
                if (buf_len > SMB_MAX_PACKET_SIZE)
                        goto out_too_long;

                req->rq_trans2bufsize = buf_len;
                req->rq_trans2buffer = kzalloc(buf_len, GFP_NOFS);
                if (!req->rq_trans2buffer)
                        goto out_no_mem;

                req->rq_parm = req->rq_trans2buffer;
                req->rq_data = req->rq_trans2buffer + parm_tot;
        } else if (unlikely(req->rq_total_data < data_tot ||
                            req->rq_total_parm < parm_tot))
                goto out_data_grew;
        if (unlikely(parm_disp + parm_count > req->rq_total_parm ||
                     parm_offset + parm_count > req->rq_rlen))
                goto out_bad_parm;
        if (unlikely(data_disp + data_count > req->rq_total_data ||
                     data_offset + data_count > req->rq_rlen))
                goto out_bad_data;

        inbuf = req->rq_buffer;
        memcpy(req->rq_parm + parm_disp, inbuf + parm_offset, parm_count);
        memcpy(req->rq_data + data_disp, inbuf + data_offset, data_count);

        req->rq_ldata += data_count;
        req->rq_lparm += parm_count;
        /*
         * Check whether we've received all of the data. Note that
         * we use the packet totals -- total lengths might shrink!
         */
        if (req->rq_ldata >= data_tot && req->rq_lparm >= parm_tot) {
                req->rq_ldata = data_tot;
                req->rq_lparm = parm_tot;
                return 0;
        }
        return 1;
out_too_long:
        printk(KERN_ERR "smb_trans2: data/param too long, data=%u, parm=%u\n",
               data_tot, parm_tot);
        goto out_EIO;
out_no_mem:
        printk(KERN_ERR "smb_trans2: couldn't allocate data area of %d bytes\n",
               req->rq_trans2bufsize);
        req->rq_errno = -ENOMEM;
        goto out;
out_data_grew:
        printk(KERN_ERR "smb_trans2: data/params grew!\n");
        goto out_EIO;
out_bad_parm:
        printk(KERN_ERR "smb_trans2: invalid parms, disp=%u, cnt=%u, tot=%u, ofs=%u\n",
               parm_disp, parm_count, parm_tot, parm_offset);
        goto out_EIO;
out_bad_data:
        printk(KERN_ERR "smb_trans2: invalid data, disp=%u, cnt=%u, tot=%u, ofs=%u\n",
               data_disp, data_count, data_tot, data_offset);
out_EIO:
        req->rq_errno = -EIO;
out:
        return req->rq_errno;
}
/*
 * State machine for receiving responses. We handle the fact that we can't
 * read the full response in one try by having states telling us how much we
 * have read.
 *
 * Must be called with the server lock held (only called from smbiod).
 *
 * Return: <0 on error
 */
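/*
 * Rough state progression for a normal reply (states named in the switch
 * below): SMB_RECV_START -> SMB_RECV_HEADER -> SMB_RECV_HCOMPLETE ->
 * SMB_RECV_PARAM -> SMB_RECV_DATA -> SMB_RECV_END, with SMB_RECV_DROP used
 * to discard unwanted packets and SMB_RECV_REQUEST for server-initiated
 * packets (oplock breaks and the like, presumably) that are not replies.
 */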
int smb_request_recv(struct smb_sb_info *server)
{
        struct smb_request *req = NULL;
        int result = 0;

        if (smb_recv_available(server) <= 0)
                return 0;

        VERBOSE("state: %d\n", server->rstate);
        switch (server->rstate) {
        case SMB_RECV_DROP:
                result = smb_receive_drop(server);
                if (result < 0)
                        break;
                if (server->rstate == SMB_RECV_DROP)
                        break;
                server->rstate = SMB_RECV_START;
                /* fallthrough */
        case SMB_RECV_START:
                server->smb_read = 0;
                server->rstate = SMB_RECV_HEADER;
                /* fallthrough */
        case SMB_RECV_HEADER:
                result = smb_receive_header(server);
                if (result < 0)
                        break;
                if (server->rstate == SMB_RECV_HEADER)
                        break;
                if (!(*(server->header + smb_flg) & SMB_FLAGS_REPLY)) {
                        server->rstate = SMB_RECV_REQUEST;
                        break;
                }
                if (server->rstate != SMB_RECV_HCOMPLETE)
                        break;
                /* fallthrough */
        case SMB_RECV_HCOMPLETE:
                req = find_request(server, WVAL(server->header, smb_mid));
                if (!req)
                        break;
                smb_init_request(server, req);
                req->rq_rcls = *(req->rq_header + smb_rcls);
                req->rq_err = WVAL(req->rq_header, smb_err);
                if (server->rstate != SMB_RECV_PARAM)
                        break;
                /* fallthrough */
        case SMB_RECV_PARAM:
                req = find_request(server, WVAL(server->header, smb_mid));
                if (!req)
                        break;
                result = smb_recv_param(server, req);
                if (result < 0)
                        break;
                if (server->rstate != SMB_RECV_DATA)
                        break;
                /* fallthrough */
        case SMB_RECV_DATA:
                req = find_request(server, WVAL(server->header, smb_mid));
                if (!req)
                        break;
                result = smb_recv_data(server, req);
                break;
                /* We should never be called with any of these states */
        case SMB_RECV_END:
        case SMB_RECV_REQUEST:
                break;
        }

        if (result < 0) {
                /* We saw an error */
                return result;
        }

        if (server->rstate != SMB_RECV_END)
                return 0;

        if (req->rq_trans2_command && req->rq_rcls == SUCCESS)
                result = smb_recv_trans2(server, req);
        /*
         * Response completely read. Drop any extra bytes sent by the server.
         * (Yes, servers sometimes add extra bytes to responses)
         */
        VERBOSE("smb_len: %d smb_read: %d\n",
                server->smb_len, server->smb_read);
        if (server->smb_read < server->smb_len)
                smb_receive_drop(server);

        server->rstate = SMB_RECV_START;

        list_del_init(&req->rq_queue);
        req->rq_flags |= SMB_REQ_RECEIVED;
        smb_rput(req);
        wake_up_interruptible(&req->rq_wait);
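        /*
         * The waiter sleeping in smb_add_request() is woken here; it checks
         * SMB_REQ_RECEIVED, translates rq_rcls/rq_err via smb_errno() and
         * hands rq_errno back to the caller.
         */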