 *   Copyright (C) International Business Machines  Corp., 2002,2007
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *
 *   Common Internet FileSystem (CIFS) client
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
/* Note that BB means BUGBUG (i.e., something to fix eventually) */
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/seq_file.h>
#include <linux/vfs.h>
#include <linux/mempool.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#define DECLARE_GLOBALS_HERE
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include <linux/key-type.h>
#include "cifs_spnego.h"
#define CIFS_MAGIC_NUMBER 0xFF534D42	/* the first four bytes of SMB PDUs: 0xFF 'S' 'M' 'B' */
#ifdef CONFIG_CIFS_QUOTA
static struct quotactl_ops cifs_quotactl_ops;

unsigned int oplockEnabled = 1;
unsigned int experimEnabled = 0;
unsigned int linuxExtEnabled = 1;
unsigned int lookupCacheEnabled = 1;
unsigned int multiuser_mount = 0;
unsigned int extended_security = CIFSSEC_DEF;
/* unsigned int ntlmv2_support = 0; */
unsigned int sign_CIFS_PDUs = 1;
extern struct task_struct *oplockThread; /* remove sparse warning */
struct task_struct *oplockThread = NULL;
/* extern struct task_struct *dnotifyThread; remove sparse warning */
static struct task_struct *dnotifyThread = NULL;
static const struct super_operations cifs_super_ops;
unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
module_param(CIFSMaxBufSize, int, 0);
MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header). "
		 "Default: 16384 Range: 8192 to 130048");
unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
module_param(cifs_min_rcv, int, 0);
MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
		 "1 to 64");
unsigned int cifs_min_small = 30;
module_param(cifs_min_small, int, 0);
MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
		 "Range: 2 to 256");
unsigned int cifs_max_pending = CIFS_MAX_REQ;
module_param(cifs_max_pending, int, 0);
MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. "
		 "Default: 50 Range: 2 to 256");
extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
extern mempool_t *cifs_mid_poolp;

extern struct kmem_cache *cifs_oplock_cachep;
cifs_read_super(struct super_block *sb, void *data,
		const char *devname, int silent)
	struct cifs_sb_info *cifs_sb;

	/* BB should we make this contingent on mount parm? */
	sb->s_flags |= MS_NODIRATIME | MS_NOATIME;
	sb->s_fs_info = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
	cifs_sb = CIFS_SB(sb);
	rc = cifs_mount(sb, cifs_sb, data, devname);
		cERROR(1, ("cifs_mount failed w/return code = %d", rc));
		goto out_mount_failed;
	sb->s_magic = CIFS_MAGIC_NUMBER;
	sb->s_op = &cifs_super_ops;
/*	if (cifs_sb->tcon->ses->server->maxBuf > MAX_CIFS_HDR_SIZE + 512)
	    cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */
#ifdef CONFIG_CIFS_QUOTA
	sb->s_qcop = &cifs_quotactl_ops;
	sb->s_blocksize = CIFS_MAX_MSGSIZE;
	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
	inode = iget(sb, ROOT_I);
	sb->s_root = d_alloc_root(inode);
#ifdef CONFIG_CIFS_EXPERIMENTAL
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		cFYI(1, ("export ops supported"));
		sb->s_export_op = &cifs_export_ops;
#endif /* EXPERIMENTAL */
151 cERROR(1, ("cifs_read_super: get root inode failed"));
157 if (cifs_sb->local_nls)
158 unload_nls(cifs_sb->local_nls);
cifs_put_super(struct super_block *sb)
	struct cifs_sb_info *cifs_sb;

	cFYI(1, ("In cifs_put_super"));
	cifs_sb = CIFS_SB(sb);
	if (cifs_sb == NULL) {
		cFYI(1, ("Empty cifs superblock info passed to unmount"));
	rc = cifs_umount(sb, cifs_sb);
		cERROR(1, ("cifs_umount failed with return code %d", rc));
	unload_nls(cifs_sb->local_nls);
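/*
 * cifs_statfs: fill in the locally known fields (f_type, f_namelen, ...)
 * and then query the server for size information.  Preference order: the
 * POSIX QFS call when the share advertises the CIFS Unix extensions, then
 * the NT-level QFSInfo, and finally the old pre-NT call for legacy servers.
 */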
cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
	struct super_block *sb = dentry->d_sb;
	int rc = -EOPNOTSUPP;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;

	cifs_sb = CIFS_SB(sb);
	pTcon = cifs_sb->tcon;

	buf->f_type = CIFS_MAGIC_NUMBER;

	/* instead could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO */
	buf->f_namelen = PATH_MAX; /* PATH_MAX may be too long - it would
				      presumably be total path, but note
				      that some servers (including Samba 3)
				      have a shorter maximum path */
	buf->f_files = 0;	/* undefined */
	buf->f_ffree = 0;	/* unlimited */
/* BB we could add a second check for a QFS Unix capability bit */
/* BB FIXME check CIFS_POSIX_EXTENSIONS Unix cap first FIXME BB */
	if ((pTcon->ses->capabilities & CAP_UNIX) && (CIFS_POSIX_EXTENSIONS &
	    le64_to_cpu(pTcon->fsUnixInfo.Capability)))
		rc = CIFSSMBQFSPosixInfo(xid, pTcon, buf);

	/* Only need to call the old QFSInfo if failed on newer one */
	if (rc)
		if (pTcon->ses->capabilities & CAP_NT_SMBS)
			rc = CIFSSMBQFSInfo(xid, pTcon, buf); /* not supported by OS2 */

	/* Some old Windows servers also do not support level 103, retry with
	   older level one if old server failed the previous call or we
	   bypassed it because we detected that this was an older LANMAN sess */
	if (rc)
		rc = SMBOldQFSInfo(xid, pTcon, buf);

/* BB get from info in tcon struct at mount time call to QFSAttrInfo */
	return 0;		/* always return success? what if volume is no
				   longer available? */
static int cifs_permission(struct inode *inode, int mask, struct nameidata *nd)
	struct cifs_sb_info *cifs_sb;

	cifs_sb = CIFS_SB(inode->i_sb);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
	else /* file mode might have been restricted at mount time
		on the client (above and beyond ACL on servers) for
		servers which do not support setting and viewing mode bits,
		so allowing client to check permissions is useful */
		return generic_permission(inode, mask, NULL);
static struct kmem_cache *cifs_inode_cachep;
static struct kmem_cache *cifs_req_cachep;
static struct kmem_cache *cifs_mid_cachep;
struct kmem_cache *cifs_oplock_cachep;
static struct kmem_cache *cifs_sm_req_cachep;
mempool_t *cifs_sm_req_poolp;
mempool_t *cifs_req_poolp;
mempool_t *cifs_mid_poolp;
static struct inode *
cifs_alloc_inode(struct super_block *sb)
	struct cifsInodeInfo *cifs_inode;

	cifs_inode = kmem_cache_alloc(cifs_inode_cachep, GFP_KERNEL);
	cifs_inode->cifsAttrs = 0x20;	/* default */
	atomic_set(&cifs_inode->inUse, 0);
	cifs_inode->time = 0;
	cifs_inode->write_behind_rc = 0;
	/* Until the file is open and we have gotten oplock
	   info back from the server, can not assume caching of
	   file data or metadata */
	cifs_inode->clientCanCacheRead = FALSE;
	cifs_inode->clientCanCacheAll = FALSE;
	cifs_inode->vfs_inode.i_blkbits = 14;	/* 2**14 = CIFS_MAX_MSGSIZE */

	/* Can not set i_flags here - they get immediately overwritten
	   to zero by the VFS */
/*	cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME; */
	INIT_LIST_HEAD(&cifs_inode->openFileList);
	return &cifs_inode->vfs_inode;
cifs_destroy_inode(struct inode *inode)
	kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
 * cifs_show_options() is for displaying mount options in /proc/mounts.
 * Not all settable options are displayed but most of the important
 * ones are displayed.
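 *
 * Illustrative /proc/mounts entry built from the options printed below
 * (server, share and option values are examples only):
 *
 *   //server/share /mnt/cifs cifs rw,unc=\\server\share,username=guest,rsize=16384,wsize=57344 0 0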
cifs_show_options(struct seq_file *s, struct vfsmount *m)
	struct cifs_sb_info *cifs_sb;

	cifs_sb = CIFS_SB(m->mnt_sb);

	/* BB add prepath to mount options displayed */
	seq_printf(s, ",unc=%s", cifs_sb->tcon->treeName);
	if (cifs_sb->tcon->ses) {
		if (cifs_sb->tcon->ses->userName)
			seq_printf(s, ",username=%s",
				   cifs_sb->tcon->ses->userName);
		if (cifs_sb->tcon->ses->domainName)
			seq_printf(s, ",domain=%s",
				   cifs_sb->tcon->ses->domainName);
	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID) ||
	    !(cifs_sb->tcon->unix_ext))
		seq_printf(s, ",uid=%d", cifs_sb->mnt_uid);
	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID) ||
	    !(cifs_sb->tcon->unix_ext))
		seq_printf(s, ",gid=%d", cifs_sb->mnt_gid);
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
		seq_printf(s, ",posixpaths");
	seq_printf(s, ",rsize=%d", cifs_sb->rsize);
	seq_printf(s, ",wsize=%d", cifs_sb->wsize);
#ifdef CONFIG_CIFS_QUOTA
int cifs_xquota_set(struct super_block *sb, int quota_type, qid_t qid,
		    struct fs_disk_quota *pdquota)
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	pTcon = cifs_sb->tcon;
	cFYI(1, ("set type: 0x%x id: %d", quota_type, qid));
int cifs_xquota_get(struct super_block *sb, int quota_type, qid_t qid,
		    struct fs_disk_quota *pdquota)
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	pTcon = cifs_sb->tcon;
	cFYI(1, ("get type: 0x%x id: %d", quota_type, qid));
int cifs_xstate_set(struct super_block *sb, unsigned int flags, int operation)
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	pTcon = cifs_sb->tcon;
	cFYI(1, ("flags: 0x%x operation: 0x%x", flags, operation));

int cifs_xstate_get(struct super_block *sb, struct fs_quota_stat *qstats)
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	pTcon = cifs_sb->tcon;
	cFYI(1, ("pqstats %p", qstats));
static struct quotactl_ops cifs_quotactl_ops = {
	.set_xquota = cifs_xquota_set,
	.get_xquota = cifs_xquota_get,
	.set_xstate = cifs_xstate_set,
	.get_xstate = cifs_xstate_get,
static void cifs_umount_begin(struct vfsmount *vfsmnt, int flags)
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *tcon;

	if (!(flags & MNT_FORCE))
	cifs_sb = CIFS_SB(vfsmnt->mnt_sb);
	tcon = cifs_sb->tcon;

	down(&tcon->tconSem);
	if (atomic_read(&tcon->useCount) == 1)
		tcon->tidStatus = CifsExiting;

	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
	/* cancel_notify_requests(tcon); */
	if (tcon->ses && tcon->ses->server) {
		cFYI(1, ("wake up tasks now - umount begin not complete"));
		wake_up_all(&tcon->ses->server->request_q);
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1); /* yield */
		/* we have to kick the requests once more */
		wake_up_all(&tcon->ses->server->response_q);

	/* BB FIXME - finish add checks for tidStatus BB */
#ifdef CONFIG_CIFS_STATS2
static int cifs_show_stats(struct seq_file *s, struct vfsmount *mnt)

static int cifs_remount(struct super_block *sb, int *flags, char *data)
	*flags |= MS_NODIRATIME;
static const struct super_operations cifs_super_ops = {
	.read_inode = cifs_read_inode,
	.put_super = cifs_put_super,
	.statfs = cifs_statfs,
	.alloc_inode = cifs_alloc_inode,
	.destroy_inode = cifs_destroy_inode,
/*	.drop_inode = generic_delete_inode,
	.delete_inode = cifs_delete_inode, */ /* Do not need above two
	functions unless later we add lazy close of inodes or unless the
	kernel forgets to call us with the same number of releases (closes)
	as opens */
	.show_options = cifs_show_options,
	.umount_begin = cifs_umount_begin,
	.remount_fs = cifs_remount,
#ifdef CONFIG_CIFS_STATS2
	.show_stats = cifs_show_stats,
cifs_get_sb(struct file_system_type *fs_type,
	    int flags, const char *dev_name, void *data, struct vfsmount *mnt)
	struct super_block *sb = sget(fs_type, NULL, set_anon_super, NULL);

	cFYI(1, ("Devname: %s flags: %d ", dev_name, flags));

	rc = cifs_read_super(sb, data, dev_name, flags & MS_SILENT ? 1 : 0);
		up_write(&sb->s_umount);
		deactivate_super(sb);
	sb->s_flags |= MS_ACTIVE;
	return simple_set_mnt(mnt, sb);
static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
				   unsigned long nr_segs, loff_t pos)
	struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
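	/*
	 * If the server has not granted an oplock that lets us cache all of
	 * the file's data (clientCanCacheAll), kick off writeback of any
	 * dirty pages right after the buffered write so the data reaches
	 * the server promptly.
	 */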
	written = generic_file_aio_write(iocb, iov, nr_segs, pos);
	if (!CIFS_I(inode)->clientCanCacheAll)
		filemap_fdatawrite(inode->i_mapping);
static loff_t cifs_llseek(struct file *file, loff_t offset, int origin)
	/* origin == SEEK_END => we must revalidate the cached file length */
	if (origin == SEEK_END) {
		/* some applications poll for the file length in this strange
		   way so we must seek to end on non-oplocked files by
		   setting the revalidate time to zero */
		CIFS_I(file->f_path.dentry->d_inode)->time = 0;

		retval = cifs_revalidate(file->f_path.dentry);
			return (loff_t)retval;
	return remote_llseek(file, offset, origin);
static struct file_system_type cifs_fs_type = {
	.owner = THIS_MODULE,
	.get_sb = cifs_get_sb,
	.kill_sb = kill_anon_super,
const struct inode_operations cifs_dir_inode_ops = {
	.create = cifs_create,
	.lookup = cifs_lookup,
	.getattr = cifs_getattr,
	.unlink = cifs_unlink,
	.link = cifs_hardlink,
	.rename = cifs_rename,
	.permission = cifs_permission,
/*	revalidate: cifs_revalidate, */
	.setattr = cifs_setattr,
	.symlink = cifs_symlink,
#ifdef CONFIG_CIFS_XATTR
	.setxattr = cifs_setxattr,
	.getxattr = cifs_getxattr,
	.listxattr = cifs_listxattr,
	.removexattr = cifs_removexattr,
const struct inode_operations cifs_file_inode_ops = {
/*	revalidate: cifs_revalidate, */
	.setattr = cifs_setattr,
	.getattr = cifs_getattr, /* do we need this anymore? */
	.rename = cifs_rename,
	.permission = cifs_permission,
#ifdef CONFIG_CIFS_XATTR
	.setxattr = cifs_setxattr,
	.getxattr = cifs_getxattr,
	.listxattr = cifs_listxattr,
	.removexattr = cifs_removexattr,
const struct inode_operations cifs_symlink_inode_ops = {
	.readlink = generic_readlink,
	.follow_link = cifs_follow_link,
	.put_link = cifs_put_link,
	.permission = cifs_permission,
	/* BB add the following two eventually */
	/* revalidate: cifs_revalidate,
	   setattr:    cifs_notify_change, *//* BB do we need notify change */
#ifdef CONFIG_CIFS_XATTR
	.setxattr = cifs_setxattr,
	.getxattr = cifs_getxattr,
	.listxattr = cifs_listxattr,
	.removexattr = cifs_removexattr,
const struct file_operations cifs_file_ops = {
	.read = do_sync_read,
	.write = do_sync_write,
	.aio_read = generic_file_aio_read,
	.aio_write = cifs_file_aio_write,
	.release = cifs_close,
	.mmap = cifs_file_mmap,
	.splice_read = generic_file_splice_read,
	.llseek = cifs_llseek,
#ifdef CONFIG_CIFS_POSIX
#endif /* CONFIG_CIFS_POSIX */
#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
const struct file_operations cifs_file_direct_ops = {
	/* no mmap, no aio, no readv -
	   BB reevaluate whether they can be done with directio, no cache */
	.read = cifs_user_read,
	.write = cifs_user_write,
	.release = cifs_close,
	.splice_read = generic_file_splice_read,
#ifdef CONFIG_CIFS_POSIX
#endif /* CONFIG_CIFS_POSIX */
	.llseek = cifs_llseek,
#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
const struct file_operations cifs_file_nobrl_ops = {
	.read = do_sync_read,
	.write = do_sync_write,
	.aio_read = generic_file_aio_read,
	.aio_write = cifs_file_aio_write,
	.release = cifs_close,
	.mmap = cifs_file_mmap,
	.splice_read = generic_file_splice_read,
	.llseek = cifs_llseek,
#ifdef CONFIG_CIFS_POSIX
#endif /* CONFIG_CIFS_POSIX */
#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
const struct file_operations cifs_file_direct_nobrl_ops = {
	/* no mmap, no aio, no readv -
	   BB reevaluate whether they can be done with directio, no cache */
	.read = cifs_user_read,
	.write = cifs_user_write,
	.release = cifs_close,
	.splice_read = generic_file_splice_read,
#ifdef CONFIG_CIFS_POSIX
#endif /* CONFIG_CIFS_POSIX */
	.llseek = cifs_llseek,
#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
const struct file_operations cifs_dir_ops = {
	.readdir = cifs_readdir,
	.release = cifs_closedir,
	.read = generic_read_dir,
#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
cifs_init_once(struct kmem_cache *cachep, void *inode)
	struct cifsInodeInfo *cifsi = inode;

	inode_init_once(&cifsi->vfs_inode);
	INIT_LIST_HEAD(&cifsi->lockList);
cifs_init_inodecache(void)
	cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
					      sizeof(struct cifsInodeInfo),
					      0, (SLAB_RECLAIM_ACCOUNT|
	if (cifs_inode_cachep == NULL)

cifs_destroy_inodecache(void)
	kmem_cache_destroy(cifs_inode_cachep);
cifs_init_request_bufs(void)
	if (CIFSMaxBufSize < 8192) {
	/* Buffer size can not be smaller than 2 * PATH_MAX since maximum
	   Unicode path name has to fit in any SMB/CIFS path based frames */
		CIFSMaxBufSize = 8192;
	} else if (CIFSMaxBufSize > 1024*127) {
		CIFSMaxBufSize = 1024 * 127;
	CIFSMaxBufSize &= 0x1FE00;	/* Round size to even 512 byte mult */
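	/*
	 * The mask clears the low nine bits, rounding CIFSMaxBufSize down to
	 * a multiple of 512.  Worked example: 9000 (0x2328) becomes 8704
	 * (0x2200); the 16384 default and the 130048 ceiling are already
	 * multiples of 512 and pass through unchanged.
	 */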
746 /* cERROR(1,("CIFSMaxBufSize %d 0x%x",CIFSMaxBufSize,CIFSMaxBufSize)); */
	cifs_req_cachep = kmem_cache_create("cifs_request",
					    CIFSMaxBufSize +
					    MAX_CIFS_HDR_SIZE, 0,
					    SLAB_HWCACHE_ALIGN, NULL);
	if (cifs_req_cachep == NULL)

	if (cifs_min_rcv < 1)
	else if (cifs_min_rcv > 64) {
		cERROR(1, ("cifs_min_rcv set to maximum (64)"));

	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
						  cifs_req_cachep);
	if (cifs_req_poolp == NULL) {
		kmem_cache_destroy(cifs_req_cachep);
	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
	   almost all handle based requests (but not write response, nor is it
	   sufficient for path based requests).  A smaller size would have
	   been more efficient (compacting multiple slab items on one 4k page)
	   for the case in which debug was on, but this larger size allows
	   more SMBs to use small buffer alloc and is still much more
	   efficient to alloc 1 per page off the slab compared to 17K (5page)
	   alloc of large cifs buffers even when page debugging is on */
	cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
					       MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
					       NULL);
	if (cifs_sm_req_cachep == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
	if (cifs_min_small < 2)
	else if (cifs_min_small > 256) {
		cifs_min_small = 256;
		cFYI(1, ("cifs_min_small set to maximum (256)"));

	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
						     cifs_sm_req_cachep);
	if (cifs_sm_req_poolp == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		kmem_cache_destroy(cifs_sm_req_cachep);
cifs_destroy_request_bufs(void)
	mempool_destroy(cifs_req_poolp);
	kmem_cache_destroy(cifs_req_cachep);
	mempool_destroy(cifs_sm_req_poolp);
	kmem_cache_destroy(cifs_sm_req_cachep);
	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
					    sizeof(struct mid_q_entry), 0,
					    SLAB_HWCACHE_ALIGN, NULL);
	if (cifs_mid_cachep == NULL)

	/* 3 is a reasonable minimum number of simultaneous operations */
	cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
	if (cifs_mid_poolp == NULL) {
		kmem_cache_destroy(cifs_mid_cachep);

	cifs_oplock_cachep = kmem_cache_create("cifs_oplock_structs",
					       sizeof(struct oplock_q_entry), 0,
					       SLAB_HWCACHE_ALIGN, NULL);
	if (cifs_oplock_cachep == NULL) {
		mempool_destroy(cifs_mid_poolp);
		kmem_cache_destroy(cifs_mid_cachep);
cifs_destroy_mids(void)
	mempool_destroy(cifs_mid_poolp);
	kmem_cache_destroy(cifs_mid_cachep);
	kmem_cache_destroy(cifs_oplock_cachep);
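/*
 * cifsoplockd: background thread that services queued oplock breaks.
 * For each entry on GlobalOplock_Q it flushes dirty pages for the inode,
 * invalidates cached data when read caching has been revoked, and then
 * acknowledges the break by sending LOCKING_ANDX_OPLOCK_RELEASE on the
 * file handle (skipped if the session still needs to be reconnected).
 */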
static int cifs_oplock_thread(void *dummyarg)
	struct oplock_q_entry *oplock_item;
	struct cifsTconInfo *pTcon;

		spin_lock(&GlobalMid_Lock);
		if (list_empty(&GlobalOplock_Q)) {
			spin_unlock(&GlobalMid_Lock);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(39*HZ);
			oplock_item = list_entry(GlobalOplock_Q.next,
						 struct oplock_q_entry, qhead);
			cFYI(1, ("found oplock item to write out"));
			pTcon = oplock_item->tcon;
			inode = oplock_item->pinode;
			netfid = oplock_item->netfid;
			spin_unlock(&GlobalMid_Lock);
			DeleteOplockQEntry(oplock_item);
			/* can not grab inode sem here since it would
			   deadlock when oplock received on delete
			   since vfs_unlink holds the i_mutex across
			   the call */
			/* mutex_lock(&inode->i_mutex); */
			if (S_ISREG(inode->i_mode)) {
				filemap_fdatawrite(inode->i_mapping);
				if (CIFS_I(inode)->clientCanCacheRead == 0) {
					waitrc = filemap_fdatawait(inode->i_mapping);
					invalidate_remote_inode(inode);
			/* mutex_unlock(&inode->i_mutex); */
				CIFS_I(inode)->write_behind_rc = rc;
			cFYI(1, ("Oplock flush inode %p rc %d",
				 inode, rc));

			/* releasing stale oplock after recent reconnect
			   of smb session using a now incorrect file
			   handle is not a data integrity issue but do
			   not bother sending an oplock release if session
			   to server still is disconnected since oplock
			   already released by the server in that case */
			if (pTcon->tidStatus != CifsNeedReconnect) {
				rc = CIFSSMBLock(0, pTcon, netfid,
						 0 /* len */ , 0 /* offset */, 0,
						 0, LOCKING_ANDX_OPLOCK_RELEASE,
				cFYI(1, ("Oplock release rc = %d", rc));
			spin_unlock(&GlobalMid_Lock);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(1);	/* yield in case q were corrupt */
	} while (!kthread_should_stop());
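/*
 * cifsdnotifyd: roughly every 15 seconds wake up any threads blocked on a
 * server response for sessions that still have requests in flight.
 * Directory change notifications can legitimately stay outstanding for a
 * long time, so periodically kicking the response queues lets stuck
 * callers notice session errors and bail out.
 */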
static int cifs_dnotify_thread(void *dummyarg)
	struct list_head *tmp;
	struct cifsSesInfo *ses;

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(15*HZ);
		read_lock(&GlobalSMBSeslock);
		/* check if any stuck requests that need
		   to be woken up and wakeq so the
		   thread can wake up and error out */
		list_for_each(tmp, &GlobalSMBSessionList) {
			ses = list_entry(tmp, struct cifsSesInfo,
			if (ses && ses->server &&
			    atomic_read(&ses->server->inFlight))
				wake_up_all(&ses->server->response_q);
		read_unlock(&GlobalSMBSeslock);
	} while (!kthread_should_stop());
#ifdef CONFIG_PROC_FS
/*	INIT_LIST_HEAD(&GlobalServerList); */ /* BB not implemented yet */
	INIT_LIST_HEAD(&GlobalSMBSessionList);
	INIT_LIST_HEAD(&GlobalTreeConnectionList);
	INIT_LIST_HEAD(&GlobalOplock_Q);
#ifdef CONFIG_CIFS_EXPERIMENTAL
	INIT_LIST_HEAD(&GlobalDnotifyReqList);
	INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
 * Initialize Global counters
	atomic_set(&sesInfoAllocCount, 0);
	atomic_set(&tconInfoAllocCount, 0);
	atomic_set(&tcpSesAllocCount, 0);
	atomic_set(&tcpSesReconnectCount, 0);
	atomic_set(&tconInfoReconnectCount, 0);

	atomic_set(&bufAllocCount, 0);
	atomic_set(&smBufAllocCount, 0);
#ifdef CONFIG_CIFS_STATS2
	atomic_set(&totBufAllocCount, 0);
	atomic_set(&totSmBufAllocCount, 0);
#endif /* CONFIG_CIFS_STATS2 */

	atomic_set(&midCount, 0);
	GlobalCurrentXid = 0;
	GlobalTotalActiveXid = 0;
	GlobalMaxActiveXid = 0;
	memset(Local_System_Name, 0, 15);
	rwlock_init(&GlobalSMBSeslock);
	spin_lock_init(&GlobalMid_Lock);
	if (cifs_max_pending < 2) {
		cifs_max_pending = 2;
		cFYI(1, ("cifs_max_pending set to min of 2"));
	} else if (cifs_max_pending > 256) {
		cifs_max_pending = 256;
		cFYI(1, ("cifs_max_pending set to max of 256"));
	rc = cifs_init_inodecache();
	if (rc)
		goto out_clean_proc;

	rc = cifs_init_mids();
	if (rc)
		goto out_destroy_inodecache;

	rc = cifs_init_request_bufs();
	if (rc)
		goto out_destroy_mids;

	rc = register_filesystem(&cifs_fs_type);
	if (rc)
		goto out_destroy_request_bufs;
#ifdef CONFIG_CIFS_UPCALL
	rc = register_key_type(&cifs_spnego_key_type);
	if (rc)
		goto out_unregister_filesystem;

	oplockThread = kthread_run(cifs_oplock_thread, NULL, "cifsoplockd");
	if (IS_ERR(oplockThread)) {
		rc = PTR_ERR(oplockThread);
		cERROR(1, ("error %d create oplock thread", rc));
		goto out_unregister_key_type;

	dnotifyThread = kthread_run(cifs_dnotify_thread, NULL, "cifsdnotifyd");
	if (IS_ERR(dnotifyThread)) {
		rc = PTR_ERR(dnotifyThread);
		cERROR(1, ("error %d create dnotify thread", rc));
		goto out_stop_oplock_thread;
 out_stop_oplock_thread:
	kthread_stop(oplockThread);
 out_unregister_key_type:
#ifdef CONFIG_CIFS_UPCALL
	unregister_key_type(&cifs_spnego_key_type);
 out_unregister_filesystem:
	unregister_filesystem(&cifs_fs_type);
 out_destroy_request_bufs:
	cifs_destroy_request_bufs();
	cifs_destroy_mids();
 out_destroy_inodecache:
	cifs_destroy_inodecache();
#ifdef CONFIG_PROC_FS
1058 cFYI(0, ("exit_cifs"));
1059 #ifdef CONFIG_PROC_FS
1062 #ifdef CONFIG_CIFS_UPCALL
1063 unregister_key_type(&cifs_spnego_key_type);
1065 unregister_filesystem(&cifs_fs_type);
1066 cifs_destroy_inodecache();
1067 cifs_destroy_mids();
1068 cifs_destroy_request_bufs();
1069 kthread_stop(oplockThread);
1070 kthread_stop(dnotifyThread);
1073 MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
1074 MODULE_LICENSE("GPL"); /* combination of LGPL + GPL source behaves as GPL */
1076 ("VFS to access servers complying with the SNIA CIFS Specification "
1077 "e.g. Samba and Windows");
1078 MODULE_VERSION(CIFS_VERSION);
1079 module_init(init_cifs)
1080 module_exit(exit_cifs)