4 * Copyright (C) International Business Machines Corp., 2002,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
7 * Common Internet FileSystem (CIFS) client
9 * This library is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU Lesser General Public License as published
11 * by the Free Software Foundation; either version 2.1 of the License, or
12 * (at your option) any later version.
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
17 * the GNU Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public License
20 * along with this library; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 /* Note that BB means BUGBUG (ie something to fix eventually) */
26 #include <linux/module.h>
28 #include <linux/mount.h>
29 #include <linux/slab.h>
30 #include <linux/init.h>
31 #include <linux/list.h>
32 #include <linux/seq_file.h>
33 #include <linux/vfs.h>
34 #include <linux/mempool.h>
35 #include <linux/delay.h>
36 #include <linux/kthread.h>
37 #include <linux/freezer.h>
40 #define DECLARE_GLOBALS_HERE
42 #include "cifsproto.h"
43 #include "cifs_debug.h"
44 #include "cifs_fs_sb.h"
46 #include <linux/key-type.h>
47 #include "dns_resolve.h"
48 #include "cifs_spnego.h"
49 #define CIFS_MAGIC_NUMBER 0xFF534D42 /* the first four bytes of SMB PDUs */
51 #ifdef CONFIG_CIFS_QUOTA
52 static struct quotactl_ops cifs_quotactl_ops;
58 unsigned int oplockEnabled = 1;
59 unsigned int experimEnabled = 0;
60 unsigned int linuxExtEnabled = 1;
61 unsigned int lookupCacheEnabled = 1;
62 unsigned int multiuser_mount = 0;
63 unsigned int extended_security = CIFSSEC_DEF;
64 /* unsigned int ntlmv2_support = 0; */
65 unsigned int sign_CIFS_PDUs = 1;
66 extern struct task_struct *oplockThread; /* remove sparse warning */
67 struct task_struct *oplockThread = NULL;
68 /* extern struct task_struct * dnotifyThread; remove sparse warning */
69 static struct task_struct *dnotifyThread = NULL;
70 static const struct super_operations cifs_super_ops;
71 unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
72 module_param(CIFSMaxBufSize, int, 0);
73 MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header). "
74 "Default: 16384 Range: 8192 to 130048");
75 unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
76 module_param(cifs_min_rcv, int, 0);
77 MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
79 unsigned int cifs_min_small = 30;
80 module_param(cifs_min_small, int, 0);
81 MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
83 unsigned int cifs_max_pending = CIFS_MAX_REQ;
84 module_param(cifs_max_pending, int, 0);
85 MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. "
86 "Default: 50 Range: 2 to 256");
88 extern mempool_t *cifs_sm_req_poolp;
89 extern mempool_t *cifs_req_poolp;
90 extern mempool_t *cifs_mid_poolp;
92 extern struct kmem_cache *cifs_oplock_cachep;
95 cifs_read_super(struct super_block *sb, void *data,
96 const char *devname, int silent)
99 struct cifs_sb_info *cifs_sb;
102 /* BB should we make this contingent on mount parm? */
103 sb->s_flags |= MS_NODIRATIME | MS_NOATIME;
104 sb->s_fs_info = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
105 cifs_sb = CIFS_SB(sb);
109 #ifdef CONFIG_CIFS_DFS_UPCALL
110 /* copy mount params to sb for use in submounts */
111 /* BB: should we move this after the mount so we
112 * do not have to do the copy on failed mounts?
113 * BB: May be it is better to do simple copy before
114 * complex operation (mount), and in case of fail
115 * just exit instead of doing mount and attempting
116 * undo it if this copy fails?*/
118 int len = strlen(data);
119 cifs_sb->mountdata = kzalloc(len + 1, GFP_KERNEL);
120 if (cifs_sb->mountdata == NULL) {
121 kfree(sb->s_fs_info);
122 sb->s_fs_info = NULL;
125 strncpy(cifs_sb->mountdata, data, len + 1);
126 cifs_sb->mountdata[len] = '\0';
130 rc = cifs_mount(sb, cifs_sb, data, devname);
135 ("cifs_mount failed w/return code = %d", rc));
136 goto out_mount_failed;
139 sb->s_magic = CIFS_MAGIC_NUMBER;
140 sb->s_op = &cifs_super_ops;
141 /* if (cifs_sb->tcon->ses->server->maxBuf > MAX_CIFS_HDR_SIZE + 512)
143 cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */
144 #ifdef CONFIG_CIFS_QUOTA
145 sb->s_qcop = &cifs_quotactl_ops;
147 sb->s_blocksize = CIFS_MAX_MSGSIZE;
148 sb->s_blocksize_bits = 14; /* default 2**14 = CIFS_MAX_MSGSIZE */
149 inode = cifs_iget(sb, ROOT_I);
157 sb->s_root = d_alloc_root(inode);
164 #ifdef CONFIG_CIFS_EXPERIMENTAL
165 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
166 cFYI(1, ("export ops supported"));
167 sb->s_export_op = &cifs_export_ops;
169 #endif /* EXPERIMENTAL */
174 cERROR(1, ("cifs_read_super: get root inode failed"));
180 #ifdef CONFIG_CIFS_DFS_UPCALL
181 if (cifs_sb->mountdata) {
182 kfree(cifs_sb->mountdata);
183 cifs_sb->mountdata = NULL;
186 if (cifs_sb->local_nls)
187 unload_nls(cifs_sb->local_nls);
194 cifs_put_super(struct super_block *sb)
197 struct cifs_sb_info *cifs_sb;
199 cFYI(1, ("In cifs_put_super"));
200 cifs_sb = CIFS_SB(sb);
201 if (cifs_sb == NULL) {
202 cFYI(1, ("Empty cifs superblock info passed to unmount"));
205 rc = cifs_umount(sb, cifs_sb);
207 cERROR(1, ("cifs_umount failed with return code %d", rc));
208 #ifdef CONFIG_CIFS_DFS_UPCALL
209 if (cifs_sb->mountdata) {
210 kfree(cifs_sb->mountdata);
211 cifs_sb->mountdata = NULL;
215 unload_nls(cifs_sb->local_nls);
221 cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
223 struct super_block *sb = dentry->d_sb;
224 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
225 struct cifsTconInfo *tcon = cifs_sb->tcon;
226 int rc = -EOPNOTSUPP;
231 buf->f_type = CIFS_MAGIC_NUMBER;
234 * PATH_MAX may be too long - it would presumably be total path,
235 * but note that some servers (includinng Samba 3) have a shorter
238 * Instead could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO.
240 buf->f_namelen = PATH_MAX;
241 buf->f_files = 0; /* undefined */
242 buf->f_ffree = 0; /* unlimited */
245 * We could add a second check for a QFS Unix capability bit
247 if ((tcon->ses->capabilities & CAP_UNIX) &&
248 (CIFS_POSIX_EXTENSIONS & le64_to_cpu(tcon->fsUnixInfo.Capability)))
249 rc = CIFSSMBQFSPosixInfo(xid, tcon, buf);
252 * Only need to call the old QFSInfo if failed on newer one,
255 if (rc && (tcon->ses->capabilities & CAP_NT_SMBS))
256 rc = CIFSSMBQFSInfo(xid, tcon, buf);
259 * Some old Windows servers also do not support level 103, retry with
260 * older level one if old server failed the previous call or we
261 * bypassed it because we detected that this was an older LANMAN sess
264 rc = SMBOldQFSInfo(xid, tcon, buf);
270 static int cifs_permission(struct inode *inode, int mask, struct nameidata *nd)
272 struct cifs_sb_info *cifs_sb;
274 cifs_sb = CIFS_SB(inode->i_sb);
276 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
278 else /* file mode might have been restricted at mount time
279 on the client (above and beyond ACL on servers) for
280 servers which do not support setting and viewing mode bits,
281 so allowing client to check permissions is useful */
282 return generic_permission(inode, mask, NULL);
/*
 * Slab caches and mempools backing CIFS allocations.  The mempools
 * guarantee a minimum number of request/mid buffers remain available
 * even under memory pressure; sizing happens in cifs_init_request_bufs
 * and cifs_init_mids.
 */
static struct kmem_cache *cifs_inode_cachep;
static struct kmem_cache *cifs_req_cachep;
static struct kmem_cache *cifs_mid_cachep;
struct kmem_cache *cifs_oplock_cachep;		/* shared with transport code */
static struct kmem_cache *cifs_sm_req_cachep;
mempool_t *cifs_sm_req_poolp;
mempool_t *cifs_req_poolp;
mempool_t *cifs_mid_poolp;
294 static struct inode *
295 cifs_alloc_inode(struct super_block *sb)
297 struct cifsInodeInfo *cifs_inode;
298 cifs_inode = kmem_cache_alloc(cifs_inode_cachep, GFP_KERNEL);
301 cifs_inode->cifsAttrs = 0x20; /* default */
302 atomic_set(&cifs_inode->inUse, 0);
303 cifs_inode->time = 0;
304 cifs_inode->write_behind_rc = 0;
305 /* Until the file is open and we have gotten oplock
306 info back from the server, can not assume caching of
307 file data or metadata */
308 cifs_inode->clientCanCacheRead = false;
309 cifs_inode->clientCanCacheAll = false;
310 cifs_inode->vfs_inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */
312 /* Can not set i_flags here - they get immediately overwritten
313 to zero by the VFS */
314 /* cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME;*/
315 INIT_LIST_HEAD(&cifs_inode->openFileList);
316 return &cifs_inode->vfs_inode;
320 cifs_destroy_inode(struct inode *inode)
322 kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
326 * cifs_show_options() is for displaying mount options in /proc/mounts.
327 * Not all settable options are displayed but most of the important
331 cifs_show_options(struct seq_file *s, struct vfsmount *m)
333 struct cifs_sb_info *cifs_sb;
335 cifs_sb = CIFS_SB(m->mnt_sb);
339 /* BB add prepath to mount options displayed */
340 seq_printf(s, ",unc=%s", cifs_sb->tcon->treeName);
341 if (cifs_sb->tcon->ses) {
342 if (cifs_sb->tcon->ses->userName)
343 seq_printf(s, ",username=%s",
344 cifs_sb->tcon->ses->userName);
345 if (cifs_sb->tcon->ses->domainName)
346 seq_printf(s, ",domain=%s",
347 cifs_sb->tcon->ses->domainName);
349 if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID) ||
350 !(cifs_sb->tcon->unix_ext))
351 seq_printf(s, ",uid=%d", cifs_sb->mnt_uid);
352 if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID) ||
353 !(cifs_sb->tcon->unix_ext))
354 seq_printf(s, ",gid=%d", cifs_sb->mnt_gid);
355 if (!cifs_sb->tcon->unix_ext) {
356 seq_printf(s, ",file_mode=0%o,dir_mode=0%o",
357 cifs_sb->mnt_file_mode,
358 cifs_sb->mnt_dir_mode);
360 if (cifs_sb->tcon->seal)
361 seq_printf(s, ",seal");
362 if (cifs_sb->tcon->nocase)
363 seq_printf(s, ",nocase");
364 if (cifs_sb->tcon->retry)
365 seq_printf(s, ",hard");
367 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
368 seq_printf(s, ",posixpaths");
369 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
370 seq_printf(s, ",setuids");
371 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
372 seq_printf(s, ",serverino");
373 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
374 seq_printf(s, ",directio");
375 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
376 seq_printf(s, ",nouser_xattr");
377 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
378 seq_printf(s, ",mapchars");
379 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
380 seq_printf(s, ",sfu");
381 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
382 seq_printf(s, ",nobrl");
383 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
384 seq_printf(s, ",cifsacl");
385 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
386 seq_printf(s, ",dynperm");
387 if (m->mnt_sb->s_flags & MS_POSIXACL)
388 seq_printf(s, ",acl");
390 seq_printf(s, ",rsize=%d", cifs_sb->rsize);
391 seq_printf(s, ",wsize=%d", cifs_sb->wsize);
#ifdef CONFIG_CIFS_QUOTA
/*
 * Remote quota operations are not implemented yet.  Each handler merely
 * validates the cifs superblock / tree connection (returning -EIO when
 * either is missing) and logs the request.
 */
int cifs_xquota_set(struct super_block *sb, int quota_type, qid_t qid,
		    struct fs_disk_quota *pdquota)
{
	int xid;
	int rc = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	if (cifs_sb)
		pTcon = cifs_sb->tcon;
	else
		return -EIO;

	xid = GetXid();
	if (pTcon) {
		cFYI(1, ("set type: 0x%x id: %d", quota_type, qid));
	} else
		rc = -EIO;

	FreeXid(xid);
	return rc;
}

int cifs_xquota_get(struct super_block *sb, int quota_type, qid_t qid,
		    struct fs_disk_quota *pdquota)
{
	int xid;
	int rc = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	if (cifs_sb)
		pTcon = cifs_sb->tcon;
	else
		return -EIO;

	xid = GetXid();
	if (pTcon) {
		/* was "set type" - copy/paste slip in the trace message */
		cFYI(1, ("get type: 0x%x id: %d", quota_type, qid));
	} else
		rc = -EIO;

	FreeXid(xid);
	return rc;
}

int cifs_xstate_set(struct super_block *sb, unsigned int flags, int operation)
{
	int xid;
	int rc = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	if (cifs_sb)
		pTcon = cifs_sb->tcon;
	else
		return -EIO;

	xid = GetXid();
	if (pTcon) {
		cFYI(1, ("flags: 0x%x operation: 0x%x", flags, operation));
	} else
		rc = -EIO;

	FreeXid(xid);
	return rc;
}

int cifs_xstate_get(struct super_block *sb, struct fs_quota_stat *qstats)
{
	int xid;
	int rc = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsTconInfo *pTcon;

	if (cifs_sb)
		pTcon = cifs_sb->tcon;
	else
		return -EIO;

	xid = GetXid();
	if (pTcon) {
		cFYI(1, ("pqstats %p", qstats));
	} else
		rc = -EIO;

	FreeXid(xid);
	return rc;
}

static struct quotactl_ops cifs_quotactl_ops = {
	.set_xquota	= cifs_xquota_set,
	.get_xquota	= cifs_xquota_get,
	.set_xstate	= cifs_xstate_set,
	.get_xstate	= cifs_xstate_get,
};
#endif
500 static void cifs_umount_begin(struct super_block *sb)
502 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
503 struct cifsTconInfo *tcon;
508 tcon = cifs_sb->tcon;
511 down(&tcon->tconSem);
512 if (atomic_read(&tcon->useCount) == 1)
513 tcon->tidStatus = CifsExiting;
516 /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
517 /* cancel_notify_requests(tcon); */
518 if (tcon->ses && tcon->ses->server) {
519 cFYI(1, ("wake up tasks now - umount begin not complete"));
520 wake_up_all(&tcon->ses->server->request_q);
521 wake_up_all(&tcon->ses->server->response_q);
522 msleep(1); /* yield */
523 /* we have to kick the requests once more */
524 wake_up_all(&tcon->ses->server->response_q);
527 /* BB FIXME - finish add checks for tidStatus BB */
#ifdef CONFIG_CIFS_STATS2
/* super_operations.show_stats stub - per-sb statistics not implemented */
static int cifs_show_stats(struct seq_file *s, struct vfsmount *mnt)
{
	/* BB FIXME */
	return 0;
}
#endif
540 static int cifs_remount(struct super_block *sb, int *flags, char *data)
542 *flags |= MS_NODIRATIME;
546 static const struct super_operations cifs_super_ops = {
547 .put_super = cifs_put_super,
548 .statfs = cifs_statfs,
549 .alloc_inode = cifs_alloc_inode,
550 .destroy_inode = cifs_destroy_inode,
551 /* .drop_inode = generic_delete_inode,
552 .delete_inode = cifs_delete_inode, */ /* Do not need above two
553 functions unless later we add lazy close of inodes or unless the
554 kernel forgets to call us with the same number of releases (closes)
556 .show_options = cifs_show_options,
557 .umount_begin = cifs_umount_begin,
558 .remount_fs = cifs_remount,
559 #ifdef CONFIG_CIFS_STATS2
560 .show_stats = cifs_show_stats,
565 cifs_get_sb(struct file_system_type *fs_type,
566 int flags, const char *dev_name, void *data, struct vfsmount *mnt)
569 struct super_block *sb = sget(fs_type, NULL, set_anon_super, NULL);
571 cFYI(1, ("Devname: %s flags: %d ", dev_name, flags));
578 rc = cifs_read_super(sb, data, dev_name, flags & MS_SILENT ? 1 : 0);
580 up_write(&sb->s_umount);
581 deactivate_super(sb);
584 sb->s_flags |= MS_ACTIVE;
585 return simple_set_mnt(mnt, sb);
588 static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
589 unsigned long nr_segs, loff_t pos)
591 struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
594 written = generic_file_aio_write(iocb, iov, nr_segs, pos);
595 if (!CIFS_I(inode)->clientCanCacheAll)
596 filemap_fdatawrite(inode->i_mapping);
600 static loff_t cifs_llseek(struct file *file, loff_t offset, int origin)
602 /* origin == SEEK_END => we must revalidate the cached file length */
603 if (origin == SEEK_END) {
606 /* some applications poll for the file length in this strange
607 way so we must seek to end on non-oplocked files by
608 setting the revalidate time to zero */
609 CIFS_I(file->f_path.dentry->d_inode)->time = 0;
611 retval = cifs_revalidate(file->f_path.dentry);
613 return (loff_t)retval;
615 return generic_file_llseek_unlocked(file, offset, origin);
618 struct file_system_type cifs_fs_type = {
619 .owner = THIS_MODULE,
621 .get_sb = cifs_get_sb,
622 .kill_sb = kill_anon_super,
625 const struct inode_operations cifs_dir_inode_ops = {
626 .create = cifs_create,
627 .lookup = cifs_lookup,
628 .getattr = cifs_getattr,
629 .unlink = cifs_unlink,
630 .link = cifs_hardlink,
633 .rename = cifs_rename,
634 .permission = cifs_permission,
635 /* revalidate:cifs_revalidate, */
636 .setattr = cifs_setattr,
637 .symlink = cifs_symlink,
639 #ifdef CONFIG_CIFS_XATTR
640 .setxattr = cifs_setxattr,
641 .getxattr = cifs_getxattr,
642 .listxattr = cifs_listxattr,
643 .removexattr = cifs_removexattr,
647 const struct inode_operations cifs_file_inode_ops = {
648 /* revalidate:cifs_revalidate, */
649 .setattr = cifs_setattr,
650 .getattr = cifs_getattr, /* do we need this anymore? */
651 .rename = cifs_rename,
652 .permission = cifs_permission,
653 #ifdef CONFIG_CIFS_XATTR
654 .setxattr = cifs_setxattr,
655 .getxattr = cifs_getxattr,
656 .listxattr = cifs_listxattr,
657 .removexattr = cifs_removexattr,
661 const struct inode_operations cifs_symlink_inode_ops = {
662 .readlink = generic_readlink,
663 .follow_link = cifs_follow_link,
664 .put_link = cifs_put_link,
665 .permission = cifs_permission,
666 /* BB add the following two eventually */
667 /* revalidate: cifs_revalidate,
668 setattr: cifs_notify_change, *//* BB do we need notify change */
669 #ifdef CONFIG_CIFS_XATTR
670 .setxattr = cifs_setxattr,
671 .getxattr = cifs_getxattr,
672 .listxattr = cifs_listxattr,
673 .removexattr = cifs_removexattr,
677 const struct file_operations cifs_file_ops = {
678 .read = do_sync_read,
679 .write = do_sync_write,
680 .aio_read = generic_file_aio_read,
681 .aio_write = cifs_file_aio_write,
683 .release = cifs_close,
687 .mmap = cifs_file_mmap,
688 .splice_read = generic_file_splice_read,
689 .llseek = cifs_llseek,
690 #ifdef CONFIG_CIFS_POSIX
691 .unlocked_ioctl = cifs_ioctl,
692 #endif /* CONFIG_CIFS_POSIX */
694 #ifdef CONFIG_CIFS_EXPERIMENTAL
695 .dir_notify = cifs_dir_notify,
696 #endif /* CONFIG_CIFS_EXPERIMENTAL */
699 const struct file_operations cifs_file_direct_ops = {
700 /* no mmap, no aio, no readv -
701 BB reevaluate whether they can be done with directio, no cache */
702 .read = cifs_user_read,
703 .write = cifs_user_write,
705 .release = cifs_close,
709 .splice_read = generic_file_splice_read,
710 #ifdef CONFIG_CIFS_POSIX
711 .unlocked_ioctl = cifs_ioctl,
712 #endif /* CONFIG_CIFS_POSIX */
713 .llseek = cifs_llseek,
714 #ifdef CONFIG_CIFS_EXPERIMENTAL
715 .dir_notify = cifs_dir_notify,
716 #endif /* CONFIG_CIFS_EXPERIMENTAL */
718 const struct file_operations cifs_file_nobrl_ops = {
719 .read = do_sync_read,
720 .write = do_sync_write,
721 .aio_read = generic_file_aio_read,
722 .aio_write = cifs_file_aio_write,
724 .release = cifs_close,
727 .mmap = cifs_file_mmap,
728 .splice_read = generic_file_splice_read,
729 .llseek = cifs_llseek,
730 #ifdef CONFIG_CIFS_POSIX
731 .unlocked_ioctl = cifs_ioctl,
732 #endif /* CONFIG_CIFS_POSIX */
734 #ifdef CONFIG_CIFS_EXPERIMENTAL
735 .dir_notify = cifs_dir_notify,
736 #endif /* CONFIG_CIFS_EXPERIMENTAL */
739 const struct file_operations cifs_file_direct_nobrl_ops = {
740 /* no mmap, no aio, no readv -
741 BB reevaluate whether they can be done with directio, no cache */
742 .read = cifs_user_read,
743 .write = cifs_user_write,
745 .release = cifs_close,
748 .splice_read = generic_file_splice_read,
749 #ifdef CONFIG_CIFS_POSIX
750 .unlocked_ioctl = cifs_ioctl,
751 #endif /* CONFIG_CIFS_POSIX */
752 .llseek = cifs_llseek,
753 #ifdef CONFIG_CIFS_EXPERIMENTAL
754 .dir_notify = cifs_dir_notify,
755 #endif /* CONFIG_CIFS_EXPERIMENTAL */
758 const struct file_operations cifs_dir_ops = {
759 .readdir = cifs_readdir,
760 .release = cifs_closedir,
761 .read = generic_read_dir,
762 #ifdef CONFIG_CIFS_EXPERIMENTAL
763 .dir_notify = cifs_dir_notify,
764 #endif /* CONFIG_CIFS_EXPERIMENTAL */
765 .unlocked_ioctl = cifs_ioctl,
769 cifs_init_once(struct kmem_cache *cachep, void *inode)
771 struct cifsInodeInfo *cifsi = inode;
773 inode_init_once(&cifsi->vfs_inode);
774 INIT_LIST_HEAD(&cifsi->lockList);
778 cifs_init_inodecache(void)
780 cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
781 sizeof(struct cifsInodeInfo),
782 0, (SLAB_RECLAIM_ACCOUNT|
785 if (cifs_inode_cachep == NULL)
792 cifs_destroy_inodecache(void)
794 kmem_cache_destroy(cifs_inode_cachep);
798 cifs_init_request_bufs(void)
800 if (CIFSMaxBufSize < 8192) {
801 /* Buffer size can not be smaller than 2 * PATH_MAX since maximum
802 Unicode path name has to fit in any SMB/CIFS path based frames */
803 CIFSMaxBufSize = 8192;
804 } else if (CIFSMaxBufSize > 1024*127) {
805 CIFSMaxBufSize = 1024 * 127;
807 CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
809 /* cERROR(1,("CIFSMaxBufSize %d 0x%x",CIFSMaxBufSize,CIFSMaxBufSize)); */
810 cifs_req_cachep = kmem_cache_create("cifs_request",
812 MAX_CIFS_HDR_SIZE, 0,
813 SLAB_HWCACHE_ALIGN, NULL);
814 if (cifs_req_cachep == NULL)
817 if (cifs_min_rcv < 1)
819 else if (cifs_min_rcv > 64) {
821 cERROR(1, ("cifs_min_rcv set to maximum (64)"));
824 cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
827 if (cifs_req_poolp == NULL) {
828 kmem_cache_destroy(cifs_req_cachep);
831 /* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
832 almost all handle based requests (but not write response, nor is it
833 sufficient for path based requests). A smaller size would have
834 been more efficient (compacting multiple slab items on one 4k page)
835 for the case in which debug was on, but this larger size allows
836 more SMBs to use small buffer alloc and is still much more
837 efficient to alloc 1 per page off the slab compared to 17K (5page)
838 alloc of large cifs buffers even when page debugging is on */
839 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
840 MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
842 if (cifs_sm_req_cachep == NULL) {
843 mempool_destroy(cifs_req_poolp);
844 kmem_cache_destroy(cifs_req_cachep);
848 if (cifs_min_small < 2)
850 else if (cifs_min_small > 256) {
851 cifs_min_small = 256;
852 cFYI(1, ("cifs_min_small set to maximum (256)"));
855 cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
858 if (cifs_sm_req_poolp == NULL) {
859 mempool_destroy(cifs_req_poolp);
860 kmem_cache_destroy(cifs_req_cachep);
861 kmem_cache_destroy(cifs_sm_req_cachep);
869 cifs_destroy_request_bufs(void)
871 mempool_destroy(cifs_req_poolp);
872 kmem_cache_destroy(cifs_req_cachep);
873 mempool_destroy(cifs_sm_req_poolp);
874 kmem_cache_destroy(cifs_sm_req_cachep);
880 cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
881 sizeof(struct mid_q_entry), 0,
882 SLAB_HWCACHE_ALIGN, NULL);
883 if (cifs_mid_cachep == NULL)
886 /* 3 is a reasonable minimum number of simultaneous operations */
887 cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
888 if (cifs_mid_poolp == NULL) {
889 kmem_cache_destroy(cifs_mid_cachep);
893 cifs_oplock_cachep = kmem_cache_create("cifs_oplock_structs",
894 sizeof(struct oplock_q_entry), 0,
895 SLAB_HWCACHE_ALIGN, NULL);
896 if (cifs_oplock_cachep == NULL) {
897 mempool_destroy(cifs_mid_poolp);
898 kmem_cache_destroy(cifs_mid_cachep);
906 cifs_destroy_mids(void)
908 mempool_destroy(cifs_mid_poolp);
909 kmem_cache_destroy(cifs_mid_cachep);
910 kmem_cache_destroy(cifs_oplock_cachep);
/*
 * Kernel thread ("cifsoplockd", started from init_cifs) that drains
 * GlobalOplock_Q: for each queued entry it writes back the inode's dirty
 * pages (and, when read caching must also be given up, waits for the
 * writeback and invalidates the cache), then acknowledges the oplock
 * break to the server with LOCKING_ANDX_OPLOCK_RELEASE unless the
 * session is known to be disconnected.
 * NOTE(review): fragment - some declarations and braces are elided here.
 */
static int cifs_oplock_thread(void *dummyarg)
	struct oplock_q_entry *oplock_item;
	struct cifsTconInfo *pTcon;
		/* GlobalMid_Lock protects GlobalOplock_Q */
		spin_lock(&GlobalMid_Lock);
		if (list_empty(&GlobalOplock_Q)) {
			/* nothing queued: drop the lock and doze ~39s
			   (kthread_stop or new work will wake us) */
			spin_unlock(&GlobalMid_Lock);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(39*HZ);
			oplock_item = list_entry(GlobalOplock_Q.next,
				struct oplock_q_entry, qhead);
			cFYI(1, ("found oplock item to write out"));
			pTcon = oplock_item->tcon;
			inode = oplock_item->pinode;
			netfid = oplock_item->netfid;
			/* copy out what we need, then drop the lock before
			   doing any blocking page-cache work */
			spin_unlock(&GlobalMid_Lock);
			DeleteOplockQEntry(oplock_item);
			/* can not grab inode sem here since it would
			   deadlock when oplock received on delete
			   since vfs_unlink holds the i_mutex across
			   the call */
			/* mutex_lock(&inode->i_mutex);*/
			if (S_ISREG(inode->i_mode)) {
				filemap_fdatawrite(inode->i_mapping);
				if (CIFS_I(inode)->clientCanCacheRead
					waitrc = filemap_fdatawait(inode->i_mapping);
					invalidate_remote_inode(inode);
			/* mutex_unlock(&inode->i_mutex);*/
				/* remember flush failure; surfaced on a
				   later fsync/close of this inode */
				CIFS_I(inode)->write_behind_rc = rc;
			cFYI(1, ("Oplock flush inode %p rc %d",
			/* releasing stale oplock after recent reconnect
			   of smb session using a now incorrect file
			   handle is not a data integrity issue but do
			   not bother sending an oplock release if session
			   to server still is disconnected since oplock
			   already released by the server in that case */
			if (pTcon->tidStatus != CifsNeedReconnect) {
				rc = CIFSSMBLock(0, pTcon, netfid,
					0 /* len */ , 0 /* offset */, 0,
					0, LOCKING_ANDX_OPLOCK_RELEASE,
					false /* wait flag */);
				cFYI(1, ("Oplock release rc = %d", rc));
			spin_unlock(&GlobalMid_Lock);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(1);  /* yield in case q were corrupt */
	} while (!kthread_should_stop());
987 static int cifs_dnotify_thread(void *dummyarg)
989 struct list_head *tmp;
990 struct cifsSesInfo *ses;
995 set_current_state(TASK_INTERRUPTIBLE);
996 schedule_timeout(15*HZ);
997 read_lock(&GlobalSMBSeslock);
998 /* check if any stuck requests that need
999 to be woken up and wakeq so the
1000 thread can wake up and error out */
1001 list_for_each(tmp, &GlobalSMBSessionList) {
1002 ses = list_entry(tmp, struct cifsSesInfo,
1004 if (ses && ses->server &&
1005 atomic_read(&ses->server->inFlight))
1006 wake_up_all(&ses->server->response_q);
1008 read_unlock(&GlobalSMBSeslock);
1009 } while (!kthread_should_stop());
	/* module init body: set up global lists and counters, clamp module
	   parameters, create caches, register the filesystem and upcall key
	   types, then start the oplock and dnotify worker threads.  Failures
	   unwind through the goto ladder below in reverse order.
	   NOTE(review): fragment - the function header, several declarations,
	   `if (rc)` guards, labels and #endif lines are elided here. */
	/* INIT_LIST_HEAD(&GlobalServerList);*/	/* BB not implemented yet */
	INIT_LIST_HEAD(&GlobalSMBSessionList);
	INIT_LIST_HEAD(&GlobalTreeConnectionList);
	INIT_LIST_HEAD(&GlobalOplock_Q);
#ifdef CONFIG_CIFS_EXPERIMENTAL
	INIT_LIST_HEAD(&GlobalDnotifyReqList);
	INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
/*
 *  Initialize Global counters
 */
	atomic_set(&sesInfoAllocCount, 0);
	atomic_set(&tconInfoAllocCount, 0);
	atomic_set(&tcpSesAllocCount, 0);
	atomic_set(&tcpSesReconnectCount, 0);
	atomic_set(&tconInfoReconnectCount, 0);

	atomic_set(&bufAllocCount, 0);
	atomic_set(&smBufAllocCount, 0);
#ifdef CONFIG_CIFS_STATS2
	atomic_set(&totBufAllocCount, 0);
	atomic_set(&totSmBufAllocCount, 0);
#endif /* CONFIG_CIFS_STATS2 */

	atomic_set(&midCount, 0);
	GlobalCurrentXid = 0;
	GlobalTotalActiveXid = 0;
	GlobalMaxActiveXid = 0;
	memset(Local_System_Name, 0, 15);
	rwlock_init(&GlobalSMBSeslock);
	spin_lock_init(&GlobalMid_Lock);

	/* clamp cifs_max_pending module parameter into its 2..256 range */
	if (cifs_max_pending < 2) {
		cifs_max_pending = 2;
		cFYI(1, ("cifs_max_pending set to min of 2"));
	} else if (cifs_max_pending > 256) {
		cifs_max_pending = 256;
		cFYI(1, ("cifs_max_pending set to max of 256"));

	/* build up resources in dependency order */
	rc = cifs_init_inodecache();
		goto out_clean_proc;

	rc = cifs_init_mids();
		goto out_destroy_inodecache;

	rc = cifs_init_request_bufs();
		goto out_destroy_mids;

	rc = register_filesystem(&cifs_fs_type);
		goto out_destroy_request_bufs;
#ifdef CONFIG_CIFS_UPCALL
	rc = register_key_type(&cifs_spnego_key_type);
		goto out_unregister_filesystem;

#ifdef CONFIG_CIFS_DFS_UPCALL
	rc = register_key_type(&key_type_dns_resolver);
		goto out_unregister_key_type;

	oplockThread = kthread_run(cifs_oplock_thread, NULL, "cifsoplockd");
	if (IS_ERR(oplockThread)) {
		rc = PTR_ERR(oplockThread);
		cERROR(1, ("error %d create oplock thread", rc));
		goto out_unregister_dfs_key_type;

	dnotifyThread = kthread_run(cifs_dnotify_thread, NULL, "cifsdnotifyd");
	if (IS_ERR(dnotifyThread)) {
		rc = PTR_ERR(dnotifyThread);
		cERROR(1, ("error %d create dnotify thread", rc));
		goto out_stop_oplock_thread;

	/* error unwind: labels run in reverse order of acquisition */
 out_stop_oplock_thread:
	kthread_stop(oplockThread);
 out_unregister_dfs_key_type:
#ifdef CONFIG_CIFS_DFS_UPCALL
	unregister_key_type(&key_type_dns_resolver);
 out_unregister_key_type:

#ifdef CONFIG_CIFS_UPCALL
	unregister_key_type(&cifs_spnego_key_type);
 out_unregister_filesystem:

	unregister_filesystem(&cifs_fs_type);
 out_destroy_request_bufs:
	cifs_destroy_request_bufs();

	cifs_destroy_mids();
 out_destroy_inodecache:
	cifs_destroy_inodecache();
1126 cFYI(DBG2, ("exit_cifs"));
1128 #ifdef CONFIG_CIFS_DFS_UPCALL
1129 cifs_dfs_release_automount_timer();
1130 unregister_key_type(&key_type_dns_resolver);
1132 #ifdef CONFIG_CIFS_UPCALL
1133 unregister_key_type(&cifs_spnego_key_type);
1135 unregister_filesystem(&cifs_fs_type);
1136 cifs_destroy_inodecache();
1137 cifs_destroy_mids();
1138 cifs_destroy_request_bufs();
1139 kthread_stop(oplockThread);
1140 kthread_stop(dnotifyThread);
1143 MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
1144 MODULE_LICENSE("GPL"); /* combination of LGPL + GPL source behaves as GPL */
1146 ("VFS to access servers complying with the SNIA CIFS Specification "
1147 "e.g. Samba and Windows");
1148 MODULE_VERSION(CIFS_VERSION);
1149 module_init(init_cifs)
1150 module_exit(exit_cifs)