4 * Copyright (C) International Business Machines Corp., 2002,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
7 * Common Internet FileSystem (CIFS) client
9 * This library is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU Lesser General Public License as published
11 * by the Free Software Foundation; either version 2.1 of the License, or
12 * (at your option) any later version.
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
17 * the GNU Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public License
20 * along with this library; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 /* Note that BB means BUGBUG (ie something to fix eventually) */
26 #include <linux/module.h>
28 #include <linux/mount.h>
29 #include <linux/slab.h>
30 #include <linux/init.h>
31 #include <linux/list.h>
32 #include <linux/seq_file.h>
33 #include <linux/vfs.h>
34 #include <linux/mempool.h>
35 #include <linux/delay.h>
36 #include <linux/kthread.h>
37 #include <linux/freezer.h>
40 #define DECLARE_GLOBALS_HERE
42 #include "cifsproto.h"
43 #include "cifs_debug.h"
44 #include "cifs_fs_sb.h"
46 #include <linux/key-type.h>
47 #include "dns_resolve.h"
48 #include "cifs_spnego.h"
49 #define CIFS_MAGIC_NUMBER 0xFF534D42 /* the first four bytes of SMB PDUs */
/*
 * Module-scope state and tunables for the CIFS client.
 * NOTE(review): the embedded original line numbers below are non-contiguous,
 * so lines are missing from this extraction; do not assume completeness.
 */
51 #ifdef CONFIG_CIFS_QUOTA
52 static struct quotactl_ops cifs_quotactl_ops;
/* Global behavior switches (nonzero = enabled). */
58 unsigned int oplockEnabled = 1;
59 unsigned int experimEnabled = 0;
60 unsigned int linuxExtEnabled = 1;
61 unsigned int lookupCacheEnabled = 1;
62 unsigned int multiuser_mount = 0;
63 unsigned int extended_security = CIFSSEC_DEF;
64 /* unsigned int ntlmv2_support = 0; */
65 unsigned int sign_CIFS_PDUs = 1;
/* Kernel thread handles created in init_cifs(), stopped in exit_cifs(). */
66 extern struct task_struct *oplockThread; /* remove sparse warning */
67 struct task_struct *oplockThread = NULL;
68 /* extern struct task_struct * dnotifyThread; remove sparse warning */
69 static struct task_struct *dnotifyThread = NULL;
70 static const struct super_operations cifs_super_ops;
/* Module parameters; ranges are clamped at init time (see init paths). */
71 unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
72 module_param(CIFSMaxBufSize, int, 0);
73 MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header). "
74 "Default: 16384 Range: 8192 to 130048");
75 unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
76 module_param(cifs_min_rcv, int, 0);
77 MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
79 unsigned int cifs_min_small = 30;
80 module_param(cifs_min_small, int, 0);
81 MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
83 unsigned int cifs_max_pending = CIFS_MAX_REQ;
84 module_param(cifs_max_pending, int, 0);
85 MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. "
86 "Default: 50 Range: 2 to 256");
/* Mempools and slab cache defined/initialized elsewhere in the module. */
88 extern mempool_t *cifs_sm_req_poolp;
89 extern mempool_t *cifs_req_poolp;
90 extern mempool_t *cifs_mid_poolp;
92 extern struct kmem_cache *cifs_oplock_cachep;
/*
 * cifs_read_super - fill in a superblock at mount time.
 * Allocates the per-superblock cifs_sb_info, performs the SMB mount via
 * cifs_mount(), and instantiates the root inode/dentry.
 * NOTE(review): the embedded original line numbers are non-contiguous, so
 * braces and statements are missing from this extraction; the error-unwind
 * labels referenced below (e.g. out_mount_failed) are not fully visible.
 */
95 cifs_read_super(struct super_block *sb, void *data,
96 const char *devname, int silent)
99 struct cifs_sb_info *cifs_sb;
102 /* BB should we make this contingent on mount parm? */
103 sb->s_flags |= MS_NODIRATIME | MS_NOATIME;
104 sb->s_fs_info = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
105 cifs_sb = CIFS_SB(sb);
109 #ifdef CONFIG_CIFS_DFS_UPCALL
110 /* copy mount params to sb for use in submounts */
111 /* BB: should we move this after the mount so we
112 * do not have to do the copy on failed mounts?
113 * BB: May be it is better to do simple copy before
114 * complex operation (mount), and in case of fail
115 * just exit instead of doing mount and attempting
116 * undo it if this copy fails?*/
118 int len = strlen(data);
119 cifs_sb->mountdata = kzalloc(len + 1, GFP_KERNEL);
120 if (cifs_sb->mountdata == NULL) {
/* allocation failed: release sb private info before bailing out */
121 kfree(sb->s_fs_info);
122 sb->s_fs_info = NULL;
125 strncpy(cifs_sb->mountdata, data, len + 1);
126 cifs_sb->mountdata[len] = '\0';
/* perform the actual SMB session setup / tree connect */
130 rc = cifs_mount(sb, cifs_sb, data, devname);
135 ("cifs_mount failed w/return code = %d", rc));
136 goto out_mount_failed;
139 sb->s_magic = CIFS_MAGIC_NUMBER;
140 sb->s_op = &cifs_super_ops;
141 /* if (cifs_sb->tcon->ses->server->maxBuf > MAX_CIFS_HDR_SIZE + 512)
143 cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */
144 #ifdef CONFIG_CIFS_QUOTA
145 sb->s_qcop = &cifs_quotactl_ops;
147 sb->s_blocksize = CIFS_MAX_MSGSIZE;
148 sb->s_blocksize_bits = 14; /* default 2**14 = CIFS_MAX_MSGSIZE */
149 inode = cifs_iget(sb, ROOT_I);
157 sb->s_root = d_alloc_root(inode);
164 #ifdef CONFIG_CIFS_EXPERIMENTAL
165 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
166 cFYI(1, ("export ops supported"));
167 sb->s_export_op = &cifs_export_ops;
169 #endif /* EXPERIMENTAL */
/* error unwind: undo the mount and free per-superblock state */
174 cERROR(1, ("cifs_read_super: get root inode failed"));
178 cifs_umount(sb, cifs_sb);
182 #ifdef CONFIG_CIFS_DFS_UPCALL
183 if (cifs_sb->mountdata) {
184 kfree(cifs_sb->mountdata);
185 cifs_sb->mountdata = NULL;
188 if (cifs_sb->local_nls)
189 unload_nls(cifs_sb->local_nls);
/*
 * cifs_put_super - tear down a cifs superblock at unmount.
 * Calls cifs_umount() and releases DFS mount data and the NLS table.
 * NOTE(review): lines are missing from this extraction (non-contiguous
 * embedded numbering); the function body shown here is partial.
 */
196 cifs_put_super(struct super_block *sb)
199 struct cifs_sb_info *cifs_sb;
201 cFYI(1, ("In cifs_put_super"));
202 cifs_sb = CIFS_SB(sb);
/* nothing to clean up if no per-sb info was ever attached */
203 if (cifs_sb == NULL) {
204 cFYI(1, ("Empty cifs superblock info passed to unmount"));
207 rc = cifs_umount(sb, cifs_sb);
209 cERROR(1, ("cifs_umount failed with return code %d", rc));
210 #ifdef CONFIG_CIFS_DFS_UPCALL
211 if (cifs_sb->mountdata) {
212 kfree(cifs_sb->mountdata);
213 cifs_sb->mountdata = NULL;
217 unload_nls(cifs_sb->local_nls);
/*
 * cifs_statfs - report filesystem statistics for statfs(2).
 * Tries progressively older SMB QFSInfo levels: POSIX QFS info first (if the
 * server advertises CAP_UNIX + POSIX extensions), then the NT-level QFSInfo,
 * then the legacy LANMAN level as a last resort.
 * NOTE(review): lines are missing from this extraction; the fallback
 * conditions around SMBOldQFSInfo are not fully visible.
 */
223 cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
225 struct super_block *sb = dentry->d_sb;
226 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
227 struct cifsTconInfo *tcon = cifs_sb->tcon;
228 int rc = -EOPNOTSUPP;
233 buf->f_type = CIFS_MAGIC_NUMBER;
236 * PATH_MAX may be too long - it would presumably be total path,
237 * but note that some servers (including Samba 3) have a shorter
240 * Instead could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO.
242 buf->f_namelen = PATH_MAX;
243 buf->f_files = 0; /* undefined */
244 buf->f_ffree = 0; /* unlimited */
247 * We could add a second check for a QFS Unix capability bit
/* newest first: POSIX QFS info when the server supports Unix extensions */
249 if ((tcon->ses->capabilities & CAP_UNIX) &&
250 (CIFS_POSIX_EXTENSIONS & le64_to_cpu(tcon->fsUnixInfo.Capability)))
251 rc = CIFSSMBQFSPosixInfo(xid, tcon, buf);
254 * Only need to call the old QFSInfo if failed on newer one,
257 if (rc && (tcon->ses->capabilities & CAP_NT_SMBS))
258 rc = CIFSSMBQFSInfo(xid, tcon, buf);
261 * Some old Windows servers also do not support level 103, retry with
262 * older level one if old server failed the previous call or we
263 * bypassed it because we detected that this was an older LANMAN sess
266 rc = SMBOldQFSInfo(xid, tcon, buf);
/*
 * cifs_permission - VFS permission check hook.
 * With the noperm mount flag the client-side check is skipped (the missing
 * branch body is not visible in this extraction); otherwise fall back to
 * generic_permission() against the cached mode bits.
 */
272 static int cifs_permission(struct inode *inode, int mask)
274 struct cifs_sb_info *cifs_sb;
276 cifs_sb = CIFS_SB(inode->i_sb);
278 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
280 else /* file mode might have been restricted at mount time
281 on the client (above and beyond ACL on servers) for
282 servers which do not support setting and viewing mode bits,
283 so allowing client to check permissions is useful */
284 return generic_permission(inode, mask, NULL);
/* Slab caches and mempools backing inode, request, and mid allocations;
 * created in the cifs_init_* routines below and torn down in the matching
 * cifs_destroy_* routines. */
287 static struct kmem_cache *cifs_inode_cachep;
288 static struct kmem_cache *cifs_req_cachep;
289 static struct kmem_cache *cifs_mid_cachep;
290 struct kmem_cache *cifs_oplock_cachep;
291 static struct kmem_cache *cifs_sm_req_cachep;
292 mempool_t *cifs_sm_req_poolp;
293 mempool_t *cifs_req_poolp;
294 mempool_t *cifs_mid_poolp;
/*
 * cifs_alloc_inode - allocate and initialize a cifsInodeInfo from the slab.
 * Returns the embedded VFS inode.  Caching flags start false because no
 * oplock has been granted yet.  NOTE(review): the NULL-check after
 * kmem_cache_alloc is not visible in this extraction.
 */
296 static struct inode *
297 cifs_alloc_inode(struct super_block *sb)
299 struct cifsInodeInfo *cifs_inode;
300 cifs_inode = kmem_cache_alloc(cifs_inode_cachep, GFP_KERNEL);
303 cifs_inode->cifsAttrs = 0x20; /* default */
304 atomic_set(&cifs_inode->inUse, 0);
305 cifs_inode->time = 0;
306 cifs_inode->write_behind_rc = 0;
307 /* Until the file is open and we have gotten oplock
308 info back from the server, can not assume caching of
309 file data or metadata */
310 cifs_inode->clientCanCacheRead = false;
311 cifs_inode->clientCanCacheAll = false;
312 cifs_inode->vfs_inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */
314 /* Can not set i_flags here - they get immediately overwritten
315 to zero by the VFS */
316 /* cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME;*/
317 INIT_LIST_HEAD(&cifs_inode->openFileList);
318 return &cifs_inode->vfs_inode;
/* cifs_destroy_inode - return the cifsInodeInfo to its slab cache. */
322 cifs_destroy_inode(struct inode *inode)
324 kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
328 * cifs_show_options() is for displaying mount options in /proc/mounts.
329 * Not all settable options are displayed but most of the important
/* Emits one ",option[=value]" per active mount flag/setting.
 * NOTE(review): the surrounding NULL checks on cifs_sb/tcon are not visible
 * in this extraction (lines missing). */
333 cifs_show_options(struct seq_file *s, struct vfsmount *m)
335 struct cifs_sb_info *cifs_sb;
337 cifs_sb = CIFS_SB(m->mnt_sb);
341 /* BB add prepath to mount options displayed */
342 seq_printf(s, ",unc=%s", cifs_sb->tcon->treeName);
343 if (cifs_sb->tcon->ses) {
344 if (cifs_sb->tcon->ses->userName)
345 seq_printf(s, ",username=%s",
346 cifs_sb->tcon->ses->userName);
347 if (cifs_sb->tcon->ses->domainName)
348 seq_printf(s, ",domain=%s",
349 cifs_sb->tcon->ses->domainName);
/* uid/gid only shown when overridden or Unix extensions are off */
351 if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID) ||
352 !(cifs_sb->tcon->unix_ext))
353 seq_printf(s, ",uid=%d", cifs_sb->mnt_uid);
354 if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID) ||
355 !(cifs_sb->tcon->unix_ext))
356 seq_printf(s, ",gid=%d", cifs_sb->mnt_gid);
357 if (!cifs_sb->tcon->unix_ext) {
358 seq_printf(s, ",file_mode=0%o,dir_mode=0%o",
359 cifs_sb->mnt_file_mode,
360 cifs_sb->mnt_dir_mode);
362 if (cifs_sb->tcon->seal)
363 seq_printf(s, ",seal");
364 if (cifs_sb->tcon->nocase)
365 seq_printf(s, ",nocase");
366 if (cifs_sb->tcon->retry)
367 seq_printf(s, ",hard");
369 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
370 seq_printf(s, ",posixpaths");
371 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
372 seq_printf(s, ",setuids");
373 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
374 seq_printf(s, ",serverino");
375 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
376 seq_printf(s, ",directio");
377 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
378 seq_printf(s, ",nouser_xattr");
379 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
380 seq_printf(s, ",mapchars");
381 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
382 seq_printf(s, ",sfu");
383 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
384 seq_printf(s, ",nobrl");
385 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
386 seq_printf(s, ",cifsacl");
387 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
388 seq_printf(s, ",dynperm");
389 if (m->mnt_sb->s_flags & MS_POSIXACL)
390 seq_printf(s, ",acl");
392 seq_printf(s, ",rsize=%d", cifs_sb->rsize);
393 seq_printf(s, ",wsize=%d", cifs_sb->wsize);
/*
 * XFS-style quota operations (CONFIG_CIFS_QUOTA only).  Each handler
 * resolves the tcon from the superblock and logs the request; the actual
 * return/remote-call logic is not visible in this extraction (lines
 * missing), so these appear to be stubs or partially shown bodies.
 */
398 #ifdef CONFIG_CIFS_QUOTA
399 int cifs_xquota_set(struct super_block *sb, int quota_type, qid_t qid,
400 struct fs_disk_quota *pdquota)
404 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
405 struct cifsTconInfo *pTcon;
408 pTcon = cifs_sb->tcon;
415 cFYI(1, ("set type: 0x%x id: %d", quota_type, qid));
424 int cifs_xquota_get(struct super_block *sb, int quota_type, qid_t qid,
425 struct fs_disk_quota *pdquota)
429 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
430 struct cifsTconInfo *pTcon;
433 pTcon = cifs_sb->tcon;
439 cFYI(1, ("set type: 0x%x id: %d", quota_type, qid));
448 int cifs_xstate_set(struct super_block *sb, unsigned int flags, int operation)
452 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
453 struct cifsTconInfo *pTcon;
456 pTcon = cifs_sb->tcon;
462 cFYI(1, ("flags: 0x%x operation: 0x%x", flags, operation));
471 int cifs_xstate_get(struct super_block *sb, struct fs_quota_stat *qstats)
475 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
476 struct cifsTconInfo *pTcon;
479 pTcon = cifs_sb->tcon;
485 cFYI(1, ("pqstats %p", qstats));
/* dispatch table wired into sb->s_qcop in cifs_read_super() */
494 static struct quotactl_ops cifs_quotactl_ops = {
495 .set_xquota = cifs_xquota_set,
496 .get_xquota = cifs_xquota_get,
497 .set_xstate = cifs_xstate_set,
498 .get_xstate = cifs_xstate_get,
/*
 * cifs_umount_begin - begin forced unmount.
 * If this is the last user of the tcon, mark it exiting and wake all
 * waiters on the server request/response queues so blocked calls can
 * error out.  NOTE(review): lines (including the matching up() for the
 * tconSem down()) are not visible in this extraction.
 */
502 static void cifs_umount_begin(struct super_block *sb)
504 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
505 struct cifsTconInfo *tcon;
510 tcon = cifs_sb->tcon;
513 down(&tcon->tconSem);
514 if (atomic_read(&tcon->useCount) == 1)
515 tcon->tidStatus = CifsExiting;
518 /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
519 /* cancel_notify_requests(tcon); */
520 if (tcon->ses && tcon->ses->server) {
521 cFYI(1, ("wake up tasks now - umount begin not complete"));
522 wake_up_all(&tcon->ses->server->request_q);
523 wake_up_all(&tcon->ses->server->response_q);
524 msleep(1); /* yield */
525 /* we have to kick the requests once more */
526 wake_up_all(&tcon->ses->server->response_q);
529 /* BB FIXME - finish add checks for tidStatus BB */
/* cifs_show_stats (CONFIG_CIFS_STATS2) - per-mount stats hook; body not
 * visible in this extraction. */
534 #ifdef CONFIG_CIFS_STATS2
535 static int cifs_show_stats(struct seq_file *s, struct vfsmount *mnt)
/* cifs_remount - remount hook; forces nodiratime.  Remaining body (return
 * value) is not visible in this extraction. */
542 static int cifs_remount(struct super_block *sb, int *flags, char *data)
544 *flags |= MS_NODIRATIME;
/* Superblock operations table installed by cifs_read_super(). */
548 static const struct super_operations cifs_super_ops = {
549 .put_super = cifs_put_super,
550 .statfs = cifs_statfs,
551 .alloc_inode = cifs_alloc_inode,
552 .destroy_inode = cifs_destroy_inode,
553 /* .drop_inode = generic_delete_inode,
554 .delete_inode = cifs_delete_inode, */ /* Do not need above two
555 functions unless later we add lazy close of inodes or unless the
556 kernel forgets to call us with the same number of releases (closes)
558 .show_options = cifs_show_options,
559 .umount_begin = cifs_umount_begin,
560 .remount_fs = cifs_remount,
561 #ifdef CONFIG_CIFS_STATS2
562 .show_stats = cifs_show_stats,
/*
 * cifs_get_sb - file_system_type get_sb hook.
 * Allocates an anonymous superblock, runs cifs_read_super() to mount it,
 * and attaches it to the vfsmount on success; on failure the superblock is
 * deactivated.  NOTE(review): the rc checks between these steps are not
 * visible in this extraction.
 */
567 cifs_get_sb(struct file_system_type *fs_type,
568 int flags, const char *dev_name, void *data, struct vfsmount *mnt)
571 struct super_block *sb = sget(fs_type, NULL, set_anon_super, NULL);
573 cFYI(1, ("Devname: %s flags: %d ", dev_name, flags));
580 rc = cifs_read_super(sb, data, dev_name, flags & MS_SILENT ? 1 : 0);
582 up_write(&sb->s_umount);
583 deactivate_super(sb);
586 sb->s_flags |= MS_ACTIVE;
587 return simple_set_mnt(mnt, sb);
/*
 * cifs_file_aio_write - async write wrapper.
 * Delegates to generic_file_aio_write(); if we do not hold a write oplock
 * (clientCanCacheAll false) the dirty pages are flushed immediately so the
 * server sees the data.
 */
590 static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
591 unsigned long nr_segs, loff_t pos)
593 struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
596 written = generic_file_aio_write(iocb, iov, nr_segs, pos);
597 if (!CIFS_I(inode)->clientCanCacheAll)
598 filemap_fdatawrite(inode->i_mapping);
/*
 * cifs_llseek - llseek that revalidates the cached file size for SEEK_END,
 * since the server-side length may have changed behind our cache.
 */
602 static loff_t cifs_llseek(struct file *file, loff_t offset, int origin)
604 /* origin == SEEK_END => we must revalidate the cached file length */
605 if (origin == SEEK_END) {
608 /* some applications poll for the file length in this strange
609 way so we must seek to end on non-oplocked files by
610 setting the revalidate time to zero */
611 CIFS_I(file->f_path.dentry->d_inode)->time = 0;
613 retval = cifs_revalidate(file->f_path.dentry);
/* NOTE(review): the condition guarding this error return is not visible
 * in this extraction */
615 return (loff_t)retval;
617 return generic_file_llseek_unlocked(file, offset, origin);
/* Filesystem registration record; registered in init_cifs(). */
620 struct file_system_type cifs_fs_type = {
621 .owner = THIS_MODULE,
623 .get_sb = cifs_get_sb,
624 .kill_sb = kill_anon_super,
/* Inode operations for directories. */
627 const struct inode_operations cifs_dir_inode_ops = {
628 .create = cifs_create,
629 .lookup = cifs_lookup,
630 .getattr = cifs_getattr,
631 .unlink = cifs_unlink,
632 .link = cifs_hardlink,
635 .rename = cifs_rename,
636 .permission = cifs_permission,
637 /* revalidate:cifs_revalidate, */
638 .setattr = cifs_setattr,
639 .symlink = cifs_symlink,
641 #ifdef CONFIG_CIFS_XATTR
642 .setxattr = cifs_setxattr,
643 .getxattr = cifs_getxattr,
644 .listxattr = cifs_listxattr,
645 .removexattr = cifs_removexattr,
/* Inode operations for regular files. */
649 const struct inode_operations cifs_file_inode_ops = {
650 /* revalidate:cifs_revalidate, */
651 .setattr = cifs_setattr,
652 .getattr = cifs_getattr, /* do we need this anymore? */
653 .rename = cifs_rename,
654 .permission = cifs_permission,
655 #ifdef CONFIG_CIFS_XATTR
656 .setxattr = cifs_setxattr,
657 .getxattr = cifs_getxattr,
658 .listxattr = cifs_listxattr,
659 .removexattr = cifs_removexattr,
/* Inode operations for symlinks. */
663 const struct inode_operations cifs_symlink_inode_ops = {
664 .readlink = generic_readlink,
665 .follow_link = cifs_follow_link,
666 .put_link = cifs_put_link,
667 .permission = cifs_permission,
668 /* BB add the following two eventually */
669 /* revalidate: cifs_revalidate,
670 setattr: cifs_notify_change, *//* BB do we need notify change */
671 #ifdef CONFIG_CIFS_XATTR
672 .setxattr = cifs_setxattr,
673 .getxattr = cifs_getxattr,
674 .listxattr = cifs_listxattr,
675 .removexattr = cifs_removexattr,
/* File operations: default (cached, brl-capable) variant. */
679 const struct file_operations cifs_file_ops = {
680 .read = do_sync_read,
681 .write = do_sync_write,
682 .aio_read = generic_file_aio_read,
683 .aio_write = cifs_file_aio_write,
685 .release = cifs_close,
689 .mmap = cifs_file_mmap,
690 .splice_read = generic_file_splice_read,
691 .llseek = cifs_llseek,
692 #ifdef CONFIG_CIFS_POSIX
693 .unlocked_ioctl = cifs_ioctl,
694 #endif /* CONFIG_CIFS_POSIX */
696 #ifdef CONFIG_CIFS_EXPERIMENTAL
697 .dir_notify = cifs_dir_notify,
698 #endif /* CONFIG_CIFS_EXPERIMENTAL */
/* File operations: direct I/O variant (no page cache, no mmap/aio). */
701 const struct file_operations cifs_file_direct_ops = {
702 /* no mmap, no aio, no readv -
703 BB reevaluate whether they can be done with directio, no cache */
704 .read = cifs_user_read,
705 .write = cifs_user_write,
707 .release = cifs_close,
711 .splice_read = generic_file_splice_read,
712 #ifdef CONFIG_CIFS_POSIX
713 .unlocked_ioctl = cifs_ioctl,
714 #endif /* CONFIG_CIFS_POSIX */
715 .llseek = cifs_llseek,
716 #ifdef CONFIG_CIFS_EXPERIMENTAL
717 .dir_notify = cifs_dir_notify,
718 #endif /* CONFIG_CIFS_EXPERIMENTAL */
/* File operations: cached variant for mounts with byte-range locks
 * disabled (nobrl). */
720 const struct file_operations cifs_file_nobrl_ops = {
721 .read = do_sync_read,
722 .write = do_sync_write,
723 .aio_read = generic_file_aio_read,
724 .aio_write = cifs_file_aio_write,
726 .release = cifs_close,
729 .mmap = cifs_file_mmap,
730 .splice_read = generic_file_splice_read,
731 .llseek = cifs_llseek,
732 #ifdef CONFIG_CIFS_POSIX
733 .unlocked_ioctl = cifs_ioctl,
734 #endif /* CONFIG_CIFS_POSIX */
736 #ifdef CONFIG_CIFS_EXPERIMENTAL
737 .dir_notify = cifs_dir_notify,
738 #endif /* CONFIG_CIFS_EXPERIMENTAL */
/* File operations: direct I/O + nobrl variant. */
741 const struct file_operations cifs_file_direct_nobrl_ops = {
742 /* no mmap, no aio, no readv -
743 BB reevaluate whether they can be done with directio, no cache */
744 .read = cifs_user_read,
745 .write = cifs_user_write,
747 .release = cifs_close,
750 .splice_read = generic_file_splice_read,
751 #ifdef CONFIG_CIFS_POSIX
752 .unlocked_ioctl = cifs_ioctl,
753 #endif /* CONFIG_CIFS_POSIX */
754 .llseek = cifs_llseek,
755 #ifdef CONFIG_CIFS_EXPERIMENTAL
756 .dir_notify = cifs_dir_notify,
757 #endif /* CONFIG_CIFS_EXPERIMENTAL */
/* File operations for directories. */
760 const struct file_operations cifs_dir_ops = {
761 .readdir = cifs_readdir,
762 .release = cifs_closedir,
763 .read = generic_read_dir,
764 #ifdef CONFIG_CIFS_EXPERIMENTAL
765 .dir_notify = cifs_dir_notify,
766 #endif /* CONFIG_CIFS_EXPERIMENTAL */
767 .unlocked_ioctl = cifs_ioctl,
/* cifs_init_once - slab constructor: one-time init of a cifsInodeInfo
 * (VFS inode init plus the byte-range lock list head). */
771 cifs_init_once(void *inode)
773 struct cifsInodeInfo *cifsi = inode;
775 inode_init_once(&cifsi->vfs_inode);
776 INIT_LIST_HEAD(&cifsi->lockList);
/* cifs_init_inodecache - create the cifsInodeInfo slab cache.
 * NOTE(review): the flag list and return statements are only partially
 * visible in this extraction. */
780 cifs_init_inodecache(void)
782 cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
783 sizeof(struct cifsInodeInfo),
784 0, (SLAB_RECLAIM_ACCOUNT|
787 if (cifs_inode_cachep == NULL)
/* cifs_destroy_inodecache - tear down the inode slab cache at module exit. */
794 cifs_destroy_inodecache(void)
796 kmem_cache_destroy(cifs_inode_cachep);
/*
 * cifs_init_request_bufs - create the large and small request slab caches
 * and their mempools, clamping the tunables to sane ranges first.  On any
 * allocation failure the previously created caches/pools are destroyed
 * before returning.  NOTE(review): lines (including some clamp assignments
 * and return statements) are missing from this extraction.
 */
800 cifs_init_request_bufs(void)
802 if (CIFSMaxBufSize < 8192) {
803 /* Buffer size can not be smaller than 2 * PATH_MAX since maximum
804 Unicode path name has to fit in any SMB/CIFS path based frames */
805 CIFSMaxBufSize = 8192;
806 } else if (CIFSMaxBufSize > 1024*127) {
807 CIFSMaxBufSize = 1024 * 127;
809 CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
811 /* cERROR(1,("CIFSMaxBufSize %d 0x%x",CIFSMaxBufSize,CIFSMaxBufSize)); */
812 cifs_req_cachep = kmem_cache_create("cifs_request",
814 MAX_CIFS_HDR_SIZE, 0,
815 SLAB_HWCACHE_ALIGN, NULL);
816 if (cifs_req_cachep == NULL)
/* clamp the receive-pool tunable to [1, 64] */
819 if (cifs_min_rcv < 1)
821 else if (cifs_min_rcv > 64) {
823 cERROR(1, ("cifs_min_rcv set to maximum (64)"));
826 cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
829 if (cifs_req_poolp == NULL) {
830 kmem_cache_destroy(cifs_req_cachep);
833 /* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
834 almost all handle based requests (but not write response, nor is it
835 sufficient for path based requests). A smaller size would have
836 been more efficient (compacting multiple slab items on one 4k page)
837 for the case in which debug was on, but this larger size allows
838 more SMBs to use small buffer alloc and is still much more
839 efficient to alloc 1 per page off the slab compared to 17K (5page)
840 alloc of large cifs buffers even when page debugging is on */
841 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
842 MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
844 if (cifs_sm_req_cachep == NULL) {
845 mempool_destroy(cifs_req_poolp);
846 kmem_cache_destroy(cifs_req_cachep);
/* clamp the small-buffer tunable to [2, 256] */
850 if (cifs_min_small < 2)
852 else if (cifs_min_small > 256) {
853 cifs_min_small = 256;
854 cFYI(1, ("cifs_min_small set to maximum (256)"));
857 cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
860 if (cifs_sm_req_poolp == NULL) {
/* unwind everything created so far */
861 mempool_destroy(cifs_req_poolp);
862 kmem_cache_destroy(cifs_req_cachep);
863 kmem_cache_destroy(cifs_sm_req_cachep);
/* cifs_destroy_request_bufs - free the request mempools and slab caches
 * (reverse of cifs_init_request_bufs). */
871 cifs_destroy_request_bufs(void)
873 mempool_destroy(cifs_req_poolp);
874 kmem_cache_destroy(cifs_req_cachep);
875 mempool_destroy(cifs_sm_req_poolp);
876 kmem_cache_destroy(cifs_sm_req_cachep);
/* cifs_init_mids - create mid (multiplex id) and oplock queue entry caches
 * plus the mid mempool, unwinding on failure.  NOTE(review): the function
 * signature line is not visible in this extraction. */
882 cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
883 sizeof(struct mid_q_entry), 0,
884 SLAB_HWCACHE_ALIGN, NULL);
885 if (cifs_mid_cachep == NULL)
888 /* 3 is a reasonable minimum number of simultaneous operations */
889 cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
890 if (cifs_mid_poolp == NULL) {
891 kmem_cache_destroy(cifs_mid_cachep);
895 cifs_oplock_cachep = kmem_cache_create("cifs_oplock_structs",
896 sizeof(struct oplock_q_entry), 0,
897 SLAB_HWCACHE_ALIGN, NULL);
898 if (cifs_oplock_cachep == NULL) {
899 mempool_destroy(cifs_mid_poolp);
900 kmem_cache_destroy(cifs_mid_cachep);
/* cifs_destroy_mids - reverse of cifs_init_mids. */
908 cifs_destroy_mids(void)
910 mempool_destroy(cifs_mid_poolp);
911 kmem_cache_destroy(cifs_mid_cachep);
912 kmem_cache_destroy(cifs_oplock_cachep);
/*
 * cifs_oplock_thread - kernel thread that services queued oplock breaks.
 * Loops until kthread_should_stop(): pops entries off GlobalOplock_Q under
 * GlobalMid_Lock, flushes (and if the read cache was lost, invalidates) the
 * affected inode's pages, then sends an oplock release to the server via a
 * zero-length CIFSSMBLock.  Sleeps ~39s when the queue is empty.
 * NOTE(review): lines are missing from this extraction (loop header, some
 * braces, the freeze/schedule bookkeeping), so the flow shown is partial.
 */
915 static int cifs_oplock_thread(void *dummyarg)
917 struct oplock_q_entry *oplock_item;
918 struct cifsTconInfo *pTcon;
928 spin_lock(&GlobalMid_Lock);
929 if (list_empty(&GlobalOplock_Q)) {
930 spin_unlock(&GlobalMid_Lock);
/* nothing queued: sleep until woken or timeout */
931 set_current_state(TASK_INTERRUPTIBLE);
932 schedule_timeout(39*HZ);
934 oplock_item = list_entry(GlobalOplock_Q.next,
935 struct oplock_q_entry, qhead);
936 cFYI(1, ("found oplock item to write out"));
937 pTcon = oplock_item->tcon;
938 inode = oplock_item->pinode;
939 netfid = oplock_item->netfid;
940 spin_unlock(&GlobalMid_Lock);
941 DeleteOplockQEntry(oplock_item);
942 /* can not grab inode sem here since it would
943 deadlock when oplock received on delete
944 since vfs_unlink holds the i_mutex across
946 /* mutex_lock(&inode->i_mutex);*/
947 if (S_ISREG(inode->i_mode)) {
948 rc = filemap_fdatawrite(inode->i_mapping);
949 if (CIFS_I(inode)->clientCanCacheRead == 0) {
950 waitrc = filemap_fdatawait(
952 invalidate_remote_inode(inode);
958 /* mutex_unlock(&inode->i_mutex);*/
/* remember flush failure so a later fsync/close can report it */
960 CIFS_I(inode)->write_behind_rc = rc;
961 cFYI(1, ("Oplock flush inode %p rc %d",
964 /* releasing stale oplock after recent reconnect
965 of smb session using a now incorrect file
966 handle is not a data integrity issue but do
967 not bother sending an oplock release if session
968 to server still is disconnected since oplock
969 already released by the server in that case */
970 if (pTcon->tidStatus != CifsNeedReconnect) {
971 rc = CIFSSMBLock(0, pTcon, netfid,
972 0 /* len */ , 0 /* offset */, 0,
973 0, LOCKING_ANDX_OPLOCK_RELEASE,
974 false /* wait flag */);
975 cFYI(1, ("Oplock release rc = %d", rc));
977 set_current_state(TASK_INTERRUPTIBLE);
978 schedule_timeout(1); /* yield in case q were corrupt */
980 } while (!kthread_should_stop());
/*
 * cifs_dnotify_thread - kernel thread that wakes stuck requests.
 * Every ~15s, walks GlobalSMBSessionList under GlobalSMBSeslock and wakes
 * the response queue of any server with requests in flight, so blocked
 * callers can notice errors.  Loops until kthread_should_stop().
 * NOTE(review): the do-loop opening and return are not visible in this
 * extraction.
 */
985 static int cifs_dnotify_thread(void *dummyarg)
987 struct list_head *tmp;
988 struct cifsSesInfo *ses;
993 set_current_state(TASK_INTERRUPTIBLE);
994 schedule_timeout(15*HZ);
995 read_lock(&GlobalSMBSeslock);
996 /* check if any stuck requests that need
997 to be woken up and wakeq so the
998 thread can wake up and error out */
999 list_for_each(tmp, &GlobalSMBSessionList) {
1000 ses = list_entry(tmp, struct cifsSesInfo,
1002 if (ses->server && atomic_read(&ses->server->inFlight))
1003 wake_up_all(&ses->server->response_q);
1005 read_unlock(&GlobalSMBSeslock);
1006 } while (!kthread_should_stop());
/*
 * init_cifs - module initialization.
 * Order: init global lists/counters/locks, clamp cifs_max_pending, then
 * create caches/mids/request buffers, register the filesystem and upcall
 * key types, and finally start the oplock and dnotify kernel threads.
 * On failure each step unwinds the earlier ones via the goto labels at the
 * bottom.  NOTE(review): the function signature and some intermediate lines
 * are not visible in this extraction.
 */
1016 /* INIT_LIST_HEAD(&GlobalServerList);*/ /* BB not implemented yet */
1017 INIT_LIST_HEAD(&GlobalSMBSessionList);
1018 INIT_LIST_HEAD(&GlobalTreeConnectionList);
1019 INIT_LIST_HEAD(&GlobalOplock_Q);
1020 #ifdef CONFIG_CIFS_EXPERIMENTAL
1021 INIT_LIST_HEAD(&GlobalDnotifyReqList);
1022 INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
1025 * Initialize Global counters
1027 atomic_set(&sesInfoAllocCount, 0);
1028 atomic_set(&tconInfoAllocCount, 0);
1029 atomic_set(&tcpSesAllocCount, 0);
1030 atomic_set(&tcpSesReconnectCount, 0);
1031 atomic_set(&tconInfoReconnectCount, 0);
1033 atomic_set(&bufAllocCount, 0);
1034 atomic_set(&smBufAllocCount, 0);
1035 #ifdef CONFIG_CIFS_STATS2
1036 atomic_set(&totBufAllocCount, 0);
1037 atomic_set(&totSmBufAllocCount, 0);
1038 #endif /* CONFIG_CIFS_STATS2 */
1040 atomic_set(&midCount, 0);
1041 GlobalCurrentXid = 0;
1042 GlobalTotalActiveXid = 0;
1043 GlobalMaxActiveXid = 0;
1044 memset(Local_System_Name, 0, 15);
1045 rwlock_init(&GlobalSMBSeslock);
1046 spin_lock_init(&GlobalMid_Lock);
/* clamp the simultaneous-request tunable to [2, 256] */
1048 if (cifs_max_pending < 2) {
1049 cifs_max_pending = 2;
1050 cFYI(1, ("cifs_max_pending set to min of 2"));
1051 } else if (cifs_max_pending > 256) {
1052 cifs_max_pending = 256;
1053 cFYI(1, ("cifs_max_pending set to max of 256"));
1056 rc = cifs_init_inodecache();
1058 goto out_clean_proc;
1060 rc = cifs_init_mids();
1062 goto out_destroy_inodecache;
1064 rc = cifs_init_request_bufs();
1066 goto out_destroy_mids;
1068 rc = register_filesystem(&cifs_fs_type);
1070 goto out_destroy_request_bufs;
1071 #ifdef CONFIG_CIFS_UPCALL
1072 rc = register_key_type(&cifs_spnego_key_type);
1074 goto out_unregister_filesystem;
1076 #ifdef CONFIG_CIFS_DFS_UPCALL
1077 rc = register_key_type(&key_type_dns_resolver);
1079 goto out_unregister_key_type;
1081 oplockThread = kthread_run(cifs_oplock_thread, NULL, "cifsoplockd");
1082 if (IS_ERR(oplockThread)) {
1083 rc = PTR_ERR(oplockThread);
1084 cERROR(1, ("error %d create oplock thread", rc));
1085 goto out_unregister_dfs_key_type;
1088 dnotifyThread = kthread_run(cifs_dnotify_thread, NULL, "cifsdnotifyd");
1089 if (IS_ERR(dnotifyThread)) {
1090 rc = PTR_ERR(dnotifyThread);
1091 cERROR(1, ("error %d create dnotify thread", rc));
1092 goto out_stop_oplock_thread;
/* error unwind labels, in reverse order of setup */
1097 out_stop_oplock_thread:
1098 kthread_stop(oplockThread);
1099 out_unregister_dfs_key_type:
1100 #ifdef CONFIG_CIFS_DFS_UPCALL
1101 unregister_key_type(&key_type_dns_resolver);
1102 out_unregister_key_type:
1104 #ifdef CONFIG_CIFS_UPCALL
1105 unregister_key_type(&cifs_spnego_key_type);
1106 out_unregister_filesystem:
1108 unregister_filesystem(&cifs_fs_type);
1109 out_destroy_request_bufs:
1110 cifs_destroy_request_bufs();
1112 cifs_destroy_mids();
1113 out_destroy_inodecache:
1114 cifs_destroy_inodecache();
/*
 * exit_cifs - module teardown: unregister key types and the filesystem,
 * destroy caches/pools, and stop both kernel threads (reverse of
 * init_cifs).  NOTE(review): the function signature line is not visible
 * in this extraction.
 */
1123 cFYI(DBG2, ("exit_cifs"));
1125 #ifdef CONFIG_CIFS_DFS_UPCALL
1126 cifs_dfs_release_automount_timer();
1127 unregister_key_type(&key_type_dns_resolver);
1129 #ifdef CONFIG_CIFS_UPCALL
1130 unregister_key_type(&cifs_spnego_key_type);
1132 unregister_filesystem(&cifs_fs_type);
1133 cifs_destroy_inodecache();
1134 cifs_destroy_mids();
1135 cifs_destroy_request_bufs();
1136 kthread_stop(oplockThread);
1137 kthread_stop(dnotifyThread);
/* Module metadata and entry/exit registration. */
1140 MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
1141 MODULE_LICENSE("GPL"); /* combination of LGPL + GPL source behaves as GPL */
1143 ("VFS to access servers complying with the SNIA CIFS Specification "
1144 "e.g. Samba and Windows");
1145 MODULE_VERSION(CIFS_VERSION);
1146 module_init(init_cifs)
1147 module_exit(exit_cifs)