/*
 *   Copyright (C) International Business Machines  Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *
 *   Common Internet FileSystem (CIFS) client
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/* Note that BB means BUGBUG (ie something to fix eventually) */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/seq_file.h>
#include <linux/vfs.h>
#include <linux/mempool.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include "cifsfs.h"
#include "cifspdu.h"
#define DECLARE_GLOBALS_HERE
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include <linux/mm.h>
#include <linux/key-type.h>
#include "dns_resolve.h"
#include "cifs_spnego.h"
#define CIFS_MAGIC_NUMBER 0xFF534D42	/* the first four bytes of SMB PDUs */

#ifdef CONFIG_CIFS_QUOTA
static struct quotactl_ops cifs_quotactl_ops;
#endif /* QUOTA */
unsigned int oplockEnabled = 1;
unsigned int experimEnabled = 0;
unsigned int linuxExtEnabled = 1;
unsigned int lookupCacheEnabled = 1;
unsigned int multiuser_mount = 0;
unsigned int extended_security = CIFSSEC_DEF;
/* unsigned int ntlmv2_support = 0; */
unsigned int sign_CIFS_PDUs = 1;
extern struct task_struct *oplockThread; /* remove sparse warning */
struct task_struct *oplockThread = NULL;
/* extern struct task_struct * dnotifyThread; remove sparse warning */
static struct task_struct *dnotifyThread = NULL;
static const struct super_operations cifs_super_ops;
unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
module_param(CIFSMaxBufSize, int, 0);
MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header). "
                                 "Default: 16384 Range: 8192 to 130048");
unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
module_param(cifs_min_rcv, int, 0);
MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
                               "1 to 64");
unsigned int cifs_min_small = 30;
module_param(cifs_min_small, int, 0);
MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
                                 "Range: 2 to 256");
unsigned int cifs_max_pending = CIFS_MAX_REQ;
module_param(cifs_max_pending, int, 0);
MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. "
                                   "Default: 50 Range: 2 to 256");

extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
extern mempool_t *cifs_mid_poolp;

extern struct kmem_cache *cifs_oplock_cachep;
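/*
 * Fill in the superblock for a new cifs mount: allocate the per-superblock
 * cifs_sb_info, perform the protocol mount, and instantiate the root inode.
 */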
static int
cifs_read_super(struct super_block *sb, void *data,
                const char *devname, int silent)
{
        struct inode *inode;
        struct cifs_sb_info *cifs_sb;
        int rc = 0;

        /* BB should we make this contingent on mount parm? */
        sb->s_flags |= MS_NODIRATIME | MS_NOATIME;
        sb->s_fs_info = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
        cifs_sb = CIFS_SB(sb);
        if (cifs_sb == NULL)
                return -ENOMEM;

#ifdef CONFIG_CIFS_DFS_UPCALL
        /* copy mount params to sb for use in submounts */
        /* BB: should we move this after the mount so we
         * do not have to do the copy on failed mounts?
         * BB: Maybe it is better to do simple copy before
         * complex operation (mount), and in case of fail
         * just exit instead of doing mount and attempting
         * undo it if this copy fails? */
        if (data) {
                int len = strlen(data);

                cifs_sb->mountdata = kzalloc(len + 1, GFP_KERNEL);
                if (cifs_sb->mountdata == NULL) {
                        kfree(sb->s_fs_info);
                        sb->s_fs_info = NULL;
                        return -ENOMEM;
                }
                strncpy(cifs_sb->mountdata, data, len + 1);
                cifs_sb->mountdata[len] = '\0';
        }
#endif

        rc = cifs_mount(sb, cifs_sb, data, devname);
        if (rc) {
                if (!silent)
                        cERROR(1,
                               ("cifs_mount failed w/return code = %d", rc));
                goto out_mount_failed;
        }

        sb->s_magic = CIFS_MAGIC_NUMBER;
        sb->s_op = &cifs_super_ops;
/*      if (cifs_sb->tcon->ses->server->maxBuf > MAX_CIFS_HDR_SIZE + 512)
            sb->s_blocksize =
                cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */
#ifdef CONFIG_CIFS_QUOTA
        sb->s_qcop = &cifs_quotactl_ops;
#endif
        sb->s_blocksize = CIFS_MAX_MSGSIZE;
        sb->s_blocksize_bits = 14;      /* default 2**14 = CIFS_MAX_MSGSIZE */
        inode = cifs_iget(sb, ROOT_I);
        if (IS_ERR(inode)) {
                rc = PTR_ERR(inode);
                inode = NULL;
                goto out_no_root;
        }

        sb->s_root = d_alloc_root(inode);
        if (!sb->s_root) {
                rc = -ENOMEM;
                goto out_no_root;
        }

#ifdef CONFIG_CIFS_EXPERIMENTAL
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
                cFYI(1, ("export ops supported"));
                sb->s_export_op = &cifs_export_ops;
        }
#endif /* EXPERIMENTAL */

        return 0;

out_no_root:
        cERROR(1, ("cifs_read_super: get root inode failed"));
        if (inode)
                iput(inode);
        cifs_umount(sb, cifs_sb);

out_mount_failed:
        if (cifs_sb) {
#ifdef CONFIG_CIFS_DFS_UPCALL
                if (cifs_sb->mountdata) {
                        kfree(cifs_sb->mountdata);
                        cifs_sb->mountdata = NULL;
                }
#endif
                if (cifs_sb->local_nls)
                        unload_nls(cifs_sb->local_nls);
                kfree(cifs_sb);
        }
        return rc;
}
static void
cifs_put_super(struct super_block *sb)
{
        int rc = 0;
        struct cifs_sb_info *cifs_sb;

        cFYI(1, ("In cifs_put_super"));
        cifs_sb = CIFS_SB(sb);
        if (cifs_sb == NULL) {
                cFYI(1, ("Empty cifs superblock info passed to unmount"));
                return;
        }
        rc = cifs_umount(sb, cifs_sb);
        if (rc)
                cERROR(1, ("cifs_umount failed with return code %d", rc));
#ifdef CONFIG_CIFS_DFS_UPCALL
        if (cifs_sb->mountdata) {
                kfree(cifs_sb->mountdata);
                cifs_sb->mountdata = NULL;
        }
#endif

        unload_nls(cifs_sb->local_nls);
        kfree(cifs_sb);
}
static int
cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
        struct super_block *sb = dentry->d_sb;
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct cifsTconInfo *tcon = cifs_sb->tcon;
        int rc = -EOPNOTSUPP;
        int xid;

        xid = GetXid();
        buf->f_type = CIFS_MAGIC_NUMBER;

        /*
         * PATH_MAX may be too long - it would presumably be total path,
         * but note that some servers (including Samba 3) have a shorter
         * maximum path.
         *
         * Instead could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO.
         */
        buf->f_namelen = PATH_MAX;
        buf->f_files = 0;       /* undefined */
        buf->f_ffree = 0;       /* unlimited */

        /* We could add a second check for a QFS Unix capability bit */
        if ((tcon->ses->capabilities & CAP_UNIX) &&
            (CIFS_POSIX_EXTENSIONS & le64_to_cpu(tcon->fsUnixInfo.Capability)))
                rc = CIFSSMBQFSPosixInfo(xid, tcon, buf);

        /* Only need to call the old QFSInfo if failed on newer one */
        if (rc && (tcon->ses->capabilities & CAP_NT_SMBS))
                rc = CIFSSMBQFSInfo(xid, tcon, buf);

        /*
         * Some old Windows servers also do not support level 103, retry with
         * older level one if old server failed the previous call or we
         * bypassed it because we detected that this was an older LANMAN sess
         */
        if (rc)
                rc = SMBOldQFSInfo(xid, tcon, buf);

        FreeXid(xid);
        return 0;
}
static int cifs_permission(struct inode *inode, int mask)
{
        struct cifs_sb_info *cifs_sb;

        cifs_sb = CIFS_SB(inode->i_sb);

        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
                if ((mask & MAY_EXEC) && !execute_ok(inode))
                        return -EACCES;
                else
                        return 0;
        } else /* file mode might have been restricted at mount time
                  on the client (above and beyond ACL on servers) for
                  servers which do not support setting and viewing mode bits,
                  so allowing client to check permissions is useful */
                return generic_permission(inode, mask, NULL);
}
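/* slab caches and memory pools used by the cifs module; set up in init_cifs */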
static struct kmem_cache *cifs_inode_cachep;
static struct kmem_cache *cifs_req_cachep;
static struct kmem_cache *cifs_mid_cachep;
struct kmem_cache *cifs_oplock_cachep;
static struct kmem_cache *cifs_sm_req_cachep;
mempool_t *cifs_sm_req_poolp;
mempool_t *cifs_req_poolp;
mempool_t *cifs_mid_poolp;
static struct inode *
cifs_alloc_inode(struct super_block *sb)
{
        struct cifsInodeInfo *cifs_inode;
        cifs_inode = kmem_cache_alloc(cifs_inode_cachep, GFP_KERNEL);
        if (!cifs_inode)
                return NULL;
        cifs_inode->cifsAttrs = 0x20;   /* default */
        atomic_set(&cifs_inode->inUse, 0);
        cifs_inode->time = 0;
        cifs_inode->write_behind_rc = 0;
        /* Until the file is open and we have gotten oplock
           info back from the server, can not assume caching of
           file data or metadata */
        cifs_inode->clientCanCacheRead = false;
        cifs_inode->clientCanCacheAll = false;
        cifs_inode->delete_pending = false;
        cifs_inode->vfs_inode.i_blkbits = 14;   /* 2**14 = CIFS_MAX_MSGSIZE */

        /* Can not set i_flags here - they get immediately overwritten
           to zero by the VFS */
/*      cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME; */
        INIT_LIST_HEAD(&cifs_inode->openFileList);
        return &cifs_inode->vfs_inode;
}

static void
cifs_destroy_inode(struct inode *inode)
{
        kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
}
/*
 * cifs_show_options() is for displaying mount options in /proc/mounts.
 * Not all settable options are displayed but most of the important
 * ones are.
 */
static int
cifs_show_options(struct seq_file *s, struct vfsmount *m)
{
        struct cifs_sb_info *cifs_sb;

        cifs_sb = CIFS_SB(m->mnt_sb);

        if (cifs_sb) {
                if (cifs_sb->tcon) {
/* BB add prepath to mount options displayed */
                        seq_printf(s, ",unc=%s", cifs_sb->tcon->treeName);
                        if (cifs_sb->tcon->ses) {
                                if (cifs_sb->tcon->ses->userName)
                                        seq_printf(s, ",username=%s",
                                                cifs_sb->tcon->ses->userName);
                                if (cifs_sb->tcon->ses->domainName)
                                        seq_printf(s, ",domain=%s",
                                                cifs_sb->tcon->ses->domainName);
                        }
                        if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID) ||
                            !(cifs_sb->tcon->unix_ext))
                                seq_printf(s, ",uid=%d", cifs_sb->mnt_uid);
                        if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID) ||
                            !(cifs_sb->tcon->unix_ext))
                                seq_printf(s, ",gid=%d", cifs_sb->mnt_gid);
                        if (!cifs_sb->tcon->unix_ext) {
                                seq_printf(s, ",file_mode=0%o,dir_mode=0%o",
                                           cifs_sb->mnt_file_mode,
                                           cifs_sb->mnt_dir_mode);
                        }
                        if (cifs_sb->tcon->seal)
                                seq_printf(s, ",seal");
                        if (cifs_sb->tcon->nocase)
                                seq_printf(s, ",nocase");
                        if (cifs_sb->tcon->retry)
                                seq_printf(s, ",hard");
                }
                if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
                        seq_printf(s, ",posixpaths");
                if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
                        seq_printf(s, ",setuids");
                if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
                        seq_printf(s, ",serverino");
                if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
                        seq_printf(s, ",directio");
                if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
                        seq_printf(s, ",nouser_xattr");
                if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
                        seq_printf(s, ",mapchars");
                if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
                        seq_printf(s, ",sfu");
                if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
                        seq_printf(s, ",nobrl");
                if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
                        seq_printf(s, ",cifsacl");
                if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
                        seq_printf(s, ",dynperm");
                if (m->mnt_sb->s_flags & MS_POSIXACL)
                        seq_printf(s, ",acl");

                seq_printf(s, ",rsize=%d", cifs_sb->rsize);
                seq_printf(s, ",wsize=%d", cifs_sb->wsize);
        }
        return 0;
}
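/*
 * XFS-style quota operations.  These are currently little more than
 * placeholders that log the request; they are wired up through
 * cifs_quotactl_ops below when CONFIG_CIFS_QUOTA is enabled.
 */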
#ifdef CONFIG_CIFS_QUOTA
int cifs_xquota_set(struct super_block *sb, int quota_type, qid_t qid,
                    struct fs_disk_quota *pdquota)
{
        int rc = 0;
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct cifsTconInfo *pTcon;

        pTcon = cifs_sb->tcon;
        if (pTcon)
                cFYI(1, ("set type: 0x%x id: %d", quota_type, qid));
        else
                rc = -EIO;
        return rc;
}

int cifs_xquota_get(struct super_block *sb, int quota_type, qid_t qid,
                    struct fs_disk_quota *pdquota)
{
        int rc = 0;
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct cifsTconInfo *pTcon;

        pTcon = cifs_sb->tcon;
        if (pTcon)
                cFYI(1, ("set type: 0x%x id: %d", quota_type, qid));
        else
                rc = -EIO;
        return rc;
}

int cifs_xstate_set(struct super_block *sb, unsigned int flags, int operation)
{
        int rc = 0;
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct cifsTconInfo *pTcon;

        pTcon = cifs_sb->tcon;
        if (pTcon)
                cFYI(1, ("flags: 0x%x operation: 0x%x", flags, operation));
        else
                rc = -EIO;
        return rc;
}

int cifs_xstate_get(struct super_block *sb, struct fs_quota_stat *qstats)
{
        int rc = 0;
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct cifsTconInfo *pTcon;

        pTcon = cifs_sb->tcon;
        if (pTcon)
                cFYI(1, ("pqstats %p", qstats));
        else
                rc = -EIO;
        return rc;
}

static struct quotactl_ops cifs_quotactl_ops = {
        .set_xquota = cifs_xquota_set,
        .get_xquota = cifs_xquota_get,
        .set_xstate = cifs_xstate_set,
        .get_xstate = cifs_xstate_get,
};
#endif
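/*
 * Called at the start of a forced unmount: mark the tcon as exiting when we
 * hold the last reference, and wake any threads blocked waiting on the
 * server so the unmount can make progress.
 */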
static void cifs_umount_begin(struct super_block *sb)
{
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct cifsTconInfo *tcon;

        if (cifs_sb == NULL)
                return;

        tcon = cifs_sb->tcon;
        if (tcon == NULL)
                return;
        down(&tcon->tconSem);
        if (atomic_read(&tcon->useCount) == 1)
                tcon->tidStatus = CifsExiting;
        up(&tcon->tconSem);

        /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
        /* cancel_notify_requests(tcon); */
        if (tcon->ses && tcon->ses->server) {
                cFYI(1, ("wake up tasks now - umount begin not complete"));
                wake_up_all(&tcon->ses->server->request_q);
                wake_up_all(&tcon->ses->server->response_q);
                msleep(1); /* yield */
                /* we have to kick the requests once more */
                wake_up_all(&tcon->ses->server->response_q);
                msleep(1);
        }
/* BB FIXME - finish add checks for tidStatus BB */

        return;
}
#ifdef CONFIG_CIFS_STATS2
static int cifs_show_stats(struct seq_file *s, struct vfsmount *mnt)
{
        /* BB FIXME */
        return 0;
}
#endif

static int cifs_remount(struct super_block *sb, int *flags, char *data)
{
        *flags |= MS_NODIRATIME;
        return 0;
}

static const struct super_operations cifs_super_ops = {
        .put_super = cifs_put_super,
        .statfs = cifs_statfs,
        .alloc_inode = cifs_alloc_inode,
        .destroy_inode = cifs_destroy_inode,
/*      .drop_inode     = generic_delete_inode,
        .delete_inode   = cifs_delete_inode, */ /* Do not need above two
        functions unless later we add lazy close of inodes or unless the
        kernel forgets to call us with the same number of releases (closes)
        as opens */
        .show_options = cifs_show_options,
        .umount_begin = cifs_umount_begin,
        .remount_fs = cifs_remount,
#ifdef CONFIG_CIFS_STATS2
        .show_stats = cifs_show_stats,
#endif
};
static int
cifs_get_sb(struct file_system_type *fs_type,
            int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
        int rc;
        struct super_block *sb = sget(fs_type, NULL, set_anon_super, NULL);

        cFYI(1, ("Devname: %s flags: %d ", dev_name, flags));

        if (IS_ERR(sb))
                return PTR_ERR(sb);

        sb->s_flags = flags;

        rc = cifs_read_super(sb, data, dev_name, flags & MS_SILENT ? 1 : 0);
        if (rc) {
                up_write(&sb->s_umount);
                deactivate_super(sb);
                return rc;
        }
        sb->s_flags |= MS_ACTIVE;
        return simple_set_mnt(mnt, sb);
}
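/*
 * Write path wrapper: after the generic write, start writeback immediately
 * unless we hold an exclusive oplock that lets us safely cache writes.
 */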
static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
                                   unsigned long nr_segs, loff_t pos)
{
        struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
        ssize_t written;

        written = generic_file_aio_write(iocb, iov, nr_segs, pos);
        if (!CIFS_I(inode)->clientCanCacheAll)
                filemap_fdatawrite(inode->i_mapping);
        return written;
}

static loff_t cifs_llseek(struct file *file, loff_t offset, int origin)
{
        /* origin == SEEK_END => we must revalidate the cached file length */
        if (origin == SEEK_END) {
                int retval;

                /* some applications poll for the file length in this strange
                   way so we must seek to end on non-oplocked files by
                   setting the revalidate time to zero */
                CIFS_I(file->f_path.dentry->d_inode)->time = 0;

                retval = cifs_revalidate(file->f_path.dentry);
                if (retval < 0)
                        return (loff_t)retval;
        }
        return generic_file_llseek_unlocked(file, offset, origin);
}
#ifdef CONFIG_CIFS_EXPERIMENTAL
static int cifs_setlease(struct file *file, long arg, struct file_lock **lease)
{
        /* note that this is called by vfs setlease with the BKL held
           although I doubt that BKL is needed here in cifs */
        struct inode *inode = file->f_path.dentry->d_inode;

        if (!(S_ISREG(inode->i_mode)))
                return -EINVAL;

        /* check if file is oplocked */
        if (((arg == F_RDLCK) &&
             (CIFS_I(inode)->clientCanCacheRead)) ||
            ((arg == F_WRLCK) &&
             (CIFS_I(inode)->clientCanCacheAll)))
                return generic_setlease(file, arg, lease);
        else if (CIFS_SB(inode->i_sb)->tcon->local_lease &&
                 !CIFS_I(inode)->clientCanCacheRead)
                /* If the server claims to support oplock on this
                   file, then we still need to check oplock even
                   if the local_lease mount option is set, but there
                   are servers which do not support oplock for which
                   this mount option may be useful if the user
                   knows that the file won't be changed on the server
                   by anyone else */
                return generic_setlease(file, arg, lease);
        else
                return -EAGAIN;
}
#endif
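/*
 * The cifs filesystem type (registered in init_cifs) and the inode and file
 * operation tables follow; superblocks are torn down with kill_anon_super.
 */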
struct file_system_type cifs_fs_type = {
        .owner = THIS_MODULE,
        .name = "cifs",
        .get_sb = cifs_get_sb,
        .kill_sb = kill_anon_super,
        /*  .fs_flags */
};
const struct inode_operations cifs_dir_inode_ops = {
        .create = cifs_create,
        .lookup = cifs_lookup,
        .getattr = cifs_getattr,
        .unlink = cifs_unlink,
        .link = cifs_hardlink,
        .mkdir = cifs_mkdir,
        .rmdir = cifs_rmdir,
        .rename = cifs_rename,
        .permission = cifs_permission,
/*      revalidate:cifs_revalidate,   */
        .setattr = cifs_setattr,
        .symlink = cifs_symlink,
        .mknod = cifs_mknod,
#ifdef CONFIG_CIFS_XATTR
        .setxattr = cifs_setxattr,
        .getxattr = cifs_getxattr,
        .listxattr = cifs_listxattr,
        .removexattr = cifs_removexattr,
#endif
};

const struct inode_operations cifs_file_inode_ops = {
/*      revalidate:cifs_revalidate, */
        .setattr = cifs_setattr,
        .getattr = cifs_getattr, /* do we need this anymore? */
        .rename = cifs_rename,
        .permission = cifs_permission,
#ifdef CONFIG_CIFS_XATTR
        .setxattr = cifs_setxattr,
        .getxattr = cifs_getxattr,
        .listxattr = cifs_listxattr,
        .removexattr = cifs_removexattr,
#endif
};

const struct inode_operations cifs_symlink_inode_ops = {
        .readlink = generic_readlink,
        .follow_link = cifs_follow_link,
        .put_link = cifs_put_link,
        .permission = cifs_permission,
        /* BB add the following two eventually */
        /* revalidate: cifs_revalidate,
           setattr:    cifs_notify_change, *//* BB do we need notify change */
#ifdef CONFIG_CIFS_XATTR
        .setxattr = cifs_setxattr,
        .getxattr = cifs_getxattr,
        .listxattr = cifs_listxattr,
        .removexattr = cifs_removexattr,
#endif
};
const struct file_operations cifs_file_ops = {
        .read = do_sync_read,
        .write = do_sync_write,
        .aio_read = generic_file_aio_read,
        .aio_write = cifs_file_aio_write,
        .open = cifs_open,
        .release = cifs_close,
        .lock = cifs_lock,
        .fsync = cifs_fsync,
        .flush = cifs_flush,
        .mmap  = cifs_file_mmap,
        .splice_read = generic_file_splice_read,
        .llseek = cifs_llseek,
#ifdef CONFIG_CIFS_POSIX
        .unlocked_ioctl = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */

#ifdef CONFIG_CIFS_EXPERIMENTAL
        .dir_notify = cifs_dir_notify,
        .setlease = cifs_setlease,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};

const struct file_operations cifs_file_direct_ops = {
        /* no mmap, no aio, no readv -
           BB reevaluate whether they can be done with directio, no cache */
        .read = cifs_user_read,
        .write = cifs_user_write,
        .open = cifs_open,
        .release = cifs_close,
        .lock = cifs_lock,
        .fsync = cifs_fsync,
        .flush = cifs_flush,
        .splice_read = generic_file_splice_read,
#ifdef CONFIG_CIFS_POSIX
        .unlocked_ioctl = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */
        .llseek = cifs_llseek,
#ifdef CONFIG_CIFS_EXPERIMENTAL
        .dir_notify = cifs_dir_notify,
        .setlease = cifs_setlease,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};

const struct file_operations cifs_file_nobrl_ops = {
        .read = do_sync_read,
        .write = do_sync_write,
        .aio_read = generic_file_aio_read,
        .aio_write = cifs_file_aio_write,
        .open = cifs_open,
        .release = cifs_close,
        .fsync = cifs_fsync,
        .flush = cifs_flush,
        .mmap  = cifs_file_mmap,
        .splice_read = generic_file_splice_read,
        .llseek = cifs_llseek,
#ifdef CONFIG_CIFS_POSIX
        .unlocked_ioctl = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */

#ifdef CONFIG_CIFS_EXPERIMENTAL
        .dir_notify = cifs_dir_notify,
        .setlease = cifs_setlease,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};

const struct file_operations cifs_file_direct_nobrl_ops = {
        /* no mmap, no aio, no readv -
           BB reevaluate whether they can be done with directio, no cache */
        .read = cifs_user_read,
        .write = cifs_user_write,
        .open = cifs_open,
        .release = cifs_close,
        .fsync = cifs_fsync,
        .flush = cifs_flush,
        .splice_read = generic_file_splice_read,
#ifdef CONFIG_CIFS_POSIX
        .unlocked_ioctl = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */
        .llseek = cifs_llseek,
#ifdef CONFIG_CIFS_EXPERIMENTAL
        .dir_notify = cifs_dir_notify,
        .setlease = cifs_setlease,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};

const struct file_operations cifs_dir_ops = {
        .readdir = cifs_readdir,
        .release = cifs_closedir,
        .read = generic_read_dir,
#ifdef CONFIG_CIFS_EXPERIMENTAL
        .dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
        .unlocked_ioctl = cifs_ioctl,
        .llseek = generic_file_llseek,
};
static void
cifs_init_once(void *inode)
{
        struct cifsInodeInfo *cifsi = inode;

        inode_init_once(&cifsi->vfs_inode);
        INIT_LIST_HEAD(&cifsi->lockList);
}

static int
cifs_init_inodecache(void)
{
        cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
                                              sizeof(struct cifsInodeInfo),
                                              0, (SLAB_RECLAIM_ACCOUNT|
                                                  SLAB_MEM_SPREAD),
                                              cifs_init_once);
        if (cifs_inode_cachep == NULL)
                return -ENOMEM;
        return 0;
}

static void
cifs_destroy_inodecache(void)
{
        kmem_cache_destroy(cifs_inode_cachep);
}
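/*
 * Create the slab caches and mempools for large and small SMB request
 * buffers, after clamping the related module parameters to sane ranges.
 */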
static int
cifs_init_request_bufs(void)
{
        if (CIFSMaxBufSize < 8192) {
        /* Buffer size can not be smaller than 2 * PATH_MAX since maximum
           Unicode path name has to fit in any SMB/CIFS path based frames */
                CIFSMaxBufSize = 8192;
        } else if (CIFSMaxBufSize > 1024*127) {
                CIFSMaxBufSize = 1024 * 127;
        } else {
                CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
        }
/*      cERROR(1,("CIFSMaxBufSize %d 0x%x",CIFSMaxBufSize,CIFSMaxBufSize)); */
        cifs_req_cachep = kmem_cache_create("cifs_request",
                                            CIFSMaxBufSize +
                                            MAX_CIFS_HDR_SIZE, 0,
                                            SLAB_HWCACHE_ALIGN, NULL);
        if (cifs_req_cachep == NULL)
                return -ENOMEM;

        if (cifs_min_rcv < 1)
                cifs_min_rcv = 1;
        else if (cifs_min_rcv > 64) {
                cifs_min_rcv = 64;
                cERROR(1, ("cifs_min_rcv set to maximum (64)"));
        }

        cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
                                                  cifs_req_cachep);

        if (cifs_req_poolp == NULL) {
                kmem_cache_destroy(cifs_req_cachep);
                return -ENOMEM;
        }
        /* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
           almost all handle based requests (but not write response, nor is it
           sufficient for path based requests).  A smaller size would have
           been more efficient (compacting multiple slab items on one 4k page)
           for the case in which debug was on, but this larger size allows
           more SMBs to use small buffer alloc and is still much more
           efficient to alloc 1 per page off the slab compared to 17K (5page)
           alloc of large cifs buffers even when page debugging is on */
        cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
                        MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
                        NULL);
        if (cifs_sm_req_cachep == NULL) {
                mempool_destroy(cifs_req_poolp);
                kmem_cache_destroy(cifs_req_cachep);
                return -ENOMEM;
        }

        if (cifs_min_small < 2)
                cifs_min_small = 2;
        else if (cifs_min_small > 256) {
                cifs_min_small = 256;
                cFYI(1, ("cifs_min_small set to maximum (256)"));
        }

        cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
                                                     cifs_sm_req_cachep);

        if (cifs_sm_req_poolp == NULL) {
                mempool_destroy(cifs_req_poolp);
                kmem_cache_destroy(cifs_req_cachep);
                kmem_cache_destroy(cifs_sm_req_cachep);
                return -ENOMEM;
        }

        return 0;
}

static void
cifs_destroy_request_bufs(void)
{
        mempool_destroy(cifs_req_poolp);
        kmem_cache_destroy(cifs_req_cachep);
        mempool_destroy(cifs_sm_req_poolp);
        kmem_cache_destroy(cifs_sm_req_cachep);
}
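/*
 * Caches for mid (multiplex id) entries, which track requests in flight to
 * the server, and for queued oplock break entries.
 */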
static int
cifs_init_mids(void)
{
        cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
                                            sizeof(struct mid_q_entry), 0,
                                            SLAB_HWCACHE_ALIGN, NULL);
        if (cifs_mid_cachep == NULL)
                return -ENOMEM;

        /* 3 is a reasonable minimum number of simultaneous operations */
        cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
        if (cifs_mid_poolp == NULL) {
                kmem_cache_destroy(cifs_mid_cachep);
                return -ENOMEM;
        }

        cifs_oplock_cachep = kmem_cache_create("cifs_oplock_structs",
                                               sizeof(struct oplock_q_entry), 0,
                                               SLAB_HWCACHE_ALIGN, NULL);
        if (cifs_oplock_cachep == NULL) {
                mempool_destroy(cifs_mid_poolp);
                kmem_cache_destroy(cifs_mid_cachep);
                return -ENOMEM;
        }

        return 0;
}

static void
cifs_destroy_mids(void)
{
        mempool_destroy(cifs_mid_poolp);
        kmem_cache_destroy(cifs_mid_cachep);
        kmem_cache_destroy(cifs_oplock_cachep);
}
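/*
 * Kernel thread that services queued oplock breaks: flush (and if needed
 * invalidate) cached data for the inode, then send the oplock release back
 * to the server as an unlock with LOCKING_ANDX_OPLOCK_RELEASE.
 */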
static int cifs_oplock_thread(void *dummyarg)
{
        struct oplock_q_entry *oplock_item;
        struct cifsTconInfo *pTcon;
        struct inode *inode;
        __u16 netfid;
        int rc, waitrc = 0;

        set_freezable();
        do {
                if (try_to_freeze())
                        continue;

                spin_lock(&GlobalMid_Lock);
                if (list_empty(&GlobalOplock_Q)) {
                        spin_unlock(&GlobalMid_Lock);
                        set_current_state(TASK_INTERRUPTIBLE);
                        schedule_timeout(39*HZ);
                } else {
                        oplock_item = list_entry(GlobalOplock_Q.next,
                                                 struct oplock_q_entry, qhead);
                        cFYI(1, ("found oplock item to write out"));
                        pTcon = oplock_item->tcon;
                        inode = oplock_item->pinode;
                        netfid = oplock_item->netfid;
                        spin_unlock(&GlobalMid_Lock);
                        DeleteOplockQEntry(oplock_item);
                        /* can not grab inode sem here since it would
                           deadlock when oplock received on delete
                           since vfs_unlink holds the i_mutex across
                           the call */
                        /* mutex_lock(&inode->i_mutex);*/
                        if (S_ISREG(inode->i_mode)) {
#ifdef CONFIG_CIFS_EXPERIMENTAL
                                if (CIFS_I(inode)->clientCanCacheAll == 0)
                                        break_lease(inode, FMODE_READ);
                                else if (CIFS_I(inode)->clientCanCacheRead == 0)
                                        break_lease(inode, FMODE_WRITE);
#endif
                                rc = filemap_fdatawrite(inode->i_mapping);
                                if (CIFS_I(inode)->clientCanCacheRead == 0) {
                                        waitrc = filemap_fdatawait(
                                                        inode->i_mapping);
                                        invalidate_remote_inode(inode);
                                }
                                if (rc == 0)
                                        rc = waitrc;
                        } else
                                rc = 0;
                        /* mutex_unlock(&inode->i_mutex);*/
                        if (rc)
                                CIFS_I(inode)->write_behind_rc = rc;
                        cFYI(1, ("Oplock flush inode %p rc %d",
                                 inode, rc));

                        /* releasing stale oplock after recent reconnect
                           of smb session using a now incorrect file
                           handle is not a data integrity issue but do
                           not bother sending an oplock release if session
                           to server still is disconnected since oplock
                           already released by the server in that case */
                        if (pTcon->tidStatus != CifsNeedReconnect) {
                                rc = CIFSSMBLock(0, pTcon, netfid,
                                        0 /* len */ , 0 /* offset */, 0,
                                        0, LOCKING_ANDX_OPLOCK_RELEASE,
                                        false /* wait flag */);
                                cFYI(1, ("Oplock release rc = %d", rc));
                        }
                        set_current_state(TASK_INTERRUPTIBLE);
                        schedule_timeout(1); /* yield in case q were corrupt */
                }
        } while (!kthread_should_stop());

        return 0;
}
static int cifs_dnotify_thread(void *dummyarg)
{
        struct list_head *tmp;
        struct cifsSesInfo *ses;

        set_freezable();
        do {
                if (try_to_freeze())
                        continue;
                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(15*HZ);
                read_lock(&GlobalSMBSeslock);
                /* check if any stuck requests that need
                   to be woken up and wakeq so the
                   thread can wake up and error out */
                list_for_each(tmp, &GlobalSMBSessionList) {
                        ses = list_entry(tmp, struct cifsSesInfo,
                                         cifsSessionList);
                        if (ses->server && atomic_read(&ses->server->inFlight))
                                wake_up_all(&ses->server->response_q);
                }
                read_unlock(&GlobalSMBSeslock);
        } while (!kthread_should_stop());

        return 0;
}
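/*
 * Module init: set up the global lists, counters and caches, register the
 * filesystem (and upcall key types), then start the helper threads.
 */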
static int __init
init_cifs(void)
{
        int rc = 0;

        cifs_proc_init();
/*      INIT_LIST_HEAD(&GlobalServerList);*/    /* BB not implemented yet */
        INIT_LIST_HEAD(&GlobalSMBSessionList);
        INIT_LIST_HEAD(&GlobalTreeConnectionList);
        INIT_LIST_HEAD(&GlobalOplock_Q);
#ifdef CONFIG_CIFS_EXPERIMENTAL
        INIT_LIST_HEAD(&GlobalDnotifyReqList);
        INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
#endif
/*
 * Initialize Global counters
 */
        atomic_set(&sesInfoAllocCount, 0);
        atomic_set(&tconInfoAllocCount, 0);
        atomic_set(&tcpSesAllocCount, 0);
        atomic_set(&tcpSesReconnectCount, 0);
        atomic_set(&tconInfoReconnectCount, 0);

        atomic_set(&bufAllocCount, 0);
        atomic_set(&smBufAllocCount, 0);
#ifdef CONFIG_CIFS_STATS2
        atomic_set(&totBufAllocCount, 0);
        atomic_set(&totSmBufAllocCount, 0);
#endif /* CONFIG_CIFS_STATS2 */

        atomic_set(&midCount, 0);
        GlobalCurrentXid = 0;
        GlobalTotalActiveXid = 0;
        GlobalMaxActiveXid = 0;
        memset(Local_System_Name, 0, 15);
        rwlock_init(&GlobalSMBSeslock);
        spin_lock_init(&GlobalMid_Lock);

        if (cifs_max_pending < 2) {
                cifs_max_pending = 2;
                cFYI(1, ("cifs_max_pending set to min of 2"));
        } else if (cifs_max_pending > 256) {
                cifs_max_pending = 256;
                cFYI(1, ("cifs_max_pending set to max of 256"));
        }

        rc = cifs_init_inodecache();
        if (rc)
                goto out_clean_proc;

        rc = cifs_init_mids();
        if (rc)
                goto out_destroy_inodecache;

        rc = cifs_init_request_bufs();
        if (rc)
                goto out_destroy_mids;

        rc = register_filesystem(&cifs_fs_type);
        if (rc)
                goto out_destroy_request_bufs;
#ifdef CONFIG_CIFS_UPCALL
        rc = register_key_type(&cifs_spnego_key_type);
        if (rc)
                goto out_unregister_filesystem;
#endif
#ifdef CONFIG_CIFS_DFS_UPCALL
        rc = register_key_type(&key_type_dns_resolver);
        if (rc)
                goto out_unregister_key_type;
#endif
        oplockThread = kthread_run(cifs_oplock_thread, NULL, "cifsoplockd");
        if (IS_ERR(oplockThread)) {
                rc = PTR_ERR(oplockThread);
                cERROR(1, ("error %d create oplock thread", rc));
                goto out_unregister_dfs_key_type;
        }

        dnotifyThread = kthread_run(cifs_dnotify_thread, NULL, "cifsdnotifyd");
        if (IS_ERR(dnotifyThread)) {
                rc = PTR_ERR(dnotifyThread);
                cERROR(1, ("error %d create dnotify thread", rc));
                goto out_stop_oplock_thread;
        }

        return 0;

 out_stop_oplock_thread:
        kthread_stop(oplockThread);
 out_unregister_dfs_key_type:
#ifdef CONFIG_CIFS_DFS_UPCALL
        unregister_key_type(&key_type_dns_resolver);
 out_unregister_key_type:
#endif
#ifdef CONFIG_CIFS_UPCALL
        unregister_key_type(&cifs_spnego_key_type);
 out_unregister_filesystem:
#endif
        unregister_filesystem(&cifs_fs_type);
 out_destroy_request_bufs:
        cifs_destroy_request_bufs();
 out_destroy_mids:
        cifs_destroy_mids();
 out_destroy_inodecache:
        cifs_destroy_inodecache();
 out_clean_proc:
        cifs_proc_clean();
        return rc;
}
static void __exit
exit_cifs(void)
{
        cFYI(DBG2, ("exit_cifs"));
        cifs_proc_clean();
#ifdef CONFIG_CIFS_DFS_UPCALL
        cifs_dfs_release_automount_timer();
        unregister_key_type(&key_type_dns_resolver);
#endif
#ifdef CONFIG_CIFS_UPCALL
        unregister_key_type(&cifs_spnego_key_type);
#endif
        unregister_filesystem(&cifs_fs_type);
        cifs_destroy_inodecache();
        cifs_destroy_mids();
        cifs_destroy_request_bufs();
        kthread_stop(oplockThread);
        kthread_stop(dnotifyThread);
}
MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
MODULE_LICENSE("GPL");  /* combination of LGPL + GPL source behaves as GPL */
MODULE_DESCRIPTION
    ("VFS to access servers complying with the SNIA CIFS Specification "
     "e.g. Samba and Windows");
MODULE_VERSION(CIFS_VERSION);
module_init(init_cifs)
module_exit(exit_cifs)