/*
 *   Copyright (C) International Business Machines Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *
 *   Common Internet FileSystem (CIFS) client
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/* Note that BB means BUGBUG (i.e. something to fix eventually) */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/seq_file.h>
#include <linux/vfs.h>
#include <linux/mempool.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include "cifsfs.h"
#include "cifspdu.h"
#define DECLARE_GLOBALS_HERE
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include <linux/key-type.h>
#include "dns_resolve.h"
#include "cifs_spnego.h"
#define CIFS_MAGIC_NUMBER 0xFF534D42	/* the first four bytes of SMB PDUs */
#ifdef CONFIG_CIFS_QUOTA
static struct quotactl_ops cifs_quotactl_ops;
#endif /* QUOTA */
unsigned int oplockEnabled = 1;
unsigned int experimEnabled = 0;
unsigned int linuxExtEnabled = 1;
unsigned int lookupCacheEnabled = 1;
unsigned int multiuser_mount = 0;
unsigned int extended_security = CIFSSEC_DEF;
/* unsigned int ntlmv2_support = 0; */
unsigned int sign_CIFS_PDUs = 1;
extern struct task_struct *oplockThread; /* remove sparse warning */
struct task_struct *oplockThread = NULL;
/* extern struct task_struct * dnotifyThread; remove sparse warning */
static struct task_struct *dnotifyThread = NULL;
static const struct super_operations cifs_super_ops;
unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
module_param(CIFSMaxBufSize, int, 0);
MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header). "
                 "Default: 16384 Range: 8192 to 130048");
unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
module_param(cifs_min_rcv, int, 0);
MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
                 "1 to 64");
unsigned int cifs_min_small = 30;
module_param(cifs_min_small, int, 0);
MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
                 "Range: 2 to 256");
unsigned int cifs_max_pending = CIFS_MAX_REQ;
module_param(cifs_max_pending, int, 0);
MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. "
                 "Default: 50 Range: 2 to 256");
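/*
 * The module parameters above are declared with a permission of 0, so they
 * are not exposed under sysfs and can only be set at module load time.
 * A rough, illustrative invocation (values are hypothetical, not
 * recommendations):
 *
 *      modprobe cifs CIFSMaxBufSize=16384 cifs_max_pending=50
 *
 * Out-of-range values are clamped in cifs_init_request_bufs() and
 * init_cifs() below.
 */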
extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
extern mempool_t *cifs_mid_poolp;

extern struct kmem_cache *cifs_oplock_cachep;
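/*
 * cifs_read_super() fills in a cifs superblock: it allocates the
 * per-superblock cifs_sb_info, calls cifs_mount() to connect to the
 * server, and then instantiates the root inode and dentry.
 */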
cifs_read_super(struct super_block *sb, void *data,
                const char *devname, int silent)
        struct cifs_sb_info *cifs_sb;

        /* BB should we make this contingent on mount parm? */
        sb->s_flags |= MS_NODIRATIME | MS_NOATIME;
        sb->s_fs_info = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
        cifs_sb = CIFS_SB(sb);

#ifdef CONFIG_CIFS_DFS_UPCALL
        /* copy mount params to sb for use in submounts */
        /* BB: should we move this after the mount so we
         * do not have to do the copy on failed mounts?
         * BB: maybe it is better to do the simple copy before
         * the complex operation (mount), and in case of failure
         * just exit instead of doing the mount and attempting to
         * undo it if this copy fails? */
        int len = strlen(data);
        cifs_sb->mountdata = kzalloc(len + 1, GFP_KERNEL);
        if (cifs_sb->mountdata == NULL) {
                kfree(sb->s_fs_info);
                sb->s_fs_info = NULL;
        strncpy(cifs_sb->mountdata, data, len + 1);
        cifs_sb->mountdata[len] = '\0';

        rc = cifs_mount(sb, cifs_sb, data, devname);
                  ("cifs_mount failed w/return code = %d", rc));
                goto out_mount_failed;

        sb->s_magic = CIFS_MAGIC_NUMBER;
        sb->s_op = &cifs_super_ops;
        /* if (cifs_sb->tcon->ses->server->maxBuf > MAX_CIFS_HDR_SIZE + 512)
                sb->s_blocksize =
                    cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */
#ifdef CONFIG_CIFS_QUOTA
        sb->s_qcop = &cifs_quotactl_ops;
        sb->s_blocksize = CIFS_MAX_MSGSIZE;
        sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
        inode = cifs_iget(sb, ROOT_I);

        sb->s_root = d_alloc_root(inode);

#ifdef CONFIG_CIFS_EXPERIMENTAL
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
                cFYI(1, ("export ops supported"));
                sb->s_export_op = &cifs_export_ops;
#endif /* EXPERIMENTAL */

        cERROR(1, ("cifs_read_super: get root inode failed"));

        cifs_umount(sb, cifs_sb);

#ifdef CONFIG_CIFS_DFS_UPCALL
        if (cifs_sb->mountdata) {
                kfree(cifs_sb->mountdata);
                cifs_sb->mountdata = NULL;

        if (cifs_sb->local_nls)
                unload_nls(cifs_sb->local_nls);
cifs_put_super(struct super_block *sb)
        struct cifs_sb_info *cifs_sb;

        cFYI(1, ("In cifs_put_super"));
        cifs_sb = CIFS_SB(sb);
        if (cifs_sb == NULL) {
                cFYI(1, ("Empty cifs superblock info passed to unmount"));

        rc = cifs_umount(sb, cifs_sb);
                cERROR(1, ("cifs_umount failed with return code %d", rc));
#ifdef CONFIG_CIFS_DFS_UPCALL
        if (cifs_sb->mountdata) {
                kfree(cifs_sb->mountdata);
                cifs_sb->mountdata = NULL;

        unload_nls(cifs_sb->local_nls);
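/*
 * cifs_statfs() fills in struct kstatfs for statfs(2).  It tries the
 * POSIX QFS info call first when the server advertises the CIFS POSIX
 * extensions, then the NT-level QFSInfo, and finally the legacy
 * (LANMAN-era) level if the newer calls fail.
 */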
cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
        struct super_block *sb = dentry->d_sb;
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct cifsTconInfo *tcon = cifs_sb->tcon;
        int rc = -EOPNOTSUPP;

        buf->f_type = CIFS_MAGIC_NUMBER;

        /*
         * PATH_MAX may be too long - it would presumably be total path,
         * but note that some servers (including Samba 3) have a shorter
         * maximum path.
         *
         * Instead could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO.
         */
        buf->f_namelen = PATH_MAX;
        buf->f_files = 0;	/* undefined */
        buf->f_ffree = 0;	/* unlimited */

        /*
         * We could add a second check for a QFS Unix capability bit
         */
        if ((tcon->ses->capabilities & CAP_UNIX) &&
            (CIFS_POSIX_EXTENSIONS & le64_to_cpu(tcon->fsUnixInfo.Capability)))
                rc = CIFSSMBQFSPosixInfo(xid, tcon, buf);

        /*
         * Only need to call the old QFSInfo if failed on newer one,
         */
        if (rc && (tcon->ses->capabilities & CAP_NT_SMBS))
                rc = CIFSSMBQFSInfo(xid, tcon, buf);

        /*
         * Some old Windows servers also do not support level 103, retry with
         * older level one if old server failed the previous call or we
         * bypassed it because we detected that this was an older LANMAN session
         */
        if (rc)
                rc = SMBOldQFSInfo(xid, tcon, buf);
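/*
 * cifs_permission() is the VFS permission check.  With the "noperm" mount
 * flag only execute access is checked locally (the server enforces the
 * rest); otherwise fall back to generic_permission() so that mode bits
 * restricted on the client are still honoured.
 */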
static int cifs_permission(struct inode *inode, int mask)
        struct cifs_sb_info *cifs_sb;

        cifs_sb = CIFS_SB(inode->i_sb);

        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
                if ((mask & MAY_EXEC) && !execute_ok(inode))
        } else /* file mode might have been restricted at mount time
                  on the client (above and beyond the ACL on the server)
                  for servers which do not support setting and viewing
                  mode bits, so allowing the client to check permissions
                  is useful */
                return generic_permission(inode, mask, NULL);

static struct kmem_cache *cifs_inode_cachep;
static struct kmem_cache *cifs_req_cachep;
static struct kmem_cache *cifs_mid_cachep;
struct kmem_cache *cifs_oplock_cachep;
static struct kmem_cache *cifs_sm_req_cachep;
mempool_t *cifs_sm_req_poolp;
mempool_t *cifs_req_poolp;
mempool_t *cifs_mid_poolp;

static struct inode *
cifs_alloc_inode(struct super_block *sb)
        struct cifsInodeInfo *cifs_inode;
        cifs_inode = kmem_cache_alloc(cifs_inode_cachep, GFP_KERNEL);
        cifs_inode->cifsAttrs = 0x20;	/* default */
        atomic_set(&cifs_inode->inUse, 0);
        cifs_inode->time = 0;
        cifs_inode->write_behind_rc = 0;
        /* Until the file is open and we have gotten oplock
           info back from the server, can not assume caching of
           file data or metadata */
        cifs_inode->clientCanCacheRead = false;
        cifs_inode->clientCanCacheAll = false;
        cifs_inode->delete_pending = false;
        cifs_inode->vfs_inode.i_blkbits = 14;	/* 2**14 = CIFS_MAX_MSGSIZE */

        /* Can not set i_flags here - they get immediately overwritten
           to zero by the VFS */
        /* cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME; */
        INIT_LIST_HEAD(&cifs_inode->openFileList);
        return &cifs_inode->vfs_inode;

cifs_destroy_inode(struct inode *inode)
        kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
/*
 * cifs_show_options() is for displaying mount options in /proc/mounts.
 * Not all settable options are displayed but most of the important
 * ones are.
 */
cifs_show_options(struct seq_file *s, struct vfsmount *m)
        struct cifs_sb_info *cifs_sb;

        cifs_sb = CIFS_SB(m->mnt_sb);

        /* BB add prepath to mount options displayed */
        seq_printf(s, ",unc=%s", cifs_sb->tcon->treeName);
        if (cifs_sb->tcon->ses) {
                if (cifs_sb->tcon->ses->userName)
                        seq_printf(s, ",username=%s",
                                   cifs_sb->tcon->ses->userName);
                if (cifs_sb->tcon->ses->domainName)
                        seq_printf(s, ",domain=%s",
                                   cifs_sb->tcon->ses->domainName);

        if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID) ||
            !(cifs_sb->tcon->unix_ext))
                seq_printf(s, ",uid=%d", cifs_sb->mnt_uid);
        if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID) ||
            !(cifs_sb->tcon->unix_ext))
                seq_printf(s, ",gid=%d", cifs_sb->mnt_gid);
        if (!cifs_sb->tcon->unix_ext) {
                seq_printf(s, ",file_mode=0%o,dir_mode=0%o",
                           cifs_sb->mnt_file_mode,
                           cifs_sb->mnt_dir_mode);

        if (cifs_sb->tcon->seal)
                seq_printf(s, ",seal");
        if (cifs_sb->tcon->nocase)
                seq_printf(s, ",nocase");
        if (cifs_sb->tcon->retry)
                seq_printf(s, ",hard");

        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
                seq_printf(s, ",posixpaths");
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
                seq_printf(s, ",setuids");
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
                seq_printf(s, ",serverino");
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
                seq_printf(s, ",directio");
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
                seq_printf(s, ",nouser_xattr");
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
                seq_printf(s, ",mapchars");
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
                seq_printf(s, ",sfu");
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
                seq_printf(s, ",nobrl");
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
                seq_printf(s, ",cifsacl");
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
                seq_printf(s, ",dynperm");
        if (m->mnt_sb->s_flags & MS_POSIXACL)
                seq_printf(s, ",acl");

        seq_printf(s, ",rsize=%d", cifs_sb->rsize);
        seq_printf(s, ",wsize=%d", cifs_sb->wsize);
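/*
 * Purely illustrative (the exact options shown depend on the mount flags
 * and on the server capabilities): a resulting /proc/mounts line might
 * look roughly like
 *
 *   //server/share /mnt cifs rw,unc=\\server\share,username=guest,
 *       rsize=16384,wsize=57344 0 0
 */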
#ifdef CONFIG_CIFS_QUOTA
int cifs_xquota_set(struct super_block *sb, int quota_type, qid_t qid,
                    struct fs_disk_quota *pdquota)
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct cifsTconInfo *pTcon;

        pTcon = cifs_sb->tcon;

        cFYI(1, ("set type: 0x%x id: %d", quota_type, qid));

int cifs_xquota_get(struct super_block *sb, int quota_type, qid_t qid,
                    struct fs_disk_quota *pdquota)
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct cifsTconInfo *pTcon;

        pTcon = cifs_sb->tcon;

        cFYI(1, ("get type: 0x%x id: %d", quota_type, qid));

int cifs_xstate_set(struct super_block *sb, unsigned int flags, int operation)
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct cifsTconInfo *pTcon;

        pTcon = cifs_sb->tcon;

        cFYI(1, ("flags: 0x%x operation: 0x%x", flags, operation));

int cifs_xstate_get(struct super_block *sb, struct fs_quota_stat *qstats)
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct cifsTconInfo *pTcon;

        pTcon = cifs_sb->tcon;

        cFYI(1, ("pqstats %p", qstats));

static struct quotactl_ops cifs_quotactl_ops = {
        .set_xquota = cifs_xquota_set,
        .get_xquota = cifs_xquota_get,
        .set_xstate = cifs_xstate_set,
        .get_xstate = cifs_xstate_get,
};
#endif /* CONFIG_CIFS_QUOTA */
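/*
 * cifs_umount_begin() handles a forced unmount ("umount -f"): when this is
 * the last user of the tree connection, mark it CifsExiting and wake
 * anything blocked on the server's request/response queues so waiters can
 * error out rather than hang on an unreachable server.
 */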
static void cifs_umount_begin(struct super_block *sb)
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct cifsTconInfo *tcon;

        tcon = cifs_sb->tcon;

        read_lock(&cifs_tcp_ses_lock);
        if (tcon->tc_count == 1)
                tcon->tidStatus = CifsExiting;
        read_unlock(&cifs_tcp_ses_lock);

        /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
        /* cancel_notify_requests(tcon); */
        if (tcon->ses && tcon->ses->server) {
                cFYI(1, ("wake up tasks now - umount begin not complete"));
                wake_up_all(&tcon->ses->server->request_q);
                wake_up_all(&tcon->ses->server->response_q);
                msleep(1); /* yield */
                /* we have to kick the requests once more */
                wake_up_all(&tcon->ses->server->response_q);

        /* BB FIXME - finish adding checks for tidStatus BB */

#ifdef CONFIG_CIFS_STATS2
static int cifs_show_stats(struct seq_file *s, struct vfsmount *mnt)

static int cifs_remount(struct super_block *sb, int *flags, char *data)
        *flags |= MS_NODIRATIME;

static const struct super_operations cifs_super_ops = {
        .put_super = cifs_put_super,
        .statfs = cifs_statfs,
        .alloc_inode = cifs_alloc_inode,
        .destroy_inode = cifs_destroy_inode,
/*      .drop_inode = generic_delete_inode,
        .delete_inode = cifs_delete_inode, */ /* Do not need above two
        functions unless later we add lazy close of inodes or unless the
        kernel forgets to call us with the same number of releases (closes)
        as opens */
        .show_options = cifs_show_options,
        .umount_begin = cifs_umount_begin,
        .remount_fs = cifs_remount,
#ifdef CONFIG_CIFS_STATS2
        .show_stats = cifs_show_stats,
cifs_get_sb(struct file_system_type *fs_type,
            int flags, const char *dev_name, void *data, struct vfsmount *mnt)
        struct super_block *sb = sget(fs_type, NULL, set_anon_super, NULL);

        cFYI(1, ("Devname: %s flags: %d ", dev_name, flags));

        rc = cifs_read_super(sb, data, dev_name, flags & MS_SILENT ? 1 : 0);
                up_write(&sb->s_umount);
                deactivate_super(sb);

        sb->s_flags |= MS_ACTIVE;
        return simple_set_mnt(mnt, sb);
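/*
 * cifs_file_aio_write() wraps generic_file_aio_write(): when the client
 * does not hold an exclusive oplock (clientCanCacheAll is false) the dirty
 * pages are pushed to the server right away with filemap_fdatawrite()
 * instead of being left cached.
 */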
static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
                                   unsigned long nr_segs, loff_t pos)
        struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;

        written = generic_file_aio_write(iocb, iov, nr_segs, pos);
        if (!CIFS_I(inode)->clientCanCacheAll)
                filemap_fdatawrite(inode->i_mapping);

static loff_t cifs_llseek(struct file *file, loff_t offset, int origin)
        /* origin == SEEK_END => we must revalidate the cached file length */
        if (origin == SEEK_END) {

                /* some applications poll for the file length in this strange
                   way so we must seek to end on non-oplocked files by
                   setting the revalidate time to zero */
                CIFS_I(file->f_path.dentry->d_inode)->time = 0;

                retval = cifs_revalidate(file->f_path.dentry);
                        return (loff_t)retval;

        return generic_file_llseek_unlocked(file, offset, origin);
#ifdef CONFIG_CIFS_EXPERIMENTAL
static int cifs_setlease(struct file *file, long arg, struct file_lock **lease)
        /* note that this is called by vfs setlease with the BKL held
           although I doubt that BKL is needed here in cifs */
        struct inode *inode = file->f_path.dentry->d_inode;

        if (!(S_ISREG(inode->i_mode)))

        /* check if file is oplocked */
        if (((arg == F_RDLCK) &&
             (CIFS_I(inode)->clientCanCacheRead)) ||
             (CIFS_I(inode)->clientCanCacheAll)))
                return generic_setlease(file, arg, lease);
        else if (CIFS_SB(inode->i_sb)->tcon->local_lease &&
                 !CIFS_I(inode)->clientCanCacheRead)
                /* If the server claims to support oplock on this
                   file, then we still need to check oplock even
                   if the local_lease mount option is set, but there
                   are servers which do not support oplock for which
                   this mount option may be useful if the user
                   knows that the file won't be changed on the server
                   by anyone else */
                return generic_setlease(file, arg, lease);

struct file_system_type cifs_fs_type = {
        .owner = THIS_MODULE,
        .name = "cifs",
        .get_sb = cifs_get_sb,
        .kill_sb = kill_anon_super,
const struct inode_operations cifs_dir_inode_ops = {
        .create = cifs_create,
        .lookup = cifs_lookup,
        .getattr = cifs_getattr,
        .unlink = cifs_unlink,
        .link = cifs_hardlink,
        .rename = cifs_rename,
        .permission = cifs_permission,
        /* revalidate: cifs_revalidate, */
        .setattr = cifs_setattr,
        .symlink = cifs_symlink,
#ifdef CONFIG_CIFS_XATTR
        .setxattr = cifs_setxattr,
        .getxattr = cifs_getxattr,
        .listxattr = cifs_listxattr,
        .removexattr = cifs_removexattr,

const struct inode_operations cifs_file_inode_ops = {
        /* revalidate: cifs_revalidate, */
        .setattr = cifs_setattr,
        .getattr = cifs_getattr, /* do we need this anymore? */
        .rename = cifs_rename,
        .permission = cifs_permission,
#ifdef CONFIG_CIFS_XATTR
        .setxattr = cifs_setxattr,
        .getxattr = cifs_getxattr,
        .listxattr = cifs_listxattr,
        .removexattr = cifs_removexattr,

const struct inode_operations cifs_symlink_inode_ops = {
        .readlink = generic_readlink,
        .follow_link = cifs_follow_link,
        .put_link = cifs_put_link,
        .permission = cifs_permission,
        /* BB add the following two eventually */
        /* revalidate: cifs_revalidate,
           setattr:    cifs_notify_change, */ /* BB do we need notify change */
#ifdef CONFIG_CIFS_XATTR
        .setxattr = cifs_setxattr,
        .getxattr = cifs_getxattr,
        .listxattr = cifs_listxattr,
        .removexattr = cifs_removexattr,

const struct file_operations cifs_file_ops = {
        .read = do_sync_read,
        .write = do_sync_write,
        .aio_read = generic_file_aio_read,
        .aio_write = cifs_file_aio_write,
        .release = cifs_close,
        .mmap = cifs_file_mmap,
        .splice_read = generic_file_splice_read,
        .llseek = cifs_llseek,
#ifdef CONFIG_CIFS_POSIX
        .unlocked_ioctl = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */

#ifdef CONFIG_CIFS_EXPERIMENTAL
        .dir_notify = cifs_dir_notify,
        .setlease = cifs_setlease,
#endif /* CONFIG_CIFS_EXPERIMENTAL */

const struct file_operations cifs_file_direct_ops = {
        /* no mmap, no aio, no readv -
           BB reevaluate whether they can be done with directio, no cache */
        .read = cifs_user_read,
        .write = cifs_user_write,
        .release = cifs_close,
        .splice_read = generic_file_splice_read,
#ifdef CONFIG_CIFS_POSIX
        .unlocked_ioctl = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */
        .llseek = cifs_llseek,
#ifdef CONFIG_CIFS_EXPERIMENTAL
        .dir_notify = cifs_dir_notify,
        .setlease = cifs_setlease,
#endif /* CONFIG_CIFS_EXPERIMENTAL */

const struct file_operations cifs_file_nobrl_ops = {
        .read = do_sync_read,
        .write = do_sync_write,
        .aio_read = generic_file_aio_read,
        .aio_write = cifs_file_aio_write,
        .release = cifs_close,
        .mmap = cifs_file_mmap,
        .splice_read = generic_file_splice_read,
        .llseek = cifs_llseek,
#ifdef CONFIG_CIFS_POSIX
        .unlocked_ioctl = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */

#ifdef CONFIG_CIFS_EXPERIMENTAL
        .dir_notify = cifs_dir_notify,
        .setlease = cifs_setlease,
#endif /* CONFIG_CIFS_EXPERIMENTAL */

const struct file_operations cifs_file_direct_nobrl_ops = {
        /* no mmap, no aio, no readv -
           BB reevaluate whether they can be done with directio, no cache */
        .read = cifs_user_read,
        .write = cifs_user_write,
        .release = cifs_close,
        .splice_read = generic_file_splice_read,
#ifdef CONFIG_CIFS_POSIX
        .unlocked_ioctl = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */
        .llseek = cifs_llseek,
#ifdef CONFIG_CIFS_EXPERIMENTAL
        .dir_notify = cifs_dir_notify,
        .setlease = cifs_setlease,
#endif /* CONFIG_CIFS_EXPERIMENTAL */

const struct file_operations cifs_dir_ops = {
        .readdir = cifs_readdir,
        .release = cifs_closedir,
        .read = generic_read_dir,
#ifdef CONFIG_CIFS_EXPERIMENTAL
        .dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
        .unlocked_ioctl = cifs_ioctl,
        .llseek = generic_file_llseek,
cifs_init_once(void *inode)
        struct cifsInodeInfo *cifsi = inode;

        inode_init_once(&cifsi->vfs_inode);
        INIT_LIST_HEAD(&cifsi->lockList);

cifs_init_inodecache(void)
        cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
                                              sizeof(struct cifsInodeInfo),
                                              0, (SLAB_RECLAIM_ACCOUNT|
        if (cifs_inode_cachep == NULL)

cifs_destroy_inodecache(void)
        kmem_cache_destroy(cifs_inode_cachep);
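/*
 * cifs_init_request_bufs() sets up the slab caches and mempools used for
 * SMB request/response buffers: a large "cifs_request" cache sized from
 * CIFSMaxBufSize (plus room for the header) for path-based and read/write
 * frames, and a small "cifs_small_rq" cache of MAX_CIFS_SMALL_BUFFER_SIZE
 * for the many short handle-based requests.  CIFSMaxBufSize and the pool
 * minimums are clamped to sane ranges first.
 */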
cifs_init_request_bufs(void)
        if (CIFSMaxBufSize < 8192) {
        /* Buffer size can not be smaller than 2 * PATH_MAX since maximum
           Unicode path name has to fit in any SMB/CIFS path based frames */
                CIFSMaxBufSize = 8192;
        } else if (CIFSMaxBufSize > 1024*127) {
                CIFSMaxBufSize = 1024 * 127;

        CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult */

/*      cERROR(1, ("CIFSMaxBufSize %d 0x%x", CIFSMaxBufSize, CIFSMaxBufSize)); */
        cifs_req_cachep = kmem_cache_create("cifs_request",
                                            CIFSMaxBufSize +
                                            MAX_CIFS_HDR_SIZE, 0,
                                            SLAB_HWCACHE_ALIGN, NULL);
        if (cifs_req_cachep == NULL)

        if (cifs_min_rcv < 1)
        else if (cifs_min_rcv > 64) {
                cERROR(1, ("cifs_min_rcv set to maximum (64)"));

        cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
                                                  cifs_req_cachep);

        if (cifs_req_poolp == NULL) {
                kmem_cache_destroy(cifs_req_cachep);

        /* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
           almost all handle based requests (but not write response, nor is it
           sufficient for path based requests).  A smaller size would have
           been more efficient (compacting multiple slab items on one 4k page)
           for the case in which debug was on, but this larger size allows
           more SMBs to use small buffer alloc and is still much more
           efficient to alloc 1 per page off the slab compared to 17K (5 page)
           alloc of large cifs buffers even when page debugging is on */
        cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
                        MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
                        NULL);
        if (cifs_sm_req_cachep == NULL) {
                mempool_destroy(cifs_req_poolp);
                kmem_cache_destroy(cifs_req_cachep);

        if (cifs_min_small < 2)
        else if (cifs_min_small > 256) {
                cifs_min_small = 256;
                cFYI(1, ("cifs_min_small set to maximum (256)"));

        cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
                                                     cifs_sm_req_cachep);

        if (cifs_sm_req_poolp == NULL) {
                mempool_destroy(cifs_req_poolp);
                kmem_cache_destroy(cifs_req_cachep);
                kmem_cache_destroy(cifs_sm_req_cachep);

cifs_destroy_request_bufs(void)
        mempool_destroy(cifs_req_poolp);
        kmem_cache_destroy(cifs_req_cachep);
        mempool_destroy(cifs_sm_req_poolp);
        kmem_cache_destroy(cifs_sm_req_cachep);

cifs_init_mids(void)
        cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
                                            sizeof(struct mid_q_entry), 0,
                                            SLAB_HWCACHE_ALIGN, NULL);
        if (cifs_mid_cachep == NULL)

        /* 3 is a reasonable minimum number of simultaneous operations */
        cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
        if (cifs_mid_poolp == NULL) {
                kmem_cache_destroy(cifs_mid_cachep);

        cifs_oplock_cachep = kmem_cache_create("cifs_oplock_structs",
                                               sizeof(struct oplock_q_entry), 0,
                                               SLAB_HWCACHE_ALIGN, NULL);
        if (cifs_oplock_cachep == NULL) {
                mempool_destroy(cifs_mid_poolp);
                kmem_cache_destroy(cifs_mid_cachep);

cifs_destroy_mids(void)
        mempool_destroy(cifs_mid_poolp);
        kmem_cache_destroy(cifs_mid_cachep);
        kmem_cache_destroy(cifs_oplock_cachep);
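/*
 * The cifsoplockd kernel thread: it sleeps until an oplock break entry is
 * queued on GlobalOplock_Q, then flushes (and, when the read oplock is
 * also being lost, invalidates) the affected inode's page cache and sends
 * a LOCKING_ANDX_OPLOCK_RELEASE back to the server, unless the session
 * needs to be reconnected anyway.
 */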
static int cifs_oplock_thread(void *dummyarg)
        struct oplock_q_entry *oplock_item;
        struct cifsTconInfo *pTcon;

                spin_lock(&GlobalMid_Lock);
                if (list_empty(&GlobalOplock_Q)) {
                        spin_unlock(&GlobalMid_Lock);
                        set_current_state(TASK_INTERRUPTIBLE);
                        schedule_timeout(39*HZ);

                        oplock_item = list_entry(GlobalOplock_Q.next,
                                                 struct oplock_q_entry, qhead);
                        cFYI(1, ("found oplock item to write out"));
                        pTcon = oplock_item->tcon;
                        inode = oplock_item->pinode;
                        netfid = oplock_item->netfid;
                        spin_unlock(&GlobalMid_Lock);
                        DeleteOplockQEntry(oplock_item);
                        /* can not grab inode sem here since it would
                           deadlock when oplock received on delete
                           since vfs_unlink holds the i_mutex across
                           the rename */
                        /* mutex_lock(&inode->i_mutex); */
                        if (S_ISREG(inode->i_mode)) {
#ifdef CONFIG_CIFS_EXPERIMENTAL
                                if (CIFS_I(inode)->clientCanCacheAll == 0)
                                        break_lease(inode, FMODE_READ);
                                else if (CIFS_I(inode)->clientCanCacheRead == 0)
                                        break_lease(inode, FMODE_WRITE);

                                rc = filemap_fdatawrite(inode->i_mapping);
                                if (CIFS_I(inode)->clientCanCacheRead == 0) {
                                        waitrc = filemap_fdatawait(
                                                        inode->i_mapping);
                                        invalidate_remote_inode(inode);

                        /* mutex_unlock(&inode->i_mutex); */
                                CIFS_I(inode)->write_behind_rc = rc;
                        cFYI(1, ("Oplock flush inode %p rc %d",
                                 inode, rc));

                        /* releasing stale oplock after recent reconnect
                           of smb session using a now incorrect file
                           handle is not a data integrity issue but do
                           not bother sending an oplock release if session
                           to server still is disconnected since oplock
                           already released by the server in that case */
                        if (!pTcon->need_reconnect) {
                                rc = CIFSSMBLock(0, pTcon, netfid,
                                                 0 /* len */ , 0 /* offset */, 0,
                                                 0, LOCKING_ANDX_OPLOCK_RELEASE,
                                                 false /* wait flag */);
                                cFYI(1, ("Oplock release rc = %d", rc));

                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(1); /* yield in case q were corrupt */
        } while (!kthread_should_stop());
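/*
 * The cifsdnotifyd kernel thread: roughly every 15 seconds it walks the
 * list of tcp sessions and wakes everything sleeping on a response queue
 * that still has requests in flight, so callers stuck behind a dead
 * connection can wake up and error out.
 */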
static int cifs_dnotify_thread(void *dummyarg)
        struct list_head *tmp;
        struct TCP_Server_Info *server;

                if (try_to_freeze())
                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(15*HZ);
                /* check if there are any stuck requests that need to be
                   woken up, and wake the queue so the waiting thread can
                   wake up and error out */
                read_lock(&cifs_tcp_ses_lock);
                list_for_each(tmp, &cifs_tcp_ses_list) {
                        server = list_entry(tmp, struct TCP_Server_Info,
                                            tcp_ses_list);
                        if (atomic_read(&server->inFlight))
                                wake_up_all(&server->response_q);

                read_unlock(&cifs_tcp_ses_lock);
        } while (!kthread_should_stop());
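/*
 * init_cifs(): module initialisation.  Initialise the global lists, locks
 * and counters, clamp cifs_max_pending, create the inode/mid/request
 * caches, register the filesystem (and the spnego and dns_resolver key
 * types when configured), then start the cifsoplockd and cifsdnotifyd
 * threads, unwinding in reverse order on failure.
 */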
init_cifs(void)
        INIT_LIST_HEAD(&cifs_tcp_ses_list);
        INIT_LIST_HEAD(&GlobalOplock_Q);
#ifdef CONFIG_CIFS_EXPERIMENTAL
        INIT_LIST_HEAD(&GlobalDnotifyReqList);
        INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);

/*
 *  Initialize Global counters
 */
        atomic_set(&sesInfoAllocCount, 0);
        atomic_set(&tconInfoAllocCount, 0);
        atomic_set(&tcpSesAllocCount, 0);
        atomic_set(&tcpSesReconnectCount, 0);
        atomic_set(&tconInfoReconnectCount, 0);

        atomic_set(&bufAllocCount, 0);
        atomic_set(&smBufAllocCount, 0);
#ifdef CONFIG_CIFS_STATS2
        atomic_set(&totBufAllocCount, 0);
        atomic_set(&totSmBufAllocCount, 0);
#endif /* CONFIG_CIFS_STATS2 */

        atomic_set(&midCount, 0);
        GlobalCurrentXid = 0;
        GlobalTotalActiveXid = 0;
        GlobalMaxActiveXid = 0;
        memset(Local_System_Name, 0, 15);
        rwlock_init(&GlobalSMBSeslock);
        rwlock_init(&cifs_tcp_ses_lock);
        spin_lock_init(&GlobalMid_Lock);

        if (cifs_max_pending < 2) {
                cifs_max_pending = 2;
                cFYI(1, ("cifs_max_pending set to min of 2"));
        } else if (cifs_max_pending > 256) {
                cifs_max_pending = 256;
                cFYI(1, ("cifs_max_pending set to max of 256"));

        rc = cifs_init_inodecache();
                goto out_clean_proc;

        rc = cifs_init_mids();
                goto out_destroy_inodecache;

        rc = cifs_init_request_bufs();
                goto out_destroy_mids;

        rc = register_filesystem(&cifs_fs_type);
                goto out_destroy_request_bufs;
#ifdef CONFIG_CIFS_UPCALL
        rc = register_key_type(&cifs_spnego_key_type);
                goto out_unregister_filesystem;

#ifdef CONFIG_CIFS_DFS_UPCALL
        rc = register_key_type(&key_type_dns_resolver);
                goto out_unregister_key_type;

        oplockThread = kthread_run(cifs_oplock_thread, NULL, "cifsoplockd");
        if (IS_ERR(oplockThread)) {
                rc = PTR_ERR(oplockThread);
                cERROR(1, ("error %d create oplock thread", rc));
                goto out_unregister_dfs_key_type;

        dnotifyThread = kthread_run(cifs_dnotify_thread, NULL, "cifsdnotifyd");
        if (IS_ERR(dnotifyThread)) {
                rc = PTR_ERR(dnotifyThread);
                cERROR(1, ("error %d create dnotify thread", rc));
                goto out_stop_oplock_thread;

 out_stop_oplock_thread:
        kthread_stop(oplockThread);
 out_unregister_dfs_key_type:
#ifdef CONFIG_CIFS_DFS_UPCALL
        unregister_key_type(&key_type_dns_resolver);
 out_unregister_key_type:
#ifdef CONFIG_CIFS_UPCALL
        unregister_key_type(&cifs_spnego_key_type);
 out_unregister_filesystem:
        unregister_filesystem(&cifs_fs_type);
 out_destroy_request_bufs:
        cifs_destroy_request_bufs();
 out_destroy_mids:
        cifs_destroy_mids();
 out_destroy_inodecache:
        cifs_destroy_inodecache();

exit_cifs(void)
        cFYI(DBG2, ("exit_cifs"));
#ifdef CONFIG_CIFS_DFS_UPCALL
        cifs_dfs_release_automount_timer();
        unregister_key_type(&key_type_dns_resolver);

#ifdef CONFIG_CIFS_UPCALL
        unregister_key_type(&cifs_spnego_key_type);

        unregister_filesystem(&cifs_fs_type);
        cifs_destroy_inodecache();
        cifs_destroy_mids();
        cifs_destroy_request_bufs();
        kthread_stop(oplockThread);
        kthread_stop(dnotifyThread);
MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
MODULE_LICENSE("GPL");	/* combination of LGPL + GPL source behaves as GPL */
MODULE_DESCRIPTION
    ("VFS to access servers complying with the SNIA CIFS Specification "
     "e.g. Samba and Windows");
MODULE_VERSION(CIFS_VERSION);
module_init(init_cifs)
module_exit(exit_cifs)