4 * Copyright (C) International Business Machines Corp., 2002,2004
5 * Author(s): Steve French (sfrench@us.ibm.com)
7 * Common Internet FileSystem (CIFS) client
9 * This library is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU Lesser General Public License as published
11 * by the Free Software Foundation; either version 2.1 of the License, or
12 * (at your option) any later version.
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
17 * the GNU Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public License
20 * along with this library; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 /* Note that BB means BUGBUG (ie something to fix eventually) */
26 #include <linux/module.h>
28 #include <linux/mount.h>
29 #include <linux/slab.h>
30 #include <linux/init.h>
31 #include <linux/list.h>
32 #include <linux/seq_file.h>
33 #include <linux/vfs.h>
34 #include <linux/mempool.h>
35 #include <linux/delay.h>
36 #include <linux/kthread.h>
39 #define DECLARE_GLOBALS_HERE
41 #include "cifsproto.h"
42 #include "cifs_debug.h"
43 #include "cifs_fs_sb.h"
45 #define CIFS_MAGIC_NUMBER 0xFF534D42 /* the first four bytes of SMB PDUs */
47 #ifdef CONFIG_CIFS_QUOTA
48 static struct quotactl_ops cifs_quotactl_ops;
/* Module-wide tunables for the CIFS client.  Most are exposed as module
 * parameters; range clamping happens at init time (cifs_init_request_bufs
 * and the init path below), not here. */
54 unsigned int oplockEnabled = 1;
55 unsigned int experimEnabled = 0;
56 unsigned int linuxExtEnabled = 1;
57 unsigned int lookupCacheEnabled = 1;
58 unsigned int multiuser_mount = 0;
59 unsigned int extended_security = CIFSSEC_DEF;
60 /* unsigned int ntlmv2_support = 0; */
61 unsigned int sign_CIFS_PDUs = 1;
/* Kernel-thread handles for the oplock and dnotify daemons, started in
 * module init and stopped in module exit; the extern declarations exist
 * only to silence sparse warnings about missing prototypes. */
62 extern struct task_struct * oplockThread; /* remove sparse warning */
63 struct task_struct * oplockThread = NULL;
64 extern struct task_struct * dnotifyThread; /* remove sparse warning */
65 struct task_struct * dnotifyThread = NULL;
/* Maximum network buffer size, clamped to [8192, 1024*127] and rounded
 * to a 512-byte multiple in cifs_init_request_bufs(). */
66 unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
67 module_param(CIFSMaxBufSize, int, 0);
68 MODULE_PARM_DESC(CIFSMaxBufSize,"Network buffer size (not including header). Default: 16384 Range: 8192 to 130048");
69 unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
70 module_param(cifs_min_rcv, int, 0);
71 MODULE_PARM_DESC(cifs_min_rcv,"Network buffers in pool. Default: 4 Range: 1 to 64");
72 unsigned int cifs_min_small = 30;
73 module_param(cifs_min_small, int, 0);
74 MODULE_PARM_DESC(cifs_min_small,"Small network buffers in pool. Default: 30 Range: 2 to 256");
75 unsigned int cifs_max_pending = CIFS_MAX_REQ;
76 module_param(cifs_max_pending, int, 0);
77 MODULE_PARM_DESC(cifs_max_pending,"Simultaneous requests to server. Default: 50 Range: 2 to 256");
/* Mempools/caches defined further down in this file; extern'd here so the
 * forward references above compile cleanly. */
79 extern mempool_t *cifs_sm_req_poolp;
80 extern mempool_t *cifs_req_poolp;
81 extern mempool_t *cifs_mid_poolp;
83 extern kmem_cache_t *cifs_oplock_cachep;
/* Fill in a superblock at mount time: allocate the per-superblock
 * cifs_sb_info, run the protocol-level cifs_mount(), then install the
 * super_operations and the root inode/dentry.  On failure control flows
 * to the cleanup labels (out_mount_failed unloads the nls table and
 * frees cifs_sb).  NOTE(review): several interior lines (rc/inode
 * declarations, error branches, closing braces) are not visible here. */
86 cifs_read_super(struct super_block *sb, void *data,
87 const char *devname, int silent)
90 struct cifs_sb_info *cifs_sb;
93 sb->s_flags |= MS_NODIRATIME; /* and probably even noatime */
/* kzalloc result is read back via CIFS_SB(); NULL check presumably on an
 * elided line — confirm before relying on it. */
94 sb->s_fs_info = kzalloc(sizeof(struct cifs_sb_info),GFP_KERNEL);
95 cifs_sb = CIFS_SB(sb);
99 rc = cifs_mount(sb, cifs_sb, data, devname);
104 ("cifs_mount failed w/return code = %d", rc));
105 goto out_mount_failed;
108 sb->s_magic = CIFS_MAGIC_NUMBER;
109 sb->s_op = &cifs_super_ops;
110 /* if(cifs_sb->tcon->ses->server->maxBuf > MAX_CIFS_HDR_SIZE + 512)
111 sb->s_blocksize = cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */
112 #ifdef CONFIG_CIFS_QUOTA
113 sb->s_qcop = &cifs_quotactl_ops;
/* Fixed 16K block size; keep s_blocksize_bits in sync (2**14). */
115 sb->s_blocksize = CIFS_MAX_MSGSIZE;
116 sb->s_blocksize_bits = 14; /* default 2**14 = CIFS_MAX_MSGSIZE */
117 inode = iget(sb, ROOT_I);
124 sb->s_root = d_alloc_root(inode);
134 cERROR(1, ("cifs_read_super: get root inode failed"));
140 if(cifs_sb->local_nls)
141 unload_nls(cifs_sb->local_nls);
/* Superblock teardown: undo cifs_mount() via cifs_umount() and unload the
 * per-mount NLS table.  Tolerates a NULL cifs_sb (nothing to unmount). */
148 cifs_put_super(struct super_block *sb)
151 struct cifs_sb_info *cifs_sb;
153 cFYI(1, ("In cifs_put_super"));
154 cifs_sb = CIFS_SB(sb);
155 if(cifs_sb == NULL) {
156 cFYI(1,("Empty cifs superblock info passed to unmount"));
/* cifs_umount failure is logged but otherwise ignored — unmount proceeds. */
159 rc = cifs_umount(sb, cifs_sb);
161 cERROR(1, ("cifs_umount failed with return code %d", rc));
163 unload_nls(cifs_sb->local_nls);
/* statfs(2) handler: fill *buf with filesystem statistics for the mount
 * backing @dentry.  Tries the POSIX QFS info level first (if the server
 * advertises the Unix/POSIX capability), then falls back to the standard
 * and finally the legacy level-1 QFSInfo calls for old servers. */
169 cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
171 struct super_block *sb = dentry->d_sb;
173 int rc = -EOPNOTSUPP;
174 struct cifs_sb_info *cifs_sb;
175 struct cifsTconInfo *pTcon;
179 cifs_sb = CIFS_SB(sb);
180 pTcon = cifs_sb->tcon;
182 buf->f_type = CIFS_MAGIC_NUMBER;
184 /* instead could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO */
185 buf->f_namelen = PATH_MAX; /* PATH_MAX may be too long - it would
186 presumably be total path, but note
187 that some servers (including Samba 3)
188 have a shorter maximum path */
189 buf->f_files = 0; /* undefined */
190 buf->f_ffree = 0; /* unlimited */
192 /* BB we could add a second check for a QFS Unix capability bit */
193 /* BB FIXME check CIFS_POSIX_EXTENSIONS Unix cap first FIXME BB */
194 if ((pTcon->ses->capabilities & CAP_UNIX) && (CIFS_POSIX_EXTENSIONS &
195 le64_to_cpu(pTcon->fsUnixInfo.Capability)))
196 rc = CIFSSMBQFSPosixInfo(xid, pTcon, buf);
198 /* Only need to call the old QFSInfo if failed
201 rc = CIFSSMBQFSInfo(xid, pTcon, buf);
203 /* Old Windows servers do not support level 103, retry with level
204 one if old server failed the previous call */
206 rc = SMBOldQFSInfo(xid, pTcon, buf);
211 /* BB get from info in tcon struct at mount time call to QFSAttrInfo */
/* Always reports success to the VFS even if every query failed — a
 * deliberate (if questionable) choice noted in the original comment. */
213 return 0; /* always return success? what if volume is no
/* VFS permission hook.  With MOUNT_NO_PERM set, client-side permission
 * checks are skipped (server ACLs are authoritative); otherwise defer to
 * generic_permission() using the cached mode bits. */
217 static int cifs_permission(struct inode * inode, int mask, struct nameidata *nd)
219 struct cifs_sb_info *cifs_sb;
221 cifs_sb = CIFS_SB(inode->i_sb);
223 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
225 } else /* file mode might have been restricted at mount time
226 on the client (above and beyond ACL on servers) for
227 servers which do not support setting and viewing mode bits,
228 so allowing client to check permissions is useful */
229 return generic_permission(inode, mask, NULL);
/* Slab caches and mempools backing CIFS allocations; created in
 * cifs_init_inodecache/cifs_init_request_bufs/cifs_init_mids and torn
 * down by the matching destroy functions.  The non-static ones are
 * shared with other CIFS translation units. */
232 static kmem_cache_t *cifs_inode_cachep;
233 static kmem_cache_t *cifs_req_cachep;
234 static kmem_cache_t *cifs_mid_cachep;
235 kmem_cache_t *cifs_oplock_cachep;
236 static kmem_cache_t *cifs_sm_req_cachep;
237 mempool_t *cifs_sm_req_poolp;
238 mempool_t *cifs_req_poolp;
239 mempool_t *cifs_mid_poolp;
/* Allocate a cifsInodeInfo from the inode slab and initialize the CIFS
 * fields to safe defaults; returns the embedded VFS inode.  Caching of
 * file data/metadata stays disabled until an oplock is granted. */
241 static struct inode *
242 cifs_alloc_inode(struct super_block *sb)
244 struct cifsInodeInfo *cifs_inode;
/* NOTE(review): NULL check on the allocation is presumably on an elided
 * line — confirm before assuming this cannot dereference NULL. */
245 cifs_inode = kmem_cache_alloc(cifs_inode_cachep, SLAB_KERNEL);
248 cifs_inode->cifsAttrs = 0x20; /* default */
249 atomic_set(&cifs_inode->inUse, 0);
250 cifs_inode->time = 0;
251 /* Until the file is open and we have gotten oplock
252 info back from the server, can not assume caching of
253 file data or metadata */
254 cifs_inode->clientCanCacheRead = FALSE;
255 cifs_inode->clientCanCacheAll = FALSE;
256 cifs_inode->vfs_inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */
257 cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME;
258 INIT_LIST_HEAD(&cifs_inode->openFileList);
259 return &cifs_inode->vfs_inode;
/* Counterpart to cifs_alloc_inode(): return the containing cifsInodeInfo
 * to the inode slab cache. */
263 cifs_destroy_inode(struct inode *inode)
265 kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
269 * cifs_show_options() is for displaying mount options in /proc/mounts.
270 * Not all settable options are displayed but most of the important
/* Emits unc=, username=, domain=, rsize= and wsize= for this mount. */
274 cifs_show_options(struct seq_file *s, struct vfsmount *m)
276 struct cifs_sb_info *cifs_sb;
278 cifs_sb = CIFS_SB(m->mnt_sb);
282 seq_printf(s, ",unc=%s", cifs_sb->tcon->treeName);
283 if (cifs_sb->tcon->ses) {
284 if (cifs_sb->tcon->ses->userName)
285 seq_printf(s, ",username=%s",
286 cifs_sb->tcon->ses->userName);
287 if(cifs_sb->tcon->ses->domainName)
288 seq_printf(s, ",domain=%s",
289 cifs_sb->tcon->ses->domainName);
292 seq_printf(s, ",rsize=%d",cifs_sb->rsize);
293 seq_printf(s, ",wsize=%d",cifs_sb->wsize);
298 #ifdef CONFIG_CIFS_QUOTA
/* XFS-style quota entry points (CONFIG_CIFS_QUOTA).  These are stubs:
 * they resolve the tcon and log the request; the actual wire protocol
 * implementation is still BB/TODO (bodies largely elided here). */
299 int cifs_xquota_set(struct super_block * sb, int quota_type, qid_t qid,
300 struct fs_disk_quota * pdquota)
304 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
305 struct cifsTconInfo *pTcon;
308 pTcon = cifs_sb->tcon;
315 cFYI(1,("set type: 0x%x id: %d",quota_type,qid));
/* Query quota for one id.  NOTE(review): the trace message says "set";
 * likely a copy/paste of the message string from cifs_xquota_set. */
324 int cifs_xquota_get(struct super_block * sb, int quota_type, qid_t qid,
325 struct fs_disk_quota * pdquota)
329 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
330 struct cifsTconInfo *pTcon;
333 pTcon = cifs_sb->tcon;
339 cFYI(1,("set type: 0x%x id: %d",quota_type,qid));
/* Enable/disable quota enforcement state. */
348 int cifs_xstate_set(struct super_block * sb, unsigned int flags, int operation)
352 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
353 struct cifsTconInfo *pTcon;
356 pTcon = cifs_sb->tcon;
362 cFYI(1,("flags: 0x%x operation: 0x%x",flags,operation));
/* Report quota enforcement state. */
371 int cifs_xstate_get(struct super_block * sb, struct fs_quota_stat *qstats)
375 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
376 struct cifsTconInfo *pTcon;
379 pTcon = cifs_sb->tcon;
385 cFYI(1,("pqstats %p",qstats));
/* Quota operation dispatch table installed on sb->s_qcop at mount time
 * (CONFIG_CIFS_QUOTA).  BUGFIX: .get_xquota was erroneously wired to
 * cifs_xquota_set, so quota *queries* would invoke the *set* handler;
 * point it at cifs_xquota_get instead. */
394 static struct quotactl_ops cifs_quotactl_ops = {
395 .set_xquota = cifs_xquota_set,
396 .get_xquota = cifs_xquota_get,
397 .set_xstate = cifs_xstate_set,
398 .get_xstate = cifs_xstate_get,
/* Forced-unmount hook (umount -f).  If this is the last user of the tree
 * connection, mark it CifsExiting and wake every task blocked on the
 * server's request/response queues so in-flight calls can error out. */
402 static void cifs_umount_begin(struct vfsmount * vfsmnt, int flags)
404 struct cifs_sb_info *cifs_sb;
405 struct cifsTconInfo * tcon;
/* Only act on MNT_FORCE; a normal umount takes the regular path. */
407 if (!(flags & MNT_FORCE))
409 cifs_sb = CIFS_SB(vfsmnt->mnt_sb);
413 tcon = cifs_sb->tcon;
416 down(&tcon->tconSem);
417 if (atomic_read(&tcon->useCount) == 1)
418 tcon->tidStatus = CifsExiting;
421 /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
422 /* cancel_notify_requests(tcon); */
423 if(tcon->ses && tcon->ses->server)
425 cFYI(1,("wake up tasks now - umount begin not complete"));
426 wake_up_all(&tcon->ses->server->request_q);
427 wake_up_all(&tcon->ses->server->response_q);
428 msleep(1); /* yield */
429 /* we have to kick the requests once more */
430 wake_up_all(&tcon->ses->server->response_q);
433 /* BB FIXME - finish add checks for tidStatus BB */
/* remount handler: only forces MS_NODIRATIME back on; no other mount
 * option may change on remount. */
438 static int cifs_remount(struct super_block *sb, int *flags, char *data)
440 *flags |= MS_NODIRATIME;
/* Superblock operations table, installed by cifs_read_super(). */
444 struct super_operations cifs_super_ops = {
445 .read_inode = cifs_read_inode,
446 .put_super = cifs_put_super,
447 .statfs = cifs_statfs,
448 .alloc_inode = cifs_alloc_inode,
449 .destroy_inode = cifs_destroy_inode,
450 /* .drop_inode = generic_delete_inode,
451 .delete_inode = cifs_delete_inode, *//* Do not need the above two functions
452 unless later we add lazy close of inodes or unless the kernel forgets to call
453 us with the same number of releases (closes) as opens */
454 .show_options = cifs_show_options,
455 .umount_begin = cifs_umount_begin,
456 .remount_fs = cifs_remount,
/* get_sb hook: always allocates a fresh anonymous superblock (no sharing
 * between mounts), runs cifs_read_super() to populate it, and attaches it
 * to the vfsmount.  On read_super failure the sb is deactivated. */
460 cifs_get_sb(struct file_system_type *fs_type,
461 int flags, const char *dev_name, void *data, struct vfsmount *mnt)
464 struct super_block *sb = sget(fs_type, NULL, set_anon_super, NULL);
466 cFYI(1, ("Devname: %s flags: %d ", dev_name, flags));
473 rc = cifs_read_super(sb, data, dev_name, flags & MS_SILENT ? 1 : 0);
475 up_write(&sb->s_umount);
476 deactivate_super(sb);
479 sb->s_flags |= MS_ACTIVE;
480 return simple_set_mnt(mnt, sb);
/* aio_write wrapper: delegate to generic_file_aio_write(), then kick off
 * writeback immediately unless the server granted a write-caching oplock
 * (clientCanCacheAll) — keeps other clients' view of the file current. */
483 static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
484 unsigned long nr_segs, loff_t pos)
486 struct inode *inode = iocb->ki_filp->f_dentry->d_inode;
489 written = generic_file_aio_write(iocb, iov, nr_segs, pos);
490 if (!CIFS_I(inode)->clientCanCacheAll)
491 filemap_fdatawrite(inode->i_mapping);
/* llseek: for SEEK_END the cached file length may be stale, so revalidate
 * against the server first; propagate a revalidate failure as the seek
 * result, otherwise fall through to remote_llseek(). */
495 static loff_t cifs_llseek(struct file *file, loff_t offset, int origin)
497 /* origin == SEEK_END => we must revalidate the cached file length */
499 int retval = cifs_revalidate(file->f_dentry);
501 return (loff_t)retval;
503 return remote_llseek(file, offset, origin);
/* Filesystem type registration record; registered/unregistered in the
 * module init/exit paths below. */
506 static struct file_system_type cifs_fs_type = {
507 .owner = THIS_MODULE,
509 .get_sb = cifs_get_sb,
510 .kill_sb = kill_anon_super,
/* inode_operations for directories; xattr ops only when CONFIG_CIFS_XATTR. */
513 struct inode_operations cifs_dir_inode_ops = {
514 .create = cifs_create,
515 .lookup = cifs_lookup,
516 .getattr = cifs_getattr,
517 .unlink = cifs_unlink,
518 .link = cifs_hardlink,
521 .rename = cifs_rename,
522 .permission = cifs_permission,
523 /* revalidate:cifs_revalidate, */
524 .setattr = cifs_setattr,
525 .symlink = cifs_symlink,
527 #ifdef CONFIG_CIFS_XATTR
528 .setxattr = cifs_setxattr,
529 .getxattr = cifs_getxattr,
530 .listxattr = cifs_listxattr,
531 .removexattr = cifs_removexattr,
/* inode_operations for regular files. */
535 struct inode_operations cifs_file_inode_ops = {
536 /* revalidate:cifs_revalidate, */
537 .setattr = cifs_setattr,
538 .getattr = cifs_getattr, /* do we need this anymore? */
539 .rename = cifs_rename,
540 .permission = cifs_permission,
541 #ifdef CONFIG_CIFS_XATTR
542 .setxattr = cifs_setxattr,
543 .getxattr = cifs_getxattr,
544 .listxattr = cifs_listxattr,
545 .removexattr = cifs_removexattr,
/* inode_operations for symlinks; uses the generic readlink helper over
 * the CIFS follow_link/put_link pair. */
549 struct inode_operations cifs_symlink_inode_ops = {
550 .readlink = generic_readlink,
551 .follow_link = cifs_follow_link,
552 .put_link = cifs_put_link,
553 .permission = cifs_permission,
554 /* BB add the following two eventually */
555 /* revalidate: cifs_revalidate,
556 setattr: cifs_notify_change, *//* BB do we need notify change */
557 #ifdef CONFIG_CIFS_XATTR
558 .setxattr = cifs_setxattr,
559 .getxattr = cifs_getxattr,
560 .listxattr = cifs_listxattr,
561 .removexattr = cifs_removexattr,
/* Default file_operations: page-cache backed I/O with byte-range locking
 * (cf. the _direct and _nobrl variants below). */
565 const struct file_operations cifs_file_ops = {
566 .read = do_sync_read,
567 .write = do_sync_write,
568 .aio_read = generic_file_aio_read,
569 .aio_write = cifs_file_aio_write,
571 .release = cifs_close,
575 .mmap = cifs_file_mmap,
576 .sendfile = generic_file_sendfile,
577 .llseek = cifs_llseek,
578 #ifdef CONFIG_CIFS_POSIX
580 #endif /* CONFIG_CIFS_POSIX */
582 #ifdef CONFIG_CIFS_EXPERIMENTAL
583 .dir_notify = cifs_dir_notify,
584 #endif /* CONFIG_CIFS_EXPERIMENTAL */
/* file_operations for direct (uncached) I/O mounts: reads/writes go to
 * the server, bypassing the page cache. */
587 const struct file_operations cifs_file_direct_ops = {
588 /* no mmap, no aio, no readv -
589 BB reevaluate whether they can be done with directio, no cache */
590 .read = cifs_user_read,
591 .write = cifs_user_write,
593 .release = cifs_close,
597 .sendfile = generic_file_sendfile, /* BB removeme BB */
598 #ifdef CONFIG_CIFS_POSIX
600 #endif /* CONFIG_CIFS_POSIX */
601 .llseek = cifs_llseek,
602 #ifdef CONFIG_CIFS_EXPERIMENTAL
603 .dir_notify = cifs_dir_notify,
604 #endif /* CONFIG_CIFS_EXPERIMENTAL */
/* file_operations used when the server does not support byte-range locks
 * ("nobrl"): same as cifs_file_ops minus the lock entry point. */
606 const struct file_operations cifs_file_nobrl_ops = {
607 .read = do_sync_read,
608 .write = do_sync_write,
609 .aio_read = generic_file_aio_read,
610 .aio_write = cifs_file_aio_write,
612 .release = cifs_close,
615 .mmap = cifs_file_mmap,
616 .sendfile = generic_file_sendfile,
617 .llseek = cifs_llseek,
618 #ifdef CONFIG_CIFS_POSIX
620 #endif /* CONFIG_CIFS_POSIX */
622 #ifdef CONFIG_CIFS_EXPERIMENTAL
623 .dir_notify = cifs_dir_notify,
624 #endif /* CONFIG_CIFS_EXPERIMENTAL */
/* Direct-I/O variant without byte-range locking support. */
627 const struct file_operations cifs_file_direct_nobrl_ops = {
628 /* no mmap, no aio, no readv -
629 BB reevaluate whether they can be done with directio, no cache */
630 .read = cifs_user_read,
631 .write = cifs_user_write,
633 .release = cifs_close,
636 .sendfile = generic_file_sendfile, /* BB removeme BB */
637 #ifdef CONFIG_CIFS_POSIX
639 #endif /* CONFIG_CIFS_POSIX */
640 .llseek = cifs_llseek,
641 #ifdef CONFIG_CIFS_EXPERIMENTAL
642 .dir_notify = cifs_dir_notify,
643 #endif /* CONFIG_CIFS_EXPERIMENTAL */
/* file_operations for open directories (readdir et al.). */
646 const struct file_operations cifs_dir_ops = {
647 .readdir = cifs_readdir,
648 .release = cifs_closedir,
649 .read = generic_read_dir,
650 #ifdef CONFIG_CIFS_EXPERIMENTAL
651 .dir_notify = cifs_dir_notify,
652 #endif /* CONFIG_CIFS_EXPERIMENTAL */
/* Slab constructor for cifs_inode_cachep: runs once per slab object on
 * first construction (SLAB_CTOR_CONSTRUCTOR without VERIFY), setting up
 * the embedded VFS inode and the per-inode lock list. */
657 cifs_init_once(void *inode, kmem_cache_t * cachep, unsigned long flags)
659 struct cifsInodeInfo *cifsi = inode;
661 if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
662 SLAB_CTOR_CONSTRUCTOR) {
663 inode_init_once(&cifsi->vfs_inode);
664 INIT_LIST_HEAD(&cifsi->lockList);
/* Create the cifsInodeInfo slab cache with cifs_init_once as its
 * constructor; returns an error (elided line, presumably -ENOMEM) on
 * failure. */
669 cifs_init_inodecache(void)
671 cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
672 sizeof (struct cifsInodeInfo),
673 0, (SLAB_RECLAIM_ACCOUNT|
675 cifs_init_once, NULL);
676 if (cifs_inode_cachep == NULL)
/* Tear down the inode slab cache created by cifs_init_inodecache(). */
683 cifs_destroy_inodecache(void)
685 kmem_cache_destroy(cifs_inode_cachep)
/* Create the large and small SMB request buffer slab caches and their
 * mempools, clamping the module parameters into valid ranges first.
 * Cleanup on failure unwinds whatever was already created. */
689 cifs_init_request_bufs(void)
691 if(CIFSMaxBufSize < 8192) {
692 /* Buffer size can not be smaller than 2 * PATH_MAX since maximum
693 Unicode path name has to fit in any SMB/CIFS path based frames */
694 CIFSMaxBufSize = 8192;
695 } else if (CIFSMaxBufSize > 1024*127) {
696 CIFSMaxBufSize = 1024 * 127;
698 CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
700 /* cERROR(1,("CIFSMaxBufSize %d 0x%x",CIFSMaxBufSize,CIFSMaxBufSize)); */
701 cifs_req_cachep = kmem_cache_create("cifs_request",
703 MAX_CIFS_HDR_SIZE, 0,
704 SLAB_HWCACHE_ALIGN, NULL, NULL);
705 if (cifs_req_cachep == NULL)
/* Clamp cifs_min_rcv to [1,64] (low-bound branch elided). */
710 else if (cifs_min_rcv > 64) {
712 cERROR(1,("cifs_min_rcv set to maximum (64)"));
715 cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
718 if(cifs_req_poolp == NULL) {
719 kmem_cache_destroy(cifs_req_cachep);
722 /* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
723 almost all handle based requests (but not write response, nor is it
724 sufficient for path based requests). A smaller size would have
725 been more efficient (compacting multiple slab items on one 4k page)
726 for the case in which debug was on, but this larger size allows
727 more SMBs to use small buffer alloc and is still much more
728 efficient to alloc 1 per page off the slab compared to 17K (5page)
729 alloc of large cifs buffers even when page debugging is on */
730 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
731 MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
733 if (cifs_sm_req_cachep == NULL) {
734 mempool_destroy(cifs_req_poolp);
735 kmem_cache_destroy(cifs_req_cachep);
/* Clamp cifs_min_small to [2,256]. */
739 if(cifs_min_small < 2)
741 else if (cifs_min_small > 256) {
742 cifs_min_small = 256;
743 cFYI(1,("cifs_min_small set to maximum (256)"));
746 cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
749 if(cifs_sm_req_poolp == NULL) {
750 mempool_destroy(cifs_req_poolp);
751 kmem_cache_destroy(cifs_req_cachep);
752 kmem_cache_destroy(cifs_sm_req_cachep);
/* Destroy the request-buffer mempools before their backing slab caches
 * (reverse order of creation in cifs_init_request_bufs). */
760 cifs_destroy_request_bufs(void)
762 mempool_destroy(cifs_req_poolp);
763 kmem_cache_destroy(cifs_req_cachep);
764 mempool_destroy(cifs_sm_req_poolp);
765 kmem_cache_destroy(cifs_sm_req_cachep);
/* (cifs_init_mids, header elided) Create the mid (multiplex id) slab
 * cache and its mempool, plus the oplock queue-entry cache; unwinds the
 * earlier allocations on each failure. */
771 cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
772 sizeof (struct mid_q_entry), 0,
773 SLAB_HWCACHE_ALIGN, NULL, NULL);
774 if (cifs_mid_cachep == NULL)
777 /* 3 is a reasonable minimum number of simultaneous operations */
778 cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
779 if(cifs_mid_poolp == NULL) {
780 kmem_cache_destroy(cifs_mid_cachep);
784 cifs_oplock_cachep = kmem_cache_create("cifs_oplock_structs",
785 sizeof (struct oplock_q_entry), 0,
786 SLAB_HWCACHE_ALIGN, NULL, NULL);
787 if (cifs_oplock_cachep == NULL) {
788 kmem_cache_destroy(cifs_mid_cachep);
789 mempool_destroy(cifs_mid_poolp);
/* Tear down the mid mempool/cache and the oplock entry cache. */
797 cifs_destroy_mids(void)
799 mempool_destroy(cifs_mid_poolp);
800 kmem_cache_destroy(cifs_mid_cachep);
801 kmem_cache_destroy(cifs_oplock_cachep);
/* Kernel thread (cifsoplockd): services GlobalOplock_Q.  For each queued
 * oplock break it flushes (and, when read caching was lost, invalidates)
 * the inode's page cache, then acknowledges the break to the server with
 * a LOCKING_ANDX_OPLOCK_RELEASE unless the session needs reconnect.
 * Sleeps ~39s when the queue is empty; exits on kthread_stop(). */
804 static int cifs_oplock_thread(void * dummyarg)
806 struct oplock_q_entry * oplock_item;
807 struct cifsTconInfo *pTcon;
808 struct inode * inode;
816 spin_lock(&GlobalMid_Lock);
817 if(list_empty(&GlobalOplock_Q)) {
818 spin_unlock(&GlobalMid_Lock);
819 set_current_state(TASK_INTERRUPTIBLE);
820 schedule_timeout(39*HZ);
822 oplock_item = list_entry(GlobalOplock_Q.next,
823 struct oplock_q_entry, qhead);
825 cFYI(1,("found oplock item to write out"));
826 pTcon = oplock_item->tcon;
827 inode = oplock_item->pinode;
828 netfid = oplock_item->netfid;
/* Entry fields are copied out under the lock; the entry itself is freed
 * after dropping it. */
829 spin_unlock(&GlobalMid_Lock);
830 DeleteOplockQEntry(oplock_item);
831 /* can not grab inode sem here since it would
832 deadlock when oplock received on delete
833 since vfs_unlink holds the i_mutex across
835 /* mutex_lock(&inode->i_mutex);*/
836 if (S_ISREG(inode->i_mode)) {
837 rc = filemap_fdatawrite(inode->i_mapping);
838 if(CIFS_I(inode)->clientCanCacheRead == 0) {
839 filemap_fdatawait(inode->i_mapping);
840 invalidate_remote_inode(inode);
844 /* mutex_unlock(&inode->i_mutex);*/
/* Record a writeback failure so a later fsync/close can report it. */
846 CIFS_I(inode)->write_behind_rc = rc;
847 cFYI(1,("Oplock flush inode %p rc %d",inode,rc));
849 /* releasing a stale oplock after recent reconnection
850 of smb session using a now incorrect file
851 handle is not a data integrity issue but do
852 not bother sending an oplock release if session
853 to server still is disconnected since oplock
854 already released by the server in that case */
855 if(pTcon->tidStatus != CifsNeedReconnect) {
856 rc = CIFSSMBLock(0, pTcon, netfid,
857 0 /* len */ , 0 /* offset */, 0,
858 0, LOCKING_ANDX_OPLOCK_RELEASE,
860 cFYI(1,("Oplock release rc = %d ",rc));
863 spin_unlock(&GlobalMid_Lock);
864 set_current_state(TASK_INTERRUPTIBLE);
865 schedule_timeout(1); /* yield in case q were corrupt */
867 } while (!kthread_should_stop());
/* Kernel thread (cifsdnotifyd): every 15s walks the global session list
 * and wakes each server's response queue if requests are in flight, so
 * stuck waiters can notice errors; exits on kthread_stop(). */
872 static int cifs_dnotify_thread(void * dummyarg)
874 struct list_head *tmp;
875 struct cifsSesInfo *ses;
880 set_current_state(TASK_INTERRUPTIBLE);
881 schedule_timeout(15*HZ);
882 read_lock(&GlobalSMBSeslock);
883 /* check if any stuck requests that need
884 to be woken up and wakeq so the
885 thread can wake up and error out */
886 list_for_each(tmp, &GlobalSMBSessionList) {
887 ses = list_entry(tmp, struct cifsSesInfo,
889 if(ses && ses->server &&
890 atomic_read(&ses->server->inFlight))
891 wake_up_all(&ses->server->response_q);
893 read_unlock(&GlobalSMBSeslock);
894 } while (!kthread_should_stop());
/* (init_cifs, header elided) Module init: initialize global lists,
 * counters and locks, clamp cifs_max_pending to [2,256], create the
 * slab caches/mempools, register the filesystem, and start the oplock
 * and dnotify kernel threads.  Failure paths unwind in reverse order
 * via the goto labels at the bottom. */
903 #ifdef CONFIG_PROC_FS
906 INIT_LIST_HEAD(&GlobalServerList); /* BB not implemented yet */
907 INIT_LIST_HEAD(&GlobalSMBSessionList);
908 INIT_LIST_HEAD(&GlobalTreeConnectionList);
909 INIT_LIST_HEAD(&GlobalOplock_Q);
910 #ifdef CONFIG_CIFS_EXPERIMENTAL
911 INIT_LIST_HEAD(&GlobalDnotifyReqList);
912 INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
915 * Initialize Global counters
917 atomic_set(&sesInfoAllocCount, 0);
918 atomic_set(&tconInfoAllocCount, 0);
919 atomic_set(&tcpSesAllocCount,0);
920 atomic_set(&tcpSesReconnectCount, 0);
921 atomic_set(&tconInfoReconnectCount, 0);
923 atomic_set(&bufAllocCount, 0);
924 atomic_set(&smBufAllocCount, 0);
925 #ifdef CONFIG_CIFS_STATS2
926 atomic_set(&totBufAllocCount, 0);
927 atomic_set(&totSmBufAllocCount, 0);
928 #endif /* CONFIG_CIFS_STATS2 */
930 atomic_set(&midCount, 0);
931 GlobalCurrentXid = 0;
932 GlobalTotalActiveXid = 0;
933 GlobalMaxActiveXid = 0;
934 rwlock_init(&GlobalSMBSeslock);
935 spin_lock_init(&GlobalMid_Lock);
937 if(cifs_max_pending < 2) {
938 cifs_max_pending = 2;
939 cFYI(1,("cifs_max_pending set to min of 2"));
940 } else if(cifs_max_pending > 256) {
941 cifs_max_pending = 256;
942 cFYI(1,("cifs_max_pending set to max of 256"));
945 rc = cifs_init_inodecache();
949 rc = cifs_init_mids();
951 goto out_destroy_inodecache;
953 rc = cifs_init_request_bufs();
955 goto out_destroy_mids;
957 rc = register_filesystem(&cifs_fs_type);
959 goto out_destroy_request_bufs;
961 oplockThread = kthread_run(cifs_oplock_thread, NULL, "cifsoplockd");
962 if (IS_ERR(oplockThread)) {
963 rc = PTR_ERR(oplockThread);
964 cERROR(1,("error %d create oplock thread", rc));
965 goto out_unregister_filesystem;
968 dnotifyThread = kthread_run(cifs_dnotify_thread, NULL, "cifsdnotifyd");
969 if (IS_ERR(dnotifyThread)) {
970 rc = PTR_ERR(dnotifyThread);
971 cERROR(1,("error %d create dnotify thread", rc));
972 goto out_stop_oplock_thread;
/* Error-unwind ladder: each label undoes one successful init step. */
977 out_stop_oplock_thread:
978 kthread_stop(oplockThread);
979 out_unregister_filesystem:
980 unregister_filesystem(&cifs_fs_type);
981 out_destroy_request_bufs:
982 cifs_destroy_request_bufs();
985 out_destroy_inodecache:
986 cifs_destroy_inodecache();
988 #ifdef CONFIG_PROC_FS
998 #ifdef CONFIG_PROC_FS
1001 unregister_filesystem(&cifs_fs_type);
1002 cifs_destroy_inodecache();
1003 cifs_destroy_mids();
1004 cifs_destroy_request_bufs();
1005 kthread_stop(oplockThread);
1006 kthread_stop(dnotifyThread);
1009 MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
1010 MODULE_LICENSE("GPL"); /* combination of LGPL + GPL source behaves as GPL */
1012 ("VFS to access servers complying with the SNIA CIFS Specification e.g. Samba and Windows");
1013 MODULE_VERSION(CIFS_VERSION);
1014 module_init(init_cifs)
1015 module_exit(exit_cifs)