/*
 *   Copyright (C) International Business Machines Corp., 2002,2004
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *
 *   Common Internet FileSystem (CIFS) client
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/* Note that BB means BUGBUG (ie something to fix eventually) */
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/seq_file.h>
#include <linux/vfs.h>
#include <linux/mempool.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#define DECLARE_GLOBALS_HERE
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"

#define CIFS_MAGIC_NUMBER 0xFF534D42	/* the first four bytes of SMB PDUs */
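/* 0xFF534D42 is the protocol signature 0xFF 'S' 'M' 'B' (0xFF, 0x53, 0x4D,
   0x42) that begins every SMB protocol data unit; it doubles here as the
   superblock s_magic and the statfs f_type value reported for cifs mounts. */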
#ifdef CONFIG_CIFS_QUOTA
static struct quotactl_ops cifs_quotactl_ops;
#endif
unsigned int oplockEnabled = 1;
unsigned int experimEnabled = 0;
unsigned int linuxExtEnabled = 1;
unsigned int lookupCacheEnabled = 1;
unsigned int multiuser_mount = 0;
unsigned int extended_security = CIFSSEC_DEF;
/* unsigned int ntlmv2_support = 0; */
unsigned int sign_CIFS_PDUs = 1;
extern struct task_struct *oplockThread; /* remove sparse warning */
struct task_struct *oplockThread = NULL;
extern struct task_struct *dnotifyThread; /* remove sparse warning */
struct task_struct *dnotifyThread = NULL;
unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
module_param(CIFSMaxBufSize, int, 0);
MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header). Default: 16384 Range: 8192 to 130048");
unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
module_param(cifs_min_rcv, int, 0);
MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: 1 to 64");
unsigned int cifs_min_small = 30;
module_param(cifs_min_small, int, 0);
MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 Range: 2 to 256");
unsigned int cifs_max_pending = CIFS_MAX_REQ;
module_param(cifs_max_pending, int, 0);
MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. Default: 50 Range: 2 to 256");
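/* The four parameters above are plain module parameters (permission 0, so
 * not writable via sysfs) and can only be set at load time, for example
 * (illustrative values, not recommendations):
 *
 *     modprobe cifs CIFSMaxBufSize=130048 cifs_min_small=64 cifs_max_pending=256
 *
 * Out-of-range values are clamped by cifs_init_request_bufs() and
 * init_cifs() below rather than rejected. */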
extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
extern mempool_t *cifs_mid_poolp;

extern kmem_cache_t *cifs_oplock_cachep;
cifs_read_super(struct super_block *sb, void *data,
                const char *devname, int silent)
        struct cifs_sb_info *cifs_sb;

        sb->s_flags |= MS_NODIRATIME; /* and probably even noatime */
        sb->s_fs_info = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
        cifs_sb = CIFS_SB(sb);

        rc = cifs_mount(sb, cifs_sb, data, devname);
                  ("cifs_mount failed w/return code = %d", rc));
                goto out_mount_failed;

        sb->s_magic = CIFS_MAGIC_NUMBER;
        sb->s_op = &cifs_super_ops;
/*      if (cifs_sb->tcon->ses->server->maxBuf > MAX_CIFS_HDR_SIZE + 512)
                sb->s_blocksize = cifs_sb->tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE; */
#ifdef CONFIG_CIFS_QUOTA
        sb->s_qcop = &cifs_quotactl_ops;
        sb->s_blocksize = CIFS_MAX_MSGSIZE;
        sb->s_blocksize_bits = 14;      /* default 2**14 = CIFS_MAX_MSGSIZE */
        inode = iget(sb, ROOT_I);

        sb->s_root = d_alloc_root(inode);

        cERROR(1, ("cifs_read_super: get root inode failed"));

        if (cifs_sb->local_nls)
                unload_nls(cifs_sb->local_nls);
cifs_put_super(struct super_block *sb)
        struct cifs_sb_info *cifs_sb;

        cFYI(1, ("In cifs_put_super"));
        cifs_sb = CIFS_SB(sb);
        if (cifs_sb == NULL) {
                cFYI(1, ("Empty cifs superblock info passed to unmount"));

        rc = cifs_umount(sb, cifs_sb);
                cERROR(1, ("cifs_umount failed with return code %d", rc));
        unload_nls(cifs_sb->local_nls);
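/* cifs_statfs() below fills struct kstatfs for the mounted share.  It tries
 * the richest query first and falls back in order: the CIFS POSIX QFS info
 * call when the server advertises CAP_UNIX with CIFS_POSIX_EXTENSIONS, then
 * the standard QFSInfo level, then the legacy level-one call understood by
 * old Windows servers. */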
cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
        struct super_block *sb = dentry->d_sb;

        int rc = -EOPNOTSUPP;
        struct cifs_sb_info *cifs_sb;
        struct cifsTconInfo *pTcon;

        cifs_sb = CIFS_SB(sb);
        pTcon = cifs_sb->tcon;

        buf->f_type = CIFS_MAGIC_NUMBER;

        /* instead could get the real value via SMB_QUERY_FS_ATTRIBUTE_INFO */
        buf->f_namelen = PATH_MAX; /* PATH_MAX may be too long - it would
                                      presumably be total path, but note
                                      that some servers (including Samba 3)
                                      have a shorter maximum path */
        buf->f_files = 0;       /* undefined */
        buf->f_ffree = 0;       /* unlimited */

        /* BB we could add a second check for a QFS Unix capability bit */
        /* BB FIXME check CIFS_POSIX_EXTENSIONS Unix cap first FIXME BB */
        if ((pTcon->ses->capabilities & CAP_UNIX) && (CIFS_POSIX_EXTENSIONS &
            le64_to_cpu(pTcon->fsUnixInfo.Capability)))
                rc = CIFSSMBQFSPosixInfo(xid, pTcon, buf);

        /* Only need to call the old QFSInfo if failed on newer one */
                rc = CIFSSMBQFSInfo(xid, pTcon, buf);

        /* Old Windows servers do not support level 103, retry with level
           one if old server failed the previous call */
                rc = SMBOldQFSInfo(xid, pTcon, buf);

        /* BB get from info in tcon struct at mount time call to QFSAttrInfo */
        return 0;       /* always return success? what if volume is no
                           longer available? */
static int cifs_permission(struct inode *inode, int mask, struct nameidata *nd)
        struct cifs_sb_info *cifs_sb;

        cifs_sb = CIFS_SB(inode->i_sb);

        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
        } else /* file mode might have been restricted at mount time
                  on the client (above and beyond ACL on servers) for
                  servers which do not support setting and viewing mode bits,
                  so allowing client to check permissions is useful */
                return generic_permission(inode, mask, NULL);
static kmem_cache_t *cifs_inode_cachep;
static kmem_cache_t *cifs_req_cachep;
static kmem_cache_t *cifs_mid_cachep;
kmem_cache_t *cifs_oplock_cachep;
static kmem_cache_t *cifs_sm_req_cachep;
mempool_t *cifs_sm_req_poolp;
mempool_t *cifs_req_poolp;
mempool_t *cifs_mid_poolp;
static struct inode *
cifs_alloc_inode(struct super_block *sb)
        struct cifsInodeInfo *cifs_inode;
        cifs_inode = kmem_cache_alloc(cifs_inode_cachep, SLAB_KERNEL);

        cifs_inode->cifsAttrs = 0x20;   /* default */
        atomic_set(&cifs_inode->inUse, 0);
        cifs_inode->time = 0;
        /* Until the file is open and we have gotten oplock
           info back from the server, can not assume caching of
           file data or metadata */
        cifs_inode->clientCanCacheRead = FALSE;
        cifs_inode->clientCanCacheAll = FALSE;
        cifs_inode->vfs_inode.i_blkbits = 14;   /* 2**14 = CIFS_MAX_MSGSIZE */
        cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME;
        INIT_LIST_HEAD(&cifs_inode->openFileList);
        return &cifs_inode->vfs_inode;

cifs_destroy_inode(struct inode *inode)
        kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
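/* The inode life cycle is the usual slab-backed pattern: alloc_inode hands
 * the VFS the embedded vfs_inode of a cifsInodeInfo taken from
 * cifs_inode_cachep, and destroy_inode returns the containing object to the
 * same cache.  Both caching flags start FALSE so nothing in the page cache
 * is trusted until the server grants an oplock on the open file. */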
/*
 * cifs_show_options() is for displaying mount options in /proc/mounts.
 * Not all settable options are displayed but most of the important
 * ones are.
 */
cifs_show_options(struct seq_file *s, struct vfsmount *m)
        struct cifs_sb_info *cifs_sb;

        cifs_sb = CIFS_SB(m->mnt_sb);

                seq_printf(s, ",unc=%s", cifs_sb->tcon->treeName);
                if (cifs_sb->tcon->ses) {
                        if (cifs_sb->tcon->ses->userName)
                                seq_printf(s, ",username=%s",
                                           cifs_sb->tcon->ses->userName);
                        if (cifs_sb->tcon->ses->domainName)
                                seq_printf(s, ",domain=%s",
                                           cifs_sb->tcon->ses->domainName);

                seq_printf(s, ",rsize=%d", cifs_sb->rsize);
                seq_printf(s, ",wsize=%d", cifs_sb->wsize);
#ifdef CONFIG_CIFS_QUOTA
int cifs_xquota_set(struct super_block *sb, int quota_type, qid_t qid,
                    struct fs_disk_quota *pdquota)
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct cifsTconInfo *pTcon;

        pTcon = cifs_sb->tcon;

        cFYI(1, ("set type: 0x%x id: %d", quota_type, qid));
int cifs_xquota_get(struct super_block *sb, int quota_type, qid_t qid,
                    struct fs_disk_quota *pdquota)
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct cifsTconInfo *pTcon;

        pTcon = cifs_sb->tcon;

        cFYI(1, ("get type: 0x%x id: %d", quota_type, qid));
int cifs_xstate_set(struct super_block *sb, unsigned int flags, int operation)
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct cifsTconInfo *pTcon;

        pTcon = cifs_sb->tcon;

        cFYI(1, ("flags: 0x%x operation: 0x%x", flags, operation));

int cifs_xstate_get(struct super_block *sb, struct fs_quota_stat *qstats)
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct cifsTconInfo *pTcon;

        pTcon = cifs_sb->tcon;

        cFYI(1, ("pqstats %p", qstats));
static struct quotactl_ops cifs_quotactl_ops = {
        .set_xquota     = cifs_xquota_set,
        .get_xquota     = cifs_xquota_get,
        .set_xstate     = cifs_xstate_set,
        .get_xstate     = cifs_xstate_get,
static void cifs_umount_begin(struct vfsmount *vfsmnt, int flags)
        struct cifs_sb_info *cifs_sb;
        struct cifsTconInfo *tcon;

        if (!(flags & MNT_FORCE))
        cifs_sb = CIFS_SB(vfsmnt->mnt_sb);

        tcon = cifs_sb->tcon;

        down(&tcon->tconSem);
        if (atomic_read(&tcon->useCount) == 1)
                tcon->tidStatus = CifsExiting;

        /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
        /* cancel_notify_requests(tcon); */
        if (tcon->ses && tcon->ses->server)
                cFYI(1, ("wake up tasks now - umount begin not complete"));
                wake_up_all(&tcon->ses->server->request_q);
                wake_up_all(&tcon->ses->server->response_q);
                msleep(1); /* yield */
                /* we have to kick the requests once more */
                wake_up_all(&tcon->ses->server->response_q);

        /* BB FIXME - finish add checks for tidStatus BB */
static int cifs_remount(struct super_block *sb, int *flags, char *data)
        *flags |= MS_NODIRATIME;

struct super_operations cifs_super_ops = {
        .read_inode     = cifs_read_inode,
        .put_super      = cifs_put_super,
        .statfs         = cifs_statfs,
        .alloc_inode    = cifs_alloc_inode,
        .destroy_inode  = cifs_destroy_inode,
/*      .drop_inode     = generic_delete_inode,
        .delete_inode   = cifs_delete_inode, */ /* Do not need the above two
        functions unless later we add lazy close of inodes or unless the
        kernel forgets to call us with the same number of releases (closes)
        as opens */
        .show_options   = cifs_show_options,
        .umount_begin   = cifs_umount_begin,
        .remount_fs     = cifs_remount,
cifs_get_sb(struct file_system_type *fs_type,
            int flags, const char *dev_name, void *data, struct vfsmount *mnt)
        struct super_block *sb = sget(fs_type, NULL, set_anon_super, NULL);

        cFYI(1, ("Devname: %s flags: %d ", dev_name, flags));

        rc = cifs_read_super(sb, data, dev_name, flags & MS_SILENT ? 1 : 0);
                up_write(&sb->s_umount);
                deactivate_super(sb);

        sb->s_flags |= MS_ACTIVE;
        return simple_set_mnt(mnt, sb);
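/* Mount entry point: an anonymous superblock is obtained with sget(), filled
 * in by cifs_read_super() (which performs the actual network mount), and on
 * failure torn down again with deactivate_super(); on success the superblock
 * is marked MS_ACTIVE and attached to the vfsmount via simple_set_mnt(). */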
static ssize_t cifs_file_writev(struct file *file, const struct iovec *iov,
                                unsigned long nr_segs, loff_t *ppos)
        struct inode *inode = file->f_dentry->d_inode;

        written = generic_file_writev(file, iov, nr_segs, ppos);
        if (!CIFS_I(inode)->clientCanCacheAll)
                filemap_fdatawrite(inode->i_mapping);

static ssize_t cifs_file_aio_write(struct kiocb *iocb, const char __user *buf,
                                   size_t count, loff_t pos)
        struct inode *inode = iocb->ki_filp->f_dentry->d_inode;

        written = generic_file_aio_write(iocb, buf, count, pos);
        if (!CIFS_I(inode)->clientCanCacheAll)
                filemap_fdatawrite(inode->i_mapping);
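/* Both write paths above go through the generic page-cache write code and
 * then, when the client does not hold an oplock that allows it to cache
 * writes (clientCanCacheAll is false), immediately schedule writeback so
 * other clients see the data without waiting for normal dirty-page flushing. */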
static loff_t cifs_llseek(struct file *file, loff_t offset, int origin)
        /* origin == SEEK_END => we must revalidate the cached file length */
                int retval = cifs_revalidate(file->f_dentry);
                        return (loff_t)retval;
        return remote_llseek(file, offset, origin);
static struct file_system_type cifs_fs_type = {
        .owner = THIS_MODULE,
        .get_sb = cifs_get_sb,
        .kill_sb = kill_anon_super,
struct inode_operations cifs_dir_inode_ops = {
        .create = cifs_create,
        .lookup = cifs_lookup,
        .getattr = cifs_getattr,
        .unlink = cifs_unlink,
        .link = cifs_hardlink,
        .rename = cifs_rename,
        .permission = cifs_permission,
        /* revalidate:cifs_revalidate, */
        .setattr = cifs_setattr,
        .symlink = cifs_symlink,
#ifdef CONFIG_CIFS_XATTR
        .setxattr = cifs_setxattr,
        .getxattr = cifs_getxattr,
        .listxattr = cifs_listxattr,
        .removexattr = cifs_removexattr,
struct inode_operations cifs_file_inode_ops = {
        /* revalidate:cifs_revalidate, */
        .setattr = cifs_setattr,
        .getattr = cifs_getattr, /* do we need this anymore? */
        .rename = cifs_rename,
        .permission = cifs_permission,
#ifdef CONFIG_CIFS_XATTR
        .setxattr = cifs_setxattr,
        .getxattr = cifs_getxattr,
        .listxattr = cifs_listxattr,
        .removexattr = cifs_removexattr,
struct inode_operations cifs_symlink_inode_ops = {
        .readlink = generic_readlink,
        .follow_link = cifs_follow_link,
        .put_link = cifs_put_link,
        .permission = cifs_permission,
        /* BB add the following two eventually */
        /* revalidate: cifs_revalidate,
           setattr:    cifs_notify_change, *//* BB do we need notify change */
#ifdef CONFIG_CIFS_XATTR
        .setxattr = cifs_setxattr,
        .getxattr = cifs_getxattr,
        .listxattr = cifs_listxattr,
        .removexattr = cifs_removexattr,
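/* Four file_operations tables follow.  They are variants of the same method
 * set: the "direct" tables are used for uncached I/O (cifs_user_read /
 * cifs_user_write instead of the page-cache paths, and no mmap), and the
 * "nobrl" tables are the same minus byte-range lock support, presumably
 * selected when the mount disables brl (e.g. a nobrl mount option). */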
const struct file_operations cifs_file_ops = {
        .read = do_sync_read,
        .write = do_sync_write,
        .readv = generic_file_readv,
        .writev = cifs_file_writev,
        .aio_read = generic_file_aio_read,
        .aio_write = cifs_file_aio_write,
        .release = cifs_close,
        .mmap = cifs_file_mmap,
        .sendfile = generic_file_sendfile,
        .llseek = cifs_llseek,
#ifdef CONFIG_CIFS_POSIX
#endif /* CONFIG_CIFS_POSIX */
#ifdef CONFIG_CIFS_EXPERIMENTAL
        .dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
const struct file_operations cifs_file_direct_ops = {
        /* no mmap, no aio, no readv -
           BB reevaluate whether they can be done with directio, no cache */
        .read = cifs_user_read,
        .write = cifs_user_write,
        .release = cifs_close,
        .sendfile = generic_file_sendfile, /* BB removeme BB */
#ifdef CONFIG_CIFS_POSIX
#endif /* CONFIG_CIFS_POSIX */
        .llseek = cifs_llseek,
#ifdef CONFIG_CIFS_EXPERIMENTAL
        .dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
const struct file_operations cifs_file_nobrl_ops = {
        .read = do_sync_read,
        .write = do_sync_write,
        .readv = generic_file_readv,
        .writev = cifs_file_writev,
        .aio_read = generic_file_aio_read,
        .aio_write = cifs_file_aio_write,
        .release = cifs_close,
        .mmap = cifs_file_mmap,
        .sendfile = generic_file_sendfile,
        .llseek = cifs_llseek,
#ifdef CONFIG_CIFS_POSIX
#endif /* CONFIG_CIFS_POSIX */
#ifdef CONFIG_CIFS_EXPERIMENTAL
        .dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
const struct file_operations cifs_file_direct_nobrl_ops = {
        /* no mmap, no aio, no readv -
           BB reevaluate whether they can be done with directio, no cache */
        .read = cifs_user_read,
        .write = cifs_user_write,
        .release = cifs_close,
        .sendfile = generic_file_sendfile, /* BB removeme BB */
#ifdef CONFIG_CIFS_POSIX
#endif /* CONFIG_CIFS_POSIX */
        .llseek = cifs_llseek,
#ifdef CONFIG_CIFS_EXPERIMENTAL
        .dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
const struct file_operations cifs_dir_ops = {
        .readdir = cifs_readdir,
        .release = cifs_closedir,
        .read = generic_read_dir,
#ifdef CONFIG_CIFS_EXPERIMENTAL
        .dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
cifs_init_once(void *inode, kmem_cache_t *cachep, unsigned long flags)
        struct cifsInodeInfo *cifsi = inode;

        if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
            SLAB_CTOR_CONSTRUCTOR) {
                inode_init_once(&cifsi->vfs_inode);
                INIT_LIST_HEAD(&cifsi->lockList);

cifs_init_inodecache(void)
        cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
                                              sizeof(struct cifsInodeInfo),
                                              0, (SLAB_RECLAIM_ACCOUNT|
                                              cifs_init_once, NULL);
        if (cifs_inode_cachep == NULL)

cifs_destroy_inodecache(void)
        kmem_cache_destroy(cifs_inode_cachep);
cifs_init_request_bufs(void)
        if (CIFSMaxBufSize < 8192) {
        /* Buffer size can not be smaller than 2 * PATH_MAX since maximum
           Unicode path name has to fit in any SMB/CIFS path based frames */
                CIFSMaxBufSize = 8192;
        } else if (CIFSMaxBufSize > 1024*127) {
                CIFSMaxBufSize = 1024 * 127;
        CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
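        /* The mask clears the low nine bits, so whatever value survives the
           clamp above is rounded down to a 512-byte multiple, e.g. a
           requested 17000 (0x4268) becomes 0x4268 & 0x1FE00 = 0x4200 = 16896. */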
/*      cERROR(1, ("CIFSMaxBufSize %d 0x%x", CIFSMaxBufSize, CIFSMaxBufSize)); */
        cifs_req_cachep = kmem_cache_create("cifs_request",
                                            MAX_CIFS_HDR_SIZE, 0,
                                            SLAB_HWCACHE_ALIGN, NULL, NULL);
        if (cifs_req_cachep == NULL)

        else if (cifs_min_rcv > 64) {
                cERROR(1, ("cifs_min_rcv set to maximum (64)"));

        cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
        if (cifs_req_poolp == NULL) {
                kmem_cache_destroy(cifs_req_cachep);

        /* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
           almost all handle based requests (but not write response, nor is it
           sufficient for path based requests). A smaller size would have
           been more efficient (compacting multiple slab items on one 4k page)
           for the case in which debug was on, but this larger size allows
           more SMBs to use small buffer alloc and is still much more
           efficient to alloc 1 per page off the slab compared to 17K (5page)
           alloc of large cifs buffers even when page debugging is on */
        cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
                        MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
        if (cifs_sm_req_cachep == NULL) {
                mempool_destroy(cifs_req_poolp);
                kmem_cache_destroy(cifs_req_cachep);

        if (cifs_min_small < 2)
        else if (cifs_min_small > 256) {
                cifs_min_small = 256;
                cFYI(1, ("cifs_min_small set to maximum (256)"));

        cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
        if (cifs_sm_req_poolp == NULL) {
                mempool_destroy(cifs_req_poolp);
                kmem_cache_destroy(cifs_req_cachep);
                kmem_cache_destroy(cifs_sm_req_cachep);

cifs_destroy_request_bufs(void)
        mempool_destroy(cifs_req_poolp);
        kmem_cache_destroy(cifs_req_cachep);
        mempool_destroy(cifs_sm_req_poolp);
        kmem_cache_destroy(cifs_sm_req_cachep);
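/* The mid ("multiplex id") cache below backs struct mid_q_entry, one entry
 * per outstanding SMB request; responses from the server are matched back to
 * waiters by multiplex id, and the small mempool keeps a minimum number of
 * entries available so requests can make progress under memory pressure. */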
        cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
                                            sizeof(struct mid_q_entry), 0,
                                            SLAB_HWCACHE_ALIGN, NULL, NULL);
        if (cifs_mid_cachep == NULL)

        /* 3 is a reasonable minimum number of simultaneous operations */
        cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
        if (cifs_mid_poolp == NULL) {
                kmem_cache_destroy(cifs_mid_cachep);

        cifs_oplock_cachep = kmem_cache_create("cifs_oplock_structs",
                                               sizeof(struct oplock_q_entry), 0,
                                               SLAB_HWCACHE_ALIGN, NULL, NULL);
        if (cifs_oplock_cachep == NULL) {
                kmem_cache_destroy(cifs_mid_cachep);
                mempool_destroy(cifs_mid_poolp);

cifs_destroy_mids(void)
        mempool_destroy(cifs_mid_poolp);
        kmem_cache_destroy(cifs_mid_cachep);
        kmem_cache_destroy(cifs_oplock_cachep);
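/* The oplock thread below services oplock breaks queued on GlobalOplock_Q:
 * for each entry it flushes (and, if read caching is also being lost,
 * invalidates) the cached pages of the affected inode, then acknowledges the
 * break to the server with a LOCKING_ANDX request carrying the
 * OPLOCK_RELEASE flag, unless the tree connection needs reconnect. */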
static int cifs_oplock_thread(void *dummyarg)
        struct oplock_q_entry *oplock_item;
        struct cifsTconInfo *pTcon;
        struct inode *inode;

                spin_lock(&GlobalMid_Lock);
                if (list_empty(&GlobalOplock_Q)) {
                        spin_unlock(&GlobalMid_Lock);
                        set_current_state(TASK_INTERRUPTIBLE);
                        schedule_timeout(39*HZ);
                oplock_item = list_entry(GlobalOplock_Q.next,
                                         struct oplock_q_entry, qhead);
                cFYI(1, ("found oplock item to write out"));
                pTcon = oplock_item->tcon;
                inode = oplock_item->pinode;
                netfid = oplock_item->netfid;
                spin_unlock(&GlobalMid_Lock);
                DeleteOplockQEntry(oplock_item);
                /* can not grab inode sem here since it would
                   deadlock when oplock received on delete
                   since vfs_unlink holds the i_mutex across
                   the call */
                /* mutex_lock(&inode->i_mutex);*/
                if (S_ISREG(inode->i_mode)) {
                        rc = filemap_fdatawrite(inode->i_mapping);
                        if (CIFS_I(inode)->clientCanCacheRead == 0) {
                                filemap_fdatawait(inode->i_mapping);
                                invalidate_remote_inode(inode);
                /* mutex_unlock(&inode->i_mutex);*/
                CIFS_I(inode)->write_behind_rc = rc;
                cFYI(1, ("Oplock flush inode %p rc %d", inode, rc));

                /* releasing a stale oplock after recent reconnection
                   of smb session using a now incorrect file
                   handle is not a data integrity issue but do
                   not bother sending an oplock release if session
                   to server still is disconnected since oplock
                   already released by the server in that case */
                if (pTcon->tidStatus != CifsNeedReconnect) {
                        rc = CIFSSMBLock(0, pTcon, netfid,
                                         0 /* len */ , 0 /* offset */, 0,
                                         0, LOCKING_ANDX_OPLOCK_RELEASE,
                        cFYI(1, ("Oplock release rc = %d ", rc));
                spin_unlock(&GlobalMid_Lock);
                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(1);    /* yield in case q were corrupt */
        } while (!kthread_should_stop());
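/* cifs_dnotify_thread() below is a watchdog rather than a real dnotify
 * handler: every 15 seconds it scans the global session list and wakes the
 * response queue of any server that still has requests in flight, so callers
 * blocked on a response that will never arrive can wake up and error out. */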
static int cifs_dnotify_thread(void *dummyarg)
        struct list_head *tmp;
        struct cifsSesInfo *ses;

                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(15*HZ);
                read_lock(&GlobalSMBSeslock);
                /* check if any stuck requests that need
                   to be woken up and wakeq so the
                   thread can wake up and error out */
                list_for_each(tmp, &GlobalSMBSessionList) {
                        ses = list_entry(tmp, struct cifsSesInfo,
                        if (ses && ses->server &&
                            atomic_read(&ses->server->inFlight))
                                wake_up_all(&ses->server->response_q);
                read_unlock(&GlobalSMBSeslock);
        } while (!kthread_should_stop());
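/* init_cifs() below is the module initialization path: it sets up the global
 * lists and counters, clamps cifs_max_pending into its documented 2..256
 * range, creates the inode/request/mid caches, registers the filesystem, and
 * finally starts the oplock and dnotify kthreads; any failure unwinds the
 * steps already completed through the out_* labels in reverse order. */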
#ifdef CONFIG_PROC_FS

        INIT_LIST_HEAD(&GlobalServerList); /* BB not implemented yet */
        INIT_LIST_HEAD(&GlobalSMBSessionList);
        INIT_LIST_HEAD(&GlobalTreeConnectionList);
        INIT_LIST_HEAD(&GlobalOplock_Q);
#ifdef CONFIG_CIFS_EXPERIMENTAL
        INIT_LIST_HEAD(&GlobalDnotifyReqList);
        INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);

        /*
         * Initialize Global counters
         */
        atomic_set(&sesInfoAllocCount, 0);
        atomic_set(&tconInfoAllocCount, 0);
        atomic_set(&tcpSesAllocCount, 0);
        atomic_set(&tcpSesReconnectCount, 0);
        atomic_set(&tconInfoReconnectCount, 0);

        atomic_set(&bufAllocCount, 0);
        atomic_set(&smBufAllocCount, 0);
#ifdef CONFIG_CIFS_STATS2
        atomic_set(&totBufAllocCount, 0);
        atomic_set(&totSmBufAllocCount, 0);
#endif /* CONFIG_CIFS_STATS2 */

        atomic_set(&midCount, 0);
        GlobalCurrentXid = 0;
        GlobalTotalActiveXid = 0;
        GlobalMaxActiveXid = 0;
        rwlock_init(&GlobalSMBSeslock);
        spin_lock_init(&GlobalMid_Lock);

        if (cifs_max_pending < 2) {
                cifs_max_pending = 2;
                cFYI(1, ("cifs_max_pending set to min of 2"));
        } else if (cifs_max_pending > 256) {
                cifs_max_pending = 256;
                cFYI(1, ("cifs_max_pending set to max of 256"));
        rc = cifs_init_inodecache();

        rc = cifs_init_mids();
                goto out_destroy_inodecache;

        rc = cifs_init_request_bufs();
                goto out_destroy_mids;

        rc = register_filesystem(&cifs_fs_type);
                goto out_destroy_request_bufs;

        oplockThread = kthread_run(cifs_oplock_thread, NULL, "cifsoplockd");
        if (IS_ERR(oplockThread)) {
                rc = PTR_ERR(oplockThread);
                cERROR(1, ("error %d create oplock thread", rc));
                goto out_unregister_filesystem;

        dnotifyThread = kthread_run(cifs_dnotify_thread, NULL, "cifsdnotifyd");
        if (IS_ERR(dnotifyThread)) {
                rc = PTR_ERR(dnotifyThread);
                cERROR(1, ("error %d create dnotify thread", rc));
                goto out_stop_oplock_thread;

 out_stop_oplock_thread:
        kthread_stop(oplockThread);
 out_unregister_filesystem:
        unregister_filesystem(&cifs_fs_type);
 out_destroy_request_bufs:
        cifs_destroy_request_bufs();
        cifs_destroy_mids();
 out_destroy_inodecache:
        cifs_destroy_inodecache();
#ifdef CONFIG_PROC_FS

        cFYI(0, ("In unregister ie exit_cifs"));
#ifdef CONFIG_PROC_FS
        unregister_filesystem(&cifs_fs_type);
        cifs_destroy_inodecache();
        cifs_destroy_mids();
        cifs_destroy_request_bufs();
        kthread_stop(oplockThread);
        kthread_stop(dnotifyThread);
MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
MODULE_LICENSE("GPL");  /* combination of LGPL + GPL source behaves as GPL */
MODULE_DESCRIPTION
    ("VFS to access servers complying with the SNIA CIFS Specification e.g. Samba and Windows");
MODULE_VERSION(CIFS_VERSION);
module_init(init_cifs)
module_exit(exit_cifs)