/*
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines Corp., 2002,2007
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
static inline struct cifsFileInfo *cifs_init_private(
	struct cifsFileInfo *private_data, struct inode *inode,
	struct file *file, __u16 netfid)
{
	memset(private_data, 0, sizeof(struct cifsFileInfo));
	private_data->netfid = netfid;
	private_data->pid = current->tgid;
	init_MUTEX(&private_data->fh_sem);
	mutex_init(&private_data->lock_mutex);
	INIT_LIST_HEAD(&private_data->llist);
	private_data->pfile = file; /* needed for writepage */
	private_data->pInode = inode;
	private_data->invalidHandle = FALSE;
	private_data->closePend = FALSE;
	/* We have to track the number of writers to the inode, since
	   writepages does not tell us which handle the write is for, so
	   there can be a close (overlapping with a write) of the filehandle
	   that cifs_writepages chose to use */
	atomic_set(&private_data->wrtPending, 0);

	return private_data;
}
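/*
 * Summary of how wrtPending is used elsewhere in this file:
 * find_writable_file()/find_readable_file() bump the count before handing
 * a handle to a writer, the write paths (cifs_partialpagewrite and
 * cifs_writepages) drop it again when the write is issued, and cifs_close()
 * waits for the count to drain before freeing the cifsFileInfo.
 */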
static inline int cifs_convert_flags(unsigned int flags)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request; it can
		   cause unnecessary access-denied errors on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}
static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}
95 /* all arguments to this function must be checked for validity in caller */
96 static inline int cifs_open_inode_helper(struct inode *inode, struct file *file,
97 struct cifsInodeInfo *pCifsInode, struct cifsFileInfo *pCifsFile,
98 struct cifsTconInfo *pTcon, int *oplock, FILE_ALL_INFO *buf,
99 char *full_path, int xid)
101 struct timespec temp;
104 /* want handles we can use to read with first
105 in the list so we do not have to walk the
106 list to search for one in prepare_write */
107 if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
108 list_add_tail(&pCifsFile->flist,
109 &pCifsInode->openFileList);
111 list_add(&pCifsFile->flist,
112 &pCifsInode->openFileList);
114 write_unlock(&GlobalSMBSeslock);
115 if (pCifsInode->clientCanCacheRead) {
116 /* we have the inode open somewhere else
117 no need to discard cache data */
118 goto client_can_cache;
	/* BB need same check in cifs_create too? */
	/* if not oplocked, invalidate inode pages if mtime or file
	   size changed */
	temp = cifs_NTtimeToUnix(le64_to_cpu(buf->LastWriteTime));
125 if (timespec_equal(&file->f_path.dentry->d_inode->i_mtime, &temp) &&
126 (file->f_path.dentry->d_inode->i_size ==
127 (loff_t)le64_to_cpu(buf->EndOfFile))) {
128 cFYI(1, ("inode unchanged on server"));
130 if (file->f_path.dentry->d_inode->i_mapping) {
131 /* BB no need to lock inode until after invalidate
132 since namei code should already have it locked? */
133 rc = filemap_write_and_wait(file->f_path.dentry->d_inode->i_mapping);
135 CIFS_I(file->f_path.dentry->d_inode)->write_behind_rc = rc;
		cFYI(1, ("invalidating remote inode since open detected it "
			 "changed"));
		invalidate_remote_inode(file->f_path.dentry->d_inode);
144 rc = cifs_get_inode_info_unix(&file->f_path.dentry->d_inode,
145 full_path, inode->i_sb, xid);
147 rc = cifs_get_inode_info(&file->f_path.dentry->d_inode,
148 full_path, buf, inode->i_sb, xid);
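	/* The oplock level granted by the server controls what we may cache
	   locally: an exclusive oplock lets us cache both reads and writes,
	   while a level II (read) oplock lets us cache reads only */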
150 if ((*oplock & 0xF) == OPLOCK_EXCLUSIVE) {
151 pCifsInode->clientCanCacheAll = TRUE;
152 pCifsInode->clientCanCacheRead = TRUE;
153 cFYI(1, ("Exclusive Oplock granted on inode %p",
154 file->f_path.dentry->d_inode));
155 } else if ((*oplock & 0xF) == OPLOCK_READ)
156 pCifsInode->clientCanCacheRead = TRUE;
161 int cifs_open(struct inode *inode, struct file *file)
165 struct cifs_sb_info *cifs_sb;
166 struct cifsTconInfo *pTcon;
167 struct cifsFileInfo *pCifsFile;
168 struct cifsInodeInfo *pCifsInode;
169 struct list_head *tmp;
170 char *full_path = NULL;
174 FILE_ALL_INFO *buf = NULL;
178 cifs_sb = CIFS_SB(inode->i_sb);
179 pTcon = cifs_sb->tcon;
181 if (file->f_flags & O_CREAT) {
182 /* search inode for this file and fill in file->private_data */
183 pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
184 read_lock(&GlobalSMBSeslock);
185 list_for_each(tmp, &pCifsInode->openFileList) {
186 pCifsFile = list_entry(tmp, struct cifsFileInfo,
188 if ((pCifsFile->pfile == NULL) &&
189 (pCifsFile->pid == current->tgid)) {
190 /* mode set in cifs_create */
192 /* needed for writepage */
193 pCifsFile->pfile = file;
195 file->private_data = pCifsFile;
199 read_unlock(&GlobalSMBSeslock);
200 if (file->private_data != NULL) {
205 if (file->f_flags & O_EXCL)
206 cERROR(1, ("could not find file instance for "
207 "new file %p", file));
211 full_path = build_path_from_dentry(file->f_path.dentry);
212 if (full_path == NULL) {
217 cFYI(1, ("inode = 0x%p file flags are 0x%x for %s",
218 inode, file->f_flags, full_path));
219 desiredAccess = cifs_convert_flags(file->f_flags);
	/*********************************************************************
	 *  open flag mapping table:
	 *
	 *	POSIX Flag            CIFS Disposition
	 *	----------            ----------------
	 *	O_CREAT               FILE_OPEN_IF
	 *	O_CREAT | O_EXCL      FILE_CREATE
	 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
	 *	O_TRUNC               FILE_OVERWRITE
	 *	none of the above     FILE_OPEN
	 *
	 *	Note that there is no direct match for the disposition
	 *	FILE_SUPERSEDE (ie create whether or not the file exists).
	 *	O_CREAT | O_TRUNC is similar, but it truncates the existing
	 *	file rather than creating a new one as FILE_SUPERSEDE does
	 *	(FILE_SUPERSEDE uses the attributes / metadata passed in on
	 *	the open call).
	 *
	 *?  O_SYNC is a reasonable match to the CIFS writethrough flag,
	 *?  and the read/write flags match reasonably.  O_LARGEFILE is
	 *?  irrelevant because largefile support is always used by this
	 *?  client.  Flags O_APPEND, O_DIRECT, O_DIRECTORY, O_FASYNC,
	 *   O_NOFOLLOW, O_NONBLOCK need further investigation
	 *********************************************************************/
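	/* For example, open(path, O_WRONLY | O_CREAT | O_TRUNC) maps to
	   desiredAccess GENERIC_WRITE with disposition FILE_OVERWRITE_IF,
	   while a plain O_RDONLY open maps to GENERIC_READ with FILE_OPEN
	   (see cifs_convert_flags and cifs_get_disposition above) */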
245 disposition = cifs_get_disposition(file->f_flags);
252 /* BB pass O_SYNC flag through on file attributes .. BB */
254 /* Also refresh inode by passing in file_info buf returned by SMBOpen
255 and calling get_inode_info with returned buf (at least helps
256 non-Unix server case) */
258 /* BB we can not do this if this is the second open of a file
259 and the first handle has writebehind data, we might be
260 able to simply do a filemap_fdatawrite/filemap_fdatawait first */
261 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
267 if (cifs_sb->tcon->ses->capabilities & CAP_NT_SMBS)
268 rc = CIFSSMBOpen(xid, pTcon, full_path, disposition,
269 desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
270 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
271 & CIFS_MOUNT_MAP_SPECIAL_CHR);
273 rc = -EIO; /* no NT SMB support fall into legacy open below */
276 /* Old server, try legacy style OpenX */
277 rc = SMBLegacyOpen(xid, pTcon, full_path, disposition,
278 desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
279 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
280 & CIFS_MOUNT_MAP_SPECIAL_CHR);
283 cFYI(1, ("cifs_open returned 0x%x", rc));
287 kmalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
288 if (file->private_data == NULL) {
292 pCifsFile = cifs_init_private(file->private_data, inode, file, netfid);
293 write_lock(&GlobalSMBSeslock);
294 list_add(&pCifsFile->tlist, &pTcon->openFileList);
296 pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
298 rc = cifs_open_inode_helper(inode, file, pCifsInode,
300 &oplock, buf, full_path, xid);
302 write_unlock(&GlobalSMBSeslock);
305 if (oplock & CIFS_CREATE_ACTION) {
306 /* time to set mode which we can not set earlier due to
307 problems creating new read-only files */
308 if (pTcon->unix_ext) {
309 CIFSSMBUnixSetPerms(xid, pTcon, full_path,
311 (__u64)-1, (__u64)-1, 0 /* dev */,
313 cifs_sb->mnt_cifs_flags &
314 CIFS_MOUNT_MAP_SPECIAL_CHR);
316 /* BB implement via Windows security descriptors eg
317 CIFSSMBWinSetPerms(xid, pTcon, full_path, mode,
319 in the meantime could set r/o dos attribute when
320 perms are eg: mode & 0222 == 0 */
/* Try to reacquire byte-range locks that were released when the
   session to the server was lost */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
	int rc = 0;

	/* BB list all locks open on this file and relock */
	return rc;
}
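/*
 * Illustrative sketch only, not wired into the code above: the BB item in
 * cifs_relock_file() would presumably walk the byte-range locks remembered
 * by store_file_lock() and reissue each of them, along the lines of:
 *
 *	struct cifsLockInfo *li;
 *
 *	mutex_lock(&cifsFile->lock_mutex);
 *	list_for_each_entry(li, &cifsFile->llist, llist)
 *		rc = CIFSSMBLock(xid, pTcon, cifsFile->netfid, li->length,
 *				 li->offset, 0, 1, li->type, FALSE);
 *	mutex_unlock(&cifsFile->lock_mutex);
 *
 * with numUnlock 0 and numLock 1, mirroring the unlock walk in cifs_lock().
 * The xid and pTcon used above are assumptions - neither is currently
 * available inside this helper - so this is a sketch, not the actual fix.
 */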
342 static int cifs_reopen_file(struct file *file, int can_flush)
346 struct cifs_sb_info *cifs_sb;
347 struct cifsTconInfo *pTcon;
348 struct cifsFileInfo *pCifsFile;
349 struct cifsInodeInfo *pCifsInode;
351 char *full_path = NULL;
353 int disposition = FILE_OPEN;
356 if (file->private_data) {
357 pCifsFile = (struct cifsFileInfo *)file->private_data;
362 down(&pCifsFile->fh_sem);
363 if (pCifsFile->invalidHandle == FALSE) {
364 up(&pCifsFile->fh_sem);
369 if (file->f_path.dentry == NULL) {
370 cERROR(1, ("no valid name if dentry freed"));
373 goto reopen_error_exit;
376 inode = file->f_path.dentry->d_inode;
378 cERROR(1, ("inode not valid"));
381 goto reopen_error_exit;
384 cifs_sb = CIFS_SB(inode->i_sb);
385 pTcon = cifs_sb->tcon;
	/* We can not grab the rename sem here, because various ops (including
	   some that already hold it) can end up calling writepage; if the
	   server was down, that path brings us here, and we have no way to
	   tell whether the caller already holds the rename_sem */
391 full_path = build_path_from_dentry(file->f_path.dentry);
392 if (full_path == NULL) {
395 up(&pCifsFile->fh_sem);
400 cFYI(1, ("inode = 0x%p file flags 0x%x for %s",
401 inode, file->f_flags, full_path));
402 desiredAccess = cifs_convert_flags(file->f_flags);
409 /* Can not refresh inode by passing in file_info buf to be returned
410 by SMBOpen and then calling get_inode_info with returned buf
411 since file might have write behind data that needs to be flushed
412 and server version of file size can be stale. If we knew for sure
413 that inode was not dirty locally we could do this */
415 rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, desiredAccess,
416 CREATE_NOT_DIR, &netfid, &oplock, NULL,
417 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
418 CIFS_MOUNT_MAP_SPECIAL_CHR);
420 up(&pCifsFile->fh_sem);
421 cFYI(1, ("cifs_open returned 0x%x", rc));
422 cFYI(1, ("oplock: %d", oplock));
424 pCifsFile->netfid = netfid;
425 pCifsFile->invalidHandle = FALSE;
426 up(&pCifsFile->fh_sem);
427 pCifsInode = CIFS_I(inode);
430 rc = filemap_write_and_wait(inode->i_mapping);
432 CIFS_I(inode)->write_behind_rc = rc;
433 /* temporarily disable caching while we
434 go to server to get inode info */
435 pCifsInode->clientCanCacheAll = FALSE;
436 pCifsInode->clientCanCacheRead = FALSE;
438 rc = cifs_get_inode_info_unix(&inode,
439 full_path, inode->i_sb, xid);
441 rc = cifs_get_inode_info(&inode,
442 full_path, NULL, inode->i_sb,
	} /* else we are writing out data to the server already
	     and could deadlock if we tried to flush data, and
	     since we do not know if we have data that would
	     invalidate the current end of file on the server,
	     we can not go to the server to get the new inode info */
450 if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
451 pCifsInode->clientCanCacheAll = TRUE;
452 pCifsInode->clientCanCacheRead = TRUE;
453 cFYI(1, ("Exclusive Oplock granted on inode %p",
454 file->f_path.dentry->d_inode));
455 } else if ((oplock & 0xF) == OPLOCK_READ) {
456 pCifsInode->clientCanCacheRead = TRUE;
457 pCifsInode->clientCanCacheAll = FALSE;
459 pCifsInode->clientCanCacheRead = FALSE;
460 pCifsInode->clientCanCacheAll = FALSE;
462 cifs_relock_file(pCifsFile);
471 int cifs_close(struct inode *inode, struct file *file)
475 struct cifs_sb_info *cifs_sb;
476 struct cifsTconInfo *pTcon;
477 struct cifsFileInfo *pSMBFile =
478 (struct cifsFileInfo *)file->private_data;
482 cifs_sb = CIFS_SB(inode->i_sb);
483 pTcon = cifs_sb->tcon;
485 struct cifsLockInfo *li, *tmp;
487 pSMBFile->closePend = TRUE;
	/* no sense reconnecting to close a file that is
	   already closed */
	if (pTcon->tidStatus != CifsNeedReconnect) {
493 while ((atomic_read(&pSMBFile->wrtPending) != 0)
494 && (timeout <= 2048)) {
			/* Give the write a better chance to reach the
			   server ahead of the close.  We do not want to
			   add a wait queue here, since that would grow
			   every open file struct, but this short delay
			   should give any pending write time to make it
			   to the server */
502 #ifdef CONFIG_CIFS_DEBUG2
503 cFYI(1, ("close delay, write pending"));
508 if (atomic_read(&pSMBFile->wrtPending))
510 ("close with pending writes"));
511 rc = CIFSSMBClose(xid, pTcon,
516 /* Delete any outstanding lock records.
517 We'll lose them when the file is closed anyway. */
518 mutex_lock(&pSMBFile->lock_mutex);
519 list_for_each_entry_safe(li, tmp, &pSMBFile->llist, llist) {
520 list_del(&li->llist);
523 mutex_unlock(&pSMBFile->lock_mutex);
525 write_lock(&GlobalSMBSeslock);
526 list_del(&pSMBFile->flist);
527 list_del(&pSMBFile->tlist);
528 write_unlock(&GlobalSMBSeslock);
	/* We waited above to give the SMBWrite a chance to issue on the
	   wire (so we do not get SMBWrite returning -EBADF if writepages
	   races with close).  Note that writepages does not specify a file
	   handle, so it is possible for a file to be opened twice and for
	   the application to close the "wrong" file handle - in these cases
	   we delay long enough to allow the SMBWrite to get on the wire
	   before the SMB Close.  We allow a total wait here of over 45
	   seconds, more than the oplock break time, and more than enough
	   to allow any write to complete on the server, or to time out on
	   the client */
540 while ((atomic_read(&pSMBFile->wrtPending) != 0)
541 && (timeout <= 50000)) {
542 cERROR(1, ("writes pending, delay free of handle"));
546 kfree(pSMBFile->search_resume_name);
547 kfree(file->private_data);
548 file->private_data = NULL;
552 read_lock(&GlobalSMBSeslock);
553 if (list_empty(&(CIFS_I(inode)->openFileList))) {
554 cFYI(1, ("closing last open instance for inode %p", inode));
555 /* if the file is not open we do not know if we can cache info
556 on this inode, much less write behind and read ahead */
557 CIFS_I(inode)->clientCanCacheRead = FALSE;
558 CIFS_I(inode)->clientCanCacheAll = FALSE;
560 read_unlock(&GlobalSMBSeslock);
561 if ((rc == 0) && CIFS_I(inode)->write_behind_rc)
562 rc = CIFS_I(inode)->write_behind_rc;
567 int cifs_closedir(struct inode *inode, struct file *file)
571 struct cifsFileInfo *pCFileStruct =
572 (struct cifsFileInfo *)file->private_data;
575 cFYI(1, ("Closedir inode = 0x%p", inode));
580 struct cifsTconInfo *pTcon;
581 struct cifs_sb_info *cifs_sb =
582 CIFS_SB(file->f_path.dentry->d_sb);
584 pTcon = cifs_sb->tcon;
586 cFYI(1, ("Freeing private data in close dir"));
587 if ((pCFileStruct->srch_inf.endOfSearch == FALSE) &&
588 (pCFileStruct->invalidHandle == FALSE)) {
589 pCFileStruct->invalidHandle = TRUE;
590 rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
			cFYI(1, ("Closing uncompleted readdir with rc %d",
				 rc));
			/* not much we can do if it fails anyway, ignore rc */
596 ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
598 cFYI(1, ("closedir free smb buf in srch struct"));
599 pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
600 if (pCFileStruct->srch_inf.smallBuf)
601 cifs_small_buf_release(ptmp);
603 cifs_buf_release(ptmp);
605 ptmp = pCFileStruct->search_resume_name;
607 cFYI(1, ("closedir free resume name"));
608 pCFileStruct->search_resume_name = NULL;
611 kfree(file->private_data);
612 file->private_data = NULL;
614 /* BB can we lock the filestruct while this is going on? */
619 static int store_file_lock(struct cifsFileInfo *fid, __u64 len,
620 __u64 offset, __u8 lockType)
622 struct cifsLockInfo *li =
623 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
629 mutex_lock(&fid->lock_mutex);
630 list_add(&li->llist, &fid->llist);
631 mutex_unlock(&fid->lock_mutex);
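	/* The locks remembered here are the Windows-style (non-POSIX)
	   byte-range locks; cifs_lock() walks fid->llist on unlock and
	   explicitly releases every stored lock that the unlock range
	   completely covers */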
635 int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
641 int wait_flag = FALSE;
642 struct cifs_sb_info *cifs_sb;
643 struct cifsTconInfo *pTcon;
645 __u8 lockType = LOCKING_ANDX_LARGE_FILES;
648 length = 1 + pfLock->fl_end - pfLock->fl_start;
	cFYI(1, ("Lock parm: 0x%x flockflags: "
		 "0x%x flocktype: 0x%x start: %lld end: %lld",
		 cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
		 pfLock->fl_end));
657 if (pfLock->fl_flags & FL_POSIX)
659 if (pfLock->fl_flags & FL_FLOCK)
661 if (pfLock->fl_flags & FL_SLEEP) {
662 cFYI(1, ("Blocking lock"));
665 if (pfLock->fl_flags & FL_ACCESS)
666 cFYI(1, ("Process suspended by mandatory locking - "
667 "not implemented yet"));
668 if (pfLock->fl_flags & FL_LEASE)
669 cFYI(1, ("Lease on file - not implemented yet"));
670 if (pfLock->fl_flags &
671 (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
672 cFYI(1, ("Unknown lock flags 0x%x", pfLock->fl_flags));
674 if (pfLock->fl_type == F_WRLCK) {
675 cFYI(1, ("F_WRLCK "));
677 } else if (pfLock->fl_type == F_UNLCK) {
678 cFYI(1, ("F_UNLCK"));
680 /* Check if unlock includes more than
682 } else if (pfLock->fl_type == F_RDLCK) {
683 cFYI(1, ("F_RDLCK"));
684 lockType |= LOCKING_ANDX_SHARED_LOCK;
686 } else if (pfLock->fl_type == F_EXLCK) {
687 cFYI(1, ("F_EXLCK"));
689 } else if (pfLock->fl_type == F_SHLCK) {
690 cFYI(1, ("F_SHLCK"));
691 lockType |= LOCKING_ANDX_SHARED_LOCK;
694 cFYI(1, ("Unknown type of lock"));
696 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
697 pTcon = cifs_sb->tcon;
699 if (file->private_data == NULL) {
703 netfid = ((struct cifsFileInfo *)file->private_data)->netfid;
705 posix_locking = (cifs_sb->tcon->ses->capabilities & CAP_UNIX) &&
706 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(cifs_sb->tcon->fsUnixInfo.Capability));
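	/* Byte-range locks go out in one of two forms: if the server supports
	   the CIFS Unix Extensions FCNTL capability we send POSIX lock
	   requests (CIFSSMBPosixLock); otherwise we fall back to Windows
	   style LOCKING_ANDX requests (CIFSSMBLock) and remember granted
	   locks locally via store_file_lock() so they can be released on
	   unlock */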
	/* BB add code here to normalize offset and length to
	   account for negative length, which we can not accept over the
	   wire */
714 if (lockType & LOCKING_ANDX_SHARED_LOCK)
715 posix_lock_type = CIFS_RDLCK;
717 posix_lock_type = CIFS_WRLCK;
718 rc = CIFSSMBPosixLock(xid, pTcon, netfid, 1 /* get */,
720 posix_lock_type, wait_flag);
725 /* BB we could chain these into one lock request BB */
726 rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start,
727 0, 1, lockType, 0 /* wait flag */ );
729 rc = CIFSSMBLock(xid, pTcon, netfid, length,
730 pfLock->fl_start, 1 /* numUnlock */ ,
731 0 /* numLock */ , lockType,
733 pfLock->fl_type = F_UNLCK;
735 cERROR(1, ("Error unlocking previously locked "
736 "range %d during test of lock", rc));
740 /* if rc == ERR_SHARING_VIOLATION ? */
741 rc = 0; /* do not change lock type to unlock
742 since range in use */
749 if (!numLock && !numUnlock) {
750 /* if no lock or unlock then nothing
751 to do since we do not know what it is */
758 if (lockType & LOCKING_ANDX_SHARED_LOCK)
759 posix_lock_type = CIFS_RDLCK;
761 posix_lock_type = CIFS_WRLCK;
764 posix_lock_type = CIFS_UNLCK;
766 rc = CIFSSMBPosixLock(xid, pTcon, netfid, 0 /* set */,
768 posix_lock_type, wait_flag);
770 struct cifsFileInfo *fid =
771 (struct cifsFileInfo *)file->private_data;
774 rc = CIFSSMBLock(xid, pTcon, netfid, length,
776 0, numLock, lockType, wait_flag);
779 /* For Windows locks we must store them. */
780 rc = store_file_lock(fid, length,
781 pfLock->fl_start, lockType);
783 } else if (numUnlock) {
784 /* For each stored lock that this unlock overlaps
785 completely, unlock it. */
787 struct cifsLockInfo *li, *tmp;
790 mutex_lock(&fid->lock_mutex);
791 list_for_each_entry_safe(li, tmp, &fid->llist, llist) {
792 if (pfLock->fl_start <= li->offset &&
793 (pfLock->fl_start + length) >=
794 (li->offset + li->length)) {
795 stored_rc = CIFSSMBLock(xid, pTcon,
797 li->length, li->offset,
798 1, 0, li->type, FALSE);
802 list_del(&li->llist);
806 mutex_unlock(&fid->lock_mutex);
810 if (pfLock->fl_flags & FL_POSIX)
811 posix_lock_file_wait(file, pfLock);
816 ssize_t cifs_user_write(struct file *file, const char __user *write_data,
817 size_t write_size, loff_t *poffset)
820 unsigned int bytes_written = 0;
821 unsigned int total_written;
822 struct cifs_sb_info *cifs_sb;
823 struct cifsTconInfo *pTcon;
825 struct cifsFileInfo *open_file;
827 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
829 pTcon = cifs_sb->tcon;
	/* cFYI(1, (" write %d bytes to offset %lld of %s", write_size,
	   *poffset, file->f_path.dentry->d_name.name)); */
835 if (file->private_data == NULL)
837 open_file = (struct cifsFileInfo *) file->private_data;
841 if (*poffset > file->f_path.dentry->d_inode->i_size)
842 long_op = CIFS_VLONG_OP; /* writes past EOF take long time */
844 long_op = CIFS_LONG_OP;
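	/* long_op selects how long we are willing to wait for the server's
	   response: writes that extend the file past EOF can take the server
	   a long time, so start with the more generous timeout and drop back
	   to the standard one once the first write has succeeded (see the
	   CIFS_STD_OP note below) */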
846 for (total_written = 0; write_size > total_written;
847 total_written += bytes_written) {
849 while (rc == -EAGAIN) {
850 if (file->private_data == NULL) {
851 /* file has been closed on us */
				/* if we have gotten here we have written some
				   data and blocked, and the file has been
				   freed on us while we blocked, so return
				   what we managed to write */
				return total_written;
858 if (open_file->closePend) {
861 return total_written;
865 if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here, so tell
				   reopen_file not to flush data to the
				   server now */
				rc = cifs_reopen_file(file, FALSE);
875 rc = CIFSSMBWrite(xid, pTcon,
877 min_t(const int, cifs_sb->wsize,
878 write_size - total_written),
879 *poffset, &bytes_written,
880 NULL, write_data + total_written, long_op);
882 if (rc || (bytes_written == 0)) {
890 *poffset += bytes_written;
891 long_op = CIFS_STD_OP; /* subsequent writes fast -
892 15 seconds is plenty */
895 cifs_stats_bytes_written(pTcon, total_written);
897 /* since the write may have blocked check these pointers again */
898 if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
899 struct inode *inode = file->f_path.dentry->d_inode;
900 /* Do not update local mtime - server will set its actual value on write
901 * inode->i_ctime = inode->i_mtime =
902 * current_fs_time(inode->i_sb);*/
903 if (total_written > 0) {
904 spin_lock(&inode->i_lock);
905 if (*poffset > file->f_path.dentry->d_inode->i_size)
906 i_size_write(file->f_path.dentry->d_inode,
908 spin_unlock(&inode->i_lock);
910 mark_inode_dirty_sync(file->f_path.dentry->d_inode);
913 return total_written;
916 static ssize_t cifs_write(struct file *file, const char *write_data,
917 size_t write_size, loff_t *poffset)
920 unsigned int bytes_written = 0;
921 unsigned int total_written;
922 struct cifs_sb_info *cifs_sb;
923 struct cifsTconInfo *pTcon;
925 struct cifsFileInfo *open_file;
927 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
929 pTcon = cifs_sb->tcon;
931 cFYI(1, ("write %zd bytes to offset %lld of %s", write_size,
932 *poffset, file->f_path.dentry->d_name.name));
934 if (file->private_data == NULL)
936 open_file = (struct cifsFileInfo *)file->private_data;
940 if (*poffset > file->f_path.dentry->d_inode->i_size)
941 long_op = CIFS_VLONG_OP; /* writes past EOF can be slow */
943 long_op = CIFS_LONG_OP;
945 for (total_written = 0; write_size > total_written;
946 total_written += bytes_written) {
948 while (rc == -EAGAIN) {
949 if (file->private_data == NULL) {
950 /* file has been closed on us */
				/* if we have gotten here we have written some
				   data and blocked, and the file has been
				   freed on us while we blocked, so return
				   what we managed to write */
				return total_written;
958 if (open_file->closePend) {
961 return total_written;
965 if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here, so tell
				   reopen_file not to flush data to the
				   server now */
				rc = cifs_reopen_file(file, FALSE);
974 if (experimEnabled || (pTcon->ses->server &&
975 ((pTcon->ses->server->secMode &
976 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
981 len = min((size_t)cifs_sb->wsize,
982 write_size - total_written);
983 /* iov[0] is reserved for smb header */
984 iov[1].iov_base = (char *)write_data +
986 iov[1].iov_len = len;
987 rc = CIFSSMBWrite2(xid, pTcon,
988 open_file->netfid, len,
989 *poffset, &bytes_written,
992 rc = CIFSSMBWrite(xid, pTcon,
994 min_t(const int, cifs_sb->wsize,
995 write_size - total_written),
996 *poffset, &bytes_written,
997 write_data + total_written,
1000 if (rc || (bytes_written == 0)) {
1008 *poffset += bytes_written;
1009 long_op = CIFS_STD_OP; /* subsequent writes fast -
1010 15 seconds is plenty */
1013 cifs_stats_bytes_written(pTcon, total_written);
1015 /* since the write may have blocked check these pointers again */
1016 if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
1017 /*BB We could make this contingent on superblock ATIME flag too */
1018 /* file->f_path.dentry->d_inode->i_ctime =
1019 file->f_path.dentry->d_inode->i_mtime = CURRENT_TIME;*/
1020 if (total_written > 0) {
1021 spin_lock(&file->f_path.dentry->d_inode->i_lock);
1022 if (*poffset > file->f_path.dentry->d_inode->i_size)
1023 i_size_write(file->f_path.dentry->d_inode,
1025 spin_unlock(&file->f_path.dentry->d_inode->i_lock);
1027 mark_inode_dirty_sync(file->f_path.dentry->d_inode);
1030 return total_written;
1033 #ifdef CONFIG_CIFS_EXPERIMENTAL
1034 struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode)
1036 struct cifsFileInfo *open_file = NULL;
1038 read_lock(&GlobalSMBSeslock);
1039 /* we could simply get the first_list_entry since write-only entries
1040 are always at the end of the list but since the first entry might
1041 have a close pending, we go through the whole list */
1042 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
1043 if (open_file->closePend)
1045 if (open_file->pfile && ((open_file->pfile->f_flags & O_RDWR) ||
1046 (open_file->pfile->f_flags & O_RDONLY))) {
1047 if (!open_file->invalidHandle) {
1048 /* found a good file */
1049 /* lock it so it will not be closed on us */
1050 atomic_inc(&open_file->wrtPending);
1051 read_unlock(&GlobalSMBSeslock);
1053 } /* else might as well continue, and look for
1054 another, or simply have the caller reopen it
1055 again rather than trying to fix this handle */
1056 } else /* write only file */
1057 break; /* write only files are last so must be done */
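		/* write-only handles sort to the end because
		   cifs_open_inode_helper adds them with list_add_tail,
		   so once we see one we are done */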
1059 read_unlock(&GlobalSMBSeslock);
1064 struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode)
1066 struct cifsFileInfo *open_file;
	/* Having a null inode here (because mapping->host was set to zero by
	   the VFS or MM) should not happen, but we had reports of an oops
	   (due to it being zero) during stress test cases, so we need to
	   check for it */
1073 if (cifs_inode == NULL) {
1074 cERROR(1, ("Null inode passed to cifs_writeable_file"));
1079 read_lock(&GlobalSMBSeslock);
1081 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
1082 if (open_file->closePend)
1084 if (open_file->pfile &&
1085 ((open_file->pfile->f_flags & O_RDWR) ||
1086 (open_file->pfile->f_flags & O_WRONLY))) {
1087 atomic_inc(&open_file->wrtPending);
1089 if (!open_file->invalidHandle) {
1090 /* found a good writable file */
1091 read_unlock(&GlobalSMBSeslock);
1095 read_unlock(&GlobalSMBSeslock);
1096 /* Had to unlock since following call can block */
1097 rc = cifs_reopen_file(open_file->pfile, FALSE);
1099 if (!open_file->closePend)
1101 else { /* start over in case this was deleted */
1102 /* since the list could be modified */
1103 read_lock(&GlobalSMBSeslock);
1104 atomic_dec(&open_file->wrtPending);
1105 goto refind_writable;
1109 /* if it fails, try another handle if possible -
1110 (we can not do this if closePending since
1111 loop could be modified - in which case we
1112 have to start at the beginning of the list
1113 again. Note that it would be bad
1114 to hold up writepages here (rather than
1115 in caller) with continuous retries */
1116 cFYI(1, ("wp failed on reopen file"));
1117 read_lock(&GlobalSMBSeslock);
1118 /* can not use this handle, no write
1119 pending on this one after all */
1120 atomic_dec(&open_file->wrtPending);
1122 if (open_file->closePend) /* list could have changed */
1123 goto refind_writable;
1124 /* else we simply continue to the next entry. Thus
1125 we do not loop on reopen errors. If we
1126 can not reopen the file, for example if we
1127 reconnected to a server with another client
1128 racing to delete or lock the file we would not
1129 make progress if we restarted before the beginning
1130 of the loop here. */
1133 read_unlock(&GlobalSMBSeslock);
1137 static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1139 struct address_space *mapping = page->mapping;
1140 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1143 int bytes_written = 0;
1144 struct cifs_sb_info *cifs_sb;
1145 struct cifsTconInfo *pTcon;
1146 struct inode *inode;
1147 struct cifsFileInfo *open_file;
1149 if (!mapping || !mapping->host)
1152 inode = page->mapping->host;
1153 cifs_sb = CIFS_SB(inode->i_sb);
1154 pTcon = cifs_sb->tcon;
1156 offset += (loff_t)from;
1157 write_data = kmap(page);
1160 if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1165 /* racing with truncate? */
1166 if (offset > mapping->host->i_size) {
1168 return 0; /* don't care */
1171 /* check to make sure that we are not extending the file */
1172 if (mapping->host->i_size - offset < (loff_t)to)
1173 to = (unsigned)(mapping->host->i_size - offset);
1175 open_file = find_writable_file(CIFS_I(mapping->host));
1177 bytes_written = cifs_write(open_file->pfile, write_data,
1179 atomic_dec(&open_file->wrtPending);
1180 /* Does mm or vfs already set times? */
1181 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
1182 if ((bytes_written > 0) && (offset)) {
1184 } else if (bytes_written < 0) {
1189 cFYI(1, ("No writeable filehandles for inode"));
1197 static int cifs_writepages(struct address_space *mapping,
1198 struct writeback_control *wbc)
1200 struct backing_dev_info *bdi = mapping->backing_dev_info;
1201 unsigned int bytes_to_write;
1202 unsigned int bytes_written;
1203 struct cifs_sb_info *cifs_sb;
1207 int range_whole = 0;
1214 struct cifsFileInfo *open_file;
1216 struct pagevec pvec;
1221 cifs_sb = CIFS_SB(mapping->host->i_sb);
	 * If wsize is smaller than the page cache size, default to writing
	 * one page at a time via cifs_writepage
1227 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1228 return generic_writepages(mapping, wbc);
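	/*
	 * When SMB signing is required or enabled, only use the multi-page
	 * CIFSSMBWrite2 path below if experimental support is turned on;
	 * otherwise fall back to generic_writepages (one page at a time).
	 * This mirrors the equivalent check in cifs_write().
	 */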
1230 if ((cifs_sb->tcon->ses) && (cifs_sb->tcon->ses->server))
1231 if (cifs_sb->tcon->ses->server->secMode &
1232 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
1233 if (!experimEnabled)
1234 return generic_writepages(mapping, wbc);
1236 iov = kmalloc(32 * sizeof(struct kvec), GFP_KERNEL);
1238 return generic_writepages(mapping, wbc);
1242 * BB: Is this meaningful for a non-block-device file system?
1243 * If it is, we should test it again after we do I/O
1245 if (wbc->nonblocking && bdi_write_congested(bdi)) {
1246 wbc->encountered_congestion = 1;
1253 pagevec_init(&pvec, 0);
1254 if (wbc->range_cyclic) {
1255 index = mapping->writeback_index; /* Start from prev offset */
1258 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1259 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1260 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
1265 while (!done && (index <= end) &&
1266 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
1267 PAGECACHE_TAG_DIRTY,
1268 min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
1277 for (i = 0; i < nr_pages; i++) {
1278 page = pvec.pages[i];
1280 * At this point we hold neither mapping->tree_lock nor
1281 * lock on the page itself: the page may be truncated or
1282 * invalidated (changing page->mapping to NULL), or even
1283 * swizzled back from swapper_space to tmpfs file
1289 else if (TestSetPageLocked(page))
1292 if (unlikely(page->mapping != mapping)) {
1297 if (!wbc->range_cyclic && page->index > end) {
1303 if (next && (page->index != next)) {
1304 /* Not next consecutive page */
1309 if (wbc->sync_mode != WB_SYNC_NONE)
1310 wait_on_page_writeback(page);
1312 if (PageWriteback(page) ||
1313 !clear_page_dirty_for_io(page)) {
1319 * This actually clears the dirty bit in the radix tree.
1320 * See cifs_writepage() for more commentary.
1322 set_page_writeback(page);
1324 if (page_offset(page) >= mapping->host->i_size) {
1327 end_page_writeback(page);
1332 * BB can we get rid of this? pages are held by pvec
1334 page_cache_get(page);
1336 len = min(mapping->host->i_size - page_offset(page),
1337 (loff_t)PAGE_CACHE_SIZE);
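			/* Dirty pages are batched into iov[] (one kvec per
			   page) until either the next page is not contiguous
			   or adding another page would exceed the negotiated
			   wsize; the whole batch then goes out as a single
			   CIFSSMBWrite2 request */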
1339 /* reserve iov[0] for the smb header */
1341 iov[n_iov].iov_base = kmap(page);
1342 iov[n_iov].iov_len = len;
1343 bytes_to_write += len;
1347 offset = page_offset(page);
1349 next = page->index + 1;
1350 if (bytes_to_write + PAGE_CACHE_SIZE > cifs_sb->wsize)
			/* Search for a writable handle every time we call
			 * CIFSSMBWrite2.  We can't rely on the last handle
			 * we used to still be valid
			 */
			open_file = find_writable_file(CIFS_I(mapping->host));
1360 cERROR(1, ("No writable handles for inode"));
1363 rc = CIFSSMBWrite2(xid, cifs_sb->tcon,
1365 bytes_to_write, offset,
1366 &bytes_written, iov, n_iov,
1368 atomic_dec(&open_file->wrtPending);
1369 if (rc || bytes_written < bytes_to_write) {
1370 cERROR(1, ("Write2 ret %d, wrote %d",
1371 rc, bytes_written));
1372 /* BB what if continued retry is
1373 requested via mount flags? */
1375 set_bit(AS_ENOSPC, &mapping->flags);
1377 set_bit(AS_EIO, &mapping->flags);
1379 cifs_stats_bytes_written(cifs_sb->tcon,
1383 for (i = 0; i < n_iov; i++) {
1384 page = pvec.pages[first + i];
1385 /* Should we also set page error on
1386 success rc but too little data written? */
1387 /* BB investigate retry logic on temporary
1388 server crash cases and how recovery works
1389 when page marked as error */
1394 end_page_writeback(page);
1395 page_cache_release(page);
1397 if ((wbc->nr_to_write -= n_iov) <= 0)
1401 pagevec_release(&pvec);
1403 if (!scanned && !done) {
1405 * We hit the last page and there is more work to be done: wrap
1406 * back to the start of the file
1412 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
1413 mapping->writeback_index = index;
1420 static int cifs_writepage(struct page *page, struct writeback_control *wbc)
1426 /* BB add check for wbc flags */
1427 page_cache_get(page);
1428 if (!PageUptodate(page)) {
1429 cFYI(1, ("ppw - page not up to date"));
1433 * Set the "writeback" flag, and clear "dirty" in the radix tree.
1435 * A writepage() implementation always needs to do either this,
1436 * or re-dirty the page with "redirty_page_for_writepage()" in
1437 * the case of a failure.
1439 * Just unlocking the page will cause the radix tree tag-bits
1440 * to fail to update with the state of the page correctly.
1442 set_page_writeback(page);
1443 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
1444 SetPageUptodate(page); /* BB add check for error and Clearuptodate? */
1446 end_page_writeback(page);
1447 page_cache_release(page);
1452 static int cifs_commit_write(struct file *file, struct page *page,
1453 unsigned offset, unsigned to)
1457 struct inode *inode = page->mapping->host;
1458 loff_t position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
1462 cFYI(1, ("commit write for page %p up to position %lld for %d",
1463 page, position, to));
1464 spin_lock(&inode->i_lock);
1465 if (position > inode->i_size) {
1466 i_size_write(inode, position);
1468 spin_unlock(&inode->i_lock);
1469 if (!PageUptodate(page)) {
1470 position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + offset;
1471 /* can not rely on (or let) writepage write this data */
1473 cFYI(1, ("Illegal offsets, can not copy from %d to %d",
1478 /* this is probably better than directly calling
1479 partialpage_write since in this function the file handle is
1480 known which we might as well leverage */
1481 /* BB check if anything else missing out of ppw
1482 such as updating last write time */
1483 page_data = kmap(page);
1484 rc = cifs_write(file, page_data + offset, to-offset,
1488 /* else if (rc < 0) should we set writebehind rc? */
1491 set_page_dirty(page);
1498 int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
1502 struct inode *inode = file->f_path.dentry->d_inode;
1506 cFYI(1, ("Sync file - name: %s datasync: 0x%x",
1507 dentry->d_name.name, datasync));
1509 rc = filemap_write_and_wait(inode->i_mapping);
1511 rc = CIFS_I(inode)->write_behind_rc;
1512 CIFS_I(inode)->write_behind_rc = 0;
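	/* write_behind_rc stores any error hit by earlier asynchronous
	   writepage calls; fsync (like flush and close) reports it to the
	   caller once and then clears it */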
1518 /* static void cifs_sync_page(struct page *page)
1520 struct address_space *mapping;
1521 struct inode *inode;
1522 unsigned long index = page->index;
1523 unsigned int rpages = 0;
1526 cFYI(1, ("sync page %p",page));
1527 mapping = page->mapping;
1530 inode = mapping->host;
1534 /* fill in rpages then
1535 result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
1537 /* cFYI(1, ("rpages is %d for sync page of Index %ld", rpages, index));
1547 * As file closes, flush all cached write data for this inode checking
1548 * for write behind errors.
1550 int cifs_flush(struct file *file, fl_owner_t id)
1552 struct inode *inode = file->f_path.dentry->d_inode;
1555 /* Rather than do the steps manually:
1556 lock the inode for writing
1557 loop through pages looking for write behind data (dirty pages)
1558 coalesce into contiguous 16K (or smaller) chunks to write to server
1559 send to server (prefer in parallel)
1560 deal with writebehind errors
1561 unlock inode for writing
1562 filemapfdatawrite appears easier for the time being */
1564 rc = filemap_fdatawrite(inode->i_mapping);
1565 /* reset wb rc if we were able to write out dirty pages */
1567 rc = CIFS_I(inode)->write_behind_rc;
1568 CIFS_I(inode)->write_behind_rc = 0;
1571 cFYI(1, ("Flush inode %p file %p rc %d", inode, file, rc));
1576 ssize_t cifs_user_read(struct file *file, char __user *read_data,
1577 size_t read_size, loff_t *poffset)
1580 unsigned int bytes_read = 0;
1581 unsigned int total_read = 0;
1582 unsigned int current_read_size;
1583 struct cifs_sb_info *cifs_sb;
1584 struct cifsTconInfo *pTcon;
1586 struct cifsFileInfo *open_file;
1587 char *smb_read_data;
1588 char __user *current_offset;
1589 struct smb_com_read_rsp *pSMBr;
1592 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1593 pTcon = cifs_sb->tcon;
1595 if (file->private_data == NULL) {
1599 open_file = (struct cifsFileInfo *)file->private_data;
1601 if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
1602 cFYI(1, ("attempting read on write only file instance"));
1604 for (total_read = 0, current_offset = read_data;
1605 read_size > total_read;
1606 total_read += bytes_read, current_offset += bytes_read) {
1607 current_read_size = min_t(const int, read_size - total_read,
1610 smb_read_data = NULL;
1611 while (rc == -EAGAIN) {
1612 int buf_type = CIFS_NO_BUFFER;
1613 if ((open_file->invalidHandle) &&
1614 (!open_file->closePend)) {
1615 rc = cifs_reopen_file(file, TRUE);
1619 rc = CIFSSMBRead(xid, pTcon,
1621 current_read_size, *poffset,
1622 &bytes_read, &smb_read_data,
1624 pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1625 if (smb_read_data) {
1626 if (copy_to_user(current_offset,
1628 4 /* RFC1001 length field */ +
1629 le16_to_cpu(pSMBr->DataOffset),
1634 if (buf_type == CIFS_SMALL_BUFFER)
1635 cifs_small_buf_release(smb_read_data);
1636 else if (buf_type == CIFS_LARGE_BUFFER)
1637 cifs_buf_release(smb_read_data);
1638 smb_read_data = NULL;
1641 if (rc || (bytes_read == 0)) {
1649 cifs_stats_bytes_read(pTcon, bytes_read);
1650 *poffset += bytes_read;
1658 static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
1662 unsigned int bytes_read = 0;
1663 unsigned int total_read;
1664 unsigned int current_read_size;
1665 struct cifs_sb_info *cifs_sb;
1666 struct cifsTconInfo *pTcon;
1668 char *current_offset;
1669 struct cifsFileInfo *open_file;
1670 int buf_type = CIFS_NO_BUFFER;
1673 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1674 pTcon = cifs_sb->tcon;
1676 if (file->private_data == NULL) {
1680 open_file = (struct cifsFileInfo *)file->private_data;
1682 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
1683 cFYI(1, ("attempting read on write only file instance"));
1685 for (total_read = 0, current_offset = read_data;
1686 read_size > total_read;
1687 total_read += bytes_read, current_offset += bytes_read) {
1688 current_read_size = min_t(const int, read_size - total_read,
		/* For Windows ME and 9x we do not want to request more
		   than it negotiated, since it will refuse the read then */
1693 !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
1694 current_read_size = min_t(const int, current_read_size,
1695 pTcon->ses->server->maxBuf - 128);
1698 while (rc == -EAGAIN) {
1699 if ((open_file->invalidHandle) &&
1700 (!open_file->closePend)) {
1701 rc = cifs_reopen_file(file, TRUE);
1705 rc = CIFSSMBRead(xid, pTcon,
1707 current_read_size, *poffset,
					 &bytes_read, &current_offset,
1711 if (rc || (bytes_read == 0)) {
1719 cifs_stats_bytes_read(pTcon, total_read);
1720 *poffset += bytes_read;
1727 int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
1729 struct dentry *dentry = file->f_path.dentry;
1733 rc = cifs_revalidate(dentry);
1735 cFYI(1, ("Validation prior to mmap failed, error=%d", rc));
1739 rc = generic_file_mmap(file, vma);
1745 static void cifs_copy_cache_pages(struct address_space *mapping,
1746 struct list_head *pages, int bytes_read, char *data,
1747 struct pagevec *plru_pvec)
1752 while (bytes_read > 0) {
1753 if (list_empty(pages))
1756 page = list_entry(pages->prev, struct page, lru);
1757 list_del(&page->lru);
1759 if (add_to_page_cache(page, mapping, page->index,
1761 page_cache_release(page);
1762 cFYI(1, ("Add page cache failed"));
1763 data += PAGE_CACHE_SIZE;
1764 bytes_read -= PAGE_CACHE_SIZE;
1768 target = kmap_atomic(page, KM_USER0);
1770 if (PAGE_CACHE_SIZE > bytes_read) {
1771 memcpy(target, data, bytes_read);
1772 /* zero the tail end of this partial page */
1773 memset(target + bytes_read, 0,
1774 PAGE_CACHE_SIZE - bytes_read);
1777 memcpy(target, data, PAGE_CACHE_SIZE);
1778 bytes_read -= PAGE_CACHE_SIZE;
1780 kunmap_atomic(target, KM_USER0);
1782 flush_dcache_page(page);
1783 SetPageUptodate(page);
1785 if (!pagevec_add(plru_pvec, page))
1786 __pagevec_lru_add(plru_pvec);
1787 data += PAGE_CACHE_SIZE;
1792 static int cifs_readpages(struct file *file, struct address_space *mapping,
1793 struct list_head *page_list, unsigned num_pages)
1799 struct cifs_sb_info *cifs_sb;
1800 struct cifsTconInfo *pTcon;
1801 unsigned int bytes_read = 0;
1802 unsigned int read_size, i;
1803 char *smb_read_data = NULL;
1804 struct smb_com_read_rsp *pSMBr;
1805 struct pagevec lru_pvec;
1806 struct cifsFileInfo *open_file;
1807 int buf_type = CIFS_NO_BUFFER;
1810 if (file->private_data == NULL) {
1814 open_file = (struct cifsFileInfo *)file->private_data;
1815 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1816 pTcon = cifs_sb->tcon;
1818 pagevec_init(&lru_pvec, 0);
1819 #ifdef CONFIG_CIFS_DEBUG2
1820 cFYI(1, ("rpages: num pages %d", num_pages));
1822 for (i = 0; i < num_pages; ) {
1823 unsigned contig_pages;
1824 struct page *tmp_page;
1825 unsigned long expected_index;
1827 if (list_empty(page_list))
1830 page = list_entry(page_list->prev, struct page, lru);
1831 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1833 /* count adjacent pages that we will read into */
1836 list_entry(page_list->prev, struct page, lru)->index;
1837 list_for_each_entry_reverse(tmp_page, page_list, lru) {
1838 if (tmp_page->index == expected_index) {
1844 if (contig_pages + i > num_pages)
1845 contig_pages = num_pages - i;
1847 /* for reads over a certain size could initiate async
1850 read_size = contig_pages * PAGE_CACHE_SIZE;
1851 /* Read size needs to be in multiples of one page */
1852 read_size = min_t(const unsigned int, read_size,
1853 cifs_sb->rsize & PAGE_CACHE_MASK);
1854 #ifdef CONFIG_CIFS_DEBUG2
1855 cFYI(1, ("rpages: read size 0x%x contiguous pages %d",
1856 read_size, contig_pages));
1859 while (rc == -EAGAIN) {
1860 if ((open_file->invalidHandle) &&
1861 (!open_file->closePend)) {
1862 rc = cifs_reopen_file(file, TRUE);
1867 rc = CIFSSMBRead(xid, pTcon,
1870 &bytes_read, &smb_read_data,
1872 /* BB more RC checks ? */
1873 if (rc == -EAGAIN) {
1874 if (smb_read_data) {
1875 if (buf_type == CIFS_SMALL_BUFFER)
1876 cifs_small_buf_release(smb_read_data);
1877 else if (buf_type == CIFS_LARGE_BUFFER)
1878 cifs_buf_release(smb_read_data);
1879 smb_read_data = NULL;
1883 if ((rc < 0) || (smb_read_data == NULL)) {
1884 cFYI(1, ("Read error in readpages: %d", rc));
1886 } else if (bytes_read > 0) {
1887 task_io_account_read(bytes_read);
1888 pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1889 cifs_copy_cache_pages(mapping, page_list, bytes_read,
1890 smb_read_data + 4 /* RFC1001 hdr */ +
1891 le16_to_cpu(pSMBr->DataOffset), &lru_pvec);
1893 i += bytes_read >> PAGE_CACHE_SHIFT;
1894 cifs_stats_bytes_read(pTcon, bytes_read);
1895 if ((bytes_read & PAGE_CACHE_MASK) != bytes_read) {
1896 i++; /* account for partial page */
				/* server copy of file can have smaller size
				   than client */
				/* BB do we need to verify this common case ?
				   this case is ok - if we are at server EOF
				   we will hit it on next read */
1907 cFYI(1, ("No bytes read (%d) at offset %lld . "
1908 "Cleaning remaining pages from readahead list",
1909 bytes_read, offset));
1910 /* BB turn off caching and do new lookup on
1911 file size at server? */
1914 if (smb_read_data) {
1915 if (buf_type == CIFS_SMALL_BUFFER)
1916 cifs_small_buf_release(smb_read_data);
1917 else if (buf_type == CIFS_LARGE_BUFFER)
1918 cifs_buf_release(smb_read_data);
1919 smb_read_data = NULL;
1924 pagevec_lru_add(&lru_pvec);
1926 /* need to free smb_read_data buf before exit */
1927 if (smb_read_data) {
1928 if (buf_type == CIFS_SMALL_BUFFER)
1929 cifs_small_buf_release(smb_read_data);
1930 else if (buf_type == CIFS_LARGE_BUFFER)
1931 cifs_buf_release(smb_read_data);
1932 smb_read_data = NULL;
1939 static int cifs_readpage_worker(struct file *file, struct page *page,
1945 page_cache_get(page);
1946 read_data = kmap(page);
1947 /* for reads over a certain size could initiate async read ahead */
1949 rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
1954 cFYI(1, ("Bytes read %d", rc));
1956 file->f_path.dentry->d_inode->i_atime =
1957 current_fs_time(file->f_path.dentry->d_inode->i_sb);
1959 if (PAGE_CACHE_SIZE > rc)
1960 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
1962 flush_dcache_page(page);
1963 SetPageUptodate(page);
1968 page_cache_release(page);
1972 static int cifs_readpage(struct file *file, struct page *page)
1974 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1980 if (file->private_data == NULL) {
1985 cFYI(1, ("readpage %p at offset %d 0x%x\n",
1986 page, (int)offset, (int)offset));
1988 rc = cifs_readpage_worker(file, page, &offset);
1996 static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
1998 struct cifsFileInfo *open_file;
2000 read_lock(&GlobalSMBSeslock);
2001 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2002 if (open_file->closePend)
2004 if (open_file->pfile &&
2005 ((open_file->pfile->f_flags & O_RDWR) ||
2006 (open_file->pfile->f_flags & O_WRONLY))) {
2007 read_unlock(&GlobalSMBSeslock);
2011 read_unlock(&GlobalSMBSeslock);
2015 /* We do not want to update the file size from server for inodes
2016 open for write - to avoid races with writepage extending
2017 the file - in the future we could consider allowing
2018 refreshing the inode only on increases in the file size
2019 but this is tricky to do without racing with writebehind
2020 page caching in the current Linux kernel design */
2021 int is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
2026 if (is_inode_writable(cifsInode)) {
2027 /* This inode is open for write at least once */
2028 struct cifs_sb_info *cifs_sb;
2030 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
2032 /* since no page cache to corrupt on directio
2033 we can change size safely */
2037 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
2045 static int cifs_prepare_write(struct file *file, struct page *page,
2046 unsigned from, unsigned to)
2052 cFYI(1, ("prepare write for page %p from %d to %d", page, from, to));
2053 if (PageUptodate(page))
2056 /* If we are writing a full page it will be up to date,
2057 no need to read from the server */
2058 if ((to == PAGE_CACHE_SIZE) && (from == 0)) {
2059 SetPageUptodate(page);
2063 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
2064 i_size = i_size_read(page->mapping->host);
2066 if ((offset >= i_size) ||
2067 ((from == 0) && (offset + to) >= i_size)) {
2069 * We don't need to read data beyond the end of the file.
2070 * zero it, and set the page uptodate
2072 simple_prepare_write(file, page, from, to);
2073 SetPageUptodate(page);
2074 } else if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
2075 /* might as well read a page, it is fast enough */
2076 rc = cifs_readpage_worker(file, page, &offset);
2078 /* we could try using another file handle if there is one -
2079 but how would we lock it to prevent close of that handle
2080 racing with this read? In any case
2081 this will be written out by commit_write so is fine */
2084 /* we do not need to pass errors back
2085 e.g. if we do not have read access to the file
2086 because cifs_commit_write will do the right thing. -- shaggy */
const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.prepare_write = cifs_prepare_write,
	.commit_write = cifs_commit_write,
	.set_page_dirty = __set_page_dirty_nobuffers,
	/* .sync_page = cifs_sync_page, */
};
2104 * cifs_readpages requires the server to support a buffer large enough to
2105 * contain the header plus one complete page of data. Otherwise, we need
2106 * to leave cifs_readpages out of the address space operations.
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.prepare_write = cifs_prepare_write,
	.commit_write = cifs_commit_write,
	.set_page_dirty = __set_page_dirty_nobuffers,
	/* .sync_page = cifs_sync_page, */
};