/*
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines Corp., 2002,2007
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"

static inline struct cifsFileInfo *cifs_init_private(
	struct cifsFileInfo *private_data, struct inode *inode,
	struct file *file, __u16 netfid)
{
	memset(private_data, 0, sizeof(struct cifsFileInfo));
	private_data->netfid = netfid;
	private_data->pid = current->tgid;
	init_MUTEX(&private_data->fh_sem);
	mutex_init(&private_data->lock_mutex);
	INIT_LIST_HEAD(&private_data->llist);
	private_data->pfile = file; /* needed for writepage */
	private_data->pInode = inode;
	private_data->invalidHandle = FALSE;
	private_data->closePend = FALSE;
	/* We have to track the number of writers to the inode, since
	   writepages does not tell us which handle the write is for, so
	   there can be a close (overlapping with a write) of the file
	   handle that cifs_writepages chose to use */
	atomic_set(&private_data->wrtPending, 0);

	return private_data;
}
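
/* Map the O_ACCMODE bits of the POSIX open flags to the SMB desired
   access bits requested in the open call. */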
static inline int cifs_convert_flags(unsigned int flags)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request
		   and can cause unnecessary access denied on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	/* fallback for an invalid access mode (assumed default) */
	return GENERIC_READ;
}
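
/* Map the POSIX open flags to an SMB create disposition; see the
   mapping table in cifs_open() below. */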
static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}

/* all arguments to this function must be checked for validity in caller */
static inline int cifs_open_inode_helper(struct inode *inode, struct file *file,
	struct cifsInodeInfo *pCifsInode, struct cifsFileInfo *pCifsFile,
	struct cifsTconInfo *pTcon, int *oplock, FILE_ALL_INFO *buf,
	char *full_path, int xid)
{
	int rc = 0;
	struct timespec temp;

	/* want handles we can use to read with first
	   in the list so we do not have to walk the
	   list to search for one in prepare_write */
	if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
		list_add_tail(&pCifsFile->flist,
			      &pCifsInode->openFileList);
	} else {
		list_add(&pCifsFile->flist,
			 &pCifsInode->openFileList);
	}
	write_unlock(&GlobalSMBSeslock);
	if (pCifsInode->clientCanCacheRead) {
		/* we have the inode open somewhere else
		   no need to discard cache data */
		goto client_can_cache;
	}

	/* BB need same check in cifs_create too? */
	/* if not oplocked, invalidate inode pages if mtime or file
	   size changed */
	temp = cifs_NTtimeToUnix(le64_to_cpu(buf->LastWriteTime));
	if (timespec_equal(&file->f_path.dentry->d_inode->i_mtime, &temp) &&
	    (file->f_path.dentry->d_inode->i_size ==
	     (loff_t)le64_to_cpu(buf->EndOfFile))) {
		cFYI(1, ("inode unchanged on server"));
	} else {
		if (file->f_path.dentry->d_inode->i_mapping) {
			/* BB no need to lock inode until after invalidate
			   since namei code should already have it locked? */
			rc = filemap_write_and_wait(file->f_path.dentry->d_inode->i_mapping);
			if (rc != 0)
				CIFS_I(file->f_path.dentry->d_inode)->write_behind_rc = rc;
		}
		cFYI(1, ("invalidating remote inode since open detected it "
			 "changed"));
		invalidate_remote_inode(file->f_path.dentry->d_inode);
	}

client_can_cache:
	if (pTcon->unix_ext)
		rc = cifs_get_inode_info_unix(&file->f_path.dentry->d_inode,
					      full_path, inode->i_sb, xid);
	else
		rc = cifs_get_inode_info(&file->f_path.dentry->d_inode,
					 full_path, buf, inode->i_sb, xid);

	if ((*oplock & 0xF) == OPLOCK_EXCLUSIVE) {
		pCifsInode->clientCanCacheAll = TRUE;
		pCifsInode->clientCanCacheRead = TRUE;
		cFYI(1, ("Exclusive Oplock granted on inode %p",
			 file->f_path.dentry->d_inode));
	} else if ((*oplock & 0xF) == OPLOCK_READ)
		pCifsInode->clientCanCacheRead = TRUE;

	return rc;
}

int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	int xid, oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	struct cifsFileInfo *pCifsFile;
	struct cifsInodeInfo *pCifsInode;
	struct list_head *tmp;
	char *full_path = NULL;
	int desiredAccess;
	int disposition;
	__u16 netfid;
	FILE_ALL_INFO *buf = NULL;

	xid = GetXid();

	cifs_sb = CIFS_SB(inode->i_sb);
	pTcon = cifs_sb->tcon;

	if (file->f_flags & O_CREAT) {
		/* search inode for this file and fill in file->private_data */
		pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
		read_lock(&GlobalSMBSeslock);
		list_for_each(tmp, &pCifsInode->openFileList) {
			pCifsFile = list_entry(tmp, struct cifsFileInfo,
					       flist);
			if ((pCifsFile->pfile == NULL) &&
			    (pCifsFile->pid == current->tgid)) {
				/* mode set in cifs_create */

				/* needed for writepage */
				pCifsFile->pfile = file;

				file->private_data = pCifsFile;
				break;
			}
		}
		read_unlock(&GlobalSMBSeslock);
		if (file->private_data != NULL) {
			rc = 0;
			FreeXid(xid);
			return rc;
		} else {
			if (file->f_flags & O_EXCL)
				cERROR(1, ("could not find file instance for "
					   "new file %p", file));
		}
	}

	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		FreeXid(xid);
		return -ENOMEM;
	}

	cFYI(1, ("inode = 0x%p file flags are 0x%x for %s",
		 inode, file->f_flags, full_path));
	desiredAccess = cifs_convert_flags(file->f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is no direct match for disposition
 *	FILE_SUPERSEDE (ie create whether or not the file exists):
 *	O_CREAT | O_TRUNC is similar, but it truncates an existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on the open call).
 *
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client.  Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *   O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/
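
/* For example (per the table above): open(path, O_CREAT | O_TRUNC | O_RDWR)
   yields desiredAccess = GENERIC_READ | GENERIC_WRITE from
   cifs_convert_flags() and disposition = FILE_OVERWRITE_IF from
   cifs_get_disposition(). */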
	disposition = cifs_get_disposition(file->f_flags);

	if (oplockEnabled)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	/* BB pass O_SYNC flag through on file attributes .. BB */

	/* Also refresh inode by passing in file_info buf returned by SMBOpen
	   and calling get_inode_info with returned buf (at least helps
	   non-Unix server case) */

	/* BB we can not do this if this is the second open of a file
	   and the first handle has writebehind data, we might be
	   able to simply do a filemap_fdatawrite/filemap_fdatawait first */
	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (buf == NULL) {
		kfree(full_path);
		FreeXid(xid);
		return -ENOMEM;
	}

	if (cifs_sb->tcon->ses->capabilities & CAP_NT_SMBS)
		rc = CIFSSMBOpen(xid, pTcon, full_path, disposition,
			 desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				& CIFS_MOUNT_MAP_SPECIAL_CHR);
	else
		rc = -EIO; /* no NT SMB support; fall into legacy open below */

	if (rc == -EIO) {
		/* Old server, try legacy style OpenX */
		rc = SMBLegacyOpen(xid, pTcon, full_path, disposition,
			desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
			cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				& CIFS_MOUNT_MAP_SPECIAL_CHR);
	}
	if (rc) {
		cFYI(1, ("cifs_open returned 0x%x", rc));
		goto out;
	}

	file->private_data =
		kmalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (file->private_data == NULL) {
		rc = -ENOMEM;
		goto out;
	}
	pCifsFile = cifs_init_private(file->private_data, inode, file, netfid);
	write_lock(&GlobalSMBSeslock);
	list_add(&pCifsFile->tlist, &pTcon->openFileList);

	pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
	if (pCifsInode) {
		rc = cifs_open_inode_helper(inode, file, pCifsInode,
					    pCifsFile, pTcon,
					    &oplock, buf, full_path, xid);
	} else {
		write_unlock(&GlobalSMBSeslock);
	}

	if (oplock & CIFS_CREATE_ACTION) {
		/* time to set mode which we can not set earlier due to
		   problems creating new read-only files */
		if (pTcon->unix_ext) {
			CIFSSMBUnixSetPerms(xid, pTcon, full_path,
					    inode->i_mode,
					    (__u64)-1, (__u64)-1, 0 /* dev */,
					    cifs_sb->local_nls,
					    cifs_sb->mnt_cifs_flags &
						CIFS_MOUNT_MAP_SPECIAL_CHR);
		} else {
			/* BB implement via Windows security descriptors eg
			   CIFSSMBWinSetPerms(xid, pTcon, full_path, mode,
					      -1, -1, local_nls);
			   in the meantime could set r/o dos attribute when
			   perms are eg: mode & 0222 == 0 */
		}
	}

out:
	kfree(buf);
	kfree(full_path);
	FreeXid(xid);
	return rc;
}

/* Try to reacquire byte range locks that were released when the session
   to the server was lost */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
	int rc = 0;

	/* BB list all locks open on this file and relock */

	return rc;
}
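
/* A minimal sketch (an assumption, not implemented behavior) of what the
   relock could look like, replaying the records kept by store_file_lock()
   below; xid and tcon are assumed supplied by the caller, and POSIX vs.
   Windows lock semantics are glossed over:

	struct cifsLockInfo *li;
	mutex_lock(&cifsFile->lock_mutex);
	list_for_each_entry(li, &cifsFile->llist, llist)
		CIFSSMBLock(xid, tcon, cifsFile->netfid, li->length,
			    li->offset, 0, 1, li->type, FALSE);
	mutex_unlock(&cifsFile->lock_mutex);
*/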

static int cifs_reopen_file(struct file *file, int can_flush)
{
	int rc = -EACCES;
	int xid, oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	struct cifsFileInfo *pCifsFile;
	struct cifsInodeInfo *pCifsInode;
	struct inode *inode;
	char *full_path = NULL;
	int desiredAccess;
	int disposition = FILE_OPEN;
	__u16 netfid;

	if (file->private_data)
		pCifsFile = (struct cifsFileInfo *)file->private_data;
	else
		return -EBADF;

	xid = GetXid();
	down(&pCifsFile->fh_sem);
	if (pCifsFile->invalidHandle == FALSE) {
		up(&pCifsFile->fh_sem);
		FreeXid(xid);
		return 0;
	}

	if (file->f_path.dentry == NULL) {
		cERROR(1, ("no valid name if dentry freed"));
		dump_stack();
		rc = -EBADF;
		goto reopen_error_exit;
	}

	inode = file->f_path.dentry->d_inode;
	if (inode == NULL) {
		cERROR(1, ("inode not valid"));
		dump_stack();
		rc = -EBADF;
		goto reopen_error_exit;
	}

	cifs_sb = CIFS_SB(inode->i_sb);
	pTcon = cifs_sb->tcon;

	/* can not grab rename sem here because various ops, including
	   those that already have the rename sem can end up causing writepage
	   to get called and if the server was down that means we end up here,
	   and we can never tell if the caller already has the rename_sem */
	full_path = build_path_from_dentry(file->f_path.dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
reopen_error_exit:
		up(&pCifsFile->fh_sem);
		FreeXid(xid);
		return rc;
	}

	cFYI(1, ("inode = 0x%p file flags 0x%x for %s",
		 inode, file->f_flags, full_path));
	desiredAccess = cifs_convert_flags(file->f_flags);

	if (oplockEnabled)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	/* Can not refresh inode by passing in file_info buf to be returned
	   by SMBOpen and then calling get_inode_info with returned buf
	   since file might have write behind data that needs to be flushed
	   and server version of file size can be stale.  If we knew for sure
	   that inode was not dirty locally we could do this */

	rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, desiredAccess,
			 CREATE_NOT_DIR, &netfid, &oplock, NULL,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (rc) {
		up(&pCifsFile->fh_sem);
		cFYI(1, ("cifs_open returned 0x%x", rc));
		cFYI(1, ("oplock: %d", oplock));
	} else {
		pCifsFile->netfid = netfid;
		pCifsFile->invalidHandle = FALSE;
		up(&pCifsFile->fh_sem);
		pCifsInode = CIFS_I(inode);
		if (pCifsInode) {
			if (can_flush) {
				rc = filemap_write_and_wait(inode->i_mapping);
				if (rc != 0)
					CIFS_I(inode)->write_behind_rc = rc;
				/* temporarily disable caching while we
				   go to server to get inode info */
				pCifsInode->clientCanCacheAll = FALSE;
				pCifsInode->clientCanCacheRead = FALSE;
				if (pTcon->unix_ext)
					rc = cifs_get_inode_info_unix(&inode,
						full_path, inode->i_sb, xid);
				else
					rc = cifs_get_inode_info(&inode,
						full_path, NULL, inode->i_sb,
						xid);
			} /* else we are writing out data to server already
			     and could deadlock if we tried to flush data, and
			     since we do not know if we have data that would
			     invalidate the current end of file on the server
			     we can not go to the server to get the new inode
			     info */
			if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
				pCifsInode->clientCanCacheAll = TRUE;
				pCifsInode->clientCanCacheRead = TRUE;
				cFYI(1, ("Exclusive Oplock granted on inode %p",
					 file->f_path.dentry->d_inode));
			} else if ((oplock & 0xF) == OPLOCK_READ) {
				pCifsInode->clientCanCacheRead = TRUE;
				pCifsInode->clientCanCacheAll = FALSE;
			} else {
				pCifsInode->clientCanCacheRead = FALSE;
				pCifsInode->clientCanCacheAll = FALSE;
			}
			cifs_relock_file(pCifsFile);
		}
	}

	kfree(full_path);
	FreeXid(xid);
	return rc;
}

int cifs_close(struct inode *inode, struct file *file)
{
	int rc = 0;
	int xid, timeout;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	struct cifsFileInfo *pSMBFile =
		(struct cifsFileInfo *)file->private_data;

	xid = GetXid();

	cifs_sb = CIFS_SB(inode->i_sb);
	pTcon = cifs_sb->tcon;
	if (pSMBFile) {
		struct cifsLockInfo *li, *tmp;

		pSMBFile->closePend = TRUE;
		if (pTcon) {
			/* no sense reconnecting to close a file that is
			   already closed */
			if (pTcon->tidStatus != CifsNeedReconnect) {
				timeout = 2;
				while ((atomic_read(&pSMBFile->wrtPending) != 0)
				       && (timeout <= 2048)) {
					/* Give write a better chance to get to
					   server ahead of the close.  We do not
					   want to add a wait_q here as it would
					   increase the memory utilization as
					   the struct would be in each open file,
					   but this should give enough time to
					   clear the socket */
					cFYI(1,
					     ("close delay, write pending"));
					msleep(timeout);
					timeout *= 4;
				}
				if (atomic_read(&pSMBFile->wrtPending))
					cERROR(1,
					       ("close with pending writes"));
				rc = CIFSSMBClose(xid, pTcon,
						  pSMBFile->netfid);
			}
		}

		/* Delete any outstanding lock records.
		   We'll lose them when the file is closed anyway. */
		mutex_lock(&pSMBFile->lock_mutex);
		list_for_each_entry_safe(li, tmp, &pSMBFile->llist, llist) {
			list_del(&li->llist);
			kfree(li);
		}
		mutex_unlock(&pSMBFile->lock_mutex);

		write_lock(&GlobalSMBSeslock);
		list_del(&pSMBFile->flist);
		list_del(&pSMBFile->tlist);
		write_unlock(&GlobalSMBSeslock);
		timeout = 10;
		/* We waited above to give the SMBWrite a chance to issue
		   on the wire (so we do not get SMBWrite returning EBADF
		   if writepages is racing with close).  Note that writepages
		   does not specify a file handle, so it is possible for a file
		   to be opened twice, and the application close the "wrong"
		   file handle - in these cases we delay long enough to allow
		   the SMBWrite to get on the wire before the SMB Close.
		   We allow total wait here over 45 seconds, more than
		   oplock break time, and more than enough to allow any write
		   to complete on the server, or to time out on the client */
		while ((atomic_read(&pSMBFile->wrtPending) != 0)
		       && (timeout <= 50000)) {
			cERROR(1, ("writes pending, delay free of handle"));
			msleep(timeout);
			timeout *= 8;
		}
		kfree(pSMBFile->search_resume_name);
		kfree(file->private_data);
		file->private_data = NULL;
	} else
		rc = -EBADF;

	read_lock(&GlobalSMBSeslock);
	if (list_empty(&(CIFS_I(inode)->openFileList))) {
		cFYI(1, ("closing last open instance for inode %p", inode));
		/* if the file is not open we do not know if we can cache info
		   on this inode, much less write behind and read ahead */
		CIFS_I(inode)->clientCanCacheRead = FALSE;
		CIFS_I(inode)->clientCanCacheAll = FALSE;
	}
	read_unlock(&GlobalSMBSeslock);
	if ((rc == 0) && CIFS_I(inode)->write_behind_rc)
		rc = CIFS_I(inode)->write_behind_rc;
	FreeXid(xid);
	return rc;
}

int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	int xid;
	struct cifsFileInfo *pCFileStruct =
		(struct cifsFileInfo *)file->private_data;
	char *ptmp;

	cFYI(1, ("Closedir inode = 0x%p", inode));

	xid = GetXid();

	if (pCFileStruct) {
		struct cifsTconInfo *pTcon;
		struct cifs_sb_info *cifs_sb =
			CIFS_SB(file->f_path.dentry->d_sb);

		pTcon = cifs_sb->tcon;

		cFYI(1, ("Freeing private data in close dir"));
		if ((pCFileStruct->srch_inf.endOfSearch == FALSE) &&
		    (pCFileStruct->invalidHandle == FALSE)) {
			pCFileStruct->invalidHandle = TRUE;
			rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
			cFYI(1, ("Closing uncompleted readdir with rc %d",
				 rc));
			/* not much we can do if it fails anyway, ignore rc */
			rc = 0;
		}
		ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
		if (ptmp) {
			cFYI(1, ("closedir free smb buf in srch struct"));
			pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
			if (pCFileStruct->srch_inf.smallBuf)
				cifs_small_buf_release(ptmp);
			else
				cifs_buf_release(ptmp);
		}
		ptmp = pCFileStruct->search_resume_name;
		if (ptmp) {
			cFYI(1, ("closedir free resume name"));
			pCFileStruct->search_resume_name = NULL;
			kfree(ptmp);
		}
		kfree(file->private_data);
		file->private_data = NULL;
	}
	/* BB can we lock the filestruct while this is going on? */
	FreeXid(xid);
	return rc;
}

static int store_file_lock(struct cifsFileInfo *fid, __u64 len,
			   __u64 offset, __u8 lockType)
{
	struct cifsLockInfo *li =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (li == NULL)
		return -ENOMEM;
	li->offset = offset;
	li->length = len;
	li->type = lockType;
	mutex_lock(&fid->lock_mutex);
	list_add(&li->llist, &fid->llist);
	mutex_unlock(&fid->lock_mutex);
	return 0;
}
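
/* The records stored above are consumed in two places: cifs_lock() below
   walks the list to release fully-overlapped Windows-style locks on unlock,
   and cifs_close() frees any records still outstanding when the file is
   closed. */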

int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
{
	int rc, xid;
	__u32 numLock = 0;
	__u32 numUnlock = 0;
	__u64 length;
	int wait_flag = FALSE;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	__u16 netfid;
	__u8 lockType = LOCKING_ANDX_LARGE_FILES;
	int posix_locking;

	length = 1 + pfLock->fl_end - pfLock->fl_start;
	rc = -EACCES;
	xid = GetXid();

	cFYI(1, ("Lock parm: 0x%x flockflags: "
		 "0x%x flocktype: 0x%x start: %lld end: %lld",
		 cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
		 pfLock->fl_end));

	if (pfLock->fl_flags & FL_POSIX)
		cFYI(1, ("Posix"));
	if (pfLock->fl_flags & FL_FLOCK)
		cFYI(1, ("Flock"));
	if (pfLock->fl_flags & FL_SLEEP) {
		cFYI(1, ("Blocking lock"));
		wait_flag = TRUE;
	}
	if (pfLock->fl_flags & FL_ACCESS)
		cFYI(1, ("Process suspended by mandatory locking - "
			 "not implemented yet"));
	if (pfLock->fl_flags & FL_LEASE)
		cFYI(1, ("Lease on file - not implemented yet"));
	if (pfLock->fl_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
		cFYI(1, ("Unknown lock flags 0x%x", pfLock->fl_flags));

	if (pfLock->fl_type == F_WRLCK) {
		cFYI(1, ("F_WRLCK"));
		numLock = 1;
	} else if (pfLock->fl_type == F_UNLCK) {
		cFYI(1, ("F_UNLCK"));
		numUnlock = 1;
		/* Check if unlock includes more than
		   one lock range */
	} else if (pfLock->fl_type == F_RDLCK) {
		cFYI(1, ("F_RDLCK"));
		lockType |= LOCKING_ANDX_SHARED_LOCK;
		numLock = 1;
	} else if (pfLock->fl_type == F_EXLCK) {
		cFYI(1, ("F_EXLCK"));
		numLock = 1;
	} else if (pfLock->fl_type == F_SHLCK) {
		cFYI(1, ("F_SHLCK"));
		lockType |= LOCKING_ANDX_SHARED_LOCK;
		numLock = 1;
	} else
		cFYI(1, ("Unknown type of lock"));

	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	pTcon = cifs_sb->tcon;

	if (file->private_data == NULL) {
		FreeXid(xid);
		return -EBADF;
	}
	netfid = ((struct cifsFileInfo *)file->private_data)->netfid;

	posix_locking = (cifs_sb->tcon->ses->capabilities & CAP_UNIX) &&
		(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(cifs_sb->tcon->fsUnixInfo.Capability));

	/* BB add code here to normalize offset and length to
	   account for negative length which we can not accept over the
	   wire */
	if (IS_GETLK(cmd)) {
		if (posix_locking) {
			int posix_lock_type;
			if (lockType & LOCKING_ANDX_SHARED_LOCK)
				posix_lock_type = CIFS_RDLCK;
			else
				posix_lock_type = CIFS_WRLCK;
			rc = CIFSSMBPosixLock(xid, pTcon, netfid, 1 /* get */,
					      length, pfLock,
					      posix_lock_type, wait_flag);
			FreeXid(xid);
			return rc;
		}

		/* BB we could chain these into one lock request BB */
		rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start,
				 0, 1, lockType, 0 /* wait flag */);
		if (rc == 0) {
			rc = CIFSSMBLock(xid, pTcon, netfid, length,
					 pfLock->fl_start, 1 /* numUnlock */,
					 0 /* numLock */, lockType,
					 0 /* wait flag */);
			pfLock->fl_type = F_UNLCK;
			if (rc != 0)
				cERROR(1, ("Error unlocking previously locked "
					   "range %d during test of lock", rc));
			rc = 0;
		} else {
			/* if rc == ERR_SHARING_VIOLATION ? */
			rc = 0;	/* do not change lock type to unlock
				   since range in use */
		}

		FreeXid(xid);
		return rc;
	}

	if (!numLock && !numUnlock) {
		/* if no lock or unlock then nothing
		   to do since we do not know what it is */
		FreeXid(xid);
		return -EOPNOTSUPP;
	}

	if (posix_locking) {
		int posix_lock_type;
		if (lockType & LOCKING_ANDX_SHARED_LOCK)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (numUnlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, pTcon, netfid, 0 /* set */,
				      length, pfLock,
				      posix_lock_type, wait_flag);
	} else {
		struct cifsFileInfo *fid =
			(struct cifsFileInfo *)file->private_data;

		if (numLock) {
			rc = CIFSSMBLock(xid, pTcon, netfid, length,
					 pfLock->fl_start,
					 0, numLock, lockType, wait_flag);

			if (rc == 0) {
				/* For Windows locks we must store them. */
				rc = store_file_lock(fid, length,
						     pfLock->fl_start, lockType);
			}
		} else if (numUnlock) {
			/* For each stored lock that this unlock overlaps
			   completely, unlock it. */
			int stored_rc = 0;
			struct cifsLockInfo *li, *tmp;

			rc = 0;
			mutex_lock(&fid->lock_mutex);
			list_for_each_entry_safe(li, tmp, &fid->llist, llist) {
				if (pfLock->fl_start <= li->offset &&
				    (pfLock->fl_start + length) >=
				    (li->offset + li->length)) {
					stored_rc = CIFSSMBLock(xid, pTcon,
							netfid,
							li->length, li->offset,
							1, 0, li->type, FALSE);
					if (stored_rc)
						rc = stored_rc;

					list_del(&li->llist);
					kfree(li);
				}
			}
			mutex_unlock(&fid->lock_mutex);
		}
	}

	if (pfLock->fl_flags & FL_POSIX)
		posix_lock_file_wait(file, pfLock);
	FreeXid(xid);
	return rc;
}

ssize_t cifs_user_write(struct file *file, const char __user *write_data,
			size_t write_size, loff_t *poffset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	int xid, long_op;
	struct cifsFileInfo *open_file;

	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	pTcon = cifs_sb->tcon;

	/* cFYI(1,
	      (" write %d bytes to offset %lld of %s", write_size,
	       *poffset, file->f_path.dentry->d_name.name)); */

	if (file->private_data == NULL)
		return -EBADF;
	open_file = (struct cifsFileInfo *)file->private_data;

	xid = GetXid();

	if (*poffset > file->f_path.dentry->d_inode->i_size)
		long_op = CIFS_VLONG_OP; /* writes past EOF take long time */
	else
		long_op = CIFS_LONG_OP;

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if (file->private_data == NULL) {
				/* file has been closed on us */
				FreeXid(xid);
				/* if we have gotten here we have written some data
				   and blocked, and the file has been freed on us while
				   we blocked so return what we managed to write */
				return total_written;
			}
			if (open_file->closePend) {
				FreeXid(xid);
				if (total_written)
					return total_written;
				else
					return -EBADF;
			}
			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to server
				   now */
				rc = cifs_reopen_file(file, FALSE);
				if (rc != 0)
					break;
			}

			rc = CIFSSMBWrite(xid, pTcon,
				open_file->netfid,
				min_t(const int, cifs_sb->wsize,
				      write_size - total_written),
				*poffset, &bytes_written,
				NULL, write_data + total_written, long_op);
		}
		if (rc || (bytes_written == 0)) {
			if (total_written)
				break;
			else {
				FreeXid(xid);
				return rc;
			}
		} else
			*poffset += bytes_written;
		long_op = CIFS_STD_OP; /* subsequent writes fast -
					  15 seconds is plenty */
	}

	cifs_stats_bytes_written(pTcon, total_written);

	/* since the write may have blocked check these pointers again */
	if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
		struct inode *inode = file->f_path.dentry->d_inode;
		/* Do not update local mtime - server will set its actual value on write
		 * inode->i_ctime = inode->i_mtime =
		 *	current_fs_time(inode->i_sb); */
		if (total_written > 0) {
			spin_lock(&inode->i_lock);
			if (*poffset > file->f_path.dentry->d_inode->i_size)
				i_size_write(file->f_path.dentry->d_inode,
					     *poffset);
			spin_unlock(&inode->i_lock);
		}
		mark_inode_dirty_sync(file->f_path.dentry->d_inode);
	}
	FreeXid(xid);
	return total_written;
}
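
/* Kernel-buffer counterpart of cifs_user_write() above: write_data is a
   kernel pointer (e.g. a kmapped page from the writeback paths), so when
   the experimental flag is set or the session is not signed the data can
   be sent via the CIFSSMBWrite2() iovec interface without an extra copy. */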
static ssize_t cifs_write(struct file *file, const char *write_data,
			  size_t write_size, loff_t *poffset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	int xid, long_op;
	struct cifsFileInfo *open_file;

	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

	pTcon = cifs_sb->tcon;

	cFYI(1, ("write %zd bytes to offset %lld of %s", write_size,
		 *poffset, file->f_path.dentry->d_name.name));

	if (file->private_data == NULL)
		return -EBADF;
	open_file = (struct cifsFileInfo *)file->private_data;

	xid = GetXid();

	if (*poffset > file->f_path.dentry->d_inode->i_size)
		long_op = CIFS_VLONG_OP; /* writes past EOF can be slow */
	else
		long_op = CIFS_LONG_OP;

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if (file->private_data == NULL) {
				/* file has been closed on us */
				FreeXid(xid);
				/* if we have gotten here we have written some data
				   and blocked, and the file has been freed on us
				   while we blocked so return what we managed to
				   write */
				return total_written;
			}
			if (open_file->closePend) {
				FreeXid(xid);
				if (total_written)
					return total_written;
				else
					return -EBADF;
			}
			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to
				   server now */
				rc = cifs_reopen_file(file, FALSE);
				if (rc != 0)
					break;
			}
			if (experimEnabled || (pTcon->ses->server &&
			    ((pTcon->ses->server->secMode &
			      (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
			     == 0))) {
				struct kvec iov[2];
				unsigned int len;

				len = min((size_t)cifs_sb->wsize,
					  write_size - total_written);
				/* iov[0] is reserved for smb header */
				iov[1].iov_base = (char *)write_data +
						  total_written;
				iov[1].iov_len = len;
				rc = CIFSSMBWrite2(xid, pTcon,
						   open_file->netfid, len,
						   *poffset, &bytes_written,
						   iov, 1, long_op);
			} else
				rc = CIFSSMBWrite(xid, pTcon,
					 open_file->netfid,
					 min_t(const int, cifs_sb->wsize,
					       write_size - total_written),
					 *poffset, &bytes_written,
					 write_data + total_written,
					 NULL, long_op);
		}
		if (rc || (bytes_written == 0)) {
			if (total_written)
				break;
			else {
				FreeXid(xid);
				return rc;
			}
		} else
			*poffset += bytes_written;
		long_op = CIFS_STD_OP; /* subsequent writes fast -
					  15 seconds is plenty */
	}

	cifs_stats_bytes_written(pTcon, total_written);

	/* since the write may have blocked check these pointers again */
	if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
		/* BB We could make this contingent on superblock ATIME flag too */
		/* file->f_path.dentry->d_inode->i_ctime =
		   file->f_path.dentry->d_inode->i_mtime = CURRENT_TIME; */
		if (total_written > 0) {
			spin_lock(&file->f_path.dentry->d_inode->i_lock);
			if (*poffset > file->f_path.dentry->d_inode->i_size)
				i_size_write(file->f_path.dentry->d_inode,
					     *poffset);
			spin_unlock(&file->f_path.dentry->d_inode->i_lock);
		}
		mark_inode_dirty_sync(file->f_path.dentry->d_inode);
	}
	FreeXid(xid);
	return total_written;
}

#ifdef CONFIG_CIFS_EXPERIMENTAL
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *open_file = NULL;

	read_lock(&GlobalSMBSeslock);
	/* we could simply get the first_list_entry since write-only entries
	   are always at the end of the list but since the first entry might
	   have a close pending, we go through the whole list */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (open_file->closePend)
			continue;
		/* O_RDONLY is zero, so test the access mode rather than
		   individual flag bits */
		if (open_file->pfile &&
		    ((open_file->pfile->f_flags & O_ACCMODE) != O_WRONLY)) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				atomic_inc(&open_file->wrtPending);
				read_unlock(&GlobalSMBSeslock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	read_unlock(&GlobalSMBSeslock);
	return NULL;
}
#endif
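
/* Find a handle opened for writing on the given inode; wrtPending is bumped
   on the returned handle so cifs_close() will not free it out from under
   the caller, who must atomic_dec() it when done. */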
struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *open_file;
	int rc;

	/* Having a null inode here (because mapping->host was set to zero by
	   the VFS or MM) should not happen but we had reports of an oops (due
	   to it being zero) during stress test cases so we need to check for it */

	if (cifs_inode == NULL) {
		cERROR(1, ("Null inode passed to cifs_writeable_file"));
		dump_stack();
		return NULL;
	}

	read_lock(&GlobalSMBSeslock);
refind_writable:
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (open_file->closePend)
			continue;
		if (open_file->pfile &&
		    ((open_file->pfile->f_flags & O_RDWR) ||
		     (open_file->pfile->f_flags & O_WRONLY))) {
			atomic_inc(&open_file->wrtPending);

			if (!open_file->invalidHandle) {
				/* found a good writable file */
				read_unlock(&GlobalSMBSeslock);
				return open_file;
			}

			read_unlock(&GlobalSMBSeslock);
			/* Had to unlock since following call can block */
			rc = cifs_reopen_file(open_file->pfile, FALSE);
			if (!rc) {
				if (!open_file->closePend)
					return open_file;
				else { /* start over in case this was deleted */
				       /* since the list could be modified */
					read_lock(&GlobalSMBSeslock);
					atomic_dec(&open_file->wrtPending);
					goto refind_writable;
				}
			}

			/* if it fails, try another handle if possible -
			   (we can not do this if closePending since
			   loop could be modified - in which case we
			   have to start at the beginning of the list
			   again.  Note that it would be bad
			   to hold up writepages here (rather than
			   in caller) with continuous retries */
			cFYI(1, ("wp failed on reopen file"));
			read_lock(&GlobalSMBSeslock);
			/* can not use this handle, no write
			   pending on this one after all */
			atomic_dec(&open_file->wrtPending);

			if (open_file->closePend) /* list could have changed */
				goto refind_writable;
			/* else we simply continue to the next entry.  Thus
			   we do not loop on reopen errors.  If we
			   can not reopen the file, for example if we
			   reconnected to a server with another client
			   racing to delete or lock the file we would not
			   make progress if we restarted before the beginning
			   of the loop here. */
		}
	}
	read_unlock(&GlobalSMBSeslock);
	return NULL;
}
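
/* Write out the byte range [from, to) of the given page.  Because this is
   driven by writepage paths that carry no file handle, a writable handle is
   looked up on the inode for each call. */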
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;
	cifs_sb = CIFS_SB(inode->i_sb);
	pTcon = cifs_sb->tcon;

	offset += (loff_t)from;
	write_data = kmap(page);
	write_data += from;

	if ((to > PAGE_CACHE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	open_file = find_writable_file(CIFS_I(mapping->host));
	if (open_file) {
		bytes_written = cifs_write(open_file->pfile, write_data,
					   to - from, &offset);
		atomic_dec(&open_file->wrtPending);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
	} else {
		cFYI(1, ("No writeable filehandles for inode"));
		rc = -EIO;
	}

	kunmap(page);
	return rc;
}
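
/* Gather runs of contiguous dirty pages into a single iovec (up to wsize
   bytes, with iov[0] reserved for the SMB header) and send each run to the
   server with one CIFSSMBWrite2() call. */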
static int cifs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	unsigned int bytes_to_write;
	unsigned int bytes_written;
	struct cifs_sb_info *cifs_sb;
	int done = 0;
	pgoff_t end;
	pgoff_t index;
	int range_whole = 0;
	struct kvec *iov;
	int len;
	int n_iov = 0;
	pgoff_t next;
	int nr_pages;
	__u64 offset = 0;
	struct cifsFileInfo *open_file;
	struct page *page;
	struct pagevec pvec;
	int rc = 0;
	int scanned = 0;
	int xid;

	cifs_sb = CIFS_SB(mapping->host->i_sb);

	/*
	 * If wsize is smaller than the page cache size, default to writing
	 * one page at a time via cifs_writepage
	 */
	if (cifs_sb->wsize < PAGE_CACHE_SIZE)
		return generic_writepages(mapping, wbc);

	if ((cifs_sb->tcon->ses) && (cifs_sb->tcon->ses->server))
		if (cifs_sb->tcon->ses->server->secMode &
		    (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
			if (!experimEnabled)
				return generic_writepages(mapping, wbc);

	iov = kmalloc(32 * sizeof(struct kvec), GFP_KERNEL);
	if (iov == NULL)
		return generic_writepages(mapping, wbc);

	/*
	 * BB: Is this meaningful for a non-block-device file system?
	 * If it is, we should test it again after we do I/O
	 */
	if (wbc->nonblocking && bdi_write_congested(bdi)) {
		wbc->encountered_congestion = 1;
		kfree(iov);
		return 0;
	}

	xid = GetXid();

	pagevec_init(&pvec, 0);
	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		scanned = 1;
	}
retry:
	while (!done && (index <= end) &&
	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_DIRTY,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
		int first;
		unsigned int i;

		first = -1;
		next = 0;
		n_iov = 0;
		bytes_to_write = 0;

		for (i = 0; i < nr_pages; i++) {
			page = pvec.pages[i];
			/*
			 * At this point we hold neither mapping->tree_lock nor
			 * lock on the page itself: the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or even
			 * swizzled back from swapper_space to tmpfs file
			 * mapping
			 */
			if (first < 0)
				lock_page(page);
			else if (TestSetPageLocked(page))
				break;

			if (unlikely(page->mapping != mapping)) {
				unlock_page(page);
				break;
			}

			if (!wbc->range_cyclic && page->index > end) {
				done = 1;
				unlock_page(page);
				break;
			}

			if (next && (page->index != next)) {
				/* Not next consecutive page */
				unlock_page(page);
				break;
			}

			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);

			if (PageWriteback(page) ||
			    !clear_page_dirty_for_io(page)) {
				unlock_page(page);
				break;
			}

			/*
			 * This actually clears the dirty bit in the radix tree.
			 * See cifs_writepage() for more commentary.
			 */
			set_page_writeback(page);

			if (page_offset(page) >= mapping->host->i_size) {
				done = 1;
				unlock_page(page);
				end_page_writeback(page);
				break;
			}

			/*
			 * BB can we get rid of this? pages are held by pvec
			 */
			page_cache_get(page);

			len = min(mapping->host->i_size - page_offset(page),
				  (loff_t)PAGE_CACHE_SIZE);

			/* reserve iov[0] for the smb header */
			n_iov++;
			iov[n_iov].iov_base = kmap(page);
			iov[n_iov].iov_len = len;
			bytes_to_write += len;

			if (first < 0) {
				first = i;
				offset = page_offset(page);
			}
			next = page->index + 1;
			if (bytes_to_write + PAGE_CACHE_SIZE > cifs_sb->wsize)
				break;
		}
		if (n_iov) {
			/* Search for a writable handle every time we call
			 * CIFSSMBWrite2.  We can't rely on the last handle
			 * we used to still be valid
			 */
			open_file = find_writable_file(CIFS_I(mapping->host));
			if (!open_file) {
				cERROR(1, ("No writable handles for inode"));
				rc = -EBADF;
			} else {
				rc = CIFSSMBWrite2(xid, cifs_sb->tcon,
						   open_file->netfid,
						   bytes_to_write, offset,
						   &bytes_written, iov, n_iov,
						   CIFS_LONG_OP);
				atomic_dec(&open_file->wrtPending);
				if (rc || bytes_written < bytes_to_write) {
					cERROR(1, ("Write2 ret %d, wrote %d",
						   rc, bytes_written));
					/* BB what if continued retry is
					   requested via mount flags? */
					if (rc == -ENOSPC)
						set_bit(AS_ENOSPC, &mapping->flags);
					else
						set_bit(AS_EIO, &mapping->flags);
				} else {
					cifs_stats_bytes_written(cifs_sb->tcon,
								 bytes_written);
				}
			}
			for (i = 0; i < n_iov; i++) {
				page = pvec.pages[first + i];
				/* Should we also set page error on
				   success rc but too little data written? */
				/* BB investigate retry logic on temporary
				   server crash cases and how recovery works
				   when page marked as error */
				if (rc)
					SetPageError(page);
				kunmap(page);
				unlock_page(page);
				end_page_writeback(page);
				page_cache_release(page);
			}
			if ((wbc->nr_to_write -= n_iov) <= 0)
				done = 1;
			index = next;
		}
		pagevec_release(&pvec);
	}
	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = 1;
		index = 0;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	FreeXid(xid);
	kfree(iov);
	return rc;
}

static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc = -EFAULT;
	int xid;

	xid = GetXid();
	/* BB add check for wbc flags */
	page_cache_get(page);
	if (!PageUptodate(page))
		cFYI(1, ("ppw - page not up to date"));

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
	rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
	SetPageUptodate(page); /* BB add check for error and Clearuptodate? */
	unlock_page(page);
	end_page_writeback(page);
	page_cache_release(page);
	FreeXid(xid);
	return rc;
}

static int cifs_commit_write(struct file *file, struct page *page,
			     unsigned offset, unsigned to)
{
	int xid;
	int rc = 0;
	struct inode *inode = page->mapping->host;
	loff_t position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
	char *page_data;

	xid = GetXid();
	cFYI(1, ("commit write for page %p up to position %lld for %d",
		 page, position, to));
	spin_lock(&inode->i_lock);
	if (position > inode->i_size)
		i_size_write(inode, position);

	spin_unlock(&inode->i_lock);
	if (!PageUptodate(page)) {
		position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + offset;
		/* can not rely on (or let) writepage write this data */
		if (to < offset) {
			cFYI(1, ("Illegal offsets, can not copy from %d to %d",
				 offset, to));
			FreeXid(xid);
			return rc;
		}
		/* this is probably better than directly calling
		   partialpage_write since in this function the file handle is
		   known which we might as well leverage */
		/* BB check if anything else missing out of ppw
		   such as updating last write time */
		page_data = kmap(page);
		rc = cifs_write(file, page_data + offset, to - offset,
				&position);
		if (rc > 0)
			rc = 0;
		/* else if (rc < 0) should we set writebehind rc? */
		kunmap(page);
	} else {
		set_page_dirty(page);
	}

	FreeXid(xid);
	return rc;
}

int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	int xid;
	int rc = 0;
	struct inode *inode = file->f_path.dentry->d_inode;

	xid = GetXid();

	cFYI(1, ("Sync file - name: %s datasync: 0x%x",
		 dentry->d_name.name, datasync));

	rc = filemap_write_and_wait(inode->i_mapping);
	if (rc == 0) {
		rc = CIFS_I(inode)->write_behind_rc;
		CIFS_I(inode)->write_behind_rc = 0;
	}
	FreeXid(xid);
	return rc;
}

/* static void cifs_sync_page(struct page *page)
{
	struct address_space *mapping;
	struct inode *inode;
	unsigned long index = page->index;
	unsigned int rpages = 0;
	int rc = 0;

	cFYI(1, ("sync page %p", page));
	mapping = page->mapping;
	if (!mapping)
		return 0;
	inode = mapping->host;
	if (!inode)
		return; */

/*	fill in rpages then
	result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */

/*	cFYI(1, ("rpages is %d for sync page of Index %ld", rpages, index));

	if (rc < 0)
		return rc;

	return 0;
} */

/*
 * As file closes, flush all cached write data for this inode checking
 * for write behind errors.
 */
int cifs_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	int rc = 0;

	/* Rather than do the steps manually:
	   lock the inode for writing
	   loop through pages looking for write behind data (dirty pages)
	   coalesce into contiguous 16K (or smaller) chunks to write to server
	   send to server (prefer in parallel)
	   deal with writebehind errors
	   unlock inode for writing
	   filemapfdatawrite appears easier for the time being */

	rc = filemap_fdatawrite(inode->i_mapping);
	if (!rc) {
		/* reset wb rc if we were able to write out dirty pages */
		rc = CIFS_I(inode)->write_behind_rc;
		CIFS_I(inode)->write_behind_rc = 0;
	}

	cFYI(1, ("Flush inode %p file %p rc %d", inode, file, rc));

	return rc;
}
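
/* Read into a user-space buffer: data is copied out of the SMB response
   buffer with copy_to_user(), skipping the RFC1001 length field and the
   DataOffset reported in the response header. */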
ssize_t cifs_user_read(struct file *file, char __user *read_data,
		       size_t read_size, loff_t *poffset)
{
	int rc = -EACCES;
	unsigned int bytes_read = 0;
	unsigned int total_read = 0;
	unsigned int current_read_size;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	int xid;
	struct cifsFileInfo *open_file;
	char *smb_read_data;
	char __user *current_offset;
	struct smb_com_read_rsp *pSMBr;

	xid = GetXid();
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	pTcon = cifs_sb->tcon;

	if (file->private_data == NULL) {
		FreeXid(xid);
		return -EBADF;
	}
	open_file = (struct cifsFileInfo *)file->private_data;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, ("attempting read on write only file instance"));

	for (total_read = 0, current_offset = read_data;
	     read_size > total_read;
	     total_read += bytes_read, current_offset += bytes_read) {
		current_read_size = min_t(const int, read_size - total_read,
					  cifs_sb->rsize);
		rc = -EAGAIN;
		smb_read_data = NULL;
		while (rc == -EAGAIN) {
			int buf_type = CIFS_NO_BUFFER;
			if ((open_file->invalidHandle) &&
			    (!open_file->closePend)) {
				rc = cifs_reopen_file(file, TRUE);
				if (rc != 0)
					break;
			}
			rc = CIFSSMBRead(xid, pTcon,
					 open_file->netfid,
					 current_read_size, *poffset,
					 &bytes_read, &smb_read_data,
					 &buf_type);
			pSMBr = (struct smb_com_read_rsp *)smb_read_data;
			if (smb_read_data) {
				if (copy_to_user(current_offset,
						 smb_read_data +
						 4 /* RFC1001 length field */ +
						 le16_to_cpu(pSMBr->DataOffset),
						 bytes_read))
					rc = -EFAULT;

				if (buf_type == CIFS_SMALL_BUFFER)
					cifs_small_buf_release(smb_read_data);
				else if (buf_type == CIFS_LARGE_BUFFER)
					cifs_buf_release(smb_read_data);
				smb_read_data = NULL;
			}
		}
		if (rc || (bytes_read == 0)) {
			if (total_read) {
				break;
			} else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_stats_bytes_read(pTcon, bytes_read);
			*poffset += bytes_read;
		}
	}
	FreeXid(xid);
	return total_read;
}
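
/* Kernel-buffer counterpart of cifs_user_read() above, used by the page
   cache paths: CIFSSMBRead() fills the supplied buffer directly, so no
   copy_to_user() is needed. */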
static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
			 loff_t *poffset)
{
	int rc = -EACCES;
	unsigned int bytes_read = 0;
	unsigned int total_read;
	unsigned int current_read_size;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	int xid;
	char *current_offset;
	struct cifsFileInfo *open_file;
	int buf_type = CIFS_NO_BUFFER;

	xid = GetXid();
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	pTcon = cifs_sb->tcon;

	if (file->private_data == NULL) {
		FreeXid(xid);
		return -EBADF;
	}
	open_file = (struct cifsFileInfo *)file->private_data;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, ("attempting read on write only file instance"));

	for (total_read = 0, current_offset = read_data;
	     read_size > total_read;
	     total_read += bytes_read, current_offset += bytes_read) {
		current_read_size = min_t(const int, read_size - total_read,
					  cifs_sb->rsize);
		/* For windows me and 9x we do not want to request more
		   than it negotiated since it will refuse the read then */
		if ((pTcon->ses) &&
		    !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
			current_read_size = min_t(const int, current_read_size,
						  pTcon->ses->server->maxBuf - 128);
		}
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if ((open_file->invalidHandle) &&
			    (!open_file->closePend)) {
				rc = cifs_reopen_file(file, TRUE);
				if (rc != 0)
					break;
			}
			rc = CIFSSMBRead(xid, pTcon,
					 open_file->netfid,
					 current_read_size, *poffset,
					 &bytes_read, &current_offset,
					 &buf_type);
		}
		if (rc || (bytes_read == 0)) {
			if (total_read) {
				break;
			} else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_stats_bytes_read(pTcon, total_read);
			*poffset += bytes_read;
		}
	}
	FreeXid(xid);
	return total_read;
}

int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct dentry *dentry = file->f_path.dentry;
	int rc, xid;

	xid = GetXid();
	rc = cifs_revalidate(dentry);
	if (rc) {
		cFYI(1, ("Validation prior to mmap failed, error=%d", rc));
		FreeXid(xid);
		return rc;
	}
	rc = generic_file_mmap(file, vma);
	FreeXid(xid);
	return rc;
}

static void cifs_copy_cache_pages(struct address_space *mapping,
	struct list_head *pages, int bytes_read, char *data,
	struct pagevec *plru_pvec)
{
	struct page *page;
	char *target;

	while (bytes_read > 0) {
		if (list_empty(pages))
			break;

		page = list_entry(pages->prev, struct page, lru);
		list_del(&page->lru);

		if (add_to_page_cache(page, mapping, page->index,
				      GFP_KERNEL)) {
			page_cache_release(page);
			cFYI(1, ("Add page cache failed"));
			data += PAGE_CACHE_SIZE;
			bytes_read -= PAGE_CACHE_SIZE;
			continue;
		}

		target = kmap_atomic(page, KM_USER0);

		if (PAGE_CACHE_SIZE > bytes_read) {
			memcpy(target, data, bytes_read);
			/* zero the tail end of this partial page */
			memset(target + bytes_read, 0,
			       PAGE_CACHE_SIZE - bytes_read);
			bytes_read = 0;
		} else {
			memcpy(target, data, PAGE_CACHE_SIZE);
			bytes_read -= PAGE_CACHE_SIZE;
		}
		kunmap_atomic(target, KM_USER0);

		flush_dcache_page(page);
		SetPageUptodate(page);
		unlock_page(page);
		if (!pagevec_add(plru_pvec, page))
			__pagevec_lru_add(plru_pvec);
		data += PAGE_CACHE_SIZE;
	}
}
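
/* Readahead: batch runs of adjacent pages from the request list into reads
   of up to rsize bytes, then fan the response data back out into the page
   cache via cifs_copy_cache_pages() above. */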
static int cifs_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *page_list, unsigned num_pages)
{
	int rc = -EACCES;
	int xid;
	loff_t offset;
	struct page *page;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	unsigned int bytes_read = 0;
	unsigned int read_size, i;
	char *smb_read_data = NULL;
	struct smb_com_read_rsp *pSMBr;
	struct pagevec lru_pvec;
	struct cifsFileInfo *open_file;
	int buf_type = CIFS_NO_BUFFER;

	xid = GetXid();
	if (file->private_data == NULL) {
		FreeXid(xid);
		return -EBADF;
	}
	open_file = (struct cifsFileInfo *)file->private_data;
	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
	pTcon = cifs_sb->tcon;

	pagevec_init(&lru_pvec, 0);
	cFYI(DBG2, ("rpages: num pages %d", num_pages));
	for (i = 0; i < num_pages; ) {
		unsigned contig_pages;
		struct page *tmp_page;
		unsigned long expected_index;

		if (list_empty(page_list))
			break;

		page = list_entry(page_list->prev, struct page, lru);
		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;

		/* count adjacent pages that we will read into */
		contig_pages = 0;
		expected_index =
			list_entry(page_list->prev, struct page, lru)->index;
		list_for_each_entry_reverse(tmp_page, page_list, lru) {
			if (tmp_page->index == expected_index) {
				contig_pages++;
				expected_index++;
			} else
				break;
		}
		if (contig_pages + i > num_pages)
			contig_pages = num_pages - i;

		/* for reads over a certain size could initiate async
		   read ahead */

		read_size = contig_pages * PAGE_CACHE_SIZE;
		/* Read size needs to be in multiples of one page */
		read_size = min_t(const unsigned int, read_size,
				  cifs_sb->rsize & PAGE_CACHE_MASK);
		cFYI(DBG2, ("rpages: read size 0x%x contiguous pages %d",
			    read_size, contig_pages));
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if ((open_file->invalidHandle) &&
			    (!open_file->closePend)) {
				rc = cifs_reopen_file(file, TRUE);
				if (rc != 0)
					break;
			}

			rc = CIFSSMBRead(xid, pTcon,
					 open_file->netfid,
					 read_size, offset,
					 &bytes_read, &smb_read_data,
					 &buf_type);
			/* BB more RC checks ? */
			if (rc == -EAGAIN) {
				if (smb_read_data) {
					if (buf_type == CIFS_SMALL_BUFFER)
						cifs_small_buf_release(smb_read_data);
					else if (buf_type == CIFS_LARGE_BUFFER)
						cifs_buf_release(smb_read_data);
					smb_read_data = NULL;
				}
			}
		}
		if ((rc < 0) || (smb_read_data == NULL)) {
			cFYI(1, ("Read error in readpages: %d", rc));
			break;
		} else if (bytes_read > 0) {
			task_io_account_read(bytes_read);
			pSMBr = (struct smb_com_read_rsp *)smb_read_data;
			cifs_copy_cache_pages(mapping, page_list, bytes_read,
				smb_read_data + 4 /* RFC1001 hdr */ +
				le16_to_cpu(pSMBr->DataOffset), &lru_pvec);

			i += bytes_read >> PAGE_CACHE_SHIFT;
			cifs_stats_bytes_read(pTcon, bytes_read);
			if ((bytes_read & PAGE_CACHE_MASK) != bytes_read) {
				i++; /* account for partial page */

				/* server copy of file can have smaller size
				   than client */
				/* BB do we need to verify this common case ?
				   this case is ok - if we are at server EOF
				   we will hit it on next read */
			}
		} else {
			cFYI(1, ("No bytes read (%d) at offset %lld . "
				 "Cleaning remaining pages from readahead list",
				 bytes_read, offset));
			/* BB turn off caching and do new lookup on
			   file size at server? */
			break;
		}
		if (smb_read_data) {
			if (buf_type == CIFS_SMALL_BUFFER)
				cifs_small_buf_release(smb_read_data);
			else if (buf_type == CIFS_LARGE_BUFFER)
				cifs_buf_release(smb_read_data);
			smb_read_data = NULL;
		}
		bytes_read = 0;
	}

	pagevec_lru_add(&lru_pvec);

	/* need to free smb_read_data buf before exit */
	if (smb_read_data) {
		if (buf_type == CIFS_SMALL_BUFFER)
			cifs_small_buf_release(smb_read_data);
		else if (buf_type == CIFS_LARGE_BUFFER)
			cifs_buf_release(smb_read_data);
		smb_read_data = NULL;
	}

	FreeXid(xid);
	return rc;
}

static int cifs_readpage_worker(struct file *file, struct page *page,
				loff_t *poffset)
{
	char *read_data;
	int rc;

	page_cache_get(page);
	read_data = kmap(page);
	/* for reads over a certain size could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);

	if (rc < 0)
		goto io_error;
	else
		cFYI(1, ("Bytes read %d", rc));

	file->f_path.dentry->d_inode->i_atime =
		current_fs_time(file->f_path.dentry->d_inode->i_sb);

	if (PAGE_CACHE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);
	rc = 0;

io_error:
	kunmap(page);
	page_cache_release(page);
	return rc;
}

static int cifs_readpage(struct file *file, struct page *page)
{
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	int rc = -EACCES;
	int xid;

	xid = GetXid();

	if (file->private_data == NULL) {
		FreeXid(xid);
		return -EBADF;
	}

	cFYI(1, ("readpage %p at offset %d 0x%x\n",
		 page, (int)offset, (int)offset));

	rc = cifs_readpage_worker(file, page, &offset);

	unlock_page(page);

	FreeXid(xid);
	return rc;
}

static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *open_file;

	read_lock(&GlobalSMBSeslock);
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (open_file->closePend)
			continue;
		if (open_file->pfile &&
		    ((open_file->pfile->f_flags & O_RDWR) ||
		     (open_file->pfile->f_flags & O_WRONLY))) {
			read_unlock(&GlobalSMBSeslock);
			return 1;
		}
	}
	read_unlock(&GlobalSMBSeslock);
	return 0;
}

/* We do not want to update the file size from server for inodes
   open for write - to avoid races with writepage extending
   the file - in the future we could consider allowing
   refreshing the inode only on increases in the file size
   but this is tricky to do without racing with writebehind
   page caching in the current Linux kernel design */
int is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
{
	if (!cifsInode)
		return 1;

	if (is_inode_writable(cifsInode)) {
		/* This inode is open for write at least once */
		struct cifs_sb_info *cifs_sb;

		cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
			/* since no page cache to corrupt on directio
			   we can change size safely */
			return 1;
		}

		if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
			return 1;

		return 0;
	} else
		return 1;
}

static int cifs_prepare_write(struct file *file, struct page *page,
			      unsigned from, unsigned to)
{
	int rc = 0;
	loff_t i_size;
	loff_t offset;

	cFYI(1, ("prepare write for page %p from %d to %d", page, from, to));
	if (PageUptodate(page))
		return 0;

	/* If we are writing a full page it will be up to date,
	   no need to read from the server */
	if ((to == PAGE_CACHE_SIZE) && (from == 0)) {
		SetPageUptodate(page);
		return 0;
	}

	offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	i_size = i_size_read(page->mapping->host);

	if ((offset >= i_size) ||
	    ((from == 0) && (offset + to) >= i_size)) {
		/*
		 * We don't need to read data beyond the end of the file.
		 * Zero it, and set the page uptodate.
		 */
		simple_prepare_write(file, page, from, to);
		SetPageUptodate(page);
	} else if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		/* might as well read a page, it is fast enough */
		rc = cifs_readpage_worker(file, page, &offset);
	} else {
		/* we could try using another file handle if there is one -
		   but how would we lock it to prevent close of that handle
		   racing with this read?  In any case
		   this will be written out by commit_write so is fine */
	}

	/* we do not need to pass errors back
	   e.g. if we do not have read access to the file
	   because cifs_commit_write will do the right thing. -- shaggy */

	return 0;
}

const struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.prepare_write = cifs_prepare_write,
	.commit_write = cifs_commit_write,
	.set_page_dirty = __set_page_dirty_nobuffers,
	/* .sync_page = cifs_sync_page, */
};

/*
 * cifs_readpages requires the server to support a buffer large enough to
 * contain the header plus one complete page of data.  Otherwise, we need
 * to leave cifs_readpages out of the address space operations.
 */
const struct address_space_operations cifs_addr_ops_smallbuf = {
	.readpage = cifs_readpage,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.prepare_write = cifs_prepare_write,
	.commit_write = cifs_commit_write,
	.set_page_dirty = __set_page_dirty_nobuffers,
	/* .sync_page = cifs_sync_page, */
};