4 * vfs operations that deal with files
6 * Copyright (C) International Business Machines Corp., 2002,2003
7 * Author(s): Steve French (sfrench@us.ibm.com)
8 * Jeremy Allison (jra@samba.org)
10 * This library is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU Lesser General Public License as published
12 * by the Free Software Foundation; either version 2.1 of the License, or
13 * (at your option) any later version.
15 * This library is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
18 * the GNU Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with this library; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 #include <linux/backing-dev.h>
26 #include <linux/stat.h>
27 #include <linux/fcntl.h>
28 #include <linux/mpage.h>
29 #include <linux/pagemap.h>
30 #include <linux/pagevec.h>
31 #include <linux/smp_lock.h>
32 #include <linux/writeback.h>
33 #include <linux/delay.h>
34 #include <asm/div64.h>
38 #include "cifsproto.h"
39 #include "cifs_unicode.h"
40 #include "cifs_debug.h"
41 #include "cifs_fs_sb.h"
/*
 * Initialize the per-open-handle private data for a freshly opened file.
 * @private_data: caller-allocated cifsFileInfo, zeroed and filled in here
 * @inode/@file:  VFS objects this server handle belongs to
 * @netfid:       SMB file id returned by the server for this open
 * NOTE(review): elided excerpt — the closing brace/return of this helper
 * is not visible here; presumably it returns @private_data. Confirm.
 */
43 static inline struct cifsFileInfo *cifs_init_private(
44 	struct cifsFileInfo *private_data, struct inode *inode,
45 	struct file *file, __u16 netfid)
47 	memset(private_data, 0, sizeof(struct cifsFileInfo));
48 	private_data->netfid = netfid;
49 	private_data->pid = current->tgid;
	/* fh_sem serializes handle (in)validation; lock_sem guards llist */
50 	init_MUTEX(&private_data->fh_sem);
51 	init_MUTEX(&private_data->lock_sem);
52 	INIT_LIST_HEAD(&private_data->llist);
53 	private_data->pfile = file; /* needed for writepage */
54 	private_data->pInode = inode;
55 	private_data->invalidHandle = FALSE;
56 	private_data->closePend = FALSE;
57 	/* we have to track num writers to the inode, since writepages
58 	does not tell us which handle the write is for so there can
59 	be a close (overlapping with write) of the filehandle that
60 	cifs_writepages chose to use */
61 	atomic_set(&private_data->wrtPending,0);
/*
 * Map the POSIX open() access mode bits to the SMB desired-access mask
 * sent in the open request.
 * NOTE(review): elided excerpt — the return statements for the O_RDONLY
 * and O_WRONLY branches (presumably GENERIC_READ / GENERIC_WRITE) and
 * the final fallback return are not visible here.
 */
66 static inline int cifs_convert_flags(unsigned int flags)
68 	if ((flags & O_ACCMODE) == O_RDONLY)
70 	else if ((flags & O_ACCMODE) == O_WRONLY)
72 	else if ((flags & O_ACCMODE) == O_RDWR) {
73 		/* GENERIC_ALL is too much permission to request
74 		   can cause unnecessary access denied on create */
75 		/* return GENERIC_ALL; */
76 		return (GENERIC_READ | GENERIC_WRITE);
/*
 * Map POSIX open() creation/truncation flags to the CIFS create
 * disposition (see the mapping table in cifs_open below).  The checks
 * are ordered most-specific first: CREAT|EXCL, then CREAT|TRUNC,
 * then CREAT alone, then TRUNC alone.
 * NOTE(review): elided excerpt — returns for the first and third
 * branches (FILE_CREATE / FILE_OPEN_IF per the table) and the default
 * FILE_OPEN return are not visible here.
 */
82 static inline int cifs_get_disposition(unsigned int flags)
84 	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
86 	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
87 		return FILE_OVERWRITE_IF;
88 	else if ((flags & O_CREAT) == O_CREAT)
90 	else if ((flags & O_TRUNC) == O_TRUNC)
91 		return FILE_OVERWRITE;
96 /* all arguments to this function must be checked for validity in caller */
/*
 * Post-open bookkeeping: link the new handle into the inode's open-file
 * list (writable handles at the tail so readers are found first by
 * prepare_write), decide whether cached pages may be kept, refresh the
 * inode from the server, and record the caching rights the granted
 * oplock confers.
 * NOTE(review): elided excerpt — the read_lock pairing the
 * write_unlock(&GlobalSMBSeslock) below, several braces, and the final
 * return of rc are not visible here.
 */
97 static inline int cifs_open_inode_helper(struct inode *inode, struct file *file,
98 	struct cifsInodeInfo *pCifsInode, struct cifsFileInfo *pCifsFile,
99 	struct cifsTconInfo *pTcon, int *oplock, FILE_ALL_INFO *buf,
100 	char *full_path, int xid)
102 	struct timespec temp;
105 	/* want handles we can use to read with first
106 	   in the list so we do not have to walk the
107 	   list to search for one in prepare_write */
108 	if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
109 		list_add_tail(&pCifsFile->flist,
110 			      &pCifsInode->openFileList);
112 		list_add(&pCifsFile->flist,
113 			 &pCifsInode->openFileList);
115 	write_unlock(&GlobalSMBSeslock);
116 	if (pCifsInode->clientCanCacheRead) {
117 		/* we have the inode open somewhere else
118 		   no need to discard cache data */
119 		goto client_can_cache;
122 	/* BB need same check in cifs_create too? */
123 	/* if not oplocked, invalidate inode pages if mtime or file
	   size changed on server (else cache stays valid) */
125 	temp = cifs_NTtimeToUnix(le64_to_cpu(buf->LastWriteTime));
126 	if (timespec_equal(&file->f_dentry->d_inode->i_mtime, &temp) &&
127 			   (file->f_dentry->d_inode->i_size ==
128 			    (loff_t)le64_to_cpu(buf->EndOfFile))) {
129 		cFYI(1, ("inode unchanged on server"));
131 		if (file->f_dentry->d_inode->i_mapping) {
132 		/* BB no need to lock inode until after invalidate
133 		   since namei code should already have it locked? */
134 			filemap_write_and_wait(file->f_dentry->d_inode->i_mapping);
136 		cFYI(1, ("invalidating remote inode since open detected it "
138 		invalidate_remote_inode(file->f_dentry->d_inode);
	/* refresh inode metadata via the UNIX or NT info path */
142 	if (pTcon->ses->capabilities & CAP_UNIX)
143 		rc = cifs_get_inode_info_unix(&file->f_dentry->d_inode,
144 			full_path, inode->i_sb, xid);
146 		rc = cifs_get_inode_info(&file->f_dentry->d_inode,
147 			full_path, buf, inode->i_sb, xid);
	/* low nibble of oplock encodes the level granted by the server */
149 	if ((*oplock & 0xF) == OPLOCK_EXCLUSIVE) {
150 		pCifsInode->clientCanCacheAll = TRUE;
151 		pCifsInode->clientCanCacheRead = TRUE;
152 		cFYI(1, ("Exclusive Oplock granted on inode %p",
153 			 file->f_dentry->d_inode));
154 	} else if ((*oplock & 0xF) == OPLOCK_READ)
155 			pCifsInode->clientCanCacheRead = TRUE;
/*
 * cifs_open - VFS ->open() for regular files on a CIFS mount.
 *
 * For O_CREAT opens, first search the inode's open-file list for a
 * handle already created by cifs_create for this pid (pfile == NULL
 * marks it as not yet attached to a struct file) and reuse it.
 * Otherwise build the full path, map the POSIX flags to an SMB
 * desired-access / create-disposition pair, and issue CIFSSMBOpen,
 * falling back to the legacy OpenX for servers without NT SMBs.
 * On success, allocate and initialize the cifsFileInfo, link it into
 * the tcon and inode lists, and apply oplock-based caching state.
 * NOTE(review): elided excerpt — declarations of rc/xid/netfid/oplock/
 * desiredAccess/disposition, several error-path returns/kfree calls,
 * and the final FreeXid/return are not visible here.
 */
160 int cifs_open(struct inode *inode, struct file *file)
164 	struct cifs_sb_info *cifs_sb;
165 	struct cifsTconInfo *pTcon;
166 	struct cifsFileInfo *pCifsFile;
167 	struct cifsInodeInfo *pCifsInode;
168 	struct list_head *tmp;
169 	char *full_path = NULL;
173 	FILE_ALL_INFO *buf = NULL;
177 	cifs_sb = CIFS_SB(inode->i_sb);
178 	pTcon = cifs_sb->tcon;
180 	if (file->f_flags & O_CREAT) {
181 		/* search inode for this file and fill in file->private_data */
182 		pCifsInode = CIFS_I(file->f_dentry->d_inode);
183 		read_lock(&GlobalSMBSeslock);
184 		list_for_each(tmp, &pCifsInode->openFileList) {
185 			pCifsFile = list_entry(tmp, struct cifsFileInfo,
187 			if ((pCifsFile->pfile == NULL) &&
188 			    (pCifsFile->pid == current->tgid)) {
189 				/* mode set in cifs_create */
191 				/* needed for writepage */
192 				pCifsFile->pfile = file;
194 				file->private_data = pCifsFile;
198 		read_unlock(&GlobalSMBSeslock);
199 		if (file->private_data != NULL) {
			/* reused the handle cifs_create opened — done */
204 			if (file->f_flags & O_EXCL)
205 				cERROR(1, ("could not find file instance for "
206 					   "new file %p", file));
210 	full_path = build_path_from_dentry(file->f_dentry);
211 	if (full_path == NULL) {
216 	cFYI(1, (" inode = 0x%p file flags are 0x%x for %s",
217 		 inode, file->f_flags, full_path));
218 	desiredAccess = cifs_convert_flags(file->f_flags);
220 /*********************************************************************
221  *  open flag mapping table:
222  *
223  *	POSIX Flag            CIFS Disposition
224  *	----------            ----------------
225  *	O_CREAT               FILE_OPEN_IF
226  *	O_CREAT | O_EXCL      FILE_CREATE
227  *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
228  *	O_TRUNC               FILE_OVERWRITE
229  *	none of the above     FILE_OPEN
230  *
231  *	Note that there is not a direct match between disposition
232  *	FILE_SUPERSEDE (ie create whether or not file exists although
233  *	O_CREAT | O_TRUNC is similar but truncates the existing
234  *	file rather than creating a new file as FILE_SUPERSEDE does
235  *	(which uses the attributes / metadata passed in on open call)
236  *
237  *?  O_SYNC is a reasonable match to CIFS writethrough flag
238  *?  and the read write flags match reasonably.  O_LARGEFILE
239  *?  is irrelevant because largefile support is always used
240  *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
241  *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
242  *********************************************************************/
244 	disposition = cifs_get_disposition(file->f_flags);
251 	/* BB pass O_SYNC flag through on file attributes .. BB */
253 	/* Also refresh inode by passing in file_info buf returned by SMBOpen
254 	   and calling get_inode_info with returned buf (at least helps
255 	   non-Unix server case) */
257 	/* BB we can not do this if this is the second open of a file
258 	   and the first handle has writebehind data, we might be
259 	   able to simply do a filemap_fdatawrite/filemap_fdatawait first */
260 	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
266 	if (cifs_sb->tcon->ses->capabilities & CAP_NT_SMBS)
267 		rc = CIFSSMBOpen(xid, pTcon, full_path, disposition,
268 			 desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
269 			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
270 				 & CIFS_MOUNT_MAP_SPECIAL_CHR);
272 		rc = -EIO; /* no NT SMB support fall into legacy open below */
275 		/* Old server, try legacy style OpenX */
276 		rc = SMBLegacyOpen(xid, pTcon, full_path, disposition,
277 			desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
278 			cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
279 				& CIFS_MOUNT_MAP_SPECIAL_CHR);
282 		cFYI(1, ("cifs_open returned 0x%x", rc));
286 		kmalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
287 	if (file->private_data == NULL) {
291 	pCifsFile = cifs_init_private(file->private_data, inode, file, netfid);
292 	write_lock(&GlobalSMBSeslock);
293 	list_add(&pCifsFile->tlist, &pTcon->openFileList);
295 	pCifsInode = CIFS_I(file->f_dentry->d_inode);
	/* helper also links pCifsFile into the inode list and drops the
	   seslock write lock on its main path */
297 		rc = cifs_open_inode_helper(inode, file, pCifsInode,
299 					    &oplock, buf, full_path, xid);
301 		write_unlock(&GlobalSMBSeslock);
304 	if (oplock & CIFS_CREATE_ACTION) {
305 		/* time to set mode which we can not set earlier due to
306 		   problems creating new read-only files */
307 		if (cifs_sb->tcon->ses->capabilities & CAP_UNIX) {
308 			CIFSSMBUnixSetPerms(xid, pTcon, full_path,
310 					    (__u64)-1, (__u64)-1, 0 /* dev */,
312 					    cifs_sb->mnt_cifs_flags &
313 						CIFS_MOUNT_MAP_SPECIAL_CHR);
315 		/* BB implement via Windows security descriptors eg
316 		   CIFSSMBWinSetPerms(xid, pTcon, full_path, mode,
318 		   in the meantime could set r/o dos attribute when
319 		   perms are eg: mode & 0222 == 0 */
330 /* Try to reacquire byte range locks that were released when session */
331 /* to server was lost */
/*
 * Called from cifs_reopen_file after a handle is re-established.
 * Currently a stub (see BB note); the body visible here performs no
 * relocking.  NOTE(review): return statement elided from this excerpt.
 */
332 static int cifs_relock_file(struct cifsFileInfo *cifsFile)
336 	/* BB list all locks open on this file and relock */
/*
 * Re-open a file handle after the session to the server was lost and
 * reconnected.  Serialized per handle by fh_sem.  Uses disposition
 * FILE_OPEN so the existing file is neither truncated nor recreated,
 * then refreshes the netfid, optionally revalidates inode data (unless
 * the caller is itself flushing, to avoid deadlock), reapplies
 * oplock-derived caching state, and re-acquires byte-range locks.
 * NOTE(review): elided excerpt — declarations (rc, xid, netfid,
 * oplock, desiredAccess), the third parameter of the function (callers
 * below pass a flush flag), error-path returns, and the final return
 * are not visible here.
 */
341 static int cifs_reopen_file(struct inode *inode, struct file *file,
346 	struct cifs_sb_info *cifs_sb;
347 	struct cifsTconInfo *pTcon;
348 	struct cifsFileInfo *pCifsFile;
349 	struct cifsInodeInfo *pCifsInode;
350 	char *full_path = NULL;
352 	int disposition = FILE_OPEN;
357 	if (file->private_data) {
358 		pCifsFile = (struct cifsFileInfo *)file->private_data;
363 	down(&pCifsFile->fh_sem);
364 	if (pCifsFile->invalidHandle == FALSE) {
		/* another task already reopened it for us */
365 		up(&pCifsFile->fh_sem);
370 	if (file->f_dentry == NULL) {
371 		up(&pCifsFile->fh_sem);
372 		cFYI(1, ("failed file reopen, no valid name if dentry freed"));
376 	cifs_sb = CIFS_SB(inode->i_sb);
377 	pTcon = cifs_sb->tcon;
378 /* can not grab rename sem here because various ops, including
379    those that already have the rename sem can end up causing writepage
380    to get called and if the server was down that means we end up here,
381    and we can never tell if the caller already has the rename_sem */
382 	full_path = build_path_from_dentry(file->f_dentry);
383 	if (full_path == NULL) {
384 		up(&pCifsFile->fh_sem);
389 	cFYI(1, (" inode = 0x%p file flags are 0x%x for %s",
390 		 inode, file->f_flags,full_path));
391 	desiredAccess = cifs_convert_flags(file->f_flags);
398 	/* Can not refresh inode by passing in file_info buf to be returned
399 	   by SMBOpen and then calling get_inode_info with returned buf
400 	   since file might have write behind data that needs to be flushed
401 	   and server version of file size can be stale. If we knew for sure
402 	   that inode was not dirty locally we could do this */
404 /*	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
406 		up(&pCifsFile->fh_sem);
411 	rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, desiredAccess,
412 			 CREATE_NOT_DIR, &netfid, &oplock, NULL,
413 			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
414 				CIFS_MOUNT_MAP_SPECIAL_CHR);
416 		up(&pCifsFile->fh_sem);
417 		cFYI(1, ("cifs_open returned 0x%x", rc));
418 		cFYI(1, ("oplock: %d", oplock));
420 		pCifsFile->netfid = netfid;
421 		pCifsFile->invalidHandle = FALSE;
422 		up(&pCifsFile->fh_sem);
423 		pCifsInode = CIFS_I(inode);
426 				filemap_write_and_wait(inode->i_mapping);
427 			/* temporarily disable caching while we
428 			   go to server to get inode info */
429 				pCifsInode->clientCanCacheAll = FALSE;
430 				pCifsInode->clientCanCacheRead = FALSE;
431 				if (pTcon->ses->capabilities & CAP_UNIX)
432 					rc = cifs_get_inode_info_unix(&inode,
433 						full_path, inode->i_sb, xid);
435 					rc = cifs_get_inode_info(&inode,
436 						full_path, NULL, inode->i_sb,
438 			} /* else we are writing out data to server already
439 			     and could deadlock if we tried to flush data, and
440 			     since we do not know if we have data that would
441 			     invalidate the current end of file on the server
442 			     we can not go to the server to get the new inod
	/* low nibble of oplock encodes the granted caching level */
444 		if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
445 			pCifsInode->clientCanCacheAll = TRUE;
446 			pCifsInode->clientCanCacheRead = TRUE;
447 			cFYI(1, ("Exclusive Oplock granted on inode %p",
448 				 file->f_dentry->d_inode));
449 		} else if ((oplock & 0xF) == OPLOCK_READ) {
450 			pCifsInode->clientCanCacheRead = TRUE;
451 			pCifsInode->clientCanCacheAll = FALSE;
453 			pCifsInode->clientCanCacheRead = FALSE;
454 			pCifsInode->clientCanCacheAll = FALSE;
456 		cifs_relock_file(pCifsFile);
/*
 * cifs_close - VFS ->release() for regular files.
 * Marks the handle closePend, briefly spins (with delays, elided) while
 * writepages users still hold wrtPending references so in-flight writes
 * can reach the server first, sends SMBClose unless the tree connection
 * itself needs reconnect, discards stored byte-range lock records,
 * unlinks the handle from the inode and tcon lists, and frees it.
 * If this was the last open handle, caching rights for the inode are
 * dropped.  Returns the sticky write_behind_rc when close itself
 * succeeded, so deferred write errors are reported to the application.
 * NOTE(review): elided excerpt — declarations (rc, xid, timeout),
 * the delay call inside the wait loop, kfree of the lock records, and
 * the final return are not visible here.
 */
465 int cifs_close(struct inode *inode, struct file *file)
469 	struct cifs_sb_info *cifs_sb;
470 	struct cifsTconInfo *pTcon;
471 	struct cifsFileInfo *pSMBFile =
472 		(struct cifsFileInfo *)file->private_data;
476 	cifs_sb = CIFS_SB(inode->i_sb);
477 	pTcon = cifs_sb->tcon;
479 		struct cifsLockInfo *li, *tmp;
481 		pSMBFile->closePend = TRUE;
483 			/* no sense reconnecting to close a file that is
485 			if (pTcon->tidStatus != CifsNeedReconnect) {
487 				while((atomic_read(&pSMBFile->wrtPending) != 0)
488 					 && (timeout < 1000) ) {
489 					/* Give write a better chance to get to
490 					server ahead of the close.  We do not
491 					want to add a wait_q here as it would
492 					increase the memory utilization as
493 					the struct would be in each open file,
494 					but this should give enough time to
496 					cERROR(1,("close with pending writes"));
500 				rc = CIFSSMBClose(xid, pTcon,
505 		/* Delete any outstanding lock records.
506 		   We'll lose them when the file is closed anyway. */
507 		down(&pSMBFile->lock_sem);
508 		list_for_each_entry_safe(li, tmp, &pSMBFile->llist, llist) {
509 			list_del(&li->llist);
512 		up(&pSMBFile->lock_sem);
514 		write_lock(&GlobalSMBSeslock);
515 		list_del(&pSMBFile->flist);
516 		list_del(&pSMBFile->tlist);
517 		write_unlock(&GlobalSMBSeslock);
518 		kfree(pSMBFile->search_resume_name);
519 		kfree(file->private_data);
520 		file->private_data = NULL;
524 	if (list_empty(&(CIFS_I(inode)->openFileList))) {
525 		cFYI(1, ("closing last open instance for inode %p", inode));
526 		/* if the file is not open we do not know if we can cache info
527 		   on this inode, much less write behind and read ahead */
528 		CIFS_I(inode)->clientCanCacheRead = FALSE;
529 		CIFS_I(inode)->clientCanCacheAll  = FALSE;
531 	if ((rc ==0) && CIFS_I(inode)->write_behind_rc)
532 		rc = CIFS_I(inode)->write_behind_rc;
/*
 * cifs_closedir - VFS ->release() for directories.
 * If an enumeration is still in progress (endOfSearch false, handle
 * valid), close the server-side find handle via CIFSFindClose, then
 * release the network buffer left over in the search info (small or
 * regular SMB buffer, freed with the matching helper), free any saved
 * resume name, and free the private data.
 * NOTE(review): elided excerpt — declarations (rc, xid, ptmp),
 * surrounding if/braces, kfree of the resume name, and the final
 * FreeXid/return are not visible here.
 */
537 int cifs_closedir(struct inode *inode, struct file *file)
541 	struct cifsFileInfo *pCFileStruct =
542 	    (struct cifsFileInfo *)file->private_data;
545 	cFYI(1, ("Closedir inode = 0x%p", inode));
550 		struct cifsTconInfo *pTcon;
551 		struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_dentry->d_sb);
553 		pTcon = cifs_sb->tcon;
555 		cFYI(1, ("Freeing private data in close dir"));
556 		if ((pCFileStruct->srch_inf.endOfSearch == FALSE) &&
557 		   (pCFileStruct->invalidHandle == FALSE)) {
558 			pCFileStruct->invalidHandle = TRUE;
559 			rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
560 			cFYI(1, ("Closing uncompleted readdir with rc %d",
562 			/* not much we can do if it fails anyway, ignore rc */
565 		ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
567 			cFYI(1, ("closedir free smb buf in srch struct"));
568 			pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
569 			if(pCFileStruct->srch_inf.smallBuf)
570 				cifs_small_buf_release(ptmp);
572 				cifs_buf_release(ptmp);
574 		ptmp = pCFileStruct->search_resume_name;
576 			cFYI(1, ("closedir free resume name"));
577 			pCFileStruct->search_resume_name = NULL;
580 		kfree(file->private_data);
581 		file->private_data = NULL;
583 	/* BB can we lock the filestruct while this is going on? */
/*
 * Record a byte-range lock obtained from the server on this handle's
 * llist so it can be found (and released) later, e.g. on unlock or
 * close.  lock_sem guards the list.
 * NOTE(review): elided excerpt — the kmalloc NULL check, the
 * assignments of len/offset/lockType into *li, the up(&fid->lock_sem),
 * and the return are not visible here.
 */
588 static int store_file_lock(struct cifsFileInfo *fid, __u64 len,
589 		__u64 offset, __u8 lockType)
591 	struct cifsLockInfo *li = kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
597 	down(&fid->lock_sem);
598 	list_add(&li->llist, &fid->llist);
/*
 * cifs_lock - VFS ->lock() for byte-range locks (F_GETLK/F_SETLK(W)).
 * Decodes the fl_flags/fl_type into an SMB LockingAndX type, then:
 *  - for a lock *test* (getlk): either asks the server via the POSIX
 *    lock extension, or probes with a lock+unlock pair on Windows-style
 *    servers (a sharing violation means the range is in use);
 *  - for set/unset: uses CIFSSMBPosixLock on Unix-extension servers,
 *    otherwise CIFSSMBLock, storing granted Windows-style locks via
 *    store_file_lock and, on unlock, releasing every stored lock the
 *    unlock range fully covers.
 * Finally mirrors the result in the local POSIX lock table.
 * NOTE(review): elided excerpt — declarations (rc, xid, numLock,
 * numUnlock, netfid, length, posix_locking, posix_lock_type, stored_rc),
 * several if/else frames around the branches, and the final
 * FreeXid/return are not visible here.
 */
603 int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
609 	int wait_flag = FALSE;
610 	struct cifs_sb_info *cifs_sb;
611 	struct cifsTconInfo *pTcon;
613 	__u8 lockType = LOCKING_ANDX_LARGE_FILES;
616 	length = 1 + pfLock->fl_end - pfLock->fl_start;
620 	cFYI(1, ("Lock parm: 0x%x flockflags: "
621 		 "0x%x flocktype: 0x%x start: %lld end: %lld",
622 		cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
625 	if (pfLock->fl_flags & FL_POSIX)
627 	if (pfLock->fl_flags & FL_FLOCK)
629 	if (pfLock->fl_flags & FL_SLEEP) {
630 		cFYI(1, ("Blocking lock"));
633 	if (pfLock->fl_flags & FL_ACCESS)
634 		cFYI(1, ("Process suspended by mandatory locking - "
635 			 "not implemented yet"));
636 	if (pfLock->fl_flags & FL_LEASE)
637 		cFYI(1, ("Lease on file - not implemented yet"));
638 	if (pfLock->fl_flags &
639 	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
640 		cFYI(1, ("Unknown lock flags 0x%x", pfLock->fl_flags));
642 	if (pfLock->fl_type == F_WRLCK) {
643 		cFYI(1, ("F_WRLCK "));
645 	} else if (pfLock->fl_type == F_UNLCK) {
646 		cFYI(1, ("F_UNLCK"));
648 		/* Check if unlock includes more than
650 	} else if (pfLock->fl_type == F_RDLCK) {
651 		cFYI(1, ("F_RDLCK"));
652 		lockType |= LOCKING_ANDX_SHARED_LOCK;
654 	} else if (pfLock->fl_type == F_EXLCK) {
655 		cFYI(1, ("F_EXLCK"));
657 	} else if (pfLock->fl_type == F_SHLCK) {
658 		cFYI(1, ("F_SHLCK"));
659 		lockType |= LOCKING_ANDX_SHARED_LOCK;
662 		cFYI(1, ("Unknown type of lock"));
664 	cifs_sb = CIFS_SB(file->f_dentry->d_sb);
665 	pTcon = cifs_sb->tcon;
667 	if (file->private_data == NULL) {
671 	netfid = ((struct cifsFileInfo *)file->private_data)->netfid;
	/* POSIX lock semantics need both CAP_UNIX and the FCNTL capability */
673 	posix_locking = (cifs_sb->tcon->ses->capabilities & CAP_UNIX) &&
674 			(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(cifs_sb->tcon->fsUnixInfo.Capability));
676 	/* BB add code here to normalize offset and length to
677 	account for negative length which we can not accept over the
	/* lock-test (getlk) path */
682 		if(lockType & LOCKING_ANDX_SHARED_LOCK)
683 			posix_lock_type = CIFS_RDLCK;
685 			posix_lock_type = CIFS_WRLCK;
686 		rc = CIFSSMBPosixLock(xid, pTcon, netfid, 1 /* get */,
688 				posix_lock_type, wait_flag);
693 		/* BB we could chain these into one lock request BB */
694 		rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start,
695 				 0, 1, lockType, 0 /* wait flag */ );
697 			rc = CIFSSMBLock(xid, pTcon, netfid, length,
698 					 pfLock->fl_start, 1 /* numUnlock */ ,
699 					 0 /* numLock */ , lockType,
701 			pfLock->fl_type = F_UNLCK;
703 				cERROR(1, ("Error unlocking previously locked "
704 					   "range %d during test of lock", rc));
708 			/* if rc == ERR_SHARING_VIOLATION ? */
709 			rc = 0;	/* do not change lock type to unlock
710 				   since range in use */
717 	if (!numLock && !numUnlock) {
718 		/* if no lock or unlock then nothing
719 		to do since we do not know what it is */
	/* set/unset path: POSIX extension servers first */
726 		if(lockType & LOCKING_ANDX_SHARED_LOCK)
727 			posix_lock_type = CIFS_RDLCK;
729 			posix_lock_type = CIFS_WRLCK;
732 			posix_lock_type = CIFS_UNLCK;
734 		rc = CIFSSMBPosixLock(xid, pTcon, netfid, 0 /* set */,
736 				posix_lock_type, wait_flag);
738 		struct cifsFileInfo *fid = (struct cifsFileInfo *)file->private_data;
741 			rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start,
742 					0, numLock, lockType, wait_flag);
745 				/* For Windows locks we must store them. */
746 				rc = store_file_lock(fid, length,
747 						pfLock->fl_start, lockType);
749 		} else if (numUnlock) {
750 			/* For each stored lock that this unlock overlaps
751 			   completely, unlock it. */
753 			struct cifsLockInfo *li, *tmp;
756 			down(&fid->lock_sem);
757 			list_for_each_entry_safe(li, tmp, &fid->llist, llist) {
758 				if (pfLock->fl_start <= li->offset &&
759 						length >= li->length) {
760 					stored_rc = CIFSSMBLock(xid, pTcon, netfid,
761 							li->length, li->offset,
762 							1, 0, li->type, FALSE);
766 						list_del(&li->llist);
774 	if (pfLock->fl_flags & FL_POSIX)
775 		posix_lock_file_wait(file, pfLock);
/*
 * cifs_user_write - write from a user-space buffer to the server.
 * Loops issuing CIFSSMBWrite in chunks of at most cifs_sb->wsize until
 * write_size bytes are sent, retrying transparently on -EAGAIN by
 * reopening an invalidated handle (without flushing, to avoid
 * deadlock).  Returns the number of bytes written so far even if the
 * file is closed or freed under us mid-loop.  Updates *poffset, inode
 * times and i_size on success.
 * NOTE(review): elided excerpt — declarations (rc, xid, long_op),
 * several early-return error paths (e.g. -EBADF), the FreeXid calls,
 * and the break on write failure are not visible here.
 */
780 ssize_t cifs_user_write(struct file *file, const char __user *write_data,
781 			size_t write_size, loff_t *poffset)
784 	unsigned int bytes_written = 0;
785 	unsigned int total_written;
786 	struct cifs_sb_info *cifs_sb;
787 	struct cifsTconInfo *pTcon;
789 	struct cifsFileInfo *open_file;
791 	if (file->f_dentry == NULL)
794 	cifs_sb = CIFS_SB(file->f_dentry->d_sb);
798 	pTcon = cifs_sb->tcon;
801 	   (" write %d bytes to offset %lld of %s", write_size,
802 	   *poffset, file->f_dentry->d_name.name)); */
804 	if (file->private_data == NULL)
807 	open_file = (struct cifsFileInfo *) file->private_data;
810 	if (file->f_dentry->d_inode == NULL) {
815 	if (*poffset > file->f_dentry->d_inode->i_size)
816 		long_op = 2; /* writes past end of file can take a long time */
820 	for (total_written = 0; write_size > total_written;
821 	     total_written += bytes_written) {
823 		while (rc == -EAGAIN) {
824 			if (file->private_data == NULL) {
825 				/* file has been closed on us */
827 				/* if we have gotten here we have written some data
828 				   and blocked, and the file has been freed on us while
829 				   we blocked so return what we managed to write */
830 				return total_written;
832 			if (open_file->closePend) {
835 					return total_written;
839 			if (open_file->invalidHandle) {
840 				if ((file->f_dentry == NULL) ||
841 				    (file->f_dentry->d_inode == NULL)) {
843 					return total_written;
845 				/* we could deadlock if we called
846 				   filemap_fdatawait from here so tell
847 				   reopen_file not to flush data to server
849 				rc = cifs_reopen_file(file->f_dentry->d_inode,
855 			rc = CIFSSMBWrite(xid, pTcon,
857 				   min_t(const int, cifs_sb->wsize,
858 					 write_size - total_written),
859 				   *poffset, &bytes_written,
860 				   NULL, write_data + total_written, long_op);
862 		if (rc || (bytes_written == 0)) {
870 			*poffset += bytes_written;
871 		long_op = FALSE; /* subsequent writes fast -
872 				    15 seconds is plenty */
875 	cifs_stats_bytes_written(pTcon, total_written);
877 	/* since the write may have blocked check these pointers again */
878 	if (file->f_dentry) {
879 		if (file->f_dentry->d_inode) {
880 			struct inode *inode = file->f_dentry->d_inode;
881 			inode->i_ctime = inode->i_mtime =
882 				current_fs_time(inode->i_sb);
883 			if (total_written > 0) {
884 				if (*poffset > file->f_dentry->d_inode->i_size)
885 					i_size_write(file->f_dentry->d_inode,
888 			mark_inode_dirty_sync(file->f_dentry->d_inode);
892 	return total_written;
/*
 * cifs_write - write from a kernel buffer (used by writepage paths).
 * Same chunked retry loop as cifs_user_write, but when the experimental
 * path is enabled (or signing requires it) it sends the data via the
 * vectored CIFSSMBWrite2 with iov[0] reserved for the SMB header,
 * otherwise falls back to CIFSSMBWrite.  Updates *poffset, inode
 * times and i_size on success.
 * NOTE(review): elided excerpt — declarations (rc, xid, long_op, len,
 * iov[2]), early-return error paths, and the break on write failure
 * are not visible here; the full condition around the Write2 branch is
 * also truncated.
 */
895 static ssize_t cifs_write(struct file *file, const char *write_data,
896 			  size_t write_size, loff_t *poffset)
899 	unsigned int bytes_written = 0;
900 	unsigned int total_written;
901 	struct cifs_sb_info *cifs_sb;
902 	struct cifsTconInfo *pTcon;
904 	struct cifsFileInfo *open_file;
906 	if (file->f_dentry == NULL)
909 	cifs_sb = CIFS_SB(file->f_dentry->d_sb);
913 	pTcon = cifs_sb->tcon;
915 	cFYI(1,("write %zd bytes to offset %lld of %s", write_size,
916 	   *poffset, file->f_dentry->d_name.name));
918 	if (file->private_data == NULL)
921 	open_file = (struct cifsFileInfo *)file->private_data;
924 	if (file->f_dentry->d_inode == NULL) {
929 	if (*poffset > file->f_dentry->d_inode->i_size)
930 		long_op = 2; /* writes past end of file can take a long time */
934 	for (total_written = 0; write_size > total_written;
935 	     total_written += bytes_written) {
937 		while (rc == -EAGAIN) {
938 			if (file->private_data == NULL) {
939 				/* file has been closed on us */
941 				/* if we have gotten here we have written some data
942 				   and blocked, and the file has been freed on us
943 				   while we blocked so return what we managed to
945 				return total_written;
947 			if (open_file->closePend) {
950 					return total_written;
954 			if (open_file->invalidHandle) {
955 				if ((file->f_dentry == NULL) ||
956 				   (file->f_dentry->d_inode == NULL)) {
958 					return total_written;
960 				/* we could deadlock if we called
961 				   filemap_fdatawait from here so tell
962 				   reopen_file not to flush data to
964 				rc = cifs_reopen_file(file->f_dentry->d_inode,
969 			if(experimEnabled || (pTcon->ses->server &&
970 				((pTcon->ses->server->secMode &
971 				(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
976 				len = min((size_t)cifs_sb->wsize,
977 					  write_size - total_written);
978 				/* iov[0] is reserved for smb header */
979 				iov[1].iov_base = (char *)write_data +
981 				iov[1].iov_len = len;
982 				rc = CIFSSMBWrite2(xid, pTcon,
983 						open_file->netfid, len,
984 						*poffset, &bytes_written,
987 				rc = CIFSSMBWrite(xid, pTcon,
989 					 min_t(const int, cifs_sb->wsize,
990 					       write_size - total_written),
991 					 *poffset, &bytes_written,
992 					 write_data + total_written,
995 		if (rc || (bytes_written == 0)) {
1003 			*poffset += bytes_written;
1004 		long_op = FALSE; /* subsequent writes fast -
1005 				    15 seconds is plenty */
1008 	cifs_stats_bytes_written(pTcon, total_written);
1010 	/* since the write may have blocked check these pointers again */
1011 	if (file->f_dentry) {
1012 		if (file->f_dentry->d_inode) {
1013 			file->f_dentry->d_inode->i_ctime =
1014 			file->f_dentry->d_inode->i_mtime = CURRENT_TIME;
1015 			if (total_written > 0) {
1016 				if (*poffset > file->f_dentry->d_inode->i_size)
1017 					i_size_write(file->f_dentry->d_inode,
1020 			mark_inode_dirty_sync(file->f_dentry->d_inode);
1024 	return total_written;
/*
 * Find an open handle on this inode that was opened for writing
 * (O_RDWR or O_WRONLY) and is not pending close.  Takes a wrtPending
 * reference on the returned handle (caller must atomic_dec it when
 * done); if the handle has been invalidated by a reconnect, tries to
 * reopen it, falling back to scanning for another handle on failure.
 * Returns NULL when no writable handle exists.
 * NOTE(review): elided excerpt — rc declaration, the success return of
 * open_file, the atomic_dec line broken across L587, and the trailing
 * return NULL are not fully visible here.
 */
1027 struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode)
1029 	struct cifsFileInfo *open_file;
1032 	/* Having a null inode here (because mapping->host was set to zero by
1033 	   the VFS or MM) should not happen but we had reports of on oops (due to
1034 	   it being zero) during stress testcases so we need to check for it */
1036 	if(cifs_inode == NULL) {
1037 		cERROR(1,("Null inode passed to cifs_writeable_file"));
1042 	read_lock(&GlobalSMBSeslock);
1043 	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
1044 		if (open_file->closePend)
1046 		if (open_file->pfile &&
1047 		    ((open_file->pfile->f_flags & O_RDWR) ||
1048 		     (open_file->pfile->f_flags & O_WRONLY))) {
1049 			atomic_inc(&open_file->wrtPending);
1050 			read_unlock(&GlobalSMBSeslock);
1051 			if((open_file->invalidHandle) &&
1052 			   (!open_file->closePend) /* BB fixme -since the second clause can not be true remove it BB */) {
1053 				rc = cifs_reopen_file(&cifs_inode->vfs_inode,
1054 						      open_file->pfile, FALSE);
1055 				/* if it fails, try another handle - might be */
1056 				/* dangerous to hold up writepages with retry */
1058 					cFYI(1,("failed on reopen file in wp"));
1059 					read_lock(&GlobalSMBSeslock);
1060 					/* can not use this handle, no write
1061 					   pending on this one after all */
1063 						(&open_file->wrtPending);
1070 	read_unlock(&GlobalSMBSeslock);
/*
 * Write bytes [from, to) of a page-cache page to the server.
 * Maps the page, sanity-checks the range, clips against i_size (never
 * extends the file from here), finds a writable handle on the inode
 * and pushes the bytes through cifs_write, dropping the wrtPending
 * reference afterwards.  Racing with truncate is tolerated (returns 0).
 * NOTE(review): elided excerpt — rc declaration, the kunmap calls on
 * each exit path, the write_data += from adjustment before the send,
 * the SetPageUptodate/rc handling after the write, and the final
 * return are not visible here.
 */
1074 static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1076 	struct address_space *mapping = page->mapping;
1077 	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1080 	int bytes_written = 0;
1081 	struct cifs_sb_info *cifs_sb;
1082 	struct cifsTconInfo *pTcon;
1083 	struct inode *inode;
1084 	struct cifsFileInfo *open_file;
1086 	if (!mapping || !mapping->host)
1089 	inode = page->mapping->host;
1090 	cifs_sb = CIFS_SB(inode->i_sb);
1091 	pTcon = cifs_sb->tcon;
1093 	offset += (loff_t)from;
1094 	write_data = kmap(page);
1097 	if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1102 	/* racing with truncate? */
1103 	if (offset > mapping->host->i_size) {
1105 		return 0; /* don't care */
1108 	/* check to make sure that we are not extending the file */
1109 	if (mapping->host->i_size - offset < (loff_t)to)
1110 		to = (unsigned)(mapping->host->i_size - offset);
1112 	open_file = find_writable_file(CIFS_I(mapping->host));
1114 		bytes_written = cifs_write(open_file->pfile, write_data,
1116 		atomic_dec(&open_file->wrtPending);
1117 		/* Does mm or vfs already set times? */
1118 		inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
1119 		if ((bytes_written > 0) && (offset)) {
1121 		} else if (bytes_written < 0) {
1126 		cFYI(1, ("No writeable filehandles for inode"));
/*
 * cifs_writepages - ->writepages() that batches up to 32 consecutive
 * dirty pages into one vectored CIFSSMBWrite2 call (iov[0] reserved
 * for the SMB header).  Falls back to generic_writepages (one page at
 * a time via cifs_writepage) when wsize < PAGE_CACHE_SIZE or when SMB
 * signing is in force.  Page collection follows the standard
 * pagevec_lookup_tag pattern (compare mpage_writepages): lock each
 * dirty page, re-check mapping/index/writeback, stop a batch at the
 * first non-consecutive page or when wsize would be exceeded, then
 * find a writable handle and send.  Errors mark AS_EIO on the mapping.
 * NOTE(review): elided excerpt — declarations (rc, xid, index, end,
 * next, done, scanned, n_iov, first, i, nr_pages, len, offset, page),
 * the unlock/kunmap/SetPageError handling after the send, the retry
 * wrap-around, and the final return are not visible here.
 */
1134 static int cifs_writepages(struct address_space *mapping,
1135 			   struct writeback_control *wbc)
1137 	struct backing_dev_info *bdi = mapping->backing_dev_info;
1138 	unsigned int bytes_to_write;
1139 	unsigned int bytes_written;
1140 	struct cifs_sb_info *cifs_sb;
1144 	int range_whole = 0;
1145 	struct kvec iov[32];
1151 	struct cifsFileInfo *open_file;
1153 	struct pagevec pvec;
1158 	cifs_sb = CIFS_SB(mapping->host->i_sb);
1161 	 * If wsize is smaller that the page cache size, default to writing
1162 	 * one page at a time via cifs_writepage
1164 	if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1165 		return generic_writepages(mapping, wbc);
1167 	if((cifs_sb->tcon->ses) && (cifs_sb->tcon->ses->server))
1168 		if(cifs_sb->tcon->ses->server->secMode &
1169                        (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
1171 			return generic_writepages(mapping, wbc);
1174 	 * BB: Is this meaningful for a non-block-device file system?
1175 	 * If it is, we should test it again after we do I/O
1177 	if (wbc->nonblocking && bdi_write_congested(bdi)) {
1178 		wbc->encountered_congestion = 1;
1184 	pagevec_init(&pvec, 0);
1185 	if (wbc->range_cyclic) {
1186 		index = mapping->writeback_index; /* Start from prev offset */
1189 		index = wbc->range_start >> PAGE_CACHE_SHIFT;
1190 		end = wbc->range_end >> PAGE_CACHE_SHIFT;
1191 		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
1196 	while (!done && (index <= end) &&
1197 	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
1198 			PAGECACHE_TAG_DIRTY,
1199 			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
1208 		for (i = 0; i < nr_pages; i++) {
1209 			page = pvec.pages[i];
1211 			 * At this point we hold neither mapping->tree_lock nor
1212 			 * lock on the page itself: the page may be truncated or
1213 			 * invalidated (changing page->mapping to NULL), or even
1214 			 * swizzled back from swapper_space to tmpfs file
1220 			else if (TestSetPageLocked(page))
1223 			if (unlikely(page->mapping != mapping)) {
1228 			if (!wbc->range_cyclic && page->index > end) {
1234 			if (next && (page->index != next)) {
1235 				/* Not next consecutive page */
1240 			if (wbc->sync_mode != WB_SYNC_NONE)
1241 				wait_on_page_writeback(page);
1243 			if (PageWriteback(page) ||
1244 					!test_clear_page_dirty(page)) {
1249 			if (page_offset(page) >= mapping->host->i_size) {
1256 			 * BB can we get rid of this?  pages are held by pvec
1258 			page_cache_get(page);
1260 			len = min(mapping->host->i_size - page_offset(page),
1261 				  (loff_t)PAGE_CACHE_SIZE);
1263 			/* reserve iov[0] for the smb header */
1265 			iov[n_iov].iov_base = kmap(page);
1266 			iov[n_iov].iov_len = len;
1267 			bytes_to_write += len;
1271 				offset = page_offset(page);
1273 			next = page->index + 1;
1274 			if (bytes_to_write + PAGE_CACHE_SIZE > cifs_sb->wsize)
1278 			/* Search for a writable handle every time we call
1279 			 * CIFSSMBWrite2.  We can't rely on the last handle
1280 			 * we used to still be valid
1282 			open_file = find_writable_file(CIFS_I(mapping->host));
1284 				cERROR(1, ("No writable handles for inode"));
1287 				rc = CIFSSMBWrite2(xid, cifs_sb->tcon,
1289 						   bytes_to_write, offset,
1290 						   &bytes_written, iov, n_iov,
1292 				atomic_dec(&open_file->wrtPending);
1293 				if (rc || bytes_written < bytes_to_write) {
1294 					cERROR(1,("Write2 ret %d, written = %d",
1295 						  rc, bytes_written));
1296 					/* BB what if continued retry is
1297 					   requested via mount flags? */
1298 					set_bit(AS_EIO, &mapping->flags);
1300 					cifs_stats_bytes_written(cifs_sb->tcon,
1304 			for (i = 0; i < n_iov; i++) {
1305 				page = pvec.pages[first + i];
1306 				/* Should we also set page error on
1307 				   success rc but too little data written? */
1308 				/* BB investigate retry logic on temporary
1309 				   server crash cases and how recovery works
1310 				   when page marked as error */
1315 				page_cache_release(page);
1317 			if ((wbc->nr_to_write -= n_iov) <= 0)
1321 		pagevec_release(&pvec);
1323 	if (!scanned && !done) {
1325 		 * We hit the last page and there is more work to be done: wrap
1326 		 * back to the start of the file
1332 	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
1333 		mapping->writeback_index = index;
/*
 * cifs_writepage - ->writepage(): write one whole page via
 * cifs_partialpagewrite.  Takes an extra page reference around the
 * operation.
 * NOTE(review): elided excerpt — rc/xid declarations, the page
 * lock/unlock and kunmap handling, and the final return are not
 * visible here.
 */
1340 static int cifs_writepage(struct page* page, struct writeback_control *wbc)
1346 /* BB add check for wbc flags */
1347 	page_cache_get(page);
1348 	if (!PageUptodate(page)) {
1349 		cFYI(1, ("ppw - page not up to date"));
1352 	rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
1353 	SetPageUptodate(page); /* BB add check for error and Clearuptodate? */
1355 	page_cache_release(page);
/*
 * cifs_commit_write - ->commit_write(): finalize a prepare_write.
 * If the write extends the file, bump i_size locally (the commented-out
 * block shows an abandoned server-side SetFileSize approach).  If the
 * page is not uptodate, send just the written span [offset, to) to the
 * server via cifs_write using this file's own handle (cheaper than the
 * handle search in partialpagewrite); otherwise simply mark the page
 * dirty and let writepage flush it later.
 * NOTE(review): elided excerpt — rc/xid/page_data declarations, the
 * closing of the commented-out block, the kunmap after cifs_write,
 * the offset-validation error path, and the final return are not
 * visible here.
 */
1360 static int cifs_commit_write(struct file *file, struct page *page,
1361 	unsigned offset, unsigned to)
1365 	struct inode *inode = page->mapping->host;
1366 	loff_t position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
1370 	cFYI(1, ("commit write for page %p up to position %lld for %d",
1371 		 page, position, to));
1372 	if (position > inode->i_size) {
1373 		i_size_write(inode, position);
1374 		/* if (file->private_data == NULL) {
1377 			open_file = (struct cifsFileInfo *)file->private_data;
1378 			cifs_sb = CIFS_SB(inode->i_sb);
1380 			while (rc == -EAGAIN) {
1381 				if ((open_file->invalidHandle) &&
1382 				    (!open_file->closePend)) {
1383 					rc = cifs_reopen_file(
1384 						file->f_dentry->d_inode, file);
1388 				if (!open_file->closePend) {
1389 					rc = CIFSSMBSetFileSize(xid,
1390 						cifs_sb->tcon, position,
1392 						open_file->pid, FALSE);
1398 			cFYI(1, (" SetEOF (commit write) rc = %d", rc));
1401 	if (!PageUptodate(page)) {
1402 		position =  ((loff_t)page->index << PAGE_CACHE_SHIFT) + offset;
1403 		/* can not rely on (or let) writepage write this data */
1405 			cFYI(1, ("Illegal offsets, can not copy from %d to %d",
1410 		/* this is probably better than directly calling
1411 		   partialpage_write since in this function the file handle is
1412 		   known which we might as well leverage */
1413 		/* BB check if anything else missing out of ppw
1414 		   such as updating last write time */
1415 		page_data = kmap(page);
1416 		rc = cifs_write(file, page_data + offset, to-offset,
1420 		/* else if (rc < 0) should we set writebehind rc? */
1423 		set_page_dirty(page);
/*
 * fsync file op: flush dirty pages for this inode to the server via
 * filemap_fdatawrite, and reset the recorded write-behind error once
 * the flush has been issued.
 * NOTE(review): listing is elided — the rc check guarding the
 * write_behind_rc reset and the return are not visible here.
 */
1430 int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
1434 struct inode *inode = file->f_dentry->d_inode;
1438 cFYI(1, ("Sync file - name: %s datasync: 0x%x",
1439 dentry->d_name.name, datasync));
1441 rc = filemap_fdatawrite(inode->i_mapping);
/* Clear any stashed write-behind error now that data has been pushed. */
1443 CIFS_I(inode)->write_behind_rc = 0;
1448 /* static void cifs_sync_page(struct page *page)
1450 struct address_space *mapping;
1451 struct inode *inode;
1452 unsigned long index = page->index;
1453 unsigned int rpages = 0;
1456 cFYI(1, ("sync page %p",page));
1457 mapping = page->mapping;
1460 inode = mapping->host;
1464 /* fill in rpages then
1465 result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
1467 /* cFYI(1, ("rpages is %d for sync page of Index %ld", rpages, index));
1477 * As file closes, flush all cached write data for this inode checking
1478 * for write behind errors.
/*
 * flush file op (called on close()): write out all dirty pages for the
 * inode and, on success, clear the saved write-behind error so it is
 * reported at most once.
 */
1480 int cifs_flush(struct file *file, fl_owner_t id)
1482 struct inode * inode = file->f_dentry->d_inode;
1485 /* Rather than do the steps manually:
1486 lock the inode for writing
1487 loop through pages looking for write behind data (dirty pages)
1488 coalesce into contiguous 16K (or smaller) chunks to write to server
1489 send to server (prefer in parallel)
1490 deal with writebehind errors
1491 unlock inode for writing
1492 filemapfdatawrite appears easier for the time being */
1494 rc = filemap_fdatawrite(inode->i_mapping);
1495 if (!rc) /* reset wb rc if we were able to write out dirty pages */
1496 CIFS_I(inode)->write_behind_rc = 0;
1498 cFYI(1, ("Flush inode %p file %p rc %d",inode,file,rc));
/*
 * Read into a USER-space buffer (used for the direct-I/O read path).
 * Loops issuing CIFSSMBRead calls of at most current_read_size bytes,
 * then copy_to_user()s the payload out of the SMB response buffer
 * (skipping the 4-byte RFC1001 length field plus the DataOffset from
 * the read response header).  Retries on -EAGAIN, reopening the file
 * handle if it was invalidated by a reconnect.
 * NOTE(review): listing is elided — xid setup/teardown, some error
 * returns and loop exits are not visible in this excerpt.
 */
1503 ssize_t cifs_user_read(struct file *file, char __user *read_data,
1504 size_t read_size, loff_t *poffset)
1507 unsigned int bytes_read = 0;
1508 unsigned int total_read = 0;
1509 unsigned int current_read_size;
1510 struct cifs_sb_info *cifs_sb;
1511 struct cifsTconInfo *pTcon;
1513 struct cifsFileInfo *open_file;
1514 char *smb_read_data;
1515 char __user *current_offset;
1516 struct smb_com_read_rsp *pSMBr;
1519 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
1520 pTcon = cifs_sb->tcon;
1522 if (file->private_data == NULL) {
1526 open_file = (struct cifsFileInfo *)file->private_data;
1528 if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
1529 cFYI(1, ("attempting read on write only file instance"));
/* Advance through the user buffer one SMB read at a time. */
1531 for (total_read = 0, current_offset = read_data;
1532 read_size > total_read;
1533 total_read += bytes_read, current_offset += bytes_read) {
1534 current_read_size = min_t(const int, read_size - total_read,
1537 smb_read_data = NULL;
/* -EAGAIN: transport asked us to retry; revalidate handle first. */
1538 while (rc == -EAGAIN) {
1539 int buf_type = CIFS_NO_BUFFER;
1540 if ((open_file->invalidHandle) &&
1541 (!open_file->closePend)) {
1542 rc = cifs_reopen_file(file->f_dentry->d_inode,
1547 rc = CIFSSMBRead(xid, pTcon,
1549 current_read_size, *poffset,
1550 &bytes_read, &smb_read_data,
/* Payload sits after the RFC1001 length + response DataOffset. */
1552 pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1553 if (smb_read_data) {
1554 if (copy_to_user(current_offset,
1556 4 /* RFC1001 length field */ +
1557 le16_to_cpu(pSMBr->DataOffset),
/* Release whichever pool the response buffer came from. */
1562 if(buf_type == CIFS_SMALL_BUFFER)
1563 cifs_small_buf_release(smb_read_data);
1564 else if(buf_type == CIFS_LARGE_BUFFER)
1565 cifs_buf_release(smb_read_data);
1566 smb_read_data = NULL;
1569 if (rc || (bytes_read == 0)) {
1577 cifs_stats_bytes_read(pTcon, bytes_read);
1578 *poffset += bytes_read;
/*
 * Read into a KERNEL buffer (the page-cache read path).  Unlike
 * cifs_user_read above, the destination is a kernel address so
 * CIFSSMBRead can decode straight into it — no copy_to_user and no
 * separate response buffer to release per iteration.
 * Caps each request at the server's negotiated maxBuf (minus header
 * slack) for pre-NT servers lacking CAP_LARGE_FILES, and retries on
 * -EAGAIN after revalidating an invalidated handle.
 * FIX: line 1637 had been corrupted to the mis-encoded sequence
 * "¤t_offset" — restored the intended "&current_offset" argument.
 * NOTE(review): listing is elided — xid handling, some error returns
 * and the function close are not visible in this excerpt.
 */
1586 static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
1590 unsigned int bytes_read = 0;
1591 unsigned int total_read;
1592 unsigned int current_read_size;
1593 struct cifs_sb_info *cifs_sb;
1594 struct cifsTconInfo *pTcon;
1596 char *current_offset;
1597 struct cifsFileInfo *open_file;
1598 int buf_type = CIFS_NO_BUFFER;
1601 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
1602 pTcon = cifs_sb->tcon;
1604 if (file->private_data == NULL) {
1608 open_file = (struct cifsFileInfo *)file->private_data;
1610 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
1611 cFYI(1, ("attempting read on write only file instance"));
1613 for (total_read = 0, current_offset = read_data;
1614 read_size > total_read;
1615 total_read += bytes_read, current_offset += bytes_read) {
1616 current_read_size = min_t(const int, read_size - total_read,
1618 /* For windows me and 9x we do not want to request more
1619 than it negotiated since it will refuse the read then */
1621 !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
1622 current_read_size = min_t(const int, current_read_size,
1623 pTcon->ses->server->maxBuf - 128);
1626 while (rc == -EAGAIN) {
1627 if ((open_file->invalidHandle) &&
1628 (!open_file->closePend)) {
1629 rc = cifs_reopen_file(file->f_dentry->d_inode,
1634 rc = CIFSSMBRead(xid, pTcon,
1636 current_read_size, *poffset,
1637 &bytes_read, &current_offset,
1640 if (rc || (bytes_read == 0)) {
1648 cifs_stats_bytes_read(pTcon, total_read);
1649 *poffset += bytes_read;
/*
 * mmap file op: revalidate the inode's cached metadata/pages against
 * the server first, then fall through to generic_file_mmap so the
 * generic page-cache machinery (readpage/writepage above) backs the
 * mapping.  A failed revalidate is logged but, from what is visible
 * here, the mmap still proceeds — TODO confirm against the elided
 * lines.
 */
1656 int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
1658 struct dentry *dentry = file->f_dentry;
1662 rc = cifs_revalidate(dentry);
1664 cFYI(1, ("Validation prior to mmap failed, error=%d", rc));
1668 rc = generic_file_mmap(file, vma);
/*
 * Distribute @bytes_read bytes of SMB read payload at @data into the
 * pages queued on @pages, adding each filled page to the page cache
 * and to @plru_pvec for batched LRU insertion.  Pages are consumed
 * from the tail of the list; a partial final page is zero-filled past
 * the copied bytes.  A page that cannot be added to the page cache is
 * skipped and its share of the data discarded.
 */
1674 static void cifs_copy_cache_pages(struct address_space *mapping,
1675 struct list_head *pages, int bytes_read, char *data,
1676 struct pagevec *plru_pvec)
1681 while (bytes_read > 0) {
1682 if (list_empty(pages))
1685 page = list_entry(pages->prev, struct page, lru);
1686 list_del(&page->lru);
/* If insertion fails, drop this page and skip its slice of data. */
1688 if (add_to_page_cache(page, mapping, page->index,
1690 page_cache_release(page);
1691 cFYI(1, ("Add page cache failed"));
1692 data += PAGE_CACHE_SIZE;
1693 bytes_read -= PAGE_CACHE_SIZE;
1697 target = kmap_atomic(page,KM_USER0);
/* Last, partial page: copy what we have and zero the tail. */
1699 if (PAGE_CACHE_SIZE > bytes_read) {
1700 memcpy(target, data, bytes_read);
1701 /* zero the tail end of this partial page */
1702 memset(target + bytes_read, 0,
1703 PAGE_CACHE_SIZE - bytes_read);
1706 memcpy(target, data, PAGE_CACHE_SIZE);
1707 bytes_read -= PAGE_CACHE_SIZE;
1709 kunmap_atomic(target, KM_USER0);
1711 flush_dcache_page(page);
1712 SetPageUptodate(page);
/* Batch LRU additions; flush the pagevec when it fills. */
1714 if (!pagevec_add(plru_pvec, page))
1715 __pagevec_lru_add(plru_pvec);
1716 data += PAGE_CACHE_SIZE;
/*
 * readpages address-space op: service readahead by coalescing runs of
 * adjacent pages from @page_list into single large CIFSSMBRead calls
 * (capped at a page-aligned cifs_sb->rsize), then scattering the
 * returned payload into the page cache via cifs_copy_cache_pages().
 * On read error or short/zero read the remaining queued pages are
 * released uncached.  The SMB response buffer is released after each
 * iteration and once more before exit.
 * NOTE(review): listing is elided — rc initialization, contig_pages
 * accounting and several braces are not visible in this excerpt.
 */
1721 static int cifs_readpages(struct file *file, struct address_space *mapping,
1722 struct list_head *page_list, unsigned num_pages)
1728 struct cifs_sb_info *cifs_sb;
1729 struct cifsTconInfo *pTcon;
1731 unsigned int read_size,i;
1732 char *smb_read_data = NULL;
1733 struct smb_com_read_rsp *pSMBr;
1734 struct pagevec lru_pvec;
1735 struct cifsFileInfo *open_file;
1736 int buf_type = CIFS_NO_BUFFER;
1739 if (file->private_data == NULL) {
1743 open_file = (struct cifsFileInfo *)file->private_data;
1744 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
1745 pTcon = cifs_sb->tcon;
1747 pagevec_init(&lru_pvec, 0);
1749 for (i = 0; i < num_pages; ) {
1750 unsigned contig_pages;
1751 struct page *tmp_page;
1752 unsigned long expected_index;
1754 if (list_empty(page_list))
/* Pages are queued tail-first; start from the list tail. */
1757 page = list_entry(page_list->prev, struct page, lru);
1758 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1760 /* count adjacent pages that we will read into */
1763 list_entry(page_list->prev, struct page, lru)->index;
1764 list_for_each_entry_reverse(tmp_page,page_list,lru) {
1765 if (tmp_page->index == expected_index) {
1771 if (contig_pages + i > num_pages)
1772 contig_pages = num_pages - i;
1774 /* for reads over a certain size could initiate async
1777 read_size = contig_pages * PAGE_CACHE_SIZE;
1778 /* Read size needs to be in multiples of one page */
1779 read_size = min_t(const unsigned int, read_size,
1780 cifs_sb->rsize & PAGE_CACHE_MASK);
/* Retry on -EAGAIN, reopening an invalidated handle first. */
1783 while (rc == -EAGAIN) {
1784 if ((open_file->invalidHandle) &&
1785 (!open_file->closePend)) {
1786 rc = cifs_reopen_file(file->f_dentry->d_inode,
1792 rc = CIFSSMBRead(xid, pTcon,
1795 &bytes_read, &smb_read_data,
1797 /* BB more RC checks ? */
/* Free the stale response buffer before the retry loop re-reads. */
1799 if (smb_read_data) {
1800 if(buf_type == CIFS_SMALL_BUFFER)
1801 cifs_small_buf_release(smb_read_data);
1802 else if(buf_type == CIFS_LARGE_BUFFER)
1803 cifs_buf_release(smb_read_data);
1804 smb_read_data = NULL;
1808 if ((rc < 0) || (smb_read_data == NULL)) {
1809 cFYI(1, ("Read error in readpages: %d", rc));
1810 /* clean up remaining pages off list */
1811 while (!list_empty(page_list) && (i < num_pages)) {
1812 page = list_entry(page_list->prev, struct page,
1814 list_del(&page->lru);
1815 page_cache_release(page);
1818 } else if (bytes_read > 0) {
1819 pSMBr = (struct smb_com_read_rsp *)smb_read_data;
/* Payload starts after the RFC1001 length + response DataOffset. */
1820 cifs_copy_cache_pages(mapping, page_list, bytes_read,
1821 smb_read_data + 4 /* RFC1001 hdr */ +
1822 le16_to_cpu(pSMBr->DataOffset), &lru_pvec);
1824 i += bytes_read >> PAGE_CACHE_SHIFT;
1825 cifs_stats_bytes_read(pTcon, bytes_read);
1826 if ((int)(bytes_read & PAGE_CACHE_MASK) != bytes_read) {
1827 i++; /* account for partial page */
1829 /* server copy of file can have smaller size
1831 /* BB do we need to verify this common case ?
1832 this case is ok - if we are at server EOF
1833 we will hit it on next read */
1835 /* while (!list_empty(page_list) && (i < num_pages)) {
1836 page = list_entry(page_list->prev,
1838 list_del(&page->list);
1839 page_cache_release(page);
/* Zero bytes read: likely past server EOF; drop remaining pages. */
1844 cFYI(1, ("No bytes read (%d) at offset %lld . "
1845 "Cleaning remaining pages from readahead list",
1846 bytes_read, offset));
1847 /* BB turn off caching and do new lookup on
1848 file size at server? */
1849 while (!list_empty(page_list) && (i < num_pages)) {
1850 page = list_entry(page_list->prev, struct page,
1852 list_del(&page->lru);
1854 /* BB removeme - replace with zero of page? */
1855 page_cache_release(page);
1859 if (smb_read_data) {
1860 if(buf_type == CIFS_SMALL_BUFFER)
1861 cifs_small_buf_release(smb_read_data);
1862 else if(buf_type == CIFS_LARGE_BUFFER)
1863 cifs_buf_release(smb_read_data);
1864 smb_read_data = NULL;
/* Push any pages still batched in the pagevec onto the LRU. */
1869 pagevec_lru_add(&lru_pvec);
1871 /* need to free smb_read_data buf before exit */
1872 if (smb_read_data) {
1873 if(buf_type == CIFS_SMALL_BUFFER)
1874 cifs_small_buf_release(smb_read_data);
1875 else if(buf_type == CIFS_LARGE_BUFFER)
1876 cifs_buf_release(smb_read_data);
1877 smb_read_data = NULL;
/*
 * Fill one page by reading PAGE_CACHE_SIZE bytes from the server at
 * *poffset via cifs_read(), zeroing the unread tail on a short read,
 * updating the inode atime and marking the page uptodate on success.
 * The page is kmapped for the duration and an extra reference is held.
 * NOTE(review): listing is elided — error handling between the read
 * and the atime update is not visible in this excerpt.
 */
1884 static int cifs_readpage_worker(struct file *file, struct page *page,
1890 page_cache_get(page);
1891 read_data = kmap(page);
1892 /* for reads over a certain size could initiate async read ahead */
1894 rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
1899 cFYI(1, ("Bytes read %d",rc));
1901 file->f_dentry->d_inode->i_atime =
1902 current_fs_time(file->f_dentry->d_inode->i_sb);
/* Short read: zero-fill the remainder of the page. */
1904 if (PAGE_CACHE_SIZE > rc)
1905 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
1907 flush_dcache_page(page);
1908 SetPageUptodate(page);
1913 page_cache_release(page);
/*
 * readpage address-space op: compute the file offset for @page and
 * delegate the actual server read to cifs_readpage_worker().
 * Requires a valid open-file handle in file->private_data.
 */
1917 static int cifs_readpage(struct file *file, struct page *page)
1919 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1925 if (file->private_data == NULL) {
1930 cFYI(1, ("readpage %p at offset %d 0x%x\n",
1931 page, (int)offset, (int)offset));
1933 rc = cifs_readpage_worker(file, page, &offset);
1941 /* We do not want to update the file size from server for inodes
1942 open for write - to avoid races with writepage extending
1943 the file - in the future we could consider allowing
1944 refreshing the inode only on increases in the file size
1945 but this is tricky to do without racing with writebehind
1946 page caching in the current Linux kernel design */
/*
 * Decide whether the cached i_size may be overwritten with the value
 * reported by the server.  If a writable handle exists, the handle's
 * wrtPending count (bumped by find_writable_file) is dropped again
 * since no write is actually pending here; on a direct-I/O mount
 * (CIFS_MOUNT_DIRECT_IO) there is no page cache to corrupt, so the
 * size change is allowed regardless.
 * NOTE(review): listing is elided — the return statements are not
 * visible in this excerpt.
 */
1947 int is_size_safe_to_change(struct cifsInodeInfo *cifsInode)
1949 struct cifsFileInfo *open_file = NULL;
1952 open_file = find_writable_file(cifsInode);
1955 struct cifs_sb_info *cifs_sb;
1957 /* there is not actually a write pending so let
1958 this handle go free and allow it to
1959 be closable if needed */
1960 atomic_dec(&open_file->wrtPending);
1962 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
1963 if ( cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO ) {
1964 /* since no page cache to corrupt on directio
1965 we can change size safely */
/*
 * prepare_write address-space op: make @page ready for a copy of bytes
 * [from, to).  A full-page write needs no server read (the copy will
 * overwrite everything), so the page is simply marked uptodate; for a
 * partial write on a readable handle the existing contents are pulled
 * in via cifs_readpage_worker().  Read errors are deliberately not
 * propagated — commit_write will push the data out regardless.
 */
1974 static int cifs_prepare_write(struct file *file, struct page *page,
1975 unsigned from, unsigned to)
1978 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1979 cFYI(1, ("prepare write for page %p from %d to %d",page,from,to));
1980 if (!PageUptodate(page)) {
1981 /* if (to - from != PAGE_CACHE_SIZE) {
1982 void *kaddr = kmap_atomic(page, KM_USER0);
1983 memset(kaddr, 0, from);
1984 memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
1985 flush_dcache_page(page);
1986 kunmap_atomic(kaddr, KM_USER0);
1988 /* If we are writing a full page it will be up to date,
1989 no need to read from the server */
1990 if ((to == PAGE_CACHE_SIZE) && (from == 0))
1991 SetPageUptodate(page);
1993 /* might as well read a page, it is fast enough */
1994 if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
1995 rc = cifs_readpage_worker(file, page, &offset);
1997 /* should we try using another file handle if there is one -
1998 how would we lock it to prevent close of that handle
1999 racing with this read?
2000 In any case this will be written out by commit_write */
2004 /* BB should we pass any errors back?
2005 e.g. if we do not have read access to the file */
/*
 * Address-space operations for mounts whose negotiated buffer size can
 * hold the SMB header plus a full page — includes ->readpages for
 * multi-page readahead (cf. cifs_addr_ops_smallbuf below, which omits
 * it).  sync_page remains disabled pending the commented-out
 * cifs_sync_page implementation above.
 */
2009 const struct address_space_operations cifs_addr_ops = {
2010 .readpage = cifs_readpage,
2011 .readpages = cifs_readpages,
2012 .writepage = cifs_writepage,
2013 .writepages = cifs_writepages,
2014 .prepare_write = cifs_prepare_write,
2015 .commit_write = cifs_commit_write,
2016 .set_page_dirty = __set_page_dirty_nobuffers,
2017 /* .sync_page = cifs_sync_page, */
2022 * cifs_readpages requires the server to support a buffer large enough to
2023 * contain the header plus one complete page of data. Otherwise, we need
2024 * to leave cifs_readpages out of the address space operations.
2026 const struct address_space_operations cifs_addr_ops_smallbuf = {
2027 .readpage = cifs_readpage,
2028 .writepage = cifs_writepage,
2029 .writepages = cifs_writepages,
2030 .prepare_write = cifs_prepare_write,
2031 .commit_write = cifs_commit_write,
2032 .set_page_dirty = __set_page_dirty_nobuffers,
2033 /* .sync_page = cifs_sync_page, */