4 * vfs operations that deal with files
6 * Copyright (C) International Business Machines Corp., 2002,2003
7 * Author(s): Steve French (sfrench@us.ibm.com)
9 * This library is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU Lesser General Public License as published
11 * by the Free Software Foundation; either version 2.1 of the License, or
12 * (at your option) any later version.
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
17 * the GNU Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public License
20 * along with this library; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 #include <linux/backing-dev.h>
25 #include <linux/stat.h>
26 #include <linux/fcntl.h>
27 #include <linux/mpage.h>
28 #include <linux/pagemap.h>
29 #include <linux/pagevec.h>
30 #include <linux/smp_lock.h>
31 #include <linux/writeback.h>
32 #include <linux/delay.h>
33 #include <asm/div64.h>
37 #include "cifsproto.h"
38 #include "cifs_unicode.h"
39 #include "cifs_debug.h"
40 #include "cifs_fs_sb.h"
42 static inline struct cifsFileInfo *cifs_init_private(
43 struct cifsFileInfo *private_data, struct inode *inode,
44 struct file *file, __u16 netfid)
46 memset(private_data, 0, sizeof(struct cifsFileInfo));
47 private_data->netfid = netfid;
48 private_data->pid = current->tgid;
49 init_MUTEX(&private_data->fh_sem);
50 private_data->pfile = file; /* needed for writepage */
51 private_data->pInode = inode;
52 private_data->invalidHandle = FALSE;
53 private_data->closePend = FALSE;
54 /* we have to track the number of writers to the inode, since
55 writepages does not tell us which handle the write is for, so
56 there can be a close (overlapping with a write) of the filehandle
57 that cifs_writepages chose to use */
58 atomic_set(&private_data->wrtPending,0);
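/* Roughly: a caller that borrows this handle for writeback (see
   find_writable_file below) does an atomic_inc of wrtPending before
   using it and an atomic_dec when done, and cifs_close polls the
   counter before sending the close so the handle is not torn down
   under an in-flight write */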
63 static inline int cifs_convert_flags(unsigned int flags)
65 if ((flags & O_ACCMODE) == O_RDONLY)
67 else if ((flags & O_ACCMODE) == O_WRONLY)
69 else if ((flags & O_ACCMODE) == O_RDWR) {
70 /* GENERIC_ALL is too much permission to request; it can
71 cause unnecessary access-denied errors on create */
72 /* return GENERIC_ALL; */
73 return (GENERIC_READ | GENERIC_WRITE);
79 static inline int cifs_get_disposition(unsigned int flags)
81 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
83 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
84 return FILE_OVERWRITE_IF;
85 else if ((flags & O_CREAT) == O_CREAT)
87 else if ((flags & O_TRUNC) == O_TRUNC)
88 return FILE_OVERWRITE;
93 /* all arguments to this function must be checked for validity in caller */
94 static inline int cifs_open_inode_helper(struct inode *inode, struct file *file,
95 struct cifsInodeInfo *pCifsInode, struct cifsFileInfo *pCifsFile,
96 struct cifsTconInfo *pTcon, int *oplock, FILE_ALL_INFO *buf,
97 char *full_path, int xid)
102 /* keep handles we can read from at the front of
103 the list so we do not have to walk the whole
104 list searching for one in prepare_write */
105 if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
106 list_add_tail(&pCifsFile->flist,
107 &pCifsInode->openFileList);
109 list_add(&pCifsFile->flist,
110 &pCifsInode->openFileList);
112 write_unlock(&GlobalSMBSeslock);
113 if (pCifsInode->clientCanCacheRead) {
114 /* we have the inode open somewhere else;
115 no need to discard cached data */
116 goto client_can_cache;
119 /* BB need same check in cifs_create too? */
120 /* if not oplocked, invalidate inode pages if mtime or file size changed */
122 temp = cifs_NTtimeToUnix(le64_to_cpu(buf->LastWriteTime));
123 if (timespec_equal(&file->f_dentry->d_inode->i_mtime, &temp) &&
124 (file->f_dentry->d_inode->i_size ==
125 (loff_t)le64_to_cpu(buf->EndOfFile))) {
126 cFYI(1, ("inode unchanged on server"));
128 if (file->f_dentry->d_inode->i_mapping) {
129 /* BB no need to lock inode until after invalidate
130 since namei code should already have it locked? */
131 filemap_write_and_wait(file->f_dentry->d_inode->i_mapping);
133 cFYI(1, ("invalidating remote inode since open detected it "
135 invalidate_remote_inode(file->f_dentry->d_inode);
139 if (pTcon->ses->capabilities & CAP_UNIX)
140 rc = cifs_get_inode_info_unix(&file->f_dentry->d_inode,
141 full_path, inode->i_sb, xid);
143 rc = cifs_get_inode_info(&file->f_dentry->d_inode,
144 full_path, buf, inode->i_sb, xid);
146 if ((*oplock & 0xF) == OPLOCK_EXCLUSIVE) {
147 pCifsInode->clientCanCacheAll = TRUE;
148 pCifsInode->clientCanCacheRead = TRUE;
149 cFYI(1, ("Exclusive Oplock granted on inode %p",
150 file->f_dentry->d_inode));
151 } else if ((*oplock & 0xF) == OPLOCK_READ)
152 pCifsInode->clientCanCacheRead = TRUE;
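/* Summary of the oplock handling here and in cifs_reopen_file below:
   an exclusive oplock lets the client cache both reads and writes
   (clientCanCacheAll and clientCanCacheRead set), a read (level II)
   oplock allows read caching only, and anything else leaves
   client-side caching disabled */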
157 int cifs_open(struct inode *inode, struct file *file)
161 struct cifs_sb_info *cifs_sb;
162 struct cifsTconInfo *pTcon;
163 struct cifsFileInfo *pCifsFile;
164 struct cifsInodeInfo *pCifsInode;
165 struct list_head *tmp;
166 char *full_path = NULL;
170 FILE_ALL_INFO *buf = NULL;
174 cifs_sb = CIFS_SB(inode->i_sb);
175 pTcon = cifs_sb->tcon;
177 if (file->f_flags & O_CREAT) {
178 /* search inode for this file and fill in file->private_data */
179 pCifsInode = CIFS_I(file->f_dentry->d_inode);
180 read_lock(&GlobalSMBSeslock);
181 list_for_each(tmp, &pCifsInode->openFileList) {
182 pCifsFile = list_entry(tmp, struct cifsFileInfo,
184 if ((pCifsFile->pfile == NULL) &&
185 (pCifsFile->pid == current->tgid)) {
186 /* mode set in cifs_create */
188 /* needed for writepage */
189 pCifsFile->pfile = file;
191 file->private_data = pCifsFile;
195 read_unlock(&GlobalSMBSeslock);
196 if (file->private_data != NULL) {
201 if (file->f_flags & O_EXCL)
202 cERROR(1, ("could not find file instance for "
203 "new file %p", file));
207 full_path = build_path_from_dentry(file->f_dentry);
208 if (full_path == NULL) {
213 cFYI(1, (" inode = 0x%p file flags are 0x%x for %s",
214 inode, file->f_flags, full_path));
215 desiredAccess = cifs_convert_flags(file->f_flags);
217 /*********************************************************************
218 * open flag mapping table:
220 * POSIX Flag CIFS Disposition
221 * ---------- ----------------
222 * O_CREAT FILE_OPEN_IF
223 * O_CREAT | O_EXCL FILE_CREATE
224 * O_CREAT | O_TRUNC FILE_OVERWRITE_IF
225 * O_TRUNC FILE_OVERWRITE
226 * none of the above FILE_OPEN
228 * Note that there is no direct match for the FILE_SUPERSEDE
229 * disposition (ie create whether or not the file exists).
230 * O_CREAT | O_TRUNC is similar, but it truncates the existing
231 * file rather than creating a new one as FILE_SUPERSEDE does
232 * (which also uses the attributes / metadata passed in on the open call)
234 *? O_SYNC is a reasonable match to CIFS writethrough flag
235 *? and the read write flags match reasonably. O_LARGEFILE
236 *? is irrelevant because largefile support is always used
237 *? by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
238 * O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
239 *********************************************************************/
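/* For example (illustrative): an open with O_WRONLY | O_CREAT | O_TRUNC
   is sent as GENERIC_WRITE access with a FILE_OVERWRITE_IF disposition,
   while a plain O_RDONLY open becomes GENERIC_READ with FILE_OPEN */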
241 disposition = cifs_get_disposition(file->f_flags);
248 /* BB pass O_SYNC flag through on file attributes .. BB */
250 /* Also refresh inode by passing in file_info buf returned by SMBOpen
251 and calling get_inode_info with returned buf (at least helps
252 non-Unix server case) */
254 /* BB we can not do this if this is the second open of a file
255 and the first handle has writebehind data, we might be
256 able to simply do a filemap_fdatawrite/filemap_fdatawait first */
257 buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
263 if (cifs_sb->tcon->ses->capabilities & CAP_NT_SMBS)
264 rc = CIFSSMBOpen(xid, pTcon, full_path, disposition,
265 desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
266 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
267 & CIFS_MOUNT_MAP_SPECIAL_CHR);
269 rc = -EIO; /* no NT SMB support; fall through to legacy open below */
272 /* Old server, try legacy style OpenX */
273 rc = SMBLegacyOpen(xid, pTcon, full_path, disposition,
274 desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
275 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
276 & CIFS_MOUNT_MAP_SPECIAL_CHR);
279 cFYI(1, ("cifs_open returned 0x%x", rc));
283 kmalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
284 if (file->private_data == NULL) {
288 pCifsFile = cifs_init_private(file->private_data, inode, file, netfid);
289 write_lock(&GlobalSMBSeslock);
290 list_add(&pCifsFile->tlist, &pTcon->openFileList);
292 pCifsInode = CIFS_I(file->f_dentry->d_inode);
294 rc = cifs_open_inode_helper(inode, file, pCifsInode,
296 &oplock, buf, full_path, xid);
298 write_unlock(&GlobalSMBSeslock);
301 if (oplock & CIFS_CREATE_ACTION) {
302 /* time to set mode which we can not set earlier due to
303 problems creating new read-only files */
304 if (cifs_sb->tcon->ses->capabilities & CAP_UNIX) {
305 CIFSSMBUnixSetPerms(xid, pTcon, full_path,
307 (__u64)-1, (__u64)-1, 0 /* dev */,
309 cifs_sb->mnt_cifs_flags &
310 CIFS_MOUNT_MAP_SPECIAL_CHR);
312 /* BB implement via Windows security descriptors eg
313 CIFSSMBWinSetPerms(xid, pTcon, full_path, mode,
315 in the meantime could set r/o dos attribute when
316 perms are eg: mode & 0222 == 0 */
327 /* Try to reacquire byte range locks that were released when session */
328 /* to server was lost */
329 static int cifs_relock_file(struct cifsFileInfo *cifsFile)
333 /* BB list all locks open on this file and relock */
338 static int cifs_reopen_file(struct inode *inode, struct file *file,
343 struct cifs_sb_info *cifs_sb;
344 struct cifsTconInfo *pTcon;
345 struct cifsFileInfo *pCifsFile;
346 struct cifsInodeInfo *pCifsInode;
347 char *full_path = NULL;
349 int disposition = FILE_OPEN;
354 if (file->private_data) {
355 pCifsFile = (struct cifsFileInfo *)file->private_data;
360 down(&pCifsFile->fh_sem);
361 if (pCifsFile->invalidHandle == FALSE) {
362 up(&pCifsFile->fh_sem);
367 if (file->f_dentry == NULL) {
368 up(&pCifsFile->fh_sem);
369 cFYI(1, ("failed file reopen, no valid name if dentry freed"));
373 cifs_sb = CIFS_SB(inode->i_sb);
374 pTcon = cifs_sb->tcon;
375 /* can not grab the rename sem here because various ops, including
376 some that already hold the rename sem, can end up causing writepage
377 to get called, and if the server was down that means we end up here;
378 we can never tell if the caller already has the rename_sem */
379 full_path = build_path_from_dentry(file->f_dentry);
380 if (full_path == NULL) {
381 up(&pCifsFile->fh_sem);
386 cFYI(1, (" inode = 0x%p file flags are 0x%x for %s",
387 inode, file->f_flags,full_path));
388 desiredAccess = cifs_convert_flags(file->f_flags);
395 /* Can not refresh inode by passing in file_info buf to be returned
396 by SMBOpen and then calling get_inode_info with returned buf
397 since file might have write behind data that needs to be flushed
398 and server version of file size can be stale. If we knew for sure
399 that inode was not dirty locally we could do this */
401 /* buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
403 up(&pCifsFile->fh_sem);
408 rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, desiredAccess,
409 CREATE_NOT_DIR, &netfid, &oplock, NULL,
410 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
411 CIFS_MOUNT_MAP_SPECIAL_CHR);
413 up(&pCifsFile->fh_sem);
414 cFYI(1, ("cifs_open returned 0x%x", rc));
415 cFYI(1, ("oplock: %d", oplock));
417 pCifsFile->netfid = netfid;
418 pCifsFile->invalidHandle = FALSE;
419 up(&pCifsFile->fh_sem);
420 pCifsInode = CIFS_I(inode);
423 filemap_write_and_wait(inode->i_mapping);
424 /* temporarily disable caching while we
425 go to server to get inode info */
426 pCifsInode->clientCanCacheAll = FALSE;
427 pCifsInode->clientCanCacheRead = FALSE;
428 if (pTcon->ses->capabilities & CAP_UNIX)
429 rc = cifs_get_inode_info_unix(&inode,
430 full_path, inode->i_sb, xid);
432 rc = cifs_get_inode_info(&inode,
433 full_path, NULL, inode->i_sb,
435 } /* else we are writing out data to server already
436 and could deadlock if we tried to flush data, and
437 since we do not know if we have data that would
438 invalidate the current end of file on the server
439 we can not go to the server to get the new inode info */
441 if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
442 pCifsInode->clientCanCacheAll = TRUE;
443 pCifsInode->clientCanCacheRead = TRUE;
444 cFYI(1, ("Exclusive Oplock granted on inode %p",
445 file->f_dentry->d_inode));
446 } else if ((oplock & 0xF) == OPLOCK_READ) {
447 pCifsInode->clientCanCacheRead = TRUE;
448 pCifsInode->clientCanCacheAll = FALSE;
450 pCifsInode->clientCanCacheRead = FALSE;
451 pCifsInode->clientCanCacheAll = FALSE;
453 cifs_relock_file(pCifsFile);
462 int cifs_close(struct inode *inode, struct file *file)
466 struct cifs_sb_info *cifs_sb;
467 struct cifsTconInfo *pTcon;
468 struct cifsFileInfo *pSMBFile =
469 (struct cifsFileInfo *)file->private_data;
473 cifs_sb = CIFS_SB(inode->i_sb);
474 pTcon = cifs_sb->tcon;
476 pSMBFile->closePend = TRUE;
478 /* no sense reconnecting to close a file that is already closed */
480 if (pTcon->tidStatus != CifsNeedReconnect) {
482 while((atomic_read(&pSMBFile->wrtPending) != 0)
483 && (timeout < 1000) ) {
484 /* Give write a better chance to get to
485 server ahead of the close. We do not
486 want to add a wait_q here as it would
487 increase the memory utilization as
488 the struct would be in each open file,
489 but this should give enough time to let the write get to the server */
491 cERROR(1,("close with pending writes"));
495 rc = CIFSSMBClose(xid, pTcon,
499 write_lock(&GlobalSMBSeslock);
500 list_del(&pSMBFile->flist);
501 list_del(&pSMBFile->tlist);
502 write_unlock(&GlobalSMBSeslock);
503 kfree(pSMBFile->search_resume_name);
504 kfree(file->private_data);
505 file->private_data = NULL;
509 if (list_empty(&(CIFS_I(inode)->openFileList))) {
510 cFYI(1, ("closing last open instance for inode %p", inode));
511 /* if the file is not open we do not know if we can cache info
512 on this inode, much less write behind and read ahead */
513 CIFS_I(inode)->clientCanCacheRead = FALSE;
514 CIFS_I(inode)->clientCanCacheAll = FALSE;
516 if ((rc == 0) && CIFS_I(inode)->write_behind_rc)
517 rc = CIFS_I(inode)->write_behind_rc;
522 int cifs_closedir(struct inode *inode, struct file *file)
526 struct cifsFileInfo *pCFileStruct =
527 (struct cifsFileInfo *)file->private_data;
530 cFYI(1, ("Closedir inode = 0x%p", inode));
535 struct cifsTconInfo *pTcon;
536 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_dentry->d_sb);
538 pTcon = cifs_sb->tcon;
540 cFYI(1, ("Freeing private data in close dir"));
541 if ((pCFileStruct->srch_inf.endOfSearch == FALSE) &&
542 (pCFileStruct->invalidHandle == FALSE)) {
543 pCFileStruct->invalidHandle = TRUE;
544 rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
545 cFYI(1, ("Closing uncompleted readdir with rc %d",
547 /* not much we can do if it fails anyway, ignore rc */
550 ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
552 cFYI(1, ("closedir free smb buf in srch struct"));
553 pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
554 if(pCFileStruct->srch_inf.smallBuf)
555 cifs_small_buf_release(ptmp);
557 cifs_buf_release(ptmp);
559 ptmp = pCFileStruct->search_resume_name;
561 cFYI(1, ("closedir free resume name"));
562 pCFileStruct->search_resume_name = NULL;
565 kfree(file->private_data);
566 file->private_data = NULL;
568 /* BB can we lock the filestruct while this is going on? */
573 int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
579 int wait_flag = FALSE;
580 struct cifs_sb_info *cifs_sb;
581 struct cifsTconInfo *pTcon;
583 __u8 lockType = LOCKING_ANDX_LARGE_FILES;
585 length = 1 + pfLock->fl_end - pfLock->fl_start;
589 cFYI(1, ("Lock parm: 0x%x flockflags: "
590 "0x%x flocktype: 0x%x start: %lld end: %lld",
591 cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
594 if (pfLock->fl_flags & FL_POSIX)
596 if (pfLock->fl_flags & FL_FLOCK)
598 if (pfLock->fl_flags & FL_SLEEP) {
599 cFYI(1, ("Blocking lock"));
602 if (pfLock->fl_flags & FL_ACCESS)
603 cFYI(1, ("Process suspended by mandatory locking - "
604 "not implemented yet"));
605 if (pfLock->fl_flags & FL_LEASE)
606 cFYI(1, ("Lease on file - not implemented yet"));
607 if (pfLock->fl_flags &
608 (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
609 cFYI(1, ("Unknown lock flags 0x%x", pfLock->fl_flags));
611 if (pfLock->fl_type == F_WRLCK) {
612 cFYI(1, ("F_WRLCK "));
614 } else if (pfLock->fl_type == F_UNLCK) {
615 cFYI(1, ("F_UNLCK"));
617 /* Check if unlock includes more than one lock range */
619 } else if (pfLock->fl_type == F_RDLCK) {
620 cFYI(1, ("F_RDLCK"));
621 lockType |= LOCKING_ANDX_SHARED_LOCK;
623 } else if (pfLock->fl_type == F_EXLCK) {
624 cFYI(1, ("F_EXLCK"));
626 } else if (pfLock->fl_type == F_SHLCK) {
627 cFYI(1, ("F_SHLCK"));
628 lockType |= LOCKING_ANDX_SHARED_LOCK;
631 cFYI(1, ("Unknown type of lock"));
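/* For example (illustrative): fcntl(fd, F_SETLKW, &fl) with
   fl.l_type == F_WRLCK arrives here with FL_POSIX and FL_SLEEP set,
   so the blocking path is taken and an exclusive (non-shared) byte
   range lock is requested; F_RDLCK would add LOCKING_ANDX_SHARED_LOCK */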
633 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
634 pTcon = cifs_sb->tcon;
636 if (file->private_data == NULL) {
640 netfid = ((struct cifsFileInfo *)file->private_data)->netfid;
643 /* BB add code here to normalize offset and length to
644 account for negative length which we can not accept over the wire */
648 (cifs_sb->tcon->ses->capabilities & CAP_UNIX) &&
649 (CIFS_UNIX_FCNTL_CAP &
650 le64_to_cpu(cifs_sb->tcon->fsUnixInfo.Capability))) {
652 if(lockType & LOCKING_ANDX_SHARED_LOCK)
653 posix_lock_type = CIFS_RDLCK;
655 posix_lock_type = CIFS_WRLCK;
656 rc = CIFSSMBPosixLock(xid, pTcon, netfid, 1 /* get */,
658 posix_lock_type, wait_flag);
663 /* BB we could chain these into one lock request BB */
664 rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start,
665 0, 1, lockType, 0 /* wait flag */ );
667 rc = CIFSSMBLock(xid, pTcon, netfid, length,
668 pfLock->fl_start, 1 /* numUnlock */ ,
669 0 /* numLock */ , lockType,
671 pfLock->fl_type = F_UNLCK;
673 cERROR(1, ("Error unlocking previously locked "
674 "range %d during test of lock", rc));
678 /* if rc == ERR_SHARING_VIOLATION ? */
679 rc = 0; /* do not change lock type to unlock
680 since range in use */
686 if (experimEnabled &&
687 (cifs_sb->tcon->ses->capabilities & CAP_UNIX) &&
688 (CIFS_UNIX_FCNTL_CAP &
689 le64_to_cpu(cifs_sb->tcon->fsUnixInfo.Capability))) {
691 if(lockType & LOCKING_ANDX_SHARED_LOCK)
692 posix_lock_type = CIFS_RDLCK;
694 posix_lock_type = CIFS_WRLCK;
697 posix_lock_type = CIFS_UNLCK;
698 else if(numLock == 0) {
699 /* if no lock or unlock then nothing
700 to do since we do not know what it is */
704 rc = CIFSSMBPosixLock(xid, pTcon, netfid, 0 /* set */,
706 posix_lock_type, wait_flag);
708 rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start,
709 numUnlock, numLock, lockType, wait_flag);
710 if (pfLock->fl_flags & FL_POSIX)
711 posix_lock_file_wait(file, pfLock);
716 ssize_t cifs_user_write(struct file *file, const char __user *write_data,
717 size_t write_size, loff_t *poffset)
720 unsigned int bytes_written = 0;
721 unsigned int total_written;
722 struct cifs_sb_info *cifs_sb;
723 struct cifsTconInfo *pTcon;
725 struct cifsFileInfo *open_file;
727 if (file->f_dentry == NULL)
730 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
734 pTcon = cifs_sb->tcon;
737 (" write %d bytes to offset %lld of %s", write_size,
738 *poffset, file->f_dentry->d_name.name)); */
740 if (file->private_data == NULL)
743 open_file = (struct cifsFileInfo *) file->private_data;
746 if (file->f_dentry->d_inode == NULL) {
751 if (*poffset > file->f_dentry->d_inode->i_size)
752 long_op = 2; /* writes past end of file can take a long time */
756 for (total_written = 0; write_size > total_written;
757 total_written += bytes_written) {
759 while (rc == -EAGAIN) {
760 if (file->private_data == NULL) {
761 /* file has been closed on us */
763 /* if we have gotten here we have written some data
764 and blocked, and the file has been freed on us while
765 we blocked so return what we managed to write */
766 return total_written;
768 if (open_file->closePend) {
771 return total_written;
775 if (open_file->invalidHandle) {
776 if ((file->f_dentry == NULL) ||
777 (file->f_dentry->d_inode == NULL)) {
779 return total_written;
781 /* we could deadlock if we called
782 filemap_fdatawait from here so tell
783 reopen_file not to flush data to server now */
785 rc = cifs_reopen_file(file->f_dentry->d_inode,
791 rc = CIFSSMBWrite(xid, pTcon,
793 min_t(const int, cifs_sb->wsize,
794 write_size - total_written),
795 *poffset, &bytes_written,
796 NULL, write_data + total_written, long_op);
798 if (rc || (bytes_written == 0)) {
806 *poffset += bytes_written;
807 long_op = FALSE; /* subsequent writes fast -
808 15 seconds is plenty */
811 cifs_stats_bytes_written(pTcon, total_written);
813 /* since the write may have blocked check these pointers again */
814 if (file->f_dentry) {
815 if (file->f_dentry->d_inode) {
816 struct inode *inode = file->f_dentry->d_inode;
817 inode->i_ctime = inode->i_mtime =
818 current_fs_time(inode->i_sb);
819 if (total_written > 0) {
820 if (*poffset > file->f_dentry->d_inode->i_size)
821 i_size_write(file->f_dentry->d_inode,
824 mark_inode_dirty_sync(file->f_dentry->d_inode);
828 return total_written;
831 static ssize_t cifs_write(struct file *file, const char *write_data,
832 size_t write_size, loff_t *poffset)
835 unsigned int bytes_written = 0;
836 unsigned int total_written;
837 struct cifs_sb_info *cifs_sb;
838 struct cifsTconInfo *pTcon;
840 struct cifsFileInfo *open_file;
842 if (file->f_dentry == NULL)
845 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
849 pTcon = cifs_sb->tcon;
851 cFYI(1,("write %zd bytes to offset %lld of %s", write_size,
852 *poffset, file->f_dentry->d_name.name));
854 if (file->private_data == NULL)
857 open_file = (struct cifsFileInfo *)file->private_data;
860 if (file->f_dentry->d_inode == NULL) {
865 if (*poffset > file->f_dentry->d_inode->i_size)
866 long_op = 2; /* writes past end of file can take a long time */
870 for (total_written = 0; write_size > total_written;
871 total_written += bytes_written) {
873 while (rc == -EAGAIN) {
874 if (file->private_data == NULL) {
875 /* file has been closed on us */
877 /* if we have gotten here we have written some data
878 and blocked, and the file has been freed on us
879 while we blocked so return what we managed to write */
881 return total_written;
883 if (open_file->closePend) {
886 return total_written;
890 if (open_file->invalidHandle) {
891 if ((file->f_dentry == NULL) ||
892 (file->f_dentry->d_inode == NULL)) {
894 return total_written;
896 /* we could deadlock if we called
897 filemap_fdatawait from here so tell
898 reopen_file not to flush data to server now */
900 rc = cifs_reopen_file(file->f_dentry->d_inode,
905 if(experimEnabled || (pTcon->ses->server &&
906 ((pTcon->ses->server->secMode &
907 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
912 len = min((size_t)cifs_sb->wsize,
913 write_size - total_written);
914 /* iov[0] is reserved for smb header */
915 iov[1].iov_base = (char *)write_data +
917 iov[1].iov_len = len;
918 rc = CIFSSMBWrite2(xid, pTcon,
919 open_file->netfid, len,
920 *poffset, &bytes_written,
923 rc = CIFSSMBWrite(xid, pTcon,
925 min_t(const int, cifs_sb->wsize,
926 write_size - total_written),
927 *poffset, &bytes_written,
928 write_data + total_written,
931 if (rc || (bytes_written == 0)) {
939 *poffset += bytes_written;
940 long_op = FALSE; /* subsequent writes fast -
941 15 seconds is plenty */
944 cifs_stats_bytes_written(pTcon, total_written);
946 /* since the write may have blocked check these pointers again */
947 if (file->f_dentry) {
948 if (file->f_dentry->d_inode) {
949 file->f_dentry->d_inode->i_ctime =
950 file->f_dentry->d_inode->i_mtime = CURRENT_TIME;
951 if (total_written > 0) {
952 if (*poffset > file->f_dentry->d_inode->i_size)
953 i_size_write(file->f_dentry->d_inode,
956 mark_inode_dirty_sync(file->f_dentry->d_inode);
960 return total_written;
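/* Note: unlike cifs_user_write above, cifs_write takes a kernel buffer
   and, depending on signing and the experimental flag, may hand it to
   the iovec-based CIFSSMBWrite2 (iov[0] reserved for the SMB header,
   iov[1] pointing at the data) instead of the single-buffer
   CIFSSMBWrite path */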
963 struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode)
965 struct cifsFileInfo *open_file;
968 /* Having a null inode here (because mapping->host was set to zero by
969 the VFS or MM) should not happen, but we had reports of an oops (due to
970 it being zero) during stress test cases, so we need to check for it */
972 if(cifs_inode == NULL) {
973 cERROR(1,("Null inode passed to cifs_writeable_file"));
978 read_lock(&GlobalSMBSeslock);
979 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
980 if (open_file->closePend)
982 if (open_file->pfile &&
983 ((open_file->pfile->f_flags & O_RDWR) ||
984 (open_file->pfile->f_flags & O_WRONLY))) {
985 atomic_inc(&open_file->wrtPending);
986 read_unlock(&GlobalSMBSeslock);
987 if((open_file->invalidHandle) &&
988 (!open_file->closePend) /* BB fixme -since the second clause can not be true remove it BB */) {
989 rc = cifs_reopen_file(&cifs_inode->vfs_inode,
990 open_file->pfile, FALSE);
991 /* if it fails, try another handle - might be */
992 /* dangerous to hold up writepages with retry */
994 cFYI(1,("failed on reopen file in wp"));
995 read_lock(&GlobalSMBSeslock);
996 /* can not use this handle, no write
997 pending on this one after all */
999 (&open_file->wrtPending);
1006 read_unlock(&GlobalSMBSeslock);
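/* Typical caller pattern (see cifs_partialpagewrite below): if
   find_writable_file returns a handle, write through open_file->pfile
   and then atomic_dec(&open_file->wrtPending) so the handle can be
   closed again */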
1010 static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1012 struct address_space *mapping = page->mapping;
1013 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1016 int bytes_written = 0;
1017 struct cifs_sb_info *cifs_sb;
1018 struct cifsTconInfo *pTcon;
1019 struct inode *inode;
1020 struct cifsFileInfo *open_file;
1022 if (!mapping || !mapping->host)
1025 inode = page->mapping->host;
1026 cifs_sb = CIFS_SB(inode->i_sb);
1027 pTcon = cifs_sb->tcon;
1029 offset += (loff_t)from;
1030 write_data = kmap(page);
1033 if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1038 /* racing with truncate? */
1039 if (offset > mapping->host->i_size) {
1041 return 0; /* don't care */
1044 /* check to make sure that we are not extending the file */
1045 if (mapping->host->i_size - offset < (loff_t)to)
1046 to = (unsigned)(mapping->host->i_size - offset);
1048 open_file = find_writable_file(CIFS_I(mapping->host));
1050 bytes_written = cifs_write(open_file->pfile, write_data,
1052 atomic_dec(&open_file->wrtPending);
1053 /* Does mm or vfs already set times? */
1054 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
1055 if ((bytes_written > 0) && (offset)) {
1057 } else if (bytes_written < 0) {
1062 cFYI(1, ("No writeable filehandles for inode"));
1070 static int cifs_writepages(struct address_space *mapping,
1071 struct writeback_control *wbc)
1073 struct backing_dev_info *bdi = mapping->backing_dev_info;
1074 unsigned int bytes_to_write;
1075 unsigned int bytes_written;
1076 struct cifs_sb_info *cifs_sb;
1080 int range_whole = 0;
1081 struct kvec iov[32];
1087 struct cifsFileInfo *open_file;
1089 struct pagevec pvec;
1094 cifs_sb = CIFS_SB(mapping->host->i_sb);
1097 * If wsize is smaller than the page cache size, default to writing
1098 * one page at a time via cifs_writepage
1100 if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1101 return generic_writepages(mapping, wbc);
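/* The loop below reserves iov[0] for the SMB header and coalesces
   consecutive dirty pages into iov[1..n] until adding one more page
   would exceed wsize; for example (illustrative) a 16K wsize with 4K
   pages batches at most four pages per CIFSSMBWrite2 call */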
1103 if((cifs_sb->tcon->ses) && (cifs_sb->tcon->ses->server))
1104 if(cifs_sb->tcon->ses->server->secMode &
1105 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
1107 return generic_writepages(mapping, wbc);
1110 * BB: Is this meaningful for a non-block-device file system?
1111 * If it is, we should test it again after we do I/O
1113 if (wbc->nonblocking && bdi_write_congested(bdi)) {
1114 wbc->encountered_congestion = 1;
1120 pagevec_init(&pvec, 0);
1121 if (wbc->range_cyclic) {
1122 index = mapping->writeback_index; /* Start from prev offset */
1125 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1126 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1127 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
1132 while (!done && (index <= end) &&
1133 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
1134 PAGECACHE_TAG_DIRTY,
1135 min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
1144 for (i = 0; i < nr_pages; i++) {
1145 page = pvec.pages[i];
1147 * At this point we hold neither mapping->tree_lock nor
1148 * lock on the page itself: the page may be truncated or
1149 * invalidated (changing page->mapping to NULL), or even
1150 * swizzled back from swapper_space to tmpfs file
1156 else if (TestSetPageLocked(page))
1159 if (unlikely(page->mapping != mapping)) {
1164 if (!wbc->range_cyclic && page->index > end) {
1170 if (next && (page->index != next)) {
1171 /* Not next consecutive page */
1176 if (wbc->sync_mode != WB_SYNC_NONE)
1177 wait_on_page_writeback(page);
1179 if (PageWriteback(page) ||
1180 !test_clear_page_dirty(page)) {
1185 if (page_offset(page) >= mapping->host->i_size) {
1192 * BB can we get rid of this? pages are held by pvec
1194 page_cache_get(page);
1196 len = min(mapping->host->i_size - page_offset(page),
1197 (loff_t)PAGE_CACHE_SIZE);
1199 /* reserve iov[0] for the smb header */
1201 iov[n_iov].iov_base = kmap(page);
1202 iov[n_iov].iov_len = len;
1203 bytes_to_write += len;
1207 offset = page_offset(page);
1209 next = page->index + 1;
1210 if (bytes_to_write + PAGE_CACHE_SIZE > cifs_sb->wsize)
1214 /* Search for a writable handle every time we call
1215 * CIFSSMBWrite2. We can't rely on the last handle
1216 * we used to still be valid
1218 open_file = find_writable_file(CIFS_I(mapping->host));
1220 cERROR(1, ("No writable handles for inode"));
1223 rc = CIFSSMBWrite2(xid, cifs_sb->tcon,
1225 bytes_to_write, offset,
1226 &bytes_written, iov, n_iov,
1228 atomic_dec(&open_file->wrtPending);
1229 if (rc || bytes_written < bytes_to_write) {
1230 cERROR(1,("Write2 ret %d, written = %d",
1231 rc, bytes_written));
1232 /* BB what if continued retry is
1233 requested via mount flags? */
1234 set_bit(AS_EIO, &mapping->flags);
1236 cifs_stats_bytes_written(cifs_sb->tcon,
1240 for (i = 0; i < n_iov; i++) {
1241 page = pvec.pages[first + i];
1242 /* Should we also set a page error on a
1243 successful rc but too little data written? */
1244 /* BB investigate retry logic on temporary
1245 server crash cases and how recovery works
1246 when page marked as error */
1251 page_cache_release(page);
1253 if ((wbc->nr_to_write -= n_iov) <= 0)
1257 pagevec_release(&pvec);
1259 if (!scanned && !done) {
1261 * We hit the last page and there is more work to be done: wrap
1262 * back to the start of the file
1268 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
1269 mapping->writeback_index = index;
1276 static int cifs_writepage(struct page* page, struct writeback_control *wbc)
1282 /* BB add check for wbc flags */
1283 page_cache_get(page);
1284 if (!PageUptodate(page)) {
1285 cFYI(1, ("ppw - page not up to date"));
1288 rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
1289 SetPageUptodate(page); /* BB add check for error and ClearPageUptodate? */
1291 page_cache_release(page);
1296 static int cifs_commit_write(struct file *file, struct page *page,
1297 unsigned offset, unsigned to)
1301 struct inode *inode = page->mapping->host;
1302 loff_t position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
1306 cFYI(1, ("commit write for page %p up to position %lld for %d",
1307 page, position, to));
1308 if (position > inode->i_size) {
1309 i_size_write(inode, position);
1310 /* if (file->private_data == NULL) {
1313 open_file = (struct cifsFileInfo *)file->private_data;
1314 cifs_sb = CIFS_SB(inode->i_sb);
1316 while (rc == -EAGAIN) {
1317 if ((open_file->invalidHandle) &&
1318 (!open_file->closePend)) {
1319 rc = cifs_reopen_file(
1320 file->f_dentry->d_inode, file);
1324 if (!open_file->closePend) {
1325 rc = CIFSSMBSetFileSize(xid,
1326 cifs_sb->tcon, position,
1328 open_file->pid, FALSE);
1334 cFYI(1, (" SetEOF (commit write) rc = %d", rc));
1337 if (!PageUptodate(page)) {
1338 position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + offset;
1339 /* can not rely on (or let) writepage write this data */
1341 cFYI(1, ("Illegal offsets, can not copy from %d to %d",
1346 /* this is probably better than directly calling
1347 partialpage_write since in this function the file handle is
1348 known which we might as well leverage */
1349 /* BB check if anything else missing out of ppw
1350 such as updating last write time */
1351 page_data = kmap(page);
1352 rc = cifs_write(file, page_data + offset, to-offset,
1356 /* else if (rc < 0) should we set writebehind rc? */
1359 set_page_dirty(page);
1366 int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
1370 struct inode *inode = file->f_dentry->d_inode;
1374 cFYI(1, ("Sync file - name: %s datasync: 0x%x",
1375 dentry->d_name.name, datasync));
1377 rc = filemap_fdatawrite(inode->i_mapping);
1379 CIFS_I(inode)->write_behind_rc = 0;
1384 /* static void cifs_sync_page(struct page *page)
1386 struct address_space *mapping;
1387 struct inode *inode;
1388 unsigned long index = page->index;
1389 unsigned int rpages = 0;
1392 cFYI(1, ("sync page %p",page));
1393 mapping = page->mapping;
1396 inode = mapping->host;
1400 /* fill in rpages then
1401 result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
1403 /* cFYI(1, ("rpages is %d for sync page of Index %ld", rpages, index));
1413 * As the file closes, flush all cached write data for this inode,
1414 * checking for write-behind errors.
1416 int cifs_flush(struct file *file, fl_owner_t id)
1418 struct inode * inode = file->f_dentry->d_inode;
1421 /* Rather than do the steps manually:
1422 lock the inode for writing
1423 loop through pages looking for write behind data (dirty pages)
1424 coalesce into contiguous 16K (or smaller) chunks to write to server
1425 send to server (prefer in parallel)
1426 deal with writebehind errors
1427 unlock inode for writing
1428 filemapfdatawrite appears easier for the time being */
1430 rc = filemap_fdatawrite(inode->i_mapping);
1431 if (!rc) /* reset wb rc if we were able to write out dirty pages */
1432 CIFS_I(inode)->write_behind_rc = 0;
1434 cFYI(1, ("Flush inode %p file %p rc %d",inode,file,rc));
1439 ssize_t cifs_user_read(struct file *file, char __user *read_data,
1440 size_t read_size, loff_t *poffset)
1443 unsigned int bytes_read = 0;
1444 unsigned int total_read = 0;
1445 unsigned int current_read_size;
1446 struct cifs_sb_info *cifs_sb;
1447 struct cifsTconInfo *pTcon;
1449 struct cifsFileInfo *open_file;
1450 char *smb_read_data;
1451 char __user *current_offset;
1452 struct smb_com_read_rsp *pSMBr;
1455 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
1456 pTcon = cifs_sb->tcon;
1458 if (file->private_data == NULL) {
1462 open_file = (struct cifsFileInfo *)file->private_data;
1464 if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
1465 cFYI(1, ("attempting read on write only file instance"));
1467 for (total_read = 0, current_offset = read_data;
1468 read_size > total_read;
1469 total_read += bytes_read, current_offset += bytes_read) {
1470 current_read_size = min_t(const int, read_size - total_read,
1473 smb_read_data = NULL;
1474 while (rc == -EAGAIN) {
1475 int buf_type = CIFS_NO_BUFFER;
1476 if ((open_file->invalidHandle) &&
1477 (!open_file->closePend)) {
1478 rc = cifs_reopen_file(file->f_dentry->d_inode,
1483 rc = CIFSSMBRead(xid, pTcon,
1485 current_read_size, *poffset,
1486 &bytes_read, &smb_read_data,
1488 pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1489 if (smb_read_data) {
1490 if (copy_to_user(current_offset,
1492 4 /* RFC1001 length field */ +
1493 le16_to_cpu(pSMBr->DataOffset),
1498 if(buf_type == CIFS_SMALL_BUFFER)
1499 cifs_small_buf_release(smb_read_data);
1500 else if(buf_type == CIFS_LARGE_BUFFER)
1501 cifs_buf_release(smb_read_data);
1502 smb_read_data = NULL;
1505 if (rc || (bytes_read == 0)) {
1513 cifs_stats_bytes_read(pTcon, bytes_read);
1514 *poffset += bytes_read;
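/* Note: cifs_user_read copies the payload out of the SMB response
   buffer to user space with copy_to_user; cifs_read below is the
   kernel-buffer variant used by the readpage and prepare_write paths */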
1522 static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
1526 unsigned int bytes_read = 0;
1527 unsigned int total_read;
1528 unsigned int current_read_size;
1529 struct cifs_sb_info *cifs_sb;
1530 struct cifsTconInfo *pTcon;
1532 char *current_offset;
1533 struct cifsFileInfo *open_file;
1534 int buf_type = CIFS_NO_BUFFER;
1537 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
1538 pTcon = cifs_sb->tcon;
1540 if (file->private_data == NULL) {
1544 open_file = (struct cifsFileInfo *)file->private_data;
1546 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
1547 cFYI(1, ("attempting read on write only file instance"));
1549 for (total_read = 0, current_offset = read_data;
1550 read_size > total_read;
1551 total_read += bytes_read, current_offset += bytes_read) {
1552 current_read_size = min_t(const int, read_size - total_read,
1554 /* For Windows ME and 9x we do not want to request more
1555 than it negotiated, since it would then refuse the read */
1557 !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
1558 current_read_size = min_t(const int, current_read_size,
1559 pTcon->ses->server->maxBuf - 128);
1562 while (rc == -EAGAIN) {
1563 if ((open_file->invalidHandle) &&
1564 (!open_file->closePend)) {
1565 rc = cifs_reopen_file(file->f_dentry->d_inode,
1570 rc = CIFSSMBRead(xid, pTcon,
1572 current_read_size, *poffset,
1573 &bytes_read, &current_offset,
1576 if (rc || (bytes_read == 0)) {
1584 cifs_stats_bytes_read(pTcon, total_read);
1585 *poffset += bytes_read;
1592 int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
1594 struct dentry *dentry = file->f_dentry;
1598 rc = cifs_revalidate(dentry);
1600 cFYI(1, ("Validation prior to mmap failed, error=%d", rc));
1604 rc = generic_file_mmap(file, vma);
1610 static void cifs_copy_cache_pages(struct address_space *mapping,
1611 struct list_head *pages, int bytes_read, char *data,
1612 struct pagevec *plru_pvec)
1617 while (bytes_read > 0) {
1618 if (list_empty(pages))
1621 page = list_entry(pages->prev, struct page, lru);
1622 list_del(&page->lru);
1624 if (add_to_page_cache(page, mapping, page->index,
1626 page_cache_release(page);
1627 cFYI(1, ("Add page cache failed"));
1628 data += PAGE_CACHE_SIZE;
1629 bytes_read -= PAGE_CACHE_SIZE;
1633 target = kmap_atomic(page,KM_USER0);
1635 if (PAGE_CACHE_SIZE > bytes_read) {
1636 memcpy(target, data, bytes_read);
1637 /* zero the tail end of this partial page */
1638 memset(target + bytes_read, 0,
1639 PAGE_CACHE_SIZE - bytes_read);
1642 memcpy(target, data, PAGE_CACHE_SIZE);
1643 bytes_read -= PAGE_CACHE_SIZE;
1645 kunmap_atomic(target, KM_USER0);
1647 flush_dcache_page(page);
1648 SetPageUptodate(page);
1650 if (!pagevec_add(plru_pvec, page))
1651 __pagevec_lru_add(plru_pvec);
1652 data += PAGE_CACHE_SIZE;
1657 static int cifs_readpages(struct file *file, struct address_space *mapping,
1658 struct list_head *page_list, unsigned num_pages)
1664 struct cifs_sb_info *cifs_sb;
1665 struct cifsTconInfo *pTcon;
1667 unsigned int read_size,i;
1668 char *smb_read_data = NULL;
1669 struct smb_com_read_rsp *pSMBr;
1670 struct pagevec lru_pvec;
1671 struct cifsFileInfo *open_file;
1672 int buf_type = CIFS_NO_BUFFER;
1675 if (file->private_data == NULL) {
1679 open_file = (struct cifsFileInfo *)file->private_data;
1680 cifs_sb = CIFS_SB(file->f_dentry->d_sb);
1681 pTcon = cifs_sb->tcon;
1683 pagevec_init(&lru_pvec, 0);
1685 for (i = 0; i < num_pages; ) {
1686 unsigned contig_pages;
1687 struct page *tmp_page;
1688 unsigned long expected_index;
1690 if (list_empty(page_list))
1693 page = list_entry(page_list->prev, struct page, lru);
1694 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1696 /* count adjacent pages that we will read into */
1699 list_entry(page_list->prev, struct page, lru)->index;
1700 list_for_each_entry_reverse(tmp_page,page_list,lru) {
1701 if (tmp_page->index == expected_index) {
1707 if (contig_pages + i > num_pages)
1708 contig_pages = num_pages - i;
1710 /* for reads over a certain size could initiate async read ahead */
1713 read_size = contig_pages * PAGE_CACHE_SIZE;
1714 /* Read size needs to be in multiples of one page */
1715 read_size = min_t(const unsigned int, read_size,
1716 cifs_sb->rsize & PAGE_CACHE_MASK);
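/* For example (illustrative): with a 16K rsize and 4K pages at most
   four contiguous pages are filled per CIFSSMBRead call; i is then
   advanced by bytes_read >> PAGE_CACHE_SHIFT below */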
1719 while (rc == -EAGAIN) {
1720 if ((open_file->invalidHandle) &&
1721 (!open_file->closePend)) {
1722 rc = cifs_reopen_file(file->f_dentry->d_inode,
1728 rc = CIFSSMBRead(xid, pTcon,
1731 &bytes_read, &smb_read_data,
1733 /* BB more RC checks ? */
1735 if (smb_read_data) {
1736 if(buf_type == CIFS_SMALL_BUFFER)
1737 cifs_small_buf_release(smb_read_data);
1738 else if(buf_type == CIFS_LARGE_BUFFER)
1739 cifs_buf_release(smb_read_data);
1740 smb_read_data = NULL;
1744 if ((rc < 0) || (smb_read_data == NULL)) {
1745 cFYI(1, ("Read error in readpages: %d", rc));
1746 /* clean up remaining pages off the list */
1747 while (!list_empty(page_list) && (i < num_pages)) {
1748 page = list_entry(page_list->prev, struct page,
1750 list_del(&page->lru);
1751 page_cache_release(page);
1754 } else if (bytes_read > 0) {
1755 pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1756 cifs_copy_cache_pages(mapping, page_list, bytes_read,
1757 smb_read_data + 4 /* RFC1001 hdr */ +
1758 le16_to_cpu(pSMBr->DataOffset), &lru_pvec);
1760 i += bytes_read >> PAGE_CACHE_SHIFT;
1761 cifs_stats_bytes_read(pTcon, bytes_read);
1762 if ((int)(bytes_read & PAGE_CACHE_MASK) != bytes_read) {
1763 i++; /* account for partial page */
1765 /* server copy of file can have smaller size than client */
1767 /* BB do we need to verify this common case ?
1768 this case is ok - if we are at server EOF
1769 we will hit it on next read */
1771 /* while (!list_empty(page_list) && (i < num_pages)) {
1772 page = list_entry(page_list->prev,
1774 list_del(&page->list);
1775 page_cache_release(page);
1780 cFYI(1, ("No bytes read (%d) at offset %lld . "
1781 "Cleaning remaining pages from readahead list",
1782 bytes_read, offset));
1783 /* BB turn off caching and do new lookup on
1784 file size at server? */
1785 while (!list_empty(page_list) && (i < num_pages)) {
1786 page = list_entry(page_list->prev, struct page,
1788 list_del(&page->lru);
1790 /* BB removeme - replace with zero of page? */
1791 page_cache_release(page);
1795 if (smb_read_data) {
1796 if(buf_type == CIFS_SMALL_BUFFER)
1797 cifs_small_buf_release(smb_read_data);
1798 else if(buf_type == CIFS_LARGE_BUFFER)
1799 cifs_buf_release(smb_read_data);
1800 smb_read_data = NULL;
1805 pagevec_lru_add(&lru_pvec);
1807 /* need to free smb_read_data buf before exit */
1808 if (smb_read_data) {
1809 if(buf_type == CIFS_SMALL_BUFFER)
1810 cifs_small_buf_release(smb_read_data);
1811 else if(buf_type == CIFS_LARGE_BUFFER)
1812 cifs_buf_release(smb_read_data);
1813 smb_read_data = NULL;
1820 static int cifs_readpage_worker(struct file *file, struct page *page,
1826 page_cache_get(page);
1827 read_data = kmap(page);
1828 /* for reads over a certain size could initiate async read ahead */
1830 rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
1835 cFYI(1, ("Bytes read %d",rc));
1837 file->f_dentry->d_inode->i_atime =
1838 current_fs_time(file->f_dentry->d_inode->i_sb);
1840 if (PAGE_CACHE_SIZE > rc)
1841 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
1843 flush_dcache_page(page);
1844 SetPageUptodate(page);
1849 page_cache_release(page);
1853 static int cifs_readpage(struct file *file, struct page *page)
1855 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1861 if (file->private_data == NULL) {
1866 cFYI(1, ("readpage %p at offset %d 0x%x\n",
1867 page, (int)offset, (int)offset));
1869 rc = cifs_readpage_worker(file, page, &offset);
1877 /* We do not want to update the file size from server for inodes
1878 open for write - to avoid races with writepage extending
1879 the file - in the future we could consider allowing
1880 refreshing the inode only on increases in the file size
1881 but this is tricky to do without racing with writebehind
1882 page caching in the current Linux kernel design */
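/* In practice this means the size is treated as unsafe to change
   whenever find_writable_file finds an open writable handle for the
   inode, with the direct I/O mount case below as the only exception
   since there is no page cache to get out of sync */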
1883 int is_size_safe_to_change(struct cifsInodeInfo *cifsInode)
1885 struct cifsFileInfo *open_file = NULL;
1888 open_file = find_writable_file(cifsInode);
1891 struct cifs_sb_info *cifs_sb;
1893 /* there is not actually a write pending so let
1894 this handle go free and allow it to
1895 be closable if needed */
1896 atomic_dec(&open_file->wrtPending);
1898 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
1899 if ( cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO ) {
1900 /* since no page cache to corrupt on directio
1901 we can change size safely */
1910 static int cifs_prepare_write(struct file *file, struct page *page,
1911 unsigned from, unsigned to)
1914 loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1915 cFYI(1, ("prepare write for page %p from %d to %d",page,from,to));
1916 if (!PageUptodate(page)) {
1917 /* if (to - from != PAGE_CACHE_SIZE) {
1918 void *kaddr = kmap_atomic(page, KM_USER0);
1919 memset(kaddr, 0, from);
1920 memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
1921 flush_dcache_page(page);
1922 kunmap_atomic(kaddr, KM_USER0);
1924 /* If we are writing a full page it will be up to date,
1925 no need to read from the server */
1926 if ((to == PAGE_CACHE_SIZE) && (from == 0))
1927 SetPageUptodate(page);
1929 /* might as well read a page, it is fast enough */
1930 if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
1931 rc = cifs_readpage_worker(file, page, &offset);
1933 /* should we try using another file handle if there is one -
1934 how would we lock it to prevent close of that handle
1935 racing with this read?
1936 In any case this will be written out by commit_write */
1940 /* BB should we pass any errors back?
1941 e.g. if we do not have read access to the file */
1945 const struct address_space_operations cifs_addr_ops = {
1946 .readpage = cifs_readpage,
1947 .readpages = cifs_readpages,
1948 .writepage = cifs_writepage,
1949 .writepages = cifs_writepages,
1950 .prepare_write = cifs_prepare_write,
1951 .commit_write = cifs_commit_write,
1952 .set_page_dirty = __set_page_dirty_nobuffers,
1953 /* .sync_page = cifs_sync_page, */
1958 * cifs_readpages requires the server to support a buffer large enough to
1959 * contain the header plus one complete page of data. Otherwise, we need
1960 * to leave cifs_readpages out of the address space operations.
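* Which of the two address_space_operations tables an inode uses is
* chosen outside this file, based on whether the server's negotiated
* buffer can hold a full page of read data as described above.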
1962 const struct address_space_operations cifs_addr_ops_smallbuf = {
1963 .readpage = cifs_readpage,
1964 .writepage = cifs_writepage,
1965 .writepages = cifs_writepages,
1966 .prepare_write = cifs_prepare_write,
1967 .commit_write = cifs_commit_write,
1968 .set_page_dirty = __set_page_dirty_nobuffers,
1969 /* .sync_page = cifs_sync_page, */