/*
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines Corp., 2002,2003
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/mpage.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/smp_lock.h>
#include <linux/writeback.h>
#include <linux/delay.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
static inline struct cifsFileInfo *cifs_init_private(
	struct cifsFileInfo *private_data, struct inode *inode,
	struct file *file, __u16 netfid)
{
	memset(private_data, 0, sizeof(struct cifsFileInfo));
	private_data->netfid = netfid;
	private_data->pid = current->tgid;
	init_MUTEX(&private_data->fh_sem);
	private_data->pfile = file; /* needed for writepage */
	private_data->pInode = inode;
	private_data->invalidHandle = FALSE;
	private_data->closePend = FALSE;
	/* we have to track num writers to the inode, since writepages
	   does not tell us which handle the write is for so there can
	   be a close (overlapping with write) of the filehandle that
	   cifs_writepages chose to use */
	atomic_set(&private_data->wrtPending, 0);

	return private_data;
}
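/* Map the POSIX open flags (O_RDONLY/O_WRONLY/O_RDWR) to the SMB
   desired access bits requested in the open call */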
static inline int cifs_convert_flags(unsigned int flags)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request
		   can cause unnecessary access denied on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (GENERIC_READ | GENERIC_WRITE);
}
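/* Map the O_CREAT/O_EXCL/O_TRUNC combinations to the SMB create
   disposition; the full mapping is spelled out in the open flag
   mapping table in cifs_open below */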
static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}
/* all arguments to this function must be checked for validity in caller */
static inline int cifs_open_inode_helper(struct inode *inode, struct file *file,
	struct cifsInodeInfo *pCifsInode, struct cifsFileInfo *pCifsFile,
	struct cifsTconInfo *pTcon, int *oplock, FILE_ALL_INFO *buf,
	char *full_path, int xid)
{
	struct timespec temp;
	int rc = 0;

	/* want handles we can use to read with first
	   in the list so we do not have to walk the
	   list to search for one in prepare_write */
	if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
		list_add_tail(&pCifsFile->flist,
			      &pCifsInode->openFileList);
	} else {
		list_add(&pCifsFile->flist,
			 &pCifsInode->openFileList);
	}
	write_unlock(&GlobalSMBSeslock);
	write_unlock(&file->f_owner.lock);
	if (pCifsInode->clientCanCacheRead) {
		/* we have the inode open somewhere else
		   no need to discard cache data */
		goto client_can_cache;
	}

	/* BB need same check in cifs_create too? */
	/* if not oplocked, invalidate inode pages if mtime or file
	   size changed */
	temp = cifs_NTtimeToUnix(le64_to_cpu(buf->LastWriteTime));
	if (timespec_equal(&file->f_dentry->d_inode->i_mtime, &temp) &&
	    (file->f_dentry->d_inode->i_size ==
	     (loff_t)le64_to_cpu(buf->EndOfFile))) {
		cFYI(1, ("inode unchanged on server"));
	} else {
		if (file->f_dentry->d_inode->i_mapping) {
			/* BB no need to lock inode until after invalidate
			   since namei code should already have it locked? */
			filemap_write_and_wait(file->f_dentry->d_inode->i_mapping);
		}
		cFYI(1, ("invalidating remote inode since open detected it "
			 "changed"));
		invalidate_remote_inode(file->f_dentry->d_inode);
	}

client_can_cache:
	if (pTcon->ses->capabilities & CAP_UNIX)
		rc = cifs_get_inode_info_unix(&file->f_dentry->d_inode,
			full_path, inode->i_sb, xid);
	else
		rc = cifs_get_inode_info(&file->f_dentry->d_inode,
			full_path, buf, inode->i_sb, xid);

	if ((*oplock & 0xF) == OPLOCK_EXCLUSIVE) {
		pCifsInode->clientCanCacheAll = TRUE;
		pCifsInode->clientCanCacheRead = TRUE;
		cFYI(1, ("Exclusive Oplock granted on inode %p",
			 file->f_dentry->d_inode));
	} else if ((*oplock & 0xF) == OPLOCK_READ)
		pCifsInode->clientCanCacheRead = TRUE;

	return rc;
}
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	int xid, oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	struct cifsFileInfo *pCifsFile;
	struct cifsInodeInfo *pCifsInode;
	struct list_head *tmp;
	char *full_path = NULL;
	int desiredAccess;
	int disposition;
	__u16 netfid;
	FILE_ALL_INFO *buf = NULL;

	xid = GetXid();

	cifs_sb = CIFS_SB(inode->i_sb);
	pTcon = cifs_sb->tcon;

	if (file->f_flags & O_CREAT) {
		/* search inode for this file and fill in file->private_data */
		pCifsInode = CIFS_I(file->f_dentry->d_inode);
		read_lock(&GlobalSMBSeslock);
		list_for_each(tmp, &pCifsInode->openFileList) {
			pCifsFile = list_entry(tmp, struct cifsFileInfo,
					       flist);
			if ((pCifsFile->pfile == NULL) &&
			    (pCifsFile->pid == current->tgid)) {
				/* mode set in cifs_create */

				/* needed for writepage */
				pCifsFile->pfile = file;

				file->private_data = pCifsFile;
				break;
			}
		}
		read_unlock(&GlobalSMBSeslock);
		if (file->private_data != NULL) {
			rc = 0;
			FreeXid(xid);
			return rc;
		} else {
			if (file->f_flags & O_EXCL)
				cERROR(1, ("could not find file instance for "
					   "new file %p", file));
		}
	}

	down(&inode->i_sb->s_vfs_rename_sem);
	full_path = build_path_from_dentry(file->f_dentry);
	up(&inode->i_sb->s_vfs_rename_sem);
	if (full_path == NULL) {
		FreeXid(xid);
		return -ENOMEM;
	}

	cFYI(1, ("inode = 0x%p file flags are 0x%x for %s",
		 inode, file->f_flags, full_path));
	desiredAccess = cifs_convert_flags(file->f_flags);
/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match between disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists although
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *   O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/
	disposition = cifs_get_disposition(file->f_flags);

	if (oplockEnabled)
		oplock = REQ_OPLOCK;
	else
		oplock = FALSE;

	/* BB pass O_SYNC flag through on file attributes .. BB */

	/* Also refresh inode by passing in file_info buf returned by SMBOpen
	   and calling get_inode_info with returned buf (at least helps
	   non-Unix server case) */

	/* BB we can not do this if this is the second open of a file
	   and the first handle has writebehind data, we might be
	   able to simply do a filemap_fdatawrite/filemap_fdatawait first */
	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (buf == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, desiredAccess,
			 CREATE_NOT_DIR, &netfid, &oplock, buf,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				& CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (rc == -EIO) {
		/* Old server, try legacy style OpenX */
		rc = SMBLegacyOpen(xid, pTcon, full_path, disposition,
			desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
			cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
				& CIFS_MOUNT_MAP_SPECIAL_CHR);
	}
	if (rc) {
		cFYI(1, ("cifs_open returned 0x%x", rc));
		goto out;
	}

	file->private_data =
		kmalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (file->private_data == NULL) {
		rc = -ENOMEM;
		goto out;
	}
	pCifsFile = cifs_init_private(file->private_data, inode, file, netfid);
	write_lock(&file->f_owner.lock);
	write_lock(&GlobalSMBSeslock);
	list_add(&pCifsFile->tlist, &pTcon->openFileList);

	pCifsInode = CIFS_I(file->f_dentry->d_inode);
	if (pCifsInode) {
		rc = cifs_open_inode_helper(inode, file, pCifsInode,
					    pCifsFile, pTcon,
					    &oplock, buf, full_path, xid);
	} else {
		write_unlock(&GlobalSMBSeslock);
		write_unlock(&file->f_owner.lock);
	}

	if (oplock & CIFS_CREATE_ACTION) {
		/* time to set mode which we can not set earlier due to
		   problems creating new read-only files */
		if (cifs_sb->tcon->ses->capabilities & CAP_UNIX) {
			CIFSSMBUnixSetPerms(xid, pTcon, full_path,
					    inode->i_mode,
					    (__u64)-1, (__u64)-1, 0 /* dev */,
					    cifs_sb->local_nls,
					    cifs_sb->mnt_cifs_flags &
						CIFS_MOUNT_MAP_SPECIAL_CHR);
		}
		/* BB implement via Windows security descriptors eg
		   CIFSSMBWinSetPerms(xid, pTcon, full_path, mode,
				      -1, -1, local_nls);
		   in the meantime could set r/o dos attribute when
		   perms are eg: mode & 0222 == 0 */
	}

out:
	kfree(buf);
	kfree(full_path);
	FreeXid(xid);
	return rc;
}
/* Try to reacquire byte range locks that were released when session */
/* to server was lost */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
	int rc = 0;

	/* BB list all locks open on this file and relock */

	return rc;
}
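/* Reopen a file handle that was marked invalid when the connection to
   the server was reset.  The third argument tells us whether cached
   data may be flushed first; callers in writepage paths pass FALSE
   since flushing from there could deadlock */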
static int cifs_reopen_file(struct inode *inode, struct file *file,
	int can_flush)
{
	int rc = -EACCES;
	int xid, oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	struct cifsFileInfo *pCifsFile;
	struct cifsInodeInfo *pCifsInode;
	char *full_path = NULL;
	int desiredAccess;
	int disposition = FILE_OPEN;
	__u16 netfid;

	if (file->private_data) {
		pCifsFile = (struct cifsFileInfo *)file->private_data;
	} else
		return -EBADF;

	xid = GetXid();
	down(&pCifsFile->fh_sem);
	if (pCifsFile->invalidHandle == FALSE) {
		up(&pCifsFile->fh_sem);
		FreeXid(xid);
		return 0;
	}

	if (file->f_dentry == NULL) {
		up(&pCifsFile->fh_sem);
		cFYI(1, ("failed file reopen, no valid name if dentry freed"));
		FreeXid(xid);
		return -EBADF;
	}
	cifs_sb = CIFS_SB(inode->i_sb);
	pTcon = cifs_sb->tcon;
	/* can not grab rename sem here because various ops, including
	   those that already have the rename sem can end up causing writepage
	   to get called and if the server was down that means we end up here,
	   and we can never tell if the caller already has the rename_sem */
	full_path = build_path_from_dentry(file->f_dentry);
	if (full_path == NULL) {
		up(&pCifsFile->fh_sem);
		FreeXid(xid);
		return -ENOMEM;
	}

	cFYI(1, ("inode = 0x%p file flags are 0x%x for %s",
		 inode, file->f_flags, full_path));
	desiredAccess = cifs_convert_flags(file->f_flags);

	if (oplockEnabled)
		oplock = REQ_OPLOCK;
	else
		oplock = FALSE;

	/* Can not refresh inode by passing in file_info buf to be returned
	   by SMBOpen and then calling get_inode_info with returned buf
	   since file might have write behind data that needs to be flushed
	   and server version of file size can be stale. If we knew for sure
	   that inode was not dirty locally we could do this */

	rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, desiredAccess,
			 CREATE_NOT_DIR, &netfid, &oplock, NULL,
			 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
				CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (rc) {
		up(&pCifsFile->fh_sem);
		cFYI(1, ("cifs_open returned 0x%x", rc));
		cFYI(1, ("oplock: %d", oplock));
	} else {
		pCifsFile->netfid = netfid;
		pCifsFile->invalidHandle = FALSE;
		up(&pCifsFile->fh_sem);
		pCifsInode = CIFS_I(inode);
		if (pCifsInode) {
			if (can_flush) {
				filemap_write_and_wait(inode->i_mapping);
				/* temporarily disable caching while we
				   go to server to get inode info */
				pCifsInode->clientCanCacheAll = FALSE;
				pCifsInode->clientCanCacheRead = FALSE;
				if (pTcon->ses->capabilities & CAP_UNIX)
					rc = cifs_get_inode_info_unix(&inode,
						full_path, inode->i_sb, xid);
				else
					rc = cifs_get_inode_info(&inode,
						full_path, NULL, inode->i_sb,
						xid);
			} /* else we are writing out data to server already
			     and could deadlock if we tried to flush data, and
			     since we do not know if we have data that would
			     invalidate the current end of file on the server
			     we can not go to the server to get the new inode
			     info */
			if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
				pCifsInode->clientCanCacheAll = TRUE;
				pCifsInode->clientCanCacheRead = TRUE;
				cFYI(1, ("Exclusive Oplock granted on inode %p",
					 file->f_dentry->d_inode));
			} else if ((oplock & 0xF) == OPLOCK_READ) {
				pCifsInode->clientCanCacheRead = TRUE;
				pCifsInode->clientCanCacheAll = FALSE;
			} else {
				pCifsInode->clientCanCacheRead = FALSE;
				pCifsInode->clientCanCacheAll = FALSE;
			}
			cifs_relock_file(pCifsFile);
		}
	}

	kfree(full_path);
	FreeXid(xid);
	return rc;
}
int cifs_close(struct inode *inode, struct file *file)
{
	int rc = 0;
	int xid;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	struct cifsFileInfo *pSMBFile =
		(struct cifsFileInfo *)file->private_data;

	xid = GetXid();

	cifs_sb = CIFS_SB(inode->i_sb);
	pTcon = cifs_sb->tcon;
	if (pSMBFile) {
		pSMBFile->closePend = TRUE;
		write_lock(&file->f_owner.lock);
		if (pTcon) {
			/* no sense reconnecting to close a file that is
			   already closed */
			if (pTcon->tidStatus != CifsNeedReconnect) {
				int timeout = 2;
				while ((atomic_read(&pSMBFile->wrtPending) != 0)
					&& (timeout < 1000)) {
					/* Give write a better chance to get to
					   server ahead of the close.  We do not
					   want to add a wait_q here as it would
					   increase the memory utilization as
					   the struct would be in each open file,
					   but this should give enough time to
					   clear the socket */
					write_unlock(&file->f_owner.lock);
					cERROR(1, ("close with pending writes"));
					msleep(timeout);
					write_lock(&file->f_owner.lock);
					timeout *= 4;
				}
				write_unlock(&file->f_owner.lock);
				rc = CIFSSMBClose(xid, pTcon,
						  pSMBFile->netfid);
				write_lock(&file->f_owner.lock);
			}
		}
		write_lock(&GlobalSMBSeslock);
		list_del(&pSMBFile->flist);
		list_del(&pSMBFile->tlist);
		write_unlock(&GlobalSMBSeslock);
		write_unlock(&file->f_owner.lock);
		kfree(pSMBFile->search_resume_name);
		kfree(file->private_data);
		file->private_data = NULL;
	} else
		rc = -EBADF;

	if (list_empty(&(CIFS_I(inode)->openFileList))) {
		cFYI(1, ("closing last open instance for inode %p", inode));
		/* if the file is not open we do not know if we can cache info
		   on this inode, much less write behind and read ahead */
		CIFS_I(inode)->clientCanCacheRead = FALSE;
		CIFS_I(inode)->clientCanCacheAll = FALSE;
	}
	if ((rc == 0) && CIFS_I(inode)->write_behind_rc)
		rc = CIFS_I(inode)->write_behind_rc;
	FreeXid(xid);
	return rc;
}
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	int xid;
	struct cifsFileInfo *pCFileStruct =
		(struct cifsFileInfo *)file->private_data;
	char *ptmp;

	cFYI(1, ("Closedir inode = 0x%p", inode));

	xid = GetXid();

	if (pCFileStruct) {
		struct cifsTconInfo *pTcon;
		struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_dentry->d_sb);

		pTcon = cifs_sb->tcon;

		cFYI(1, ("Freeing private data in close dir"));
		if ((pCFileStruct->srch_inf.endOfSearch == FALSE) &&
		    (pCFileStruct->invalidHandle == FALSE)) {
			pCFileStruct->invalidHandle = TRUE;
			rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
			cFYI(1, ("Closing uncompleted readdir with rc %d",
				 rc));
			/* not much we can do if it fails anyway, ignore rc */
			rc = 0;
		}
		ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
		if (ptmp) {
			cFYI(1, ("closedir free smb buf in srch struct"));
			pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
			cifs_buf_release(ptmp);
		}
		ptmp = pCFileStruct->search_resume_name;
		if (ptmp) {
			cFYI(1, ("closedir free resume name"));
			pCFileStruct->search_resume_name = NULL;
			kfree(ptmp);
		}
		kfree(file->private_data);
		file->private_data = NULL;
	}
	/* BB can we lock the filestruct while this is going on? */
	FreeXid(xid);
	return rc;
}
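/* Map a POSIX (fcntl) or flock byte range lock request onto an SMB
   LockingAndX request.  A lock test is emulated by trying to take the
   lock and, if that succeeds, immediately unlocking the range */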
int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
{
	int rc, xid;
	__u32 lockType = LOCKING_ANDX_LARGE_FILES;
	__u32 numLock = 0;
	__u32 numUnlock = 0;
	__u64 length;
	int wait_flag = FALSE;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;

	length = 1 + pfLock->fl_end - pfLock->fl_start;
	rc = -EACCES;
	xid = GetXid();

	cFYI(1, ("Lock parm: 0x%x flockflags: "
		 "0x%x flocktype: 0x%x start: %lld end: %lld",
		 cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
		 pfLock->fl_end));

	if (pfLock->fl_flags & FL_POSIX)
		cFYI(1, ("Posix"));
	if (pfLock->fl_flags & FL_FLOCK)
		cFYI(1, ("Flock"));
	if (pfLock->fl_flags & FL_SLEEP) {
		cFYI(1, ("Blocking lock"));
		wait_flag = TRUE;
	}
	if (pfLock->fl_flags & FL_ACCESS)
		cFYI(1, ("Process suspended by mandatory locking - "
			 "not implemented yet"));
	if (pfLock->fl_flags & FL_LEASE)
		cFYI(1, ("Lease on file - not implemented yet"));
	if (pfLock->fl_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
		cFYI(1, ("Unknown lock flags 0x%x", pfLock->fl_flags));

	if (pfLock->fl_type == F_WRLCK) {
		cFYI(1, ("F_WRLCK"));
		numLock = 1;
	} else if (pfLock->fl_type == F_UNLCK) {
		cFYI(1, ("F_UNLCK"));
		numUnlock = 1;
	} else if (pfLock->fl_type == F_RDLCK) {
		cFYI(1, ("F_RDLCK"));
		lockType |= LOCKING_ANDX_SHARED_LOCK;
		numLock = 1;
	} else if (pfLock->fl_type == F_EXLCK) {
		cFYI(1, ("F_EXLCK"));
		numLock = 1;
	} else if (pfLock->fl_type == F_SHLCK) {
		cFYI(1, ("F_SHLCK"));
		lockType |= LOCKING_ANDX_SHARED_LOCK;
		numLock = 1;
	} else
		cFYI(1, ("Unknown type of lock"));

	cifs_sb = CIFS_SB(file->f_dentry->d_sb);
	pTcon = cifs_sb->tcon;

	if (file->private_data == NULL) {
		FreeXid(xid);
		return -EBADF;
	}

	if (IS_GETLK(cmd)) {
		rc = CIFSSMBLock(xid, pTcon,
				 ((struct cifsFileInfo *)file->
					private_data)->netfid,
				 length,
				 pfLock->fl_start, 0, 1, lockType,
				 0 /* wait flag */);
		if (rc == 0) {
			rc = CIFSSMBLock(xid, pTcon,
					 ((struct cifsFileInfo *) file->
						private_data)->netfid,
					 length,
					 pfLock->fl_start, 1 /* numUnlock */ ,
					 0 /* numLock */ , lockType,
					 0 /* wait flag */);
			pfLock->fl_type = F_UNLCK;
			if (rc != 0)
				cERROR(1, ("Error unlocking previously locked "
					   "range %d during test of lock",
					   rc));
			rc = 0;
		} else {
			/* if rc == ERR_SHARING_VIOLATION ? */
			rc = 0;	/* do not change lock type to unlock
				   since range in use */
		}
		FreeXid(xid);
		return rc;
	}

	rc = CIFSSMBLock(xid, pTcon,
			 ((struct cifsFileInfo *) file->private_data)->
				netfid, length,
			 pfLock->fl_start, numUnlock, numLock, lockType,
			 wait_flag);
	if (pfLock->fl_flags & FL_POSIX)
		posix_lock_file_wait(file, pfLock);
	FreeXid(xid);
	return rc;
}
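/* Write data from a userspace buffer at *poffset, looping until all of
   write_size has been sent; retries on -EAGAIN and reopens an
   invalidated handle if the session was reset mid-write */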
ssize_t cifs_user_write(struct file *file, const char __user *write_data,
	size_t write_size, loff_t *poffset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	int xid, long_op;
	struct cifsFileInfo *open_file;

	if (file->f_dentry == NULL)
		return -EBADF;

	cifs_sb = CIFS_SB(file->f_dentry->d_sb);
	if (cifs_sb == NULL)
		return -EBADF;

	pTcon = cifs_sb->tcon;

	/* cFYI(1,
	   (" write %d bytes to offset %lld of %s", write_size,
	   *poffset, file->f_dentry->d_name.name)); */

	if (file->private_data == NULL)
		return -EBADF;
	open_file = (struct cifsFileInfo *)file->private_data;

	xid = GetXid();
	if (file->f_dentry->d_inode == NULL) {
		FreeXid(xid);
		return -EBADF;
	}

	if (*poffset > file->f_dentry->d_inode->i_size)
		long_op = 2; /* writes past end of file can take a long time */
	else
		long_op = 1;

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if (file->private_data == NULL) {
				/* file has been closed on us */
				FreeXid(xid);
			/* if we have gotten here we have written some data
			   and blocked, and the file has been freed on us while
			   we blocked so return what we managed to write */
				return total_written;
			}
			if (open_file->closePend) {
				FreeXid(xid);
				if (total_written)
					return total_written;
				else
					return -EBADF;
			}
			if (open_file->invalidHandle) {
				if ((file->f_dentry == NULL) ||
				    (file->f_dentry->d_inode == NULL)) {
					FreeXid(xid);
					return total_written;
				}
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to server
				   now */
				rc = cifs_reopen_file(file->f_dentry->d_inode,
						      file, FALSE);
				if (rc != 0)
					break;
			}

			rc = CIFSSMBWrite(xid, pTcon,
					  open_file->netfid,
					  min_t(const int, cifs_sb->wsize,
						write_size - total_written),
					  *poffset, &bytes_written,
					  NULL, write_data + total_written,
					  long_op);
		}
		if (rc || (bytes_written == 0)) {
			if (total_written)
				break;
			else {
				FreeXid(xid);
				return rc;
			}
		} else
			*poffset += bytes_written;
		long_op = FALSE; /* subsequent writes fast -
				    15 seconds is plenty */
	}

	cifs_stats_bytes_written(pTcon, total_written);

	/* since the write may have blocked check these pointers again */
	if (file->f_dentry) {
		if (file->f_dentry->d_inode) {
			struct inode *inode = file->f_dentry->d_inode;
			inode->i_ctime = inode->i_mtime =
				current_fs_time(inode->i_sb);
			if (total_written > 0) {
				if (*poffset > file->f_dentry->d_inode->i_size)
					i_size_write(file->f_dentry->d_inode,
						     *poffset);
			}
			mark_inode_dirty_sync(file->f_dentry->d_inode);
		}
	}
	FreeXid(xid);
	return total_written;
}
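/* Same as cifs_user_write but for a kernel buffer; when signing is not
   required the data is passed to CIFSSMBWrite2 as an iovec so it does
   not have to be copied into the SMB buffer first */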
static ssize_t cifs_write(struct file *file, const char *write_data,
	size_t write_size, loff_t *poffset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	int xid, long_op;
	struct cifsFileInfo *open_file;

	if (file->f_dentry == NULL)
		return -EBADF;

	cifs_sb = CIFS_SB(file->f_dentry->d_sb);
	if (cifs_sb == NULL)
		return -EBADF;

	pTcon = cifs_sb->tcon;

	cFYI(1, ("write %zd bytes to offset %lld of %s", write_size,
		 *poffset, file->f_dentry->d_name.name));

	if (file->private_data == NULL)
		return -EBADF;
	open_file = (struct cifsFileInfo *)file->private_data;

	xid = GetXid();
	if (file->f_dentry->d_inode == NULL) {
		FreeXid(xid);
		return -EBADF;
	}

	if (*poffset > file->f_dentry->d_inode->i_size)
		long_op = 2; /* writes past end of file can take a long time */
	else
		long_op = 1;

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if (file->private_data == NULL) {
				/* file has been closed on us */
				FreeXid(xid);
			/* if we have gotten here we have written some data
			   and blocked, and the file has been freed on us
			   while we blocked so return what we managed to
			   write */
				return total_written;
			}
			if (open_file->closePend) {
				FreeXid(xid);
				if (total_written)
					return total_written;
				else
					return -EBADF;
			}
			if (open_file->invalidHandle) {
				if ((file->f_dentry == NULL) ||
				    (file->f_dentry->d_inode == NULL)) {
					FreeXid(xid);
					return total_written;
				}
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to
				   server now */
				rc = cifs_reopen_file(file->f_dentry->d_inode,
						      file, FALSE);
				if (rc != 0)
					break;
			}
			/* BB FIXME We can not sign across two buffers yet */
			if ((pTcon->ses->server->secMode &
			    (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) == 0) {
				struct kvec iov[2];
				unsigned int len;

				len = min((size_t)cifs_sb->wsize,
					  write_size - total_written);
				/* iov[0] is reserved for smb header */
				iov[1].iov_base = (char *)write_data +
						  total_written;
				iov[1].iov_len = len;
				rc = CIFSSMBWrite2(xid, pTcon,
						   open_file->netfid, len,
						   *poffset, &bytes_written,
						   iov, 1, long_op);
			} else
				rc = CIFSSMBWrite(xid, pTcon,
					 open_file->netfid,
					 min_t(const int, cifs_sb->wsize,
					       write_size - total_written),
					 *poffset, &bytes_written,
					 write_data + total_written,
					 NULL, long_op);
		}
		if (rc || (bytes_written == 0)) {
			if (total_written)
				break;
			else {
				FreeXid(xid);
				return rc;
			}
		} else
			*poffset += bytes_written;
		long_op = FALSE; /* subsequent writes fast -
				    15 seconds is plenty */
	}

	cifs_stats_bytes_written(pTcon, total_written);

	/* since the write may have blocked check these pointers again */
	if (file->f_dentry) {
		if (file->f_dentry->d_inode) {
			file->f_dentry->d_inode->i_ctime =
				file->f_dentry->d_inode->i_mtime = CURRENT_TIME;
			if (total_written > 0) {
				if (*poffset > file->f_dentry->d_inode->i_size)
					i_size_write(file->f_dentry->d_inode,
						     *poffset);
			}
			mark_inode_dirty_sync(file->f_dentry->d_inode);
		}
	}
	FreeXid(xid);
	return total_written;
}
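/* Find an open handle for this inode that was opened for writing and
   take a wrtPending reference on it so it can not be freed underneath
   the caller; the caller must atomic_dec wrtPending when done with it */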
struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *open_file;
	int rc;

	read_lock(&GlobalSMBSeslock);
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (open_file->closePend)
			continue;
		if (open_file->pfile &&
		    ((open_file->pfile->f_flags & O_RDWR) ||
		     (open_file->pfile->f_flags & O_WRONLY))) {
			atomic_inc(&open_file->wrtPending);
			read_unlock(&GlobalSMBSeslock);
			if ((open_file->invalidHandle) &&
			    (!open_file->closePend)) {
				/* BB fixme - since the second clause can not
				   be true, remove it BB */
				rc = cifs_reopen_file(&cifs_inode->vfs_inode,
						      open_file->pfile, FALSE);
				/* if it fails, try another handle - might be
				   dangerous to hold up writepages with retry */
				if (rc) {
					cFYI(1, ("failed on reopen file in wp"));
					read_lock(&GlobalSMBSeslock);
					/* can not use this handle, no write
					   pending on this one after all */
					atomic_dec(&open_file->wrtPending);
					continue;
				}
			}
			return open_file;
		}
	}
	read_unlock(&GlobalSMBSeslock);
	return NULL;
}
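/* Write the byte range [from, to) of the given page back to the server
   using any writable handle that can be found for the inode */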
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;
	cifs_sb = CIFS_SB(inode->i_sb);
	pTcon = cifs_sb->tcon;

	offset += (loff_t)from;
	write_data = kmap(page);
	write_data += from;

	if ((to > PAGE_CACHE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	open_file = find_writable_file(CIFS_I(mapping->host));
	if (open_file) {
		bytes_written = cifs_write(open_file->pfile, write_data,
					   to - from, &offset);
		atomic_dec(&open_file->wrtPending);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
		if ((bytes_written > 0) && (offset)) {
			rc = 0;
		} else if (bytes_written < 0) {
			rc = bytes_written;
		}
	} else {
		cFYI(1, ("No writable filehandles for inode"));
		rc = -EIO;
	}

	kunmap(page);
	return rc;
}
static int cifs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct backing_dev_info *bdi = mapping->backing_dev_info;
	unsigned int bytes_to_write;
	unsigned int bytes_written;
	struct cifs_sb_info *cifs_sb;
	int done = 0, is_range = 0, scanned = 0;
	pgoff_t end = -1, index, next;
	struct kvec iov[32];
	int len, n_iov = 0, nr_pages;
	__u64 offset = 0;
	struct cifsFileInfo *open_file;
	struct page *page;
	struct pagevec pvec;
	int rc = 0;
	int xid;

	cifs_sb = CIFS_SB(mapping->host->i_sb);

	/*
	 * If wsize is smaller that the page cache size, default to writing
	 * one page at a time via cifs_writepage
	 */
	if (cifs_sb->wsize < PAGE_CACHE_SIZE)
		return generic_writepages(mapping, wbc);

	/* BB FIXME we do not have code to sign across multiple buffers yet,
	   so go to older writepage style write which we can sign if needed */
	if ((cifs_sb->tcon->ses) && (cifs_sb->tcon->ses->server))
		if (cifs_sb->tcon->ses->server->secMode &
		    (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
			return generic_writepages(mapping, wbc);

	/*
	 * BB: Is this meaningful for a non-block-device file system?
	 * If it is, we should test it again after we do I/O
	 */
	if (wbc->nonblocking && bdi_write_congested(bdi)) {
		wbc->encountered_congestion = 1;
		return 0;
	}

	xid = GetXid();

	pagevec_init(&pvec, 0);
	if (wbc->sync_mode == WB_SYNC_NONE)
		index = mapping->writeback_index; /* Start from prev offset */
	else {
		index = 0;
		scanned = 1;
	}
	if (wbc->start || wbc->end) {
		index = wbc->start >> PAGE_CACHE_SHIFT;
		end = wbc->end >> PAGE_CACHE_SHIFT;
		is_range = 1;
		scanned = 1;
	}
retry:
	while (!done && (index <= end) &&
	       (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_DIRTY,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
		int first;
		unsigned int i;

		first = -1;
		next = 0;
		n_iov = 0;
		bytes_to_write = 0;

		for (i = 0; i < nr_pages; i++) {
			page = pvec.pages[i];
			/*
			 * At this point we hold neither mapping->tree_lock nor
			 * lock on the page itself: the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or even
			 * swizzled back from swapper_space to tmpfs file
			 */
			if (first < 0)
				lock_page(page);
			else if (TestSetPageLocked(page))
				break;

			if (unlikely(page->mapping != mapping)) {
				unlock_page(page);
				break;
			}

			if (unlikely(is_range) && (page->index > end)) {
				done = 1;
				unlock_page(page);
				break;
			}

			if (next && (page->index != next)) {
				/* Not next consecutive page */
				unlock_page(page);
				break;
			}

			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);

			if (PageWriteback(page) ||
			    !test_clear_page_dirty(page)) {
				unlock_page(page);
				break;
			}

			if (page_offset(page) >= mapping->host->i_size) {
				done = 1;
				unlock_page(page);
				break;
			}

			/*
			 * BB can we get rid of this? pages are held by pvec
			 */
			page_cache_get(page);

			len = min(mapping->host->i_size - page_offset(page),
				  (loff_t)PAGE_CACHE_SIZE);

			/* reserve iov[0] for the smb header */
			n_iov++;
			iov[n_iov].iov_base = kmap(page);
			iov[n_iov].iov_len = len;
			bytes_to_write += len;

			if (first < 0) {
				first = i;
				offset = page_offset(page);
			}
			next = page->index + 1;
			if (bytes_to_write + PAGE_CACHE_SIZE > cifs_sb->wsize)
				break;
		}
		if (n_iov) {
			/* Search for a writable handle every time we call
			 * CIFSSMBWrite2. We can't rely on the last handle
			 * we used to still be valid
			 */
			open_file = find_writable_file(CIFS_I(mapping->host));
			if (!open_file) {
				cERROR(1, ("No writable handles for inode"));
				rc = -EBADF;
			} else {
				rc = CIFSSMBWrite2(xid, cifs_sb->tcon,
						   open_file->netfid,
						   bytes_to_write, offset,
						   &bytes_written, iov, n_iov,
						   1);
				atomic_dec(&open_file->wrtPending);
				if (rc || bytes_written < bytes_to_write) {
					cERROR(1, ("Write2 ret %d, written = %d",
						   rc, bytes_written));
					/* BB what if continued retry is
					   requested via mount flags? */
					set_bit(AS_EIO, &mapping->flags);
				} else {
					cifs_stats_bytes_written(cifs_sb->tcon,
								 bytes_written);
				}
			}
			for (i = 0; i < n_iov; i++) {
				page = pvec.pages[first + i];
				/* Should we also set page error on
				   success rc but too little data written? */
				/* BB investigate retry logic on temporary
				   server crash cases and how recovery works
				   when page marked as error */
				if (rc)
					SetPageError(page);
				kunmap(page);
				unlock_page(page);
				page_cache_release(page);
			}
			if ((wbc->nr_to_write -= n_iov) <= 0)
				done = 1;
			index = next;
		}
		pagevec_release(&pvec);
	}
	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = 1;
		index = 0;
		goto retry;
	}
	mapping->writeback_index = index;

	FreeXid(xid);
	return rc;
}
static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc = -EFAULT;
	int xid = GetXid();

	/* BB add check for wbc flags */
	page_cache_get(page);
	if (!PageUptodate(page))
		cFYI(1, ("ppw - page not up to date"));

	rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
	SetPageUptodate(page); /* BB add check for error and Clearuptodate? */
	unlock_page(page);
	page_cache_release(page);
	FreeXid(xid);
	return rc;
}
static int cifs_commit_write(struct file *file, struct page *page,
	unsigned offset, unsigned to)
{
	int xid;
	int rc = 0;
	struct inode *inode = page->mapping->host;
	loff_t position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
	char *page_data;

	xid = GetXid();
	cFYI(1, ("commit write for page %p up to position %lld for %d",
		 page, position, to));
	if (position > inode->i_size) {
		i_size_write(inode, position);
		/* if (file->private_data == NULL) {
			rc = -EBADF;
		} else {
			open_file = (struct cifsFileInfo *)file->private_data;
			cifs_sb = CIFS_SB(inode->i_sb);
			rc = -EAGAIN;
			while (rc == -EAGAIN) {
				if ((open_file->invalidHandle) &&
				    (!open_file->closePend)) {
					rc = cifs_reopen_file(
						file->f_dentry->d_inode, file);
					if (rc != 0)
						break;
				}
				if (!open_file->closePend) {
					rc = CIFSSMBSetFileSize(xid,
						cifs_sb->tcon, position,
						open_file->netfid,
						open_file->pid, FALSE);
				} else {
					rc = -EBADF;
					break;
				}
			}
			cFYI(1, ("SetEOF (commit write) rc = %d", rc));
		} */
	}
	if (!PageUptodate(page)) {
		position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + offset;
		/* can not rely on (or let) writepage write this data */
		if (to < offset) {
			cFYI(1, ("Illegal offsets, can not copy from %d to %d",
				 offset, to));
			FreeXid(xid);
			return rc;
		}
		/* this is probably better than directly calling
		   partialpage_write since in this function the file handle is
		   known which we might as well leverage */
		/* BB check if anything else missing out of ppw
		   such as updating last write time */
		page_data = kmap(page);
		rc = cifs_write(file, page_data + offset, to - offset,
				&position);
		if (rc > 0)
			rc = 0;
		/* else if (rc < 0) should we set writebehind rc? */
		kunmap(page);
	} else {
		set_page_dirty(page);
	}

	FreeXid(xid);
	return rc;
}
int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
{
	int xid = GetXid();
	int rc = 0;
	struct inode *inode = file->f_dentry->d_inode;

	cFYI(1, ("Sync file - name: %s datasync: 0x%x",
		 dentry->d_name.name, datasync));
	rc = filemap_fdatawrite(inode->i_mapping);
	if (rc == 0)
		CIFS_I(inode)->write_behind_rc = 0;
	FreeXid(xid);
	return rc;
}
/* static int cifs_sync_page(struct page *page)
{
	struct address_space *mapping;
	struct inode *inode;
	unsigned long index = page->index;
	unsigned int rpages = 0;
	int rc = 0;

	cFYI(1, ("sync page %p", page));
	mapping = page->mapping;
	if (!mapping)
		return 0;
	inode = mapping->host;
	if (!inode)
		return; */

/*	fill in rpages then
	result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */

/*	cFYI(1, ("rpages is %d for sync page of Index %ld", rpages, index));
	return rc;
} */
/*
 * As file closes, flush all cached write data for this inode checking
 * for write behind errors.
 */
int cifs_flush(struct file *file)
{
	struct inode *inode = file->f_dentry->d_inode;
	int rc = 0;

	/* Rather than do the steps manually:
	   lock the inode for writing
	   loop through pages looking for write behind data (dirty pages)
	   coalesce into contiguous 16K (or smaller) chunks to write to server
	   send to server (prefer in parallel)
	   deal with writebehind errors
	   unlock inode for writing
	   filemapfdatawrite appears easier for the time being */

	rc = filemap_fdatawrite(inode->i_mapping);
	if (!rc) /* reset wb rc if we were able to write out dirty pages */
		CIFS_I(inode)->write_behind_rc = 0;

	cFYI(1, ("Flush inode %p file %p rc %d", inode, file, rc));

	return rc;
}
ssize_t cifs_user_read(struct file *file, char __user *read_data,
	size_t read_size, loff_t *poffset)
{
	int rc = -EACCES;
	unsigned int bytes_read = 0;
	unsigned int total_read = 0;
	unsigned int current_read_size;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	int xid;
	struct cifsFileInfo *open_file;
	char *smb_read_data;
	char __user *current_offset;
	struct smb_com_read_rsp *pSMBr;

	xid = GetXid();
	cifs_sb = CIFS_SB(file->f_dentry->d_sb);
	pTcon = cifs_sb->tcon;

	if (file->private_data == NULL) {
		FreeXid(xid);
		return -EBADF;
	}
	open_file = (struct cifsFileInfo *)file->private_data;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
		cFYI(1, ("attempting read on write only file instance"));
	}
	for (total_read = 0, current_offset = read_data;
	     read_size > total_read;
	     total_read += bytes_read, current_offset += bytes_read) {
		current_read_size = min_t(const int, read_size - total_read,
					  cifs_sb->rsize);
		rc = -EAGAIN;
		smb_read_data = NULL;
		while (rc == -EAGAIN) {
			int buf_type = CIFS_NO_BUFFER;
			if ((open_file->invalidHandle) &&
			    (!open_file->closePend)) {
				rc = cifs_reopen_file(file->f_dentry->d_inode,
						      file, TRUE);
				if (rc != 0)
					break;
			}
			rc = CIFSSMBRead(xid, pTcon,
					 open_file->netfid,
					 current_read_size, *poffset,
					 &bytes_read, &smb_read_data,
					 &buf_type);
			pSMBr = (struct smb_com_read_rsp *)smb_read_data;
			if (copy_to_user(current_offset,
					 smb_read_data + 4 /* RFC1001 hdr */
					 + le16_to_cpu(pSMBr->DataOffset),
					 bytes_read))
				rc = -EFAULT;
			if (smb_read_data) {
				if (buf_type == CIFS_SMALL_BUFFER)
					cifs_small_buf_release(smb_read_data);
				else if (buf_type == CIFS_LARGE_BUFFER)
					cifs_buf_release(smb_read_data);
				smb_read_data = NULL;
			}
		}
		if (rc || (bytes_read == 0)) {
			if (total_read) {
				break;
			} else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_stats_bytes_read(pTcon, bytes_read);
			*poffset += bytes_read;
		}
	}
	FreeXid(xid);
	return total_read;
}
static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
	loff_t *poffset)
{
	int rc = -EACCES;
	unsigned int bytes_read = 0;
	unsigned int total_read;
	unsigned int current_read_size;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	int xid;
	char *current_offset;
	struct cifsFileInfo *open_file;
	int buf_type = CIFS_NO_BUFFER;

	xid = GetXid();
	cifs_sb = CIFS_SB(file->f_dentry->d_sb);
	pTcon = cifs_sb->tcon;

	if (file->private_data == NULL) {
		FreeXid(xid);
		return -EBADF;
	}
	open_file = (struct cifsFileInfo *)file->private_data;

	if ((file->f_flags & O_ACCMODE) == O_WRONLY)
		cFYI(1, ("attempting read on write only file instance"));

	for (total_read = 0, current_offset = read_data;
	     read_size > total_read;
	     total_read += bytes_read, current_offset += bytes_read) {
		current_read_size = min_t(const int, read_size - total_read,
					  cifs_sb->rsize);
		/* For windows me and 9x we do not want to request more
		   than it negotiated since it will refuse the read then */
		if ((pTcon->ses) &&
		    !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
			current_read_size = min_t(const int, current_read_size,
					pTcon->ses->server->maxBuf - 128);
		}
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if ((open_file->invalidHandle) &&
			    (!open_file->closePend)) {
				rc = cifs_reopen_file(file->f_dentry->d_inode,
						      file, TRUE);
				if (rc != 0)
					break;
			}
			rc = CIFSSMBRead(xid, pTcon,
					 open_file->netfid,
					 current_read_size, *poffset,
					 &bytes_read, &current_offset,
					 &buf_type);
		}
		if (rc || (bytes_read == 0)) {
			if (total_read) {
				break;
			} else {
				FreeXid(xid);
				return rc;
			}
		} else {
			cifs_stats_bytes_read(pTcon, total_read);
			*poffset += bytes_read;
		}
	}
	FreeXid(xid);
	return total_read;
}
int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct dentry *dentry = file->f_dentry;
	int rc, xid = GetXid();

	rc = cifs_revalidate(dentry);
	if (rc) {
		cFYI(1, ("Validation prior to mmap failed, error=%d", rc));
		FreeXid(xid);
		return rc;
	}
	rc = generic_file_mmap(file, vma);
	FreeXid(xid);
	return rc;
}
static void cifs_copy_cache_pages(struct address_space *mapping,
	struct list_head *pages, int bytes_read, char *data,
	struct pagevec *plru_pvec)
{
	struct page *page;
	char *target;

	while (bytes_read > 0) {
		if (list_empty(pages))
			break;

		page = list_entry(pages->prev, struct page, lru);
		list_del(&page->lru);

		if (add_to_page_cache(page, mapping, page->index,
				      GFP_KERNEL)) {
			page_cache_release(page);
			cFYI(1, ("Add page cache failed"));
			data += PAGE_CACHE_SIZE;
			bytes_read -= PAGE_CACHE_SIZE;
			continue;
		}

		target = kmap_atomic(page, KM_USER0);

		if (PAGE_CACHE_SIZE > bytes_read) {
			memcpy(target, data, bytes_read);
			/* zero the tail end of this partial page */
			memset(target + bytes_read, 0,
			       PAGE_CACHE_SIZE - bytes_read);
			bytes_read = 0;
		} else {
			memcpy(target, data, PAGE_CACHE_SIZE);
			bytes_read -= PAGE_CACHE_SIZE;
		}
		kunmap_atomic(target, KM_USER0);

		flush_dcache_page(page);
		SetPageUptodate(page);
		unlock_page(page);
		if (!pagevec_add(plru_pvec, page))
			__pagevec_lru_add(plru_pvec);
		data += PAGE_CACHE_SIZE;
	}
	return;
}
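/* Readahead: pull adjacent pages off the readahead list, issue one
   large read of up to rsize bytes per contiguous run, and copy the
   returned data into the page cache via cifs_copy_cache_pages */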
static int cifs_readpages(struct file *file, struct address_space *mapping,
	struct list_head *page_list, unsigned num_pages)
{
	int rc = -EACCES;
	int xid;
	loff_t offset;
	struct page *page;
	struct cifs_sb_info *cifs_sb;
	struct cifsTconInfo *pTcon;
	int bytes_read = 0;
	unsigned int read_size, i;
	char *smb_read_data = NULL;
	struct smb_com_read_rsp *pSMBr;
	struct pagevec lru_pvec;
	struct cifsFileInfo *open_file;
	int buf_type = CIFS_NO_BUFFER;

	xid = GetXid();
	if (file->private_data == NULL) {
		FreeXid(xid);
		return -EBADF;
	}
	open_file = (struct cifsFileInfo *)file->private_data;
	cifs_sb = CIFS_SB(file->f_dentry->d_sb);
	pTcon = cifs_sb->tcon;

	pagevec_init(&lru_pvec, 0);

	for (i = 0; i < num_pages; ) {
		unsigned contig_pages;
		struct page *tmp_page;
		unsigned long expected_index;

		if (list_empty(page_list))
			break;

		page = list_entry(page_list->prev, struct page, lru);
		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;

		/* count adjacent pages that we will read into */
		contig_pages = 0;
		expected_index =
			list_entry(page_list->prev, struct page, lru)->index;
		list_for_each_entry_reverse(tmp_page, page_list, lru) {
			if (tmp_page->index == expected_index) {
				contig_pages++;
				expected_index++;
			} else
				break;
		}
		if (contig_pages + i > num_pages)
			contig_pages = num_pages - i;

		/* for reads over a certain size could initiate async
		   read ahead */

		read_size = contig_pages * PAGE_CACHE_SIZE;
		/* Read size needs to be in multiples of one page */
		read_size = min_t(const unsigned int, read_size,
				  cifs_sb->rsize & PAGE_CACHE_MASK);

		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			if ((open_file->invalidHandle) &&
			    (!open_file->closePend)) {
				rc = cifs_reopen_file(file->f_dentry->d_inode,
						      file, TRUE);
				if (rc != 0)
					break;
			}
			rc = CIFSSMBRead(xid, pTcon,
					 open_file->netfid,
					 read_size, offset,
					 &bytes_read, &smb_read_data,
					 &buf_type);
			/* BB more RC checks ? */
			if (rc == -EAGAIN) {
				if (smb_read_data) {
					if (buf_type == CIFS_SMALL_BUFFER)
						cifs_small_buf_release(smb_read_data);
					else if (buf_type == CIFS_LARGE_BUFFER)
						cifs_buf_release(smb_read_data);
					smb_read_data = NULL;
				}
			}
		}
		if ((rc < 0) || (smb_read_data == NULL)) {
			cFYI(1, ("Read error in readpages: %d", rc));
			/* clean up remaining pages off list */
			while (!list_empty(page_list) && (i < num_pages)) {
				page = list_entry(page_list->prev, struct page,
						  lru);
				list_del(&page->lru);
				page_cache_release(page);
			}
			break;
		} else if (bytes_read > 0) {
			pSMBr = (struct smb_com_read_rsp *)smb_read_data;
			cifs_copy_cache_pages(mapping, page_list, bytes_read,
				smb_read_data + 4 /* RFC1001 hdr */ +
				le16_to_cpu(pSMBr->DataOffset), &lru_pvec);

			i += bytes_read >> PAGE_CACHE_SHIFT;
			cifs_stats_bytes_read(pTcon, bytes_read);
			if ((int)(bytes_read & PAGE_CACHE_MASK) != bytes_read) {
				i++; /* account for partial page */

				/* server copy of file can have smaller size
				   than client */
				/* BB do we need to verify this common case ?
				   this case is ok - if we are at server EOF
				   we will hit it on next read */

				/* while (!list_empty(page_list) && (i < num_pages)) {
					page = list_entry(page_list->prev,
							  struct page, list);
					list_del(&page->list);
					page_cache_release(page);
				}
				break; */
			}
		} else {
			cFYI(1, ("No bytes read (%d) at offset %lld. "
				 "Cleaning remaining pages from readahead list",
				 bytes_read, offset));
			/* BB turn off caching and do new lookup on
			   file size at server? */
			while (!list_empty(page_list) && (i < num_pages)) {
				page = list_entry(page_list->prev, struct page,
						  lru);
				list_del(&page->lru);

				/* BB removeme - replace with zero of page? */
				page_cache_release(page);
			}
			break;
		}
		if (smb_read_data) {
			if (buf_type == CIFS_SMALL_BUFFER)
				cifs_small_buf_release(smb_read_data);
			else if (buf_type == CIFS_LARGE_BUFFER)
				cifs_buf_release(smb_read_data);
			smb_read_data = NULL;
		}
		bytes_read = 0;
	}

	pagevec_lru_add(&lru_pvec);

	/* need to free smb_read_data buf before exit */
	if (smb_read_data) {
		if (buf_type == CIFS_SMALL_BUFFER)
			cifs_small_buf_release(smb_read_data);
		else if (buf_type == CIFS_LARGE_BUFFER)
			cifs_buf_release(smb_read_data);
		smb_read_data = NULL;
	}

	FreeXid(xid);
	return rc;
}
static int cifs_readpage_worker(struct file *file, struct page *page,
	loff_t *poffset)
{
	char *read_data;
	int rc;

	page_cache_get(page);
	read_data = kmap(page);
	/* for reads over a certain size could initiate async read ahead */

	rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
	if (rc < 0)
		goto io_error;
	else
		cFYI(1, ("Bytes read %d", rc));

	file->f_dentry->d_inode->i_atime =
		current_fs_time(file->f_dentry->d_inode->i_sb);

	if (PAGE_CACHE_SIZE > rc)
		memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);

	flush_dcache_page(page);
	SetPageUptodate(page);
	rc = 0;

io_error:
	kunmap(page);
	page_cache_release(page);
	return rc;
}
static int cifs_readpage(struct file *file, struct page *page)
{
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	int rc, xid = GetXid();

	if (file->private_data == NULL) {
		FreeXid(xid);
		return -EBADF;
	}
	cFYI(1, ("readpage %p at offset %d 0x%x\n",
		 page, (int)offset, (int)offset));
	rc = cifs_readpage_worker(file, page, &offset);
	unlock_page(page);
	FreeXid(xid);
	return rc;
}
/* We do not want to update the file size from server for inodes
   open for write - to avoid races with writepage extending
   the file - in the future we could consider allowing
   refreshing the inode only on increases in the file size
   but this is tricky to do without racing with writebehind
   page caching in the current Linux kernel design */
int is_size_safe_to_change(struct cifsInodeInfo *cifsInode)
{
	struct cifsFileInfo *open_file = NULL;

	if (cifsInode)
		open_file = find_writable_file(cifsInode);

	if (open_file) {
		struct cifs_sb_info *cifs_sb;

		/* there is not actually a write pending so let
		   this handle go free and allow it to
		   be closable if needed */
		atomic_dec(&open_file->wrtPending);

		cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
			/* since no page cache to corrupt on directio
			   we can change size safely */
			return 1;
		}
		return 0;
	} else
		return 1;
}
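/* Prepare a page for a write that may not cover the whole page: a full
   page write can simply be marked up to date, otherwise read the
   current contents from the server first unless the file is write only */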
static int cifs_prepare_write(struct file *file, struct page *page,
	unsigned from, unsigned to)
{
	int rc = 0;
	loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;

	cFYI(1, ("prepare write for page %p from %d to %d", page, from, to));
	if (!PageUptodate(page)) {
	/*	if (to - from != PAGE_CACHE_SIZE) {
			void *kaddr = kmap_atomic(page, KM_USER0);
			memset(kaddr, 0, from);
			memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
			flush_dcache_page(page);
			kunmap_atomic(kaddr, KM_USER0);
		} */
		/* If we are writing a full page it will be up to date,
		   no need to read from the server */
		if ((to == PAGE_CACHE_SIZE) && (from == 0))
			SetPageUptodate(page);

		/* might as well read a page, it is fast enough */
		if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
			rc = cifs_readpage_worker(file, page, &offset);
		} else {
		/* should we try using another file handle if there is one -
		   how would we lock it to prevent close of that handle
		   racing with this read?
		   In any case this will be written out by commit_write */
		}
	}

	/* BB should we pass any errors back?
	   e.g. if we do not have read access to the file */
	return 0;
}
struct address_space_operations cifs_addr_ops = {
	.readpage = cifs_readpage,
	.readpages = cifs_readpages,
	.writepage = cifs_writepage,
	.writepages = cifs_writepages,
	.prepare_write = cifs_prepare_write,
	.commit_write = cifs_commit_write,
	.set_page_dirty = __set_page_dirty_nobuffers,
	/* .sync_page = cifs_sync_page, */
};