/*
 *   fs/cifs/file.c
 *
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines  Corp., 2002,2003
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/smp_lock.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"

static inline struct cifsFileInfo *cifs_init_private(
        struct cifsFileInfo *private_data, struct inode *inode,
        struct file *file, __u16 netfid)
{
        memset(private_data, 0, sizeof(struct cifsFileInfo));
        private_data->netfid = netfid;
        private_data->pid = current->tgid;
        init_MUTEX(&private_data->fh_sem);
        init_MUTEX(&private_data->lock_sem);
        INIT_LIST_HEAD(&private_data->llist);
        private_data->pfile = file; /* needed for writepage */
        private_data->pInode = inode;
        private_data->invalidHandle = FALSE;
        private_data->closePend = FALSE;
        /* we have to track num writers to the inode, since writepages
        does not tell us which handle the write is for so there can
        be a close (overlapping with write) of the filehandle that
        cifs_writepages chose to use */
        atomic_set(&private_data->wrtPending, 0);

        return private_data;
}

static inline int cifs_convert_flags(unsigned int flags)
{
        if ((flags & O_ACCMODE) == O_RDONLY)
                return GENERIC_READ;
        else if ((flags & O_ACCMODE) == O_WRONLY)
                return GENERIC_WRITE;
        else if ((flags & O_ACCMODE) == O_RDWR) {
                /* GENERIC_ALL is too much permission to request; it can
                   cause unnecessary access denied errors on create */
                /* return GENERIC_ALL; */
                return (GENERIC_READ | GENERIC_WRITE);
        }

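        /* none of the O_ACCMODE patterns matched; fall back to a default
           access mask.  0x20197 appears to correspond to READ_CONTROL |
           FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES | FILE_WRITE_EA |
           FILE_APPEND_DATA | FILE_WRITE_DATA | FILE_READ_DATA */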
        return 0x20197;
}

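/* map the O_CREAT/O_EXCL/O_TRUNC bits of the POSIX open flags to an SMB
   create disposition; see the flag mapping table in cifs_open below */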
static inline int cifs_get_disposition(unsigned int flags)
{
        if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
                return FILE_CREATE;
        else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
                return FILE_OVERWRITE_IF;
        else if ((flags & O_CREAT) == O_CREAT)
                return FILE_OPEN_IF;
        else if ((flags & O_TRUNC) == O_TRUNC)
                return FILE_OVERWRITE;
        else
                return FILE_OPEN;
}

/* all arguments to this function must be checked for validity in caller */
static inline int cifs_open_inode_helper(struct inode *inode, struct file *file,
        struct cifsInodeInfo *pCifsInode, struct cifsFileInfo *pCifsFile,
        struct cifsTconInfo *pTcon, int *oplock, FILE_ALL_INFO *buf,
        char *full_path, int xid)
{
        struct timespec temp;
        int rc;

        /* want handles we can use to read with first
           in the list so we do not have to walk the
           list to search for one in prepare_write */
        if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
                list_add_tail(&pCifsFile->flist,
                              &pCifsInode->openFileList);
        } else {
                list_add(&pCifsFile->flist,
                         &pCifsInode->openFileList);
        }
        write_unlock(&GlobalSMBSeslock);
        if (pCifsInode->clientCanCacheRead) {
                /* we have the inode open somewhere else
                   no need to discard cache data */
                goto client_can_cache;
        }

        /* BB need same check in cifs_create too? */
        /* if not oplocked, invalidate inode pages if mtime or file
           size changed */
        temp = cifs_NTtimeToUnix(le64_to_cpu(buf->LastWriteTime));
        if (timespec_equal(&file->f_path.dentry->d_inode->i_mtime, &temp) &&
                           (file->f_path.dentry->d_inode->i_size ==
                            (loff_t)le64_to_cpu(buf->EndOfFile))) {
                cFYI(1, ("inode unchanged on server"));
        } else {
                if (file->f_path.dentry->d_inode->i_mapping) {
                /* BB no need to lock inode until after invalidate
                   since namei code should already have it locked? */
                        filemap_write_and_wait(file->f_path.dentry->d_inode->i_mapping);
                }
                cFYI(1, ("invalidating remote inode since open detected it "
                         "changed"));
                invalidate_remote_inode(file->f_path.dentry->d_inode);
        }

client_can_cache:
        if (pTcon->ses->capabilities & CAP_UNIX)
                rc = cifs_get_inode_info_unix(&file->f_path.dentry->d_inode,
                        full_path, inode->i_sb, xid);
        else
                rc = cifs_get_inode_info(&file->f_path.dentry->d_inode,
                        full_path, buf, inode->i_sb, xid);

        if ((*oplock & 0xF) == OPLOCK_EXCLUSIVE) {
                pCifsInode->clientCanCacheAll = TRUE;
                pCifsInode->clientCanCacheRead = TRUE;
                cFYI(1, ("Exclusive Oplock granted on inode %p",
                         file->f_path.dentry->d_inode));
        } else if ((*oplock & 0xF) == OPLOCK_READ)
                pCifsInode->clientCanCacheRead = TRUE;

        return rc;
}

int cifs_open(struct inode *inode, struct file *file)
{
        int rc = -EACCES;
        int xid, oplock;
        struct cifs_sb_info *cifs_sb;
        struct cifsTconInfo *pTcon;
        struct cifsFileInfo *pCifsFile;
        struct cifsInodeInfo *pCifsInode;
        struct list_head *tmp;
        char *full_path = NULL;
        int desiredAccess;
        int disposition;
        __u16 netfid;
        FILE_ALL_INFO *buf = NULL;

        xid = GetXid();

        cifs_sb = CIFS_SB(inode->i_sb);
        pTcon = cifs_sb->tcon;

        if (file->f_flags & O_CREAT) {
                /* search inode for this file and fill in file->private_data */
                pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
                read_lock(&GlobalSMBSeslock);
                list_for_each(tmp, &pCifsInode->openFileList) {
                        pCifsFile = list_entry(tmp, struct cifsFileInfo,
                                               flist);
                        if ((pCifsFile->pfile == NULL) &&
                            (pCifsFile->pid == current->tgid)) {
                                /* mode set in cifs_create */

                                /* needed for writepage */
                                pCifsFile->pfile = file;

                                file->private_data = pCifsFile;
                                break;
                        }
                }
                read_unlock(&GlobalSMBSeslock);
                if (file->private_data != NULL) {
                        rc = 0;
                        FreeXid(xid);
                        return rc;
                } else {
                        if (file->f_flags & O_EXCL)
                                cERROR(1, ("could not find file instance for "
                                           "new file %p", file));
                }
        }

        full_path = build_path_from_dentry(file->f_path.dentry);
        if (full_path == NULL) {
                FreeXid(xid);
                return -ENOMEM;
        }

        cFYI(1, (" inode = 0x%p file flags are 0x%x for %s",
                 inode, file->f_flags, full_path));
        desiredAccess = cifs_convert_flags(file->f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *      POSIX Flag            CIFS Disposition
 *      ----------            ----------------
 *      O_CREAT               FILE_OPEN_IF
 *      O_CREAT | O_EXCL      FILE_CREATE
 *      O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *      O_TRUNC               FILE_OVERWRITE
 *      none of the above     FILE_OPEN
 *
 *      Note that there is no direct POSIX match for the disposition
 *      FILE_SUPERSEDE (ie create whether or not the file exists).
 *      O_CREAT | O_TRUNC is similar, but it truncates the existing
 *      file rather than creating a new one as FILE_SUPERSEDE does
 *      (which uses the attributes / metadata passed in on the open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *       O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

        disposition = cifs_get_disposition(file->f_flags);

        if (oplockEnabled)
                oplock = REQ_OPLOCK;
        else
                oplock = FALSE;

        /* BB pass O_SYNC flag through on file attributes .. BB */

        /* Also refresh inode by passing in file_info buf returned by SMBOpen
           and calling get_inode_info with returned buf (at least helps
           non-Unix server case) */

        /* BB we can not do this if this is the second open of a file
           and the first handle has writebehind data, we might be
           able to simply do a filemap_fdatawrite/filemap_fdatawait first */
        buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
        if (!buf) {
                rc = -ENOMEM;
                goto out;
        }

        if (cifs_sb->tcon->ses->capabilities & CAP_NT_SMBS)
                rc = CIFSSMBOpen(xid, pTcon, full_path, disposition,
                         desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
                         cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
                                 & CIFS_MOUNT_MAP_SPECIAL_CHR);
        else
                rc = -EIO; /* no NT SMB support, fall into legacy open below */

        if (rc == -EIO) {
                /* Old server, try legacy style OpenX */
                rc = SMBLegacyOpen(xid, pTcon, full_path, disposition,
                        desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
                        cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
                                & CIFS_MOUNT_MAP_SPECIAL_CHR);
        }
        if (rc) {
                cFYI(1, ("cifs_open returned 0x%x", rc));
                goto out;
        }
        file->private_data =
                kmalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
        if (file->private_data == NULL) {
                rc = -ENOMEM;
                goto out;
        }
        pCifsFile = cifs_init_private(file->private_data, inode, file, netfid);
        write_lock(&GlobalSMBSeslock);
        list_add(&pCifsFile->tlist, &pTcon->openFileList);

        pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
        if (pCifsInode) {
                rc = cifs_open_inode_helper(inode, file, pCifsInode,
                                            pCifsFile, pTcon,
                                            &oplock, buf, full_path, xid);
        } else {
                write_unlock(&GlobalSMBSeslock);
        }

        if (oplock & CIFS_CREATE_ACTION) {
                /* time to set mode which we can not set earlier due to
                   problems creating new read-only files */
                if (cifs_sb->tcon->ses->capabilities & CAP_UNIX) {
                        CIFSSMBUnixSetPerms(xid, pTcon, full_path,
                                            inode->i_mode,
                                            (__u64)-1, (__u64)-1, 0 /* dev */,
                                            cifs_sb->local_nls,
                                            cifs_sb->mnt_cifs_flags &
                                                CIFS_MOUNT_MAP_SPECIAL_CHR);
                } else {
                        /* BB implement via Windows security descriptors eg
                           CIFSSMBWinSetPerms(xid, pTcon, full_path, mode,
                                              -1, -1, local_nls);
                           in the meantime could set r/o dos attribute when
                           perms are eg: mode & 0222 == 0 */
                }
        }

out:
        kfree(buf);
        kfree(full_path);
        FreeXid(xid);
        return rc;
}

/* Try to reacquire byte range locks that were released when session */
/* to server was lost */
static int cifs_relock_file(struct cifsFileInfo *cifsFile)
{
        int rc = 0;

/* BB list all locks open on this file and relock */

        return rc;
}

static int cifs_reopen_file(struct inode *inode, struct file *file,
        int can_flush)
{
        int rc = -EACCES;
        int xid, oplock;
        struct cifs_sb_info *cifs_sb;
        struct cifsTconInfo *pTcon;
        struct cifsFileInfo *pCifsFile;
        struct cifsInodeInfo *pCifsInode;
        char *full_path = NULL;
        int desiredAccess;
        int disposition = FILE_OPEN;
        __u16 netfid;

        if (inode == NULL)
                return -EBADF;
        if (file->private_data) {
                pCifsFile = (struct cifsFileInfo *)file->private_data;
        } else
                return -EBADF;

        xid = GetXid();
        down(&pCifsFile->fh_sem);
        if (pCifsFile->invalidHandle == FALSE) {
                up(&pCifsFile->fh_sem);
                FreeXid(xid);
                return 0;
        }

        if (file->f_path.dentry == NULL) {
                up(&pCifsFile->fh_sem);
                cFYI(1, ("failed file reopen, no valid name if dentry freed"));
                FreeXid(xid);
                return -EBADF;
        }
        cifs_sb = CIFS_SB(inode->i_sb);
        pTcon = cifs_sb->tcon;
/* can not grab rename sem here because various ops, including
   those that already have the rename sem can end up causing writepage
   to get called and if the server was down that means we end up here,
   and we can never tell if the caller already has the rename_sem */
        full_path = build_path_from_dentry(file->f_path.dentry);
        if (full_path == NULL) {
                up(&pCifsFile->fh_sem);
                FreeXid(xid);
                return -ENOMEM;
        }

        cFYI(1, (" inode = 0x%p file flags are 0x%x for %s",
                 inode, file->f_flags, full_path));
        desiredAccess = cifs_convert_flags(file->f_flags);

        if (oplockEnabled)
                oplock = REQ_OPLOCK;
        else
                oplock = FALSE;

        /* Can not refresh inode by passing in file_info buf to be returned
           by SMBOpen and then calling get_inode_info with returned buf
           since file might have write behind data that needs to be flushed
           and server version of file size can be stale. If we knew for sure
           that inode was not dirty locally we could do this */

/*      buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
        if (buf == 0) {
                up(&pCifsFile->fh_sem);
                kfree(full_path);
                FreeXid(xid);
                return -ENOMEM;
        } */
        rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, desiredAccess,
                         CREATE_NOT_DIR, &netfid, &oplock, NULL,
                         cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
                                CIFS_MOUNT_MAP_SPECIAL_CHR);
        if (rc) {
                up(&pCifsFile->fh_sem);
                cFYI(1, ("cifs_open returned 0x%x", rc));
                cFYI(1, ("oplock: %d", oplock));
        } else {
                pCifsFile->netfid = netfid;
                pCifsFile->invalidHandle = FALSE;
                up(&pCifsFile->fh_sem);
                pCifsInode = CIFS_I(inode);
                if (pCifsInode) {
                        if (can_flush) {
                                filemap_write_and_wait(inode->i_mapping);
                        /* temporarily disable caching while we
                           go to server to get inode info */
                                pCifsInode->clientCanCacheAll = FALSE;
                                pCifsInode->clientCanCacheRead = FALSE;
                                if (pTcon->ses->capabilities & CAP_UNIX)
                                        rc = cifs_get_inode_info_unix(&inode,
                                                full_path, inode->i_sb, xid);
                                else
                                        rc = cifs_get_inode_info(&inode,
                                                full_path, NULL, inode->i_sb,
                                                xid);
                        } /* else we are writing out data to server already
                             and could deadlock if we tried to flush data, and
                             since we do not know if we have data that would
                             invalidate the current end of file on the server
                             we can not go to the server to get the new inode
                             info */
                        if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
                                pCifsInode->clientCanCacheAll = TRUE;
                                pCifsInode->clientCanCacheRead = TRUE;
                                cFYI(1, ("Exclusive Oplock granted on inode %p",
                                         file->f_path.dentry->d_inode));
                        } else if ((oplock & 0xF) == OPLOCK_READ) {
                                pCifsInode->clientCanCacheRead = TRUE;
                                pCifsInode->clientCanCacheAll = FALSE;
                        } else {
                                pCifsInode->clientCanCacheRead = FALSE;
                                pCifsInode->clientCanCacheAll = FALSE;
                        }
                        cifs_relock_file(pCifsFile);
                }
        }

        kfree(full_path);
        FreeXid(xid);
        return rc;
}

int cifs_close(struct inode *inode, struct file *file)
{
        int rc = 0;
        int xid;
        struct cifs_sb_info *cifs_sb;
        struct cifsTconInfo *pTcon;
        struct cifsFileInfo *pSMBFile =
                (struct cifsFileInfo *)file->private_data;

        xid = GetXid();

        cifs_sb = CIFS_SB(inode->i_sb);
        pTcon = cifs_sb->tcon;
        if (pSMBFile) {
                struct cifsLockInfo *li, *tmp;

                pSMBFile->closePend = TRUE;
                if (pTcon) {
                        /* no sense reconnecting to close a file that is
                           already closed */
                        if (pTcon->tidStatus != CifsNeedReconnect) {
                                int timeout = 2;
                                while ((atomic_read(&pSMBFile->wrtPending) != 0)
                                         && (timeout < 1000)) {
                                        /* Give write a better chance to get to
                                        server ahead of the close.  We do not
                                        want to add a wait_q here as it would
                                        increase the memory utilization as
                                        the struct would be in each open file,
                                        but this should give enough time to
                                        clear the socket */
#ifdef CONFIG_CIFS_DEBUG2
                                        cFYI(1, ("close delay, write pending"));
#endif /* DEBUG2 */
                                        msleep(timeout);
                                        timeout *= 4;
                                }
                                if (atomic_read(&pSMBFile->wrtPending))
                                        cERROR(1, ("close with pending writes"));
                                rc = CIFSSMBClose(xid, pTcon,
                                                  pSMBFile->netfid);
                        }
                }

                /* Delete any outstanding lock records.
                   We'll lose them when the file is closed anyway. */
                down(&pSMBFile->lock_sem);
                list_for_each_entry_safe(li, tmp, &pSMBFile->llist, llist) {
                        list_del(&li->llist);
                        kfree(li);
                }
                up(&pSMBFile->lock_sem);

                write_lock(&GlobalSMBSeslock);
                list_del(&pSMBFile->flist);
                list_del(&pSMBFile->tlist);
                write_unlock(&GlobalSMBSeslock);
                kfree(pSMBFile->search_resume_name);
                kfree(file->private_data);
                file->private_data = NULL;
        } else
                rc = -EBADF;

        if (list_empty(&(CIFS_I(inode)->openFileList))) {
                cFYI(1, ("closing last open instance for inode %p", inode));
                /* if the file is not open we do not know if we can cache info
                   on this inode, much less write behind and read ahead */
                CIFS_I(inode)->clientCanCacheRead = FALSE;
                CIFS_I(inode)->clientCanCacheAll  = FALSE;
        }
        if ((rc == 0) && CIFS_I(inode)->write_behind_rc)
                rc = CIFS_I(inode)->write_behind_rc;
        FreeXid(xid);
        return rc;
}

int cifs_closedir(struct inode *inode, struct file *file)
{
        int rc = 0;
        int xid;
        struct cifsFileInfo *pCFileStruct =
            (struct cifsFileInfo *)file->private_data;
        char *ptmp;

        cFYI(1, ("Closedir inode = 0x%p", inode));

        xid = GetXid();

        if (pCFileStruct) {
                struct cifsTconInfo *pTcon;
                struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);

                pTcon = cifs_sb->tcon;

                cFYI(1, ("Freeing private data in close dir"));
                if ((pCFileStruct->srch_inf.endOfSearch == FALSE) &&
                   (pCFileStruct->invalidHandle == FALSE)) {
                        pCFileStruct->invalidHandle = TRUE;
                        rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
                        cFYI(1, ("Closing uncompleted readdir with rc %d",
                                 rc));
                        /* not much we can do if it fails anyway, ignore rc */
                        rc = 0;
                }
                ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
                if (ptmp) {
                        cFYI(1, ("closedir free smb buf in srch struct"));
                        pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
                        if (pCFileStruct->srch_inf.smallBuf)
                                cifs_small_buf_release(ptmp);
                        else
                                cifs_buf_release(ptmp);
                }
                ptmp = pCFileStruct->search_resume_name;
                if (ptmp) {
                        cFYI(1, ("closedir free resume name"));
                        pCFileStruct->search_resume_name = NULL;
                        kfree(ptmp);
                }
                kfree(file->private_data);
                file->private_data = NULL;
        }
        /* BB can we lock the filestruct while this is going on? */
        FreeXid(xid);
        return rc;
}

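/* remember a byte-range lock taken on this handle so that it can be
   matched against later unlock requests and freed in cifs_close */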
static int store_file_lock(struct cifsFileInfo *fid, __u64 len,
                                __u64 offset, __u8 lockType)
{
        struct cifsLockInfo *li = kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
        if (li == NULL)
                return -ENOMEM;
        li->offset = offset;
        li->length = len;
        li->type = lockType;
        down(&fid->lock_sem);
        list_add(&li->llist, &fid->llist);
        up(&fid->lock_sem);
        return 0;
}

int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
{
        int rc, xid;
        __u32 numLock = 0;
        __u32 numUnlock = 0;
        __u64 length;
        int wait_flag = FALSE;
        struct cifs_sb_info *cifs_sb;
        struct cifsTconInfo *pTcon;
        __u16 netfid;
        __u8 lockType = LOCKING_ANDX_LARGE_FILES;
        int posix_locking;

        length = 1 + pfLock->fl_end - pfLock->fl_start;
        rc = -EACCES;
        xid = GetXid();

        cFYI(1, ("Lock parm: 0x%x flockflags: "
                 "0x%x flocktype: 0x%x start: %lld end: %lld",
                cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
                pfLock->fl_end));

        if (pfLock->fl_flags & FL_POSIX)
                cFYI(1, ("Posix"));
        if (pfLock->fl_flags & FL_FLOCK)
                cFYI(1, ("Flock"));
        if (pfLock->fl_flags & FL_SLEEP) {
                cFYI(1, ("Blocking lock"));
                wait_flag = TRUE;
        }
        if (pfLock->fl_flags & FL_ACCESS)
                cFYI(1, ("Process suspended by mandatory locking - "
                         "not implemented yet"));
        if (pfLock->fl_flags & FL_LEASE)
                cFYI(1, ("Lease on file - not implemented yet"));
        if (pfLock->fl_flags &
            (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
                cFYI(1, ("Unknown lock flags 0x%x", pfLock->fl_flags));

        if (pfLock->fl_type == F_WRLCK) {
                cFYI(1, ("F_WRLCK "));
                numLock = 1;
        } else if (pfLock->fl_type == F_UNLCK) {
                cFYI(1, ("F_UNLCK"));
                numUnlock = 1;
                /* Check if unlock includes more than
                one lock range */
        } else if (pfLock->fl_type == F_RDLCK) {
                cFYI(1, ("F_RDLCK"));
                lockType |= LOCKING_ANDX_SHARED_LOCK;
                numLock = 1;
        } else if (pfLock->fl_type == F_EXLCK) {
                cFYI(1, ("F_EXLCK"));
                numLock = 1;
        } else if (pfLock->fl_type == F_SHLCK) {
                cFYI(1, ("F_SHLCK"));
                lockType |= LOCKING_ANDX_SHARED_LOCK;
                numLock = 1;
        } else
                cFYI(1, ("Unknown type of lock"));

        cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
        pTcon = cifs_sb->tcon;

        if (file->private_data == NULL) {
                FreeXid(xid);
                return -EBADF;
        }
        netfid = ((struct cifsFileInfo *)file->private_data)->netfid;

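        /* POSIX byte-range lock semantics are only available when the server
           supports the CIFS Unix extensions with the fcntl lock capability */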
        posix_locking = (cifs_sb->tcon->ses->capabilities & CAP_UNIX) &&
                        (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(cifs_sb->tcon->fsUnixInfo.Capability));

        /* BB add code here to normalize offset and length to
        account for negative length which we can not accept over the
        wire */
        if (IS_GETLK(cmd)) {
                if (posix_locking) {
                        int posix_lock_type;
                        if (lockType & LOCKING_ANDX_SHARED_LOCK)
                                posix_lock_type = CIFS_RDLCK;
                        else
                                posix_lock_type = CIFS_WRLCK;
                        rc = CIFSSMBPosixLock(xid, pTcon, netfid, 1 /* get */,
                                        length, pfLock,
                                        posix_lock_type, wait_flag);
                        FreeXid(xid);
                        return rc;
                }

                /* BB we could chain these into one lock request BB */
                rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start,
                                 0, 1, lockType, 0 /* wait flag */ );
                if (rc == 0) {
                        rc = CIFSSMBLock(xid, pTcon, netfid, length,
                                         pfLock->fl_start, 1 /* numUnlock */ ,
                                         0 /* numLock */ , lockType,
                                         0 /* wait flag */ );
                        pfLock->fl_type = F_UNLCK;
                        if (rc != 0)
                                cERROR(1, ("Error unlocking previously locked "
                                           "range %d during test of lock", rc));
                        rc = 0;

                } else {
                        /* if rc == ERR_SHARING_VIOLATION ? */
                        rc = 0; /* do not change lock type to unlock
                                   since range in use */
                }

                FreeXid(xid);
                return rc;
        }

        if (!numLock && !numUnlock) {
                /* if no lock or unlock then nothing
                to do since we do not know what it is */
                FreeXid(xid);
                return -EOPNOTSUPP;
        }

        if (posix_locking) {
                int posix_lock_type;
                if (lockType & LOCKING_ANDX_SHARED_LOCK)
                        posix_lock_type = CIFS_RDLCK;
                else
                        posix_lock_type = CIFS_WRLCK;

                if (numUnlock == 1)
                        posix_lock_type = CIFS_UNLCK;

                rc = CIFSSMBPosixLock(xid, pTcon, netfid, 0 /* set */,
                                      length, pfLock,
                                      posix_lock_type, wait_flag);
        } else {
                struct cifsFileInfo *fid = (struct cifsFileInfo *)file->private_data;

                if (numLock) {
                        rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start,
                                        0, numLock, lockType, wait_flag);

                        if (rc == 0) {
                                /* For Windows locks we must store them. */
                                rc = store_file_lock(fid, length,
                                                pfLock->fl_start, lockType);
                        }
                } else if (numUnlock) {
                        /* For each stored lock that this unlock overlaps
                           completely, unlock it. */
                        int stored_rc = 0;
                        struct cifsLockInfo *li, *tmp;

                        rc = 0;
                        down(&fid->lock_sem);
                        list_for_each_entry_safe(li, tmp, &fid->llist, llist) {
                                if (pfLock->fl_start <= li->offset &&
                                                length >= li->length) {
                                        stored_rc = CIFSSMBLock(xid, pTcon, netfid,
                                                        li->length, li->offset,
                                                        1, 0, li->type, FALSE);
                                        if (stored_rc)
                                                rc = stored_rc;

                                        list_del(&li->llist);
                                        kfree(li);
                                }
                        }
                        up(&fid->lock_sem);
                }
        }

        if (pfLock->fl_flags & FL_POSIX)
                posix_lock_file_wait(file, pfLock);
        FreeXid(xid);
        return rc;
}

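/* write data from a user space buffer at *poffset; loops until write_size
   bytes have been sent, reopening the file handle if it was invalidated
   by a session reset while we were blocked */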
ssize_t cifs_user_write(struct file *file, const char __user *write_data,
        size_t write_size, loff_t *poffset)
{
        int rc = 0;
        unsigned int bytes_written = 0;
        unsigned int total_written;
        struct cifs_sb_info *cifs_sb;
        struct cifsTconInfo *pTcon;
        int xid, long_op;
        struct cifsFileInfo *open_file;

        if (file->f_path.dentry == NULL)
                return -EBADF;

        cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
        if (cifs_sb == NULL)
                return -EBADF;

        pTcon = cifs_sb->tcon;

        /* cFYI(1,
           (" write %d bytes to offset %lld of %s", write_size,
           *poffset, file->f_path.dentry->d_name.name)); */

        if (file->private_data == NULL)
                return -EBADF;
        else
                open_file = (struct cifsFileInfo *) file->private_data;

        xid = GetXid();
        if (file->f_path.dentry->d_inode == NULL) {
                FreeXid(xid);
                return -EBADF;
        }

        if (*poffset > file->f_path.dentry->d_inode->i_size)
                long_op = 2; /* writes past end of file can take a long time */
        else
                long_op = 1;

        for (total_written = 0; write_size > total_written;
             total_written += bytes_written) {
                rc = -EAGAIN;
                while (rc == -EAGAIN) {
                        if (file->private_data == NULL) {
                                /* file has been closed on us */
                                FreeXid(xid);
                        /* if we have gotten here we have written some data
                           and blocked, and the file has been freed on us while
                           we blocked so return what we managed to write */
                                return total_written;
                        }
                        if (open_file->closePend) {
                                FreeXid(xid);
                                if (total_written)
                                        return total_written;
                                else
                                        return -EBADF;
                        }
                        if (open_file->invalidHandle) {
                                if ((file->f_path.dentry == NULL) ||
                                    (file->f_path.dentry->d_inode == NULL)) {
                                        FreeXid(xid);
                                        return total_written;
                                }
                                /* we could deadlock if we called
                                   filemap_fdatawait from here so tell
                                   reopen_file not to flush data to server
                                   now */
                                rc = cifs_reopen_file(file->f_path.dentry->d_inode,
                                        file, FALSE);
                                if (rc != 0)
                                        break;
                        }

                        rc = CIFSSMBWrite(xid, pTcon,
                                open_file->netfid,
                                min_t(const int, cifs_sb->wsize,
                                      write_size - total_written),
                                *poffset, &bytes_written,
                                NULL, write_data + total_written, long_op);
                }
                if (rc || (bytes_written == 0)) {
                        if (total_written)
                                break;
                        else {
                                FreeXid(xid);
                                return rc;
                        }
                } else
                        *poffset += bytes_written;
                long_op = FALSE; /* subsequent writes fast -
                                    15 seconds is plenty */
        }

        cifs_stats_bytes_written(pTcon, total_written);

        /* since the write may have blocked check these pointers again */
        if (file->f_path.dentry) {
                if (file->f_path.dentry->d_inode) {
                        struct inode *inode = file->f_path.dentry->d_inode;
                        inode->i_ctime = inode->i_mtime =
                                current_fs_time(inode->i_sb);
                        if (total_written > 0) {
                                if (*poffset > file->f_path.dentry->d_inode->i_size)
                                        i_size_write(file->f_path.dentry->d_inode,
                                        *poffset);
                        }
                        mark_inode_dirty_sync(file->f_path.dentry->d_inode);
                }
        }
        FreeXid(xid);
        return total_written;
}

static ssize_t cifs_write(struct file *file, const char *write_data,
        size_t write_size, loff_t *poffset)
{
        int rc = 0;
        unsigned int bytes_written = 0;
        unsigned int total_written;
        struct cifs_sb_info *cifs_sb;
        struct cifsTconInfo *pTcon;
        int xid, long_op;
        struct cifsFileInfo *open_file;

        if (file->f_path.dentry == NULL)
                return -EBADF;

        cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
        if (cifs_sb == NULL)
                return -EBADF;

        pTcon = cifs_sb->tcon;

        cFYI(1, ("write %zd bytes to offset %lld of %s", write_size,
           *poffset, file->f_path.dentry->d_name.name));

        if (file->private_data == NULL)
                return -EBADF;
        else
                open_file = (struct cifsFileInfo *)file->private_data;

        xid = GetXid();
        if (file->f_path.dentry->d_inode == NULL) {
                FreeXid(xid);
                return -EBADF;
        }

        if (*poffset > file->f_path.dentry->d_inode->i_size)
                long_op = 2; /* writes past end of file can take a long time */
        else
                long_op = 1;

        for (total_written = 0; write_size > total_written;
             total_written += bytes_written) {
                rc = -EAGAIN;
                while (rc == -EAGAIN) {
                        if (file->private_data == NULL) {
                                /* file has been closed on us */
                                FreeXid(xid);
                        /* if we have gotten here we have written some data
                           and blocked, and the file has been freed on us
                           while we blocked so return what we managed to
                           write */
                                return total_written;
                        }
                        if (open_file->closePend) {
                                FreeXid(xid);
                                if (total_written)
                                        return total_written;
                                else
                                        return -EBADF;
                        }
                        if (open_file->invalidHandle) {
                                if ((file->f_path.dentry == NULL) ||
                                   (file->f_path.dentry->d_inode == NULL)) {
                                        FreeXid(xid);
                                        return total_written;
                                }
                                /* we could deadlock if we called
                                   filemap_fdatawait from here so tell
                                   reopen_file not to flush data to
                                   server now */
                                rc = cifs_reopen_file(file->f_path.dentry->d_inode,
                                        file, FALSE);
                                if (rc != 0)
                                        break;
                        }
                        if (experimEnabled || (pTcon->ses->server &&
                                ((pTcon->ses->server->secMode &
                                (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
                                == 0))) {
                                struct kvec iov[2];
                                unsigned int len;

                                len = min((size_t)cifs_sb->wsize,
                                          write_size - total_written);
                                /* iov[0] is reserved for smb header */
                                iov[1].iov_base = (char *)write_data +
                                                  total_written;
                                iov[1].iov_len = len;
                                rc = CIFSSMBWrite2(xid, pTcon,
                                                open_file->netfid, len,
                                                *poffset, &bytes_written,
                                                iov, 1, long_op);
                        } else
                                rc = CIFSSMBWrite(xid, pTcon,
                                         open_file->netfid,
                                         min_t(const int, cifs_sb->wsize,
                                               write_size - total_written),
                                         *poffset, &bytes_written,
                                         write_data + total_written,
                                         NULL, long_op);
                }
                if (rc || (bytes_written == 0)) {
                        if (total_written)
                                break;
                        else {
                                FreeXid(xid);
                                return rc;
                        }
                } else
                        *poffset += bytes_written;
                long_op = FALSE; /* subsequent writes fast -
                                    15 seconds is plenty */
        }

        cifs_stats_bytes_written(pTcon, total_written);

        /* since the write may have blocked check these pointers again */
        if (file->f_path.dentry) {
                if (file->f_path.dentry->d_inode) {
                        file->f_path.dentry->d_inode->i_ctime =
                        file->f_path.dentry->d_inode->i_mtime = CURRENT_TIME;
                        if (total_written > 0) {
                                if (*poffset > file->f_path.dentry->d_inode->i_size)
                                        i_size_write(file->f_path.dentry->d_inode,
                                                     *poffset);
                        }
                        mark_inode_dirty_sync(file->f_path.dentry->d_inode);
                }
        }
        FreeXid(xid);
        return total_written;
}

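/* walk the inode's list of open file handles looking for one opened for
   writing; bumps wrtPending on the handle it returns, so the caller must
   drop that count with atomic_dec when the write is done */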
struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode)
{
        struct cifsFileInfo *open_file;
        int rc;

        /* Having a null inode here (because mapping->host was set to zero by
        the VFS or MM) should not happen but we had reports of an oops (due to
        it being zero) during stress test cases so we need to check for it */

        if (cifs_inode == NULL) {
                cERROR(1, ("Null inode passed to cifs_writeable_file"));
                dump_stack();
                return NULL;
        }

        read_lock(&GlobalSMBSeslock);
        list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
                if (open_file->closePend)
                        continue;
                if (open_file->pfile &&
                    ((open_file->pfile->f_flags & O_RDWR) ||
                     (open_file->pfile->f_flags & O_WRONLY))) {
                        atomic_inc(&open_file->wrtPending);
                        read_unlock(&GlobalSMBSeslock);
                        if ((open_file->invalidHandle) &&
                           (!open_file->closePend) /* BB fixme -since the second clause can not be true remove it BB */) {
                                rc = cifs_reopen_file(&cifs_inode->vfs_inode,
                                                      open_file->pfile, FALSE);
                                /* if it fails, try another handle - might be */
                                /* dangerous to hold up writepages with retry */
                                if (rc) {
                                        cFYI(1, ("failed on reopen file in wp"));
                                        read_lock(&GlobalSMBSeslock);
                                        /* can not use this handle, no write
                                        pending on this one after all */
                                        atomic_dec
                                             (&open_file->wrtPending);
                                        continue;
                                }
                        }
                        return open_file;
                }
        }
        read_unlock(&GlobalSMBSeslock);
        return NULL;
}

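/* write the [from, to) byte range of the locked page back to the server
   using any writable handle cached for the inode (helper for the page
   writeback paths) */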
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
        struct address_space *mapping = page->mapping;
        loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
        char *write_data;
        int rc = -EFAULT;
        int bytes_written = 0;
        struct cifs_sb_info *cifs_sb;
        struct cifsTconInfo *pTcon;
        struct inode *inode;
        struct cifsFileInfo *open_file;

        if (!mapping || !mapping->host)
                return -EFAULT;

        inode = page->mapping->host;
        cifs_sb = CIFS_SB(inode->i_sb);
        pTcon = cifs_sb->tcon;

        offset += (loff_t)from;
        write_data = kmap(page);
        write_data += from;

        if ((to > PAGE_CACHE_SIZE) || (from > to)) {
                kunmap(page);
                return -EIO;
        }

        /* racing with truncate? */
        if (offset > mapping->host->i_size) {
                kunmap(page);
                return 0; /* don't care */
        }

        /* check to make sure that we are not extending the file */
        if (mapping->host->i_size - offset < (loff_t)to)
                to = (unsigned)(mapping->host->i_size - offset);

        open_file = find_writable_file(CIFS_I(mapping->host));
        if (open_file) {
                bytes_written = cifs_write(open_file->pfile, write_data,
                                           to-from, &offset);
                atomic_dec(&open_file->wrtPending);
                /* Does mm or vfs already set times? */
                inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
                if ((bytes_written > 0) && (offset)) {
                        rc = 0;
                } else if (bytes_written < 0) {
                        if (rc != -EBADF)
                                rc = bytes_written;
                }
        } else {
                cFYI(1, ("No writeable filehandles for inode"));
                rc = -EIO;
        }

        kunmap(page);
        return rc;
}

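/* gather runs of contiguous dirty pages and send each run to the server in
   a single SMB write; falls back to generic_writepages (one page at a time
   via cifs_writepage) when wsize is too small or packet signing is in use
   without the experimental multipage write support */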
static int cifs_writepages(struct address_space *mapping,
                           struct writeback_control *wbc)
{
        struct backing_dev_info *bdi = mapping->backing_dev_info;
        unsigned int bytes_to_write;
        unsigned int bytes_written;
        struct cifs_sb_info *cifs_sb;
        int done = 0;
        pgoff_t end;
        pgoff_t index;
        int range_whole = 0;
        struct kvec *iov;
        int len;
        int n_iov = 0;
        pgoff_t next;
        int nr_pages;
        __u64 offset = 0;
        struct cifsFileInfo *open_file;
        struct page *page;
        struct pagevec pvec;
        int rc = 0;
        int scanned = 0;
        int xid;

        cifs_sb = CIFS_SB(mapping->host->i_sb);

        /*
         * If wsize is smaller than the page cache size, default to writing
1166          * one page at a time via cifs_writepage
1167          */
1168         if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1169                 return generic_writepages(mapping, wbc);
1170
1171         if((cifs_sb->tcon->ses) && (cifs_sb->tcon->ses->server))
1172                 if(cifs_sb->tcon->ses->server->secMode &
1173                           (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
1174                         if(!experimEnabled) 
1175                                 return generic_writepages(mapping, wbc);
1176
1177         iov = kmalloc(32 * sizeof(struct kvec), GFP_KERNEL);
1178         if(iov == NULL)
1179                 return generic_writepages(mapping, wbc);
1180
1181
1182         /*
1183          * BB: Is this meaningful for a non-block-device file system?
1184          * If it is, we should test it again after we do I/O
1185          */
1186         if (wbc->nonblocking && bdi_write_congested(bdi)) {
1187                 wbc->encountered_congestion = 1;
1188                 kfree(iov);
1189                 return 0;
1190         }
1191
1192         xid = GetXid();
1193
1194         pagevec_init(&pvec, 0);
1195         if (wbc->range_cyclic) {
1196                 index = mapping->writeback_index; /* Start from prev offset */
1197                 end = -1;
1198         } else {
1199                 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1200                 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1201                 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
1202                         range_whole = 1;
1203                 scanned = 1;
1204         }
1205 retry:
1206         while (!done && (index <= end) &&
1207                (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
1208                         PAGECACHE_TAG_DIRTY,
1209                         min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
1210                 int first;
1211                 unsigned int i;
1212
1213                 first = -1;
1214                 next = 0;
1215                 n_iov = 0;
1216                 bytes_to_write = 0;
1217
1218                 for (i = 0; i < nr_pages; i++) {
1219                         page = pvec.pages[i];
1220                         /*
1221                          * At this point we hold neither mapping->tree_lock nor
1222                          * lock on the page itself: the page may be truncated or
1223                          * invalidated (changing page->mapping to NULL), or even
1224                          * swizzled back from swapper_space to tmpfs file
1225                          * mapping
1226                          */
1227
1228                         if (first < 0)
1229                                 lock_page(page);
1230                         else if (TestSetPageLocked(page))
1231                                 break;
1232
1233                         if (unlikely(page->mapping != mapping)) {
1234                                 unlock_page(page);
1235                                 break;
1236                         }
1237
1238                         if (!wbc->range_cyclic && page->index > end) {
1239                                 done = 1;
1240                                 unlock_page(page);
1241                                 break;
1242                         }
1243
1244                         if (next && (page->index != next)) {
1245                                 /* Not next consecutive page */
1246                                 unlock_page(page);
1247                                 break;
1248                         }
1249
1250                         if (wbc->sync_mode != WB_SYNC_NONE)
1251                                 wait_on_page_writeback(page);
1252
1253                         if (PageWriteback(page) ||
1254                                         !clear_page_dirty_for_io(page)) {
1255                                 unlock_page(page);
1256                                 break;
1257                         }
1258
1259                         /*
1260                          * This actually clears the dirty bit in the radix tree.
1261                          * See cifs_writepage() for more commentary.
1262                          */
1263                         set_page_writeback(page);
1264
1265                         if (page_offset(page) >= mapping->host->i_size) {
1266                                 done = 1;
1267                                 unlock_page(page);
1268                                 end_page_writeback(page);
1269                                 break;
1270                         }
1271
1272                         /*
1273                          * BB can we get rid of this?  pages are held by pvec
1274                          */
1275                         page_cache_get(page);
1276
1277                         len = min(mapping->host->i_size - page_offset(page),
1278                                   (loff_t)PAGE_CACHE_SIZE);
1279
1280                         /* reserve iov[0] for the smb header */
1281                         n_iov++;
1282                         iov[n_iov].iov_base = kmap(page);
1283                         iov[n_iov].iov_len = len;
1284                         bytes_to_write += len;
1285
1286                         if (first < 0) {
1287                                 first = i;
1288                                 offset = page_offset(page);
1289                         }
1290                         next = page->index + 1;
1291                         if (bytes_to_write + PAGE_CACHE_SIZE > cifs_sb->wsize)
1292                                 break;
1293                 }
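                /*
                 * If any pages were collected, send the whole batch to the
                 * server in a single CIFSSMBWrite2 call, then unwind:
                 * unmap, unlock and complete writeback on every page,
                 * flagging the pages and the mapping on error.
                 */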
1294                 if (n_iov) {
1295                         /* Search for a writable handle every time we call
1296                          * CIFSSMBWrite2.  We can't rely on the last handle
1297                          * we used to still be valid
1298                          */
1299                         open_file = find_writable_file(CIFS_I(mapping->host));
1300                         if (!open_file) {
1301                                 cERROR(1, ("No writable handles for inode"));
1302                                 rc = -EBADF;
1303                         } else {
1304                                 rc = CIFSSMBWrite2(xid, cifs_sb->tcon,
1305                                                    open_file->netfid,
1306                                                    bytes_to_write, offset,
1307                                                    &bytes_written, iov, n_iov,
1308                                                    1);
1309                                 atomic_dec(&open_file->wrtPending);
1310                                 if (rc || bytes_written < bytes_to_write) {
1311                                         cERROR(1,("Write2 ret %d, written = %d",
1312                                                   rc, bytes_written));
1313                                         /* BB what if continued retry is
1314                                            requested via mount flags? */
1315                                         set_bit(AS_EIO, &mapping->flags);
1316                                 } else {
1317                                         cifs_stats_bytes_written(cifs_sb->tcon,
1318                                                                  bytes_written);
1319                                 }
1320                         }
1321                         for (i = 0; i < n_iov; i++) {
1322                                 page = pvec.pages[first + i];
1323                                 /* Should we also set the page error on a
1324                                 successful rc when too little data was written? */
1325                                 /* BB investigate retry logic on temporary
1326                                 server crash cases and how recovery works
1327                                 when the page is marked in error */
1328                                 if (rc)
1329                                         SetPageError(page);
1330                                 kunmap(page);
1331                                 unlock_page(page);
1332                                 end_page_writeback(page);
1333                                 page_cache_release(page);
1334                         }
1335                         if ((wbc->nr_to_write -= n_iov) <= 0)
1336                                 done = 1;
1337                         index = next;
1338                 }
1339                 pagevec_release(&pvec);
1340         }
1341         if (!scanned && !done) {
1342                 /*
1343                  * We hit the last page and there is more work to be done: wrap
1344                  * back to the start of the file
1345                  */
1346                 scanned = 1;
1347                 index = 0;
1348                 goto retry;
1349         }
1350         if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
1351                 mapping->writeback_index = index;
1352
1353         FreeXid(xid);
1354         kfree(iov);
1355         return rc;
1356 }
1357
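/*
 * Write a single page back to the server: take a reference on the page,
 * mark it for writeback, hand the full page to cifs_partialpagewrite(),
 * then mark it up to date, unlock it and complete writeback.  See the
 * comment below on why the writeback tag must be set here.
 */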
1358 static int cifs_writepage(struct page* page, struct writeback_control *wbc)
1359 {
1360         int rc = -EFAULT;
1361         int xid;
1362
1363         xid = GetXid();
1364 /* BB add check for wbc flags */
1365         page_cache_get(page);
1366         if (!PageUptodate(page)) {
1367                 cFYI(1, ("ppw - page not up to date"));
1368         }
1369
1370         /*
1371          * Set the "writeback" flag, and clear "dirty" in the radix tree.
1372          *
1373          * A writepage() implementation always needs to do either this,
1374          * or re-dirty the page with "redirty_page_for_writepage()" in
1375          * the case of a failure.
1376          *
1377          * Just unlocking the page will cause the radix tree tag-bits
1378          * to fail to update with the state of the page correctly.
1379          */
1380         set_page_writeback(page);               
1381         rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
1382         SetPageUptodate(page); /* BB add check for error and ClearPageUptodate? */
1383         unlock_page(page);
1384         end_page_writeback(page);
1385         page_cache_release(page);
1386         FreeXid(xid);
1387         return rc;
1388 }
1389
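/*
 * commit_write: called after prepare_write once the caller has copied its
 * data into the page.  A write that extends the file updates i_size in
 * core (the server-side SetEOF path below is currently commented out).
 * If the page is not up to date, the affected range is written through to
 * the server immediately via cifs_write(); an up-to-date page is simply
 * marked dirty and left for writeback.
 */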
1390 static int cifs_commit_write(struct file *file, struct page *page,
1391         unsigned offset, unsigned to)
1392 {
1393         int xid;
1394         int rc = 0;
1395         struct inode *inode = page->mapping->host;
1396         loff_t position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
1397         char *page_data;
1398
1399         xid = GetXid();
1400         cFYI(1, ("commit write for page %p up to position %lld for %d", 
1401                  page, position, to));
1402         if (position > inode->i_size) {
1403                 i_size_write(inode, position);
1404                 /* if (file->private_data == NULL) {
1405                         rc = -EBADF;
1406                 } else {
1407                         open_file = (struct cifsFileInfo *)file->private_data;
1408                         cifs_sb = CIFS_SB(inode->i_sb);
1409                         rc = -EAGAIN;
1410                         while (rc == -EAGAIN) {
1411                                 if ((open_file->invalidHandle) && 
1412                                     (!open_file->closePend)) {
1413                                         rc = cifs_reopen_file(
1414                                                 file->f_path.dentry->d_inode, file);
1415                                         if (rc != 0)
1416                                                 break;
1417                                 }
1418                                 if (!open_file->closePend) {
1419                                         rc = CIFSSMBSetFileSize(xid,
1420                                                 cifs_sb->tcon, position,
1421                                                 open_file->netfid,
1422                                                 open_file->pid, FALSE);
1423                                 } else {
1424                                         rc = -EBADF;
1425                                         break;
1426                                 }
1427                         }
1428                         cFYI(1, (" SetEOF (commit write) rc = %d", rc));
1429                 } */
1430         }
1431         if (!PageUptodate(page)) {
1432                 position =  ((loff_t)page->index << PAGE_CACHE_SHIFT) + offset;
1433                 /* can not rely on (or let) writepage write this data */
1434                 if (to < offset) {
1435                         cFYI(1, ("Illegal offsets, can not copy from %d to %d",
1436                                 offset, to));
1437                         FreeXid(xid);
1438                         return rc;
1439                 }
1440                 /* this is probably better than calling partialpage_write
1441                    directly, since here the file handle is already known
1442                    and we might as well use it */
1443                 /* BB check if anything else is missing from ppw,
1444                    such as updating the last write time */
1445                 page_data = kmap(page);
1446                 rc = cifs_write(file, page_data + offset, to-offset,
1447                                 &position);
1448                 if (rc > 0)
1449                         rc = 0;
1450                 /* else if (rc < 0) should we set writebehind rc? */
1451                 kunmap(page);
1452         } else {        
1453                 set_page_dirty(page);
1454         }
1455
1456         FreeXid(xid);
1457         return rc;
1458 }
1459
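/*
 * fsync: write out all dirty pages for the inode and, on success, clear
 * any recorded write-behind error.  The datasync flag is logged but not
 * otherwise used here.
 */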
1460 int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
1461 {
1462         int xid;
1463         int rc = 0;
1464         struct inode *inode = file->f_path.dentry->d_inode;
1465
1466         xid = GetXid();
1467
1468         cFYI(1, ("Sync file - name: %s datasync: 0x%x",
1469                 dentry->d_name.name, datasync));
1470
1471         rc = filemap_fdatawrite(inode->i_mapping);
1472         if (rc == 0)
1473                 CIFS_I(inode)->write_behind_rc = 0;
1474         FreeXid(xid);
1475         return rc;
1476 }
1477
1478 /* static void cifs_sync_page(struct page *page)
1479 {
1480         struct address_space *mapping;
1481         struct inode *inode;
1482         unsigned long index = page->index;
1483         unsigned int rpages = 0;
1484         int rc = 0;
1485
1486         cFYI(1, ("sync page %p",page));
1487         mapping = page->mapping;
1488         if (!mapping)
1489                 return 0;
1490         inode = mapping->host;
1491         if (!inode)
1492                 return; */
1493
1494 /*      fill in rpages then 
1495         result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
1496
1497 /*      cFYI(1, ("rpages is %d for sync page of Index %ld", rpages, index));
1498
1499 #if 0
1500         if (rc < 0)
1501                 return rc;
1502         return 0;
1503 #endif
1504 } */
1505
1506 /*
1507  * As the file closes, flush all cached write data for this inode, checking
1508  * for write-behind errors.
1509  */
1510 int cifs_flush(struct file *file, fl_owner_t id)
1511 {
1512         struct inode *inode = file->f_path.dentry->d_inode;
1513         int rc = 0;
1514
1515         /* Rather than do the steps manually:
1516            lock the inode for writing
1517            loop through pages looking for write behind data (dirty pages)
1518            coalesce into contiguous 16K (or smaller) chunks to write to server
1519            send to server (prefer in parallel)
1520            deal with writebehind errors
1521            unlock inode for writing
1522            filemapfdatawrite appears easier for the time being */
1523
1524         rc = filemap_fdatawrite(inode->i_mapping);
1525         if (!rc) /* reset wb rc if we were able to write out dirty pages */
1526                 CIFS_I(inode)->write_behind_rc = 0;
1527
1528         cFYI(1, ("Flush inode %p file %p rc %d", inode, file, rc));
1529
1530         return rc;
1531 }
1532
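/*
 * Read into a user-space buffer without going through the page cache.
 * The request is issued in chunks of at most rsize; each SMB response is
 * copied out with copy_to_user() and then the small or large response
 * buffer is released.  A chunk is retried on -EAGAIN, reopening the file
 * handle first if it has been invalidated.
 */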
1533 ssize_t cifs_user_read(struct file *file, char __user *read_data,
1534         size_t read_size, loff_t *poffset)
1535 {
1536         int rc = -EACCES;
1537         unsigned int bytes_read = 0;
1538         unsigned int total_read = 0;
1539         unsigned int current_read_size;
1540         struct cifs_sb_info *cifs_sb;
1541         struct cifsTconInfo *pTcon;
1542         int xid;
1543         struct cifsFileInfo *open_file;
1544         char *smb_read_data;
1545         char __user *current_offset;
1546         struct smb_com_read_rsp *pSMBr;
1547
1548         xid = GetXid();
1549         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1550         pTcon = cifs_sb->tcon;
1551
1552         if (file->private_data == NULL) {
1553                 FreeXid(xid);
1554                 return -EBADF;
1555         }
1556         open_file = (struct cifsFileInfo *)file->private_data;
1557
1558         if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
1559                 cFYI(1, ("attempting read on write only file instance"));
1560         }
1561         for (total_read = 0, current_offset = read_data;
1562              read_size > total_read;
1563              total_read += bytes_read, current_offset += bytes_read) {
1564                 current_read_size = min_t(const int, read_size - total_read, 
1565                                           cifs_sb->rsize);
1566                 rc = -EAGAIN;
1567                 smb_read_data = NULL;
1568                 while (rc == -EAGAIN) {
1569                         int buf_type = CIFS_NO_BUFFER;
1570                         if ((open_file->invalidHandle) && 
1571                             (!open_file->closePend)) {
1572                                 rc = cifs_reopen_file(file->f_path.dentry->d_inode,
1573                                         file, TRUE);
1574                                 if (rc != 0)
1575                                         break;
1576                         }
1577                         rc = CIFSSMBRead(xid, pTcon,
1578                                          open_file->netfid,
1579                                          current_read_size, *poffset,
1580                                          &bytes_read, &smb_read_data,
1581                                          &buf_type);
1582                         pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1583                         if (smb_read_data) {
1584                                 if (copy_to_user(current_offset,
1585                                                 smb_read_data +
1586                                                 4 /* RFC1001 length field */ +
1587                                                 le16_to_cpu(pSMBr->DataOffset),
1588                                                 bytes_read)) {
1589                                         rc = -EFAULT;
1590                                 }
1591
1592                                 if (buf_type == CIFS_SMALL_BUFFER)
1593                                         cifs_small_buf_release(smb_read_data);
1594                                 else if (buf_type == CIFS_LARGE_BUFFER)
1595                                         cifs_buf_release(smb_read_data);
1596                                 smb_read_data = NULL;
1597                         }
1598                 }
1599                 if (rc || (bytes_read == 0)) {
1600                         if (total_read) {
1601                                 break;
1602                         } else {
1603                                 FreeXid(xid);
1604                                 return rc;
1605                         }
1606                 } else {
1607                         cifs_stats_bytes_read(pTcon, bytes_read);
1608                         *poffset += bytes_read;
1609                 }
1610         }
1611         FreeXid(xid);
1612         return total_read;
1613 }
1614
1615
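/*
 * Read into a kernel buffer; the readpage paths below are built on this.
 * CIFSSMBRead() copies the returned data straight into current_offset,
 * so no extra copy is needed here.  For sessions lacking CAP_LARGE_FILES
 * the request is additionally capped below the server's negotiated maxBuf.
 */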
1616 static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
1617         loff_t *poffset)
1618 {
1619         int rc = -EACCES;
1620         unsigned int bytes_read = 0;
1621         unsigned int total_read;
1622         unsigned int current_read_size;
1623         struct cifs_sb_info *cifs_sb;
1624         struct cifsTconInfo *pTcon;
1625         int xid;
1626         char *current_offset;
1627         struct cifsFileInfo *open_file;
1628         int buf_type = CIFS_NO_BUFFER;
1629
1630         xid = GetXid();
1631         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1632         pTcon = cifs_sb->tcon;
1633
1634         if (file->private_data == NULL) {
1635                 FreeXid(xid);
1636                 return -EBADF;
1637         }
1638         open_file = (struct cifsFileInfo *)file->private_data;
1639
1640         if ((file->f_flags & O_ACCMODE) == O_WRONLY)
1641                 cFYI(1, ("attempting read on write only file instance"));
1642
1643         for (total_read = 0, current_offset = read_data; 
1644              read_size > total_read;
1645              total_read += bytes_read, current_offset += bytes_read) {
1646                 current_read_size = min_t(const int, read_size - total_read,
1647                                           cifs_sb->rsize);
1648                 /* For Windows ME and 9x we do not want to request more
1649                 than was negotiated, since the server will refuse the read */
1650                 if ((pTcon->ses) &&
1651                         !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
1652                         current_read_size = min_t(const int, current_read_size,
1653                                         pTcon->ses->server->maxBuf - 128);
1654                 }
1655                 rc = -EAGAIN;
1656                 while (rc == -EAGAIN) {
1657                         if ((open_file->invalidHandle) && 
1658                             (!open_file->closePend)) {
1659                                 rc = cifs_reopen_file(file->f_path.dentry->d_inode,
1660                                         file, TRUE);
1661                                 if (rc != 0)
1662                                         break;
1663                         }
1664                         rc = CIFSSMBRead(xid, pTcon,
1665                                          open_file->netfid,
1666                                          current_read_size, *poffset,
1667                                          &bytes_read, &current_offset,
1668                                          &buf_type);
1669                 }
1670                 if (rc || (bytes_read == 0)) {
1671                         if (total_read) {
1672                                 break;
1673                         } else {
1674                                 FreeXid(xid);
1675                                 return rc;
1676                         }
1677                 } else {
1678                         cifs_stats_bytes_read(pTcon, total_read);
1679                         *poffset += bytes_read;
1680                 }
1681         }
1682         FreeXid(xid);
1683         return total_read;
1684 }
1685
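/*
 * mmap: revalidate the cached inode data against the server before
 * falling through to generic_file_mmap().
 */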
1686 int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
1687 {
1688         struct dentry *dentry = file->f_path.dentry;
1689         int rc, xid;
1690
1691         xid = GetXid();
1692         rc = cifs_revalidate(dentry);
1693         if (rc) {
1694                 cFYI(1, ("Validation prior to mmap failed, error=%d", rc));
1695                 FreeXid(xid);
1696                 return rc;
1697         }
1698         rc = generic_file_mmap(file, vma);
1699         FreeXid(xid);
1700         return rc;
1701 }
1702
1703
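/*
 * Copy one contiguous SMB read response into the pages remaining on the
 * readahead list: each page is added to the page cache, filled from the
 * response (zeroing the tail of a final partial page), marked up to date
 * and queued on the caller's LRU pagevec.
 */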
1704 static void cifs_copy_cache_pages(struct address_space *mapping, 
1705         struct list_head *pages, int bytes_read, char *data,
1706         struct pagevec *plru_pvec)
1707 {
1708         struct page *page;
1709         char *target;
1710
1711         while (bytes_read > 0) {
1712                 if (list_empty(pages))
1713                         break;
1714
1715                 page = list_entry(pages->prev, struct page, lru);
1716                 list_del(&page->lru);
1717
1718                 if (add_to_page_cache(page, mapping, page->index,
1719                                       GFP_KERNEL)) {
1720                         page_cache_release(page);
1721                         cFYI(1, ("Add page cache failed"));
1722                         data += PAGE_CACHE_SIZE;
1723                         bytes_read -= PAGE_CACHE_SIZE;
1724                         continue;
1725                 }
1726
1727                 target = kmap_atomic(page, KM_USER0);
1728
1729                 if (PAGE_CACHE_SIZE > bytes_read) {
1730                         memcpy(target, data, bytes_read);
1731                         /* zero the tail end of this partial page */
1732                         memset(target + bytes_read, 0, 
1733                                PAGE_CACHE_SIZE - bytes_read);
1734                         bytes_read = 0;
1735                 } else {
1736                         memcpy(target, data, PAGE_CACHE_SIZE);
1737                         bytes_read -= PAGE_CACHE_SIZE;
1738                 }
1739                 kunmap_atomic(target, KM_USER0);
1740
1741                 flush_dcache_page(page);
1742                 SetPageUptodate(page);
1743                 unlock_page(page);
1744                 if (!pagevec_add(plru_pvec, page))
1745                         __pagevec_lru_add(plru_pvec);
1746                 data += PAGE_CACHE_SIZE;
1747         }
1748         return;
1749 }
1750
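/*
 * readpages: walk the readahead list looking for runs of pages with
 * adjacent indexes, issue one CIFSSMBRead of up to rsize (rounded down to
 * whole pages) per run, and hand each response to cifs_copy_cache_pages()
 * to populate the page cache.
 */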
1751 static int cifs_readpages(struct file *file, struct address_space *mapping,
1752         struct list_head *page_list, unsigned num_pages)
1753 {
1754         int rc = -EACCES;
1755         int xid;
1756         loff_t offset;
1757         struct page *page;
1758         struct cifs_sb_info *cifs_sb;
1759         struct cifsTconInfo *pTcon;
1760         int bytes_read = 0;
1761         unsigned int read_size, i;
1762         char *smb_read_data = NULL;
1763         struct smb_com_read_rsp *pSMBr;
1764         struct pagevec lru_pvec;
1765         struct cifsFileInfo *open_file;
1766         int buf_type = CIFS_NO_BUFFER;
1767
1768         xid = GetXid();
1769         if (file->private_data == NULL) {
1770                 FreeXid(xid);
1771                 return -EBADF;
1772         }
1773         open_file = (struct cifsFileInfo *)file->private_data;
1774         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1775         pTcon = cifs_sb->tcon;
1776
1777         pagevec_init(&lru_pvec, 0);
1778
1779         for (i = 0; i < num_pages; ) {
1780                 unsigned contig_pages;
1781                 struct page *tmp_page;
1782                 unsigned long expected_index;
1783
1784                 if (list_empty(page_list))
1785                         break;
1786
1787                 page = list_entry(page_list->prev, struct page, lru);
1788                 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1789
1790                 /* count adjacent pages that we will read into */
1791                 contig_pages = 0;
1792                 expected_index =
1793                         list_entry(page_list->prev, struct page, lru)->index;
1794                 list_for_each_entry_reverse(tmp_page, page_list, lru) {
1795                         if (tmp_page->index == expected_index) {
1796                                 contig_pages++;
1797                                 expected_index++;
1798                         } else
1799                                 break;
1800                 }
1801                 if (contig_pages + i > num_pages)
1802                         contig_pages = num_pages - i;
1803
1804                 /* for reads over a certain size could initiate async
1805                    read ahead */
1806
1807                 read_size = contig_pages * PAGE_CACHE_SIZE;
1808                 /* Read size needs to be in multiples of one page */
1809                 read_size = min_t(const unsigned int, read_size,
1810                                   cifs_sb->rsize & PAGE_CACHE_MASK);
1811
1812                 rc = -EAGAIN;
1813                 while (rc == -EAGAIN) {
1814                         if ((open_file->invalidHandle) && 
1815                             (!open_file->closePend)) {
1816                                 rc = cifs_reopen_file(file->f_path.dentry->d_inode,
1817                                         file, TRUE);
1818                                 if (rc != 0)
1819                                         break;
1820                         }
1821
1822                         rc = CIFSSMBRead(xid, pTcon,
1823                                          open_file->netfid,
1824                                          read_size, offset,
1825                                          &bytes_read, &smb_read_data,
1826                                          &buf_type);
1827                         /* BB more RC checks? */
1828                         if (rc == -EAGAIN) {
1829                                 if (smb_read_data) {
1830                                         if (buf_type == CIFS_SMALL_BUFFER)
1831                                                 cifs_small_buf_release(smb_read_data);
1832                                         else if (buf_type == CIFS_LARGE_BUFFER)
1833                                                 cifs_buf_release(smb_read_data);
1834                                         smb_read_data = NULL;
1835                                 }
1836                         }
1837                 }
1838                 if ((rc < 0) || (smb_read_data == NULL)) {
1839                         cFYI(1, ("Read error in readpages: %d", rc));
1840                         break;
1841                 } else if (bytes_read > 0) {
1842                         task_io_account_read(bytes_read);
1843                         pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1844                         cifs_copy_cache_pages(mapping, page_list, bytes_read,
1845                                 smb_read_data + 4 /* RFC1001 hdr */ +
1846                                 le16_to_cpu(pSMBr->DataOffset), &lru_pvec);
1847
1848                         i +=  bytes_read >> PAGE_CACHE_SHIFT;
1849                         cifs_stats_bytes_read(pTcon, bytes_read);
1850                         if ((int)(bytes_read & PAGE_CACHE_MASK) != bytes_read) {
1851                                 i++; /* account for partial page */
1852
1853                                 /* the server's copy of the file can be
1854                                    smaller than the client's */
1855                                 /* BB do we need to verify this common case?
1856                                    it is ok - if we are at server EOF we
1857                                    will hit it on the next read */
1858
1859                                 /* break; */
1860                         }
1861                 } else {
1862                         cFYI(1, ("No bytes read (%d) at offset %lld. "
1863                                  "Cleaning remaining pages from readahead list",
1864                                  bytes_read, offset));
1865                         /* BB turn off caching and do a new lookup of the
1866                            file size at the server? */
1867                         break;
1868                 }
1869                 if (smb_read_data) {
1870                         if (buf_type == CIFS_SMALL_BUFFER)
1871                                 cifs_small_buf_release(smb_read_data);
1872                         else if (buf_type == CIFS_LARGE_BUFFER)
1873                                 cifs_buf_release(smb_read_data);
1874                         smb_read_data = NULL;
1875                 }
1876                 bytes_read = 0;
1877         }
1878
1879         pagevec_lru_add(&lru_pvec);
1880
1881 /* need to free smb_read_data buf before exit */
1882         if (smb_read_data) {
1883                 if (buf_type == CIFS_SMALL_BUFFER)
1884                         cifs_small_buf_release(smb_read_data);
1885                 else if (buf_type == CIFS_LARGE_BUFFER)
1886                         cifs_buf_release(smb_read_data);
1887                 smb_read_data = NULL;
1888         }
1889
1890         FreeXid(xid);
1891         return rc;
1892 }
1893
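/*
 * Fill a single page synchronously via cifs_read(), zeroing the rest of
 * the page after a short read and marking it up to date.  The caller is
 * responsible for unlocking the page.
 */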
1894 static int cifs_readpage_worker(struct file *file, struct page *page,
1895         loff_t *poffset)
1896 {
1897         char *read_data;
1898         int rc;
1899
1900         page_cache_get(page);
1901         read_data = kmap(page);
1902         /* for reads over a certain size could initiate async read ahead */
1903
1904         rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
1905
1906         if (rc < 0)
1907                 goto io_error;
1908         else
1909                 cFYI(1, ("Bytes read %d", rc));
1910
1911         file->f_path.dentry->d_inode->i_atime =
1912                 current_fs_time(file->f_path.dentry->d_inode->i_sb);
1913
1914         if (PAGE_CACHE_SIZE > rc)
1915                 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
1916
1917         flush_dcache_page(page);
1918         SetPageUptodate(page);
1919         rc = 0;
1920
1921 io_error:
1922         kunmap(page);
1923         page_cache_release(page);
1924         return rc;
1925 }
1926
1927 static int cifs_readpage(struct file *file, struct page *page)
1928 {
1929         loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1930         int rc = -EACCES;
1931         int xid;
1932
1933         xid = GetXid();
1934
1935         if (file->private_data == NULL) {
1936                 FreeXid(xid);
1937                 return -EBADF;
1938         }
1939
1940         cFYI(1, ("readpage %p at offset %d 0x%x\n", 
1941                  page, (int)offset, (int)offset));
1942
1943         rc = cifs_readpage_worker(file, page, &offset);
1944
1945         unlock_page(page);
1946
1947         FreeXid(xid);
1948         return rc;
1949 }
1950
1951 /* We do not want to update the file size from the server for inodes
1952    open for write, to avoid races with writepage extending the file.
1953    In the future we could consider refreshing the inode only on
1954    increases in the file size, but that is tricky to do without
1955    racing with write-behind page caching in the current Linux
1956    kernel design */
1957 int is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
1958 {
1959         struct cifsFileInfo *open_file = NULL;
1960
1961         if (cifsInode)
1962                 open_file = find_writable_file(cifsInode);
1963
1964         if (open_file) {
1965                 struct cifs_sb_info *cifs_sb;
1966
1967                 /* there is not actually a write pending so let
1968                 this handle go free and allow it to
1969                 be closable if needed */
1970                 atomic_dec(&open_file->wrtPending);
1971
1972                 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
1973                 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
1974                         /* since there is no page cache to corrupt on
1975                         directio we can change the size safely */
1976                         return 1;
1977                 }
1978
1979                 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
1980                         return 1;
1981
1982                 return 0;
1983         } else
1984                 return 1;
1985 }
1986
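/*
 * prepare_write: if the page is not already up to date, a full-page write
 * lets us mark it up to date without a read; the page is still read in
 * from the server via cifs_readpage_worker() whenever the file has read
 * access ("fast enough", per the comment below).  Errors are deliberately
 * swallowed for now; see the BB note at the end.
 */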
1987 static int cifs_prepare_write(struct file *file, struct page *page,
1988         unsigned from, unsigned to)
1989 {
1990         int rc = 0;
1991         loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1992         cFYI(1, ("prepare write for page %p from %d to %d", page, from, to));
1993         if (!PageUptodate(page)) {
1994         /*      if (to - from != PAGE_CACHE_SIZE) {
1995                         void *kaddr = kmap_atomic(page, KM_USER0);
1996                         memset(kaddr, 0, from);
1997                         memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
1998                         flush_dcache_page(page);
1999                         kunmap_atomic(kaddr, KM_USER0);
2000                 } */
2001                 /* If we are writing a full page it will be up to date,
2002                    no need to read from the server */
2003                 if ((to == PAGE_CACHE_SIZE) && (from == 0))
2004                         SetPageUptodate(page);
2005
2006                 /* might as well read a page, it is fast enough */
2007                 if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
2008                         rc = cifs_readpage_worker(file, page, &offset);
2009                 } else {
2010                 /* should we try using another file handle if there is one -
2011                    how would we lock it to prevent close of that handle
2012                    racing with this read?
2013                    In any case this will be written out by commit_write */
2014                 }
2015         }
2016
2017         /* BB should we pass any errors back? 
2018            e.g. if we do not have read access to the file */
2019         return 0;
2020 }
2021
2022 const struct address_space_operations cifs_addr_ops = {
2023         .readpage = cifs_readpage,
2024         .readpages = cifs_readpages,
2025         .writepage = cifs_writepage,
2026         .writepages = cifs_writepages,
2027         .prepare_write = cifs_prepare_write,
2028         .commit_write = cifs_commit_write,
2029         .set_page_dirty = __set_page_dirty_nobuffers,
2030         /* .sync_page = cifs_sync_page, */
2031         /* .direct_IO = */
2032 };
2033
2034 /*
2035  * cifs_readpages requires the server to support a buffer large enough to
2036  * contain the header plus one complete page of data.  Otherwise, we need
2037  * to leave cifs_readpages out of the address space operations.
2038  */
2039 const struct address_space_operations cifs_addr_ops_smallbuf = {
2040         .readpage = cifs_readpage,
2041         .writepage = cifs_writepage,
2042         .writepages = cifs_writepages,
2043         .prepare_write = cifs_prepare_write,
2044         .commit_write = cifs_commit_write,
2045         .set_page_dirty = __set_page_dirty_nobuffers,
2046         /* .sync_page = cifs_sync_page, */
2047         /* .direct_IO = */
2048 };
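/*
 * Which of the two tables ends up on an inode's mapping is decided when
 * the inode is initialized, outside this file.  A minimal sketch of that
 * selection, assuming the server's negotiated maxBuf and the header-size
 * constant from cifspdu.h (illustrative only, not the code in inode.c):
 *
 *        if (cifs_sb->tcon->ses->server->maxBuf <
 *            PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)
 *                inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
 *        else
 *                inode->i_data.a_ops = &cifs_addr_ops;
 */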