1 /*
2  *   fs/cifs/file.c
3  *
4  *   vfs operations that deal with files
5  * 
6  *   Copyright (C) International Business Machines  Corp., 2002,2003
7  *   Author(s): Steve French (sfrench@us.ibm.com)
8  *              Jeremy Allison (jra@samba.org)
9  *
10  *   This library is free software; you can redistribute it and/or modify
11  *   it under the terms of the GNU Lesser General Public License as published
12  *   by the Free Software Foundation; either version 2.1 of the License, or
13  *   (at your option) any later version.
14  *
15  *   This library is distributed in the hope that it will be useful,
16  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
17  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
18  *   the GNU Lesser General Public License for more details.
19  *
20  *   You should have received a copy of the GNU Lesser General Public License
21  *   along with this library; if not, write to the Free Software
22  *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23  */
24 #include <linux/fs.h>
25 #include <linux/backing-dev.h>
26 #include <linux/stat.h>
27 #include <linux/fcntl.h>
28 #include <linux/pagemap.h>
29 #include <linux/pagevec.h>
30 #include <linux/smp_lock.h>
31 #include <linux/writeback.h>
32 #include <linux/task_io_accounting_ops.h>
33 #include <linux/delay.h>
34 #include <asm/div64.h>
35 #include "cifsfs.h"
36 #include "cifspdu.h"
37 #include "cifsglob.h"
38 #include "cifsproto.h"
39 #include "cifs_unicode.h"
40 #include "cifs_debug.h"
41 #include "cifs_fs_sb.h"
42
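/* Initialize the per-open-file private data: remember the server file
   handle (netfid) and owning pid, set up the handle and byte-range-lock
   semaphores, and link back to the file and inode so that writepage can
   later find an open handle for this inode. */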
43 static inline struct cifsFileInfo *cifs_init_private(
44         struct cifsFileInfo *private_data, struct inode *inode,
45         struct file *file, __u16 netfid)
46 {
47         memset(private_data, 0, sizeof(struct cifsFileInfo));
48         private_data->netfid = netfid;
49         private_data->pid = current->tgid;      
50         init_MUTEX(&private_data->fh_sem);
51         init_MUTEX(&private_data->lock_sem);
52         INIT_LIST_HEAD(&private_data->llist);
53         private_data->pfile = file; /* needed for writepage */
54         private_data->pInode = inode;
55         private_data->invalidHandle = FALSE;
56         private_data->closePend = FALSE;
57         /* we have to track the number of writers to the inode, since
58            writepages does not tell us which handle a write is for, so
59            there can be a close (overlapping with a write) of the
60            filehandle that cifs_writepages chose to use */
61         atomic_set(&private_data->wrtPending,0); 
62
63         return private_data;
64 }
65
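/* Map the POSIX access mode bits of the open flags to the CIFS desired
   access mask requested in the SMB open. */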
66 static inline int cifs_convert_flags(unsigned int flags)
67 {
68         if ((flags & O_ACCMODE) == O_RDONLY)
69                 return GENERIC_READ;
70         else if ((flags & O_ACCMODE) == O_WRONLY)
71                 return GENERIC_WRITE;
72         else if ((flags & O_ACCMODE) == O_RDWR) {
73                 /* GENERIC_ALL is too much permission to request; it
74                    can cause an unnecessary access denied on create */
75                 /* return GENERIC_ALL; */
76                 return (GENERIC_READ | GENERIC_WRITE);
77         }
78
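        /* cannot map the access mode directly; 0x20197 appears to decompose to
           READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
           FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA | FILE_READ_DATA */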
79         return 0x20197;
80 }
81
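/* Map the POSIX O_CREAT / O_EXCL / O_TRUNC combinations to the CIFS create
   disposition (see the open flag mapping table in cifs_open below). */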
82 static inline int cifs_get_disposition(unsigned int flags)
83 {
84         if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
85                 return FILE_CREATE;
86         else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
87                 return FILE_OVERWRITE_IF;
88         else if ((flags & O_CREAT) == O_CREAT)
89                 return FILE_OPEN_IF;
90         else if ((flags & O_TRUNC) == O_TRUNC)
91                 return FILE_OVERWRITE;
92         else
93                 return FILE_OPEN;
94 }
95
96 /* all arguments to this function must be checked for validity in caller */
97 static inline int cifs_open_inode_helper(struct inode *inode, struct file *file,
98         struct cifsInodeInfo *pCifsInode, struct cifsFileInfo *pCifsFile,
99         struct cifsTconInfo *pTcon, int *oplock, FILE_ALL_INFO *buf,
100         char *full_path, int xid)
101 {
102         struct timespec temp;
103         int rc;
104
105         /* we want handles we can read with to be first
106            in the list so we do not have to walk the
107            list to search for one in prepare_write */
108         if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
109                 list_add_tail(&pCifsFile->flist, 
110                               &pCifsInode->openFileList);
111         } else {
112                 list_add(&pCifsFile->flist,
113                          &pCifsInode->openFileList);
114         }
115         write_unlock(&GlobalSMBSeslock);
116         if (pCifsInode->clientCanCacheRead) {
117                 /* we have the inode open somewhere else
118                    no need to discard cache data */
119                 goto client_can_cache;
120         }
121
122         /* BB need same check in cifs_create too? */
123         /* if not oplocked, invalidate inode pages if mtime or file
124            size changed */
125         temp = cifs_NTtimeToUnix(le64_to_cpu(buf->LastWriteTime));
126         if (timespec_equal(&file->f_path.dentry->d_inode->i_mtime, &temp) &&
127                            (file->f_path.dentry->d_inode->i_size ==
128                             (loff_t)le64_to_cpu(buf->EndOfFile))) {
129                 cFYI(1, ("inode unchanged on server"));
130         } else {
131                 if (file->f_path.dentry->d_inode->i_mapping) {
132                 /* BB no need to lock inode until after invalidate
133                    since namei code should already have it locked? */
134                         filemap_write_and_wait(file->f_path.dentry->d_inode->i_mapping);
135                 }
136                 cFYI(1, ("invalidating remote inode since open detected it "
137                          "changed"));
138                 invalidate_remote_inode(file->f_path.dentry->d_inode);
139         }
140
141 client_can_cache:
142         if (pTcon->ses->capabilities & CAP_UNIX)
143                 rc = cifs_get_inode_info_unix(&file->f_path.dentry->d_inode,
144                         full_path, inode->i_sb, xid);
145         else
146                 rc = cifs_get_inode_info(&file->f_path.dentry->d_inode,
147                         full_path, buf, inode->i_sb, xid);
148
149         if ((*oplock & 0xF) == OPLOCK_EXCLUSIVE) {
150                 pCifsInode->clientCanCacheAll = TRUE;
151                 pCifsInode->clientCanCacheRead = TRUE;
152                 cFYI(1, ("Exclusive Oplock granted on inode %p",
153                          file->f_path.dentry->d_inode));
154         } else if ((*oplock & 0xF) == OPLOCK_READ)
155                 pCifsInode->clientCanCacheRead = TRUE;
156
157         return rc;
158 }
159
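/* VFS open: reuse a handle already set up by cifs_create when one exists for
   this process, otherwise send an SMB open (falling back to the legacy OpenX
   for servers without NT SMB support), allocate the per-file private data,
   refresh the inode metadata, and record any oplock the server granted. */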
160 int cifs_open(struct inode *inode, struct file *file)
161 {
162         int rc = -EACCES;
163         int xid, oplock;
164         struct cifs_sb_info *cifs_sb;
165         struct cifsTconInfo *pTcon;
166         struct cifsFileInfo *pCifsFile;
167         struct cifsInodeInfo *pCifsInode;
168         struct list_head *tmp;
169         char *full_path = NULL;
170         int desiredAccess;
171         int disposition;
172         __u16 netfid;
173         FILE_ALL_INFO *buf = NULL;
174
175         xid = GetXid();
176
177         cifs_sb = CIFS_SB(inode->i_sb);
178         pTcon = cifs_sb->tcon;
179
180         if (file->f_flags & O_CREAT) {
181                 /* search inode for this file and fill in file->private_data */
182                 pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
183                 read_lock(&GlobalSMBSeslock);
184                 list_for_each(tmp, &pCifsInode->openFileList) {
185                         pCifsFile = list_entry(tmp, struct cifsFileInfo,
186                                                flist);
187                         if ((pCifsFile->pfile == NULL) &&
188                             (pCifsFile->pid == current->tgid)) {
189                                 /* mode set in cifs_create */
190
191                                 /* needed for writepage */
192                                 pCifsFile->pfile = file;
193                                 
194                                 file->private_data = pCifsFile;
195                                 break;
196                         }
197                 }
198                 read_unlock(&GlobalSMBSeslock);
199                 if (file->private_data != NULL) {
200                         rc = 0;
201                         FreeXid(xid);
202                         return rc;
203                 } else {
204                         if (file->f_flags & O_EXCL)
205                                 cERROR(1, ("could not find file instance for "
206                                            "new file %p", file));
207                 }
208         }
209
210         full_path = build_path_from_dentry(file->f_path.dentry);
211         if (full_path == NULL) {
212                 FreeXid(xid);
213                 return -ENOMEM;
214         }
215
216         cFYI(1, (" inode = 0x%p file flags are 0x%x for %s",
217                  inode, file->f_flags, full_path));
218         desiredAccess = cifs_convert_flags(file->f_flags);
219
220 /*********************************************************************
221  *  open flag mapping table:
222  *  
223  *      POSIX Flag            CIFS Disposition
224  *      ----------            ---------------- 
225  *      O_CREAT               FILE_OPEN_IF
226  *      O_CREAT | O_EXCL      FILE_CREATE
227  *      O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
228  *      O_TRUNC               FILE_OVERWRITE
229  *      none of the above     FILE_OPEN
230  *
231  *      Note that there is no direct POSIX match for the disposition
232  *      FILE_SUPERSEDE (ie create whether or not the file exists);
233  *      O_CREAT | O_TRUNC is similar, but it truncates the existing
234  *      file rather than creating a new one as FILE_SUPERSEDE does
235  *      (which uses the attributes / metadata passed in on the open call)
236  *?
237  *?  O_SYNC is a reasonable match to CIFS writethrough flag  
238  *?  and the read write flags match reasonably.  O_LARGEFILE
239  *?  is irrelevant because largefile support is always used
240  *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
241  *       O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
242  *********************************************************************/
243
244         disposition = cifs_get_disposition(file->f_flags);
245
246         if (oplockEnabled)
247                 oplock = REQ_OPLOCK;
248         else
249                 oplock = FALSE;
250
251         /* BB pass O_SYNC flag through on file attributes .. BB */
252
253         /* Also refresh inode by passing in file_info buf returned by SMBOpen
254            and calling get_inode_info with returned buf (at least helps
255            non-Unix server case) */
256
257         /* BB we can not do this if this is the second open of a file 
258            and the first handle has writebehind data, we might be 
259            able to simply do a filemap_fdatawrite/filemap_fdatawait first */
260         buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
261         if (!buf) {
262                 rc = -ENOMEM;
263                 goto out;
264         }
265
266         if (cifs_sb->tcon->ses->capabilities & CAP_NT_SMBS)
267                 rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, 
268                          desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
269                          cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
270                                  & CIFS_MOUNT_MAP_SPECIAL_CHR);
271         else
272                 rc = -EIO; /* no NT SMB support, fall through to legacy open below */
273
274         if (rc == -EIO) {
275                 /* Old server, try legacy style OpenX */
276                 rc = SMBLegacyOpen(xid, pTcon, full_path, disposition,
277                         desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
278                         cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
279                                 & CIFS_MOUNT_MAP_SPECIAL_CHR);
280         }
281         if (rc) {
282                 cFYI(1, ("cifs_open returned 0x%x", rc));
283                 goto out;
284         }
285         file->private_data =
286                 kmalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
287         if (file->private_data == NULL) {
288                 rc = -ENOMEM;
289                 goto out;
290         }
291         pCifsFile = cifs_init_private(file->private_data, inode, file, netfid);
292         write_lock(&GlobalSMBSeslock);
293         list_add(&pCifsFile->tlist, &pTcon->openFileList);
294
295         pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
296         if (pCifsInode) {
297                 rc = cifs_open_inode_helper(inode, file, pCifsInode,
298                                             pCifsFile, pTcon,
299                                             &oplock, buf, full_path, xid);
300         } else {
301                 write_unlock(&GlobalSMBSeslock);
302         }
303
304         if (oplock & CIFS_CREATE_ACTION) {           
305                 /* time to set mode which we can not set earlier due to
306                    problems creating new read-only files */
307                 if (cifs_sb->tcon->ses->capabilities & CAP_UNIX) {
308                         CIFSSMBUnixSetPerms(xid, pTcon, full_path,
309                                             inode->i_mode,
310                                             (__u64)-1, (__u64)-1, 0 /* dev */,
311                                             cifs_sb->local_nls,
312                                             cifs_sb->mnt_cifs_flags & 
313                                                 CIFS_MOUNT_MAP_SPECIAL_CHR);
314                 } else {
315                         /* BB implement via Windows security descriptors eg
316                            CIFSSMBWinSetPerms(xid, pTcon, full_path, mode,
317                                               -1, -1, local_nls);
318                            in the meantime could set r/o dos attribute when
319                            perms are eg: mode & 0222 == 0 */
320                 }
321         }
322
323 out:
324         kfree(buf);
325         kfree(full_path);
326         FreeXid(xid);
327         return rc;
328 }
329
330 /* Try to reacquire byte range locks that were released when the */
331 /* session to the server was lost */
332 static int cifs_relock_file(struct cifsFileInfo *cifsFile)
333 {
334         int rc = 0;
335
336 /* BB list all locks open on this file and relock */
337
338         return rc;
339 }
340
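/* Reopen a file handle that was invalidated when the session to the server
   was reset.  If can_flush is set, flush cached writes and refresh the inode
   from the server; then restore the client caching flags according to the
   oplock granted and try to reacquire byte range locks. */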
341 static int cifs_reopen_file(struct inode *inode, struct file *file, 
342         int can_flush)
343 {
344         int rc = -EACCES;
345         int xid, oplock;
346         struct cifs_sb_info *cifs_sb;
347         struct cifsTconInfo *pTcon;
348         struct cifsFileInfo *pCifsFile;
349         struct cifsInodeInfo *pCifsInode;
350         char *full_path = NULL;
351         int desiredAccess;
352         int disposition = FILE_OPEN;
353         __u16 netfid;
354
355         if (inode == NULL)
356                 return -EBADF;
357         if (file->private_data) {
358                 pCifsFile = (struct cifsFileInfo *)file->private_data;
359         } else
360                 return -EBADF;
361
362         xid = GetXid();
363         down(&pCifsFile->fh_sem);
364         if (pCifsFile->invalidHandle == FALSE) {
365                 up(&pCifsFile->fh_sem);
366                 FreeXid(xid);
367                 return 0;
368         }
369
370         if (file->f_path.dentry == NULL) {
371                 up(&pCifsFile->fh_sem);
372                 cFYI(1, ("failed file reopen, no valid name if dentry freed"));
373                 FreeXid(xid);
374                 return -EBADF;
375         }
376         cifs_sb = CIFS_SB(inode->i_sb);
377         pTcon = cifs_sb->tcon;
378 /* can not grab the rename sem here because various ops, including
379    those that already hold the rename sem, can end up causing writepage
380    to get called; if the server was down that means we end up here,
381    and we can never tell if the caller already has the rename_sem */
382         full_path = build_path_from_dentry(file->f_path.dentry);
383         if (full_path == NULL) {
384                 up(&pCifsFile->fh_sem);
385                 FreeXid(xid);
386                 return -ENOMEM;
387         }
388
389         cFYI(1, (" inode = 0x%p file flags are 0x%x for %s",
390                  inode, file->f_flags,full_path));
391         desiredAccess = cifs_convert_flags(file->f_flags);
392
393         if (oplockEnabled)
394                 oplock = REQ_OPLOCK;
395         else
396                 oplock = FALSE;
397
398         /* Can not refresh inode by passing in file_info buf to be returned
399            by SMBOpen and then calling get_inode_info with returned buf 
400            since file might have write behind data that needs to be flushed 
401            and server version of file size can be stale. If we knew for sure
402            that inode was not dirty locally we could do this */
403
404 /*      buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
405         if (buf == 0) {
406                 up(&pCifsFile->fh_sem);
407                 kfree(full_path);
408                 FreeXid(xid);
409                 return -ENOMEM;
410         } */
411         rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, desiredAccess,
412                          CREATE_NOT_DIR, &netfid, &oplock, NULL,
413                          cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & 
414                                 CIFS_MOUNT_MAP_SPECIAL_CHR);
415         if (rc) {
416                 up(&pCifsFile->fh_sem);
417                 cFYI(1, ("cifs_open returned 0x%x", rc));
418                 cFYI(1, ("oplock: %d", oplock));
419         } else {
420                 pCifsFile->netfid = netfid;
421                 pCifsFile->invalidHandle = FALSE;
422                 up(&pCifsFile->fh_sem);
423                 pCifsInode = CIFS_I(inode);
424                 if (pCifsInode) {
425                         if (can_flush) {
426                                 filemap_write_and_wait(inode->i_mapping);
427                         /* temporarily disable caching while we
428                            go to server to get inode info */
429                                 pCifsInode->clientCanCacheAll = FALSE;
430                                 pCifsInode->clientCanCacheRead = FALSE;
431                                 if (pTcon->ses->capabilities & CAP_UNIX)
432                                         rc = cifs_get_inode_info_unix(&inode,
433                                                 full_path, inode->i_sb, xid);
434                                 else
435                                         rc = cifs_get_inode_info(&inode,
436                                                 full_path, NULL, inode->i_sb,
437                                                 xid);
438                         } /* else we are writing out data to server already
439                              and could deadlock if we tried to flush data, and
440                              since we do not know if we have data that would
441                              invalidate the current end of file on the server
442                              we can not go to the server to get the new inode
443                              info */
444                         if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
445                                 pCifsInode->clientCanCacheAll = TRUE;
446                                 pCifsInode->clientCanCacheRead = TRUE;
447                                 cFYI(1, ("Exclusive Oplock granted on inode %p",
448                                          file->f_path.dentry->d_inode));
449                         } else if ((oplock & 0xF) == OPLOCK_READ) {
450                                 pCifsInode->clientCanCacheRead = TRUE;
451                                 pCifsInode->clientCanCacheAll = FALSE;
452                         } else {
453                                 pCifsInode->clientCanCacheRead = FALSE;
454                                 pCifsInode->clientCanCacheAll = FALSE;
455                         }
456                         cifs_relock_file(pCifsFile);
457                 }
458         }
459
460         kfree(full_path);
461         FreeXid(xid);
462         return rc;
463 }
464
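/* VFS release: wait briefly for writes pending on this handle, send the SMB
   close unless the tree connection needs reconnect anyway, free any stored
   byte-range lock records, unlink the handle from the inode and tcon lists,
   and drop cached read/write permission when the last instance is closed. */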
465 int cifs_close(struct inode *inode, struct file *file)
466 {
467         int rc = 0;
468         int xid;
469         struct cifs_sb_info *cifs_sb;
470         struct cifsTconInfo *pTcon;
471         struct cifsFileInfo *pSMBFile =
472                 (struct cifsFileInfo *)file->private_data;
473
474         xid = GetXid();
475
476         cifs_sb = CIFS_SB(inode->i_sb);
477         pTcon = cifs_sb->tcon;
478         if (pSMBFile) {
479                 struct cifsLockInfo *li, *tmp;
480
481                 pSMBFile->closePend = TRUE;
482                 if (pTcon) {
483                         /* no sense reconnecting to close a file that is
484                            already closed */
485                         if (pTcon->tidStatus != CifsNeedReconnect) {
486                                 int timeout = 2;
487                                 while((atomic_read(&pSMBFile->wrtPending) != 0)
488                                          && (timeout < 1000) ) {
489                                         /* Give write a better chance to get to
490                                         server ahead of the close.  We do not
491                                         want to add a wait_q here as it would
492                                         increase the memory utilization as
493                                         the struct would be in each open file,
494                                         but this should give enough time to 
495                                         clear the socket */
496 #ifdef CONFIG_CIFS_DEBUG2
497                                         cFYI(1,("close delay, write pending"));
498 #endif /* DEBUG2 */
499                                         msleep(timeout);
500                                         timeout *= 4;
501                                 }
502                                 if(atomic_read(&pSMBFile->wrtPending))
503                                         cERROR(1,("close with pending writes"));
504                                 rc = CIFSSMBClose(xid, pTcon,
505                                                   pSMBFile->netfid);
506                         }
507                 }
508
509                 /* Delete any outstanding lock records.
510                    We'll lose them when the file is closed anyway. */
511                 down(&pSMBFile->lock_sem);
512                 list_for_each_entry_safe(li, tmp, &pSMBFile->llist, llist) {
513                         list_del(&li->llist);
514                         kfree(li);
515                 }
516                 up(&pSMBFile->lock_sem);
517
518                 write_lock(&GlobalSMBSeslock);
519                 list_del(&pSMBFile->flist);
520                 list_del(&pSMBFile->tlist);
521                 write_unlock(&GlobalSMBSeslock);
522                 kfree(pSMBFile->search_resume_name);
523                 kfree(file->private_data);
524                 file->private_data = NULL;
525         } else
526                 rc = -EBADF;
527
528         if (list_empty(&(CIFS_I(inode)->openFileList))) {
529                 cFYI(1, ("closing last open instance for inode %p", inode));
530                 /* if the file is not open we do not know if we can cache info
531                    on this inode, much less write behind and read ahead */
532                 CIFS_I(inode)->clientCanCacheRead = FALSE;
533                 CIFS_I(inode)->clientCanCacheAll  = FALSE;
534         }
535         if ((rc ==0) && CIFS_I(inode)->write_behind_rc)
536                 rc = CIFS_I(inode)->write_behind_rc;
537         FreeXid(xid);
538         return rc;
539 }
540
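/* VFS release for directories: close any uncompleted server-side search and
   free the network buffer and resume name held in the search state. */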
541 int cifs_closedir(struct inode *inode, struct file *file)
542 {
543         int rc = 0;
544         int xid;
545         struct cifsFileInfo *pCFileStruct =
546             (struct cifsFileInfo *)file->private_data;
547         char *ptmp;
548
549         cFYI(1, ("Closedir inode = 0x%p", inode));
550
551         xid = GetXid();
552
553         if (pCFileStruct) {
554                 struct cifsTconInfo *pTcon;
555                 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
556
557                 pTcon = cifs_sb->tcon;
558
559                 cFYI(1, ("Freeing private data in close dir"));
560                 if ((pCFileStruct->srch_inf.endOfSearch == FALSE) &&
561                    (pCFileStruct->invalidHandle == FALSE)) {
562                         pCFileStruct->invalidHandle = TRUE;
563                         rc = CIFSFindClose(xid, pTcon, pCFileStruct->netfid);
564                         cFYI(1, ("Closing uncompleted readdir with rc %d",
565                                  rc));
566                         /* not much we can do if it fails anyway, ignore rc */
567                         rc = 0;
568                 }
569                 ptmp = pCFileStruct->srch_inf.ntwrk_buf_start;
570                 if (ptmp) {
571                         cFYI(1, ("closedir free smb buf in srch struct"));
572                         pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
573                         if(pCFileStruct->srch_inf.smallBuf)
574                                 cifs_small_buf_release(ptmp);
575                         else
576                                 cifs_buf_release(ptmp);
577                 }
578                 ptmp = pCFileStruct->search_resume_name;
579                 if (ptmp) {
580                         cFYI(1, ("closedir free resume name"));
581                         pCFileStruct->search_resume_name = NULL;
582                         kfree(ptmp);
583                 }
584                 kfree(file->private_data);
585                 file->private_data = NULL;
586         }
587         /* BB can we lock the filestruct while this is going on? */
588         FreeXid(xid);
589         return rc;
590 }
591
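/* Record a byte range lock granted by the server on this handle so that a
   later unlock that covers it (or the final close) can release it. */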
592 static int store_file_lock(struct cifsFileInfo *fid, __u64 len,
593                                 __u64 offset, __u8 lockType)
594 {
595         struct cifsLockInfo *li = kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
596         if (li == NULL)
597                 return -ENOMEM;
598         li->offset = offset;
599         li->length = len;
600         li->type = lockType;
601         down(&fid->lock_sem);
602         list_add(&li->llist, &fid->llist);
603         up(&fid->lock_sem);
604         return 0;
605 }
606
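/* VFS lock method: translate fcntl/flock requests into POSIX lock calls when
   the server advertises CIFS_UNIX_FCNTL_CAP, otherwise into Windows style
   LockingAndX requests, storing granted Windows locks locally so that
   overlapping unlock requests can be honored. */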
607 int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
608 {
609         int rc, xid;
610         __u32 numLock = 0;
611         __u32 numUnlock = 0;
612         __u64 length;
613         int wait_flag = FALSE;
614         struct cifs_sb_info *cifs_sb;
615         struct cifsTconInfo *pTcon;
616         __u16 netfid;
617         __u8 lockType = LOCKING_ANDX_LARGE_FILES;
618         int posix_locking;
619
620         length = 1 + pfLock->fl_end - pfLock->fl_start;
621         rc = -EACCES;
622         xid = GetXid();
623
624         cFYI(1, ("Lock parm: 0x%x flockflags: "
625                  "0x%x flocktype: 0x%x start: %lld end: %lld",
626                 cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start,
627                 pfLock->fl_end));
628
629         if (pfLock->fl_flags & FL_POSIX)
630                 cFYI(1, ("Posix"));
631         if (pfLock->fl_flags & FL_FLOCK)
632                 cFYI(1, ("Flock"));
633         if (pfLock->fl_flags & FL_SLEEP) {
634                 cFYI(1, ("Blocking lock"));
635                 wait_flag = TRUE;
636         }
637         if (pfLock->fl_flags & FL_ACCESS)
638                 cFYI(1, ("Process suspended by mandatory locking - "
639                          "not implemented yet"));
640         if (pfLock->fl_flags & FL_LEASE)
641                 cFYI(1, ("Lease on file - not implemented yet"));
642         if (pfLock->fl_flags & 
643             (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
644                 cFYI(1, ("Unknown lock flags 0x%x", pfLock->fl_flags));
645
646         if (pfLock->fl_type == F_WRLCK) {
647                 cFYI(1, ("F_WRLCK "));
648                 numLock = 1;
649         } else if (pfLock->fl_type == F_UNLCK) {
650                 cFYI(1, ("F_UNLCK"));
651                 numUnlock = 1;
652                 /* Check if unlock includes more than
653                 one lock range */
654         } else if (pfLock->fl_type == F_RDLCK) {
655                 cFYI(1, ("F_RDLCK"));
656                 lockType |= LOCKING_ANDX_SHARED_LOCK;
657                 numLock = 1;
658         } else if (pfLock->fl_type == F_EXLCK) {
659                 cFYI(1, ("F_EXLCK"));
660                 numLock = 1;
661         } else if (pfLock->fl_type == F_SHLCK) {
662                 cFYI(1, ("F_SHLCK"));
663                 lockType |= LOCKING_ANDX_SHARED_LOCK;
664                 numLock = 1;
665         } else
666                 cFYI(1, ("Unknown type of lock"));
667
668         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
669         pTcon = cifs_sb->tcon;
670
671         if (file->private_data == NULL) {
672                 FreeXid(xid);
673                 return -EBADF;
674         }
675         netfid = ((struct cifsFileInfo *)file->private_data)->netfid;
676
677         posix_locking = (cifs_sb->tcon->ses->capabilities & CAP_UNIX) &&
678                         (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(cifs_sb->tcon->fsUnixInfo.Capability));
679
680         /* BB add code here to normalize offset and length to
681         account for negative length which we can not accept over the
682         wire */
683         if (IS_GETLK(cmd)) {
684                 if(posix_locking) {
685                         int posix_lock_type;
686                         if(lockType & LOCKING_ANDX_SHARED_LOCK)
687                                 posix_lock_type = CIFS_RDLCK;
688                         else
689                                 posix_lock_type = CIFS_WRLCK;
690                         rc = CIFSSMBPosixLock(xid, pTcon, netfid, 1 /* get */,
691                                         length, pfLock,
692                                         posix_lock_type, wait_flag);
693                         FreeXid(xid);
694                         return rc;
695                 }
696
697                 /* BB we could chain these into one lock request BB */
698                 rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start,
699                                  0, 1, lockType, 0 /* wait flag */ );
700                 if (rc == 0) {
701                         rc = CIFSSMBLock(xid, pTcon, netfid, length, 
702                                          pfLock->fl_start, 1 /* numUnlock */ ,
703                                          0 /* numLock */ , lockType,
704                                          0 /* wait flag */ );
705                         pfLock->fl_type = F_UNLCK;
706                         if (rc != 0)
707                                 cERROR(1, ("Error unlocking previously locked "
708                                            "range %d during test of lock", rc));
709                         rc = 0;
710
711                 } else {
712                         /* if rc == ERR_SHARING_VIOLATION ? */
713                         rc = 0; /* do not change lock type to unlock
714                                    since range in use */
715                 }
716
717                 FreeXid(xid);
718                 return rc;
719         }
720
721         if (!numLock && !numUnlock) {
722                 /* if no lock or unlock then nothing
723                 to do since we do not know what it is */
724                 FreeXid(xid);
725                 return -EOPNOTSUPP;
726         }
727
728         if (posix_locking) {
729                 int posix_lock_type;
730                 if(lockType & LOCKING_ANDX_SHARED_LOCK)
731                         posix_lock_type = CIFS_RDLCK;
732                 else
733                         posix_lock_type = CIFS_WRLCK;
734                 
735                 if(numUnlock == 1)
736                         posix_lock_type = CIFS_UNLCK;
737
738                 rc = CIFSSMBPosixLock(xid, pTcon, netfid, 0 /* set */,
739                                       length, pfLock,
740                                       posix_lock_type, wait_flag);
741         } else {
742                 struct cifsFileInfo *fid = (struct cifsFileInfo *)file->private_data;
743
744                 if (numLock) {
745                         rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start,
746                                         0, numLock, lockType, wait_flag);
747
748                         if (rc == 0) {
749                                 /* For Windows locks we must store them. */
750                                 rc = store_file_lock(fid, length,
751                                                 pfLock->fl_start, lockType);
752                         }
753                 } else if (numUnlock) {
754                         /* For each stored lock that this unlock overlaps
755                            completely, unlock it. */
756                         int stored_rc = 0;
757                         struct cifsLockInfo *li, *tmp;
758
759                         rc = 0;
760                         down(&fid->lock_sem);
761                         list_for_each_entry_safe(li, tmp, &fid->llist, llist) {
762                                 if (pfLock->fl_start <= li->offset &&
763                                                 length >= li->length) {
764                                         stored_rc = CIFSSMBLock(xid, pTcon, netfid,
765                                                         li->length, li->offset,
766                                                         1, 0, li->type, FALSE);
767                                         if (stored_rc)
768                                                 rc = stored_rc;
769
770                                         list_del(&li->llist);
771                                         kfree(li);
772                                 }
773                         }
774                         up(&fid->lock_sem);
775                 }
776         }
777
778         if (pfLock->fl_flags & FL_POSIX)
779                 posix_lock_file_wait(file, pfLock);
780         FreeXid(xid);
781         return rc;
782 }
783
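/* Write data from a user space buffer to the server in chunks of at most
   wsize, reopening the handle if it was invalidated by a reconnect, and
   extend the cached file size if the write went past end of file. */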
784 ssize_t cifs_user_write(struct file *file, const char __user *write_data,
785         size_t write_size, loff_t *poffset)
786 {
787         int rc = 0;
788         unsigned int bytes_written = 0;
789         unsigned int total_written;
790         struct cifs_sb_info *cifs_sb;
791         struct cifsTconInfo *pTcon;
792         int xid, long_op;
793         struct cifsFileInfo *open_file;
794
795         if (file->f_path.dentry == NULL)
796                 return -EBADF;
797
798         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
799         if (cifs_sb == NULL)
800                 return -EBADF;
801
802         pTcon = cifs_sb->tcon;
803
804         /* cFYI(1,
805            (" write %d bytes to offset %lld of %s", write_size,
806            *poffset, file->f_path.dentry->d_name.name)); */
807
808         if (file->private_data == NULL)
809                 return -EBADF;
810         else
811                 open_file = (struct cifsFileInfo *) file->private_data;
812         
813         xid = GetXid();
814         if (file->f_path.dentry->d_inode == NULL) {
815                 FreeXid(xid);
816                 return -EBADF;
817         }
818
819         if (*poffset > file->f_path.dentry->d_inode->i_size)
820                 long_op = 2; /* writes past end of file can take a long time */
821         else
822                 long_op = 1;
823
824         for (total_written = 0; write_size > total_written;
825              total_written += bytes_written) {
826                 rc = -EAGAIN;
827                 while (rc == -EAGAIN) {
828                         if (file->private_data == NULL) {
829                                 /* file has been closed on us */
830                                 FreeXid(xid);
831                         /* if we have gotten here we have written some data
832                            and blocked, and the file has been freed on us while
833                            we blocked so return what we managed to write */
834                                 return total_written;
835                         } 
836                         if (open_file->closePend) {
837                                 FreeXid(xid);
838                                 if (total_written)
839                                         return total_written;
840                                 else
841                                         return -EBADF;
842                         }
843                         if (open_file->invalidHandle) {
844                                 if ((file->f_path.dentry == NULL) ||
845                                     (file->f_path.dentry->d_inode == NULL)) {
846                                         FreeXid(xid);
847                                         return total_written;
848                                 }
849                                 /* we could deadlock if we called
850                                    filemap_fdatawait from here so tell
851                                    reopen_file not to flush data to server
852                                    now */
853                                 rc = cifs_reopen_file(file->f_path.dentry->d_inode,
854                                         file, FALSE);
855                                 if (rc != 0)
856                                         break;
857                         }
858
859                         rc = CIFSSMBWrite(xid, pTcon,
860                                 open_file->netfid,
861                                 min_t(const int, cifs_sb->wsize,
862                                       write_size - total_written),
863                                 *poffset, &bytes_written,
864                                 NULL, write_data + total_written, long_op);
865                 }
866                 if (rc || (bytes_written == 0)) {
867                         if (total_written)
868                                 break;
869                         else {
870                                 FreeXid(xid);
871                                 return rc;
872                         }
873                 } else
874                         *poffset += bytes_written;
875                 long_op = FALSE; /* subsequent writes fast -
876                                     15 seconds is plenty */
877         }
878
879         cifs_stats_bytes_written(pTcon, total_written);
880
881         /* since the write may have blocked check these pointers again */
882         if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
883                 struct inode *inode = file->f_path.dentry->d_inode;
884 /* Do not update local mtime - server will set its actual value on write                
885  *              inode->i_ctime = inode->i_mtime = 
886  *                      current_fs_time(inode->i_sb);*/
887                 if (total_written > 0) {
888                         spin_lock(&inode->i_lock);
889                         if (*poffset > file->f_path.dentry->d_inode->i_size)
890                                 i_size_write(file->f_path.dentry->d_inode,
891                                         *poffset);
892                         spin_unlock(&inode->i_lock);
893                 }
894                 mark_inode_dirty_sync(file->f_path.dentry->d_inode);    
895         }
896         FreeXid(xid);
897         return total_written;
898 }
899
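/* Kernel-buffer write path used by writepage and the page writeback code;
   when signing is not in use (or the experimental flag is set) it sends the
   data via the vectored CIFSSMBWrite2 interface. */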
900 static ssize_t cifs_write(struct file *file, const char *write_data,
901         size_t write_size, loff_t *poffset)
902 {
903         int rc = 0;
904         unsigned int bytes_written = 0;
905         unsigned int total_written;
906         struct cifs_sb_info *cifs_sb;
907         struct cifsTconInfo *pTcon;
908         int xid, long_op;
909         struct cifsFileInfo *open_file;
910
911         if (file->f_path.dentry == NULL)
912                 return -EBADF;
913
914         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
915         if (cifs_sb == NULL)
916                 return -EBADF;
917
918         pTcon = cifs_sb->tcon;
919
920         cFYI(1,("write %zd bytes to offset %lld of %s", write_size,
921            *poffset, file->f_path.dentry->d_name.name));
922
923         if (file->private_data == NULL)
924                 return -EBADF;
925         else
926                 open_file = (struct cifsFileInfo *)file->private_data;
927         
928         xid = GetXid();
929         if (file->f_path.dentry->d_inode == NULL) {
930                 FreeXid(xid);
931                 return -EBADF;
932         }
933
934         if (*poffset > file->f_path.dentry->d_inode->i_size)
935                 long_op = 2; /* writes past end of file can take a long time */
936         else
937                 long_op = 1;
938
939         for (total_written = 0; write_size > total_written;
940              total_written += bytes_written) {
941                 rc = -EAGAIN;
942                 while (rc == -EAGAIN) {
943                         if (file->private_data == NULL) {
944                                 /* file has been closed on us */
945                                 FreeXid(xid);
946                         /* if we have gotten here we have written some data
947                            and blocked, and the file has been freed on us
948                            while we blocked so return what we managed to 
949                            write */
950                                 return total_written;
951                         } 
952                         if (open_file->closePend) {
953                                 FreeXid(xid);
954                                 if (total_written)
955                                         return total_written;
956                                 else
957                                         return -EBADF;
958                         }
959                         if (open_file->invalidHandle) {
960                                 if ((file->f_path.dentry == NULL) ||
961                                    (file->f_path.dentry->d_inode == NULL)) {
962                                         FreeXid(xid);
963                                         return total_written;
964                                 }
965                                 /* we could deadlock if we called
966                                    filemap_fdatawait from here so tell
967                                    reopen_file not to flush data to 
968                                    server now */
969                                 rc = cifs_reopen_file(file->f_path.dentry->d_inode,
970                                         file, FALSE);
971                                 if (rc != 0)
972                                         break;
973                         }
974                         if(experimEnabled || (pTcon->ses->server &&
975                                 ((pTcon->ses->server->secMode & 
976                                 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
977                                 == 0))) {
978                                 struct kvec iov[2];
979                                 unsigned int len;
980
981                                 len = min((size_t)cifs_sb->wsize,
982                                           write_size - total_written);
983                                 /* iov[0] is reserved for smb header */
984                                 iov[1].iov_base = (char *)write_data +
985                                                   total_written;
986                                 iov[1].iov_len = len;
987                                 rc = CIFSSMBWrite2(xid, pTcon,
988                                                 open_file->netfid, len,
989                                                 *poffset, &bytes_written,
990                                                 iov, 1, long_op);
991                         } else
992                                 rc = CIFSSMBWrite(xid, pTcon,
993                                          open_file->netfid,
994                                          min_t(const int, cifs_sb->wsize,
995                                                write_size - total_written),
996                                          *poffset, &bytes_written,
997                                          write_data + total_written,
998                                          NULL, long_op);
999                 }
1000                 if (rc || (bytes_written == 0)) {
1001                         if (total_written)
1002                                 break;
1003                         else {
1004                                 FreeXid(xid);
1005                                 return rc;
1006                         }
1007                 } else
1008                         *poffset += bytes_written;
1009                 long_op = FALSE; /* subsequent writes fast - 
1010                                     15 seconds is plenty */
1011         }
1012
1013         cifs_stats_bytes_written(pTcon, total_written);
1014
1015         /* since the write may have blocked check these pointers again */
1016         if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
1017 /*BB We could make this contingent on superblock ATIME flag too */
1018 /*              file->f_path.dentry->d_inode->i_ctime =
1019                 file->f_path.dentry->d_inode->i_mtime = CURRENT_TIME;*/
1020                 if (total_written > 0) {
1021                         spin_lock(&file->f_path.dentry->d_inode->i_lock);
1022                         if (*poffset > file->f_path.dentry->d_inode->i_size)
1023                                 i_size_write(file->f_path.dentry->d_inode,
1024                                              *poffset);
1025                         spin_unlock(&file->f_path.dentry->d_inode->i_lock);
1026                 }
1027                 mark_inode_dirty_sync(file->f_path.dentry->d_inode);
1028         }
1029         FreeXid(xid);
1030         return total_written;
1031 }
1032
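/* Find an open handle on this inode that was opened for writing and bump its
   wrtPending count; invalid handles are reopened (without flushing) before
   being returned.  Used by writepage paths that have no struct file. */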
1033 struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode)
1034 {
1035         struct cifsFileInfo *open_file;
1036         int rc;
1037
1038         /* Having a null inode here (because mapping->host was set to zero by
1039            the VFS or MM) should not happen, but we had reports of an oops (due
1040            to it being zero) during stress test cases, so we need to check for it */
1041
1042         if(cifs_inode == NULL) {
1043                 cERROR(1,("Null inode passed to cifs_writeable_file"));
1044                 dump_stack();
1045                 return NULL;
1046         }
1047
1048         read_lock(&GlobalSMBSeslock);
1049         list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
1050                 if (open_file->closePend)
1051                         continue;
1052                 if (open_file->pfile &&
1053                     ((open_file->pfile->f_flags & O_RDWR) ||
1054                      (open_file->pfile->f_flags & O_WRONLY))) {
1055                         atomic_inc(&open_file->wrtPending);
1056                         read_unlock(&GlobalSMBSeslock);
1057                         if((open_file->invalidHandle) && 
1058                            (!open_file->closePend) /* BB fixme -since the second clause can not be true remove it BB */) {
1059                                 rc = cifs_reopen_file(&cifs_inode->vfs_inode, 
1060                                                       open_file->pfile, FALSE);
1061                                 /* if it fails, try another handle - might be */
1062                                 /* dangerous to hold up writepages with retry */
1063                                 if(rc) {
1064                                         cFYI(1,("failed on reopen file in wp"));
1065                                         read_lock(&GlobalSMBSeslock);
1066                                         /* can not use this handle, no write
1067                                         pending on this one after all */
1068                                         atomic_dec
1069                                              (&open_file->wrtPending);
1070                                         continue;
1071                                 }
1072                         }
1073                         return open_file;
1074                 }
1075         }
1076         read_unlock(&GlobalSMBSeslock);
1077         return NULL;
1078 }
1079
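/* Write the dirty bytes [from, to) of a single page back to the server via
   any writable handle on the inode; the range is clamped so the write never
   extends the file. */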
1080 static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
1081 {
1082         struct address_space *mapping = page->mapping;
1083         loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1084         char *write_data;
1085         int rc = -EFAULT;
1086         int bytes_written = 0;
1087         struct cifs_sb_info *cifs_sb;
1088         struct cifsTconInfo *pTcon;
1089         struct inode *inode;
1090         struct cifsFileInfo *open_file;
1091
1092         if (!mapping || !mapping->host)
1093                 return -EFAULT;
1094
1095         inode = page->mapping->host;
1096         cifs_sb = CIFS_SB(inode->i_sb);
1097         pTcon = cifs_sb->tcon;
1098
1099         offset += (loff_t)from;
1100         write_data = kmap(page);
1101         write_data += from;
1102
1103         if ((to > PAGE_CACHE_SIZE) || (from > to)) {
1104                 kunmap(page);
1105                 return -EIO;
1106         }
1107
1108         /* racing with truncate? */
1109         if (offset > mapping->host->i_size) {
1110                 kunmap(page);
1111                 return 0; /* don't care */
1112         }
1113
1114         /* check to make sure that we are not extending the file */
1115         if (mapping->host->i_size - offset < (loff_t)to)
1116                 to = (unsigned)(mapping->host->i_size - offset); 
1117
1118         open_file = find_writable_file(CIFS_I(mapping->host));
1119         if (open_file) {
1120                 bytes_written = cifs_write(open_file->pfile, write_data,
1121                                            to-from, &offset);
1122                 atomic_dec(&open_file->wrtPending);
1123                 /* Does mm or vfs already set times? */
1124                 inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
1125                 if ((bytes_written > 0) && (offset)) {
1126                         rc = 0;
1127                 } else if (bytes_written < 0) {
1128                         if (rc != -EBADF)
1129                                 rc = bytes_written;
1130                 }
1131         } else {
1132                 cFYI(1, ("No writeable filehandles for inode"));
1133                 rc = -EIO;
1134         }
1135
1136         kunmap(page);
1137         return rc;
1138 }
1139
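/* Writeback of dirty pages: gather runs of contiguous dirty pages into a
   kvec of up to wsize bytes and send each run with one CIFSSMBWrite2 call,
   falling back to generic_writepages when wsize is too small, signing is
   in use without the experimental flag, or the iov cannot be allocated. */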
1140 static int cifs_writepages(struct address_space *mapping,
1141                            struct writeback_control *wbc)
1142 {
1143         struct backing_dev_info *bdi = mapping->backing_dev_info;
1144         unsigned int bytes_to_write;
1145         unsigned int bytes_written;
1146         struct cifs_sb_info *cifs_sb;
1147         int done = 0;
1148         pgoff_t end;
1149         pgoff_t index;
1150         int range_whole = 0;
1151         struct kvec * iov;
1152         int len;
1153         int n_iov = 0;
1154         pgoff_t next;
1155         int nr_pages;
1156         __u64 offset = 0;
1157         struct cifsFileInfo *open_file;
1158         struct page *page;
1159         struct pagevec pvec;
1160         int rc = 0;
1161         int scanned = 0;
1162         int xid;
1163
1164         cifs_sb = CIFS_SB(mapping->host->i_sb);
1165         
1166         /*
1167          * If wsize is smaller than the page cache size, default to writing
1168          * one page at a time via cifs_writepage
1169          */
1170         if (cifs_sb->wsize < PAGE_CACHE_SIZE)
1171                 return generic_writepages(mapping, wbc);
1172
1173         if((cifs_sb->tcon->ses) && (cifs_sb->tcon->ses->server))
1174                 if(cifs_sb->tcon->ses->server->secMode &
1175                           (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
1176                         if(!experimEnabled) 
1177                                 return generic_writepages(mapping, wbc);
1178
1179         iov = kmalloc(32 * sizeof(struct kvec), GFP_KERNEL);
1180         if(iov == NULL)
1181                 return generic_writepages(mapping, wbc);
1182
1183
1184         /*
1185          * BB: Is this meaningful for a non-block-device file system?
1186          * If it is, we should test it again after we do I/O
1187          */
1188         if (wbc->nonblocking && bdi_write_congested(bdi)) {
1189                 wbc->encountered_congestion = 1;
1190                 kfree(iov);
1191                 return 0;
1192         }
1193
1194         xid = GetXid();
1195
1196         pagevec_init(&pvec, 0);
1197         if (wbc->range_cyclic) {
1198                 index = mapping->writeback_index; /* Start from prev offset */
1199                 end = -1;
1200         } else {
1201                 index = wbc->range_start >> PAGE_CACHE_SHIFT;
1202                 end = wbc->range_end >> PAGE_CACHE_SHIFT;
1203                 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
1204                         range_whole = 1;
1205                 scanned = 1;
1206         }
1207 retry:
1208         while (!done && (index <= end) &&
1209                (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
1210                         PAGECACHE_TAG_DIRTY,
1211                         min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
1212                 int first;
1213                 unsigned int i;
1214
1215                 first = -1;
1216                 next = 0;
1217                 n_iov = 0;
1218                 bytes_to_write = 0;
1219
1220                 for (i = 0; i < nr_pages; i++) {
1221                         page = pvec.pages[i];
1222                         /*
1223                          * At this point we hold neither mapping->tree_lock nor
1224                          * lock on the page itself: the page may be truncated or
1225                          * invalidated (changing page->mapping to NULL), or even
1226                          * swizzled back from swapper_space to tmpfs file
1227                          * mapping
1228                          */
1229
1230                         if (first < 0)
1231                                 lock_page(page);
1232                         else if (TestSetPageLocked(page))
1233                                 break;
1234
1235                         if (unlikely(page->mapping != mapping)) {
1236                                 unlock_page(page);
1237                                 break;
1238                         }
1239
1240                         if (!wbc->range_cyclic && page->index > end) {
1241                                 done = 1;
1242                                 unlock_page(page);
1243                                 break;
1244                         }
1245
1246                         if (next && (page->index != next)) {
1247                                 /* Not next consecutive page */
1248                                 unlock_page(page);
1249                                 break;
1250                         }
1251
1252                         if (wbc->sync_mode != WB_SYNC_NONE)
1253                                 wait_on_page_writeback(page);
1254
1255                         if (PageWriteback(page) ||
1256                                         !clear_page_dirty_for_io(page)) {
1257                                 unlock_page(page);
1258                                 break;
1259                         }
1260
1261                         /*
1262                          * This actually clears the dirty bit in the radix tree.
1263                          * See cifs_writepage() for more commentary.
1264                          */
1265                         set_page_writeback(page);
1266
1267                         if (page_offset(page) >= mapping->host->i_size) {
1268                                 done = 1;
1269                                 unlock_page(page);
1270                                 end_page_writeback(page);
1271                                 break;
1272                         }
1273
1274                         /*
1275                          * BB can we get rid of this?  pages are held by pvec
1276                          */
1277                         page_cache_get(page);
1278
1279                         len = min(mapping->host->i_size - page_offset(page),
1280                                   (loff_t)PAGE_CACHE_SIZE);
1281
1282                         /* reserve iov[0] for the smb header */
1283                         n_iov++;
1284                         iov[n_iov].iov_base = kmap(page);
1285                         iov[n_iov].iov_len = len;
1286                         bytes_to_write += len;
1287
1288                         if (first < 0) {
1289                                 first = i;
1290                                 offset = page_offset(page);
1291                         }
1292                         next = page->index + 1;
1293                         if (bytes_to_write + PAGE_CACHE_SIZE > cifs_sb->wsize)
1294                                 break;
1295                 }
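                     /*
                      * At this point iov[1..n_iov] describe n_iov locked,
                      * consecutive pages (bytes_to_write bytes in total)
                      * starting at file offset 'offset'; write them out as
                      * one request.
                      */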
1296                 if (n_iov) {
1297                         /* Search for a writable handle every time we call
1298                          * CIFSSMBWrite2.  We can't rely on the last handle
1299                          * we used to still be valid
1300                          */
1301                         open_file = find_writable_file(CIFS_I(mapping->host));
1302                         if (!open_file) {
1303                                 cERROR(1, ("No writable handles for inode"));
1304                                 rc = -EBADF;
1305                         } else {
1306                                 rc = CIFSSMBWrite2(xid, cifs_sb->tcon,
1307                                                    open_file->netfid,
1308                                                    bytes_to_write, offset,
1309                                                    &bytes_written, iov, n_iov,
1310                                                    1);
1311                                 atomic_dec(&open_file->wrtPending);
1312                                 if (rc || bytes_written < bytes_to_write) {
1313                                         cERROR(1, ("Write2 ret %d, written = %d",
1314                                                   rc, bytes_written));
1315                                         /* BB what if continued retry is
1316                                            requested via mount flags? */
1317                                         set_bit(AS_EIO, &mapping->flags);
1318                                 } else {
1319                                         cifs_stats_bytes_written(cifs_sb->tcon,
1320                                                                  bytes_written);
1321                                 }
1322                         }
1323                         for (i = 0; i < n_iov; i++) {
1324                                 page = pvec.pages[first + i];
1325                                 /* Should we also set page error on
1326                                 success rc but too little data written? */
1327                                 /* BB investigate retry logic on temporary
1328                                 server crash cases and how recovery works
1329                                 when page marked as error */ 
1330                                 if (rc)
1331                                         SetPageError(page);
1332                                 kunmap(page);
1333                                 unlock_page(page);
1334                                 end_page_writeback(page);
1335                                 page_cache_release(page);
1336                         }
1337                         if ((wbc->nr_to_write -= n_iov) <= 0)
1338                                 done = 1;
1339                         index = next;
1340                 }
1341                 pagevec_release(&pvec);
1342         }
1343         if (!scanned && !done) {
1344                 /*
1345                  * We hit the last page and there is more work to be done: wrap
1346                  * back to the start of the file
1347                  */
1348                 scanned = 1;
1349                 index = 0;
1350                 goto retry;
1351         }
1352         if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
1353                 mapping->writeback_index = index;
1354
1355         FreeXid(xid);
1356         kfree(iov);
1357         return rc;
1358 }
1359
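     /*
      * Write a single dirty page back to the server.  The actual send is
      * delegated to cifs_partialpagewrite(); this wrapper only manages the
      * page reference, the writeback flag and the page lock around that
      * call.
      */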
1360 static int cifs_writepage(struct page *page, struct writeback_control *wbc)
1361 {
1362         int rc = -EFAULT;
1363         int xid;
1364
1365         xid = GetXid();
1366 /* BB add check for wbc flags */
1367         page_cache_get(page);
1368         if (!PageUptodate(page)) {
1369                 cFYI(1, ("ppw - page not up to date"));
1370         }
1371
1372         /*
1373          * Set the "writeback" flag, and clear "dirty" in the radix tree.
1374          *
1375          * A writepage() implementation always needs to do either this,
1376          * or re-dirty the page with "redirty_page_for_writepage()" in
1377          * the case of a failure.
1378          *
1379          * Just unlocking the page will cause the radix tree tag-bits
1380          * to fail to update with the state of the page correctly.
1381          */
1382         set_page_writeback(page);               
1383         rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE);
1384         SetPageUptodate(page); /* BB add check for error and ClearPageUptodate? */
1385         unlock_page(page);
1386         end_page_writeback(page);
1387         page_cache_release(page);
1388         FreeXid(xid);
1389         return rc;
1390 }
1391
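     /*
      * ->commit_write(): called once the generic write path has copied the
      * caller's data into the page.  Extend i_size if the write runs past
      * the current end of file; if the page is not uptodate, push the new
      * bytes [offset, to) to the server synchronously via cifs_write()
      * instead of simply dirtying the page.
      */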
1392 static int cifs_commit_write(struct file *file, struct page *page,
1393         unsigned offset, unsigned to)
1394 {
1395         int xid;
1396         int rc = 0;
1397         struct inode *inode = page->mapping->host;
1398         loff_t position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
1399         char *page_data;
1400
1401         xid = GetXid();
1402         cFYI(1, ("commit write for page %p up to position %lld for %d", 
1403                  page, position, to));
1404         spin_lock(&inode->i_lock);
1405         if (position > inode->i_size) {
1406                 i_size_write(inode, position);
1407                 /* if (file->private_data == NULL) {
1408                         rc = -EBADF;
1409                 } else {
1410                         open_file = (struct cifsFileInfo *)file->private_data;
1411                         cifs_sb = CIFS_SB(inode->i_sb);
1412                         rc = -EAGAIN;
1413                         while (rc == -EAGAIN) {
1414                                 if ((open_file->invalidHandle) && 
1415                                     (!open_file->closePend)) {
1416                                         rc = cifs_reopen_file(
1417                                                 file->f_path.dentry->d_inode, file);
1418                                         if (rc != 0)
1419                                                 break;
1420                                 }
1421                                 if (!open_file->closePend) {
1422                                         rc = CIFSSMBSetFileSize(xid,
1423                                                 cifs_sb->tcon, position,
1424                                                 open_file->netfid,
1425                                                 open_file->pid, FALSE);
1426                                 } else {
1427                                         rc = -EBADF;
1428                                         break;
1429                                 }
1430                         }
1431                         cFYI(1, (" SetEOF (commit write) rc = %d", rc));
1432                 } */
1433         }
1434         spin_unlock(&inode->i_lock);
1435         if (!PageUptodate(page)) {
1436                 position =  ((loff_t)page->index << PAGE_CACHE_SHIFT) + offset;
1437                 /* can not rely on (or let) writepage write this data */
1438                 if (to < offset) {
1439                         cFYI(1, ("Illegal offsets, can not copy from %d to %d",
1440                                 offset, to));
1441                         FreeXid(xid);
1442                         return rc;
1443                 }
1444                 /* this is probably better than directly calling
1445                    partialpage_write since in this function the file handle is
1446                    known which we might as well leverage */
1447                 /* BB check if anything else missing out of ppw
1448                    such as updating last write time */
1449                 page_data = kmap(page);
1450                 rc = cifs_write(file, page_data + offset, to-offset,
1451                                 &position);
1452                 if (rc > 0)
1453                         rc = 0;
1454                 /* else if (rc < 0) should we set writebehind rc? */
1455                 kunmap(page);
1456         } else {        
1457                 set_page_dirty(page);
1458         }
1459
1460         FreeXid(xid);
1461         return rc;
1462 }
1463
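     /*
      * ->fsync(): flush cached write data for this inode to the server.  A
      * successful flush also resets the saved write-behind error
      * (write_behind_rc).
      */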
1464 int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
1465 {
1466         int xid;
1467         int rc = 0;
1468         struct inode *inode = file->f_path.dentry->d_inode;
1469
1470         xid = GetXid();
1471
1472         cFYI(1, ("Sync file - name: %s datasync: 0x%x", 
1473                 dentry->d_name.name, datasync));
1474         
1475         rc = filemap_fdatawrite(inode->i_mapping);
1476         if (rc == 0)
1477                 CIFS_I(inode)->write_behind_rc = 0;
1478         FreeXid(xid);
1479         return rc;
1480 }
1481
1482 /* static void cifs_sync_page(struct page *page)
1483 {
1484         struct address_space *mapping;
1485         struct inode *inode;
1486         unsigned long index = page->index;
1487         unsigned int rpages = 0;
1488         int rc = 0;
1489
1490         cFYI(1, ("sync page %p",page));
1491         mapping = page->mapping;
1492         if (!mapping)
1493                 return 0;
1494         inode = mapping->host;
1495         if (!inode)
1496                 return; */
1497
1498 /*      fill in rpages then 
1499         result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
1500
1501 /*      cFYI(1, ("rpages is %d for sync page of Index %ld", rpages, index));
1502
1503 #if 0
1504         if (rc < 0)
1505                 return rc;
1506         return 0;
1507 #endif
1508 } */
1509
1510 /*
1511  * As file closes, flush all cached write data for this inode checking
1512  * for write behind errors.
1513  */
1514 int cifs_flush(struct file *file, fl_owner_t id)
1515 {
1516         struct inode *inode = file->f_path.dentry->d_inode;
1517         int rc = 0;
1518
1519         /* Rather than do the steps manually:
1520            lock the inode for writing
1521            loop through pages looking for write behind data (dirty pages)
1522            coalesce into contiguous 16K (or smaller) chunks to write to server
1523            send to server (prefer in parallel)
1524            deal with writebehind errors
1525            unlock inode for writing
1526            filemapfdatawrite appears easier for the time being */
1527
1528         rc = filemap_fdatawrite(inode->i_mapping);
1529         if (!rc) /* reset wb rc if we were able to write out dirty pages */
1530                 CIFS_I(inode)->write_behind_rc = 0;
1531                 
1532         cFYI(1, ("Flush inode %p file %p rc %d", inode, file, rc));
1533
1534         return rc;
1535 }
1536
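     /*
      * Read into the caller's user-space buffer, bypassing the page cache,
      * in chunks of at most cifs_sb->rsize bytes.  Presumably this is the
      * read path used for uncached (e.g. forcedirectio) opens; cached reads
      * go through cifs_readpage/cifs_readpages below.
      */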
1537 ssize_t cifs_user_read(struct file *file, char __user *read_data,
1538         size_t read_size, loff_t *poffset)
1539 {
1540         int rc = -EACCES;
1541         unsigned int bytes_read = 0;
1542         unsigned int total_read = 0;
1543         unsigned int current_read_size;
1544         struct cifs_sb_info *cifs_sb;
1545         struct cifsTconInfo *pTcon;
1546         int xid;
1547         struct cifsFileInfo *open_file;
1548         char *smb_read_data;
1549         char __user *current_offset;
1550         struct smb_com_read_rsp *pSMBr;
1551
1552         xid = GetXid();
1553         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1554         pTcon = cifs_sb->tcon;
1555
1556         if (file->private_data == NULL) {
1557                 FreeXid(xid);
1558                 return -EBADF;
1559         }
1560         open_file = (struct cifsFileInfo *)file->private_data;
1561
1562         if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
1563                 cFYI(1, ("attempting read on write only file instance"));
1564         }
1565         for (total_read = 0, current_offset = read_data;
1566              read_size > total_read;
1567              total_read += bytes_read, current_offset += bytes_read) {
1568                 current_read_size = min_t(const int, read_size - total_read, 
1569                                           cifs_sb->rsize);
1570                 rc = -EAGAIN;
1571                 smb_read_data = NULL;
1572                 while (rc == -EAGAIN) {
1573                         int buf_type = CIFS_NO_BUFFER;
1574                         if ((open_file->invalidHandle) && 
1575                             (!open_file->closePend)) {
1576                                 rc = cifs_reopen_file(file->f_path.dentry->d_inode,
1577                                         file, TRUE);
1578                                 if (rc != 0)
1579                                         break;
1580                         }
1581                         rc = CIFSSMBRead(xid, pTcon,
1582                                          open_file->netfid,
1583                                          current_read_size, *poffset,
1584                                          &bytes_read, &smb_read_data,
1585                                          &buf_type);
1586                         pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1587                         if (smb_read_data) {
1588                                 if (copy_to_user(current_offset,
1589                                                 smb_read_data +
1590                                                 4 /* RFC1001 length field */ +
1591                                                 le16_to_cpu(pSMBr->DataOffset),
1592                                                 bytes_read)) {
1593                                         rc = -EFAULT;
1594                                 }
1595
1596                                 if (buf_type == CIFS_SMALL_BUFFER)
1597                                         cifs_small_buf_release(smb_read_data);
1598                                 else if (buf_type == CIFS_LARGE_BUFFER)
1599                                         cifs_buf_release(smb_read_data);
1600                                 smb_read_data = NULL;
1601                         }
1602                 }
1603                 if (rc || (bytes_read == 0)) {
1604                         if (total_read) {
1605                                 break;
1606                         } else {
1607                                 FreeXid(xid);
1608                                 return rc;
1609                         }
1610                 } else {
1611                         cifs_stats_bytes_read(pTcon, bytes_read);
1612                         *poffset += bytes_read;
1613                 }
1614         }
1615         FreeXid(xid);
1616         return total_read;
1617 }
1618
1619
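     /*
      * Like cifs_user_read() above, but fills a kernel buffer supplied by
      * the caller; since no copy is done afterwards, CIFSSMBRead is
      * apparently expected to place the data directly into current_offset.
      * Used by the readpage paths below.
      */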
1620 static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
1621         loff_t *poffset)
1622 {
1623         int rc = -EACCES;
1624         unsigned int bytes_read = 0;
1625         unsigned int total_read;
1626         unsigned int current_read_size;
1627         struct cifs_sb_info *cifs_sb;
1628         struct cifsTconInfo *pTcon;
1629         int xid;
1630         char *current_offset;
1631         struct cifsFileInfo *open_file;
1632         int buf_type = CIFS_NO_BUFFER;
1633
1634         xid = GetXid();
1635         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1636         pTcon = cifs_sb->tcon;
1637
1638         if (file->private_data == NULL) {
1639                 FreeXid(xid);
1640                 return -EBADF;
1641         }
1642         open_file = (struct cifsFileInfo *)file->private_data;
1643
1644         if ((file->f_flags & O_ACCMODE) == O_WRONLY)
1645                 cFYI(1, ("attempting read on write only file instance"));
1646
1647         for (total_read = 0, current_offset = read_data; 
1648              read_size > total_read;
1649              total_read += bytes_read, current_offset += bytes_read) {
1650                 current_read_size = min_t(const int, read_size - total_read,
1651                                           cifs_sb->rsize);
1652                 /* For Windows ME and 9x do not request more than was
1653                 negotiated, since the server will refuse the read otherwise */
1654                 if ((pTcon->ses) &&
1655                         !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
1656                         current_read_size = min_t(const int, current_read_size,
1657                                         pTcon->ses->server->maxBuf - 128);
1658                 }
1659                 rc = -EAGAIN;
1660                 while (rc == -EAGAIN) {
1661                         if ((open_file->invalidHandle) && 
1662                             (!open_file->closePend)) {
1663                                 rc = cifs_reopen_file(file->f_path.dentry->d_inode,
1664                                         file, TRUE);
1665                                 if (rc != 0)
1666                                         break;
1667                         }
1668                         rc = CIFSSMBRead(xid, pTcon,
1669                                          open_file->netfid,
1670                                          current_read_size, *poffset,
1671                                          &bytes_read, &current_offset,
1672                                          &buf_type);
1673                 }
1674                 if (rc || (bytes_read == 0)) {
1675                         if (total_read) {
1676                                 break;
1677                         } else {
1678                                 FreeXid(xid);
1679                                 return rc;
1680                         }
1681                 } else {
1682                         cifs_stats_bytes_read(pTcon, bytes_read);
1683                         *poffset += bytes_read;
1684                 }
1685         }
1686         FreeXid(xid);
1687         return total_read;
1688 }
1689
1690 int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
1691 {
1692         struct dentry *dentry = file->f_path.dentry;
1693         int rc, xid;
1694
1695         xid = GetXid();
1696         rc = cifs_revalidate(dentry);
1697         if (rc) {
1698                 cFYI(1, ("Validation prior to mmap failed, error=%d", rc));
1699                 FreeXid(xid);
1700                 return rc;
1701         }
1702         rc = generic_file_mmap(file, vma);
1703         FreeXid(xid);
1704         return rc;
1705 }
1706
1707
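     /*
      * Distribute one chunk of SMB read data across the pages queued on the
      * readahead list: add each page to the page cache, copy its share of
      * the data (zero-filling the tail of a final partial page), mark it
      * uptodate and batch it onto the LRU via the caller's pagevec.
      */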
1708 static void cifs_copy_cache_pages(struct address_space *mapping, 
1709         struct list_head *pages, int bytes_read, char *data,
1710         struct pagevec *plru_pvec)
1711 {
1712         struct page *page;
1713         char *target;
1714
1715         while (bytes_read > 0) {
1716                 if (list_empty(pages))
1717                         break;
1718
1719                 page = list_entry(pages->prev, struct page, lru);
1720                 list_del(&page->lru);
1721
1722                 if (add_to_page_cache(page, mapping, page->index,
1723                                       GFP_KERNEL)) {
1724                         page_cache_release(page);
1725                         cFYI(1, ("Add page cache failed"));
1726                         data += PAGE_CACHE_SIZE;
1727                         bytes_read -= PAGE_CACHE_SIZE;
1728                         continue;
1729                 }
1730
1731                 target = kmap_atomic(page, KM_USER0);
1732
1733                 if (PAGE_CACHE_SIZE > bytes_read) {
1734                         memcpy(target, data, bytes_read);
1735                         /* zero the tail end of this partial page */
1736                         memset(target + bytes_read, 0, 
1737                                PAGE_CACHE_SIZE - bytes_read);
1738                         bytes_read = 0;
1739                 } else {
1740                         memcpy(target, data, PAGE_CACHE_SIZE);
1741                         bytes_read -= PAGE_CACHE_SIZE;
1742                 }
1743                 kunmap_atomic(target, KM_USER0);
1744
1745                 flush_dcache_page(page);
1746                 SetPageUptodate(page);
1747                 unlock_page(page);
1748                 if (!pagevec_add(plru_pvec, page))
1749                         __pagevec_lru_add(plru_pvec);
1750                 data += PAGE_CACHE_SIZE;
1751         }
1752         return;
1753 }
1754
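     /*
      * ->readpages(): populate the readahead list.  Runs of consecutive
      * pages are batched into a single CIFSSMBRead of up to cifs_sb->rsize
      * bytes (rounded down to whole pages), and the returned data is spread
      * over the pages by cifs_copy_cache_pages().
      */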
1755 static int cifs_readpages(struct file *file, struct address_space *mapping,
1756         struct list_head *page_list, unsigned num_pages)
1757 {
1758         int rc = -EACCES;
1759         int xid;
1760         loff_t offset;
1761         struct page *page;
1762         struct cifs_sb_info *cifs_sb;
1763         struct cifsTconInfo *pTcon;
1764         int bytes_read = 0;
1765         unsigned int read_size, i;
1766         char *smb_read_data = NULL;
1767         struct smb_com_read_rsp *pSMBr;
1768         struct pagevec lru_pvec;
1769         struct cifsFileInfo *open_file;
1770         int buf_type = CIFS_NO_BUFFER;
1771
1772         xid = GetXid();
1773         if (file->private_data == NULL) {
1774                 FreeXid(xid);
1775                 return -EBADF;
1776         }
1777         open_file = (struct cifsFileInfo *)file->private_data;
1778         cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1779         pTcon = cifs_sb->tcon;
1780
1781         pagevec_init(&lru_pvec, 0);
1782
1783         for (i = 0; i < num_pages; ) {
1784                 unsigned contig_pages;
1785                 struct page *tmp_page;
1786                 unsigned long expected_index;
1787
1788                 if (list_empty(page_list))
1789                         break;
1790
1791                 page = list_entry(page_list->prev, struct page, lru);
1792                 offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1793
1794                 /* count adjacent pages that we will read into */
1795                 contig_pages = 0;
1796                 expected_index = 
1797                         list_entry(page_list->prev, struct page, lru)->index;
1798                 list_for_each_entry_reverse(tmp_page, page_list, lru) {
1799                         if (tmp_page->index == expected_index) {
1800                                 contig_pages++;
1801                                 expected_index++;
1802                         } else
1803                                 break; 
1804                 }
1805                 if (contig_pages + i > num_pages)
1806                         contig_pages = num_pages - i;
1807
1808                 /* for reads over a certain size could initiate async
1809                    read ahead */
1810
1811                 read_size = contig_pages * PAGE_CACHE_SIZE;
1812                 /* Read size needs to be in multiples of one page */
1813                 read_size = min_t(const unsigned int, read_size,
1814                                   cifs_sb->rsize & PAGE_CACHE_MASK);
1815
1816                 rc = -EAGAIN;
1817                 while (rc == -EAGAIN) {
1818                         if ((open_file->invalidHandle) && 
1819                             (!open_file->closePend)) {
1820                                 rc = cifs_reopen_file(file->f_path.dentry->d_inode,
1821                                         file, TRUE);
1822                                 if (rc != 0)
1823                                         break;
1824                         }
1825
1826                         rc = CIFSSMBRead(xid, pTcon,
1827                                          open_file->netfid,
1828                                          read_size, offset,
1829                                          &bytes_read, &smb_read_data,
1830                                          &buf_type);
1831                         /* BB more RC checks ? */
1832                         if (rc == -EAGAIN) {
1833                                 if (smb_read_data) {
1834                                         if (buf_type == CIFS_SMALL_BUFFER)
1835                                                 cifs_small_buf_release(smb_read_data);
1836                                         else if (buf_type == CIFS_LARGE_BUFFER)
1837                                                 cifs_buf_release(smb_read_data);
1838                                         smb_read_data = NULL;
1839                                 }
1840                         }
1841                 }
1842                 if ((rc < 0) || (smb_read_data == NULL)) {
1843                         cFYI(1, ("Read error in readpages: %d", rc));
1844                         break;
1845                 } else if (bytes_read > 0) {
1846                         task_io_account_read(bytes_read);
1847                         pSMBr = (struct smb_com_read_rsp *)smb_read_data;
1848                         cifs_copy_cache_pages(mapping, page_list, bytes_read,
1849                                 smb_read_data + 4 /* RFC1001 hdr */ +
1850                                 le16_to_cpu(pSMBr->DataOffset), &lru_pvec);
1851
1852                         i += bytes_read >> PAGE_CACHE_SHIFT;
1853                         cifs_stats_bytes_read(pTcon, bytes_read);
1854                         if ((int)(bytes_read & PAGE_CACHE_MASK) != bytes_read) {
1855                                 i++; /* account for partial page */
1856
1857                                 /* server copy of file can have smaller size 
1858                                    than client */
1859                                 /* BB do we need to verify this common case ? 
1860                                    this case is ok - if we are at server EOF 
1861                                    we will hit it on next read */
1862
1863                                 /* break; */
1864                         }
1865                 } else {
1866                         cFYI(1, ("No bytes read (%d) at offset %lld. "
1867                                  "Cleaning remaining pages from readahead list",
1868                                  bytes_read, offset));
1869                         /* BB turn off caching and do new lookup on 
1870                            file size at server? */
1871                         break;
1872                 }
1873                 if (smb_read_data) {
1874                         if (buf_type == CIFS_SMALL_BUFFER)
1875                                 cifs_small_buf_release(smb_read_data);
1876                         else if (buf_type == CIFS_LARGE_BUFFER)
1877                                 cifs_buf_release(smb_read_data);
1878                         smb_read_data = NULL;
1879                 }
1880                 bytes_read = 0;
1881         }
1882
1883         pagevec_lru_add(&lru_pvec);
1884
1885 /* need to free smb_read_data buf before exit */
1886         if (smb_read_data) {
1887                 if (buf_type == CIFS_SMALL_BUFFER)
1888                         cifs_small_buf_release(smb_read_data);
1889                 else if (buf_type == CIFS_LARGE_BUFFER)
1890                         cifs_buf_release(smb_read_data);
1891                 smb_read_data = NULL;
1892         } 
1893
1894         FreeXid(xid);
1895         return rc;
1896 }
1897
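     /*
      * Read one page of data from the server into an already locked page.
      * Shared by cifs_readpage() and cifs_prepare_write(); unlocking the
      * page is left to the caller.
      */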
1898 static int cifs_readpage_worker(struct file *file, struct page *page,
1899         loff_t *poffset)
1900 {
1901         char *read_data;
1902         int rc;
1903
1904         page_cache_get(page);
1905         read_data = kmap(page);
1906         /* for reads over a certain size could initiate async read ahead */
1907
1908         rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset);
1909
1910         if (rc < 0)
1911                 goto io_error;
1912         else
1913                 cFYI(1, ("Bytes read %d", rc));
1914
1915         file->f_path.dentry->d_inode->i_atime =
1916                 current_fs_time(file->f_path.dentry->d_inode->i_sb);
1917
1918         if (PAGE_CACHE_SIZE > rc)
1919                 memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc);
1920
1921         flush_dcache_page(page);
1922         SetPageUptodate(page);
1923         rc = 0;
1924
1925 io_error:
1926         kunmap(page);
1927         page_cache_release(page);
1928         return rc;
1929 }
1930
1931 static int cifs_readpage(struct file *file, struct page *page)
1932 {
1933         loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
1934         int rc = -EACCES;
1935         int xid;
1936
1937         xid = GetXid();
1938
1939         if (file->private_data == NULL) {
1940                 FreeXid(xid);
1941                 return -EBADF;
1942         }
1943
1944         cFYI(1, ("readpage %p at offset %d 0x%x\n", 
1945                  page, (int)offset, (int)offset));
1946
1947         rc = cifs_readpage_worker(file, page, &offset);
1948
1949         unlock_page(page);
1950
1951         FreeXid(xid);
1952         return rc;
1953 }
1954
1955 /* We do not want to update the file size from the server for inodes
1956    open for write, to avoid racing with writepage extending the
1957    file.  In the future we could consider refreshing the inode only
1958    on increases in the file size, but that is tricky to do without
1959    racing with write-behind page caching in the current Linux
1960    kernel design */
1961 int is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
1962 {
1963         struct cifsFileInfo *open_file = NULL;
1964
1965         if (cifsInode)
1966                 open_file =  find_writable_file(cifsInode);
1967  
1968         if (open_file) {
1969                 struct cifs_sb_info *cifs_sb;
1970
1971                 /* there is not actually a write pending so let
1972                 this handle go free and allow it to
1973                 be closable if needed */
1974                 atomic_dec(&open_file->wrtPending);
1975
1976                 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
1977                 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
1978                         /* since no page cache to corrupt on directio 
1979                         we can change size safely */
1980                         return 1;
1981                 }
1982
1983                 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
1984                         return 1;
1985
1986                 return 0;
1987         } else
1988                 return 1;
1989 }
1990
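     /*
      * ->prepare_write(): get the page into a state where the caller can
      * copy bytes [from, to) into it.  A full-page write needs no data from
      * the server, nor does a page entirely beyond EOF or a copy that
      * starts at the top of the page and reaches EOF (the untouched bytes
      * are simply zeroed).  Otherwise read the page in if we have read
      * access, and let commit_write deal with any error.
      */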
1991 static int cifs_prepare_write(struct file *file, struct page *page,
1992         unsigned from, unsigned to)
1993 {
1994         int rc = 0;
1995         loff_t i_size;
1996         loff_t offset;
1997
1998         cFYI(1, ("prepare write for page %p from %d to %d", page, from, to));
1999         if (PageUptodate(page))
2000                 return 0;
2001
2002         /* If we are writing a full page it will be up to date,
2003            no need to read from the server */
2004         if ((to == PAGE_CACHE_SIZE) && (from == 0)) {
2005                 SetPageUptodate(page);
2006                 return 0;
2007         }
2008
2009         offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
2010         i_size = i_size_read(page->mapping->host);
2011
2012         if ((offset >= i_size) ||
2013             ((from == 0) && (offset + to) >= i_size)) {
2014                 /*
2015                  * We don't need to read data beyond the end of the file.
2016                  * zero it, and set the page uptodate
2017                  */
2018                 void *kaddr = kmap_atomic(page, KM_USER0);
2019
2020                 if (from)
2021                         memset(kaddr, 0, from);
2022                 if (to < PAGE_CACHE_SIZE)
2023                         memset(kaddr + to, 0, PAGE_CACHE_SIZE - to);
2024                 flush_dcache_page(page);
2025                 kunmap_atomic(kaddr, KM_USER0);
2026                 SetPageUptodate(page);
2027         } else if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
2028                 /* might as well read a page, it is fast enough */
2029                 rc = cifs_readpage_worker(file, page, &offset);
2030         } else {
2031                 /* we could try using another file handle if there is one -
2032                    but how would we lock it to prevent close of that handle
2033                    racing with this read? In any case
2034                    this will be written out by commit_write so is fine */
2035         }
2036
2037         /* we do not need to pass errors back 
2038            e.g. if we do not have read access to the file 
2039            because cifs_commit_write will do the right thing.  -- shaggy */
2040
2041         return 0;
2042 }
2043
2044 const struct address_space_operations cifs_addr_ops = {
2045         .readpage = cifs_readpage,
2046         .readpages = cifs_readpages,
2047         .writepage = cifs_writepage,
2048         .writepages = cifs_writepages,
2049         .prepare_write = cifs_prepare_write,
2050         .commit_write = cifs_commit_write,
2051         .set_page_dirty = __set_page_dirty_nobuffers,
2052         /* .sync_page = cifs_sync_page, */
2053         /* .direct_IO = */
2054 };
2055
2056 /*
2057  * cifs_readpages requires the server to support a buffer large enough to
2058  * contain the header plus one complete page of data.  Otherwise, we need
2059  * to leave cifs_readpages out of the address space operations.
2060  */
2061 const struct address_space_operations cifs_addr_ops_smallbuf = {
2062         .readpage = cifs_readpage,
2063         .writepage = cifs_writepage,
2064         .writepages = cifs_writepages,
2065         .prepare_write = cifs_prepare_write,
2066         .commit_write = cifs_commit_write,
2067         .set_page_dirty = __set_page_dirty_nobuffers,
2068         /* .sync_page = cifs_sync_page, */
2069         /* .direct_IO = */
2070 };