fs/xfs/linux-2.6/xfs_lrw.c
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_rw.h"
#include "xfs_acl.h"
#include "xfs_cap.h"
#include "xfs_mac.h"
#include "xfs_attr.h"
#include "xfs_inode_item.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_iomap.h"

#include <linux/capability.h>
#include <linux/writeback.h>


#if defined(XFS_RW_TRACE)
void
xfs_rw_enter_trace(
        int                     tag,
        xfs_iocore_t            *io,
        void                    *data,
        size_t                  segs,
        loff_t                  offset,
        int                     ioflags)
{
        xfs_inode_t     *ip = XFS_IO_INODE(io);

        if (ip->i_rwtrace == NULL)
                return;
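        /*
         * ktrace_enter() records sixteen pointer-sized slots, so each
         * 64-bit quantity (the on-disk inode size, the I/O offset and
         * io_new_size) is split into 32-bit high and low halves below.
         */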
        ktrace_enter(ip->i_rwtrace,
                (void *)(unsigned long)tag,
                (void *)ip,
                (void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
                (void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
                (void *)data,
                (void *)((unsigned long)segs),
                (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
                (void *)((unsigned long)(offset & 0xffffffff)),
                (void *)((unsigned long)ioflags),
                (void *)((unsigned long)((io->io_new_size >> 32) & 0xffffffff)),
                (void *)((unsigned long)(io->io_new_size & 0xffffffff)),
                (void *)NULL,
                (void *)NULL,
                (void *)NULL,
                (void *)NULL,
                (void *)NULL);
}

void
xfs_inval_cached_trace(
        xfs_iocore_t    *io,
        xfs_off_t       offset,
        xfs_off_t       len,
        xfs_off_t       first,
        xfs_off_t       last)
{
        xfs_inode_t     *ip = XFS_IO_INODE(io);

        if (ip->i_rwtrace == NULL)
                return;
        ktrace_enter(ip->i_rwtrace,
                (void *)(__psint_t)XFS_INVAL_CACHED,
                (void *)ip,
                (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
                (void *)((unsigned long)(offset & 0xffffffff)),
                (void *)((unsigned long)((len >> 32) & 0xffffffff)),
                (void *)((unsigned long)(len & 0xffffffff)),
                (void *)((unsigned long)((first >> 32) & 0xffffffff)),
                (void *)((unsigned long)(first & 0xffffffff)),
                (void *)((unsigned long)((last >> 32) & 0xffffffff)),
                (void *)((unsigned long)(last & 0xffffffff)),
                (void *)NULL,
                (void *)NULL,
                (void *)NULL,
                (void *)NULL,
                (void *)NULL,
                (void *)NULL);
}
#endif

/*
 *      xfs_iozero
 *
 *      xfs_iozero clears the specified range of the buffer supplied,
 *      and marks all the affected blocks as valid and modified.  If
 *      an affected block is not allocated, it will be allocated.  If
 *      an affected block is not completely overwritten, and is not
 *      valid before the operation, it will be read from disk before
 *      being partially zeroed.
 */
STATIC int
xfs_iozero(
        struct inode            *ip,    /* inode                        */
        loff_t                  pos,    /* offset in file               */
        size_t                  count,  /* size of data to zero         */
        loff_t                  end_size)       /* max file size to set */
{
        unsigned                bytes;
        struct page             *page;
        struct address_space    *mapping;
        char                    *kaddr;
        int                     status;

        mapping = ip->i_mapping;
        do {
                unsigned long index, offset;

                offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
                index = pos >> PAGE_CACHE_SHIFT;
                bytes = PAGE_CACHE_SIZE - offset;
                if (bytes > count)
                        bytes = count;

                status = -ENOMEM;
                page = grab_cache_page(mapping, index);
                if (!page)
                        break;

                kaddr = kmap(page);
                status = mapping->a_ops->prepare_write(NULL, page, offset,
                                                        offset + bytes);
                if (status) {
                        goto unlock;
                }

                memset((void *) (kaddr + offset), 0, bytes);
                flush_dcache_page(page);
                status = mapping->a_ops->commit_write(NULL, page, offset,
                                                        offset + bytes);
                if (!status) {
                        pos += bytes;
                        count -= bytes;
                        if (pos > i_size_read(ip))
                                i_size_write(ip, pos < end_size ? pos : end_size);
                }

unlock:
                kunmap(page);
                unlock_page(page);
                page_cache_release(page);
                if (status)
                        break;
        } while (count);

        return (-status);
}

ssize_t                 /* bytes read, or (-)  error */
xfs_read(
        bhv_desc_t              *bdp,
        struct kiocb            *iocb,
        const struct iovec      *iovp,
        unsigned int            segs,
        loff_t                  *offset,
        int                     ioflags,
        cred_t                  *credp)
{
        struct file             *file = iocb->ki_filp;
        struct inode            *inode = file->f_mapping->host;
        size_t                  size = 0;
        ssize_t                 ret;
        xfs_fsize_t             n;
        xfs_inode_t             *ip;
        xfs_mount_t             *mp;
        vnode_t                 *vp;
        unsigned long           seg;

        ip = XFS_BHVTOI(bdp);
        vp = BHV_TO_VNODE(bdp);
        mp = ip->i_mount;

        XFS_STATS_INC(xs_read_calls);

        /* START copy & waste from filemap.c */
        for (seg = 0; seg < segs; seg++) {
                const struct iovec *iv = &iovp[seg];

                /*
                 * If any segment has a negative length, or the cumulative
                 * length ever wraps negative then return -EINVAL.
                 */
                size += iv->iov_len;
                if (unlikely((ssize_t)(size|iv->iov_len) < 0))
                        return XFS_ERROR(-EINVAL);
        }
        /* END copy & waste from filemap.c */

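        /*
         * Direct I/O must be sector aligned with respect to the target
         * device (the realtime device for realtime files, otherwise the
         * data device).  Unaligned requests fail with EINVAL, except that
         * a read starting exactly at EOF simply returns 0.
         */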
        if (unlikely(ioflags & IO_ISDIRECT)) {
                xfs_buftarg_t   *target =
                        (ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
                                mp->m_rtdev_targp : mp->m_ddev_targp;
                if ((*offset & target->bt_smask) ||
                    (size & target->bt_smask)) {
                        if (*offset == ip->i_d.di_size) {
                                return (0);
                        }
                        return -XFS_ERROR(EINVAL);
                }
        }

        n = XFS_MAXIOFFSET(mp) - *offset;
        if ((n <= 0) || (size == 0))
                return 0;

        if (n < size)
                size = n;

        if (XFS_FORCED_SHUTDOWN(mp)) {
                return -EIO;
        }

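        /*
         * For direct reads, take i_mutex ahead of the XFS iolock (the
         * required lock order) so the read is serialised against buffered
         * writers, which also hold i_mutex.
         */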
        if (unlikely(ioflags & IO_ISDIRECT))
                mutex_lock(&inode->i_mutex);
        xfs_ilock(ip, XFS_IOLOCK_SHARED);

        if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ) &&
            !(ioflags & IO_INVIS)) {
                vrwlock_t locktype = VRWLOCK_READ;
                int dmflags = FILP_DELAY_FLAG(file) | DM_SEM_FLAG_RD(ioflags);

                ret = -XFS_SEND_DATA(mp, DM_EVENT_READ,
                                        BHV_TO_VNODE(bdp), *offset, size,
                                        dmflags, &locktype);
                if (ret) {
                        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
                        goto unlock_isem;
                }
        }

        xfs_rw_enter_trace(XFS_READ_ENTER, &ip->i_iocore,
                                (void *)iovp, segs, *offset, ioflags);
        ret = __generic_file_aio_read(iocb, iovp, segs, offset);
        if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
                ret = wait_on_sync_kiocb(iocb);
        if (ret > 0)
                XFS_STATS_ADD(xs_read_bytes, ret);

        xfs_iunlock(ip, XFS_IOLOCK_SHARED);

unlock_isem:
        if (unlikely(ioflags & IO_ISDIRECT))
                mutex_unlock(&inode->i_mutex);
        return ret;
}

ssize_t
xfs_sendfile(
        bhv_desc_t              *bdp,
        struct file             *filp,
        loff_t                  *offset,
        int                     ioflags,
        size_t                  count,
        read_actor_t            actor,
        void                    *target,
        cred_t                  *credp)
{
        ssize_t                 ret;
        xfs_fsize_t             n;
        xfs_inode_t             *ip;
        xfs_mount_t             *mp;
        vnode_t                 *vp;

        ip = XFS_BHVTOI(bdp);
        vp = BHV_TO_VNODE(bdp);
        mp = ip->i_mount;

        XFS_STATS_INC(xs_read_calls);

        n = XFS_MAXIOFFSET(mp) - *offset;
        if ((n <= 0) || (count == 0))
                return 0;

        if (n < count)
                count = n;

        if (XFS_FORCED_SHUTDOWN(ip->i_mount))
                return -EIO;

        xfs_ilock(ip, XFS_IOLOCK_SHARED);

        if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ) &&
            (!(ioflags & IO_INVIS))) {
                vrwlock_t locktype = VRWLOCK_READ;
                int error;

                error = XFS_SEND_DATA(mp, DM_EVENT_READ, BHV_TO_VNODE(bdp), *offset, count,
                                      FILP_DELAY_FLAG(filp), &locktype);
                if (error) {
                        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
                        return -error;
                }
        }
        xfs_rw_enter_trace(XFS_SENDFILE_ENTER, &ip->i_iocore,
                   (void *)(unsigned long)target, count, *offset, ioflags);
        ret = generic_file_sendfile(filp, offset, count, actor, target);

        xfs_iunlock(ip, XFS_IOLOCK_SHARED);

        if (ret > 0)
                XFS_STATS_ADD(xs_read_bytes, ret);

        return ret;
}

/*
 * This routine is called to handle zeroing any space in the last
 * block of the file that is beyond the EOF.  We do this since the
 * size is being increased without writing anything to that block
 * and we don't want anyone to read the garbage on the disk.
 */
STATIC int                              /* error (positive) */
xfs_zero_last_block(
        struct inode    *ip,
        xfs_iocore_t    *io,
        xfs_fsize_t     isize,
        xfs_fsize_t     end_size)
{
        xfs_fileoff_t   last_fsb;
        xfs_mount_t     *mp;
        int             nimaps;
        int             zero_offset;
        int             zero_len;
        int             error = 0;
        xfs_bmbt_irec_t imap;
        loff_t          loff;

        ASSERT(ismrlocked(io->io_lock, MR_UPDATE) != 0);

        mp = io->io_mount;

        zero_offset = XFS_B_FSB_OFFSET(mp, isize);
        if (zero_offset == 0) {
                /*
                 * There are no extra bytes in the last block on disk to
                 * zero, so return.
                 */
                return 0;
        }

        last_fsb = XFS_B_TO_FSBT(mp, isize);
        nimaps = 1;
        error = XFS_BMAPI(mp, NULL, io, last_fsb, 1, 0, NULL, 0, &imap,
                          &nimaps, NULL);
        if (error) {
                return error;
        }
        ASSERT(nimaps > 0);
        /*
         * If the block underlying isize is just a hole, then there
         * is nothing to zero.
         */
        if (imap.br_startblock == HOLESTARTBLOCK) {
                return 0;
        }
        /*
         * Zero the part of the last block beyond the EOF, and write it
         * out sync.  We need to drop the ilock while we do this so we
         * don't deadlock when the buffer cache calls back to us.
         */
        XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL| XFS_EXTSIZE_RD);
        loff = XFS_FSB_TO_B(mp, last_fsb);

        zero_len = mp->m_sb.sb_blocksize - zero_offset;

        error = xfs_iozero(ip, loff + zero_offset, zero_len, end_size);

        XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
        ASSERT(error >= 0);
        return error;
}

/*
 * Zero any on disk space between the current EOF and the new,
 * larger EOF.  This handles the normal case of zeroing the remainder
 * of the last block in the file and the unusual case of zeroing blocks
 * out beyond the size of the file.  This second case only happens
 * with fixed size extents and when the system crashes before the inode
 * size was updated but after blocks were allocated.  Holes and
 * unwritten extents in the range are not filled; they are left
 * alone as holes.
 */
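/*
 * Example with 4k filesystem blocks: growing a 6000 byte file with a
 * write starting at offset 20000 means xfs_zero_last_block() zeroes
 * bytes 6000-8191 of the block holding the old EOF, and the loop in
 * xfs_zero_eof() then zeroes file blocks 2-4 (bytes 8192-20479) where
 * they are allocated and written; holes and unwritten extents are
 * skipped.
 */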

int                                     /* error (positive) */
xfs_zero_eof(
        vnode_t         *vp,
        xfs_iocore_t    *io,
        xfs_off_t       offset,         /* starting I/O offset */
        xfs_fsize_t     isize,          /* current inode size */
        xfs_fsize_t     end_size)       /* terminal inode size */
{
        struct inode    *ip = LINVFS_GET_IP(vp);
        xfs_fileoff_t   start_zero_fsb;
        xfs_fileoff_t   end_zero_fsb;
        xfs_fileoff_t   zero_count_fsb;
        xfs_fileoff_t   last_fsb;
        xfs_extlen_t    buf_len_fsb;
        xfs_mount_t     *mp;
        int             nimaps;
        int             error = 0;
        xfs_bmbt_irec_t imap;

        ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
        ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
        ASSERT(offset > isize);

        mp = io->io_mount;

        /*
         * First handle zeroing the block on which isize resides.
         * We only zero a part of that block so it is handled specially.
         */
        error = xfs_zero_last_block(ip, io, isize, end_size);
        if (error) {
                ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
                ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
                return error;
        }

        /*
         * Calculate the range between the new size and the old
         * where blocks needing to be zeroed may exist.  To get the
         * block where the last byte in the file currently resides,
         * we need to subtract one from the size and truncate back
         * to a block boundary.  We subtract 1 in case the size is
         * exactly on a block boundary.
         */
        last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
        start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
        end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
        ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
        if (last_fsb == end_zero_fsb) {
                /*
                 * The size was only incremented on its last block.
                 * We took care of that above, so just return.
                 */
                return 0;
        }

        ASSERT(start_zero_fsb <= end_zero_fsb);
        while (start_zero_fsb <= end_zero_fsb) {
                nimaps = 1;
                zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
                error = XFS_BMAPI(mp, NULL, io, start_zero_fsb, zero_count_fsb,
                                  0, NULL, 0, &imap, &nimaps, NULL);
                if (error) {
                        ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
                        ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
                        return error;
                }
                ASSERT(nimaps > 0);

                if (imap.br_state == XFS_EXT_UNWRITTEN ||
                    imap.br_startblock == HOLESTARTBLOCK) {
                        /*
                         * This loop handles initializing pages that were
                         * partially initialized by the code below this
                         * loop. It basically zeroes the part of the page
                         * that sits on a hole and sets the page as P_HOLE
                         * and calls remapf if it is a mapped file.
                         */
                        start_zero_fsb = imap.br_startoff + imap.br_blockcount;
                        ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
                        continue;
                }

                /*
                 * There are blocks in the range requested.
                 * Zero them a single write at a time.  We actually
                 * don't zero the entire range returned if it is
                 * too big and simply loop around to get the rest.
                 * That is not the most efficient thing to do, but it
                 * is simple and this path should not be exercised often.
                 */
                buf_len_fsb = XFS_FILBLKS_MIN(imap.br_blockcount,
                                              mp->m_writeio_blocks << 8);
                /*
                 * Drop the inode lock while we're doing the I/O.
                 * We'll still have the iolock to protect us.
                 */
                XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);

                error = xfs_iozero(ip,
                                   XFS_FSB_TO_B(mp, start_zero_fsb),
                                   XFS_FSB_TO_B(mp, buf_len_fsb),
                                   end_size);

                if (error) {
                        goto out_lock;
                }

                start_zero_fsb = imap.br_startoff + buf_len_fsb;
                ASSERT(start_zero_fsb <= (end_zero_fsb + 1));

                XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
        }

        return 0;

out_lock:

        XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
        ASSERT(error >= 0);
        return error;
}

ssize_t                         /* bytes written, or (-) error */
xfs_write(
        bhv_desc_t              *bdp,
        struct kiocb            *iocb,
        const struct iovec      *iovp,
        unsigned int            nsegs,
        loff_t                  *offset,
        int                     ioflags,
        cred_t                  *credp)
{
        struct file             *file = iocb->ki_filp;
        struct address_space    *mapping = file->f_mapping;
        struct inode            *inode = mapping->host;
        unsigned long           segs = nsegs;
        xfs_inode_t             *xip;
        xfs_mount_t             *mp;
        ssize_t                 ret = 0, error = 0;
        xfs_fsize_t             isize, new_size;
        xfs_iocore_t            *io;
        vnode_t                 *vp;
        unsigned long           seg;
        int                     iolock;
        int                     eventsent = 0;
        vrwlock_t               locktype;
        size_t                  ocount = 0, count;
        loff_t                  pos;
        int                     need_isem = 1, need_flush = 0;

        XFS_STATS_INC(xs_write_calls);

        vp = BHV_TO_VNODE(bdp);
        xip = XFS_BHVTOI(bdp);

        for (seg = 0; seg < segs; seg++) {
                const struct iovec *iv = &iovp[seg];

                /*
                 * If any segment has a negative length, or the cumulative
                 * length ever wraps negative then return -EINVAL.
                 */
                ocount += iv->iov_len;
                if (unlikely((ssize_t)(ocount|iv->iov_len) < 0))
                        return -EINVAL;
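                /*
                 * Trim the request to the leading segments whose user
                 * buffers pass access_ok(); if even the first segment is
                 * bad, fail with EFAULT.
                 */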
                if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len))
                        continue;
                if (seg == 0)
                        return -EFAULT;
                segs = seg;
                ocount -= iv->iov_len;  /* This segment is no good */
                break;
        }

        count = ocount;
        pos = *offset;

        if (count == 0)
                return 0;

        io = &xip->i_iocore;
        mp = io->io_mount;

        if (XFS_FORCED_SHUTDOWN(mp))
                return -EIO;

        fs_check_frozen(vp->v_vfsp, SB_FREEZE_WRITE);

        if (ioflags & IO_ISDIRECT) {
                xfs_buftarg_t   *target =
                        (xip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
                                mp->m_rtdev_targp : mp->m_ddev_targp;

                if ((pos & target->bt_smask) || (count & target->bt_smask))
                        return XFS_ERROR(-EINVAL);

                if (!VN_CACHED(vp) && pos < i_size_read(inode))
                        need_isem = 0;

                if (VN_CACHED(vp))
                        need_flush = 1;
        }

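        /*
         * Direct writes below i_size against an uncached mapping only need
         * the shared iolock; all other writes take i_mutex and the
         * exclusive iolock.
         */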
relock:
        if (need_isem) {
                iolock = XFS_IOLOCK_EXCL;
                locktype = VRWLOCK_WRITE;

                mutex_lock(&inode->i_mutex);
        } else {
                iolock = XFS_IOLOCK_SHARED;
                locktype = VRWLOCK_WRITE_DIRECT;
        }

        xfs_ilock(xip, XFS_ILOCK_EXCL|iolock);

        isize = i_size_read(inode);

        if (file->f_flags & O_APPEND)
                *offset = isize;

start:
        error = -generic_write_checks(file, &pos, &count,
                                        S_ISBLK(inode->i_mode));
        if (error) {
                xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
                goto out_unlock_isem;
        }

        new_size = pos + count;
        if (new_size > isize)
                io->io_new_size = new_size;

        if ((DM_EVENT_ENABLED(vp->v_vfsp, xip, DM_EVENT_WRITE) &&
            !(ioflags & IO_INVIS) && !eventsent)) {
                loff_t          savedsize = pos;
                int             dmflags = FILP_DELAY_FLAG(file);

                if (need_isem)
                        dmflags |= DM_FLAGS_IMUX;

                xfs_iunlock(xip, XFS_ILOCK_EXCL);
                error = XFS_SEND_DATA(xip->i_mount, DM_EVENT_WRITE, vp,
                                      pos, count,
                                      dmflags, &locktype);
                if (error) {
                        xfs_iunlock(xip, iolock);
                        goto out_unlock_isem;
                }
                xfs_ilock(xip, XFS_ILOCK_EXCL);
                eventsent = 1;

                /*
                 * The iolock was dropped and reacquired in XFS_SEND_DATA
                 * so we have to recheck the size when appending.
                 * We will only "goto start;" once, since having sent the
                 * event prevents another call to XFS_SEND_DATA, which is
                 * what allows the size to change in the first place.
                 */
                if ((file->f_flags & O_APPEND) && savedsize != isize) {
                        pos = isize = xip->i_d.di_size;
                        goto start;
                }
        }

        if (likely(!(ioflags & IO_INVIS))) {
                file_update_time(file);
                xfs_ichgtime_fast(xip, inode,
                                  XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
        }

        /*
         * If the offset is beyond the size of the file, we have a couple
         * of things to do. First, if there is already space allocated
         * we need to either create holes or zero the disk or ...
         *
         * If there is a page where the previous size lands, we need
         * to zero it out up to the new size.
         */

        if (pos > isize) {
                error = xfs_zero_eof(BHV_TO_VNODE(bdp), io, pos,
                                        isize, pos + count);
                if (error) {
                        xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
                        goto out_unlock_isem;
                }
        }
        xfs_iunlock(xip, XFS_ILOCK_EXCL);

        /*
         * If we're writing the file then make sure to clear the
         * setuid and setgid bits if the process is not being run
         * by root.  This keeps people from modifying setuid and
         * setgid binaries.
         */

        if (((xip->i_d.di_mode & S_ISUID) ||
            ((xip->i_d.di_mode & (S_ISGID | S_IXGRP)) ==
                (S_ISGID | S_IXGRP))) &&
             !capable(CAP_FSETID)) {
                error = xfs_write_clear_setuid(xip);
                if (likely(!error))
                        error = -remove_suid(file->f_dentry);
                if (unlikely(error)) {
                        xfs_iunlock(xip, iolock);
                        goto out_unlock_isem;
                }
        }

retry:
        /* We can write back this queue in page reclaim */
        current->backing_dev_info = mapping->backing_dev_info;

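        /*
         * A direct write over a range with cached pages must first write
         * back and invalidate the page cache from the write offset to the
         * end of the file, so the direct I/O does not race with stale
         * cached data.
         */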
        if ((ioflags & IO_ISDIRECT)) {
                if (need_flush) {
                        xfs_inval_cached_trace(io, pos, -1,
                                        ctooff(offtoct(pos)), -1);
                        VOP_FLUSHINVAL_PAGES(vp, ctooff(offtoct(pos)),
                                        -1, FI_REMAPF_LOCKED);
                }

                if (need_isem) {
                        /* demote the lock now the cached pages are gone */
                        XFS_ILOCK_DEMOTE(mp, io, XFS_IOLOCK_EXCL);
                        mutex_unlock(&inode->i_mutex);

                        iolock = XFS_IOLOCK_SHARED;
                        locktype = VRWLOCK_WRITE_DIRECT;
                        need_isem = 0;
                }

                xfs_rw_enter_trace(XFS_DIOWR_ENTER, io, (void *)iovp, segs,
                                *offset, ioflags);
                ret = generic_file_direct_write(iocb, iovp,
                                &segs, pos, offset, count, ocount);

                /*
                 * direct-io write to a hole: fall through to buffered I/O
                 * for completing the rest of the request.
                 */
                if (ret >= 0 && ret != count) {
                        XFS_STATS_ADD(xs_write_bytes, ret);

                        pos += ret;
                        count -= ret;

                        need_isem = 1;
                        ioflags &= ~IO_ISDIRECT;
                        xfs_iunlock(xip, iolock);
                        goto relock;
                }
        } else {
                xfs_rw_enter_trace(XFS_WRITE_ENTER, io, (void *)iovp, segs,
                                *offset, ioflags);
                ret = generic_file_buffered_write(iocb, iovp, segs,
                                pos, offset, count, ret);
        }

        current->backing_dev_info = NULL;

        if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
                ret = wait_on_sync_kiocb(iocb);

        if ((ret == -ENOSPC) &&
            DM_EVENT_ENABLED(vp->v_vfsp, xip, DM_EVENT_NOSPACE) &&
            !(ioflags & IO_INVIS)) {

                xfs_rwunlock(bdp, locktype);
                if (need_isem)
                        mutex_unlock(&inode->i_mutex);
                error = XFS_SEND_NAMESP(xip->i_mount, DM_EVENT_NOSPACE, vp,
                                DM_RIGHT_NULL, vp, DM_RIGHT_NULL, NULL, NULL,
                                0, 0, 0); /* Delay flag intentionally  unused */
                if (error)
                        goto out_nounlocks;
                if (need_isem)
                        mutex_lock(&inode->i_mutex);
                xfs_rwlock(bdp, locktype);
                pos = xip->i_d.di_size;
                ret = 0;
                goto retry;
        }

        isize = i_size_read(inode);
        if (unlikely(ret < 0 && ret != -EFAULT && *offset > isize))
                *offset = isize;

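        /*
         * If the write extended the file, push the new size into the
         * incore inode.  Check without the ILOCK first, then recheck under
         * it, since another writer may have raced us.
         */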
        if (*offset > xip->i_d.di_size) {
                xfs_ilock(xip, XFS_ILOCK_EXCL);
                if (*offset > xip->i_d.di_size) {
                        xip->i_d.di_size = *offset;
                        i_size_write(inode, *offset);
                        xip->i_update_core = 1;
                        xip->i_update_size = 1;
                }
                xfs_iunlock(xip, XFS_ILOCK_EXCL);
        }

        error = -ret;
        if (ret <= 0)
                goto out_unlock_internal;

        XFS_STATS_ADD(xs_write_bytes, ret);

        /* Handle various SYNC-type writes */
        if ((file->f_flags & O_SYNC) || IS_SYNC(inode)) {
                /*
                 * If we're treating this as O_DSYNC and we have not updated the
                 * size, force the log.
                 */
                if (!(mp->m_flags & XFS_MOUNT_OSYNCISOSYNC) &&
                    !(xip->i_update_size)) {
                        xfs_inode_log_item_t    *iip = xip->i_itemp;

                        /*
                         * If an allocation transaction occurred
                         * without extending the size, then we have to force
                         * the log up the proper point to ensure that the
                         * allocation is permanent.  We can't count on
                         * the fact that buffered writes lock out direct I/O
                         * writes - the direct I/O write could have extended
                         * the size nontransactionally, then finished before
                         * we started.  xfs_write_file will think that the file
                         * didn't grow but the update isn't safe unless the
                         * size change is logged.
                         *
                         * Force the log if we've committed a transaction
                         * against the inode or if someone else has and
                         * the commit record hasn't gone to disk (e.g.
                         * the inode is pinned).  This guarantees that
                         * all changes affecting the inode are permanent
                         * when we return.
                         */
                        if (iip && iip->ili_last_lsn) {
                                xfs_log_force(mp, iip->ili_last_lsn,
                                                XFS_LOG_FORCE | XFS_LOG_SYNC);
                        } else if (xfs_ipincount(xip) > 0) {
                                xfs_log_force(mp, (xfs_lsn_t)0,
                                                XFS_LOG_FORCE | XFS_LOG_SYNC);
                        }

                } else {
                        xfs_trans_t     *tp;

                        /*
                         * O_SYNC or O_DSYNC _with_ a size update are handled
                         * the same way.
                         *
                         * If the write was synchronous then we need to make
                         * sure that the inode modification time is permanent.
                         * We'll have updated the timestamp above, so here
                         * we use a synchronous transaction to log the inode.
                         * It's not fast, but it's necessary.
                         *
                         * If this a dsync write and the size got changed
                         * non-transactionally, then we need to ensure that
                         * the size change gets logged in a synchronous
                         * transaction.
                         */

                        tp = xfs_trans_alloc(mp, XFS_TRANS_WRITE_SYNC);
                        if ((error = xfs_trans_reserve(tp, 0,
                                                      XFS_SWRITE_LOG_RES(mp),
                                                      0, 0, 0))) {
                                /* Transaction reserve failed */
                                xfs_trans_cancel(tp, 0);
                        } else {
                                /* Transaction reserve successful */
                                xfs_ilock(xip, XFS_ILOCK_EXCL);
                                xfs_trans_ijoin(tp, xip, XFS_ILOCK_EXCL);
                                xfs_trans_ihold(tp, xip);
                                xfs_trans_log_inode(tp, xip, XFS_ILOG_CORE);
                                xfs_trans_set_sync(tp);
                                error = xfs_trans_commit(tp, 0, NULL);
                                xfs_iunlock(xip, XFS_ILOCK_EXCL);
                        }
                        if (error)
                                goto out_unlock_internal;
                }

                xfs_rwunlock(bdp, locktype);
                if (need_isem)
                        mutex_unlock(&inode->i_mutex);

                error = sync_page_range(inode, mapping, pos, ret);
                if (!error)
                        error = ret;
                return error;
        }

 out_unlock_internal:
        xfs_rwunlock(bdp, locktype);
 out_unlock_isem:
        if (need_isem)
                mutex_unlock(&inode->i_mutex);
 out_nounlocks:
        return -error;
}

/*
 * All xfs metadata buffers except log state machine buffers
 * get this attached as their b_bdstrat callback function.
 * This is so that we can catch a buffer
 * after prematurely unpinning it to forcibly shutdown the filesystem.
 */
int
xfs_bdstrat_cb(struct xfs_buf *bp)
{
        xfs_mount_t     *mp;

        mp = XFS_BUF_FSPRIVATE3(bp, xfs_mount_t *);
        if (!XFS_FORCED_SHUTDOWN(mp)) {
                xfs_buf_iorequest(bp);
                return 0;
        } else {
                xfs_buftrace("XFS__BDSTRAT IOERROR", bp);
                /*
                 * Metadata write that didn't get logged but
                 * written delayed anyway. These aren't associated
                 * with a transaction, and can be ignored.
                 */
                if (XFS_BUF_IODONE_FUNC(bp) == NULL &&
                    (XFS_BUF_ISREAD(bp)) == 0)
                        return (xfs_bioerror_relse(bp));
                else
                        return (xfs_bioerror(bp));
        }
}


int
xfs_bmap(bhv_desc_t     *bdp,
        xfs_off_t       offset,
        ssize_t         count,
        int             flags,
        xfs_iomap_t     *iomapp,
        int             *niomaps)
{
        xfs_inode_t     *ip = XFS_BHVTOI(bdp);
        xfs_iocore_t    *io = &ip->i_iocore;

        ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
        ASSERT(((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != 0) ==
               ((ip->i_iocore.io_flags & XFS_IOCORE_RT) != 0));

        return xfs_iomap(io, offset, count, flags, iomapp, niomaps);
}

/*
 * Wrapper around bdstrat so that we can stop data
 * from going to disk in case we are shutting down the filesystem.
 * Typically user data goes through this path; one of the exceptions
 * is the superblock.
 */
int
xfsbdstrat(
        struct xfs_mount        *mp,
        struct xfs_buf          *bp)
{
        ASSERT(mp);
        if (!XFS_FORCED_SHUTDOWN(mp)) {
                /* Grio redirection would go here
                 * if (XFS_BUF_IS_GRIO(bp)) {
                 */

                xfs_buf_iorequest(bp);
                return 0;
        }

        xfs_buftrace("XFSBDSTRAT IOERROR", bp);
        return (xfs_bioerror_relse(bp));
}

/*
 * If the underlying (data/log/rt) device is readonly, there are some
 * operations that cannot proceed.
 */
int
xfs_dev_is_read_only(
        xfs_mount_t             *mp,
        char                    *message)
{
        if (xfs_readonly_buftarg(mp->m_ddev_targp) ||
            xfs_readonly_buftarg(mp->m_logdev_targp) ||
            (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) {
                cmn_err(CE_NOTE,
                        "XFS: %s required on read-only device.", message);
                cmn_err(CE_NOTE,
                        "XFS: write access unavailable, cannot proceed.");
                return EROFS;
        }
        return 0;
}