/*
 * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Further, this software is distributed without any warranty that it is
 * free of the rightful claim of any third person regarding infringement
 * or the like.  Any license provided herein, whether implied or
 * otherwise, applies only to this software file.  Patent licenses, if
 * any, provided herein do not apply to combinations of this program with
 * other software, or any other product whatsoever.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
 * Mountain View, CA  94043, or:
 *
 * http://www.sgi.com
 *
 * For further information regarding this notice, see:
 *
 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
 */
/*
 *  fs/xfs/linux-2.6/xfs_lrw.c (Linux Read Write stuff)
 *
 */

#include "xfs.h"

#include "xfs_fs.h"
#include "xfs_inum.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_alloc_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_attr_sf.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_bit.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_rw.h"
#include "xfs_acl.h"
#include "xfs_cap.h"
#include "xfs_mac.h"
#include "xfs_attr.h"
#include "xfs_inode_item.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_iomap.h"

#include <linux/capability.h>
#include <linux/writeback.h>


#if defined(XFS_RW_TRACE)
void
xfs_rw_enter_trace(
        int                     tag,
        xfs_iocore_t            *io,
        void                    *data,
        size_t                  segs,
        loff_t                  offset,
        int                     ioflags)
{
        xfs_inode_t     *ip = XFS_IO_INODE(io);

        if (ip->i_rwtrace == NULL)
                return;
        ktrace_enter(ip->i_rwtrace,
                (void *)(unsigned long)tag,
                (void *)ip,
                (void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
                (void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
                (void *)data,
                (void *)((unsigned long)segs),
                (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
                (void *)((unsigned long)(offset & 0xffffffff)),
                (void *)((unsigned long)ioflags),
                (void *)((unsigned long)((io->io_new_size >> 32) & 0xffffffff)),
                (void *)((unsigned long)(io->io_new_size & 0xffffffff)),
                (void *)NULL,
                (void *)NULL,
                (void *)NULL,
                (void *)NULL,
                (void *)NULL);
}
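
/*
 * Note that ktrace_enter() records a fixed vector of sixteen values,
 * so 64-bit quantities such as file offsets and sizes are split into
 * two 32-bit halves (the "(x >> 32) & 0xffffffff" pairs above and
 * below) and any unused slots are padded with NULL.
 */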

void
xfs_inval_cached_trace(
        xfs_iocore_t    *io,
        xfs_off_t       offset,
        xfs_off_t       len,
        xfs_off_t       first,
        xfs_off_t       last)
{
        xfs_inode_t     *ip = XFS_IO_INODE(io);

        if (ip->i_rwtrace == NULL)
                return;
        ktrace_enter(ip->i_rwtrace,
                (void *)(__psint_t)XFS_INVAL_CACHED,
                (void *)ip,
                (void *)((unsigned long)((offset >> 32) & 0xffffffff)),
                (void *)((unsigned long)(offset & 0xffffffff)),
                (void *)((unsigned long)((len >> 32) & 0xffffffff)),
                (void *)((unsigned long)(len & 0xffffffff)),
                (void *)((unsigned long)((first >> 32) & 0xffffffff)),
                (void *)((unsigned long)(first & 0xffffffff)),
                (void *)((unsigned long)((last >> 32) & 0xffffffff)),
                (void *)((unsigned long)(last & 0xffffffff)),
                (void *)NULL,
                (void *)NULL,
                (void *)NULL,
                (void *)NULL,
                (void *)NULL,
                (void *)NULL);
}
#endif

/*
 *      xfs_iozero
 *
 *      xfs_iozero clears the specified range of the buffer supplied,
 *      and marks all the affected blocks as valid and modified.  If
 *      an affected block is not allocated, it will be allocated.  If
 *      an affected block is not completely overwritten, and is not
 *      valid before the operation, it will be read from disk before
 *      being partially zeroed.
 */
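
/*
 * Illustrative example, assuming 4k pages: a call with pos == 5000 and
 * count == 6000 is split by the loop below into two chunks, 3192 bytes
 * at offset 904 within page 1, then 2808 bytes at offset 0 within
 * page 2.  Since neither chunk covers a whole page, either page may
 * have to be read from disk before being partially zeroed.
 */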
STATIC int
xfs_iozero(
        struct inode            *ip,    /* inode                        */
        loff_t                  pos,    /* offset in file               */
        size_t                  count,  /* size of data to zero         */
        loff_t                  end_size)       /* max file size to set */
{
        unsigned                bytes;
        struct page             *page;
        struct address_space    *mapping;
        char                    *kaddr;
        int                     status;

        mapping = ip->i_mapping;
        do {
                unsigned long index, offset;

                offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
                index = pos >> PAGE_CACHE_SHIFT;
                bytes = PAGE_CACHE_SIZE - offset;
                if (bytes > count)
                        bytes = count;

                status = -ENOMEM;
                page = grab_cache_page(mapping, index);
                if (!page)
                        break;

                kaddr = kmap(page);
                status = mapping->a_ops->prepare_write(NULL, page, offset,
                                                        offset + bytes);
                if (status) {
                        goto unlock;
                }

                memset((void *) (kaddr + offset), 0, bytes);
                flush_dcache_page(page);
                status = mapping->a_ops->commit_write(NULL, page, offset,
                                                        offset + bytes);
                if (!status) {
                        pos += bytes;
                        count -= bytes;
                        if (pos > i_size_read(ip))
                                i_size_write(ip, pos < end_size ? pos : end_size);
                }

unlock:
                kunmap(page);
                unlock_page(page);
                page_cache_release(page);
                if (status)
                        break;
        } while (count);

        return (-status);
}

ssize_t                 /* bytes read, or (-) error */
xfs_read(
        bhv_desc_t              *bdp,
        struct kiocb            *iocb,
        const struct iovec      *iovp,
        unsigned int            segs,
        loff_t                  *offset,
        int                     ioflags,
        cred_t                  *credp)
{
        struct file             *file = iocb->ki_filp;
        struct inode            *inode = file->f_mapping->host;
        size_t                  size = 0;
        ssize_t                 ret;
        xfs_fsize_t             n;
        xfs_inode_t             *ip;
        xfs_mount_t             *mp;
        vnode_t                 *vp;
        unsigned long           seg;

        ip = XFS_BHVTOI(bdp);
        vp = BHV_TO_VNODE(bdp);
        mp = ip->i_mount;

        XFS_STATS_INC(xs_read_calls);

        /* START copy & waste from filemap.c */
        for (seg = 0; seg < segs; seg++) {
                const struct iovec *iv = &iovp[seg];

                /*
                 * If any segment has a negative length, or the cumulative
                 * length ever wraps negative then return -EINVAL.
                 */
                size += iv->iov_len;
                if (unlikely((ssize_t)(size|iv->iov_len) < 0))
                        return -XFS_ERROR(EINVAL);
        }
        /* END copy & waste from filemap.c */
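
        /*
         * The (ssize_t)(size|iv->iov_len) test above catches both failure
         * modes at once: a segment whose length has the sign bit set, and
         * a running total that has wrapped past SSIZE_MAX.  E.g. on a
         * 32-bit machine two 0x7fffffff-byte segments sum to 0xfffffffe,
         * which is negative as an ssize_t.
         */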

        if (unlikely(ioflags & IO_ISDIRECT)) {
                xfs_buftarg_t   *target =
                        (ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
                                mp->m_rtdev_targp : mp->m_ddev_targp;
                if ((*offset & target->pbr_smask) ||
                    (size & target->pbr_smask)) {
                        if (*offset == ip->i_d.di_size) {
                                return (0);
                        }
                        return -XFS_ERROR(EINVAL);
                }
        }
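
        /*
         * For illustration: with 512-byte sectors pbr_smask is 0x1ff, so
         * a direct read at offset 1000, or of 100 bytes, fails the check
         * above, while offset 1024 and length 512 pass.  A misaligned
         * read that starts exactly at EOF returns 0 rather than EINVAL.
         */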

        n = XFS_MAXIOFFSET(mp) - *offset;
        if ((n <= 0) || (size == 0))
                return 0;

        if (n < size)
                size = n;

        if (XFS_FORCED_SHUTDOWN(mp)) {
                return -EIO;
        }

        if (unlikely(ioflags & IO_ISDIRECT))
                down(&inode->i_sem);
        xfs_ilock(ip, XFS_IOLOCK_SHARED);

        if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ) &&
            !(ioflags & IO_INVIS)) {
                vrwlock_t locktype = VRWLOCK_READ;

                ret = -XFS_SEND_DATA(mp, DM_EVENT_READ,
                                        BHV_TO_VNODE(bdp), *offset, size,
                                        FILP_DELAY_FLAG(file), &locktype);
                if (ret) {
                        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
                        goto unlock_isem;
                }
        }

        xfs_rw_enter_trace(XFS_READ_ENTER, &ip->i_iocore,
                                (void *)iovp, segs, *offset, ioflags);
        ret = __generic_file_aio_read(iocb, iovp, segs, offset);
        if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
                ret = wait_on_sync_kiocb(iocb);
        if (ret > 0)
                XFS_STATS_ADD(xs_read_bytes, ret);

        xfs_iunlock(ip, XFS_IOLOCK_SHARED);

        if (likely(!(ioflags & IO_INVIS)))
                xfs_ichgtime(ip, XFS_ICHGTIME_ACC);

unlock_isem:
        if (unlikely(ioflags & IO_ISDIRECT))
                up(&inode->i_sem);
        return ret;
}

ssize_t
xfs_sendfile(
        bhv_desc_t              *bdp,
        struct file             *filp,
        loff_t                  *offset,
        int                     ioflags,
        size_t                  count,
        read_actor_t            actor,
        void                    *target,
        cred_t                  *credp)
{
        ssize_t                 ret;
        xfs_fsize_t             n;
        xfs_inode_t             *ip;
        xfs_mount_t             *mp;
        vnode_t                 *vp;

        ip = XFS_BHVTOI(bdp);
        vp = BHV_TO_VNODE(bdp);
        mp = ip->i_mount;

        XFS_STATS_INC(xs_read_calls);

        n = XFS_MAXIOFFSET(mp) - *offset;
        if ((n <= 0) || (count == 0))
                return 0;

        if (n < count)
                count = n;

        if (XFS_FORCED_SHUTDOWN(ip->i_mount))
                return -EIO;

        xfs_ilock(ip, XFS_IOLOCK_SHARED);

        if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ) &&
            (!(ioflags & IO_INVIS))) {
                vrwlock_t locktype = VRWLOCK_READ;
                int error;

                error = XFS_SEND_DATA(mp, DM_EVENT_READ, BHV_TO_VNODE(bdp),
                                      *offset, count,
                                      FILP_DELAY_FLAG(filp), &locktype);
                if (error) {
                        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
                        return -error;
                }
        }
        xfs_rw_enter_trace(XFS_SENDFILE_ENTER, &ip->i_iocore,
                   (void *)(unsigned long)target, count, *offset, ioflags);
        ret = generic_file_sendfile(filp, offset, count, actor, target);

        xfs_iunlock(ip, XFS_IOLOCK_SHARED);

        if (ret > 0)
                XFS_STATS_ADD(xs_read_bytes, ret);

        if (likely(!(ioflags & IO_INVIS)))
                xfs_ichgtime(ip, XFS_ICHGTIME_ACC);

        return ret;
}

/*
 * This routine is called to handle zeroing any space in the last
 * block of the file that is beyond the EOF.  We do this since the
 * size is being increased without writing anything to that block
 * and we don't want anyone to read the garbage on the disk.
 */
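/*
 * For example, with 4k blocks and isize == 9000, the tail of block 2
 * (bytes 9000..12287) may hold stale disk contents; it is zeroed here
 * before the size change makes it readable.
 */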
STATIC int                              /* error (positive) */
xfs_zero_last_block(
        struct inode    *ip,
        xfs_iocore_t    *io,
        xfs_off_t       offset,
        xfs_fsize_t     isize,
        xfs_fsize_t     end_size)
{
        xfs_fileoff_t   last_fsb;
        xfs_mount_t     *mp;
        int             nimaps;
        int             zero_offset;
        int             zero_len;
        int             isize_fsb_offset;
        int             error = 0;
        xfs_bmbt_irec_t imap;
        loff_t          loff;
        size_t          lsize;

        ASSERT(ismrlocked(io->io_lock, MR_UPDATE) != 0);
        ASSERT(offset > isize);

        mp = io->io_mount;

        isize_fsb_offset = XFS_B_FSB_OFFSET(mp, isize);
        if (isize_fsb_offset == 0) {
                /*
                 * There are no extra bytes in the last block on disk to
                 * zero, so return.
                 */
                return 0;
        }

        last_fsb = XFS_B_TO_FSBT(mp, isize);
        nimaps = 1;
        error = XFS_BMAPI(mp, NULL, io, last_fsb, 1, 0, NULL, 0, &imap,
                          &nimaps, NULL);
        if (error) {
                return error;
        }
        ASSERT(nimaps > 0);
        /*
         * If the block underlying isize is just a hole, then there
         * is nothing to zero.
         */
        if (imap.br_startblock == HOLESTARTBLOCK) {
                return 0;
        }
        /*
         * Zero the part of the last block beyond the EOF, and write it
         * out sync.  We need to drop the ilock while we do this so we
         * don't deadlock when the buffer cache calls back to us.
         */
        XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
        loff = XFS_FSB_TO_B(mp, last_fsb);
        lsize = XFS_FSB_TO_B(mp, 1);

        zero_offset = isize_fsb_offset;
        zero_len = mp->m_sb.sb_blocksize - isize_fsb_offset;

        error = xfs_iozero(ip, loff + zero_offset, zero_len, end_size);

        XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
        ASSERT(error >= 0);
        return error;
}

/*
 * Zero any on disk space between the current EOF and the new,
 * larger EOF.  This handles the normal case of zeroing the remainder
 * of the last block in the file and the unusual case of zeroing blocks
 * out beyond the size of the file.  This second case only happens
 * with fixed size extents and when the system crashes before the inode
 * size was updated but after blocks were allocated.  Holes in the
 * range are left alone as holes.
 */
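/*
 * For example, with 4k blocks, extending a 10000-byte file by a write
 * at offset 20000: xfs_zero_last_block() zeroes the tail of block 2
 * (bytes 10000..12287), then the loop below zeroes blocks 3 and 4 if
 * anything is allocated there.
 */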

int                                     /* error (positive) */
xfs_zero_eof(
        vnode_t         *vp,
        xfs_iocore_t    *io,
        xfs_off_t       offset,         /* starting I/O offset */
        xfs_fsize_t     isize,          /* current inode size */
        xfs_fsize_t     end_size)       /* terminal inode size */
{
        struct inode    *ip = LINVFS_GET_IP(vp);
        xfs_fileoff_t   start_zero_fsb;
        xfs_fileoff_t   end_zero_fsb;
        xfs_fileoff_t   prev_zero_fsb;
        xfs_fileoff_t   zero_count_fsb;
        xfs_fileoff_t   last_fsb;
        xfs_extlen_t    buf_len_fsb;
        xfs_extlen_t    prev_zero_count;
        xfs_mount_t     *mp;
        int             nimaps;
        int             error = 0;
        xfs_bmbt_irec_t imap;
        loff_t          loff;
        size_t          lsize;

        ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
        ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));

        mp = io->io_mount;

        /*
         * First handle zeroing the block on which isize resides.
         * We only zero a part of that block so it is handled specially.
         */
        error = xfs_zero_last_block(ip, io, offset, isize, end_size);
        if (error) {
                ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
                ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
                return error;
        }

        /*
         * Calculate the range between the new size and the old
         * where blocks needing to be zeroed may exist.  To get the
         * block where the last byte in the file currently resides,
         * we need to subtract one from the size and truncate back
         * to a block boundary.  We subtract 1 in case the size is
         * exactly on a block boundary.
         */
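        /*
         * E.g. with 4k blocks and isize == 8192, the last byte is at
         * offset 8191 in block 1; without the subtraction the size
         * would truncate to block 2, which holds no data yet.
         */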
        last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
        start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
        end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
        ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
        if (last_fsb == end_zero_fsb) {
                /*
                 * The size was only incremented on its last block.
                 * We took care of that above, so just return.
                 */
                return 0;
        }

        ASSERT(start_zero_fsb <= end_zero_fsb);
        prev_zero_fsb = NULLFILEOFF;
        prev_zero_count = 0;
        while (start_zero_fsb <= end_zero_fsb) {
                nimaps = 1;
                zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
                error = XFS_BMAPI(mp, NULL, io, start_zero_fsb, zero_count_fsb,
                                  0, NULL, 0, &imap, &nimaps, NULL);
                if (error) {
                        ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
                        ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
                        return error;
                }
                ASSERT(nimaps > 0);

                if (imap.br_state == XFS_EXT_UNWRITTEN ||
                    imap.br_startblock == HOLESTARTBLOCK) {
                        /*
                         * Holes and unwritten extents already read back
                         * as zeroes, so there is nothing on disk to zero
                         * here; just skip over the extent.
                         */
                        prev_zero_fsb = NULLFILEOFF;
                        prev_zero_count = 0;
                        start_zero_fsb = imap.br_startoff +
                                         imap.br_blockcount;
                        ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
                        continue;
                }

                /*
                 * There are blocks in the range requested.
                 * Zero them a single write at a time.  We actually
                 * don't zero the entire range returned if it is
                 * too big and simply loop around to get the rest.
                 * That is not the most efficient thing to do, but it
                 * is simple and this path should not be exercised often.
                 */
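                /*
                 * (m_writeio_blocks << 8 caps a single zeroing pass at
                 * 256 preferred-write-size units.)
                 */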
                buf_len_fsb = XFS_FILBLKS_MIN(imap.br_blockcount,
                                              mp->m_writeio_blocks << 8);
                /*
                 * Drop the inode lock while we're doing the I/O.
                 * We'll still have the iolock to protect us.
                 */
                XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);

                loff = XFS_FSB_TO_B(mp, start_zero_fsb);
                lsize = XFS_FSB_TO_B(mp, buf_len_fsb);

                error = xfs_iozero(ip, loff, lsize, end_size);

                if (error) {
                        goto out_lock;
                }

                prev_zero_fsb = start_zero_fsb;
                prev_zero_count = buf_len_fsb;
                start_zero_fsb = imap.br_startoff + buf_len_fsb;
                ASSERT(start_zero_fsb <= (end_zero_fsb + 1));

                XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
        }

        return 0;

out_lock:

        XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
        ASSERT(error >= 0);
        return error;
}

ssize_t                         /* bytes written, or (-) error */
xfs_write(
        bhv_desc_t              *bdp,
        struct kiocb            *iocb,
        const struct iovec      *iovp,
        unsigned int            nsegs,
        loff_t                  *offset,
        int                     ioflags,
        cred_t                  *credp)
{
        struct file             *file = iocb->ki_filp;
        struct address_space    *mapping = file->f_mapping;
        struct inode            *inode = mapping->host;
        unsigned long           segs = nsegs;
        xfs_inode_t             *xip;
        xfs_mount_t             *mp;
        ssize_t                 ret = 0, error = 0;
        xfs_fsize_t             isize, new_size;
        xfs_iocore_t            *io;
        vnode_t                 *vp;
        unsigned long           seg;
        int                     iolock;
        int                     eventsent = 0;
        vrwlock_t               locktype;
        size_t                  ocount = 0, count;
        loff_t                  pos;
        int                     need_isem = 1, need_flush = 0;

        XFS_STATS_INC(xs_write_calls);

        vp = BHV_TO_VNODE(bdp);
        xip = XFS_BHVTOI(bdp);

        for (seg = 0; seg < segs; seg++) {
                const struct iovec *iv = &iovp[seg];

                /*
                 * If any segment has a negative length, or the cumulative
                 * length ever wraps negative then return -EINVAL.
                 */
                ocount += iv->iov_len;
                if (unlikely((ssize_t)(ocount|iv->iov_len) < 0))
                        return -EINVAL;
                if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len))
                        continue;
                if (seg == 0)
                        return -EFAULT;
                segs = seg;
                ocount -= iv->iov_len;  /* This segment is no good */
                break;
        }
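
        /*
         * Unlike the read path, a bad segment does not always fail the
         * whole call here: if, say, the third of four segments fails
         * access_ok(), segs and ocount are trimmed above and the write
         * proceeds with the first two segments only.
         */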

        count = ocount;
        pos = *offset;

        if (count == 0)
                return 0;

        io = &xip->i_iocore;
        mp = io->io_mount;

        if (XFS_FORCED_SHUTDOWN(mp))
                return -EIO;

        fs_check_frozen(vp->v_vfsp, SB_FREEZE_WRITE);

        if (ioflags & IO_ISDIRECT) {
                xfs_buftarg_t   *target =
                        (xip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
                                mp->m_rtdev_targp : mp->m_ddev_targp;

                if (ioflags & IO_ISAIO)
                        return -XFS_ERROR(ENOSYS);

                if ((pos & target->pbr_smask) || (count & target->pbr_smask))
                        return -XFS_ERROR(EINVAL);

                if (!VN_CACHED(vp) && pos < i_size_read(inode))
                        need_isem = 0;

                if (VN_CACHED(vp))
                        need_flush = 1;
        }

relock:
        if (need_isem) {
                iolock = XFS_IOLOCK_EXCL;
                locktype = VRWLOCK_WRITE;

                down(&inode->i_sem);
        } else {
                iolock = XFS_IOLOCK_SHARED;
                locktype = VRWLOCK_WRITE_DIRECT;
        }

        xfs_ilock(xip, XFS_ILOCK_EXCL|iolock);

        isize = i_size_read(inode);

        if (file->f_flags & O_APPEND)
                *offset = isize;

start:
        error = -generic_write_checks(file, &pos, &count,
                                        S_ISBLK(inode->i_mode));
        if (error) {
                xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
                goto out_unlock_isem;
        }

        new_size = pos + count;
        if (new_size > isize)
                io->io_new_size = new_size;

        if (DM_EVENT_ENABLED(vp->v_vfsp, xip, DM_EVENT_WRITE) &&
            !(ioflags & IO_INVIS) && !eventsent) {
                loff_t          savedsize = pos;
                int             dmflags = FILP_DELAY_FLAG(file);

                if (need_isem)
                        dmflags |= DM_FLAGS_ISEM;

                xfs_iunlock(xip, XFS_ILOCK_EXCL);
                error = XFS_SEND_DATA(xip->i_mount, DM_EVENT_WRITE, vp,
                                      pos, count,
                                      dmflags, &locktype);
                if (error) {
                        xfs_iunlock(xip, iolock);
                        goto out_unlock_isem;
                }
                xfs_ilock(xip, XFS_ILOCK_EXCL);
                eventsent = 1;

                /*
                 * The iolock was dropped and reacquired in XFS_SEND_DATA
                 * so we have to recheck the size when appending.
                 * We will only "goto start;" once, since having sent the
                 * event prevents another call to XFS_SEND_DATA, which is
                 * what allows the size to change in the first place.
                 */
                if ((file->f_flags & O_APPEND) && savedsize != isize) {
                        pos = isize = xip->i_d.di_size;
                        goto start;
                }
        }

        /*
         * On Linux, generic_file_write updates the times even if
         * no data is copied in so long as the write had a size.
         *
         * We must update the XFS inode timestamps here as well, since
         * a later revalidate would otherwise copy stale Linux inode
         * times over the XFS ones.
         */
        if (!(ioflags & IO_INVIS)) {
                xfs_ichgtime(xip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
                inode_update_time(inode, 1);
        }

        /*
         * If the offset is beyond the current size of the file, we need
         * to zero any on-disk space between the old EOF and the start
         * of this write that already has blocks allocated, so that
         * extending the size does not expose stale data.  If a page
         * straddles the old EOF, its tail must be zeroed out to the
         * new size.
         */

        if (pos > isize) {
                error = xfs_zero_eof(BHV_TO_VNODE(bdp), io, pos,
                                        isize, pos + count);
                if (error) {
                        xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
                        goto out_unlock_isem;
                }
        }
        xfs_iunlock(xip, XFS_ILOCK_EXCL);

        /*
         * If we're writing the file then make sure to clear the
         * setuid and setgid bits if the process is not being run
         * by root.  This keeps people from modifying setuid and
         * setgid binaries.
         */
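
        /*
         * Note that a setgid bit without group execute is not cleared:
         * that combination marks mandatory locking rather than a setgid
         * executable, which is why the test below requires both S_ISGID
         * and S_IXGRP.
         */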

        if (((xip->i_d.di_mode & S_ISUID) ||
            ((xip->i_d.di_mode & (S_ISGID | S_IXGRP)) ==
                (S_ISGID | S_IXGRP))) &&
             !capable(CAP_FSETID)) {
                error = xfs_write_clear_setuid(xip);
                if (likely(!error))
                        error = -remove_suid(file->f_dentry);
                if (unlikely(error)) {
                        xfs_iunlock(xip, iolock);
                        goto out_unlock_isem;
                }
        }

retry:
        /* We can write back this queue in page reclaim */
        current->backing_dev_info = mapping->backing_dev_info;

        if (ioflags & IO_ISDIRECT) {
                if (need_flush) {
                        xfs_inval_cached_trace(io, pos, -1,
                                        ctooff(offtoct(pos)), -1);
                        VOP_FLUSHINVAL_PAGES(vp, ctooff(offtoct(pos)),
                                        -1, FI_REMAPF_LOCKED);
                }

                if (need_isem) {
                        /* demote the lock now the cached pages are gone */
                        XFS_ILOCK_DEMOTE(mp, io, XFS_IOLOCK_EXCL);
                        up(&inode->i_sem);

                        iolock = XFS_IOLOCK_SHARED;
                        locktype = VRWLOCK_WRITE_DIRECT;
                        need_isem = 0;
                }

                xfs_rw_enter_trace(XFS_DIOWR_ENTER, io, (void *)iovp, segs,
                                *offset, ioflags);
                ret = generic_file_direct_write(iocb, iovp,
                                &segs, pos, offset, count, ocount);

                /*
                 * direct-io write to a hole: fall through to buffered I/O
                 * for completing the rest of the request.
                 */
                if (ret >= 0 && ret != count) {
                        XFS_STATS_ADD(xs_write_bytes, ret);

                        pos += ret;
                        count -= ret;

                        need_isem = 1;
                        ioflags &= ~IO_ISDIRECT;
                        xfs_iunlock(xip, iolock);
                        goto relock;
                }
        } else {
                xfs_rw_enter_trace(XFS_WRITE_ENTER, io, (void *)iovp, segs,
                                *offset, ioflags);
                ret = generic_file_buffered_write(iocb, iovp, segs,
                                pos, offset, count, ret);
        }

        current->backing_dev_info = NULL;

        if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
                ret = wait_on_sync_kiocb(iocb);

        if ((ret == -ENOSPC) &&
            DM_EVENT_ENABLED(vp->v_vfsp, xip, DM_EVENT_NOSPACE) &&
            !(ioflags & IO_INVIS)) {

                xfs_rwunlock(bdp, locktype);
                error = XFS_SEND_NAMESP(xip->i_mount, DM_EVENT_NOSPACE, vp,
                                DM_RIGHT_NULL, vp, DM_RIGHT_NULL, NULL, NULL,
                                0, 0, 0); /* Delay flag intentionally unused */
                if (error)
                        goto out_unlock_isem;
                xfs_rwlock(bdp, locktype);
                pos = xip->i_d.di_size;
                ret = 0;
                goto retry;
        }

        if (*offset > xip->i_d.di_size) {
                xfs_ilock(xip, XFS_ILOCK_EXCL);
                if (*offset > xip->i_d.di_size) {
                        xip->i_d.di_size = *offset;
                        i_size_write(inode, *offset);
                        xip->i_update_core = 1;
                        xip->i_update_size = 1;
                }
                xfs_iunlock(xip, XFS_ILOCK_EXCL);
        }

        error = -ret;
        if (ret <= 0)
                goto out_unlock_internal;

        XFS_STATS_ADD(xs_write_bytes, ret);

        /* Handle various SYNC-type writes */
        if ((file->f_flags & O_SYNC) || IS_SYNC(inode)) {
                /*
                 * If we're treating this as O_DSYNC and we have not updated
                 * the size, force the log.
                 */
                if (!(mp->m_flags & XFS_MOUNT_OSYNCISOSYNC) &&
                    !(xip->i_update_size)) {
                        xfs_inode_log_item_t    *iip = xip->i_itemp;

                        /*
                         * If an allocation transaction occurred
                         * without extending the size, then we have to force
                         * the log up to the proper point to ensure that the
                         * allocation is permanent.  We can't count on
                         * the fact that buffered writes lock out direct I/O
                         * writes - the direct I/O write could have extended
                         * the size nontransactionally, then finished before
                         * we started.  xfs_write_file will think that the file
                         * didn't grow but the update isn't safe unless the
                         * size change is logged.
                         *
                         * Force the log if we've committed a transaction
                         * against the inode or if someone else has and
                         * the commit record hasn't gone to disk (e.g.
                         * the inode is pinned).  This guarantees that
                         * all changes affecting the inode are permanent
                         * when we return.
                         */
                        if (iip && iip->ili_last_lsn) {
                                xfs_log_force(mp, iip->ili_last_lsn,
                                                XFS_LOG_FORCE | XFS_LOG_SYNC);
                        } else if (xfs_ipincount(xip) > 0) {
                                xfs_log_force(mp, (xfs_lsn_t)0,
                                                XFS_LOG_FORCE | XFS_LOG_SYNC);
                        }

                } else {
                        xfs_trans_t     *tp;

                        /*
                         * O_SYNC or O_DSYNC _with_ a size update are handled
                         * the same way.
                         *
                         * If the write was synchronous then we need to make
                         * sure that the inode modification time is permanent.
                         * We'll have updated the timestamp above, so here
                         * we use a synchronous transaction to log the inode.
                         * It's not fast, but it's necessary.
                         *
                         * If this is a dsync write and the size got changed
                         * non-transactionally, then we need to ensure that
                         * the size change gets logged in a synchronous
                         * transaction.
                         */

                        tp = xfs_trans_alloc(mp, XFS_TRANS_WRITE_SYNC);
                        if ((error = xfs_trans_reserve(tp, 0,
                                                      XFS_SWRITE_LOG_RES(mp),
                                                      0, 0, 0))) {
                                /* Transaction reserve failed */
                                xfs_trans_cancel(tp, 0);
                        } else {
                                /* Transaction reserve successful */
                                xfs_ilock(xip, XFS_ILOCK_EXCL);
                                xfs_trans_ijoin(tp, xip, XFS_ILOCK_EXCL);
                                xfs_trans_ihold(tp, xip);
                                xfs_trans_log_inode(tp, xip, XFS_ILOG_CORE);
                                xfs_trans_set_sync(tp);
                                error = xfs_trans_commit(tp, 0, NULL);
                                xfs_iunlock(xip, XFS_ILOCK_EXCL);
                        }
                        if (error)
                                goto out_unlock_internal;
                }

                xfs_rwunlock(bdp, locktype);
                if (need_isem)
                        up(&inode->i_sem);

                error = sync_page_range(inode, mapping, pos, ret);
                if (!error)
                        error = ret;
                return error;
        }

 out_unlock_internal:
        xfs_rwunlock(bdp, locktype);
 out_unlock_isem:
        if (need_isem)
                up(&inode->i_sem);
        return -error;
}

/*
 * All xfs metadata buffers except log state machine buffers
 * get this attached as their b_bdstrat callback function.
 * This is so that we can catch a buffer
 * after prematurely unpinning it to forcibly shut down the filesystem.
 */
int
xfs_bdstrat_cb(struct xfs_buf *bp)
{
        xfs_mount_t     *mp;

        mp = XFS_BUF_FSPRIVATE3(bp, xfs_mount_t *);
        if (!XFS_FORCED_SHUTDOWN(mp)) {
                pagebuf_iorequest(bp);
                return 0;
        } else {
                xfs_buftrace("XFS__BDSTRAT IOERROR", bp);
                /*
                 * Metadata write that didn't get logged but
                 * written delayed anyway. These aren't associated
                 * with a transaction, and can be ignored.
                 */
                if (XFS_BUF_IODONE_FUNC(bp) == NULL &&
                    (XFS_BUF_ISREAD(bp)) == 0)
                        return (xfs_bioerror_relse(bp));
                else
                        return (xfs_bioerror(bp));
        }
}


int
xfs_bmap(bhv_desc_t     *bdp,
        xfs_off_t       offset,
        ssize_t         count,
        int             flags,
        xfs_iomap_t     *iomapp,
        int             *niomaps)
{
        xfs_inode_t     *ip = XFS_BHVTOI(bdp);
        xfs_iocore_t    *io = &ip->i_iocore;

        ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
        ASSERT(((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != 0) ==
               ((ip->i_iocore.io_flags & XFS_IOCORE_RT) != 0));

        return xfs_iomap(io, offset, count, flags, iomapp, niomaps);
}

/*
 * Wrapper around bdstrat so that we can stop data
 * from going to disk in case we are shutting down the filesystem.
 * Typically user data goes through this path; one of the exceptions
 * is the superblock.
 */
int
xfsbdstrat(
        struct xfs_mount        *mp,
        struct xfs_buf          *bp)
{
        ASSERT(mp);
        if (!XFS_FORCED_SHUTDOWN(mp)) {
                /* Grio redirection would go here
                 * if (XFS_BUF_IS_GRIO(bp)) {
                 */

                pagebuf_iorequest(bp);
                return 0;
        }

        xfs_buftrace("XFSBDSTRAT IOERROR", bp);
        return (xfs_bioerror_relse(bp));
}

/*
 * If the underlying (data/log/rt) device is readonly, there are some
 * operations that cannot proceed.
 */
int
xfs_dev_is_read_only(
        xfs_mount_t             *mp,
        char                    *message)
{
        if (xfs_readonly_buftarg(mp->m_ddev_targp) ||
            xfs_readonly_buftarg(mp->m_logdev_targp) ||
            (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) {
                cmn_err(CE_NOTE,
                        "XFS: %s required on read-only device.", message);
                cmn_err(CE_NOTE,
                        "XFS: write access unavailable, cannot proceed.");
                return EROFS;
        }
        return 0;
}