[XFS] Remove xfs_macros.c, xfs_macros.h, rework headers a whole lot.
fs/xfs/linux-2.6/xfs_lrw.c
/*
 * Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Further, this software is distributed without any warranty that it is
 * free of the rightful claim of any third person regarding infringement
 * or the like.  Any license provided herein, whether implied or
 * otherwise, applies only to this software file.  Patent licenses, if
 * any, provided herein do not apply to combinations of this program with
 * other software, or any other product whatsoever.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
 * Mountain View, CA  94043, or:
 *
 * http://www.sgi.com
 *
 * For further information regarding this notice, see:
 *
 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_rw.h"
#include "xfs_acl.h"
#include "xfs_cap.h"
#include "xfs_mac.h"
#include "xfs_attr.h"
#include "xfs_inode_item.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_iomap.h"

#include <linux/capability.h>
#include <linux/writeback.h>

#if defined(XFS_RW_TRACE)
void
xfs_rw_enter_trace(
	int			tag,
	xfs_iocore_t		*io,
	void			*data,
	size_t			segs,
	loff_t			offset,
	int			ioflags)
{
	xfs_inode_t	*ip = XFS_IO_INODE(io);

	if (ip->i_rwtrace == NULL)
		return;
	ktrace_enter(ip->i_rwtrace,
		(void *)(unsigned long)tag,
		(void *)ip,
		(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
		(void *)data,
		(void *)((unsigned long)segs),
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)ioflags),
		(void *)((unsigned long)((io->io_new_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(io->io_new_size & 0xffffffff)),
		(void *)NULL,
		(void *)NULL,
		(void *)NULL,
		(void *)NULL,
		(void *)NULL);
}
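
/*
 * Note that both trace helpers here split 64-bit sizes and offsets
 * into hi/lo 32-bit halves: ktrace entries are arrays of void *, so
 * on a 32-bit kernel a single slot cannot hold a 64-bit value.
 */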

void
xfs_inval_cached_trace(
	xfs_iocore_t	*io,
	xfs_off_t	offset,
	xfs_off_t	len,
	xfs_off_t	first,
	xfs_off_t	last)
{
	xfs_inode_t	*ip = XFS_IO_INODE(io);

	if (ip->i_rwtrace == NULL)
		return;
	ktrace_enter(ip->i_rwtrace,
		(void *)(__psint_t)XFS_INVAL_CACHED,
		(void *)ip,
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)((len >> 32) & 0xffffffff)),
		(void *)((unsigned long)(len & 0xffffffff)),
		(void *)((unsigned long)((first >> 32) & 0xffffffff)),
		(void *)((unsigned long)(first & 0xffffffff)),
		(void *)((unsigned long)((last >> 32) & 0xffffffff)),
		(void *)((unsigned long)(last & 0xffffffff)),
		(void *)NULL,
		(void *)NULL,
		(void *)NULL,
		(void *)NULL,
		(void *)NULL,
		(void *)NULL);
}
#endif

/*
 *	xfs_iozero
 *
 *	xfs_iozero clears the specified range of the buffer supplied,
 *	and marks all the affected blocks as valid and modified.  If
 *	an affected block is not allocated, it will be allocated.  If
 *	an affected block is not completely overwritten, and is not
 *	valid before the operation, it will be read from disk before
 *	being partially zeroed.
 */
STATIC int
xfs_iozero(
	struct inode		*ip,	/* inode			*/
	loff_t			pos,	/* offset in file		*/
	size_t			count,	/* size of data to zero		*/
	loff_t			end_size)	/* max file size to set	*/
{
	unsigned		bytes;
	struct page		*page;
	struct address_space	*mapping;
	char			*kaddr;
	int			status;

	mapping = ip->i_mapping;
	do {
		unsigned long index, offset;

		offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		status = -ENOMEM;
		page = grab_cache_page(mapping, index);
		if (!page)
			break;

		kaddr = kmap(page);
		status = mapping->a_ops->prepare_write(NULL, page, offset,
							offset + bytes);
		if (status)
			goto unlock;

		memset((void *)(kaddr + offset), 0, bytes);
		flush_dcache_page(page);
		status = mapping->a_ops->commit_write(NULL, page, offset,
							offset + bytes);
		if (!status) {
			pos += bytes;
			count -= bytes;
			if (pos > i_size_read(ip))
				i_size_write(ip, pos < end_size ? pos : end_size);
		}

unlock:
		kunmap(page);
		unlock_page(page);
		page_cache_release(page);
		if (status)
			break;
	} while (count);

	return (-status);
}
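
/*
 * xfs_iozero() goes through the generic address_space operations of
 * this kernel, where zeroing a sub-range of a page follows the
 * prepare/commit contract sketched below (a minimal sketch; 'from'
 * and 'to' are byte offsets within the page):
 *
 *	status = a_ops->prepare_write(file, page, from, to);
 *	if (!status) {
 *		... modify bytes [from, to) of the mapped page ...
 *		status = a_ops->commit_write(file, page, from, to);
 *	}
 *
 * The NULL file pointer passed above assumes the XFS implementations
 * of these operations never dereference it.
 */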

ssize_t			/* bytes read, or (-) error */
xfs_read(
	bhv_desc_t		*bdp,
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned int		segs,
	loff_t			*offset,
	int			ioflags,
	cred_t			*credp)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	size_t			size = 0;
	ssize_t			ret;
	xfs_fsize_t		n;
	xfs_inode_t		*ip;
	xfs_mount_t		*mp;
	vnode_t			*vp;
	unsigned long		seg;

	ip = XFS_BHVTOI(bdp);
	vp = BHV_TO_VNODE(bdp);
	mp = ip->i_mount;

	XFS_STATS_INC(xs_read_calls);

	/* START copy & waste from filemap.c */
	for (seg = 0; seg < segs; seg++) {
		const struct iovec *iv = &iovp[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		size += iv->iov_len;
		if (unlikely((ssize_t)(size|iv->iov_len) < 0))
			return -XFS_ERROR(EINVAL);
	}
	/* END copy & waste from filemap.c */

	if (unlikely(ioflags & IO_ISDIRECT)) {
		xfs_buftarg_t	*target =
			(ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;
		if ((*offset & target->pbr_smask) ||
		    (size & target->pbr_smask)) {
			if (*offset == ip->i_d.di_size)
				return 0;
			return -XFS_ERROR(EINVAL);
		}
	}

	n = XFS_MAXIOFFSET(mp) - *offset;
	if ((n <= 0) || (size == 0))
		return 0;

	if (n < size)
		size = n;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (unlikely(ioflags & IO_ISDIRECT))
		down(&inode->i_sem);
	xfs_ilock(ip, XFS_IOLOCK_SHARED);

	if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ) &&
	    !(ioflags & IO_INVIS)) {
		vrwlock_t locktype = VRWLOCK_READ;
		int dmflags = FILP_DELAY_FLAG(file) | DM_SEM_FLAG_RD(ioflags);

		ret = -XFS_SEND_DATA(mp, DM_EVENT_READ,
					BHV_TO_VNODE(bdp), *offset, size,
					dmflags, &locktype);
		if (ret) {
			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
			goto unlock_isem;
		}
	}

	xfs_rw_enter_trace(XFS_READ_ENTER, &ip->i_iocore,
				(void *)iovp, segs, *offset, ioflags);
	ret = __generic_file_aio_read(iocb, iovp, segs, offset);
	if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
		ret = wait_on_sync_kiocb(iocb);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	if (likely(!(ioflags & IO_INVIS)))
		xfs_ichgtime_fast(ip, inode, XFS_ICHGTIME_ACC);

unlock_isem:
	if (unlikely(ioflags & IO_ISDIRECT))
		up(&inode->i_sem);
	return ret;
}
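
/*
 * A worked example of the direct I/O alignment check in xfs_read(),
 * assuming pbr_smask is the target device's sector mask (sector size
 * minus one): with 512 byte sectors pbr_smask = 0x1ff, so a request
 * with *offset = 1000 or size = 1000 has low bits set and fails with
 * EINVAL, while 512/1024/... byte aligned requests pass.  The one
 * exception is a read starting exactly at EOF, which returns 0.
 */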

ssize_t
xfs_sendfile(
	bhv_desc_t		*bdp,
	struct file		*filp,
	loff_t			*offset,
	int			ioflags,
	size_t			count,
	read_actor_t		actor,
	void			*target,
	cred_t			*credp)
{
	ssize_t			ret;
	xfs_fsize_t		n;
	xfs_inode_t		*ip;
	xfs_mount_t		*mp;
	vnode_t			*vp;

	ip = XFS_BHVTOI(bdp);
	vp = BHV_TO_VNODE(bdp);
	mp = ip->i_mount;

	XFS_STATS_INC(xs_read_calls);

	n = XFS_MAXIOFFSET(mp) - *offset;
	if ((n <= 0) || (count == 0))
		return 0;

	if (n < count)
		count = n;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_IOLOCK_SHARED);

	if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ) &&
	    !(ioflags & IO_INVIS)) {
		vrwlock_t locktype = VRWLOCK_READ;
		int error;

		error = XFS_SEND_DATA(mp, DM_EVENT_READ, BHV_TO_VNODE(bdp),
				      *offset, count,
				      FILP_DELAY_FLAG(filp), &locktype);
		if (error) {
			xfs_iunlock(ip, XFS_IOLOCK_SHARED);
			return -error;
		}
	}
	xfs_rw_enter_trace(XFS_SENDFILE_ENTER, &ip->i_iocore,
		   (void *)(unsigned long)target, count, *offset, ioflags);
	ret = generic_file_sendfile(filp, offset, count, actor, target);

	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	if (likely(!(ioflags & IO_INVIS)))
		xfs_ichgtime_fast(ip, LINVFS_GET_IP(vp), XFS_ICHGTIME_ACC);

	return ret;
}

/*
 * This routine is called to handle zeroing any space in the last
 * block of the file that is beyond the EOF.  We do this since the
 * size is being increased without writing anything to that block
 * and we don't want anyone to read the garbage on the disk.
 */
STATIC int				/* error (positive) */
xfs_zero_last_block(
	struct inode	*ip,
	xfs_iocore_t	*io,
	xfs_off_t	offset,
	xfs_fsize_t	isize,
	xfs_fsize_t	end_size)
{
	xfs_fileoff_t	last_fsb;
	xfs_mount_t	*mp;
	int		nimaps;
	int		zero_offset;
	int		zero_len;
	int		isize_fsb_offset;
	int		error = 0;
	xfs_bmbt_irec_t	imap;
	loff_t		loff;
	size_t		lsize;

	ASSERT(ismrlocked(io->io_lock, MR_UPDATE) != 0);
	ASSERT(offset > isize);

	mp = io->io_mount;

	isize_fsb_offset = XFS_B_FSB_OFFSET(mp, isize);
	if (isize_fsb_offset == 0) {
		/*
		 * There are no extra bytes in the last block on disk to
		 * zero, so return.
		 */
		return 0;
	}

	last_fsb = XFS_B_TO_FSBT(mp, isize);
	nimaps = 1;
	error = XFS_BMAPI(mp, NULL, io, last_fsb, 1, 0, NULL, 0, &imap,
			  &nimaps, NULL);
	if (error)
		return error;
	ASSERT(nimaps > 0);
	/*
	 * If the block underlying isize is just a hole, then there
	 * is nothing to zero.
	 */
	if (imap.br_startblock == HOLESTARTBLOCK)
		return 0;
	/*
	 * Zero the part of the last block beyond the EOF, and write it
	 * out sync.  We need to drop the ilock while we do this so we
	 * don't deadlock when the buffer cache calls back to us.
	 */
	XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
	loff = XFS_FSB_TO_B(mp, last_fsb);
	lsize = XFS_FSB_TO_B(mp, 1);

	zero_offset = isize_fsb_offset;
	zero_len = mp->m_sb.sb_blocksize - isize_fsb_offset;

	error = xfs_iozero(ip, loff + zero_offset, zero_len, end_size);

	XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
	ASSERT(error >= 0);
	return error;
}
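
/*
 * Byte math example for the above, assuming 4k filesystem blocks and
 * isize = 6000: isize_fsb_offset = 6000 & 4095 = 1904 and last_fsb = 1,
 * so file offsets 6000..8191 (bytes 1904..4095 of block 1) are zeroed
 * while the first 1904 bytes of the block are left untouched.  When
 * isize is block aligned, isize_fsb_offset is 0 and we return early.
 */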

/*
 * Zero any on disk space between the current EOF and the new,
 * larger EOF.  This handles the normal case of zeroing the remainder
 * of the last block in the file and the unusual case of zeroing blocks
 * out beyond the size of the file.  This second case only happens
 * with fixed size extents and when the system crashes before the inode
 * size was updated but after blocks were allocated.  Holes and
 * unwritten extents in the range are skipped; they are left alone
 * rather than being filled and zeroed.
 */

int					/* error (positive) */
xfs_zero_eof(
	vnode_t		*vp,
	xfs_iocore_t	*io,
	xfs_off_t	offset,		/* starting I/O offset */
	xfs_fsize_t	isize,		/* current inode size */
	xfs_fsize_t	end_size)	/* terminal inode size */
{
	struct inode	*ip = LINVFS_GET_IP(vp);
	xfs_fileoff_t	start_zero_fsb;
	xfs_fileoff_t	end_zero_fsb;
	xfs_fileoff_t	prev_zero_fsb;
	xfs_fileoff_t	zero_count_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_extlen_t	buf_len_fsb;
	xfs_extlen_t	prev_zero_count;
	xfs_mount_t	*mp;
	int		nimaps;
	int		error = 0;
	xfs_bmbt_irec_t	imap;
	loff_t		loff;
	size_t		lsize;

	ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
	ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));

	mp = io->io_mount;

	/*
	 * First handle zeroing the block on which isize resides.
	 * We only zero a part of that block so it is handled specially.
	 */
	error = xfs_zero_last_block(ip, io, offset, isize, end_size);
	if (error) {
		ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
		ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
		return error;
	}

	/*
	 * Calculate the range between the new size and the old
	 * where blocks needing to be zeroed may exist.  To get the
	 * block where the last byte in the file currently resides,
	 * we need to subtract one from the size and truncate back
	 * to a block boundary.  We subtract 1 in case the size is
	 * exactly on a block boundary.
	 */
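	/*
	 * For example, assuming 4k blocks, isize = 6000 and a write at
	 * offset 20000: last_fsb = 1, start_zero_fsb = 2 and
	 * end_zero_fsb = 4, so whole blocks 2 through 4 are candidates
	 * for zeroing (the tail of block 1 was handled above).
	 */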
	last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
	start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
	end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
	ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
	if (last_fsb == end_zero_fsb) {
		/*
		 * The size was only incremented on its last block.
		 * We took care of that above, so just return.
		 */
		return 0;
	}

	ASSERT(start_zero_fsb <= end_zero_fsb);
	prev_zero_fsb = NULLFILEOFF;
	prev_zero_count = 0;
	while (start_zero_fsb <= end_zero_fsb) {
		nimaps = 1;
		zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
		error = XFS_BMAPI(mp, NULL, io, start_zero_fsb, zero_count_fsb,
				  0, NULL, 0, &imap, &nimaps, NULL);
		if (error) {
			ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
			ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
			return error;
		}
		ASSERT(nimaps > 0);

		if (imap.br_state == XFS_EXT_UNWRITTEN ||
		    imap.br_startblock == HOLESTARTBLOCK) {
			/*
			 * This loop handles initializing pages that were
			 * partially initialized by the code below this
			 * loop. It basically zeroes the part of the page
			 * that sits on a hole and sets the page as P_HOLE
			 * and calls remapf if it is a mapped file.
			 */
			prev_zero_fsb = NULLFILEOFF;
			prev_zero_count = 0;
			start_zero_fsb = imap.br_startoff +
					 imap.br_blockcount;
			ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
			continue;
		}

		/*
		 * There are blocks in the range requested.
		 * Zero them a single write at a time.  We actually
		 * don't zero the entire range returned if it is
		 * too big and simply loop around to get the rest.
		 * That is not the most efficient thing to do, but it
		 * is simple and this path should not be exercised often.
		 */
		buf_len_fsb = XFS_FILBLKS_MIN(imap.br_blockcount,
					      mp->m_writeio_blocks << 8);
		/*
		 * Drop the inode lock while we're doing the I/O.
		 * We'll still have the iolock to protect us.
		 */
		XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);

		loff = XFS_FSB_TO_B(mp, start_zero_fsb);
		lsize = XFS_FSB_TO_B(mp, buf_len_fsb);

		error = xfs_iozero(ip, loff, lsize, end_size);

		if (error)
			goto out_lock;

		prev_zero_fsb = start_zero_fsb;
		prev_zero_count = buf_len_fsb;
		start_zero_fsb = imap.br_startoff + buf_len_fsb;
		ASSERT(start_zero_fsb <= (end_zero_fsb + 1));

		XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
	}

	return 0;

out_lock:
	XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
	ASSERT(error >= 0);
	return error;
}
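
/*
 * Note the locking contract: xfs_zero_eof() is entered and exited with
 * both the ilock and the iolock held in update mode (the ASSERTs
 * above), but the ilock is dropped around each xfs_iozero() call so
 * that the buffer cache can call back into XFS without deadlocking;
 * the iolock still excludes concurrent writers.
 */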

ssize_t				/* bytes written, or (-) error */
xfs_write(
	bhv_desc_t		*bdp,
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned int		nsegs,
	loff_t			*offset,
	int			ioflags,
	cred_t			*credp)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	unsigned long		segs = nsegs;
	xfs_inode_t		*xip;
	xfs_mount_t		*mp;
	ssize_t			ret = 0, error = 0;
	xfs_fsize_t		isize, new_size;
	xfs_iocore_t		*io;
	vnode_t			*vp;
	unsigned long		seg;
	int			iolock;
	int			eventsent = 0;
	vrwlock_t		locktype;
	size_t			ocount = 0, count;
	loff_t			pos;
	int			need_isem = 1, need_flush = 0;

	XFS_STATS_INC(xs_write_calls);

	vp = BHV_TO_VNODE(bdp);
	xip = XFS_BHVTOI(bdp);

	for (seg = 0; seg < segs; seg++) {
		const struct iovec *iv = &iovp[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		ocount += iv->iov_len;
		if (unlikely((ssize_t)(ocount|iv->iov_len) < 0))
			return -EINVAL;
		if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len))
			continue;
		if (seg == 0)
			return -EFAULT;
		segs = seg;
		ocount -= iv->iov_len;	/* This segment is no good */
		break;
	}

	count = ocount;
	pos = *offset;

	if (count == 0)
		return 0;

	io = &xip->i_iocore;
	mp = io->io_mount;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	fs_check_frozen(vp->v_vfsp, SB_FREEZE_WRITE);

	if (ioflags & IO_ISDIRECT) {
		xfs_buftarg_t	*target =
			(xip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;

		if ((pos & target->pbr_smask) || (count & target->pbr_smask))
			return -XFS_ERROR(EINVAL);

		if (!VN_CACHED(vp) && pos < i_size_read(inode))
			need_isem = 0;

		if (VN_CACHED(vp))
			need_flush = 1;
	}

relock:
	if (need_isem) {
		iolock = XFS_IOLOCK_EXCL;
		locktype = VRWLOCK_WRITE;

		down(&inode->i_sem);
	} else {
		iolock = XFS_IOLOCK_SHARED;
		locktype = VRWLOCK_WRITE_DIRECT;
	}

	xfs_ilock(xip, XFS_ILOCK_EXCL|iolock);

	isize = i_size_read(inode);

	if (file->f_flags & O_APPEND)
		*offset = isize;

start:
	error = -generic_write_checks(file, &pos, &count,
					S_ISBLK(inode->i_mode));
	if (error) {
		xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
		goto out_unlock_isem;
	}

	new_size = pos + count;
	if (new_size > isize)
		io->io_new_size = new_size;

	if (DM_EVENT_ENABLED(vp->v_vfsp, xip, DM_EVENT_WRITE) &&
	    !(ioflags & IO_INVIS) && !eventsent) {
		loff_t		savedsize = pos;
		int		dmflags = FILP_DELAY_FLAG(file);

		if (need_isem)
			dmflags |= DM_FLAGS_ISEM;

		xfs_iunlock(xip, XFS_ILOCK_EXCL);
		error = XFS_SEND_DATA(xip->i_mount, DM_EVENT_WRITE, vp,
				      pos, count,
				      dmflags, &locktype);
		if (error) {
			xfs_iunlock(xip, iolock);
			goto out_unlock_isem;
		}
		xfs_ilock(xip, XFS_ILOCK_EXCL);
		eventsent = 1;

		/*
		 * The iolock was dropped and reacquired in XFS_SEND_DATA
		 * so we have to recheck the size when appending.
		 * We will only "goto start;" once, since having sent the
		 * event prevents another call to XFS_SEND_DATA, which is
		 * what allows the size to change in the first place.
		 */
		if ((file->f_flags & O_APPEND) && savedsize != isize) {
			pos = isize = xip->i_d.di_size;
			goto start;
		}
	}

	if (likely(!(ioflags & IO_INVIS))) {
		inode_update_time(inode, 1);
		xfs_ichgtime_fast(xip, inode,
				  XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	}

	/*
	 * If the offset is beyond the current size of the file, we need
	 * to make sure any space between the old EOF and the start of
	 * this write reads back as zeroes: already-allocated blocks in
	 * that range must be zeroed on disk, and any page straddling
	 * the old EOF must be zeroed out to the new size.  xfs_zero_eof()
	 * handles both cases.
	 */

	if (pos > isize) {
		error = xfs_zero_eof(BHV_TO_VNODE(bdp), io, pos,
					isize, pos + count);
		if (error) {
			xfs_iunlock(xip, XFS_ILOCK_EXCL|iolock);
			goto out_unlock_isem;
		}
	}
	xfs_iunlock(xip, XFS_ILOCK_EXCL);

	/*
	 * If we're writing the file then make sure to clear the
	 * setuid and setgid bits if the process is not being run
	 * by root.  This keeps people from modifying setuid and
	 * setgid binaries.
	 */

	if (((xip->i_d.di_mode & S_ISUID) ||
	    ((xip->i_d.di_mode & (S_ISGID | S_IXGRP)) ==
		(S_ISGID | S_IXGRP))) &&
	     !capable(CAP_FSETID)) {
		error = xfs_write_clear_setuid(xip);
		if (likely(!error))
			error = -remove_suid(file->f_dentry);
		if (unlikely(error)) {
			xfs_iunlock(xip, iolock);
			goto out_unlock_isem;
		}
	}

retry:
	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	if (ioflags & IO_ISDIRECT) {
		if (need_flush) {
			xfs_inval_cached_trace(io, pos, -1,
					ctooff(offtoct(pos)), -1);
			VOP_FLUSHINVAL_PAGES(vp, ctooff(offtoct(pos)),
					-1, FI_REMAPF_LOCKED);
		}

		if (need_isem) {
			/* demote the lock now the cached pages are gone */
			XFS_ILOCK_DEMOTE(mp, io, XFS_IOLOCK_EXCL);
			up(&inode->i_sem);

			iolock = XFS_IOLOCK_SHARED;
			locktype = VRWLOCK_WRITE_DIRECT;
			need_isem = 0;
		}

		xfs_rw_enter_trace(XFS_DIOWR_ENTER, io, (void *)iovp, segs,
				*offset, ioflags);
		ret = generic_file_direct_write(iocb, iovp,
				&segs, pos, offset, count, ocount);

		/*
		 * direct-io write to a hole: fall through to buffered I/O
		 * for completing the rest of the request.
		 */
		if (ret >= 0 && ret != count) {
			XFS_STATS_ADD(xs_write_bytes, ret);

			pos += ret;
			count -= ret;

			need_isem = 1;
			ioflags &= ~IO_ISDIRECT;
			xfs_iunlock(xip, iolock);
			goto relock;
		}
	} else {
		xfs_rw_enter_trace(XFS_WRITE_ENTER, io, (void *)iovp, segs,
				*offset, ioflags);
		ret = generic_file_buffered_write(iocb, iovp, segs,
				pos, offset, count, ret);
	}

	current->backing_dev_info = NULL;

	if (ret == -EIOCBQUEUED && !(ioflags & IO_ISAIO))
		ret = wait_on_sync_kiocb(iocb);

	if ((ret == -ENOSPC) &&
	    DM_EVENT_ENABLED(vp->v_vfsp, xip, DM_EVENT_NOSPACE) &&
	    !(ioflags & IO_INVIS)) {

		xfs_rwunlock(bdp, locktype);
		if (need_isem)
			up(&inode->i_sem);
		error = XFS_SEND_NAMESP(xip->i_mount, DM_EVENT_NOSPACE, vp,
				DM_RIGHT_NULL, vp, DM_RIGHT_NULL, NULL, NULL,
				0, 0, 0); /* Delay flag intentionally unused */
		if (error)
			goto out_nounlocks;
		if (need_isem)
			down(&inode->i_sem);
		xfs_rwlock(bdp, locktype);
		pos = xip->i_d.di_size;
		ret = 0;
		goto retry;
	}

	if (*offset > xip->i_d.di_size) {
		xfs_ilock(xip, XFS_ILOCK_EXCL);
		if (*offset > xip->i_d.di_size) {
			xip->i_d.di_size = *offset;
			i_size_write(inode, *offset);
			xip->i_update_core = 1;
			xip->i_update_size = 1;
		}
		xfs_iunlock(xip, XFS_ILOCK_EXCL);
	}

	error = -ret;
	if (ret <= 0)
		goto out_unlock_internal;

	XFS_STATS_ADD(xs_write_bytes, ret);

	/* Handle various SYNC-type writes */
	if ((file->f_flags & O_SYNC) || IS_SYNC(inode)) {
		/*
		 * If we're treating this as O_DSYNC and we have not updated
		 * the size, force the log.
		 */
		if (!(mp->m_flags & XFS_MOUNT_OSYNCISOSYNC) &&
		    !(xip->i_update_size)) {
			xfs_inode_log_item_t	*iip = xip->i_itemp;

			/*
			 * If an allocation transaction occurred
			 * without extending the size, then we have to force
			 * the log up the proper point to ensure that the
			 * allocation is permanent.  We can't count on
			 * the fact that buffered writes lock out direct I/O
			 * writes - the direct I/O write could have extended
			 * the size nontransactionally, then finished before
			 * we started.  xfs_write_file will think that the file
			 * didn't grow but the update isn't safe unless the
			 * size change is logged.
			 *
			 * Force the log if we've committed a transaction
			 * against the inode or if someone else has and
			 * the commit record hasn't gone to disk (e.g.
			 * the inode is pinned).  This guarantees that
			 * all changes affecting the inode are permanent
			 * when we return.
			 */
			if (iip && iip->ili_last_lsn) {
				xfs_log_force(mp, iip->ili_last_lsn,
						XFS_LOG_FORCE | XFS_LOG_SYNC);
			} else if (xfs_ipincount(xip) > 0) {
				xfs_log_force(mp, (xfs_lsn_t)0,
						XFS_LOG_FORCE | XFS_LOG_SYNC);
			}

		} else {
			xfs_trans_t	*tp;

			/*
			 * O_SYNC or O_DSYNC _with_ a size update are handled
			 * the same way.
			 *
			 * If the write was synchronous then we need to make
			 * sure that the inode modification time is permanent.
			 * We'll have updated the timestamp above, so here
			 * we use a synchronous transaction to log the inode.
			 * It's not fast, but it's necessary.
			 *
			 * If this is a dsync write and the size got changed
			 * non-transactionally, then we need to ensure that
			 * the size change gets logged in a synchronous
			 * transaction.
			 */

			tp = xfs_trans_alloc(mp, XFS_TRANS_WRITE_SYNC);
			if ((error = xfs_trans_reserve(tp, 0,
						      XFS_SWRITE_LOG_RES(mp),
						      0, 0, 0))) {
				/* Transaction reserve failed */
				xfs_trans_cancel(tp, 0);
			} else {
				/* Transaction reserve successful */
				xfs_ilock(xip, XFS_ILOCK_EXCL);
				xfs_trans_ijoin(tp, xip, XFS_ILOCK_EXCL);
				xfs_trans_ihold(tp, xip);
				xfs_trans_log_inode(tp, xip, XFS_ILOG_CORE);
				xfs_trans_set_sync(tp);
				error = xfs_trans_commit(tp, 0, NULL);
				xfs_iunlock(xip, XFS_ILOCK_EXCL);
			}
			if (error)
				goto out_unlock_internal;
		}

		xfs_rwunlock(bdp, locktype);
		if (need_isem)
			up(&inode->i_sem);

		error = sync_page_range(inode, mapping, pos, ret);
		if (!error)
			error = ret;
		return error;
	}

 out_unlock_internal:
	xfs_rwunlock(bdp, locktype);
 out_unlock_isem:
	if (need_isem)
		up(&inode->i_sem);
 out_nounlocks:
	return -error;
}
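
/*
 * A note on the error convention inside xfs_write(): the local 'error'
 * holds a positive errno (hence 'error = -generic_write_checks(...)'
 * and 'error = -ret'), and the common exit labels return -error, so
 * callers see either bytes written or a negative errno, as the comment
 * on the prototype promises.
 */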

/*
 * All xfs metadata buffers except log state machine buffers
 * get this attached as their b_bdstrat callback function.
 * This is so that we can catch a buffer
 * after prematurely unpinning it to forcibly shutdown the filesystem.
 */
int
xfs_bdstrat_cb(struct xfs_buf *bp)
{
	xfs_mount_t	*mp;

	mp = XFS_BUF_FSPRIVATE3(bp, xfs_mount_t *);
	if (!XFS_FORCED_SHUTDOWN(mp)) {
		pagebuf_iorequest(bp);
		return 0;
	} else {
		xfs_buftrace("XFS__BDSTRAT IOERROR", bp);
		/*
		 * Metadata write that didn't get logged but
		 * written delayed anyway. These aren't associated
		 * with a transaction, and can be ignored.
		 */
		if (XFS_BUF_IODONE_FUNC(bp) == NULL &&
		    (XFS_BUF_ISREAD(bp)) == 0)
			return (xfs_bioerror_relse(bp));
		else
			return (xfs_bioerror(bp));
	}
}
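
/*
 * A sketch of how this callback gets wired up when a metadata buffer
 * is initialized elsewhere in XFS, assuming the XFS_BUF_SET_* setter
 * macros from xfs_buf.h that mirror the getters used above:
 *
 *	XFS_BUF_SET_FSPRIVATE3(bp, mp);
 *	XFS_BUF_SET_BDSTRAT_FUNC(bp, xfs_bdstrat_cb);
 *
 * which is what makes the mount point retrievable here.
 */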

int
xfs_bmap(bhv_desc_t	*bdp,
	xfs_off_t	offset,
	ssize_t		count,
	int		flags,
	xfs_iomap_t	*iomapp,
	int		*niomaps)
{
	xfs_inode_t	*ip = XFS_BHVTOI(bdp);
	xfs_iocore_t	*io = &ip->i_iocore;

	ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
	ASSERT(((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != 0) ==
	       ((ip->i_iocore.io_flags & XFS_IOCORE_RT) != 0));

	return xfs_iomap(io, offset, count, flags, iomapp, niomaps);
}
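
/*
 * xfs_bmap() backs the VOP_BMAP operation for regular files: given a
 * byte offset and count it fills in up to *niomaps xfs_iomap_t
 * mappings, which the Linux I/O paths then turn into block-level
 * requests.
 */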

/*
 * Wrapper around bdstrat so that we can stop data
 * from going to disk in case we are shutting down the filesystem.
 * Typically user data goes thru this path; one of the exceptions
 * is the superblock.
 */
int
xfsbdstrat(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp)
{
	ASSERT(mp);
	if (!XFS_FORCED_SHUTDOWN(mp)) {
		/* Grio redirection would go here
		 * if (XFS_BUF_IS_GRIO(bp)) {
		 */

		pagebuf_iorequest(bp);
		return 0;
	}

	xfs_buftrace("XFSBDSTRAT IOERROR", bp);
	return (xfs_bioerror_relse(bp));
}
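
/*
 * Typical usage, assuming the xfs_iowait() wrapper for
 * pagebuf_iowait(): submit the buffer and then wait for completion,
 * e.g.
 *
 *	xfsbdstrat(mp, bp);
 *	error = xfs_iowait(bp);
 *
 * On a forced shutdown the request never reaches the device and the
 * buffer is errored out and released instead.
 */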

/*
 * If the underlying (data/log/rt) device is readonly, there are some
 * operations that cannot proceed.
 */
int
xfs_dev_is_read_only(
	xfs_mount_t		*mp,
	char			*message)
{
	if (xfs_readonly_buftarg(mp->m_ddev_targp) ||
	    xfs_readonly_buftarg(mp->m_logdev_targp) ||
	    (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) {
		cmn_err(CE_NOTE,
			"XFS: %s required on read-only device.", message);
		cmn_err(CE_NOTE,
			"XFS: write access unavailable, cannot proceed.");
		return EROFS;
	}
	return 0;
}
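
/*
 * The positive EROFS return is meant to be used directly as the
 * caller's errno; a grow operation, for instance, might do:
 *
 *	if (xfs_dev_is_read_only(mp, "grow"))
 *		return XFS_ERROR(EROFS);
 *
 * (a sketch; the message string is caller-specific).
 */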