[PATCH] reduce size of bio mempools
[linux-2.6.git] / fs / xfs / linux-2.6 / xfs_buf.c
1 /*
2  * Copyright (c) 2000-2005 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include <linux/stddef.h>
19 #include <linux/errno.h>
20 #include <linux/slab.h>
21 #include <linux/pagemap.h>
22 #include <linux/init.h>
23 #include <linux/vmalloc.h>
24 #include <linux/bio.h>
25 #include <linux/sysctl.h>
26 #include <linux/proc_fs.h>
27 #include <linux/workqueue.h>
28 #include <linux/percpu.h>
29 #include <linux/blkdev.h>
30 #include <linux/hash.h>
31 #include <linux/kthread.h>
32 #include <linux/migrate.h>
33 #include "xfs_linux.h"
34
35 STATIC kmem_zone_t *xfs_buf_zone;
36 STATIC kmem_shaker_t xfs_buf_shake;
37 STATIC int xfsbufd(void *);
38 STATIC int xfsbufd_wakeup(int, gfp_t);
39 STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
40
41 STATIC struct workqueue_struct *xfslogd_workqueue;
42 struct workqueue_struct *xfsdatad_workqueue;
43
44 #ifdef XFS_BUF_TRACE
void
xfs_buf_trace(
	xfs_buf_t	*bp,
	char		*id,
	void		*data,
	void		*ra)
{
	/*
	 * Record one entry in the global buffer trace ring: the buffer,
	 * an event id string, caller data and return address, plus a
	 * snapshot of the state needed to decode traces later (flags,
	 * hold count, lock count, current task, offset, length).
	 * The slot order is fixed - trace decoders rely on it.
	 */
	ktrace_enter(xfs_buf_trace_buf,
		bp, id,
		(void *)(unsigned long)bp->b_flags,
		(void *)(unsigned long)bp->b_hold.counter,
		(void *)(unsigned long)bp->b_sema.count.counter,
		(void *)current,
		data, ra,
		/* 64-bit file offset split across two 32-bit slots */
		(void *)(unsigned long)((bp->b_file_offset>>32) & 0xffffffff),
		(void *)(unsigned long)(bp->b_file_offset & 0xffffffff),
		(void *)(unsigned long)bp->b_buffer_length,
		NULL, NULL, NULL, NULL, NULL);
}
ktrace_t *xfs_buf_trace_buf;	/* global trace ring */
#define XFS_BUF_TRACE_SIZE	4096
#define XB_TRACE(bp, id, data)	\
	xfs_buf_trace(bp, id, (void *)data, (void *)__builtin_return_address(0))
68 #else
69 #define XB_TRACE(bp, id, data)  do { } while (0)
70 #endif
71
72 #ifdef XFS_BUF_LOCK_TRACKING
73 # define XB_SET_OWNER(bp)       ((bp)->b_last_holder = current->pid)
74 # define XB_CLEAR_OWNER(bp)     ((bp)->b_last_holder = -1)
75 # define XB_GET_OWNER(bp)       ((bp)->b_last_holder)
76 #else
77 # define XB_SET_OWNER(bp)       do { } while (0)
78 # define XB_CLEAR_OWNER(bp)     do { } while (0)
79 # define XB_GET_OWNER(bp)       do { } while (0)
80 #endif
81
/*
 * Map buffer allocation flags onto page-cache GFP flags: readahead must
 * never retry hard, transaction contexts (XBF_DONT_BLOCK) must not
 * recurse into the filesystem, and we suppress allocation-failure
 * warnings because failures are handled by the callers.
 */
#define xb_to_gfp(flags) \
	((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : \
	  ((flags) & XBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)

/* Equivalent mapping for the kmem allocator wrappers. */
#define xb_to_km(flags) \
	 (((flags) & XBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)

#define xfs_buf_allocate(flags) \
	kmem_zone_alloc(xfs_buf_zone, xb_to_km(flags))
/*
 * No trailing semicolon in the expansion: the previous definition ended
 * in ';', which expanded to a stray empty statement at call sites and
 * would break "if (x) xfs_buf_deallocate(bp); else ..." constructs.
 */
#define xfs_buf_deallocate(bp) \
	kmem_zone_free(xfs_buf_zone, (bp))
93
94 /*
95  *      Page Region interfaces.
96  *
97  *      For pages in filesystems where the blocksize is smaller than the
98  *      pagesize, we use the page->private field (long) to hold a bitmap
99  *      of uptodate regions within the page.
100  *
101  *      Each such region is "bytes per page / bits per long" bytes long.
102  *
103  *      NBPPR == number-of-bytes-per-page-region
104  *      BTOPR == bytes-to-page-region (rounded up)
105  *      BTOPRT == bytes-to-page-region-truncated (rounded down)
106  */
107 #if (BITS_PER_LONG == 32)
108 #define PRSHIFT         (PAGE_CACHE_SHIFT - 5)  /* (32 == 1<<5) */
109 #elif (BITS_PER_LONG == 64)
110 #define PRSHIFT         (PAGE_CACHE_SHIFT - 6)  /* (64 == 1<<6) */
111 #else
112 #error BITS_PER_LONG must be 32 or 64
113 #endif
114 #define NBPPR           (PAGE_CACHE_SIZE/BITS_PER_LONG)
115 #define BTOPR(b)        (((unsigned int)(b) + (NBPPR - 1)) >> PRSHIFT)
116 #define BTOPRT(b)       (((unsigned int)(b) >> PRSHIFT))
117
118 STATIC unsigned long
119 page_region_mask(
120         size_t          offset,
121         size_t          length)
122 {
123         unsigned long   mask;
124         int             first, final;
125
126         first = BTOPR(offset);
127         final = BTOPRT(offset + length - 1);
128         first = min(first, final);
129
130         mask = ~0UL;
131         mask <<= BITS_PER_LONG - (final - first);
132         mask >>= BITS_PER_LONG - (final);
133
134         ASSERT(offset + length <= PAGE_CACHE_SIZE);
135         ASSERT((final - first) < BITS_PER_LONG && (final - first) >= 0);
136
137         return mask;
138 }
139
/*
 * Mark the byte range [offset, offset + length) of a page uptodate in
 * the per-page region bitmap kept in page->private; once every region
 * of the page is valid, promote the whole page to PageUptodate.
 */
STATIC inline void
set_page_region(
	struct page	*page,
	size_t		offset,
	size_t		length)
{
	set_page_private(page,
		page_private(page) | page_region_mask(offset, length));
	/* all regions valid -> whole page is uptodate */
	if (page_private(page) == ~0UL)
		SetPageUptodate(page);
}
151
152 STATIC inline int
153 test_page_region(
154         struct page     *page,
155         size_t          offset,
156         size_t          length)
157 {
158         unsigned long   mask = page_region_mask(offset, length);
159
160         return (mask && (page_private(page) & mask) == mask);
161 }
162
163 /*
164  *      Mapping of multi-page buffers into contiguous virtual space
165  */
166
typedef struct a_list {
	void		*vm_addr;	/* vmapped address awaiting vunmap() */
	struct a_list	*next;
} a_list_t;

/* Deferred-vunmap list: entries are queued by free_address() and
 * drained by purge_addresses().  All three are protected by as_lock. */
STATIC a_list_t		*as_free_head;
STATIC int		as_list_len;
STATIC DEFINE_SPINLOCK(as_lock);
175
176 /*
177  *      Try to batch vunmaps because they are costly.
178  */
STATIC void
free_address(
	void		*addr)
{
	a_list_t	*aentry;

	/*
	 * Queue the address for a later batched vunmap (vunmap is
	 * costly).  Atomic allocation, but without __GFP_HIGH: don't dip
	 * into the emergency pools just to defer a vunmap - on failure
	 * we simply vunmap synchronously below.
	 */
	aentry = kmalloc(sizeof(a_list_t), GFP_ATOMIC & ~__GFP_HIGH);
	if (likely(aentry)) {
		spin_lock(&as_lock);
		aentry->next = as_free_head;
		aentry->vm_addr = addr;
		as_free_head = aentry;
		as_list_len++;
		spin_unlock(&as_lock);
	} else {
		vunmap(addr);
	}
}
197
/*
 * Drain the deferred-vunmap list built up by free_address().
 */
STATIC void
purge_addresses(void)
{
	a_list_t	*aentry, *old;

	/* unlocked fast-path check; concurrent additions are simply
	 * picked up by the next caller */
	if (as_free_head == NULL)
		return;

	/* detach the whole list under the lock... */
	spin_lock(&as_lock);
	aentry = as_free_head;
	as_free_head = NULL;
	as_list_len = 0;
	spin_unlock(&as_lock);

	/* ...and do the slow vunmap work outside it */
	while ((old = aentry) != NULL) {
		vunmap(aentry->vm_addr);
		aentry = aentry->next;
		kfree(old);
	}
}
218
219 /*
220  *      Internal xfs_buf_t object manipulation
221  */
222
/*
 * Put a freshly allocated buffer into a known-good initial state: one
 * hold, locked (b_sema taken, no waiters), covering the byte range
 * [range_base, range_base + range_length) of 'target', with no disk
 * address assigned yet (XFS_BUF_DADDR_NULL).
 */
STATIC void
_xfs_buf_initialize(
	xfs_buf_t		*bp,
	xfs_buftarg_t		*target,
	xfs_off_t		range_base,
	size_t			range_length,
	xfs_buf_flags_t		flags)
{
	/*
	 * We don't want certain flags to appear in b_flags.
	 */
	flags &= ~(XBF_LOCK|XBF_MAPPED|XBF_DONT_BLOCK|XBF_READ_AHEAD);

	memset(bp, 0, sizeof(xfs_buf_t));
	atomic_set(&bp->b_hold, 1);
	init_MUTEX_LOCKED(&bp->b_iodonesema);
	INIT_LIST_HEAD(&bp->b_list);
	INIT_LIST_HEAD(&bp->b_hash_list);
	init_MUTEX_LOCKED(&bp->b_sema); /* held, no waiters */
	XB_SET_OWNER(bp);
	bp->b_target = target;
	bp->b_file_offset = range_base;
	/*
	 * Set buffer_length and count_desired to the same value initially.
	 * I/O routines should use count_desired, which will be the same in
	 * most cases but may be reset (e.g. XFS recovery).
	 */
	bp->b_buffer_length = bp->b_count_desired = range_length;
	bp->b_flags = flags;
	bp->b_bn = XFS_BUF_DADDR_NULL;
	atomic_set(&bp->b_pin_count, 0);
	init_waitqueue_head(&bp->b_waiters);

	XFS_STATS_INC(xb_create);
	XB_TRACE(bp, "initialize", target);
}
259
260 /*
261  *      Allocate a page array capable of holding a specified number
262  *      of pages, and point the page buf at it.
263  */
264 STATIC int
265 _xfs_buf_get_pages(
266         xfs_buf_t               *bp,
267         int                     page_count,
268         xfs_buf_flags_t         flags)
269 {
270         /* Make sure that we have a page list */
271         if (bp->b_pages == NULL) {
272                 bp->b_offset = xfs_buf_poff(bp->b_file_offset);
273                 bp->b_page_count = page_count;
274                 if (page_count <= XB_PAGES) {
275                         bp->b_pages = bp->b_page_array;
276                 } else {
277                         bp->b_pages = kmem_alloc(sizeof(struct page *) *
278                                         page_count, xb_to_km(flags));
279                         if (bp->b_pages == NULL)
280                                 return -ENOMEM;
281                 }
282                 memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
283         }
284         return 0;
285 }
286
287 /*
288  *      Frees b_pages if it was allocated.
289  */
290 STATIC void
291 _xfs_buf_free_pages(
292         xfs_buf_t       *bp)
293 {
294         if (bp->b_pages != bp->b_page_array) {
295                 kmem_free(bp->b_pages,
296                           bp->b_page_count * sizeof(struct page *));
297         }
298 }
299
/*
 *	Releases the specified buffer.
 *
 *	The modification state of any associated pages is left unchanged.
 *	The buffer must not be on any hash - use xfs_buf_rele instead for
 *	hashed and refcounted buffers.
 */
void
xfs_buf_free(
	xfs_buf_t		*bp)
{
	XB_TRACE(bp, "free", 0);

	ASSERT(list_empty(&bp->b_hash_list));

	if (bp->b_flags & _XBF_PAGE_CACHE) {
		uint		i;

		/* queue the vmap teardown before dropping page refs */
		if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
			free_address(bp->b_addr - bp->b_offset);

		for (i = 0; i < bp->b_page_count; i++)
			page_cache_release(bp->b_pages[i]);
		_xfs_buf_free_pages(bp);
	} else if (bp->b_flags & _XBF_KMEM_ALLOC) {
		 /*
		  * XXX(hch): bp->b_count_desired might be incorrect (see
		  * xfs_buf_associate_memory for details), but fortunately
		  * the Linux version of kmem_free ignores the len argument..
		  */
		kmem_free(bp->b_addr, bp->b_count_desired);
		_xfs_buf_free_pages(bp);
	}

	xfs_buf_deallocate(bp);
}
336
/*
 *	Find (or create) all page-cache pages backing the buffer and
 *	build its page list.  Pages that still need reading are left
 *	locked when an XBF_READ is pending (b_locked set); otherwise all
 *	pages are unlocked before returning.  Sets XBF_DONE when every
 *	page is already fully valid so no I/O is required.
 */
STATIC int
_xfs_buf_lookup_pages(
	xfs_buf_t		*bp,
	uint			flags)
{
	struct address_space	*mapping = bp->b_target->bt_mapping;
	size_t			blocksize = bp->b_target->bt_bsize;
	size_t			size = bp->b_count_desired;
	size_t			nbytes, offset;
	gfp_t			gfp_mask = xb_to_gfp(flags);
	unsigned short		page_count, i;
	pgoff_t			first;
	xfs_off_t		end;
	int			error;

	end = bp->b_file_offset + bp->b_buffer_length;
	page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset);

	error = _xfs_buf_get_pages(bp, page_count, flags);
	if (unlikely(error))
		return error;
	bp->b_flags |= _XBF_PAGE_CACHE;

	offset = bp->b_offset;
	first = bp->b_file_offset >> PAGE_CACHE_SHIFT;

	for (i = 0; i < bp->b_page_count; i++) {
		struct page	*page;
		uint		retries = 0;

	      retry:
		page = find_or_create_page(mapping, first + i, gfp_mask);
		if (unlikely(page == NULL)) {
			if (flags & XBF_READ_AHEAD) {
				/* readahead is best-effort: unlock what
				 * we collected so far and give up */
				bp->b_page_count = i;
				for (i = 0; i < bp->b_page_count; i++)
					unlock_page(bp->b_pages[i]);
				return -ENOMEM;
			}

			/*
			 * This could deadlock.
			 *
			 * But until all the XFS lowlevel code is revamped to
			 * handle buffer allocation failures we can't do much.
			 */
			if (!(++retries % 100))
				printk(KERN_ERR
					"XFS: possible memory allocation "
					"deadlock in %s (mode:0x%x)\n",
					__FUNCTION__, gfp_mask);

			/* kick the delwri daemon to flush buffers and
			 * free memory, then retry the allocation */
			XFS_STATS_INC(xb_page_retries);
			xfsbufd_wakeup(0, gfp_mask);
			blk_congestion_wait(WRITE, HZ/50);
			goto retry;
		}

		XFS_STATS_INC(xb_page_found);

		nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset);
		size -= nbytes;

		if (!PageUptodate(page)) {
			/* assume this page needs reading ... */
			page_count--;
			if (blocksize >= PAGE_CACHE_SIZE) {
				if (flags & XBF_READ)
					bp->b_locked = 1;
			} else if (!PagePrivate(page)) {
				/* ... unless the sub-page region bitmap
				 * says the range we need is valid */
				if (test_page_region(page, offset, nbytes))
					page_count++;
			}
		}

		bp->b_pages[i] = page;
		offset = 0;	/* only the first page has a non-zero offset */
	}

	if (!bp->b_locked) {
		/* no read pending - nobody needs the pages locked */
		for (i = 0; i < bp->b_page_count; i++)
			unlock_page(bp->b_pages[i]);
	}

	/* every page already uptodate: no I/O needed */
	if (page_count == bp->b_page_count)
		bp->b_flags |= XBF_DONE;

	XB_TRACE(bp, "lookup_pages", (long)page_count);
	return error;
}
429
/*
 *	Map buffer into kernel address-space if necessary.
 */
STATIC int
_xfs_buf_map_pages(
	xfs_buf_t		*bp,
	uint			flags)
{
	/* A single page buffer is always mappable */
	if (bp->b_page_count == 1) {
		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
		bp->b_flags |= XBF_MAPPED;
	} else if (flags & XBF_MAPPED) {
		/* drain the deferred-vunmap list before it grows too big */
		if (as_list_len > 64)
			purge_addresses();
		bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
					VM_MAP, PAGE_KERNEL);
		if (unlikely(bp->b_addr == NULL))
			return -ENOMEM;
		/* callers expect b_addr to point at b_file_offset, not
		 * at the start of the first page */
		bp->b_addr += bp->b_offset;
		bp->b_flags |= XBF_MAPPED;
	}

	return 0;
}
455
456 /*
457  *      Finding and Reading Buffers
458  */
459
/*
 *	Look up, and creates if absent, a lockable buffer for
 *	a given range of an inode.  The buffer is returned
 *	locked.  If other overlapping buffers exist, they are
 *	released before the new buffer is created and locked,
 *	which may imply that this call will block until those buffers
 *	are unlocked.  No I/O is implied by this call.
 */
xfs_buf_t *
_xfs_buf_find(
	xfs_buftarg_t		*btp,	/* block device target		*/
	xfs_off_t		ioff,	/* starting offset of range	*/
	size_t			isize,	/* length of range		*/
	xfs_buf_flags_t		flags,
	xfs_buf_t		*new_bp) /* pre-allocated buffer, or NULL */
{
	xfs_off_t		range_base;
	size_t			range_length;
	xfs_bufhash_t		*hash;
	xfs_buf_t		*bp, *n;

	range_base = (ioff << BBSHIFT);
	range_length = (isize << BBSHIFT);

	/* Check for IOs smaller than the sector size / not sector aligned */
	ASSERT(!(range_length < (1 << btp->bt_sshift)));
	ASSERT(!(range_base & (xfs_off_t)btp->bt_smask));

	hash = &btp->bt_hash[hash_long((unsigned long)ioff, btp->bt_hashshift)];

	spin_lock(&hash->bh_lock);

	list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
		ASSERT(btp == bp->b_target);
		if (bp->b_file_offset == range_base &&
		    bp->b_buffer_length == range_length) {
			/*
			 * If we look at something, bring it to the
			 * front of the list for next time.
			 */
			atomic_inc(&bp->b_hold);
			list_move(&bp->b_hash_list, &hash->bh_list);
			goto found;
		}
	}

	/* No match found */
	if (new_bp) {
		/* use the caller-supplied buffer and insert it */
		_xfs_buf_initialize(new_bp, btp, range_base,
				range_length, flags);
		new_bp->b_hash = hash;
		list_add(&new_bp->b_hash_list, &hash->bh_list);
	} else {
		XFS_STATS_INC(xb_miss_locked);
	}

	spin_unlock(&hash->bh_lock);
	return new_bp;

found:
	spin_unlock(&hash->bh_lock);

	/* Attempt to get the semaphore without sleeping first.  The hash
	 * spinlock has already been dropped above, so if the trylock
	 * fails and the caller allows blocking we can sleep in
	 * xfs_buf_lock() safely.
	 */
	if (down_trylock(&bp->b_sema)) {
		if (!(flags & XBF_TRYLOCK)) {
			/* wait for buffer ownership */
			XB_TRACE(bp, "get_lock", 0);
			xfs_buf_lock(bp);
			XFS_STATS_INC(xb_get_locked_waited);
		} else {
			/* We asked for a trylock and failed, no need
			 * to look at file offset and length here, we
			 * know that this buffer at least overlaps our
			 * buffer and is locked, therefore our buffer
			 * either does not exist, or is this buffer.
			 */
			xfs_buf_rele(bp);
			XFS_STATS_INC(xb_busy_locked);
			return NULL;
		}
	} else {
		/* trylock worked */
		XB_SET_OWNER(bp);
	}

	if (bp->b_flags & XBF_STALE) {
		ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
		/* stale buffer: keep only the mapping state */
		bp->b_flags &= XBF_MAPPED;
	}
	XB_TRACE(bp, "got_lock", 0);
	XFS_STATS_INC(xb_get_locked);
	return bp;
}
556
/*
 *	Assembles a buffer covering the specified range.
 *	Storage in memory for all portions of the buffer will be allocated,
 *	although backing storage may not be.  Returns the buffer locked,
 *	or NULL on allocation/mapping failure.
 */
xfs_buf_t *
xfs_buf_get_flags(
	xfs_buftarg_t		*target,/* target for buffer		*/
	xfs_off_t		ioff,	/* starting offset of range	*/
	size_t			isize,	/* length of range		*/
	xfs_buf_flags_t		flags)
{
	xfs_buf_t		*bp, *new_bp;
	int			error = 0, i;

	/* speculatively allocate a buffer; _xfs_buf_find() only uses it
	 * when no matching buffer is already cached */
	new_bp = xfs_buf_allocate(flags);
	if (unlikely(!new_bp))
		return NULL;

	bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
	if (bp == new_bp) {
		/* cache miss: populate the new buffer's pages */
		error = _xfs_buf_lookup_pages(bp, flags);
		if (error)
			goto no_buffer;
	} else {
		/* cache hit (or trylock failure): drop the spare */
		xfs_buf_deallocate(new_bp);
		if (unlikely(bp == NULL))
			return NULL;
	}

	for (i = 0; i < bp->b_page_count; i++)
		mark_page_accessed(bp->b_pages[i]);

	if (!(bp->b_flags & XBF_MAPPED)) {
		error = _xfs_buf_map_pages(bp, flags);
		if (unlikely(error)) {
			printk(KERN_WARNING "%s: failed to map pages\n",
					__FUNCTION__);
			goto no_buffer;
		}
	}

	XFS_STATS_INC(xb_get);

	/*
	 * Always fill in the block number now, the mapped cases can do
	 * their own overlay of this later.
	 */
	bp->b_bn = ioff;
	bp->b_count_desired = bp->b_buffer_length;

	XB_TRACE(bp, "get", (unsigned long)flags);
	return bp;

 no_buffer:
	if (flags & (XBF_LOCK | XBF_TRYLOCK))
		xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
	return NULL;
}
617
/*
 *	Get the buffer for the given range and start a read into it
 *	unless it is already valid.  For async (readahead) callers an
 *	already-valid buffer is dropped again, since nobody will wait
 *	for it.  Returns the buffer, or NULL.
 */
xfs_buf_t *
xfs_buf_read_flags(
	xfs_buftarg_t		*target,
	xfs_off_t		ioff,
	size_t			isize,
	xfs_buf_flags_t		flags)
{
	xfs_buf_t		*bp;

	flags |= XBF_READ;

	bp = xfs_buf_get_flags(target, ioff, isize, flags);
	if (bp) {
		if (!XFS_BUF_ISDONE(bp)) {
			/* not valid yet - start the read I/O */
			XB_TRACE(bp, "read", (unsigned long)flags);
			XFS_STATS_INC(xb_get_read);
			xfs_buf_iostart(bp, flags);
		} else if (flags & XBF_ASYNC) {
			XB_TRACE(bp, "read_async", (unsigned long)flags);
			/*
			 * Read ahead call which is already satisfied,
			 * drop the buffer
			 */
			goto no_buffer;
		} else {
			XB_TRACE(bp, "read_done", (unsigned long)flags);
			/* We do not want read in the flags */
			bp->b_flags &= ~XBF_READ;
		}
	}

	return bp;

 no_buffer:
	if (flags & (XBF_LOCK | XBF_TRYLOCK))
		xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
	return NULL;
}
657
658 /*
659  *      If we are not low on memory then do the readahead in a deadlock
660  *      safe manner.
661  */
662 void
663 xfs_buf_readahead(
664         xfs_buftarg_t           *target,
665         xfs_off_t               ioff,
666         size_t                  isize,
667         xfs_buf_flags_t         flags)
668 {
669         struct backing_dev_info *bdi;
670
671         bdi = target->bt_mapping->backing_dev_info;
672         if (bdi_read_congested(bdi))
673                 return;
674
675         flags |= (XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD);
676         xfs_buf_read_flags(target, ioff, isize, flags);
677 }
678
679 xfs_buf_t *
680 xfs_buf_get_empty(
681         size_t                  len,
682         xfs_buftarg_t           *target)
683 {
684         xfs_buf_t               *bp;
685
686         bp = xfs_buf_allocate(0);
687         if (bp)
688                 _xfs_buf_initialize(bp, target, 0, len, 0);
689         return bp;
690 }
691
692 static inline struct page *
693 mem_to_page(
694         void                    *addr)
695 {
696         if (((unsigned long)addr < VMALLOC_START) ||
697             ((unsigned long)addr >= VMALLOC_END)) {
698                 return virt_to_page(addr);
699         } else {
700                 return vmalloc_to_page(addr);
701         }
702 }
703
/*
 * Attach caller-supplied memory to a buffer: build the page list
 * covering [mem, mem + len) and mark the buffer mapped at 'mem'.
 * The memory remains owned by the caller.  Returns 0 or negative errno.
 */
int
xfs_buf_associate_memory(
	xfs_buf_t		*bp,
	void			*mem,
	size_t			len)
{
	int			rval;
	int			i = 0;
	size_t			ptr;
	size_t			end, end_cur;
	off_t			offset;
	int			page_count;

	page_count = PAGE_CACHE_ALIGN(len) >> PAGE_CACHE_SHIFT;
	offset = (off_t) mem - ((off_t)mem & PAGE_CACHE_MASK);
	/* an unaligned start can push the range into one extra page */
	if (offset && (len > PAGE_CACHE_SIZE))
		page_count++;

	/* Free any previous set of page pointers */
	if (bp->b_pages)
		_xfs_buf_free_pages(bp);

	bp->b_pages = NULL;
	bp->b_addr = mem;

	rval = _xfs_buf_get_pages(bp, page_count, 0);
	if (rval)
		return rval;

	bp->b_offset = offset;
	ptr = (size_t) mem & PAGE_CACHE_MASK;	/* page-aligned start */
	end = PAGE_CACHE_ALIGN((size_t) mem + len);
	end_cur = end;
	/* set up first page */
	bp->b_pages[0] = mem_to_page(mem);

	ptr += PAGE_CACHE_SIZE;
	bp->b_page_count = ++i;
	while (ptr < end) {
		bp->b_pages[i] = mem_to_page((void *)ptr);
		bp->b_page_count = ++i;
		ptr += PAGE_CACHE_SIZE;
	}
	bp->b_locked = 0;

	/*
	 * NOTE(review): b_count_desired is the exact byte count here,
	 * not a page-rounded length - see the XXX(hch) comment in
	 * xfs_buf_free() about this being tolerated downstream.
	 */
	bp->b_count_desired = bp->b_buffer_length = len;
	bp->b_flags |= XBF_MAPPED;

	return 0;
}
754
/*
 * Allocate a buffer backed by kmem_alloc()ed memory rather than
 * page-cache pages (for data with no disk address).  The allocation is
 * retried with doubled size until its start is aligned to the target's
 * sector mask.  Returns an unlocked buffer, or NULL on failure.
 */
xfs_buf_t *
xfs_buf_get_noaddr(
	size_t			len,
	xfs_buftarg_t		*target)
{
	size_t			malloc_len = len;
	xfs_buf_t		*bp;
	void			*data;
	int			error;

	bp = xfs_buf_allocate(0);
	if (unlikely(bp == NULL))
		goto fail;
	_xfs_buf_initialize(bp, target, 0, len, 0);

 try_again:
	data = kmem_alloc(malloc_len, KM_SLEEP | KM_MAYFAIL);
	if (unlikely(data == NULL))
		goto fail_free_buf;

	/* check whether alignment matches.. */
	if ((__psunsigned_t)data !=
	    ((__psunsigned_t)data & ~target->bt_smask)) {
		/* .. else double the size and try again */
		kmem_free(data, malloc_len);
		malloc_len <<= 1;
		goto try_again;
	}

	error = xfs_buf_associate_memory(bp, data, len);
	if (error)
		goto fail_free_mem;
	/* tells xfs_buf_free() to kmem_free() the data */
	bp->b_flags |= _XBF_KMEM_ALLOC;

	/* _xfs_buf_initialize() returned the buffer locked */
	xfs_buf_unlock(bp);

	XB_TRACE(bp, "no_daddr", data);
	return bp;
 fail_free_mem:
	kmem_free(data, malloc_len);
 fail_free_buf:
	xfs_buf_free(bp);
 fail:
	return NULL;
}
800
/*
 *	Increment reference count on buffer, to hold the buffer concurrently
 *	with another thread which may release (free) the buffer asynchronously.
 *	Must hold the buffer already to call this function.
 */
void
xfs_buf_hold(
	xfs_buf_t		*bp)
{
	atomic_inc(&bp->b_hold);
	XB_TRACE(bp, "hold", 0);
}
813
/*
 *	Releases a hold on the specified buffer.  If the hold count
 *	drops to zero the buffer is freed, handed to its b_relse
 *	callback, or (for filesystem-managed buffers) left cached.
 */
void
xfs_buf_rele(
	xfs_buf_t		*bp)
{
	xfs_bufhash_t		*hash = bp->b_hash;

	XB_TRACE(bp, "rele", bp->b_relse);

	if (unlikely(!hash)) {
		/* unhashed buffers are freed directly; they must not
		 * have a release callback */
		ASSERT(!bp->b_relse);
		if (atomic_dec_and_test(&bp->b_hold))
			xfs_buf_free(bp);
		return;
	}

	/* only take the hash lock when this drops the last reference */
	if (atomic_dec_and_lock(&bp->b_hold, &hash->bh_lock)) {
		if (bp->b_relse) {
			/* re-take a hold and let the callback decide */
			atomic_inc(&bp->b_hold);
			spin_unlock(&hash->bh_lock);
			(*(bp->b_relse)) (bp);
		} else if (bp->b_flags & XBF_FS_MANAGED) {
			/* filesystem keeps the buffer cached at count 0 */
			spin_unlock(&hash->bh_lock);
		} else {
			ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
			list_del_init(&bp->b_hash_list);
			spin_unlock(&hash->bh_lock);
			xfs_buf_free(bp);
		}
	} else {
		/*
		 * Catch reference count leaks
		 */
		ASSERT(atomic_read(&bp->b_hold) >= 0);
	}
}
853
854
855 /*
856  *      Mutual exclusion on buffers.  Locking model:
857  *
858  *      Buffers associated with inodes for which buffer locking
859  *      is not enabled are not protected by semaphores, and are
860  *      assumed to be exclusively owned by the caller.  There is a
861  *      spinlock in the buffer, used by the caller when concurrent
862  *      access is possible.
863  */
864
865 /*
866  *      Locks a buffer object, if it is not already locked.
867  *      Note that this in no way locks the underlying pages, so it is only
868  *      useful for synchronizing concurrent use of buffer objects, not for
869  *      synchronizing independent access to the underlying pages.
870  */
871 int
872 xfs_buf_cond_lock(
873         xfs_buf_t               *bp)
874 {
875         int                     locked;
876
877         locked = down_trylock(&bp->b_sema) == 0;
878         if (locked) {
879                 XB_SET_OWNER(bp);
880         }
881         XB_TRACE(bp, "cond_lock", (long)locked);
882         return locked ? 0 : -EBUSY;
883 }
884
#if defined(DEBUG) || defined(XFS_BLI_TRACE)
/* Returns the raw value of the buffer's lock semaphore counter -
 * debug/trace use only. */
int
xfs_buf_lock_value(
	xfs_buf_t		*bp)
{
	return atomic_read(&bp->b_sema.count);
}
#endif
893
/*
 *	Locks a buffer object.
 *	Note that this in no way locks the underlying pages, so it is only
 *	useful for synchronizing concurrent use of buffer objects, not for
 *	synchronizing independent access to the underlying pages.
 */
void
xfs_buf_lock(
	xfs_buf_t		*bp)
{
	XB_TRACE(bp, "lock", 0);
	/* kick off any pending I/O first so the current holder can
	 * finish with the buffer sooner */
	if (atomic_read(&bp->b_io_remaining))
		blk_run_address_space(bp->b_target->bt_mapping);
	down(&bp->b_sema);
	XB_SET_OWNER(bp);
	XB_TRACE(bp, "locked", 0);
}
911
912 /*
913  *      Releases the lock on the buffer object.
914  *      If the buffer is marked delwri but is not queued, do so before we
915  *      unlock the buffer as we need to set flags correctly.  We also need to
916  *      take a reference for the delwri queue because the unlocker is going to
917  *      drop their's and they don't know we just queued it.
918  */
void
xfs_buf_unlock(
        xfs_buf_t               *bp)
{
        /*
         * Delwri buffer not yet on the delwri queue: queue it before
         * unlocking.  Take an extra hold for the queue, since the caller
         * is about to drop its own reference without knowing we queued.
         */
        if ((bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)) == XBF_DELWRI) {
                atomic_inc(&bp->b_hold);
                bp->b_flags |= XBF_ASYNC;
                xfs_buf_delwri_queue(bp, 0);
        }

        XB_CLEAR_OWNER(bp);
        up(&bp->b_sema);
        XB_TRACE(bp, "unlock", 0);
}
933
934
935 /*
936  *      Pinning Buffer Storage in Memory
937  *      Ensure that no attempt to force a buffer to disk will succeed.
938  */
void
xfs_buf_pin(
        xfs_buf_t               *bp)
{
        /* Bump the pin count; xfs_buf_wait_unpin() blocks while it is set. */
        atomic_inc(&bp->b_pin_count);
        XB_TRACE(bp, "pin", (long)bp->b_pin_count.counter);
}
946
void
xfs_buf_unpin(
        xfs_buf_t               *bp)
{
        /* Wake waiters in xfs_buf_wait_unpin() once the last pin drops. */
        if (atomic_dec_and_test(&bp->b_pin_count))
                wake_up_all(&bp->b_waiters);
        XB_TRACE(bp, "unpin", (long)bp->b_pin_count.counter);
}
955
/*
 *      Return non-zero if the buffer is currently pinned in memory.
 */
int
xfs_buf_ispin(
        xfs_buf_t               *bp)
{
        return atomic_read(&bp->b_pin_count);
}
962
/*
 *      Block until the buffer's pin count drops to zero.  Uses the
 *      classic add-to-waitqueue / re-check / schedule pattern so a
 *      wakeup between the check and the sleep is not lost.
 */
STATIC void
xfs_buf_wait_unpin(
        xfs_buf_t               *bp)
{
        DECLARE_WAITQUEUE       (wait, current);

        /* Fast path: not pinned, nothing to wait for. */
        if (atomic_read(&bp->b_pin_count) == 0)
                return;

        add_wait_queue(&bp->b_waiters, &wait);
        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (atomic_read(&bp->b_pin_count) == 0)
                        break;
                /* Push outstanding I/O along so the unpin can happen. */
                if (atomic_read(&bp->b_io_remaining))
                        blk_run_address_space(bp->b_target->bt_mapping);
                schedule();
        }
        remove_wait_queue(&bp->b_waiters, &wait);
        set_current_state(TASK_RUNNING);
}
984
985 /*
986  *      Buffer Utility Routines
987  */
988
989 STATIC void
990 xfs_buf_iodone_work(
991         void                    *v)
992 {
993         xfs_buf_t               *bp = (xfs_buf_t *)v;
994
995         if (bp->b_iodone)
996                 (*(bp->b_iodone))(bp);
997         else if (bp->b_flags & XBF_ASYNC)
998                 xfs_buf_relse(bp);
999 }
1000
/*
 *      Mark I/O on a buffer complete.  If the buffer has a completion
 *      callback or is async, run (or queue) the completion work;
 *      otherwise wake the synchronous waiter in xfs_buf_iowait().
 */
void
xfs_buf_ioend(
        xfs_buf_t               *bp,
        int                     schedule)
{
        bp->b_flags &= ~(XBF_READ | XBF_WRITE);
        if (bp->b_error == 0)
                bp->b_flags |= XBF_DONE;

        XB_TRACE(bp, "iodone", bp->b_iodone);

        if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
                if (schedule) {
                        /* Defer the callback to process context. */
                        INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work, bp);
                        queue_work(xfslogd_workqueue, &bp->b_iodone_work);
                } else {
                        xfs_buf_iodone_work(bp);
                }
        } else {
                up(&bp->b_iodonesema);
        }
}
1023
/*
 *      Record an I/O error on the buffer.  b_error is an unsigned short,
 *      hence the range assert.
 */
void
xfs_buf_ioerror(
        xfs_buf_t               *bp,
        int                     error)
{
        ASSERT(error >= 0 && error <= 0xffff);
        bp->b_error = (unsigned short)error;
        XB_TRACE(bp, "ioerror", (unsigned long)error);
}
1033
1034 /*
1035  *      Initiate I/O on a buffer, based on the flags supplied.
1036  *      The b_iodone routine in the buffer supplied will only be called
1037  *      when all of the subsidiary I/O requests, if any, have been completed.
1038  */
int
xfs_buf_iostart(
        xfs_buf_t               *bp,
        xfs_buf_flags_t         flags)
{
        int                     status = 0;

        XB_TRACE(bp, "iostart", (unsigned long)flags);

        /*
         * Delayed writes are not issued here; queue the buffer for the
         * xfsbufd daemon instead.  The queue call unlocks the buffer
         * for us (unlock argument == 1).
         */
        if (flags & XBF_DELWRI) {
                bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC);
                bp->b_flags |= flags & (XBF_DELWRI | XBF_ASYNC);
                xfs_buf_delwri_queue(bp, 1);
                return status;
        }

        /* Carry over only the I/O related flags from the caller. */
        bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC | XBF_DELWRI | \
                        XBF_READ_AHEAD | _XBF_RUN_QUEUES);
        bp->b_flags |= flags & (XBF_READ | XBF_WRITE | XBF_ASYNC | \
                        XBF_READ_AHEAD | _XBF_RUN_QUEUES);

        BUG_ON(bp->b_bn == XFS_BUF_DADDR_NULL);

        /* For writes allow an alternate strategy routine to precede
         * the actual I/O request (which may not be issued at all in
         * a shutdown situation, for example).
         */
        status = (flags & XBF_WRITE) ?
                xfs_buf_iostrategy(bp) : xfs_buf_iorequest(bp);

        /* Wait for I/O if we are not an async request.
         * Note: async I/O request completion will release the buffer,
         * and that can already be done by this point.  So using the
         * buffer pointer from here on, after async I/O, is invalid.
         */
        if (!status && !(flags & XBF_ASYNC))
                status = xfs_buf_iowait(bp);

        return status;
}
1079
1080 STATIC __inline__ int
1081 _xfs_buf_iolocked(
1082         xfs_buf_t               *bp)
1083 {
1084         ASSERT(bp->b_flags & (XBF_READ | XBF_WRITE));
1085         if (bp->b_flags & XBF_READ)
1086                 return bp->b_locked;
1087         return 0;
1088 }
1089
1090 STATIC __inline__ void
1091 _xfs_buf_ioend(
1092         xfs_buf_t               *bp,
1093         int                     schedule)
1094 {
1095         if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
1096                 bp->b_locked = 0;
1097                 xfs_buf_ioend(bp, schedule);
1098         }
1099 }
1100
/*
 *      bio completion handler.  Updates per-page state for the pages
 *      covered by the bio, then drops one outstanding-I/O reference on
 *      the buffer.
 */
STATIC int
xfs_buf_bio_end_io(
        struct bio              *bio,
        unsigned int            bytes_done,
        int                     error)
{
        xfs_buf_t               *bp = (xfs_buf_t *)bio->bi_private;
        unsigned int            blocksize = bp->b_target->bt_bsize;
        struct bio_vec          *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

        /* Partial completion; wait until the whole bio has finished. */
        if (bio->bi_size)
                return 1;

        if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
                bp->b_error = EIO;

        /* Walk the bio_vec array backwards, fixing up page state. */
        do {
                struct page     *page = bvec->bv_page;

                if (unlikely(bp->b_error)) {
                        if (bp->b_flags & XBF_READ)
                                ClearPageUptodate(page);
                        SetPageError(page);
                } else if (blocksize >= PAGE_CACHE_SIZE) {
                        SetPageUptodate(page);
                } else if (!PagePrivate(page) &&
                                (bp->b_flags & _XBF_PAGE_CACHE)) {
                        /* Sub-page block size: record the valid region. */
                        set_page_region(page, bvec->bv_offset, bvec->bv_len);
                }

                if (--bvec >= bio->bi_io_vec)
                        prefetchw(&bvec->bv_page->flags);

                /* Reads with locked pages unlock them as they complete. */
                if (_xfs_buf_iolocked(bp)) {
                        unlock_page(page);
                }
        } while (bvec >= bio->bi_io_vec);

        _xfs_buf_ioend(bp, 1);
        bio_put(bio);
        return 0;
}
1143
/*
 *      Map the buffer's pages into one or more bios and submit them.
 *      Each bio takes one reference on b_io_remaining, dropped from
 *      xfs_buf_bio_end_io() as the bios complete.
 */
STATIC void
_xfs_buf_ioapply(
        xfs_buf_t               *bp)
{
        int                     i, rw, map_i, total_nr_pages, nr_pages;
        struct bio              *bio;
        int                     offset = bp->b_offset;
        int                     size = bp->b_count_desired;
        sector_t                sector = bp->b_bn;
        unsigned int            blocksize = bp->b_target->bt_bsize;
        int                     locking = _xfs_buf_iolocked(bp);

        total_nr_pages = bp->b_page_count;
        map_i = 0;

        /* _XBF_RUN_QUEUES is a one-shot request for synchronous-class I/O. */
        if (bp->b_flags & _XBF_RUN_QUEUES) {
                bp->b_flags &= ~_XBF_RUN_QUEUES;
                rw = (bp->b_flags & XBF_READ) ? READ_SYNC : WRITE_SYNC;
        } else {
                rw = (bp->b_flags & XBF_READ) ? READ : WRITE;
        }

        /* Ordered writes are issued as barriers; reads cannot be ordered. */
        if (bp->b_flags & XBF_ORDERED) {
                ASSERT(!(bp->b_flags & XBF_READ));
                rw = WRITE_BARRIER;
        }

        /* Special code path for reading a sub page size buffer in --
         * we populate up the whole page, and hence the other metadata
         * in the same page.  This optimization is only valid when the
         * filesystem block size is not smaller than the page size.
         */
        if ((bp->b_buffer_length < PAGE_CACHE_SIZE) &&
            (bp->b_flags & XBF_READ) && locking &&
            (blocksize >= PAGE_CACHE_SIZE)) {
                bio = bio_alloc(GFP_NOIO, 1);

                bio->bi_bdev = bp->b_target->bt_bdev;
                /* Round the start sector back to the page boundary. */
                bio->bi_sector = sector - (offset >> BBSHIFT);
                bio->bi_end_io = xfs_buf_bio_end_io;
                bio->bi_private = bp;

                bio_add_page(bio, bp->b_pages[0], PAGE_CACHE_SIZE, 0);
                size = 0;

                atomic_inc(&bp->b_io_remaining);

                goto submit_io;
        }

        /* Lock down the pages which we need to for the request */
        if (locking && (bp->b_flags & XBF_WRITE) && (bp->b_locked == 0)) {
                for (i = 0; size; i++) {
                        int             nbytes = PAGE_CACHE_SIZE - offset;
                        struct page     *page = bp->b_pages[i];

                        if (nbytes > size)
                                nbytes = size;

                        lock_page(page);

                        size -= nbytes;
                        offset = 0;
                }
                /* Reset for the mapping loop below. */
                offset = bp->b_offset;
                size = bp->b_count_desired;
        }

next_chunk:
        atomic_inc(&bp->b_io_remaining);
        nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
        if (nr_pages > total_nr_pages)
                nr_pages = total_nr_pages;

        bio = bio_alloc(GFP_NOIO, nr_pages);
        bio->bi_bdev = bp->b_target->bt_bdev;
        bio->bi_sector = sector;
        bio->bi_end_io = xfs_buf_bio_end_io;
        bio->bi_private = bp;

        /* Add pages until the bio is full or the buffer is covered. */
        for (; size && nr_pages; nr_pages--, map_i++) {
                int     rbytes, nbytes = PAGE_CACHE_SIZE - offset;

                if (nbytes > size)
                        nbytes = size;

                rbytes = bio_add_page(bio, bp->b_pages[map_i], nbytes, offset);
                if (rbytes < nbytes)
                        break;

                offset = 0;
                sector += nbytes >> BBSHIFT;
                size -= nbytes;
                total_nr_pages--;
        }

submit_io:
        if (likely(bio->bi_size)) {
                submit_bio(rw, bio);
                /* More of the buffer left to map: build another bio. */
                if (size)
                        goto next_chunk;
        } else {
                /* Could not add even one page to the bio. */
                bio_put(bio);
                xfs_buf_ioerror(bp, EIO);
        }
}
1250
/*
 *      Issue the I/O described by the buffer's flags.  Delwri buffers
 *      are queued rather than issued.  Holds an extra buffer reference
 *      across submission so async completion cannot free the buffer
 *      underneath us.
 */
int
xfs_buf_iorequest(
        xfs_buf_t               *bp)
{
        XB_TRACE(bp, "iorequest", 0);

        if (bp->b_flags & XBF_DELWRI) {
                xfs_buf_delwri_queue(bp, 1);
                return 0;
        }

        /* Writes must wait until the buffer is no longer pinned. */
        if (bp->b_flags & XBF_WRITE) {
                xfs_buf_wait_unpin(bp);
        }

        xfs_buf_hold(bp);

        /* Set the count to 1 initially, this will stop an I/O
         * completion callout which happens before we have started
         * all the I/O from calling xfs_buf_ioend too early.
         */
        atomic_set(&bp->b_io_remaining, 1);
        _xfs_buf_ioapply(bp);
        _xfs_buf_ioend(bp, 0);

        xfs_buf_rele(bp);
        return 0;
}
1279
1280 /*
1281  *      Waits for I/O to complete on the buffer supplied.
1282  *      It returns immediately if no I/O is pending.
1283  *      It returns the I/O error code, if any, or 0 if there was no error.
1284  */
int
xfs_buf_iowait(
        xfs_buf_t               *bp)
{
        XB_TRACE(bp, "iowait", 0);
        /* Push device queues so the I/O we are about to sleep on moves. */
        if (atomic_read(&bp->b_io_remaining))
                blk_run_address_space(bp->b_target->bt_mapping);
        down(&bp->b_iodonesema);
        XB_TRACE(bp, "iowaited", (long)bp->b_error);
        return bp->b_error;
}
1296
1297 xfs_caddr_t
1298 xfs_buf_offset(
1299         xfs_buf_t               *bp,
1300         size_t                  offset)
1301 {
1302         struct page             *page;
1303
1304         if (bp->b_flags & XBF_MAPPED)
1305                 return XFS_BUF_PTR(bp) + offset;
1306
1307         offset += bp->b_offset;
1308         page = bp->b_pages[offset >> PAGE_CACHE_SHIFT];
1309         return (xfs_caddr_t)page_address(page) + (offset & (PAGE_CACHE_SIZE-1));
1310 }
1311
1312 /*
1313  *      Move data into or out of a buffer.
1314  */
1315 void
1316 xfs_buf_iomove(
1317         xfs_buf_t               *bp,    /* buffer to process            */
1318         size_t                  boff,   /* starting buffer offset       */
1319         size_t                  bsize,  /* length to copy               */
1320         caddr_t                 data,   /* data address                 */
1321         xfs_buf_rw_t            mode)   /* read/write/zero flag         */
1322 {
1323         size_t                  bend, cpoff, csize;
1324         struct page             *page;
1325
1326         bend = boff + bsize;
1327         while (boff < bend) {
1328                 page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)];
1329                 cpoff = xfs_buf_poff(boff + bp->b_offset);
1330                 csize = min_t(size_t,
1331                               PAGE_CACHE_SIZE-cpoff, bp->b_count_desired-boff);
1332
1333                 ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE));
1334
1335                 switch (mode) {
1336                 case XBRW_ZERO:
1337                         memset(page_address(page) + cpoff, 0, csize);
1338                         break;
1339                 case XBRW_READ:
1340                         memcpy(data, page_address(page) + cpoff, csize);
1341                         break;
1342                 case XBRW_WRITE:
1343                         memcpy(page_address(page) + cpoff, data, csize);
1344                 }
1345
1346                 boff += csize;
1347                 data += csize;
1348         }
1349 }
1350
1351 /*
1352  *      Handling of buffer targets (buftargs).
1353  */
1354
1355 /*
1356  *      Wait for any bufs with callbacks that have been submitted but
1357  *      have not yet returned... walk the hash list for the target.
1358  */
void
xfs_wait_buftarg(
        xfs_buftarg_t   *btp)
{
        xfs_buf_t       *bp, *n;
        xfs_bufhash_t   *hash;
        uint            i;

        /* Walk every hash bucket for this target. */
        for (i = 0; i < (1 << btp->bt_hashshift); i++) {
                hash = &btp->bt_hash[i];
again:
                spin_lock(&hash->bh_lock);
                list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
                        ASSERT(btp == bp->b_target);
                        /*
                         * Any buffer that is not FS-managed should still
                         * drain; drop the lock, back off, and rescan the
                         * bucket from the start.
                         */
                        if (!(bp->b_flags & XBF_FS_MANAGED)) {
                                spin_unlock(&hash->bh_lock);
                                /*
                                 * Catch superblock reference count leaks
                                 * immediately
                                 */
                                BUG_ON(bp->b_bn == 0);
                                delay(100);
                                goto again;
                        }
                }
                spin_unlock(&hash->bh_lock);
        }
}
1387
1388 /*
1389  *      Allocate buffer hash table for a given target.
1390  *      For devices containing metadata (i.e. not the log/realtime devices)
1391  *      we need to allocate a much larger hash table.
1392  */
1393 STATIC void
1394 xfs_alloc_bufhash(
1395         xfs_buftarg_t           *btp,
1396         int                     external)
1397 {
1398         unsigned int            i;
1399
1400         btp->bt_hashshift = external ? 3 : 8;   /* 8 or 256 buckets */
1401         btp->bt_hashmask = (1 << btp->bt_hashshift) - 1;
1402         btp->bt_hash = kmem_zalloc((1 << btp->bt_hashshift) *
1403                                         sizeof(xfs_bufhash_t), KM_SLEEP);
1404         for (i = 0; i < (1 << btp->bt_hashshift); i++) {
1405                 spin_lock_init(&btp->bt_hash[i].bh_lock);
1406                 INIT_LIST_HEAD(&btp->bt_hash[i].bh_list);
1407         }
1408 }
1409
/*
 *      Free the buffer hash table allocated by xfs_alloc_bufhash().
 */
STATIC void
xfs_free_bufhash(
        xfs_buftarg_t           *btp)
{
        kmem_free(btp->bt_hash, (1<<btp->bt_hashshift) * sizeof(xfs_bufhash_t));
        btp->bt_hash = NULL;
}
1417
/*
 *      buftarg list for delwrite queue processing, protected by
 *      xfs_buftarg_lock; walked by xfsbufd_wakeup() under memory pressure.
 */
STATIC LIST_HEAD(xfs_buftarg_list);
STATIC DEFINE_SPINLOCK(xfs_buftarg_lock);
1423
/*
 *      Add a buffer target to the global list so the shaker can find it.
 */
STATIC void
xfs_register_buftarg(
        xfs_buftarg_t           *btp)
{
        spin_lock(&xfs_buftarg_lock);
        list_add(&btp->bt_list, &xfs_buftarg_list);
        spin_unlock(&xfs_buftarg_lock);
}
1432
/*
 *      Remove a buffer target from the global list.
 */
STATIC void
xfs_unregister_buftarg(
        xfs_buftarg_t           *btp)
{
        spin_lock(&xfs_buftarg_lock);
        list_del(&btp->bt_list);
        spin_unlock(&xfs_buftarg_lock);
}
1441
/*
 *      Tear down a buffer target: flush remaining delwri buffers, free
 *      the hash table and mapping inode, then stop the per-target
 *      xfsbufd thread and free the structure itself.
 */
void
xfs_free_buftarg(
        xfs_buftarg_t           *btp,
        int                     external)
{
        xfs_flush_buftarg(btp, 1);
        /* External (log/realtime) devices were opened by us; put them. */
        if (external)
                xfs_blkdev_put(btp->bt_bdev);
        xfs_free_bufhash(btp);
        iput(btp->bt_mapping->host);

        /* Unregister the buftarg first so that we don't get a
         * wakeup finding a non-existent task
         */
        xfs_unregister_buftarg(btp);
        kthread_stop(btp->bt_task);

        kmem_free(btp, sizeof(*btp));
}
1461
/*
 *      Record the block and sector geometry on the target and push the
 *      sector size down to the block device.  Returns 0 on success or
 *      a positive errno (XFS convention) on failure.
 */
STATIC int
xfs_setsize_buftarg_flags(
        xfs_buftarg_t           *btp,
        unsigned int            blocksize,
        unsigned int            sectorsize,
        int                     verbose)
{
        btp->bt_bsize = blocksize;
        /* Cache log2 and mask of the sector size for fast alignment. */
        btp->bt_sshift = ffs(sectorsize) - 1;
        btp->bt_smask = sectorsize - 1;

        if (set_blocksize(btp->bt_bdev, sectorsize)) {
                printk(KERN_WARNING
                        "XFS: Cannot set_blocksize to %u on device %s\n",
                        sectorsize, XFS_BUFTARG_NAME(btp));
                return EINVAL;
        }

        /* Warn about small sectors; sub-page state tracking needs one
         * bit per sector in a long, hence this threshold. */
        if (verbose &&
            (PAGE_CACHE_SIZE / BITS_PER_LONG) > sectorsize) {
                printk(KERN_WARNING
                        "XFS: %u byte sectors in use on device %s.  "
                        "This is suboptimal; %u or greater is ideal.\n",
                        sectorsize, XFS_BUFTARG_NAME(btp),
                        (unsigned int)PAGE_CACHE_SIZE / BITS_PER_LONG);
        }

        return 0;
}
1491
1492 /*
1493  *      When allocating the initial buffer target we have not yet
1494  *      read in the superblock, so don't know what sized sectors
 *      are being used at this early stage.  Play safe.
1496  */
STATIC int
xfs_setsize_buftarg_early(
        xfs_buftarg_t           *btp,
        struct block_device     *bdev)
{
        /* Use the device's hardware sector size until the superblock
         * tells us otherwise; warnings suppressed (verbose == 0). */
        return xfs_setsize_buftarg_flags(btp,
                        PAGE_CACHE_SIZE, bdev_hardsect_size(bdev), 0);
}
1505
/*
 *      Set the final block/sector geometry once the superblock is known;
 *      verbose variant of xfs_setsize_buftarg_flags().
 */
int
xfs_setsize_buftarg(
        xfs_buftarg_t           *btp,
        unsigned int            blocksize,
        unsigned int            sectorsize)
{
        return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
}
1514
/*
 *      Create a private address space (backed by an anonymous inode on
 *      the block device's superblock) for this target's buffer pages.
 *      Returns 0 on success or a positive errno.  The inode is released
 *      via iput() in xfs_free_buftarg().
 */
STATIC int
xfs_mapping_buftarg(
        xfs_buftarg_t           *btp,
        struct block_device     *bdev)
{
        struct backing_dev_info *bdi;
        struct inode            *inode;
        struct address_space    *mapping;
        static struct address_space_operations mapping_aops = {
                .sync_page = block_sync_page,
                .migratepage = fail_migrate_page,
        };

        inode = new_inode(bdev->bd_inode->i_sb);
        if (!inode) {
                printk(KERN_WARNING
                        "XFS: Cannot allocate mapping inode for device %s\n",
                        XFS_BUFTARG_NAME(btp));
                return ENOMEM;
        }
        inode->i_mode = S_IFBLK;
        inode->i_bdev = bdev;
        inode->i_rdev = bdev->bd_dev;
        bdi = blk_get_backing_dev_info(bdev);
        if (!bdi)
                bdi = &default_backing_dev_info;
        mapping = &inode->i_data;
        mapping->a_ops = &mapping_aops;
        mapping->backing_dev_info = bdi;
        /* Buffer pages must not recurse into the filesystem. */
        mapping_set_gfp_mask(mapping, GFP_NOFS);
        btp->bt_mapping = mapping;
        return 0;
}
1548
/*
 *      Set up the per-target delayed write queue and start its xfsbufd
 *      thread.  Registers the target on the global list on success.
 *      Returns 0 or a negative errno from kthread_run().
 */
STATIC int
xfs_alloc_delwrite_queue(
        xfs_buftarg_t           *btp)
{
        int     error = 0;

        INIT_LIST_HEAD(&btp->bt_list);
        INIT_LIST_HEAD(&btp->bt_delwrite_queue);
        spinlock_init(&btp->bt_delwrite_lock, "delwri_lock");
        btp->bt_flags = 0;
        btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd");
        if (IS_ERR(btp->bt_task)) {
                error = PTR_ERR(btp->bt_task);
                goto out_error;
        }
        xfs_register_buftarg(btp);
out_error:
        return error;
}
1568
1569 xfs_buftarg_t *
1570 xfs_alloc_buftarg(
1571         struct block_device     *bdev,
1572         int                     external)
1573 {
1574         xfs_buftarg_t           *btp;
1575
1576         btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);
1577
1578         btp->bt_dev =  bdev->bd_dev;
1579         btp->bt_bdev = bdev;
1580         if (xfs_setsize_buftarg_early(btp, bdev))
1581                 goto error;
1582         if (xfs_mapping_buftarg(btp, bdev))
1583                 goto error;
1584         if (xfs_alloc_delwrite_queue(btp))
1585                 goto error;
1586         xfs_alloc_bufhash(btp, external);
1587         return btp;
1588
1589 error:
1590         kmem_free(btp, sizeof(*btp));
1591         return NULL;
1592 }
1593
1594
1595 /*
1596  *      Delayed write buffer handling
1597  */
/*
 *      Place a buffer on its target's delayed write queue (at the tail,
 *      restarting its aging timer).  The queue owns one buffer hold;
 *      when @unlock is set the caller's hold and lock are transferred
 *      to the queue.
 */
STATIC void
xfs_buf_delwri_queue(
        xfs_buf_t               *bp,
        int                     unlock)
{
        struct list_head        *dwq = &bp->b_target->bt_delwrite_queue;
        spinlock_t              *dwlk = &bp->b_target->bt_delwrite_lock;

        XB_TRACE(bp, "delwri_q", (long)unlock);
        ASSERT((bp->b_flags&(XBF_DELWRI|XBF_ASYNC)) == (XBF_DELWRI|XBF_ASYNC));

        spin_lock(dwlk);
        /* If already in the queue, dequeue and place at tail */
        if (!list_empty(&bp->b_list)) {
                ASSERT(bp->b_flags & _XBF_DELWRI_Q);
                /* The queue already holds a reference; drop the extra one. */
                if (unlock)
                        atomic_dec(&bp->b_hold);
                list_del(&bp->b_list);
        }

        bp->b_flags |= _XBF_DELWRI_Q;
        list_add_tail(&bp->b_list, dwq);
        /* Timestamp for the aging check in xfsbufd(). */
        bp->b_queuetime = jiffies;
        spin_unlock(dwlk);

        if (unlock)
                xfs_buf_unlock(bp);
}
1626
/*
 *      Remove a buffer from its target's delayed write queue, clearing
 *      the delwri flags and dropping the queue's buffer reference if it
 *      was actually queued.
 */
void
xfs_buf_delwri_dequeue(
        xfs_buf_t               *bp)
{
        spinlock_t              *dwlk = &bp->b_target->bt_delwrite_lock;
        int                     dequeued = 0;

        spin_lock(dwlk);
        if ((bp->b_flags & XBF_DELWRI) && !list_empty(&bp->b_list)) {
                ASSERT(bp->b_flags & _XBF_DELWRI_Q);
                list_del_init(&bp->b_list);
                dequeued = 1;
        }
        bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
        spin_unlock(dwlk);

        /* Drop the queue's reference outside the lock. */
        if (dequeued)
                xfs_buf_rele(bp);

        XB_TRACE(bp, "delwri_dq", (long)dequeued);
}
1648
/*
 *      Wait for all pending work items on the given workqueue to finish;
 *      used to drain completion work before flushing a target.
 */
STATIC void
xfs_buf_runall_queues(
        struct workqueue_struct *queue)
{
        flush_workqueue(queue);
}
1655
1656 STATIC int
1657 xfsbufd_wakeup(
1658         int                     priority,
1659         gfp_t                   mask)
1660 {
1661         xfs_buftarg_t           *btp;
1662
1663         spin_lock(&xfs_buftarg_lock);
1664         list_for_each_entry(btp, &xfs_buftarg_list, bt_list) {
1665                 if (test_bit(XBT_FORCE_SLEEP, &btp->bt_flags))
1666                         continue;
1667                 set_bit(XBT_FORCE_FLUSH, &btp->bt_flags);
1668                 wake_up_process(btp->bt_task);
1669         }
1670         spin_unlock(&xfs_buftarg_lock);
1671         return 0;
1672 }
1673
/*
 *      Per-target delayed write daemon.  Periodically walks the delwri
 *      queue, pulling off buffers that are unpinned, lockable and old
 *      enough (or everything when a forced flush was requested), and
 *      issues them for write.
 */
STATIC int
xfsbufd(
        void                    *data)
{
        struct list_head        tmp;
        unsigned long           age;
        xfs_buftarg_t           *target = (xfs_buftarg_t *)data;
        xfs_buf_t               *bp, *n;
        struct list_head        *dwq = &target->bt_delwrite_queue;
        spinlock_t              *dwlk = &target->bt_delwrite_lock;

        /* We write out dirty data, so must not be throttled ourselves. */
        current->flags |= PF_MEMALLOC;

        INIT_LIST_HEAD(&tmp);
        do {
                /*
                 * XBT_FORCE_SLEEP tells xfsbufd_wakeup() not to prod us
                 * while we are in the refrigerator for a system freeze.
                 */
                if (unlikely(freezing(current))) {
                        set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
                        refrigerator();
                } else {
                        clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
                }

                schedule_timeout_interruptible(
                        xfs_buf_timer_centisecs * msecs_to_jiffies(10));

                age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
                spin_lock(dwlk);
                list_for_each_entry_safe(bp, n, dwq, b_list) {
                        XB_TRACE(bp, "walkq1", (long)xfs_buf_ispin(bp));
                        ASSERT(bp->b_flags & XBF_DELWRI);

                        if (!xfs_buf_ispin(bp) && !xfs_buf_cond_lock(bp)) {
                                /*
                                 * The queue is age-ordered, so once we
                                 * hit a buffer that is still too young
                                 * we can stop scanning.
                                 */
                                if (!test_bit(XBT_FORCE_FLUSH,
                                                &target->bt_flags) &&
                                    time_before(jiffies,
                                                bp->b_queuetime + age)) {
                                        xfs_buf_unlock(bp);
                                        break;
                                }

                                bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
                                bp->b_flags |= XBF_WRITE;
                                list_move(&bp->b_list, &tmp);
                        }
                }
                spin_unlock(dwlk);

                /* Issue the buffers we collected, outside the lock. */
                while (!list_empty(&tmp)) {
                        bp = list_entry(tmp.next, xfs_buf_t, b_list);
                        ASSERT(target == bp->b_target);

                        list_del_init(&bp->b_list);
                        xfs_buf_iostrategy(bp);

                        blk_run_address_space(target->bt_mapping);
                }

                if (as_list_len > 0)
                        purge_addresses();

                clear_bit(XBT_FORCE_FLUSH, &target->bt_flags);
        } while (!kthread_should_stop());

        return 0;
}
1739
1740 /*
1741  *      Go through all incore buffers, and release buffers if they belong to
1742  *      the given device. This is used in filesystem error handling to
1743  *      preserve the consistency of its metadata.
1744  */
int
xfs_flush_buftarg(
        xfs_buftarg_t           *target,
        int                     wait)
{
        struct list_head        tmp;
        xfs_buf_t               *bp, *n;
        int                     pincount = 0;
        struct list_head        *dwq = &target->bt_delwrite_queue;
        spinlock_t              *dwlk = &target->bt_delwrite_lock;

        /* Drain pending completion work before scanning the queue. */
        xfs_buf_runall_queues(xfsdatad_workqueue);
        xfs_buf_runall_queues(xfslogd_workqueue);

        /* Move every unpinned delwri buffer onto a private list. */
        INIT_LIST_HEAD(&tmp);
        spin_lock(dwlk);
        list_for_each_entry_safe(bp, n, dwq, b_list) {
                ASSERT(bp->b_target == target);
                ASSERT(bp->b_flags & (XBF_DELWRI | _XBF_DELWRI_Q));
                XB_TRACE(bp, "walkq2", (long)xfs_buf_ispin(bp));
                if (xfs_buf_ispin(bp)) {
                        /* Pinned buffers are left behind and counted. */
                        pincount++;
                        continue;
                }

                list_move(&bp->b_list, &tmp);
        }
        spin_unlock(dwlk);

        /*
         * Dropped the delayed write list lock, now walk the temporary list
         */
        list_for_each_entry_safe(bp, n, &tmp, b_list) {
                xfs_buf_lock(bp);
                bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
                bp->b_flags |= XBF_WRITE;
                /* When waiting, keep buffers listed so we can iowait on
                 * them below; async buffers are dropped from the list. */
                if (wait)
                        bp->b_flags &= ~XBF_ASYNC;
                else
                        list_del_init(&bp->b_list);

                xfs_buf_iostrategy(bp);
        }

        /*
         * Remaining list items must be flushed before returning
         */
        while (!list_empty(&tmp)) {
                bp = list_entry(tmp.next, xfs_buf_t, b_list);

                list_del_init(&bp->b_list);
                xfs_iowait(bp);
                xfs_buf_relse(bp);
        }

        if (wait)
                blk_run_address_space(target->bt_mapping);

        /* Report how many pinned buffers could not be flushed. */
        return pincount;
}
1805
/*
 *      Module initialization: set up the buffer slab cache, the log and
 *      data completion workqueues, and the memory shaker hook.  Unwinds
 *      in reverse order on failure and returns -ENOMEM.
 */
int __init
xfs_buf_init(void)
{
        int             error = -ENOMEM;

#ifdef XFS_BUF_TRACE
        xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_SLEEP);
#endif

        xfs_buf_zone = kmem_zone_init(sizeof(xfs_buf_t), "xfs_buf");
        if (!xfs_buf_zone)
                goto out_free_trace_buf;

        xfslogd_workqueue = create_workqueue("xfslogd");
        if (!xfslogd_workqueue)
                goto out_free_buf_zone;

        xfsdatad_workqueue = create_workqueue("xfsdatad");
        if (!xfsdatad_workqueue)
                goto out_destroy_xfslogd_workqueue;

        /* Lets xfsbufd_wakeup() flush buffers under memory pressure. */
        xfs_buf_shake = kmem_shake_register(xfsbufd_wakeup);
        if (!xfs_buf_shake)
                goto out_destroy_xfsdatad_workqueue;

        return 0;

 out_destroy_xfsdatad_workqueue:
        destroy_workqueue(xfsdatad_workqueue);
 out_destroy_xfslogd_workqueue:
        destroy_workqueue(xfslogd_workqueue);
 out_free_buf_zone:
        kmem_zone_destroy(xfs_buf_zone);
 out_free_trace_buf:
#ifdef XFS_BUF_TRACE
        ktrace_free(xfs_buf_trace_buf);
#endif
        return error;
}
1845
/*
 *      Module teardown: mirror of xfs_buf_init(), releasing resources in
 *      reverse order of creation.
 */
void
xfs_buf_terminate(void)
{
        kmem_shake_deregister(xfs_buf_shake);
        destroy_workqueue(xfsdatad_workqueue);
        destroy_workqueue(xfslogd_workqueue);
        kmem_zone_destroy(xfs_buf_zone);
#ifdef XFS_BUF_TRACE
        ktrace_free(xfs_buf_trace_buf);
#endif
}