#include <linux/file.h>
#include <linux/quotaops.h>
#include <linux/highmem.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/writeback.h>
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/bitops.h>
#include <linux/mpage.h>
#include <linux/bit_spinlock.h>
+#include <trace/events/block.h>
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
#define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
-inline void
-init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
+void init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
{
bh->b_end_io = handler;
bh->b_private = private;
}
+EXPORT_SYMBOL(init_buffer);
-static int sync_buffer(void *word)
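+/* Note recent use of the buffer's page so that reclaim keeps it cached. */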
+inline void touch_buffer(struct buffer_head *bh)
{
- struct block_device *bd;
- struct buffer_head *bh
- = container_of(word, struct buffer_head, b_state);
+ trace_block_touch_buffer(bh);
+ mark_page_accessed(bh->b_page);
+}
+EXPORT_SYMBOL(touch_buffer);
- smp_mb();
- bd = bh->b_bdev;
- if (bd)
- blk_run_address_space(bd->bd_inode->i_mapping);
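+/*
+ * wait_on_bit() action: sleep in io_schedule() and return 0 so the bit
+ * wait loop re-checks BH_Lock.
+ */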
+static int sleep_on_buffer(void *word)
+{
io_schedule();
return 0;
}
void __lock_buffer(struct buffer_head *bh)
{
- wait_on_bit_lock(&bh->b_state, BH_Lock, sync_buffer,
+ wait_on_bit_lock(&bh->b_state, BH_Lock, sleep_on_buffer,
TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_buffer);
smp_mb__after_clear_bit();
wake_up_bit(&bh->b_state, BH_Lock);
}
+EXPORT_SYMBOL(unlock_buffer);
/*
* Block until a buffer comes unlocked. This doesn't stop it
*/
void __wait_on_buffer(struct buffer_head * bh)
{
- wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
+ wait_on_bit(&bh->b_state, BH_Lock, sleep_on_buffer, TASK_UNINTERRUPTIBLE);
}
+EXPORT_SYMBOL(__wait_on_buffer);
static void
__clear_page_buffers(struct page *page)
__end_buffer_read_notouch(bh, uptodate);
put_bh(bh);
}
+EXPORT_SYMBOL(end_buffer_read_sync);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
if (uptodate) {
set_buffer_uptodate(bh);
} else {
- if (!buffer_eopnotsupp(bh) && !quiet_error(bh)) {
+ if (!quiet_error(bh)) {
buffer_io_error(bh);
printk(KERN_WARNING "lost page write due to "
"I/O error on %s\n",
unlock_buffer(bh);
put_bh(bh);
}
+EXPORT_SYMBOL(end_buffer_write_sync);
/*
* Various filesystems appear to want __find_get_block to be non-blocking.
* elsewhere, don't buffer_error if we had some unmapped buffers
*/
if (all_mapped) {
+ char b[BDEVNAME_SIZE];
+
printk("__find_get_block_slow() failed. "
"block=%llu, b_blocknr=%llu\n",
(unsigned long long)block,
(unsigned long long)bh->b_blocknr);
printk("b_state=0x%08lx, b_size=%zu\n",
bh->b_state, bh->b_size);
- printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
+ printk("device %s blocksize: %d\n", bdevname(bdev, b),
+ 1 << bd_inode->i_blkbits);
}
out_unlock:
spin_unlock(&bd_mapping->private_lock);
return ret;
}
-/* If invalidate_buffers() will trash dirty buffers, it means some kind
- of fs corruption is going on. Trashing dirty data always imply losing
- information that was supposed to be just stored on the physical layer
- by the user.
-
- Thus invalidate_buffers in general usage is not allwowed to trash
- dirty buffers. For example ioctl(FLSBLKBUF) expects dirty data to
- be preserved. These buffers are simply skipped.
-
- We also skip buffers which are still in use. For example this can
- happen if a userspace program is reading the block device.
-
- NOTE: In the case where the user removed a removable-media-disk even if
- there's still dirty data not synced on disk (due a bug in the device driver
- or due an error of the user), by not destroying the dirty buffers we could
- generate corruption also on the next media inserted, thus a parameter is
- necessary to handle this case in the most safe way possible (trying
- to not corrupt also the new disk inserted with the data belonging to
- the old now corrupted disk). Also for the ramdisk the natural thing
- to do in order to release the ramdisk memory is to destroy dirty buffers.
-
- These are two special cases. Normal usage imply the device driver
- to issue a sync on the device (without waiting I/O completion) and
- then an invalidate_buffers call that doesn't trash dirty buffers.
-
- For handling cache coherency with the blkdev pagecache the 'update' case
- is been introduced. It is needed to re-read from disk any pinned
- buffer. NOTE: re-reading from disk is destructive so we can do it only
- when we assume nobody is changing the buffercache under our I/O and when
- we think the disk contains more recent information than the buffercache.
- The update == 1 pass marks the buffers we need to update, the update == 2
- pass does the actual I/O. */
-void invalidate_bdev(struct block_device *bdev)
-{
- struct address_space *mapping = bdev->bd_inode->i_mapping;
-
- if (mapping->nrpages == 0)
- return;
-
- invalidate_bh_lrus();
- invalidate_mapping_pages(mapping, 0, -1);
-}
-
/*
- * Kick pdflush then try to free up some ZONE_NORMAL memory.
+ * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
*/
static void free_more_memory(void)
{
struct zone *zone;
int nid;
- wakeup_pdflush(1024);
+ wakeup_flusher_threads(1024, WB_REASON_FREE_MORE_MEM);
yield();
for_each_online_node(nid) {
* Completion handler for block_write_full_page() - pages which are unlocked
* during I/O, and which have PageWriteback cleared upon I/O completion.
*/
-static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
+void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
char b[BDEVNAME_SIZE];
unsigned long flags;
local_irq_restore(flags);
return;
}
+EXPORT_SYMBOL(end_buffer_async_write);
/*
* If a page's buffers are under async readin (end_buffer_async_read
set_buffer_async_read(bh);
}
-void mark_buffer_async_write(struct buffer_head *bh)
+static void mark_buffer_async_write_endio(struct buffer_head *bh,
+ bh_end_io_t *handler)
{
- bh->b_end_io = end_buffer_async_write;
+ bh->b_end_io = handler;
set_buffer_async_write(bh);
}
+
+void mark_buffer_async_write(struct buffer_head *bh)
+{
+ mark_buffer_async_write_endio(bh, end_buffer_async_write);
+}
EXPORT_SYMBOL(mark_buffer_async_write);
return err;
}
-void do_thaw_all(unsigned long unused)
+static void do_thaw_one(struct super_block *sb, void *unused)
{
- struct super_block *sb;
char b[BDEVNAME_SIZE];
+ while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
+ printk(KERN_WARNING "Emergency Thaw on %s\n",
+ bdevname(sb->s_bdev, b));
+}
- spin_lock(&sb_lock);
-restart:
- list_for_each_entry(sb, &super_blocks, s_list) {
- sb->s_count++;
- spin_unlock(&sb_lock);
- down_read(&sb->s_umount);
- while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
- printk(KERN_WARNING "Emergency Thaw on %s\n",
- bdevname(sb->s_bdev, b));
- up_read(&sb->s_umount);
- spin_lock(&sb_lock);
- if (__put_super_and_need_restart(sb))
- goto restart;
- }
- spin_unlock(&sb_lock);
+static void do_thaw_all(struct work_struct *work)
+{
+ iterate_supers(do_thaw_one, NULL);
+ kfree(work);
printk(KERN_WARNING "Emergency Thaw complete\n");
}
*/
void emergency_thaw_all(void)
{
- pdflush_operation(do_thaw_all, 0);
+ struct work_struct *work;
+
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (work) {
+ INIT_WORK(work, do_thaw_all);
+ schedule_work(work);
+ }
}
/**
*/
int sync_mapping_buffers(struct address_space *mapping)
{
- struct address_space *buffer_mapping = mapping->assoc_mapping;
+ struct address_space *buffer_mapping = mapping->private_data;
if (buffer_mapping == NULL || list_empty(&mapping->private_list))
return 0;
struct address_space *buffer_mapping = bh->b_page->mapping;
mark_buffer_dirty(bh);
- if (!mapping->assoc_mapping) {
- mapping->assoc_mapping = buffer_mapping;
+ if (!mapping->private_data) {
+ mapping->private_data = buffer_mapping;
} else {
- BUG_ON(mapping->assoc_mapping != buffer_mapping);
+ BUG_ON(mapping->private_data != buffer_mapping);
}
if (!bh->b_assoc_map) {
spin_lock(&buffer_mapping->private_lock);
static void __set_page_dirty(struct page *page,
struct address_space *mapping, int warn)
{
- spin_lock_irq(&mapping->tree_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&mapping->tree_lock, flags);
if (page->mapping) { /* Race with truncate? */
WARN_ON_ONCE(warn && !PageUptodate(page));
account_page_dirtied(page, mapping);
radix_tree_tag_set(&mapping->page_tree,
page_index(page), PAGECACHE_TAG_DIRTY);
}
- spin_unlock_irq(&mapping->tree_lock);
+ spin_unlock_irqrestore(&mapping->tree_lock, flags);
__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
}
{
struct buffer_head *bh;
struct list_head tmp;
- struct address_space *mapping, *prev_mapping = NULL;
+ struct address_space *mapping;
int err = 0, err2;
+ struct blk_plug plug;
INIT_LIST_HEAD(&tmp);
+ blk_start_plug(&plug);
spin_lock(lock);
while (!list_empty(list)) {
spin_unlock(lock);
/*
* Ensure any pending I/O completes so that
- * ll_rw_block() actually writes the current
- * contents - it is a noop if I/O is still in
- * flight on potentially older contents.
+ * write_dirty_buffer() actually writes the
+ * current contents - it is a noop if I/O is
+ * still in flight on potentially older
+ * contents.
*/
- ll_rw_block(SWRITE_SYNC_PLUG, 1, &bh);
+ write_dirty_buffer(bh, WRITE_SYNC);
-			/*
-			 * Kick off IO for the previous mapping. Note
-			 * wait_on_buffer() will do that for us
-			 * through sync_buffer().
-			 */
- if (prev_mapping && prev_mapping != mapping)
- blk_run_address_space(prev_mapping);
- prev_mapping = mapping;
-
brelse(bh);
spin_lock(lock);
}
}
}
+ spin_unlock(lock);
+ blk_finish_plug(&plug);
+ spin_lock(lock);
+
while (!list_empty(&tmp)) {
bh = BH_ENTRY(tmp.prev);
get_bh(bh);
if (inode_has_buffers(inode)) {
struct address_space *mapping = &inode->i_data;
struct list_head *list = &mapping->private_list;
- struct address_space *buffer_mapping = mapping->assoc_mapping;
+ struct address_space *buffer_mapping = mapping->private_data;
spin_lock(&buffer_mapping->private_lock);
while (!list_empty(list))
if (inode_has_buffers(inode)) {
struct address_space *mapping = &inode->i_data;
struct list_head *list = &mapping->private_list;
- struct address_space *buffer_mapping = mapping->assoc_mapping;
+ struct address_space *buffer_mapping = mapping->private_data;
spin_lock(&buffer_mapping->private_lock);
while (!list_empty(list)) {
if (!bh)
goto no_grow;
- bh->b_bdev = NULL;
bh->b_this_page = head;
bh->b_blocknr = -1;
head = bh;
- bh->b_state = 0;
- atomic_set(&bh->b_count, 0);
- bh->b_private = NULL;
bh->b_size = size;
/* Link the buffer to its page */
set_bh_page(bh, page, offset);
-
- init_buffer(bh, NULL, NULL);
}
return head;
/*
attach_page_buffers(page, head);
}
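+/*
+ * How many 'size'-byte blocks fit inside the block device, based on the
+ * bdev inode's i_size; returns ~0 when the device size is not yet known.
+ */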
+static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
+{
+ sector_t retval = ~((sector_t)0);
+ loff_t sz = i_size_read(bdev->bd_inode);
+
+ if (sz) {
+ unsigned int sizebits = blksize_bits(size);
+ retval = (sz >> sizebits);
+ }
+ return retval;
+}
+
/*
* Initialise the state of a blockdev page's buffers.
*/
-static void
+static sector_t
init_page_buffers(struct page *page, struct block_device *bdev,
sector_t block, int size)
{
struct buffer_head *head = page_buffers(page);
struct buffer_head *bh = head;
int uptodate = PageUptodate(page);
+ sector_t end_block = blkdev_max_block(I_BDEV(bdev->bd_inode), size);
do {
if (!buffer_mapped(bh)) {
bh->b_blocknr = block;
if (uptodate)
set_buffer_uptodate(bh);
- set_buffer_mapped(bh);
+ if (block < end_block)
+ set_buffer_mapped(bh);
}
block++;
bh = bh->b_this_page;
} while (bh != head);
+
+ /*
+ * Caller needs to validate requested block against end of device.
+ */
+ return end_block;
}
/*
* Create the page-cache page that contains the requested block.
*
- * This is user purely for blockdev mappings.
+ * This is used purely for blockdev mappings.
*/
-static struct page *
+static int
grow_dev_page(struct block_device *bdev, sector_t block,
- pgoff_t index, int size)
+ pgoff_t index, int size, int sizebits)
{
struct inode *inode = bdev->bd_inode;
struct page *page;
struct buffer_head *bh;
+ sector_t end_block;
+ int ret = 0; /* Will call free_more_memory() */
page = find_or_create_page(inode->i_mapping, index,
- (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
+ (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS));
if (!page)
- return NULL;
+ return ret;
BUG_ON(!PageLocked(page));
if (page_has_buffers(page)) {
bh = page_buffers(page);
if (bh->b_size == size) {
- init_page_buffers(page, bdev, block, size);
- return page;
+ end_block = init_page_buffers(page, bdev,
+ (sector_t)index << sizebits,
+ size);
+ goto done;
}
if (!try_to_free_buffers(page))
goto failed;
*/
spin_lock(&inode->i_mapping->private_lock);
link_dev_buffers(page, bh);
- init_page_buffers(page, bdev, block, size);
+ end_block = init_page_buffers(page, bdev, (sector_t)index << sizebits,
+ size);
spin_unlock(&inode->i_mapping->private_lock);
- return page;
-
+done:
+ ret = (block < end_block) ? 1 : -ENXIO;
failed:
- BUG();
unlock_page(page);
page_cache_release(page);
- return NULL;
+ return ret;
}
/*
static int
grow_buffers(struct block_device *bdev, sector_t block, int size)
{
- struct page *page;
pgoff_t index;
int sizebits;
bdevname(bdev, b));
return -EIO;
}
- block = index << sizebits;
+
/* Create a page with the proper size buffers.. */
- page = grow_dev_page(bdev, block, index, size);
- if (!page)
- return 0;
- unlock_page(page);
- page_cache_release(page);
- return 1;
+ return grow_dev_page(bdev, block, index, size, sizebits);
}
static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block, int size)
{
/* Size must be multiple of hard sectorsize */
- if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
+ if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
(size < 512 || size > PAGE_SIZE))) {
printk(KERN_ERR "getblk(): invalid block size %d requested\n",
size);
- printk(KERN_ERR "hardsect size: %d\n",
- bdev_hardsect_size(bdev));
+ printk(KERN_ERR "logical block size: %d\n",
+ bdev_logical_block_size(bdev));
dump_stack();
return NULL;
}
for (;;) {
- struct buffer_head * bh;
+ struct buffer_head *bh;
int ret;
bh = __find_get_block(bdev, block, size);
* inode list.
*
* mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
- * mapping->tree_lock and the global inode_lock.
+ * mapping->tree_lock and mapping->host->i_lock.
*/
void mark_buffer_dirty(struct buffer_head *bh)
{
WARN_ON_ONCE(!buffer_uptodate(bh));
+ trace_block_dirty_buffer(bh);
+
/*
* Very *carefully* optimize the it-is-already-dirty case.
*
if (!test_set_buffer_dirty(bh)) {
struct page *page = bh->b_page;
- if (!TestSetPageDirty(page))
- __set_page_dirty(page, page_mapping(page), 0);
+ if (!TestSetPageDirty(page)) {
+ struct address_space *mapping = page_mapping(page);
+ if (mapping)
+ __set_page_dirty(page, mapping, 0);
+ }
}
}
+EXPORT_SYMBOL(mark_buffer_dirty);
/*
* Decrement a buffer_head's reference count. If all buffers against a page
}
WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
}
+EXPORT_SYMBOL(__brelse);
/*
* bforget() is like brelse(), except it discards any
}
__brelse(bh);
}
+EXPORT_SYMBOL(__bforget);
static struct buffer_head *__bread_slow(struct buffer_head *bh)
{
static void bh_lru_install(struct buffer_head *bh)
{
struct buffer_head *evictee = NULL;
- struct bh_lru *lru;
check_irqs_on();
bh_lru_lock();
- lru = &__get_cpu_var(bh_lrus);
- if (lru->bhs[0] != bh) {
+ if (__this_cpu_read(bh_lrus.bhs[0]) != bh) {
struct buffer_head *bhs[BH_LRU_SIZE];
int in;
int out = 0;
get_bh(bh);
bhs[out++] = bh;
for (in = 0; in < BH_LRU_SIZE; in++) {
- struct buffer_head *bh2 = lru->bhs[in];
+ struct buffer_head *bh2 =
+ __this_cpu_read(bh_lrus.bhs[in]);
if (bh2 == bh) {
__brelse(bh2);
}
while (out < BH_LRU_SIZE)
bhs[out++] = NULL;
- memcpy(lru->bhs, bhs, sizeof(bhs));
+ memcpy(__this_cpu_ptr(&bh_lrus.bhs), bhs, sizeof(bhs));
}
bh_lru_unlock();
lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
{
struct buffer_head *ret = NULL;
- struct bh_lru *lru;
unsigned int i;
check_irqs_on();
bh_lru_lock();
- lru = &__get_cpu_var(bh_lrus);
for (i = 0; i < BH_LRU_SIZE; i++) {
- struct buffer_head *bh = lru->bhs[i];
+ struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);
if (bh && bh->b_bdev == bdev &&
bh->b_blocknr == block && bh->b_size == size) {
if (i) {
while (i) {
- lru->bhs[i] = lru->bhs[i - 1];
+ __this_cpu_write(bh_lrus.bhs[i],
+ __this_cpu_read(bh_lrus.bhs[i - 1]));
i--;
}
- lru->bhs[0] = bh;
+ __this_cpu_write(bh_lrus.bhs[0], bh);
}
get_bh(bh);
ret = bh;
* which corresponds to the passed block_device, block and size. The
* returned buffer has its reference count incremented.
*
- * __getblk() cannot fail - it just keeps trying. If you pass it an
- * illegal block number, __getblk() will happily return a buffer_head
- * which represents the non-existent block. Very weird.
- *
* __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
* attempt is failing. FIXME, perhaps?
*/
}
put_cpu_var(bh_lrus);
}
+
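+/*
+ * on_each_cpu_cond() predicate: only CPUs whose buffer-head LRU actually
+ * holds entries need to receive the invalidate IPI.
+ */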
+static bool has_bh_in_lru(int cpu, void *dummy)
+{
+ struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
+ int i;
+ for (i = 0; i < BH_LRU_SIZE; i++) {
+ if (b->bhs[i])
+ return 1;
+ }
+
+ return 0;
+}
+
void invalidate_bh_lrus(void)
{
- on_each_cpu(invalidate_bh_lru, NULL, 1);
+ on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
}
/**
- * block_invalidatepage - invalidate part of all of a buffer-backed page
+ * block_invalidatepage - invalidate part or all of a buffer-backed page
*
* @page: the page which is affected
* @offset: the index of the truncation point
*
* block_invalidatepage() is called when all or part of the page has become
- * invalidatedby a truncate operation.
+ * invalidated by a truncate operation.
*
* block_invalidatepage() does not have to release all buffers, but it must
* ensure that no dirty buffer is left outside @offset and that no I/O
}
EXPORT_SYMBOL(unmap_underlying_metadata);
+/*
+ * Size is a power-of-two in the range 512..PAGE_SIZE,
+ * and the case we care about most is PAGE_SIZE.
+ *
+ * So this *could* possibly be written with those
+ * constraints in mind (relevant mostly if some
+ * architecture has a slow bit-scan instruction)
+ */
+static inline int block_size_bits(unsigned int blocksize)
+{
+ return ilog2(blocksize);
+}
+
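+/*
+ * Make sure the page has buffers of the inode's block size attached and
+ * return the head of its buffer list.
+ */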
+static struct buffer_head *create_page_buffers(struct page *page, struct inode *inode, unsigned int b_state)
+{
+ BUG_ON(!PageLocked(page));
+
+ if (!page_has_buffers(page))
+ create_empty_buffers(page, 1 << ACCESS_ONCE(inode->i_blkbits), b_state);
+ return page_buffers(page);
+}
+
/*
* NOTE! All mapped/uptodate combinations are valid:
*
* prevents this contention from occurring.
*
* If block_write_full_page() is called with wbc->sync_mode ==
- * WB_SYNC_ALL, the writes are posted using WRITE_SYNC_PLUG; this
- * causes the writes to be flagged as synchronous writes, but the
- * block device queue will NOT be unplugged, since usually many pages
- * will be pushed to the out before the higher-level caller actually
- * waits for the writes to be completed. The various wait functions,
- * such as wait_on_writeback_range() will ultimately call sync_page()
- * which will ultimately call blk_run_backing_dev(), which will end up
- * unplugging the device queue.
+ * WB_SYNC_ALL, the writes are posted using WRITE_SYNC; this
+ * causes the writes to be flagged as synchronous writes.
*/
static int __block_write_full_page(struct inode *inode, struct page *page,
- get_block_t *get_block, struct writeback_control *wbc)
+ get_block_t *get_block, struct writeback_control *wbc,
+ bh_end_io_t *handler)
{
int err;
sector_t block;
sector_t last_block;
struct buffer_head *bh, *head;
- const unsigned blocksize = 1 << inode->i_blkbits;
+ unsigned int blocksize, bbits;
int nr_underway = 0;
int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
- WRITE_SYNC_PLUG : WRITE);
-
- BUG_ON(!PageLocked(page));
-
- last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
+ WRITE_SYNC : WRITE);
- if (!page_has_buffers(page)) {
- create_empty_buffers(page, blocksize,
+ head = create_page_buffers(page, inode,
(1 << BH_Dirty)|(1 << BH_Uptodate));
- }
/*
* Be very careful. We have no exclusion from __set_page_dirty_buffers
* handle that here by just cleaning them.
*/
- block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
- head = page_buffers(page);
bh = head;
+ blocksize = bh->b_size;
+ bbits = block_size_bits(blocksize);
+
+ block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
+ last_block = (i_size_read(inode) - 1) >> bbits;
/*
* Get all the dirty buffers mapped to disk addresses and
/*
* If it's a fully non-blocking write attempt and we cannot
* lock the buffer then redirty the page. Note that this can
- * potentially cause a busy-wait loop from pdflush and kswapd
- * activity, but those code paths have their own higher-level
- * throttling.
+ * potentially cause a busy-wait loop from writeback threads
+ * and kswapd activity, but those code paths have their own
+ * higher-level throttling.
*/
- if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
+ if (wbc->sync_mode != WB_SYNC_NONE) {
lock_buffer(bh);
} else if (!trylock_buffer(bh)) {
redirty_page_for_writepage(wbc, page);
continue;
}
if (test_clear_buffer_dirty(bh)) {
- mark_buffer_async_write(bh);
+ mark_buffer_async_write_endio(bh, handler);
} else {
unlock_buffer(bh);
}
if (buffer_mapped(bh) && buffer_dirty(bh) &&
!buffer_delay(bh)) {
lock_buffer(bh);
- mark_buffer_async_write(bh);
+ mark_buffer_async_write_endio(bh, handler);
} else {
/*
* The buffer may have been set dirty during
}
EXPORT_SYMBOL(page_zero_new_buffers);
-static int __block_prepare_write(struct inode *inode, struct page *page,
- unsigned from, unsigned to, get_block_t *get_block)
+int __block_write_begin(struct page *page, loff_t pos, unsigned len,
+ get_block_t *get_block)
{
+ unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+ unsigned to = from + len;
+ struct inode *inode = page->mapping->host;
unsigned block_start, block_end;
sector_t block;
int err = 0;
BUG_ON(to > PAGE_CACHE_SIZE);
BUG_ON(from > to);
- blocksize = 1 << inode->i_blkbits;
- if (!page_has_buffers(page))
- create_empty_buffers(page, blocksize, 0);
- head = page_buffers(page);
+ head = create_page_buffers(page, inode, 0);
+ blocksize = head->b_size;
+ bbits = block_size_bits(blocksize);
- bbits = inode->i_blkbits;
block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
for(bh = head, block_start = 0; bh != head || !block_start;
page_zero_new_buffers(page, from, to);
return err;
}
+EXPORT_SYMBOL(__block_write_begin);
static int __block_commit_write(struct inode *inode, struct page *page,
unsigned from, unsigned to)
unsigned blocksize;
struct buffer_head *bh, *head;
- blocksize = 1 << inode->i_blkbits;
+ bh = head = page_buffers(page);
+ blocksize = bh->b_size;
- for(bh = head = page_buffers(page), block_start = 0;
- bh != head || !block_start;
- block_start=block_end, bh = bh->b_this_page) {
+ block_start = 0;
+ do {
block_end = block_start + blocksize;
if (block_end <= from || block_start >= to) {
if (!buffer_uptodate(bh))
mark_buffer_dirty(bh);
}
clear_buffer_new(bh);
- }
+
+ block_start = block_end;
+ bh = bh->b_this_page;
+ } while (bh != head);
/*
* If this is a partial write which happened to make all buffers
* block_write_begin takes care of the basic task of block allocation and
* bringing partial write blocks uptodate first.
*
- * If *pagep is not NULL, then block_write_begin uses the locked page
- * at *pagep rather than allocating its own. In this case, the page will
- * not be unlocked or deallocated on failure.
+ * The filesystem needs to handle block truncation upon failure.
*/
-int block_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata,
- get_block_t *get_block)
+int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
+ unsigned flags, struct page **pagep, get_block_t *get_block)
{
- struct inode *inode = mapping->host;
- int status = 0;
+ pgoff_t index = pos >> PAGE_CACHE_SHIFT;
struct page *page;
- pgoff_t index;
- unsigned start, end;
- int ownpage = 0;
+ int status;
- index = pos >> PAGE_CACHE_SHIFT;
- start = pos & (PAGE_CACHE_SIZE - 1);
- end = start + len;
-
- page = *pagep;
- if (page == NULL) {
- ownpage = 1;
- page = grab_cache_page_write_begin(mapping, index, flags);
- if (!page) {
- status = -ENOMEM;
- goto out;
- }
- *pagep = page;
- } else
- BUG_ON(!PageLocked(page));
+ page = grab_cache_page_write_begin(mapping, index, flags);
+ if (!page)
+ return -ENOMEM;
- status = __block_prepare_write(inode, page, start, end, get_block);
+ status = __block_write_begin(page, pos, len, get_block);
if (unlikely(status)) {
- ClearPageUptodate(page);
-
- if (ownpage) {
- unlock_page(page);
- page_cache_release(page);
- *pagep = NULL;
-
- /*
- * prepare_write() may have instantiated a few blocks
- * outside i_size. Trim these off again. Don't need
- * i_size_read because we hold i_mutex.
- */
- if (pos + len > inode->i_size)
- vmtruncate(inode, inode->i_size);
- }
+ unlock_page(page);
+ page_cache_release(page);
+ page = NULL;
}
-out:
+ *pagep = page;
return status;
}
EXPORT_SYMBOL(block_write_begin);
struct page *page, void *fsdata)
{
struct inode *inode = mapping->host;
+ loff_t old_size = inode->i_size;
int i_size_changed = 0;
copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
unlock_page(page);
page_cache_release(page);
+ if (old_size < pos)
+ pagecache_isize_extended(inode, old_size, pos);
/*
* Don't mark the inode dirty under page lock. First, it unnecessarily
* makes the holding time of page lock longer. Second, it forces lock
int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
unsigned long from)
{
- struct inode *inode = page->mapping->host;
unsigned block_start, block_end, blocksize;
unsigned to;
struct buffer_head *bh, *head;
if (!page_has_buffers(page))
return 0;
- blocksize = 1 << inode->i_blkbits;
+ head = page_buffers(page);
+ blocksize = head->b_size;
to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
to = from + to;
if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
return 0;
- head = page_buffers(page);
bh = head;
block_start = 0;
do {
struct inode *inode = page->mapping->host;
sector_t iblock, lblock;
struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
- unsigned int blocksize;
+ unsigned int blocksize, bbits;
int nr, i;
int fully_mapped = 1;
- BUG_ON(!PageLocked(page));
- blocksize = 1 << inode->i_blkbits;
- if (!page_has_buffers(page))
- create_empty_buffers(page, blocksize, 0);
- head = page_buffers(page);
+ head = create_page_buffers(page, inode, 0);
+ blocksize = head->b_size;
+ bbits = block_size_bits(blocksize);
- iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
- lblock = (i_size_read(inode)+blocksize-1) >> inode->i_blkbits;
+ iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits);
+ lblock = (i_size_read(inode)+blocksize-1) >> bbits;
bh = head;
nr = 0;
i = 0;
}
return 0;
}
+EXPORT_SYMBOL(block_read_full_page);
/* utility function for filesystems that need to do work on expanding
* truncates. Uses filesystem pagecache writes to allow the filesystem to
struct address_space *mapping = inode->i_mapping;
struct page *page;
void *fsdata;
- unsigned long limit;
int err;
- err = -EFBIG;
- limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
- if (limit != RLIM_INFINITY && size > (loff_t)limit) {
- send_sig(SIGXFSZ, current, 0);
- goto out;
- }
- if (size > inode->i_sb->s_maxbytes)
+ err = inode_newsize_ok(inode, size);
+ if (err)
goto out;
err = pagecache_write_begin(NULL, mapping, size, 0,
out:
return err;
}
+EXPORT_SYMBOL(generic_cont_expand_simple);
static int cont_expand_zero(struct file *file, struct address_space *mapping,
loff_t pos, loff_t *bytes)
err = 0;
balance_dirty_pages_ratelimited(mapping);
+
+ if (unlikely(fatal_signal_pending(current))) {
+ err = -EINTR;
+ goto out;
+ }
}
/* page covers the boundary, find the boundary offset */
err = cont_expand_zero(file, mapping, pos, bytes);
if (err)
- goto out;
+ return err;
zerofrom = *bytes & ~PAGE_CACHE_MASK;
if (pos+len > *bytes && zerofrom & (blocksize-1)) {
(*bytes)++;
}
- *pagep = NULL;
- err = block_write_begin(file, mapping, pos, len,
- flags, pagep, fsdata, get_block);
-out:
- return err;
-}
-
-int block_prepare_write(struct page *page, unsigned from, unsigned to,
- get_block_t *get_block)
-{
- struct inode *inode = page->mapping->host;
- int err = __block_prepare_write(inode, page, from, to, get_block);
- if (err)
- ClearPageUptodate(page);
- return err;
+ return block_write_begin(mapping, pos, len, flags, pagep, get_block);
}
+EXPORT_SYMBOL(cont_write_begin);
int block_commit_write(struct page *page, unsigned from, unsigned to)
{
__block_commit_write(inode,page,from,to);
return 0;
}
+EXPORT_SYMBOL(block_commit_write);
/*
* block_page_mkwrite() is not allowed to change the file size as it gets
*
* We are not allowed to take the i_mutex here so we have to play games to
* protect against truncate races as the page could now be beyond EOF. Because
- * vmtruncate() writes the inode size before removing pages, once we have the
+ * truncate writes the inode size before removing pages, once we have the
* page lock we can determine safely if the page is beyond EOF. If it is not
* beyond EOF, then the page is guaranteed safe against truncation until we
* unlock the page.
+ *
+ * Direct callers of this function should protect against filesystem freezing
+ * using sb_start_write() - sb_end_write() functions.
*/
-int
-block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
- get_block_t get_block)
+int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
+ get_block_t get_block)
{
struct page *page = vmf->page;
- struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(vma->vm_file);
unsigned long end;
loff_t size;
- int ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
+ int ret;
lock_page(page);
size = i_size_read(inode);
if ((page->mapping != inode->i_mapping) ||
(page_offset(page) > size)) {
- /* page got truncated out from underneath us */
+ /* We overload EFAULT to mean page got truncated */
+ ret = -EFAULT;
goto out_unlock;
}
else
end = PAGE_CACHE_SIZE;
- ret = block_prepare_write(page, 0, end, get_block);
+ ret = __block_write_begin(page, 0, end, get_block);
if (!ret)
ret = block_commit_write(page, 0, end);
- if (unlikely(ret)) {
- if (ret == -ENOMEM)
- ret = VM_FAULT_OOM;
- else /* -ENOSPC, -EIO, etc */
- ret = VM_FAULT_SIGBUS;
- }
-
+ if (unlikely(ret < 0))
+ goto out_unlock;
+ set_page_dirty(page);
+ wait_for_stable_page(page);
+ return 0;
out_unlock:
unlock_page(page);
return ret;
}
+EXPORT_SYMBOL(__block_page_mkwrite);
+
+int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
+ get_block_t get_block)
+{
+ int ret;
+ struct super_block *sb = file_inode(vma->vm_file)->i_sb;
+
+ sb_start_pagefault(sb);
+
+ /*
+ * Update file times before taking page lock. We may end up failing the
+ * fault so this update may be superfluous but who really cares...
+ */
+ file_update_time(vma->vm_file);
+
+ ret = __block_page_mkwrite(vma, vmf, get_block);
+ sb_end_pagefault(sb);
+ return block_page_mkwrite_return(ret);
+}
+EXPORT_SYMBOL(block_page_mkwrite);
/*
* nobh_write_begin()'s prereads are special: the buffer_heads are freed
/*
* On entry, the page is fully not uptodate.
* On exit the page is fully uptodate in the areas outside (from,to)
+ * The filesystem needs to handle block truncation upon failure.
*/
-int nobh_write_begin(struct file *file, struct address_space *mapping,
+int nobh_write_begin(struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata,
get_block_t *get_block)
*fsdata = NULL;
if (page_has_buffers(page)) {
- unlock_page(page);
- page_cache_release(page);
- *pagep = NULL;
- return block_write_begin(file, mapping, pos, len, flags, pagep,
- fsdata, get_block);
+ ret = __block_write_begin(page, pos, len, get_block);
+ if (unlikely(ret))
+ goto out_release;
+ return ret;
}
if (PageMappedToDisk(page))
page_cache_release(page);
*pagep = NULL;
- if (pos + len > inode->i_size)
- vmtruncate(inode, inode->i_size);
-
return ret;
}
EXPORT_SYMBOL(nobh_write_begin);
out:
ret = mpage_writepage(page, get_block, wbc);
if (ret == -EAGAIN)
- ret = __block_write_full_page(inode, page, get_block, wbc);
+ ret = __block_write_full_page(inode, page, get_block, wbc,
+ end_buffer_async_write);
return ret;
}
EXPORT_SYMBOL(nobh_writepage);
pos += blocksize;
}
+ map_bh.b_size = blocksize;
+ map_bh.b_state = 0;
err = get_block(inode, iblock, &map_bh, 0);
if (err)
goto unlock;
out:
return err;
}
+EXPORT_SYMBOL(block_truncate_page);
/*
* The generic ->writepage function for buffer-backed address_spaces
+ * this form passes in the end_io handler used to finish the IO.
*/
-int block_write_full_page(struct page *page, get_block_t *get_block,
- struct writeback_control *wbc)
+int block_write_full_page_endio(struct page *page, get_block_t *get_block,
+ struct writeback_control *wbc, bh_end_io_t *handler)
{
struct inode * const inode = page->mapping->host;
loff_t i_size = i_size_read(inode);
/* Is the page fully inside i_size? */
if (page->index < end_index)
- return __block_write_full_page(inode, page, get_block, wbc);
+ return __block_write_full_page(inode, page, get_block, wbc,
+ handler);
/* Is the page fully outside i_size? (truncate in progress) */
offset = i_size & (PAGE_CACHE_SIZE-1);
/*
* The page straddles i_size. It must be zeroed out on each and every
- * writepage invokation because it may be mmapped. "A file is mapped
+ * writepage invocation because it may be mmapped. "A file is mapped
* in multiples of the page size. For a file that is not a multiple of
* the page size, the remaining memory is zeroed when mapped, and
* writes to that region are not written out to the file."
*/
zero_user_segment(page, offset, PAGE_CACHE_SIZE);
- return __block_write_full_page(inode, page, get_block, wbc);
+ return __block_write_full_page(inode, page, get_block, wbc, handler);
+}
+EXPORT_SYMBOL(block_write_full_page_endio);
+
+/*
+ * The generic ->writepage function for buffer-backed address_spaces
+ */
+int block_write_full_page(struct page *page, get_block_t *get_block,
+ struct writeback_control *wbc)
+{
+ return block_write_full_page_endio(page, get_block, wbc,
+ end_buffer_async_write);
}
+EXPORT_SYMBOL(block_write_full_page);
sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
get_block_t *get_block)
get_block(inode, block, &tmp, 0);
return tmp.b_blocknr;
}
+EXPORT_SYMBOL(generic_block_bmap);
static void end_bio_bh_io_sync(struct bio *bio, int err)
{
if (err == -EOPNOTSUPP) {
set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
- set_bit(BH_Eopnotsupp, &bh->b_state);
}
if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags)))
bio_put(bio);
}
-int submit_bh(int rw, struct buffer_head * bh)
+/*
+ * This allows us to do IO even on the odd last sectors
+ * of a device, even if the bh block size is some multiple
+ * of the physical sector size.
+ *
+ * We'll just truncate the bio to the size of the device,
+ * and clear the end of the buffer head manually.
+ *
+ * Truly out-of-range accesses will turn into actual IO
+ * errors, this only handles the "we need to be able to
+ * do IO at the final sector" case.
+ */
+static void guard_bh_eod(int rw, struct bio *bio, struct buffer_head *bh)
+{
+ sector_t maxsector;
+ unsigned bytes;
+
+ maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
+ if (!maxsector)
+ return;
+
+ /*
+ * If the *whole* IO is past the end of the device,
+ * let it through, and the IO layer will turn it into
+ * an EIO.
+ */
+ if (unlikely(bio->bi_sector >= maxsector))
+ return;
+
+ maxsector -= bio->bi_sector;
+ bytes = bio->bi_size;
+ if (likely((bytes >> 9) <= maxsector))
+ return;
+
+ /* Uhhuh. We've got a bh that straddles the device size! */
+ bytes = maxsector << 9;
+
+ /* Truncate the bio.. */
+ bio->bi_size = bytes;
+ bio->bi_io_vec[0].bv_len = bytes;
+
+ /* ..and clear the end of the buffer for reads */
+ if ((rw & RW_MASK) == READ) {
+ void *kaddr = kmap_atomic(bh->b_page);
+ memset(kaddr + bh_offset(bh) + bytes, 0, bh->b_size - bytes);
+ kunmap_atomic(kaddr);
+ flush_dcache_page(bh->b_page);
+ }
+}
+
+int _submit_bh(int rw, struct buffer_head *bh, unsigned long bio_flags)
{
struct bio *bio;
int ret = 0;
BUG_ON(!buffer_locked(bh));
BUG_ON(!buffer_mapped(bh));
BUG_ON(!bh->b_end_io);
-
- /*
- * Mask in barrier bit for a write (could be either a WRITE or a
- * WRITE_SYNC
- */
- if (buffer_ordered(bh) && (rw & WRITE))
- rw |= WRITE_BARRIER;
+ BUG_ON(buffer_delay(bh));
+ BUG_ON(buffer_unwritten(bh));
/*
* Only clear out a write error when rewriting
bio->bi_io_vec[0].bv_offset = bh_offset(bh);
bio->bi_vcnt = 1;
- bio->bi_idx = 0;
bio->bi_size = bh->b_size;
bio->bi_end_io = end_bio_bh_io_sync;
bio->bi_private = bh;
+ bio->bi_flags |= bio_flags;
+
+ /* Take care of bh's that straddle the end of the device */
+ guard_bh_eod(rw, bio, bh);
+
+ if (buffer_meta(bh))
+ rw |= REQ_META;
+ if (buffer_prio(bh))
+ rw |= REQ_PRIO;
bio_get(bio);
submit_bio(rw, bio);
bio_put(bio);
return ret;
}
+EXPORT_SYMBOL_GPL(_submit_bh);
+
+int submit_bh(int rw, struct buffer_head *bh)
+{
+ return _submit_bh(rw, bh, 0);
+}
+EXPORT_SYMBOL(submit_bh);
/**
* ll_rw_block: low-level access to block devices (DEPRECATED)
- * @rw: whether to %READ or %WRITE or %SWRITE or maybe %READA (readahead)
+ * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
* @nr: number of &struct buffer_heads in the array
* @bhs: array of pointers to &struct buffer_head
*
* ll_rw_block() takes an array of pointers to &struct buffer_heads, and
* requests an I/O operation on them, either a %READ or a %WRITE. The third
- * %SWRITE is like %WRITE only we make sure that the *current* data in buffers
- * are sent to disk. The fourth %READA option is described in the documentation
- * for generic_make_request() which ll_rw_block() calls.
+ * %READA option is described in the documentation for generic_make_request()
+ * which ll_rw_block() calls.
*
* This function drops any buffer that it cannot get a lock on (with the
- * BH_Lock state bit) unless SWRITE is required, any buffer that appears to be
- * clean when doing a write request, and any buffer that appears to be
- * up-to-date when doing read request. Further it marks as clean buffers that
- * are processed for writing (the buffer cache won't assume that they are
- * actually clean until the buffer gets unlocked).
+ * BH_Lock state bit), any buffer that appears to be clean when doing a write
+ * request, and any buffer that appears to be up-to-date when doing read
+ * request. Further it marks as clean buffers that are processed for
+ * writing (the buffer cache won't assume that they are actually clean
+ * until the buffer gets unlocked).
*
* ll_rw_block sets b_end_io to simple completion handler that marks
 * the buffer up-to-date (if appropriate), unlocks the buffer and wakes
for (i = 0; i < nr; i++) {
struct buffer_head *bh = bhs[i];
- if (rw == SWRITE || rw == SWRITE_SYNC || rw == SWRITE_SYNC_PLUG)
- lock_buffer(bh);
- else if (!trylock_buffer(bh))
+ if (!trylock_buffer(bh))
continue;
-
- if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC ||
- rw == SWRITE_SYNC_PLUG) {
+ if (rw == WRITE) {
if (test_clear_buffer_dirty(bh)) {
bh->b_end_io = end_buffer_write_sync;
get_bh(bh);
- if (rw == SWRITE_SYNC)
- submit_bh(WRITE_SYNC, bh);
- else
- submit_bh(WRITE, bh);
+ submit_bh(WRITE, bh);
continue;
}
} else {
unlock_buffer(bh);
}
}
+EXPORT_SYMBOL(ll_rw_block);
+
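+/*
+ * Submit a single dirty buffer for write without waiting on it; if the
+ * buffer turns out to be clean, just unlock it and return.
+ */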
+void write_dirty_buffer(struct buffer_head *bh, int rw)
+{
+ lock_buffer(bh);
+ if (!test_clear_buffer_dirty(bh)) {
+ unlock_buffer(bh);
+ return;
+ }
+ bh->b_end_io = end_buffer_write_sync;
+ get_bh(bh);
+ submit_bh(rw, bh);
+}
+EXPORT_SYMBOL(write_dirty_buffer);
/*
* For a data-integrity writeout, we need to wait upon any in-progress I/O
* and then start new I/O and then wait upon it. The caller must have a ref on
* the buffer_head.
*/
-int sync_dirty_buffer(struct buffer_head *bh)
+int __sync_dirty_buffer(struct buffer_head *bh, int rw)
{
int ret = 0;
if (test_clear_buffer_dirty(bh)) {
get_bh(bh);
bh->b_end_io = end_buffer_write_sync;
- ret = submit_bh(WRITE_SYNC, bh);
+ ret = submit_bh(rw, bh);
wait_on_buffer(bh);
- if (buffer_eopnotsupp(bh)) {
- clear_buffer_eopnotsupp(bh);
- ret = -EOPNOTSUPP;
- }
if (!ret && !buffer_uptodate(bh))
ret = -EIO;
} else {
}
return ret;
}
+EXPORT_SYMBOL(__sync_dirty_buffer);
+
+int sync_dirty_buffer(struct buffer_head *bh)
+{
+ return __sync_dirty_buffer(bh, WRITE_SYNC);
+}
+EXPORT_SYMBOL(sync_dirty_buffer);
/*
* try_to_free_buffers() checks if all the buffers on this particular page
}
EXPORT_SYMBOL(try_to_free_buffers);
-void block_sync_page(struct page *page)
-{
- struct address_space *mapping;
-
- smp_mb();
- mapping = page_mapping(page);
- if (mapping)
- blk_run_backing_dev(mapping->backing_dev_info, page);
-}
-
/*
* There are no bdflush tunables left. But distributions are
* still running obsolete flush daemons, so we terminate them here.
*
* Use of bdflush() is deprecated and will be removed in a future kernel.
- * The `pdflush' kernel threads fully replace bdflush daemons and this call.
+ * The `flush-X' kernel threads fully replace bdflush daemons and this call.
*/
SYSCALL_DEFINE2(bdflush, int, func, long, data)
{
/*
* Buffer-head allocation
*/
-static struct kmem_cache *bh_cachep;
+static struct kmem_cache *bh_cachep __read_mostly;
/*
* Once the number of bh's in the machine exceeds this level, we start
* stripping them in writeback.
*/
-static int max_buffer_heads;
+static unsigned long max_buffer_heads;
int buffer_heads_over_limit;
int i;
int tot = 0;
- if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
+ if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
return;
- __get_cpu_var(bh_accounting).ratelimit = 0;
+ __this_cpu_write(bh_accounting.ratelimit, 0);
for_each_online_cpu(i)
tot += per_cpu(bh_accounting, i).nr;
buffer_heads_over_limit = (tot > max_buffer_heads);
}
-
+
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
{
- struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
+ struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
if (ret) {
INIT_LIST_HEAD(&ret->b_assoc_buffers);
- get_cpu_var(bh_accounting).nr++;
+ preempt_disable();
+ __this_cpu_inc(bh_accounting.nr);
recalc_bh_state();
- put_cpu_var(bh_accounting);
+ preempt_enable();
}
return ret;
}
{
BUG_ON(!list_empty(&bh->b_assoc_buffers));
kmem_cache_free(bh_cachep, bh);
- get_cpu_var(bh_accounting).nr--;
+ preempt_disable();
+ __this_cpu_dec(bh_accounting.nr);
recalc_bh_state();
- put_cpu_var(bh_accounting);
+ preempt_enable();
}
EXPORT_SYMBOL(free_buffer_head);
brelse(b->bhs[i]);
b->bhs[i] = NULL;
}
- get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
+ this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
per_cpu(bh_accounting, cpu).nr = 0;
- put_cpu_var(bh_accounting);
}
static int buffer_cpu_notify(struct notifier_block *self,
}
EXPORT_SYMBOL(bh_submit_read);
-static void
-init_buffer_head(void *data)
-{
- struct buffer_head *bh = data;
-
- memset(bh, 0, sizeof(*bh));
- INIT_LIST_HEAD(&bh->b_assoc_buffers);
-}
-
void __init buffer_init(void)
{
- int nrpages;
+ unsigned long nrpages;
bh_cachep = kmem_cache_create("buffer_head",
sizeof(struct buffer_head), 0,
(SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
SLAB_MEM_SPREAD),
- init_buffer_head);
+ NULL);
/*
* Limit the bh occupancy to 10% of ZONE_NORMAL
max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
hotcpu_notifier(buffer_cpu_notify, 0);
}
-
-EXPORT_SYMBOL(__bforget);
-EXPORT_SYMBOL(__brelse);
-EXPORT_SYMBOL(__wait_on_buffer);
-EXPORT_SYMBOL(block_commit_write);
-EXPORT_SYMBOL(block_prepare_write);
-EXPORT_SYMBOL(block_page_mkwrite);
-EXPORT_SYMBOL(block_read_full_page);
-EXPORT_SYMBOL(block_sync_page);
-EXPORT_SYMBOL(block_truncate_page);
-EXPORT_SYMBOL(block_write_full_page);
-EXPORT_SYMBOL(cont_write_begin);
-EXPORT_SYMBOL(end_buffer_read_sync);
-EXPORT_SYMBOL(end_buffer_write_sync);
-EXPORT_SYMBOL(file_fsync);
-EXPORT_SYMBOL(generic_block_bmap);
-EXPORT_SYMBOL(generic_cont_expand_simple);
-EXPORT_SYMBOL(init_buffer);
-EXPORT_SYMBOL(invalidate_bdev);
-EXPORT_SYMBOL(ll_rw_block);
-EXPORT_SYMBOL(mark_buffer_dirty);
-EXPORT_SYMBOL(submit_bh);
-EXPORT_SYMBOL(sync_dirty_buffer);
-EXPORT_SYMBOL(unlock_buffer);