diff --git a/fs/buffer.c b/fs/buffer.c
index a2fd743d97cb85380b5140e5d84ba9c50c383b84..c9c266db0624d73efaa487f664006d1bb862151b 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -52,6 +52,7 @@ init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
        bh->b_end_io = handler;
        bh->b_private = private;
 }
+EXPORT_SYMBOL(init_buffer);
 
 static int sync_buffer(void *word)
 {
@@ -80,6 +81,7 @@ void unlock_buffer(struct buffer_head *bh)
        smp_mb__after_clear_bit();
        wake_up_bit(&bh->b_state, BH_Lock);
 }
+EXPORT_SYMBOL(unlock_buffer);
 
 /*
  * Block until a buffer comes unlocked.  This doesn't stop it
@@ -90,6 +92,7 @@ void __wait_on_buffer(struct buffer_head * bh)
 {
        wait_on_bit(&bh->b_state, BH_Lock, sync_buffer, TASK_UNINTERRUPTIBLE);
 }
+EXPORT_SYMBOL(__wait_on_buffer);
 
 static void
 __clear_page_buffers(struct page *page)
@@ -144,6 +147,7 @@ void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
        __end_buffer_read_notouch(bh, uptodate);
        put_bh(bh);
 }
+EXPORT_SYMBOL(end_buffer_read_sync);
 
 void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
 {
@@ -164,6 +168,7 @@ void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
        unlock_buffer(bh);
        put_bh(bh);
 }
+EXPORT_SYMBOL(end_buffer_write_sync);
 
 /*
  * Various filesystems appear to want __find_get_block to be non-blocking.
@@ -199,13 +204,13 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
        head = page_buffers(page);
        bh = head;
        do {
-               if (bh->b_blocknr == block) {
+               if (!buffer_mapped(bh))
+                       all_mapped = 0;
+               else if (bh->b_blocknr == block) {
                        ret = bh;
                        get_bh(bh);
                        goto out_unlock;
                }
-               if (!buffer_mapped(bh))
-                       all_mapped = 0;
                bh = bh->b_this_page;
        } while (bh != head);
 
@@ -272,16 +277,17 @@ void invalidate_bdev(struct block_device *bdev)
        invalidate_bh_lrus();
        invalidate_mapping_pages(mapping, 0, -1);
 }
+EXPORT_SYMBOL(invalidate_bdev);
 
 /*
- * Kick pdflush then try to free up some ZONE_NORMAL memory.
+ * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
  */
 static void free_more_memory(void)
 {
        struct zone *zone;
        int nid;
 
-       wakeup_pdflush(1024);
+       wakeup_flusher_threads(1024);
        yield();
 
        for_each_online_node(nid) {
@@ -290,7 +296,7 @@ static void free_more_memory(void)
                                                &zone);
                if (zone)
                        try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
-                                               GFP_NOFS);
+                                               GFP_NOFS, NULL);
        }
 }
 
@@ -360,7 +366,7 @@ still_busy:
  * Completion handler for block_write_full_page() - pages which are unlocked
  * during I/O, and which have PageWriteback cleared upon I/O completion.
  */
-static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
+void end_buffer_async_write(struct buffer_head *bh, int uptodate)
 {
        char b[BDEVNAME_SIZE];
        unsigned long flags;
@@ -410,6 +416,7 @@ still_busy:
        local_irq_restore(flags);
        return;
 }
+EXPORT_SYMBOL(end_buffer_async_write);
 
 /*
  * If a page's buffers are under async readin (end_buffer_async_read
@@ -438,11 +445,17 @@ static void mark_buffer_async_read(struct buffer_head *bh)
        set_buffer_async_read(bh);
 }
 
-void mark_buffer_async_write(struct buffer_head *bh)
+static void mark_buffer_async_write_endio(struct buffer_head *bh,
+                                         bh_end_io_t *handler)
 {
-       bh->b_end_io = end_buffer_async_write;
+       bh->b_end_io = handler;
        set_buffer_async_write(bh);
 }
+
+void mark_buffer_async_write(struct buffer_head *bh)
+{
+       mark_buffer_async_write_endio(bh, end_buffer_async_write);
+}
 EXPORT_SYMBOL(mark_buffer_async_write);
 
 
@@ -547,6 +560,46 @@ repeat:
        return err;
 }
 
+static void do_thaw_all(struct work_struct *work)
+{
+       struct super_block *sb;
+       char b[BDEVNAME_SIZE];
+
+       spin_lock(&sb_lock);
+restart:
+       list_for_each_entry(sb, &super_blocks, s_list) {
+               sb->s_count++;
+               spin_unlock(&sb_lock);
+               down_read(&sb->s_umount);
+               while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
+                       printk(KERN_WARNING "Emergency Thaw on %s\n",
+                              bdevname(sb->s_bdev, b));
+               up_read(&sb->s_umount);
+               spin_lock(&sb_lock);
+               if (__put_super_and_need_restart(sb))
+                       goto restart;
+       }
+       spin_unlock(&sb_lock);
+       kfree(work);
+       printk(KERN_WARNING "Emergency Thaw complete\n");
+}
+
+/**
+ * emergency_thaw_all -- forcibly thaw every frozen filesystem
+ *
+ * Used for emergency unfreeze of all filesystems via SysRq
+ */
+void emergency_thaw_all(void)
+{
+       struct work_struct *work;
+
+       work = kmalloc(sizeof(*work), GFP_ATOMIC);
+       if (work) {
+               INIT_WORK(work, do_thaw_all);
+               schedule_work(work);
+       }
+}
+
 /**
  * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
  * @mapping: the mapping which wants those buffers written
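
Review note: the hunk above adds the emergency-thaw machinery (do_thaw_all() plus the emergency_thaw_all() trigger) used by the SysRq unfreeze key. A minimal sketch of a SysRq hook driving it follows; the names are illustrative, and it assumes the sysrq_key_op handler signature of this kernel generation. Because emergency_thaw_all() only allocates and queues a work item (GFP_ATOMIC), it is safe from the SysRq path; the thawing itself runs later in do_thaw_all().

#include <linux/sysrq.h>

extern void emergency_thaw_all(void);	/* helper added by the hunk above */

/* Hypothetical SysRq hook; would be registered e.g. via register_sysrq_key(). */
static void sysrq_handle_thaw_sketch(int key, struct tty_struct *tty)
{
	emergency_thaw_all();		/* only schedules do_thaw_all() */
}

static struct sysrq_key_op sysrq_thaw_op_sketch = {
	.handler	= sysrq_handle_thaw_sketch,
	.help_msg	= "thaw-filesystems(J)",
	.action_msg	= "Emergency Thaw of all frozen filesystems",
};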
@@ -621,14 +674,7 @@ static void __set_page_dirty(struct page *page,
        spin_lock_irq(&mapping->tree_lock);
        if (page->mapping) {    /* Race with truncate? */
                WARN_ON_ONCE(warn && !PageUptodate(page));
-
-               if (mapping_cap_account_dirty(mapping)) {
-                       __inc_zone_page_state(page, NR_FILE_DIRTY);
-                       __inc_bdi_stat(mapping->backing_dev_info,
-                                       BDI_RECLAIMABLE);
-                       task_dirty_inc(current);
-                       task_io_account_write(PAGE_CACHE_SIZE);
-               }
+               account_page_dirtied(page, mapping);
                radix_tree_tag_set(&mapping->page_tree,
                                page_index(page), PAGECACHE_TAG_DIRTY);
        }
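
Review note: the per-zone/per-BDI dirty accounting that was open-coded here is now delegated to account_page_dirtied(). The sketch below is a rough reconstruction of what that helper covers, based only on the lines removed above; the authoritative version lives in mm/page-writeback.c, relies on mm-internal helpers, and may differ in detail.

/* Illustrative only: reconstructed from the removed block; not buildable
 * outside mm/ (task_dirty_inc() is mm-internal). */
static void account_page_dirtied_sketch(struct page *page,
					struct address_space *mapping)
{
	if (mapping_cap_account_dirty(mapping)) {
		__inc_zone_page_state(page, NR_FILE_DIRTY);
		__inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
		task_dirty_inc(current);
		task_io_account_write(PAGE_CACHE_SIZE);
	}
}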
@@ -711,7 +757,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 {
        struct buffer_head *bh;
        struct list_head tmp;
-       struct address_space *mapping;
+       struct address_space *mapping, *prev_mapping = NULL;
        int err = 0, err2;
 
        INIT_LIST_HEAD(&tmp);
@@ -736,7 +782,18 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
                                 * contents - it is a noop if I/O is still in
                                 * flight on potentially older contents.
                                 */
-                               ll_rw_block(SWRITE_SYNC, 1, &bh);
+                               ll_rw_block(SWRITE_SYNC_PLUG, 1, &bh);
+
+                               /*
+                                * Kick off IO for the previous mapping. Note
+                                * that we will not run the very last mapping,
+                                * wait_on_buffer() will do that for us
+                                * through sync_buffer().
+                                */
+                               if (prev_mapping && prev_mapping != mapping)
+                                       blk_run_address_space(prev_mapping);
+                               prev_mapping = mapping;
+
                                brelse(bh);
                                spin_lock(lock);
                        }
@@ -1035,12 +1092,12 @@ static struct buffer_head *
 __getblk_slow(struct block_device *bdev, sector_t block, int size)
 {
        /* Size must be multiple of hard sectorsize */
-       if (unlikely(size & (bdev_hardsect_size(bdev)-1) ||
+       if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
                        (size < 512 || size > PAGE_SIZE))) {
                printk(KERN_ERR "getblk(): invalid block size %d requested\n",
                                        size);
-               printk(KERN_ERR "hardsect size: %d\n",
-                                       bdev_hardsect_size(bdev));
+               printk(KERN_ERR "logical block size: %d\n",
+                                       bdev_logical_block_size(bdev));
 
                dump_stack();
                return NULL;
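
Review note: the size sanity check now reports the device's logical block size instead of the old "hard sector size" wording. For reference, a well-formed caller keeps the buffer size a multiple of bdev_logical_block_size() and within 512..PAGE_SIZE, as in this small sketch (the wrapper name is made up):

#include <linux/blkdev.h>
#include <linux/buffer_head.h>

static struct buffer_head *read_one_block_sketch(struct block_device *bdev,
						 sector_t blocknr)
{
	unsigned size = bdev_logical_block_size(bdev);	/* e.g. 512 or 4096 */

	/* size satisfies the __getblk_slow() check above */
	return __bread(bdev, blocknr, size);
}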
@@ -1115,10 +1172,14 @@ void mark_buffer_dirty(struct buffer_head *bh)
 
        if (!test_set_buffer_dirty(bh)) {
                struct page *page = bh->b_page;
-               if (!TestSetPageDirty(page))
-                       __set_page_dirty(page, page_mapping(page), 0);
+               if (!TestSetPageDirty(page)) {
+                       struct address_space *mapping = page_mapping(page);
+                       if (mapping)
+                               __set_page_dirty(page, mapping, 0);
+               }
        }
 }
+EXPORT_SYMBOL(mark_buffer_dirty);
 
 /*
  * Decrement a buffer_head's reference count.  If all buffers against a page
@@ -1135,6 +1196,7 @@ void __brelse(struct buffer_head * buf)
        }
        WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
 }
+EXPORT_SYMBOL(__brelse);
 
 /*
  * bforget() is like brelse(), except it discards any
@@ -1153,6 +1215,7 @@ void __bforget(struct buffer_head *bh)
        }
        __brelse(bh);
 }
+EXPORT_SYMBOL(__bforget);
 
 static struct buffer_head *__bread_slow(struct buffer_head *bh)
 {
@@ -1559,9 +1622,20 @@ EXPORT_SYMBOL(unmap_underlying_metadata);
  * locked buffer.   This only can happen if someone has written the buffer
  * directly, with submit_bh().  At the address_space level PageWriteback
  * prevents this contention from occurring.
+ *
+ * If block_write_full_page() is called with wbc->sync_mode ==
+ * WB_SYNC_ALL, the writes are posted using WRITE_SYNC_PLUG; this
+ * causes the writes to be flagged as synchronous writes, but the
+ * block device queue will NOT be unplugged, since usually many pages
+ * will be pushed out before the higher-level caller actually
+ * waits for the writes to be completed.  The various wait functions,
+ * such as wait_on_writeback_range(), will ultimately call sync_page(),
+ * which in turn calls blk_run_backing_dev() and ends up unplugging
+ * the device queue.
  */
 static int __block_write_full_page(struct inode *inode, struct page *page,
-                       get_block_t *get_block, struct writeback_control *wbc)
+                       get_block_t *get_block, struct writeback_control *wbc,
+                       bh_end_io_t *handler)
 {
        int err;
        sector_t block;
@@ -1569,6 +1643,8 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
        struct buffer_head *bh, *head;
        const unsigned blocksize = 1 << inode->i_blkbits;
        int nr_underway = 0;
+       int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
+                       WRITE_SYNC_PLUG : WRITE);
 
        BUG_ON(!PageLocked(page));
 
@@ -1633,9 +1709,9 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
                /*
                 * If it's a fully non-blocking write attempt and we cannot
                 * lock the buffer then redirty the page.  Note that this can
-                * potentially cause a busy-wait loop from pdflush and kswapd
-                * activity, but those code paths have their own higher-level
-                * throttling.
+                * potentially cause a busy-wait loop from writeback threads
+                * and kswapd activity, but those code paths have their own
+                * higher-level throttling.
                 */
                if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
                        lock_buffer(bh);
@@ -1644,7 +1720,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
                        continue;
                }
                if (test_clear_buffer_dirty(bh)) {
-                       mark_buffer_async_write(bh);
+                       mark_buffer_async_write_endio(bh, handler);
                } else {
                        unlock_buffer(bh);
                }
@@ -1660,7 +1736,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
        do {
                struct buffer_head *next = bh->b_this_page;
                if (buffer_async_write(bh)) {
-                       submit_bh(WRITE, bh);
+                       submit_bh(write_op, bh);
                        nr_underway++;
                }
                bh = next;
@@ -1697,7 +1773,7 @@ recover:
                if (buffer_mapped(bh) && buffer_dirty(bh) &&
                    !buffer_delay(bh)) {
                        lock_buffer(bh);
-                       mark_buffer_async_write(bh);
+                       mark_buffer_async_write_endio(bh, handler);
                } else {
                        /*
                         * The buffer may have been set dirty during
@@ -1714,7 +1790,7 @@ recover:
                struct buffer_head *next = bh->b_this_page;
                if (buffer_async_write(bh)) {
                        clear_buffer_dirty(bh);
-                       submit_bh(WRITE, bh);
+                       submit_bh(write_op, bh);
                        nr_underway++;
                }
                bh = next;
@@ -2152,6 +2228,7 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
        }
        return 0;
 }
+EXPORT_SYMBOL(block_read_full_page);
 
 /* utility function for filesystems that need to do work on expanding
  * truncates.  Uses filesystem pagecache writes to allow the filesystem to
@@ -2162,16 +2239,10 @@ int generic_cont_expand_simple(struct inode *inode, loff_t size)
        struct address_space *mapping = inode->i_mapping;
        struct page *page;
        void *fsdata;
-       unsigned long limit;
        int err;
 
-       err = -EFBIG;
-        limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
-       if (limit != RLIM_INFINITY && size > (loff_t)limit) {
-               send_sig(SIGXFSZ, current, 0);
-               goto out;
-       }
-       if (size > inode->i_sb->s_maxbytes)
+       err = inode_newsize_ok(inode, size);
+       if (err)
                goto out;
 
        err = pagecache_write_begin(NULL, mapping, size, 0,
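
Review note: the RLIMIT_FSIZE and s_maxbytes checks that were open-coded here are now centralised in inode_newsize_ok(). The sketch below is a rough reconstruction based only on the removed lines; the real helper lives elsewhere in the VFS and may differ in detail.

#include <linux/fs.h>
#include <linux/sched.h>	/* current, signal->rlim, send_sig() */

/* Illustrative reconstruction of the checks replaced above. */
static int inode_newsize_ok_sketch(const struct inode *inode, loff_t size)
{
	unsigned long limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;

	if (limit != RLIM_INFINITY && size > (loff_t)limit) {
		send_sig(SIGXFSZ, current, 0);
		return -EFBIG;
	}
	if (size > inode->i_sb->s_maxbytes)
		return -EFBIG;
	return 0;
}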
@@ -2186,6 +2257,7 @@ int generic_cont_expand_simple(struct inode *inode, loff_t size)
 out:
        return err;
 }
+EXPORT_SYMBOL(generic_cont_expand_simple);
 
 static int cont_expand_zero(struct file *file, struct address_space *mapping,
                            loff_t pos, loff_t *bytes)
@@ -2286,6 +2358,7 @@ int cont_write_begin(struct file *file, struct address_space *mapping,
 out:
        return err;
 }
+EXPORT_SYMBOL(cont_write_begin);
 
 int block_prepare_write(struct page *page, unsigned from, unsigned to,
                        get_block_t *get_block)
@@ -2296,6 +2369,7 @@ int block_prepare_write(struct page *page, unsigned from, unsigned to,
                ClearPageUptodate(page);
        return err;
 }
+EXPORT_SYMBOL(block_prepare_write);
 
 int block_commit_write(struct page *page, unsigned from, unsigned to)
 {
@@ -2303,6 +2377,7 @@ int block_commit_write(struct page *page, unsigned from, unsigned to)
        __block_commit_write(inode,page,from,to);
        return 0;
 }
+EXPORT_SYMBOL(block_commit_write);
 
 /*
  * block_page_mkwrite() is not allowed to change the file size as it gets
@@ -2320,20 +2395,22 @@ int block_commit_write(struct page *page, unsigned from, unsigned to)
  * unlock the page.
  */
 int
-block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
+block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
                   get_block_t get_block)
 {
+       struct page *page = vmf->page;
        struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
        unsigned long end;
        loff_t size;
-       int ret = -EINVAL;
+       int ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
 
        lock_page(page);
        size = i_size_read(inode);
        if ((page->mapping != inode->i_mapping) ||
            (page_offset(page) > size)) {
                /* page got truncated out from underneath us */
-               goto out_unlock;
+               unlock_page(page);
+               goto out;
        }
 
        /* page is wholly or partially inside EOF */
@@ -2346,10 +2423,19 @@ block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
        if (!ret)
                ret = block_commit_write(page, 0, end);
 
-out_unlock:
-       unlock_page(page);
+       if (unlikely(ret)) {
+               unlock_page(page);
+               if (ret == -ENOMEM)
+                       ret = VM_FAULT_OOM;
+               else /* -ENOSPC, -EIO, etc */
+                       ret = VM_FAULT_SIGBUS;
+       } else
+               ret = VM_FAULT_LOCKED;
+
+out:
        return ret;
 }
+EXPORT_SYMBOL(block_page_mkwrite);
 
 /*
  * nobh_write_begin()'s prereads are special: the buffer_heads are freed
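
Review note: block_page_mkwrite() now takes the vm_fault and returns VM_FAULT_* codes directly (VM_FAULT_LOCKED on success, with the page left locked), so a filesystem can plug it straight into its vm_operations_struct. A minimal sketch, where example_get_block is a hypothetical, declared-only stand-in for the filesystem's get_block_t:

#include <linux/buffer_head.h>
#include <linux/mm.h>

int example_get_block(struct inode *inode, sector_t iblock,
		      struct buffer_head *bh_result, int create);

static int example_page_mkwrite(struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	/* VM_FAULT_LOCKED on success; NOPAGE/OOM/SIGBUS on failure */
	return block_page_mkwrite(vma, vmf, example_get_block);
}

static const struct vm_operations_struct example_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= example_page_mkwrite,
};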
@@ -2615,7 +2701,8 @@ int nobh_writepage(struct page *page, get_block_t *get_block,
 out:
        ret = mpage_writepage(page, get_block, wbc);
        if (ret == -EAGAIN)
-               ret = __block_write_full_page(inode, page, get_block, wbc);
+               ret = __block_write_full_page(inode, page, get_block, wbc,
+                                             end_buffer_async_write);
        return ret;
 }
 EXPORT_SYMBOL(nobh_writepage);
@@ -2662,6 +2749,8 @@ has_buffers:
                pos += blocksize;
        }
 
+       map_bh.b_size = blocksize;
+       map_bh.b_state = 0;
        err = get_block(inode, iblock, &map_bh, 0);
        if (err)
                goto unlock;
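
Review note: the two added lines fix a real pitfall; get_block() implementations expect b_state and b_size to be initialised on a caller-provided buffer_head before the lookup. A small sketch of that calling convention, in the spirit of the fixed code above (names illustrative):

#include <linux/buffer_head.h>

static sector_t map_single_block_sketch(struct inode *inode, sector_t iblock,
					unsigned blocksize,
					get_block_t *get_block)
{
	struct buffer_head map_bh;

	map_bh.b_state = 0;		/* must be cleared before get_block() */
	map_bh.b_size = blocksize;	/* how much we ask get_block() to map */
	if (get_block(inode, iblock, &map_bh, 0))	/* create == 0: lookup only */
		return 0;
	if (!buffer_mapped(&map_bh))
		return 0;		/* hole: no on-disk block assigned */
	return map_bh.b_blocknr;
}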
@@ -2770,12 +2859,14 @@ unlock:
 out:
        return err;
 }
+EXPORT_SYMBOL(block_truncate_page);
 
 /*
  * The generic ->writepage function for buffer-backed address_spaces
+ * where the caller supplies the end_io handler used to finish the IO.
  */
-int block_write_full_page(struct page *page, get_block_t *get_block,
-                       struct writeback_control *wbc)
+int block_write_full_page_endio(struct page *page, get_block_t *get_block,
+                       struct writeback_control *wbc, bh_end_io_t *handler)
 {
        struct inode * const inode = page->mapping->host;
        loff_t i_size = i_size_read(inode);
@@ -2784,7 +2875,8 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
 
        /* Is the page fully inside i_size? */
        if (page->index < end_index)
-               return __block_write_full_page(inode, page, get_block, wbc);
+               return __block_write_full_page(inode, page, get_block, wbc,
+                                              handler);
 
        /* Is the page fully outside i_size? (truncate in progress) */
        offset = i_size & (PAGE_CACHE_SIZE-1);
@@ -2801,14 +2893,26 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
 
        /*
         * The page straddles i_size.  It must be zeroed out on each and every
-        * writepage invokation because it may be mmapped.  "A file is mapped
+        * writepage invocation because it may be mmapped.  "A file is mapped
         * in multiples of the page size.  For a file that is not a multiple of
         * the  page size, the remaining memory is zeroed when mapped, and
         * writes to that region are not written out to the file."
         */
        zero_user_segment(page, offset, PAGE_CACHE_SIZE);
-       return __block_write_full_page(inode, page, get_block, wbc);
+       return __block_write_full_page(inode, page, get_block, wbc, handler);
+}
+EXPORT_SYMBOL(block_write_full_page_endio);
+
+/*
+ * The generic ->writepage function for buffer-backed address_spaces
+ */
+int block_write_full_page(struct page *page, get_block_t *get_block,
+                       struct writeback_control *wbc)
+{
+       return block_write_full_page_endio(page, get_block, wbc,
+                                          end_buffer_async_write);
 }
+EXPORT_SYMBOL(block_write_full_page);
 
 sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
                            get_block_t *get_block)
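
Review note: block_write_full_page_endio() is the new entry point for filesystems that want their own buffer completion callback chained in front of the stock one, while block_write_full_page() becomes a thin wrapper passing end_buffer_async_write (now exported above). A minimal ->writepage sketch using the new variant; every example_* name is hypothetical:

#include <linux/buffer_head.h>

int example_get_block(struct inode *inode, sector_t iblock,
		      struct buffer_head *bh_result, int create);

static void example_end_buffer_write(struct buffer_head *bh, int uptodate)
{
	/* filesystem-private bookkeeping would go here ... */
	end_buffer_async_write(bh, uptodate);	/* then the stock completion */
}

static int example_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page_endio(page, example_get_block, wbc,
					   example_end_buffer_write);
}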
@@ -2821,6 +2925,7 @@ sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
        get_block(inode, block, &tmp, 0);
        return tmp.b_blocknr;
 }
+EXPORT_SYMBOL(generic_block_bmap);
 
 static void end_bio_bh_io_sync(struct bio *bio, int err)
 {
@@ -2846,6 +2951,8 @@ int submit_bh(int rw, struct buffer_head * bh)
        BUG_ON(!buffer_locked(bh));
        BUG_ON(!buffer_mapped(bh));
        BUG_ON(!bh->b_end_io);
+       BUG_ON(buffer_delay(bh));
+       BUG_ON(buffer_unwritten(bh));
 
        /*
         * Mask in barrier bit for a write (could be either a WRITE or a
@@ -2888,6 +2995,7 @@ int submit_bh(int rw, struct buffer_head * bh)
        bio_put(bio);
        return ret;
 }
+EXPORT_SYMBOL(submit_bh);
 
 /**
  * ll_rw_block: low-level access to block devices (DEPRECATED)
@@ -2922,12 +3030,13 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
        for (i = 0; i < nr; i++) {
                struct buffer_head *bh = bhs[i];
 
-               if (rw == SWRITE || rw == SWRITE_SYNC)
+               if (rw == SWRITE || rw == SWRITE_SYNC || rw == SWRITE_SYNC_PLUG)
                        lock_buffer(bh);
                else if (!trylock_buffer(bh))
                        continue;
 
-               if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC) {
+               if (rw == WRITE || rw == SWRITE || rw == SWRITE_SYNC ||
+                   rw == SWRITE_SYNC_PLUG) {
                        if (test_clear_buffer_dirty(bh)) {
                                bh->b_end_io = end_buffer_write_sync;
                                get_bh(bh);
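
Review note: SWRITE_SYNC_PLUG gives ll_rw_block() callers the locked, dirty-clearing synchronous write without an implicit queue unplug (fsync_buffers_list above is the in-tree user). For reference, the classic read-side use of this (deprecated) helper looks like the sketch below; the wrapper name is illustrative:

#include <linux/buffer_head.h>

static int read_block_sketch(struct buffer_head *bh)
{
	if (!buffer_uptodate(bh)) {
		ll_rw_block(READ, 1, &bh);	/* skips buffers it cannot lock */
		wait_on_buffer(bh);		/* waits for whoever holds the lock */
		if (!buffer_uptodate(bh))
			return -EIO;
	}
	return 0;
}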
@@ -2948,6 +3057,7 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
                unlock_buffer(bh);
        }
 }
+EXPORT_SYMBOL(ll_rw_block);
 
 /*
  * For a data-integrity writeout, we need to wait upon any in-progress I/O
@@ -2963,7 +3073,7 @@ int sync_dirty_buffer(struct buffer_head *bh)
        if (test_clear_buffer_dirty(bh)) {
                get_bh(bh);
                bh->b_end_io = end_buffer_write_sync;
-               ret = submit_bh(WRITE, bh);
+               ret = submit_bh(WRITE_SYNC, bh);
                wait_on_buffer(bh);
                if (buffer_eopnotsupp(bh)) {
                        clear_buffer_eopnotsupp(bh);
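
Review note: sync_dirty_buffer() now submits WRITE_SYNC rather than plain WRITE, so single-buffer data-integrity writes are flagged synchronous all the way down. Typical use stays the same, as in this small sketch (wrapper name illustrative):

#include <linux/buffer_head.h>

static int write_block_now_sketch(struct buffer_head *bh)
{
	/* caller has already updated bh->b_data */
	mark_buffer_dirty(bh);
	return sync_dirty_buffer(bh);	/* submits WRITE_SYNC and waits; 0 or -EIO */
}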
@@ -2976,6 +3086,7 @@ int sync_dirty_buffer(struct buffer_head *bh)
        }
        return ret;
 }
+EXPORT_SYMBOL(sync_dirty_buffer);
 
 /*
  * try_to_free_buffers() checks if all the buffers on this particular page
@@ -3090,13 +3201,14 @@ void block_sync_page(struct page *page)
        if (mapping)
                blk_run_backing_dev(mapping->backing_dev_info, page);
 }
+EXPORT_SYMBOL(block_sync_page);
 
 /*
  * There are no bdflush tunables left.  But distributions are
  * still running obsolete flush daemons, so we terminate them here.
  *
  * Use of bdflush() is deprecated and will be removed in a future kernel.
- * The `pdflush' kernel threads fully replace bdflush daemons and this call.
+ * The `flush-X' kernel threads fully replace bdflush daemons and this call.
  */
 SYSCALL_DEFINE2(bdflush, int, func, long, data)
 {
@@ -3153,7 +3265,7 @@ static void recalc_bh_state(void)
        
 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
 {
-       struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
+       struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
        if (ret) {
                INIT_LIST_HEAD(&ret->b_assoc_buffers);
                get_cpu_var(bh_accounting).nr++;
@@ -3240,15 +3352,6 @@ int bh_submit_read(struct buffer_head *bh)
 }
 EXPORT_SYMBOL(bh_submit_read);
 
-static void
-init_buffer_head(void *data)
-{
-       struct buffer_head *bh = data;
-
-       memset(bh, 0, sizeof(*bh));
-       INIT_LIST_HEAD(&bh->b_assoc_buffers);
-}
-
 void __init buffer_init(void)
 {
        int nrpages;
@@ -3257,7 +3360,7 @@ void __init buffer_init(void)
                        sizeof(struct buffer_head), 0,
                                (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
                                SLAB_MEM_SPREAD),
-                               init_buffer_head);
+                               NULL);
 
        /*
         * Limit the bh occupancy to 10% of ZONE_NORMAL
@@ -3266,28 +3369,3 @@ void __init buffer_init(void)
        max_buffer_heads = nrpages * (PAGE_SIZE / sizeof(struct buffer_head));
        hotcpu_notifier(buffer_cpu_notify, 0);
 }
-
-EXPORT_SYMBOL(__bforget);
-EXPORT_SYMBOL(__brelse);
-EXPORT_SYMBOL(__wait_on_buffer);
-EXPORT_SYMBOL(block_commit_write);
-EXPORT_SYMBOL(block_prepare_write);
-EXPORT_SYMBOL(block_page_mkwrite);
-EXPORT_SYMBOL(block_read_full_page);
-EXPORT_SYMBOL(block_sync_page);
-EXPORT_SYMBOL(block_truncate_page);
-EXPORT_SYMBOL(block_write_full_page);
-EXPORT_SYMBOL(cont_write_begin);
-EXPORT_SYMBOL(end_buffer_read_sync);
-EXPORT_SYMBOL(end_buffer_write_sync);
-EXPORT_SYMBOL(file_fsync);
-EXPORT_SYMBOL(fsync_bdev);
-EXPORT_SYMBOL(generic_block_bmap);
-EXPORT_SYMBOL(generic_cont_expand_simple);
-EXPORT_SYMBOL(init_buffer);
-EXPORT_SYMBOL(invalidate_bdev);
-EXPORT_SYMBOL(ll_rw_block);
-EXPORT_SYMBOL(mark_buffer_dirty);
-EXPORT_SYMBOL(submit_bh);
-EXPORT_SYMBOL(sync_dirty_buffer);
-EXPORT_SYMBOL(unlock_buffer);