Merge git://git.kernel.org/pub/scm/linux/kernel/git/bunk/trivial
Linus Torvalds [Sun, 26 Mar 2006 17:41:18 +0000 (09:41 -0800)]
* git://git.kernel.org/pub/scm/linux/kernel/git/bunk/trivial:
  drivers/char/ftape/lowlevel/fdc-io.c: Correct a comment
  Kconfig help: MTD_JEDECPROBE already supports Intel
  Remove ugly debugging stuff
  do_mounts.c: Minor ROOT_DEV comment cleanup
  BUG_ON() Conversion in drivers/s390/block/dasd_devmap.c
  BUG_ON() Conversion in mm/mempool.c
  BUG_ON() Conversion in mm/memory.c
  BUG_ON() Conversion in kernel/fork.c
  BUG_ON() Conversion in ipc/sem.c
  BUG_ON() Conversion in fs/ext2/
  BUG_ON() Conversion in fs/hfs/
  BUG_ON() Conversion in fs/dcache.c
  BUG_ON() Conversion in fs/buffer.c
  BUG_ON() Conversion in input/serio/hp_sdc_mlc.c
  BUG_ON() Conversion in md/dm-table.c
  BUG_ON() Conversion in md/dm-path-selector.c
  BUG_ON() Conversion in drivers/isdn
  BUG_ON() Conversion in drivers/char
  BUG_ON() Conversion in drivers/mtd/
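
All of the BUG_ON() conversions listed above follow the same mechanical pattern: an open-coded "if (condition) BUG();" collapses into a single "BUG_ON(condition);" statement. A minimal before/after sketch, using the unshare_files() check from the kernel/fork.c hunk below:

	/* before: open-coded check and trap */
	if (!files)
		BUG();

	/* after: equivalent single-statement form */
	BUG_ON(!files);

On most architectures BUG_ON(cond) expands to roughly do { if (unlikely(cond)) BUG(); } while (0), so behaviour is unchanged; the shorter form just reads better and keeps the unlikely() hint in one place.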

fs/buffer.c
fs/dcache.c
ipc/sem.c
kernel/fork.c
mm/memory.c
mm/mempool.c

diff --combined fs/buffer.c
@@@ -426,10 -426,8 +426,10 @@@ __find_get_block_slow(struct block_devi
        if (all_mapped) {
                printk("__find_get_block_slow() failed. "
                        "block=%llu, b_blocknr=%llu\n",
 -                      (unsigned long long)block, (unsigned long long)bh->b_blocknr);
 -              printk("b_state=0x%08lx, b_size=%u\n", bh->b_state, bh->b_size);
 +                      (unsigned long long)block,
 +                      (unsigned long long)bh->b_blocknr);
 +              printk("b_state=0x%08lx, b_size=%zu\n",
 +                      bh->b_state, bh->b_size);
                printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
        }
  out_unlock:
@@@ -798,8 -796,7 +798,7 @@@ void mark_buffer_dirty_inode(struct buf
        if (!mapping->assoc_mapping) {
                mapping->assoc_mapping = buffer_mapping;
        } else {
-               if (mapping->assoc_mapping != buffer_mapping)
-                       BUG();
+               BUG_ON(mapping->assoc_mapping != buffer_mapping);
        }
        if (list_empty(&bh->b_assoc_buffers)) {
                spin_lock(&buffer_mapping->private_lock);
@@@ -1116,8 -1113,7 +1115,7 @@@ grow_dev_page(struct block_device *bdev
        if (!page)
                return NULL;
  
-       if (!PageLocked(page))
-               BUG();
+       BUG_ON(!PageLocked(page));
  
        if (page_has_buffers(page)) {
                bh = page_buffers(page);
@@@ -1524,8 -1520,7 +1522,7 @@@ void set_bh_page(struct buffer_head *bh
                struct page *page, unsigned long offset)
  {
        bh->b_page = page;
-       if (offset >= PAGE_SIZE)
-               BUG();
+       BUG_ON(offset >= PAGE_SIZE);
        if (PageHighMem(page))
                /*
                 * This catches illegal uses and preserves the offset:
@@@ -1595,10 -1590,11 +1592,10 @@@ EXPORT_SYMBOL(try_to_release_page)
   * point.  Because the caller is about to free (and possibly reuse) those
   * blocks on-disk.
   */
 -int block_invalidatepage(struct page *page, unsigned long offset)
 +void block_invalidatepage(struct page *page, unsigned long offset)
  {
        struct buffer_head *head, *bh, *next;
        unsigned int curr_off = 0;
 -      int ret = 1;
  
        BUG_ON(!PageLocked(page));
        if (!page_has_buffers(page))
         * so real IO is not possible anymore.
         */
        if (offset == 0)
 -              ret = try_to_release_page(page, 0);
 +              try_to_release_page(page, 0);
  out:
 -      return ret;
 +      return;
  }
  EXPORT_SYMBOL(block_invalidatepage);
  
 -int do_invalidatepage(struct page *page, unsigned long offset)
 +void do_invalidatepage(struct page *page, unsigned long offset)
  {
 -      int (*invalidatepage)(struct page *, unsigned long);
 -      invalidatepage = page->mapping->a_ops->invalidatepage;
 -      if (invalidatepage == NULL)
 -              invalidatepage = block_invalidatepage;
 -      return (*invalidatepage)(page, offset);
 +      void (*invalidatepage)(struct page *, unsigned long);
 +      invalidatepage = page->mapping->a_ops->invalidatepage ? :
 +              block_invalidatepage;
 +      (*invalidatepage)(page, offset);
  }
  
  /*
@@@ -1738,7 -1735,6 +1735,7 @@@ static int __block_write_full_page(stru
        sector_t block;
        sector_t last_block;
        struct buffer_head *bh, *head;
 +      const unsigned blocksize = 1 << inode->i_blkbits;
        int nr_underway = 0;
  
        BUG_ON(!PageLocked(page));
        last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
  
        if (!page_has_buffers(page)) {
 -              create_empty_buffers(page, 1 << inode->i_blkbits,
 +              create_empty_buffers(page, blocksize,
                                        (1 << BH_Dirty)|(1 << BH_Uptodate));
        }
  
                        clear_buffer_dirty(bh);
                        set_buffer_uptodate(bh);
                } else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
 +                      WARN_ON(bh->b_size != blocksize);
                        err = get_block(inode, block, bh, 1);
                        if (err)
                                goto recover;
@@@ -1935,7 -1930,6 +1932,7 @@@ static int __block_prepare_write(struc
                if (buffer_new(bh))
                        clear_buffer_new(bh);
                if (!buffer_mapped(bh)) {
 +                      WARN_ON(bh->b_size != blocksize);
                        err = get_block(inode, block, bh, 1);
                        if (err)
                                break;
@@@ -2091,7 -2085,6 +2088,7 @@@ int block_read_full_page(struct page *p
  
                        fully_mapped = 0;
                        if (iblock < lblock) {
 +                              WARN_ON(bh->b_size != blocksize);
                                err = get_block(inode, iblock, bh, 0);
                                if (err)
                                        SetPageError(page);
@@@ -2413,7 -2406,6 +2410,7 @@@ int nobh_prepare_write(struct page *pag
                create = 1;
                if (block_start >= to)
                        create = 0;
 +              map_bh.b_size = blocksize;
                ret = get_block(inode, block_in_file + block_in_page,
                                        &map_bh, create);
                if (ret)
@@@ -2674,7 -2666,6 +2671,7 @@@ int block_truncate_page(struct address_
  
        err = 0;
        if (!buffer_mapped(bh)) {
 +              WARN_ON(bh->b_size != blocksize);
                err = get_block(inode, iblock, bh, 0);
                if (err)
                        goto unlock;
@@@ -2761,7 -2752,6 +2758,7 @@@ sector_t generic_block_bmap(struct addr
        struct inode *inode = mapping->host;
        tmp.b_state = 0;
        tmp.b_blocknr = 0;
 +      tmp.b_size = 1 << inode->i_blkbits;
        get_block(inode, block, &tmp, 0);
        return tmp.b_blocknr;
  }
@@@ -3014,7 -3004,7 +3011,7 @@@ out
  }
  EXPORT_SYMBOL(try_to_free_buffers);
  
 -int block_sync_page(struct page *page)
 +void block_sync_page(struct page *page)
  {
        struct address_space *mapping;
  
        mapping = page_mapping(page);
        if (mapping)
                blk_run_backing_dev(mapping->backing_dev_info, page);
 -      return 0;
  }
  
  /*
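Beyond the BUG_ON() conversions, the fs/buffer.c hunks above set b_size on every buffer_head handed to get_block() and add WARN_ON() checks where exactly one filesystem block is expected; get_block() implementations are now expected to read the requested mapping size from bh->b_size. A minimal sketch of the calling convention, modelled on the generic_block_bmap() hunk above (variable names as in that function):

	struct buffer_head tmp;
	struct inode *inode = mapping->host;

	tmp.b_state = 0;
	tmp.b_blocknr = 0;
	tmp.b_size = 1 << inode->i_blkbits;	/* request a single fs block */
	get_block(inode, block, &tmp, 0);	/* fs reads the size from b_size */
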
diff --combined fs/dcache.c
@@@ -34,9 -34,8 +34,8 @@@
  #include <linux/swap.h>
  #include <linux/bootmem.h>
  
- /* #define DCACHE_DEBUG 1 */
  
 -int sysctl_vfs_cache_pressure = 100;
 +int sysctl_vfs_cache_pressure __read_mostly = 100;
  EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
  
   __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lock);
@@@ -44,7 -43,7 +43,7 @@@ static seqlock_t rename_lock __cachelin
  
  EXPORT_SYMBOL(dcache_lock);
  
 -static kmem_cache_t *dentry_cache; 
 +static kmem_cache_t *dentry_cache __read_mostly;
  
  #define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
  
@@@ -59,9 -58,9 +58,9 @@@
  #define D_HASHBITS     d_hash_shift
  #define D_HASHMASK     d_hash_mask
  
 -static unsigned int d_hash_mask;
 -static unsigned int d_hash_shift;
 -static struct hlist_head *dentry_hashtable;
 +static unsigned int d_hash_mask __read_mostly;
 +static unsigned int d_hash_shift __read_mostly;
 +static struct hlist_head *dentry_hashtable __read_mostly;
  static LIST_HEAD(dentry_unused);
  
  /* Statistics gathering. */
@@@ -603,10 -602,6 +602,6 @@@ resume
                 */
                if (!list_empty(&dentry->d_subdirs)) {
                        this_parent = dentry;
- #ifdef DCACHE_DEBUG
- printk(KERN_DEBUG "select_parent: descending to %s/%s, found=%d\n",
- dentry->d_parent->d_name.name, dentry->d_name.name, found);
- #endif
                        goto repeat;
                }
        }
        if (this_parent != parent) {
                next = this_parent->d_u.d_child.next;
                this_parent = this_parent->d_parent;
- #ifdef DCACHE_DEBUG
- printk(KERN_DEBUG "select_parent: ascending to %s/%s, found=%d\n",
- this_parent->d_parent->d_name.name, this_parent->d_name.name, found);
- #endif
                goto resume;
        }
  out:
@@@ -798,7 -789,7 +789,7 @@@ struct dentry *d_alloc_name(struct dent
   
  void d_instantiate(struct dentry *entry, struct inode * inode)
  {
-       if (!list_empty(&entry->d_alias)) BUG();
+       BUG_ON(!list_empty(&entry->d_alias));
        spin_lock(&dcache_lock);
        if (inode)
                list_add(&entry->d_alias, &inode->i_dentry);
@@@ -1719,10 -1710,10 +1710,10 @@@ static void __init dcache_init(unsigne
  }
  
  /* SLAB cache for __getname() consumers */
 -kmem_cache_t *names_cachep;
 +kmem_cache_t *names_cachep __read_mostly;
  
  /* SLAB cache for file structures */
 -kmem_cache_t *filp_cachep;
 +kmem_cache_t *filp_cachep __read_mostly;
  
  EXPORT_SYMBOL(d_genocide);
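
The fs/dcache.c changes (besides dropping the old DCACHE_DEBUG printks and one BUG_ON() conversion) tag rarely-written globals with __read_mostly, which on most architectures places them in a dedicated .data.read_mostly section so they do not share cache lines with frequently written data. The pattern, shown on one of the variables above:

	/* before */
	static unsigned int d_hash_mask;

	/* after: grouped with other read-mostly data to limit false sharing */
	static unsigned int d_hash_mask __read_mostly;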
  
diff --combined ipc/sem.c
+++ b/ipc/sem.c
@@@ -75,8 -75,6 +75,8 @@@
  #include <linux/audit.h>
  #include <linux/capability.h>
  #include <linux/seq_file.h>
 +#include <linux/mutex.h>
 +
  #include <asm/uaccess.h>
  #include "util.h"
  
@@@ -141,7 -139,7 +141,7 @@@ void __init sem_init (void
   *    * if it's IN_WAKEUP, then it must wait until the value changes
   *    * if it's not -EINTR, then the operation was completed by
   *      update_queue. semtimedop can return queue.status without
 - *      performing any operation on the semaphore array.
 + *      performing any operation on the sem array.
   *    * otherwise it must acquire the spinlock and check what's up.
   *
   * The two-stage algorithm is necessary to protect against the following
@@@ -216,7 -214,7 +216,7 @@@ asmlinkage long sys_semget (key_t key, 
  
        if (nsems < 0 || nsems > sc_semmsl)
                return -EINVAL;
 -      down(&sem_ids.sem);
 +      mutex_lock(&sem_ids.mutex);
        
        if (key == IPC_PRIVATE) {
                err = newary(key, nsems, semflg);
                err = -EEXIST;
        } else {
                sma = sem_lock(id);
-               if(sma==NULL)
-                       BUG();
+               BUG_ON(sma==NULL);
                if (nsems > sma->sem_nsems)
                        err = -EINVAL;
                else if (ipcperms(&sma->sem_perm, semflg))
                sem_unlock(sma);
        }
  
 -      up(&sem_ids.sem);
 +      mutex_unlock(&sem_ids.mutex);
        return err;
  }
  
@@@ -439,8 -436,8 +438,8 @@@ static int count_semzcnt (struct sem_ar
        return semzcnt;
  }
  
 -/* Free a semaphore set. freeary() is called with sem_ids.sem down and
 - * the spinlock for this semaphore set hold. sem_ids.sem remains locked
 +/* Free a semaphore set. freeary() is called with sem_ids.mutex locked and
 + * the spinlock for this semaphore set hold. sem_ids.mutex remains locked
   * on exit.
   */
  static void freeary (struct sem_array *sma, int id)
@@@ -527,7 -524,7 +526,7 @@@ static int semctl_nolock(int semid, in
                seminfo.semmnu = SEMMNU;
                seminfo.semmap = SEMMAP;
                seminfo.semume = SEMUME;
 -              down(&sem_ids.sem);
 +              mutex_lock(&sem_ids.mutex);
                if (cmd == SEM_INFO) {
                        seminfo.semusz = sem_ids.in_use;
                        seminfo.semaem = used_sems;
                        seminfo.semaem = SEMAEM;
                }
                max_id = sem_ids.max_id;
 -              up(&sem_ids.sem);
 +              mutex_unlock(&sem_ids.mutex);
                if (copy_to_user (arg.__buf, &seminfo, sizeof(struct seminfo))) 
                        return -EFAULT;
                return (max_id < 0) ? 0: max_id;
@@@ -887,9 -884,9 +886,9 @@@ asmlinkage long sys_semctl (int semid, 
                return err;
        case IPC_RMID:
        case IPC_SET:
 -              down(&sem_ids.sem);
 +              mutex_lock(&sem_ids.mutex);
                err = semctl_down(semid,semnum,cmd,version,arg);
 -              up(&sem_ids.sem);
 +              mutex_unlock(&sem_ids.mutex);
                return err;
        default:
                return -EINVAL;
@@@ -1183,8 -1180,7 +1182,7 @@@ retry_undos
  
        sma = sem_lock(semid);
        if(sma==NULL) {
-               if(queue.prev != NULL)
-                       BUG();
+               BUG_ON(queue.prev != NULL);
                error = -EIDRM;
                goto out_free;
        }
@@@ -1301,9 -1297,9 +1299,9 @@@ found
                /* perform adjustments registered in u */
                nsems = sma->sem_nsems;
                for (i = 0; i < nsems; i++) {
 -                      struct sem * sem = &sma->sem_base[i];
 +                      struct sem * semaphore = &sma->sem_base[i];
                        if (u->semadj[i]) {
 -                              sem->semval += u->semadj[i];
 +                              semaphore->semval += u->semadj[i];
                                /*
                                 * Range checks of the new semaphore value,
                                 * not defined by sus:
                                 *
                                 *      Manfred <manfred@colorfullife.com>
                                 */
 -                              if (sem->semval < 0)
 -                                      sem->semval = 0;
 -                              if (sem->semval > SEMVMX)
 -                                      sem->semval = SEMVMX;
 -                              sem->sempid = current->tgid;
 +                              if (semaphore->semval < 0)
 +                                      semaphore->semval = 0;
 +                              if (semaphore->semval > SEMVMX)
 +                                      semaphore->semval = SEMVMX;
 +                              semaphore->sempid = current->tgid;
                        }
                }
                sma->sem_otime = get_seconds();
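
The ipc/sem.c hunks also convert the sem_ids lock from a semaphore used for plain mutual exclusion to the struct mutex primitive introduced in 2.6.16: <linux/mutex.h> is included and every down()/up() pair becomes mutex_lock()/mutex_unlock(). A minimal sketch of the general conversion pattern (the lock and function names here are illustrative, not the actual sem_ids fields):

	#include <linux/mutex.h>

	static DEFINE_MUTEX(my_lock);	/* was: static DECLARE_MUTEX(my_lock); */

	static void my_critical_section(void)
	{
		mutex_lock(&my_lock);	/* was: down(&my_lock); */
		/* ... touch shared state ... */
		mutex_unlock(&my_lock);	/* was: up(&my_lock);   */
	}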
diff --combined kernel/fork.c
@@@ -769,8 -769,7 +769,7 @@@ int unshare_files(void
        struct files_struct *files  = current->files;
        int rc;
  
-       if(!files)
-               BUG();
+       BUG_ON(!files);
  
        /* This can race but the race causes us to copy when we don't
           need to and drop the copy */
@@@ -848,7 -847,7 +847,7 @@@ static inline int copy_signal(unsigned 
        hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_REL);
        sig->it_real_incr.tv64 = 0;
        sig->real_timer.function = it_real_fn;
 -      sig->real_timer.data = tsk;
 +      sig->tsk = tsk;
  
        sig->it_virt_expires = cputime_zero;
        sig->it_virt_incr = cputime_zero;
diff --combined mm/memory.c
@@@ -1071,8 -1071,6 +1071,8 @@@ int get_user_pages(struct task_struct *
                        }
                        if (pages) {
                                pages[i] = page;
 +
 +                              flush_anon_page(page, start);
                                flush_dcache_page(page);
                        }
                        if (vmas)
@@@ -2354,10 -2352,8 +2354,8 @@@ int make_pages_present(unsigned long ad
        if (!vma)
                return -1;
        write = (vma->vm_flags & VM_WRITE) != 0;
-       if (addr >= end)
-               BUG();
-       if (end > vma->vm_end)
-               BUG();
+       BUG_ON(addr >= end);
+       BUG_ON(end > vma->vm_end);
        len = (end+PAGE_SIZE-1)/PAGE_SIZE-addr/PAGE_SIZE;
        ret = get_user_pages(current, current->mm, addr,
                        len, write, 0, NULL, NULL);
diff --combined mm/mempool.c
@@@ -183,8 -183,8 +183,8 @@@ EXPORT_SYMBOL(mempool_resize)
   */
  void mempool_destroy(mempool_t *pool)
  {
-       if (pool->curr_nr != pool->min_nr)
-               BUG();          /* There were outstanding elements */
+       /* Check for outstanding elements */
+       BUG_ON(pool->curr_nr != pool->min_nr);
        free_pool(pool);
  }
  EXPORT_SYMBOL(mempool_destroy);
@@@ -289,45 -289,3 +289,45 @@@ void mempool_free_slab(void *element, v
        kmem_cache_free(mem, element);
  }
  EXPORT_SYMBOL(mempool_free_slab);
 +
 +/*
 + * A commonly used alloc and free fn that kmalloc/kfrees the amount of memory
 + * specfied by pool_data
 + */
 +void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
 +{
 +      size_t size = (size_t)(long)pool_data;
 +      return kmalloc(size, gfp_mask);
 +}
 +EXPORT_SYMBOL(mempool_kmalloc);
 +
 +void *mempool_kzalloc(gfp_t gfp_mask, void *pool_data)
 +{
 +      size_t size = (size_t) pool_data;
 +      return kzalloc(size, gfp_mask);
 +}
 +EXPORT_SYMBOL(mempool_kzalloc);
 +
 +void mempool_kfree(void *element, void *pool_data)
 +{
 +      kfree(element);
 +}
 +EXPORT_SYMBOL(mempool_kfree);
 +
 +/*
 + * A simple mempool-backed page allocator that allocates pages
 + * of the order specified by pool_data.
 + */
 +void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
 +{
 +      int order = (int)(long)pool_data;
 +      return alloc_pages(gfp_mask, order);
 +}
 +EXPORT_SYMBOL(mempool_alloc_pages);
 +
 +void mempool_free_pages(void *element, void *pool_data)
 +{
 +      int order = (int)(long)pool_data;
 +      __free_pages(element, order);
 +}
 +EXPORT_SYMBOL(mempool_free_pages);
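
The new kmalloc- and page-backed helpers added above mean callers no longer have to write their own mempool callbacks. A usage sketch; the function name, reserve count and element size below are made up for illustration:

	#include <linux/mempool.h>

	static int example_init(void)
	{
		mempool_t *pool;
		void *elem;

		/* reserve 4 preallocated 256-byte elements, kmalloc-backed */
		pool = mempool_create(4, mempool_kmalloc, mempool_kfree,
				      (void *)(unsigned long)256);
		if (!pool)
			return -ENOMEM;

		/* falls back to the preallocated reserve under memory pressure */
		elem = mempool_alloc(pool, GFP_KERNEL);
		/* ... use elem ... */
		mempool_free(elem, pool);
		mempool_destroy(pool);
		return 0;
	}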