Merge commit 'v2.6.30-rc6' into perfcounters/core
Ingo Molnar [Mon, 18 May 2009 05:37:44 +0000 (07:37 +0200)]
Merge reason: this branch was on an -rc4 base, merge it up to -rc6
              to get the latest upstream fixes.

Signed-off-by: Ingo Molnar <mingo@elte.hu>

MAINTAINERS
arch/x86/kernel/cpu/common.c
drivers/char/sysrq.c
fs/exec.c
include/linux/syscalls.h
kernel/sched.c
kernel/sysctl.c
mm/mmap.c

diff --combined MAINTAINERS
@@@ -3434,11 -3434,10 +3434,10 @@@ L:   linuxppc-dev@ozlabs.or
  S:    Maintained
  
  LINUX FOR POWERPC EMBEDDED MPC5XXX
  P:    Grant Likely
  M:    grant.likely@secretlab.ca
  L:    linuxppc-dev@ozlabs.org
+ T:    git git://git.secretlab.ca/git/linux-2.6.git
  S:    Maintained
  
  LINUX FOR POWERPC EMBEDDED PPC4XX
@@@ -3456,6 -3455,7 +3455,7 @@@ P:      Grant Likel
  M:    grant.likely@secretlab.ca
  W:    http://wiki.secretlab.ca/index.php/Linux_on_Xilinx_Virtex
  L:    linuxppc-dev@ozlabs.org
+ T:    git git://git.secretlab.ca/git/linux-2.6.git
  S:    Maintained
  
  LINUX FOR POWERPC EMBEDDED PPC8XX
@@@ -4189,7 -4189,7 +4189,7 @@@ P:      Joel Becke
  M:    joel.becker@oracle.com
  L:    ocfs2-devel@oss.oracle.com (moderated for non-subscribers)
  W:    http://oss.oracle.com/projects/ocfs2/
- T:    git git://git.kernel.org/pub/scm/linux/kernel/git/mfasheh/ocfs2.git
+ T:    git git://git.kernel.org/pub/scm/linux/kernel/git/jlbec/ocfs2.git
  S:    Supported
  F:    Documentation/filesystems/ocfs2.txt
  F:    Documentation/filesystems/dlmfs.txt
@@@ -4375,16 -4375,6 +4375,16 @@@ S:    Maintaine
  F:    include/linux/delayacct.h
  F:    kernel/delayacct.c
  
 +PERFORMANCE COUNTER SUBSYSTEM
 +P:    Peter Zijlstra
 +M:    a.p.zijlstra@chello.nl
 +P:    Paul Mackerras
 +M:    paulus@samba.org
 +P:    Ingo Molnar
 +M:    mingo@elte.hu
 +L:    linux-kernel@vger.kernel.org
 +S:    Supported
 +
  PERSONALITY HANDLING
  P:    Christoph Hellwig
  M:    hch@infradead.org
@@@ -4531,6 -4521,19 +4531,19 @@@ M:    jim@jtan.co
  L:    cbe-oss-dev@ozlabs.org
  S:    Maintained
  
+ PTRACE SUPPORT
+ P:    Roland McGrath
+ M:    roland@redhat.com
+ P:    Oleg Nesterov
+ M:    oleg@redhat.com
+ L:    linux-kernel@vger.kernel.org
+ S:    Maintained
+ F:    include/asm-generic/syscall.h
+ F:    include/linux/ptrace.h
+ F:    include/linux/regset.h
+ F:    include/linux/tracehook.h
+ F:    kernel/ptrace.c
+ 
  PVRUSB2 VIDEO4LINUX DRIVER
  P:    Mike Isely
  M:    isely@pobox.com
@@@ -4676,13 -4679,13 +4689,13 @@@ F:   kernel/rcutorture.
  
  RDC R-321X SoC
  P:    Florian Fainelli
- M:    florian.fainelli@telecomint.eu
+ M:    florian@openwrt.org
  L:    linux-kernel@vger.kernel.org
  S:    Maintained
  
  RDC R6040 FAST ETHERNET DRIVER
  P:    Florian Fainelli
- M:    florian.fainelli@telecomint.eu
+ M:    florian@openwrt.org
  L:    netdev@vger.kernel.org
  S:    Maintained
  F:    drivers/net/r6040.c
@@@ -13,7 -13,6 +13,7 @@@
  #include <linux/io.h>
  
  #include <asm/stackprotector.h>
 +#include <asm/perf_counter.h>
  #include <asm/mmu_context.h>
  #include <asm/hypervisor.h>
  #include <asm/processor.h>
@@@ -855,7 -854,6 +855,7 @@@ void __init identify_boot_cpu(void
  #else
        vgetcpu_set_mode();
  #endif
 +      init_hw_perf_counters();
  }
  
  void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
@@@ -1205,6 -1203,8 +1205,8 @@@ void __cpuinit cpu_init(void
        load_TR_desc();
        load_LDT(&init_mm.context);
  
+       t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
+ 
  #ifdef CONFIG_DOUBLEFAULT
        /* Set up doublefault TSS pointer in the GDT */
        __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
diff --combined drivers/char/sysrq.c
@@@ -25,7 -25,6 +25,7 @@@
  #include <linux/kbd_kern.h>
  #include <linux/proc_fs.h>
  #include <linux/quotaops.h>
 +#include <linux/perf_counter.h>
  #include <linux/kernel.h>
  #include <linux/module.h>
  #include <linux/suspend.h>
@@@ -244,7 -243,6 +244,7 @@@ static void sysrq_handle_showregs(int k
        struct pt_regs *regs = get_irq_regs();
        if (regs)
                show_regs(regs);
 +      perf_counter_print_debug();
  }
  static struct sysrq_key_op sysrq_showregs_op = {
        .handler        = sysrq_handle_showregs,
@@@ -408,7 -406,7 +408,7 @@@ static struct sysrq_key_op *sysrq_key_t
        &sysrq_showlocks_op,            /* d */
        &sysrq_term_op,                 /* e */
        &sysrq_moom_op,                 /* f */
-       /* g: May be registered by ppc for kgdb */
+       /* g: May be registered for the kernel debugger */
        NULL,                           /* g */
        NULL,                           /* h - reserved for help */
        &sysrq_kill_op,                 /* i */
        &sysrq_sync_op,                 /* s */
        &sysrq_showstate_op,            /* t */
        &sysrq_mountro_op,              /* u */
-       /* v: May be registered at init time by SMP VOYAGER */
+       /* v: May be registered for frame buffer console restore */
        NULL,                           /* v */
        &sysrq_showstate_blocked_op,    /* w */
        /* x: May be registered on ppc/powerpc for xmon */
diff --combined fs/exec.c
+++ b/fs/exec.c
@@@ -33,7 -33,6 +33,7 @@@
  #include <linux/string.h>
  #include <linux/init.h>
  #include <linux/pagemap.h>
 +#include <linux/perf_counter.h>
  #include <linux/highmem.h>
  #include <linux/spinlock.h>
  #include <linux/key.h>
@@@ -70,17 -69,18 +70,18 @@@ int suid_dumpable = 0
  static LIST_HEAD(formats);
  static DEFINE_RWLOCK(binfmt_lock);
  
- int register_binfmt(struct linux_binfmt * fmt)
+ int __register_binfmt(struct linux_binfmt * fmt, int insert)
  {
        if (!fmt)
                return -EINVAL;
        write_lock(&binfmt_lock);
-       list_add(&fmt->lh, &formats);
+       insert ? list_add(&fmt->lh, &formats) :
+                list_add_tail(&fmt->lh, &formats);
        write_unlock(&binfmt_lock);
        return 0;       
  }
  
- EXPORT_SYMBOL(register_binfmt);
+ EXPORT_SYMBOL(__register_binfmt);
  
  void unregister_binfmt(struct linux_binfmt * fmt)
  {
@@@ -105,40 -105,28 +106,28 @@@ static inline void put_binfmt(struct li
  SYSCALL_DEFINE1(uselib, const char __user *, library)
  {
        struct file *file;
        char *tmp = getname(library);
        int error = PTR_ERR(tmp);
  
-       if (!IS_ERR(tmp)) {
-               error = path_lookup_open(AT_FDCWD, tmp,
-                                        LOOKUP_FOLLOW, &nd,
-                                        FMODE_READ|FMODE_EXEC);
-               putname(tmp);
-       }
-       if (error)
+       if (IS_ERR(tmp))
+               goto out;
+       file = do_filp_open(AT_FDCWD, tmp,
+                               O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
+                               MAY_READ | MAY_EXEC | MAY_OPEN);
+       putname(tmp);
+       error = PTR_ERR(file);
+       if (IS_ERR(file))
                goto out;
  
        error = -EINVAL;
-       if (!S_ISREG(nd.path.dentry->d_inode->i_mode))
+       if (!S_ISREG(file->f_path.dentry->d_inode->i_mode))
                goto exit;
  
        error = -EACCES;
-       if (nd.path.mnt->mnt_flags & MNT_NOEXEC)
-               goto exit;
-       error = inode_permission(nd.path.dentry->d_inode,
-                                MAY_READ | MAY_EXEC | MAY_OPEN);
-       if (error)
-               goto exit;
-       error = ima_path_check(&nd.path, MAY_READ | MAY_EXEC | MAY_OPEN);
-       if (error)
+       if (file->f_path.mnt->mnt_flags & MNT_NOEXEC)
                goto exit;
  
-       file = nameidata_to_filp(&nd, O_RDONLY|O_LARGEFILE);
-       error = PTR_ERR(file);
-       if (IS_ERR(file))
-               goto out;
        fsnotify_open(file->f_path.dentry);
  
        error = -ENOEXEC;
                }
                read_unlock(&binfmt_lock);
        }
+ exit:
        fput(file);
  out:
        return error;
- exit:
-       release_open_intent(&nd);
-       path_put(&nd.path);
-       goto out;
  }
  
  #ifdef CONFIG_MMU
@@@ -661,47 -646,33 +647,33 @@@ EXPORT_SYMBOL(setup_arg_pages)
  
  struct file *open_exec(const char *name)
  {
        struct file *file;
        int err;
  
-       err = path_lookup_open(AT_FDCWD, name, LOOKUP_FOLLOW, &nd,
-                               FMODE_READ|FMODE_EXEC);
-       if (err)
+       file = do_filp_open(AT_FDCWD, name,
+                               O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
+                               MAY_EXEC | MAY_OPEN);
+       if (IS_ERR(file))
                goto out;
  
        err = -EACCES;
-       if (!S_ISREG(nd.path.dentry->d_inode->i_mode))
-               goto out_path_put;
-       if (nd.path.mnt->mnt_flags & MNT_NOEXEC)
-               goto out_path_put;
-       err = inode_permission(nd.path.dentry->d_inode, MAY_EXEC | MAY_OPEN);
-       if (err)
-               goto out_path_put;
-       err = ima_path_check(&nd.path, MAY_EXEC | MAY_OPEN);
-       if (err)
-               goto out_path_put;
+       if (!S_ISREG(file->f_path.dentry->d_inode->i_mode))
+               goto exit;
  
-       file = nameidata_to_filp(&nd, O_RDONLY|O_LARGEFILE);
-       if (IS_ERR(file))
-               return file;
+       if (file->f_path.mnt->mnt_flags & MNT_NOEXEC)
+               goto exit;
  
        fsnotify_open(file->f_path.dentry);
  
        err = deny_write_access(file);
-       if (err) {
-               fput(file);
-               goto out;
-       }
+       if (err)
+               goto exit;
  
+ out:
        return file;
  
-  out_path_put:
-       release_open_intent(&nd);
-       path_put(&nd.path);
-  out:
+ exit:
+       fput(file);
        return ERR_PTR(err);
  }
  EXPORT_SYMBOL(open_exec);
@@@ -951,7 -922,6 +923,7 @@@ void set_task_comm(struct task_struct *
        task_lock(tsk);
        strlcpy(tsk->comm, buf, sizeof(tsk->comm));
        task_unlock(tsk);
 +      perf_counter_comm(tsk);
  }
  
  int flush_old_exec(struct linux_binprm * bprm)
  
        current->personality &= ~bprm->per_clear;
  
 +      /*
 +       * Flush performance counters when crossing a
 +       * security domain:
 +       */
 +      if (!get_dumpable(current->mm))
 +              perf_counter_exit_task(current);
 +
        /* An exec changes our domain. We are no longer part of the thread
           group */
  
diff --combined include/linux/syscalls.h
@@@ -55,7 -55,6 +55,7 @@@ struct compat_timeval
  struct robust_list_head;
  struct getcpu_cache;
  struct old_linux_dirent;
 +struct perf_counter_hw_event;
  
  #include <linux/types.h>
  #include <linux/aio_abi.h>
@@@ -434,6 -433,7 +434,7 @@@ asmlinkage long sys_fcntl(unsigned int 
  asmlinkage long sys_fcntl64(unsigned int fd,
                                unsigned int cmd, unsigned long arg);
  #endif
+ asmlinkage long sys_pipe2(int __user *fildes, int flags);
  asmlinkage long sys_dup(unsigned int fildes);
  asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd);
  asmlinkage long sys_dup3(unsigned int oldfd, unsigned int newfd, int flags);
@@@ -755,8 -755,4 +756,8 @@@ asmlinkage long sys_pipe(int __user *)
  
  int kernel_execve(const char *filename, char *const argv[], char *const envp[]);
  
 +
 +asmlinkage long sys_perf_counter_open(
 +              const struct perf_counter_hw_event __user *hw_event_uptr,
 +              pid_t pid, int cpu, int group_fd, unsigned long flags);
  #endif
diff --combined kernel/sched.c
@@@ -39,7 -39,6 +39,7 @@@
  #include <linux/completion.h>
  #include <linux/kernel_stat.h>
  #include <linux/debug_locks.h>
 +#include <linux/perf_counter.h>
  #include <linux/security.h>
  #include <linux/notifier.h>
  #include <linux/profile.h>
@@@ -585,7 -584,6 +585,7 @@@ struct rq 
        struct load_weight load;
        unsigned long nr_load_updates;
        u64 nr_switches;
 +      u64 nr_migrations_in;
  
        struct cfs_rq cfs;
        struct rt_rq rt;
@@@ -694,7 -692,7 +694,7 @@@ static inline int cpu_of(struct rq *rq
  #define task_rq(p)            cpu_rq(task_cpu(p))
  #define cpu_curr(cpu)         (cpu_rq(cpu)->curr)
  
 -static inline void update_rq_clock(struct rq *rq)
 +inline void update_rq_clock(struct rq *rq)
  {
        rq->clock = sched_clock_cpu(cpu_of(rq));
  }
@@@ -1969,15 -1967,12 +1969,15 @@@ void set_task_cpu(struct task_struct *p
                p->se.sleep_start -= clock_offset;
        if (p->se.block_start)
                p->se.block_start -= clock_offset;
 +#endif
        if (old_cpu != new_cpu) {
 -              schedstat_inc(p, se.nr_migrations);
 +              p->se.nr_migrations++;
 +              new_rq->nr_migrations_in++;
 +#ifdef CONFIG_SCHEDSTATS
                if (task_hot(p, old_rq->clock, NULL))
                        schedstat_inc(p, se.nr_forced2_migrations);
 -      }
  #endif
 +      }
        p->se.vruntime -= old_cfsrq->min_vruntime -
                                         new_cfsrq->min_vruntime;
  
@@@ -2329,27 -2324,6 +2329,27 @@@ static int sched_balance_self(int cpu, 
  
  #endif /* CONFIG_SMP */
  
 +/**
 + * task_oncpu_function_call - call a function on the cpu on which a task runs
 + * @p:                the task to evaluate
 + * @func:     the function to be called
 + * @info:     the function call argument
 + *
 + * Calls the function @func when the task is currently running. This might
 + * be on the current CPU, which just calls the function directly
 + */
 +void task_oncpu_function_call(struct task_struct *p,
 +                            void (*func) (void *info), void *info)
 +{
 +      int cpu;
 +
 +      preempt_disable();
 +      cpu = task_cpu(p);
 +      if (task_curr(p))
 +              smp_call_function_single(cpu, func, info, 1);
 +      preempt_enable();
 +}
 +
  /***
   * try_to_wake_up - wake up a thread
   * @p: the to-be-woken-up thread
@@@ -2506,7 -2480,6 +2506,7 @@@ static void __sched_fork(struct task_st
        p->se.exec_start                = 0;
        p->se.sum_exec_runtime          = 0;
        p->se.prev_sum_exec_runtime     = 0;
 +      p->se.nr_migrations             = 0;
        p->se.last_wakeup               = 0;
        p->se.avg_overlap               = 0;
        p->se.start_runtime             = 0;
@@@ -2737,7 -2710,6 +2737,7 @@@ static void finish_task_switch(struct r
         */
        prev_state = prev->state;
        finish_arch_switch(prev);
 +      perf_counter_task_sched_in(current, cpu_of(rq));
        finish_lock_switch(rq, prev);
  #ifdef CONFIG_SMP
        if (post_schedule)
@@@ -2900,15 -2872,6 +2900,15 @@@ unsigned long nr_active(void
  }
  
  /*
 + * Externally visible per-cpu scheduler statistics:
 + * cpu_nr_migrations(cpu) - number of migrations into that cpu
 + */
 +u64 cpu_nr_migrations(int cpu)
 +{
 +      return cpu_rq(cpu)->nr_migrations_in;
 +}
 +
 +/*
   * Update rq->cpu_load[] statistics. This function is usually called every
   * scheduler tick (TICK_NSEC).
   */
@@@ -4769,7 -4732,7 +4769,7 @@@ void account_process_tick(struct task_s
  
        if (user_tick)
                account_user_time(p, one_jiffy, one_jiffy_scaled);
-       else if (p != rq->idle)
+       else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
                account_system_time(p, HARDIRQ_OFFSET, one_jiffy,
                                    one_jiffy_scaled);
        else
@@@ -4875,7 -4838,6 +4875,7 @@@ void scheduler_tick(void
        update_rq_clock(rq);
        update_cpu_load(rq);
        curr->sched_class->task_tick(rq, curr, 0);
 +      perf_counter_task_tick(curr, cpu);
        spin_unlock(&rq->lock);
  
  #ifdef CONFIG_SMP
@@@ -5091,7 -5053,6 +5091,7 @@@ need_resched_nonpreemptible
  
        if (likely(prev != next)) {
                sched_info_switch(prev, next);
 +              perf_counter_task_sched_out(prev, cpu);
  
                rq->nr_switches++;
                rq->curr = next;
@@@ -8997,7 -8958,7 +8997,7 @@@ void __init sched_init(void
                 * 1024) and two child groups A0 and A1 (of weight 1024 each),
                 * then A0's share of the cpu resource is:
                 *
 -               *      A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
 +               *      A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
                 *
                 * We achieve this by letting init_task_group's tasks sit
                 * directly in rq->cfs (i.e init_task_group->se[] = NULL).
        alloc_bootmem_cpumask_var(&cpu_isolated_map);
  #endif /* SMP */
  
 +      perf_counter_init();
 +
        scheduler_running = 1;
  }
  
diff --combined kernel/sysctl.c
@@@ -49,7 -49,6 +49,7 @@@
  #include <linux/reboot.h>
  #include <linux/ftrace.h>
  #include <linux/slow-work.h>
 +#include <linux/perf_counter.h>
  
  #include <asm/uaccess.h>
  #include <asm/processor.h>
@@@ -102,7 -101,9 +102,9 @@@ static int __maybe_unused one = 1
  static int __maybe_unused two = 2;
  static unsigned long one_ul = 1;
  static int one_hundred = 100;
- static int one_thousand = 1000;
+ /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
+ static unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
  
  /* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */
  static int maxolduid = 65535;
@@@ -911,24 -912,6 +913,24 @@@ static struct ctl_table kern_table[] = 
                .child          = slow_work_sysctls,
        },
  #endif
 +#ifdef CONFIG_PERF_COUNTERS
 +      {
 +              .ctl_name       = CTL_UNNUMBERED,
 +              .procname       = "perf_counter_privileged",
 +              .data           = &sysctl_perf_counter_priv,
 +              .maxlen         = sizeof(sysctl_perf_counter_priv),
 +              .mode           = 0644,
 +              .proc_handler   = &proc_dointvec,
 +      },
 +      {
 +              .ctl_name       = CTL_UNNUMBERED,
 +              .procname       = "perf_counter_mlock_kb",
 +              .data           = &sysctl_perf_counter_mlock,
 +              .maxlen         = sizeof(sysctl_perf_counter_mlock),
 +              .mode           = 0644,
 +              .proc_handler   = &proc_dointvec,
 +      },
 +#endif
  /*
   * NOTE: do not add new entries to this table unless you have read
   * Documentation/sysctl/ctl_unnumbered.txt
@@@ -1025,7 -1008,7 +1027,7 @@@ static struct ctl_table vm_table[] = 
                .mode           = 0644,
                .proc_handler   = &dirty_bytes_handler,
                .strategy       = &sysctl_intvec,
-               .extra1         = &one_ul,
+               .extra1         = &dirty_bytes_min,
        },
        {
                .procname       = "dirty_writeback_centisecs",
                .proc_handler   = &proc_dointvec,
        },
        {
-               .ctl_name       = CTL_UNNUMBERED,
-               .procname       = "nr_pdflush_threads_min",
-               .data           = &nr_pdflush_threads_min,
-               .maxlen         = sizeof nr_pdflush_threads_min,
-               .mode           = 0644 /* read-write */,
-               .proc_handler   = &proc_dointvec_minmax,
-               .strategy       = &sysctl_intvec,
-               .extra1         = &one,
-               .extra2         = &nr_pdflush_threads_max,
-       },
-       {
-               .ctl_name       = CTL_UNNUMBERED,
-               .procname       = "nr_pdflush_threads_max",
-               .data           = &nr_pdflush_threads_max,
-               .maxlen         = sizeof nr_pdflush_threads_max,
-               .mode           = 0644 /* read-write */,
-               .proc_handler   = &proc_dointvec_minmax,
-               .strategy       = &sysctl_intvec,
-               .extra1         = &nr_pdflush_threads_min,
-               .extra2         = &one_thousand,
-       },
-       {
                .ctl_name       = VM_SWAPPINESS,
                .procname       = "swappiness",
                .data           = &vm_swappiness,
diff --combined mm/mmap.c
+++ b/mm/mmap.c
@@@ -28,7 -28,6 +28,7 @@@
  #include <linux/mempolicy.h>
  #include <linux/rmap.h>
  #include <linux/mmu_notifier.h>
 +#include <linux/perf_counter.h>
  
  #include <asm/uaccess.h>
  #include <asm/cacheflush.h>
@@@ -86,7 -85,7 +86,7 @@@ EXPORT_SYMBOL(vm_get_page_prot)
  int sysctl_overcommit_memory = OVERCOMMIT_GUESS;  /* heuristic overcommit */
  int sysctl_overcommit_ratio = 50;     /* default is 50% */
  int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
- atomic_long_t vm_committed_space = ATOMIC_LONG_INIT(0);
+ struct percpu_counter vm_committed_as;
  
  /*
   * Check that a process has enough memory to allocate a new virtual
@@@ -180,11 -179,7 +180,7 @@@ int __vm_enough_memory(struct mm_struc
        if (mm)
                allowed -= mm->total_vm / 32;
  
-       /*
-        * cast `allowed' as a signed long because vm_committed_space
-        * sometimes has a negative value
-        */
-       if (atomic_long_read(&vm_committed_space) < (long)allowed)
+       if (percpu_counter_read_positive(&vm_committed_as) < allowed)
                return 0;
  error:
        vm_unacct_memory(pages);
@@@ -1224,9 -1219,6 +1220,9 @@@ munmap_back
        if (correct_wcount)
                atomic_inc(&inode->i_writecount);
  out:
 +      if (vm_flags & VM_EXEC)
 +              perf_counter_mmap(addr, len, pgoff, file);
 +
        mm->total_vm += len >> PAGE_SHIFT;
        vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
        if (vm_flags & VM_LOCKED) {
@@@ -1760,12 -1752,6 +1756,12 @@@ static void remove_vma_list(struct mm_s
        do {
                long nrpages = vma_pages(vma);
  
 +              if (vma->vm_flags & VM_EXEC) {
 +                      perf_counter_munmap(vma->vm_start,
 +                                      nrpages << PAGE_SHIFT,
 +                                      vma->vm_pgoff, vma->vm_file);
 +              }
 +
                mm->total_vm -= nrpages;
                vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
                vma = remove_vma(vma);
@@@ -2491,4 -2477,8 +2487,8 @@@ void mm_drop_all_locks(struct mm_struc
   */
  void __init mmap_init(void)
  {
+       int ret;
+ 
+       ret = percpu_counter_init(&vm_committed_as, 0);
+       VM_BUG_ON(ret);
  }