Merge branch 'linus' into core/locking
author Ingo Molnar <mingo@elte.hu>
Wed, 12 Nov 2008 11:39:21 +0000 (12:39 +0100)
committer Ingo Molnar <mingo@elte.hu>
Wed, 12 Nov 2008 11:39:21 +0000 (12:39 +0100)
include/linux/kernel.h
kernel/lockdep.c
kernel/sched.c

diff --combined include/linux/kernel.h
index fa2853b49f70a4bb26e255c679f69eefb83fc6c8,fba141d3ca0783303c661f39fb2c503ba418dc56..69a9bfdf9c86d9de03919277d108fe242a4d1e4f
@@@ -116,6 -116,8 +116,8 @@@ extern int _cond_resched(void)
  # define might_resched() do { } while (0)
  #endif
  
+ #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
+   void __might_sleep(char *file, int line);
  /**
   * might_sleep - annotation for functions that can sleep
   *
   * be bitten later when the calling function happens to sleep when it is not
   * supposed to.
   */
- #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
-   void __might_sleep(char *file, int line);
  # define might_sleep() \
        do { __might_sleep(__FILE__, __LINE__); might_resched(); } while (0)
  #else
                (__x < 0) ? -__x : __x;         \
        })
  
 +#ifdef CONFIG_PROVE_LOCKING
 +void might_fault(void);
 +#else
 +static inline void might_fault(void)
 +{
 +      might_sleep();
 +}
 +#endif
 +
  extern struct atomic_notifier_head panic_notifier_list;
  extern long (*panic_blink)(long time);
  NORET_TYPE void panic(const char * fmt, ...)
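
Note on the kernel.h hunks above: the core/locking side adds a might_fault() annotation that is a real out-of-line function under CONFIG_PROVE_LOCKING and otherwise simply degenerates to might_sleep(). As a minimal, illustrative sketch only (the wrapper name my_copy_from_user is invented here; might_fault() and __copy_from_user() are the real kernel interfaces), a uaccess-style helper would be annotated like this:

static inline unsigned long
my_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_fault();	/* may sleep and take a page fault on user memory */
	return __copy_from_user(to, from, n);
}
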
diff --combined kernel/lockdep.c
index 234a9dccb4befe1bb8cc897784b4a0216faca84b,06e157119d2b24d2b254c9a38b5ace0935a05886..a4285830323352666b3c3e31b4ec04f9e7331d1b
@@@ -136,16 -136,16 +136,16 @@@ static inline struct lock_class *hlock_
  #ifdef CONFIG_LOCK_STAT
  static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
  
 -static int lock_contention_point(struct lock_class *class, unsigned long ip)
 +static int lock_point(unsigned long points[], unsigned long ip)
  {
        int i;
  
 -      for (i = 0; i < ARRAY_SIZE(class->contention_point); i++) {
 -              if (class->contention_point[i] == 0) {
 -                      class->contention_point[i] = ip;
 +      for (i = 0; i < LOCKSTAT_POINTS; i++) {
 +              if (points[i] == 0) {
 +                      points[i] = ip;
                        break;
                }
 -              if (class->contention_point[i] == ip)
 +              if (points[i] == ip)
                        break;
        }
  
@@@ -185,9 -185,6 +185,9 @@@ struct lock_class_stats lock_stats(stru
                for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
                        stats.contention_point[i] += pcs->contention_point[i];
  
 +              for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
 +                      stats.contending_point[i] += pcs->contending_point[i];
 +
                lock_time_add(&pcs->read_waittime, &stats.read_waittime);
                lock_time_add(&pcs->write_waittime, &stats.write_waittime);
  
@@@ -212,7 -209,6 +212,7 @@@ void clear_lock_stats(struct lock_clas
                memset(cpu_stats, 0, sizeof(struct lock_class_stats));
        }
        memset(class->contention_point, 0, sizeof(class->contention_point));
 +      memset(class->contending_point, 0, sizeof(class->contending_point));
  }
  
  static struct lock_class_stats *get_lock_stats(struct lock_class *class)
@@@ -2173,12 -2169,11 +2173,11 @@@ void early_boot_irqs_on(void
  /*
   * Hardirqs will be enabled:
   */
- void trace_hardirqs_on_caller(unsigned long a0)
+ void trace_hardirqs_on_caller(unsigned long ip)
  {
        struct task_struct *curr = current;
-       unsigned long ip;
  
-       time_hardirqs_on(CALLER_ADDR0, a0);
+       time_hardirqs_on(CALLER_ADDR0, ip);
  
        if (unlikely(!debug_locks || current->lockdep_recursion))
                return;
        }
        /* we'll do an OFF -> ON transition: */
        curr->hardirqs_enabled = 1;
-       ip = (unsigned long) __builtin_return_address(0);
  
        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
                return;
@@@ -2228,11 -2222,11 +2226,11 @@@ EXPORT_SYMBOL(trace_hardirqs_on)
  /*
   * Hardirqs were disabled:
   */
- void trace_hardirqs_off_caller(unsigned long a0)
+ void trace_hardirqs_off_caller(unsigned long ip)
  {
        struct task_struct *curr = current;
  
-       time_hardirqs_off(CALLER_ADDR0, a0);
+       time_hardirqs_off(CALLER_ADDR0, ip);
  
        if (unlikely(!debug_locks || current->lockdep_recursion))
                return;
                 * We have done an ON -> OFF transition:
                 */
                curr->hardirqs_enabled = 0;
-               curr->hardirq_disable_ip = _RET_IP_;
+               curr->hardirq_disable_ip = ip;
                curr->hardirq_disable_event = ++curr->irq_events;
                debug_atomic_inc(&hardirqs_off_events);
        } else
@@@ -3005,7 -2999,7 +3003,7 @@@ __lock_contended(struct lockdep_map *lo
        struct held_lock *hlock, *prev_hlock;
        struct lock_class_stats *stats;
        unsigned int depth;
 -      int i, point;
 +      int i, contention_point, contending_point;
  
        depth = curr->lockdep_depth;
        if (DEBUG_LOCKS_WARN_ON(!depth))
  found_it:
        hlock->waittime_stamp = sched_clock();
  
 -      point = lock_contention_point(hlock_class(hlock), ip);
 +      contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
 +      contending_point = lock_point(hlock_class(hlock)->contending_point,
 +                                    lock->ip);
  
        stats = get_lock_stats(hlock_class(hlock));
 -      if (point < ARRAY_SIZE(stats->contention_point))
 -              stats->contention_point[point]++;
 +      if (contention_point < LOCKSTAT_POINTS)
 +              stats->contention_point[contention_point]++;
 +      if (contending_point < LOCKSTAT_POINTS)
 +              stats->contending_point[contending_point]++;
        if (lock->cpu != smp_processor_id())
                stats->bounces[bounce_contended + !!hlock->read]++;
        put_lock_stats(stats);
  }
  
  static void
 -__lock_acquired(struct lockdep_map *lock)
 +__lock_acquired(struct lockdep_map *lock, unsigned long ip)
  {
        struct task_struct *curr = current;
        struct held_lock *hlock, *prev_hlock;
@@@ -3093,7 -3083,6 +3091,7 @@@ found_it
        put_lock_stats(stats);
  
        lock->cpu = cpu;
 +      lock->ip = ip;
  }
  
  void lock_contended(struct lockdep_map *lock, unsigned long ip)
  }
  EXPORT_SYMBOL_GPL(lock_contended);
  
 -void lock_acquired(struct lockdep_map *lock)
 +void lock_acquired(struct lockdep_map *lock, unsigned long ip)
  {
        unsigned long flags;
  
        raw_local_irq_save(flags);
        check_flags(flags);
        current->lockdep_recursion = 1;
 -      __lock_acquired(lock);
 +      __lock_acquired(lock, ip);
        current->lockdep_recursion = 0;
        raw_local_irq_restore(flags);
  }
@@@ -3426,9 -3415,10 +3424,10 @@@ retry
                }
                printk(" ignoring it.\n");
                unlock = 0;
+       } else {
+               if (count != 10)
+                       printk(KERN_CONT " locked it.\n");
        }
-       if (count != 10)
-               printk(" locked it.\n");
  
        do_each_thread(g, p) {
                /*
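
Note on the lockdep.c hunks above: lock statistics are split into contention points (where a waiter blocked, keyed by the ip passed to lock_contended()) and contending points (where the lock was finally taken, keyed by the ip now passed to lock_acquired() and cached in lock->ip). The matching caller-side change lives in include/linux/lockdep.h, which this combined diff does not show; roughly, the LOCK_CONTENDED() wrapper is what feeds _RET_IP_ into both hooks:

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)
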
diff --combined kernel/sched.c
index 0a4dc3b1300b07d7284f626a3c572f186d8c257c,50a21f964679ec42eee299cab7f7f86edc26a408..2a106b6b78b09006f75274defb2057b6e7a428e7
@@@ -386,7 -386,6 +386,6 @@@ struct cfs_rq 
  
        u64 exec_clock;
        u64 min_vruntime;
-       u64 pair_start;
  
        struct rb_root tasks_timeline;
        struct rb_node *rb_leftmost;
         * 'curr' points to currently running entity on this cfs_rq.
         * It is set to NULL otherwise (i.e when none are currently running).
         */
-       struct sched_entity *curr, *next;
+       struct sched_entity *curr, *next, *last;
  
-       unsigned long nr_spread_over;
+       unsigned int nr_spread_over;
  
  #ifdef CONFIG_FAIR_GROUP_SCHED
        struct rq *rq;  /* cpu runqueue to which this cfs_rq is attached */
@@@ -970,6 -969,14 +969,14 @@@ static struct rq *task_rq_lock(struct t
        }
  }
  
+ void task_rq_unlock_wait(struct task_struct *p)
+ {
+       struct rq *rq = task_rq(p);
+
+       smp_mb(); /* spin-unlock-wait is not a full memory barrier */
+       spin_unlock_wait(&rq->lock);
+ }
+
  static void __task_rq_unlock(struct rq *rq)
        __releases(rq->lock)
  {
@@@ -1806,7 -1813,9 +1813,9 @@@ task_hot(struct task_struct *p, u64 now
        /*
         * Buddy candidates are cache hot:
         */
-       if (sched_feat(CACHE_HOT_BUDDY) && (&p->se == cfs_rq_of(&p->se)->next))
+       if (sched_feat(CACHE_HOT_BUDDY) &&
+                       (&p->se == cfs_rq_of(&p->se)->next ||
+                        &p->se == cfs_rq_of(&p->se)->last))
                return 1;
  
        if (p->sched_class != &fair_sched_class)
@@@ -3344,7 -3353,7 +3353,7 @@@ small_imbalance
                } else
                        this_load_per_task = cpu_avg_load_per_task(this_cpu);
  
-               if (max_load - this_load + 2*busiest_load_per_task >=
+               if (max_load - this_load + busiest_load_per_task >=
                                        busiest_load_per_task * imbn) {
                        *imbalance = busiest_load_per_task;
                        return busiest;
@@@ -4327,7 -4336,7 +4336,7 @@@ void __kprobes sub_preempt_count(int va
        /*
         * Underflow?
         */
 -      if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
 +      if (DEBUG_LOCKS_WARN_ON(val > preempt_count() - (!!kernel_locked())))
                return;
        /*
         * Is the spinlock portion underflowing?
@@@ -6876,15 -6885,17 +6885,17 @@@ cpu_attach_domain(struct sched_domain *
        struct sched_domain *tmp;
  
        /* Remove the sched domains which do not contribute to scheduling. */
-       for (tmp = sd; tmp; tmp = tmp->parent) {
+       for (tmp = sd; tmp; ) {
                struct sched_domain *parent = tmp->parent;
                if (!parent)
                        break;
                if (sd_parent_degenerate(tmp, parent)) {
                        tmp->parent = parent->parent;
                        if (parent->parent)
                                parent->parent->child = tmp;
-               }
+               } else
+                       tmp = tmp->parent;
        }
  
        if (sd && sd_degenerate(sd)) {
@@@ -7673,6 -7684,7 +7684,7 @@@ static int __build_sched_domains(const 
  error:
        free_sched_groups(cpu_map, tmpmask);
        SCHED_CPUMASK_FREE((void *)allmasks);
+       kfree(rd);
        return -ENOMEM;
  #endif
  }
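
Note on the sched.c hunks above: the new task_rq_unlock_wait() helper exists so that teardown code can make sure no other CPU still holds the task's runqueue lock (and may still be touching state that is about to be freed) before continuing. A hedged sketch of a caller follows; example_release_task() is an invented name, only task_rq_unlock_wait() itself comes from this merge:

static void example_release_task(struct task_struct *tsk)
{
	/*
	 * Another CPU may still hold tsk's rq->lock (e.g. inside the
	 * scheduler) while reading state we are about to free; wait
	 * until that lock is dropped before tearing the state down.
	 */
	task_rq_unlock_wait(tsk);

	/* ... now it is safe to free the per-task accounting state ... */
}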