extern unsigned long nr_uninterruptible(void);
extern unsigned long nr_active(void);
extern unsigned long nr_iowait(void);
-extern unsigned long weighted_cpuload(const int cpu);
struct seq_file;
struct cfs_rq;
}
#endif
+extern unsigned long long time_sync_thresh;
+
/*
* Task state bitmask. NOTE! These bits are also
* encoded in fs/proc/array.c: get_task_state().
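Making time_sync_thresh extern lets architecture code tune how far per-cpu clock readings may drift before cpu_clock() forces a cross-CPU resync. A minimal sketch of the intended use, assuming an architecture with reliably synchronized TSCs; the function name and the chosen value are illustrative:

    #include <linux/init.h>
    #include <linux/sched.h>
    #include <linux/time.h>

    /* Sketch: TSCs are synchronized on this platform, so widen the
     * drift threshold and resync the per-cpu clocks far less often. */
    static void __init example_arch_tune_clock(void)
    {
            time_sync_thresh = NSEC_PER_SEC;        /* 1s; illustrative */
    }
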
struct sched_domain *child; /* bottom domain must be null terminated */
struct sched_group *groups; /* the balancing groups of the domain */
cpumask_t span; /* span of all CPUs in this domain */
- int first_cpu; /* cache of the first cpu in this domain */
unsigned long min_interval; /* Minimum balance interval ms */
unsigned long max_interval; /* Maximum balance interval ms */
unsigned int busy_factor; /* less balancing by factor if busy */
unsigned int balance_interval; /* initialise to 1. units in ms. */
unsigned int nr_balance_failed; /* initialise to 0 */
+ u64 last_update;
+
#ifdef CONFIG_SCHEDSTATS
/* load_balance() stats */
unsigned int lb_count[CPU_MAX_IDLE_TYPES];
#endif /* CONFIG_SMP */
-/*
- * A runqueue laden with a single nice 0 task scores a weighted_cpuload of
- * SCHED_LOAD_SCALE. This function returns 1 if any cpu is laden with a
- * task of nice 0 or enough lower priority tasks to bring up the
- * weighted_cpuload
- */
-static inline int above_background_load(void)
-{
- unsigned long cpu;
-
- for_each_online_cpu(cpu) {
- if (weighted_cpuload(cpu) >= SCHED_LOAD_SCALE)
- return 1;
- }
- return 0;
-}
-
struct io_context; /* See blkdev.h */
#define NGROUPS_SMALL 32
#define NGROUPS_PER_BLOCK ((unsigned int)(PAGE_SIZE / sizeof(gid_t)))
void (*set_cpus_allowed)(struct task_struct *p,
const cpumask_t *newmask);
- void (*join_domain)(struct rq *rq);
- void (*leave_domain)(struct rq *rq);
+ void (*rq_online)(struct rq *rq);
+ void (*rq_offline)(struct rq *rq);
void (*switched_from) (struct rq *this_rq, struct task_struct *task,
int running);
#endif
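The join_domain/leave_domain hooks become rq_online/rq_offline: they fire whenever a runqueue enters or leaves the set of active CPUs (hotplug included), not only on domain attach. A sketch of a scheduling class wiring them up, loosely modeled on what the RT class does; all names below are illustrative and the bookkeeping is elided:

    static void example_rq_online(struct rq *rq)
    {
            /* rq's CPU is becoming schedulable: re-publish per-rq state
             * (e.g. priority info) that other CPUs consult for push/pull. */
    }

    static void example_rq_offline(struct rq *rq)
    {
            /* rq's CPU is going away: withdraw that state. */
    }

    static const struct sched_class example_sched_class = {
            /* ...other mandatory methods elided... */
            .rq_online      = example_rq_online,
            .rq_offline     = example_rq_offline,
    };
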
int prio, static_prio, normal_prio;
+ unsigned int rt_priority;
const struct sched_class *sched_class;
struct sched_entity se;
struct sched_rt_entity rt;
int __user *set_child_tid; /* CLONE_CHILD_SETTID */
int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
- unsigned int rt_priority;
cputime_t utime, stime, utimescaled, stimescaled;
cputime_t gtime;
cputime_t prev_utime, prev_stime;
gid_t gid,egid,sgid,fsgid;
struct group_info *group_info;
kernel_cap_t cap_effective, cap_inheritable, cap_permitted, cap_bset;
- unsigned securebits;
struct user_struct *user;
+ unsigned securebits;
#ifdef CONFIG_KEYS
+ unsigned char jit_keyring; /* default keyring to attach requested keys to */
struct key *request_key_auth; /* assumed request_key authority */
struct key *thread_keyring; /* keyring private to this thread */
- unsigned char jit_keyring; /* default keyring to attach requested keys to */
#endif
char comm[TASK_COMM_LEN]; /* executable name excluding path
- access with [gs]et_task_comm (which lock
# define MAX_LOCK_DEPTH 48UL
u64 curr_chain_key;
int lockdep_depth;
- struct held_lock held_locks[MAX_LOCK_DEPTH];
unsigned int lockdep_recursion;
+ struct held_lock held_locks[MAX_LOCK_DEPTH];
#endif
/* journalling filesystem info */
u64 acct_vm_mem1; /* accumulated virtual memory usage */
cputime_t acct_stimexpd;/* stime since last update */
#endif
-#ifdef CONFIG_NUMA
- struct mempolicy *mempolicy;
- short il_next;
-#endif
#ifdef CONFIG_CPUSETS
nodemask_t mems_allowed;
int cpuset_mems_generation;
#endif
struct list_head pi_state_list;
struct futex_pi_state *pi_state_cache;
+#endif
+#ifdef CONFIG_NUMA
+ struct mempolicy *mempolicy;
+ short il_next;
#endif
atomic_t fs_excl; /* holding fs exclusive resources */
struct rcu_head rcu;
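The field moves in the hunks above (rt_priority next to the other priorities, securebits behind the user pointer, jit_keyring ahead of the key pointers, held_locks after lockdep_recursion, the NUMA block behind the futex block) all look like hole-plugging: narrow members are grouped so they share space that would otherwise be alignment padding on 64-bit. A toy illustration of the effect:

    /* On an LP64 target, interleaving ints and pointers wastes padding: */
    struct loose { int a; void *p; int b; }; /* 4 + 4(pad) + 8 + 4 + 4(pad) = 24 */
    struct tight { int a; int b; void *p; }; /* 4 + 4 + 8                   = 16 */
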
#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
#define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */
#define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */
+#define PF_THREAD_BOUND 0x04000000 /* Thread bound to specific cpu */
#define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */
#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezeable */
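PF_THREAD_BOUND marks kthreads pinned to a single CPU (kthread_bind() sets it) so that later affinity changes can be refused instead of silently breaking the binding. A sketch of where such a check sits, assuming roughly the sched_setaffinity() path; the function name is illustrative:

    #include <linux/errno.h>
    #include <linux/sched.h>

    static int set_affinity_sketch(struct task_struct *p, const cpumask_t *mask)
    {
            /* bound per-cpu kthreads must never be migrated */
            if (p->flags & PF_THREAD_BOUND)
                    return -EINVAL;
            return set_cpus_allowed_ptr(p, mask);
    }
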
extern unsigned long long sched_clock(void);
+#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+static inline void sched_clock_init(void)
+{
+}
+
+static inline u64 sched_clock_cpu(int cpu)
+{
+ return sched_clock();
+}
+
+static inline void sched_clock_tick(void)
+{
+}
+
+static inline void sched_clock_idle_sleep_event(void)
+{
+}
+
+static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
+{
+}
+
+#ifdef CONFIG_NO_HZ
+static inline void sched_clock_tick_stop(int cpu)
+{
+}
+
+static inline void sched_clock_tick_start(int cpu)
+{
+}
+#endif
+
+#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
+extern void sched_clock_init(void);
+extern u64 sched_clock_cpu(int cpu);
+extern void sched_clock_tick(void);
+extern void sched_clock_idle_sleep_event(void);
+extern void sched_clock_idle_wakeup_event(u64 delta_ns);
+#ifdef CONFIG_NO_HZ
+extern void sched_clock_tick_stop(int cpu);
+extern void sched_clock_tick_start(int cpu);
+#endif
+#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
+
/*
* For kernel-internal use: high-speed (but slightly incorrect) per-cpu
* clock constructed from sched_clock():
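With CONFIG_HAVE_UNSTABLE_SCHED_CLOCK the filtered per-cpu clock lives in kernel/sched_clock.c; without it, the stubs above collapse sched_clock_cpu() back to raw sched_clock(). Callers must sample with a stable CPU number, which is essentially what cpu_clock() does; a sketch of that pattern:

    #include <linux/irqflags.h>
    #include <linux/sched.h>
    #include <linux/smp.h>

    /* Sketch: take a timestamp from the per-cpu scheduler clock.
     * IRQs are disabled so the cpu number stays valid while sampling. */
    static u64 sample_clock_ns(void)
    {
            unsigned long flags;
            u64 now;

            local_irq_save(flags);
            now = sched_clock_cpu(smp_processor_id());
            local_irq_restore(flags);
            return now;
    }
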
extern unsigned int sysctl_sched_features;
extern unsigned int sysctl_sched_migration_cost;
extern unsigned int sysctl_sched_nr_migrate;
+extern unsigned int sysctl_sched_shares_ratelimit;
int sched_nr_latency_handler(struct ctl_table *table, int write,
struct file *file, void __user *buffer, size_t *length,
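sysctl_sched_shares_ratelimit pairs with the new sched_domain.last_update field above: recomputing group shares across a domain is throttled to at most once per that many nanoseconds. A sketch of the pattern as I read the update_shares() logic; the task-group tree walk is elided:

    static void update_shares_sketch(struct sched_domain *sd)
    {
            u64 now = cpu_clock(raw_smp_processor_id());
            s64 elapsed = now - sd->last_update;

            if (elapsed >= (s64)(u64)sysctl_sched_shares_ratelimit) {
                    sd->last_update = now;
                    /* ...walk the task-group tree, refresh per-cpu shares... */
            }
    }
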
extern void exit_files(struct task_struct *);
extern void __cleanup_signal(struct signal_struct *);
extern void __cleanup_sighand(struct sighand_struct *);
+
extern void exit_itimers(struct signal_struct *);
+extern void flush_itimer_signals(void);
extern NORET_TYPE void do_group_exit(int);
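flush_itimer_signals() discards SI_TIMER signals already queued to the thread and its group, so a freshly exec'ed image does not receive signals from the old image's interval timers. A sketch of the intent; the real call site is on the exec path and its exact placement may differ:

    /* Sketch: during exec cleanup, drop stale itimer signals before
     * the new image starts running (placement is illustrative). */
    static void exec_signals_sketch(void)
    {
            flush_itimer_signals();
            flush_signal_handlers(current, 0);      /* reset handlers */
    }
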
clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}
+static inline int test_tsk_need_resched(struct task_struct *tsk)
+{
+ return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
+}
+
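test_tsk_need_resched() completes the set_/clear_ pair above and lets code query TIF_NEED_RESCHED on an arbitrary task, where need_resched() below only covers current. A sketch of the classic use, avoiding a redundant reschedule IPI; the function name is illustrative:

    static void kick_remote_task_sketch(struct task_struct *p, int cpu)
    {
            if (test_tsk_need_resched(p))
                    return;                 /* reschedule already pending */
            set_tsk_need_resched(p);
            smp_send_reschedule(cpu);       /* assumes p is current on cpu */
    }
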
static inline int signal_pending(struct task_struct *p)
{
return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
return signal_pending(p) && __fatal_signal_pending(p);
}
+static inline int signal_pending_state(long state, struct task_struct *p)
+{
+ if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
+ return 0;
+ if (!signal_pending(p))
+ return 0;
+
+ return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
+}
+
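signal_pending_state() centralizes the decision schedule() must make when a task is about to block: a TASK_INTERRUPTIBLE sleeper aborts for any pending signal, while a TASK_WAKEKILL (TASK_KILLABLE) sleeper aborts only for a fatal one. A sketch of that caller, paraphrasing the relevant lines of schedule(); deactivate_task() is private to kernel/sched.c:

    /* prev is about to block; keep it runnable if a signal should
     * interrupt the sleep instead (paraphrased from schedule()). */
    static void deactivate_or_abort_sketch(struct rq *rq, struct task_struct *prev)
    {
            if (unlikely(signal_pending_state(prev->state, prev)))
                    prev->state = TASK_RUNNING;
            else
                    deactivate_task(rq, prev, 1);
    }
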
static inline int need_resched(void)
{
return unlikely(test_thread_flag(TIF_NEED_RESCHED));
* cond_resched_lock() will drop the spinlock before scheduling,
* cond_resched_softirq() will enable bhs before scheduling.
*/
-#ifdef CONFIG_PREEMPT
+extern int _cond_resched(void);
+#ifdef CONFIG_PREEMPT_BKL
static inline int cond_resched(void)
{
return 0;
}
#else
-extern int _cond_resched(void);
static inline int cond_resched(void)
{
return _cond_resched();
#endif
extern int cond_resched_lock(spinlock_t * lock);
extern int cond_resched_softirq(void);
+static inline int cond_resched_bkl(void)
+{
+ return _cond_resched();
+}
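Hoisting the _cond_resched() declaration out of the #else branch makes it available unconditionally, which is what the new cond_resched_bkl() needs: code holding the BKL gets an explicit reschedule point even in configurations where plain cond_resched() compiles to nothing (schedule() releases and retakes the BKL across the switch). A sketch of the intended caller; more_work()/do_work() are illustrative stand-ins:

    #include <linux/smp_lock.h>

    extern int more_work(void);     /* illustrative */
    extern void do_work(void);      /* illustrative */

    static void bkl_loop_sketch(void)
    {
            lock_kernel();
            while (more_work()) {
                    do_work();
                    cond_resched_bkl();     /* may schedule; BKL dropped across it */
            }
            unlock_kernel();
    }
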
/*
* Does a critical section need to be broken due to another