Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--	include/linux/sched.h | 82
1 file changed, 42 insertions, 40 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 18d63cea284..340f5ee5733 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -315,7 +315,6 @@ extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
 				  void __user *buffer,
 				  size_t *lenp, loff_t *ppos);
 extern unsigned int  softlockup_panic;
-extern int softlockup_thresh;
 void lockup_detector_init(void);
 #else
 static inline void touch_softlockup_watchdog(void)
@@ -360,7 +359,7 @@ extern signed long schedule_timeout_interruptible(signed long timeout);
 extern signed long schedule_timeout_killable(signed long timeout);
 extern signed long schedule_timeout_uninterruptible(signed long timeout);
 asmlinkage void schedule(void);
-extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
+extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
 
 struct nsproxy;
 struct user_namespace;
@@ -653,9 +652,8 @@ struct signal_struct {
  * Bits in flags field of signal_struct.
  */
 #define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
-#define SIGNAL_STOP_DEQUEUED	0x00000002 /* stop signal dequeued */
-#define SIGNAL_STOP_CONTINUED	0x00000004 /* SIGCONT since WCONTINUED reap */
-#define SIGNAL_GROUP_EXIT	0x00000008 /* group exit in progress */
+#define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
+#define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
 /*
  * Pending notifications to parent.
  */
@@ -731,10 +729,6 @@ struct sched_info {
 	/* timestamps */
 	unsigned long long last_arrival,/* when we last ran on a cpu */
 			   last_queued;	/* when we were last queued to run */
-#ifdef CONFIG_SCHEDSTATS
-	/* BKL stats */
-	unsigned int bkl_count;
-#endif
 };
 #endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
 
@@ -868,6 +862,7 @@ static inline int sd_power_saving_flags(void)
 
 struct sched_group {
 	struct sched_group *next;	/* Must be a circular list */
+	atomic_t ref;
 
 	/*
 	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
@@ -882,9 +877,6 @@ struct sched_group {
 	 * NOTE: this field is variable length. (Allocated dynamically
 	 * by attaching extra space to the end of the structure,
 	 * depending on how many CPUs the kernel has booted up with)
-	 *
-	 * It is also be embedded into static data structures at build
-	 * time. (See 'struct static_sched_group' in kernel/sched.c)
 	 */
 	unsigned long cpumask[0];
 };
@@ -894,17 +886,6 @@ static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
 	return to_cpumask(sg->cpumask);
 }
 
-enum sched_domain_level {
-	SD_LV_NONE = 0,
-	SD_LV_SIBLING,
-	SD_LV_MC,
-	SD_LV_BOOK,
-	SD_LV_CPU,
-	SD_LV_NODE,
-	SD_LV_ALLNODES,
-	SD_LV_MAX
-};
-
 struct sched_domain_attr {
 	int relax_domain_level;
 };
@@ -913,6 +894,8 @@ struct sched_domain_attr {
 	.relax_domain_level = -1,			\
 }
 
+extern int sched_domain_level_max;
+
 struct sched_domain {
 	/* These fields must be setup */
 	struct sched_domain *parent;	/* top domain must be null terminated */
@@ -930,7 +913,7 @@ struct sched_domain {
 	unsigned int forkexec_idx;
 	unsigned int smt_gain;
 	int flags;			/* See SD_* */
-	enum sched_domain_level level;
+	int level;
 
 	/* Runtime fields. */
 	unsigned long last_balance;	/* init to jiffies. units in jiffies */
@@ -973,6 +956,10 @@ struct sched_domain {
 #ifdef CONFIG_SCHED_DEBUG
 	char *name;
 #endif
+	union {
+		void *private;		/* used during construction */
+		struct rcu_head rcu;	/* used during destruction */
+	};
 
 	unsigned int span_weight;
 	/*
@@ -981,9 +968,6 @@ struct sched_domain {
 	 * NOTE: this field is variable length. (Allocated dynamically
 	 * by attaching extra space to the end of the structure,
 	 * depending on how many CPUs the kernel has booted up with)
-	 *
-	 * It is also be embedded into static data structures at build
-	 * time. (See 'struct static_sched_domain' in kernel/sched.c)
 	 */
 	unsigned long span[0];
 };
@@ -1048,8 +1032,12 @@ struct sched_domain;
 #define WF_FORK		0x02		/* child wakeup after fork */
 
 #define ENQUEUE_WAKEUP		1
-#define ENQUEUE_WAKING		2
-#define ENQUEUE_HEAD		4
+#define ENQUEUE_HEAD		2
+#ifdef CONFIG_SMP
+#define ENQUEUE_WAKING		4	/* sched_class::task_waking was called */
+#else
+#define ENQUEUE_WAKING		0
+#endif
 
 #define DEQUEUE_SLEEP		1
 
@@ -1067,12 +1055,11 @@ struct sched_class {
 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
 
 #ifdef CONFIG_SMP
-	int  (*select_task_rq)(struct rq *rq, struct task_struct *p,
-			       int sd_flag, int flags);
+	int  (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
 
 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
 	void (*post_schedule) (struct rq *this_rq);
-	void (*task_waking) (struct rq *this_rq, struct task_struct *task);
+	void (*task_waking) (struct task_struct *task);
 	void (*task_woken) (struct rq *this_rq, struct task_struct *task);
 
 	void (*set_cpus_allowed)(struct task_struct *p,
@@ -1197,13 +1184,11 @@ struct task_struct {
 	unsigned int flags;	/* per process flags, defined below */
 	unsigned int ptrace;
 
-	int lock_depth;		/* BKL lock depth */
-
 #ifdef CONFIG_SMP
-#ifdef __ARCH_WANT_UNLOCKED_CTXSW
-	int oncpu;
-#endif
+	struct task_struct *wake_entry;
+	int on_cpu;
 #endif
+	int on_rq;
 
 	int prio, static_prio, normal_prio;
 	unsigned int rt_priority;
@@ -1264,6 +1249,7 @@ struct task_struct {
 	int exit_state;
 	int exit_code, exit_signal;
 	int pdeath_signal;  /*  The signal sent when the parent dies  */
+	unsigned int group_stop;	/* GROUP_STOP_*, siglock protected */
 	/* ??? */
 	unsigned int personality;
 	unsigned did_exec:1;
@@ -1274,6 +1260,7 @@ struct task_struct {
 
 	/* Revert to default priority/policy when forking */
 	unsigned sched_reset_on_fork:1;
+	unsigned sched_contributes_to_load:1;
 
 	pid_t pid;
 	pid_t tgid;
@@ -1537,6 +1524,9 @@ struct task_struct {
 		unsigned long memsw_nr_pages; /* uncharged mem+swap usage */
 	} memcg_batch;
 #endif
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+	atomic_t ptrace_bp_refcnt;
+#endif
 };
 
 /* Future-safe accessor for struct task_struct's cpus_allowed. */
@@ -1780,6 +1770,17 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
 #define used_math() tsk_used_math(current)
 
+/*
+ * task->group_stop flags
+ */
+#define GROUP_STOP_SIGMASK	0xffff    /* signr of the last group stop */
+#define GROUP_STOP_PENDING	(1 << 16) /* task should stop for group stop */
+#define GROUP_STOP_CONSUME	(1 << 17) /* consume group stop count */
+#define GROUP_STOP_TRAPPING	(1 << 18) /* switching from STOPPED to TRACED */
+#define GROUP_STOP_DEQUEUED	(1 << 19) /* stop signal dequeued */
+
+extern void task_clear_group_stop_pending(struct task_struct *task);
+
 #ifdef CONFIG_PREEMPT_RCU
 
 #define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
@@ -2060,14 +2061,13 @@ extern void xtime_update(unsigned long ticks);
 
 extern int wake_up_state(struct task_struct *tsk, unsigned int state);
 extern int wake_up_process(struct task_struct *tsk);
-extern void wake_up_new_task(struct task_struct *tsk,
-				unsigned long clone_flags);
+extern void wake_up_new_task(struct task_struct *tsk);
 #ifdef CONFIG_SMP
  extern void kick_process(struct task_struct *tsk);
 #else
  static inline void kick_process(struct task_struct *tsk) { }
 #endif
-extern void sched_fork(struct task_struct *p, int clone_flags);
+extern void sched_fork(struct task_struct *p);
 extern void sched_dead(struct task_struct *p);
 
 extern void proc_caches_init(void);
@@ -2192,8 +2192,10 @@ extern void set_task_comm(struct task_struct *tsk, char *from);
 extern char *get_task_comm(char *to, struct task_struct *tsk);
 
 #ifdef CONFIG_SMP
+void scheduler_ipi(void);
 extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
 #else
+static inline void scheduler_ipi(void) { }
 static inline unsigned long wait_task_inactive(struct task_struct *p,
 					       long match_state)
 {
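
Note: the task->group_stop word added above packs the number of the last group-stop signal into its low 16 bits (GROUP_STOP_SIGMASK) and keeps the state flags in the bits above it. The stand-alone C sketch below only illustrates that encoding, reusing the macro values from the hunk at +1770; it is not kernel code, and everything outside those macro values (the local variable, the choice of SIGTSTP, the printf output) is made up for the example.

#include <stdio.h>
#include <signal.h>

/* Values copied from the hunk above; the rest of this file is illustrative. */
#define GROUP_STOP_SIGMASK	0xffff    /* signr of the last group stop */
#define GROUP_STOP_PENDING	(1 << 16) /* task should stop for group stop */
#define GROUP_STOP_CONSUME	(1 << 17) /* consume group stop count */

int main(void)
{
	unsigned int group_stop = 0;	/* stands in for task->group_stop */

	/* record a hypothetical group stop initiated by SIGTSTP */
	group_stop &= ~GROUP_STOP_SIGMASK;		/* drop the old signr */
	group_stop |= SIGTSTP & GROUP_STOP_SIGMASK;	/* store the new signr */
	group_stop |= GROUP_STOP_PENDING | GROUP_STOP_CONSUME;

	printf("signr   = %u\n", group_stop & GROUP_STOP_SIGMASK);
	printf("pending = %d\n", !!(group_stop & GROUP_STOP_PENDING));
	printf("consume = %d\n", !!(group_stop & GROUP_STOP_CONSUME));
	return 0;
}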