Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--   include/linux/sched.h   185
1 file changed, 42 insertions(+), 143 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d2112477ff5..33cc4213037 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -304,19 +304,6 @@ static inline void lockup_detector_init(void)
 }
 #endif
 
-#ifdef CONFIG_DETECT_HUNG_TASK
-extern unsigned int  sysctl_hung_task_panic;
-extern unsigned long sysctl_hung_task_check_count;
-extern unsigned long sysctl_hung_task_timeout_secs;
-extern unsigned long sysctl_hung_task_warnings;
-extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
-					 void __user *buffer,
-					 size_t *lenp, loff_t *ppos);
-#else
-/* Avoid need for ifdefs elsewhere in the code */
-enum { sysctl_hung_task_timeout_secs = 0 };
-#endif
-
 /* Attach to any functions which should be ignored in wchan output. */
 #define __sched		__attribute__((__section__(".sched.text")))
@@ -338,23 +325,6 @@ extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
 
 struct nsproxy;
 struct user_namespace;
 
-/*
- * Default maximum number of active map areas, this limits the number of vmas
- * per mm struct. Users can overwrite this number by sysctl but there is a
- * problem.
- *
- * When a program's coredump is generated as ELF format, a section is created
- * per a vma. In ELF, the number of sections is represented in unsigned short.
- * This means the number of sections should be smaller than 65535 at coredump.
- * Because the kernel adds some informative sections to a image of program at
- * generating coredump, we need some margin. The number of extra sections is
- * 1-3 now and depends on arch. We use "5" as safe margin, here.
- */
-#define MAPCOUNT_ELF_CORE_MARGIN	(5)
-#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
-
-extern int sysctl_max_map_count;
-
 #include <linux/aio.h>
 
 #ifdef CONFIG_MMU
@@ -1194,6 +1164,7 @@ struct sched_entity {
 	/* rq "owned" by this entity/group: */
 	struct cfs_rq		*my_q;
 #endif
+
 /*
  * Load-tracking only depends on SMP, FAIR_GROUP_SCHED dependency below may be
  * removed when useful for applications beyond shares distribution (e.g.
@@ -1208,6 +1179,7 @@ struct sched_entity {
 struct sched_rt_entity {
 	struct list_head run_list;
 	unsigned long timeout;
+	unsigned long watchdog_stamp;
 	unsigned int time_slice;
 	struct sched_rt_entity *back;
@@ -1220,11 +1192,6 @@ struct sched_rt_entity {
 #endif
 };
 
-/*
- * default timeslice is 100 msecs (used only for SCHED_RR tasks).
- * Timeslices get refilled after they expire.
- */
-#define RR_TIMESLICE		(100 * HZ / 1000)
 
 struct rcu_node;
@@ -1368,6 +1335,15 @@ struct task_struct {
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
 	struct cputime prev_cputime;
 #endif
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+	seqlock_t vtime_seqlock;
+	unsigned long long vtime_snap;
+	enum {
+		VTIME_SLEEPING = 0,
+		VTIME_USER,
+		VTIME_SYS,
+	} vtime_snap_whence;
+#endif
 	unsigned long nvcsw, nivcsw; /* context switch counts */
 	struct timespec start_time; 		/* monotonic time */
 	struct timespec real_start_time;	/* boot based time */
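The new CONFIG_VIRT_CPU_ACCOUNTING_GEN fields above pair a timestamp (vtime_snap) with a state (vtime_snap_whence) under a seqlock, so readers can take a consistent snapshot without blocking the accounting path. A minimal sketch of that access pattern using the standard seqlock API; the function names and the jiffies-based stamp are illustrative, not part of the patch:

    #include <linux/jiffies.h>
    #include <linux/sched.h>
    #include <linux/seqlock.h>

    /* Illustrative only: a writer stamps a new snapshot under the seqlock. */
    static void vtime_mark_sys_example(struct task_struct *tsk)
    {
    	write_seqlock(&tsk->vtime_seqlock);
    	tsk->vtime_snap = jiffies;		/* assumed time source */
    	tsk->vtime_snap_whence = VTIME_SYS;
    	write_sequnlock(&tsk->vtime_seqlock);
    }

    /* Illustrative only: a reader retries until it sees a stable snapshot. */
    static unsigned long long vtime_snap_read_example(struct task_struct *tsk)
    {
    	unsigned long long snap;
    	unsigned int seq;

    	do {
    		seq = read_seqbegin(&tsk->vtime_seqlock);
    		snap = tsk->vtime_snap;
    	} while (read_seqretry(&tsk->vtime_seqlock, seq));

    	return snap;
    }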
@@ -1622,37 +1598,6 @@ static inline void set_numabalancing_state(bool enabled)
 }
 #endif
 
-/*
- * Priority of a process goes from 0..MAX_PRIO-1, valid RT
- * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
- * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
- * values are inverted: lower p->prio value means higher priority.
- *
- * The MAX_USER_RT_PRIO value allows the actual maximum
- * RT priority to be separate from the value exported to
- * user-space.  This allows kernel threads to set their
- * priority to a value higher than any user task. Note:
- * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
- */
-
-#define MAX_USER_RT_PRIO	100
-#define MAX_RT_PRIO		MAX_USER_RT_PRIO
-
-#define MAX_PRIO		(MAX_RT_PRIO + 40)
-#define DEFAULT_PRIO		(MAX_RT_PRIO + 20)
-
-static inline int rt_prio(int prio)
-{
-	if (unlikely(prio < MAX_RT_PRIO))
-		return 1;
-	return 0;
-}
-
-static inline int rt_task(struct task_struct *p)
-{
-	return rt_prio(p->prio);
-}
-
 static inline struct pid *task_pid(struct task_struct *task)
 {
 	return task->pids[PIDTYPE_PID].pid;
@@ -1792,6 +1737,37 @@ static inline void put_task_struct(struct task_struct *t)
 		__put_task_struct(t);
 }
 
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+extern void task_cputime(struct task_struct *t,
+			 cputime_t *utime, cputime_t *stime);
+extern void task_cputime_scaled(struct task_struct *t,
+				cputime_t *utimescaled, cputime_t *stimescaled);
+extern cputime_t task_gtime(struct task_struct *t);
+#else
+static inline void task_cputime(struct task_struct *t,
+				cputime_t *utime, cputime_t *stime)
+{
+	if (utime)
+		*utime = t->utime;
+	if (stime)
+		*stime = t->stime;
+}
+
+static inline void task_cputime_scaled(struct task_struct *t,
+				       cputime_t *utimescaled,
+				       cputime_t *stimescaled)
+{
+	if (utimescaled)
+		*utimescaled = t->utimescaled;
+	if (stimescaled)
+		*stimescaled = t->stimescaled;
+}
+
+static inline cputime_t task_gtime(struct task_struct *t)
+{
+	return t->gtime;
+}
+#endif
 
 extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
 extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
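The accessors above replace direct reads of t->utime and t->stime: with CONFIG_VIRT_CPU_ACCOUNTING_GEN the values must be derived from the vtime snapshot, while the inline fallback keeps the old field reads. As the fallback shows, either output pointer may be NULL when a caller wants only one of the two values. A hedged usage sketch; the reporting function is illustrative, not from the patch:

    #include <linux/printk.h>
    #include <linux/sched.h>

    /* Illustrative only: fetch both times through the accessor so the
     * caller works whether or not CONFIG_VIRT_CPU_ACCOUNTING_GEN is set. */
    static void report_task_times_example(struct task_struct *t)
    {
    	cputime_t utime, stime;

    	task_cputime(t, &utime, &stime);
    	pr_info("%s: utime=%lu stime=%lu (jiffies)\n", t->comm,
    		cputime_to_jiffies(utime), cputime_to_jiffies(stime));
    }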
@@ -2033,58 +2009,7 @@ extern void wake_up_idle_cpu(int cpu);
 static inline void wake_up_idle_cpu(int cpu) { }
 #endif
 
-extern unsigned int sysctl_sched_latency;
-extern unsigned int sysctl_sched_min_granularity;
-extern unsigned int sysctl_sched_wakeup_granularity;
-extern unsigned int sysctl_sched_child_runs_first;
-
-enum sched_tunable_scaling {
-	SCHED_TUNABLESCALING_NONE,
-	SCHED_TUNABLESCALING_LOG,
-	SCHED_TUNABLESCALING_LINEAR,
-	SCHED_TUNABLESCALING_END,
-};
-extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
-
-extern unsigned int sysctl_numa_balancing_scan_delay;
-extern unsigned int sysctl_numa_balancing_scan_period_min;
-extern unsigned int sysctl_numa_balancing_scan_period_max;
-extern unsigned int sysctl_numa_balancing_scan_period_reset;
-extern unsigned int sysctl_numa_balancing_scan_size;
-extern unsigned int sysctl_numa_balancing_settle_count;
-
-#ifdef CONFIG_SCHED_DEBUG
-extern unsigned int sysctl_sched_migration_cost;
-extern unsigned int sysctl_sched_nr_migrate;
-extern unsigned int sysctl_sched_time_avg;
-extern unsigned int sysctl_timer_migration;
-extern unsigned int sysctl_sched_shares_window;
-
-int sched_proc_update_handler(struct ctl_table *table, int write,
-		void __user *buffer, size_t *length,
-		loff_t *ppos);
-#endif
-#ifdef CONFIG_SCHED_DEBUG
-static inline unsigned int get_sysctl_timer_migration(void)
-{
-	return sysctl_timer_migration;
-}
-#else
-static inline unsigned int get_sysctl_timer_migration(void)
-{
-	return 1;
-}
-#endif
-extern unsigned int sysctl_sched_rt_period;
-extern int sysctl_sched_rt_runtime;
-
-int sched_rt_handler(struct ctl_table *table, int write,
-		void __user *buffer, size_t *lenp,
-		loff_t *ppos);
-
 #ifdef CONFIG_SCHED_AUTOGROUP
-extern unsigned int sysctl_sched_autogroup_enabled;
-
 extern void sched_autogroup_create_attach(struct task_struct *p);
 extern void sched_autogroup_detach(struct task_struct *p);
 extern void sched_autogroup_fork(struct signal_struct *sig);
@@ -2100,30 +2025,6 @@ static inline void sched_autogroup_fork(struct signal_struct *sig) { }
 static inline void sched_autogroup_exit(struct signal_struct *sig) { }
 #endif
 
-#ifdef CONFIG_CFS_BANDWIDTH
-extern unsigned int sysctl_sched_cfs_bandwidth_slice;
-#endif
-
-#ifdef CONFIG_RT_MUTEXES
-extern int rt_mutex_getprio(struct task_struct *p);
-extern void rt_mutex_setprio(struct task_struct *p, int prio);
-extern void rt_mutex_adjust_pi(struct task_struct *p);
-static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
-{
-	return tsk->pi_blocked_on != NULL;
-}
-#else
-static inline int rt_mutex_getprio(struct task_struct *p)
-{
-	return p->normal_prio;
-}
-# define rt_mutex_adjust_pi(p)		do { } while (0)
-static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
-{
-	return false;
-}
-#endif
-
 extern bool yield_to(struct task_struct *p, bool preempt);
 extern void set_user_nice(struct task_struct *p, long nice);
 extern int task_prio(const struct task_struct *p);
@@ -2753,8 +2654,6 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
 
-extern void normalize_rt_tasks(void);
-
 #ifdef CONFIG_CGROUP_SCHED
 extern struct task_group root_task_group;
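Nearly all of the deletions in this diff are declarations leaving sched.h rather than functionality being removed; in kernels of this vintage they reappear in more specific headers (the scheduler and hung-task sysctls in include/linux/sched/sysctl.h, the RT priority macros and helpers in include/linux/sched/rt.h). A sketch of what a dependent file would include after the split, assuming those destination headers:

    #include <linux/sched.h>
    #include <linux/sched/rt.h>	/* assumed home of rt_prio(), rt_task(), MAX_RT_PRIO, RR_TIMESLICE */
    #include <linux/sched/sysctl.h>	/* assumed home of sysctl_sched_latency, sysctl_hung_task_*, ... */

    /* Illustrative only: rt_task() is no longer visible via sched.h alone. */
    static inline int is_realtime_example(struct task_struct *p)
    {
    	return rt_task(p);
    }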