| author | Ingo Molnar <mingo@elte.hu> | 2006-07-03 00:25:41 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-07-03 15:27:11 -0700 |
| commit | 36c8b586896f60cb91a4fd526233190b34316baf (patch) | |
| tree | 003246e1e676de33703daa979b3e3109ca202a89 | |
| parent | 48f24c4da1ee7f3f22289cb85e8b8a73e4df4db5 (diff) | |
| download | olio-linux-3.10-36c8b586896f60cb91a4fd526233190b34316baf.tar.xz, olio-linux-3.10-36c8b586896f60cb91a4fd526233190b34316baf.zip | |
[PATCH] sched: cleanup, remove task_t, convert to struct task_struct
cleanup: remove task_t and convert all of its uses to struct task_struct. I
introduced it for the scheduler long ago and it was a mistake.
The conversion was mostly scripted; the result was reviewed, and any
secondary whitespace and style impact was fixed up by hand.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
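For readers unfamiliar with the old typedef, the sketch below illustrates the pattern the patch applies tree-wide: the `typedef struct task_struct task_t;` in include/linux/sched.h is dropped in favour of a plain forward declaration, and every `task_t` in prototypes and declarations is spelled out as `struct task_struct`. This is plain userspace C for illustration only, not kernel code; the pid/prio/comm fields are simplified stand-ins for the real structure.

```c
#include <stdio.h>

/* Before this patch, include/linux/sched.h carried:
 *     typedef struct task_struct task_t;
 * After it, only the plain forward declaration remains:
 */
struct task_struct;

/* Simplified stand-in for the real (much larger) kernel structure. */
struct task_struct {
	int pid;
	int prio;
	char comm[16];
};

/* Old spelling, removed by the patch:  int task_prio(const task_t *p);  */
/* New spelling, as used throughout the tree after the conversion:       */
static int task_prio(const struct task_struct *p)
{
	return p->prio;
}

int main(void)
{
	struct task_struct demo = { .pid = 1, .prio = 120, .comm = "init" };

	printf("%s (pid %d) has prio %d\n", demo.comm, demo.pid, task_prio(&demo));
	return 0;
}
```

The change is purely a renaming of the type; no structure layout or behaviour changes, which is why the conversion could be largely scripted.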
| -rw-r--r-- | arch/alpha/kernel/process.c | 2 |
| -rw-r--r-- | arch/ia64/kernel/mca.c | 10 |
| -rw-r--r-- | arch/ia64/kernel/smpboot.c | 2 |
| -rw-r--r-- | arch/mips/kernel/entry.S | 2 |
| -rw-r--r-- | arch/mips/kernel/mips-mt.c | 6 |
| -rw-r--r-- | arch/um/kernel/tt/process_kern.c | 2 |
| -rw-r--r-- | drivers/char/tty_io.c | 2 |
| -rw-r--r-- | fs/eventpoll.c | 4 |
| -rw-r--r-- | include/asm-ia64/thread_info.h | 2 |
| -rw-r--r-- | include/asm-m32r/system.h | 2 |
| -rw-r--r-- | include/asm-sh/system.h | 2 |
| -rw-r--r-- | include/linux/sched.h | 55 |
| -rw-r--r-- | kernel/capability.c | 8 |
| -rw-r--r-- | kernel/exit.c | 35 |
| -rw-r--r-- | kernel/fork.c | 18 |
| -rw-r--r-- | kernel/hrtimer.c | 2 |
| -rw-r--r-- | kernel/pid.c | 6 |
| -rw-r--r-- | kernel/ptrace.c | 6 |
| -rw-r--r-- | kernel/rtmutex-debug.c | 5 |
| -rw-r--r-- | kernel/rtmutex-tester.c | 4 |
| -rw-r--r-- | kernel/rtmutex.c | 11 |
| -rw-r--r-- | kernel/sched.c | 192 |
| -rw-r--r-- | kernel/timer.c | 2 |
| -rw-r--r-- | kernel/workqueue.c | 2 |
| -rw-r--r-- | mm/oom_kill.c | 8 |
25 files changed, 203 insertions(+), 187 deletions(-)
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c index 01c8c8b2333..41ebf51a107 100644 --- a/arch/alpha/kernel/process.c +++ b/arch/alpha/kernel/process.c @@ -474,7 +474,7 @@ out:   */  unsigned long -thread_saved_pc(task_t *t) +thread_saved_pc(struct task_struct *t)  {  	unsigned long base = (unsigned long)task_stack_page(t);  	unsigned long fp, sp = task_thread_info(t)->pcb.ksp; diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index eb8e8dc5ac8..2fbe4536fe1 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -678,7 +678,7 @@ copy_reg(const u64 *fr, u64 fnat, u64 *tr, u64 *tnat)   */  static void -ia64_mca_modify_comm(const task_t *previous_current) +ia64_mca_modify_comm(const struct task_struct *previous_current)  {  	char *p, comm[sizeof(current->comm)];  	if (previous_current->pid) @@ -709,7 +709,7 @@ ia64_mca_modify_comm(const task_t *previous_current)   * that we can do backtrace on the MCA/INIT handler code itself.   */ -static task_t * +static struct task_struct *  ia64_mca_modify_original_stack(struct pt_regs *regs,  		const struct switch_stack *sw,  		struct ia64_sal_os_state *sos, @@ -719,7 +719,7 @@ ia64_mca_modify_original_stack(struct pt_regs *regs,  	ia64_va va;  	extern char ia64_leave_kernel[];	/* Need asm address, not function descriptor */  	const pal_min_state_area_t *ms = sos->pal_min_state; -	task_t *previous_current; +	struct task_struct *previous_current;  	struct pt_regs *old_regs;  	struct switch_stack *old_sw;  	unsigned size = sizeof(struct pt_regs) + @@ -1023,7 +1023,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,  	pal_processor_state_info_t *psp = (pal_processor_state_info_t *)  		&sos->proc_state_param;  	int recover, cpu = smp_processor_id(); -	task_t *previous_current; +	struct task_struct *previous_current;  	struct ia64_mca_notify_die nd =  		{ .sos = sos, .monarch_cpu = &monarch_cpu }; @@ -1352,7 +1352,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,  {  	static atomic_t slaves;  	static atomic_t monarchs; -	task_t *previous_current; +	struct task_struct *previous_current;  	int cpu = smp_processor_id();  	struct ia64_mca_notify_die nd =  		{ .sos = sos, .monarch_cpu = &monarch_cpu }; diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index e1960979be2..6203ed4ec8c 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c @@ -124,7 +124,7 @@ extern void __devinit calibrate_delay (void);  extern void start_ap (void);  extern unsigned long ia64_iobase; -task_t *task_for_booting_cpu; +struct task_struct *task_for_booting_cpu;  /*   * State for each CPU diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S index ecfd637d702..01e7fa86aa4 100644 --- a/arch/mips/kernel/entry.S +++ b/arch/mips/kernel/entry.S @@ -65,7 +65,7 @@ need_resched:  #endif  FEXPORT(ret_from_fork) -	jal	schedule_tail		# a0 = task_t *prev +	jal	schedule_tail		# a0 = struct task_struct *prev  FEXPORT(syscall_exit)  	local_irq_disable		# make sure need_resched and diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c index 02237a685ec..4dcc39f4295 100644 --- a/arch/mips/kernel/mips-mt.c +++ b/arch/mips/kernel/mips-mt.c @@ -47,7 +47,7 @@ unsigned long mt_fpemul_threshold = 0;   * used in sys_sched_set/getaffinity() in kernel/sched.c, so   * cloned here.   */ -static inline task_t *find_process_by_pid(pid_t pid) +static inline struct task_struct *find_process_by_pid(pid_t pid)  {  	return pid ? 
find_task_by_pid(pid) : current;  } @@ -62,7 +62,7 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,  	cpumask_t new_mask;  	cpumask_t effective_mask;  	int retval; -	task_t *p; +	struct task_struct *p;  	if (len < sizeof(new_mask))  		return -EINVAL; @@ -127,7 +127,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,  	unsigned int real_len;  	cpumask_t mask;  	int retval; -	task_t *p; +	struct task_struct *p;  	real_len = sizeof(mask);  	if (len < real_len) diff --git a/arch/um/kernel/tt/process_kern.c b/arch/um/kernel/tt/process_kern.c index a9c1443fc54..8368c2dbe63 100644 --- a/arch/um/kernel/tt/process_kern.c +++ b/arch/um/kernel/tt/process_kern.c @@ -119,7 +119,7 @@ void suspend_new_thread(int fd)  		panic("read failed in suspend_new_thread, err = %d", -err);  } -void schedule_tail(task_t *prev); +void schedule_tail(struct task_struct *prev);  static void new_thread_handler(int sig)  { diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c index 6fb77952562..bfdb90242a9 100644 --- a/drivers/char/tty_io.c +++ b/drivers/char/tty_io.c @@ -2336,7 +2336,7 @@ static int fionbio(struct file *file, int __user *p)  static int tiocsctty(struct tty_struct *tty, int arg)  { -	task_t *p; +	struct task_struct *p;  	if (current->signal->leader &&  	    (current->signal->session == tty->session)) diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 9c677bbd0b0..19ffb043abb 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -120,7 +120,7 @@ struct epoll_filefd {   */  struct wake_task_node {  	struct list_head llink; -	task_t *task; +	struct task_struct *task;  	wait_queue_head_t *wq;  }; @@ -413,7 +413,7 @@ static void ep_poll_safewake(struct poll_safewake *psw, wait_queue_head_t *wq)  {  	int wake_nests = 0;  	unsigned long flags; -	task_t *this_task = current; +	struct task_struct *this_task = current;  	struct list_head *lsthead = &psw->wake_task_list, *lnk;  	struct wake_task_node *tncur;  	struct wake_task_node tnode; diff --git a/include/asm-ia64/thread_info.h b/include/asm-ia64/thread_info.h index 8bc9869e576..8adcde0934c 100644 --- a/include/asm-ia64/thread_info.h +++ b/include/asm-ia64/thread_info.h @@ -68,7 +68,7 @@ struct thread_info {  #define end_of_stack(p) (unsigned long *)((void *)(p) + IA64_RBS_OFFSET)  #define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR -#define alloc_task_struct()	((task_t *)__get_free_pages(GFP_KERNEL | __GFP_COMP, KERNEL_STACK_SIZE_ORDER)) +#define alloc_task_struct()	((struct task_struct *)__get_free_pages(GFP_KERNEL | __GFP_COMP, KERNEL_STACK_SIZE_ORDER))  #define free_task_struct(tsk)	free_pages((unsigned long) (tsk), KERNEL_STACK_SIZE_ORDER)  #endif /* !__ASSEMBLY */ diff --git a/include/asm-m32r/system.h b/include/asm-m32r/system.h index 66c4742f09e..311cebf44ef 100644 --- a/include/asm-m32r/system.h +++ b/include/asm-m32r/system.h @@ -18,7 +18,7 @@   * switch_to(prev, next) should switch from task `prev' to `next'   * `prev' will never be the same as `next'.   
* - * `next' and `prev' should be task_t, but it isn't always defined + * `next' and `prev' should be struct task_struct, but it isn't always defined   */  #define switch_to(prev, next, last)  do { \ diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h index b752e5cbb83..ce2e60664a8 100644 --- a/include/asm-sh/system.h +++ b/include/asm-sh/system.h @@ -12,7 +12,7 @@   */  #define switch_to(prev, next, last) do {				\ - task_t *__last;							\ + struct task_struct *__last;						\   register unsigned long *__ts1 __asm__ ("r1") = &prev->thread.sp;	\   register unsigned long *__ts2 __asm__ ("r2") = &prev->thread.pc;	\   register unsigned long *__ts4 __asm__ ("r4") = (unsigned long *)prev;	\ diff --git a/include/linux/sched.h b/include/linux/sched.h index 8ebddba4448..c2797f04d93 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -184,11 +184,11 @@ extern unsigned long weighted_cpuload(const int cpu);  extern rwlock_t tasklist_lock;  extern spinlock_t mmlist_lock; -typedef struct task_struct task_t; +struct task_struct;  extern void sched_init(void);  extern void sched_init_smp(void); -extern void init_idle(task_t *idle, int cpu); +extern void init_idle(struct task_struct *idle, int cpu);  extern cpumask_t nohz_cpu_mask; @@ -383,7 +383,7 @@ struct signal_struct {  	wait_queue_head_t	wait_chldexit;	/* for wait4() */  	/* current thread group signal load-balancing target: */ -	task_t			*curr_target; +	struct task_struct	*curr_target;  	/* shared signal handling: */  	struct sigpending	shared_pending; @@ -699,7 +699,7 @@ extern int groups_search(struct group_info *group_info, gid_t grp);      ((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK])  #ifdef ARCH_HAS_PREFETCH_SWITCH_STACK -extern void prefetch_stack(struct task_struct*); +extern void prefetch_stack(struct task_struct *t);  #else  static inline void prefetch_stack(struct task_struct *t) { }  #endif @@ -1031,9 +1031,9 @@ static inline void put_task_struct(struct task_struct *t)  #define used_math() tsk_used_math(current)  #ifdef CONFIG_SMP -extern int set_cpus_allowed(task_t *p, cpumask_t new_mask); +extern int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask);  #else -static inline int set_cpus_allowed(task_t *p, cpumask_t new_mask) +static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)  {  	if (!cpu_isset(0, new_mask))  		return -EINVAL; @@ -1042,7 +1042,8 @@ static inline int set_cpus_allowed(task_t *p, cpumask_t new_mask)  #endif  extern unsigned long long sched_clock(void); -extern unsigned long long current_sched_time(const task_t *current_task); +extern unsigned long long +current_sched_time(const struct task_struct *current_task);  /* sched_exec is called by processes performing an exec */  #ifdef CONFIG_SMP @@ -1060,27 +1061,27 @@ static inline void idle_task_exit(void) {}  extern void sched_idle_next(void);  #ifdef CONFIG_RT_MUTEXES -extern int rt_mutex_getprio(task_t *p); -extern void rt_mutex_setprio(task_t *p, int prio); -extern void rt_mutex_adjust_pi(task_t *p); +extern int rt_mutex_getprio(struct task_struct *p); +extern void rt_mutex_setprio(struct task_struct *p, int prio); +extern void rt_mutex_adjust_pi(struct task_struct *p);  #else -static inline int rt_mutex_getprio(task_t *p) +static inline int rt_mutex_getprio(struct task_struct *p)  {  	return p->normal_prio;  }  # define rt_mutex_adjust_pi(p)		do { } while (0)  #endif -extern void set_user_nice(task_t *p, long nice); -extern int task_prio(const task_t *p); -extern int task_nice(const task_t *p); 
-extern int can_nice(const task_t *p, const int nice); -extern int task_curr(const task_t *p); +extern void set_user_nice(struct task_struct *p, long nice); +extern int task_prio(const struct task_struct *p); +extern int task_nice(const struct task_struct *p); +extern int can_nice(const struct task_struct *p, const int nice); +extern int task_curr(const struct task_struct *p);  extern int idle_cpu(int cpu);  extern int sched_setscheduler(struct task_struct *, int, struct sched_param *); -extern task_t *idle_task(int cpu); -extern task_t *curr_task(int cpu); -extern void set_curr_task(int cpu, task_t *p); +extern struct task_struct *idle_task(int cpu); +extern struct task_struct *curr_task(int cpu); +extern void set_curr_task(int cpu, struct task_struct *p);  void yield(void); @@ -1137,8 +1138,8 @@ extern void FASTCALL(wake_up_new_task(struct task_struct * tsk,  #else   static inline void kick_process(struct task_struct *tsk) { }  #endif -extern void FASTCALL(sched_fork(task_t * p, int clone_flags)); -extern void FASTCALL(sched_exit(task_t * p)); +extern void FASTCALL(sched_fork(struct task_struct * p, int clone_flags)); +extern void FASTCALL(sched_exit(struct task_struct * p));  extern int in_group_p(gid_t);  extern int in_egroup_p(gid_t); @@ -1243,17 +1244,17 @@ extern NORET_TYPE void do_group_exit(int);  extern void daemonize(const char *, ...);  extern int allow_signal(int);  extern int disallow_signal(int); -extern task_t *child_reaper; +extern struct task_struct *child_reaper;  extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *);  extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *); -task_t *fork_idle(int); +struct task_struct *fork_idle(int);  extern void set_task_comm(struct task_struct *tsk, char *from);  extern void get_task_comm(char *to, struct task_struct *tsk);  #ifdef CONFIG_SMP -extern void wait_task_inactive(task_t * p); +extern void wait_task_inactive(struct task_struct * p);  #else  #define wait_task_inactive(p)	do { } while (0)  #endif @@ -1279,13 +1280,13 @@ extern void wait_task_inactive(task_t * p);  /* de_thread depends on thread_group_leader not being a pid based check */  #define thread_group_leader(p)	(p == p->group_leader) -static inline task_t *next_thread(const task_t *p) +static inline struct task_struct *next_thread(const struct task_struct *p)  {  	return list_entry(rcu_dereference(p->thread_group.next), -				task_t, thread_group); +			  struct task_struct, thread_group);  } -static inline int thread_group_empty(task_t *p) +static inline int thread_group_empty(struct task_struct *p)  {  	return list_empty(&p->thread_group);  } diff --git a/kernel/capability.c b/kernel/capability.c index 1a4d8a40d3f..c7685ad00a9 100644 --- a/kernel/capability.c +++ b/kernel/capability.c @@ -46,7 +46,7 @@ asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr)       int ret = 0;       pid_t pid;       __u32 version; -     task_t *target; +     struct task_struct *target;       struct __user_cap_data_struct data;       if (get_user(version, &header->version)) @@ -96,7 +96,7 @@ static inline int cap_set_pg(int pgrp, kernel_cap_t *effective,  			      kernel_cap_t *inheritable,  			      kernel_cap_t *permitted)  { -	task_t *g, *target; +	struct task_struct *g, *target;  	int ret = -EPERM;  	int found = 0; @@ -128,7 +128,7 @@ static inline int cap_set_all(kernel_cap_t *effective,  			       kernel_cap_t *inheritable,  			       kernel_cap_t *permitted)  { -  
   task_t *g, *target; +     struct task_struct *g, *target;       int ret = -EPERM;       int found = 0; @@ -172,7 +172,7 @@ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)  {       kernel_cap_t inheritable, permitted, effective;       __u32 version; -     task_t *target; +     struct task_struct *target;       int ret;       pid_t pid; diff --git a/kernel/exit.c b/kernel/exit.c index c595db14cf2..6664c084783 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -134,8 +134,8 @@ static void delayed_put_task_struct(struct rcu_head *rhp)  void release_task(struct task_struct * p)  { +	struct task_struct *leader;  	int zap_leader; -	task_t *leader;  repeat:  	atomic_dec(&p->user->processes);  	write_lock_irq(&tasklist_lock); @@ -209,7 +209,7 @@ out:   *   * "I ask you, have you ever known what it is to be an orphan?"   */ -static int will_become_orphaned_pgrp(int pgrp, task_t *ignored_task) +static int will_become_orphaned_pgrp(int pgrp, struct task_struct *ignored_task)  {  	struct task_struct *p;  	int ret = 1; @@ -582,7 +582,8 @@ static void exit_mm(struct task_struct * tsk)  	mmput(mm);  } -static inline void choose_new_parent(task_t *p, task_t *reaper) +static inline void +choose_new_parent(struct task_struct *p, struct task_struct *reaper)  {  	/*  	 * Make sure we're not reparenting to ourselves and that @@ -592,7 +593,8 @@ static inline void choose_new_parent(task_t *p, task_t *reaper)  	p->real_parent = reaper;  } -static void reparent_thread(task_t *p, task_t *father, int traced) +static void +reparent_thread(struct task_struct *p, struct task_struct *father, int traced)  {  	/* We don't want people slaying init.  */  	if (p->exit_signal != -1) @@ -656,8 +658,8 @@ static void reparent_thread(task_t *p, task_t *father, int traced)   * group, and if no such member exists, give it to   * the global child reaper process (ie "init")   */ -static void forget_original_parent(struct task_struct * father, -					  struct list_head *to_release) +static void +forget_original_parent(struct task_struct *father, struct list_head *to_release)  {  	struct task_struct *p, *reaper = father;  	struct list_head *_p, *_n; @@ -680,7 +682,7 @@ static void forget_original_parent(struct task_struct * father,  	 */  	list_for_each_safe(_p, _n, &father->children) {  		int ptrace; -		p = list_entry(_p,struct task_struct,sibling); +		p = list_entry(_p, struct task_struct, sibling);  		ptrace = p->ptrace; @@ -709,7 +711,7 @@ static void forget_original_parent(struct task_struct * father,  			list_add(&p->ptrace_list, to_release);  	}  	list_for_each_safe(_p, _n, &father->ptrace_children) { -		p = list_entry(_p,struct task_struct,ptrace_list); +		p = list_entry(_p, struct task_struct, ptrace_list);  		choose_new_parent(p, reaper);  		reparent_thread(p, father, 1);  	} @@ -829,7 +831,7 @@ static void exit_notify(struct task_struct *tsk)  	list_for_each_safe(_p, _n, &ptrace_dead) {  		list_del_init(_p); -		t = list_entry(_p,struct task_struct,ptrace_list); +		t = list_entry(_p, struct task_struct, ptrace_list);  		release_task(t);  	} @@ -1010,7 +1012,7 @@ asmlinkage void sys_exit_group(int error_code)  	do_group_exit((error_code & 0xff) << 8);  } -static int eligible_child(pid_t pid, int options, task_t *p) +static int eligible_child(pid_t pid, int options, struct task_struct *p)  {  	if (pid > 0) {  		if (p->pid != pid) @@ -1051,12 +1053,13 @@ static int eligible_child(pid_t pid, int options, task_t *p)  	return 1;  } -static int wait_noreap_copyout(task_t *p, pid_t pid, uid_t uid, 
+static int wait_noreap_copyout(struct task_struct *p, pid_t pid, uid_t uid,  			       int why, int status,  			       struct siginfo __user *infop,  			       struct rusage __user *rusagep)  {  	int retval = rusagep ? getrusage(p, RUSAGE_BOTH, rusagep) : 0; +  	put_task_struct(p);  	if (!retval)  		retval = put_user(SIGCHLD, &infop->si_signo); @@ -1081,7 +1084,7 @@ static int wait_noreap_copyout(task_t *p, pid_t pid, uid_t uid,   * the lock and this task is uninteresting.  If we return nonzero, we have   * released the lock and the system call should return.   */ -static int wait_task_zombie(task_t *p, int noreap, +static int wait_task_zombie(struct task_struct *p, int noreap,  			    struct siginfo __user *infop,  			    int __user *stat_addr, struct rusage __user *ru)  { @@ -1243,8 +1246,8 @@ static int wait_task_zombie(task_t *p, int noreap,   * the lock and this task is uninteresting.  If we return nonzero, we have   * released the lock and the system call should return.   */ -static int wait_task_stopped(task_t *p, int delayed_group_leader, int noreap, -			     struct siginfo __user *infop, +static int wait_task_stopped(struct task_struct *p, int delayed_group_leader, +			     int noreap, struct siginfo __user *infop,  			     int __user *stat_addr, struct rusage __user *ru)  {  	int retval, exit_code; @@ -1358,7 +1361,7 @@ bail_ref:   * the lock and this task is uninteresting.  If we return nonzero, we have   * released the lock and the system call should return.   */ -static int wait_task_continued(task_t *p, int noreap, +static int wait_task_continued(struct task_struct *p, int noreap,  			       struct siginfo __user *infop,  			       int __user *stat_addr, struct rusage __user *ru)  { @@ -1444,7 +1447,7 @@ repeat:  		int ret;  		list_for_each(_p,&tsk->children) { -			p = list_entry(_p,struct task_struct,sibling); +			p = list_entry(_p, struct task_struct, sibling);  			ret = eligible_child(pid, options, p);  			if (!ret) diff --git a/kernel/fork.c b/kernel/fork.c index 54953d8a6f1..56e4e07e45f 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -933,13 +933,13 @@ static inline void rt_mutex_init_task(struct task_struct *p)   * parts of the process environment (as per the clone   * flags). The actual kick-off is left to the caller.   
*/ -static task_t *copy_process(unsigned long clone_flags, -				 unsigned long stack_start, -				 struct pt_regs *regs, -				 unsigned long stack_size, -				 int __user *parent_tidptr, -				 int __user *child_tidptr, -				 int pid) +static struct task_struct *copy_process(unsigned long clone_flags, +					unsigned long stack_start, +					struct pt_regs *regs, +					unsigned long stack_size, +					int __user *parent_tidptr, +					int __user *child_tidptr, +					int pid)  {  	int retval;  	struct task_struct *p = NULL; @@ -1294,9 +1294,9 @@ struct pt_regs * __devinit __attribute__((weak)) idle_regs(struct pt_regs *regs)  	return regs;  } -task_t * __devinit fork_idle(int cpu) +struct task_struct * __devinit fork_idle(int cpu)  { -	task_t *task; +	struct task_struct *task;  	struct pt_regs regs;  	task = copy_process(CLONE_VM, 0, idle_regs(®s), 0, NULL, NULL, 0); diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 617304ce67d..d17766d40da 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -669,7 +669,7 @@ static int hrtimer_wakeup(struct hrtimer *timer)  	return HRTIMER_NORESTART;  } -void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, task_t *task) +void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)  {  	sl->timer.function = hrtimer_wakeup;  	sl->task = task; diff --git a/kernel/pid.c b/kernel/pid.c index eeb836b65ca..93e212f2067 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -218,7 +218,7 @@ struct pid * fastcall find_pid(int nr)  	return NULL;  } -int fastcall attach_pid(task_t *task, enum pid_type type, int nr) +int fastcall attach_pid(struct task_struct *task, enum pid_type type, int nr)  {  	struct pid_link *link;  	struct pid *pid; @@ -233,7 +233,7 @@ int fastcall attach_pid(task_t *task, enum pid_type type, int nr)  	return 0;  } -void fastcall detach_pid(task_t *task, enum pid_type type) +void fastcall detach_pid(struct task_struct *task, enum pid_type type)  {  	struct pid_link *link;  	struct pid *pid; @@ -267,7 +267,7 @@ struct task_struct * fastcall pid_task(struct pid *pid, enum pid_type type)  /*   * Must be called under rcu_read_lock() or with tasklist_lock read-held.   */ -task_t *find_task_by_pid_type(int type, int nr) +struct task_struct *find_task_by_pid_type(int type, int nr)  {  	return pid_task(find_pid(nr), type);  } diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 335c5b932e1..9a111f70145 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -28,7 +28,7 @@   *   * Must be called with the tasklist lock write-held.   */ -void __ptrace_link(task_t *child, task_t *new_parent) +void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)  {  	BUG_ON(!list_empty(&child->ptrace_list));  	if (child->parent == new_parent) @@ -46,7 +46,7 @@ void __ptrace_link(task_t *child, task_t *new_parent)   * TASK_TRACED, resume it now.   * Requires that irqs be disabled.   */ -void ptrace_untrace(task_t *child) +void ptrace_untrace(struct task_struct *child)  {  	spin_lock(&child->sighand->siglock);  	if (child->state == TASK_TRACED) { @@ -65,7 +65,7 @@ void ptrace_untrace(task_t *child)   *   * Must be called with the tasklist lock write-held.   
*/ -void __ptrace_unlink(task_t *child) +void __ptrace_unlink(struct task_struct *child)  {  	BUG_ON(!child->ptrace); diff --git a/kernel/rtmutex-debug.c b/kernel/rtmutex-debug.c index 353a853bc39..0c1faa950af 100644 --- a/kernel/rtmutex-debug.c +++ b/kernel/rtmutex-debug.c @@ -96,7 +96,7 @@ void deadlock_trace_off(void)  	rt_trace_on = 0;  } -static void printk_task(task_t *p) +static void printk_task(struct task_struct *p)  {  	if (p)  		printk("%16s:%5d [%p, %3d]", p->comm, p->pid, p, p->prio); @@ -231,7 +231,8 @@ void debug_rt_mutex_init(struct rt_mutex *lock, const char *name)  	lock->name = name;  } -void rt_mutex_deadlock_account_lock(struct rt_mutex *lock, task_t *task) +void +rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task)  {  } diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c index e82c2f84824..494dac872a1 100644 --- a/kernel/rtmutex-tester.c +++ b/kernel/rtmutex-tester.c @@ -33,7 +33,7 @@ struct test_thread_data {  };  static struct test_thread_data thread_data[MAX_RT_TEST_THREADS]; -static task_t *threads[MAX_RT_TEST_THREADS]; +static struct task_struct *threads[MAX_RT_TEST_THREADS];  static struct rt_mutex mutexes[MAX_RT_TEST_MUTEXES];  enum test_opcodes { @@ -361,8 +361,8 @@ static ssize_t sysfs_test_command(struct sys_device *dev, const char *buf,  static ssize_t sysfs_test_status(struct sys_device *dev, char *buf)  {  	struct test_thread_data *td; +	struct task_struct *tsk;  	char *curr = buf; -	task_t *tsk;  	int i;  	td = container_of(dev, struct test_thread_data, sysdev); diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c index 91b699aa658..d2ef13b485e 100644 --- a/kernel/rtmutex.c +++ b/kernel/rtmutex.c @@ -157,7 +157,7 @@ int max_lock_depth = 1024;   * Decreases task's usage by one - may thus free the task.   * Returns 0 or -EDEADLK.   
*/ -static int rt_mutex_adjust_prio_chain(task_t *task, +static int rt_mutex_adjust_prio_chain(struct task_struct *task,  				      int deadlock_detect,  				      struct rt_mutex *orig_lock,  				      struct rt_mutex_waiter *orig_waiter, @@ -282,6 +282,7 @@ static int rt_mutex_adjust_prio_chain(task_t *task,  	spin_unlock_irqrestore(&task->pi_lock, flags);   out_put_task:  	put_task_struct(task); +  	return ret;  } @@ -403,10 +404,10 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,  				   struct rt_mutex_waiter *waiter,  				   int detect_deadlock)  { +	struct task_struct *owner = rt_mutex_owner(lock);  	struct rt_mutex_waiter *top_waiter = waiter; -	task_t *owner = rt_mutex_owner(lock); -	int boost = 0, res;  	unsigned long flags; +	int boost = 0, res;  	spin_lock_irqsave(¤t->pi_lock, flags);  	__rt_mutex_adjust_prio(current); @@ -527,9 +528,9 @@ static void remove_waiter(struct rt_mutex *lock,  			  struct rt_mutex_waiter *waiter)  {  	int first = (waiter == rt_mutex_top_waiter(lock)); -	int boost = 0; -	task_t *owner = rt_mutex_owner(lock); +	struct task_struct *owner = rt_mutex_owner(lock);  	unsigned long flags; +	int boost = 0;  	spin_lock_irqsave(¤t->pi_lock, flags);  	plist_del(&waiter->list_entry, &lock->wait_list); diff --git a/kernel/sched.c b/kernel/sched.c index b0326141f84..021b3121951 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -179,7 +179,7 @@ static unsigned int static_prio_timeslice(int static_prio)  		return SCALE_PRIO(DEF_TIMESLICE, static_prio);  } -static inline unsigned int task_timeslice(task_t *p) +static inline unsigned int task_timeslice(struct task_struct *p)  {  	return static_prio_timeslice(p->static_prio);  } @@ -227,7 +227,7 @@ struct runqueue {  	unsigned long expired_timestamp;  	unsigned long long timestamp_last_tick; -	task_t *curr, *idle; +	struct task_struct *curr, *idle;  	struct mm_struct *prev_mm;  	prio_array_t *active, *expired, arrays[2];  	int best_expired_prio; @@ -240,7 +240,7 @@ struct runqueue {  	int active_balance;  	int push_cpu; -	task_t *migration_thread; +	struct task_struct *migration_thread;  	struct list_head migration_queue;  #endif @@ -291,16 +291,16 @@ static DEFINE_PER_CPU(struct runqueue, runqueues);  #endif  #ifndef __ARCH_WANT_UNLOCKED_CTXSW -static inline int task_running(runqueue_t *rq, task_t *p) +static inline int task_running(runqueue_t *rq, struct task_struct *p)  {  	return rq->curr == p;  } -static inline void prepare_lock_switch(runqueue_t *rq, task_t *next) +static inline void prepare_lock_switch(runqueue_t *rq, struct task_struct *next)  {  } -static inline void finish_lock_switch(runqueue_t *rq, task_t *prev) +static inline void finish_lock_switch(runqueue_t *rq, struct task_struct *prev)  {  #ifdef CONFIG_DEBUG_SPINLOCK  	/* this is a valid case when another task releases the spinlock */ @@ -317,7 +317,7 @@ static inline void finish_lock_switch(runqueue_t *rq, task_t *prev)  }  #else /* __ARCH_WANT_UNLOCKED_CTXSW */ -static inline int task_running(runqueue_t *rq, task_t *p) +static inline int task_running(runqueue_t *rq, struct task_struct *p)  {  #ifdef CONFIG_SMP  	return p->oncpu; @@ -326,7 +326,7 @@ static inline int task_running(runqueue_t *rq, task_t *p)  #endif  } -static inline void prepare_lock_switch(runqueue_t *rq, task_t *next) +static inline void prepare_lock_switch(runqueue_t *rq, struct task_struct *next)  {  #ifdef CONFIG_SMP  	/* @@ -343,7 +343,7 @@ static inline void prepare_lock_switch(runqueue_t *rq, task_t *next)  #endif  } -static inline void 
finish_lock_switch(runqueue_t *rq, task_t *prev) +static inline void finish_lock_switch(runqueue_t *rq, struct task_struct *prev)  {  #ifdef CONFIG_SMP  	/* @@ -364,7 +364,7 @@ static inline void finish_lock_switch(runqueue_t *rq, task_t *prev)   * __task_rq_lock - lock the runqueue a given task resides on.   * Must be called interrupts disabled.   */ -static inline runqueue_t *__task_rq_lock(task_t *p) +static inline runqueue_t *__task_rq_lock(struct task_struct *p)  	__acquires(rq->lock)  {  	struct runqueue *rq; @@ -384,7 +384,7 @@ repeat_lock_task:   * interrupts.  Note the ordering: we can safely lookup the task_rq without   * explicitly disabling preemption.   */ -static runqueue_t *task_rq_lock(task_t *p, unsigned long *flags) +static runqueue_t *task_rq_lock(struct task_struct *p, unsigned long *flags)  	__acquires(rq->lock)  {  	struct runqueue *rq; @@ -541,7 +541,7 @@ static inline runqueue_t *this_rq_lock(void)   * long it was from the *first* time it was queued to the time that it   * finally hit a cpu.   */ -static inline void sched_info_dequeued(task_t *t) +static inline void sched_info_dequeued(struct task_struct *t)  {  	t->sched_info.last_queued = 0;  } @@ -551,7 +551,7 @@ static inline void sched_info_dequeued(task_t *t)   * long it was waiting to run.  We also note when it began so that we   * can keep stats on how long its timeslice is.   */ -static void sched_info_arrive(task_t *t) +static void sched_info_arrive(struct task_struct *t)  {  	unsigned long now = jiffies, diff = 0;  	struct runqueue *rq = task_rq(t); @@ -585,7 +585,7 @@ static void sched_info_arrive(task_t *t)   * the timestamp if it is already not set.  It's assumed that   * sched_info_dequeued() will clear that stamp when appropriate.   */ -static inline void sched_info_queued(task_t *t) +static inline void sched_info_queued(struct task_struct *t)  {  	if (!t->sched_info.last_queued)  		t->sched_info.last_queued = jiffies; @@ -595,7 +595,7 @@ static inline void sched_info_queued(task_t *t)   * Called when a process ceases being the active-running process, either   * voluntarily or involuntarily.  Now we can calculate how long we ran.   */ -static inline void sched_info_depart(task_t *t) +static inline void sched_info_depart(struct task_struct *t)  {  	struct runqueue *rq = task_rq(t);  	unsigned long diff = jiffies - t->sched_info.last_arrival; @@ -611,7 +611,8 @@ static inline void sched_info_depart(task_t *t)   * their time slice.  (This may also be called when switching to or from   * the idle task.)  We are only called when prev != next.   */ -static inline void sched_info_switch(task_t *prev, task_t *next) +static inline void +sched_info_switch(struct task_struct *prev, struct task_struct *next)  {  	struct runqueue *rq = task_rq(prev); @@ -683,7 +684,7 @@ static inline void enqueue_task_head(struct task_struct *p, prio_array_t *array)   * Both properties are important to certain workloads.   
*/ -static inline int __normal_prio(task_t *p) +static inline int __normal_prio(struct task_struct *p)  {  	int bonus, prio; @@ -719,7 +720,7 @@ static inline int __normal_prio(task_t *p)  #define RTPRIO_TO_LOAD_WEIGHT(rp) \  	(PRIO_TO_LOAD_WEIGHT(MAX_RT_PRIO) + LOAD_WEIGHT(rp)) -static void set_load_weight(task_t *p) +static void set_load_weight(struct task_struct *p)  {  	if (has_rt_policy(p)) {  #ifdef CONFIG_SMP @@ -737,23 +738,25 @@ static void set_load_weight(task_t *p)  		p->load_weight = PRIO_TO_LOAD_WEIGHT(p->static_prio);  } -static inline void inc_raw_weighted_load(runqueue_t *rq, const task_t *p) +static inline void +inc_raw_weighted_load(runqueue_t *rq, const struct task_struct *p)  {  	rq->raw_weighted_load += p->load_weight;  } -static inline void dec_raw_weighted_load(runqueue_t *rq, const task_t *p) +static inline void +dec_raw_weighted_load(runqueue_t *rq, const struct task_struct *p)  {  	rq->raw_weighted_load -= p->load_weight;  } -static inline void inc_nr_running(task_t *p, runqueue_t *rq) +static inline void inc_nr_running(struct task_struct *p, runqueue_t *rq)  {  	rq->nr_running++;  	inc_raw_weighted_load(rq, p);  } -static inline void dec_nr_running(task_t *p, runqueue_t *rq) +static inline void dec_nr_running(struct task_struct *p, runqueue_t *rq)  {  	rq->nr_running--;  	dec_raw_weighted_load(rq, p); @@ -766,7 +769,7 @@ static inline void dec_nr_running(task_t *p, runqueue_t *rq)   * setprio syscalls, and whenever the interactivity   * estimator recalculates.   */ -static inline int normal_prio(task_t *p) +static inline int normal_prio(struct task_struct *p)  {  	int prio; @@ -784,7 +787,7 @@ static inline int normal_prio(task_t *p)   * interactivity modifiers. Will be RT if the task got   * RT-boosted. If not then it returns p->normal_prio.   */ -static int effective_prio(task_t *p) +static int effective_prio(struct task_struct *p)  {  	p->normal_prio = normal_prio(p);  	/* @@ -800,7 +803,7 @@ static int effective_prio(task_t *p)  /*   * __activate_task - move a task to the runqueue.   */ -static void __activate_task(task_t *p, runqueue_t *rq) +static void __activate_task(struct task_struct *p, runqueue_t *rq)  {  	prio_array_t *target = rq->active; @@ -813,7 +816,7 @@ static void __activate_task(task_t *p, runqueue_t *rq)  /*   * __activate_idle_task - move idle task to the _front_ of runqueue.   */ -static inline void __activate_idle_task(task_t *p, runqueue_t *rq) +static inline void __activate_idle_task(struct task_struct *p, runqueue_t *rq)  {  	enqueue_task_head(p, rq->active);  	inc_nr_running(p, rq); @@ -823,7 +826,7 @@ static inline void __activate_idle_task(task_t *p, runqueue_t *rq)   * Recalculate p->normal_prio and p->prio after having slept,   * updating the sleep-average too:   */ -static int recalc_task_prio(task_t *p, unsigned long long now) +static int recalc_task_prio(struct task_struct *p, unsigned long long now)  {  	/* Caller must always ensure 'now >= p->timestamp' */  	unsigned long sleep_time = now - p->timestamp; @@ -895,7 +898,7 @@ static int recalc_task_prio(task_t *p, unsigned long long now)   * Update all the scheduling statistics stuff. (sleep average   * calculation, priority modifiers, etc.)   
*/ -static void activate_task(task_t *p, runqueue_t *rq, int local) +static void activate_task(struct task_struct *p, runqueue_t *rq, int local)  {  	unsigned long long now; @@ -962,7 +965,7 @@ static void deactivate_task(struct task_struct *p, runqueue_t *rq)  #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)  #endif -static void resched_task(task_t *p) +static void resched_task(struct task_struct *p)  {  	int cpu; @@ -983,7 +986,7 @@ static void resched_task(task_t *p)  		smp_send_reschedule(cpu);  }  #else -static inline void resched_task(task_t *p) +static inline void resched_task(struct task_struct *p)  {  	assert_spin_locked(&task_rq(p)->lock);  	set_tsk_need_resched(p); @@ -994,7 +997,7 @@ static inline void resched_task(task_t *p)   * task_curr - is this task currently executing on a CPU?   * @p: the task in question.   */ -inline int task_curr(const task_t *p) +inline int task_curr(const struct task_struct *p)  {  	return cpu_curr(task_cpu(p)) == p;  } @@ -1009,7 +1012,7 @@ unsigned long weighted_cpuload(const int cpu)  typedef struct {  	struct list_head list; -	task_t *task; +	struct task_struct *task;  	int dest_cpu;  	struct completion done; @@ -1019,7 +1022,8 @@ typedef struct {   * The task's runqueue lock must be held.   * Returns true if you have to wait for migration thread.   */ -static int migrate_task(task_t *p, int dest_cpu, migration_req_t *req) +static int +migrate_task(struct task_struct *p, int dest_cpu, migration_req_t *req)  {  	runqueue_t *rq = task_rq(p); @@ -1049,7 +1053,7 @@ static int migrate_task(task_t *p, int dest_cpu, migration_req_t *req)   * smp_call_function() if an IPI is sent by the same process we are   * waiting to become inactive.   */ -void wait_task_inactive(task_t *p) +void wait_task_inactive(struct task_struct *p)  {  	unsigned long flags;  	runqueue_t *rq; @@ -1083,7 +1087,7 @@ repeat:   * to another CPU then no harm is done and the purpose has been   * achieved as well.   */ -void kick_process(task_t *p) +void kick_process(struct task_struct *p)  {  	int cpu; @@ -1286,7 +1290,7 @@ nextlevel:   * Returns the CPU we should wake onto.   */  #if defined(ARCH_HAS_SCHED_WAKE_IDLE) -static int wake_idle(int cpu, task_t *p) +static int wake_idle(int cpu, struct task_struct *p)  {  	cpumask_t tmp;  	struct sched_domain *sd; @@ -1309,7 +1313,7 @@ static int wake_idle(int cpu, task_t *p)  	return cpu;  }  #else -static inline int wake_idle(int cpu, task_t *p) +static inline int wake_idle(int cpu, struct task_struct *p)  {  	return cpu;  } @@ -1329,7 +1333,7 @@ static inline int wake_idle(int cpu, task_t *p)   *   * returns failure only if the task is already active.   */ -static int try_to_wake_up(task_t *p, unsigned int state, int sync) +static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)  {  	int cpu, this_cpu, success = 0;  	unsigned long flags; @@ -1487,14 +1491,14 @@ out:  	return success;  } -int fastcall wake_up_process(task_t *p) +int fastcall wake_up_process(struct task_struct *p)  {  	return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED |  				 TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);  }  EXPORT_SYMBOL(wake_up_process); -int fastcall wake_up_state(task_t *p, unsigned int state) +int fastcall wake_up_state(struct task_struct *p, unsigned int state)  {  	return try_to_wake_up(p, state, 0);  } @@ -1503,7 +1507,7 @@ int fastcall wake_up_state(task_t *p, unsigned int state)   * Perform scheduler related setup for a newly forked process p.   * p is forked by current.   
*/ -void fastcall sched_fork(task_t *p, int clone_flags) +void fastcall sched_fork(struct task_struct *p, int clone_flags)  {  	int cpu = get_cpu(); @@ -1571,7 +1575,7 @@ void fastcall sched_fork(task_t *p, int clone_flags)   * that must be done for every newly created context, then puts the task   * on the runqueue and wakes it.   */ -void fastcall wake_up_new_task(task_t *p, unsigned long clone_flags) +void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)  {  	unsigned long flags;  	int this_cpu, cpu; @@ -1655,7 +1659,7 @@ void fastcall wake_up_new_task(task_t *p, unsigned long clone_flags)   * artificially, because any timeslice recovered here   * was given away by the parent in the first place.)   */ -void fastcall sched_exit(task_t *p) +void fastcall sched_exit(struct task_struct *p)  {  	unsigned long flags;  	runqueue_t *rq; @@ -1689,7 +1693,7 @@ void fastcall sched_exit(task_t *p)   * prepare_task_switch sets up locking and calls architecture specific   * hooks.   */ -static inline void prepare_task_switch(runqueue_t *rq, task_t *next) +static inline void prepare_task_switch(runqueue_t *rq, struct task_struct *next)  {  	prepare_lock_switch(rq, next);  	prepare_arch_switch(next); @@ -1710,7 +1714,7 @@ static inline void prepare_task_switch(runqueue_t *rq, task_t *next)   * with the lock held can cause deadlocks; see schedule() for   * details.)   */ -static inline void finish_task_switch(runqueue_t *rq, task_t *prev) +static inline void finish_task_switch(runqueue_t *rq, struct task_struct *prev)  	__releases(rq->lock)  {  	struct mm_struct *mm = rq->prev_mm; @@ -1748,7 +1752,7 @@ static inline void finish_task_switch(runqueue_t *rq, task_t *prev)   * schedule_tail - first thing a freshly forked thread must call.   * @prev: the thread we just switched away from.   */ -asmlinkage void schedule_tail(task_t *prev) +asmlinkage void schedule_tail(struct task_struct *prev)  	__releases(rq->lock)  {  	runqueue_t *rq = this_rq(); @@ -1765,8 +1769,9 @@ asmlinkage void schedule_tail(task_t *prev)   * context_switch - switch to the new MM and the new   * thread's register state.   */ -static inline -task_t * context_switch(runqueue_t *rq, task_t *prev, task_t *next) +static inline struct task_struct * +context_switch(runqueue_t *rq, struct task_struct *prev, +	       struct task_struct *next)  {  	struct mm_struct *mm = next->mm;  	struct mm_struct *oldmm = prev->active_mm; @@ -1937,7 +1942,7 @@ static void double_lock_balance(runqueue_t *this_rq, runqueue_t *busiest)   * allow dest_cpu, which will force the cpu onto dest_cpu.  Then   * the cpu_allowed mask is restored.   */ -static void sched_migrate_task(task_t *p, int dest_cpu) +static void sched_migrate_task(struct task_struct *p, int dest_cpu)  {  	migration_req_t req;  	runqueue_t *rq; @@ -1952,11 +1957,13 @@ static void sched_migrate_task(task_t *p, int dest_cpu)  	if (migrate_task(p, dest_cpu, &req)) {  		/* Need to wait for migration thread (might exit: take ref). */  		struct task_struct *mt = rq->migration_thread; +  		get_task_struct(mt);  		task_rq_unlock(rq, &flags);  		wake_up_process(mt);  		put_task_struct(mt);  		wait_for_completion(&req.done); +  		return;  	}  out: @@ -1980,9 +1987,9 @@ void sched_exec(void)   * pull_task - move a task from a remote runqueue to the local runqueue.   * Both runqueues must be locked.   
*/ -static -void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p, -	       runqueue_t *this_rq, prio_array_t *this_array, int this_cpu) +static void pull_task(runqueue_t *src_rq, prio_array_t *src_array, +		      struct task_struct *p, runqueue_t *this_rq, +		      prio_array_t *this_array, int this_cpu)  {  	dequeue_task(p, src_array);  	dec_nr_running(p, src_rq); @@ -2003,7 +2010,7 @@ void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p,   * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?   */  static -int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu, +int can_migrate_task(struct task_struct *p, runqueue_t *rq, int this_cpu,  		     struct sched_domain *sd, enum idle_type idle,  		     int *all_pinned)  { @@ -2052,8 +2059,8 @@ static int move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest,  	    best_prio_seen, skip_for_load;  	prio_array_t *array, *dst_array;  	struct list_head *head, *curr; +	struct task_struct *tmp;  	long rem_load_move; -	task_t *tmp;  	if (max_nr_move == 0 || max_load_move == 0)  		goto out; @@ -2105,7 +2112,7 @@ skip_bitmap:  	head = array->queue + idx;  	curr = head->prev;  skip_queue: -	tmp = list_entry(curr, task_t, run_list); +	tmp = list_entry(curr, struct task_struct, run_list);  	curr = curr->prev; @@ -2819,7 +2826,7 @@ EXPORT_PER_CPU_SYMBOL(kstat);   * Bank in p->sched_time the ns elapsed since the last tick or switch.   */  static inline void -update_cpu_clock(task_t *p, runqueue_t *rq, unsigned long long now) +update_cpu_clock(struct task_struct *p, runqueue_t *rq, unsigned long long now)  {  	p->sched_time += now - max(p->timestamp, rq->timestamp_last_tick);  } @@ -2828,7 +2835,7 @@ update_cpu_clock(task_t *p, runqueue_t *rq, unsigned long long now)   * Return current->sched_time plus any more ns on the sched_clock   * that have not yet been banked.   */ -unsigned long long current_sched_time(const task_t *p) +unsigned long long current_sched_time(const struct task_struct *p)  {  	unsigned long long ns;  	unsigned long flags; @@ -2945,9 +2952,9 @@ void account_steal_time(struct task_struct *p, cputime_t steal)  void scheduler_tick(void)  {  	unsigned long long now = sched_clock(); +	struct task_struct *p = current;  	int cpu = smp_processor_id();  	runqueue_t *rq = this_rq(); -	task_t *p = current;  	update_cpu_clock(p, rq, now); @@ -3079,7 +3086,8 @@ static void wake_sleeping_dependent(int this_cpu)   * utilize, if another task runs on a sibling. This models the   * slowdown effect of other tasks running on siblings:   */ -static inline unsigned long smt_slice(task_t *p, struct sched_domain *sd) +static inline unsigned long +smt_slice(struct task_struct *p, struct sched_domain *sd)  {  	return p->time_slice * (100 - sd->per_cpu_gain) / 100;  } @@ -3090,7 +3098,8 @@ static inline unsigned long smt_slice(task_t *p, struct sched_domain *sd)   * acquire their lock. As we only trylock the normal locking order does not   * need to be obeyed.   
*/ -static int dependent_sleeper(int this_cpu, runqueue_t *this_rq, task_t *p) +static int +dependent_sleeper(int this_cpu, runqueue_t *this_rq, struct task_struct *p)  {  	struct sched_domain *tmp, *sd = NULL;  	int ret = 0, i; @@ -3110,8 +3119,8 @@ static int dependent_sleeper(int this_cpu, runqueue_t *this_rq, task_t *p)  		return 0;  	for_each_cpu_mask(i, sd->span) { +		struct task_struct *smt_curr;  		runqueue_t *smt_rq; -		task_t *smt_curr;  		if (i == this_cpu)  			continue; @@ -3157,7 +3166,7 @@ static inline void wake_sleeping_dependent(int this_cpu)  {  }  static inline int -dependent_sleeper(int this_cpu, runqueue_t *this_rq, task_t *p) +dependent_sleeper(int this_cpu, runqueue_t *this_rq, struct task_struct *p)  {  	return 0;  } @@ -3211,11 +3220,11 @@ static inline int interactive_sleep(enum sleep_type sleep_type)   */  asmlinkage void __sched schedule(void)  { +	struct task_struct *prev, *next;  	struct list_head *queue;  	unsigned long long now;  	unsigned long run_time;  	int cpu, idx, new_prio; -	task_t *prev, *next;  	prio_array_t *array;  	long *switch_count;  	runqueue_t *rq; @@ -3308,7 +3317,7 @@ need_resched_nonpreemptible:  	idx = sched_find_first_bit(array->bitmap);  	queue = array->queue + idx; -	next = list_entry(queue->next, task_t, run_list); +	next = list_entry(queue->next, struct task_struct, run_list);  	if (!rt_task(next) && interactive_sleep(next->sleep_type)) {  		unsigned long long delta = now - next->timestamp; @@ -3776,7 +3785,7 @@ EXPORT_SYMBOL(sleep_on_timeout);   *   * Used by the rt_mutex code to implement priority inheritance logic.   */ -void rt_mutex_setprio(task_t *p, int prio) +void rt_mutex_setprio(struct task_struct *p, int prio)  {  	unsigned long flags;  	prio_array_t *array; @@ -3817,7 +3826,7 @@ void rt_mutex_setprio(task_t *p, int prio)  #endif -void set_user_nice(task_t *p, long nice) +void set_user_nice(struct task_struct *p, long nice)  {  	int old_prio, delta;  	unsigned long flags; @@ -3873,7 +3882,7 @@ EXPORT_SYMBOL(set_user_nice);   * @p: task   * @nice: nice value   */ -int can_nice(const task_t *p, const int nice) +int can_nice(const struct task_struct *p, const int nice)  {  	/* convert nice value [19,-20] to rlimit style value [1,40] */  	int nice_rlim = 20 - nice; @@ -3932,7 +3941,7 @@ asmlinkage long sys_nice(int increment)   * RT tasks are offset by -200. Normal tasks are centered   * around 0, value goes from -16 to +15.   */ -int task_prio(const task_t *p) +int task_prio(const struct task_struct *p)  {  	return p->prio - MAX_RT_PRIO;  } @@ -3941,7 +3950,7 @@ int task_prio(const task_t *p)   * task_nice - return the nice value of a given task.   * @p: the task in question.   */ -int task_nice(const task_t *p) +int task_nice(const struct task_struct *p)  {  	return TASK_NICE(p);  } @@ -3960,7 +3969,7 @@ int idle_cpu(int cpu)   * idle_task - return the idle task for a given cpu.   * @cpu: the processor in question.   */ -task_t *idle_task(int cpu) +struct task_struct *idle_task(int cpu)  {  	return cpu_rq(cpu)->idle;  } @@ -3969,7 +3978,7 @@ task_t *idle_task(int cpu)   * find_process_by_pid - find a process with a matching PID value.   * @pid: the pid in question.   */ -static inline task_t *find_process_by_pid(pid_t pid) +static inline struct task_struct *find_process_by_pid(pid_t pid)  {  	return pid ? 
find_task_by_pid(pid) : current;  } @@ -4103,9 +4112,9 @@ EXPORT_SYMBOL_GPL(sched_setscheduler);  static int  do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)  { -	int retval;  	struct sched_param lparam;  	struct task_struct *p; +	int retval;  	if (!param || pid < 0)  		return -EINVAL; @@ -4121,6 +4130,7 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)  	read_unlock_irq(&tasklist_lock);  	retval = sched_setscheduler(p, policy, &lparam);  	put_task_struct(p); +  	return retval;  } @@ -4156,8 +4166,8 @@ asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param)   */  asmlinkage long sys_sched_getscheduler(pid_t pid)  { +	struct task_struct *p;  	int retval = -EINVAL; -	task_t *p;  	if (pid < 0)  		goto out_nounlock; @@ -4184,8 +4194,8 @@ out_nounlock:  asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param)  {  	struct sched_param lp; +	struct task_struct *p;  	int retval = -EINVAL; -	task_t *p;  	if (!param || pid < 0)  		goto out_nounlock; @@ -4218,9 +4228,9 @@ out_unlock:  long sched_setaffinity(pid_t pid, cpumask_t new_mask)  { -	task_t *p; -	int retval;  	cpumask_t cpus_allowed; +	struct task_struct *p; +	int retval;  	lock_cpu_hotplug();  	read_lock(&tasklist_lock); @@ -4306,8 +4316,8 @@ cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL;  long sched_getaffinity(pid_t pid, cpumask_t *mask)  { +	struct task_struct *p;  	int retval; -	task_t *p;  	lock_cpu_hotplug();  	read_lock(&tasklist_lock); @@ -4592,9 +4602,9 @@ asmlinkage long sys_sched_get_priority_min(int policy)  asmlinkage  long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)  { +	struct task_struct *p;  	int retval = -EINVAL;  	struct timespec t; -	task_t *p;  	if (pid < 0)  		goto out_nounlock; @@ -4641,12 +4651,13 @@ static inline struct task_struct *younger_sibling(struct task_struct *p)  	return list_entry(p->sibling.next,struct task_struct,sibling);  } -static void show_task(task_t *p) +static const char *stat_nam[] = { "R", "S", "D", "T", "t", "Z", "X" }; + +static void show_task(struct task_struct *p)  { -	task_t *relative; -	unsigned state; +	struct task_struct *relative;  	unsigned long free = 0; -	static const char *stat_nam[] = { "R", "S", "D", "T", "t", "Z", "X" }; +	unsigned state;  	printk("%-13.13s ", p->comm);  	state = p->state ? __ffs(p->state) + 1 : 0; @@ -4697,7 +4708,7 @@ static void show_task(task_t *p)  void show_state(void)  { -	task_t *g, *p; +	struct task_struct *g, *p;  #if (BITS_PER_LONG == 32)  	printk("\n" @@ -4730,7 +4741,7 @@ void show_state(void)   * NOTE: this function does not set the idle thread's NEED_RESCHED   * flag, to make booting more robust.   */ -void __devinit init_idle(task_t *idle, int cpu) +void __devinit init_idle(struct task_struct *idle, int cpu)  {  	runqueue_t *rq = cpu_rq(cpu);  	unsigned long flags; @@ -4793,7 +4804,7 @@ cpumask_t nohz_cpu_mask = CPU_MASK_NONE;   * task must not exit() & deallocate itself prematurely.  The   * call is not atomic; no spinlocks may be held.   
*/ -int set_cpus_allowed(task_t *p, cpumask_t new_mask) +int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)  {  	unsigned long flags;  	migration_req_t req; @@ -5061,7 +5072,7 @@ void idle_task_exit(void)  	mmdrop(mm);  } -static void migrate_dead(unsigned int dead_cpu, task_t *p) +static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)  {  	struct runqueue *rq = cpu_rq(dead_cpu); @@ -5096,9 +5107,8 @@ static void migrate_dead_tasks(unsigned int dead_cpu)  			struct list_head *list = &rq->arrays[arr].queue[i];  			while (!list_empty(list)) -				migrate_dead(dead_cpu, -					     list_entry(list->next, task_t, -							run_list)); +				migrate_dead(dead_cpu, list_entry(list->next, +					     struct task_struct, run_list));  		}  	}  } @@ -6801,7 +6811,7 @@ void normalize_rt_tasks(void)   *   * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!   */ -task_t *curr_task(int cpu) +struct task_struct *curr_task(int cpu)  {  	return cpu_curr(cpu);  } @@ -6821,7 +6831,7 @@ task_t *curr_task(int cpu)   *   * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!   */ -void set_curr_task(int cpu, task_t *p) +void set_curr_task(int cpu, struct task_struct *p)  {  	cpu_curr(cpu) = p;  } diff --git a/kernel/timer.c b/kernel/timer.c index b761898d04c..396a3c024c2 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -1368,7 +1368,7 @@ asmlinkage long sys_getegid(void)  static void process_timeout(unsigned long __data)  { -	wake_up_process((task_t *)__data); +	wake_up_process((struct task_struct *)__data);  }  /** diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 59f0b42bd89..90d2c600165 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -51,7 +51,7 @@ struct cpu_workqueue_struct {  	wait_queue_head_t work_done;  	struct workqueue_struct *wq; -	task_t *thread; +	struct task_struct *thread;  	int run_depth;		/* Detect run_workqueue() recursion depth */  } ____cacheline_aligned; diff --git a/mm/oom_kill.c b/mm/oom_kill.c index d46ed0f1dc0..b9af136e5cf 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -225,7 +225,7 @@ static struct task_struct *select_bad_process(unsigned long *ppoints)   * CAP_SYS_RAW_IO set, send SIGTERM instead (but it's unlikely that   * we select a process with CAP_SYS_RAW_IO set).   */ -static void __oom_kill_task(task_t *p, const char *message) +static void __oom_kill_task(struct task_struct *p, const char *message)  {  	if (p->pid == 1) {  		WARN_ON(1); @@ -255,10 +255,10 @@ static void __oom_kill_task(task_t *p, const char *message)  	force_sig(SIGKILL, p);  } -static int oom_kill_task(task_t *p, const char *message) +static int oom_kill_task(struct task_struct *p, const char *message)  {  	struct mm_struct *mm; -	task_t * g, * q; +	struct task_struct *g, *q;  	mm = p->mm; @@ -316,7 +316,7 @@ static int oom_kill_process(struct task_struct *p, unsigned long points,   */  void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order)  { -	task_t *p; +	struct task_struct *p;  	unsigned long points = 0;  	if (printk_ratelimit()) {  |