Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/compat.c           |  51 |
| -rw-r--r-- | kernel/exit.c             |  19 |
| -rw-r--r-- | kernel/fork.c             |  88 |
| -rw-r--r-- | kernel/itimer.c           |  33 |
| -rw-r--r-- | kernel/posix-cpu-timers.c | 471 |
| -rw-r--r-- | kernel/sched.c            |  53 |
| -rw-r--r-- | kernel/sched_fair.c       |   1 |
| -rw-r--r-- | kernel/sched_rt.c         |   4 |
| -rw-r--r-- | kernel/signal.c           |   8 |
| -rw-r--r-- | kernel/sys.c              |  73 |
10 files changed, 413 insertions, 388 deletions
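
The hunks below manipulate a handful of new types and fields (struct task_cputime, signal->cputime.totals, the cputime_expires caches) whose declarations live in include/linux/sched.h and are therefore outside this kernel/-limited diffstat. For orientation, here is a sketch of what those declarations presumably look like; the field names are taken from the hunks below, and the expiration-name aliases are inferred from the comparisons in task_cputime_expired():

/*
 * Sketch only -- the real declarations are in include/linux/sched.h and
 * are not part of this diff.  Field names match their uses below.
 */
struct task_cputime {
	cputime_t		utime;			/* user CPU time */
	cputime_t		stime;			/* system CPU time */
	unsigned long long	sum_exec_runtime;	/* scheduler time, ns */
};

/*
 * When a task_cputime caches expiration times rather than samples, the
 * fields appear to be reused under these names (an inference from
 * task_cputime_expired(), which checks virt against utime, prof against
 * utime + stime, and sched against sum_exec_runtime):
 */
#define virt_exp	utime
#define prof_exp	stime
#define sched_exp	sum_exec_runtime

/* Per-thread-group totals; on SMP kept per-CPU and summed on demand. */
struct thread_group_cputime {
	struct task_cputime	*totals;	/* alloc_percpu() on SMP */
};
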
diff --git a/kernel/compat.c b/kernel/compat.c index 32c254a8ab9..72650e39b3e 100644 --- a/kernel/compat.c +++ b/kernel/compat.c @@ -23,6 +23,7 @@  #include <linux/timex.h>  #include <linux/migrate.h>  #include <linux/posix-timers.h> +#include <linux/times.h>  #include <asm/uaccess.h> @@ -150,49 +151,23 @@ asmlinkage long compat_sys_setitimer(int which,  	return 0;  } +static compat_clock_t clock_t_to_compat_clock_t(clock_t x) +{ +	return compat_jiffies_to_clock_t(clock_t_to_jiffies(x)); +} +  asmlinkage long compat_sys_times(struct compat_tms __user *tbuf)  { -	/* -	 *	In the SMP world we might just be unlucky and have one of -	 *	the times increment as we use it. Since the value is an -	 *	atomically safe type this is just fine. Conceptually its -	 *	as if the syscall took an instant longer to occur. -	 */  	if (tbuf) { +		struct tms tms;  		struct compat_tms tmp; -		struct task_struct *tsk = current; -		struct task_struct *t; -		cputime_t utime, stime, cutime, cstime; - -		read_lock(&tasklist_lock); -		utime = tsk->signal->utime; -		stime = tsk->signal->stime; -		t = tsk; -		do { -			utime = cputime_add(utime, t->utime); -			stime = cputime_add(stime, t->stime); -			t = next_thread(t); -		} while (t != tsk); - -		/* -		 * While we have tasklist_lock read-locked, no dying thread -		 * can be updating current->signal->[us]time.  Instead, -		 * we got their counts included in the live thread loop. -		 * However, another thread can come in right now and -		 * do a wait call that updates current->signal->c[us]time. -		 * To make sure we always see that pair updated atomically, -		 * we take the siglock around fetching them. -		 */ -		spin_lock_irq(&tsk->sighand->siglock); -		cutime = tsk->signal->cutime; -		cstime = tsk->signal->cstime; -		spin_unlock_irq(&tsk->sighand->siglock); -		read_unlock(&tasklist_lock); -		tmp.tms_utime = compat_jiffies_to_clock_t(cputime_to_jiffies(utime)); -		tmp.tms_stime = compat_jiffies_to_clock_t(cputime_to_jiffies(stime)); -		tmp.tms_cutime = compat_jiffies_to_clock_t(cputime_to_jiffies(cutime)); -		tmp.tms_cstime = compat_jiffies_to_clock_t(cputime_to_jiffies(cstime)); +		do_sys_times(&tms); +		/* Convert our struct tms to the compat version. */ +		tmp.tms_utime = clock_t_to_compat_clock_t(tms.tms_utime); +		tmp.tms_stime = clock_t_to_compat_clock_t(tms.tms_stime); +		tmp.tms_cutime = clock_t_to_compat_clock_t(tms.tms_cutime); +		tmp.tms_cstime = clock_t_to_compat_clock_t(tms.tms_cstime);  		if (copy_to_user(tbuf, &tmp, sizeof(tmp)))  			return -EFAULT;  	} diff --git a/kernel/exit.c b/kernel/exit.c index 16395644a98..40036ac0427 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -112,8 +112,6 @@ static void __exit_signal(struct task_struct *tsk)  		 * We won't ever get here for the group leader, since it  		 * will have been the last reference on the signal_struct.  		 */ -		sig->utime = cputime_add(sig->utime, task_utime(tsk)); -		sig->stime = cputime_add(sig->stime, task_stime(tsk));  		sig->gtime = cputime_add(sig->gtime, task_gtime(tsk));  		sig->min_flt += tsk->min_flt;  		sig->maj_flt += tsk->maj_flt; @@ -122,7 +120,6 @@ static void __exit_signal(struct task_struct *tsk)  		sig->inblock += task_io_get_inblock(tsk);  		sig->oublock += task_io_get_oublock(tsk);  		task_io_accounting_add(&sig->ioac, &tsk->ioac); -		sig->sum_sched_runtime += tsk->se.sum_exec_runtime;  		sig = NULL; /* Marker for below. 
*/  	} @@ -1294,6 +1291,7 @@ static int wait_task_zombie(struct task_struct *p, int options,  	if (likely(!traced)) {  		struct signal_struct *psig;  		struct signal_struct *sig; +		struct task_cputime cputime;  		/*  		 * The resource counters for the group leader are in its @@ -1309,20 +1307,23 @@ static int wait_task_zombie(struct task_struct *p, int options,  		 * need to protect the access to p->parent->signal fields,  		 * as other threads in the parent group can be right  		 * here reaping other children at the same time. +		 * +		 * We use thread_group_cputime() to get times for the thread +		 * group, which consolidates times for all threads in the +		 * group including the group leader.  		 */  		spin_lock_irq(&p->parent->sighand->siglock);  		psig = p->parent->signal;  		sig = p->signal; +		thread_group_cputime(p, &cputime);  		psig->cutime =  			cputime_add(psig->cutime, -			cputime_add(p->utime, -			cputime_add(sig->utime, -				    sig->cutime))); +			cputime_add(cputime.utime, +				    sig->cutime));  		psig->cstime =  			cputime_add(psig->cstime, -			cputime_add(p->stime, -			cputime_add(sig->stime, -				    sig->cstime))); +			cputime_add(cputime.stime, +				    sig->cstime));  		psig->cgtime =  			cputime_add(psig->cgtime,  			cputime_add(p->gtime, diff --git a/kernel/fork.c b/kernel/fork.c index 7ce2ebe8479..a8ac2efb8e3 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -759,15 +759,44 @@ void __cleanup_sighand(struct sighand_struct *sighand)  		kmem_cache_free(sighand_cachep, sighand);  } + +/* + * Initialize POSIX timer handling for a thread group. + */ +static void posix_cpu_timers_init_group(struct signal_struct *sig) +{ +	/* Thread group counters. */ +	thread_group_cputime_init(sig); + +	/* Expiration times and increments. */ +	sig->it_virt_expires = cputime_zero; +	sig->it_virt_incr = cputime_zero; +	sig->it_prof_expires = cputime_zero; +	sig->it_prof_incr = cputime_zero; + +	/* Cached expiration times. */ +	sig->cputime_expires.prof_exp = cputime_zero; +	sig->cputime_expires.virt_exp = cputime_zero; +	sig->cputime_expires.sched_exp = 0; + +	/* The timer lists. 
*/ +	INIT_LIST_HEAD(&sig->cpu_timers[0]); +	INIT_LIST_HEAD(&sig->cpu_timers[1]); +	INIT_LIST_HEAD(&sig->cpu_timers[2]); +} +  static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)  {  	struct signal_struct *sig;  	int ret;  	if (clone_flags & CLONE_THREAD) { -		atomic_inc(¤t->signal->count); -		atomic_inc(¤t->signal->live); -		return 0; +		ret = thread_group_cputime_clone_thread(current, tsk); +		if (likely(!ret)) { +			atomic_inc(¤t->signal->count); +			atomic_inc(¤t->signal->live); +		} +		return ret;  	}  	sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);  	tsk->signal = sig; @@ -795,15 +824,10 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)  	sig->it_real_incr.tv64 = 0;  	sig->real_timer.function = it_real_fn; -	sig->it_virt_expires = cputime_zero; -	sig->it_virt_incr = cputime_zero; -	sig->it_prof_expires = cputime_zero; -	sig->it_prof_incr = cputime_zero; -  	sig->leader = 0;	/* session leadership doesn't inherit */  	sig->tty_old_pgrp = NULL; -	sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero; +	sig->cutime = sig->cstime = cputime_zero;  	sig->gtime = cputime_zero;  	sig->cgtime = cputime_zero;  	sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0; @@ -820,14 +844,8 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)  	memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);  	task_unlock(current->group_leader); -	if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) { -		/* -		 * New sole thread in the process gets an expiry time -		 * of the whole CPU time limit. -		 */ -		tsk->it_prof_expires = -			secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur); -	} +	posix_cpu_timers_init_group(sig); +  	acct_init_pacct(&sig->pacct);  	tty_audit_fork(sig); @@ -837,6 +855,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)  void __cleanup_signal(struct signal_struct *sig)  { +	thread_group_cputime_free(sig);  	exit_thread_group_keys(sig);  	kmem_cache_free(signal_cachep, sig);  } @@ -886,6 +905,19 @@ void mm_init_owner(struct mm_struct *mm, struct task_struct *p)  #endif /* CONFIG_MM_OWNER */  /* + * Initialize POSIX timer handling for a single task. + */ +static void posix_cpu_timers_init(struct task_struct *tsk) +{ +	tsk->cputime_expires.prof_exp = cputime_zero; +	tsk->cputime_expires.virt_exp = cputime_zero; +	tsk->cputime_expires.sched_exp = 0; +	INIT_LIST_HEAD(&tsk->cpu_timers[0]); +	INIT_LIST_HEAD(&tsk->cpu_timers[1]); +	INIT_LIST_HEAD(&tsk->cpu_timers[2]); +} + +/*   * This creates a new process as a copy of the old one,   * but does not actually start it yet.   
* @@ -995,12 +1027,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,  	task_io_accounting_init(&p->ioac);  	acct_clear_integrals(p); -	p->it_virt_expires = cputime_zero; -	p->it_prof_expires = cputime_zero; -	p->it_sched_expires = 0; -	INIT_LIST_HEAD(&p->cpu_timers[0]); -	INIT_LIST_HEAD(&p->cpu_timers[1]); -	INIT_LIST_HEAD(&p->cpu_timers[2]); +	posix_cpu_timers_init(p);  	p->lock_depth = -1;		/* -1 = no lock */  	do_posix_clock_monotonic_gettime(&p->start_time); @@ -1201,21 +1228,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,  	if (clone_flags & CLONE_THREAD) {  		p->group_leader = current->group_leader;  		list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group); - -		if (!cputime_eq(current->signal->it_virt_expires, -				cputime_zero) || -		    !cputime_eq(current->signal->it_prof_expires, -				cputime_zero) || -		    current->signal->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY || -		    !list_empty(¤t->signal->cpu_timers[0]) || -		    !list_empty(¤t->signal->cpu_timers[1]) || -		    !list_empty(¤t->signal->cpu_timers[2])) { -			/* -			 * Have child wake up on its first tick to check -			 * for process CPU timers. -			 */ -			p->it_prof_expires = jiffies_to_cputime(1); -		}  	}  	if (likely(p->pid)) { diff --git a/kernel/itimer.c b/kernel/itimer.c index ab982747d9b..db7c358b9a0 100644 --- a/kernel/itimer.c +++ b/kernel/itimer.c @@ -55,17 +55,15 @@ int do_getitimer(int which, struct itimerval *value)  		spin_unlock_irq(&tsk->sighand->siglock);  		break;  	case ITIMER_VIRTUAL: -		read_lock(&tasklist_lock);  		spin_lock_irq(&tsk->sighand->siglock);  		cval = tsk->signal->it_virt_expires;  		cinterval = tsk->signal->it_virt_incr;  		if (!cputime_eq(cval, cputime_zero)) { -			struct task_struct *t = tsk; -			cputime_t utime = tsk->signal->utime; -			do { -				utime = cputime_add(utime, t->utime); -				t = next_thread(t); -			} while (t != tsk); +			struct task_cputime cputime; +			cputime_t utime; + +			thread_group_cputime(tsk, &cputime); +			utime = cputime.utime;  			if (cputime_le(cval, utime)) { /* about to fire */  				cval = jiffies_to_cputime(1);  			} else { @@ -73,25 +71,19 @@ int do_getitimer(int which, struct itimerval *value)  			}  		}  		spin_unlock_irq(&tsk->sighand->siglock); -		read_unlock(&tasklist_lock);  		cputime_to_timeval(cval, &value->it_value);  		cputime_to_timeval(cinterval, &value->it_interval);  		break;  	case ITIMER_PROF: -		read_lock(&tasklist_lock);  		spin_lock_irq(&tsk->sighand->siglock);  		cval = tsk->signal->it_prof_expires;  		cinterval = tsk->signal->it_prof_incr;  		if (!cputime_eq(cval, cputime_zero)) { -			struct task_struct *t = tsk; -			cputime_t ptime = cputime_add(tsk->signal->utime, -						      tsk->signal->stime); -			do { -				ptime = cputime_add(ptime, -						    cputime_add(t->utime, -								t->stime)); -				t = next_thread(t); -			} while (t != tsk); +			struct task_cputime times; +			cputime_t ptime; + +			thread_group_cputime(tsk, ×); +			ptime = cputime_add(times.utime, times.stime);  			if (cputime_le(cval, ptime)) { /* about to fire */  				cval = jiffies_to_cputime(1);  			} else { @@ -99,7 +91,6 @@ int do_getitimer(int which, struct itimerval *value)  			}  		}  		spin_unlock_irq(&tsk->sighand->siglock); -		read_unlock(&tasklist_lock);  		cputime_to_timeval(cval, &value->it_value);  		cputime_to_timeval(cinterval, &value->it_interval);  		break; @@ -185,7 +176,6 @@ again:  	case ITIMER_VIRTUAL:  		nval = timeval_to_cputime(&value->it_value);  		ninterval = 
timeval_to_cputime(&value->it_interval); -		read_lock(&tasklist_lock);  		spin_lock_irq(&tsk->sighand->siglock);  		cval = tsk->signal->it_virt_expires;  		cinterval = tsk->signal->it_virt_incr; @@ -200,7 +190,6 @@ again:  		tsk->signal->it_virt_expires = nval;  		tsk->signal->it_virt_incr = ninterval;  		spin_unlock_irq(&tsk->sighand->siglock); -		read_unlock(&tasklist_lock);  		if (ovalue) {  			cputime_to_timeval(cval, &ovalue->it_value);  			cputime_to_timeval(cinterval, &ovalue->it_interval); @@ -209,7 +198,6 @@ again:  	case ITIMER_PROF:  		nval = timeval_to_cputime(&value->it_value);  		ninterval = timeval_to_cputime(&value->it_interval); -		read_lock(&tasklist_lock);  		spin_lock_irq(&tsk->sighand->siglock);  		cval = tsk->signal->it_prof_expires;  		cinterval = tsk->signal->it_prof_incr; @@ -224,7 +212,6 @@ again:  		tsk->signal->it_prof_expires = nval;  		tsk->signal->it_prof_incr = ninterval;  		spin_unlock_irq(&tsk->sighand->siglock); -		read_unlock(&tasklist_lock);  		if (ovalue) {  			cputime_to_timeval(cval, &ovalue->it_value);  			cputime_to_timeval(cinterval, &ovalue->it_interval); diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index c42a03aef36..dba1c334c3e 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c @@ -8,6 +8,99 @@  #include <linux/math64.h>  #include <asm/uaccess.h> +#ifdef CONFIG_SMP +/* + * Allocate the thread_group_cputime structure appropriately for SMP kernels + * and fill in the current values of the fields.  Called from copy_signal() + * via thread_group_cputime_clone_thread() when adding a second or subsequent + * thread to a thread group.  Assumes interrupts are enabled when called. + */ +int thread_group_cputime_alloc_smp(struct task_struct *tsk) +{ +	struct signal_struct *sig = tsk->signal; +	struct task_cputime *cputime; + +	/* +	 * If we have multiple threads and we don't already have a +	 * per-CPU task_cputime struct, allocate one and fill it in with +	 * the times accumulated so far. +	 */ +	if (sig->cputime.totals) +		return 0; +	cputime = alloc_percpu(struct task_cputime); +	if (cputime == NULL) +		return -ENOMEM; +	read_lock(&tasklist_lock); +	spin_lock_irq(&tsk->sighand->siglock); +	if (sig->cputime.totals) { +		spin_unlock_irq(&tsk->sighand->siglock); +		read_unlock(&tasklist_lock); +		free_percpu(cputime); +		return 0; +	} +	sig->cputime.totals = cputime; +	cputime = per_cpu_ptr(sig->cputime.totals, get_cpu()); +	cputime->utime = tsk->utime; +	cputime->stime = tsk->stime; +	cputime->sum_exec_runtime = tsk->se.sum_exec_runtime; +	put_cpu_no_resched(); +	spin_unlock_irq(&tsk->sighand->siglock); +	read_unlock(&tasklist_lock); +	return 0; +} + +/** + * thread_group_cputime_smp - Sum the thread group time fields across all CPUs. + * + * @tsk:	The task we use to identify the thread group. + * @times:	task_cputime structure in which we return the summed fields. + * + * Walk the list of CPUs to sum the per-CPU time fields in the thread group + * time structure. 
+ */ +void thread_group_cputime_smp( +	struct task_struct *tsk, +	struct task_cputime *times) +{ +	struct signal_struct *sig; +	int i; +	struct task_cputime *tot; + +	sig = tsk->signal; +	if (unlikely(!sig) || !sig->cputime.totals) { +		times->utime = tsk->utime; +		times->stime = tsk->stime; +		times->sum_exec_runtime = tsk->se.sum_exec_runtime; +		return; +	} +	times->stime = times->utime = cputime_zero; +	times->sum_exec_runtime = 0; +	for_each_possible_cpu(i) { +		tot = per_cpu_ptr(tsk->signal->cputime.totals, i); +		times->utime = cputime_add(times->utime, tot->utime); +		times->stime = cputime_add(times->stime, tot->stime); +		times->sum_exec_runtime += tot->sum_exec_runtime; +	} +} + +#endif /* CONFIG_SMP */ + +/* + * Called after updating RLIMIT_CPU to set timer expiration if necessary. + */ +void update_rlimit_cpu(unsigned long rlim_new) +{ +	cputime_t cputime; + +	cputime = secs_to_cputime(rlim_new); +	if (cputime_eq(current->signal->it_prof_expires, cputime_zero) || +            cputime_lt(current->signal->it_prof_expires, cputime)) { +		spin_lock_irq(¤t->sighand->siglock); +		set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL); +		spin_unlock_irq(¤t->sighand->siglock); +	} +} +  static int check_clock(const clockid_t which_clock)  {  	int error = 0; @@ -158,10 +251,6 @@ static inline cputime_t virt_ticks(struct task_struct *p)  {  	return p->utime;  } -static inline unsigned long long sched_ns(struct task_struct *p) -{ -	return task_sched_runtime(p); -}  int posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)  { @@ -211,7 +300,7 @@ static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,  		cpu->cpu = virt_ticks(p);  		break;  	case CPUCLOCK_SCHED: -		cpu->sched = sched_ns(p); +		cpu->sched = task_sched_runtime(p);  		break;  	}  	return 0; @@ -226,31 +315,20 @@ static int cpu_clock_sample_group_locked(unsigned int clock_idx,  					 struct task_struct *p,  					 union cpu_time_count *cpu)  { -	struct task_struct *t = p; - 	switch (clock_idx) { +	struct task_cputime cputime; + +	thread_group_cputime(p, &cputime); +	switch (clock_idx) {  	default:  		return -EINVAL;  	case CPUCLOCK_PROF: -		cpu->cpu = cputime_add(p->signal->utime, p->signal->stime); -		do { -			cpu->cpu = cputime_add(cpu->cpu, prof_ticks(t)); -			t = next_thread(t); -		} while (t != p); +		cpu->cpu = cputime_add(cputime.utime, cputime.stime);  		break;  	case CPUCLOCK_VIRT: -		cpu->cpu = p->signal->utime; -		do { -			cpu->cpu = cputime_add(cpu->cpu, virt_ticks(t)); -			t = next_thread(t); -		} while (t != p); +		cpu->cpu = cputime.utime;  		break;  	case CPUCLOCK_SCHED: -		cpu->sched = p->signal->sum_sched_runtime; -		/* Add in each other live thread.  */ -		while ((t = next_thread(t)) != p) { -			cpu->sched += t->se.sum_exec_runtime; -		} -		cpu->sched += sched_ns(p); +		cpu->sched = thread_group_sched_runtime(p);  		break;  	}  	return 0; @@ -471,80 +549,11 @@ void posix_cpu_timers_exit(struct task_struct *tsk)  }  void posix_cpu_timers_exit_group(struct task_struct *tsk)  { -	cleanup_timers(tsk->signal->cpu_timers, -		       cputime_add(tsk->utime, tsk->signal->utime), -		       cputime_add(tsk->stime, tsk->signal->stime), -		     tsk->se.sum_exec_runtime + tsk->signal->sum_sched_runtime); -} - - -/* - * Set the expiry times of all the threads in the process so one of them - * will go off before the process cumulative expiry total is reached. 
- */ -static void process_timer_rebalance(struct task_struct *p, -				    unsigned int clock_idx, -				    union cpu_time_count expires, -				    union cpu_time_count val) -{ -	cputime_t ticks, left; -	unsigned long long ns, nsleft; - 	struct task_struct *t = p; -	unsigned int nthreads = atomic_read(&p->signal->live); +	struct task_cputime cputime; -	if (!nthreads) -		return; - -	switch (clock_idx) { -	default: -		BUG(); -		break; -	case CPUCLOCK_PROF: -		left = cputime_div_non_zero(cputime_sub(expires.cpu, val.cpu), -				       nthreads); -		do { -			if (likely(!(t->flags & PF_EXITING))) { -				ticks = cputime_add(prof_ticks(t), left); -				if (cputime_eq(t->it_prof_expires, -					       cputime_zero) || -				    cputime_gt(t->it_prof_expires, ticks)) { -					t->it_prof_expires = ticks; -				} -			} -			t = next_thread(t); -		} while (t != p); -		break; -	case CPUCLOCK_VIRT: -		left = cputime_div_non_zero(cputime_sub(expires.cpu, val.cpu), -				       nthreads); -		do { -			if (likely(!(t->flags & PF_EXITING))) { -				ticks = cputime_add(virt_ticks(t), left); -				if (cputime_eq(t->it_virt_expires, -					       cputime_zero) || -				    cputime_gt(t->it_virt_expires, ticks)) { -					t->it_virt_expires = ticks; -				} -			} -			t = next_thread(t); -		} while (t != p); -		break; -	case CPUCLOCK_SCHED: -		nsleft = expires.sched - val.sched; -		do_div(nsleft, nthreads); -		nsleft = max_t(unsigned long long, nsleft, 1); -		do { -			if (likely(!(t->flags & PF_EXITING))) { -				ns = t->se.sum_exec_runtime + nsleft; -				if (t->it_sched_expires == 0 || -				    t->it_sched_expires > ns) { -					t->it_sched_expires = ns; -				} -			} -			t = next_thread(t); -		} while (t != p); -		break; -	} +	thread_group_cputime(tsk, &cputime); +	cleanup_timers(tsk->signal->cpu_timers, +		       cputime.utime, cputime.stime, cputime.sum_exec_runtime);  }  static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now) @@ -608,29 +617,32 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)  			default:  				BUG();  			case CPUCLOCK_PROF: -				if (cputime_eq(p->it_prof_expires, +				if (cputime_eq(p->cputime_expires.prof_exp,  					       cputime_zero) || -				    cputime_gt(p->it_prof_expires, +				    cputime_gt(p->cputime_expires.prof_exp,  					       nt->expires.cpu)) -					p->it_prof_expires = nt->expires.cpu; +					p->cputime_expires.prof_exp = +						nt->expires.cpu;  				break;  			case CPUCLOCK_VIRT: -				if (cputime_eq(p->it_virt_expires, +				if (cputime_eq(p->cputime_expires.virt_exp,  					       cputime_zero) || -				    cputime_gt(p->it_virt_expires, +				    cputime_gt(p->cputime_expires.virt_exp,  					       nt->expires.cpu)) -					p->it_virt_expires = nt->expires.cpu; +					p->cputime_expires.virt_exp = +						nt->expires.cpu;  				break;  			case CPUCLOCK_SCHED: -				if (p->it_sched_expires == 0 || -				    p->it_sched_expires > nt->expires.sched) -					p->it_sched_expires = nt->expires.sched; +				if (p->cputime_expires.sched_exp == 0 || +				    p->cputime_expires.sched_exp > +							nt->expires.sched) +					p->cputime_expires.sched_exp = +						nt->expires.sched;  				break;  			}  		} else {  			/* -			 * For a process timer, we must balance -			 * all the live threads' expirations. +			 * For a process timer, set the cached expiration time.  			 
*/  			switch (CPUCLOCK_WHICH(timer->it_clock)) {  			default: @@ -641,7 +653,9 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)  				    cputime_lt(p->signal->it_virt_expires,  					       timer->it.cpu.expires.cpu))  					break; -				goto rebalance; +				p->signal->cputime_expires.virt_exp = +					timer->it.cpu.expires.cpu; +				break;  			case CPUCLOCK_PROF:  				if (!cputime_eq(p->signal->it_prof_expires,  						cputime_zero) && @@ -652,13 +666,12 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)  				if (i != RLIM_INFINITY &&  				    i <= cputime_to_secs(timer->it.cpu.expires.cpu))  					break; -				goto rebalance; +				p->signal->cputime_expires.prof_exp = +					timer->it.cpu.expires.cpu; +				break;  			case CPUCLOCK_SCHED: -			rebalance: -				process_timer_rebalance( -					timer->it.cpu.task, -					CPUCLOCK_WHICH(timer->it_clock), -					timer->it.cpu.expires, now); +				p->signal->cputime_expires.sched_exp = +					timer->it.cpu.expires.sched;  				break;  			}  		} @@ -969,13 +982,13 @@ static void check_thread_timers(struct task_struct *tsk,  	struct signal_struct *const sig = tsk->signal;  	maxfire = 20; -	tsk->it_prof_expires = cputime_zero; +	tsk->cputime_expires.prof_exp = cputime_zero;  	while (!list_empty(timers)) {  		struct cpu_timer_list *t = list_first_entry(timers,  						      struct cpu_timer_list,  						      entry);  		if (!--maxfire || cputime_lt(prof_ticks(tsk), t->expires.cpu)) { -			tsk->it_prof_expires = t->expires.cpu; +			tsk->cputime_expires.prof_exp = t->expires.cpu;  			break;  		}  		t->firing = 1; @@ -984,13 +997,13 @@ static void check_thread_timers(struct task_struct *tsk,  	++timers;  	maxfire = 20; -	tsk->it_virt_expires = cputime_zero; +	tsk->cputime_expires.virt_exp = cputime_zero;  	while (!list_empty(timers)) {  		struct cpu_timer_list *t = list_first_entry(timers,  						      struct cpu_timer_list,  						      entry);  		if (!--maxfire || cputime_lt(virt_ticks(tsk), t->expires.cpu)) { -			tsk->it_virt_expires = t->expires.cpu; +			tsk->cputime_expires.virt_exp = t->expires.cpu;  			break;  		}  		t->firing = 1; @@ -999,13 +1012,13 @@ static void check_thread_timers(struct task_struct *tsk,  	++timers;  	maxfire = 20; -	tsk->it_sched_expires = 0; +	tsk->cputime_expires.sched_exp = 0;  	while (!list_empty(timers)) {  		struct cpu_timer_list *t = list_first_entry(timers,  						      struct cpu_timer_list,  						      entry);  		if (!--maxfire || tsk->se.sum_exec_runtime < t->expires.sched) { -			tsk->it_sched_expires = t->expires.sched; +			tsk->cputime_expires.sched_exp = t->expires.sched;  			break;  		}  		t->firing = 1; @@ -1055,10 +1068,10 @@ static void check_process_timers(struct task_struct *tsk,  {  	int maxfire;  	struct signal_struct *const sig = tsk->signal; -	cputime_t utime, stime, ptime, virt_expires, prof_expires; +	cputime_t utime, ptime, virt_expires, prof_expires;  	unsigned long long sum_sched_runtime, sched_expires; -	struct task_struct *t;  	struct list_head *timers = sig->cpu_timers; +	struct task_cputime cputime;  	/*  	 * Don't sample the current process CPU clocks if there are no timers. @@ -1074,18 +1087,10 @@ static void check_process_timers(struct task_struct *tsk,  	/*  	 * Collect the current process totals.  	 
*/ -	utime = sig->utime; -	stime = sig->stime; -	sum_sched_runtime = sig->sum_sched_runtime; -	t = tsk; -	do { -		utime = cputime_add(utime, t->utime); -		stime = cputime_add(stime, t->stime); -		sum_sched_runtime += t->se.sum_exec_runtime; -		t = next_thread(t); -	} while (t != tsk); -	ptime = cputime_add(utime, stime); - +	thread_group_cputime(tsk, &cputime); +	utime = cputime.utime; +	ptime = cputime_add(utime, cputime.stime); +	sum_sched_runtime = cputime.sum_exec_runtime;  	maxfire = 20;  	prof_expires = cputime_zero;  	while (!list_empty(timers)) { @@ -1193,60 +1198,18 @@ static void check_process_timers(struct task_struct *tsk,  		}  	} -	if (!cputime_eq(prof_expires, cputime_zero) || -	    !cputime_eq(virt_expires, cputime_zero) || -	    sched_expires != 0) { -		/* -		 * Rebalance the threads' expiry times for the remaining -		 * process CPU timers. -		 */ - -		cputime_t prof_left, virt_left, ticks; -		unsigned long long sched_left, sched; -		const unsigned int nthreads = atomic_read(&sig->live); - -		if (!nthreads) -			return; - -		prof_left = cputime_sub(prof_expires, utime); -		prof_left = cputime_sub(prof_left, stime); -		prof_left = cputime_div_non_zero(prof_left, nthreads); -		virt_left = cputime_sub(virt_expires, utime); -		virt_left = cputime_div_non_zero(virt_left, nthreads); -		if (sched_expires) { -			sched_left = sched_expires - sum_sched_runtime; -			do_div(sched_left, nthreads); -			sched_left = max_t(unsigned long long, sched_left, 1); -		} else { -			sched_left = 0; -		} -		t = tsk; -		do { -			if (unlikely(t->flags & PF_EXITING)) -				continue; - -			ticks = cputime_add(cputime_add(t->utime, t->stime), -					    prof_left); -			if (!cputime_eq(prof_expires, cputime_zero) && -			    (cputime_eq(t->it_prof_expires, cputime_zero) || -			     cputime_gt(t->it_prof_expires, ticks))) { -				t->it_prof_expires = ticks; -			} - -			ticks = cputime_add(t->utime, virt_left); -			if (!cputime_eq(virt_expires, cputime_zero) && -			    (cputime_eq(t->it_virt_expires, cputime_zero) || -			     cputime_gt(t->it_virt_expires, ticks))) { -				t->it_virt_expires = ticks; -			} - -			sched = t->se.sum_exec_runtime + sched_left; -			if (sched_expires && (t->it_sched_expires == 0 || -					      t->it_sched_expires > sched)) { -				t->it_sched_expires = sched; -			} -		} while ((t = next_thread(t)) != tsk); -	} +	if (!cputime_eq(prof_expires, cputime_zero) && +	    (cputime_eq(sig->cputime_expires.prof_exp, cputime_zero) || +	     cputime_gt(sig->cputime_expires.prof_exp, prof_expires))) +		sig->cputime_expires.prof_exp = prof_expires; +	if (!cputime_eq(virt_expires, cputime_zero) && +	    (cputime_eq(sig->cputime_expires.virt_exp, cputime_zero) || +	     cputime_gt(sig->cputime_expires.virt_exp, virt_expires))) +		sig->cputime_expires.virt_exp = virt_expires; +	if (sched_expires != 0 && +	    (sig->cputime_expires.sched_exp == 0 || +	     sig->cputime_expires.sched_exp > sched_expires)) +		sig->cputime_expires.sched_exp = sched_expires;  }  /* @@ -1314,6 +1277,78 @@ out:  	++timer->it_requeue_pending;  } +/** + * task_cputime_zero - Check a task_cputime struct for all zero fields. + * + * @cputime:	The struct to compare. + * + * Checks @cputime to see if all fields are zero.  Returns true if all fields + * are zero, false if any field is nonzero. 
+ */ +static inline int task_cputime_zero(const struct task_cputime *cputime) +{ +	if (cputime_eq(cputime->utime, cputime_zero) && +	    cputime_eq(cputime->stime, cputime_zero) && +	    cputime->sum_exec_runtime == 0) +		return 1; +	return 0; +} + +/** + * task_cputime_expired - Compare two task_cputime entities. + * + * @sample:	The task_cputime structure to be checked for expiration. + * @expires:	Expiration times, against which @sample will be checked. + * + * Checks @sample against @expires to see if any field of @sample has expired. + * Returns true if any field of the former is greater than the corresponding + * field of the latter if the latter field is set.  Otherwise returns false. + */ +static inline int task_cputime_expired(const struct task_cputime *sample, +					const struct task_cputime *expires) +{ +	if (!cputime_eq(expires->utime, cputime_zero) && +	    cputime_ge(sample->utime, expires->utime)) +		return 1; +	if (!cputime_eq(expires->stime, cputime_zero) && +	    cputime_ge(cputime_add(sample->utime, sample->stime), +		       expires->stime)) +		return 1; +	if (expires->sum_exec_runtime != 0 && +	    sample->sum_exec_runtime >= expires->sum_exec_runtime) +		return 1; +	return 0; +} + +/** + * fastpath_timer_check - POSIX CPU timers fast path. + * + * @tsk:	The task (thread) being checked. + * @sig:	The signal pointer for that task. + * + * If there are no timers set return false.  Otherwise snapshot the task and + * thread group timers, then compare them with the corresponding expiration + # times.  Returns true if a timer has expired, else returns false. + */ +static inline int fastpath_timer_check(struct task_struct *tsk, +					struct signal_struct *sig) +{ +	struct task_cputime task_sample = { +		.utime = tsk->utime, +		.stime = tsk->stime, +		.sum_exec_runtime = tsk->se.sum_exec_runtime +	}; +	struct task_cputime group_sample; + +	if (task_cputime_zero(&tsk->cputime_expires) && +	    task_cputime_zero(&sig->cputime_expires)) +		return 0; +	if (task_cputime_expired(&task_sample, &tsk->cputime_expires)) +		return 1; +	thread_group_cputime(tsk, &group_sample); +	return task_cputime_expired(&group_sample, &sig->cputime_expires); +} +  /*   * This is called from the timer interrupt handler.  The irq handler has   * already updated our counts.  We need to check if any timers fire now. @@ -1323,30 +1358,29 @@ void run_posix_cpu_timers(struct task_struct *tsk)  {  	LIST_HEAD(firing);  	struct k_itimer *timer, *next; +	struct signal_struct *sig; +	struct sighand_struct *sighand; +	unsigned long flags;  	BUG_ON(!irqs_disabled()); -#define UNEXPIRED(clock) \ -		(cputime_eq(tsk->it_##clock##_expires, cputime_zero) || \ -		 cputime_lt(clock##_ticks(tsk), tsk->it_##clock##_expires)) - -	if (UNEXPIRED(prof) && UNEXPIRED(virt) && -	    (tsk->it_sched_expires == 0 || -	     tsk->se.sum_exec_runtime < tsk->it_sched_expires)) -		return; - -#undef	UNEXPIRED - +	/* Pick up tsk->signal and make sure it's valid. */ +	sig = tsk->signal;  	/* -	 * Double-check with locks held. +	 * The fast path checks that there are no expired thread or thread +	 * group timers.  If that's so, just return.  Also check that +	 * tsk->signal is non-NULL; this probably can't happen but cover the +	 * possibility anyway.  	 
*/ -	read_lock(&tasklist_lock); -	if (likely(tsk->signal != NULL)) { -		spin_lock(&tsk->sighand->siglock); - +	if (unlikely(!sig) || !fastpath_timer_check(tsk, sig)) { +		return; +	} +	sighand = lock_task_sighand(tsk, &flags); +	if (likely(sighand)) {  		/* -		 * Here we take off tsk->cpu_timers[N] and tsk->signal->cpu_timers[N] -		 * all the timers that are firing, and put them on the firing list. +		 * Here we take off tsk->signal->cpu_timers[N] and +		 * tsk->cpu_timers[N] all the timers that are firing, and +		 * put them on the firing list.  		 */  		check_thread_timers(tsk, &firing);  		check_process_timers(tsk, &firing); @@ -1359,9 +1393,8 @@ void run_posix_cpu_timers(struct task_struct *tsk)  		 * that gets the timer lock before we do will give it up and  		 * spin until we've taken care of that timer below.  		 */ -		spin_unlock(&tsk->sighand->siglock);  	} -	read_unlock(&tasklist_lock); +	unlock_task_sighand(tsk, &flags);  	/*  	 * Now that all the timers on our list have the firing flag, @@ -1389,10 +1422,9 @@ void run_posix_cpu_timers(struct task_struct *tsk)  /*   * Set one of the process-wide special case CPU timers. - * The tasklist_lock and tsk->sighand->siglock must be held by the caller. - * The oldval argument is null for the RLIMIT_CPU timer, where *newval is - * absolute; non-null for ITIMER_*, where *newval is relative and we update - * it to be absolute, *oldval is absolute and we update it to be relative. + * The tsk->sighand->siglock must be held by the caller. + * The *newval argument is relative and we update it to be absolute, *oldval + * is absolute and we update it to be relative.   */  void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,  			   cputime_t *newval, cputime_t *oldval) @@ -1435,13 +1467,14 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,  	    cputime_ge(list_first_entry(head,  				  struct cpu_timer_list, entry)->expires.cpu,  		       *newval)) { -		/* -		 * Rejigger each thread's expiry time so that one will -		 * notice before we hit the process-cumulative expiry time. -		 */ -		union cpu_time_count expires = { .sched = 0 }; -		expires.cpu = *newval; -		process_timer_rebalance(tsk, clock_idx, expires, now); +		switch (clock_idx) { +		case CPUCLOCK_PROF: +			tsk->signal->cputime_expires.prof_exp = *newval; +			break; +		case CPUCLOCK_VIRT: +			tsk->signal->cputime_expires.virt_exp = *newval; +			break; +		}  	}  } diff --git a/kernel/sched.c b/kernel/sched.c index cc1f81b50b8..c51b5d27666 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -4037,23 +4037,56 @@ DEFINE_PER_CPU(struct kernel_stat, kstat);  EXPORT_PER_CPU_SYMBOL(kstat);  /* + * Return any ns on the sched_clock that have not yet been banked in + * @p in case that task is currently running. + * + * Called with task_rq_lock() held on @rq. + */ +static unsigned long long task_delta_exec(struct task_struct *p, struct rq *rq) +{ +	if (task_current(rq, p)) { +		u64 delta_exec; + +		update_rq_clock(rq); +		delta_exec = rq->clock - p->se.exec_start; +		if ((s64)delta_exec > 0) +			return delta_exec; +	} +	return 0; +} + +/*   * Return p->sum_exec_runtime plus any more ns on the sched_clock   * that have not yet been banked in case the task is currently running.   
*/  unsigned long long task_sched_runtime(struct task_struct *p)  {  	unsigned long flags; -	u64 ns, delta_exec; +	u64 ns;  	struct rq *rq;  	rq = task_rq_lock(p, &flags); -	ns = p->se.sum_exec_runtime; -	if (task_current(rq, p)) { -		update_rq_clock(rq); -		delta_exec = rq->clock - p->se.exec_start; -		if ((s64)delta_exec > 0) -			ns += delta_exec; -	} +	ns = p->se.sum_exec_runtime + task_delta_exec(p, rq); +	task_rq_unlock(rq, &flags); + +	return ns; +} + +/* + * Return sum_exec_runtime for the thread group plus any more ns on the + * sched_clock that have not yet been banked in case the task is currently + * running. + */ +unsigned long long thread_group_sched_runtime(struct task_struct *p) +{ +	unsigned long flags; +	u64 ns; +	struct rq *rq; +	struct task_cputime totals; + +	rq = task_rq_lock(p, &flags); +	thread_group_cputime(p, &totals); +	ns = totals.sum_exec_runtime + task_delta_exec(p, rq);  	task_rq_unlock(rq, &flags);  	return ns; @@ -4070,6 +4103,7 @@ void account_user_time(struct task_struct *p, cputime_t cputime)  	cputime64_t tmp;  	p->utime = cputime_add(p->utime, cputime); +	account_group_user_time(p, cputime);  	/* Add user time to cpustat. */  	tmp = cputime_to_cputime64(cputime); @@ -4094,6 +4128,7 @@ static void account_guest_time(struct task_struct *p, cputime_t cputime)  	tmp = cputime_to_cputime64(cputime);  	p->utime = cputime_add(p->utime, cputime); +	account_group_user_time(p, cputime);  	p->gtime = cputime_add(p->gtime, cputime);  	cpustat->user = cputime64_add(cpustat->user, tmp); @@ -4129,6 +4164,7 @@ void account_system_time(struct task_struct *p, int hardirq_offset,  	}  	p->stime = cputime_add(p->stime, cputime); +	account_group_system_time(p, cputime);  	/* Add system time to cpustat. */  	tmp = cputime_to_cputime64(cputime); @@ -4170,6 +4206,7 @@ void account_steal_time(struct task_struct *p, cputime_t steal)  	if (p == rq->idle) {  		p->stime = cputime_add(p->stime, steal); +		account_group_system_time(p, steal);  		if (atomic_read(&rq->nr_iowait) > 0)  			cpustat->iowait = cputime64_add(cpustat->iowait, tmp);  		else diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index fb8994c6d4b..99aa31acc54 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -507,6 +507,7 @@ static void update_curr(struct cfs_rq *cfs_rq)  		struct task_struct *curtask = task_of(curr);  		cpuacct_charge(curtask, delta_exec); +		account_group_exec_runtime(curtask, delta_exec);  	}  } diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index 552310798da..8375e69af36 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c @@ -483,6 +483,8 @@ static void update_curr_rt(struct rq *rq)  	schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));  	curr->se.sum_exec_runtime += delta_exec; +	account_group_exec_runtime(curr, delta_exec); +  	curr->se.exec_start = rq->clock;  	cpuacct_charge(curr, delta_exec); @@ -1412,7 +1414,7 @@ static void watchdog(struct rq *rq, struct task_struct *p)  		p->rt.timeout++;  		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);  		if (p->rt.timeout > next) -			p->it_sched_expires = p->se.sum_exec_runtime; +			p->cputime_expires.sched_exp = p->se.sum_exec_runtime;  	}  } diff --git a/kernel/signal.c b/kernel/signal.c index e661b01d340..6eea5826d61 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -1338,6 +1338,7 @@ int do_notify_parent(struct task_struct *tsk, int sig)  	struct siginfo info;  	unsigned long flags;  	struct sighand_struct *psig; +	struct task_cputime cputime;  	int ret = sig;  	BUG_ON(sig == -1); @@ -1368,10 
+1369,9 @@ int do_notify_parent(struct task_struct *tsk, int sig)  	info.si_uid = tsk->uid; -	info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime, -						       tsk->signal->utime)); -	info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime, -						       tsk->signal->stime)); +	thread_group_cputime(tsk, &cputime); +	info.si_utime = cputime_to_jiffies(cputime.utime); +	info.si_stime = cputime_to_jiffies(cputime.stime);  	info.si_status = tsk->exit_code & 0x7f;  	if (tsk->exit_code & 0x80) diff --git a/kernel/sys.c b/kernel/sys.c index 038a7bc0901..d046a7a055c 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -853,38 +853,28 @@ asmlinkage long sys_setfsgid(gid_t gid)  	return old_fsgid;  } +void do_sys_times(struct tms *tms) +{ +	struct task_cputime cputime; +	cputime_t cutime, cstime; + +	spin_lock_irq(¤t->sighand->siglock); +	thread_group_cputime(current, &cputime); +	cutime = current->signal->cutime; +	cstime = current->signal->cstime; +	spin_unlock_irq(¤t->sighand->siglock); +	tms->tms_utime = cputime_to_clock_t(cputime.utime); +	tms->tms_stime = cputime_to_clock_t(cputime.stime); +	tms->tms_cutime = cputime_to_clock_t(cutime); +	tms->tms_cstime = cputime_to_clock_t(cstime); +} +  asmlinkage long sys_times(struct tms __user * tbuf)  { -	/* -	 *	In the SMP world we might just be unlucky and have one of -	 *	the times increment as we use it. Since the value is an -	 *	atomically safe type this is just fine. Conceptually its -	 *	as if the syscall took an instant longer to occur. -	 */  	if (tbuf) {  		struct tms tmp; -		struct task_struct *tsk = current; -		struct task_struct *t; -		cputime_t utime, stime, cutime, cstime; - -		spin_lock_irq(&tsk->sighand->siglock); -		utime = tsk->signal->utime; -		stime = tsk->signal->stime; -		t = tsk; -		do { -			utime = cputime_add(utime, t->utime); -			stime = cputime_add(stime, t->stime); -			t = next_thread(t); -		} while (t != tsk); - -		cutime = tsk->signal->cutime; -		cstime = tsk->signal->cstime; -		spin_unlock_irq(&tsk->sighand->siglock); -		tmp.tms_utime = cputime_to_clock_t(utime); -		tmp.tms_stime = cputime_to_clock_t(stime); -		tmp.tms_cutime = cputime_to_clock_t(cutime); -		tmp.tms_cstime = cputime_to_clock_t(cstime); +		do_sys_times(&tmp);  		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))  			return -EFAULT;  	} @@ -1445,7 +1435,6 @@ asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *r  asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)  {  	struct rlimit new_rlim, *old_rlim; -	unsigned long it_prof_secs;  	int retval;  	if (resource >= RLIM_NLIMITS) @@ -1491,18 +1480,7 @@ asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)  	if (new_rlim.rlim_cur == RLIM_INFINITY)  		goto out; -	it_prof_secs = cputime_to_secs(current->signal->it_prof_expires); -	if (it_prof_secs == 0 || new_rlim.rlim_cur <= it_prof_secs) { -		unsigned long rlim_cur = new_rlim.rlim_cur; -		cputime_t cputime; - -		cputime = secs_to_cputime(rlim_cur); -		read_lock(&tasklist_lock); -		spin_lock_irq(¤t->sighand->siglock); -		set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL); -		spin_unlock_irq(¤t->sighand->siglock); -		read_unlock(&tasklist_lock); -	} +	update_rlimit_cpu(new_rlim.rlim_cur);  out:  	return 0;  } @@ -1540,11 +1518,8 @@ out:   *   */ -static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r, -				     cputime_t *utimep, cputime_t *stimep) +static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)  { -	*utimep = 
cputime_add(*utimep, t->utime); -	*stimep = cputime_add(*stimep, t->stime);  	r->ru_nvcsw += t->nvcsw;  	r->ru_nivcsw += t->nivcsw;  	r->ru_minflt += t->min_flt; @@ -1558,12 +1533,13 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)  	struct task_struct *t;  	unsigned long flags;  	cputime_t utime, stime; +	struct task_cputime cputime;  	memset((char *) r, 0, sizeof *r);  	utime = stime = cputime_zero;  	if (who == RUSAGE_THREAD) { -		accumulate_thread_rusage(p, r, &utime, &stime); +		accumulate_thread_rusage(p, r);  		goto out;  	} @@ -1586,8 +1562,9 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)  				break;  		case RUSAGE_SELF: -			utime = cputime_add(utime, p->signal->utime); -			stime = cputime_add(stime, p->signal->stime); +			thread_group_cputime(p, &cputime); +			utime = cputime_add(utime, cputime.utime); +			stime = cputime_add(stime, cputime.stime);  			r->ru_nvcsw += p->signal->nvcsw;  			r->ru_nivcsw += p->signal->nivcsw;  			r->ru_minflt += p->signal->min_flt; @@ -1596,7 +1573,7 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)  			r->ru_oublock += p->signal->oublock;  			t = p;  			do { -				accumulate_thread_rusage(t, r, &utime, &stime); +				accumulate_thread_rusage(t, r);  				t = next_thread(t);  			} while (t != p);  			break;  |
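
The account_group_user_time()/account_group_system_time()/account_group_exec_runtime() calls added in kernel/sched.c, kernel/sched_fair.c and kernel/sched_rt.c, and the thread_group_cputime() used throughout, are header inlines that this kernel/-only diff does not show. A minimal sketch of the presumed SMP user-time variant, assuming the per-CPU totals layout sketched after the diffstat: charge the tick to this CPU's slot of the group totals so thread_group_cputime_smp() can sum the slots later without walking every thread.

/*
 * Sketch of the presumed include/linux/sched.h inline (not part of this
 * diff): accumulate user time into the current CPU's slot of the thread
 * group totals, if they have been allocated.
 */
static inline void account_group_user_time(struct task_struct *tsk,
					   cputime_t cputime)
{
	struct signal_struct *sig = tsk->signal;
	struct task_cputime *times;

	if (unlikely(!sig) || !sig->cputime.totals)
		return;

	times = per_cpu_ptr(sig->cputime.totals, get_cpu());
	times->utime = cputime_add(times->utime, cputime);
	put_cpu_no_resched();
}

The system-time and exec-runtime variants would follow the same pattern for stime and sum_exec_runtime; on SMP, thread_group_cputime() is then presumably just a wrapper around thread_group_cputime_smp() from kernel/posix-cpu-timers.c above.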
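
Similarly, the thread_group_cputime_init()/thread_group_cputime_clone_thread()/thread_group_cputime_free() calls in kernel/fork.c are presumably thin header wrappers around the SMP code added in kernel/posix-cpu-timers.c: the group totals stay unallocated until a second thread joins the group, which is why copy_signal() can now fail with -ENOMEM on the CLONE_THREAD path. A hedged sketch, with signatures taken from the call sites above and bodies assumed:

/*
 * Sketch of the presumed SMP wrappers (not part of this diff).
 */
static inline void thread_group_cputime_init(struct signal_struct *sig)
{
	sig->cputime.totals = NULL;	/* allocated lazily, see below */
}

static inline int thread_group_cputime_clone_thread(struct task_struct *curr,
						    struct task_struct *tsk)
{
	/* Allocate and prime the per-CPU totals for the whole group. */
	return thread_group_cputime_alloc_smp(curr);
}

static inline void thread_group_cputime_free(struct signal_struct *sig)
{
	if (sig->cputime.totals)
		free_percpu(sig->cputime.totals);
}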