diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2013-02-19 18:19:48 -0800 | 
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-02-19 18:19:48 -0800 | 
| commit | d652e1eb8e7b739fccbfb503a3da3e9f640fbf3d (patch) | |
| tree | 55ab77bad0cbb045eac0b84b80d63f88f1ae09e6 /kernel/posix-cpu-timers.c | |
| parent | 8f55cea410dbc56114bb71a3742032070c8108d0 (diff) | |
| parent | 77852fea6e2442a0e654a9292060489895de18c7 (diff) | |
| download | olio-linux-3.10-d652e1eb8e7b739fccbfb503a3da3e9f640fbf3d.tar.xz olio-linux-3.10-d652e1eb8e7b739fccbfb503a3da3e9f640fbf3d.zip  | |
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler changes from Ingo Molnar:
 "Main changes:
   - scheduler side full-dynticks (user-space execution is undisturbed
     and receives no timer IRQs) preparation changes that convert the
     cputime accounting code to be full-dynticks ready, from Frederic
     Weisbecker.
   - Initial sched.h split-up changes, by Clark Williams
   - select_idle_sibling() performance improvement by Mike Galbraith:
        " 1 tbench pair (worst case) in a 10 core + SMT package:
          pre   15.22 MB/sec 1 procs
          post 252.01 MB/sec 1 procs "
  - sched_rr_get_interval() ABI fix/change.  We think this detail is not
    used by apps (so it's not an ABI in practice), but let's keep it
    under observation.
  - misc RT scheduling cleanups, optimizations"
* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (24 commits)
  sched/rt: Add <linux/sched/rt.h> header to <linux/init_task.h>
  cputime: Remove irqsave from seqlock readers
  sched, powerpc: Fix sched.h split-up build failure
  cputime: Restore CPU_ACCOUNTING config defaults for PPC64
  sched/rt: Move rt specific bits into new header file
  sched/rt: Add a tuning knob to allow changing SCHED_RR timeslice
  sched: Move sched.h sysctl bits into separate header
  sched: Fix signedness bug in yield_to()
  sched: Fix select_idle_sibling() bouncing cow syndrome
  sched/rt: Further simplify pick_rt_task()
  sched/rt: Do not account zero delta_exec in update_curr_rt()
  cputime: Safely read cputime of full dynticks CPUs
  kvm: Prepare to add generic guest entry/exit callbacks
  cputime: Use accessors to read task cputime stats
  cputime: Allow dynamic switch between tick/virtual based cputime accounting
  cputime: Generic on-demand virtual cputime accounting
  cputime: Move default nsecs_to_cputime() to jiffies based cputime file
  cputime: Librarize per nsecs resolution cputime definitions
  cputime: Avoid multiplication overflow on utime scaling
  context_tracking: Export context state for generic vtime
  ...
Fix up conflict in kernel/context_tracking.c due to comment additions.
Diffstat (limited to 'kernel/posix-cpu-timers.c')
| -rw-r--r-- | kernel/posix-cpu-timers.c | 28 | 
1 file changed, 22 insertions, 6 deletions
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index a278cad1d5d..165d4769847 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c @@ -155,11 +155,19 @@ static void bump_cpu_timer(struct k_itimer *timer,  static inline cputime_t prof_ticks(struct task_struct *p)  { -	return p->utime + p->stime; +	cputime_t utime, stime; + +	task_cputime(p, &utime, &stime); + +	return utime + stime;  }  static inline cputime_t virt_ticks(struct task_struct *p)  { -	return p->utime; +	cputime_t utime; + +	task_cputime(p, &utime, NULL); + +	return utime;  }  static int @@ -471,18 +479,23 @@ static void cleanup_timers(struct list_head *head,   */  void posix_cpu_timers_exit(struct task_struct *tsk)  { +	cputime_t utime, stime; +  	add_device_randomness((const void*) &tsk->se.sum_exec_runtime,  						sizeof(unsigned long long)); +	task_cputime(tsk, &utime, &stime);  	cleanup_timers(tsk->cpu_timers, -		       tsk->utime, tsk->stime, tsk->se.sum_exec_runtime); +		       utime, stime, tsk->se.sum_exec_runtime);  }  void posix_cpu_timers_exit_group(struct task_struct *tsk)  {  	struct signal_struct *const sig = tsk->signal; +	cputime_t utime, stime; +	task_cputime(tsk, &utime, &stime);  	cleanup_timers(tsk->signal->cpu_timers, -		       tsk->utime + sig->utime, tsk->stime + sig->stime, +		       utime + sig->utime, stime + sig->stime,  		       tsk->se.sum_exec_runtime + sig->sum_sched_runtime);  } @@ -1226,11 +1239,14 @@ static inline int task_cputime_expired(const struct task_cputime *sample,  static inline int fastpath_timer_check(struct task_struct *tsk)  {  	struct signal_struct *sig; +	cputime_t utime, stime; + +	task_cputime(tsk, &utime, &stime);  	if (!task_cputime_zero(&tsk->cputime_expires)) {  		struct task_cputime task_sample = { -			.utime = tsk->utime, -			.stime = tsk->stime, +			.utime = utime, +			.stime = stime,  			.sum_exec_runtime = tsk->se.sum_exec_runtime  		};  |