author		Tejun Heo <tj@kernel.org>	2013-04-01 17:08:13 -0700
committer	Tejun Heo <tj@kernel.org>	2013-04-01 18:45:36 -0700
commit		229641a6f1f09e27a1f12fba38980f33f4c92975 (patch)
tree		234a6f8aea0910de3242af0bbe6d7494fcf81847 /kernel/trace/trace_sched_wakeup.c
parent		d55262c4d164759a8debe772da6c9b16059dec47 (diff)
parent		07961ac7c0ee8b546658717034fe692fd12eefa9 (diff)
Merge tag 'v3.9-rc5' into wq/for-3.10
The writeback conversion to workqueues will be based on top of the
wq/for-3.10 branch to take advantage of custom attrs and NUMA support
for unbound workqueues.  Mainline currently contains two commits which
result in non-trivial merge conflicts with wq/for-3.10, and because
block/for-3.10/core is based on v3.9-rc3, which contains one of the
conflicting commits, we need a pre-merge-window merge anyway.  Let's
pull v3.9-rc5 into wq/for-3.10 so that the block tree doesn't suffer
from workqueue merge conflicts.
The two conflicts and their resolutions:
* e68035fb65 ("workqueue: convert to idr_alloc()") in mainline changes
  worker_pool_assign_id() to use idr_alloc() instead of the old idr
  interface.  worker_pool_assign_id() goes through multiple locking
  changes in wq/for-3.10, causing the following conflict.
  static int worker_pool_assign_id(struct worker_pool *pool)
  {
	  int ret;
  <<<<<<< HEAD
	  lockdep_assert_held(&wq_pool_mutex);
	  do {
		  if (!idr_pre_get(&worker_pool_idr, GFP_KERNEL))
			  return -ENOMEM;
		  ret = idr_get_new(&worker_pool_idr, pool, &pool->id);
	  } while (ret == -EAGAIN);
  =======
	  mutex_lock(&worker_pool_idr_mutex);
	  ret = idr_alloc(&worker_pool_idr, pool, 0, 0, GFP_KERNEL);
	  if (ret >= 0)
		  pool->id = ret;
	  mutex_unlock(&worker_pool_idr_mutex);
  >>>>>>> c67bf5361e7e66a0ff1f4caf95f89347d55dfb89
	  return ret < 0 ? ret : 0;
  }
  We want the locking from the former and the idr_alloc() usage from
  the latter, which can be combined into the following.
  static int worker_pool_assign_id(struct worker_pool *pool)
  {
	  int ret;
	  lockdep_assert_held(&wq_pool_mutex);
	  ret = idr_alloc(&worker_pool_idr, pool, 0, 0, GFP_KERNEL);
	  if (ret >= 0) {
		  pool->id = ret;
		  return 0;
	  }
	  return ret;
  }
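  For reference, the idr API change behind this conflict replaced the
  old preload-and-retry pair with a single call.  A minimal sketch of
  the two idioms (kernel-style C; some_idr, ptr and id are placeholder
  names, not code from either branch):
	  /* Old interface: preallocate, then allocate, retrying on
	   * -EAGAIN if the preallocation was consumed by someone else. */
	  do {
		  if (!idr_pre_get(&some_idr, GFP_KERNEL))
			  return -ENOMEM;	/* preallocation failed */
		  ret = idr_get_new(&some_idr, ptr, &id);
	  } while (ret == -EAGAIN);
	  /* New interface: idr_alloc(idr, ptr, start, end, gfp) allocates
	   * an id in [start, end), with end == 0 meaning "no upper limit".
	   * It returns the new id or a negative errno, so no retry loop
	   * is needed. */
	  id = idr_alloc(&some_idr, ptr, 0, 0, GFP_KERNEL);
	  if (id < 0)
		  return id;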
* eb2834285c ("workqueue: fix possible pool stall bug in
  wq_unbind_fn()") updated wq_unbind_fn() so that it has a single,
  larger for_each_std_worker_pool() loop instead of two separate loops
  with a schedule() call in between.  wq/for-3.10 renamed
  pool->assoc_mutex to pool->manager_mutex, causing the following
  conflict (earlier function body and comments omitted for brevity).
  static void wq_unbind_fn(struct work_struct *work)
  {
  ...
		  spin_unlock_irq(&pool->lock);
  <<<<<<< HEAD
		  mutex_unlock(&pool->manager_mutex);
	  }
  =======
		  mutex_unlock(&pool->assoc_mutex);
  >>>>>>> c67bf5361e7e66a0ff1f4caf95f89347d55dfb89
		  schedule();
  <<<<<<< HEAD
	  for_each_cpu_worker_pool(pool, cpu)
  =======
  >>>>>>> c67bf5361e7e66a0ff1f4caf95f89347d55dfb89
		  atomic_set(&pool->nr_running, 0);
		  spin_lock_irq(&pool->lock);
		  wake_up_worker(pool);
		  spin_unlock_irq(&pool->lock);
	  }
  }
  The resolution is mostly trivial.  We want the control flow of the
  latter with the rename from the former.
  static void wq_unbind_fn(struct work_struct *work)
  {
  ...
		  spin_unlock_irq(&pool->lock);
		  mutex_unlock(&pool->manager_mutex);
		  schedule();
		  atomic_set(&pool->nr_running, 0);
		  spin_lock_irq(&pool->lock);
		  wake_up_worker(pool);
		  spin_unlock_irq(&pool->lock);
	  }
  }
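  Annotated, the resolved per-pool flow looks as follows (a sketch
  only; the body omitted above unbinds the pool's workers while
  holding both locks, and the rationale comments from the full
  function are not reproduced here):
	  spin_unlock_irq(&pool->lock);		/* taken in the elided body */
	  mutex_unlock(&pool->manager_mutex);	/* the renamed assoc_mutex */
	  schedule();				/* yield once per pool */
	  atomic_set(&pool->nr_running, 0);	/* reset concurrency count */
	  spin_lock_irq(&pool->lock);
	  wake_up_worker(pool);			/* let an idle worker notice */
	  spin_unlock_irq(&pool->lock);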
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'kernel/trace/trace_sched_wakeup.c')
-rw-r--r--	kernel/trace/trace_sched_wakeup.c	18
1 file changed, 13 insertions(+), 5 deletions(-)
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 75aa97fbe1a..fde652c9a51 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -36,7 +36,7 @@ static void __wakeup_reset(struct trace_array *tr);
 static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
 static void wakeup_graph_return(struct ftrace_graph_ret *trace);
 
-static int save_lat_flag;
+static int save_flags;
 
 #define TRACE_DISPLAY_GRAPH     1
 
@@ -540,8 +540,11 @@ static void stop_wakeup_tracer(struct trace_array *tr)
 
 static int __wakeup_tracer_init(struct trace_array *tr)
 {
-	save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
-	trace_flags |= TRACE_ITER_LATENCY_FMT;
+	save_flags = trace_flags;
+
+	/* non overwrite screws up the latency tracers */
+	set_tracer_flag(TRACE_ITER_OVERWRITE, 1);
+	set_tracer_flag(TRACE_ITER_LATENCY_FMT, 1);
 
 	tracing_max_latency = 0;
 	wakeup_trace = tr;
@@ -563,12 +566,15 @@ static int wakeup_rt_tracer_init(struct trace_array *tr)
 
 static void wakeup_tracer_reset(struct trace_array *tr)
 {
+	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
+	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
+
 	stop_wakeup_tracer(tr);
 	/* make sure we put back any tasks we are tracing */
 	wakeup_reset(tr);
 
-	if (!save_lat_flag)
-		trace_flags &= ~TRACE_ITER_LATENCY_FMT;
+	set_tracer_flag(TRACE_ITER_LATENCY_FMT, lat_flag);
+	set_tracer_flag(TRACE_ITER_OVERWRITE, overwrite_flag);
 }
 
 static void wakeup_tracer_start(struct trace_array *tr)
@@ -594,6 +600,7 @@ static struct tracer wakeup_tracer __read_mostly =
 	.print_line	= wakeup_print_line,
 	.flags		= &tracer_flags,
 	.set_flag	= wakeup_set_flag,
+	.flag_changed	= trace_keep_overwrite,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest    = trace_selftest_startup_wakeup,
 #endif
@@ -615,6 +622,7 @@ static struct tracer wakeup_rt_tracer __read_mostly =
 	.print_line	= wakeup_print_line,
 	.flags		= &tracer_flags,
 	.set_flag	= wakeup_set_flag,
+	.flag_changed	= trace_keep_overwrite,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest    = trace_selftest_startup_wakeup,
 #endif
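The .flag_changed hook added to both tracer definitions lets a tracer
veto flag changes coming from userspace.  As a rough sketch of what
trace_keep_overwrite() plausibly does (reconstructed from its name and
the set_tracer_flag() calls above; the exact body and return value are
assumptions, not the verbatim kernel source):
	int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
	{
		/* Refuse to clear TRACE_ITER_OVERWRITE while the tracer is
		 * active: non-overwrite mode screws up the latency tracers,
		 * per the comment added in __wakeup_tracer_init() above.
		 * (The specific errno here is an assumption.) */
		if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
			return -EBUSY;

		return 0;	/* allow every other flag change */
	}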