| author | Tejun Heo <tj@kernel.org> | 2012-12-21 17:57:00 -0800 |
|---|---|---|
| committer | Tejun Heo <tj@kernel.org> | 2013-02-09 11:32:42 -0800 |
| commit | ad72b3bea744b4db01c89af0f86f3e8920d354df (patch) | |
| tree | 8482593d6330783150e88ff7181a1484d3156265 | |
| parent | 7c99e0bf86fdc1dee238eb6e213b980f887b68f1 (diff) | |
kprobes: fix wait_for_kprobe_optimizer()
wait_for_kprobe_optimizer() seems largely broken.  It waits on
optimizer_comp, which is never re-initialized, so
wait_for_kprobe_optimizer() will never wait for anything once
kprobe_optimizer() finishes all pending jobs for the first time.
Also, aside from the completion problem, delayed_work_pending() is
%false once kprobe_optimizer() starts executing, so
wait_for_kprobe_optimizer() won't wait for an in-flight run either.
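For illustration, here is a minimal, hypothetical sketch of that waiting
pattern outside of kprobes (the names job, job_fn and job_done are
invented): a completion that is only ever signalled with complete_all()
and never re-initialized stays completed forever, and a
delayed_work_pending() check is already %false while the work function
is running.

```c
/*
 * Hypothetical sketch of the broken waiting pattern; the names are
 * invented for illustration and this is not the kprobes code.
 */
#include <linux/completion.h>
#include <linux/workqueue.h>

static void job_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(job, job_fn);
static DECLARE_COMPLETION(job_done);

static void job_fn(struct work_struct *work)
{
	/* ... process whatever was queued up ... */

	/*
	 * After the first complete_all(), job_done stays completed;
	 * unless it is explicitly re-initialized, later waiters never
	 * block on it again.
	 */
	complete_all(&job_done);
}

static void wait_for_job(void)
{
	/*
	 * Broken on two counts: delayed_work_pending() is already false
	 * once job_fn() has started executing, so an in-flight run is
	 * not waited for, and after the first complete_all() the wait
	 * below returns immediately anyway.
	 */
	if (delayed_work_pending(&job))
		wait_for_completion(&job_done);
}
```

Either problem alone is enough to let a waiter return while
optimization/unoptimization work is still outstanding.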
Reimplement it so that it flushes optimizing_work until
[un]optimizing_lists are empty.  Note that this also makes
optimizing_work execute immediately if someone's waiting for it, which
is the nicer behavior.
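The same idea in a self-contained, hypothetical form (pending_list,
job_mutex, job_fn and wait_for_job are made-up names; the real change is
in the diff below): flush_delayed_work() forces a queued delayed work to
run right away and waits for it to finish, but the work may not have
been queued yet when the waiter looks, so the waiter rechecks the list
under the mutex and retries until nothing is left.

```c
/*
 * Hypothetical sketch of the flush-until-idle pattern used by the fix;
 * all names are invented for illustration.
 */
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <asm/processor.h>	/* cpu_relax() */

static DEFINE_MUTEX(job_mutex);
static LIST_HEAD(pending_list);
static void job_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(job, job_fn);

static void job_fn(struct work_struct *work)
{
	mutex_lock(&job_mutex);
	/* ... drain pending_list ... */
	if (!list_empty(&pending_list))
		schedule_delayed_work(&job, 5);	/* re-kick if more arrived */
	mutex_unlock(&job_mutex);
}

static void wait_for_job(void)
{
	mutex_lock(&job_mutex);

	while (!list_empty(&pending_list)) {
		mutex_unlock(&job_mutex);

		/* runs a queued delayed work at once and waits for it */
		flush_delayed_work(&job);
		/* the work might not have been queued yet, so just relax */
		cpu_relax();

		mutex_lock(&job_mutex);
	}

	mutex_unlock(&job_mutex);
}
```

The busy retry is tolerable here because waiting for the optimizer is a
rare, slow-path operation; the common path pays nothing extra.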
Only compile tested.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Cc: "David S. Miller" <davem@davemloft.net>
| -rw-r--r-- | kernel/kprobes.c | 23 | 
1 file changed, 15 insertions(+), 8 deletions(-)
```diff
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 098f396aa40..f230e81a9db 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -471,7 +471,6 @@ static LIST_HEAD(unoptimizing_list);
 
 static void kprobe_optimizer(struct work_struct *work);
 static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
-static DECLARE_COMPLETION(optimizer_comp);
 #define OPTIMIZE_DELAY 5
 
 /*
@@ -552,8 +551,7 @@ static __kprobes void do_free_cleaned_kprobes(struct list_head *free_list)
 /* Start optimizer after OPTIMIZE_DELAY passed */
 static __kprobes void kick_kprobe_optimizer(void)
 {
-	if (!delayed_work_pending(&optimizing_work))
-		schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
+	schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
 }
 
 /* Kprobe jump optimizer */
@@ -592,16 +590,25 @@ static __kprobes void kprobe_optimizer(struct work_struct *work)
 	/* Step 5: Kick optimizer again if needed */
 	if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
 		kick_kprobe_optimizer();
-	else
-		/* Wake up all waiters */
-		complete_all(&optimizer_comp);
 }
 
 /* Wait for completing optimization and unoptimization */
 static __kprobes void wait_for_kprobe_optimizer(void)
 {
-	if (delayed_work_pending(&optimizing_work))
-		wait_for_completion(&optimizer_comp);
+	mutex_lock(&kprobe_mutex);
+
+	while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
+		mutex_unlock(&kprobe_mutex);
+
+		/* this will also make optimizing_work execute immmediately */
+		flush_delayed_work(&optimizing_work);
+		/* @optimizing_work might not have been queued yet, relax */
+		cpu_relax();
+
+		mutex_lock(&kprobe_mutex);
+	}
+
+	mutex_unlock(&kprobe_mutex);
 }
 
 /* Optimize kprobe if p is ready to be optimized */
```