| author | Frederic Weisbecker <fweisbec@gmail.com> | 2009-10-08 16:37:12 +0200 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2009-10-08 16:56:33 +0200 |
| commit | 9a92b479b2f088ee2d3194243f4c8e59b1b8c9c2 (patch) | |
| tree | 000ea4a4e36e37af2867ba8aaf7cf38210373974 /tools/perf/builtin-sched.c | |
| parent | 016e92fbc9ef33689cf654f343a94383d43235e7 (diff) | |
perf tools: Improve thread comm resolution in perf sched
When we get sched traces that involve a task that was already created
before opening the event, we won't have a comm event for it.

So if we can't find the comm for a given thread, we fall back to the
comm fields carried by the traces themselves.
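For reference, that fallback can be sketched in a few lines of C. This is a minimal, self-contained illustration, not the perf code itself: the struct layouts and the `set_comm()`/`resolve_comm_from_switch()` helpers are simplified stand-ins; only the decision logic mirrors the patch below, where `threads__findnew_from_ctx()` does this for sched_switch events and `threads__findnew_from_wakeup()` for sched_wakeup events.

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Simplified stand-ins for the perf types: only the fields needed here. */
struct thread {
	unsigned int pid;
	char *comm;			/* NULL when no comm event was seen */
};

struct trace_switch_event {
	unsigned int prev_pid, next_pid;
	char prev_comm[16], next_comm[16];
};

/* Stand-in for thread__set_comm(): remember the command name. */
static void set_comm(struct thread *th, const char *comm)
{
	free(th->comm);
	th->comm = strdup(comm);
}

/*
 * If the thread has no comm (it existed before the event was opened),
 * recover it from the comm carried by the sched_switch trace itself.
 */
static void resolve_comm_from_switch(struct thread *th,
				     const struct trace_switch_event *ev)
{
	if (th->comm)
		return;

	if (th->pid == ev->prev_pid)
		set_comm(th, ev->prev_comm);
	else
		set_comm(th, ev->next_comm);
}

int main(void)
{
	struct thread th = { .pid = 5124, .comm = NULL };
	struct trace_switch_event ev = {
		.prev_pid = 5124, .next_pid = 421,
		.prev_comm = "firefox", .next_comm = "kondemand/1",
	};

	resolve_comm_from_switch(&th, &ev);
	printf("%s:%u\n", th.comm, th.pid);	/* "firefox:5124" rather than ":5124" */

	free(th.comm);
	return 0;
}
```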
Before:

```
 ata/1:371             |      0.000 ms |        1 | avg: 3988.693 ms | max: 3988.693 ms |
 kondemand/1:421       |      0.096 ms |        3 | avg:  345.346 ms | max: 1035.989 ms |
 kondemand/0:420       |      0.025 ms |        3 | avg:  421.332 ms | max:  964.014 ms |
 :5124:5124            |      0.103 ms |        5 | avg:   74.082 ms | max:  277.194 ms |
 :6244:6244            |      0.691 ms |        9 | avg:  125.655 ms | max:  271.306 ms |
 firefox:5080          |      0.924 ms |        5 | avg:   53.833 ms | max:  257.828 ms |
 npviewer.bin:6225     |     21.871 ms |       53 | avg:   22.462 ms | max:  220.835 ms |
 :6245:6245            |      9.631 ms |       21 | avg:   41.864 ms | max:  213.349 ms |
```
After:

```
 ata/1:371             |      0.000 ms |        1 | avg: 3988.693 ms | max: 3988.693 ms |
 kondemand/1:421       |      0.096 ms |        3 | avg:  345.346 ms | max: 1035.989 ms |
 kondemand/0:420       |      0.025 ms |        3 | avg:  421.332 ms | max:  964.014 ms |
 firefox:5124          |      0.103 ms |        5 | avg:   74.082 ms | max:  277.194 ms |
 npviewer.bin:6244     |      0.691 ms |        9 | avg:  125.655 ms | max:  271.306 ms |
 firefox:5080          |      0.924 ms |        5 | avg:   53.833 ms | max:  257.828 ms |
 npviewer.bin:6225     |     21.871 ms |       53 | avg:   22.462 ms | max:  220.835 ms |
 npviewer.bin:6245     |      9.631 ms |       21 | avg:   41.864 ms | max:  213.349 ms |
```
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
LKML-Reference: <1255012632-7882-1-git-send-email-fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'tools/perf/builtin-sched.c')
| -rw-r--r-- | tools/perf/builtin-sched.c | 44 | 
1 file changed, 39 insertions(+), 5 deletions(-)
```diff
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index e1df7055ab8..25b91e78433 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -1034,6 +1034,36 @@ add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
 	atoms->nb_atoms++;
 }
 
+static struct thread *
+threads__findnew_from_ctx(u32 pid, struct trace_switch_event *switch_event)
+{
+	struct thread *th;
+
+	th = threads__findnew_nocomm(pid, &threads, &last_match);
+	if (th->comm)
+		return th;
+
+	if (pid == switch_event->prev_pid)
+		thread__set_comm(th, switch_event->prev_comm);
+	else
+		thread__set_comm(th, switch_event->next_comm);
+	return th;
+}
+
+static struct thread *
+threads__findnew_from_wakeup(struct trace_wakeup_event *wakeup_event)
+{
+	struct thread *th;
+
+	th = threads__findnew_nocomm(wakeup_event->pid, &threads, &last_match);
+	if (th->comm)
+		return th;
+
+	thread__set_comm(th, wakeup_event->comm);
+
+	return th;
+}
+
 static void
 latency_switch_event(struct trace_switch_event *switch_event,
 		     struct event *event __used,
@@ -1059,8 +1089,10 @@ latency_switch_event(struct trace_switch_event *switch_event,
 		die("hm, delta: %Ld < 0 ?\n", delta);
 
-	sched_out = threads__findnew(switch_event->prev_pid, &threads, &last_match);
-	sched_in = threads__findnew(switch_event->next_pid, &threads, &last_match);
+	sched_out = threads__findnew_from_ctx(switch_event->prev_pid,
+					      switch_event);
+	sched_in = threads__findnew_from_ctx(switch_event->next_pid,
+					     switch_event);
 
 	out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
 	if (!out_events) {
@@ -1126,7 +1158,7 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
 	if (!wakeup_event->success)
 		return;
 
-	wakee = threads__findnew(wakeup_event->pid, &threads, &last_match);
+	wakee = threads__findnew_from_wakeup(wakeup_event);
 	atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
 	if (!atoms) {
 		thread_atoms_insert(wakee);
@@ -1386,8 +1418,10 @@ map_switch_event(struct trace_switch_event *switch_event,
 		die("hm, delta: %Ld < 0 ?\n", delta);
 
-	sched_out = threads__findnew(switch_event->prev_pid, &threads, &last_match);
-	sched_in = threads__findnew(switch_event->next_pid, &threads, &last_match);
+	sched_out = threads__findnew_from_ctx(switch_event->prev_pid,
+					      switch_event);
+	sched_in = threads__findnew_from_ctx(switch_event->next_pid,
+					     switch_event);
 
 	curr_thread[this_cpu] = sched_in;
```