diff options
Diffstat (limited to 'kernel/trace/trace_selftest.c')
| -rw-r--r-- | kernel/trace/trace_selftest.c | 19 | 
1 file changed, 19 insertions, 0 deletions
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index 01415f4edaa..7238646b872 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c @@ -24,10 +24,20 @@ static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)  {  	struct ring_buffer_event *event;  	struct trace_entry *entry; +	unsigned int loops = 0;  	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {  		entry = ring_buffer_event_data(event); +		/* +		 * The ring buffer is a size of trace_buf_size, if +		 * we loop more than the size, there's something wrong +		 * with the ring buffer. +		 */ +		if (loops++ > trace_buf_size) { +			printk(KERN_CONT ".. bad ring buffer "); +			goto failed; +		}  		if (!trace_valid_entry(entry)) {  			printk(KERN_CONT ".. invalid entry %d ",  				entry->type); @@ -58,11 +68,20 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)  	cnt = ring_buffer_entries(tr->buffer); +	/* +	 * The trace_test_buffer_cpu runs a while loop to consume all data. +	 * If the calling tracer is broken, and is constantly filling +	 * the buffer, this will run forever, and hard lock the box. +	 * We disable the ring buffer while we do this test to prevent +	 * a hard lock up. +	 */ +	tracing_off();  	for_each_possible_cpu(cpu) {  		ret = trace_test_buffer_cpu(tr, cpu);  		if (ret)  			break;  	} +	tracing_on();  	__raw_spin_unlock(&ftrace_max_lock);  	local_irq_restore(flags);  |