diff options
Diffstat (limited to 'kernel/trace/ring_buffer.c')
 kernel/trace/ring_buffer.c | 38 ++++++++++++++++++++------------------
 1 file changed, 20 insertions(+), 18 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index d4ff0197054..a1ca4956ab5 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -397,18 +397,21 @@ int ring_buffer_print_page_header(struct trace_seq *s)  	int ret;  	ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t" -			       "offset:0;\tsize:%u;\n", -			       (unsigned int)sizeof(field.time_stamp)); +			       "offset:0;\tsize:%u;\tsigned:%u;\n", +			       (unsigned int)sizeof(field.time_stamp), +			       (unsigned int)is_signed_type(u64));  	ret = trace_seq_printf(s, "\tfield: local_t commit;\t" -			       "offset:%u;\tsize:%u;\n", +			       "offset:%u;\tsize:%u;\tsigned:%u;\n",  			       (unsigned int)offsetof(typeof(field), commit), -			       (unsigned int)sizeof(field.commit)); +			       (unsigned int)sizeof(field.commit), +			       (unsigned int)is_signed_type(long));  	ret = trace_seq_printf(s, "\tfield: char data;\t" -			       "offset:%u;\tsize:%u;\n", +			       "offset:%u;\tsize:%u;\tsigned:%u;\n",  			       (unsigned int)offsetof(typeof(field), data), -			       (unsigned int)BUF_PAGE_SIZE); +			       (unsigned int)BUF_PAGE_SIZE, +			       (unsigned int)is_signed_type(char));  	return ret;  } @@ -483,7 +486,7 @@ struct ring_buffer_iter {  /* Up this if you want to test the TIME_EXTENTS and normalization */  #define DEBUG_SHIFT 0 -static inline u64 rb_time_stamp(struct ring_buffer *buffer, int cpu) +static inline u64 rb_time_stamp(struct ring_buffer *buffer)  {  	/* shift to debug/test normalization and TIME_EXTENTS */  	return buffer->clock() << DEBUG_SHIFT; @@ -494,7 +497,7 @@ u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)  	u64 time;  	preempt_disable_notrace(); -	time = rb_time_stamp(buffer, cpu); +	time = rb_time_stamp(buffer);  	preempt_enable_no_resched_notrace();  	return time; @@ -599,7 +602,7 @@ static struct list_head *rb_list_head(struct list_head *list)  }  /* - * rb_is_head_page - test if the give page is the 
head page + * rb_is_head_page - test if the given page is the head page   *   * Because the reader may move the head_page pointer, we can   * not trust what the head page is (it may be pointing to @@ -1193,6 +1196,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)  	atomic_inc(&cpu_buffer->record_disabled);  	synchronize_sched(); +	spin_lock_irq(&cpu_buffer->reader_lock);  	rb_head_page_deactivate(cpu_buffer);  	for (i = 0; i < nr_pages; i++) { @@ -1207,6 +1211,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)  		return;  	rb_reset_cpu(cpu_buffer); +	spin_unlock_irq(&cpu_buffer->reader_lock);  	rb_check_pages(cpu_buffer); @@ -1785,9 +1790,9 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,  static struct ring_buffer_event *  rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,  	     unsigned long length, unsigned long tail, -	     struct buffer_page *commit_page,  	     struct buffer_page *tail_page, u64 *ts)  { +	struct buffer_page *commit_page = cpu_buffer->commit_page;  	struct ring_buffer *buffer = cpu_buffer->buffer;  	struct buffer_page *next_page;  	int ret; @@ -1868,7 +1873,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,  		 * Nested commits always have zero deltas, so  		 * just reread the time stamp  		 */ -		*ts = rb_time_stamp(buffer, cpu_buffer->cpu); +		*ts = rb_time_stamp(buffer);  		next_page->page->time_stamp = *ts;  	} @@ -1890,13 +1895,10 @@ static struct ring_buffer_event *  __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,  		  unsigned type, unsigned long length, u64 *ts)  { -	struct buffer_page *tail_page, *commit_page; +	struct buffer_page *tail_page;  	struct ring_buffer_event *event;  	unsigned long tail, write; -	commit_page = cpu_buffer->commit_page; -	/* we just need to protect against interrupts */ -	barrier();  	tail_page = cpu_buffer->tail_page;  	write = local_add_return(length, &tail_page->write); @@ -1907,7 +1909,7 @@ __rb_reserve_next(struct 
ring_buffer_per_cpu *cpu_buffer,  	/* See if we shot pass the end of this buffer page */  	if (write > BUF_PAGE_SIZE)  		return rb_move_tail(cpu_buffer, length, tail, -				    commit_page, tail_page, ts); +				    tail_page, ts);  	/* We reserved something on the buffer */ @@ -2111,7 +2113,7 @@ rb_reserve_next_event(struct ring_buffer *buffer,  	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))  		goto out_fail; -	ts = rb_time_stamp(cpu_buffer->buffer, cpu_buffer->cpu); +	ts = rb_time_stamp(cpu_buffer->buffer);  	/*  	 * Only the first commit can update the timestamp. @@ -2681,7 +2683,7 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer)  EXPORT_SYMBOL_GPL(ring_buffer_entries);  /** - * ring_buffer_overrun_cpu - get the number of overruns in buffer + * ring_buffer_overruns - get the number of overruns in buffer   * @buffer: The ring buffer   *   * Returns the total number of overruns in the ring buffer  |