diff options
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/lockdep.c                    |  2
-rw-r--r--  kernel/trace/ring_buffer.c          |  2
-rw-r--r--  kernel/trace/trace.c                | 10
-rw-r--r--  kernel/trace/trace_clock.c          |  2
-rw-r--r--  kernel/trace/trace_sched_wakeup.c   |  2
-rw-r--r--  kernel/trace/trace_stack.c          |  2
6 files changed, 10 insertions, 10 deletions
diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 7cc50c62af5..2389e3f85cf 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -73,7 +73,7 @@ module_param(lock_stat, int, 0644);   * to use a raw spinlock - we really dont want the spinlock   * code to recurse back into the lockdep code...   */ -static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; +static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;  static int graph_lock(void)  { diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 5ac8ee0a9e3..fb7a0fa508b 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -998,7 +998,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)  	cpu_buffer->buffer = buffer;  	spin_lock_init(&cpu_buffer->reader_lock);  	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); -	cpu_buffer->lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; +	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;  	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),  			    GFP_KERNEL, cpu_to_node(cpu)); diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 7d56cecc2c6..63bc1cc3821 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -501,7 +501,7 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)   * CONFIG_TRACER_MAX_TRACE.   
*/  static arch_spinlock_t ftrace_max_lock = -	(arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; +	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;  #ifdef CONFIG_TRACER_MAX_TRACE  unsigned long __read_mostly	tracing_max_latency; @@ -802,7 +802,7 @@ static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];  static unsigned map_cmdline_to_pid[SAVED_CMDLINES];  static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];  static int cmdline_idx; -static arch_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED; +static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;  /* temporary disable recording */  static atomic_t trace_record_cmdline_disabled __read_mostly; @@ -1252,7 +1252,7 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)  int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)  {  	static arch_spinlock_t trace_buf_lock = -		(arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; +		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;  	static u32 trace_buf[TRACE_BUF_SIZE];  	struct ftrace_event_call *call = &event_bprint; @@ -1334,7 +1334,7 @@ int trace_array_printk(struct trace_array *tr,  int trace_array_vprintk(struct trace_array *tr,  			unsigned long ip, const char *fmt, va_list args)  { -	static arch_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED; +	static arch_spinlock_t trace_buf_lock = __ARCH_SPIN_LOCK_UNLOCKED;  	static char trace_buf[TRACE_BUF_SIZE];  	struct ftrace_event_call *call = &event_print; @@ -4308,7 +4308,7 @@ trace_printk_seq(struct trace_seq *s)  static void __ftrace_dump(bool disable_tracing)  {  	static arch_spinlock_t ftrace_dump_lock = -		(arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; +		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;  	/* use static because iter can be a bit big for the stack */  	static struct trace_iterator iter;  	unsigned int old_userobj; diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c index 206ec3d4b3c..433e2eda2d0 100644 --- a/kernel/trace/trace_clock.c +++ 
b/kernel/trace/trace_clock.c @@ -74,7 +74,7 @@ static struct {  	arch_spinlock_t lock;  } trace_clock_struct ____cacheline_aligned_in_smp =  	{ -		.lock = (arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED, +		.lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED,  	};  u64 notrace trace_clock_global(void) diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 4cf7e83ec23..e347853564e 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c @@ -29,7 +29,7 @@ static unsigned			wakeup_prio = -1;  static int			wakeup_rt;  static arch_spinlock_t wakeup_lock = -	(arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; +	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;  static void __wakeup_reset(struct trace_array *tr); diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index 9a82d568fde..728c3522148 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c @@ -28,7 +28,7 @@ static struct stack_trace max_stack_trace = {  static unsigned long max_stack_size;  static arch_spinlock_t max_stack_lock = -	(arch_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; +	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;  static int stack_trace_disabled __read_mostly;  static DEFINE_PER_CPU(int, trace_active);  |