Diffstat (limited to 'arch/x86/include/asm')
 arch/x86/include/asm/paravirt.h | 14 +++++++-------
 arch/x86/include/asm/spinlock.h | 26 +++++++++++++-------------
 2 files changed, 20 insertions(+), 20 deletions(-)
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 5655f75f10b..dd59a85a918 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -731,34 +731,34 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
 
 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
 
-static inline int __raw_spin_is_locked(struct arch_spinlock *lock)
+static inline int arch_spin_is_locked(struct arch_spinlock *lock)
 {
 	return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
 }
 
-static inline int __raw_spin_is_contended(struct arch_spinlock *lock)
+static inline int arch_spin_is_contended(struct arch_spinlock *lock)
 {
 	return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
 }
-#define __raw_spin_is_contended	__raw_spin_is_contended
+#define arch_spin_is_contended	arch_spin_is_contended
 
-static __always_inline void __raw_spin_lock(struct arch_spinlock *lock)
+static __always_inline void arch_spin_lock(struct arch_spinlock *lock)
 {
 	PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
 }
 
-static __always_inline void __raw_spin_lock_flags(struct arch_spinlock *lock,
+static __always_inline void arch_spin_lock_flags(struct arch_spinlock *lock,
 						  unsigned long flags)
 {
 	PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
 }
 
-static __always_inline int __raw_spin_trylock(struct arch_spinlock *lock)
+static __always_inline int arch_spin_trylock(struct arch_spinlock *lock)
 {
 	return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
 }
 
-static __always_inline void __raw_spin_unlock(struct arch_spinlock *lock)
+static __always_inline void arch_spin_unlock(struct arch_spinlock *lock)
 {
 	PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
 }
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 204b524fcf5..ab9055fd57d 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -174,43 +174,43 @@ static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
 
 #ifndef CONFIG_PARAVIRT_SPINLOCKS
 
-static inline int __raw_spin_is_locked(arch_spinlock_t *lock)
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
 	return __ticket_spin_is_locked(lock);
 }
 
-static inline int __raw_spin_is_contended(arch_spinlock_t *lock)
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 {
 	return __ticket_spin_is_contended(lock);
 }
-#define __raw_spin_is_contended	__raw_spin_is_contended
+#define arch_spin_is_contended	arch_spin_is_contended
 
-static __always_inline void __raw_spin_lock(arch_spinlock_t *lock)
+static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	__ticket_spin_lock(lock);
 }
 
-static __always_inline int __raw_spin_trylock(arch_spinlock_t *lock)
+static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	return __ticket_spin_trylock(lock);
 }
 
-static __always_inline void __raw_spin_unlock(arch_spinlock_t *lock)
+static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	__ticket_spin_unlock(lock);
 }
 
-static __always_inline void __raw_spin_lock_flags(arch_spinlock_t *lock,
+static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
 						  unsigned long flags)
 {
-	__raw_spin_lock(lock);
+	arch_spin_lock(lock);
 }
 
 #endif	/* CONFIG_PARAVIRT_SPINLOCKS */
 
-static inline void __raw_spin_unlock_wait(arch_spinlock_t *lock)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
-	while (__raw_spin_is_locked(lock))
+	while (arch_spin_is_locked(lock))
 		cpu_relax();
 }
 
@@ -298,9 +298,9 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
 #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
 #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
 
-#define _raw_spin_relax(lock)	cpu_relax()
-#define _raw_read_relax(lock)	cpu_relax()
-#define _raw_write_relax(lock)	cpu_relax()
+#define arch_spin_relax(lock)	cpu_relax()
+#define arch_read_relax(lock)	cpu_relax()
+#define arch_write_relax(lock)	cpu_relax()
 
 /* The {read|write|spin}_lock() on x86 are full memory barriers. */
 static inline void smp_mb__after_lock(void) { }
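The change above is a mechanical rename (__raw_spin_*/_raw_*_relax to arch_spin_*/arch_*_relax) with no behavior change. For readers unfamiliar with the two layers the diff touches, the following is a minimal userspace sketch, not kernel code: it pairs a ticket lock in the spirit of __ticket_spin_lock with a paravirt-style table of lock operations that the renamed arch_spin_* entry points dispatch through, analogous to PVOP_VCALL1(pv_lock_ops.spin_lock, lock). Every name other than arch_spin_lock/arch_spin_unlock is hypothetical.

/*
 * Illustrative sketch only -- not kernel code. All names except the
 * arch_spin_lock/arch_spin_unlock entry points are made up for this example.
 * Build with: gcc -O2 sketch.c (x86, for __builtin_ia32_pause).
 */
#include <stdio.h>

struct arch_spinlock {
	unsigned short head;	/* ticket currently being served */
	unsigned short tail;	/* next ticket to hand out */
};

/* Native (bare-metal) implementation: a simple ticket lock. */
static void native_spin_lock(struct arch_spinlock *lock)
{
	/* Take a ticket, then spin until it is our turn. */
	unsigned short ticket =
		__atomic_fetch_add(&lock->tail, 1, __ATOMIC_RELAXED);

	while (__atomic_load_n(&lock->head, __ATOMIC_ACQUIRE) != ticket)
		__builtin_ia32_pause();	/* rough cpu_relax() equivalent */
}

static void native_spin_unlock(struct arch_spinlock *lock)
{
	/* Serve the next ticket; the holder owns the lock, so a plain
	 * read of head followed by a release store is sufficient here. */
	__atomic_store_n(&lock->head, lock->head + 1, __ATOMIC_RELEASE);
}

/* Paravirt-style indirection: a table of lock ops, native by default. */
struct lock_ops_sketch {
	void (*spin_lock)(struct arch_spinlock *lock);
	void (*spin_unlock)(struct arch_spinlock *lock);
};

static struct lock_ops_sketch lock_ops_sketch = {
	.spin_lock	= native_spin_lock,
	.spin_unlock	= native_spin_unlock,
};

/* The renamed entry points: arch_spin_*() just dispatch through the table. */
static inline void arch_spin_lock(struct arch_spinlock *lock)
{
	lock_ops_sketch.spin_lock(lock);
}

static inline void arch_spin_unlock(struct arch_spinlock *lock)
{
	lock_ops_sketch.spin_unlock(lock);
}

int main(void)
{
	struct arch_spinlock lock = { 0, 0 };

	arch_spin_lock(&lock);
	printf("head=%u tail=%u (locked)\n", lock.head, lock.tail);
	arch_spin_unlock(&lock);
	printf("head=%u tail=%u (unlocked)\n", lock.head, lock.tail);
	return 0;
}

The point of the indirection is that a hypervisor-aware kernel can replace the spin_lock/spin_unlock pointers with paravirtualized implementations at boot, while bare metal keeps the native ticket lock; that is the role the pv_lock_ops table in paravirt.h plays when CONFIG_PARAVIRT_SPINLOCKS is enabled, with spinlock.h providing the direct ticket-lock versions otherwise.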