diff options
Diffstat (limited to 'arch/s390')
 arch/s390/include/asm/spinlock.h | 34 +++++++++++++++++-----------------
 arch/s390/lib/spinlock.c         | 22 +++++++++++-----------
 2 files changed, 28 insertions(+), 28 deletions(-)
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 6121fa4b83d..a94c146657a 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -52,27 +52,27 @@ _raw_compare_and_swap(volatile unsigned int *lock,
  * (the type definitions are in asm/spinlock_types.h)
  */
-#define __raw_spin_is_locked(x) ((x)->owner_cpu != 0)
-#define __raw_spin_unlock_wait(lock) \
-	do { while (__raw_spin_is_locked(lock)) \
-		 _raw_spin_relax(lock); } while (0)
+#define arch_spin_is_locked(x) ((x)->owner_cpu != 0)
+#define arch_spin_unlock_wait(lock) \
+	do { while (arch_spin_is_locked(lock)) \
+		 arch_spin_relax(lock); } while (0)
 
-extern void _raw_spin_lock_wait(arch_spinlock_t *);
-extern void _raw_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
-extern int _raw_spin_trylock_retry(arch_spinlock_t *);
-extern void _raw_spin_relax(arch_spinlock_t *lock);
+extern void arch_spin_lock_wait(arch_spinlock_t *);
+extern void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
+extern int arch_spin_trylock_retry(arch_spinlock_t *);
+extern void arch_spin_relax(arch_spinlock_t *lock);
 
-static inline void __raw_spin_lock(arch_spinlock_t *lp)
+static inline void arch_spin_lock(arch_spinlock_t *lp)
 {
 	int old;
 
 	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
 	if (likely(old == 0))
 		return;
-	_raw_spin_lock_wait(lp);
+	arch_spin_lock_wait(lp);
 }
 
-static inline void __raw_spin_lock_flags(arch_spinlock_t *lp,
+static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
 					 unsigned long flags)
 {
 	int old;
@@ -80,20 +80,20 @@ static inline void __raw_spin_lock_flags(arch_spinlock_t *lp,
 	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
 	if (likely(old == 0))
 		return;
-	_raw_spin_lock_wait_flags(lp, flags);
+	arch_spin_lock_wait_flags(lp, flags);
 }
 
-static inline int __raw_spin_trylock(arch_spinlock_t *lp)
+static inline int arch_spin_trylock(arch_spinlock_t *lp)
 {
 	int old;
 
 	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
 	if (likely(old == 0))
 		return 1;
-	return _raw_spin_trylock_retry(lp);
+	return arch_spin_trylock_retry(lp);
 }
 
-static inline void __raw_spin_unlock(arch_spinlock_t *lp)
+static inline void arch_spin_unlock(arch_spinlock_t *lp)
 {
 	_raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0);
 }
@@ -188,7 +188,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
 	return _raw_write_trylock_retry(rw);
 }
 
-#define _raw_read_relax(lock)	cpu_relax()
-#define _raw_write_relax(lock)	cpu_relax()
+#define arch_read_relax(lock)	cpu_relax()
+#define arch_write_relax(lock)	cpu_relax()
 
 #endif /* __ASM_SPINLOCK_H */
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index d4cbf71a607..f4596452f07 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -39,7 +39,7 @@ static inline void _raw_yield_cpu(int cpu)
 		_raw_yield();
 }
 
-void _raw_spin_lock_wait(arch_spinlock_t *lp)
+void arch_spin_lock_wait(arch_spinlock_t *lp)
 {
 	int count = spin_retry;
 	unsigned int cpu = ~smp_processor_id();
@@ -51,15 +51,15 @@ void _raw_spin_lock_wait(arch_spinlock_t *lp)
 				_raw_yield_cpu(~owner);
 			count = spin_retry;
 		}
-		if (__raw_spin_is_locked(lp))
+		if (arch_spin_is_locked(lp))
 			continue;
 		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
 			return;
 	}
 }
-EXPORT_SYMBOL(_raw_spin_lock_wait);
+EXPORT_SYMBOL(arch_spin_lock_wait);
 
-void _raw_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
+void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 {
 	int count = spin_retry;
 	unsigned int cpu = ~smp_processor_id();
@@ -72,7 +72,7 @@ void _raw_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 				_raw_yield_cpu(~owner);
 			count = spin_retry;
 		}
-		if (__raw_spin_is_locked(lp))
+		if (arch_spin_is_locked(lp))
 			continue;
 		local_irq_disable();
 		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
@@ -80,30 +80,30 @@ void _raw_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 		local_irq_restore(flags);
 	}
 }
-EXPORT_SYMBOL(_raw_spin_lock_wait_flags);
+EXPORT_SYMBOL(arch_spin_lock_wait_flags);
 
-int _raw_spin_trylock_retry(arch_spinlock_t *lp)
+int arch_spin_trylock_retry(arch_spinlock_t *lp)
 {
 	unsigned int cpu = ~smp_processor_id();
 	int count;
 
 	for (count = spin_retry; count > 0; count--) {
-		if (__raw_spin_is_locked(lp))
+		if (arch_spin_is_locked(lp))
 			continue;
 		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
 			return 1;
 	}
 	return 0;
 }
-EXPORT_SYMBOL(_raw_spin_trylock_retry);
+EXPORT_SYMBOL(arch_spin_trylock_retry);
 
-void _raw_spin_relax(arch_spinlock_t *lock)
+void arch_spin_relax(arch_spinlock_t *lock)
 {
 	unsigned int cpu = lock->owner_cpu;
 	if (cpu != 0)
 		_raw_yield_cpu(~cpu);
 }
-EXPORT_SYMBOL(_raw_spin_relax);
+EXPORT_SYMBOL(arch_spin_relax);
 
 void _raw_read_lock_wait(raw_rwlock_t *rw)
 {