| field | value | date |
|---|---|---|
| author | Heiko Carstens <heiko.carstens@de.ibm.com> | 2009-08-31 14:43:36 +0200 |
| committer | Ingo Molnar <mingo@elte.hu> | 2009-08-31 18:08:50 +0200 |
| commit | 69d0ee7377eef808e34ba5542b554ec97244b871 | |
| tree | f46c756b897cf51497fda2ad22f9f12a3512e23b /include/linux/spinlock_api_smp.h | |
| parent | 0ee000e5e8fa2e5c760250be0d78d5906e3eb94b | |
locking: Move spinlock function bodies to header file
Move the spinlock function bodies to a header file by creating a
static inline version of each variant, and use the inline versions
in the out-of-line code.

This should make no functional difference; it just means the
spinlock code can now also be used to generate inlined spinlock
code.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Horst Hartmann <horsth@linux.vnet.ibm.com>
Cc: Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: David Miller <davem@davemloft.net>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Roman Zippel <zippel@linux-m68k.org>
Cc: <linux-arch@vger.kernel.org>
LKML-Reference: <20090831124417.859022429@de.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
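The diffstat below is limited to the header; the other half of the change, in kernel/spinlock.c, is not shown here. There, each out-of-line lock function becomes a thin wrapper around the corresponding new inline variant. A minimal sketch of that pattern, assuming the 2.6.31-era function names this patch uses:

```c
/*
 * Sketch of the wrapper pattern in kernel/spinlock.c after this
 * patch (illustrative -- that file is outside this diffstat view):
 * the exported out-of-line function simply calls the static inline
 * body that now lives in spinlock_api_smp.h.
 */
void __lockfunc _spin_lock(spinlock_t *lock)
{
	__spin_lock(lock);
}
EXPORT_SYMBOL(_spin_lock);

int __lockfunc _spin_trylock(spinlock_t *lock)
{
	return __spin_trylock(lock);
}
EXPORT_SYMBOL(_spin_trylock);
```

Because the inline version carries the full body (preempt handling, lockdep annotations, LOCK_CONTENDED), the out-of-line wrapper generates the same code as before the move.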
Diffstat (limited to 'include/linux/spinlock_api_smp.h')
| mode | file | lines added |
|---|---|---|
| -rw-r--r-- | include/linux/spinlock_api_smp.h | +263 |

1 file changed, 263 insertions(+), 0 deletions(-)
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index d79845d034b..6b108f5fb14 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -60,4 +60,267 @@ void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
 void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
 							__releases(lock);
+static inline int __spin_trylock(spinlock_t *lock)
+{
+	preempt_disable();
+	if (_raw_spin_trylock(lock)) {
+		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+		return 1;
+	}
+	preempt_enable();
+	return 0;
+}
+
+static inline int __read_trylock(rwlock_t *lock)
+{
+	preempt_disable();
+	if (_raw_read_trylock(lock)) {
+		rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
+		return 1;
+	}
+	preempt_enable();
+	return 0;
+}
+
+static inline int __write_trylock(rwlock_t *lock)
+{
+	preempt_disable();
+	if (_raw_write_trylock(lock)) {
+		rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+		return 1;
+	}
+	preempt_enable();
+	return 0;
+}
+
+/*
+ * If lockdep is enabled then we use the non-preemption spin-ops
+ * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
+ * not re-enabled during lock-acquire (which the preempt-spin-ops do):
+ */
+#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
+
+static inline void __read_lock(rwlock_t *lock)
+{
+	preempt_disable();
+	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
+}
+
+static inline unsigned long __spin_lock_irqsave(spinlock_t *lock)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	preempt_disable();
+	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	/*
+	 * On lockdep we dont want the hand-coded irq-enable of
+	 * _raw_spin_lock_flags() code, because lockdep assumes
+	 * that interrupts are not re-enabled during lock-acquire:
+	 */
+#ifdef CONFIG_LOCKDEP
+	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+#else
+	_raw_spin_lock_flags(lock, &flags);
+#endif
+	return flags;
+}
+
+static inline void __spin_lock_irq(spinlock_t *lock)
+{
+	local_irq_disable();
+	preempt_disable();
+	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+}
+
+static inline void __spin_lock_bh(spinlock_t *lock)
+{
+	local_bh_disable();
+	preempt_disable();
+	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+}
+
+static inline unsigned long __read_lock_irqsave(rwlock_t *lock)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	preempt_disable();
+	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+	LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock,
+			     _raw_read_lock_flags, &flags);
+	return flags;
+}
+
+static inline void __read_lock_irq(rwlock_t *lock)
+{
+	local_irq_disable();
+	preempt_disable();
+	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
+}
+
+static inline void __read_lock_bh(rwlock_t *lock)
+{
+	local_bh_disable();
+	preempt_disable();
+	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
+	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
+}
+
+static inline unsigned long __write_lock_irqsave(rwlock_t *lock)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	preempt_disable();
+	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock,
+			     _raw_write_lock_flags, &flags);
+	return flags;
+}
+
+static inline void __write_lock_irq(rwlock_t *lock)
+{
+	local_irq_disable();
+	preempt_disable();
+	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+}
+
+static inline void __write_lock_bh(rwlock_t *lock)
+{
+	local_bh_disable();
+	preempt_disable();
+	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+}
+
+static inline void __spin_lock(spinlock_t *lock)
+{
+	preempt_disable();
+	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+}
+
+static inline void __write_lock(rwlock_t *lock)
+{
+	preempt_disable();
+	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+}
+
+#endif /* CONFIG_PREEMPT */
+
+static inline void __spin_unlock(spinlock_t *lock)
+{
+	spin_release(&lock->dep_map, 1, _RET_IP_);
+	_raw_spin_unlock(lock);
+	preempt_enable();
+}
+
+static inline void __write_unlock(rwlock_t *lock)
+{
+	rwlock_release(&lock->dep_map, 1, _RET_IP_);
+	_raw_write_unlock(lock);
+	preempt_enable();
+}
+
+static inline void __read_unlock(rwlock_t *lock)
+{
+	rwlock_release(&lock->dep_map, 1, _RET_IP_);
+	_raw_read_unlock(lock);
+	preempt_enable();
+}
+
+static inline void __spin_unlock_irqrestore(spinlock_t *lock,
+					    unsigned long flags)
+{
+	spin_release(&lock->dep_map, 1, _RET_IP_);
+	_raw_spin_unlock(lock);
+	local_irq_restore(flags);
+	preempt_enable();
+}
+
+static inline void __spin_unlock_irq(spinlock_t *lock)
+{
+	spin_release(&lock->dep_map, 1, _RET_IP_);
+	_raw_spin_unlock(lock);
+	local_irq_enable();
+	preempt_enable();
+}
+
+static inline void __spin_unlock_bh(spinlock_t *lock)
+{
+	spin_release(&lock->dep_map, 1, _RET_IP_);
+	_raw_spin_unlock(lock);
+	preempt_enable_no_resched();
+	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+}
+
+static inline void __read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+{
+	rwlock_release(&lock->dep_map, 1, _RET_IP_);
+	_raw_read_unlock(lock);
+	local_irq_restore(flags);
+	preempt_enable();
+}
+
+static inline void __read_unlock_irq(rwlock_t *lock)
+{
+	rwlock_release(&lock->dep_map, 1, _RET_IP_);
+	_raw_read_unlock(lock);
+	local_irq_enable();
+	preempt_enable();
+}
+
+static inline void __read_unlock_bh(rwlock_t *lock)
+{
+	rwlock_release(&lock->dep_map, 1, _RET_IP_);
+	_raw_read_unlock(lock);
+	preempt_enable_no_resched();
+	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+}
+
+static inline void __write_unlock_irqrestore(rwlock_t *lock,
+					     unsigned long flags)
+{
+	rwlock_release(&lock->dep_map, 1, _RET_IP_);
+	_raw_write_unlock(lock);
+	local_irq_restore(flags);
+	preempt_enable();
+}
+
+static inline void __write_unlock_irq(rwlock_t *lock)
+{
+	rwlock_release(&lock->dep_map, 1, _RET_IP_);
+	_raw_write_unlock(lock);
+	local_irq_enable();
+	preempt_enable();
+}
+
+static inline void __write_unlock_bh(rwlock_t *lock)
+{
+	rwlock_release(&lock->dep_map, 1, _RET_IP_);
+	_raw_write_unlock(lock);
+	preempt_enable_no_resched();
+	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+}
+
+static inline int __spin_trylock_bh(spinlock_t *lock)
+{
+	local_bh_disable();
+	preempt_disable();
+	if (_raw_spin_trylock(lock)) {
+		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+		return 1;
+	}
+	preempt_enable_no_resched();
+	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+	return 0;
+}
+
 #endif /* __LINUX_SPINLOCK_API_SMP_H */
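What "generate inlined spinlock code" means in practice: with the bodies in the header, a later change can map the out-of-line entry points directly onto the inline variants, so the lock body is emitted at the call site. A hedged sketch of that direction, assuming the CONFIG_INLINE_* option names introduced by the follow-up patches (they are not part of this commit):

```c
/*
 * Hedged sketch: once the bodies live in spinlock_api_smp.h, a
 * config option can short-circuit the out-of-line call entirely.
 * The CONFIG_INLINE_SPIN_* names come from follow-up patches,
 * not from this commit.
 */
#ifdef CONFIG_INLINE_SPIN_LOCK
#define _spin_lock(lock) __spin_lock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK
#define _spin_unlock(lock) __spin_unlock(lock)
#endif
```

With such a mapping, spin_lock() expands through _spin_lock() into the inline __spin_lock() body and callers pay no function-call overhead; without it, behavior is unchanged, which is exactly the "should make no functional difference" claim in the commit message.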