| field | value | date |
|---|---|---|
| author | Heiko Carstens <heiko.carstens@de.ibm.com> | 2009-08-31 14:43:37 +0200 |
| committer | Ingo Molnar <mingo@elte.hu> | 2009-08-31 18:08:50 +0200 |
| commit | 892a7c67c12da63fa4b51728bbe5b982356a090a (patch) | |
| tree | ba6cb9cf1be394428d9ef2596b0575e28ab0b19a | |
| parent | 69d0ee7377eef808e34ba5542b554ec97244b871 (diff) | |
| download | olio-linux-3.10-892a7c67c12da63fa4b51728bbe5b982356a090a.tar.xz olio-linux-3.10-892a7c67c12da63fa4b51728bbe5b982356a090a.zip | |
locking: Allow arch-inlined spinlocks
This allows an architecture to specify, per lock variant, whether
the locking code should be kept out-of-line or inlined.

If an architecture wants out-of-line locking code, no change is
needed. To force inlining of e.g. spin_lock(), the line:

  #define __always_inline__spin_lock

needs to be added to arch/<...>/include/asm/spinlock.h.

If CONFIG_DEBUG_SPINLOCK or CONFIG_GENERIC_LOCKBREAK is defined,
the per-architecture defines are (partly) ignored and out-of-line
spinlock code is still generated.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Horst Hartmann <horsth@linux.vnet.ibm.com>
Cc: Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: David Miller <davem@davemloft.net>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Roman Zippel <zippel@linux-m68k.org>
Cc: <linux-arch@vger.kernel.org>
LKML-Reference: <20090831124418.375299024@de.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
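
To make the opt-in concrete: an architecture that wanted, say, spin_lock()
and spin_unlock() inlined would add the matching defines to its
asm/spinlock.h. A minimal sketch, assuming a hypothetical architecture
"foo" (the path and the chosen variants are illustrative, not part of
this patch):

  /* arch/foo/include/asm/spinlock.h -- hypothetical example */

  /* Opt in to inlining for selected lock variants; any variant
   * without such a define keeps its out-of-line implementation. */
  #define __always_inline__spin_lock
  #define __always_inline__spin_unlock

The defines only need to be visible by the time
include/linux/spinlock_api_smp.h is preprocessed, which is why adding
them to asm/spinlock.h suffices: linux/spinlock.h pulls in the
architecture header before spinlock_api_smp.h.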
| -rw-r--r-- | include/linux/spinlock_api_smp.h | 119 |
| -rw-r--r-- | kernel/spinlock.c | 56 |
2 files changed, 175 insertions, 0 deletions
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 6b108f5fb14..1a411e3fab9 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -60,6 +60,125 @@ void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
 void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
 							__releases(lock);
+#ifndef CONFIG_DEBUG_SPINLOCK
+#ifndef CONFIG_GENERIC_LOCKBREAK
+
+#ifdef __always_inline__spin_lock
+#define _spin_lock(lock) __spin_lock(lock)
+#endif
+
+#ifdef __always_inline__read_lock
+#define _read_lock(lock) __read_lock(lock)
+#endif
+
+#ifdef __always_inline__write_lock
+#define _write_lock(lock) __write_lock(lock)
+#endif
+
+#ifdef __always_inline__spin_lock_bh
+#define _spin_lock_bh(lock) __spin_lock_bh(lock)
+#endif
+
+#ifdef __always_inline__read_lock_bh
+#define _read_lock_bh(lock) __read_lock_bh(lock)
+#endif
+
+#ifdef __always_inline__write_lock_bh
+#define _write_lock_bh(lock) __write_lock_bh(lock)
+#endif
+
+#ifdef __always_inline__spin_lock_irq
+#define _spin_lock_irq(lock) __spin_lock_irq(lock)
+#endif
+
+#ifdef __always_inline__read_lock_irq
+#define _read_lock_irq(lock) __read_lock_irq(lock)
+#endif
+
+#ifdef __always_inline__write_lock_irq
+#define _write_lock_irq(lock) __write_lock_irq(lock)
+#endif
+
+#ifdef __always_inline__spin_lock_irqsave
+#define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock)
+#endif
+
+#ifdef __always_inline__read_lock_irqsave
+#define _read_lock_irqsave(lock) __read_lock_irqsave(lock)
+#endif
+
+#ifdef __always_inline__write_lock_irqsave
+#define _write_lock_irqsave(lock) __write_lock_irqsave(lock)
+#endif
+
+#endif /* !CONFIG_GENERIC_LOCKBREAK */
+
+#ifdef __always_inline__spin_trylock
+#define _spin_trylock(lock) __spin_trylock(lock)
+#endif
+
+#ifdef __always_inline__read_trylock
+#define _read_trylock(lock) __read_trylock(lock)
+#endif
+
+#ifdef __always_inline__write_trylock
+#define _write_trylock(lock) __write_trylock(lock)
+#endif
+
+#ifdef __always_inline__spin_trylock_bh
+#define _spin_trylock_bh(lock) __spin_trylock_bh(lock)
+#endif
+
+#ifdef __always_inline__spin_unlock
+#define _spin_unlock(lock) __spin_unlock(lock)
+#endif
+
+#ifdef __always_inline__read_unlock
+#define _read_unlock(lock) __read_unlock(lock)
+#endif
+
+#ifdef __always_inline__write_unlock
+#define _write_unlock(lock) __write_unlock(lock)
+#endif
+
+#ifdef __always_inline__spin_unlock_bh
+#define _spin_unlock_bh(lock) __spin_unlock_bh(lock)
+#endif
+
+#ifdef __always_inline__read_unlock_bh
+#define _read_unlock_bh(lock) __read_unlock_bh(lock)
+#endif
+
+#ifdef __always_inline__write_unlock_bh
+#define _write_unlock_bh(lock) __write_unlock_bh(lock)
+#endif
+
+#ifdef __always_inline__spin_unlock_irq
+#define _spin_unlock_irq(lock) __spin_unlock_irq(lock)
+#endif
+
+#ifdef __always_inline__read_unlock_irq
+#define _read_unlock_irq(lock) __read_unlock_irq(lock)
+#endif
+
+#ifdef __always_inline__write_unlock_irq
+#define _write_unlock_irq(lock) __write_unlock_irq(lock)
+#endif
+
+#ifdef __always_inline__spin_unlock_irqrestore
+#define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags)
+#endif
+
+#ifdef __always_inline__read_unlock_irqrestore
+#define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags)
+#endif
+
+#ifdef __always_inline__write_unlock_irqrestore
+#define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags)
+#endif
+
+#endif /* CONFIG_DEBUG_SPINLOCK */
+
 static inline int
 __spin_trylock(spinlock_t *lock)
 {
 	preempt_disable();
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 2c000f5c070..5ddab730cb2 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -21,23 +21,29 @@
 #include <linux/debug_locks.h>
 #include <linux/module.h>
 
+#ifndef _spin_trylock
 int __lockfunc _spin_trylock(spinlock_t *lock)
 {
 	return __spin_trylock(lock);
 }
 EXPORT_SYMBOL(_spin_trylock);
+#endif
 
+#ifndef _read_trylock
 int __lockfunc _read_trylock(rwlock_t *lock)
 {
 	return __read_trylock(lock);
 }
 EXPORT_SYMBOL(_read_trylock);
+#endif
 
+#ifndef _write_trylock
 int __lockfunc _write_trylock(rwlock_t *lock)
 {
 	return __write_trylock(lock);
 }
 EXPORT_SYMBOL(_write_trylock);
+#endif
 
 /*
  * If lockdep is enabled then we use the non-preemption spin-ops
@@ -46,77 +52,101 @@ EXPORT_SYMBOL(_write_trylock);
  */
 #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
 
+#ifndef _read_lock
 void __lockfunc _read_lock(rwlock_t *lock)
 {
 	__read_lock(lock);
 }
 EXPORT_SYMBOL(_read_lock);
+#endif
 
+#ifndef _spin_lock_irqsave
 unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
 {
 	return __spin_lock_irqsave(lock);
 }
 EXPORT_SYMBOL(_spin_lock_irqsave);
+#endif
 
+#ifndef _spin_lock_irq
 void __lockfunc _spin_lock_irq(spinlock_t *lock)
 {
 	__spin_lock_irq(lock);
 }
 EXPORT_SYMBOL(_spin_lock_irq);
+#endif
 
+#ifndef _spin_lock_bh
 void __lockfunc _spin_lock_bh(spinlock_t *lock)
 {
 	__spin_lock_bh(lock);
 }
 EXPORT_SYMBOL(_spin_lock_bh);
+#endif
 
+#ifndef _read_lock_irqsave
 unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
 {
 	return __read_lock_irqsave(lock);
 }
 EXPORT_SYMBOL(_read_lock_irqsave);
+#endif
 
+#ifndef _read_lock_irq
 void __lockfunc _read_lock_irq(rwlock_t *lock)
 {
 	__read_lock_irq(lock);
 }
 EXPORT_SYMBOL(_read_lock_irq);
+#endif
 
+#ifndef _read_lock_bh
 void __lockfunc _read_lock_bh(rwlock_t *lock)
 {
 	__read_lock_bh(lock);
 }
 EXPORT_SYMBOL(_read_lock_bh);
+#endif
 
+#ifndef _write_lock_irqsave
 unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
 {
 	return __write_lock_irqsave(lock);
 }
 EXPORT_SYMBOL(_write_lock_irqsave);
+#endif
 
+#ifndef _write_lock_irq
 void __lockfunc _write_lock_irq(rwlock_t *lock)
 {
 	__write_lock_irq(lock);
 }
 EXPORT_SYMBOL(_write_lock_irq);
+#endif
 
+#ifndef _write_lock_bh
 void __lockfunc _write_lock_bh(rwlock_t *lock)
 {
 	__write_lock_bh(lock);
 }
 EXPORT_SYMBOL(_write_lock_bh);
+#endif
 
+#ifndef _spin_lock
 void __lockfunc _spin_lock(spinlock_t *lock)
 {
 	__spin_lock(lock);
 }
 EXPORT_SYMBOL(_spin_lock);
+#endif
 
+#ifndef _write_lock
 void __lockfunc _write_lock(rwlock_t *lock)
 {
 	__write_lock(lock);
 }
 EXPORT_SYMBOL(_write_lock);
+#endif
 
 #else /* CONFIG_PREEMPT: */
 
@@ -242,83 +272,109 @@ EXPORT_SYMBOL(_spin_lock_nest_lock);
 #endif
 
+#ifndef _spin_unlock
 void __lockfunc _spin_unlock(spinlock_t *lock)
 {
 	__spin_unlock(lock);
 }
 EXPORT_SYMBOL(_spin_unlock);
+#endif
 
+#ifndef _write_unlock
 void __lockfunc _write_unlock(rwlock_t *lock)
 {
 	__write_unlock(lock);
 }
 EXPORT_SYMBOL(_write_unlock);
+#endif
 
+#ifndef _read_unlock
 void __lockfunc _read_unlock(rwlock_t *lock)
 {
 	__read_unlock(lock);
 }
 EXPORT_SYMBOL(_read_unlock);
+#endif
 
+#ifndef _spin_unlock_irqrestore
 void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
 {
 	__spin_unlock_irqrestore(lock, flags);
 }
 EXPORT_SYMBOL(_spin_unlock_irqrestore);
+#endif
 
+#ifndef _spin_unlock_irq
 void __lockfunc _spin_unlock_irq(spinlock_t *lock)
 {
 	__spin_unlock_irq(lock);
 }
 EXPORT_SYMBOL(_spin_unlock_irq);
+#endif
 
+#ifndef _spin_unlock_bh
 void __lockfunc _spin_unlock_bh(spinlock_t *lock)
 {
 	__spin_unlock_bh(lock);
 }
 EXPORT_SYMBOL(_spin_unlock_bh);
+#endif
 
+#ifndef _read_unlock_irqrestore
 void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
 {
 	__read_unlock_irqrestore(lock, flags);
 }
 EXPORT_SYMBOL(_read_unlock_irqrestore);
+#endif
 
+#ifndef _read_unlock_irq
 void __lockfunc _read_unlock_irq(rwlock_t *lock)
 {
 	__read_unlock_irq(lock);
 }
 EXPORT_SYMBOL(_read_unlock_irq);
+#endif
 
+#ifndef _read_unlock_bh
 void __lockfunc _read_unlock_bh(rwlock_t *lock)
 {
 	__read_unlock_bh(lock);
 }
 EXPORT_SYMBOL(_read_unlock_bh);
+#endif
 
+#ifndef _write_unlock_irqrestore
 void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
 {
 	__write_unlock_irqrestore(lock, flags);
 }
 EXPORT_SYMBOL(_write_unlock_irqrestore);
+#endif
 
+#ifndef _write_unlock_irq
 void __lockfunc _write_unlock_irq(rwlock_t *lock)
 {
 	__write_unlock_irq(lock);
 }
 EXPORT_SYMBOL(_write_unlock_irq);
+#endif
 
+#ifndef _write_unlock_bh
 void __lockfunc _write_unlock_bh(rwlock_t *lock)
 {
 	__write_unlock_bh(lock);
 }
 EXPORT_SYMBOL(_write_unlock_bh);
+#endif
 
+#ifndef _spin_trylock_bh
 int __lockfunc _spin_trylock_bh(spinlock_t *lock)
 {
 	return __spin_trylock_bh(lock);
 }
 EXPORT_SYMBOL(_spin_trylock_bh);
+#endif
 
 notrace int in_lock_functions(unsigned long addr)
 {
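
To see how the two halves of the patch interlock, here is a compressed,
self-contained sketch of the pattern. The names are hypothetical, and a
single file stands in for the arch header, include/linux/spinlock_api_smp.h,
and kernel/spinlock.c:

  #include <stdio.h>

  /* 1) "arch" header: the architecture opts in to inlining _lock(). */
  #define __always_inline__lock

  /* 2) generic header: the real implementation, always available inline. */
  static inline void __lock(void)
  {
          puts("lock taken");
  }

  /* If the arch opted in, _lock() becomes a macro alias for __lock(),
   * so every caller compiles the inline body directly. */
  #ifdef __always_inline__lock
  #define _lock() __lock()
  #endif

  /* 3) "kernel/spinlock.c": the out-of-line wrapper is compiled only
   * when _lock was NOT redefined as a macro above. */
  #ifndef _lock
  void _lock(void)
  {
          __lock();
  }
  #endif

  int main(void)
  {
          _lock();        /* expands to __lock(); the compiler can inline it */
          return 0;
  }

Drop the opt-in define and the picture flips: the #ifdef alias is skipped,
#ifndef _lock becomes true, and the out-of-line wrapper is emitted instead,
which in the kernel case is the #ifndef/EXPORT_SYMBOL() pairing seen in
kernel/spinlock.c above.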