| author | Heiko Carstens <heiko.carstens@de.ibm.com> | 2009-08-31 14:43:36 +0200 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2009-08-31 18:08:50 +0200 |
| commit | 69d0ee7377eef808e34ba5542b554ec97244b871 | (patch) |
| tree | f46c756b897cf51497fda2ad22f9f12a3512e23b | /kernel/spinlock.c |
| parent | 0ee000e5e8fa2e5c760250be0d78d5906e3eb94b | (diff) |
locking: Move spinlock function bodies to header file
Move the spinlock function bodies to a header file by creating a
static inline version of each variant, and use the inline versions
in the out-of-line code.
This shouldn't make any difference, other than that the spinlock
code can now also be used to generate inlined spinlock code.
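For readers unfamiliar with the pattern, here is a minimal sketch of what the change means for one variant. The helper body is exactly the code removed from kernel/spinlock.c in the diff further down; where the inline helpers end up (presumably include/linux/spinlock_api_smp.h) is outside this kernel/spinlock.c-limited view, so treat the placement as an assumption rather than something shown here.

```c
/*
 * Header side (assumed: include/linux/spinlock_api_smp.h, not shown in
 * this diffstat). The function body becomes a static inline helper so it
 * can also be emitted inline at call sites later.
 */
static inline int __spin_trylock(spinlock_t *lock)
{
	preempt_disable();
	if (_raw_spin_trylock(lock)) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	preempt_enable();
	return 0;
}

/*
 * kernel/spinlock.c side: the exported out-of-line entry point is now a
 * thin wrapper around the inline helper, so behaviour is unchanged.
 */
int __lockfunc _spin_trylock(spinlock_t *lock)
{
	return __spin_trylock(lock);
}
EXPORT_SYMBOL(_spin_trylock);
```

Keeping the exported __lockfunc wrappers means the kernel ABI and module exports stay exactly as before; only the location of the function bodies changes.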
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Horst Hartmann <horsth@linux.vnet.ibm.com>
Cc: Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: David Miller <davem@davemloft.net>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Roman Zippel <zippel@linux-m68k.org>
Cc: <linux-arch@vger.kernel.org>
LKML-Reference: <20090831124417.859022429@de.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/spinlock.c')
| -rw-r--r-- | kernel/spinlock.c | 174 | 
1 file changed, 28 insertions(+), 146 deletions(-)
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 7932653c4eb..2c000f5c070 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -23,40 +23,19 @@
 int __lockfunc _spin_trylock(spinlock_t *lock)
 {
-	preempt_disable();
-	if (_raw_spin_trylock(lock)) {
-		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
-		return 1;
-	}
-
-	preempt_enable();
-	return 0;
+	return __spin_trylock(lock);
 }
 EXPORT_SYMBOL(_spin_trylock);

 int __lockfunc _read_trylock(rwlock_t *lock)
 {
-	preempt_disable();
-	if (_raw_read_trylock(lock)) {
-		rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
-		return 1;
-	}
-
-	preempt_enable();
-	return 0;
+	return __read_trylock(lock);
 }
 EXPORT_SYMBOL(_read_trylock);

 int __lockfunc _write_trylock(rwlock_t *lock)
 {
-	preempt_disable();
-	if (_raw_write_trylock(lock)) {
-		rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
-		return 1;
-	}
-
-	preempt_enable();
-	return 0;
+	return __write_trylock(lock);
 }
 EXPORT_SYMBOL(_write_trylock);
@@ -69,129 +48,74 @@ EXPORT_SYMBOL(_write_trylock);

 void __lockfunc _read_lock(rwlock_t *lock)
 {
-	preempt_disable();
-	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
+	__read_lock(lock);
 }
 EXPORT_SYMBOL(_read_lock);

 unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
 {
-	unsigned long flags;
-
-	local_irq_save(flags);
-	preempt_disable();
-	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	/*
-	 * On lockdep we dont want the hand-coded irq-enable of
-	 * _raw_spin_lock_flags() code, because lockdep assumes
-	 * that interrupts are not re-enabled during lock-acquire:
-	 */
-#ifdef CONFIG_LOCKDEP
-	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
-#else
-	_raw_spin_lock_flags(lock, &flags);
-#endif
-	return flags;
+	return __spin_lock_irqsave(lock);
 }
 EXPORT_SYMBOL(_spin_lock_irqsave);

 void __lockfunc _spin_lock_irq(spinlock_t *lock)
 {
-	local_irq_disable();
-	preempt_disable();
-	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+	__spin_lock_irq(lock);
 }
 EXPORT_SYMBOL(_spin_lock_irq);

 void __lockfunc _spin_lock_bh(spinlock_t *lock)
 {
-	local_bh_disable();
-	preempt_disable();
-	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+	__spin_lock_bh(lock);
 }
 EXPORT_SYMBOL(_spin_lock_bh);

 unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
 {
-	unsigned long flags;
-
-	local_irq_save(flags);
-	preempt_disable();
-	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock,
-			     _raw_read_lock_flags, &flags);
-	return flags;
+	return __read_lock_irqsave(lock);
 }
 EXPORT_SYMBOL(_read_lock_irqsave);

 void __lockfunc _read_lock_irq(rwlock_t *lock)
 {
-	local_irq_disable();
-	preempt_disable();
-	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
+	__read_lock_irq(lock);
 }
 EXPORT_SYMBOL(_read_lock_irq);

 void __lockfunc _read_lock_bh(rwlock_t *lock)
 {
-	local_bh_disable();
-	preempt_disable();
-	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
+	__read_lock_bh(lock);
 }
 EXPORT_SYMBOL(_read_lock_bh);

 unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
 {
-	unsigned long flags;
-
-	local_irq_save(flags);
-	preempt_disable();
-	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock,
-			     _raw_write_lock_flags, &flags);
-	return flags;
+	return __write_lock_irqsave(lock);
 }
 EXPORT_SYMBOL(_write_lock_irqsave);

 void __lockfunc _write_lock_irq(rwlock_t *lock)
 {
-	local_irq_disable();
-	preempt_disable();
-	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+	__write_lock_irq(lock);
 }
 EXPORT_SYMBOL(_write_lock_irq);

 void __lockfunc _write_lock_bh(rwlock_t *lock)
 {
-	local_bh_disable();
-	preempt_disable();
-	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+	__write_lock_bh(lock);
 }
 EXPORT_SYMBOL(_write_lock_bh);

 void __lockfunc _spin_lock(spinlock_t *lock)
 {
-	preempt_disable();
-	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+	__spin_lock(lock);
 }
-
 EXPORT_SYMBOL(_spin_lock);

 void __lockfunc _write_lock(rwlock_t *lock)
 {
-	preempt_disable();
-	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
+	__write_lock(lock);
 }
-
 EXPORT_SYMBOL(_write_lock);

 #else /* CONFIG_PREEMPT: */
@@ -320,121 +244,79 @@ EXPORT_SYMBOL(_spin_lock_nest_lock);

 void __lockfunc _spin_unlock(spinlock_t *lock)
 {
-	spin_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_spin_unlock(lock);
-	preempt_enable();
+	__spin_unlock(lock);
 }
 EXPORT_SYMBOL(_spin_unlock);

 void __lockfunc _write_unlock(rwlock_t *lock)
 {
-	rwlock_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_write_unlock(lock);
-	preempt_enable();
+	__write_unlock(lock);
 }
 EXPORT_SYMBOL(_write_unlock);

 void __lockfunc _read_unlock(rwlock_t *lock)
 {
-	rwlock_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_read_unlock(lock);
-	preempt_enable();
+	__read_unlock(lock);
 }
 EXPORT_SYMBOL(_read_unlock);

 void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
 {
-	spin_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_spin_unlock(lock);
-	local_irq_restore(flags);
-	preempt_enable();
+	__spin_unlock_irqrestore(lock, flags);
 }
 EXPORT_SYMBOL(_spin_unlock_irqrestore);

 void __lockfunc _spin_unlock_irq(spinlock_t *lock)
 {
-	spin_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_spin_unlock(lock);
-	local_irq_enable();
-	preempt_enable();
+	__spin_unlock_irq(lock);
 }
 EXPORT_SYMBOL(_spin_unlock_irq);

 void __lockfunc _spin_unlock_bh(spinlock_t *lock)
 {
-	spin_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_spin_unlock(lock);
-	preempt_enable_no_resched();
-	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+	__spin_unlock_bh(lock);
 }
 EXPORT_SYMBOL(_spin_unlock_bh);

 void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
 {
-	rwlock_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_read_unlock(lock);
-	local_irq_restore(flags);
-	preempt_enable();
+	__read_unlock_irqrestore(lock, flags);
 }
 EXPORT_SYMBOL(_read_unlock_irqrestore);

 void __lockfunc _read_unlock_irq(rwlock_t *lock)
 {
-	rwlock_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_read_unlock(lock);
-	local_irq_enable();
-	preempt_enable();
+	__read_unlock_irq(lock);
 }
 EXPORT_SYMBOL(_read_unlock_irq);

 void __lockfunc _read_unlock_bh(rwlock_t *lock)
 {
-	rwlock_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_read_unlock(lock);
-	preempt_enable_no_resched();
-	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+	__read_unlock_bh(lock);
 }
 EXPORT_SYMBOL(_read_unlock_bh);

 void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
 {
-	rwlock_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_write_unlock(lock);
-	local_irq_restore(flags);
-	preempt_enable();
+	__write_unlock_irqrestore(lock, flags);
 }
 EXPORT_SYMBOL(_write_unlock_irqrestore);

 void __lockfunc _write_unlock_irq(rwlock_t *lock)
 {
-	rwlock_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_write_unlock(lock);
-	local_irq_enable();
-	preempt_enable();
+	__write_unlock_irq(lock);
 }
 EXPORT_SYMBOL(_write_unlock_irq);

 void __lockfunc _write_unlock_bh(rwlock_t *lock)
 {
-	rwlock_release(&lock->dep_map, 1, _RET_IP_);
-	_raw_write_unlock(lock);
-	preempt_enable_no_resched();
-	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
+	__write_unlock_bh(lock);
 }
 EXPORT_SYMBOL(_write_unlock_bh);

 int __lockfunc _spin_trylock_bh(spinlock_t *lock)
 {
-	local_bh_disable();
-	preempt_disable();
-	if (_raw_spin_trylock(lock)) {
-		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
-		return 1;
-	}
-
-	preempt_enable_no_resched();
-	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
-	return 0;
+	return __spin_trylock_bh(lock);
 }
 EXPORT_SYMBOL(_spin_trylock_bh);
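As an illustration of the payoff mentioned in the commit message, later work in this area (not part of this commit) can map the public API straight onto the inline helpers once the bodies live in a header. A hedged sketch, with the config symbol taken from the later arch-inlined-spinlocks work rather than from this patch:

```c
/*
 * Hypothetical sketch of the follow-up step, not shown in this commit:
 * when an architecture opts in to inlined spinlocks, the out-of-line
 * __lockfunc call can be bypassed entirely and callers get the inline
 * helper expanded at the call site.
 */
#ifdef CONFIG_INLINE_SPIN_TRYLOCK
#define _spin_trylock(lock)	__spin_trylock(lock)
#endif
```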