| field | value | date |
|---|---|---|
| author | Ingo Molnar <mingo@elte.hu> | 2006-07-03 00:24:54 -0700 |
| committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-07-03 15:27:04 -0700 |
| commit | 8a25d5debff2daee280e83e09d8c25d67c26a972 (patch) | |
| tree | 3bccfef9acb66fc62863bfd6c16493c5e8c8e394 | |
| parent | 4ea2176dfa714882e88180b474e4cbcd888b70af (diff) | |
[PATCH] lockdep: prove spinlock rwlock locking correctness
Use the lock validator framework to prove spinlock and rwlock locking
correctness.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
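
For readers new to the validator: below is a minimal, hypothetical illustration (not part of this patch) of the class of bug that CONFIG_PROVE_LOCKING is meant to prove absent. The lock names and functions are made up; the point is that the inverse ordering is reported as soon as both paths have run once, even if the deadlock itself never triggers.

```c
/*
 * Hypothetical example: two spinlocks taken in opposite order on two
 * code paths.  With CONFIG_PROVE_LOCKING=y, the second path produces a
 * "possible circular locking dependency" report at runtime.
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(lock_a);
static DEFINE_SPINLOCK(lock_b);

static void path_one(void)
{
	spin_lock(&lock_a);
	spin_lock(&lock_b);	/* records the dependency a -> b */
	spin_unlock(&lock_b);
	spin_unlock(&lock_a);
}

static void path_two(void)
{
	spin_lock(&lock_b);
	spin_lock(&lock_a);	/* b -> a: inverse order, lockdep complains */
	spin_unlock(&lock_a);
	spin_unlock(&lock_b);
}
```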
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | include/asm-i386/spinlock.h | 7 |
| -rw-r--r-- | include/linux/spinlock.h | 63 |
| -rw-r--r-- | include/linux/spinlock_api_smp.h | 2 |
| -rw-r--r-- | include/linux/spinlock_api_up.h | 1 |
| -rw-r--r-- | include/linux/spinlock_types.h | 32 |
| -rw-r--r-- | include/linux/spinlock_types_up.h | 9 |
| -rw-r--r-- | include/linux/spinlock_up.h | 1 |
| -rw-r--r-- | kernel/Makefile | 1 |
| -rw-r--r-- | kernel/sched.c | 10 |
| -rw-r--r-- | kernel/spinlock.c | 79 |
| -rw-r--r-- | lib/kernel_lock.c | 7 |
| -rw-r--r-- | lib/spinlock_debug.c | 36 |
| -rw-r--r-- | net/ipv4/route.c | 3 |
13 files changed, 217 insertions, 34 deletions
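
One user-visible addition in the diff below is spin_lock_nested(). A hedged usage sketch follows; double_lock() and its locking policy are hypothetical, and SINGLE_DEPTH_NESTING is the subclass constant provided by the lockdep headers introduced in this series. Without the subclass annotation, taking two locks of the same lock class would look like self-recursion to the validator.

```c
/*
 * Hypothetical caller of the new spin_lock_nested() API: taking two locks
 * that belong to the same lock class (e.g. two instances of the same
 * object type) in a stable address order.  The non-zero subclass on the
 * second acquisition tells the validator this is not recursion on a
 * single lock.  Unlock order is not constrained by lockdep.
 */
#include <linux/spinlock.h>

static void double_lock(spinlock_t *l1, spinlock_t *l2)
{
	if (l2 < l1) {			/* always lock the lower address first */
		spinlock_t *tmp = l1;

		l1 = l2;
		l2 = tmp;
	}
	spin_lock(l1);
	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}
```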
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h
index 7e29b51bcaa..87c40f83065 100644
--- a/include/asm-i386/spinlock.h
+++ b/include/asm-i386/spinlock.h
@@ -68,6 +68,12 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 		"=m" (lock->slock) : : "memory");
 }
 
+/*
+ * It is easier for the lock validator if interrupts are not re-enabled
+ * in the middle of a lock-acquire. This is a performance feature anyway
+ * so we turn it off:
+ */
+#ifndef CONFIG_PROVE_LOCKING
 static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
 {
 	alternative_smp(
@@ -75,6 +81,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
 		__raw_spin_lock_string_up,
 		"=m" (lock->slock) : "r" (flags) : "memory");
 }
+#endif
 
 static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index ae23beef9cc..31473db92d3 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -82,14 +82,40 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
 /*
  * Pull the __raw*() functions/declarations (UP-nondebug doesnt need them):
  */
-#if defined(CONFIG_SMP)
+#ifdef CONFIG_SMP
 # include <asm/spinlock.h>
 #else
 # include <linux/spinlock_up.h>
 #endif
 
-#define spin_lock_init(lock)	do { *(lock) = SPIN_LOCK_UNLOCKED; } while (0)
-#define rwlock_init(lock)	do { *(lock) = RW_LOCK_UNLOCKED; } while (0)
+#ifdef CONFIG_DEBUG_SPINLOCK
+  extern void __spin_lock_init(spinlock_t *lock, const char *name,
+			       struct lock_class_key *key);
+# define spin_lock_init(lock)					\
+do {								\
+	static struct lock_class_key __key;			\
+								\
+	__spin_lock_init((lock), #lock, &__key);		\
+} while (0)
+
+#else
+# define spin_lock_init(lock)					\
+	do { *(lock) = SPIN_LOCK_UNLOCKED; } while (0)
+#endif
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+  extern void __rwlock_init(rwlock_t *lock, const char *name,
+			    struct lock_class_key *key);
+# define rwlock_init(lock)					\
+do {								\
+	static struct lock_class_key __key;			\
+								\
+	__rwlock_init((lock), #lock, &__key);			\
+} while (0)
+#else
+# define rwlock_init(lock)					\
+	do { *(lock) = RW_LOCK_UNLOCKED; } while (0)
+#endif
 
 #define spin_is_locked(lock)	__raw_spin_is_locked(&(lock)->raw_lock)
 
@@ -113,7 +139,6 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
 #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
  extern int _raw_spin_trylock(spinlock_t *lock);
  extern void _raw_spin_unlock(spinlock_t *lock);
-
  extern void _raw_read_lock(rwlock_t *lock);
  extern int _raw_read_trylock(rwlock_t *lock);
  extern void _raw_read_unlock(rwlock_t *lock);
@@ -121,17 +146,17 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
  extern int _raw_write_trylock(rwlock_t *lock);
  extern void _raw_write_unlock(rwlock_t *lock);
 #else
-# define _raw_spin_unlock(lock)		__raw_spin_unlock(&(lock)->raw_lock)
-# define _raw_spin_trylock(lock)	__raw_spin_trylock(&(lock)->raw_lock)
 # define _raw_spin_lock(lock)		__raw_spin_lock(&(lock)->raw_lock)
 # define _raw_spin_lock_flags(lock, flags) \
 		__raw_spin_lock_flags(&(lock)->raw_lock, *(flags))
+# define _raw_spin_trylock(lock)	__raw_spin_trylock(&(lock)->raw_lock)
+# define _raw_spin_unlock(lock)		__raw_spin_unlock(&(lock)->raw_lock)
 # define _raw_read_lock(rwlock)		__raw_read_lock(&(rwlock)->raw_lock)
-# define _raw_write_lock(rwlock)	__raw_write_lock(&(rwlock)->raw_lock)
-# define _raw_read_unlock(rwlock)	__raw_read_unlock(&(rwlock)->raw_lock)
-# define _raw_write_unlock(rwlock)	__raw_write_unlock(&(rwlock)->raw_lock)
 # define _raw_read_trylock(rwlock)	__raw_read_trylock(&(rwlock)->raw_lock)
+# define _raw_read_unlock(rwlock)	__raw_read_unlock(&(rwlock)->raw_lock)
+# define _raw_write_lock(rwlock)	__raw_write_lock(&(rwlock)->raw_lock)
 # define _raw_write_trylock(rwlock)	__raw_write_trylock(&(rwlock)->raw_lock)
+# define _raw_write_unlock(rwlock)	__raw_write_unlock(&(rwlock)->raw_lock)
 #endif
 
 #define read_can_lock(rwlock)		__raw_read_can_lock(&(rwlock)->raw_lock)
@@ -147,6 +172,13 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
 #define write_trylock(lock)		__cond_lock(_write_trylock(lock))
 
 #define spin_lock(lock)			_spin_lock(lock)
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass)
+#else
+# define spin_lock_nested(lock, subclass) _spin_lock(lock)
+#endif
+
 #define write_lock(lock)		_write_lock(lock)
 #define read_lock(lock)			_read_lock(lock)
 
@@ -172,21 +204,18 @@ extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
 /*
  * We inline the unlock functions in the nondebug case:
  */
-#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP)
+#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || \
+	!defined(CONFIG_SMP)
 # define spin_unlock(lock)		_spin_unlock(lock)
 # define read_unlock(lock)		_read_unlock(lock)
 # define write_unlock(lock)		_write_unlock(lock)
-#else
-# define spin_unlock(lock)		__raw_spin_unlock(&(lock)->raw_lock)
-# define read_unlock(lock)		__raw_read_unlock(&(lock)->raw_lock)
-# define write_unlock(lock)		__raw_write_unlock(&(lock)->raw_lock)
-#endif
-
-#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP)
 # define spin_unlock_irq(lock)		_spin_unlock_irq(lock)
 # define read_unlock_irq(lock)		_read_unlock_irq(lock)
 # define write_unlock_irq(lock)		_write_unlock_irq(lock)
 #else
+# define spin_unlock(lock)		__raw_spin_unlock(&(lock)->raw_lock)
+# define read_unlock(lock)		__raw_read_unlock(&(lock)->raw_lock)
+# define write_unlock(lock)		__raw_write_unlock(&(lock)->raw_lock)
 # define spin_unlock_irq(lock) \
     do { __raw_spin_unlock(&(lock)->raw_lock); local_irq_enable(); } while (0)
 # define read_unlock_irq(lock) \
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 78e6989ffb5..b2c4f829946 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -20,6 +20,8 @@ int in_lock_functions(unsigned long addr);
 #define assert_spin_locked(x)	BUG_ON(!spin_is_locked(x))
 
 void __lockfunc _spin_lock(spinlock_t *lock)		__acquires(spinlock_t);
+void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
+							__acquires(spinlock_t);
 void __lockfunc _read_lock(rwlock_t *lock)		__acquires(rwlock_t);
 void __lockfunc _write_lock(rwlock_t *lock)		__acquires(rwlock_t);
 void __lockfunc _spin_lock_bh(spinlock_t *lock)		__acquires(spinlock_t);
diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h
index cd81cee566f..67faa044c5f 100644
--- a/include/linux/spinlock_api_up.h
+++ b/include/linux/spinlock_api_up.h
@@ -49,6 +49,7 @@
   do { local_irq_restore(flags); __UNLOCK(lock); } while (0)
 
 #define _spin_lock(lock)			__LOCK(lock)
+#define _spin_lock_nested(lock, subclass)	__LOCK(lock)
 #define _read_lock(lock)			__LOCK(lock)
 #define _write_lock(lock)			__LOCK(lock)
 #define _spin_lock_bh(lock)			__LOCK_BH(lock)
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index f5d4ed7bc78..dc5fb69e4de 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -9,6 +9,8 @@
  * Released under the General Public License (GPL).
  */
 
+#include <linux/lockdep.h>
+
 #if defined(CONFIG_SMP)
 # include <asm/spinlock_types.h>
 #else
@@ -24,6 +26,9 @@ typedef struct {
 	unsigned int magic, owner_cpu;
 	void *owner;
 #endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map dep_map;
+#endif
 } spinlock_t;
 
 #define SPINLOCK_MAGIC		0xdead4ead
@@ -37,28 +42,47 @@ typedef struct {
 	unsigned int magic, owner_cpu;
 	void *owner;
 #endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map dep_map;
+#endif
 } rwlock_t;
 
 #define RWLOCK_MAGIC		0xdeaf1eed
 
 #define SPINLOCK_OWNER_INIT	((void *)-1L)
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define SPIN_DEP_MAP_INIT(lockname)	.dep_map = { .name = #lockname }
+#else
+# define SPIN_DEP_MAP_INIT(lockname)
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define RW_DEP_MAP_INIT(lockname)	.dep_map = { .name = #lockname }
+#else
+# define RW_DEP_MAP_INIT(lockname)
+#endif
+
 #ifdef CONFIG_DEBUG_SPINLOCK
 # define __SPIN_LOCK_UNLOCKED(lockname)					\
 	(spinlock_t)	{	.raw_lock = __RAW_SPIN_LOCK_UNLOCKED,	\
 				.magic = SPINLOCK_MAGIC,		\
 				.owner = SPINLOCK_OWNER_INIT,		\
-				.owner_cpu = -1 }
+				.owner_cpu = -1,			\
+				SPIN_DEP_MAP_INIT(lockname) }
 
 #define __RW_LOCK_UNLOCKED(lockname)					\
 	(rwlock_t)	{	.raw_lock = __RAW_RW_LOCK_UNLOCKED,	\
 				.magic = RWLOCK_MAGIC,			\
 				.owner = SPINLOCK_OWNER_INIT,		\
-				.owner_cpu = -1 }
+				.owner_cpu = -1,			\
+				RW_DEP_MAP_INIT(lockname) }
 #else
 # define __SPIN_LOCK_UNLOCKED(lockname) \
-	(spinlock_t)	{	.raw_lock = __RAW_SPIN_LOCK_UNLOCKED }
+	(spinlock_t)	{	.raw_lock = __RAW_SPIN_LOCK_UNLOCKED,	\
+				SPIN_DEP_MAP_INIT(lockname) }
 
 #define __RW_LOCK_UNLOCKED(lockname) \
-	(rwlock_t)	{	.raw_lock = __RAW_RW_LOCK_UNLOCKED }
+	(rwlock_t)	{	.raw_lock = __RAW_RW_LOCK_UNLOCKED,	\
+				RW_DEP_MAP_INIT(lockname) }
 #endif
 
 #define SPIN_LOCK_UNLOCKED	__SPIN_LOCK_UNLOCKED(old_style_spin_init)
diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h
index 04135b0e198..27644af20b7 100644
--- a/include/linux/spinlock_types_up.h
+++ b/include/linux/spinlock_types_up.h
@@ -12,10 +12,14 @@
  * Released under the General Public License (GPL).
  */
 
-#ifdef CONFIG_DEBUG_SPINLOCK
+#if defined(CONFIG_DEBUG_SPINLOCK) || \
+	defined(CONFIG_DEBUG_LOCK_ALLOC)
 
 typedef struct {
 	volatile unsigned int slock;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map dep_map;
+#endif
 } raw_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED { 1 }
@@ -30,6 +34,9 @@ typedef struct { } raw_spinlock_t;
 
 typedef struct {
 	/* no debug version on UP */
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map dep_map;
+#endif
 } raw_rwlock_t;
 
 #define __RAW_RW_LOCK_UNLOCKED { }
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index 31accf2f0b1..ea54c4c9a4e 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -18,7 +18,6 @@
  */
 
 #ifdef CONFIG_DEBUG_SPINLOCK
-
 #define __raw_spin_is_locked(x)		((x)->slock == 0)
 
 static inline void __raw_spin_lock(raw_spinlock_t *lock)
diff --git a/kernel/Makefile b/kernel/Makefile
index df6ef326369..47dbcd570cd 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
 obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
 obj-$(CONFIG_SMP) += cpu.o spinlock.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
+obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
 obj-$(CONFIG_UID16) += uid16.o
 obj-$(CONFIG_MODULES) += module.o
 obj-$(CONFIG_KALLSYMS) += kallsyms.o
diff --git a/kernel/sched.c b/kernel/sched.c
index 91182996653..ae4db0185bb 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -308,6 +308,13 @@ static inline void finish_lock_switch(runqueue_t *rq, task_t *prev)
 	/* this is a valid case when another task releases the spinlock */
 	rq->lock.owner = current;
 #endif
+	/*
+	 * If we are tracking spinlock dependencies then we have to
+	 * fix up the runqueue lock - which gets 'carried over' from
+	 * prev into current:
+	 */
+	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
+
 	spin_unlock_irq(&rq->lock);
 }
 
@@ -1778,6 +1785,7 @@ task_t * context_switch(runqueue_t *rq, task_t *prev, task_t *next)
 		WARN_ON(rq->prev_mm);
 		rq->prev_mm = oldmm;
 	}
+	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
 
 	/* Here we just switch the register state and the stack. */
 	switch_to(prev, next, prev);
@@ -4384,6 +4392,7 @@ asmlinkage long sys_sched_yield(void)
 	 * no need to preempt or enable interrupts:
 	 */
 	__release(rq->lock);
+	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
 	_raw_spin_unlock(&rq->lock);
 	preempt_enable_no_resched();
 
@@ -4447,6 +4456,7 @@ int cond_resched_lock(spinlock_t *lock)
 		spin_lock(lock);
 	}
 	if (need_resched() && __resched_legal()) {
+		spin_release(&lock->dep_map, 1, _THIS_IP_);
 		_raw_spin_unlock(lock);
 		preempt_enable_no_resched();
 		__cond_resched();
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index b31e54eadf5..bfd6ad9c033 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -13,6 +13,7 @@
 #include <linux/preempt.h>
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
+#include <linux/debug_locks.h>
 #include <linux/module.h>
 
 /*
@@ -29,8 +30,10 @@ EXPORT_SYMBOL(generic__raw_read_trylock);
 int __lockfunc _spin_trylock(spinlock_t *lock)
 {
 	preempt_disable();
-	if (_raw_spin_trylock(lock))
+	if (_raw_spin_trylock(lock)) {
+		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
 		return 1;
+	}
 
 	preempt_enable();
 	return 0;
@@ -40,8 +43,10 @@ EXPORT_SYMBOL(_spin_trylock);
 int __lockfunc _read_trylock(rwlock_t *lock)
 {
 	preempt_disable();
-	if (_raw_read_trylock(lock))
+	if (_raw_read_trylock(lock)) {
+		rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
 		return 1;
+	}
 
 	preempt_enable();
 	return 0;
@@ -51,19 +56,28 @@ EXPORT_SYMBOL(_read_trylock);
 int __lockfunc _write_trylock(rwlock_t *lock)
 {
 	preempt_disable();
-	if (_raw_write_trylock(lock))
+	if (_raw_write_trylock(lock)) {
+		rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
 		return 1;
+	}
 
 	preempt_enable();
 	return 0;
 }
 EXPORT_SYMBOL(_write_trylock);
 
-#if !defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP)
+/*
+ * If lockdep is enabled then we use the non-preemption spin-ops
+ * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
+ * not re-enabled during lock-acquire (which the preempt-spin-ops do):
+ */
+#if !defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP) || \
+	defined(CONFIG_PROVE_LOCKING)
 
 void __lockfunc _read_lock(rwlock_t *lock)
 {
 	preempt_disable();
+	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
 	_raw_read_lock(lock);
 }
 EXPORT_SYMBOL(_read_lock);
@@ -74,7 +88,17 @@ unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
 
 	local_irq_save(flags);
 	preempt_disable();
+	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	/*
+	 * On lockdep we dont want the hand-coded irq-enable of
+	 * _raw_spin_lock_flags() code, because lockdep assumes
+	 * that interrupts are not re-enabled during lock-acquire:
+	 */
+#ifdef CONFIG_PROVE_LOCKING
+	_raw_spin_lock(lock);
+#else
 	_raw_spin_lock_flags(lock, &flags);
+#endif
 	return flags;
 }
 EXPORT_SYMBOL(_spin_lock_irqsave);
@@ -83,6 +107,7 @@ void __lockfunc _spin_lock_irq(spinlock_t *lock)
 {
 	local_irq_disable();
 	preempt_disable();
+	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
 	_raw_spin_lock(lock);
 }
 EXPORT_SYMBOL(_spin_lock_irq);
@@ -91,6 +116,7 @@ void __lockfunc _spin_lock_bh(spinlock_t *lock)
 {
 	local_bh_disable();
 	preempt_disable();
+	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
 	_raw_spin_lock(lock);
 }
 EXPORT_SYMBOL(_spin_lock_bh);
@@ -101,6 +127,7 @@ unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
 
 	local_irq_save(flags);
 	preempt_disable();
+	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
 	_raw_read_lock(lock);
 	return flags;
 }
@@ -110,6 +137,7 @@ void __lockfunc _read_lock_irq(rwlock_t *lock)
 {
 	local_irq_disable();
 	preempt_disable();
+	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
 	_raw_read_lock(lock);
 }
 EXPORT_SYMBOL(_read_lock_irq);
@@ -118,6 +146,7 @@ void __lockfunc _read_lock_bh(rwlock_t *lock)
 {
 	local_bh_disable();
 	preempt_disable();
+	rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
 	_raw_read_lock(lock);
 }
 EXPORT_SYMBOL(_read_lock_bh);
@@ -128,6 +157,7 @@ unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
 
 	local_irq_save(flags);
 	preempt_disable();
+	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
 	_raw_write_lock(lock);
 	return flags;
 }
@@ -137,6 +167,7 @@ void __lockfunc _write_lock_irq(rwlock_t *lock)
 {
 	local_irq_disable();
 	preempt_disable();
+	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
 	_raw_write_lock(lock);
 }
 EXPORT_SYMBOL(_write_lock_irq);
@@ -145,6 +176,7 @@ void __lockfunc _write_lock_bh(rwlock_t *lock)
 {
 	local_bh_disable();
 	preempt_disable();
+	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
 	_raw_write_lock(lock);
 }
 EXPORT_SYMBOL(_write_lock_bh);
@@ -152,6 +184,7 @@ EXPORT_SYMBOL(_write_lock_bh);
 void __lockfunc _spin_lock(spinlock_t *lock)
 {
 	preempt_disable();
+	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
 	_raw_spin_lock(lock);
 }
 
@@ -160,6 +193,7 @@ EXPORT_SYMBOL(_spin_lock);
 void __lockfunc _write_lock(rwlock_t *lock)
 {
 	preempt_disable();
+	rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
 	_raw_write_lock(lock);
 }
 
@@ -255,8 +289,22 @@ BUILD_LOCK_OPS(write, rwlock);
 
 #endif /* CONFIG_PREEMPT */
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+
+void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
+{
+	preempt_disable();
+	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+	_raw_spin_lock(lock);
+}
+
+EXPORT_SYMBOL(_spin_lock_nested);
+
+#endif
+
 void __lockfunc _spin_unlock(spinlock_t *lock)
 {
+	spin_release(&lock->dep_map, 1, _RET_IP_);
 	_raw_spin_unlock(lock);
 	preempt_enable();
 }
@@ -264,6 +312,7 @@ EXPORT_SYMBOL(_spin_unlock);
 
 void __lockfunc _write_unlock(rwlock_t *lock)
 {
+	rwlock_release(&lock->dep_map, 1, _RET_IP_);
 	_raw_write_unlock(lock);
 	preempt_enable();
 }
@@ -271,6 +320,7 @@ EXPORT_SYMBOL(_write_unlock);
 
 void __lockfunc _read_unlock(rwlock_t *lock)
 {
+	rwlock_release(&lock->dep_map, 1, _RET_IP_);
 	_raw_read_unlock(lock);
 	preempt_enable();
 }
@@ -278,6 +328,7 @@ EXPORT_SYMBOL(_read_unlock);
 
 void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
 {
+	spin_release(&lock->dep_map, 1, _RET_IP_);
 	_raw_spin_unlock(lock);
 	local_irq_restore(flags);
 	preempt_enable();
@@ -286,6 +337,7 @@ EXPORT_SYMBOL(_spin_unlock_irqrestore);
 
 void __lockfunc _spin_unlock_irq(spinlock_t *lock)
 {
+	spin_release(&lock->dep_map, 1, _RET_IP_);
 	_raw_spin_unlock(lock);
 	local_irq_enable();
 	preempt_enable();
@@ -294,14 +346,16 @@ EXPORT_SYMBOL(_spin_unlock_irq);
 
 void __lockfunc _spin_unlock_bh(spinlock_t *lock)
 {
+	spin_release(&lock->dep_map, 1, _RET_IP_);
 	_raw_spin_unlock(lock);
 	preempt_enable_no_resched();
-	local_bh_enable();
+	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
 }
 EXPORT_SYMBOL(_spin_unlock_bh);
 
 void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
 {
+	rwlock_release(&lock->dep_map, 1, _RET_IP_);
 	_raw_read_unlock(lock);
 	local_irq_restore(flags);
 	preempt_enable();
@@ -310,6 +364,7 @@ EXPORT_SYMBOL(_read_unlock_irqrestore);
 
 void __lockfunc _read_unlock_irq(rwlock_t *lock)
 {
+	rwlock_release(&lock->dep_map, 1, _RET_IP_);
 	_raw_read_unlock(lock);
 	local_irq_enable();
 	preempt_enable();
@@ -318,14 +373,16 @@ EXPORT_SYMBOL(_read_unlock_irq);
 
 void __lockfunc _read_unlock_bh(rwlock_t *lock)
 {
+	rwlock_release(&lock->dep_map, 1, _RET_IP_);
 	_raw_read_unlock(lock);
 	preempt_enable_no_resched();
-	local_bh_enable();
+	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
 }
 EXPORT_SYMBOL(_read_unlock_bh);
 
 void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
 {
+	rwlock_release(&lock->dep_map, 1, _RET_IP_);
 	_raw_write_unlock(lock);
 	local_irq_restore(flags);
 	preempt_enable();
@@ -334,6 +391,7 @@ EXPORT_SYMBOL(_write_unlock_irqrestore);
 
 void __lockfunc _write_unlock_irq(rwlock_t *lock)
 {
+	rwlock_release(&lock->dep_map, 1, _RET_IP_);
 	_raw_write_unlock(lock);
 	local_irq_enable();
 	preempt_enable();
@@ -342,9 +400,10 @@ EXPORT_SYMBOL(_write_unlock_irq);
 
 void __lockfunc _write_unlock_bh(rwlock_t *lock)
 {
+	rwlock_release(&lock->dep_map, 1, _RET_IP_);
 	_raw_write_unlock(lock);
 	preempt_enable_no_resched();
-	local_bh_enable();
+	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
 }
 EXPORT_SYMBOL(_write_unlock_bh);
 
@@ -352,11 +411,13 @@ int __lockfunc _spin_trylock_bh(spinlock_t *lock)
 {
 	local_bh_disable();
 	preempt_disable();
-	if (_raw_spin_trylock(lock))
+	if (_raw_spin_trylock(lock)) {
+		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
 		return 1;
+	}
 
 	preempt_enable_no_resched();
-	local_bh_enable();
+	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
 	return 0;
 }
 EXPORT_SYMBOL(_spin_trylock_bh);
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index e713e86811a..e0fdfddb406 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -177,7 +177,12 @@ static inline void __lock_kernel(void)
 
 static inline void __unlock_kernel(void)
 {
-	spin_unlock(&kernel_flag);
+	/*
+	 * the BKL is not covered by lockdep, so we open-code the
+	 * unlocking sequence (and thus avoid the dep-chain ops):
+	 */
+	_raw_spin_unlock(&kernel_flag);
+	preempt_enable();
 }
 
 /*
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index 3de2ccf48ac..3d9c4dc965e 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -12,6 +12,42 @@
 #include <linux/delay.h>
 #include <linux/module.h>
 
+void __spin_lock_init(spinlock_t *lock, const char *name,
+		      struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	/*
+	 * Make sure we are not reinitializing a held lock:
+	 */
+	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
+	lockdep_init_map(&lock->dep_map, name, key);
+#endif
+	lock->raw_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	lock->magic = SPINLOCK_MAGIC;
+	lock->owner = SPINLOCK_OWNER_INIT;
+	lock->owner_cpu = -1;
+}
+
+EXPORT_SYMBOL(__spin_lock_init);
+
+void __rwlock_init(rwlock_t *lock, const char *name,
+		   struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	/*
+	 * Make sure we are not reinitializing a held lock:
+	 */
+	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
+	lockdep_init_map(&lock->dep_map, name, key);
+#endif
+	lock->raw_lock = (raw_rwlock_t) __RAW_RW_LOCK_UNLOCKED;
+	lock->magic = RWLOCK_MAGIC;
+	lock->owner = SPINLOCK_OWNER_INIT;
+	lock->owner_cpu = -1;
+}
+
+EXPORT_SYMBOL(__rwlock_init);
+
 static void spin_bug(spinlock_t *lock, const char *msg)
 {
 	struct task_struct *owner = NULL;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index da44fabf4dc..283a72247e5 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -205,7 +205,8 @@ __u8 ip_tos2prio[16] = {
 struct rt_hash_bucket {
 	struct rtable	*chain;
 };
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
+	defined(CONFIG_PROVE_LOCKING)
 /*
  * Instead of using one spinlock for each rt_hash_bucket, we use a table of spinlocks
  * The size of this table is a power of two and depends on the number of CPUS.
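
A closing note on the spin_lock_init() change above: each initialization site now gets its own static struct lock_class_key, so all locks initialized from the same call site share one lock class. A rough sketch of the expansion, assuming CONFIG_DEBUG_SPINLOCK=y; struct foo and foo_init() are made-up names:

```c
/*
 * Rough sketch of what spin_lock_init(&foo->lock) expands to with this
 * patch applied and CONFIG_DEBUG_SPINLOCK=y.  'struct foo' is hypothetical.
 */
#include <linux/spinlock.h>

struct foo {
	spinlock_t lock;
};

static void foo_init(struct foo *foo)
{
	/* spin_lock_init(&foo->lock); expands to roughly: */
	do {
		static struct lock_class_key __key;	/* one key per call site */

		__spin_lock_init(&foo->lock, "&foo->lock", &__key);
	} while (0);
}
```

Grouping locks by class rather than by instance is what keeps the dependency graph small enough for the validator to check at runtime.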