diff options
Diffstat (limited to 'kernel/mutex.c')
-rw-r--r--  kernel/mutex.c | 10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 12c779dc65d..4f45d4b658e 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -59,7 +59,7 @@ EXPORT_SYMBOL(__mutex_init);
  * We also put the fastpath first in the kernel image, to make sure the
  * branch is predicted by the CPU as default-untaken.
  */
-static void noinline __sched
+static __used noinline void __sched
 __mutex_lock_slowpath(atomic_t *lock_count);
 
 /***
@@ -96,7 +96,7 @@ void inline __sched mutex_lock(struct mutex *lock)
 EXPORT_SYMBOL(mutex_lock);
 #endif
 
-static noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
+static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
 
 /***
  * mutex_unlock - release the mutex
@@ -184,7 +184,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	}
 
 done:
-	lock_acquired(&lock->dep_map);
+	lock_acquired(&lock->dep_map, ip);
 	/* got the lock - rejoice! */
 	mutex_remove_waiter(lock, &waiter, task_thread_info(task));
 	debug_mutex_set_owner(lock, task_thread_info(task));
@@ -268,7 +268,7 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
 
 /*
  * Release the lock, slowpath:
  */
-static noinline void
+static __used noinline void
 __mutex_unlock_slowpath(atomic_t *lock_count)
 {
 	__mutex_unlock_common_slowpath(lock_count, 1);
@@ -313,7 +313,7 @@ int __sched mutex_lock_killable(struct mutex *lock)
 }
 EXPORT_SYMBOL(mutex_lock_killable);
 
-static noinline void __sched
+static __used noinline void __sched
 __mutex_lock_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);