diff options
Diffstat (limited to 'include/linux/percpu-rwsem.h')
-rw-r--r--  include/linux/percpu-rwsem.h  91
1 file changed, 21 insertions(+), 70 deletions(-)
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h index 250a4acddb2..3e88c9a7d57 100644 --- a/include/linux/percpu-rwsem.h +++ b/include/linux/percpu-rwsem.h @@ -1,83 +1,34 @@  #ifndef _LINUX_PERCPU_RWSEM_H  #define _LINUX_PERCPU_RWSEM_H -#include <linux/mutex.h> +#include <linux/atomic.h> +#include <linux/rwsem.h>  #include <linux/percpu.h> -#include <linux/rcupdate.h> -#include <linux/delay.h> +#include <linux/wait.h> +#include <linux/lockdep.h>  struct percpu_rw_semaphore { -	unsigned __percpu *counters; -	bool locked; -	struct mutex mtx; +	unsigned int __percpu	*fast_read_ctr; +	atomic_t		write_ctr; +	struct rw_semaphore	rw_sem; +	atomic_t		slow_read_ctr; +	wait_queue_head_t	write_waitq;  }; -#define light_mb()	barrier() -#define heavy_mb()	synchronize_sched() +extern void percpu_down_read(struct percpu_rw_semaphore *); +extern void percpu_up_read(struct percpu_rw_semaphore *); -static inline void percpu_down_read(struct percpu_rw_semaphore *p) -{ -	rcu_read_lock_sched(); -	if (unlikely(p->locked)) { -		rcu_read_unlock_sched(); -		mutex_lock(&p->mtx); -		this_cpu_inc(*p->counters); -		mutex_unlock(&p->mtx); -		return; -	} -	this_cpu_inc(*p->counters); -	rcu_read_unlock_sched(); -	light_mb(); /* A, between read of p->locked and read of data, paired with D */ -} +extern void percpu_down_write(struct percpu_rw_semaphore *); +extern void percpu_up_write(struct percpu_rw_semaphore *); -static inline void percpu_up_read(struct percpu_rw_semaphore *p) -{ -	light_mb(); /* B, between read of the data and write to p->counter, paired with C */ -	this_cpu_dec(*p->counters); -} +extern int __percpu_init_rwsem(struct percpu_rw_semaphore *, +				const char *, struct lock_class_key *); +extern void percpu_free_rwsem(struct percpu_rw_semaphore *); -static inline unsigned __percpu_count(unsigned __percpu *counters) -{ -	unsigned total = 0; -	int cpu; - -	for_each_possible_cpu(cpu) -		total += ACCESS_ONCE(*per_cpu_ptr(counters, cpu)); - -	return total; -} - 
-static inline void percpu_down_write(struct percpu_rw_semaphore *p) -{ -	mutex_lock(&p->mtx); -	p->locked = true; -	synchronize_sched(); /* make sure that all readers exit the rcu_read_lock_sched region */ -	while (__percpu_count(p->counters)) -		msleep(1); -	heavy_mb(); /* C, between read of p->counter and write to data, paired with B */ -} - -static inline void percpu_up_write(struct percpu_rw_semaphore *p) -{ -	heavy_mb(); /* D, between write to data and write to p->locked, paired with A */ -	p->locked = false; -	mutex_unlock(&p->mtx); -} - -static inline int percpu_init_rwsem(struct percpu_rw_semaphore *p) -{ -	p->counters = alloc_percpu(unsigned); -	if (unlikely(!p->counters)) -		return -ENOMEM; -	p->locked = false; -	mutex_init(&p->mtx); -	return 0; -} - -static inline void percpu_free_rwsem(struct percpu_rw_semaphore *p) -{ -	free_percpu(p->counters); -	p->counters = NULL; /* catch use after free bugs */ -} +#define percpu_init_rwsem(brw)	\ +({								\ +	static struct lock_class_key rwsem_key;			\ +	__percpu_init_rwsem(brw, #brw, &rwsem_key);		\ +})  #endif  |