Diffstat (limited to 'arch/s390/include/asm/atomic.h')
-rw-r--r--	arch/s390/include/asm/atomic.h	205
1 file changed, 146 insertions, 59 deletions
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index c7d0abfb0f0..ae7c8f9f94a 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -1,33 +1,23 @@
 #ifndef __ARCH_S390_ATOMIC__
 #define __ARCH_S390_ATOMIC__
 
-#include <linux/compiler.h>
-#include <linux/types.h>
-
 /*
- *  include/asm-s390/atomic.h
+ * Copyright 1999,2009 IBM Corp.
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
+ *	      Denis Joseph Barrow,
+ *	      Arnd Bergmann <arndb@de.ibm.com>,
  *
- *  S390 version
- *    Copyright (C) 1999-2005 IBM Deutschland Entwicklung GmbH, IBM Corporation
- *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
- *               Denis Joseph Barrow,
- *		 Arnd Bergmann (arndb@de.ibm.com)
- *
- *  Derived from "include/asm-i386/bitops.h"
- *    Copyright (C) 1992, Linus Torvalds
+ * Atomic operations that C can't guarantee us.
+ * Useful for resource counting etc.
+ * s390 uses 'Compare And Swap' for atomicity in SMP enviroment.
  *
  */
 
-/*
- * Atomic operations that C can't guarantee us.  Useful for
- * resource counting etc..
- * S390 uses 'Compare And Swap' for atomicity in SMP enviroment
- */
+#include <linux/compiler.h>
+#include <linux/types.h>
 
 #define ATOMIC_INIT(i)  { (i) }
 
-#ifdef __KERNEL__
-
 #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
 
 #define __CS_LOOP(ptr, op_val, op_string) ({				\
@@ -77,7 +67,7 @@ static inline void atomic_set(atomic_t *v, int i)
 	barrier();
 }
 
-static __inline__ int atomic_add_return(int i, atomic_t * v)
+static inline int atomic_add_return(int i, atomic_t *v)
 {
 	return __CS_LOOP(v, i, "ar");
 }
@@ -87,7 +77,7 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 #define atomic_inc_return(_v)		atomic_add_return(1, _v)
 #define atomic_inc_and_test(_v)		(atomic_add_return(1, _v) == 0)
 
-static __inline__ int atomic_sub_return(int i, atomic_t * v)
+static inline int atomic_sub_return(int i, atomic_t *v)
 {
 	return __CS_LOOP(v, i, "sr");
 }
@@ -97,19 +87,19 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 #define atomic_dec_return(_v)		atomic_sub_return(1, _v)
 #define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)
 
-static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t * v)
+static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
 {
-	       __CS_LOOP(v, ~mask, "nr");
+	__CS_LOOP(v, ~mask, "nr");
 }
 
-static __inline__ void atomic_set_mask(unsigned long mask, atomic_t * v)
+static inline void atomic_set_mask(unsigned long mask, atomic_t *v)
 {
-	       __CS_LOOP(v, mask, "or");
+	__CS_LOOP(v, mask, "or");
 }
 
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
-static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new)
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
 #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
 	asm volatile(
@@ -127,7 +117,7 @@ static __inline__ int atomic_cmpxchg(atomic_t *v, int old, int new)
 	return old;
 }
 
-static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
 	c = atomic_read(v);
@@ -146,9 +136,10 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
 
 #undef __CS_LOOP
 
-#ifdef __s390x__
 #define ATOMIC64_INIT(i)  { (i) }
 
+#ifdef CONFIG_64BIT
+
 #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
 
 #define __CSG_LOOP(ptr, op_val, op_string) ({				\
@@ -162,7 +153,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
 		: "=&d" (old_val), "=&d" (new_val),			\
 		  "=Q" (((atomic_t *)(ptr))->counter)			\
 		: "d" (op_val),	"Q" (((atomic_t *)(ptr))->counter)	\
-		: "cc", "memory" );					\
+		: "cc", "memory");					\
 	new_val;							\
 })
 
@@ -180,7 +171,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
 		  "=m" (((atomic_t *)(ptr))->counter)			\
 		: "a" (ptr), "d" (op_val),				\
 		  "m" (((atomic_t *)(ptr))->counter)			\
-		: "cc", "memory" );					\
+		: "cc", "memory");					\
 	new_val;							\
 })
 
@@ -198,39 +189,29 @@ static inline void atomic64_set(atomic64_t *v, long long i)
 	barrier();
 }
 
-static __inline__ long long atomic64_add_return(long long i, atomic64_t * v)
+static inline long long atomic64_add_return(long long i, atomic64_t *v)
 {
 	return __CSG_LOOP(v, i, "agr");
 }
-#define atomic64_add(_i, _v)		atomic64_add_return(_i, _v)
-#define atomic64_add_negative(_i, _v)	(atomic64_add_return(_i, _v) < 0)
-#define atomic64_inc(_v)		atomic64_add_return(1, _v)
-#define atomic64_inc_return(_v)		atomic64_add_return(1, _v)
-#define atomic64_inc_and_test(_v)	(atomic64_add_return(1, _v) == 0)
 
-static __inline__ long long atomic64_sub_return(long long i, atomic64_t * v)
+static inline long long atomic64_sub_return(long long i, atomic64_t *v)
 {
 	return __CSG_LOOP(v, i, "sgr");
 }
-#define atomic64_sub(_i, _v)		atomic64_sub_return(_i, _v)
-#define atomic64_sub_and_test(_i, _v)	(atomic64_sub_return(_i, _v) == 0)
-#define atomic64_dec(_v)		atomic64_sub_return(1, _v)
-#define atomic64_dec_return(_v)		atomic64_sub_return(1, _v)
-#define atomic64_dec_and_test(_v)	(atomic64_sub_return(1, _v) == 0)
 
-static __inline__ void atomic64_clear_mask(unsigned long mask, atomic64_t * v)
+static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
 {
-	       __CSG_LOOP(v, ~mask, "ngr");
+	__CSG_LOOP(v, ~mask, "ngr");
 }
 
-static __inline__ void atomic64_set_mask(unsigned long mask, atomic64_t * v)
+static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
 {
-	       __CSG_LOOP(v, mask, "ogr");
+	__CSG_LOOP(v, mask, "ogr");
 }
 
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
 
-static __inline__ long long atomic64_cmpxchg(atomic64_t *v,
+static inline long long atomic64_cmpxchg(atomic64_t *v,
 					     long long old, long long new)
 {
 #if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
@@ -249,8 +230,112 @@ static __inline__ long long atomic64_cmpxchg(atomic64_t *v,
 	return old;
 }
 
-static __inline__ int atomic64_add_unless(atomic64_t *v,
-					  long long a, long long u)
+#undef __CSG_LOOP
+
+#else /* CONFIG_64BIT */
+
+typedef struct {
+	long long counter;
+} atomic64_t;
+
+static inline long long atomic64_read(const atomic64_t *v)
+{
+	register_pair rp;
+
+	asm volatile(
+		"	lm	%0,%N0,0(%1)"
+		: "=&d" (rp)
+		: "a" (&v->counter), "m" (v->counter)
+		);
+	return rp.pair;
+}
+
+static inline void atomic64_set(atomic64_t *v, long long i)
+{
+	register_pair rp = {.pair = i};
+
+	asm volatile(
+		"	stm	%1,%N1,0(%2)"
+		: "=m" (v->counter)
+		: "d" (rp), "a" (&v->counter)
+		);
+}
+
+static inline long long atomic64_xchg(atomic64_t *v, long long new)
+{
+	register_pair rp_new = {.pair = new};
+	register_pair rp_old;
+
+	asm volatile(
+		"	lm	%0,%N0,0(%2)\n"
+		"0:	cds	%0,%3,0(%2)\n"
+		"	jl	0b\n"
+		: "=&d" (rp_old), "+m" (v->counter)
+		: "a" (&v->counter), "d" (rp_new)
+		: "cc");
+	return rp_old.pair;
+}
+
+static inline long long atomic64_cmpxchg(atomic64_t *v,
+					 long long old, long long new)
+{
+	register_pair rp_old = {.pair = old};
+	register_pair rp_new = {.pair = new};
+
+	asm volatile(
+		"	cds	%0,%3,0(%2)"
+		: "+&d" (rp_old), "+m" (v->counter)
+		: "a" (&v->counter), "d" (rp_new)
+		: "cc");
+	return rp_old.pair;
+}
+
+
+static inline long long atomic64_add_return(long long i, atomic64_t *v)
+{
+	long long old, new;
+
+	do {
+		old = atomic64_read(v);
+		new = old + i;
+	} while (atomic64_cmpxchg(v, old, new) != old);
+	return new;
+}
+
+static inline long long atomic64_sub_return(long long i, atomic64_t *v)
+{
+	long long old, new;
+
+	do {
+		old = atomic64_read(v);
+		new = old - i;
+	} while (atomic64_cmpxchg(v, old, new) != old);
+	return new;
+}
+
+static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v)
+{
+	long long old, new;
+
+	do {
+		old = atomic64_read(v);
+		new = old | mask;
+	} while (atomic64_cmpxchg(v, old, new) != old);
+}
+
+static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
+{
+	long long old, new;
+
+	do {
+		old = atomic64_read(v);
+		new = old & mask;
+	} while (atomic64_cmpxchg(v, old, new) != old);
+}
+
+#endif /* CONFIG_64BIT */
+
+static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 {
 	long long c, old;
 	c = atomic64_read(v);
@@ -265,15 +350,17 @@ static __inline__ int atomic64_add_unless(atomic64_t *v,
 	return c != u;
 }
 
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-
-#undef __CSG_LOOP
-
-#else /* __s390x__ */
-
-#include <asm-generic/atomic64.h>
-
-#endif /* __s390x__ */
+#define atomic64_add(_i, _v)		atomic64_add_return(_i, _v)
+#define atomic64_add_negative(_i, _v)	(atomic64_add_return(_i, _v) < 0)
+#define atomic64_inc(_v)		atomic64_add_return(1, _v)
+#define atomic64_inc_return(_v)		atomic64_add_return(1, _v)
+#define atomic64_inc_and_test(_v)	(atomic64_add_return(1, _v) == 0)
+#define atomic64_sub(_i, _v)		atomic64_sub_return(_i, _v)
+#define atomic64_sub_and_test(_i, _v)	(atomic64_sub_return(_i, _v) == 0)
+#define atomic64_dec(_v)		atomic64_sub_return(1, _v)
+#define atomic64_dec_return(_v)		atomic64_sub_return(1, _v)
+#define atomic64_dec_and_test(_v)	(atomic64_sub_return(1, _v) == 0)
+#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)
 
 #define smp_mb__before_atomic_dec()	smp_mb()
 #define smp_mb__after_atomic_dec()	smp_mb()
@@ -281,5 +368,5 @@ static __inline__ int atomic64_add_unless(atomic64_t *v,
 #define smp_mb__after_atomic_inc()	smp_mb()
 
 #include <asm-generic/atomic-long.h>
-#endif /* __KERNEL__ */
+
 #endif /* __ARCH_S390_ATOMIC__  */
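On 31-bit kernels the patch drops the asm-generic/atomic64.h fallback and instead implements atomic64_read()/atomic64_set() with LM/STM on a register pair and builds every 64-bit read-modify-write operation as a retry loop around the CDS ('compare double and swap') based atomic64_cmpxchg(): read the counter, compute the new value, and retry until no other CPU has changed the counter in between. The stand-alone C sketch below only illustrates that retry pattern; the my_atomic64_* names are hypothetical, and the cmpxchg stand-in is single-threaded rather than atomic, standing in for the real CDS-backed primitive.

/*
 * Minimal sketch of the compare-and-swap retry loop used by the new
 * 31-bit atomic64_add_return() above. Illustrative only: the names are
 * made up and my_atomic64_cmpxchg() is not atomic, it merely models
 * the semantics of the CDS-backed atomic64_cmpxchg().
 */
#include <stdio.h>

struct my_atomic64 {
	long long counter;
};

/* stand-in for atomic64_cmpxchg(): store new iff *v still holds old */
static long long my_atomic64_cmpxchg(struct my_atomic64 *v,
				      long long old, long long new)
{
	long long prev = v->counter;

	if (prev == old)
		v->counter = new;
	return prev;
}

/* same pattern as the patch: reread, compute, try to swap, repeat */
static long long my_atomic64_add_return(long long i, struct my_atomic64 *v)
{
	long long old, new;

	do {
		old = v->counter;
		new = old + i;
	} while (my_atomic64_cmpxchg(v, old, new) != old);
	return new;
}

int main(void)
{
	struct my_atomic64 v = { .counter = 40 };

	printf("%lld\n", my_atomic64_add_return(2, &v));	/* prints 42 */
	return 0;
}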