 arch/x86/include/asm/percpu.h | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index a0a9779084d..3470c9d0ebb 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -388,12 +388,9 @@ do {									\
 #define __this_cpu_xor_1(pcp, val)	percpu_to_op("xor", (pcp), val)
 #define __this_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
 #define __this_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)
-/*
- * Generic fallback operations for __this_cpu_xchg_[1-4] are okay and much
- * faster than an xchg with forced lock semantics.
- */
-#define __this_cpu_xchg_8(pcp, nval)	percpu_xchg_op(pcp, nval)
-#define __this_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
+#define __this_cpu_xchg_1(pcp, val)	percpu_xchg_op(pcp, val)
+#define __this_cpu_xchg_2(pcp, val)	percpu_xchg_op(pcp, val)
+#define __this_cpu_xchg_4(pcp, val)	percpu_xchg_op(pcp, val)
 
 #define this_cpu_read_1(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
 #define this_cpu_read_2(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
@@ -485,6 +482,8 @@ do {									\
 #define __this_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
 #define __this_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
 #define __this_cpu_add_return_8(pcp, val) percpu_add_return_op(pcp, val)
+#define __this_cpu_xchg_8(pcp, nval)	percpu_xchg_op(pcp, nval)
+#define __this_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
 
 #define this_cpu_read_8(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
 #define this_cpu_write_8(pcp, val)	percpu_to_op("mov", (pcp), val)
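
For context, the size-specific macros touched above back the generic __this_cpu_xchg() and __this_cpu_cmpxchg() operations on x86, which on this architecture avoid a LOCK-prefixed instruction. The sketch below is only illustrative of the calling convention and is not part of the patch; the per-CPU variable demo_flag and the functions demo_take_flag()/demo_try_set_flag() are hypothetical names, and the __this_cpu_* forms assume the caller has already ruled out preemption (here done explicitly with preempt_disable()).

#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/types.h>

/* Hypothetical per-CPU flag, used only to illustrate the macros above. */
static DEFINE_PER_CPU(unsigned long, demo_flag);

/*
 * Read-and-clear the flag for the current CPU.  __this_cpu_xchg()
 * dispatches by size to __this_cpu_xchg_{1,2,4,8}(), which expand to
 * percpu_xchg_op() as defined in the diff above.
 */
static unsigned long demo_take_flag(void)
{
	unsigned long old;

	preempt_disable();			/* __this_cpu_* requires no preemption */
	old = __this_cpu_xchg(demo_flag, 0);
	preempt_enable();

	return old;
}

/*
 * Set the flag only if it is currently clear; __this_cpu_cmpxchg()
 * likewise dispatches to the size-specific percpu_cmpxchg_op() variants.
 */
static bool demo_try_set_flag(void)
{
	unsigned long prev;

	preempt_disable();
	prev = __this_cpu_cmpxchg(demo_flag, 0, 1);
	preempt_enable();

	return prev == 0;
}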