mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2025-09-04 20:19:47 +08:00

commit 7ca8cf5347
This patch moves ATOMIC_INIT from asm/atomic.h into linux/types.h.
This allows users of atomic_t to use ATOMIC_INIT without having to
include atomic.h, as that way may lead to header loops.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Waiman Long <longman@redhat.com>
Link: https://lkml.kernel.org/r/20200729123105.GB7047@gondor.apana.org.au
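As a minimal sketch of what the change enables (illustrative only; the
variable name below is hypothetical and not part of the patch): a static
atomic_t can now be initialized with nothing but <linux/types.h> in scope,
with no #include <asm/atomic.h> and therefore no chance of a header loop
through it.

#include <linux/types.h>

/* Hypothetical usage sketch: ATOMIC_INIT() is now provided by
 * <linux/types.h>, so no <asm/atomic.h> include (and no header-loop
 * risk) is needed for a static initializer.
 */
static atomic_t example_refcount = ATOMIC_INIT(1);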
224 lines · 6.0 KiB · C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_ATOMIC_H
#define _ASM_IA64_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * NOTE: don't mess with the types below!  The "unsigned long" and
 * "int" types were carefully placed so as to ensure proper operation
 * of the macros.
 *
 * Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/types.h>

#include <asm/intrinsics.h>
#include <asm/barrier.h>


#define ATOMIC64_INIT(i)	{ (i) }

#define atomic_read(v)		READ_ONCE((v)->counter)
#define atomic64_read(v)	READ_ONCE((v)->counter)

#define atomic_set(v,i)		WRITE_ONCE(((v)->counter), (i))
#define atomic64_set(v,i)	WRITE_ONCE(((v)->counter), (i))

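/*
 * The ops below are built as cmpxchg retry loops: read the current
 * counter value, compute the new one, and retry the compare-and-exchange
 * (with acquire semantics) until it succeeds without a racing update
 * from another CPU.
 */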
#define ATOMIC_OP(op, c_op)						\
static __inline__ int							\
ia64_atomic_##op (int i, atomic_t *v)					\
{									\
	__s32 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = atomic_read(v);					\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
	return new;							\
}

#define ATOMIC_FETCH_OP(op, c_op)					\
static __inline__ int							\
ia64_atomic_fetch_##op (int i, atomic_t *v)				\
{									\
	__s32 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = atomic_read(v);					\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
	return old;							\
}

#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(add, +)
ATOMIC_OPS(sub, -)

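/*
 * ia64's fetchadd instruction only accepts the immediate increments
 * -16, -8, -4, -1, 1, 4, 8 and 16.  __ia64_atomic_const(i) evaluates
 * to a compile-time 1 when i is one of those constants, letting the
 * macros below use a single fetchadd instead of the cmpxchg loop.
 */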
#ifdef __OPTIMIZE__
#define __ia64_atomic_const(i)						\
	static const int __ia64_atomic_p = __builtin_constant_p(i) ?	\
		((i) == 1 || (i) == 4 || (i) == 8 || (i) == 16 ||	\
		 (i) == -1 || (i) == -4 || (i) == -8 || (i) == -16) : 0;\
	__ia64_atomic_p
#else
#define __ia64_atomic_const(i)	0
#endif

#define atomic_add_return(i,v)						\
({									\
	int __ia64_aar_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic_add(__ia64_aar_i, v);			\
})

#define atomic_sub_return(i,v)						\
({									\
	int __ia64_asr_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic_sub(__ia64_asr_i, v);			\
})

#define atomic_fetch_add(i,v)						\
({									\
	int __ia64_aar_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq)	\
		: ia64_atomic_fetch_add(__ia64_aar_i, v);		\
})

#define atomic_fetch_sub(i,v)						\
({									\
	int __ia64_asr_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq)	\
		: ia64_atomic_fetch_sub(__ia64_asr_i, v);		\
})

ATOMIC_FETCH_OP(and, &)
ATOMIC_FETCH_OP(or, |)
ATOMIC_FETCH_OP(xor, ^)

#define atomic_and(i,v)	(void)ia64_atomic_fetch_and(i,v)
#define atomic_or(i,v)	(void)ia64_atomic_fetch_or(i,v)
#define atomic_xor(i,v)	(void)ia64_atomic_fetch_xor(i,v)

#define atomic_fetch_and(i,v)	ia64_atomic_fetch_and(i,v)
#define atomic_fetch_or(i,v)	ia64_atomic_fetch_or(i,v)
#define atomic_fetch_xor(i,v)	ia64_atomic_fetch_xor(i,v)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP

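/*
 * The atomic64_t operations below mirror the 32-bit versions above,
 * using s64 counters and 8-byte cmpxchg/fetchadd.
 */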
#define ATOMIC64_OP(op, c_op)						\
static __inline__ s64							\
ia64_atomic64_##op (s64 i, atomic64_t *v)				\
{									\
	s64 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = atomic64_read(v);					\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
	return new;							\
}

#define ATOMIC64_FETCH_OP(op, c_op)					\
static __inline__ s64							\
ia64_atomic64_fetch_##op (s64 i, atomic64_t *v)				\
{									\
	s64 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = atomic64_read(v);					\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
	return old;							\
}

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +)
ATOMIC64_OPS(sub, -)

#define atomic64_add_return(i,v)					\
({									\
	s64 __ia64_aar_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic64_add(__ia64_aar_i, v);			\
})

#define atomic64_sub_return(i,v)					\
({									\
	s64 __ia64_asr_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic64_sub(__ia64_asr_i, v);			\
})

#define atomic64_fetch_add(i,v)						\
({									\
	s64 __ia64_aar_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq)	\
		: ia64_atomic64_fetch_add(__ia64_aar_i, v);		\
})

#define atomic64_fetch_sub(i,v)						\
({									\
	s64 __ia64_asr_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq)	\
		: ia64_atomic64_fetch_sub(__ia64_asr_i, v);		\
})

ATOMIC64_FETCH_OP(and, &)
ATOMIC64_FETCH_OP(or, |)
ATOMIC64_FETCH_OP(xor, ^)

#define atomic64_and(i,v)	(void)ia64_atomic64_fetch_and(i,v)
#define atomic64_or(i,v)	(void)ia64_atomic64_fetch_or(i,v)
#define atomic64_xor(i,v)	(void)ia64_atomic64_fetch_xor(i,v)

#define atomic64_fetch_and(i,v)	ia64_atomic64_fetch_and(i,v)
#define atomic64_fetch_or(i,v)	ia64_atomic64_fetch_or(i,v)
#define atomic64_fetch_xor(i,v)	ia64_atomic64_fetch_xor(i,v)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP

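/*
 * Wrap the architecture's cmpxchg()/xchg() primitives so they operate
 * directly on the embedded counter field of atomic_t/atomic64_t.
 */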
#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic64_cmpxchg(v, old, new) \
	(cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_add(i,v)			(void)atomic_add_return((i), (v))
#define atomic_sub(i,v)			(void)atomic_sub_return((i), (v))

#define atomic64_add(i,v)		(void)atomic64_add_return((i), (v))
#define atomic64_sub(i,v)		(void)atomic64_sub_return((i), (v))

#endif /* _ASM_IA64_ATOMIC_H */