atomic.h revision 03e3b5a0f18c53560de1984fbbfca146d31da2a5
#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

/*
 * Copyright 1999,2009 IBM Corp.
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Denis Joseph Barrow,
 *	      Arnd Bergmann <arndb@de.ibm.com>,
 *
 * Atomic operations that C can't guarantee us.
 * Useful for resource counting etc.
 * s390 uses 'Compare And Swap' for atomicity in an SMP environment.
 *
 */

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/system.h>

#define ATOMIC_INIT(i)  { (i) }

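/*
 * __CS_LOOP implements an atomic read-modify-write on the 32-bit counter:
 * load the old value, apply op_string (e.g. "ar"/"sr") to a copy, then try
 * to store the result with COMPARE AND SWAP.  If another CPU changed the
 * counter in the meantime, CS fails, reloads the current value into
 * old_val, and "jl 0b" retries.  The new value is returned.
 */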
#define __CS_LOOP(ptr, op_val, op_string) ({				\
	int old_val, new_val;						\
	asm volatile(							\
		"	l	%0,%2\n"				\
		"0:	lr	%1,%0\n"				\
		op_string "	%1,%3\n"				\
		"	cs	%0,%1,%2\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val),			\
		  "=Q" (((atomic_t *)(ptr))->counter)			\
		: "d" (op_val), "Q" (((atomic_t *)(ptr))->counter)	\
		: "cc", "memory");					\
	new_val;							\
})

static inline int atomic_read(const atomic_t *v)
{
	return ACCESS_ONCE(v->counter);
}

static inline void atomic_set(atomic_t *v, int i)
{
	v->counter = i;
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	return __CS_LOOP(v, i, "ar");
}
#define atomic_add(_i, _v)		atomic_add_return(_i, _v)
#define atomic_add_negative(_i, _v)	(atomic_add_return(_i, _v) < 0)
#define atomic_inc(_v)			atomic_add_return(1, _v)
#define atomic_inc_return(_v)		atomic_add_return(1, _v)
#define atomic_inc_and_test(_v)		(atomic_add_return(1, _v) == 0)

static inline int atomic_sub_return(int i, atomic_t *v)
{
	return __CS_LOOP(v, i, "sr");
}
#define atomic_sub(_i, _v)		atomic_sub_return(_i, _v)
#define atomic_sub_and_test(_i, _v)	(atomic_sub_return(_i, _v) == 0)
#define atomic_dec(_v)			atomic_sub_return(1, _v)
#define atomic_dec_return(_v)		atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)

static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
{
	__CS_LOOP(v, ~mask, "nr");
}

static inline void atomic_set_mask(unsigned long mask, atomic_t *v)
{
	__CS_LOOP(v, mask, "or");
}

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

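/*
 * Single COMPARE AND SWAP: if the counter still contains 'old', replace it
 * with 'new'.  Either way 'old' ends up holding the value that was in the
 * counter before the instruction, and that value is returned.
 */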
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	asm volatile(
		"	cs	%0,%2,%1"
		: "+d" (old), "=Q" (v->counter)
		: "d" (new), "Q" (v->counter)
		: "cc", "memory");
	return old;
}

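/*
 * Add 'a' to the counter unless it currently holds 'u'.  Returns non-zero
 * if the counter was not 'u', i.e. if the add was actually performed.
 */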
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c != u;
}

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

#undef __CS_LOOP

#define ATOMIC64_INIT(i)  { (i) }

#ifdef CONFIG_64BIT

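/*
 * 64-bit variant of __CS_LOOP: the same retry loop, built from the 64-bit
 * instructions LG/LGR and COMPARE AND SWAP (CSG) on the 64-bit counter.
 */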
#define __CSG_LOOP(ptr, op_val, op_string) ({				\
	long long old_val, new_val;					\
	asm volatile(							\
		"	lg	%0,%2\n"				\
		"0:	lgr	%1,%0\n"				\
		op_string "	%1,%3\n"				\
		"	csg	%0,%1,%2\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val),			\
		  "=Q" (((atomic64_t *)(ptr))->counter)			\
		: "d" (op_val), "Q" (((atomic64_t *)(ptr))->counter)	\
		: "cc", "memory");					\
	new_val;							\
})

static inline long long atomic64_read(const atomic64_t *v)
{
	return ACCESS_ONCE(v->counter);
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	v->counter = i;
}

static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	return __CSG_LOOP(v, i, "agr");
}

static inline long long atomic64_sub_return(long long i, atomic64_t *v)
{
	return __CSG_LOOP(v, i, "sgr");
}

static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
{
	__CSG_LOOP(v, ~mask, "ngr");
}

static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
{
	__CSG_LOOP(v, mask, "ogr");
}

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static inline long long atomic64_cmpxchg(atomic64_t *v,
					 long long old, long long new)
{
	asm volatile(
		"	csg	%0,%2,%1"
		: "+d" (old), "=Q" (v->counter)
		: "d" (new), "Q" (v->counter)
		: "cc", "memory");
	return old;
}

#undef __CSG_LOOP

#else /* CONFIG_64BIT */

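/*
 * On 31-bit kernels there is no 64-bit CS instruction, so 64-bit atomics
 * are emulated with even/odd register pairs and the COMPARE DOUBLE AND
 * SWAP (CDS) instruction.
 */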
typedef struct {
	long long counter;
} atomic64_t;

static inline long long atomic64_read(const atomic64_t *v)
{
	register_pair rp;

	asm volatile(
		"	lm	%0,%N0,%1"
		: "=&d" (rp) : "Q" (v->counter));
	return rp.pair;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	register_pair rp = {.pair = i};

	asm volatile(
		"	stm	%1,%N1,%0"
		: "=Q" (v->counter) : "d" (rp));
}

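/*
 * atomic64_xchg loads the current pair and retries CDS until the swap
 * succeeds; atomic64_cmpxchg is a single CDS that stores 'new' only if
 * the counter still matches 'old', returning the previous value either way.
 */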
static inline long long atomic64_xchg(atomic64_t *v, long long new)
{
	register_pair rp_new = {.pair = new};
	register_pair rp_old;

	asm volatile(
		"	lm	%0,%N0,%1\n"
		"0:	cds	%0,%2,%1\n"
		"	jl	0b\n"
		: "=&d" (rp_old), "=Q" (v->counter)
		: "d" (rp_new), "Q" (v->counter)
		: "cc");
	return rp_old.pair;
}

static inline long long atomic64_cmpxchg(atomic64_t *v,
					 long long old, long long new)
{
	register_pair rp_old = {.pair = old};
	register_pair rp_new = {.pair = new};

	asm volatile(
		"	cds	%0,%2,%1"
		: "+&d" (rp_old), "=Q" (v->counter)
		: "d" (rp_new), "Q" (v->counter)
		: "cc");
	return rp_old.pair;
}

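/*
 * The remaining 64-bit operations are open-coded cmpxchg loops: read the
 * counter, compute the new value, and retry until atomic64_cmpxchg
 * installs it without interference from another CPU.
 */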
static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old + i;
	} while (atomic64_cmpxchg(v, old, new) != old);
	return new;
}

static inline long long atomic64_sub_return(long long i, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old - i;
	} while (atomic64_cmpxchg(v, old, new) != old);
	return new;
}

static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old | mask;
	} while (atomic64_cmpxchg(v, old, new) != old);
}

static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old & ~mask;
	} while (atomic64_cmpxchg(v, old, new) != old);
}

#endif /* CONFIG_64BIT */

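/*
 * Helpers shared by both configurations.  atomic64_add_unless adds 'a'
 * unless the counter is 'u' and returns non-zero if the add was performed.
 */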
static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	long long c, old;

	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic64_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c != u;
}

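/*
 * Decrement the counter unless that would make it negative.  Returns the
 * decremented value, or a negative value if no decrement was done.
 */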
static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long c, old, dec;

	c = atomic64_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg(v, c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}

#define atomic64_add(_i, _v)		atomic64_add_return(_i, _v)
#define atomic64_add_negative(_i, _v)	(atomic64_add_return(_i, _v) < 0)
#define atomic64_inc(_v)		atomic64_add_return(1, _v)
#define atomic64_inc_return(_v)		atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v)	(atomic64_add_return(1, _v) == 0)
#define atomic64_sub(_i, _v)		atomic64_sub_return(_i, _v)
#define atomic64_sub_and_test(_i, _v)	(atomic64_sub_return(_i, _v) == 0)
#define atomic64_dec(_v)		atomic64_sub_return(1, _v)
#define atomic64_dec_return(_v)		atomic64_sub_return(1, _v)
#define atomic64_dec_and_test(_v)	(atomic64_sub_return(1, _v) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)

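/* The atomic inc/dec barrier helpers expand to full memory barriers. */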
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#include <asm-generic/atomic-long.h>

#endif /* __ARCH_S390_ATOMIC__ */