#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif

/*
 * ARMv6 Spin-locking.
 *
 * We exclusively read the old value.  If it is zero, we may have
 * won the lock, so we try exclusively storing it.  A memory barrier
 * is required after we get a lock, and before we release it, because
 * V6 CPUs are assumed to have weakly ordered memory.
 *
 * Unlocked value: 0
 * Locked value: 1
 */
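/*
 * On CONFIG_CPU_32v6K we additionally use WFE/SEV: a contended locker
 * sleeps in WFE instead of busy-waiting, and the unlock paths issue a
 * DSB followed by SEV to wake any waiters.
 */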

#define __raw_spin_is_locked(x)		((x)->lock != 0)
#define __raw_spin_unlock_wait(lock) \
	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)

/*
 * We do not re-enable interrupts while waiting, so the _flags variant
 * simply takes the lock.
 */
#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
#ifdef CONFIG_CPU_32v6K
"	wfene\n"
#endif
"	strexeq	%0, %2, [%1]\n"
"	teqeq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");

	smp_mb();
}

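/*
 * Single attempt: if the lock reads as free we try one exclusive store
 * and succeed only if it sticks; we never loop or wait.
 */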
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

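/*
 * Release: the barrier orders the critical section before the store of
 * zero.  On CONFIG_CPU_32v6K we then issue a DSB and SEV so CPUs
 * sleeping in WFE re-check the lock.
 */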
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	smp_mb();

	__asm__ __volatile__(
"	str	%1, [%0]\n"
#ifdef CONFIG_CPU_32v6K
"	mcr	p15, 0, %1, c7, c10, 4\n" /* DSB */
"	sev"
#endif
	:
	: "r" (&lock->lock), "r" (0)
	: "cc");
}

/*
 * RWLOCKS
 *
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */
#define rwlock_is_locked(x)	(*((volatile unsigned int *)(x)) != 0)

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
#ifdef CONFIG_CPU_32v6K
"	wfene\n"
#endif
"	strexeq	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	smp_mb();
}

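/*
 * Single attempt at the write lock: it succeeds only if no readers and
 * no writer currently hold the lock (value 0).
 */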
static inline int __raw_write_trylock(raw_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

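/*
 * The writer owns the lock exclusively, so releasing is a plain store
 * of zero after the barrier; on CONFIG_CPU_32v6K a DSB and SEV then
 * wake anyone sleeping in WFE.
 */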
static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"str	%1, [%0]\n"
#ifdef CONFIG_CPU_32v6K
"	mcr	p15, 0, %1, c7, c10, 4\n" /* DSB */
"	sev\n"
#endif
	:
	: "r" (&rw->lock), "r" (0)
	: "cc");
}

/* write_can_lock - would write_trylock() succeed? */
#define __raw_write_can_lock(x)		((x)->lock == 0)

/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this location.
 *    If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.
 */
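/*
 * ADDS sets the N flag when bit 31 (the write lock) is held, so
 * STREXPL only runs when no writer owns the lock; RSBPLS negates the
 * STREX status so a failed store also comes out negative and BMI
 * retries in either case.
 */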
static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
#ifdef CONFIG_CPU_32v6K
"	wfemi\n"
#endif
"	rsbpls	%0, %1, #0\n"
"	bmi	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
}

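/*
 * Drop our reader count with a LDREX/STREX loop, since other readers
 * may be updating the count at the same time.  On CONFIG_CPU_32v6K,
 * once the count reaches zero, DSB and SEV so a waiting writer sleeping
 * in WFE can wake up.
 */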
static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	smp_mb();

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, #1\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
#ifdef CONFIG_CPU_32v6K
"\n	cmp	%0, #0\n"
"	mcreq   p15, 0, %0, c7, c10, 4\n" /* DSB */
"	seveq"
#endif
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");
}

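/*
 * tmp2 starts at 1 and is only cleared by a successful STREXPL, so it
 * stays non-zero both when a writer holds the lock (the ADDS result is
 * negative and the store is skipped) and when the exclusive store
 * fails.
 */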
static inline int __raw_read_trylock(raw_rwlock_t *rw)
{
	unsigned long tmp, tmp2 = 1;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
	: "=&r" (tmp), "+r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
	return tmp2 == 0;
}

/* read_can_lock - would read_trylock() succeed? */
#define __raw_read_can_lock(x)		((x)->lock < 0x80000000)

#endif /* __ASM_SPINLOCK_H */