/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/atomic.h>
#include <arch/chip.h>

/* This page is remapped on startup to be hash-for-home. */
int atomic_locks[PAGE_SIZE / sizeof(int)] __page_aligned_bss;

int *__atomic_hashed_lock(volatile void *v)
{
	/* NOTE: this code must match "sys_cmpxchg" in kernel/intvec_32.S */
	/*
	 * Use bits [3, 3 + ATOMIC_HASH_SHIFT) as the lock index.
	 * Using mm works here because atomic_locks is page aligned.
	 */
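	/*
	 * For illustration: assuming atomic_locks stays page aligned,
	 * the mm (masked-merge) instruction below computes roughly
	 *
	 *   &atomic_locks[((unsigned long)v >> 3) & (ATOMIC_HASH_SIZE - 1)]
	 *
	 * i.e. the word-index bits of the result come from v and the
	 * remaining bits from the atomic_locks base address.
	 */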
	unsigned long ptr = __insn_mm((unsigned long)v >> 1,
				      (unsigned long)atomic_locks,
				      2, (ATOMIC_HASH_SHIFT + 2) - 1);
	return (int *)ptr;
}

#ifdef CONFIG_SMP
/* Return whether the passed pointer is a valid atomic lock pointer. */
static int is_atomic_lock(int *p)
{
	return p >= &atomic_locks[0] && p < &atomic_locks[ATOMIC_HASH_SIZE];
}

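/*
 * Release a hashed lock word after an atomic fast path has faulted
 * while holding it.  Presumably invoked from the fault-handling path;
 * the lock must currently be held (i.e. set to 1).
 */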
void __atomic_fault_unlock(int *irqlock_word)
{
	BUG_ON(!is_atomic_lock(irqlock_word));
	BUG_ON(*irqlock_word != 1);
	*irqlock_word = 0;
}

#endif /* CONFIG_SMP */

static inline int *__atomic_setup(volatile void *v)
{
	/* Issue a load to the target to bring it into cache. */
	*(volatile int *)v;
	return __atomic_hashed_lock(v);
}

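/*
 * The _atomic_xxx() wrappers below all follow the same pattern: prime
 * the target cache line and hash its address to a lock word via
 * __atomic_setup(), then pass both to the corresponding out-of-line
 * __atomic_xxx() helper, which is expected to perform the operation
 * under that lock and report the previous value in ".val".
 */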
int _atomic_xchg(int *v, int n)
{
	return __atomic_xchg(v, __atomic_setup(v), n).val;
}
EXPORT_SYMBOL(_atomic_xchg);

int _atomic_xchg_add(int *v, int i)
{
	return __atomic_xchg_add(v, __atomic_setup(v), i).val;
}
EXPORT_SYMBOL(_atomic_xchg_add);

int _atomic_xchg_add_unless(int *v, int a, int u)
{
	/*
	 * Note: argument order is switched here since it is easier
	 * to use the first argument consistently as the "old value"
	 * in the assembly, as is done for _atomic_cmpxchg().
	 */
	return __atomic_xchg_add_unless(v, __atomic_setup(v), u, a).val;
}
EXPORT_SYMBOL(_atomic_xchg_add_unless);

int _atomic_cmpxchg(int *v, int o, int n)
{
	return __atomic_cmpxchg(v, __atomic_setup(v), o, n).val;
}
EXPORT_SYMBOL(_atomic_cmpxchg);

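/*
 * The bitop wrappers take "unsigned long *" but reuse the 32-bit
 * helpers; the cast to "int *" is safe here on the assumption that
 * this 32-bit file is only built where sizeof(long) == sizeof(int).
 */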
unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask)
{
	return __atomic_or((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_or);

unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask)
{
	return __atomic_andn((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_andn);

unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask)
{
	return __atomic_xor((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_xor);


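/*
 * The 64-bit variants reuse the same 32-bit hashed lock words; the
 * lock only provides mutual exclusion for the (presumably assembly)
 * helpers, so the width of the value being updated does not matter.
 */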
long long _atomic64_xchg(long long *v, long long n)
{
	return __atomic64_xchg(v, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_xchg);

long long _atomic64_xchg_add(long long *v, long long i)
{
	return __atomic64_xchg_add(v, __atomic_setup(v), i);
}
EXPORT_SYMBOL(_atomic64_xchg_add);

long long _atomic64_xchg_add_unless(long long *v, long long a, long long u)
{
	/*
	 * Note: argument order is switched here since it is easier
	 * to use the first argument consistently as the "old value"
	 * in the assembly, as is done for _atomic_cmpxchg().
	 */
	return __atomic64_xchg_add_unless(v, __atomic_setup(v), u, a);
}
EXPORT_SYMBOL(_atomic64_xchg_add_unless);

long long _atomic64_cmpxchg(long long *v, long long o, long long n)
{
	return __atomic64_cmpxchg(v, __atomic_setup(v), o, n);
}
EXPORT_SYMBOL(_atomic64_cmpxchg);


/*
 * If any of the atomic or futex routines hit a bad address (not in
 * the page tables at kernel PL) this routine is called.  The futex
 * routines are never used on kernel space, and the normal atomics and
 * bitops are never used on user space.  So a fault on kernel space
 * must be fatal, but a fault on userspace is a futex fault and we
 * need to return -EFAULT.  Note that the context this routine is
 * invoked in is the context of the "_atomic_xxx()" routines called
 * by the functions in this file.
 */
struct __get_user __atomic_bad_address(int __user *addr)
{
	if (unlikely(!access_ok(VERIFY_WRITE, addr, sizeof(int))))
		panic("Bad address used for kernel atomic op: %p\n", addr);
	return (struct __get_user) { .err = -EFAULT };
}


void __init __init_atomic_per_cpu(void)
{
	/* Validate power-of-two and "bigger than cpus" assumption */
	BUILD_BUG_ON(ATOMIC_HASH_SIZE & (ATOMIC_HASH_SIZE-1));
	BUG_ON(ATOMIC_HASH_SIZE < nr_cpu_ids);

	/*
	 * On TILEPro we prefer to use a single hash-for-home
	 * page, since this means atomic operations are less
	 * likely to encounter a TLB fault and thus should
	 * in general perform faster.  You may wish to disable
	 * this in situations where few hash-for-home tiles
	 * are configured.
	 */
	BUG_ON((unsigned long)atomic_locks % PAGE_SIZE != 0);

	/* The locks must all fit on one page. */
	BUILD_BUG_ON(ATOMIC_HASH_SIZE * sizeof(int) > PAGE_SIZE);

	/*
	 * We use the page offset of the atomic value's address as
	 * an index into atomic_locks, excluding the low 3 bits.
	 * That should not produce more indices than ATOMIC_HASH_SIZE.
	 */
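	/*
	 * For example, assuming 64 KB pages, PAGE_SIZE >> 3 is 8192,
	 * so ATOMIC_HASH_SIZE must provide at least 8192 lock words
	 * (32 KB), which still fits within the single page checked above.
	 */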
	BUILD_BUG_ON((PAGE_SIZE >> 3) > ATOMIC_HASH_SIZE);
}