/* rwsem.h: R/W semaphores, public interface
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from asm-i386/semaphore.h
 */

#ifndef _LINUX_RWSEM_H
#define _LINUX_RWSEM_H

#include <linux/linkage.h>

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
#include <linux/osq_lock.h>
#endif

struct rw_semaphore;

#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
#include <linux/rwsem-spinlock.h> /* use a generic implementation */
#else
/* All arch specific implementations share the same struct */
struct rw_semaphore {
	long count;
	struct list_head wait_list;
	raw_spinlock_t wait_lock;
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	struct optimistic_spin_queue osq; /* spinner MCS lock */
	/*
	 * Write owner. Used as a speculative check to see
	 * if the owner is running on the cpu.
	 */
	struct task_struct *owner;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
#endif
};

extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);

/* Include the arch specific part */
#include <asm/rwsem.h>

/* In all implementations count != 0 means locked */
static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
	return sem->count != 0;
}
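
/*
 * Example (illustrative sketch only, not part of this interface): using
 * rwsem_is_locked() as a cheap debug assertion that a caller holds the
 * semaphore.  The my_cache/my_cache_sem names below are hypothetical.
 *
 *	static LIST_HEAD(my_cache);
 *	static DECLARE_RWSEM(my_cache_sem);
 *
 *	static void my_cache_add(struct list_head *entry)
 *	{
 *		// callers are expected to hold my_cache_sem for writing
 *		WARN_ON_ONCE(!rwsem_is_locked(&my_cache_sem));
 *		list_add(entry, &my_cache);
 *	}
 */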

#endif

/* Common initializer macros and functions */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
#define __RWSEM_OPT_INIT(lockname) , .osq = OSQ_LOCK_UNLOCKED, .owner = NULL
#else
#define __RWSEM_OPT_INIT(lockname)
#endif

#define __RWSEM_INITIALIZER(name)				\
	{ .count = RWSEM_UNLOCKED_VALUE,			\
	  .wait_list = LIST_HEAD_INIT((name).wait_list),	\
	  .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock)	\
	  __RWSEM_OPT_INIT(name)				\
	  __RWSEM_DEP_MAP_INIT(name) }

#define DECLARE_RWSEM(name) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
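
/*
 * Example (illustrative sketch): declaring and statically initializing an
 * rwsem at file scope.  The name my_config_sem is hypothetical.
 *
 *	static DECLARE_RWSEM(my_config_sem);
 */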

extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
			 struct lock_class_key *key);

#define init_rwsem(sem)						\
do {								\
	static struct lock_class_key __key;			\
								\
	__init_rwsem((sem), #sem, &__key);			\
} while (0)
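
/*
 * Example (illustrative sketch): initializing an rwsem embedded in a
 * dynamically allocated object.  struct my_dev and my_dev_alloc() are
 * hypothetical.
 *
 *	struct my_dev {
 *		struct rw_semaphore state_sem;
 *	};
 *
 *	static struct my_dev *my_dev_alloc(void)
 *	{
 *		struct my_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 *
 *		if (!dev)
 *			return NULL;
 *		init_rwsem(&dev->state_sem);
 *		return dev;
 *	}
 */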

/*
 * This is the same regardless of which rwsem implementation is being used.
 * It is just a heuristic meant to be called by somebody already holding the
 * rwsem to see if somebody of an incompatible type (e.g. a writer while we
 * hold the lock for reading) is waiting for access to the lock.
 */
static inline int rwsem_is_contended(struct rw_semaphore *sem)
{
	return !list_empty(&sem->wait_list);
}
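
/*
 * Example (illustrative sketch): a long-running lock holder periodically
 * checking for waiters and briefly dropping the lock to let them in.
 * my_sem, my_have_more_work() and my_process_one() are hypothetical.
 *
 *	down_read(&my_sem);
 *	while (my_have_more_work()) {
 *		my_process_one();
 *		if (rwsem_is_contended(&my_sem)) {
 *			// a waiter (typically a writer) is queued; yield to it
 *			up_read(&my_sem);
 *			cond_resched();
 *			down_read(&my_sem);
 *		}
 *	}
 *	up_read(&my_sem);
 */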

/*
 * lock for reading
 */
extern void down_read(struct rw_semaphore *sem);

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
extern int down_read_trylock(struct rw_semaphore *sem);
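
/*
 * Example (illustrative sketch): opportunistically taking the lock for
 * reading and falling back when it cannot be acquired immediately.  my_sem,
 * my_fast_path() and my_defer_work() are hypothetical.
 *
 *	if (down_read_trylock(&my_sem)) {
 *		my_fast_path();
 *		up_read(&my_sem);
 *	} else {
 *		my_defer_work();	// e.g. queue the work to retry later
 *	}
 */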

/*
 * lock for writing
 */
extern void down_write(struct rw_semaphore *sem);

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
extern int down_write_trylock(struct rw_semaphore *sem);

/*
 * release a read lock
 */
extern void up_read(struct rw_semaphore *sem);

/*
 * release a write lock
 */
extern void up_write(struct rw_semaphore *sem);
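
/*
 * Example (illustrative sketch): the basic reader and writer critical
 * sections.  Any number of readers may hold the lock at once, while a writer
 * excludes both readers and other writers.  my_sem and the my_*() helpers
 * are hypothetical.
 *
 *	// reader side
 *	down_read(&my_sem);
 *	my_read_shared_state();
 *	up_read(&my_sem);
 *
 *	// writer side
 *	down_write(&my_sem);
 *	my_modify_shared_state();
 *	up_write(&my_sem);
 */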

/*
 * downgrade write lock to read lock
 */
extern void downgrade_write(struct rw_semaphore *sem);
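
/*
 * Example (illustrative sketch): updating shared state under the write lock,
 * then downgrading to a read lock so other readers can proceed while the
 * caller keeps using the data it just installed.  The my_*() helpers are
 * hypothetical.
 *
 *	down_write(&my_sem);
 *	my_install_new_data();
 *	downgrade_write(&my_sem);	// now held for reading; readers may enter
 *	my_use_new_data();
 *	up_read(&my_sem);
 */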

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * nested locking. NOTE: rwsems are not allowed to recurse
 * (which occurs if the same task tries to acquire the same
 * lock instance multiple times), but multiple locks of the
 * same lock class might be taken, if the order of the locks
 * is always the same. This ordering rule can be expressed
 * to lockdep via the _nested() APIs, by enumerating the
 * subclasses that are used. (If the nesting relationship is
 * static then another method for expressing nested locking is
 * the explicit definition of lock class keys and the use of
 * lockdep_set_class() at lock initialization time.
 * See Documentation/locking/lockdep-design.txt for more details.)
 */
extern void down_read_nested(struct rw_semaphore *sem, int subclass);
extern void down_write_nested(struct rw_semaphore *sem, int subclass);
extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock);
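
/*
 * Example (illustrative sketch): taking two rwsems of the same lock class in
 * a fixed parent-before-child order and telling lockdep about it via the
 * _nested() API.  The parent/child objects and my_walk() are hypothetical;
 * SINGLE_DEPTH_NESTING is the usual subclass for one level of nesting.
 *
 *	down_read(&parent->sem);
 *	down_read_nested(&child->sem, SINGLE_DEPTH_NESTING);
 *	my_walk(parent, child);
 *	up_read(&child->sem);
 *	up_read(&parent->sem);
 */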

# define down_write_nest_lock(sem, nest_lock)			\
do {								\
	typecheck(struct lockdep_map *, &(nest_lock)->dep_map);	\
	_down_write_nest_lock(sem, &(nest_lock)->dep_map);	\
} while (0)

/*
 * Take/release a lock when the task that releases it will not be the one
 * that acquired it.
 *
 * [ This API should be avoided as much as possible - the
 *   proper abstraction for this case is completions. ]
 */
extern void down_read_non_owner(struct rw_semaphore *sem);
extern void up_read_non_owner(struct rw_semaphore *sem);
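
/*
 * Example (illustrative sketch): acquiring the lock in one task and releasing
 * it from another, which is what the *_non_owner() variants exist for.
 * my_sem, struct my_req and the my_*() helpers are hypothetical; prefer
 * completions where the design allows it.
 *
 *	// submitting task
 *	struct my_req *req = my_req_alloc();
 *
 *	down_read_non_owner(&my_sem);
 *	my_submit_async(req);		// req's completion path releases the lock
 *
 *	// completion path, typically running in a different task
 *	static void my_req_done(struct my_req *req)
 *	{
 *		my_finish(req);
 *		up_read_non_owner(&my_sem);
 *	}
 */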
#else
# define down_read_nested(sem, subclass)		down_read(sem)
# define down_write_nest_lock(sem, nest_lock)	down_write(sem)
# define down_write_nested(sem, subclass)	down_write(sem)
# define down_read_non_owner(sem)		down_read(sem)
# define up_read_non_owner(sem)			up_read(sem)
#endif

#endif /* _LINUX_RWSEM_H */