/*
 * MCS lock defines
 *
 * This file contains the main data structure and API definitions of MCS lock.
 *
 * The MCS lock (proposed by Mellor-Crummey and Scott) is a simple spin-lock
 * with the desirable properties of being fair, and with each cpu trying
 * to acquire the lock spinning on a local variable.
 * It avoids the expensive cache-line bouncing that common test-and-set
 * spin-lock implementations incur.
 */
#ifndef __LINUX_MCS_SPINLOCK_H
#define __LINUX_MCS_SPINLOCK_H

#include <asm/mcs_spinlock.h>

struct mcs_spinlock {
	struct mcs_spinlock *next;
	int locked; /* 1 if lock acquired */
};
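
/*
 * The lock itself is a pointer to the *tail* of the queue of waiters.
 * The queue is threaded through the ->next pointers, the node at the
 * head of the queue holds the lock, and each waiter spins only on its
 * own ->locked field.  Roughly:
 *
 *                                       *lock
 *                                         |
 *                                         v
 *   [holder] -> [waiter] -> [waiter] -> [tail]
 *    in c.s.    spins on    spins on
 *               ->locked    ->locked
 */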

#ifndef arch_mcs_spin_lock_contended
/*
 * Using smp_load_acquire() provides a memory barrier that ensures
 * subsequent operations happen after the lock is acquired.
 */
#define arch_mcs_spin_lock_contended(l)					\
do {									\
	while (!(smp_load_acquire(l)))					\
		cpu_relax_lowlatency();					\
} while (0)
#endif

#ifndef arch_mcs_spin_unlock_contended
/*
 * smp_store_release() provides a memory barrier to ensure all
 * operations in the critical section have been completed before
 * unlocking.
 */
#define arch_mcs_spin_unlock_contended(l)				\
	smp_store_release((l), 1)
#endif
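
/*
 * An architecture can override the two hooks above with something
 * cheaper than pure spinning.  As a sketch, a wait-for-event based
 * variant along the lines of 32-bit ARM (assuming wfe() and dsb_sev()
 * are available, with the smp_mb() ordering prior stores before the
 * first wfe()) might look like:
 *
 *	#define arch_mcs_spin_lock_contended(l)				\
 *	do {								\
 *		smp_mb();						\
 *		while (!(smp_load_acquire(l)))				\
 *			wfe();						\
 *	} while (0)
 *
 *	#define arch_mcs_spin_unlock_contended(l)			\
 *	do {								\
 *		smp_store_release((l), 1);				\
 *		dsb_sev();						\
 *	} while (0)
 */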

/*
 * Note: on many architectures (x86 being a notable exception), the
 * smp_load_acquire/smp_store_release pair is not sufficient to form
 * a full memory barrier across cpus for an mcs_spin_unlock()/
 * mcs_spin_lock() sequence.  Applications that need the unlock/lock
 * pair to act as a full barrier across multiple cpus should use
 * smp_mb__after_unlock_lock() after mcs_spin_lock().
 */
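
/*
 * For example, a caller that needs a remote mcs_spin_unlock() together
 * with its own mcs_spin_lock() to act as a full barrier would do
 * something like:
 *
 *	mcs_spin_lock(&lock, &node);
 *	smp_mb__after_unlock_lock();
 *	... critical section, now fully ordered against the previous
 *	    lock holder's critical section ...
 *	mcs_spin_unlock(&lock, &node);
 */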

/*
 * In order to acquire the lock, the caller should declare a local node and
 * pass a reference to that node to this function in addition to the lock
 * (see the usage sketch after mcs_spin_unlock() below).
 * If the lock has already been acquired, then this will spin on
 * node->locked until the previous lock holder sets it in mcs_spin_unlock().
 */
static inline
void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
{
	struct mcs_spinlock *prev;

	/* Init node */
	node->locked = 0;
	node->next   = NULL;

	/*
	 * xchg() implies a full barrier, so the node initialization above
	 * is visible before the node is published as the new tail.
	 */
	prev = xchg(lock, node);
	if (likely(prev == NULL)) {
		/*
		 * Lock acquired; there is no need to set node->locked to 1.
		 * Threads spin only on their own node->locked value for lock
		 * acquisition.  However, since this thread acquired the lock
		 * immediately and does not spin on its own node->locked, that
		 * value won't be used.  If a debug mode is needed to
		 * audit lock status, then set node->locked here.
		 */
		return;
	}
	/* Queue behind the old tail; the lock will be handed down to us. */
	ACCESS_ONCE(prev->next) = node;

	/* Wait until the lock holder passes the lock down. */
	arch_mcs_spin_lock_contended(&node->locked);
}

/*
 * Releases the lock. The caller should pass in the corresponding node that
 * was used to acquire the lock.
 */
static inline
void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
{
	struct mcs_spinlock *next = ACCESS_ONCE(node->next);

	if (likely(!next)) {
		/*
		 * No successor is queued: release the lock by resetting the
		 * tail pointer to NULL.
		 */
		if (likely(cmpxchg(lock, node, NULL) == node))
			return;
		/*
		 * The cmpxchg() failed, so a new waiter has done the xchg()
		 * in mcs_spin_lock() but has not yet linked itself in.
		 * Wait until it sets our next pointer.
		 */
		while (!(next = ACCESS_ONCE(node->next)))
			cpu_relax_lowlatency();
	}

	/* Pass lock to next waiter. */
	arch_mcs_spin_unlock_contended(&next->locked);
}
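
/*
 * Typical usage, with the node on the caller's stack (sketch only, the
 * names are illustrative):
 *
 *	static struct mcs_spinlock *lock;	// NULL == unlocked
 *	struct mcs_spinlock node;		// on this cpu's stack
 *
 *	mcs_spin_lock(&lock, &node);
 *	... critical section ...
 *	mcs_spin_unlock(&lock, &node);
 *
 * The same node must be passed to both calls and must remain valid until
 * mcs_spin_unlock() returns.
 */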

/*
 * Cancellable version of the MCS lock above.
 *
 * Intended for adaptive spinning of sleeping locks:
 * mutex_lock()/rwsem_down_{read,write}() etc.
 */

struct optimistic_spin_node {
	struct optimistic_spin_node *next, *prev;
	int locked; /* 1 if lock acquired */
	int cpu; /* encoded CPU # value */
};

struct optimistic_spin_queue;	/* defined in linux/osq_lock.h */

extern bool osq_lock(struct optimistic_spin_queue *lock);
extern void osq_unlock(struct optimistic_spin_queue *lock);
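
/*
 * Sketch of the intended use by a sleeping lock ("sem" and its ->osq
 * member are illustrative, following the mutex/rwsem pattern):
 *
 *	if (osq_lock(&sem->osq)) {
 *		// optimistically spin waiting for the owner to go away
 *		...
 *		osq_unlock(&sem->osq);
 *	}
 *	// osq_lock() returning false means the spin was cancelled
 *	// (e.g. need_resched()) and we should fall back to sleeping
 */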

#endif /* __LINUX_MCS_SPINLOCK_H */