/*
   ----------------------------------------------------------------

   Notice that the following BSD-style license applies to this one
   file (helgrind.h) only.  The entire rest of Valgrind is licensed
   under the terms of the GNU General Public License, version 2.  See
   the COPYING file in the source distribution for details.

   ----------------------------------------------------------------

   This file is part of Helgrind, a Valgrind tool for detecting errors
   in threaded programs.

   Copyright (C) 2007-2013 OpenWorks LLP
      info@open-works.co.uk

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions
   are met:

   1. Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.

   2. The origin of this software must not be misrepresented; you must
      not claim that you wrote the original software.  If you use this
      software in a product, an acknowledgment in the product
      documentation would be appreciated but is not required.

   3. Altered source versions must be plainly marked as such, and must
      not be misrepresented as being the original software.

   4. The name of the author may not be used to endorse or promote
      products derived from this software without specific prior written
      permission.

   THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
   OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
   WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
   DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
   GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
   WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   ----------------------------------------------------------------

   Notice that the above BSD-style license applies to this one file
   (helgrind.h) only.  The entire rest of Valgrind is licensed under
   the terms of the GNU General Public License, version 2.  See the
   COPYING file in the source distribution for details.

   ----------------------------------------------------------------
*/

#ifndef __HELGRIND_H
#define __HELGRIND_H

#include "valgrind.h"

/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
   This enum comprises an ABI exported by Valgrind to programs
   which use client requests.  DO NOT CHANGE THE ORDER OF THESE
   ENTRIES, NOR DELETE ANY -- add new ones at the end. */
typedef
   enum {
      VG_USERREQ__HG_CLEAN_MEMORY = VG_USERREQ_TOOL_BASE('H','G'),

      /* The rest are for Helgrind's internal use.  Not for end-user
         use.  Do not use them unless you are a Valgrind developer. */

      /* Notify the tool what this thread's pthread_t is. */
      _VG_USERREQ__HG_SET_MY_PTHREAD_T = VG_USERREQ_TOOL_BASE('H','G')
                                         + 256,
      _VG_USERREQ__HG_PTH_API_ERROR,              /* char*, int */
      _VG_USERREQ__HG_PTHREAD_JOIN_POST,          /* pthread_t of quitter */
      _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST,    /* pth_mx_t*, long mbRec */
      _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE,  /* pth_mx_t*, long isInit */
      _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE,   /* pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST,  /* pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE,     /* pth_mx_t*, long isTryLock */
      _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST,    /* pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE,    /* pth_cond_t* */
      _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE, /* pth_cond_t* */
      _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE,      /* pth_cond_t*, pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST,     /* pth_cond_t*, pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE,   /* pth_cond_t*, long isInit */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST,   /* pth_rwlk_t* */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE, /* pth_rwlk_t* */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE,    /* pth_rwlk_t*, long isW */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST,   /* pth_rwlk_t*, long isW */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE,  /* pth_rwlk_t* */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST, /* pth_rwlk_t* */
      _VG_USERREQ__HG_POSIX_SEM_INIT_POST,        /* sem_t*, ulong value */
      _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE,      /* sem_t* */
      _VG_USERREQ__HG_POSIX_SEM_POST_PRE,         /* sem_t* */
      _VG_USERREQ__HG_POSIX_SEM_WAIT_POST,        /* sem_t* */
      _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE,   /* pth_bar_t*, ulong, ulong */
      _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE,   /* pth_bar_t* */
      _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE, /* pth_bar_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE,  /* pth_slk_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST, /* pth_slk_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_PRE,      /* pth_slk_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_POST,     /* pth_slk_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_DESTROY_PRE,   /* pth_slk_t* */
      _VG_USERREQ__HG_CLIENTREQ_UNIMP,            /* char* */
      _VG_USERREQ__HG_USERSO_SEND_PRE,            /* arbitrary UWord SO-tag */
      _VG_USERREQ__HG_USERSO_RECV_POST,           /* arbitrary UWord SO-tag */
      _VG_USERREQ__HG_USERSO_FORGET_ALL,          /* arbitrary UWord SO-tag */
      _VG_USERREQ__HG_RESERVED2,                  /* Do not use */
      _VG_USERREQ__HG_RESERVED3,                  /* Do not use */
      _VG_USERREQ__HG_RESERVED4,                  /* Do not use */
      _VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED,      /* Addr a, ulong len */
      _VG_USERREQ__HG_ARANGE_MAKE_TRACKED,        /* Addr a, ulong len */
      _VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE, /* pth_bar_t*, ulong */
      _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK,     /* Addr start_of_block */
      _VG_USERREQ__HG_PTHREAD_COND_INIT_POST,     /* pth_cond_t*, pth_cond_attr_t* */
      _VG_USERREQ__HG_GNAT_MASTER_HOOK,           /* void* d, void* m, Word ml */
      _VG_USERREQ__HG_GNAT_MASTER_COMPLETED_HOOK, /* void* s, Word ml */
      _VG_USERREQ__HG_GET_ABITS                   /* Addr a, Addr abits, ulong len */
   } Vg_TCheckClientRequest;


/*----------------------------------------------------------------*/
/*---                                                          ---*/
/*--- Implementation-only facilities.  Not for end-user use.   ---*/
/*--- For end-user facilities see below (the next section in   ---*/
/*--- this file).                                              ---*/
/*---                                                          ---*/
/*----------------------------------------------------------------*/

/* Do a client request.  These are macros rather than functions so
   as to avoid having an extra frame in stack traces.

   NB: these duplicate definitions in hg_intercepts.c.  But here, we
   have to make do with weaker typing (no definition of Word etc) and
   no assertions, whereas in hg_intercepts.c we can use those
   facilities.  Obviously it's important that the two sets of
   definitions are kept in sync.

   The commented-out asserts should actually hold, but unfortunately
   they can't be allowed to be visible here, because that would
   require the end-user code to #include <assert.h>.
*/

#define DO_CREQ_v_W(_creqF, _ty1F,_arg1F)                \
   do {                                                  \
      long int _arg1;                                    \
      /* assert(sizeof(_ty1F) == sizeof(long int)); */   \
      _arg1 = (long int)(_arg1F);                        \
      VALGRIND_DO_CLIENT_REQUEST_STMT(                   \
                                 (_creqF),               \
                                 _arg1, 0,0,0,0);        \
   } while (0)

#define DO_CREQ_W_W(_resF, _dfltF, _creqF, _ty1F,_arg1F) \
   do {                                                  \
      long int _qzz_res;                                 \
      long int _arg1;                                    \
      /* assert(sizeof(_ty1F) == sizeof(long int)); */   \
      _arg1 = (long int)(_arg1F);                        \
      _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(        \
                                 (_dfltF),               \
                                 (_creqF),               \
                                 _arg1, 0,0,0,0);        \
      _resF = _qzz_res;                                  \
   } while (0)

#define DO_CREQ_v_WW(_creqF, _ty1F,_arg1F, _ty2F,_arg2F) \
   do {                                                  \
      long int _arg1, _arg2;                             \
      /* assert(sizeof(_ty1F) == sizeof(long int)); */   \
      /* assert(sizeof(_ty2F) == sizeof(long int)); */   \
      _arg1 = (long int)(_arg1F);                        \
      _arg2 = (long int)(_arg2F);                        \
      VALGRIND_DO_CLIENT_REQUEST_STMT(                   \
                                 (_creqF),               \
                                 _arg1,_arg2,0,0,0);     \
   } while (0)

#define DO_CREQ_v_WWW(_creqF, _ty1F,_arg1F,              \
                      _ty2F,_arg2F, _ty3F, _arg3F)       \
   do {                                                  \
      long int _arg1, _arg2, _arg3;                      \
      /* assert(sizeof(_ty1F) == sizeof(long int)); */   \
      /* assert(sizeof(_ty2F) == sizeof(long int)); */   \
      /* assert(sizeof(_ty3F) == sizeof(long int)); */   \
      _arg1 = (long int)(_arg1F);                        \
      _arg2 = (long int)(_arg2F);                        \
      _arg3 = (long int)(_arg3F);                        \
      VALGRIND_DO_CLIENT_REQUEST_STMT(                   \
                                 (_creqF),               \
                                 _arg1,_arg2,_arg3,0,0); \
   } while (0)

#define DO_CREQ_W_WWW(_resF, _dfltF, _creqF, _ty1F,_arg1F, \
                      _ty2F,_arg2F, _ty3F, _arg3F)       \
   do {                                                  \
      long int _qzz_res;                                 \
      long int _arg1, _arg2, _arg3;                      \
      /* assert(sizeof(_ty1F) == sizeof(long int)); */   \
      _arg1 = (long int)(_arg1F);                        \
      _arg2 = (long int)(_arg2F);                        \
      _arg3 = (long int)(_arg3F);                        \
      _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(        \
                                 (_dfltF),               \
                                 (_creqF),               \
                                 _arg1,_arg2,_arg3,0,0); \
      _resF = _qzz_res;                                  \
   } while (0)


#define _HG_CLIENTREQ_UNIMP(_qzz_str)                    \
   DO_CREQ_v_W(_VG_USERREQ__HG_CLIENTREQ_UNIMP,          \
               (char*),(_qzz_str))


/*----------------------------------------------------------------*/
/*---                                                          ---*/
/*--- Helgrind-native requests.  These allow access to         ---*/
/*--- the same set of annotation primitives that are used      ---*/
/*--- to build the POSIX pthread wrappers.                     ---*/
/*---                                                          ---*/
/*----------------------------------------------------------------*/

/* ----------------------------------------------------------
   For describing ordinary mutexes (non-rwlocks).  For rwlock
   descriptions see ANNOTATE_RWLOCK_* below.
   ---------------------------------------------------------- */

/* Notify here immediately after mutex creation.  _mbRec == 0 for a
   non-recursive mutex, 1 for a recursive mutex. */
#define VALGRIND_HG_MUTEX_INIT_POST(_mutex, _mbRec)          \
   DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST,     \
                void*,(_mutex), long,(_mbRec))

/* Notify here immediately before mutex acquisition.  _isTryLock == 0
   for a normal acquisition, 1 for a "try" style acquisition. */
#define VALGRIND_HG_MUTEX_LOCK_PRE(_mutex, _isTryLock)       \
   DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE,      \
                void*,(_mutex), long,(_isTryLock))

/* Notify here immediately after a successful mutex acquisition. */
#define VALGRIND_HG_MUTEX_LOCK_POST(_mutex)                  \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST,      \
               void*,(_mutex))

/* Notify here immediately before a mutex release. */
#define VALGRIND_HG_MUTEX_UNLOCK_PRE(_mutex)                 \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE,     \
               void*,(_mutex))

/* Notify here immediately after a mutex release. */
#define VALGRIND_HG_MUTEX_UNLOCK_POST(_mutex)                \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST,    \
               void*,(_mutex))

/* Notify here immediately before mutex destruction. */
#define VALGRIND_HG_MUTEX_DESTROY_PRE(_mutex)                \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE,    \
               void*,(_mutex))

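/* Example (illustrative sketch): how a hypothetical user-space mutex
   implementation might drive the annotations above.  The type
   my_mutex_t and the my_mutex_* functions are hypothetical; only the
   VALGRIND_HG_MUTEX_* macros are part of this header.

      typedef struct { volatile int held; } my_mutex_t;

      void my_mutex_init(my_mutex_t* mu) {
         mu->held = 0;
         VALGRIND_HG_MUTEX_INIT_POST(mu, 0);   // 0 == non-recursive
      }

      void my_mutex_lock(my_mutex_t* mu) {
         VALGRIND_HG_MUTEX_LOCK_PRE(mu, 0);    // 0 == blocking acquire
         while (__sync_lock_test_and_set(&mu->held, 1)) { }
         VALGRIND_HG_MUTEX_LOCK_POST(mu);      // acquisition succeeded
      }

      void my_mutex_unlock(my_mutex_t* mu) {
         VALGRIND_HG_MUTEX_UNLOCK_PRE(mu);
         __sync_lock_release(&mu->held);
         VALGRIND_HG_MUTEX_UNLOCK_POST(mu);
      }

      void my_mutex_destroy(my_mutex_t* mu) {
         VALGRIND_HG_MUTEX_DESTROY_PRE(mu);
      }
*/
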
/* ----------------------------------------------------------
   For describing semaphores.
   ---------------------------------------------------------- */

/* Notify here immediately after semaphore creation. */
#define VALGRIND_HG_SEM_INIT_POST(_sem, _value)              \
   DO_CREQ_v_WW(_VG_USERREQ__HG_POSIX_SEM_INIT_POST,         \
                void*, (_sem), unsigned long, (_value))

/* Notify here immediately after a semaphore wait (an acquire-style
   operation). */
#define VALGRIND_HG_SEM_WAIT_POST(_sem)                      \
   DO_CREQ_v_W(_VG_USERREQ__HG_POSIX_SEM_WAIT_POST,          \
               void*,(_sem))

/* Notify here immediately before a semaphore post (a release-style
   operation). */
#define VALGRIND_HG_SEM_POST_PRE(_sem)                       \
   DO_CREQ_v_W(_VG_USERREQ__HG_POSIX_SEM_POST_PRE,           \
               void*,(_sem))

/* Notify here immediately before semaphore destruction. */
#define VALGRIND_HG_SEM_DESTROY_PRE(_sem)                    \
   DO_CREQ_v_W(_VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE,        \
               void*, (_sem))

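/* Example (illustrative sketch): where the semaphore notifications
   belong around a POSIX semaphore hand-off.  Helgrind's interceptors
   already do this for the real sem_* functions; it is shown here only
   to illustrate placement for a semaphore that Helgrind does not
   intercept.

      sem_t s;                          // #include <semaphore.h>
      sem_init(&s, 0, 0);
      VALGRIND_HG_SEM_INIT_POST(&s, 0);

      // producer thread:
      VALGRIND_HG_SEM_POST_PRE(&s);     // just before the post
      sem_post(&s);

      // consumer thread:
      sem_wait(&s);
      VALGRIND_HG_SEM_WAIT_POST(&s);    // just after a successful wait
*/
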
/* ----------------------------------------------------------
   For describing barriers.
   ---------------------------------------------------------- */

/* Notify here immediately before barrier creation.  _count is the
   capacity.  _resizable == 0 means the barrier may not be resized, 1
   means it may be. */
#define VALGRIND_HG_BARRIER_INIT_PRE(_bar, _count, _resizable) \
   DO_CREQ_v_WWW(_VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE,   \
                 void*,(_bar),                               \
                 unsigned long,(_count),                     \
                 unsigned long,(_resizable))

/* Notify here immediately before arrival at a barrier. */
#define VALGRIND_HG_BARRIER_WAIT_PRE(_bar)                   \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE,     \
               void*,(_bar))

/* Notify here immediately before a resize (change of barrier
   capacity).  If _newcount >= the existing capacity, then there is no
   change in the state of any threads waiting at the barrier.  If
   _newcount < the existing capacity, and >= _newcount threads are
   currently waiting at the barrier, then this notification is
   considered to also have the effect of telling the checker that all
   waiting threads have now moved past the barrier.  (I can't think of
   any other sane semantics.) */
#define VALGRIND_HG_BARRIER_RESIZE_PRE(_bar, _newcount)      \
   DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE,  \
                void*,(_bar),                                \
                unsigned long,(_newcount))

/* Notify here immediately before barrier destruction. */
#define VALGRIND_HG_BARRIER_DESTROY_PRE(_bar)                \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE,  \
               void*,(_bar))

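/* Example (illustrative sketch): placement of the barrier
   notifications around a pthread barrier.  Helgrind's pthread
   wrappers already emit these for pthread_barrier_t; this is shown
   only to illustrate placement for a barrier implementation that
   Helgrind does not intercept.

      pthread_barrier_t b;                      // #include <pthread.h>
      VALGRIND_HG_BARRIER_INIT_PRE(&b, 4, 0);   // capacity 4, not resizable
      pthread_barrier_init(&b, NULL, 4);

      // in each of the 4 participating threads:
      VALGRIND_HG_BARRIER_WAIT_PRE(&b);         // just before arrival
      pthread_barrier_wait(&b);

      // when done:
      VALGRIND_HG_BARRIER_DESTROY_PRE(&b);
      pthread_barrier_destroy(&b);
*/
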
/* ----------------------------------------------------------
   For describing memory ownership changes.
   ---------------------------------------------------------- */

/* Clean memory state.  This makes Helgrind forget everything it knew
   about the specified memory range.  Effectively this announces that
   the specified memory range now "belongs" to the calling thread, so
   that: (1) the calling thread can access it safely without
   synchronisation, and (2) all other threads must sync with this one
   to access it safely.  This is particularly useful for memory
   allocators that wish to recycle memory. */
#define VALGRIND_HG_CLEAN_MEMORY(_qzz_start, _qzz_len)       \
   DO_CREQ_v_WW(VG_USERREQ__HG_CLEAN_MEMORY,                 \
                void*,(_qzz_start),                          \
                unsigned long,(_qzz_len))

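/* Example (illustrative sketch): a pooled allocator that recycles
   blocks between threads.  my_pool_t, my_pool_get and
   take_from_free_list are hypothetical; the point is to paint a
   recycled block before handing it out, so stale ownership
   information from its previous user does not produce false race
   reports.

      void* my_pool_get(my_pool_t* p, size_t len) {
         void* blk = take_from_free_list(p, len);  // hypothetical
         VALGRIND_HG_CLEAN_MEMORY(blk, len);       // now owned by caller
         return blk;
      }
*/
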
/* The same, but for the heap block starting at _qzz_blockstart.  This
   allows painting when we only know the address of an object, but not
   its size, which is sometimes the case in C++ code involving
   inheritance, and in which RTTI is not, for whatever reason,
   available.  Returns the number of bytes painted, which can be zero
   for a zero-sized block.  Hence, return values >= 0 indicate success
   (the block was found), -1 indicates that the block was not found,
   and -2 indicates that the program is not running on Helgrind. */
#define VALGRIND_HG_CLEAN_MEMORY_HEAPBLOCK(_qzz_blockstart)  \
   (__extension__                                            \
   ({long int _npainted;                                     \
     DO_CREQ_W_W(_npainted, (-2)/*default*/,                 \
                 _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK,     \
                            void*,(_qzz_blockstart));        \
     _npainted;                                              \
   }))

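/* Example (illustrative sketch): checking the return value when
   painting an object whose size is unknown.  obj is a hypothetical
   pointer to the start of a heap block.

      long n = VALGRIND_HG_CLEAN_MEMORY_HEAPBLOCK(obj);
      if (n >= 0) {
         // success: n bytes painted (possibly 0)
      } else if (n == -1) {
         // no heap block starts at obj
      } else {
         // n == -2: not running on Helgrind
      }
*/
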
/* ----------------------------------------------------------
   For error control.
   ---------------------------------------------------------- */

/* Tell Helgrind that an address range is not to be "tracked" until
   further notice.  This puts it in the NOACCESS state, in which all
   reads and writes to it are ignored.  Useful for ignoring ranges of
   memory where there might be races we don't want to see.  If the
   memory is subsequently reallocated via malloc/new/stack allocation,
   then it is put back in the trackable state.  Hence it is safe in
   the situation where checking is disabled, and the containing area
   is deallocated and later reallocated for some other purpose. */
#define VALGRIND_HG_DISABLE_CHECKING(_qzz_start, _qzz_len)   \
   DO_CREQ_v_WW(_VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED,       \
                 void*,(_qzz_start),                         \
                 unsigned long,(_qzz_len))

/* And put it back into the normal "tracked" state, that is, make it
   once again subject to the normal race-checking machinery.  This
   puts it in the same state as new memory allocated by this thread --
   that is, basically owned exclusively by this thread. */
#define VALGRIND_HG_ENABLE_CHECKING(_qzz_start, _qzz_len)    \
   DO_CREQ_v_WW(_VG_USERREQ__HG_ARANGE_MAKE_TRACKED,         \
                 void*,(_qzz_start),                         \
                 unsigned long,(_qzz_len))

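/* Example (illustrative sketch): bracketing a deliberately racy
   statistics counter so Helgrind ignores it.  The variable stats_hits
   is hypothetical.

      static unsigned long stats_hits;   // updated racily, by design

      void stats_init(void) {
         VALGRIND_HG_DISABLE_CHECKING(&stats_hits, sizeof(stats_hits));
      }

   If the counter later needs to be race-checked again, for example
   after all worker threads have been joined, re-enable tracking:

      VALGRIND_HG_ENABLE_CHECKING(&stats_hits, sizeof(stats_hits));
*/
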
/* Checks the accessibility bits for addresses [zza..zza+zznbytes-1].
   If the zzabits array is provided, the accessibility bits are copied
   into it.
   Return values:
     -2   if not running on Helgrind
     -1   if any part of zzabits is not addressable
     >= 0 on success.
   On success, the return value is the number of addressable bytes
   found.  So, to check that a whole range is addressable, check
      VALGRIND_HG_GET_ABITS(addr,NULL,len) == len
   In addition, if you want to examine the addressability of each
   byte of the range, you need to provide a non-NULL pointer as the
   second argument, pointing to an array of unsigned char of length
   len.  Addressable bytes are indicated with 0xff, and
   non-addressable bytes with 0x00.
*/
#define VALGRIND_HG_GET_ABITS(zza,zzabits,zznbytes)          \
   (__extension__                                            \
   ({long int _res;                                          \
      DO_CREQ_W_WWW(_res, (-2)/*default*/,                   \
                    _VG_USERREQ__HG_GET_ABITS,               \
                    void*,(zza), void*,(zzabits),            \
                    unsigned long,(zznbytes));               \
      _res;                                                  \
   }))

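/* Example (illustrative sketch): verifying that an entire buffer is
   addressable, then examining per-byte accessibility.  buf and nbytes
   are hypothetical (with nbytes <= 64 assumed here).

      unsigned char abits[64];
      if (VALGRIND_HG_GET_ABITS(buf, NULL, nbytes) == nbytes) {
         // the whole range is addressable
      }
      (void) VALGRIND_HG_GET_ABITS(buf, abits, nbytes);
      // now abits[i] == 0xff if byte i is addressable, 0x00 if not
*/
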
/*----------------------------------------------------------------*/
/*---                                                          ---*/
/*--- ThreadSanitizer-compatible requests                      ---*/
/*--- (mostly unimplemented)                                   ---*/
/*---                                                          ---*/
/*----------------------------------------------------------------*/

/* A quite-broad set of annotations, as used in the ThreadSanitizer
   project.  This implementation aims to be a (source-level)
   compatible implementation of the macros defined in:

   http://code.google.com/p/data-race-test/source
          /browse/trunk/dynamic_annotations/dynamic_annotations.h

   (some of the comments below are taken from the above file)

   The implementation here is very incomplete, and intended as a
   starting point.  Many of the macros are unimplemented.  Rather than
   allowing unimplemented macros to silently do nothing, they cause an
   assertion.  The intention is to implement them on demand.

   The major use of these macros is to make visible to race detectors
   the behaviour (effects) of user-implemented synchronisation
   primitives, which the detectors could not otherwise deduce from the
   normal observation of pthread etc calls.

   Some of the macros are no-ops in Helgrind.  That's because Helgrind
   is a pure happens-before detector, whereas ThreadSanitizer uses a
   hybrid lockset and happens-before scheme, which requires more
   accurate annotations for correct operation.

   The macros are listed in the same order as in dynamic_annotations.h
   (URL just above).

   I should point out that I am less than clear about the intended
   semantics of quite a number of them.  Comments and clarifications
   welcomed!
*/
457
458/* ----------------------------------------------------------------
459   These four allow description of user-level condition variables,
460   apparently in the style of POSIX's pthread_cond_t.  Currently
461   unimplemented and will assert.
462   ----------------------------------------------------------------
463*/
464/* Report that wait on the condition variable at address CV has
465   succeeded and the lock at address LOCK is now held.  CV and LOCK
466   are completely arbitrary memory addresses which presumably mean
467   something to the application, but are meaningless to Helgrind. */
468#define ANNOTATE_CONDVAR_LOCK_WAIT(cv, lock) \
469   _HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_LOCK_WAIT")
470
471/* Report that wait on the condition variable at CV has succeeded.
472   Variant w/o lock. */
473#define ANNOTATE_CONDVAR_WAIT(cv) \
474   _HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_WAIT")
475
476/* Report that we are about to signal on the condition variable at
477   address CV. */
478#define ANNOTATE_CONDVAR_SIGNAL(cv) \
479   _HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_SIGNAL")
480
481/* Report that we are about to signal_all on the condition variable at
482   CV. */
483#define ANNOTATE_CONDVAR_SIGNAL_ALL(cv) \
484   _HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_SIGNAL_ALL")
485

/* ----------------------------------------------------------------
   Create completely arbitrary happens-before edges between threads.

   If threads T1 .. Tn all do ANNOTATE_HAPPENS_BEFORE(obj) and later
   (w.r.t. some notional global clock for the computation) thread Tm
   does ANNOTATE_HAPPENS_AFTER(obj), then Helgrind will regard all
   memory accesses done by T1 .. Tn before the ..BEFORE.. call as
   happening-before all memory accesses done by Tm after the
   ..AFTER.. call.  Hence Helgrind won't complain about races if Tm's
   accesses afterwards are to the same locations as accesses before by
   any of T1 .. Tn.

   OBJ is a machine word (unsigned long, or void*), is completely
   arbitrary, and denotes the identity of some synchronisation object
   you're modelling.

   You must do the _BEFORE call just before the real sync event on the
   signaller's side, and _AFTER just after the real sync event on the
   waiter's side.

   If none of the rest of these macros make sense to you, at least
   take the time to understand these two.  They form the very essence
   of describing arbitrary inter-thread synchronisation events to
   Helgrind.  You can get a long way just with them alone.

   See also the extensive discussion on the semantics of this in
   https://bugs.kde.org/show_bug.cgi?id=243935

   ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(obj) is interim until such time
   as bug 243935 is fully resolved.  It instructs Helgrind to forget
   about any ANNOTATE_HAPPENS_BEFORE calls on the specified object, in
   effect putting it back in its original state.  Once in that state,
   a use of ANNOTATE_HAPPENS_AFTER on it has no effect on the calling
   thread.

   An implementation may optionally release resources it has
   associated with 'obj' when ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(obj)
   happens.  Users are recommended to use
   ANNOTATE_HAPPENS_BEFORE_FORGET_ALL to indicate when a
   synchronisation object is no longer needed, so as to avoid
   potential indefinite resource leaks.
   ----------------------------------------------------------------
*/
#define ANNOTATE_HAPPENS_BEFORE(obj) \
   DO_CREQ_v_W(_VG_USERREQ__HG_USERSO_SEND_PRE, void*,(obj))

#define ANNOTATE_HAPPENS_AFTER(obj) \
   DO_CREQ_v_W(_VG_USERREQ__HG_USERSO_RECV_POST, void*,(obj))

#define ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(obj) \
   DO_CREQ_v_W(_VG_USERREQ__HG_USERSO_FORGET_ALL, void*,(obj))

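/* Example (illustrative sketch): using the two essential annotations
   to describe a hand-off through a hypothetical lock-free single-slot
   mailbox.  The type mailbox_t and its functions are hypothetical;
   slot must be initialised to NULL before use.

      typedef struct { void* volatile slot; } mailbox_t;

      void mailbox_put(mailbox_t* mb, void* item) {
         ANNOTATE_HAPPENS_BEFORE(mb);   // just before the real sync event
         __sync_synchronize();
         mb->slot = item;
      }

      void* mailbox_get(mailbox_t* mb) {
         void* item;
         while ((item = mb->slot) == NULL) { }
         __sync_synchronize();
         ANNOTATE_HAPPENS_AFTER(mb);    // just after the real sync event
         return item;
      }

      void mailbox_retire(mailbox_t* mb) {
         // let Helgrind release any resources tied to this object
         ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(mb);
      }
*/
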
/* ----------------------------------------------------------------
   Memory publishing.  The TSan sources say:

     Report that the bytes in the range [pointer, pointer+size) are about
     to be published safely. The race checker will create a happens-before
     arc from the call ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size) to
     subsequent accesses to this memory.

   I'm not sure I understand what this means exactly, nor whether it
   is relevant for a pure h-b detector.  Leaving unimplemented for
   now.
   ----------------------------------------------------------------
*/
#define ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PUBLISH_MEMORY_RANGE")

/* DEPRECATED. Don't use it. */
/* #define ANNOTATE_UNPUBLISH_MEMORY_RANGE(pointer, size) */

/* DEPRECATED. Don't use it. */
/* #define ANNOTATE_SWAP_MEMORY_RANGE(pointer, size) */


/* ----------------------------------------------------------------
   TSan sources say:

     Instruct the tool to create a happens-before arc between
     MU->Unlock() and MU->Lock().  This annotation may slow down the
     race detector; normally it is used only when it would be
     difficult to annotate each of the mutex's critical sections
     individually using the annotations above.

   If MU is a posix pthread_mutex_t then Helgrind will do this anyway.
   In any case, leave as unimp for now.  I'm unsure about the intended
   behaviour.
   ----------------------------------------------------------------
*/
#define ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(mu) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX")

/* Deprecated. Use ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX. */
/* #define ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(mu) */


/* ----------------------------------------------------------------
   TSan sources say:

     Annotations useful when defining memory allocators, or when
     memory that was protected in one way starts to be protected in
     another.

     Report that new memory at "address" of size "size" has been
     allocated.  This might be used when the memory has been retrieved
     from a free list and is about to be reused, or when the locking
     discipline for a variable changes.

   AFAICS this is the same as VALGRIND_HG_CLEAN_MEMORY.
   ----------------------------------------------------------------
*/
#define ANNOTATE_NEW_MEMORY(address, size) \
   VALGRIND_HG_CLEAN_MEMORY((address), (size))


/* ----------------------------------------------------------------
   TSan sources say:

     Annotations useful when defining FIFO queues that transfer data
     between threads.

   All unimplemented.  Am not claiming to understand this (yet).
   ----------------------------------------------------------------
*/

/* Report that the producer-consumer queue object at address PCQ has
   been created.  The ANNOTATE_PCQ_* annotations should be used only
   for FIFO queues.  For non-FIFO queues use ANNOTATE_HAPPENS_BEFORE
   (for put) and ANNOTATE_HAPPENS_AFTER (for get). */
#define ANNOTATE_PCQ_CREATE(pcq) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_CREATE")

/* Report that the queue at address PCQ is about to be destroyed. */
#define ANNOTATE_PCQ_DESTROY(pcq) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_DESTROY")

/* Report that we are about to put an element into a FIFO queue at
   address PCQ. */
#define ANNOTATE_PCQ_PUT(pcq) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_PUT")

/* Report that we've just got an element from a FIFO queue at address
   PCQ. */
#define ANNOTATE_PCQ_GET(pcq) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_GET")


/* ----------------------------------------------------------------
   Annotations that suppress errors.  It is usually better to express
   the program's synchronisation using the other annotations, but
   these can be used when all else fails.

   Currently these are mostly unimplemented.  I can't think of a
   simple way to implement them without at least some performance
   overhead.
   ----------------------------------------------------------------
*/

/* Report that we may have a benign race at "pointer", with size
   "sizeof(*(pointer))". "pointer" must be a non-void* pointer.  Insert at the
   point where "pointer" has been allocated, preferably close to the point
   where the race happens.  See also ANNOTATE_BENIGN_RACE_STATIC.

   XXX: what's this actually supposed to do?  And what's the type of
   DESCRIPTION?  When does the annotation stop having an effect?
*/
#define ANNOTATE_BENIGN_RACE(pointer, description) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_BENIGN_RACE")

/* Same as ANNOTATE_BENIGN_RACE(address, description), but applies to
   the memory range [address, address+size). */
#define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \
   VALGRIND_HG_DISABLE_CHECKING(address, size)

/* Request the analysis tool to ignore all reads in the current thread
   until ANNOTATE_IGNORE_READS_END is called.  Useful to ignore
   intentional racy reads, while still checking other reads and all
   writes. */
#define ANNOTATE_IGNORE_READS_BEGIN() \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_READS_BEGIN")

/* Stop ignoring reads. */
#define ANNOTATE_IGNORE_READS_END() \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_READS_END")

/* Similar to ANNOTATE_IGNORE_READS_BEGIN, but ignore writes. */
#define ANNOTATE_IGNORE_WRITES_BEGIN() \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_WRITES_BEGIN")

/* Stop ignoring writes. */
#define ANNOTATE_IGNORE_WRITES_END() \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_WRITES_END")

/* Start ignoring all memory accesses (reads and writes). */
#define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \
   do { \
      ANNOTATE_IGNORE_READS_BEGIN(); \
      ANNOTATE_IGNORE_WRITES_BEGIN(); \
   } while (0)

/* Stop ignoring all memory accesses. */
#define ANNOTATE_IGNORE_READS_AND_WRITES_END() \
   do { \
      ANNOTATE_IGNORE_WRITES_END(); \
      ANNOTATE_IGNORE_READS_END(); \
   } while (0)


/* ----------------------------------------------------------------
   Annotations useful for debugging.

   Again, so far unimplemented, partly for performance reasons.
   ----------------------------------------------------------------
*/

/* Request to trace every access to ADDRESS. */
#define ANNOTATE_TRACE_MEMORY(address) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_TRACE_MEMORY")

/* Report the current thread name to a race detector. */
#define ANNOTATE_THREAD_NAME(name) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_THREAD_NAME")


/* ----------------------------------------------------------------
   Annotations for describing behaviour of user-implemented lock
   primitives.  In all cases, the LOCK argument is a completely
   arbitrary machine word (unsigned long, or void*) and can be any
   value which gives a unique identity to the lock objects being
   modelled.

   We just pretend they're ordinary posix rwlocks.  That'll probably
   give some rather confusing wording in error messages, claiming that
   the arbitrary LOCK values are pthread_rwlock_t*'s, when in fact
   they are not.  Ah well.
   ----------------------------------------------------------------
*/
/* Report that a lock has just been created at address LOCK. */
#define ANNOTATE_RWLOCK_CREATE(lock)                         \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST,     \
               void*,(lock))

/* Report that the lock at address LOCK is about to be destroyed. */
#define ANNOTATE_RWLOCK_DESTROY(lock)                        \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE,   \
               void*,(lock))

/* Report that the lock at address LOCK has just been acquired.
   is_w=1 for writer lock, is_w=0 for reader lock. */
#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w)                 \
  DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST,     \
               void*,(lock), unsigned long,(is_w))

/* Report that the lock at address LOCK is about to be released. */
#define ANNOTATE_RWLOCK_RELEASED(lock, is_w)                 \
  DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE,     \
              void*,(lock)) /* is_w is ignored */

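/* Example (illustrative sketch): annotating a hypothetical exclusive
   spinlock so Helgrind models it as a (writer-only) rwlock.  The type
   my_wlock_t and the my_wlock_* functions are hypothetical.

      typedef struct { volatile int held; } my_wlock_t;

      void my_wlock_init(my_wlock_t* lk) {
         lk->held = 0;
         ANNOTATE_RWLOCK_CREATE(lk);
      }

      void my_wlock_acquire(my_wlock_t* lk) {
         while (__sync_lock_test_and_set(&lk->held, 1)) { }
         ANNOTATE_RWLOCK_ACQUIRED(lk, 1);   // 1 == writer (exclusive)
      }

      void my_wlock_release(my_wlock_t* lk) {
         ANNOTATE_RWLOCK_RELEASED(lk, 1);   // is_w currently ignored
         __sync_lock_release(&lk->held);
      }

      void my_wlock_destroy(my_wlock_t* lk) {
         ANNOTATE_RWLOCK_DESTROY(lk);
      }
*/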

/* ----------------------------------------------------------------
   Annotations useful when implementing barriers.  They are not
   normally needed by modules that merely use barriers.
   The "barrier" argument is a pointer to the barrier object.
   ----------------------------------------------------------------
*/

/* Report that the "barrier" has been initialized with initial
   "count".  If 'reinitialization_allowed' is true, initialization is
   allowed to happen multiple times w/o calling barrier_destroy(). */
#define ANNOTATE_BARRIER_INIT(barrier, count, reinitialization_allowed) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_BARRIER_INIT")

/* Report that we are about to enter barrier_wait("barrier"). */
#define ANNOTATE_BARRIER_WAIT_BEFORE(barrier) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_BARRIER_WAIT_BEFORE")

/* Report that we just exited barrier_wait("barrier"). */
#define ANNOTATE_BARRIER_WAIT_AFTER(barrier) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_BARRIER_WAIT_AFTER")

/* Report that the "barrier" has been destroyed. */
#define ANNOTATE_BARRIER_DESTROY(barrier) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_BARRIER_DESTROY")


/* ----------------------------------------------------------------
   Annotations useful for testing race detectors.
   ----------------------------------------------------------------
*/

/* Report that we expect a race on the variable at ADDRESS.  Use only
   in unit tests for a race detector. */
#define ANNOTATE_EXPECT_RACE(address, description) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_EXPECT_RACE")

/* A no-op.  Insert where you like, to test the interceptors. */
#define ANNOTATE_NO_OP(arg) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_NO_OP")

/* Force the race detector to flush its state.  The actual effect
   depends on the implementation of the detector. */
#define ANNOTATE_FLUSH_STATE() \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_FLUSH_STATE")

#endif /* __HELGRIND_H */