/*
   ----------------------------------------------------------------

   Notice that the following BSD-style license applies to this one
   file (helgrind.h) only.  The entire rest of Valgrind is licensed
   under the terms of the GNU General Public License, version 2.
   See the COPYING file in the source distribution for details.

   ----------------------------------------------------------------

   This file is part of Helgrind, a Valgrind tool for detecting errors
   in threaded programs.

   Copyright (C) 2007-2015 OpenWorks LLP
      info@open-works.co.uk

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions
   are met:

   1. Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.

   2. The origin of this software must not be misrepresented; you must
      not claim that you wrote the original software.  If you use this
      software in a product, an acknowledgment in the product
      documentation would be appreciated but is not required.

   3. Altered source versions must be plainly marked as such, and must
      not be misrepresented as being the original software.

   4. The name of the author may not be used to endorse or promote
      products derived from this software without specific prior written
      permission.

   THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
   OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
   WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
   DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
   GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
   WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

   ----------------------------------------------------------------

   Notice that the above BSD-style license applies to this one file
   (helgrind.h) only.  The entire rest of Valgrind is licensed under
   the terms of the GNU General Public License, version 2.  See the
   COPYING file in the source distribution for details.

   ----------------------------------------------------------------
*/

#ifndef __HELGRIND_H
#define __HELGRIND_H

#include "valgrind.h"

/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
   This enum comprises an ABI exported by Valgrind to programs
   which use client requests.  DO NOT CHANGE THE ORDER OF THESE
   ENTRIES, NOR DELETE ANY -- add new ones at the end. */
typedef
   enum {
      VG_USERREQ__HG_CLEAN_MEMORY = VG_USERREQ_TOOL_BASE('H','G'),

      /* The rest are for Helgrind's internal use.  Not for end-user
         use.  Do not use them unless you are a Valgrind developer. */

      /* Notify the tool what this thread's pthread_t is. */
      _VG_USERREQ__HG_SET_MY_PTHREAD_T = VG_USERREQ_TOOL_BASE('H','G')
                                         + 256,
      _VG_USERREQ__HG_PTH_API_ERROR,              /* char*, int */
      _VG_USERREQ__HG_PTHREAD_JOIN_POST,          /* pthread_t of quitter */
      _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST,    /* pth_mx_t*, long mbRec */
      _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE,  /* pth_mx_t*, long isInit */
      _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE,   /* pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST,  /* pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_MUTEX_ACQUIRE_PRE,  /* void*, long isTryLock */
      _VG_USERREQ__HG_PTHREAD_MUTEX_ACQUIRE_POST, /* void* */
      _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE,    /* pth_cond_t* */
      _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE, /* pth_cond_t* */
      _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE,     /* pth_cond_t*, pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST,    /* pth_cond_t*, pth_mx_t* */
      _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE,   /* pth_cond_t*, long isInit */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST,   /* pth_rwlk_t* */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE, /* pth_rwlk_t* */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE,    /* pth_rwlk_t*, long isW */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_ACQUIRED,    /* void*, long isW */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_RELEASED,    /* void* */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST, /* pth_rwlk_t* */
      _VG_USERREQ__HG_POSIX_SEM_INIT_POST,        /* sem_t*, ulong value */
      _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE,      /* sem_t* */
      _VG_USERREQ__HG_POSIX_SEM_RELEASED,         /* void* */
      _VG_USERREQ__HG_POSIX_SEM_ACQUIRED,         /* void* */
      _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE,   /* pth_bar_t*, ulong, ulong */
      _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE,   /* pth_bar_t* */
      _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE, /* pth_bar_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE,  /* pth_slk_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST, /* pth_slk_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_PRE,      /* pth_slk_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_POST,     /* pth_slk_t* */
      _VG_USERREQ__HG_PTHREAD_SPIN_DESTROY_PRE,   /* pth_slk_t* */
      _VG_USERREQ__HG_CLIENTREQ_UNIMP,            /* char* */
      _VG_USERREQ__HG_USERSO_SEND_PRE,        /* arbitrary UWord SO-tag */
      _VG_USERREQ__HG_USERSO_RECV_POST,       /* arbitrary UWord SO-tag */
      _VG_USERREQ__HG_USERSO_FORGET_ALL,      /* arbitrary UWord SO-tag */
      _VG_USERREQ__HG_RESERVED2,              /* Do not use */
      _VG_USERREQ__HG_RESERVED3,              /* Do not use */
      _VG_USERREQ__HG_RESERVED4,              /* Do not use */
      _VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED, /* Addr a, ulong len */
      _VG_USERREQ__HG_ARANGE_MAKE_TRACKED,   /* Addr a, ulong len */
      _VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE, /* pth_bar_t*, ulong */
      _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK, /* Addr start_of_block */
      _VG_USERREQ__HG_PTHREAD_COND_INIT_POST,  /* pth_cond_t*, pth_cond_attr_t*/
      _VG_USERREQ__HG_GNAT_MASTER_HOOK,       /* void*d,void*m,Word ml */
      _VG_USERREQ__HG_GNAT_MASTER_COMPLETED_HOOK, /* void*s,Word ml */
      _VG_USERREQ__HG_GET_ABITS,              /* Addr a,Addr abits, ulong len */
      _VG_USERREQ__HG_PTHREAD_CREATE_BEGIN,
      _VG_USERREQ__HG_PTHREAD_CREATE_END,
      _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE,     /* pth_mx_t*,long isTryLock */
      _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST,    /* pth_mx_t *,long tookLock */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST,  /* pth_rwlk_t*,long isW,long */
      _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE,  /* pth_rwlk_t* */
      _VG_USERREQ__HG_POSIX_SEM_POST_PRE,         /* sem_t* */
      _VG_USERREQ__HG_POSIX_SEM_POST_POST,        /* sem_t* */
      _VG_USERREQ__HG_POSIX_SEM_WAIT_PRE,         /* sem_t* */
      _VG_USERREQ__HG_POSIX_SEM_WAIT_POST,        /* sem_t*, long tookLock */
      _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_POST,   /* pth_cond_t* */
      _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_POST,/* pth_cond_t* */
      _VG_USERREQ__HG_RTLD_BIND_GUARD,            /* int flags */
      _VG_USERREQ__HG_RTLD_BIND_CLEAR             /* int flags */
   } Vg_TCheckClientRequest;


/*----------------------------------------------------------------*/
/*---                                                          ---*/
/*--- Implementation-only facilities.  Not for end-user use.   ---*/
/*--- For end-user facilities see below (the next section in   ---*/
/*--- this file).                                              ---*/
/*---                                                          ---*/
/*----------------------------------------------------------------*/

/* Do a client request.  These are macros rather than functions so
   as to avoid having an extra frame in stack traces.

   NB: these duplicate definitions in hg_intercepts.c.  But here, we
   have to make do with weaker typing (no definition of Word etc) and
   no assertions, whereas in hg_intercepts.c we can use those
   facilities.  Obviously it's important the two sets of definitions
   are kept in sync.

   The commented-out asserts should actually hold, but unfortunately
   they can't be allowed to be visible here, because that would
   require the end-user code to #include <assert.h>.
*/

#define DO_CREQ_v_W(_creqF, _ty1F,_arg1F)                \
   do {                                                  \
      long int _arg1;                                    \
      /* assert(sizeof(_ty1F) == sizeof(long int)); */   \
      _arg1 = (long int)(_arg1F);                        \
      VALGRIND_DO_CLIENT_REQUEST_STMT(                   \
                                 (_creqF),               \
                                 _arg1, 0,0,0,0);        \
   } while (0)

#define DO_CREQ_W_W(_resF, _dfltF, _creqF, _ty1F,_arg1F) \
   do {                                                  \
      long int _qzz_res;                                 \
      long int _arg1;                                    \
      /* assert(sizeof(_ty1F) == sizeof(long int)); */   \
      _arg1 = (long int)(_arg1F);                        \
      _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(        \
                                 (_dfltF),               \
                                 (_creqF),               \
                                 _arg1, 0,0,0,0);        \
      _resF = _qzz_res;                                  \
   } while (0)

#define DO_CREQ_v_WW(_creqF, _ty1F,_arg1F, _ty2F,_arg2F) \
   do {                                                  \
      long int _arg1, _arg2;                             \
      /* assert(sizeof(_ty1F) == sizeof(long int)); */   \
      /* assert(sizeof(_ty2F) == sizeof(long int)); */   \
      _arg1 = (long int)(_arg1F);                        \
      _arg2 = (long int)(_arg2F);                        \
      VALGRIND_DO_CLIENT_REQUEST_STMT(                   \
                                 (_creqF),               \
                                 _arg1,_arg2,0,0,0);     \
   } while (0)

#define DO_CREQ_v_WWW(_creqF, _ty1F,_arg1F,              \
                      _ty2F,_arg2F, _ty3F, _arg3F)       \
   do {                                                  \
      long int _arg1, _arg2, _arg3;                      \
      /* assert(sizeof(_ty1F) == sizeof(long int)); */   \
      /* assert(sizeof(_ty2F) == sizeof(long int)); */   \
      /* assert(sizeof(_ty3F) == sizeof(long int)); */   \
      _arg1 = (long int)(_arg1F);                        \
      _arg2 = (long int)(_arg2F);                        \
      _arg3 = (long int)(_arg3F);                        \
      VALGRIND_DO_CLIENT_REQUEST_STMT(                   \
                                 (_creqF),               \
                                 _arg1,_arg2,_arg3,0,0); \
   } while (0)

#define DO_CREQ_W_WWW(_resF, _dfltF, _creqF, _ty1F,_arg1F, \
                      _ty2F,_arg2F, _ty3F, _arg3F)       \
   do {                                                  \
      long int _qzz_res;                                 \
      long int _arg1, _arg2, _arg3;                      \
      /* assert(sizeof(_ty1F) == sizeof(long int)); */   \
      _arg1 = (long int)(_arg1F);                        \
      _arg2 = (long int)(_arg2F);                        \
      _arg3 = (long int)(_arg3F);                        \
      _qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(        \
                                 (_dfltF),               \
                                 (_creqF),               \
                                 _arg1,_arg2,_arg3,0,0); \
      _resF = _qzz_res;                                  \
   } while (0)



#define _HG_CLIENTREQ_UNIMP(_qzz_str)                    \
   DO_CREQ_v_W(_VG_USERREQ__HG_CLIENTREQ_UNIMP,          \
               (char*),(_qzz_str))


/*----------------------------------------------------------------*/
/*---                                                          ---*/
/*--- Helgrind-native requests.  These allow access to         ---*/
/*--- the same set of annotation primitives that are used      ---*/
/*--- to build the POSIX pthread wrappers.                     ---*/
/*---                                                          ---*/
/*----------------------------------------------------------------*/

/* ----------------------------------------------------------
   For describing ordinary mutexes (non-rwlocks).  For rwlock
   descriptions see ANNOTATE_RWLOCK_* below.
   ---------------------------------------------------------- */

/* Notify here immediately after mutex creation.  _mbRec == 0 for a
   non-recursive mutex, 1 for a recursive mutex. */
#define VALGRIND_HG_MUTEX_INIT_POST(_mutex, _mbRec)          \
   DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST,     \
                void*,(_mutex), long,(_mbRec))

/* Notify here immediately before mutex acquisition.  _isTryLock == 0
   for a normal acquisition, 1 for a "try" style acquisition. */
#define VALGRIND_HG_MUTEX_LOCK_PRE(_mutex, _isTryLock)       \
   DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_MUTEX_ACQUIRE_PRE,   \
                void*,(_mutex), long,(_isTryLock))

/* Notify here immediately after a successful mutex acquisition. */
#define VALGRIND_HG_MUTEX_LOCK_POST(_mutex)                  \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_ACQUIRE_POST,   \
               void*,(_mutex))

/* Notify here immediately before a mutex release. */
#define VALGRIND_HG_MUTEX_UNLOCK_PRE(_mutex)                 \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE,     \
               void*,(_mutex))

/* Notify here immediately after a mutex release. */
#define VALGRIND_HG_MUTEX_UNLOCK_POST(_mutex)                \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST,    \
               void*,(_mutex))

/* Notify here immediately before mutex destruction. */
#define VALGRIND_HG_MUTEX_DESTROY_PRE(_mutex)                \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE,    \
               void*,(_mutex))
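
/* Example (illustrative sketch only, not part of the Helgrind API):
   a hypothetical user-level spin mutex built on GCC atomic builtins,
   annotated so that Helgrind treats it like a pthread mutex.  All
   names here are invented for the example.

      typedef struct { volatile int locked; } my_mutex_t;

      void my_mutex_init(my_mutex_t* m) {
         m->locked = 0;
         VALGRIND_HG_MUTEX_INIT_POST(m, 0);    // 0 == non-recursive
      }
      void my_mutex_lock(my_mutex_t* m) {
         VALGRIND_HG_MUTEX_LOCK_PRE(m, 0);     // 0 == not a trylock
         while (__sync_lock_test_and_set(&m->locked, 1))
            ;                                  // spin until acquired
         VALGRIND_HG_MUTEX_LOCK_POST(m);       // only after success
      }
      void my_mutex_unlock(my_mutex_t* m) {
         VALGRIND_HG_MUTEX_UNLOCK_PRE(m);
         __sync_lock_release(&m->locked);
         VALGRIND_HG_MUTEX_UNLOCK_POST(m);
      }
      void my_mutex_destroy(my_mutex_t* m) {
         VALGRIND_HG_MUTEX_DESTROY_PRE(m);
      }
*/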

/* ----------------------------------------------------------
   For describing semaphores.
   ---------------------------------------------------------- */

/* Notify here immediately after semaphore creation. */
#define VALGRIND_HG_SEM_INIT_POST(_sem, _value)              \
   DO_CREQ_v_WW(_VG_USERREQ__HG_POSIX_SEM_INIT_POST,         \
                void*, (_sem), unsigned long, (_value))

/* Notify here immediately after a semaphore wait (an acquire-style
   operation). */
#define VALGRIND_HG_SEM_WAIT_POST(_sem)                      \
   DO_CREQ_v_W(_VG_USERREQ__HG_POSIX_SEM_ACQUIRED,           \
               void*,(_sem))

/* Notify here immediately before a semaphore post (a release-style
   operation). */
#define VALGRIND_HG_SEM_POST_PRE(_sem)                       \
   DO_CREQ_v_W(_VG_USERREQ__HG_POSIX_SEM_RELEASED,           \
               void*,(_sem))

/* Notify here immediately before semaphore destruction. */
#define VALGRIND_HG_SEM_DESTROY_PRE(_sem)                    \
   DO_CREQ_v_W(_VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE,        \
               void*, (_sem))

/* ----------------------------------------------------------
   For describing barriers.
   ---------------------------------------------------------- */

/* Notify here immediately before barrier creation.  _count is the
   capacity.  _resizable == 0 means the barrier may not be resized, 1
   means it may be. */
#define VALGRIND_HG_BARRIER_INIT_PRE(_bar, _count, _resizable) \
   DO_CREQ_v_WWW(_VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE,   \
                 void*,(_bar),                               \
                 unsigned long,(_count),                     \
                 unsigned long,(_resizable))

/* Notify here immediately before arrival at a barrier. */
#define VALGRIND_HG_BARRIER_WAIT_PRE(_bar)                   \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE,     \
               void*,(_bar))

/* Notify here immediately before a resize (change of barrier
   capacity).  If _newcount >= the existing capacity, then there is no
   change in the state of any threads waiting at the barrier.  If
   _newcount < the existing capacity, and >= _newcount threads are
   currently waiting at the barrier, then this notification is
   considered to also have the effect of telling the checker that all
   waiting threads have now moved past the barrier.  (I can't think of
   any other sane semantics.) */
#define VALGRIND_HG_BARRIER_RESIZE_PRE(_bar, _newcount)      \
   DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE,  \
                void*,(_bar),                                \
                unsigned long,(_newcount))

/* Notify here immediately before barrier destruction. */
#define VALGRIND_HG_BARRIER_DESTROY_PRE(_bar)                \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE,  \
               void*,(_bar))

/* ----------------------------------------------------------
   For describing memory ownership changes.
   ---------------------------------------------------------- */

/* Clean memory state.  This makes Helgrind forget everything it knew
   about the specified memory range.  Effectively this announces that
   the specified memory range now "belongs" to the calling thread, so
   that: (1) the calling thread can access it safely without
   synchronisation, and (2) all other threads must sync with this one
   to access it safely.  This is particularly useful for memory
   allocators that wish to recycle memory. */
#define VALGRIND_HG_CLEAN_MEMORY(_qzz_start, _qzz_len)       \
   DO_CREQ_v_WW(VG_USERREQ__HG_CLEAN_MEMORY,                 \
                void*,(_qzz_start),                          \
                unsigned long,(_qzz_len))
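
/* Example (minimal sketch): a hypothetical free-list allocator that
   recycles blocks between threads.  Painting a block on reuse stops
   Helgrind reporting stale conflicts between the previous owner's
   accesses and the new owner's.  'my_pool_t' and 'take_from_free_list'
   are invented for the example.

      void* my_pool_alloc(my_pool_t* pool, size_t n) {
         void* p = take_from_free_list(pool, n);
         if (p != NULL)
            VALGRIND_HG_CLEAN_MEMORY(p, n);  // block now belongs to us
         return p;
      }
*/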

/* The same, but for the heap block starting at _qzz_blockstart.  This
   allows painting when we only know the address of an object, but not
   its size, which is sometimes the case in C++ code involving
   inheritance, and in which RTTI is not, for whatever reason,
   available.  Returns the number of bytes painted, which can be zero
   for a zero-sized block.  Hence, return values >= 0 indicate success
   (the block was found), -1 indicates that no block was found, and -2
   is returned when not running on Helgrind. */
#define VALGRIND_HG_CLEAN_MEMORY_HEAPBLOCK(_qzz_blockstart)  \
   (__extension__                                            \
   ({long int _npainted;                                     \
     DO_CREQ_W_W(_npainted, (-2)/*default*/,                 \
                 _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK,     \
                            void*,(_qzz_blockstart));        \
     _npainted;                                              \
   }))
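
/* For instance (sketch; 'obj' stands for some caller-supplied heap
   pointer):

      long painted = VALGRIND_HG_CLEAN_MEMORY_HEAPBLOCK(obj);
      if (painted >= 0)
         ;   // success: 'painted' bytes now belong to this thread
      else if (painted == -1)
         ;   // 'obj' is not the start of a known heap block
      else
         ;   // painted == -2: not running under Helgrind
*/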

/* ----------------------------------------------------------
   For error control.
   ---------------------------------------------------------- */

/* Tell Helgrind that an address range is not to be "tracked" until
   further notice.  This puts it in the NOACCESS state, in which case
   we ignore all reads and writes to it.  Useful for ignoring ranges
   of memory where there might be races we don't want to see.  If the
   memory is subsequently reallocated via malloc/new/stack allocation,
   then it is put back in the trackable state.  Hence it is safe in
   the situation where checking is disabled, the containing area is
   deallocated and later reallocated for some other purpose. */
#define VALGRIND_HG_DISABLE_CHECKING(_qzz_start, _qzz_len)   \
   DO_CREQ_v_WW(_VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED,       \
                 void*,(_qzz_start),                         \
                 unsigned long,(_qzz_len))

/* And put it back into the normal "tracked" state, that is, make it
   once again subject to the normal race-checking machinery.  This
   puts it in the same state as new memory allocated by this thread --
   that is, basically owned exclusively by this thread. */
#define VALGRIND_HG_ENABLE_CHECKING(_qzz_start, _qzz_len)    \
   DO_CREQ_v_WW(_VG_USERREQ__HG_ARANGE_MAKE_TRACKED,         \
                 void*,(_qzz_start),                         \
                 unsigned long,(_qzz_len))
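
/* Example (sketch, with an invented 'stats' word): stop tracking a
   deliberately unsynchronised statistics counter during a phase in
   which many threads bump it without locking, then resume tracking
   afterwards.

      static unsigned long stats;   // racy on purpose

      void stats_phase_begin(void) {
         VALGRIND_HG_DISABLE_CHECKING(&stats, sizeof stats);
      }
      void stats_phase_end(void) {
         VALGRIND_HG_ENABLE_CHECKING(&stats, sizeof stats);
      }
*/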


/* Checks the accessibility bits for addresses [zza..zza+zznbytes-1].
   If the zzabits array is provided, the accessibility bits are copied
   into it.
   Return values:
     -2   if not running on Helgrind
     -1   if any part of zzabits is not addressable
     >= 0 on success, the number of addressable bytes found.
   So, to check that a whole range is addressable, check that
      VALGRIND_HG_GET_ABITS(addr,NULL,len) == len
   In addition, if you want to examine the addressability of each
   byte of the range, provide a non-NULL pointer as the second
   argument, pointing to an array of unsigned char of length len.
   Addressable bytes are indicated with 0xff.
   Non-addressable bytes are indicated with 0x00.
*/
#define VALGRIND_HG_GET_ABITS(zza,zzabits,zznbytes)          \
   (__extension__                                            \
   ({long int _res;                                          \
      DO_CREQ_W_WWW(_res, (-2)/*default*/,                   \
                    _VG_USERREQ__HG_GET_ABITS,               \
                    void*,(zza), void*,(zzabits),            \
                    unsigned long,(zznbytes));               \
      _res;                                                  \
   }))
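
/* Example (sketch; 'buf' stands for some caller-supplied range):
   check whether a whole 64-byte range is addressable, then inspect
   the per-byte results.

      unsigned char abits[64];
      long r = VALGRIND_HG_GET_ABITS(buf, abits, 64);
      if (r == 64)
         ;   // every byte of buf[0..63] is addressable
      else if (r >= 0)
         ;   // abits[i] == 0xff iff buf[i] is addressable
      else
         ;   // -1: abits not addressable; -2: not on Helgrind
*/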

/*----------------------------------------------------------------*/
/*---                                                          ---*/
/*--- ThreadSanitizer-compatible requests                      ---*/
/*--- (mostly unimplemented)                                   ---*/
/*---                                                          ---*/
/*----------------------------------------------------------------*/

/* A quite-broad set of annotations, as used in the ThreadSanitizer
   project.  This implementation aims to be a (source-level)
   compatible implementation of the macros defined in:

   http://code.google.com/p/data-race-test/source
          /browse/trunk/dynamic_annotations/dynamic_annotations.h

   (some of the comments below are taken from the above file)

   The implementation here is very incomplete, and intended as a
   starting point.  Many of the macros are unimplemented.  Rather than
   allowing unimplemented macros to silently do nothing, they cause an
   assertion.  The intention is to implement them on demand.

   The major use of these macros is to make visible to race detectors
   the behaviour (effects) of user-implemented synchronisation
   primitives, which the detectors could not otherwise deduce from the
   normal observation of pthread etc calls.

   Some of the macros are no-ops in Helgrind.  That's because Helgrind
   is a pure happens-before detector, whereas ThreadSanitizer uses a
   hybrid lockset and happens-before scheme, which requires more
   accurate annotations for correct operation.

   The macros are listed in the same order as in dynamic_annotations.h
   (URL just above).

   I should point out that I am less than clear about the intended
   semantics of quite a number of them.  Comments and clarifications
   welcomed!
*/

/* ----------------------------------------------------------------
   These four allow description of user-level condition variables,
   apparently in the style of POSIX's pthread_cond_t.  Currently
   unimplemented and will assert.
   ----------------------------------------------------------------
*/
/* Report that wait on the condition variable at address CV has
   succeeded and the lock at address LOCK is now held.  CV and LOCK
   are completely arbitrary memory addresses which presumably mean
   something to the application, but are meaningless to Helgrind. */
#define ANNOTATE_CONDVAR_LOCK_WAIT(cv, lock) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_LOCK_WAIT")

/* Report that wait on the condition variable at CV has succeeded.
   Variant w/o lock. */
#define ANNOTATE_CONDVAR_WAIT(cv) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_WAIT")

/* Report that we are about to signal on the condition variable at
   address CV. */
#define ANNOTATE_CONDVAR_SIGNAL(cv) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_SIGNAL")

/* Report that we are about to signal_all on the condition variable at
   CV. */
#define ANNOTATE_CONDVAR_SIGNAL_ALL(cv) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_CONDVAR_SIGNAL_ALL")


/* ----------------------------------------------------------------
   Create completely arbitrary happens-before edges between threads.

   If threads T1 .. Tn all do ANNOTATE_HAPPENS_BEFORE(obj) and later
   (w.r.t. some notional global clock for the computation) thread Tm
   does ANNOTATE_HAPPENS_AFTER(obj), then Helgrind will regard all
   memory accesses done by T1 .. Tn before the ..BEFORE.. call as
   happening-before all memory accesses done by Tm after the
   ..AFTER.. call.  Hence Helgrind won't complain about races if Tm's
   accesses afterwards are to the same locations as accesses before by
   any of T1 .. Tn.

   OBJ is a machine word (unsigned long, or void*), is completely
   arbitrary, and denotes the identity of some synchronisation object
   you're modelling.

   You must do the _BEFORE call just before the real sync event on the
   signaller's side, and _AFTER just after the real sync event on the
   waiter's side.

   If none of the rest of these macros make sense to you, at least
   take the time to understand these two.  They form the very essence
   of describing arbitrary inter-thread synchronisation events to
   Helgrind.  You can get a long way just with them alone.

   See also the extensive discussion on the semantics of this in
   https://bugs.kde.org/show_bug.cgi?id=243935

   ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(obj) is interim until such time
   as bug 243935 is fully resolved.  It instructs Helgrind to forget
   about any ANNOTATE_HAPPENS_BEFORE calls on the specified object, in
   effect putting it back in its original state.  Once in that state,
   a use of ANNOTATE_HAPPENS_AFTER on it has no effect on the calling
   thread.

   An implementation may optionally release resources it has
   associated with 'obj' when ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(obj)
   happens.  Users are recommended to use
   ANNOTATE_HAPPENS_BEFORE_FORGET_ALL to indicate when a
   synchronisation object is no longer needed, so as to avoid
   potential indefinite resource leaks.
   ----------------------------------------------------------------
*/
#define ANNOTATE_HAPPENS_BEFORE(obj) \
   DO_CREQ_v_W(_VG_USERREQ__HG_USERSO_SEND_PRE, void*,(obj))

#define ANNOTATE_HAPPENS_AFTER(obj) \
   DO_CREQ_v_W(_VG_USERREQ__HG_USERSO_RECV_POST, void*,(obj))

#define ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(obj) \
   DO_CREQ_v_W(_VG_USERREQ__HG_USERSO_FORGET_ALL, void*,(obj))
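
/* Example (minimal sketch): a hand-rolled release/acquire flag used
   to pass 'payload' from a producer thread to a consumer thread.
   The names and the fencing are illustrative; the point is only the
   placement of the two annotation calls around the real sync event.

      int payload;                  // written by producer, read by consumer
      volatile int ready = 0;

      void producer(void) {
         payload = 42;
         ANNOTATE_HAPPENS_BEFORE(&ready);   // just before the real signal
         __sync_synchronize();
         ready = 1;
      }

      void consumer(void) {
         while (!ready)
            ;                               // spin until signalled
         __sync_synchronize();
         ANNOTATE_HAPPENS_AFTER(&ready);    // just after the real wait
         int v = payload;                   // no race reported on payload
         (void)v;
      }
*/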

/* ----------------------------------------------------------------
   Memory publishing.  The TSan sources say:

     Report that the bytes in the range [pointer, pointer+size) are about
     to be published safely. The race checker will create a happens-before
     arc from the call ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size) to
     subsequent accesses to this memory.

   I'm not sure I understand what this means exactly, nor whether it
   is relevant for a pure h-b detector.  Leaving unimplemented for
   now.
   ----------------------------------------------------------------
*/
#define ANNOTATE_PUBLISH_MEMORY_RANGE(pointer, size) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PUBLISH_MEMORY_RANGE")

/* DEPRECATED. Don't use it. */
/* #define ANNOTATE_UNPUBLISH_MEMORY_RANGE(pointer, size) */

/* DEPRECATED. Don't use it. */
/* #define ANNOTATE_SWAP_MEMORY_RANGE(pointer, size) */


/* ----------------------------------------------------------------
   TSan sources say:

     Instruct the tool to create a happens-before arc between
     MU->Unlock() and MU->Lock().  This annotation may slow down the
     race detector; normally it is used only when it would be
     difficult to annotate each of the mutex's critical sections
     individually using the annotations above.

   If MU is a posix pthread_mutex_t then Helgrind will do this anyway.
   In any case, leave as unimp for now.  I'm unsure about the intended
   behaviour.
   ----------------------------------------------------------------
*/
#define ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(mu) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX")

/* Deprecated. Use ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX. */
/* #define ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(mu) */


/* ----------------------------------------------------------------
   TSan sources say:

     Annotations useful when defining memory allocators, or when
     memory that was protected in one way starts to be protected in
     another.

     Report that new memory at "address" of size "size" has been
     allocated.  This might be used when the memory has been retrieved
     from a free list and is about to be reused, or when the locking
     discipline for a variable changes.

   AFAICS this is the same as VALGRIND_HG_CLEAN_MEMORY.
   ----------------------------------------------------------------
*/
#define ANNOTATE_NEW_MEMORY(address, size) \
   VALGRIND_HG_CLEAN_MEMORY((address), (size))


/* ----------------------------------------------------------------
   TSan sources say:

     Annotations useful when defining FIFO queues that transfer data
     between threads.

   All unimplemented.  Am not claiming to understand this (yet).
   ----------------------------------------------------------------
*/

/* Report that the producer-consumer queue object at address PCQ has
   been created.  The ANNOTATE_PCQ_* annotations should be used only
   for FIFO queues.  For non-FIFO queues use ANNOTATE_HAPPENS_BEFORE
   (for put) and ANNOTATE_HAPPENS_AFTER (for get). */
#define ANNOTATE_PCQ_CREATE(pcq) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_CREATE")

/* Report that the queue at address PCQ is about to be destroyed. */
#define ANNOTATE_PCQ_DESTROY(pcq) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_DESTROY")

/* Report that we are about to put an element into a FIFO queue at
   address PCQ. */
#define ANNOTATE_PCQ_PUT(pcq) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_PUT")

/* Report that we've just got an element from a FIFO queue at address
   PCQ. */
#define ANNOTATE_PCQ_GET(pcq) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_PCQ_GET")


/* ----------------------------------------------------------------
   Annotations that suppress errors.  It is usually better to express
   the program's synchronization using the other annotations, but
   these can be used when all else fails.

   Currently these are all unimplemented.  I can't think of a simple
   way to implement them without at least some performance overhead.
   ----------------------------------------------------------------
*/

/* Report that we may have a benign race at "pointer", with size
   "sizeof(*(pointer))". "pointer" must be a non-void* pointer.  Insert at the
   point where "pointer" has been allocated, preferably close to the point
   where the race happens.  See also ANNOTATE_BENIGN_RACE_STATIC.

   XXX: what's this actually supposed to do?  And what's the type of
   DESCRIPTION?  When does the annotation stop having an effect?
*/
#define ANNOTATE_BENIGN_RACE(pointer, description) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_BENIGN_RACE")

/* Same as ANNOTATE_BENIGN_RACE(address, description), but applies to
   the memory range [address, address+size). */
#define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \
   VALGRIND_HG_DISABLE_CHECKING(address, size)
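
/* Example (sketch, with an invented 'approx_hits' counter): mark an
   approximate statistics word that several threads bump without
   synchronisation, where the imprecision is acceptable.  Note that in
   this implementation the description string is ignored.

      static unsigned long approx_hits;

      void stats_init(void) {
         ANNOTATE_BENIGN_RACE_SIZED(&approx_hits, sizeof approx_hits,
                                    "approximate hit counter");
      }
*/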

/* Request the analysis tool to ignore all reads in the current thread
   until ANNOTATE_IGNORE_READS_END is called.  Useful to ignore
   intentional racy reads, while still checking other reads and all
   writes. */
#define ANNOTATE_IGNORE_READS_BEGIN() \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_READS_BEGIN")

/* Stop ignoring reads. */
#define ANNOTATE_IGNORE_READS_END() \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_READS_END")

/* Similar to ANNOTATE_IGNORE_READS_BEGIN, but ignore writes. */
#define ANNOTATE_IGNORE_WRITES_BEGIN() \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_WRITES_BEGIN")

/* Stop ignoring writes. */
#define ANNOTATE_IGNORE_WRITES_END() \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_IGNORE_WRITES_END")

/* Start ignoring all memory accesses (reads and writes). */
#define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \
   do { \
      ANNOTATE_IGNORE_READS_BEGIN(); \
      ANNOTATE_IGNORE_WRITES_BEGIN(); \
   } while (0)

/* Stop ignoring all memory accesses. */
#define ANNOTATE_IGNORE_READS_AND_WRITES_END() \
   do { \
      ANNOTATE_IGNORE_WRITES_END(); \
      ANNOTATE_IGNORE_READS_END(); \
   } while (0)


/* ----------------------------------------------------------------
   Annotations useful for debugging.

   Again, so far unimplemented, partly for performance reasons.
   ----------------------------------------------------------------
*/

/* Request to trace every access to ADDRESS. */
#define ANNOTATE_TRACE_MEMORY(address) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_TRACE_MEMORY")

/* Report the current thread name to a race detector. */
#define ANNOTATE_THREAD_NAME(name) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_THREAD_NAME")


/* ----------------------------------------------------------------
   Annotations for describing behaviour of user-implemented lock
   primitives.  In all cases, the LOCK argument is a completely
   arbitrary machine word (unsigned long, or void*) and can be any
   value which gives a unique identity to the lock objects being
   modelled.

   We just pretend they're ordinary posix rwlocks.  That'll probably
   give some rather confusing wording in error messages, claiming that
   the arbitrary LOCK values are pthread_rwlock_t*'s, when in fact
   they are not.  Ah well.
   ----------------------------------------------------------------
*/
/* Report that a lock has just been created at address LOCK. */
#define ANNOTATE_RWLOCK_CREATE(lock)                         \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST,     \
               void*,(lock))

/* Report that the lock at address LOCK is about to be destroyed. */
#define ANNOTATE_RWLOCK_DESTROY(lock)                        \
   DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE,   \
               void*,(lock))

/* Report that the lock at address LOCK has just been acquired.
   is_w=1 for writer lock, is_w=0 for reader lock. */
#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w)                 \
  DO_CREQ_v_WW(_VG_USERREQ__HG_PTHREAD_RWLOCK_ACQUIRED,      \
               void*,(lock), unsigned long,(is_w))

/* Report that the lock at address LOCK is about to be released. */
#define ANNOTATE_RWLOCK_RELEASED(lock, is_w)                 \
  DO_CREQ_v_W(_VG_USERREQ__HG_PTHREAD_RWLOCK_RELEASED,       \
              void*,(lock)) /* is_w is ignored */
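
/* Example (illustrative sketch only): a naive reader-writer spinlock,
   annotated so Helgrind understands its lock semantics.  The
   implementation is deliberately simplistic (no fairness, no writer
   preference); only the placement of the annotations matters.  All
   names are invented for the example.

      typedef struct { volatile long state; } my_rwlock_t;
      // state < 0: writer holds it; state > 0: number of readers

      void my_rwlock_init(my_rwlock_t* l) {
         l->state = 0;
         ANNOTATE_RWLOCK_CREATE(l);
      }
      void my_rwlock_wrlock(my_rwlock_t* l) {
         while (!__sync_bool_compare_and_swap(&l->state, 0, -1))
            ;                                  // spin: readers/writer present
         ANNOTATE_RWLOCK_ACQUIRED(l, 1);       // 1 == writer
      }
      void my_rwlock_rdlock(my_rwlock_t* l) {
         for (;;) {
            long s = l->state;
            if (s >= 0 && __sync_bool_compare_and_swap(&l->state, s, s+1))
               break;
         }
         ANNOTATE_RWLOCK_ACQUIRED(l, 0);       // 0 == reader
      }
      void my_rwlock_unlock(my_rwlock_t* l, int is_w) {
         ANNOTATE_RWLOCK_RELEASED(l, is_w);    // before the real release
         if (is_w) l->state = 0;
         else      __sync_fetch_and_sub(&l->state, 1);
      }
      void my_rwlock_destroy(my_rwlock_t* l) {
         ANNOTATE_RWLOCK_DESTROY(l);
      }
*/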


/* ----------------------------------------------------------------
   Annotations useful when implementing barriers.  They are not
   normally needed by modules that merely use barriers.
   The "barrier" argument is a pointer to the barrier object.
   ----------------------------------------------------------------
*/

/* Report that the "barrier" has been initialized with initial
   "count".  If 'reinitialization_allowed' is true, initialization is
   allowed to happen multiple times w/o calling barrier_destroy(). */
#define ANNOTATE_BARRIER_INIT(barrier, count, reinitialization_allowed) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_BARRIER_INIT")

/* Report that we are about to enter barrier_wait("barrier"). */
#define ANNOTATE_BARRIER_WAIT_BEFORE(barrier) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_BARRIER_WAIT_BEFORE")

/* Report that we just exited barrier_wait("barrier"). */
#define ANNOTATE_BARRIER_WAIT_AFTER(barrier) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_BARRIER_WAIT_AFTER")

/* Report that the "barrier" has been destroyed. */
#define ANNOTATE_BARRIER_DESTROY(barrier) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_BARRIER_DESTROY")


/* ----------------------------------------------------------------
   Annotations useful for testing race detectors.
   ----------------------------------------------------------------
*/

/* Report that we expect a race on the variable at ADDRESS.  Use only
   in unit tests for a race detector. */
#define ANNOTATE_EXPECT_RACE(address, description) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_EXPECT_RACE")

/* A no-op.  Insert it anywhere you like, to test the interceptors. */
#define ANNOTATE_NO_OP(arg) \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_NO_OP")

/* Force the race detector to flush its state.  The actual effect
   depends on the implementation of the detector. */
#define ANNOTATE_FLUSH_STATE() \
   _HG_CLIENTREQ_UNIMP("ANNOTATE_FLUSH_STATE")

#endif /* __HELGRIND_H */