drd_thread.c revision 9cdc08368068d746e42d40c8f3a3dca5db5caee4
/*
  This file is part of drd, a thread error detector.

  Copyright (C) 2006-2013 Bart Van Assche <bvanassche@acm.org>.

  This program is free software; you can redistribute it and/or
  modify it under the terms of the GNU General Public License as
  published by the Free Software Foundation; either version 2 of the
  License, or (at your option) any later version.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
  02111-1307, USA.

  The GNU General Public License is contained in the file COPYING.
*/


#include "drd_error.h"
#include "drd_barrier.h"
#include "drd_clientobj.h"
#include "drd_cond.h"
#include "drd_mutex.h"
#include "drd_segment.h"
#include "drd_semaphore.h"
#include "drd_suppression.h"
#include "drd_thread.h"
#include "pub_tool_vki.h"
#include "pub_tool_basics.h"      // Addr, SizeT
#include "pub_tool_libcassert.h"  // tl_assert()
#include "pub_tool_libcbase.h"    // VG_(strlen)()
#include "pub_tool_libcprint.h"   // VG_(printf)()
#include "pub_tool_machine.h"
#include "pub_tool_mallocfree.h"  // VG_(malloc)(), VG_(free)()
#include "pub_tool_options.h"     // VG_(clo_backtrace_size)
#include "pub_tool_threadstate.h" // VG_(get_pthread_id)()



/* Local functions. */

static void thread_append_segment(const DrdThreadId tid, Segment* const sg);
static void thread_discard_segment(const DrdThreadId tid, Segment* const sg);
static void thread_compute_conflict_set(struct bitmap** conflict_set,
                                        const DrdThreadId tid);
static Bool thread_conflict_set_up_to_date(const DrdThreadId tid);


/* Local variables. */

static ULong    s_context_switch_count;
static ULong    s_discard_ordered_segments_count;
static ULong    s_compute_conflict_set_count;
static ULong    s_update_conflict_set_count;
static ULong    s_update_conflict_set_new_sg_count;
static ULong    s_update_conflict_set_sync_count;
static ULong    s_update_conflict_set_join_count;
static ULong    s_conflict_set_bitmap_creation_count;
static ULong    s_conflict_set_bitmap2_creation_count;
static ThreadId s_vg_running_tid  = VG_INVALID_THREADID;
DrdThreadId     DRD_(g_drd_running_tid) = DRD_INVALID_THREADID;
ThreadInfo      DRD_(g_threadinfo)[DRD_N_THREADS];
struct bitmap*  DRD_(g_conflict_set);
Bool DRD_(verify_conflict_set);
static Bool     s_trace_context_switches = False;
static Bool     s_trace_conflict_set = False;
static Bool     s_trace_conflict_set_bm = False;
static Bool     s_trace_fork_join = False;
static Bool     s_segment_merging = True;
static Bool     s_new_segments_since_last_merge;
static int      s_segment_merge_interval = 10;
static unsigned s_join_list_vol = 10;
static unsigned s_deletion_head;
static unsigned s_deletion_tail;


/* Function definitions. */

/** Enables/disables context switch tracing. */
void DRD_(thread_trace_context_switches)(const Bool t)
{
   tl_assert(t == False || t == True);
   s_trace_context_switches = t;
}

/** Enables/disables conflict set tracing. */
void DRD_(thread_trace_conflict_set)(const Bool t)
{
   tl_assert(t == False || t == True);
   s_trace_conflict_set = t;
}

/** Enables/disables conflict set bitmap tracing. */
void DRD_(thread_trace_conflict_set_bm)(const Bool t)
{
   tl_assert(t == False || t == True);
   s_trace_conflict_set_bm = t;
}

/** Report whether fork/join tracing is enabled. */
Bool DRD_(thread_get_trace_fork_join)(void)
{
   return s_trace_fork_join;
}

/** Enables/disables fork/join tracing. */
void DRD_(thread_set_trace_fork_join)(const Bool t)
{
   tl_assert(t == False || t == True);
   s_trace_fork_join = t;
}

/** Enables/disables segment merging. */
void DRD_(thread_set_segment_merging)(const Bool m)
{
   tl_assert(m == False || m == True);
   s_segment_merging = m;
}

/** Get the segment merging interval. */
int DRD_(thread_get_segment_merge_interval)(void)
{
   return s_segment_merge_interval;
}

/** Set the segment merging interval. */
void DRD_(thread_set_segment_merge_interval)(const int i)
{
   s_segment_merge_interval = i;
}

void DRD_(thread_set_join_list_vol)(const int jlv)
{
   s_join_list_vol = jlv;
}

void DRD_(thread_init)(void)
{
}

/**
 * Convert Valgrind's ThreadId into a DrdThreadId.
 *
 * @return DRD thread ID upon success and DRD_INVALID_THREADID if the passed
 *         Valgrind ThreadId does not yet exist.
 */
DrdThreadId DRD_(VgThreadIdToDrdThreadId)(const ThreadId tid)
{
   int i;

   if (tid == VG_INVALID_THREADID)
      return DRD_INVALID_THREADID;

   for (i = 1; i < DRD_N_THREADS; i++)
   {
      if (DRD_(g_threadinfo)[i].vg_thread_exists == True
          && DRD_(g_threadinfo)[i].vg_threadid == tid)
      {
         return i;
      }
   }

   return DRD_INVALID_THREADID;
}

/** Allocate a new DRD thread ID for the specified Valgrind thread ID. */
static DrdThreadId DRD_(VgThreadIdToNewDrdThreadId)(const ThreadId tid)
{
   int i;

   tl_assert(DRD_(VgThreadIdToDrdThreadId)(tid) == DRD_INVALID_THREADID);

   for (i = 1; i < DRD_N_THREADS; i++)
   {
      if (!DRD_(g_threadinfo)[i].valid)
      {
         tl_assert(! DRD_(IsValidDrdThreadId)(i));

         DRD_(g_threadinfo)[i].valid         = True;
         DRD_(g_threadinfo)[i].vg_thread_exists = True;
         DRD_(g_threadinfo)[i].vg_threadid   = tid;
         DRD_(g_threadinfo)[i].pt_threadid   = INVALID_POSIX_THREADID;
         DRD_(g_threadinfo)[i].stack_min     = 0;
         DRD_(g_threadinfo)[i].stack_min_min = 0;
         DRD_(g_threadinfo)[i].stack_startup = 0;
         DRD_(g_threadinfo)[i].stack_max     = 0;
         DRD_(thread_set_name)(i, "");
         DRD_(g_threadinfo)[i].on_alt_stack        = False;
         DRD_(g_threadinfo)[i].is_recording_loads  = True;
         DRD_(g_threadinfo)[i].is_recording_stores = True;
         DRD_(g_threadinfo)[i].pthread_create_nesting_level = 0;
         DRD_(g_threadinfo)[i].synchr_nesting = 0;
         DRD_(g_threadinfo)[i].deletion_seq = s_deletion_tail - 1;
         tl_assert(DRD_(g_threadinfo)[i].sg_first == NULL);
         tl_assert(DRD_(g_threadinfo)[i].sg_last == NULL);

         tl_assert(DRD_(IsValidDrdThreadId)(i));

         return i;
      }
   }

   VG_(printf)(
"\nSorry, but the maximum number of threads supported by DRD has been exceeded. "
"Aborting.\n");

   tl_assert(False);

   return DRD_INVALID_THREADID;
}

/** Convert a POSIX thread ID into a DRD thread ID. */
DrdThreadId DRD_(PtThreadIdToDrdThreadId)(const PThreadId tid)
{
   int i;

   if (tid != INVALID_POSIX_THREADID)
   {
      for (i = 1; i < DRD_N_THREADS; i++)
      {
         if (DRD_(g_threadinfo)[i].posix_thread_exists
             && DRD_(g_threadinfo)[i].pt_threadid == tid)
         {
            return i;
         }
      }
   }
   return DRD_INVALID_THREADID;
}

/** Convert a DRD thread ID into a Valgrind thread ID. */
ThreadId DRD_(DrdThreadIdToVgThreadId)(const DrdThreadId tid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);

   return (DRD_(g_threadinfo)[tid].vg_thread_exists
           ? DRD_(g_threadinfo)[tid].vg_threadid
           : VG_INVALID_THREADID);
}
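
/*
 * Usage sketch (illustrative only, not part of the original code): DRD keeps
 * three thread ID spaces side by side -- Valgrind ThreadId, DrdThreadId and
 * the POSIX pthread_t. A hypothetical lookup inside a tool function could
 * chain the conversions like this:
 *
 *    ThreadId    vg_tid  = VG_(get_running_tid)();
 *    DrdThreadId drd_tid = DRD_(VgThreadIdToDrdThreadId)(vg_tid);
 *    ThreadId    back    = DRD_(DrdThreadIdToVgThreadId)(drd_tid);
 *    // back == vg_tid as long as the Valgrind thread still exists.
 *
 * The conversion functions scan DRD_(g_threadinfo)[] linearly, so they rely
 * on DRD_N_THREADS staying small.
 */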

#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
/**
 * Sanity check of the doubly linked list of segments referenced by a
 * ThreadInfo struct.
 * @return True if sane, False if not.
 */
static Bool DRD_(sane_ThreadInfo)(const ThreadInfo* const ti)
{
   Segment* p;

   for (p = ti->sg_first; p; p = p->thr_next) {
      if (p->thr_next && p->thr_next->thr_prev != p)
         return False;
      if (p->thr_next == 0 && p != ti->sg_last)
         return False;
   }
   for (p = ti->sg_last; p; p = p->thr_prev) {
      if (p->thr_prev && p->thr_prev->thr_next != p)
         return False;
      if (p->thr_prev == 0 && p != ti->sg_first)
         return False;
   }
   return True;
}
#endif

/**
 * Create the first segment for a newly started thread.
 *
 * This function is called from the handler installed via
 * VG_(track_pre_thread_ll_create)(). The Valgrind core invokes this handler
 * from the context of the creator thread, before the new thread has been
 * created.
 *
 * @param[in] creator    DRD thread ID of the creator thread.
 * @param[in] vg_created Valgrind thread ID of the created thread.
 *
 * @return DRD thread ID of the created thread.
 */
DrdThreadId DRD_(thread_pre_create)(const DrdThreadId creator,
                                    const ThreadId vg_created)
{
   DrdThreadId created;

   tl_assert(DRD_(VgThreadIdToDrdThreadId)(vg_created) == DRD_INVALID_THREADID);
   created = DRD_(VgThreadIdToNewDrdThreadId)(vg_created);
   tl_assert(0 <= (int)created && created < DRD_N_THREADS
             && created != DRD_INVALID_THREADID);

   tl_assert(DRD_(g_threadinfo)[created].sg_first == NULL);
   tl_assert(DRD_(g_threadinfo)[created].sg_last == NULL);
   /* Create an initial segment for the newly created thread. */
   thread_append_segment(created, DRD_(sg_new)(creator, created));

   return created;
}

/**
 * Initialize DRD_(g_threadinfo)[] for a newly created thread. Must be called
 * after the thread has been created and before any client instructions are run
 * on the newly created thread, e.g. from the handler installed via
 * VG_(track_pre_thread_first_insn)().
 *
 * @param[in] vg_created Valgrind thread ID of the newly created thread.
 *
 * @return DRD thread ID for the new thread.
 */
DrdThreadId DRD_(thread_post_create)(const ThreadId vg_created)
{
   const DrdThreadId created = DRD_(VgThreadIdToDrdThreadId)(vg_created);

   tl_assert(0 <= (int)created && created < DRD_N_THREADS
             && created != DRD_INVALID_THREADID);

   DRD_(g_threadinfo)[created].stack_max
      = VG_(thread_get_stack_max)(vg_created);
   DRD_(g_threadinfo)[created].stack_startup
      = DRD_(g_threadinfo)[created].stack_max;
   DRD_(g_threadinfo)[created].stack_min
      = DRD_(g_threadinfo)[created].stack_max;
   DRD_(g_threadinfo)[created].stack_min_min
      = DRD_(g_threadinfo)[created].stack_max;
   DRD_(g_threadinfo)[created].stack_size
      = VG_(thread_get_stack_size)(vg_created);
   tl_assert(DRD_(g_threadinfo)[created].stack_max != 0);

   return created;
}

static void DRD_(thread_delayed_delete)(const DrdThreadId tid)
{
   int j;

   DRD_(g_threadinfo)[tid].vg_thread_exists = False;
   DRD_(g_threadinfo)[tid].posix_thread_exists = False;
   DRD_(g_threadinfo)[tid].deletion_seq = s_deletion_head++;
#if 0
   VG_(message)(Vg_DebugMsg, "Adding thread %d to the deletion list\n", tid);
#endif
   if (s_deletion_head - s_deletion_tail >= s_join_list_vol) {
      for (j = 0; j < DRD_N_THREADS; ++j) {
         if (DRD_(IsValidDrdThreadId)(j)
             && DRD_(g_threadinfo)[j].deletion_seq == s_deletion_tail)
         {
            s_deletion_tail++;
#if 0
            VG_(message)(Vg_DebugMsg, "Delayed delete of thread %d\n", j);
#endif
            DRD_(thread_delete)(j, False);
            break;
         }
      }
   }
}
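
/*
 * Worked example (illustrative, based on the code above): the delayed-deletion
 * bookkeeping behaves like a FIFO of at most s_join_list_vol finished threads.
 * With the default s_join_list_vol == 10, the tenth call makes
 * s_deletion_head - s_deletion_tail reach 10, so the entry whose deletion_seq
 * equals s_deletion_tail (the oldest finished thread) is handed to
 * DRD_(thread_delete)() and s_deletion_tail is advanced by one. Keeping the
 * most recently finished threads around for a while lets their segments still
 * be reported against in races detected shortly after the join.
 */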

/**
 * Process VG_USERREQ__POST_THREAD_JOIN. This client request is invoked just
 * after thread drd_joiner joined thread drd_joinee.
 */
void DRD_(thread_post_join)(DrdThreadId drd_joiner, DrdThreadId drd_joinee)
{
   tl_assert(DRD_(IsValidDrdThreadId)(drd_joiner));
   tl_assert(DRD_(IsValidDrdThreadId)(drd_joinee));

   DRD_(thread_new_segment)(drd_joiner);
   DRD_(thread_combine_vc_join)(drd_joiner, drd_joinee);
   DRD_(thread_new_segment)(drd_joinee);

   if (s_trace_fork_join)
   {
      const ThreadId joiner = DRD_(DrdThreadIdToVgThreadId)(drd_joiner);
      const unsigned msg_size = 256;
      HChar* msg;

      msg = VG_(malloc)("drd.main.dptj.1", msg_size);
      tl_assert(msg);
      VG_(snprintf)(msg, msg_size,
                    "drd_post_thread_join joiner = %d, joinee = %d",
                    drd_joiner, drd_joinee);
      if (joiner)
      {
         HChar* vc;

         vc = DRD_(vc_aprint)(DRD_(thread_get_vc)(drd_joiner));
         VG_(snprintf)(msg + VG_(strlen)(msg), msg_size - VG_(strlen)(msg),
                       ", new vc: %s", vc);
         VG_(free)(vc);
      }
      DRD_(trace_msg)("%pS", msg);
      VG_(free)(msg);
   }

   if (!  DRD_(get_check_stack_accesses)())
   {
      DRD_(finish_suppression)(DRD_(thread_get_stack_max)(drd_joinee)
                               - DRD_(thread_get_stack_size)(drd_joinee),
                               DRD_(thread_get_stack_max)(drd_joinee));
   }
   DRD_(clientobj_delete_thread)(drd_joinee);
   DRD_(thread_delayed_delete)(drd_joinee);
}

/**
 * NPTL hack: NPTL allocates the 'struct pthread' on top of the stack,
 * and accesses this data structure from multiple threads without locking.
 * Any conflicting accesses in the range stack_startup..stack_max will be
 * ignored.
 */
void DRD_(thread_set_stack_startup)(const DrdThreadId tid,
                                    const Addr stack_startup)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(DRD_(g_threadinfo)[tid].stack_min <= stack_startup);
   tl_assert(stack_startup <= DRD_(g_threadinfo)[tid].stack_max);
   DRD_(g_threadinfo)[tid].stack_startup = stack_startup;
}
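
/*
 * Illustrative layout sketch (the addresses below are made up): on a
 * downward-growing stack with stack_max == 0x7f0000a00000 and
 * stack_startup == 0x7f00009ffb00, the range stack_startup..stack_max holds
 * NPTL's 'struct pthread' and is excluded from race detection, while the
 * application part of the stack below stack_startup remains eligible for
 * checking when stack accesses are being checked at all.
 */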

/** Return the stack pointer for the specified thread. */
Addr DRD_(thread_get_stack_min)(const DrdThreadId tid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   return DRD_(g_threadinfo)[tid].stack_min;
}

/**
 * Return the lowest value that was ever assigned to the stack pointer
 * for the specified thread.
 */
Addr DRD_(thread_get_stack_min_min)(const DrdThreadId tid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   return DRD_(g_threadinfo)[tid].stack_min_min;
}

/** Return the top address for the stack of the specified thread. */
Addr DRD_(thread_get_stack_max)(const DrdThreadId tid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   return DRD_(g_threadinfo)[tid].stack_max;
}

/** Return the maximum stack size for the specified thread. */
SizeT DRD_(thread_get_stack_size)(const DrdThreadId tid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   return DRD_(g_threadinfo)[tid].stack_size;
}

Bool DRD_(thread_get_on_alt_stack)(const DrdThreadId tid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   return DRD_(g_threadinfo)[tid].on_alt_stack;
}

void DRD_(thread_set_on_alt_stack)(const DrdThreadId tid,
                                   const Bool on_alt_stack)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(on_alt_stack == !!on_alt_stack);
   DRD_(g_threadinfo)[tid].on_alt_stack = on_alt_stack;
}

Int DRD_(thread_get_threads_on_alt_stack)(void)
{
   int i, n = 0;

   for (i = 1; i < DRD_N_THREADS; i++)
      n += DRD_(g_threadinfo)[i].on_alt_stack;
   return n;
}

/**
 * Clean up thread-specific data structures.
 */
void DRD_(thread_delete)(const DrdThreadId tid, const Bool detached)
{
   Segment* sg;
   Segment* sg_prev;

   tl_assert(DRD_(IsValidDrdThreadId)(tid));

   tl_assert(DRD_(g_threadinfo)[tid].synchr_nesting >= 0);
   for (sg = DRD_(g_threadinfo)[tid].sg_last; sg; sg = sg_prev) {
      sg_prev = sg->thr_prev;
      sg->thr_next = NULL;
      sg->thr_prev = NULL;
      DRD_(sg_put)(sg);
   }
   DRD_(g_threadinfo)[tid].valid = False;
   DRD_(g_threadinfo)[tid].vg_thread_exists = False;
   DRD_(g_threadinfo)[tid].posix_thread_exists = False;
   if (detached)
      DRD_(g_threadinfo)[tid].detached_posix_thread = False;
   else
      tl_assert(!DRD_(g_threadinfo)[tid].detached_posix_thread);
   DRD_(g_threadinfo)[tid].sg_first = NULL;
   DRD_(g_threadinfo)[tid].sg_last = NULL;

   tl_assert(!DRD_(IsValidDrdThreadId)(tid));
}

/**
 * Called after a thread performed its last memory access and before
 * thread_delete() is called. Note: thread_delete() is only called for
 * joinable threads, not for detached threads.
 */
void DRD_(thread_finished)(const DrdThreadId tid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);

   DRD_(g_threadinfo)[tid].vg_thread_exists = False;

   if (DRD_(g_threadinfo)[tid].detached_posix_thread)
   {
      /*
       * Once a detached thread has finished, its stack is deallocated and
       * should no longer be taken into account when computing the conflict set.
       */
      DRD_(g_threadinfo)[tid].stack_min = DRD_(g_threadinfo)[tid].stack_max;

      /*
       * For a detached thread, calling pthread_exit() invalidates the
       * POSIX thread ID associated with the detached thread. For joinable
       * POSIX threads however, the POSIX thread ID remains live after the
       * pthread_exit() call until pthread_join() is called.
       */
      DRD_(g_threadinfo)[tid].posix_thread_exists = False;
   }
}

/** Called just after fork() in the child process. */
void DRD_(drd_thread_atfork_child)(const DrdThreadId tid)
{
   unsigned i;

   for (i = 1; i < DRD_N_THREADS; i++)
   {
      if (i == tid)
         continue;
      if (DRD_(IsValidDrdThreadId(i)))
         DRD_(thread_delete)(i, True);
      tl_assert(!DRD_(IsValidDrdThreadId(i)));
   }

   DRD_(bm_cleanup)(DRD_(g_conflict_set));
   DRD_(bm_init)(DRD_(g_conflict_set));
}

/** Called just before pthread_cancel(). */
void DRD_(thread_pre_cancel)(const DrdThreadId tid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);

   if (DRD_(thread_get_trace_fork_join)())
      DRD_(trace_msg)("[%d] drd_thread_pre_cancel %d",
                      DRD_(g_drd_running_tid), tid);
}

/**
 * Store the POSIX thread ID for the specified thread.
 *
 * @note This function can be called twice for the same thread -- see also
 * the comment block preceding the pthread_create() wrapper in
 * drd_pthread_intercepts.c.
 */
void DRD_(thread_set_pthreadid)(const DrdThreadId tid, const PThreadId ptid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(DRD_(g_threadinfo)[tid].pt_threadid == INVALID_POSIX_THREADID
             || DRD_(g_threadinfo)[tid].pt_threadid == ptid);
   tl_assert(ptid != INVALID_POSIX_THREADID);
   DRD_(g_threadinfo)[tid].posix_thread_exists = True;
   DRD_(g_threadinfo)[tid].pt_threadid         = ptid;
}

/** Returns true for joinable threads and false for detached threads. */
Bool DRD_(thread_get_joinable)(const DrdThreadId tid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   return ! DRD_(g_threadinfo)[tid].detached_posix_thread;
}

/** Store the thread mode: joinable or detached. */
#if defined(VGP_mips32_linux) || defined(VGP_mips64_linux)
 /* There is a cse related issue in gcc for MIPS. Optimization level
    has to be lowered, so cse related optimizations are not
    included.*/
 __attribute__((optimize("O1")))
#endif
void DRD_(thread_set_joinable)(const DrdThreadId tid, const Bool joinable)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(!! joinable == joinable);
   tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);

   DRD_(g_threadinfo)[tid].detached_posix_thread = ! joinable;
}

/** Tells DRD that the calling thread is about to enter pthread_create(). */
void DRD_(thread_entering_pthread_create)(const DrdThreadId tid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);
   tl_assert(DRD_(g_threadinfo)[tid].pthread_create_nesting_level >= 0);

   DRD_(g_threadinfo)[tid].pthread_create_nesting_level++;
}

/** Tells DRD that the calling thread has left pthread_create(). */
void DRD_(thread_left_pthread_create)(const DrdThreadId tid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);
   tl_assert(DRD_(g_threadinfo)[tid].pthread_create_nesting_level > 0);

   DRD_(g_threadinfo)[tid].pthread_create_nesting_level--;
}

/** Obtain the thread number and the user-assigned thread name. */
const HChar* DRD_(thread_get_name)(const DrdThreadId tid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);

   return DRD_(g_threadinfo)[tid].name;
}

/** Set the name of the specified thread. */
void DRD_(thread_set_name)(const DrdThreadId tid, const HChar* const name)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);

   if (name == NULL || name[0] == 0)
      VG_(snprintf)(DRD_(g_threadinfo)[tid].name,
                    sizeof(DRD_(g_threadinfo)[tid].name),
                    "Thread %d",
                    tid);
   else
      VG_(snprintf)(DRD_(g_threadinfo)[tid].name,
                    sizeof(DRD_(g_threadinfo)[tid].name),
                    "Thread %d (%s)",
                    tid, name);
   DRD_(g_threadinfo)[tid].name[sizeof(DRD_(g_threadinfo)[tid].name) - 1] = 0;
}

/**
 * Update s_vg_running_tid, DRD_(g_drd_running_tid) and recalculate the
 * conflict set.
 */
void DRD_(thread_set_vg_running_tid)(const ThreadId vg_tid)
{
   tl_assert(vg_tid != VG_INVALID_THREADID);

   if (vg_tid != s_vg_running_tid)
   {
      DRD_(thread_set_running_tid)(vg_tid,
                                   DRD_(VgThreadIdToDrdThreadId)(vg_tid));
   }

   tl_assert(s_vg_running_tid != VG_INVALID_THREADID);
   tl_assert(DRD_(g_drd_running_tid) != DRD_INVALID_THREADID);
}

/**
 * Update s_vg_running_tid, DRD_(g_drd_running_tid) and recalculate the
 * conflict set.
 */
void DRD_(thread_set_running_tid)(const ThreadId vg_tid,
                                  const DrdThreadId drd_tid)
{
   tl_assert(vg_tid != VG_INVALID_THREADID);
   tl_assert(drd_tid != DRD_INVALID_THREADID);

   if (vg_tid != s_vg_running_tid)
   {
      if (s_trace_context_switches
          && DRD_(g_drd_running_tid) != DRD_INVALID_THREADID)
      {
         VG_(message)(Vg_DebugMsg,
                      "Context switch from thread %d to thread %d;"
                      " segments: %llu\n",
                      DRD_(g_drd_running_tid), drd_tid,
                      DRD_(sg_get_segments_alive_count)());
      }
      s_vg_running_tid = vg_tid;
      DRD_(g_drd_running_tid) = drd_tid;
      thread_compute_conflict_set(&DRD_(g_conflict_set), drd_tid);
      s_context_switch_count++;
   }

   tl_assert(s_vg_running_tid != VG_INVALID_THREADID);
   tl_assert(DRD_(g_drd_running_tid) != DRD_INVALID_THREADID);
}

/**
 * Increase the synchronization nesting counter. Must be called before the
 * client calls a synchronization function.
 */
int DRD_(thread_enter_synchr)(const DrdThreadId tid)
{
   tl_assert(DRD_(IsValidDrdThreadId)(tid));
   return DRD_(g_threadinfo)[tid].synchr_nesting++;
}

/**
 * Decrease the synchronization nesting counter. Must be called after the
 * client left a synchronization function.
 */
int DRD_(thread_leave_synchr)(const DrdThreadId tid)
{
   tl_assert(DRD_(IsValidDrdThreadId)(tid));
   tl_assert(DRD_(g_threadinfo)[tid].synchr_nesting >= 1);
   return --DRD_(g_threadinfo)[tid].synchr_nesting;
}

/** Returns the synchronization nesting counter. */
int DRD_(thread_get_synchr_nesting_count)(const DrdThreadId tid)
{
   tl_assert(DRD_(IsValidDrdThreadId)(tid));
   return DRD_(g_threadinfo)[tid].synchr_nesting;
}

/** Append a new segment at the end of the segment list. */
static
void thread_append_segment(const DrdThreadId tid, Segment* const sg)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);

#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
   tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
#endif

   // add at tail
   sg->thr_prev = DRD_(g_threadinfo)[tid].sg_last;
   sg->thr_next = NULL;
   if (DRD_(g_threadinfo)[tid].sg_last)
      DRD_(g_threadinfo)[tid].sg_last->thr_next = sg;
   DRD_(g_threadinfo)[tid].sg_last = sg;
   if (DRD_(g_threadinfo)[tid].sg_first == NULL)
      DRD_(g_threadinfo)[tid].sg_first = sg;

#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
   tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
#endif
}

/**
 * Remove a segment from the segment list of thread threadid, and free the
 * associated memory.
 */
static
void thread_discard_segment(const DrdThreadId tid, Segment* const sg)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);

#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
   tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
#endif

   if (sg->thr_prev)
      sg->thr_prev->thr_next = sg->thr_next;
   if (sg->thr_next)
      sg->thr_next->thr_prev = sg->thr_prev;
   if (sg == DRD_(g_threadinfo)[tid].sg_first)
      DRD_(g_threadinfo)[tid].sg_first = sg->thr_next;
   if (sg == DRD_(g_threadinfo)[tid].sg_last)
      DRD_(g_threadinfo)[tid].sg_last = sg->thr_prev;
   DRD_(sg_put)(sg);

#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
   tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
#endif
}

/**
 * Returns a pointer to the vector clock of the most recent segment associated
 * with thread 'tid'.
 */
VectorClock* DRD_(thread_get_vc)(const DrdThreadId tid)
{
   Segment* latest_sg;

   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   latest_sg = DRD_(g_threadinfo)[tid].sg_last;
   tl_assert(latest_sg);
   return &latest_sg->vc;
}

/**
 * Return the latest segment of thread 'tid' and increment its reference count.
 */
void DRD_(thread_get_latest_segment)(Segment** sg, const DrdThreadId tid)
{
   Segment* latest_sg;

   tl_assert(sg);
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   latest_sg = DRD_(g_threadinfo)[tid].sg_last;
   tl_assert(latest_sg);

   DRD_(sg_put)(*sg);
   *sg = DRD_(sg_get)(latest_sg);
}

/**
 * Compute the minimum of all latest vector clocks of all threads
 * (Michiel Ronsse calls this "clock snooping" in his papers about DIOTA).
 *
 * @param vc pointer to a vectorclock, holds result upon return.
 */
static void DRD_(thread_compute_minimum_vc)(VectorClock* vc)
{
   unsigned i;
   Bool first;
   Segment* latest_sg;

   first = True;
   for (i = 0; i < DRD_N_THREADS; i++)
   {
      latest_sg = DRD_(g_threadinfo)[i].sg_last;
      if (latest_sg) {
         if (first)
            DRD_(vc_assign)(vc, &latest_sg->vc);
         else
            DRD_(vc_min)(vc, &latest_sg->vc);
         first = False;
      }
   }
}

/**
 * Compute the maximum of all latest vector clocks of all threads.
 *
 * @param vc pointer to a vectorclock, holds result upon return.
 */
static void DRD_(thread_compute_maximum_vc)(VectorClock* vc)
{
   unsigned i;
   Bool first;
   Segment* latest_sg;

   first = True;
   for (i = 0; i < DRD_N_THREADS; i++)
   {
      latest_sg = DRD_(g_threadinfo)[i].sg_last;
      if (latest_sg) {
         if (first)
            DRD_(vc_assign)(vc, &latest_sg->vc);
         else
            DRD_(vc_combine)(vc, &latest_sg->vc);
         first = False;
      }
   }
}
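
/*
 * Worked example (illustrative clock values): with two threads whose latest
 * segments carry the vector clocks [ 1: 3, 2: 1 ] and [ 1: 2, 2: 4 ],
 * DRD_(thread_compute_minimum_vc)() yields the element-wise minimum
 * [ 1: 2, 2: 1 ] and DRD_(thread_compute_maximum_vc)() yields the element-wise
 * maximum [ 1: 3, 2: 4 ]. Any segment whose vector clock is ordered before the
 * minimum is ordered before every future segment as well, which is what
 * thread_discard_ordered_segments() below relies on.
 */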

/**
 * Discard all segments that have a defined order against the latest vector
 * clock of all threads -- these segments can no longer be involved in a
 * data race.
 */
static void thread_discard_ordered_segments(void)
{
   unsigned i;
   VectorClock thread_vc_min;

   s_discard_ordered_segments_count++;

   DRD_(vc_init)(&thread_vc_min, 0, 0);
   DRD_(thread_compute_minimum_vc)(&thread_vc_min);
   if (DRD_(sg_get_trace)())
   {
      HChar *vc_min, *vc_max;
      VectorClock thread_vc_max;

      DRD_(vc_init)(&thread_vc_max, 0, 0);
      DRD_(thread_compute_maximum_vc)(&thread_vc_max);
      vc_min = DRD_(vc_aprint)(&thread_vc_min);
      vc_max = DRD_(vc_aprint)(&thread_vc_max);
      VG_(message)(Vg_DebugMsg,
                   "Discarding ordered segments -- min vc is %s, max vc is %s\n",
                   vc_min, vc_max);
      VG_(free)(vc_min);
      VG_(free)(vc_max);
      DRD_(vc_cleanup)(&thread_vc_max);
   }

   for (i = 0; i < DRD_N_THREADS; i++) {
      Segment* sg;
      Segment* sg_next;

      for (sg = DRD_(g_threadinfo)[i].sg_first;
           sg && (sg_next = sg->thr_next)
              && DRD_(vc_lte)(&sg->vc, &thread_vc_min);
           sg = sg_next)
      {
         thread_discard_segment(i, sg);
      }
   }
   DRD_(vc_cleanup)(&thread_vc_min);
}

/**
 * An implementation of the property 'equiv(sg1, sg2)' as defined in the paper
 * by Mark Christiaens et al. The property equiv(sg1, sg2) holds if and only if
 * all segments in the set CS are ordered consistently against both sg1 and
 * sg2. The set CS is defined as the set of segments that can immediately
 * precede future segments via inter-thread synchronization operations. In
 * DRD the set CS consists of the latest segment of each thread combined with
 * all segments for which the reference count is strictly greater than one.
 * The code below is an optimized version of the following:
 *
 * for (i = 0; i < DRD_N_THREADS; i++)
 * {
 *    Segment* sg;
 *
 *    for (sg = DRD_(g_threadinfo)[i].first; sg; sg = sg->next)
 *    {
 *       if (sg == DRD_(g_threadinfo)[i].last || DRD_(sg_get_refcnt)(sg) > 1)
 *       {
 *          if (   DRD_(vc_lte)(&sg1->vc, &sg->vc)
 *              != DRD_(vc_lte)(&sg2->vc, &sg->vc)
 *              || DRD_(vc_lte)(&sg->vc, &sg1->vc)
 *              != DRD_(vc_lte)(&sg->vc, &sg2->vc))
 *          {
 *             return False;
 *          }
 *       }
 *    }
 * }
 */
static Bool thread_consistent_segment_ordering(const DrdThreadId tid,
                                               Segment* const sg1,
                                               Segment* const sg2)
{
   unsigned i;

   tl_assert(sg1->thr_next);
   tl_assert(sg2->thr_next);
   tl_assert(sg1->thr_next == sg2);
   tl_assert(DRD_(vc_lte)(&sg1->vc, &sg2->vc));

   for (i = 0; i < DRD_N_THREADS; i++)
   {
      Segment* sg;

      for (sg = DRD_(g_threadinfo)[i].sg_first; sg; sg = sg->thr_next) {
         if (!sg->thr_next || DRD_(sg_get_refcnt)(sg) > 1) {
            if (DRD_(vc_lte)(&sg2->vc, &sg->vc))
               break;
            if (DRD_(vc_lte)(&sg1->vc, &sg->vc))
               return False;
         }
      }
      for (sg = DRD_(g_threadinfo)[i].sg_last; sg; sg = sg->thr_prev) {
         if (!sg->thr_next || DRD_(sg_get_refcnt)(sg) > 1) {
            if (DRD_(vc_lte)(&sg->vc, &sg1->vc))
               break;
            if (DRD_(vc_lte)(&sg->vc, &sg2->vc))
               return False;
         }
      }
   }
   return True;
}
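
/*
 * Note on the optimization above: because the segments of thread i are stored
 * in order of increasing vector clock, the forward scan can stop at the first
 * segment sg for which vc_lte(&sg2->vc, &sg->vc) holds -- that relation keeps
 * holding for every later segment of the same thread -- and, symmetrically,
 * the backward scan can stop at the first segment for which
 * vc_lte(&sg->vc, &sg1->vc) holds. Together the two loops check the same
 * condition as the straightforward code in the comment before the function,
 * without visiting every segment of every thread.
 */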

/**
 * Merge all segments that may be merged without triggering false positives
 * or discarding real data races. For the theoretical background of segment
 * merging, see also the following paper: Mark Christiaens, Michiel Ronsse
 * and Koen De Bosschere. Bounding the number of segment histories during
 * data race detection. Parallel Computing archive, Volume 28, Issue 9,
 * pp 1221-1238, September 2002. This paper contains a proof that consecutive
 * segments for which the property equiv(s1,s2) holds can be merged without
 * reducing the accuracy of data race detection. Furthermore
 * it is also proven that the total number of all segments will never grow
 * unbounded if all segments s1, s2 for which equiv(s1, s2) holds are merged
 * every time a new segment is created. The property equiv(s1, s2) is defined
 * as follows: equiv(s1, s2) <=> for all segments in the set CS, the vector
 * clocks of segments s and s1 are ordered in the same way as those of segments
 * s and s2. The set CS is defined as the set of existing segments s that have
 * the potential to conflict with not yet created segments, either because the
 * segment s is the latest segment of a thread or because it can become the
 * immediate predecessor of a new segment due to a synchronization operation.
 */
static void thread_merge_segments(void)
{
   unsigned i;

   s_new_segments_since_last_merge = 0;

   for (i = 0; i < DRD_N_THREADS; i++)
   {
      Segment* sg;

#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
      tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[i]));
#endif

      for (sg = DRD_(g_threadinfo)[i].sg_first; sg; sg = sg->thr_next) {
         if (DRD_(sg_get_refcnt)(sg) == 1 && sg->thr_next) {
            Segment* const sg_next = sg->thr_next;
            if (DRD_(sg_get_refcnt)(sg_next) == 1
                && sg_next->thr_next
                && thread_consistent_segment_ordering(i, sg, sg_next))
            {
               /* Merge sg and sg_next into sg. */
               DRD_(sg_merge)(sg, sg_next);
               thread_discard_segment(i, sg_next);
            }
         }
      }

#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
      tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[i]));
#endif
   }
}

/**
 * Create a new segment for the specified thread, and discard any segments
 * that cannot cause races anymore.
 */
void DRD_(thread_new_segment)(const DrdThreadId tid)
{
   Segment* last_sg;
   Segment* new_sg;

   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));

   last_sg = DRD_(g_threadinfo)[tid].sg_last;
   new_sg = DRD_(sg_new)(tid, tid);
   thread_append_segment(tid, new_sg);
   if (tid == DRD_(g_drd_running_tid) && last_sg)
   {
      DRD_(thread_update_conflict_set)(tid, &last_sg->vc);
      s_update_conflict_set_new_sg_count++;
   }

   tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));

   if (s_segment_merging
       && ++s_new_segments_since_last_merge >= s_segment_merge_interval)
   {
      thread_discard_ordered_segments();
      thread_merge_segments();
   }
}

/** Call this function after thread 'joiner' joined thread 'joinee'. */
void DRD_(thread_combine_vc_join)(DrdThreadId joiner, DrdThreadId joinee)
{
   tl_assert(joiner != joinee);
   tl_assert(0 <= (int)joiner && joiner < DRD_N_THREADS
             && joiner != DRD_INVALID_THREADID);
   tl_assert(0 <= (int)joinee && joinee < DRD_N_THREADS
             && joinee != DRD_INVALID_THREADID);
   tl_assert(DRD_(g_threadinfo)[joiner].sg_first);
   tl_assert(DRD_(g_threadinfo)[joiner].sg_last);
   tl_assert(DRD_(g_threadinfo)[joinee].sg_first);
   tl_assert(DRD_(g_threadinfo)[joinee].sg_last);

   if (DRD_(sg_get_trace)())
   {
      HChar *str1, *str2;
      str1 = DRD_(vc_aprint)(DRD_(thread_get_vc)(joiner));
      str2 = DRD_(vc_aprint)(DRD_(thread_get_vc)(joinee));
      VG_(message)(Vg_DebugMsg, "Before join: joiner %s, joinee %s\n",
                   str1, str2);
      VG_(free)(str1);
      VG_(free)(str2);
   }
   if (joiner == DRD_(g_drd_running_tid)) {
      VectorClock old_vc;

      DRD_(vc_copy)(&old_vc, DRD_(thread_get_vc)(joiner));
      DRD_(vc_combine)(DRD_(thread_get_vc)(joiner),
                       DRD_(thread_get_vc)(joinee));
      DRD_(thread_update_conflict_set)(joiner, &old_vc);
      s_update_conflict_set_join_count++;
      DRD_(vc_cleanup)(&old_vc);
   } else {
      DRD_(vc_combine)(DRD_(thread_get_vc)(joiner),
                       DRD_(thread_get_vc)(joinee));
   }

   thread_discard_ordered_segments();

   if (DRD_(sg_get_trace)()) {
      HChar* str;

      str = DRD_(vc_aprint)(DRD_(thread_get_vc)(joiner));
      VG_(message)(Vg_DebugMsg, "After join: %s\n", str);
      VG_(free)(str);
   }
}
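
/*
 * Worked example (illustrative clock values): if before the join the joiner's
 * vector clock is [ 1: 4, 2: 1 ] and the joinee's is [ 1: 2, 2: 7 ], then
 * DRD_(vc_combine)() leaves the joiner at [ 1: 4, 2: 7 ]. Every memory access
 * performed by the joinee therefore happens-before everything the joiner does
 * after the join, and thread_discard_ordered_segments() can drop segments that
 * are now ordered against the latest vector clock of every thread.
 */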

/**
 * Update the vector clock of the last segment of thread tid with the
 * vector clock of segment sg.
 */
static void thread_combine_vc_sync(DrdThreadId tid, const Segment* sg)
{
   const VectorClock* const vc = &sg->vc;

   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(DRD_(g_threadinfo)[tid].sg_first);
   tl_assert(DRD_(g_threadinfo)[tid].sg_last);
   tl_assert(sg);
   tl_assert(vc);

   if (tid != sg->tid) {
      VectorClock old_vc;

      DRD_(vc_copy)(&old_vc, DRD_(thread_get_vc)(tid));
      DRD_(vc_combine)(DRD_(thread_get_vc)(tid), vc);
      if (DRD_(sg_get_trace)()) {
         HChar *str1, *str2;
         str1 = DRD_(vc_aprint)(&old_vc);
         str2 = DRD_(vc_aprint)(DRD_(thread_get_vc)(tid));
         VG_(message)(Vg_DebugMsg, "thread %d: vc %s -> %s\n", tid, str1, str2);
         VG_(free)(str1);
         VG_(free)(str2);
      }

      thread_discard_ordered_segments();

      DRD_(thread_update_conflict_set)(tid, &old_vc);
      s_update_conflict_set_sync_count++;

      DRD_(vc_cleanup)(&old_vc);
   } else {
      tl_assert(DRD_(vc_lte)(vc, DRD_(thread_get_vc)(tid)));
   }
}

/**
 * Create a new segment for thread tid and update the vector clock of the last
 * segment of this thread with the vector clock of segment sg. Call this
 * function after thread tid had to wait because of thread synchronization
 * until the memory accesses in the segment sg finished.
 */
void DRD_(thread_new_segment_and_combine_vc)(DrdThreadId tid, const Segment* sg)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));
   tl_assert(sg);

   thread_append_segment(tid, DRD_(sg_new)(tid, tid));

   thread_combine_vc_sync(tid, sg);

   if (s_segment_merging
       && ++s_new_segments_since_last_merge >= s_segment_merge_interval)
   {
      thread_discard_ordered_segments();
      thread_merge_segments();
   }
}

/**
 * Call this function whenever a thread is no longer using the memory
 * [ a1, a2 [, e.g. because of a call to free() or a stack pointer
 * increase.
 */
void DRD_(thread_stop_using_mem)(const Addr a1, const Addr a2)
{
   Segment* p;

   for (p = DRD_(g_sg_list); p; p = p->g_next)
      DRD_(bm_clear)(DRD_(sg_bm)(p), a1, a2);

   DRD_(bm_clear)(DRD_(g_conflict_set), a1, a2);
}

/** Specify whether memory loads should be recorded. */
void DRD_(thread_set_record_loads)(const DrdThreadId tid, const Bool enabled)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(enabled == !! enabled);

   DRD_(g_threadinfo)[tid].is_recording_loads = enabled;
}

/** Specify whether memory stores should be recorded. */
void DRD_(thread_set_record_stores)(const DrdThreadId tid, const Bool enabled)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(enabled == !! enabled);

   DRD_(g_threadinfo)[tid].is_recording_stores = enabled;
}

/**
 * Print the segment information for all threads.
 *
 * This function is only used for debugging purposes.
 */
void DRD_(thread_print_all)(void)
{
   unsigned i;
   Segment* p;

   for (i = 0; i < DRD_N_THREADS; i++)
   {
      p = DRD_(g_threadinfo)[i].sg_first;
      if (p) {
         VG_(printf)("**************\n"
                     "* thread %3d (%d/%d/%d/%d/0x%lx/%d) *\n"
                     "**************\n",
                     i,
                     DRD_(g_threadinfo)[i].valid,
                     DRD_(g_threadinfo)[i].vg_thread_exists,
                     DRD_(g_threadinfo)[i].vg_threadid,
                     DRD_(g_threadinfo)[i].posix_thread_exists,
                     DRD_(g_threadinfo)[i].pt_threadid,
                     DRD_(g_threadinfo)[i].detached_posix_thread);
         for ( ; p; p = p->thr_next)
            DRD_(sg_print)(p);
      }
   }
}

/** Show a call stack involved in a data race. */
static void show_call_stack(const DrdThreadId tid, ExeContext* const callstack)
{
   const ThreadId vg_tid = DRD_(DrdThreadIdToVgThreadId)(tid);

   if (vg_tid != VG_INVALID_THREADID) {
      if (callstack)
         VG_(pp_ExeContext)(callstack);
      else
         VG_(get_and_pp_StackTrace)(vg_tid, VG_(clo_backtrace_size));
   } else {
      if (!VG_(clo_xml))
         VG_(message)(Vg_UserMsg,
                      "   (thread finished, call stack no longer available)\n");
   }
}

/** Print information about the segments involved in a data race. */
static void
thread_report_conflicting_segments_segment(const DrdThreadId tid,
                                           const Addr addr,
                                           const SizeT size,
                                           const BmAccessTypeT access_type,
                                           const Segment* const p)
{
   unsigned i;

   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(p);

   for (i = 0; i < DRD_N_THREADS; i++) {
      if (i != tid) {
         Segment* q;

         for (q = DRD_(g_threadinfo)[i].sg_last; q; q = q->thr_prev) {
            /*
             * Since q iterates over the segments of thread i in order of
             * decreasing vector clocks, if q->vc <= p->vc, then
             * q->next->vc <= p->vc will also hold. Hence, break out of the
             * loop once this condition is met.
             */
            if (DRD_(vc_lte)(&q->vc, &p->vc))
               break;
            if (!DRD_(vc_lte)(&p->vc, &q->vc)) {
               if (DRD_(bm_has_conflict_with)(DRD_(sg_bm)(q), addr, addr + size,
                                              access_type)) {
                  Segment* q_next;

                  tl_assert(q->stacktrace);
                  if (VG_(clo_xml))
                     VG_(printf_xml)("  <other_segment_start>\n");
                  else
                     VG_(message)(Vg_UserMsg,
                                  "Other segment start (thread %d)\n", i);
                  show_call_stack(i, q->stacktrace);
                  if (VG_(clo_xml))
                     VG_(printf_xml)("  </other_segment_start>\n"
                                     "  <other_segment_end>\n");
                  else
                     VG_(message)(Vg_UserMsg,
                                  "Other segment end (thread %d)\n", i);
                  q_next = q->thr_next;
                  show_call_stack(i, q_next ? q_next->stacktrace : 0);
                  if (VG_(clo_xml))
                     VG_(printf_xml)("  </other_segment_end>\n");
               }
            }
         }
      }
   }
}

/** Print information about all segments involved in a data race. */
void DRD_(thread_report_conflicting_segments)(const DrdThreadId tid,
                                              const Addr addr,
                                              const SizeT size,
                                              const BmAccessTypeT access_type)
{
   Segment* p;

   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);

   for (p = DRD_(g_threadinfo)[tid].sg_first; p; p = p->thr_next) {
      if (DRD_(bm_has)(DRD_(sg_bm)(p), addr, addr + size, access_type))
         thread_report_conflicting_segments_segment(tid, addr, size,
                                                    access_type, p);
   }
}

/**
 * Verify whether the conflict set for thread tid is up to date. Only perform
 * the check if the environment variable DRD_VERIFY_CONFLICT_SET has been set.
 */
static Bool thread_conflict_set_up_to_date(const DrdThreadId tid)
{
   Bool result;
   struct bitmap* computed_conflict_set = 0;

   if (!DRD_(verify_conflict_set))
      return True;

   thread_compute_conflict_set(&computed_conflict_set, tid);
   result = DRD_(bm_equal)(DRD_(g_conflict_set), computed_conflict_set);
   if (! result)
   {
      VG_(printf)("actual conflict set:\n");
      DRD_(bm_print)(DRD_(g_conflict_set));
      VG_(printf)("\n");
      VG_(printf)("computed conflict set:\n");
      DRD_(bm_print)(computed_conflict_set);
      VG_(printf)("\n");
   }
   DRD_(bm_delete)(computed_conflict_set);
   return result;
}

/**
 * Compute the conflict set: a bitmap that represents the union of all memory
 * accesses of all segments that are unordered to the current segment of the
 * thread tid.
 */
static void thread_compute_conflict_set(struct bitmap** conflict_set,
                                        const DrdThreadId tid)
{
   Segment* p;

   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(tid == DRD_(g_drd_running_tid));

   s_compute_conflict_set_count++;
   s_conflict_set_bitmap_creation_count
      -= DRD_(bm_get_bitmap_creation_count)();
   s_conflict_set_bitmap2_creation_count
      -= DRD_(bm_get_bitmap2_creation_count)();

   if (*conflict_set) {
      DRD_(bm_cleanup)(*conflict_set);
      DRD_(bm_init)(*conflict_set);
   } else {
      *conflict_set = DRD_(bm_new)();
   }

   if (s_trace_conflict_set) {
      HChar* str;

      str = DRD_(vc_aprint)(DRD_(thread_get_vc)(tid));
      VG_(message)(Vg_DebugMsg,
                   "computing conflict set for thread %d with vc %s\n",
                   tid, str);
      VG_(free)(str);
   }

   p = DRD_(g_threadinfo)[tid].sg_last;
   {
      unsigned j;

      if (s_trace_conflict_set) {
         HChar* vc;

         vc = DRD_(vc_aprint)(&p->vc);
         VG_(message)(Vg_DebugMsg, "conflict set: thread [%d] at vc %s\n",
                      tid, vc);
         VG_(free)(vc);
      }

      for (j = 0; j < DRD_N_THREADS; j++) {
         if (j != tid && DRD_(IsValidDrdThreadId)(j)) {
            Segment* q;

            for (q = DRD_(g_threadinfo)[j].sg_last; q; q = q->thr_prev) {
               if (!DRD_(vc_lte)(&q->vc, &p->vc)
                   && !DRD_(vc_lte)(&p->vc, &q->vc)) {
                  if (s_trace_conflict_set) {
                     HChar* str;

                     str = DRD_(vc_aprint)(&q->vc);
                     VG_(message)(Vg_DebugMsg,
                                  "conflict set: [%d] merging segment %s\n",
                                  j, str);
                     VG_(free)(str);
                  }
                  DRD_(bm_merge2)(*conflict_set, DRD_(sg_bm)(q));
               } else {
                  if (s_trace_conflict_set) {
                     HChar* str;

                     str = DRD_(vc_aprint)(&q->vc);
                     VG_(message)(Vg_DebugMsg,
                                  "conflict set: [%d] ignoring segment %s\n",
                                  j, str);
                     VG_(free)(str);
                  }
               }
            }
         }
      }
   }

   s_conflict_set_bitmap_creation_count
      += DRD_(bm_get_bitmap_creation_count)();
   s_conflict_set_bitmap2_creation_count
      += DRD_(bm_get_bitmap2_creation_count)();

   if (s_trace_conflict_set_bm) {
      VG_(message)(Vg_DebugMsg, "[%d] new conflict set:\n", tid);
      DRD_(bm_print)(*conflict_set);
      VG_(message)(Vg_DebugMsg, "[%d] end of new conflict set.\n", tid);
   }
}

/**
 * Update the conflict set after the vector clock of thread tid has been
 * updated from old_vc to its current value, either because a new segment has
 * been created or because of a synchronization operation.
 */
void DRD_(thread_update_conflict_set)(const DrdThreadId tid,
                                      const VectorClock* const old_vc)
{
   const VectorClock* new_vc;
   Segment* p;
   unsigned j;

   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(old_vc);
   tl_assert(tid == DRD_(g_drd_running_tid));
   tl_assert(DRD_(g_conflict_set));

   if (s_trace_conflict_set) {
      HChar* str;

      str = DRD_(vc_aprint)(DRD_(thread_get_vc)(tid));
      VG_(message)(Vg_DebugMsg,
                   "updating conflict set for thread %d with vc %s\n",
                   tid, str);
      VG_(free)(str);
   }

   new_vc = DRD_(thread_get_vc)(tid);
   tl_assert(DRD_(vc_lte)(old_vc, new_vc));

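   /*
    * Update the conflict set incrementally instead of recomputing it:
    * (1) clear all marks in the conflict set bitmap;
    * (2) mark the addresses accessed by every segment of another thread
    *     whose conflict set membership may have changed by the transition
    *     from old_vc to new_vc;
    * (3) erase the conflict set information for the marked addresses and
    *     merge, for those addresses only, the segments that are unordered
    *     with respect to the current segment of thread tid;
    * (4) remove the marked entries that have been cleared completely.
    */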
   DRD_(bm_unmark)(DRD_(g_conflict_set));

   for (j = 0; j < DRD_N_THREADS; j++)
   {
      Segment* q;

      if (j == tid || ! DRD_(IsValidDrdThreadId)(j))
         continue;

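      /*
       * Walk the segments of thread j from the most recent to the oldest
       * one. First handle the segments that are not ordered before new_vc:
       * inside this loop a segment belongs to the new conflict set iff
       * new_vc does not happen before it, and belonged to the old conflict
       * set iff old_vc did not happen before it. Mark its addresses if that
       * status has changed.
       */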
      for (q = DRD_(g_threadinfo)[j].sg_last;
           q && !DRD_(vc_lte)(&q->vc, new_vc);
           q = q->thr_prev) {
         const Bool included_in_old_conflict_set
            = !DRD_(vc_lte)(old_vc, &q->vc);
         const Bool included_in_new_conflict_set
            = !DRD_(vc_lte)(new_vc, &q->vc);

         if (UNLIKELY(s_trace_conflict_set)) {
            HChar* str;

            str = DRD_(vc_aprint)(&q->vc);
            VG_(message)(Vg_DebugMsg,
                         "conflict set: [%d] %s segment %s\n", j,
                         included_in_old_conflict_set
                         != included_in_new_conflict_set
                         ? "merging" : "ignoring", str);
            VG_(free)(str);
         }
         if (included_in_old_conflict_set != included_in_new_conflict_set)
            DRD_(bm_mark)(DRD_(g_conflict_set), DRD_(sg_bm)(q));
      }

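      /*
       * Next handle the remaining, older segments that are not yet ordered
       * before old_vc. These may have been part of the old conflict set but
       * may no longer be unordered with respect to new_vc, so mark their
       * addresses as well whenever their membership has changed.
       */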
      for ( ; q && !DRD_(vc_lte)(&q->vc, old_vc); q = q->thr_prev) {
         const Bool included_in_old_conflict_set
            = !DRD_(vc_lte)(old_vc, &q->vc);
         const Bool included_in_new_conflict_set
            = !DRD_(vc_lte)(&q->vc, new_vc)
            && !DRD_(vc_lte)(new_vc, &q->vc);

         if (UNLIKELY(s_trace_conflict_set)) {
            HChar* str;

            str = DRD_(vc_aprint)(&q->vc);
            VG_(message)(Vg_DebugMsg,
                         "conflict set: [%d] %s segment %s\n", j,
                         included_in_old_conflict_set
                         != included_in_new_conflict_set
                         ? "merging" : "ignoring", str);
            VG_(free)(str);
         }
         if (included_in_old_conflict_set != included_in_new_conflict_set)
            DRD_(bm_mark)(DRD_(g_conflict_set), DRD_(sg_bm)(q));
      }
   }

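   /*
    * Erase the conflict set information for all marked addresses and rebuild
    * that information, for the marked addresses only, from the segments of
    * other threads that are unordered with respect to the current segment of
    * thread tid.
    */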
   DRD_(bm_clear_marked)(DRD_(g_conflict_set));

   p = DRD_(g_threadinfo)[tid].sg_last;
   for (j = 0; j < DRD_N_THREADS; j++) {
      if (j != tid && DRD_(IsValidDrdThreadId)(j)) {
         Segment* q;
         for (q = DRD_(g_threadinfo)[j].sg_last;
              q && !DRD_(vc_lte)(&q->vc, &p->vc);
              q = q->thr_prev) {
            if (!DRD_(vc_lte)(&p->vc, &q->vc))
               DRD_(bm_merge2_marked)(DRD_(g_conflict_set), DRD_(sg_bm)(q));
         }
      }
   }

   DRD_(bm_remove_cleared_marked)(DRD_(g_conflict_set));

   s_update_conflict_set_count++;

   if (s_trace_conflict_set_bm)
   {
      VG_(message)(Vg_DebugMsg, "[%d] updated conflict set:\n", tid);
      DRD_(bm_print)(DRD_(g_conflict_set));
      VG_(message)(Vg_DebugMsg, "[%d] end of updated conflict set.\n", tid);
   }

   tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));
}

/** Report the number of context switches performed. */
ULong DRD_(thread_get_context_switch_count)(void)
{
   return s_context_switch_count;
}

/** Report the number of ordered segments that have been discarded. */
ULong DRD_(thread_get_discard_ordered_segments_count)(void)
{
   return s_discard_ordered_segments_count;
}

/** Return how many times the conflict set has been updated entirely. */
ULong DRD_(thread_get_compute_conflict_set_count)(void)
{
   return s_compute_conflict_set_count;
}

/** Return how many times the conflict set has been updated partially. */
ULong DRD_(thread_get_update_conflict_set_count)(void)
{
   return s_update_conflict_set_count;
}

/**
 * Return how many times the conflict set has been updated partially
 * because a new segment has been created.
 */
ULong DRD_(thread_get_update_conflict_set_new_sg_count)(void)
{
   return s_update_conflict_set_new_sg_count;
}

/**
 * Return how many times the conflict set has been updated partially
 * because of combining vector clocks due to synchronization operations
 * other than reader/writer lock or barrier operations.
 */
ULong DRD_(thread_get_update_conflict_set_sync_count)(void)
{
   return s_update_conflict_set_sync_count;
}

/**
 * Return how many times the conflict set has been updated partially
 * because of thread joins.
 */
ULong DRD_(thread_get_update_conflict_set_join_count)(void)
{
   return s_update_conflict_set_join_count;
}

/**
 * Return the number of first-level bitmaps that have been created during
 * conflict set updates.
 */
ULong DRD_(thread_get_conflict_set_bitmap_creation_count)(void)
{
   return s_conflict_set_bitmap_creation_count;
}

/**
 * Return the number of second-level bitmaps that have been created during
 * conflict set updates.
 */
ULong DRD_(thread_get_conflict_set_bitmap2_creation_count)(void)
{
   return s_conflict_set_bitmap2_creation_count;
}
