drd_thread.c revision e278ab506b9a73ef1c17a17077546b2de9a11d7c
1/*
2  This file is part of drd, a thread error detector.
3
4  Copyright (C) 2006-2011 Bart Van Assche <bvanassche@acm.org>.
5
6  This program is free software; you can redistribute it and/or
7  modify it under the terms of the GNU General Public License as
8  published by the Free Software Foundation; either version 2 of the
9  License, or (at your option) any later version.
10
11  This program is distributed in the hope that it will be useful, but
12  WITHOUT ANY WARRANTY; without even the implied warranty of
13  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  General Public License for more details.
15
16  You should have received a copy of the GNU General Public License
17  along with this program; if not, write to the Free Software
18  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
19  02111-1307, USA.
20
21  The GNU General Public License is contained in the file COPYING.
22*/
23
24
25#include "drd_error.h"
26#include "drd_barrier.h"
27#include "drd_clientobj.h"
28#include "drd_cond.h"
29#include "drd_mutex.h"
30#include "drd_segment.h"
31#include "drd_semaphore.h"
32#include "drd_suppression.h"
33#include "drd_thread.h"
34#include "pub_tool_vki.h"
35#include "pub_tool_basics.h"      // Addr, SizeT
36#include "pub_tool_libcassert.h"  // tl_assert()
37#include "pub_tool_libcbase.h"    // VG_(strlen)()
38#include "pub_tool_libcprint.h"   // VG_(printf)()
39#include "pub_tool_libcproc.h"    // VG_(getenv)()
40#include "pub_tool_machine.h"
41#include "pub_tool_mallocfree.h"  // VG_(malloc)(), VG_(free)()
42#include "pub_tool_options.h"     // VG_(clo_backtrace_size)
43#include "pub_tool_threadstate.h" // VG_(get_pthread_id)()
44
45
46
47/* Local functions. */
48
49static void thread_append_segment(const DrdThreadId tid, Segment* const sg);
50static void thread_discard_segment(const DrdThreadId tid, Segment* const sg);
51static void thread_compute_conflict_set(struct bitmap** conflict_set,
52                                        const DrdThreadId tid);
53static Bool thread_conflict_set_up_to_date(const DrdThreadId tid);
54
55
56/* Local variables. */
57
58static ULong    s_context_switch_count;
59static ULong    s_discard_ordered_segments_count;
60static ULong    s_compute_conflict_set_count;
61static ULong    s_update_conflict_set_count;
62static ULong    s_update_conflict_set_new_sg_count;
63static ULong    s_update_conflict_set_sync_count;
64static ULong    s_update_conflict_set_join_count;
65static ULong    s_conflict_set_bitmap_creation_count;
66static ULong    s_conflict_set_bitmap2_creation_count;
67static ThreadId s_vg_running_tid  = VG_INVALID_THREADID;
68DrdThreadId     DRD_(g_drd_running_tid) = DRD_INVALID_THREADID;
69ThreadInfo      DRD_(g_threadinfo)[DRD_N_THREADS];
70struct bitmap*  DRD_(g_conflict_set);
71static Bool     s_trace_context_switches = False;
72static Bool     s_trace_conflict_set = False;
73static Bool     s_trace_conflict_set_bm = False;
74static Bool     s_trace_fork_join = False;
75static Bool     s_segment_merging = True;
76static Bool     s_new_segments_since_last_merge;
77static int      s_segment_merge_interval = 10;
78static unsigned s_join_list_vol = 10;
79static unsigned s_deletion_head;
80static unsigned s_deletion_tail;
81
82
83/* Function definitions. */
84
85/** Enables/disables context switch tracing. */
86void DRD_(thread_trace_context_switches)(const Bool t)
87{
88   tl_assert(t == False || t == True);
89   s_trace_context_switches = t;
90}
91
92/** Enables/disables conflict set tracing. */
93void DRD_(thread_trace_conflict_set)(const Bool t)
94{
95   tl_assert(t == False || t == True);
96   s_trace_conflict_set = t;
97}
98
99/** Enables/disables conflict set bitmap tracing. */
100void DRD_(thread_trace_conflict_set_bm)(const Bool t)
101{
102   tl_assert(t == False || t == True);
103   s_trace_conflict_set_bm = t;
104}
105
/**
 * Report whether fork/join tracing is enabled.
 * Controlled by DRD_(thread_set_trace_fork_join)().
 */
Bool DRD_(thread_get_trace_fork_join)(void)
{
   return s_trace_fork_join;
}
111
112/** Enables/disables fork/join tracing. */
113void DRD_(thread_set_trace_fork_join)(const Bool t)
114{
115   tl_assert(t == False || t == True);
116   s_trace_fork_join = t;
117}
118
119/** Enables/disables segment merging. */
120void DRD_(thread_set_segment_merging)(const Bool m)
121{
122   tl_assert(m == False || m == True);
123   s_segment_merging = m;
124}
125
/**
 * Get the segment merging interval.
 * See also DRD_(thread_set_segment_merge_interval)().
 */
int DRD_(thread_get_segment_merge_interval)(void)
{
   return s_segment_merge_interval;
}
131
132/** Set the segment merging interval. */
133void DRD_(thread_set_segment_merge_interval)(const int i)
134{
135   s_segment_merge_interval = i;
136}
137
138void DRD_(thread_set_join_list_vol)(const int jlv)
139{
140   s_join_list_vol = jlv;
141}
142
143void DRD_(thread_init)(void)
144{
145   int i;
146
147   for (i = 0; i < DRD_N_THREADS; i++)
148      init_list_head(&DRD_(g_threadinfo)[i].sg_list);
149}
150
151/**
152 * Convert Valgrind's ThreadId into a DrdThreadId.
153 *
154 * @return DRD thread ID upon success and DRD_INVALID_THREADID if the passed
155 *         Valgrind ThreadId does not yet exist.
156 */
157DrdThreadId DRD_(VgThreadIdToDrdThreadId)(const ThreadId tid)
158{
159   int i;
160
161   if (tid == VG_INVALID_THREADID)
162      return DRD_INVALID_THREADID;
163
164   for (i = 1; i < DRD_N_THREADS; i++)
165   {
166      if (DRD_(g_threadinfo)[i].vg_thread_exists == True
167          && DRD_(g_threadinfo)[i].vg_threadid == tid)
168      {
169         return i;
170      }
171   }
172
173   return DRD_INVALID_THREADID;
174}
175
176/** Allocate a new DRD thread ID for the specified Valgrind thread ID. */
177static DrdThreadId DRD_(VgThreadIdToNewDrdThreadId)(const ThreadId tid)
178{
179   int i;
180
181   tl_assert(DRD_(VgThreadIdToDrdThreadId)(tid) == DRD_INVALID_THREADID);
182
183   for (i = 1; i < DRD_N_THREADS; i++)
184   {
185      if (!DRD_(g_threadinfo)[i].valid)
186      {
187         tl_assert(! DRD_(IsValidDrdThreadId)(i));
188
189         DRD_(g_threadinfo)[i].valid         = True;
190         DRD_(g_threadinfo)[i].vg_thread_exists = True;
191         DRD_(g_threadinfo)[i].vg_threadid   = tid;
192         DRD_(g_threadinfo)[i].pt_threadid   = INVALID_POSIX_THREADID;
193         DRD_(g_threadinfo)[i].stack_min     = 0;
194         DRD_(g_threadinfo)[i].stack_min_min = 0;
195         DRD_(g_threadinfo)[i].stack_startup = 0;
196         DRD_(g_threadinfo)[i].stack_max     = 0;
197         DRD_(thread_set_name)(i, "");
198         DRD_(g_threadinfo)[i].on_alt_stack        = False;
199         DRD_(g_threadinfo)[i].is_recording_loads  = True;
200         DRD_(g_threadinfo)[i].is_recording_stores = True;
201         DRD_(g_threadinfo)[i].pthread_create_nesting_level = 0;
202         DRD_(g_threadinfo)[i].synchr_nesting = 0;
203         DRD_(g_threadinfo)[i].deletion_seq = s_deletion_tail - 1;
204         tl_assert(list_empty(&DRD_(g_threadinfo)[i].sg_list));
205
206         tl_assert(DRD_(IsValidDrdThreadId)(i));
207
208         return i;
209      }
210   }
211
212   VG_(printf)(
213"\nSorry, but the maximum number of threads supported by DRD has been exceeded."
214"Aborting.\n");
215
216   tl_assert(False);
217
218   return DRD_INVALID_THREADID;
219}
220
221/** Convert a POSIX thread ID into a DRD thread ID. */
222DrdThreadId DRD_(PtThreadIdToDrdThreadId)(const PThreadId tid)
223{
224   int i;
225
226   if (tid != INVALID_POSIX_THREADID)
227   {
228      for (i = 1; i < DRD_N_THREADS; i++)
229      {
230         if (DRD_(g_threadinfo)[i].posix_thread_exists
231             && DRD_(g_threadinfo)[i].pt_threadid == tid)
232         {
233            return i;
234         }
235      }
236   }
237   return DRD_INVALID_THREADID;
238}
239
240/** Convert a DRD thread ID into a Valgrind thread ID. */
241ThreadId DRD_(DrdThreadIdToVgThreadId)(const DrdThreadId tid)
242{
243   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
244             && tid != DRD_INVALID_THREADID);
245
246   return (DRD_(g_threadinfo)[tid].vg_thread_exists
247           ? DRD_(g_threadinfo)[tid].vg_threadid
248           : VG_INVALID_THREADID);
249}
250
#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
/**
 * Sanity check of the doubly linked list of segments referenced by a
 * ThreadInfo struct.
 *
 * Note: the segment list is stored in ti->sg_list (see thread_init() /
 * thread_append_segment()); the former ti->first / ti->last pointers no
 * longer exist in this revision, so walk the list_head links instead.
 *
 * @return True if sane, False if not.
 */
static Bool DRD_(sane_ThreadInfo)(const ThreadInfo* const ti)
{
   const struct list_head* const head = &ti->sg_list;
   const struct list_head* pos;

   /* Verify that each node's forward and backward links agree with its
      neighbours'. */
   for (pos = head->next; pos != head; pos = pos->next) {
      if (pos->next->prev != pos)
         return False;
      if (pos->prev->next != pos)
         return False;
   }
   return True;
}
#endif
276
277/**
278 * Create the first segment for a newly started thread.
279 *
280 * This function is called from the handler installed via
281 * VG_(track_pre_thread_ll_create)(). The Valgrind core invokes this handler
282 * from the context of the creator thread, before the new thread has been
283 * created.
284 *
285 * @param[in] creator    DRD thread ID of the creator thread.
286 * @param[in] vg_created Valgrind thread ID of the created thread.
287 *
288 * @return DRD thread ID of the created thread.
289 */
290DrdThreadId DRD_(thread_pre_create)(const DrdThreadId creator,
291                                    const ThreadId vg_created)
292{
293   DrdThreadId created;
294
295   tl_assert(DRD_(VgThreadIdToDrdThreadId)(vg_created) == DRD_INVALID_THREADID);
296   created = DRD_(VgThreadIdToNewDrdThreadId)(vg_created);
297   tl_assert(0 <= (int)created && created < DRD_N_THREADS
298             && created != DRD_INVALID_THREADID);
299
300   tl_assert(list_empty(&DRD_(g_threadinfo)[created].sg_list));
301   /* Create an initial segment for the newly created thread. */
302   thread_append_segment(created, DRD_(sg_new)(creator, created));
303
304   return created;
305}
306
307/**
308 * Initialize DRD_(g_threadinfo)[] for a newly created thread. Must be called
309 * after the thread has been created and before any client instructions are run
310 * on the newly created thread, e.g. from the handler installed via
311 * VG_(track_pre_thread_first_insn)().
312 *
313 * @param[in] vg_created Valgrind thread ID of the newly created thread.
314 *
315 * @return DRD thread ID for the new thread.
316 */
317DrdThreadId DRD_(thread_post_create)(const ThreadId vg_created)
318{
319   const DrdThreadId created = DRD_(VgThreadIdToDrdThreadId)(vg_created);
320
321   tl_assert(0 <= (int)created && created < DRD_N_THREADS
322             && created != DRD_INVALID_THREADID);
323
324   DRD_(g_threadinfo)[created].stack_max
325      = VG_(thread_get_stack_max)(vg_created);
326   DRD_(g_threadinfo)[created].stack_startup
327      = DRD_(g_threadinfo)[created].stack_max;
328   DRD_(g_threadinfo)[created].stack_min
329      = DRD_(g_threadinfo)[created].stack_max;
330   DRD_(g_threadinfo)[created].stack_min_min
331      = DRD_(g_threadinfo)[created].stack_max;
332   DRD_(g_threadinfo)[created].stack_size
333      = VG_(thread_get_stack_size)(vg_created);
334   tl_assert(DRD_(g_threadinfo)[created].stack_max != 0);
335
336   return created;
337}
338
339static void DRD_(thread_delayed_delete)(const DrdThreadId tid)
340{
341   int j;
342
343   DRD_(g_threadinfo)[tid].vg_thread_exists = False;
344   DRD_(g_threadinfo)[tid].posix_thread_exists = False;
345   DRD_(g_threadinfo)[tid].deletion_seq = s_deletion_head++;
346#if 0
347   VG_(message)(Vg_DebugMsg, "Adding thread %d to the deletion list\n", tid);
348#endif
349   if (s_deletion_head - s_deletion_tail >= s_join_list_vol) {
350      for (j = 0; j < DRD_N_THREADS; ++j) {
351         if (DRD_(IsValidDrdThreadId)(j)
352             && DRD_(g_threadinfo)[j].deletion_seq == s_deletion_tail)
353         {
354            s_deletion_tail++;
355#if 0
356            VG_(message)(Vg_DebugMsg, "Delayed delete of thread %d\n", j);
357#endif
358            DRD_(thread_delete)(j, False);
359            break;
360         }
361      }
362   }
363}
364
/**
 * Process VG_USERREQ__POST_THREAD_JOIN. This client request is invoked just
 * after thread drd_joiner joined thread drd_joinee.
 */
void DRD_(thread_post_join)(DrdThreadId drd_joiner, DrdThreadId drd_joinee)
{
   tl_assert(DRD_(IsValidDrdThreadId)(drd_joiner));
   tl_assert(DRD_(IsValidDrdThreadId)(drd_joinee));

   /* pthread_join() is a synchronization point: start a new segment in the
      joiner, fold the joinee's vector clock into the joiner's, and give the
      joinee a fresh segment as well. */
   DRD_(thread_new_segment)(drd_joiner);
   DRD_(thread_combine_vc_join)(drd_joiner, drd_joinee);
   DRD_(thread_new_segment)(drd_joinee);

   if (s_trace_fork_join)
   {
      const ThreadId joiner = DRD_(DrdThreadIdToVgThreadId)(drd_joiner);
      const unsigned msg_size = 256;
      char* msg;

      msg = VG_(malloc)("drd.main.dptj.1", msg_size);
      tl_assert(msg);
      VG_(snprintf)(msg, msg_size,
                    "drd_post_thread_join joiner = %d, joinee = %d",
                    drd_joiner, drd_joinee);
      /* Only append the new vector clock when the joiner still maps to a
         live Valgrind thread (VG_INVALID_THREADID is zero). */
      if (joiner)
      {
         char* vc;

         vc = DRD_(vc_aprint)(DRD_(thread_get_vc)(drd_joiner));
         VG_(snprintf)(msg + VG_(strlen)(msg), msg_size - VG_(strlen)(msg),
                       ", new vc: %s", vc);
         VG_(free)(vc);
      }
      DRD_(trace_msg)("%pS", msg);
      VG_(free)(msg);
   }

   /* When stack accesses are not checked, the joinee's whole stack range was
      suppressed; end that suppression now that the thread is gone. */
   if (!  DRD_(get_check_stack_accesses)())
   {
      DRD_(finish_suppression)(DRD_(thread_get_stack_max)(drd_joinee)
                               - DRD_(thread_get_stack_size)(drd_joinee),
                               DRD_(thread_get_stack_max)(drd_joinee));
   }
   /* Discard the joinee's client objects and put it on the delayed
      deletion list. */
   DRD_(clientobj_delete_thread)(drd_joinee);
   DRD_(thread_delayed_delete)(drd_joinee);
}
411
412/**
413 * NPTL hack: NPTL allocates the 'struct pthread' on top of the stack,
414 * and accesses this data structure from multiple threads without locking.
415 * Any conflicting accesses in the range stack_startup..stack_max will be
416 * ignored.
417 */
418void DRD_(thread_set_stack_startup)(const DrdThreadId tid,
419                                    const Addr stack_startup)
420{
421   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
422             && tid != DRD_INVALID_THREADID);
423   tl_assert(DRD_(g_threadinfo)[tid].stack_min <= stack_startup);
424   tl_assert(stack_startup <= DRD_(g_threadinfo)[tid].stack_max);
425   DRD_(g_threadinfo)[tid].stack_startup = stack_startup;
426}
427
428/** Return the stack pointer for the specified thread. */
429Addr DRD_(thread_get_stack_min)(const DrdThreadId tid)
430{
431   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
432             && tid != DRD_INVALID_THREADID);
433   return DRD_(g_threadinfo)[tid].stack_min;
434}
435
436/**
437 * Return the lowest value that was ever assigned to the stack pointer
438 * for the specified thread.
439 */
440Addr DRD_(thread_get_stack_min_min)(const DrdThreadId tid)
441{
442   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
443             && tid != DRD_INVALID_THREADID);
444   return DRD_(g_threadinfo)[tid].stack_min_min;
445}
446
447/** Return the top address for the stack of the specified thread. */
448Addr DRD_(thread_get_stack_max)(const DrdThreadId tid)
449{
450   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
451             && tid != DRD_INVALID_THREADID);
452   return DRD_(g_threadinfo)[tid].stack_max;
453}
454
455/** Return the maximum stack size for the specified thread. */
456SizeT DRD_(thread_get_stack_size)(const DrdThreadId tid)
457{
458   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
459             && tid != DRD_INVALID_THREADID);
460   return DRD_(g_threadinfo)[tid].stack_size;
461}
462
463Bool DRD_(thread_get_on_alt_stack)(const DrdThreadId tid)
464{
465   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
466             && tid != DRD_INVALID_THREADID);
467   return DRD_(g_threadinfo)[tid].on_alt_stack;
468}
469
470void DRD_(thread_set_on_alt_stack)(const DrdThreadId tid,
471                                   const Bool on_alt_stack)
472{
473   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
474             && tid != DRD_INVALID_THREADID);
475   tl_assert(on_alt_stack == !!on_alt_stack);
476   DRD_(g_threadinfo)[tid].on_alt_stack = on_alt_stack;
477}
478
479Int DRD_(thread_get_threads_on_alt_stack)(void)
480{
481   int i, n = 0;
482
483   for (i = 1; i < DRD_N_THREADS; i++)
484      n += DRD_(g_threadinfo)[i].on_alt_stack;
485   return n;
486}
487
488/**
489 * Clean up thread-specific data structures.
490 */
491void DRD_(thread_delete)(const DrdThreadId tid, const Bool detached)
492{
493   Segment* sg;
494   Segment* sg_prev;
495
496   tl_assert(DRD_(IsValidDrdThreadId)(tid));
497
498   tl_assert(DRD_(g_threadinfo)[tid].synchr_nesting >= 0);
499   list_for_each_entry_safe(sg, sg_prev, &DRD_(g_threadinfo)[tid].sg_list,
500                            thr_list)
501   {
502      list_del(&sg->thr_list);
503      DRD_(sg_put)(sg);
504   }
505   DRD_(g_threadinfo)[tid].valid = False;
506   DRD_(g_threadinfo)[tid].vg_thread_exists = False;
507   DRD_(g_threadinfo)[tid].posix_thread_exists = False;
508   if (detached)
509      DRD_(g_threadinfo)[tid].detached_posix_thread = False;
510   else
511      tl_assert(!DRD_(g_threadinfo)[tid].detached_posix_thread);
512   tl_assert(list_empty(&DRD_(g_threadinfo)[tid].sg_list));
513
514   tl_assert(! DRD_(IsValidDrdThreadId)(tid));
515}
516
517/**
518 * Called after a thread performed its last memory access and before
519 * thread_delete() is called. Note: thread_delete() is only called for
520 * joinable threads, not for detached threads.
521 */
522void DRD_(thread_finished)(const DrdThreadId tid)
523{
524   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
525             && tid != DRD_INVALID_THREADID);
526
527   DRD_(g_threadinfo)[tid].vg_thread_exists = False;
528
529   if (DRD_(g_threadinfo)[tid].detached_posix_thread)
530   {
531      /*
532       * Once a detached thread has finished, its stack is deallocated and
533       * should no longer be taken into account when computing the conflict set.
534       */
535      DRD_(g_threadinfo)[tid].stack_min = DRD_(g_threadinfo)[tid].stack_max;
536
537      /*
538       * For a detached thread, calling pthread_exit() invalidates the
539       * POSIX thread ID associated with the detached thread. For joinable
540       * POSIX threads however, the POSIX thread ID remains live after the
541       * pthread_exit() call until pthread_join() is called.
542       */
543      DRD_(g_threadinfo)[tid].posix_thread_exists = False;
544   }
545}
546
547/** Called just after fork() in the child process. */
548void DRD_(drd_thread_atfork_child)(const DrdThreadId tid)
549{
550   unsigned i;
551
552   for (i = 1; i < DRD_N_THREADS; i++)
553   {
554      if (i == tid)
555	 continue;
556      if (DRD_(IsValidDrdThreadId(i)))
557	 DRD_(thread_delete)(i, True);
558      tl_assert(!DRD_(IsValidDrdThreadId(i)));
559   }
560}
561
562/** Called just before pthread_cancel(). */
563void DRD_(thread_pre_cancel)(const DrdThreadId tid)
564{
565   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
566             && tid != DRD_INVALID_THREADID);
567   tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);
568
569   if (DRD_(thread_get_trace_fork_join)())
570      DRD_(trace_msg)("[%d] drd_thread_pre_cancel %d",
571                      DRD_(g_drd_running_tid), tid);
572}
573
574/**
575 * Store the POSIX thread ID for the specified thread.
576 *
577 * @note This function can be called two times for the same thread -- see also
578 * the comment block preceding the pthread_create() wrapper in
579 * drd_pthread_intercepts.c.
580 */
581void DRD_(thread_set_pthreadid)(const DrdThreadId tid, const PThreadId ptid)
582{
583   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
584             && tid != DRD_INVALID_THREADID);
585   tl_assert(DRD_(g_threadinfo)[tid].pt_threadid == INVALID_POSIX_THREADID
586             || DRD_(g_threadinfo)[tid].pt_threadid == ptid);
587   tl_assert(ptid != INVALID_POSIX_THREADID);
588   DRD_(g_threadinfo)[tid].posix_thread_exists = True;
589   DRD_(g_threadinfo)[tid].pt_threadid         = ptid;
590}
591
592/** Returns true for joinable threads and false for detached threads. */
593Bool DRD_(thread_get_joinable)(const DrdThreadId tid)
594{
595   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
596             && tid != DRD_INVALID_THREADID);
597   return ! DRD_(g_threadinfo)[tid].detached_posix_thread;
598}
599
600/** Store the thread mode: joinable or detached. */
601void DRD_(thread_set_joinable)(const DrdThreadId tid, const Bool joinable)
602{
603   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
604             && tid != DRD_INVALID_THREADID);
605   tl_assert(!! joinable == joinable);
606   tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);
607
608   DRD_(g_threadinfo)[tid].detached_posix_thread = ! joinable;
609}
610
611/** Tells DRD that the calling thread is about to enter pthread_create(). */
612void DRD_(thread_entering_pthread_create)(const DrdThreadId tid)
613{
614   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
615             && tid != DRD_INVALID_THREADID);
616   tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);
617   tl_assert(DRD_(g_threadinfo)[tid].pthread_create_nesting_level >= 0);
618
619   DRD_(g_threadinfo)[tid].pthread_create_nesting_level++;
620}
621
622/** Tells DRD that the calling thread has left pthread_create(). */
623void DRD_(thread_left_pthread_create)(const DrdThreadId tid)
624{
625   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
626             && tid != DRD_INVALID_THREADID);
627   tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);
628   tl_assert(DRD_(g_threadinfo)[tid].pthread_create_nesting_level > 0);
629
630   DRD_(g_threadinfo)[tid].pthread_create_nesting_level--;
631}
632
633/** Obtain the thread number and the user-assigned thread name. */
634const char* DRD_(thread_get_name)(const DrdThreadId tid)
635{
636   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
637             && tid != DRD_INVALID_THREADID);
638
639   return DRD_(g_threadinfo)[tid].name;
640}
641
642/** Set the name of the specified thread. */
643void DRD_(thread_set_name)(const DrdThreadId tid, const char* const name)
644{
645   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
646             && tid != DRD_INVALID_THREADID);
647
648   if (name == NULL || name[0] == 0)
649      VG_(snprintf)(DRD_(g_threadinfo)[tid].name,
650                    sizeof(DRD_(g_threadinfo)[tid].name),
651                    "Thread %d",
652                    tid);
653   else
654      VG_(snprintf)(DRD_(g_threadinfo)[tid].name,
655                    sizeof(DRD_(g_threadinfo)[tid].name),
656                    "Thread %d (%s)",
657                    tid, name);
658   DRD_(g_threadinfo)[tid].name[sizeof(DRD_(g_threadinfo)[tid].name) - 1] = 0;
659}
660
661/**
662 * Update s_vg_running_tid, DRD_(g_drd_running_tid) and recalculate the
663 * conflict set.
664 */
665void DRD_(thread_set_vg_running_tid)(const ThreadId vg_tid)
666{
667   tl_assert(vg_tid != VG_INVALID_THREADID);
668
669   if (vg_tid != s_vg_running_tid)
670   {
671      DRD_(thread_set_running_tid)(vg_tid,
672                                   DRD_(VgThreadIdToDrdThreadId)(vg_tid));
673   }
674
675   tl_assert(s_vg_running_tid != VG_INVALID_THREADID);
676   tl_assert(DRD_(g_drd_running_tid) != DRD_INVALID_THREADID);
677}
678
/**
 * Update s_vg_running_tid, DRD_(g_drd_running_tid) and recalculate the
 * conflict set.
 */
void DRD_(thread_set_running_tid)(const ThreadId vg_tid,
                                  const DrdThreadId drd_tid)
{
   tl_assert(vg_tid != VG_INVALID_THREADID);
   tl_assert(drd_tid != DRD_INVALID_THREADID);

   if (vg_tid != s_vg_running_tid)
   {
      if (s_trace_context_switches
          && DRD_(g_drd_running_tid) != DRD_INVALID_THREADID)
      {
         VG_(message)(Vg_DebugMsg,
                      "Context switch from thread %d to thread %d;"
                      " segments: %llu\n",
                      DRD_(g_drd_running_tid), drd_tid,
                      DRD_(sg_get_segments_alive_count)());
      }
      /* Publish the new running thread in the globals, then recompute the
         conflict set for it. */
      s_vg_running_tid = vg_tid;
      DRD_(g_drd_running_tid) = drd_tid;
      thread_compute_conflict_set(&DRD_(g_conflict_set), drd_tid);
      s_context_switch_count++;
   }

   tl_assert(s_vg_running_tid != VG_INVALID_THREADID);
   tl_assert(DRD_(g_drd_running_tid) != DRD_INVALID_THREADID);
}
709
710/**
711 * Increase the synchronization nesting counter. Must be called before the
712 * client calls a synchronization function.
713 */
714int DRD_(thread_enter_synchr)(const DrdThreadId tid)
715{
716   tl_assert(DRD_(IsValidDrdThreadId)(tid));
717   return DRD_(g_threadinfo)[tid].synchr_nesting++;
718}
719
720/**
721 * Decrease the synchronization nesting counter. Must be called after the
722 * client left a synchronization function.
723 */
724int DRD_(thread_leave_synchr)(const DrdThreadId tid)
725{
726   tl_assert(DRD_(IsValidDrdThreadId)(tid));
727   tl_assert(DRD_(g_threadinfo)[tid].synchr_nesting >= 1);
728   return --DRD_(g_threadinfo)[tid].synchr_nesting;
729}
730
731/** Returns the synchronization nesting counter. */
732int DRD_(thread_get_synchr_nesting_count)(const DrdThreadId tid)
733{
734   tl_assert(DRD_(IsValidDrdThreadId)(tid));
735   return DRD_(g_threadinfo)[tid].synchr_nesting;
736}
737
738/** Append a new segment at the end of the segment list. */
739static
740void thread_append_segment(const DrdThreadId tid, Segment* const sg)
741{
742   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
743             && tid != DRD_INVALID_THREADID);
744
745#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
746   tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
747#endif
748
749   list_add_tail(&sg->thr_list, &DRD_(g_threadinfo)[tid].sg_list);
750
751#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
752   tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
753#endif
754}
755
756/**
757 * Remove a segment from the segment list of thread threadid, and free the
758 * associated memory.
759 */
760static
761void thread_discard_segment(const DrdThreadId tid, Segment* const sg)
762{
763   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
764             && tid != DRD_INVALID_THREADID);
765
766#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
767   tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
768#endif
769
770   list_del(&sg->thr_list);
771   DRD_(sg_put)(sg);
772
773#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
774   tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
775#endif
776}
777
778/**
779 * Returns a pointer to the vector clock of the most recent segment associated
780 * with thread 'tid'.
781 */
782VectorClock* DRD_(thread_get_vc)(const DrdThreadId tid)
783{
784   struct list_head* sg_list;
785
786   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
787             && tid != DRD_INVALID_THREADID);
788   sg_list = &DRD_(g_threadinfo)[tid].sg_list;
789   tl_assert(!list_empty(sg_list));
790   return &list_last_entry(sg_list, Segment, thr_list)->vc;
791}
792
793/**
794 * Return the latest segment of thread 'tid' and increment its reference count.
795 */
796void DRD_(thread_get_latest_segment)(Segment** sg, const DrdThreadId tid)
797{
798   struct list_head* sg_list;
799
800   tl_assert(sg);
801   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
802             && tid != DRD_INVALID_THREADID);
803   sg_list = &DRD_(g_threadinfo)[tid].sg_list;
804   tl_assert(!list_empty(sg_list));
805
806   DRD_(sg_put)(*sg);
807   *sg = DRD_(sg_get)(list_last_entry(sg_list, Segment, thr_list));
808}
809
810/**
811 * Compute the minimum of all latest vector clocks of all threads
812 * (Michiel Ronsse calls this "clock snooping" in his papers about DIOTA).
813 *
814 * @param vc pointer to a vectorclock, holds result upon return.
815 */
816static void DRD_(thread_compute_minimum_vc)(VectorClock* vc)
817{
818   unsigned i;
819   Bool first;
820   struct list_head* sg_list;
821   Segment* latest_sg;
822
823   first = True;
824   for (i = 0; i < DRD_N_THREADS; i++)
825   {
826      sg_list = &DRD_(g_threadinfo)[i].sg_list;
827      if (!list_empty(sg_list)) {
828         latest_sg = list_last_entry(sg_list, Segment, thr_list);
829         if (first)
830            DRD_(vc_assign)(vc, &latest_sg->vc);
831         else
832            DRD_(vc_min)(vc, &latest_sg->vc);
833         first = False;
834      }
835   }
836}
837
838/**
839 * Compute the maximum of all latest vector clocks of all threads.
840 *
841 * @param vc pointer to a vectorclock, holds result upon return.
842 */
843static void DRD_(thread_compute_maximum_vc)(VectorClock* vc)
844{
845   unsigned i;
846   Bool first;
847   struct list_head* sg_list;
848   Segment* latest_sg;
849
850   first = True;
851   for (i = 0; i < DRD_N_THREADS; i++)
852   {
853      sg_list = &DRD_(g_threadinfo)[i].sg_list;
854      if (!list_empty(sg_list)) {
855         latest_sg = list_last_entry(sg_list, Segment, thr_list);
856         if (first)
857            DRD_(vc_assign)(vc, &latest_sg->vc);
858         else
859            DRD_(vc_combine)(vc, &latest_sg->vc);
860         first = False;
861      }
862   }
863}
864
/**
 * Discard all segments that have a defined order against the latest vector
 * clock of all threads -- these segments can no longer be involved in a
 * data race.
 */
static void thread_discard_ordered_segments(void)
{
   unsigned i;
   VectorClock thread_vc_min;

   s_discard_ordered_segments_count++;

   /* Compute the component-wise minimum over all threads' latest vector
      clocks; segments that happened-before this minimum are ordered against
      everything that can still be created. */
   DRD_(vc_init)(&thread_vc_min, 0, 0);
   DRD_(thread_compute_minimum_vc)(&thread_vc_min);
   if (DRD_(sg_get_trace)())
   {
      char *vc_min, *vc_max;
      VectorClock thread_vc_max;

      DRD_(vc_init)(&thread_vc_max, 0, 0);
      DRD_(thread_compute_maximum_vc)(&thread_vc_max);
      vc_min = DRD_(vc_aprint)(&thread_vc_min);
      vc_max = DRD_(vc_aprint)(&thread_vc_max);
      VG_(message)(Vg_DebugMsg,
                   "Discarding ordered segments -- min vc is %s, max vc is %s\n",
                   vc_min, vc_max);
      VG_(free)(vc_min);
      VG_(free)(vc_max);
      DRD_(vc_cleanup)(&thread_vc_max);
   }

   for (i = 0; i < DRD_N_THREADS; i++)
   {
      Segment* sg;
      Segment* sg_next;
      struct list_head* sg_list;

      sg_list = &DRD_(g_threadinfo)[i].sg_list;
      /* Walk from oldest to newest and stop at the first segment that is
         not ordered before the minimum clock; never discard the last
         (current) segment of a thread. */
      list_for_each_entry_safe(sg, sg_next, sg_list, thr_list) {
         if (list_is_last(&sg->thr_list, sg_list)
             || !DRD_(vc_lte)(&sg->vc, &thread_vc_min))
            break;
         thread_discard_segment(i, sg);
      }
   }
   DRD_(vc_cleanup)(&thread_vc_min);
}
912
913/**
914 * An implementation of the property 'equiv(sg1, sg2)' as defined in the paper
915 * by Mark Christiaens e.a. The property equiv(sg1, sg2) holds if and only if
916 * all segments in the set CS are ordered consistently against both sg1 and
917 * sg2. The set CS is defined as the set of segments that can immediately
918 * precede future segments via inter-thread synchronization operations. In
919 * DRD the set CS consists of the latest segment of each thread combined with
920 * all segments for which the reference count is strictly greater than one.
921 * The code below is an optimized version of the following:
922 *
923 * for (i = 0; i < DRD_N_THREADS; i++)
924 * {
925 *    Segment* sg;
926 *
927 *    for (sg = DRD_(g_threadinfo)[i].first; sg; sg = sg->next)
928 *    {
929 *       if (sg == DRD_(g_threadinfo)[i].last || DRD_(sg_get_refcnt)(sg) > 1)
930 *       {
931 *          if (   DRD_(vc_lte)(&sg1->vc, &sg->vc)
932 *              != DRD_(vc_lte)(&sg2->vc, &sg->vc)
933 *              || DRD_(vc_lte)(&sg->vc, &sg1->vc)
934 *              != DRD_(vc_lte)(&sg->vc, &sg2->vc))
935 *          {
936 *             return False;
937 *          }
938 *       }
939 *    }
940 * }
941 */
static Bool thread_consistent_segment_ordering(const DrdThreadId tid,
                                               Segment* const sg1,
                                               Segment* const sg2)
{
   unsigned i;
   struct list_head* sg_list;

   /*
    * Precondition: sg1 and sg2 are consecutive segments of thread tid
    * (sg2 immediately follows sg1), neither is the last segment of the
    * thread, and sg1 is ordered before sg2.
    */
   sg_list = &DRD_(g_threadinfo)[tid].sg_list;
   tl_assert(!list_is_last(&sg1->thr_list, sg_list));
   tl_assert(!list_is_last(&sg2->thr_list, sg_list));
   tl_assert(list_next_entry(&sg1->thr_list, Segment, thr_list) == sg2);
   tl_assert(DRD_(vc_lte)(&sg1->vc, &sg2->vc));

   for (i = 0; i < DRD_N_THREADS; i++)
   {
      Segment* sg;

      /*
       * Forward scan over thread i. Only segments that belong to the set
       * CS are checked: the last segment of the thread or a segment whose
       * reference count exceeds one. Segments of one thread are visited in
       * order of increasing vector clocks, so once sg2->vc <= sg->vc holds
       * it also holds for every later segment and the scan may stop. If sg
       * is ordered after sg1 but not after sg2, ordering is inconsistent.
       */
      sg_list = &DRD_(g_threadinfo)[i].sg_list;
      list_for_each_entry(sg, sg_list, thr_list)
      {
         if (list_is_last(&sg->thr_list, sg_list)
             || DRD_(sg_get_refcnt)(sg) > 1)
         {
            if (DRD_(vc_lte)(&sg2->vc, &sg->vc))
               break;
            if (DRD_(vc_lte)(&sg1->vc, &sg->vc))
               return False;
         }
      }
      /*
       * Backward scan, the mirror image of the loop above: stop once
       * sg->vc <= sg1->vc (then all earlier segments are ordered before
       * sg1 as well); if sg is ordered before sg2 but not before sg1,
       * ordering is inconsistent.
       */
      list_for_each_entry_reverse(sg, sg_list, thr_list)
      {
         if (list_is_last(&sg->thr_list, sg_list)
             || DRD_(sg_get_refcnt)(sg) > 1)
         {
            if (DRD_(vc_lte)(&sg->vc, &sg1->vc))
               break;
            if (DRD_(vc_lte)(&sg->vc, &sg2->vc))
               return False;
         }
      }
   }
   return True;
}
985
986/**
987 * Merge all segments that may be merged without triggering false positives
988 * or discarding real data races. For the theoretical background of segment
989 * merging, see also the following paper: Mark Christiaens, Michiel Ronsse
990 * and Koen De Bosschere. Bounding the number of segment histories during
991 * data race detection. Parallel Computing archive, Volume 28, Issue 9,
992 * pp 1221-1238, September 2002. This paper contains a proof that merging
993 * consecutive segments for which the property equiv(s1,s2) holds can be
994 * merged without reducing the accuracy of datarace detection. Furthermore
995 * it is also proven that the total number of all segments will never grow
996 * unbounded if all segments s1, s2 for which equiv(s1, s2) holds are merged
997 * every time a new segment is created. The property equiv(s1, s2) is defined
998 * as follows: equiv(s1, s2) <=> for all segments in the set CS, the vector
999 * clocks of segments s and s1 are ordered in the same way as those of segments
1000 * s and s2. The set CS is defined as the set of existing segments s that have
1001 * the potential to conflict with not yet created segments, either because the
1002 * segment s is the latest segment of a thread or because it can become the
1003 * immediate predecessor of a new segment due to a synchronization operation.
1004 */
static void thread_merge_segments(void)
{
   unsigned i;

   /* Reset the counter that triggers the next merge pass. */
   s_new_segments_since_last_merge = 0;

   for (i = 0; i < DRD_N_THREADS; i++)
   {
      Segment* sg;

#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
      tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[i]));
#endif

      struct list_head* sg_list = &DRD_(g_threadinfo)[i].sg_list;
      list_for_each_entry(sg, sg_list, thr_list)
      {
         /*
          * A candidate for merging must have a reference count of exactly
          * one and must not be the last segment of the thread.
          */
         if (DRD_(sg_get_refcnt)(sg) == 1
             && !list_is_last(&sg->thr_list, sg_list)) {
            Segment* sg_next = list_next_entry(&sg->thr_list, Segment,
                                               thr_list);
            /*
             * The successor must satisfy the same constraints, and merging
             * must not change how any segment in the set CS is ordered
             * (see thread_consistent_segment_ordering()).
             */
            if (DRD_(sg_get_refcnt)(sg_next) == 1
                && !list_is_last(&sg_next->thr_list, sg_list)
                && thread_consistent_segment_ordering(i, sg, sg_next))
            {
               /* Merge sg and sg_next into sg. */
               DRD_(sg_merge)(sg, sg_next);
               /*
                * NOTE(review): assumes thread_discard_segment() unlinks
                * sg_next such that continuing the iteration from sg
                * remains valid — confirm against its definition.
                */
               thread_discard_segment(i, sg_next);
            }
         }
      }

#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
      tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[i]));
#endif
   }
}
1042
1043/**
1044 * Create a new segment for the specified thread, and discard any segments
1045 * that cannot cause races anymore.
1046 */
1047void DRD_(thread_new_segment)(const DrdThreadId tid)
1048{
1049   struct list_head* sg_list;
1050   Segment* last_sg;
1051   Segment* new_sg;
1052
1053   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
1054             && tid != DRD_INVALID_THREADID);
1055   tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));
1056
1057   sg_list = &DRD_(g_threadinfo)[tid].sg_list;
1058   last_sg = list_empty(sg_list) ? NULL
1059      : list_last_entry(sg_list, Segment, thr_list);
1060   new_sg = DRD_(sg_new)(tid, tid);
1061   thread_append_segment(tid, new_sg);
1062   if (tid == DRD_(g_drd_running_tid) && last_sg)
1063   {
1064      DRD_(thread_update_conflict_set)(tid, &last_sg->vc);
1065      s_update_conflict_set_new_sg_count++;
1066   }
1067
1068   tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));
1069
1070   if (s_segment_merging
1071       && ++s_new_segments_since_last_merge >= s_segment_merge_interval)
1072   {
1073      thread_discard_ordered_segments();
1074      thread_merge_segments();
1075   }
1076}
1077
1078/** Call this function after thread 'joiner' joined thread 'joinee'. */
1079void DRD_(thread_combine_vc_join)(DrdThreadId joiner, DrdThreadId joinee)
1080{
1081   tl_assert(joiner != joinee);
1082   tl_assert(0 <= (int)joiner && joiner < DRD_N_THREADS
1083             && joiner != DRD_INVALID_THREADID);
1084   tl_assert(0 <= (int)joinee && joinee < DRD_N_THREADS
1085             && joinee != DRD_INVALID_THREADID);
1086   tl_assert(!list_empty(&DRD_(g_threadinfo)[joiner].sg_list));
1087   tl_assert(!list_empty(&DRD_(g_threadinfo)[joinee].sg_list));
1088
1089   if (DRD_(sg_get_trace)())
1090   {
1091      char *str1, *str2;
1092      str1 = DRD_(vc_aprint)(DRD_(thread_get_vc)(joiner));
1093      str2 = DRD_(vc_aprint)(DRD_(thread_get_vc)(joinee));
1094      VG_(message)(Vg_DebugMsg, "Before join: joiner %s, joinee %s\n",
1095                   str1, str2);
1096      VG_(free)(str1);
1097      VG_(free)(str2);
1098   }
1099   if (joiner == DRD_(g_drd_running_tid)) {
1100      VectorClock old_vc;
1101
1102      DRD_(vc_copy)(&old_vc, DRD_(thread_get_vc)(joiner));
1103      DRD_(vc_combine)(DRD_(thread_get_vc)(joiner),
1104                       DRD_(thread_get_vc)(joinee));
1105      DRD_(thread_update_conflict_set)(joiner, &old_vc);
1106      s_update_conflict_set_join_count++;
1107      DRD_(vc_cleanup)(&old_vc);
1108   } else {
1109      DRD_(vc_combine)(DRD_(thread_get_vc)(joiner),
1110                       DRD_(thread_get_vc)(joinee));
1111   }
1112
1113   thread_discard_ordered_segments();
1114
1115   if (DRD_(sg_get_trace)()) {
1116      char* str;
1117
1118      str = DRD_(vc_aprint)(DRD_(thread_get_vc)(joiner));
1119      VG_(message)(Vg_DebugMsg, "After join: %s\n", str);
1120      VG_(free)(str);
1121   }
1122}
1123
1124/**
 * Update the vector clock of the last segment of thread tid with the
 * vector clock of segment sg.
1127 */
1128static void thread_combine_vc_sync(DrdThreadId tid, const Segment* sg)
1129{
1130   const VectorClock* const vc = &sg->vc;
1131
1132   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
1133             && tid != DRD_INVALID_THREADID);
1134   tl_assert(!list_empty(&DRD_(g_threadinfo)[tid].sg_list));
1135   tl_assert(sg);
1136   tl_assert(vc);
1137
1138   if (tid != sg->tid) {
1139      VectorClock old_vc;
1140
1141      DRD_(vc_copy)(&old_vc, DRD_(thread_get_vc)(tid));
1142      DRD_(vc_combine)(DRD_(thread_get_vc)(tid), vc);
1143      if (DRD_(sg_get_trace)()) {
1144         char *str1, *str2;
1145         str1 = DRD_(vc_aprint)(&old_vc);
1146         str2 = DRD_(vc_aprint)(DRD_(thread_get_vc)(tid));
1147         VG_(message)(Vg_DebugMsg, "thread %d: vc %s -> %s\n", tid, str1, str2);
1148         VG_(free)(str1);
1149         VG_(free)(str2);
1150      }
1151
1152      thread_discard_ordered_segments();
1153
1154      DRD_(thread_update_conflict_set)(tid, &old_vc);
1155      s_update_conflict_set_sync_count++;
1156
1157      DRD_(vc_cleanup)(&old_vc);
1158   } else {
1159      tl_assert(DRD_(vc_lte)(vc, DRD_(thread_get_vc)(tid)));
1160   }
1161}
1162
1163/**
1164 * Create a new segment for thread tid and update the vector clock of the last
 * segment of this thread with the vector clock of segment sg. Call this
1166 * function after thread tid had to wait because of thread synchronization
1167 * until the memory accesses in the segment sg finished.
1168 */
1169void DRD_(thread_new_segment_and_combine_vc)(DrdThreadId tid, const Segment* sg)
1170{
1171   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
1172             && tid != DRD_INVALID_THREADID);
1173   tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));
1174   tl_assert(sg);
1175
1176   thread_append_segment(tid, DRD_(sg_new)(tid, tid));
1177
1178   thread_combine_vc_sync(tid, sg);
1179
1180   if (s_segment_merging
1181       && ++s_new_segments_since_last_merge >= s_segment_merge_interval)
1182   {
1183      thread_discard_ordered_segments();
1184      thread_merge_segments();
1185   }
1186}
1187
1188/**
1189 * Call this function whenever a thread is no longer using the memory
1190 * [ a1, a2 [, e.g. because of a call to free() or a stack pointer
1191 * increase.
1192 */
1193void DRD_(thread_stop_using_mem)(const Addr a1, const Addr a2)
1194{
1195   Segment* p;
1196
1197   list_for_each_entry(p, &DRD_(g_sg_list), g_list)
1198      DRD_(bm_clear)(DRD_(sg_bm)(p), a1, a2);
1199
1200   DRD_(bm_clear)(DRD_(g_conflict_set), a1, a2);
1201}
1202
1203/** Specify whether memory loads should be recorded. */
1204void DRD_(thread_set_record_loads)(const DrdThreadId tid, const Bool enabled)
1205{
1206   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
1207             && tid != DRD_INVALID_THREADID);
1208   tl_assert(enabled == !! enabled);
1209
1210   DRD_(g_threadinfo)[tid].is_recording_loads = enabled;
1211}
1212
1213/** Specify whether memory stores should be recorded. */
1214void DRD_(thread_set_record_stores)(const DrdThreadId tid, const Bool enabled)
1215{
1216   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
1217             && tid != DRD_INVALID_THREADID);
1218   tl_assert(enabled == !! enabled);
1219
1220   DRD_(g_threadinfo)[tid].is_recording_stores = enabled;
1221}
1222
1223/**
1224 * Print the segment information for all threads.
1225 *
1226 * This function is only used for debugging purposes.
1227 */
1228void DRD_(thread_print_all)(void)
1229{
1230   unsigned i;
1231   struct list_head* sg_list;
1232   Segment* p;
1233
1234   for (i = 0; i < DRD_N_THREADS; i++)
1235   {
1236      sg_list = &DRD_(g_threadinfo)[i].sg_list;
1237      if (!list_empty(sg_list))
1238      {
1239         VG_(printf)("**************\n"
1240                     "* thread %3d (%d/%d/%d/%d/0x%lx/%d) *\n"
1241                     "**************\n",
1242                     i,
1243                     DRD_(g_threadinfo)[i].valid,
1244                     DRD_(g_threadinfo)[i].vg_thread_exists,
1245                     DRD_(g_threadinfo)[i].vg_threadid,
1246                     DRD_(g_threadinfo)[i].posix_thread_exists,
1247                     DRD_(g_threadinfo)[i].pt_threadid,
1248                     DRD_(g_threadinfo)[i].detached_posix_thread);
1249         list_for_each_entry(p, sg_list, thr_list)
1250            DRD_(sg_print)(p);
1251      }
1252   }
1253}
1254
1255/** Show a call stack involved in a data race. */
1256static void show_call_stack(const DrdThreadId tid, ExeContext* const callstack)
1257{
1258   const ThreadId vg_tid = DRD_(DrdThreadIdToVgThreadId)(tid);
1259
1260   if (vg_tid != VG_INVALID_THREADID) {
1261      if (callstack)
1262         VG_(pp_ExeContext)(callstack);
1263      else
1264         VG_(get_and_pp_StackTrace)(vg_tid, VG_(clo_backtrace_size));
1265   } else {
1266      if (!VG_(clo_xml))
1267         VG_(message)(Vg_UserMsg,
1268                      "   (thread finished, call stack no longer available)\n");
1269   }
1270}
1271
1272/** Print information about the segments involved in a data race. */
1273static void
1274thread_report_conflicting_segments_segment(const DrdThreadId tid,
1275                                           const Addr addr,
1276                                           const SizeT size,
1277                                           const BmAccessTypeT access_type,
1278                                           const Segment* const p)
1279{
1280   unsigned i;
1281
1282   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
1283             && tid != DRD_INVALID_THREADID);
1284   tl_assert(p);
1285
1286   for (i = 0; i < DRD_N_THREADS; i++)
1287   {
1288      if (i != tid)
1289      {
1290         Segment* q;
1291         struct list_head *sg_list;
1292
1293         sg_list = &DRD_(g_threadinfo)[i].sg_list;
1294         list_for_each_entry_reverse(q, sg_list, thr_list) {
1295            /*
1296             * Since q iterates over the segments of thread i in order of
1297             * decreasing vector clocks, if q->vc <= p->vc, then
1298             * q->next->vc <= p->vc will also hold. Hence, break out of the
1299             * loop once this condition is met.
1300             */
1301            if (DRD_(vc_lte)(&q->vc, &p->vc))
1302               break;
1303            if (! DRD_(vc_lte)(&p->vc, &q->vc))
1304            {
1305               if (DRD_(bm_has_conflict_with)(DRD_(sg_bm)(q), addr, addr + size,
1306                                              access_type))
1307               {
1308                  Segment* q_next;
1309
1310                  tl_assert(q->stacktrace);
1311                  if (VG_(clo_xml))
1312                     VG_(printf_xml)("  <other_segment_start>\n");
1313                  else
1314                     VG_(message)(Vg_UserMsg,
1315                                  "Other segment start (thread %d)\n", i);
1316                  show_call_stack(i, q->stacktrace);
1317                  if (VG_(clo_xml))
1318                     VG_(printf_xml)("  </other_segment_start>\n"
1319                                     "  <other_segment_end>\n");
1320                  else
1321                     VG_(message)(Vg_UserMsg,
1322                                  "Other segment end (thread %d)\n", i);
1323                  q_next = list_is_last(&q->thr_list, sg_list)
1324                     ? NULL : list_next_entry(&q->thr_list, Segment, thr_list);
1325                  show_call_stack(i, q_next ? q_next->stacktrace : 0);
1326                  if (VG_(clo_xml))
1327                     VG_(printf_xml)("  </other_segment_end>\n");
1328               }
1329            }
1330         }
1331      }
1332   }
1333}
1334
1335/** Print information about all segments involved in a data race. */
1336void DRD_(thread_report_conflicting_segments)(const DrdThreadId tid,
1337                                              const Addr addr,
1338                                              const SizeT size,
1339                                              const BmAccessTypeT access_type)
1340{
1341   Segment* p;
1342
1343   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
1344             && tid != DRD_INVALID_THREADID);
1345
1346   list_for_each_entry(p, &DRD_(g_threadinfo)[tid].sg_list, thr_list) {
1347      if (DRD_(bm_has)(DRD_(sg_bm)(p), addr, addr + size, access_type))
1348         thread_report_conflicting_segments_segment(tid, addr, size,
1349                                                    access_type, p);
1350   }
1351}
1352
1353/**
1354 * Verify whether the conflict set for thread tid is up to date. Only perform
1355 * the check if the environment variable DRD_VERIFY_CONFLICT_SET has been set.
1356 */
1357static Bool thread_conflict_set_up_to_date(const DrdThreadId tid)
1358{
1359   static int do_verify_conflict_set = -1;
1360   Bool result;
1361   struct bitmap* computed_conflict_set = 0;
1362
1363   if (do_verify_conflict_set < 0)
1364      do_verify_conflict_set = VG_(getenv)("DRD_VERIFY_CONFLICT_SET") != 0;
1365
1366   if (do_verify_conflict_set == 0)
1367      return True;
1368
1369   thread_compute_conflict_set(&computed_conflict_set, tid);
1370   result = DRD_(bm_equal)(DRD_(g_conflict_set), computed_conflict_set);
1371   if (! result)
1372   {
1373      VG_(printf)("actual conflict set:\n");
1374      DRD_(bm_print)(DRD_(g_conflict_set));
1375      VG_(printf)("\n");
1376      VG_(printf)("computed conflict set:\n");
1377      DRD_(bm_print)(computed_conflict_set);
1378      VG_(printf)("\n");
1379   }
1380   DRD_(bm_delete)(computed_conflict_set);
1381   return result;
1382}
1383
1384/**
1385 * Compute the conflict set: a bitmap that represents the union of all memory
1386 * accesses of all segments that are unordered to the current segment of the
1387 * thread tid.
1388 */
static void thread_compute_conflict_set(struct bitmap** conflict_set,
                                        const DrdThreadId tid)
{
   Segment* p;

   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   /* The conflict set is only maintained for the running thread. */
   tl_assert(tid == DRD_(g_drd_running_tid));

   s_compute_conflict_set_count++;
   /*
    * The creation counters are global; subtracting here and adding back at
    * the end of this function accumulates only the number of bitmaps
    * created during this computation.
    */
   s_conflict_set_bitmap_creation_count
      -= DRD_(bm_get_bitmap_creation_count)();
   s_conflict_set_bitmap2_creation_count
      -= DRD_(bm_get_bitmap2_creation_count)();

   /* Reuse the caller's bitmap if present, otherwise allocate a new one. */
   if (*conflict_set) {
      DRD_(bm_cleanup)(*conflict_set);
      DRD_(bm_init)(*conflict_set);
   } else {
      *conflict_set = DRD_(bm_new)();
   }

   if (s_trace_conflict_set) {
      char* str;

      str = DRD_(vc_aprint)(DRD_(thread_get_vc)(tid));
      VG_(message)(Vg_DebugMsg,
                   "computing conflict set for thread %d with vc %s\n",
                   tid, str);
      VG_(free)(str);
   }

   /* p is the current (last) segment of thread tid. */
   p = list_last_entry(&DRD_(g_threadinfo)[tid].sg_list, Segment, thr_list);
   {
      unsigned j;

      if (s_trace_conflict_set)
      {
         char* vc;

         vc = DRD_(vc_aprint)(&p->vc);
         VG_(message)(Vg_DebugMsg, "conflict set: thread [%d] at vc %s\n",
                      tid, vc);
         VG_(free)(vc);
      }

      /*
       * Merge into the conflict set the access bitmap of every segment of
       * every other thread that is unordered against p (neither q <= p
       * nor p <= q holds for the vector clocks).
       */
      for (j = 0; j < DRD_N_THREADS; j++)
      {
         if (j != tid && DRD_(IsValidDrdThreadId)(j))
         {
            Segment* q;
            list_for_each_entry_reverse(q, &DRD_(g_threadinfo)[j].sg_list,
                                        thr_list) {
               if (! DRD_(vc_lte)(&q->vc, &p->vc)
                   && ! DRD_(vc_lte)(&p->vc, &q->vc))
               {
                  if (s_trace_conflict_set)
                  {
                     char* str;

                     str = DRD_(vc_aprint)(&q->vc);
                     VG_(message)(Vg_DebugMsg,
                                  "conflict set: [%d] merging segment %s\n",
                                  j, str);
                     VG_(free)(str);
                  }
                  DRD_(bm_merge2)(*conflict_set, DRD_(sg_bm)(q));
               }
               else
               {
                  if (s_trace_conflict_set)
                  {
                     char* str;

                     str = DRD_(vc_aprint)(&q->vc);
                     VG_(message)(Vg_DebugMsg,
                                  "conflict set: [%d] ignoring segment %s\n",
                                  j, str);
                     VG_(free)(str);
                  }
               }
            }
         }
      }
   }

   /* See the matching subtraction at the top of this function. */
   s_conflict_set_bitmap_creation_count
      += DRD_(bm_get_bitmap_creation_count)();
   s_conflict_set_bitmap2_creation_count
      += DRD_(bm_get_bitmap2_creation_count)();

   if (s_trace_conflict_set_bm)
   {
      VG_(message)(Vg_DebugMsg, "[%d] new conflict set:\n", tid);
      DRD_(bm_print)(*conflict_set);
      VG_(message)(Vg_DebugMsg, "[%d] end of new conflict set.\n", tid);
   }
}
1487
1488/**
1489 * Update the conflict set after the vector clock of thread tid has been
1490 * updated from old_vc to its current value, either because a new segment has
1491 * been created or because of a synchronization operation.
1492 */
void DRD_(thread_update_conflict_set)(const DrdThreadId tid,
                                      const VectorClock* const old_vc)
{
   const VectorClock* new_vc;
   Segment* p;
   unsigned j;

   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(old_vc);
   /* Only the running thread's conflict set is maintained. */
   tl_assert(tid == DRD_(g_drd_running_tid));
   tl_assert(DRD_(g_conflict_set));

   if (s_trace_conflict_set) {
      char* str;

      str = DRD_(vc_aprint)(DRD_(thread_get_vc)(tid));
      VG_(message)(Vg_DebugMsg,
                   "updating conflict set for thread %d with vc %s\n",
                   tid, str);
      VG_(free)(str);
   }

   new_vc = DRD_(thread_get_vc)(tid);
   /* The clock must only have advanced, never moved backwards. */
   tl_assert(DRD_(vc_lte)(old_vc, new_vc));

   /*
    * Phase 1: mark the segments whose conflict-set membership changed when
    * the clock advanced from old_vc to new_vc.
    * NOTE(review): the exact semantics of bm_unmark/bm_mark/
    * bm_clear_marked/bm_merge2_marked/bm_remove_cleared_marked are defined
    * in drd_bitmap.c — the comments below describe only what is visible
    * here.
    */
   DRD_(bm_unmark)(DRD_(g_conflict_set));

   for (j = 0; j < DRD_N_THREADS; j++)
   {
      Segment* q;

      if (j == tid || ! DRD_(IsValidDrdThreadId)(j))
         continue;

      /*
       * First loop: segments of thread j not yet ordered before new_vc
       * (iteration runs from newest to oldest and stops at the first
       * segment with q->vc <= new_vc).
       */
      list_for_each_entry_reverse(q, &DRD_(g_threadinfo)[j].sg_list, thr_list) {
         Bool included_in_old_conflict_set, included_in_new_conflict_set;

         if (DRD_(vc_lte)(&q->vc, new_vc))
            break;

         /* q is not ordered after old_vc resp. new_vc => q conflicted. */
         included_in_old_conflict_set = !DRD_(vc_lte)(old_vc, &q->vc);
         included_in_new_conflict_set = !DRD_(vc_lte)(new_vc, &q->vc);

         if (UNLIKELY(s_trace_conflict_set)) {
            char* str;

            str = DRD_(vc_aprint)(&q->vc);
            VG_(message)(Vg_DebugMsg,
                         "conflict set: [%d] %s segment %s\n", j,
                         included_in_old_conflict_set
                         != included_in_new_conflict_set
                         ? "merging" : "ignoring", str);
            VG_(free)(str);
         }
         /* Mark segments whose membership changed. */
         if (included_in_old_conflict_set != included_in_new_conflict_set)
            DRD_(bm_mark)(DRD_(g_conflict_set), DRD_(sg_bm)(q));
      }

      /*
       * Second loop: continue from where the first loop stopped, covering
       * segments between new_vc and old_vc (stops at q->vc <= old_vc).
       * Here q <= new_vc already holds, so membership in the new conflict
       * set requires q to be unordered against new_vc.
       */
      list_for_each_entry_reverse_continue(q, &DRD_(g_threadinfo)[j].sg_list,
                                           thr_list) {
         Bool included_in_old_conflict_set, included_in_new_conflict_set;

         if (DRD_(vc_lte)(&q->vc, old_vc))
            break;

         included_in_old_conflict_set = !DRD_(vc_lte)(old_vc, &q->vc);
         included_in_new_conflict_set
            = !DRD_(vc_lte)(&q->vc, new_vc) && !DRD_(vc_lte)(new_vc, &q->vc);

         if (UNLIKELY(s_trace_conflict_set)) {
            char* str;

            str = DRD_(vc_aprint)(&q->vc);
            VG_(message)(Vg_DebugMsg,
                         "conflict set: [%d] %s segment %s\n", j,
                         included_in_old_conflict_set
                         != included_in_new_conflict_set
                         ? "merging" : "ignoring", str);
            VG_(free)(str);
         }
         if (included_in_old_conflict_set != included_in_new_conflict_set)
            DRD_(bm_mark)(DRD_(g_conflict_set), DRD_(sg_bm)(q));
      }
   }

   /* Phase 2: drop the accesses of all marked (changed) segments... */
   DRD_(bm_clear_marked)(DRD_(g_conflict_set));

   /*
    * ...and re-merge, for every other thread, the segments that are
    * unordered against the current segment p of thread tid.
    */
   p = list_last_entry(&DRD_(g_threadinfo)[tid].sg_list, Segment, thr_list);
   for (j = 0; j < DRD_N_THREADS; j++)
   {
      if (j != tid && DRD_(IsValidDrdThreadId)(j))
      {
         Segment* q;
         list_for_each_entry_reverse(q, &DRD_(g_threadinfo)[j].sg_list,
                                     thr_list) {
            if (DRD_(vc_lte)(&q->vc, &p->vc))
               break;
            if (!DRD_(vc_lte)(&p->vc, &q->vc))
               DRD_(bm_merge2_marked)(DRD_(g_conflict_set), DRD_(sg_bm)(q));
         }
      }
   }

   DRD_(bm_remove_cleared_marked)(DRD_(g_conflict_set));

   s_update_conflict_set_count++;

   if (s_trace_conflict_set_bm)
   {
      VG_(message)(Vg_DebugMsg, "[%d] updated conflict set:\n", tid);
      DRD_(bm_print)(DRD_(g_conflict_set));
      VG_(message)(Vg_DebugMsg, "[%d] end of updated conflict set.\n", tid);
   }

   tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));
}
1610
1611/** Report the number of context switches performed. */
1612ULong DRD_(thread_get_context_switch_count)(void)
1613{
1614   return s_context_switch_count;
1615}
1616
1617/** Report the number of ordered segments that have been discarded. */
1618ULong DRD_(thread_get_discard_ordered_segments_count)(void)
1619{
1620   return s_discard_ordered_segments_count;
1621}
1622
1623/** Return how many times the conflict set has been updated entirely. */
1624ULong DRD_(thread_get_compute_conflict_set_count)()
1625{
1626   return s_compute_conflict_set_count;
1627}
1628
1629/** Return how many times the conflict set has been updated partially. */
1630ULong DRD_(thread_get_update_conflict_set_count)(void)
1631{
1632   return s_update_conflict_set_count;
1633}
1634
1635/**
1636 * Return how many times the conflict set has been updated partially
1637 * because a new segment has been created.
1638 */
1639ULong DRD_(thread_get_update_conflict_set_new_sg_count)(void)
1640{
1641   return s_update_conflict_set_new_sg_count;
1642}
1643
1644/**
1645 * Return how many times the conflict set has been updated partially
1646 * because of combining vector clocks due to synchronization operations
1647 * other than reader/writer lock or barrier operations.
1648 */
1649ULong DRD_(thread_get_update_conflict_set_sync_count)(void)
1650{
1651   return s_update_conflict_set_sync_count;
1652}
1653
1654/**
1655 * Return how many times the conflict set has been updated partially
1656 * because of thread joins.
1657 */
1658ULong DRD_(thread_get_update_conflict_set_join_count)(void)
1659{
1660   return s_update_conflict_set_join_count;
1661}
1662
1663/**
1664 * Return the number of first-level bitmaps that have been created during
1665 * conflict set updates.
1666 */
1667ULong DRD_(thread_get_conflict_set_bitmap_creation_count)(void)
1668{
1669   return s_conflict_set_bitmap_creation_count;
1670}
1671
1672/**
1673 * Return the number of second-level bitmaps that have been created during
1674 * conflict set updates.
1675 */
1676ULong DRD_(thread_get_conflict_set_bitmap2_creation_count)(void)
1677{
1678   return s_conflict_set_bitmap2_creation_count;
1679}
1680