drd_thread.c revision ae37e6d4c9ca1f400ef4c1636319be60f969b2af
1/*
2  This file is part of drd, a thread error detector.
3
4  Copyright (C) 2006-2011 Bart Van Assche <bvanassche@acm.org>.
5
6  This program is free software; you can redistribute it and/or
7  modify it under the terms of the GNU General Public License as
8  published by the Free Software Foundation; either version 2 of the
9  License, or (at your option) any later version.
10
11  This program is distributed in the hope that it will be useful, but
12  WITHOUT ANY WARRANTY; without even the implied warranty of
13  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  General Public License for more details.
15
16  You should have received a copy of the GNU General Public License
17  along with this program; if not, write to the Free Software
18  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
19  02111-1307, USA.
20
21  The GNU General Public License is contained in the file COPYING.
22*/
23
24
25#include "drd_error.h"
26#include "drd_barrier.h"
27#include "drd_clientobj.h"
28#include "drd_cond.h"
29#include "drd_mutex.h"
30#include "drd_segment.h"
31#include "drd_semaphore.h"
32#include "drd_suppression.h"
33#include "drd_thread.h"
34#include "pub_tool_vki.h"
35#include "pub_tool_basics.h"      // Addr, SizeT
36#include "pub_tool_libcassert.h"  // tl_assert()
37#include "pub_tool_libcbase.h"    // VG_(strlen)()
38#include "pub_tool_libcprint.h"   // VG_(printf)()
39#include "pub_tool_libcproc.h"    // VG_(getenv)()
40#include "pub_tool_machine.h"
41#include "pub_tool_mallocfree.h"  // VG_(malloc)(), VG_(free)()
42#include "pub_tool_options.h"     // VG_(clo_backtrace_size)
43#include "pub_tool_threadstate.h" // VG_(get_pthread_id)()
44
45
46
47/* Local functions. */
48
49static void thread_append_segment(const DrdThreadId tid, Segment* const sg);
50static void thread_discard_segment(const DrdThreadId tid, Segment* const sg);
51static void thread_compute_conflict_set(struct bitmap** conflict_set,
52                                        const DrdThreadId tid);
53static Bool thread_conflict_set_up_to_date(const DrdThreadId tid);
54
55
56/* Local variables. */
57
58static ULong    s_context_switch_count;
59static ULong    s_discard_ordered_segments_count;
60static ULong    s_compute_conflict_set_count;
61static ULong    s_update_conflict_set_count;
62static ULong    s_update_conflict_set_new_sg_count;
63static ULong    s_update_conflict_set_sync_count;
64static ULong    s_update_conflict_set_join_count;
65static ULong    s_conflict_set_bitmap_creation_count;
66static ULong    s_conflict_set_bitmap2_creation_count;
67static ThreadId s_vg_running_tid  = VG_INVALID_THREADID;
68DrdThreadId     DRD_(g_drd_running_tid) = DRD_INVALID_THREADID;
69ThreadInfo      DRD_(g_threadinfo)[DRD_N_THREADS];
70struct bitmap*  DRD_(g_conflict_set);
71static Bool     s_trace_context_switches = False;
72static Bool     s_trace_conflict_set = False;
73static Bool     s_trace_conflict_set_bm = False;
74static Bool     s_trace_fork_join = False;
75static Bool     s_segment_merging = True;
76static Bool     s_new_segments_since_last_merge;
77static int      s_segment_merge_interval = 10;
78static unsigned s_join_list_vol = 10;
79static unsigned s_deletion_head;
80static unsigned s_deletion_tail;
81
82
83/* Function definitions. */
84
85/** Enables/disables context switch tracing. */
86void DRD_(thread_trace_context_switches)(const Bool t)
87{
88   tl_assert(t == False || t == True);
89   s_trace_context_switches = t;
90}
91
92/** Enables/disables conflict set tracing. */
93void DRD_(thread_trace_conflict_set)(const Bool t)
94{
95   tl_assert(t == False || t == True);
96   s_trace_conflict_set = t;
97}
98
99/** Enables/disables conflict set bitmap tracing. */
100void DRD_(thread_trace_conflict_set_bm)(const Bool t)
101{
102   tl_assert(t == False || t == True);
103   s_trace_conflict_set_bm = t;
104}
105
106/** Report whether fork/join tracing is enabled. */
107Bool DRD_(thread_get_trace_fork_join)(void)
108{
109   return s_trace_fork_join;
110}
111
112/** Enables/disables fork/join tracing. */
113void DRD_(thread_set_trace_fork_join)(const Bool t)
114{
115   tl_assert(t == False || t == True);
116   s_trace_fork_join = t;
117}
118
119/** Enables/disables segment merging. */
120void DRD_(thread_set_segment_merging)(const Bool m)
121{
122   tl_assert(m == False || m == True);
123   s_segment_merging = m;
124}
125
126/** Get the segment merging interval. */
127int DRD_(thread_get_segment_merge_interval)(void)
128{
129   return s_segment_merge_interval;
130}
131
132/** Set the segment merging interval. */
133void DRD_(thread_set_segment_merge_interval)(const int i)
134{
135   s_segment_merge_interval = i;
136}
137
138void DRD_(thread_set_join_list_vol)(const int jlv)
139{
140   s_join_list_vol = jlv;
141}
142
143/**
144 * Convert Valgrind's ThreadId into a DrdThreadId.
145 *
146 * @return DRD thread ID upon success and DRD_INVALID_THREADID if the passed
147 *         Valgrind ThreadId does not yet exist.
148 */
149DrdThreadId DRD_(VgThreadIdToDrdThreadId)(const ThreadId tid)
150{
151   int i;
152
153   if (tid == VG_INVALID_THREADID)
154      return DRD_INVALID_THREADID;
155
156   for (i = 1; i < DRD_N_THREADS; i++)
157   {
158      if (DRD_(g_threadinfo)[i].vg_thread_exists == True
159          && DRD_(g_threadinfo)[i].vg_threadid == tid)
160      {
161         return i;
162      }
163   }
164
165   return DRD_INVALID_THREADID;
166}
167
168/** Allocate a new DRD thread ID for the specified Valgrind thread ID. */
169static DrdThreadId DRD_(VgThreadIdToNewDrdThreadId)(const ThreadId tid)
170{
171   int i;
172
173   tl_assert(DRD_(VgThreadIdToDrdThreadId)(tid) == DRD_INVALID_THREADID);
174
175   for (i = 1; i < DRD_N_THREADS; i++)
176   {
177      if (!DRD_(g_threadinfo)[i].valid)
178      {
179         tl_assert(! DRD_(IsValidDrdThreadId)(i));
180
181         DRD_(g_threadinfo)[i].valid         = True;
182         DRD_(g_threadinfo)[i].vg_thread_exists = True;
183         DRD_(g_threadinfo)[i].vg_threadid   = tid;
184         DRD_(g_threadinfo)[i].pt_threadid   = INVALID_POSIX_THREADID;
185         DRD_(g_threadinfo)[i].stack_min     = 0;
186         DRD_(g_threadinfo)[i].stack_min_min = 0;
187         DRD_(g_threadinfo)[i].stack_startup = 0;
188         DRD_(g_threadinfo)[i].stack_max     = 0;
189         DRD_(thread_set_name)(i, "");
190         DRD_(g_threadinfo)[i].on_alt_stack        = False;
191         DRD_(g_threadinfo)[i].is_recording_loads  = True;
192         DRD_(g_threadinfo)[i].is_recording_stores = True;
193         DRD_(g_threadinfo)[i].pthread_create_nesting_level = 0;
194         DRD_(g_threadinfo)[i].synchr_nesting = 0;
195         DRD_(g_threadinfo)[i].deletion_seq = s_deletion_tail - 1;
196         tl_assert(DRD_(g_threadinfo)[i].first == 0);
197         tl_assert(DRD_(g_threadinfo)[i].last == 0);
198
199         tl_assert(DRD_(IsValidDrdThreadId)(i));
200
201         return i;
202      }
203   }
204
205   VG_(printf)(
206"\nSorry, but the maximum number of threads supported by DRD has been exceeded."
207"Aborting.\n");
208
209   tl_assert(False);
210
211   return DRD_INVALID_THREADID;
212}
213
214/** Convert a POSIX thread ID into a DRD thread ID. */
215DrdThreadId DRD_(PtThreadIdToDrdThreadId)(const PThreadId tid)
216{
217   int i;
218
219   if (tid != INVALID_POSIX_THREADID)
220   {
221      for (i = 1; i < DRD_N_THREADS; i++)
222      {
223         if (DRD_(g_threadinfo)[i].posix_thread_exists
224             && DRD_(g_threadinfo)[i].pt_threadid == tid)
225         {
226            return i;
227         }
228      }
229   }
230   return DRD_INVALID_THREADID;
231}
232
233/** Convert a DRD thread ID into a Valgrind thread ID. */
234ThreadId DRD_(DrdThreadIdToVgThreadId)(const DrdThreadId tid)
235{
236   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
237             && tid != DRD_INVALID_THREADID);
238
239   return (DRD_(g_threadinfo)[tid].vg_thread_exists
240           ? DRD_(g_threadinfo)[tid].vg_threadid
241           : VG_INVALID_THREADID);
242}
243
244#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
245/**
246 * Sanity check of the doubly linked list of segments referenced by a
247 * ThreadInfo struct.
248 * @return True if sane, False if not.
249 */
250static Bool DRD_(sane_ThreadInfo)(const ThreadInfo* const ti)
251{
252   Segment* p;
253
254   for (p = ti->first; p; p = p->next) {
255      if (p->next && p->next->prev != p)
256         return False;
257      if (p->next == 0 && p != ti->last)
258         return False;
259   }
260   for (p = ti->last; p; p = p->prev) {
261      if (p->prev && p->prev->next != p)
262         return False;
263      if (p->prev == 0 && p != ti->first)
264         return False;
265   }
266   return True;
267}
268#endif
269
270/**
271 * Create the first segment for a newly started thread.
272 *
273 * This function is called from the handler installed via
274 * VG_(track_pre_thread_ll_create)(). The Valgrind core invokes this handler
275 * from the context of the creator thread, before the new thread has been
276 * created.
277 *
278 * @param[in] creator    DRD thread ID of the creator thread.
279 * @param[in] vg_created Valgrind thread ID of the created thread.
280 *
281 * @return DRD thread ID of the created thread.
282 */
283DrdThreadId DRD_(thread_pre_create)(const DrdThreadId creator,
284                                    const ThreadId vg_created)
285{
286   DrdThreadId created;
287
288   tl_assert(DRD_(VgThreadIdToDrdThreadId)(vg_created) == DRD_INVALID_THREADID);
289   created = DRD_(VgThreadIdToNewDrdThreadId)(vg_created);
290   tl_assert(0 <= (int)created && created < DRD_N_THREADS
291             && created != DRD_INVALID_THREADID);
292
293   tl_assert(DRD_(g_threadinfo)[created].first == 0);
294   tl_assert(DRD_(g_threadinfo)[created].last == 0);
295   /* Create an initial segment for the newly created thread. */
296   thread_append_segment(created, DRD_(sg_new)(creator, created));
297
298   return created;
299}
300
301/**
302 * Initialize DRD_(g_threadinfo)[] for a newly created thread. Must be called
303 * after the thread has been created and before any client instructions are run
304 * on the newly created thread, e.g. from the handler installed via
305 * VG_(track_pre_thread_first_insn)().
306 *
307 * @param[in] vg_created Valgrind thread ID of the newly created thread.
308 *
309 * @return DRD thread ID for the new thread.
310 */
311DrdThreadId DRD_(thread_post_create)(const ThreadId vg_created)
312{
313   const DrdThreadId created = DRD_(VgThreadIdToDrdThreadId)(vg_created);
314
315   tl_assert(0 <= (int)created && created < DRD_N_THREADS
316             && created != DRD_INVALID_THREADID);
317
318   DRD_(g_threadinfo)[created].stack_max
319      = VG_(thread_get_stack_max)(vg_created);
320   DRD_(g_threadinfo)[created].stack_startup
321      = DRD_(g_threadinfo)[created].stack_max;
322   DRD_(g_threadinfo)[created].stack_min
323      = DRD_(g_threadinfo)[created].stack_max;
324   DRD_(g_threadinfo)[created].stack_min_min
325      = DRD_(g_threadinfo)[created].stack_max;
326   DRD_(g_threadinfo)[created].stack_size
327      = VG_(thread_get_stack_size)(vg_created);
328   tl_assert(DRD_(g_threadinfo)[created].stack_max != 0);
329
330   return created;
331}
332
333static void DRD_(thread_delayed_delete)(const DrdThreadId tid)
334{
335   int j;
336
337   DRD_(g_threadinfo)[tid].vg_thread_exists = False;
338   DRD_(g_threadinfo)[tid].posix_thread_exists = False;
339   DRD_(g_threadinfo)[tid].deletion_seq = s_deletion_head++;
340#if 0
341   VG_(message)(Vg_DebugMsg, "Adding thread %d to the deletion list\n", tid);
342#endif
343   if (s_deletion_head - s_deletion_tail >= s_join_list_vol) {
344      for (j = 0; j < DRD_N_THREADS; ++j) {
345         if (DRD_(IsValidDrdThreadId)(j)
346             && DRD_(g_threadinfo)[j].deletion_seq == s_deletion_tail)
347         {
348            s_deletion_tail++;
349#if 0
350            VG_(message)(Vg_DebugMsg, "Delayed delete of thread %d\n", j);
351#endif
352            DRD_(thread_delete)(j, False);
353            break;
354         }
355      }
356   }
357}
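
/*
 * Example: with s_join_list_vol == 10, a joined thread's information is kept
 * on the deletion list until the list holds ten entries; at that point the
 * oldest entry (the thread whose deletion_seq equals s_deletion_tail) is
 * deleted. This keeps information about recently joined threads available
 * for a while, e.g. for producing meaningful error reports.
 */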
358
359/**
360 * Process VG_USERREQ__POST_THREAD_JOIN. This client request is invoked just
361 * after thread drd_joiner joined thread drd_joinee.
362 */
363void DRD_(thread_post_join)(DrdThreadId drd_joiner, DrdThreadId drd_joinee)
364{
365   tl_assert(DRD_(IsValidDrdThreadId)(drd_joiner));
366   tl_assert(DRD_(IsValidDrdThreadId)(drd_joinee));
367
368   DRD_(thread_new_segment)(drd_joiner);
369   DRD_(thread_combine_vc_join)(drd_joiner, drd_joinee);
370   DRD_(thread_new_segment)(drd_joinee);
371
372   if (s_trace_fork_join)
373   {
374      const ThreadId joiner = DRD_(DrdThreadIdToVgThreadId)(drd_joiner);
375      const unsigned msg_size = 256;
376      char* msg;
377
378      msg = VG_(malloc)("drd.main.dptj.1", msg_size);
379      tl_assert(msg);
380      VG_(snprintf)(msg, msg_size,
381                    "drd_post_thread_join joiner = %d, joinee = %d",
382                    drd_joiner, drd_joinee);
383      if (joiner)
384      {
385         char* vc;
386
387         vc = DRD_(vc_aprint)(DRD_(thread_get_vc)(drd_joiner));
388         VG_(snprintf)(msg + VG_(strlen)(msg), msg_size - VG_(strlen)(msg),
389                       ", new vc: %s", vc);
390         VG_(free)(vc);
391      }
392      DRD_(trace_msg)("%pS", msg);
393      VG_(free)(msg);
394   }
395
396   if (! DRD_(get_check_stack_accesses)())
397   {
398      DRD_(finish_suppression)(DRD_(thread_get_stack_max)(drd_joinee)
399                               - DRD_(thread_get_stack_size)(drd_joinee),
400                               DRD_(thread_get_stack_max)(drd_joinee));
401   }
402   DRD_(clientobj_delete_thread)(drd_joinee);
403   DRD_(thread_delayed_delete)(drd_joinee);
404}
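
/*
 * Illustrative sketch of the effect of a join on the vector clocks, assuming
 * DRD_(vc_combine)() computes the element-wise maximum (the usual vector
 * clock join operation):
 *
 *   joiner vc before the join: [ 1: 4, 2: 0 ]
 *   joinee vc at termination:  [ 1: 1, 2: 7 ]
 *   joiner vc after the join:  [ 1: 4, 2: 7 ]
 *
 * Together with the new segments created above, this orders all accesses the
 * joiner performs after the join behind all accesses of the joinee.
 */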
405
406/**
407 * NPTL hack: NPTL allocates the 'struct pthread' on top of the stack,
408 * and accesses this data structure from multiple threads without locking.
409 * Any conflicting accesses in the range stack_startup..stack_max will be
410 * ignored.
411 */
412void DRD_(thread_set_stack_startup)(const DrdThreadId tid,
413                                    const Addr stack_startup)
414{
415   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
416             && tid != DRD_INVALID_THREADID);
417   tl_assert(DRD_(g_threadinfo)[tid].stack_min <= stack_startup);
418   tl_assert(stack_startup <= DRD_(g_threadinfo)[tid].stack_max);
419   DRD_(g_threadinfo)[tid].stack_startup = stack_startup;
420}
421
422/** Return the lowest in-use stack address (current stack pointer) of the specified thread. */
423Addr DRD_(thread_get_stack_min)(const DrdThreadId tid)
424{
425   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
426             && tid != DRD_INVALID_THREADID);
427   return DRD_(g_threadinfo)[tid].stack_min;
428}
429
430/**
431 * Return the lowest value that was ever assigned to the stack pointer
432 * for the specified thread.
433 */
434Addr DRD_(thread_get_stack_min_min)(const DrdThreadId tid)
435{
436   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
437             && tid != DRD_INVALID_THREADID);
438   return DRD_(g_threadinfo)[tid].stack_min_min;
439}
440
441/** Return the top address for the stack of the specified thread. */
442Addr DRD_(thread_get_stack_max)(const DrdThreadId tid)
443{
444   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
445             && tid != DRD_INVALID_THREADID);
446   return DRD_(g_threadinfo)[tid].stack_max;
447}
448
449/** Return the maximum stack size for the specified thread. */
450SizeT DRD_(thread_get_stack_size)(const DrdThreadId tid)
451{
452   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
453             && tid != DRD_INVALID_THREADID);
454   return DRD_(g_threadinfo)[tid].stack_size;
455}
456
457Bool DRD_(thread_get_on_alt_stack)(const DrdThreadId tid)
458{
459   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
460             && tid != DRD_INVALID_THREADID);
461   return DRD_(g_threadinfo)[tid].on_alt_stack;
462}
463
464void DRD_(thread_set_on_alt_stack)(const DrdThreadId tid,
465                                   const Bool on_alt_stack)
466{
467   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
468             && tid != DRD_INVALID_THREADID);
469   tl_assert(on_alt_stack == !!on_alt_stack);
470   DRD_(g_threadinfo)[tid].on_alt_stack = on_alt_stack;
471}
472
473Int DRD_(thread_get_threads_on_alt_stack)(void)
474{
475   int i, n = 0;
476
477   for (i = 1; i < DRD_N_THREADS; i++)
478      n += DRD_(g_threadinfo)[i].on_alt_stack;
479   return n;
480}
481
482/**
483 * Clean up thread-specific data structures.
484 */
485void DRD_(thread_delete)(const DrdThreadId tid, const Bool detached)
486{
487   Segment* sg;
488   Segment* sg_prev;
489
490   tl_assert(DRD_(IsValidDrdThreadId)(tid));
491
492   tl_assert(DRD_(g_threadinfo)[tid].synchr_nesting >= 0);
493   for (sg = DRD_(g_threadinfo)[tid].last; sg; sg = sg_prev)
494   {
495      sg_prev = sg->prev;
496      sg->prev = 0;
497      sg->next = 0;
498      DRD_(sg_put)(sg);
499   }
500   DRD_(g_threadinfo)[tid].valid = False;
501   DRD_(g_threadinfo)[tid].vg_thread_exists = False;
502   DRD_(g_threadinfo)[tid].posix_thread_exists = False;
503   if (detached)
504      DRD_(g_threadinfo)[tid].detached_posix_thread = False;
505   else
506      tl_assert(!DRD_(g_threadinfo)[tid].detached_posix_thread);
507   DRD_(g_threadinfo)[tid].first = 0;
508   DRD_(g_threadinfo)[tid].last = 0;
509
510   tl_assert(! DRD_(IsValidDrdThreadId)(tid));
511}
512
513/**
514 * Called after a thread performed its last memory access and before
515 * thread_delete() is called. Note: thread_delete() is only called for
516 * joinable threads, not for detached threads.
517 */
518void DRD_(thread_finished)(const DrdThreadId tid)
519{
520   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
521             && tid != DRD_INVALID_THREADID);
522
523   DRD_(g_threadinfo)[tid].vg_thread_exists = False;
524
525   if (DRD_(g_threadinfo)[tid].detached_posix_thread)
526   {
527      /*
528       * Once a detached thread has finished, its stack is deallocated and
529       * should no longer be taken into account when computing the conflict set.
530       */
531      DRD_(g_threadinfo)[tid].stack_min = DRD_(g_threadinfo)[tid].stack_max;
532
533      /*
534       * For a detached thread, calling pthread_exit() invalidates the
535       * POSIX thread ID associated with the detached thread. For joinable
536       * POSIX threads however, the POSIX thread ID remains live after the
537       * pthread_exit() call until pthread_join() is called.
538       */
539      DRD_(g_threadinfo)[tid].posix_thread_exists = False;
540   }
541}
542
543/** Called just after fork() in the child process. */
544void DRD_(drd_thread_atfork_child)(const DrdThreadId tid)
545{
546   unsigned i;
547
548   for (i = 1; i < DRD_N_THREADS; i++)
549   {
550      if (i == tid)
551         continue;
552      if (DRD_(IsValidDrdThreadId(i)))
553         DRD_(thread_delete)(i, True);
554      tl_assert(!DRD_(IsValidDrdThreadId(i)));
555   }
556}
557
558/** Called just before pthread_cancel(). */
559void DRD_(thread_pre_cancel)(const DrdThreadId tid)
560{
561   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
562             && tid != DRD_INVALID_THREADID);
563   tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);
564
565   if (DRD_(thread_get_trace_fork_join)())
566      DRD_(trace_msg)("[%d] drd_thread_pre_cancel %d",
567                      DRD_(g_drd_running_tid), tid);
568}
569
570/**
571 * Store the POSIX thread ID for the specified thread.
572 *
573 * @note This function can be called twice for the same thread -- see also
574 * the comment block preceding the pthread_create() wrapper in
575 * drd_pthread_intercepts.c.
576 */
577void DRD_(thread_set_pthreadid)(const DrdThreadId tid, const PThreadId ptid)
578{
579   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
580             && tid != DRD_INVALID_THREADID);
581   tl_assert(DRD_(g_threadinfo)[tid].pt_threadid == INVALID_POSIX_THREADID
582             || DRD_(g_threadinfo)[tid].pt_threadid == ptid);
583   tl_assert(ptid != INVALID_POSIX_THREADID);
584   DRD_(g_threadinfo)[tid].posix_thread_exists = True;
585   DRD_(g_threadinfo)[tid].pt_threadid         = ptid;
586}
587
588/** Returns true for joinable threads and false for detached threads. */
589Bool DRD_(thread_get_joinable)(const DrdThreadId tid)
590{
591   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
592             && tid != DRD_INVALID_THREADID);
593   return ! DRD_(g_threadinfo)[tid].detached_posix_thread;
594}
595
596/** Store the thread mode: joinable or detached. */
597void DRD_(thread_set_joinable)(const DrdThreadId tid, const Bool joinable)
598{
599   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
600             && tid != DRD_INVALID_THREADID);
601   tl_assert(!! joinable == joinable);
602   tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);
603
604   DRD_(g_threadinfo)[tid].detached_posix_thread = ! joinable;
605}
606
607/** Tells DRD that the calling thread is about to enter pthread_create(). */
608void DRD_(thread_entering_pthread_create)(const DrdThreadId tid)
609{
610   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
611             && tid != DRD_INVALID_THREADID);
612   tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);
613   tl_assert(DRD_(g_threadinfo)[tid].pthread_create_nesting_level >= 0);
614
615   DRD_(g_threadinfo)[tid].pthread_create_nesting_level++;
616}
617
618/** Tells DRD that the calling thread has left pthread_create(). */
619void DRD_(thread_left_pthread_create)(const DrdThreadId tid)
620{
621   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
622             && tid != DRD_INVALID_THREADID);
623   tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);
624   tl_assert(DRD_(g_threadinfo)[tid].pthread_create_nesting_level > 0);
625
626   DRD_(g_threadinfo)[tid].pthread_create_nesting_level--;
627}
628
629/** Obtain the thread number and the user-assigned thread name. */
630const char* DRD_(thread_get_name)(const DrdThreadId tid)
631{
632   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
633             && tid != DRD_INVALID_THREADID);
634
635   return DRD_(g_threadinfo)[tid].name;
636}
637
638/** Set the name of the specified thread. */
639void DRD_(thread_set_name)(const DrdThreadId tid, const char* const name)
640{
641   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
642             && tid != DRD_INVALID_THREADID);
643
644   if (name == NULL || name[0] == 0)
645      VG_(snprintf)(DRD_(g_threadinfo)[tid].name,
646                    sizeof(DRD_(g_threadinfo)[tid].name),
647                    "Thread %d",
648                    tid);
649   else
650      VG_(snprintf)(DRD_(g_threadinfo)[tid].name,
651                    sizeof(DRD_(g_threadinfo)[tid].name),
652                    "Thread %d (%s)",
653                    tid, name);
654   DRD_(g_threadinfo)[tid].name[sizeof(DRD_(g_threadinfo)[tid].name) - 1] = 0;
655}
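
/*
 * Example (follows from the format strings above): for tid == 2 the stored
 * name becomes "Thread 2" when no name was assigned and "Thread 2 (worker)"
 * when the client assigned the name "worker".
 */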
656
657/**
658 * Update s_vg_running_tid, DRD_(g_drd_running_tid) and recalculate the
659 * conflict set.
660 */
661void DRD_(thread_set_vg_running_tid)(const ThreadId vg_tid)
662{
663   tl_assert(vg_tid != VG_INVALID_THREADID);
664
665   if (vg_tid != s_vg_running_tid)
666   {
667      DRD_(thread_set_running_tid)(vg_tid,
668                                   DRD_(VgThreadIdToDrdThreadId)(vg_tid));
669   }
670
671   tl_assert(s_vg_running_tid != VG_INVALID_THREADID);
672   tl_assert(DRD_(g_drd_running_tid) != DRD_INVALID_THREADID);
673}
674
675/**
676 * Update s_vg_running_tid, DRD_(g_drd_running_tid) and recalculate the
677 * conflict set.
678 */
679void DRD_(thread_set_running_tid)(const ThreadId vg_tid,
680                                  const DrdThreadId drd_tid)
681{
682   tl_assert(vg_tid != VG_INVALID_THREADID);
683   tl_assert(drd_tid != DRD_INVALID_THREADID);
684
685   if (vg_tid != s_vg_running_tid)
686   {
687      if (s_trace_context_switches
688          && DRD_(g_drd_running_tid) != DRD_INVALID_THREADID)
689      {
690         VG_(message)(Vg_DebugMsg,
691                      "Context switch from thread %d to thread %d;"
692                      " segments: %llu\n",
693                      DRD_(g_drd_running_tid), drd_tid,
694                      DRD_(sg_get_segments_alive_count)());
695      }
696      s_vg_running_tid = vg_tid;
697      DRD_(g_drd_running_tid) = drd_tid;
698      thread_compute_conflict_set(&DRD_(g_conflict_set), drd_tid);
699      s_context_switch_count++;
700   }
701
702   tl_assert(s_vg_running_tid != VG_INVALID_THREADID);
703   tl_assert(DRD_(g_drd_running_tid) != DRD_INVALID_THREADID);
704}
705
706/**
707 * Increase the synchronization nesting counter. Must be called before the
708 * client calls a synchronization function.
709 */
710int DRD_(thread_enter_synchr)(const DrdThreadId tid)
711{
712   tl_assert(DRD_(IsValidDrdThreadId)(tid));
713   return DRD_(g_threadinfo)[tid].synchr_nesting++;
714}
715
716/**
717 * Decrease the synchronization nesting counter. Must be called after the
718 * client has left a synchronization function.
719 */
720int DRD_(thread_leave_synchr)(const DrdThreadId tid)
721{
722   tl_assert(DRD_(IsValidDrdThreadId)(tid));
723   tl_assert(DRD_(g_threadinfo)[tid].synchr_nesting >= 1);
724   return --DRD_(g_threadinfo)[tid].synchr_nesting;
725}
726
727/** Returns the synchronization nesting counter. */
728int DRD_(thread_get_synchr_nesting_count)(const DrdThreadId tid)
729{
730   tl_assert(DRD_(IsValidDrdThreadId)(tid));
731   return DRD_(g_threadinfo)[tid].synchr_nesting;
732}
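
/*
 * Usage sketch (hypothetical caller): the nesting counter is meant to bracket
 * the processing of an intercepted synchronization call, e.g.
 *
 *   DRD_(thread_enter_synchr)(drd_tid);
 *   ... handle the client's pthread_mutex_lock() call ...
 *   DRD_(thread_leave_synchr)(drd_tid);
 *
 * A nonzero count indicates that the thread is currently inside a
 * synchronization primitive, which is typically used to treat the memory
 * accesses performed by the primitive itself differently from ordinary
 * client accesses.
 */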
733
734/** Append a new segment at the end of the segment list. */
735static
736void thread_append_segment(const DrdThreadId tid, Segment* const sg)
737{
738   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
739             && tid != DRD_INVALID_THREADID);
740
741#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
742   tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
743#endif
744
745   sg->prev = DRD_(g_threadinfo)[tid].last;
746   sg->next = 0;
747   if (DRD_(g_threadinfo)[tid].last)
748      DRD_(g_threadinfo)[tid].last->next = sg;
749   DRD_(g_threadinfo)[tid].last = sg;
750   if (DRD_(g_threadinfo)[tid].first == 0)
751      DRD_(g_threadinfo)[tid].first = sg;
752
753#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
754   tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
755#endif
756}
757
758/**
759 * Remove a segment from the segment list of thread 'tid', and free the
760 * associated memory.
761 */
762static
763void thread_discard_segment(const DrdThreadId tid, Segment* const sg)
764{
765   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
766             && tid != DRD_INVALID_THREADID);
767
768#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
769   tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
770#endif
771
772   if (sg->prev)
773      sg->prev->next = sg->next;
774   if (sg->next)
775      sg->next->prev = sg->prev;
776   if (sg == DRD_(g_threadinfo)[tid].first)
777      DRD_(g_threadinfo)[tid].first = sg->next;
778   if (sg == DRD_(g_threadinfo)[tid].last)
779      DRD_(g_threadinfo)[tid].last = sg->prev;
780   DRD_(sg_put)(sg);
781
782#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
783   tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
784#endif
785}
786
787/**
788 * Returns a pointer to the vector clock of the most recent segment associated
789 * with thread 'tid'.
790 */
791VectorClock* DRD_(thread_get_vc)(const DrdThreadId tid)
792{
793   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
794             && tid != DRD_INVALID_THREADID);
795   tl_assert(DRD_(g_threadinfo)[tid].last);
796   return &DRD_(g_threadinfo)[tid].last->vc;
797}
798
799/**
800 * Return the latest segment of thread 'tid' and increment its reference count.
801 */
802void DRD_(thread_get_latest_segment)(Segment** sg, const DrdThreadId tid)
803{
804   tl_assert(sg);
805   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
806             && tid != DRD_INVALID_THREADID);
807   tl_assert(DRD_(g_threadinfo)[tid].last);
808
809   DRD_(sg_put)(*sg);
810   *sg = DRD_(sg_get)(DRD_(g_threadinfo)[tid].last);
811}
812
813/**
814 * Compute the minimum of all latest vector clocks of all threads
815 * (Michiel Ronsse calls this "clock snooping" in his papers about DIOTA).
816 *
817 * @param vc pointer to a vectorclock, holds result upon return.
818 */
819static void DRD_(thread_compute_minimum_vc)(VectorClock* vc)
820{
821   unsigned i;
822   Bool first;
823   Segment* latest_sg;
824
825   first = True;
826   for (i = 0; i < DRD_N_THREADS; i++)
827   {
828      latest_sg = DRD_(g_threadinfo)[i].last;
829      if (latest_sg)
830      {
831         if (first)
832            DRD_(vc_assign)(vc, &latest_sg->vc);
833         else
834            DRD_(vc_min)(vc, &latest_sg->vc);
835         first = False;
836      }
837   }
838}
839
840/**
841 * Compute the maximum of all latest vector clocks of all threads.
842 *
843 * @param vc pointer to a vectorclock, holds result upon return.
844 */
845static void DRD_(thread_compute_maximum_vc)(VectorClock* vc)
846{
847   unsigned i;
848   Bool first;
849   Segment* latest_sg;
850
851   first = True;
852   for (i = 0; i < DRD_N_THREADS; i++)
853   {
854      latest_sg = DRD_(g_threadinfo)[i].last;
855      if (latest_sg)
856      {
857         if (first)
858            DRD_(vc_assign)(vc, &latest_sg->vc);
859         else
860            DRD_(vc_combine)(vc, &latest_sg->vc);
861         first = False;
862      }
863   }
864}
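
/*
 * Worked example for the two functions above, assuming DRD_(vc_min)() and
 * DRD_(vc_combine)() compute the element-wise minimum and maximum: for latest
 * vector clocks [ 1: 5, 2: 2 ] and [ 1: 3, 2: 8 ] the minimum is
 * [ 1: 3, 2: 2 ] and the maximum is [ 1: 5, 2: 8 ].
 */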
865
866/**
867 * Discard all segments whose vector clock is ordered before the minimum of
868 * the latest vector clocks of all threads -- such segments can no longer be
869 * involved in a data race.
870 */
871static void thread_discard_ordered_segments(void)
872{
873   unsigned i;
874   VectorClock thread_vc_min;
875
876   s_discard_ordered_segments_count++;
877
878   DRD_(vc_init)(&thread_vc_min, 0, 0);
879   DRD_(thread_compute_minimum_vc)(&thread_vc_min);
880   if (DRD_(sg_get_trace)())
881   {
882      char *vc_min, *vc_max;
883      VectorClock thread_vc_max;
884
885      DRD_(vc_init)(&thread_vc_max, 0, 0);
886      DRD_(thread_compute_maximum_vc)(&thread_vc_max);
887      vc_min = DRD_(vc_aprint)(&thread_vc_min);
888      vc_max = DRD_(vc_aprint)(&thread_vc_max);
889      VG_(message)(Vg_DebugMsg,
890                   "Discarding ordered segments -- min vc is %s, max vc is %s\n",
891                   vc_min, vc_max);
892      VG_(free)(vc_min);
893      VG_(free)(vc_max);
894      DRD_(vc_cleanup)(&thread_vc_max);
895   }
896
897   for (i = 0; i < DRD_N_THREADS; i++)
898   {
899      Segment* sg;
900      Segment* sg_next;
901      for (sg = DRD_(g_threadinfo)[i].first;
902           sg && (sg_next = sg->next) && DRD_(vc_lte)(&sg->vc, &thread_vc_min);
903           sg = sg_next)
904      {
905         thread_discard_segment(i, sg);
906      }
907   }
908   DRD_(vc_cleanup)(&thread_vc_min);
909}
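
/*
 * Example: if the minimum vector clock computed above is [ 1: 3, 2: 2 ], a
 * segment with vector clock [ 1: 1, 2: 2 ] is ordered before every segment
 * that can still be created and may be discarded, whereas a segment with
 * vector clock [ 1: 4, 2: 1 ] must be kept because it is not <= the minimum.
 */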
910
911/**
912 * An implementation of the property 'equiv(sg1, sg2)' as defined in the paper
913 * by Mark Christiaens et al. The property equiv(sg1, sg2) holds if and only if
914 * all segments in the set CS are ordered consistently against both sg1 and
915 * sg2. The set CS is defined as the set of segments that can immediately
916 * precede future segments via inter-thread synchronization operations. In
917 * DRD the set CS consists of the latest segment of each thread combined with
918 * all segments for which the reference count is strictly greater than one.
919 * The code below is an optimized version of the following:
920 *
921 * for (i = 0; i < DRD_N_THREADS; i++)
922 * {
923 *    Segment* sg;
924 *
925 *    for (sg = DRD_(g_threadinfo)[i].first; sg; sg = sg->next)
926 *    {
927 *       if (sg == DRD_(g_threadinfo)[i].last || DRD_(sg_get_refcnt)(sg) > 1)
928 *       {
929 *          if (   DRD_(vc_lte)(&sg1->vc, &sg->vc)
930 *              != DRD_(vc_lte)(&sg2->vc, &sg->vc)
931 *              || DRD_(vc_lte)(&sg->vc, &sg1->vc)
932 *              != DRD_(vc_lte)(&sg->vc, &sg2->vc))
933 *          {
934 *             return False;
935 *          }
936 *       }
937 *    }
938 * }
939 */
940static Bool thread_consistent_segment_ordering(const DrdThreadId tid,
941                                               Segment* const sg1,
942                                               Segment* const sg2)
943{
944   unsigned i;
945
946   tl_assert(sg1->next);
947   tl_assert(sg2->next);
948   tl_assert(sg1->next == sg2);
949   tl_assert(DRD_(vc_lte)(&sg1->vc, &sg2->vc));
950
951   for (i = 0; i < DRD_N_THREADS; i++)
952   {
953      Segment* sg;
954
955      for (sg = DRD_(g_threadinfo)[i].first; sg; sg = sg->next)
956      {
957         if (! sg->next || DRD_(sg_get_refcnt)(sg) > 1)
958         {
959            if (DRD_(vc_lte)(&sg2->vc, &sg->vc))
960               break;
961            if (DRD_(vc_lte)(&sg1->vc, &sg->vc))
962               return False;
963         }
964      }
965      for (sg = DRD_(g_threadinfo)[i].last; sg; sg = sg->prev)
966      {
967         if (! sg->next || DRD_(sg_get_refcnt)(sg) > 1)
968         {
969            if (DRD_(vc_lte)(&sg->vc, &sg1->vc))
970               break;
971            if (DRD_(vc_lte)(&sg->vc, &sg2->vc))
972               return False;
973         }
974      }
975   }
976   return True;
977}
978
979/**
980 * Merge all segments that may be merged without triggering false positives
981 * or discarding real data races. For the theoretical background of segment
982 * merging, see also the following paper: Mark Christiaens, Michiel Ronsse
983 * and Koen De Bosschere. Bounding the number of segment histories during
984 * data race detection. Parallel Computing archive, Volume 28, Issue 9,
985 * pp 1221-1238, September 2002. This paper contains a proof that
986 * consecutive segments for which the property equiv(s1,s2) holds can be
987 * merged without reducing the accuracy of data race detection. Furthermore
988 * it is proven that the total number of all segments will never grow
989 * unbounded if all segments s1, s2 for which equiv(s1, s2) holds are merged
990 * every time a new segment is created. The property equiv(s1, s2) is defined
991 * as follows: equiv(s1, s2) <=> for all segments in the set CS, the vector
992 * clocks of segments s and s1 are ordered in the same way as those of segments
993 * s and s2. The set CS is defined as the set of existing segments s that have
994 * the potential to conflict with not yet created segments, either because the
995 * segment s is the latest segment of a thread or because it can become the
996 * immediate predecessor of a new segment due to a synchronization operation.
997 */
998static void thread_merge_segments(void)
999{
1000   unsigned i;
1001
1002   s_new_segments_since_last_merge = 0;
1003
1004   for (i = 0; i < DRD_N_THREADS; i++)
1005   {
1006      Segment* sg;
1007
1008#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
1009      tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[i]));
1010#endif
1011
1012      for (sg = DRD_(g_threadinfo)[i].first; sg; sg = sg->next)
1013      {
1014         if (DRD_(sg_get_refcnt)(sg) == 1
1015             && sg->next
1016             && DRD_(sg_get_refcnt)(sg->next) == 1
1017             && sg->next->next
1018             && thread_consistent_segment_ordering(i, sg, sg->next))
1019         {
1020            /* Merge sg and sg->next into sg. */
1021            DRD_(sg_merge)(sg, sg->next);
1022            thread_discard_segment(i, sg->next);
1023         }
1024      }
1025
1026#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
1027      tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[i]));
1028#endif
1029   }
1030}
1031
1032/**
1033 * Create a new segment for the specified thread, and discard any segments
1034 * that cannot cause races anymore.
1035 */
1036void DRD_(thread_new_segment)(const DrdThreadId tid)
1037{
1038   Segment* last_sg;
1039   Segment* new_sg;
1040
1041   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
1042             && tid != DRD_INVALID_THREADID);
1043   tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));
1044
1045   last_sg = DRD_(g_threadinfo)[tid].last;
1046   new_sg = DRD_(sg_new)(tid, tid);
1047   thread_append_segment(tid, new_sg);
1048   if (tid == DRD_(g_drd_running_tid) && last_sg)
1049   {
1050      DRD_(thread_update_conflict_set)(tid, &last_sg->vc);
1051      s_update_conflict_set_new_sg_count++;
1052   }
1053
1054   tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));
1055
1056   if (s_segment_merging
1057       && ++s_new_segments_since_last_merge >= s_segment_merge_interval)
1058   {
1059      thread_discard_ordered_segments();
1060      thread_merge_segments();
1061   }
1062}
1063
1064/** Call this function after thread 'joiner' joined thread 'joinee'. */
1065void DRD_(thread_combine_vc_join)(DrdThreadId joiner, DrdThreadId joinee)
1066{
1067   tl_assert(joiner != joinee);
1068   tl_assert(0 <= (int)joiner && joiner < DRD_N_THREADS
1069             && joiner != DRD_INVALID_THREADID);
1070   tl_assert(0 <= (int)joinee && joinee < DRD_N_THREADS
1071             && joinee != DRD_INVALID_THREADID);
1072   tl_assert(DRD_(g_threadinfo)[joiner].last);
1073   tl_assert(DRD_(g_threadinfo)[joinee].last);
1074
1075   if (DRD_(sg_get_trace)())
1076   {
1077      char *str1, *str2;
1078      str1 = DRD_(vc_aprint)(DRD_(thread_get_vc)(joiner));
1079      str2 = DRD_(vc_aprint)(DRD_(thread_get_vc)(joinee));
1080      VG_(message)(Vg_DebugMsg, "Before join: joiner %s, joinee %s\n",
1081                   str1, str2);
1082      VG_(free)(str1);
1083      VG_(free)(str2);
1084   }
1085   if (joiner == DRD_(g_drd_running_tid)) {
1086      VectorClock old_vc;
1087
1088      DRD_(vc_copy)(&old_vc, DRD_(thread_get_vc)(joiner));
1089      DRD_(vc_combine)(DRD_(thread_get_vc)(joiner),
1090                       DRD_(thread_get_vc)(joinee));
1091      DRD_(thread_update_conflict_set)(joiner, &old_vc);
1092      s_update_conflict_set_join_count++;
1093      DRD_(vc_cleanup)(&old_vc);
1094   } else {
1095      DRD_(vc_combine)(DRD_(thread_get_vc)(joiner),
1096                       DRD_(thread_get_vc)(joinee));
1097   }
1098
1099   thread_discard_ordered_segments();
1100
1101   if (DRD_(sg_get_trace)()) {
1102      char* str;
1103
1104      str = DRD_(vc_aprint)(DRD_(thread_get_vc)(joiner));
1105      VG_(message)(Vg_DebugMsg, "After join: %s\n", str);
1106      VG_(free)(str);
1107   }
1108}
1109
1110/**
1111 * Update the vector clock of the last segment of thread tid with the
1112 * vector clock of segment sg.
1113 */
1114static void thread_combine_vc_sync(DrdThreadId tid, const Segment* sg)
1115{
1116   const VectorClock* const vc = &sg->vc;
1117
1118   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
1119             && tid != DRD_INVALID_THREADID);
1120   tl_assert(DRD_(g_threadinfo)[tid].last);
1121   tl_assert(sg);
1122   tl_assert(vc);
1123
1124   if (tid != sg->tid) {
1125      VectorClock old_vc;
1126
1127      DRD_(vc_copy)(&old_vc, DRD_(thread_get_vc)(tid));
1128      DRD_(vc_combine)(DRD_(thread_get_vc)(tid), vc);
1129      if (DRD_(sg_get_trace)()) {
1130         char *str1, *str2;
1131         str1 = DRD_(vc_aprint)(&old_vc);
1132         str2 = DRD_(vc_aprint)(DRD_(thread_get_vc)(tid));
1133         VG_(message)(Vg_DebugMsg, "thread %d: vc %s -> %s\n", tid, str1, str2);
1134         VG_(free)(str1);
1135         VG_(free)(str2);
1136      }
1137
1138      thread_discard_ordered_segments();
1139
1140      DRD_(thread_update_conflict_set)(tid, &old_vc);
1141      s_update_conflict_set_sync_count++;
1142
1143      DRD_(vc_cleanup)(&old_vc);
1144   } else {
1145      tl_assert(DRD_(vc_lte)(vc, DRD_(thread_get_vc)(tid)));
1146   }
1147}
1148
1149/**
1150 * Create a new segment for thread tid and update the vector clock of the last
1151 * segment of this thread with the vector clock of segment sg. Call this
1152 * function after thread tid had to wait, because of thread synchronization,
1153 * until the memory accesses in segment sg had finished.
1154 */
1155void DRD_(thread_new_segment_and_combine_vc)(DrdThreadId tid, const Segment* sg)
1156{
1157   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
1158             && tid != DRD_INVALID_THREADID);
1159   tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));
1160   tl_assert(sg);
1161
1162   thread_append_segment(tid, DRD_(sg_new)(tid, tid));
1163
1164   thread_combine_vc_sync(tid, sg);
1165
1166   if (s_segment_merging
1167       && ++s_new_segments_since_last_merge >= s_segment_merge_interval)
1168   {
1169      thread_discard_ordered_segments();
1170      thread_merge_segments();
1171   }
1172}
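
/*
 * Usage sketch (conceptual, under the assumption that the synchronization
 * object records the segment that was current when it was last released):
 * for a mutex, that recorded segment can be passed as 'sg' when another
 * thread acquires the mutex. The acquiring thread then starts a new segment
 * whose vector clock dominates the releaser's clock, which establishes the
 * happens-before relation between the two critical sections.
 */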
1173
1174/**
1175 * Call this function whenever a thread is no longer using the memory
1176 * [ a1, a2 [, e.g. because of a call to free() or a stack pointer
1177 * increase.
1178 */
1179void DRD_(thread_stop_using_mem)(const Addr a1, const Addr a2)
1180{
1181   unsigned i;
1182   Segment* p;
1183
1184   for (i = 0; i < DRD_N_THREADS; i++)
1185      for (p = DRD_(g_threadinfo)[i].first; p; p = p->next)
1186         DRD_(bm_clear)(DRD_(sg_bm)(p), a1, a2);
1187
1188   DRD_(bm_clear)(DRD_(g_conflict_set), a1, a2);
1189}
1190
1191/** Specify whether memory loads should be recorded. */
1192void DRD_(thread_set_record_loads)(const DrdThreadId tid, const Bool enabled)
1193{
1194   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
1195             && tid != DRD_INVALID_THREADID);
1196   tl_assert(enabled == !! enabled);
1197
1198   DRD_(g_threadinfo)[tid].is_recording_loads = enabled;
1199}
1200
1201/** Specify whether memory stores should be recorded. */
1202void DRD_(thread_set_record_stores)(const DrdThreadId tid, const Bool enabled)
1203{
1204   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
1205             && tid != DRD_INVALID_THREADID);
1206   tl_assert(enabled == !! enabled);
1207
1208   DRD_(g_threadinfo)[tid].is_recording_stores = enabled;
1209}
1210
1211/**
1212 * Print the segment information for all threads.
1213 *
1214 * This function is only used for debugging purposes.
1215 */
1216void DRD_(thread_print_all)(void)
1217{
1218   unsigned i;
1219   Segment* p;
1220
1221   for (i = 0; i < DRD_N_THREADS; i++)
1222   {
1223      if (DRD_(g_threadinfo)[i].first)
1224      {
1225         VG_(printf)("**************\n"
1226                     "* thread %3d (%d/%d/%d/%d/0x%lx/%d) *\n"
1227                     "**************\n",
1228                     i,
1229                     DRD_(g_threadinfo)[i].valid,
1230                     DRD_(g_threadinfo)[i].vg_thread_exists,
1231                     DRD_(g_threadinfo)[i].vg_threadid,
1232                     DRD_(g_threadinfo)[i].posix_thread_exists,
1233                     DRD_(g_threadinfo)[i].pt_threadid,
1234                     DRD_(g_threadinfo)[i].detached_posix_thread);
1235         for (p = DRD_(g_threadinfo)[i].first; p; p = p->next)
1236         {
1237            DRD_(sg_print)(p);
1238         }
1239      }
1240   }
1241}
1242
1243/** Show a call stack involved in a data race. */
1244static void show_call_stack(const DrdThreadId tid, ExeContext* const callstack)
1245{
1246   const ThreadId vg_tid = DRD_(DrdThreadIdToVgThreadId)(tid);
1247
1248   if (vg_tid != VG_INVALID_THREADID) {
1249      if (callstack)
1250         VG_(pp_ExeContext)(callstack);
1251      else
1252         VG_(get_and_pp_StackTrace)(vg_tid, VG_(clo_backtrace_size));
1253   } else {
1254      if (!VG_(clo_xml))
1255         VG_(message)(Vg_UserMsg,
1256                      "   (thread finished, call stack no longer available)\n");
1257   }
1258}
1259
1260/** Print information about the segments involved in a data race. */
1261static void
1262thread_report_conflicting_segments_segment(const DrdThreadId tid,
1263                                           const Addr addr,
1264                                           const SizeT size,
1265                                           const BmAccessTypeT access_type,
1266                                           const Segment* const p)
1267{
1268   unsigned i;
1269
1270   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
1271             && tid != DRD_INVALID_THREADID);
1272   tl_assert(p);
1273
1274   for (i = 0; i < DRD_N_THREADS; i++)
1275   {
1276      if (i != tid)
1277      {
1278         Segment* q;
1279         for (q = DRD_(g_threadinfo)[i].last; q; q = q->prev)
1280         {
1281            /*
1282             * Since q iterates over the segments of thread i in order of
1283             * decreasing vector clocks, if q->vc <= p->vc then the same holds
1284             * for all segments that remain to be visited. Hence, break out of the
1285             * loop once this condition is met.
1286             */
1287            if (DRD_(vc_lte)(&q->vc, &p->vc))
1288               break;
1289            if (! DRD_(vc_lte)(&p->vc, &q->vc))
1290            {
1291               if (DRD_(bm_has_conflict_with)(DRD_(sg_bm)(q), addr, addr + size,
1292                                              access_type))
1293               {
1294                  tl_assert(q->stacktrace);
1295                  if (VG_(clo_xml))
1296                     VG_(printf_xml)("  <other_segment_start>\n");
1297                  else
1298                     VG_(message)(Vg_UserMsg,
1299                                  "Other segment start (thread %d)\n", i);
1300                  show_call_stack(i, q->stacktrace);
1301                  if (VG_(clo_xml))
1302                     VG_(printf_xml)("  </other_segment_start>\n"
1303                                     "  <other_segment_end>\n");
1304                  else
1305                     VG_(message)(Vg_UserMsg,
1306                                  "Other segment end (thread %d)\n", i);
1307                  show_call_stack(i, q->next ? q->next->stacktrace : 0);
1308                  if (VG_(clo_xml))
1309                     VG_(printf_xml)("  </other_segment_end>\n");
1310               }
1311            }
1312         }
1313      }
1314   }
1315}
1316
1317/** Print information about all segments involved in a data race. */
1318void DRD_(thread_report_conflicting_segments)(const DrdThreadId tid,
1319                                              const Addr addr,
1320                                              const SizeT size,
1321                                              const BmAccessTypeT access_type)
1322{
1323   Segment* p;
1324
1325   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
1326             && tid != DRD_INVALID_THREADID);
1327
1328   for (p = DRD_(g_threadinfo)[tid].first; p; p = p->next)
1329   {
1330      if (DRD_(bm_has)(DRD_(sg_bm)(p), addr, addr + size, access_type))
1331      {
1332         thread_report_conflicting_segments_segment(tid, addr, size,
1333                                                    access_type, p);
1334      }
1335   }
1336}
1337
1338/**
1339 * Verify whether the conflict set for thread tid is up to date. Only perform
1340 * the check if the environment variable DRD_VERIFY_CONFLICT_SET has been set.
1341 */
1342static Bool thread_conflict_set_up_to_date(const DrdThreadId tid)
1343{
1344   static int do_verify_conflict_set = -1;
1345   Bool result;
1346   struct bitmap* computed_conflict_set = 0;
1347
1348   if (do_verify_conflict_set < 0)
1349      do_verify_conflict_set = VG_(getenv)("DRD_VERIFY_CONFLICT_SET") != 0;
1350
1351   if (do_verify_conflict_set == 0)
1352      return True;
1353
1354   thread_compute_conflict_set(&computed_conflict_set, tid);
1355   result = DRD_(bm_equal)(DRD_(g_conflict_set), computed_conflict_set);
1356   if (! result)
1357   {
1358      VG_(printf)("actual conflict set:\n");
1359      DRD_(bm_print)(DRD_(g_conflict_set));
1360      VG_(printf)("\n");
1361      VG_(printf)("computed conflict set:\n");
1362      DRD_(bm_print)(computed_conflict_set);
1363      VG_(printf)("\n");
1364   }
1365   DRD_(bm_delete)(computed_conflict_set);
1366   return result;
1367}
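
/*
 * Example: only the presence of the environment variable is tested above, so
 * the (expensive) verification can be enabled with e.g.
 *
 *   DRD_VERIFY_CONFLICT_SET=1 valgrind --tool=drd ./a.out
 */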
1368
1369/**
1370 * Compute the conflict set: a bitmap that represents the union of all memory
1371 * accesses of all segments that are unordered with respect to the most
1372 * recent segment of thread tid.
1373 */
1374static void thread_compute_conflict_set(struct bitmap** conflict_set,
1375                                        const DrdThreadId tid)
1376{
1377   Segment* p;
1378
1379   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
1380             && tid != DRD_INVALID_THREADID);
1381   tl_assert(tid == DRD_(g_drd_running_tid));
1382
1383   s_compute_conflict_set_count++;
1384   s_conflict_set_bitmap_creation_count
1385      -= DRD_(bm_get_bitmap_creation_count)();
1386   s_conflict_set_bitmap2_creation_count
1387      -= DRD_(bm_get_bitmap2_creation_count)();
1388
1389   if (*conflict_set) {
1390      DRD_(bm_cleanup)(*conflict_set);
1391      DRD_(bm_init)(*conflict_set);
1392   } else {
1393      *conflict_set = DRD_(bm_new)();
1394   }
1395
1396   if (s_trace_conflict_set) {
1397      char* str;
1398
1399      str = DRD_(vc_aprint)(DRD_(thread_get_vc)(tid));
1400      VG_(message)(Vg_DebugMsg,
1401                   "computing conflict set for thread %d with vc %s\n",
1402                   tid, str);
1403      VG_(free)(str);
1404   }
1405
1406   p = DRD_(g_threadinfo)[tid].last;
1407   {
1408      unsigned j;
1409
1410      if (s_trace_conflict_set)
1411      {
1412         char* vc;
1413
1414         vc = DRD_(vc_aprint)(&p->vc);
1415         VG_(message)(Vg_DebugMsg, "conflict set: thread [%d] at vc %s\n",
1416                      tid, vc);
1417         VG_(free)(vc);
1418      }
1419
1420      for (j = 0; j < DRD_N_THREADS; j++)
1421      {
1422         if (j != tid && DRD_(IsValidDrdThreadId)(j))
1423         {
1424            Segment* q;
1425            for (q = DRD_(g_threadinfo)[j].last; q; q = q->prev)
1426            {
1427               if (! DRD_(vc_lte)(&q->vc, &p->vc)
1428                   && ! DRD_(vc_lte)(&p->vc, &q->vc))
1429               {
1430                  if (s_trace_conflict_set)
1431                  {
1432                     char* str;
1433
1434                     str = DRD_(vc_aprint)(&q->vc);
1435                     VG_(message)(Vg_DebugMsg,
1436                                  "conflict set: [%d] merging segment %s\n",
1437                                  j, str);
1438                     VG_(free)(str);
1439                  }
1440                  DRD_(bm_merge2)(*conflict_set, DRD_(sg_bm)(q));
1441               }
1442               else
1443               {
1444                  if (s_trace_conflict_set)
1445                  {
1446                     char* str;
1447
1448                     str = DRD_(vc_aprint)(&q->vc);
1449                     VG_(message)(Vg_DebugMsg,
1450                                  "conflict set: [%d] ignoring segment %s\n",
1451                                  j, str);
1452                     VG_(free)(str);
1453                  }
1454               }
1455            }
1456         }
1457      }
1458   }
1459
1460   s_conflict_set_bitmap_creation_count
1461      += DRD_(bm_get_bitmap_creation_count)();
1462   s_conflict_set_bitmap2_creation_count
1463      += DRD_(bm_get_bitmap2_creation_count)();
1464
1465   if (s_trace_conflict_set_bm)
1466   {
1467      VG_(message)(Vg_DebugMsg, "[%d] new conflict set:\n", tid);
1468      DRD_(bm_print)(*conflict_set);
1469      VG_(message)(Vg_DebugMsg, "[%d] end of new conflict set.\n", tid);
1470   }
1471}
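
/*
 * Example of the ordering test above: with p->vc == [ 1: 5, 2: 2 ], a segment
 * q with vector clock [ 1: 2, 2: 6 ] is unordered with respect to p (neither
 * clock is <= the other), so its accesses are merged into the conflict set,
 * while a segment with vector clock [ 1: 4, 2: 1 ] happens before p and is
 * ignored.
 */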
1472
1473/**
1474 * Update the conflict set after the vector clock of thread tid has been
1475 * updated from old_vc to its current value, either because a new segment has
1476 * been created or because of a synchronization operation.
1477 */
1478void DRD_(thread_update_conflict_set)(const DrdThreadId tid,
1479                                      const VectorClock* const old_vc)
1480{
1481   const VectorClock* new_vc;
1482   Segment* p;
1483   unsigned j;
1484
1485   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
1486             && tid != DRD_INVALID_THREADID);
1487   tl_assert(old_vc);
1488   tl_assert(tid == DRD_(g_drd_running_tid));
1489   tl_assert(DRD_(g_conflict_set));
1490
1491   if (s_trace_conflict_set) {
1492      char* str;
1493
1494      str = DRD_(vc_aprint)(DRD_(thread_get_vc)(tid));
1495      VG_(message)(Vg_DebugMsg,
1496                   "updating conflict set for thread %d with vc %s\n",
1497                   tid, str);
1498      VG_(free)(str);
1499   }
1500
1501   new_vc = DRD_(thread_get_vc)(tid);
1502   tl_assert(DRD_(vc_lte)(old_vc, new_vc));
1503
1504   DRD_(bm_unmark)(DRD_(g_conflict_set));
1505
1506   for (j = 0; j < DRD_N_THREADS; j++)
1507   {
1508      Segment* q;
1509
1510      if (j == tid || ! DRD_(IsValidDrdThreadId)(j))
1511         continue;
1512
1513      for (q = DRD_(g_threadinfo)[j].last;
1514           q && !DRD_(vc_lte)(&q->vc, new_vc);
1515           q = q->prev) {
1516         const Bool included_in_old_conflict_set
1517            = !DRD_(vc_lte)(old_vc, &q->vc);
1518         const Bool included_in_new_conflict_set
1519            = !DRD_(vc_lte)(new_vc, &q->vc);
1520
1521         if (UNLIKELY(s_trace_conflict_set)) {
1522            char* str;
1523
1524            str = DRD_(vc_aprint)(&q->vc);
1525            VG_(message)(Vg_DebugMsg,
1526                         "conflict set: [%d] %s segment %s\n", j,
1527                         included_in_old_conflict_set
1528                         != included_in_new_conflict_set
1529                         ? "merging" : "ignoring", str);
1530            VG_(free)(str);
1531         }
1532         if (included_in_old_conflict_set != included_in_new_conflict_set)
1533            DRD_(bm_mark)(DRD_(g_conflict_set), DRD_(sg_bm)(q));
1534      }
1535
1536      for ( ; q && !DRD_(vc_lte)(&q->vc, old_vc); q = q->prev) {
1537         const Bool included_in_old_conflict_set
1538            = !DRD_(vc_lte)(old_vc, &q->vc);
1539         const Bool included_in_new_conflict_set
1540            = !DRD_(vc_lte)(&q->vc, new_vc)
1541            && !DRD_(vc_lte)(new_vc, &q->vc);
1542
1543         if (UNLIKELY(s_trace_conflict_set)) {
1544            char* str;
1545
1546            str = DRD_(vc_aprint)(&q->vc);
1547            VG_(message)(Vg_DebugMsg,
1548                         "conflict set: [%d] %s segment %s\n", j,
1549                         included_in_old_conflict_set
1550                         != included_in_new_conflict_set
1551                         ? "merging" : "ignoring", str);
1552            VG_(free)(str);
1553         }
1554         if (included_in_old_conflict_set != included_in_new_conflict_set)
1555            DRD_(bm_mark)(DRD_(g_conflict_set), DRD_(sg_bm)(q));
1556      }
1557   }
1558
1559   DRD_(bm_clear_marked)(DRD_(g_conflict_set));
1560
1561   p = DRD_(g_threadinfo)[tid].last;
1562   for (j = 0; j < DRD_N_THREADS; j++)
1563   {
1564      if (j != tid && DRD_(IsValidDrdThreadId)(j))
1565      {
1566         Segment* q;
1567         for (q = DRD_(g_threadinfo)[j].last;
1568              q && !DRD_(vc_lte)(&q->vc, &p->vc);
1569              q = q->prev) {
1570            if (!DRD_(vc_lte)(&p->vc, &q->vc))
1571               DRD_(bm_merge2_marked)(DRD_(g_conflict_set), DRD_(sg_bm)(q));
1572         }
1573      }
1574   }
1575
1576   DRD_(bm_remove_cleared_marked)(DRD_(g_conflict_set));
1577
1578   s_update_conflict_set_count++;
1579
1580   if (s_trace_conflict_set_bm)
1581   {
1582      VG_(message)(Vg_DebugMsg, "[%d] updated conflict set:\n", tid);
1583      DRD_(bm_print)(DRD_(g_conflict_set));
1584      VG_(message)(Vg_DebugMsg, "[%d] end of updated conflict set.\n", tid);
1585   }
1586
1587   tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));
1588}
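
/*
 * Note on why the partial update above is sufficient: since old_vc <= new_vc,
 * any segment q with q->vc <= old_vc also satisfies q->vc <= new_vc and hence
 * was and remains ordered before the running thread's segment. Only segments
 * with !(q->vc <= old_vc) -- exactly those visited by the two loops over each
 * thread's segment list -- can change between "conflicting" and "ordered".
 */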
1589
1590/** Report the number of context switches performed. */
1591ULong DRD_(thread_get_context_switch_count)(void)
1592{
1593   return s_context_switch_count;
1594}
1595
1596/** Report the number of ordered segments that have been discarded. */
1597ULong DRD_(thread_get_discard_ordered_segments_count)(void)
1598{
1599   return s_discard_ordered_segments_count;
1600}
1601
1602/** Return how many times the conflict set has been updated entirely. */
1603ULong DRD_(thread_get_compute_conflict_set_count)(void)
1604{
1605   return s_compute_conflict_set_count;
1606}
1607
1608/** Return how many times the conflict set has been updated partially. */
1609ULong DRD_(thread_get_update_conflict_set_count)(void)
1610{
1611   return s_update_conflict_set_count;
1612}
1613
1614/**
1615 * Return how many times the conflict set has been updated partially
1616 * because a new segment has been created.
1617 */
1618ULong DRD_(thread_get_update_conflict_set_new_sg_count)(void)
1619{
1620   return s_update_conflict_set_new_sg_count;
1621}
1622
1623/**
1624 * Return how many times the conflict set has been updated partially
1625 * because of combining vector clocks due to synchronization operations
1626 * other than reader/writer lock or barrier operations.
1627 */
1628ULong DRD_(thread_get_update_conflict_set_sync_count)(void)
1629{
1630   return s_update_conflict_set_sync_count;
1631}
1632
1633/**
1634 * Return how many times the conflict set has been updated partially
1635 * because of thread joins.
1636 */
1637ULong DRD_(thread_get_update_conflict_set_join_count)(void)
1638{
1639   return s_update_conflict_set_join_count;
1640}
1641
1642/**
1643 * Return the number of first-level bitmaps that have been created during
1644 * conflict set updates.
1645 */
1646ULong DRD_(thread_get_conflict_set_bitmap_creation_count)(void)
1647{
1648   return s_conflict_set_bitmap_creation_count;
1649}
1650
1651/**
1652 * Return the number of second-level bitmaps that have been created during
1653 * conflict set updates.
1654 */
1655ULong DRD_(thread_get_conflict_set_bitmap2_creation_count)(void)
1656{
1657   return s_conflict_set_bitmap2_creation_count;
1658}
1659