/* drd_thread.c revision 0c0cd77a6ffda2fd1abf4877ef125889c2e07f9e */
1/* -*- mode: C; c-basic-offset: 3; -*- */
2/*
3  This file is part of drd, a thread error detector.
4
5  Copyright (C) 2006-2010 Bart Van Assche <bvanassche@acm.org>.
6
7  This program is free software; you can redistribute it and/or
8  modify it under the terms of the GNU General Public License as
9  published by the Free Software Foundation; either version 2 of the
10  License, or (at your option) any later version.
11
12  This program is distributed in the hope that it will be useful, but
13  WITHOUT ANY WARRANTY; without even the implied warranty of
14  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  General Public License for more details.
16
17  You should have received a copy of the GNU General Public License
18  along with this program; if not, write to the Free Software
19  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
20  02111-1307, USA.
21
22  The GNU General Public License is contained in the file COPYING.
23*/
24
25
26#include "drd_error.h"
27#include "drd_barrier.h"
28#include "drd_clientobj.h"
29#include "drd_cond.h"
30#include "drd_mutex.h"
31#include "drd_segment.h"
32#include "drd_semaphore.h"
33#include "drd_suppression.h"
34#include "drd_thread.h"
35#include "pub_tool_vki.h"
36#include "pub_tool_basics.h"      // Addr, SizeT
37#include "pub_tool_libcassert.h"  // tl_assert()
38#include "pub_tool_libcbase.h"    // VG_(strlen)()
39#include "pub_tool_libcprint.h"   // VG_(printf)()
40#include "pub_tool_libcproc.h"    // VG_(getenv)()
41#include "pub_tool_machine.h"
42#include "pub_tool_mallocfree.h"  // VG_(malloc)(), VG_(free)()
43#include "pub_tool_options.h"     // VG_(clo_backtrace_size)
44#include "pub_tool_threadstate.h" // VG_(get_pthread_id)()
45
46
47
/* Local functions. */

static void thread_append_segment(const DrdThreadId tid, Segment* const sg);
static void thread_discard_segment(const DrdThreadId tid, Segment* const sg);
static void thread_compute_conflict_set(struct bitmap** conflict_set,
                                        const DrdThreadId tid);
static Bool thread_conflict_set_up_to_date(const DrdThreadId tid);


/* Local variables. */

/* Statistics counters, incremented when the corresponding event occurs. */
static ULong    s_context_switch_count;
static ULong    s_discard_ordered_segments_count;
static ULong    s_compute_conflict_set_count;
static ULong    s_update_conflict_set_count;
static ULong    s_update_conflict_set_new_sg_count;
static ULong    s_update_conflict_set_sync_count;
static ULong    s_update_conflict_set_join_count;
static ULong    s_conflict_set_bitmap_creation_count;
static ULong    s_conflict_set_bitmap2_creation_count;
/* Valgrind thread ID of the most recently scheduled client thread. */
static ThreadId s_vg_running_tid  = VG_INVALID_THREADID;
/* DRD thread ID of the most recently scheduled client thread. */
DrdThreadId     DRD_(g_drd_running_tid) = DRD_INVALID_THREADID;
/* Per-thread bookkeeping, indexed by DRD thread ID. */
ThreadInfo      DRD_(g_threadinfo)[DRD_N_THREADS];
/* Conflict set of the currently running thread. */
struct bitmap*  DRD_(g_conflict_set);
/* Tracing and tuning knobs, set via the DRD_(thread_set_*)() functions. */
static Bool     s_trace_context_switches = False;
static Bool     s_trace_conflict_set = False;
static Bool     s_trace_conflict_set_bm = False;
static Bool     s_trace_fork_join = False;
static Bool     s_segment_merging = True;
static Bool     s_new_segments_since_last_merge;
static int      s_segment_merge_interval = 10;
79
80
81/* Function definitions. */
82
83/** Enables/disables context switch tracing. */
84void DRD_(thread_trace_context_switches)(const Bool t)
85{
86   tl_assert(t == False || t == True);
87   s_trace_context_switches = t;
88}
89
90/** Enables/disables conflict set tracing. */
91void DRD_(thread_trace_conflict_set)(const Bool t)
92{
93   tl_assert(t == False || t == True);
94   s_trace_conflict_set = t;
95}
96
97/** Enables/disables conflict set bitmap tracing. */
98void DRD_(thread_trace_conflict_set_bm)(const Bool t)
99{
100   tl_assert(t == False || t == True);
101   s_trace_conflict_set_bm = t;
102}
103
104/** Report whether fork/join tracing is enabled. */
105Bool DRD_(thread_get_trace_fork_join)(void)
106{
107   return s_trace_fork_join;
108}
109
110/** Enables/disables fork/join tracing. */
111void DRD_(thread_set_trace_fork_join)(const Bool t)
112{
113   tl_assert(t == False || t == True);
114   s_trace_fork_join = t;
115}
116
117/** Enables/disables segment merging. */
118void DRD_(thread_set_segment_merging)(const Bool m)
119{
120   tl_assert(m == False || m == True);
121   s_segment_merging = m;
122}
123
124/** Get the segment merging interval. */
125int DRD_(thread_get_segment_merge_interval)(void)
126{
127   return s_segment_merge_interval;
128}
129
130/** Set the segment merging interval. */
131void DRD_(thread_set_segment_merge_interval)(const int i)
132{
133   s_segment_merge_interval = i;
134}
135
136/**
137 * Convert Valgrind's ThreadId into a DrdThreadId.
138 *
139 * @return DRD thread ID upon success and DRD_INVALID_THREADID if the passed
140 *         Valgrind ThreadId does not yet exist.
141 */
142DrdThreadId DRD_(VgThreadIdToDrdThreadId)(const ThreadId tid)
143{
144   int i;
145
146   if (tid == VG_INVALID_THREADID)
147      return DRD_INVALID_THREADID;
148
149   for (i = 1; i < DRD_N_THREADS; i++)
150   {
151      if (DRD_(g_threadinfo)[i].vg_thread_exists == True
152          && DRD_(g_threadinfo)[i].vg_threadid == tid)
153      {
154         return i;
155      }
156   }
157
158   return DRD_INVALID_THREADID;
159}
160
161/** Allocate a new DRD thread ID for the specified Valgrind thread ID. */
162static DrdThreadId DRD_(VgThreadIdToNewDrdThreadId)(const ThreadId tid)
163{
164   int i;
165
166   tl_assert(DRD_(VgThreadIdToDrdThreadId)(tid) == DRD_INVALID_THREADID);
167
168   for (i = 1; i < DRD_N_THREADS; i++)
169   {
170      if (DRD_(g_threadinfo)[i].vg_thread_exists == False
171          && DRD_(g_threadinfo)[i].posix_thread_exists == False
172          && DRD_(g_threadinfo)[i].detached_posix_thread == False)
173      {
174         tl_assert(! DRD_(IsValidDrdThreadId)(i));
175
176         DRD_(g_threadinfo)[i].vg_thread_exists = True;
177         DRD_(g_threadinfo)[i].vg_threadid   = tid;
178         DRD_(g_threadinfo)[i].pt_threadid   = INVALID_POSIX_THREADID;
179         DRD_(g_threadinfo)[i].stack_min     = 0;
180         DRD_(g_threadinfo)[i].stack_min_min = 0;
181         DRD_(g_threadinfo)[i].stack_startup = 0;
182         DRD_(g_threadinfo)[i].stack_max     = 0;
183         DRD_(thread_set_name)(i, "");
184         DRD_(g_threadinfo)[i].on_alt_stack        = False;
185         DRD_(g_threadinfo)[i].is_recording_loads  = True;
186         DRD_(g_threadinfo)[i].is_recording_stores = True;
187         DRD_(g_threadinfo)[i].pthread_create_nesting_level = 0;
188         DRD_(g_threadinfo)[i].synchr_nesting = 0;
189         tl_assert(DRD_(g_threadinfo)[i].first == 0);
190         tl_assert(DRD_(g_threadinfo)[i].last == 0);
191
192         tl_assert(DRD_(IsValidDrdThreadId)(i));
193
194         return i;
195      }
196   }
197
198   VG_(printf)(
199"\nSorry, but the maximum number of threads supported by DRD has been exceeded."
200"Aborting.\n");
201
202   tl_assert(False);
203
204   return DRD_INVALID_THREADID;
205}
206
207/** Convert a POSIX thread ID into a DRD thread ID. */
208DrdThreadId DRD_(PtThreadIdToDrdThreadId)(const PThreadId tid)
209{
210   int i;
211
212   if (tid != INVALID_POSIX_THREADID)
213   {
214      for (i = 1; i < DRD_N_THREADS; i++)
215      {
216         if (DRD_(g_threadinfo)[i].posix_thread_exists
217             && DRD_(g_threadinfo)[i].pt_threadid == tid)
218         {
219            return i;
220         }
221      }
222   }
223   return DRD_INVALID_THREADID;
224}
225
226/** Convert a DRD thread ID into a Valgrind thread ID. */
227ThreadId DRD_(DrdThreadIdToVgThreadId)(const DrdThreadId tid)
228{
229   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
230             && tid != DRD_INVALID_THREADID);
231
232   return (DRD_(g_threadinfo)[tid].vg_thread_exists
233           ? DRD_(g_threadinfo)[tid].vg_threadid
234           : VG_INVALID_THREADID);
235}
236
#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
/**
 * Sanity check of the doubly linked list of segments referenced by a
 * ThreadInfo struct.
 * @return True if sane, False if not.
 */
static Bool DRD_(sane_ThreadInfo)(const ThreadInfo* const ti)
{
   Segment* s;

   /* Forward walk: each next/prev pair must be consistent and the walk
      must terminate at ti->last. */
   for (s = ti->first; s; s = s->next)
   {
      if (s->next)
      {
         if (s->next->prev != s)
            return False;
      }
      else if (s != ti->last)
         return False;
   }
   /* Backward walk: mirror image of the check above. */
   for (s = ti->last; s; s = s->prev)
   {
      if (s->prev)
      {
         if (s->prev->next != s)
            return False;
      }
      else if (s != ti->first)
         return False;
   }
   return True;
}
#endif
262
263/**
264 * Create the first segment for a newly started thread.
265 *
266 * This function is called from the handler installed via
267 * VG_(track_pre_thread_ll_create)(). The Valgrind core invokes this handler
268 * from the context of the creator thread, before the new thread has been
269 * created.
270 *
271 * @param[in] creator    DRD thread ID of the creator thread.
272 * @param[in] vg_created Valgrind thread ID of the created thread.
273 *
274 * @return DRD thread ID of the created thread.
275 */
276DrdThreadId DRD_(thread_pre_create)(const DrdThreadId creator,
277                                    const ThreadId vg_created)
278{
279   DrdThreadId created;
280
281   tl_assert(DRD_(VgThreadIdToDrdThreadId)(vg_created) == DRD_INVALID_THREADID);
282   created = DRD_(VgThreadIdToNewDrdThreadId)(vg_created);
283   tl_assert(0 <= (int)created && created < DRD_N_THREADS
284             && created != DRD_INVALID_THREADID);
285
286   tl_assert(DRD_(g_threadinfo)[created].first == 0);
287   tl_assert(DRD_(g_threadinfo)[created].last == 0);
288   /* Create an initial segment for the newly created thread. */
289   thread_append_segment(created, DRD_(sg_new)(creator, created));
290
291   return created;
292}
293
294/**
295 * Initialize DRD_(g_threadinfo)[] for a newly created thread. Must be called
296 * after the thread has been created and before any client instructions are run
297 * on the newly created thread, e.g. from the handler installed via
298 * VG_(track_pre_thread_first_insn)().
299 *
300 * @param[in] vg_created Valgrind thread ID of the newly created thread.
301 *
302 * @return DRD thread ID for the new thread.
303 */
304DrdThreadId DRD_(thread_post_create)(const ThreadId vg_created)
305{
306   const DrdThreadId created = DRD_(VgThreadIdToDrdThreadId)(vg_created);
307
308   tl_assert(0 <= (int)created && created < DRD_N_THREADS
309             && created != DRD_INVALID_THREADID);
310
311   DRD_(g_threadinfo)[created].stack_max
312      = VG_(thread_get_stack_max)(vg_created);
313   DRD_(g_threadinfo)[created].stack_startup
314      = DRD_(g_threadinfo)[created].stack_max;
315   DRD_(g_threadinfo)[created].stack_min
316      = DRD_(g_threadinfo)[created].stack_max;
317   DRD_(g_threadinfo)[created].stack_min_min
318      = DRD_(g_threadinfo)[created].stack_max;
319   DRD_(g_threadinfo)[created].stack_size
320      = VG_(thread_get_stack_size)(vg_created);
321   tl_assert(DRD_(g_threadinfo)[created].stack_max != 0);
322
323   return created;
324}
325
/**
 * Process VG_USERREQ__POST_THREAD_JOIN. This client request is invoked just
 * after thread drd_joiner joined thread drd_joinee.
 *
 * Starts a new segment on both sides of the join, folds the joinee's vector
 * clock into the joiner's, optionally logs the event, and finally tears down
 * the joinee's client objects and per-thread state.
 */
void DRD_(thread_post_join)(DrdThreadId drd_joiner, DrdThreadId drd_joinee)
{
   tl_assert(DRD_(IsValidDrdThreadId)(drd_joiner));
   tl_assert(DRD_(IsValidDrdThreadId)(drd_joinee));

   DRD_(thread_new_segment)(drd_joiner);
   /* Make the joiner happen-after everything the joinee did. */
   DRD_(thread_combine_vc_join)(drd_joiner, drd_joinee);
   DRD_(thread_new_segment)(drd_joinee);

   if (s_trace_fork_join)
   {
      const ThreadId joiner = DRD_(DrdThreadIdToVgThreadId)(drd_joiner);
      const unsigned msg_size = 256;
      char* msg;

      msg = VG_(malloc)("drd.main.dptj.1", msg_size);
      tl_assert(msg);
      VG_(snprintf)(msg, msg_size,
                    "drd_post_thread_join joiner = %d, joinee = %d",
                    drd_joiner, drd_joinee);
      /* NOTE(review): only appends the vector clock when 'joiner' is
         nonzero -- presumably nonzero means != VG_INVALID_THREADID;
         confirm against the ThreadId definition. */
      if (joiner)
      {
         char* vc;

         vc = DRD_(vc_aprint)(DRD_(thread_get_vc)(drd_joiner));
         VG_(snprintf)(msg + VG_(strlen)(msg), msg_size - VG_(strlen)(msg),
                       ", new vc: %s", vc);
         VG_(free)(vc);
      }
      VG_(message)(Vg_DebugMsg, "%s\n", msg);
      VG_(free)(msg);
   }

   if (!  DRD_(get_check_stack_accesses)())
   {
      /* Stack accesses were suppressed while the joinee ran; remove the
         suppression range that covered its stack. */
      DRD_(finish_suppression)(DRD_(thread_get_stack_max)(drd_joinee)
                               - DRD_(thread_get_stack_size)(drd_joinee),
                               DRD_(thread_get_stack_max)(drd_joinee));
   }
   DRD_(clientobj_delete_thread)(drd_joinee);
   DRD_(thread_delete)(drd_joinee, False);
}
372
373/**
374 * NPTL hack: NPTL allocates the 'struct pthread' on top of the stack,
375 * and accesses this data structure from multiple threads without locking.
376 * Any conflicting accesses in the range stack_startup..stack_max will be
377 * ignored.
378 */
379void DRD_(thread_set_stack_startup)(const DrdThreadId tid,
380                                    const Addr stack_startup)
381{
382   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
383             && tid != DRD_INVALID_THREADID);
384   tl_assert(DRD_(g_threadinfo)[tid].stack_min <= stack_startup);
385   tl_assert(stack_startup <= DRD_(g_threadinfo)[tid].stack_max);
386   DRD_(g_threadinfo)[tid].stack_startup = stack_startup;
387}
388
389/** Return the stack pointer for the specified thread. */
390Addr DRD_(thread_get_stack_min)(const DrdThreadId tid)
391{
392   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
393             && tid != DRD_INVALID_THREADID);
394   return DRD_(g_threadinfo)[tid].stack_min;
395}
396
397/**
398 * Return the lowest value that was ever assigned to the stack pointer
399 * for the specified thread.
400 */
401Addr DRD_(thread_get_stack_min_min)(const DrdThreadId tid)
402{
403   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
404             && tid != DRD_INVALID_THREADID);
405   return DRD_(g_threadinfo)[tid].stack_min_min;
406}
407
408/** Return the top address for the stack of the specified thread. */
409Addr DRD_(thread_get_stack_max)(const DrdThreadId tid)
410{
411   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
412             && tid != DRD_INVALID_THREADID);
413   return DRD_(g_threadinfo)[tid].stack_max;
414}
415
416/** Return the maximum stack size for the specified thread. */
417SizeT DRD_(thread_get_stack_size)(const DrdThreadId tid)
418{
419   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
420             && tid != DRD_INVALID_THREADID);
421   return DRD_(g_threadinfo)[tid].stack_size;
422}
423
424Bool DRD_(thread_get_on_alt_stack)(const DrdThreadId tid)
425{
426   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
427             && tid != DRD_INVALID_THREADID);
428   return DRD_(g_threadinfo)[tid].on_alt_stack;
429}
430
431void DRD_(thread_set_on_alt_stack)(const DrdThreadId tid,
432                                   const Bool on_alt_stack)
433{
434   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
435             && tid != DRD_INVALID_THREADID);
436   tl_assert(on_alt_stack == !!on_alt_stack);
437   DRD_(g_threadinfo)[tid].on_alt_stack = on_alt_stack;
438}
439
440Int DRD_(thread_get_threads_on_alt_stack)(void)
441{
442   int i, n = 0;
443
444   for (i = 1; i < DRD_N_THREADS; i++)
445      n += DRD_(g_threadinfo)[i].on_alt_stack;
446   return n;
447}
448
/**
 * Clean up thread-specific data structures. Call this just after
 * pthread_join().
 *
 * Releases every segment owned by the thread and clears the existence
 * flags so that the DRD thread ID slot can be reused.
 */
void DRD_(thread_delete)(const DrdThreadId tid, const Bool detached)
{
   Segment* sg;
   Segment* sg_prev;

   tl_assert(DRD_(IsValidDrdThreadId)(tid));

   tl_assert(DRD_(g_threadinfo)[tid].synchr_nesting >= 0);
   /* Drop the reference on each segment, walking newest to oldest. The
      prev/next pointers are cleared before sg_put() releases the segment. */
   for (sg = DRD_(g_threadinfo)[tid].last; sg; sg = sg_prev)
   {
      sg_prev = sg->prev;
      sg->prev = 0;
      sg->next = 0;
      DRD_(sg_put)(sg);
   }
   DRD_(g_threadinfo)[tid].vg_thread_exists = False;
   DRD_(g_threadinfo)[tid].posix_thread_exists = False;
   /* Only a thread deleted as 'detached' may still carry the detached
      flag; otherwise deleting a detached thread here would be a bug. */
   if (detached)
      DRD_(g_threadinfo)[tid].detached_posix_thread = False;
   else
      tl_assert(!DRD_(g_threadinfo)[tid].detached_posix_thread);
   DRD_(g_threadinfo)[tid].first = 0;
   DRD_(g_threadinfo)[tid].last = 0;

   /* With all existence flags cleared the slot must read as invalid. */
   tl_assert(! DRD_(IsValidDrdThreadId)(tid));
}
479
/**
 * Called after a thread performed its last memory access and before
 * thread_delete() is called. Note: thread_delete() is only called for
 * joinable threads, not for detached threads.
 */
void DRD_(thread_finished)(const DrdThreadId tid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);

   /* The Valgrind thread is gone in both the joinable and detached case. */
   DRD_(g_threadinfo)[tid].vg_thread_exists = False;

   if (DRD_(g_threadinfo)[tid].detached_posix_thread)
   {
      /*
       * Once a detached thread has finished, its stack is deallocated and
       * should no longer be taken into account when computing the conflict set.
       */
      DRD_(g_threadinfo)[tid].stack_min = DRD_(g_threadinfo)[tid].stack_max;

      /*
       * For a detached thread, calling pthread_exit() invalidates the
       * POSIX thread ID associated with the detached thread. For joinable
       * POSIX threads however, the POSIX thread ID remains live after the
       * pthread_exit() call until pthread_join() is called.
       */
      DRD_(g_threadinfo)[tid].posix_thread_exists = False;
   }
}
509
510/** Called just after fork() in the child process. */
511void DRD_(drd_thread_atfork_child)(const DrdThreadId tid)
512{
513   unsigned i;
514
515   for (i = 1; i < DRD_N_THREADS; i++)
516   {
517      if (i == tid)
518	 continue;
519      if (DRD_(IsValidDrdThreadId(i)))
520	 DRD_(thread_delete)(i, True);
521      tl_assert(!DRD_(IsValidDrdThreadId(i)));
522   }
523}
524
525/** Called just before pthread_cancel(). */
526void DRD_(thread_pre_cancel)(const DrdThreadId tid)
527{
528   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
529             && tid != DRD_INVALID_THREADID);
530   tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);
531
532   if (DRD_(thread_get_trace_fork_join)())
533      VG_(message)(Vg_UserMsg, "[%d] drd_thread_pre_cancel %d\n",
534		   DRD_(g_drd_running_tid), tid);
535}
536
537/**
538 * Store the POSIX thread ID for the specified thread.
539 *
540 * @note This function can be called two times for the same thread -- see also
541 * the comment block preceding the pthread_create() wrapper in
542 * drd_pthread_intercepts.c.
543 */
544void DRD_(thread_set_pthreadid)(const DrdThreadId tid, const PThreadId ptid)
545{
546   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
547             && tid != DRD_INVALID_THREADID);
548   tl_assert(DRD_(g_threadinfo)[tid].pt_threadid == INVALID_POSIX_THREADID
549             || DRD_(g_threadinfo)[tid].pt_threadid == ptid);
550   tl_assert(ptid != INVALID_POSIX_THREADID);
551   DRD_(g_threadinfo)[tid].posix_thread_exists = True;
552   DRD_(g_threadinfo)[tid].pt_threadid         = ptid;
553}
554
555/** Returns true for joinable threads and false for detached threads. */
556Bool DRD_(thread_get_joinable)(const DrdThreadId tid)
557{
558   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
559             && tid != DRD_INVALID_THREADID);
560   return ! DRD_(g_threadinfo)[tid].detached_posix_thread;
561}
562
563/** Store the thread mode: joinable or detached. */
564void DRD_(thread_set_joinable)(const DrdThreadId tid, const Bool joinable)
565{
566   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
567             && tid != DRD_INVALID_THREADID);
568   tl_assert(!! joinable == joinable);
569   tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);
570
571   DRD_(g_threadinfo)[tid].detached_posix_thread = ! joinable;
572}
573
574/** Tells DRD that the calling thread is about to enter pthread_create(). */
575void DRD_(thread_entering_pthread_create)(const DrdThreadId tid)
576{
577   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
578             && tid != DRD_INVALID_THREADID);
579   tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);
580   tl_assert(DRD_(g_threadinfo)[tid].pthread_create_nesting_level >= 0);
581
582   DRD_(g_threadinfo)[tid].pthread_create_nesting_level++;
583}
584
585/** Tells DRD that the calling thread has left pthread_create(). */
586void DRD_(thread_left_pthread_create)(const DrdThreadId tid)
587{
588   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
589             && tid != DRD_INVALID_THREADID);
590   tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);
591   tl_assert(DRD_(g_threadinfo)[tid].pthread_create_nesting_level > 0);
592
593   DRD_(g_threadinfo)[tid].pthread_create_nesting_level--;
594}
595
596/** Obtain the thread number and the user-assigned thread name. */
597const char* DRD_(thread_get_name)(const DrdThreadId tid)
598{
599   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
600             && tid != DRD_INVALID_THREADID);
601
602   return DRD_(g_threadinfo)[tid].name;
603}
604
605/** Set the name of the specified thread. */
606void DRD_(thread_set_name)(const DrdThreadId tid, const char* const name)
607{
608   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
609             && tid != DRD_INVALID_THREADID);
610
611   if (name == NULL || name[0] == 0)
612      VG_(snprintf)(DRD_(g_threadinfo)[tid].name,
613                    sizeof(DRD_(g_threadinfo)[tid].name),
614                    "Thread %d",
615                    tid);
616   else
617      VG_(snprintf)(DRD_(g_threadinfo)[tid].name,
618                    sizeof(DRD_(g_threadinfo)[tid].name),
619                    "Thread %d (%s)",
620                    tid, name);
621   DRD_(g_threadinfo)[tid].name[sizeof(DRD_(g_threadinfo)[tid].name) - 1] = 0;
622}
623
624/**
625 * Update s_vg_running_tid, DRD_(g_drd_running_tid) and recalculate the
626 * conflict set.
627 */
628void DRD_(thread_set_vg_running_tid)(const ThreadId vg_tid)
629{
630   tl_assert(vg_tid != VG_INVALID_THREADID);
631
632   if (vg_tid != s_vg_running_tid)
633   {
634      DRD_(thread_set_running_tid)(vg_tid,
635                                   DRD_(VgThreadIdToDrdThreadId)(vg_tid));
636   }
637
638   tl_assert(s_vg_running_tid != VG_INVALID_THREADID);
639   tl_assert(DRD_(g_drd_running_tid) != DRD_INVALID_THREADID);
640}
641
/**
 * Update s_vg_running_tid, DRD_(g_drd_running_tid) and recalculate the
 * conflict set.
 */
void DRD_(thread_set_running_tid)(const ThreadId vg_tid,
                                  const DrdThreadId drd_tid)
{
   tl_assert(vg_tid != VG_INVALID_THREADID);
   tl_assert(drd_tid != DRD_INVALID_THREADID);

   if (vg_tid != s_vg_running_tid)
   {
      /* Optionally trace the context switch before updating any state, so
         the message can still report the previous thread ID. */
      if (s_trace_context_switches
          && DRD_(g_drd_running_tid) != DRD_INVALID_THREADID)
      {
         VG_(message)(Vg_DebugMsg,
                      "Context switch from thread %d to thread %d;"
                      " segments: %llu\n",
                      DRD_(g_drd_running_tid), drd_tid,
                      DRD_(sg_get_segments_alive_count)());
      }
      s_vg_running_tid = vg_tid;
      DRD_(g_drd_running_tid) = drd_tid;
      /* The conflict set is only valid for one thread at a time, so it has
         to be recomputed for the thread that is now running. */
      thread_compute_conflict_set(&DRD_(g_conflict_set), drd_tid);
      s_context_switch_count++;
   }

   tl_assert(s_vg_running_tid != VG_INVALID_THREADID);
   tl_assert(DRD_(g_drd_running_tid) != DRD_INVALID_THREADID);
}
672
673/**
674 * Increase the synchronization nesting counter. Must be called before the
675 * client calls a synchronization function.
676 */
677int DRD_(thread_enter_synchr)(const DrdThreadId tid)
678{
679   tl_assert(DRD_(IsValidDrdThreadId)(tid));
680   return DRD_(g_threadinfo)[tid].synchr_nesting++;
681}
682
683/**
684 * Decrease the synchronization nesting counter. Must be called after the
685 * client left a synchronization function.
686 */
687int DRD_(thread_leave_synchr)(const DrdThreadId tid)
688{
689   tl_assert(DRD_(IsValidDrdThreadId)(tid));
690   tl_assert(DRD_(g_threadinfo)[tid].synchr_nesting >= 1);
691   return --DRD_(g_threadinfo)[tid].synchr_nesting;
692}
693
694/** Returns the synchronization nesting counter. */
695int DRD_(thread_get_synchr_nesting_count)(const DrdThreadId tid)
696{
697   tl_assert(DRD_(IsValidDrdThreadId)(tid));
698   return DRD_(g_threadinfo)[tid].synchr_nesting;
699}
700
701/** Append a new segment at the end of the segment list. */
702static
703void thread_append_segment(const DrdThreadId tid, Segment* const sg)
704{
705   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
706             && tid != DRD_INVALID_THREADID);
707
708#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
709   tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
710#endif
711
712   sg->prev = DRD_(g_threadinfo)[tid].last;
713   sg->next = 0;
714   if (DRD_(g_threadinfo)[tid].last)
715      DRD_(g_threadinfo)[tid].last->next = sg;
716   DRD_(g_threadinfo)[tid].last = sg;
717   if (DRD_(g_threadinfo)[tid].first == 0)
718      DRD_(g_threadinfo)[tid].first = sg;
719
720#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
721   tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
722#endif
723}
724
725/**
726 * Remove a segment from the segment list of thread threadid, and free the
727 * associated memory.
728 */
729static
730void thread_discard_segment(const DrdThreadId tid, Segment* const sg)
731{
732   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
733             && tid != DRD_INVALID_THREADID);
734
735#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
736   tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
737#endif
738
739   if (sg->prev)
740      sg->prev->next = sg->next;
741   if (sg->next)
742      sg->next->prev = sg->prev;
743   if (sg == DRD_(g_threadinfo)[tid].first)
744      DRD_(g_threadinfo)[tid].first = sg->next;
745   if (sg == DRD_(g_threadinfo)[tid].last)
746      DRD_(g_threadinfo)[tid].last = sg->prev;
747   DRD_(sg_put)(sg);
748
749#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
750   tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
751#endif
752}
753
754/**
755 * Returns a pointer to the vector clock of the most recent segment associated
756 * with thread 'tid'.
757 */
758VectorClock* DRD_(thread_get_vc)(const DrdThreadId tid)
759{
760   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
761             && tid != DRD_INVALID_THREADID);
762   tl_assert(DRD_(g_threadinfo)[tid].last);
763   return &DRD_(g_threadinfo)[tid].last->vc;
764}
765
766/**
767 * Return the latest segment of thread 'tid' and increment its reference count.
768 */
769void DRD_(thread_get_latest_segment)(Segment** sg, const DrdThreadId tid)
770{
771   tl_assert(sg);
772   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
773             && tid != DRD_INVALID_THREADID);
774   tl_assert(DRD_(g_threadinfo)[tid].last);
775
776   DRD_(sg_put)(*sg);
777   *sg = DRD_(sg_get)(DRD_(g_threadinfo)[tid].last);
778}
779
780/**
781 * Compute the minimum of all latest vector clocks of all threads
782 * (Michiel Ronsse calls this "clock snooping" in his papers about DIOTA).
783 *
784 * @param vc pointer to a vectorclock, holds result upon return.
785 */
786static void DRD_(thread_compute_minimum_vc)(VectorClock* vc)
787{
788   unsigned i;
789   Bool first;
790   Segment* latest_sg;
791
792   first = True;
793   for (i = 0; i < DRD_N_THREADS; i++)
794   {
795      latest_sg = DRD_(g_threadinfo)[i].last;
796      if (latest_sg)
797      {
798         if (first)
799            DRD_(vc_assign)(vc, &latest_sg->vc);
800         else
801            DRD_(vc_min)(vc, &latest_sg->vc);
802         first = False;
803      }
804   }
805}
806
807/**
808 * Compute the maximum of all latest vector clocks of all threads.
809 *
810 * @param vc pointer to a vectorclock, holds result upon return.
811 */
812static void DRD_(thread_compute_maximum_vc)(VectorClock* vc)
813{
814   unsigned i;
815   Bool first;
816   Segment* latest_sg;
817
818   first = True;
819   for (i = 0; i < DRD_N_THREADS; i++)
820   {
821      latest_sg = DRD_(g_threadinfo)[i].last;
822      if (latest_sg)
823      {
824         if (first)
825            DRD_(vc_assign)(vc, &latest_sg->vc);
826         else
827            DRD_(vc_combine)(vc, &latest_sg->vc);
828         first = False;
829      }
830   }
831}
832
/**
 * Discard all segments that have a defined order against the latest vector
 * clock of all threads -- these segments can no longer be involved in a
 * data race.
 */
static void thread_discard_ordered_segments(void)
{
   unsigned i;
   VectorClock thread_vc_min;

   s_discard_ordered_segments_count++;

   DRD_(vc_init)(&thread_vc_min, 0, 0);
   DRD_(thread_compute_minimum_vc)(&thread_vc_min);
   if (DRD_(sg_get_trace)())
   {
      char *vc_min, *vc_max;
      VectorClock thread_vc_max;

      DRD_(vc_init)(&thread_vc_max, 0, 0);
      DRD_(thread_compute_maximum_vc)(&thread_vc_max);
      vc_min = DRD_(vc_aprint)(&thread_vc_min);
      vc_max = DRD_(vc_aprint)(&thread_vc_max);
      VG_(message)(Vg_DebugMsg,
                   "Discarding ordered segments -- min vc is %s, max vc is %s\n",
                   vc_min, vc_max);
      VG_(free)(vc_min);
      VG_(free)(vc_max);
      DRD_(vc_cleanup)(&thread_vc_max);
   }

   for (i = 0; i < DRD_N_THREADS; i++)
   {
      Segment* sg;
      Segment* sg_next;
      /* Walk from the oldest segment forward. Note: sg_next is captured in
         the loop condition before sg is discarded, and the '(sg_next =
         sg->next)' test also ensures that the last segment of a thread is
         never discarded. */
      for (sg = DRD_(g_threadinfo)[i].first;
           sg && (sg_next = sg->next) && DRD_(vc_lte)(&sg->vc, &thread_vc_min);
           sg = sg_next)
      {
         thread_discard_segment(i, sg);
      }
   }
   DRD_(vc_cleanup)(&thread_vc_min);
}
877
/**
 * An implementation of the property 'equiv(sg1, sg2)' as defined in the paper
 * by Mark Christiaens e.a. The property equiv(sg1, sg2) holds if and only if
 * all segments in the set CS are ordered consistently against both sg1 and
 * sg2. The set CS is defined as the set of segments that can immediately
 * precede future segments via inter-thread synchronization operations. In
 * DRD the set CS consists of the latest segment of each thread combined with
 * all segments for which the reference count is strictly greater than one.
 * The code below is an optimized version of the following:
 *
 * for (i = 0; i < DRD_N_THREADS; i++)
 * {
 *    Segment* sg;
 *
 *    for (sg = DRD_(g_threadinfo)[i].first; sg; sg = sg->next)
 *    {
 *       if (sg == DRD_(g_threadinfo)[i].last || DRD_(sg_get_refcnt)(sg) > 1)
 *       {
 *          if (   DRD_(vc_lte)(&sg1->vc, &sg->vc)
 *              != DRD_(vc_lte)(&sg2->vc, &sg->vc)
 *              || DRD_(vc_lte)(&sg->vc, &sg1->vc)
 *              != DRD_(vc_lte)(&sg->vc, &sg2->vc))
 *          {
 *             return False;
 *          }
 *       }
 *    }
 * }
 *
 * NOTE(review): the 'tid' parameter is not used by this implementation.
 */
static Bool thread_consistent_segment_ordering(const DrdThreadId tid,
                                               Segment* const sg1,
                                               Segment* const sg2)
{
   unsigned i;

   /* sg1 and sg2 must be consecutive segments of the same thread, with
      sg1 ordered before sg2. */
   tl_assert(sg1->next);
   tl_assert(sg2->next);
   tl_assert(sg1->next == sg2);
   tl_assert(DRD_(vc_lte)(&sg1->vc, &sg2->vc));

   for (i = 0; i < DRD_N_THREADS; i++)
   {
      Segment* sg;

      /* Forward scan: stop as soon as sg2 <= sg (presumably safe because
         vector clocks are non-decreasing along a thread's segment list --
         every later segment is then ordered after sg2, and hence after sg1
         as well). A member of CS ordered after sg1 but not after sg2 breaks
         the equivalence. */
      for (sg = DRD_(g_threadinfo)[i].first; sg; sg = sg->next)
      {
         if (! sg->next || DRD_(sg_get_refcnt)(sg) > 1)
         {
            if (DRD_(vc_lte)(&sg2->vc, &sg->vc))
               break;
            if (DRD_(vc_lte)(&sg1->vc, &sg->vc))
               return False;
         }
      }
      /* Backward scan: the mirror image -- stop once sg <= sg1; a member of
         CS ordered before sg2 but not before sg1 breaks the equivalence. */
      for (sg = DRD_(g_threadinfo)[i].last; sg; sg = sg->prev)
      {
         if (! sg->next || DRD_(sg_get_refcnt)(sg) > 1)
         {
            if (DRD_(vc_lte)(&sg->vc, &sg1->vc))
               break;
            if (DRD_(vc_lte)(&sg->vc, &sg2->vc))
               return False;
         }
      }
   }
   return True;
}
945
946/**
947 * Merge all segments that may be merged without triggering false positives
948 * or discarding real data races. For the theoretical background of segment
949 * merging, see also the following paper: Mark Christiaens, Michiel Ronsse
950 * and Koen De Bosschere. Bounding the number of segment histories during
951 * data race detection. Parallel Computing archive, Volume 28, Issue 9,
952 * pp 1221-1238, September 2002. This paper contains a proof that merging
953 * consecutive segments for which the property equiv(s1,s2) holds can be
954 * merged without reducing the accuracy of datarace detection. Furthermore
955 * it is also proven that the total number of all segments will never grow
956 * unbounded if all segments s1, s2 for which equiv(s1, s2) holds are merged
957 * every time a new segment is created. The property equiv(s1, s2) is defined
958 * as follows: equiv(s1, s2) <=> for all segments in the set CS, the vector
959 * clocks of segments s and s1 are ordered in the same way as those of segments
960 * s and s2. The set CS is defined as the set of existing segments s that have
961 * the potential to conflict with not yet created segments, either because the
962 * segment s is the latest segment of a thread or because it can become the
963 * immediate predecessor of a new segment due to a synchronization operation.
964 */
965static void thread_merge_segments(void)
966{
967   unsigned i;
968
969   s_new_segments_since_last_merge = 0;
970
971   for (i = 0; i < DRD_N_THREADS; i++)
972   {
973      Segment* sg;
974
975#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
976      tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[i]));
977#endif
978
979      for (sg = DRD_(g_threadinfo)[i].first; sg; sg = sg->next)
980      {
981         if (DRD_(sg_get_refcnt)(sg) == 1
982             && sg->next
983             && DRD_(sg_get_refcnt)(sg->next) == 1
984             && sg->next->next
985             && thread_consistent_segment_ordering(i, sg, sg->next))
986         {
987            /* Merge sg and sg->next into sg. */
988            DRD_(sg_merge)(sg, sg->next);
989            thread_discard_segment(i, sg->next);
990         }
991      }
992
993#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
994      tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[i]));
995#endif
996   }
997}
998
999/**
1000 * Create a new segment for the specified thread, and discard any segments
1001 * that cannot cause races anymore.
1002 */
1003void DRD_(thread_new_segment)(const DrdThreadId tid)
1004{
1005   Segment* last_sg;
1006   Segment* new_sg;
1007
1008   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
1009             && tid != DRD_INVALID_THREADID);
1010   tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));
1011
1012   last_sg = DRD_(g_threadinfo)[tid].last;
1013   new_sg = DRD_(sg_new)(tid, tid);
1014   thread_append_segment(tid, new_sg);
1015   if (tid == DRD_(g_drd_running_tid) && last_sg)
1016   {
1017      DRD_(thread_update_conflict_set)(tid, &last_sg->vc);
1018      s_update_conflict_set_new_sg_count++;
1019   }
1020
1021   tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));
1022
1023   if (s_segment_merging
1024       && ++s_new_segments_since_last_merge >= s_segment_merge_interval)
1025   {
1026      thread_discard_ordered_segments();
1027      thread_merge_segments();
1028   }
1029}
1030
1031/** Call this function after thread 'joiner' joined thread 'joinee'. */
1032void DRD_(thread_combine_vc_join)(DrdThreadId joiner, DrdThreadId joinee)
1033{
1034   tl_assert(joiner != joinee);
1035   tl_assert(0 <= (int)joiner && joiner < DRD_N_THREADS
1036             && joiner != DRD_INVALID_THREADID);
1037   tl_assert(0 <= (int)joinee && joinee < DRD_N_THREADS
1038             && joinee != DRD_INVALID_THREADID);
1039   tl_assert(DRD_(g_threadinfo)[joiner].last);
1040   tl_assert(DRD_(g_threadinfo)[joinee].last);
1041
1042   if (DRD_(sg_get_trace)())
1043   {
1044      char *str1, *str2;
1045      str1 = DRD_(vc_aprint)(&DRD_(g_threadinfo)[joiner].last->vc);
1046      str2 = DRD_(vc_aprint)(&DRD_(g_threadinfo)[joinee].last->vc);
1047      VG_(message)(Vg_DebugMsg, "Before join: joiner %s, joinee %s\n",
1048                   str1, str2);
1049      VG_(free)(str1);
1050      VG_(free)(str2);
1051   }
1052   if (joiner == DRD_(g_drd_running_tid))
1053   {
1054      VectorClock old_vc;
1055
1056      DRD_(vc_copy)(&old_vc, &DRD_(g_threadinfo)[joiner].last->vc);
1057      DRD_(vc_combine)(&DRD_(g_threadinfo)[joiner].last->vc,
1058                       &DRD_(g_threadinfo)[joinee].last->vc);
1059      DRD_(thread_update_conflict_set)(joiner, &old_vc);
1060      s_update_conflict_set_join_count++;
1061      DRD_(vc_cleanup)(&old_vc);
1062   }
1063   else
1064   {
1065      DRD_(vc_combine)(&DRD_(g_threadinfo)[joiner].last->vc,
1066                       &DRD_(g_threadinfo)[joinee].last->vc);
1067   }
1068
1069   thread_discard_ordered_segments();
1070
1071   if (DRD_(sg_get_trace)())
1072   {
1073      char* str;
1074      str = DRD_(vc_aprint)(&DRD_(g_threadinfo)[joiner].last->vc);
1075      VG_(message)(Vg_DebugMsg, "After join: %s\n", str);
1076      VG_(free)(str);
1077   }
1078}
1079
1080/**
1081 * Update the vector clock of the last segment of thread tid with the
1082 * the vector clock of segment sg.
1083 */
1084static void thread_combine_vc_sync(DrdThreadId tid, const Segment* sg)
1085{
1086   const VectorClock* const vc = &sg->vc;
1087
1088   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
1089             && tid != DRD_INVALID_THREADID);
1090   tl_assert(DRD_(g_threadinfo)[tid].last);
1091   tl_assert(sg);
1092   tl_assert(vc);
1093
1094   if (tid != sg->tid)
1095   {
1096      VectorClock old_vc;
1097
1098      DRD_(vc_copy)(&old_vc, &DRD_(g_threadinfo)[tid].last->vc);
1099      DRD_(vc_combine)(&DRD_(g_threadinfo)[tid].last->vc, vc);
1100      if (DRD_(sg_get_trace)())
1101      {
1102         char *str1, *str2;
1103         str1 = DRD_(vc_aprint)(&old_vc);
1104         str2 = DRD_(vc_aprint)(&DRD_(g_threadinfo)[tid].last->vc);
1105         VG_(message)(Vg_DebugMsg, "thread %d: vc %s -> %s\n", tid, str1, str2);
1106         VG_(free)(str1);
1107         VG_(free)(str2);
1108      }
1109
1110      thread_discard_ordered_segments();
1111
1112      DRD_(thread_update_conflict_set)(tid, &old_vc);
1113      s_update_conflict_set_sync_count++;
1114
1115      DRD_(vc_cleanup)(&old_vc);
1116   }
1117   else
1118   {
1119      tl_assert(DRD_(vc_lte)(vc, &DRD_(g_threadinfo)[tid].last->vc));
1120   }
1121}
1122
1123/**
1124 * Create a new segment for thread tid and update the vector clock of the last
1125 * segment of this thread with the the vector clock of segment sg. Call this
1126 * function after thread tid had to wait because of thread synchronization
1127 * until the memory accesses in the segment sg finished.
1128 */
1129void DRD_(thread_new_segment_and_combine_vc)(DrdThreadId tid, const Segment* sg)
1130{
1131   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
1132             && tid != DRD_INVALID_THREADID);
1133   tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));
1134   tl_assert(sg);
1135
1136   thread_append_segment(tid, DRD_(sg_new)(tid, tid));
1137
1138   thread_combine_vc_sync(tid, sg);
1139
1140   if (s_segment_merging
1141       && ++s_new_segments_since_last_merge >= s_segment_merge_interval)
1142   {
1143      thread_discard_ordered_segments();
1144      thread_merge_segments();
1145   }
1146}
1147
1148/**
1149 * Call this function whenever a thread is no longer using the memory
1150 * [ a1, a2 [, e.g. because of a call to free() or a stack pointer
1151 * increase.
1152 */
1153void DRD_(thread_stop_using_mem)(const Addr a1, const Addr a2,
1154                                 const Bool dont_clear_access)
1155{
1156   DrdThreadId other_user;
1157   unsigned i;
1158
1159   /* For all threads, mark the range [ a1, a2 [ as no longer in use. */
1160   other_user = DRD_INVALID_THREADID;
1161   for (i = 0; i < DRD_N_THREADS; i++)
1162   {
1163      Segment* p;
1164      for (p = DRD_(g_threadinfo)[i].first; p; p = p->next)
1165      {
1166         if (other_user == DRD_INVALID_THREADID
1167             && i != DRD_(g_drd_running_tid))
1168         {
1169            if (UNLIKELY((!dont_clear_access
1170                          && DRD_(bm_test_and_clear)(DRD_(sg_bm)(p), a1, a2))
1171                         || (dont_clear_access
1172                             && DRD_(bm_has_any_access)(DRD_(sg_bm)(p), a1, a2))
1173                         ))
1174            {
1175               other_user = i;
1176            }
1177            continue;
1178         }
1179         if (!dont_clear_access)
1180            DRD_(bm_clear)(DRD_(sg_bm)(p), a1, a2);
1181      }
1182   }
1183
1184   /*
1185    * If any other thread had accessed memory in [ a1, a2 [, update the
1186    * conflict set.
1187    */
1188   if (other_user != DRD_INVALID_THREADID
1189       && DRD_(bm_has_any_access)(DRD_(g_conflict_set), a1, a2))
1190   {
1191      thread_compute_conflict_set(&DRD_(g_conflict_set),
1192                                  DRD_(thread_get_running_tid)());
1193   }
1194}
1195
/** Specify whether memory loads performed by thread tid should be recorded. */
void DRD_(thread_set_record_loads)(const DrdThreadId tid, const Bool enabled)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   /* Verify that 'enabled' is a proper boolean (either 0 or 1). */
   tl_assert(enabled == !! enabled);

   DRD_(g_threadinfo)[tid].is_recording_loads = enabled;
}
1205
/** Specify whether memory stores performed by thread tid should be recorded. */
void DRD_(thread_set_record_stores)(const DrdThreadId tid, const Bool enabled)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   /* Verify that 'enabled' is a proper boolean (either 0 or 1). */
   tl_assert(enabled == !! enabled);

   DRD_(g_threadinfo)[tid].is_recording_stores = enabled;
}
1215
1216/**
1217 * Print the segment information for all threads.
1218 *
1219 * This function is only used for debugging purposes.
1220 */
1221void DRD_(thread_print_all)(void)
1222{
1223   unsigned i;
1224   Segment* p;
1225
1226   for (i = 0; i < DRD_N_THREADS; i++)
1227   {
1228      if (DRD_(g_threadinfo)[i].first)
1229      {
1230         VG_(printf)("**************\n"
1231                     "* thread %3d (%d/%d/%d/0x%lx/%d) *\n"
1232                     "**************\n",
1233                     i,
1234                     DRD_(g_threadinfo)[i].vg_thread_exists,
1235                     DRD_(g_threadinfo)[i].vg_threadid,
1236                     DRD_(g_threadinfo)[i].posix_thread_exists,
1237                     DRD_(g_threadinfo)[i].pt_threadid,
1238                     DRD_(g_threadinfo)[i].detached_posix_thread);
1239         for (p = DRD_(g_threadinfo)[i].first; p; p = p->next)
1240         {
1241            DRD_(sg_print)(p);
1242         }
1243      }
1244   }
1245}
1246
1247/** Show a call stack involved in a data race. */
1248static void show_call_stack(const DrdThreadId tid,
1249                            const Char* const msg,
1250                            ExeContext* const callstack)
1251{
1252   const ThreadId vg_tid = DRD_(DrdThreadIdToVgThreadId)(tid);
1253
1254   VG_(message)(Vg_UserMsg, "%s (thread %d)\n", msg, tid);
1255
1256   if (vg_tid != VG_INVALID_THREADID)
1257   {
1258      if (callstack)
1259      {
1260         VG_(pp_ExeContext)(callstack);
1261      }
1262      else
1263      {
1264         VG_(get_and_pp_StackTrace)(vg_tid, VG_(clo_backtrace_size));
1265      }
1266   }
1267   else
1268   {
1269      VG_(message)(Vg_UserMsg,
1270                   "   (thread finished, call stack no longer available)\n");
1271   }
1272}
1273
/**
 * Print information about the segments involved in a data race.
 *
 * For every thread other than tid, report the segments that are unordered
 * against segment p and whose accesses conflict with the access of 'size'
 * bytes at address 'addr' of type 'access_type'.
 */
static void
thread_report_conflicting_segments_segment(const DrdThreadId tid,
                                           const Addr addr,
                                           const SizeT size,
                                           const BmAccessTypeT access_type,
                                           const Segment* const p)
{
   unsigned i;

   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(p);

   for (i = 0; i < DRD_N_THREADS; i++)
   {
      if (i != tid)
      {
         Segment* q;
         for (q = DRD_(g_threadinfo)[i].last; q; q = q->prev)
         {
            /*
             * Since q iterates over the segments of thread i in order of
             * decreasing vector clocks, if q->vc <= p->vc, then
             * q->next->vc <= p->vc will also hold. Hence, break out of the
             * loop once this condition is met.
             */
            if (DRD_(vc_lte)(&q->vc, &p->vc))
               break;
            /* Only segments unordered against p can conflict with it. */
            if (! DRD_(vc_lte)(&p->vc, &q->vc))
            {
               if (DRD_(bm_has_conflict_with)(DRD_(sg_bm)(q), addr, addr + size,
                                              access_type))
               {
                  tl_assert(q->stacktrace);
                  /* The race happened between the start of segment q and
                   * the start of its successor (if any). */
                  show_call_stack(i,        "Other segment start",
                                  q->stacktrace);
                  show_call_stack(i,        "Other segment end",
                                  q->next ? q->next->stacktrace : 0);
               }
            }
         }
      }
   }
}
1319
1320/** Print information about all segments involved in a data race. */
1321void DRD_(thread_report_conflicting_segments)(const DrdThreadId tid,
1322                                              const Addr addr,
1323                                              const SizeT size,
1324                                              const BmAccessTypeT access_type)
1325{
1326   Segment* p;
1327
1328   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
1329             && tid != DRD_INVALID_THREADID);
1330
1331   for (p = DRD_(g_threadinfo)[tid].first; p; p = p->next)
1332   {
1333      if (DRD_(bm_has)(DRD_(sg_bm)(p), addr, addr + size, access_type))
1334      {
1335         thread_report_conflicting_segments_segment(tid, addr, size,
1336                                                    access_type, p);
1337      }
1338   }
1339}
1340
1341/**
1342 * Verify whether the conflict set for thread tid is up to date. Only perform
1343 * the check if the environment variable DRD_VERIFY_CONFLICT_SET has been set.
1344 */
1345static Bool thread_conflict_set_up_to_date(const DrdThreadId tid)
1346{
1347   static int do_verify_conflict_set = -1;
1348   Bool result;
1349   struct bitmap* computed_conflict_set = 0;
1350
1351   if (do_verify_conflict_set < 0)
1352      do_verify_conflict_set = VG_(getenv)("DRD_VERIFY_CONFLICT_SET") != 0;
1353
1354   if (do_verify_conflict_set == 0)
1355      return True;
1356
1357   thread_compute_conflict_set(&computed_conflict_set, tid);
1358   result = DRD_(bm_equal)(DRD_(g_conflict_set), computed_conflict_set);
1359   if (! result)
1360   {
1361      VG_(printf)("actual conflict set:\n");
1362      DRD_(bm_print)(DRD_(g_conflict_set));
1363      VG_(printf)("\n");
1364      VG_(printf)("computed conflict set:\n");
1365      DRD_(bm_print)(computed_conflict_set);
1366      VG_(printf)("\n");
1367   }
1368   DRD_(bm_delete)(computed_conflict_set);
1369   return result;
1370}
1371
1372/**
1373 * Compute the conflict set: a bitmap that represents the union of all memory
1374 * accesses of all segments that are unordered to the current segment of the
1375 * thread tid.
1376 */
1377static void thread_compute_conflict_set(struct bitmap** conflict_set,
1378                                        const DrdThreadId tid)
1379{
1380   Segment* p;
1381
1382   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
1383             && tid != DRD_INVALID_THREADID);
1384   tl_assert(tid == DRD_(g_drd_running_tid));
1385
1386   s_compute_conflict_set_count++;
1387   s_conflict_set_bitmap_creation_count
1388      -= DRD_(bm_get_bitmap_creation_count)();
1389   s_conflict_set_bitmap2_creation_count
1390      -= DRD_(bm_get_bitmap2_creation_count)();
1391
1392   if (*conflict_set)
1393   {
1394      DRD_(bm_cleanup)(*conflict_set);
1395      DRD_(bm_init)(*conflict_set);
1396   }
1397   else
1398   {
1399      *conflict_set = DRD_(bm_new)();
1400   }
1401
1402   if (s_trace_conflict_set)
1403   {
1404      char* str;
1405
1406      str = DRD_(vc_aprint)(&DRD_(g_threadinfo)[tid].last->vc);
1407      VG_(message)(Vg_DebugMsg,
1408                   "computing conflict set for thread %d with vc %s\n",
1409                   tid, str);
1410      VG_(free)(str);
1411   }
1412
1413   p = DRD_(g_threadinfo)[tid].last;
1414   {
1415      unsigned j;
1416
1417      if (s_trace_conflict_set)
1418      {
1419         char* vc;
1420
1421         vc = DRD_(vc_aprint)(&p->vc);
1422         VG_(message)(Vg_DebugMsg, "conflict set: thread [%d] at vc %s\n",
1423                      tid, vc);
1424         VG_(free)(vc);
1425      }
1426
1427      for (j = 0; j < DRD_N_THREADS; j++)
1428      {
1429         if (j != tid && DRD_(IsValidDrdThreadId)(j))
1430         {
1431            Segment* q;
1432            for (q = DRD_(g_threadinfo)[j].last; q; q = q->prev)
1433            {
1434               if (! DRD_(vc_lte)(&q->vc, &p->vc)
1435                   && ! DRD_(vc_lte)(&p->vc, &q->vc))
1436               {
1437                  if (s_trace_conflict_set)
1438                  {
1439                     char* str;
1440
1441                     str = DRD_(vc_aprint)(&q->vc);
1442                     VG_(message)(Vg_DebugMsg,
1443                                  "conflict set: [%d] merging segment %s\n",
1444                                  j, str);
1445                     VG_(free)(str);
1446                  }
1447                  DRD_(bm_merge2)(*conflict_set, DRD_(sg_bm)(q));
1448               }
1449               else
1450               {
1451                  if (s_trace_conflict_set)
1452                  {
1453                     char* str;
1454
1455                     str = DRD_(vc_aprint)(&q->vc);
1456                     VG_(message)(Vg_DebugMsg,
1457                                  "conflict set: [%d] ignoring segment %s\n",
1458                                  j, str);
1459                     VG_(free)(str);
1460                  }
1461               }
1462            }
1463         }
1464      }
1465   }
1466
1467   s_conflict_set_bitmap_creation_count
1468      += DRD_(bm_get_bitmap_creation_count)();
1469   s_conflict_set_bitmap2_creation_count
1470      += DRD_(bm_get_bitmap2_creation_count)();
1471
1472   if (s_trace_conflict_set_bm)
1473   {
1474      VG_(message)(Vg_DebugMsg, "[%d] new conflict set:\n", tid);
1475      DRD_(bm_print)(*conflict_set);
1476      VG_(message)(Vg_DebugMsg, "[%d] end of new conflict set.\n", tid);
1477   }
1478}
1479
1480/**
1481 * Update the conflict set after the vector clock of thread tid has been
1482 * updated from old_vc to its current value, either because a new segment has
1483 * been created or because of a synchronization operation.
1484 */
1485void DRD_(thread_update_conflict_set)(const DrdThreadId tid,
1486                                      const VectorClock* const old_vc)
1487{
1488   const VectorClock* new_vc;
1489   Segment* p;
1490   unsigned j;
1491
1492   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
1493             && tid != DRD_INVALID_THREADID);
1494   tl_assert(old_vc);
1495   tl_assert(tid == DRD_(g_drd_running_tid));
1496   tl_assert(DRD_(g_conflict_set));
1497
1498   if (s_trace_conflict_set)
1499   {
1500      char* str;
1501
1502      str = DRD_(vc_aprint)(&DRD_(g_threadinfo)[tid].last->vc);
1503      VG_(message)(Vg_DebugMsg,
1504                   "updating conflict set for thread %d with vc %s\n",
1505                   tid, str);
1506      VG_(free)(str);
1507   }
1508
1509   new_vc = &DRD_(g_threadinfo)[tid].last->vc;
1510
1511   DRD_(bm_unmark)(DRD_(g_conflict_set));
1512
1513   for (j = 0; j < DRD_N_THREADS; j++)
1514   {
1515      Segment* q;
1516
1517      if (j == tid || ! DRD_(IsValidDrdThreadId)(j))
1518         continue;
1519
1520      for (q = DRD_(g_threadinfo)[j].last; q; q = q->prev)
1521      {
1522         const int included_in_old_conflict_set
1523            = ! DRD_(vc_lte)(&q->vc, old_vc)
1524            && ! DRD_(vc_lte)(old_vc, &q->vc);
1525         const int included_in_new_conflict_set
1526            = ! DRD_(vc_lte)(&q->vc, new_vc)
1527            && ! DRD_(vc_lte)(new_vc, &q->vc);
1528         if (included_in_old_conflict_set != included_in_new_conflict_set)
1529         {
1530            if (s_trace_conflict_set)
1531            {
1532               char* str;
1533
1534               str = DRD_(vc_aprint)(&q->vc);
1535               VG_(message)(Vg_DebugMsg,
1536                            "conflict set: [%d] merging segment %s\n", j, str);
1537               VG_(free)(str);
1538            }
1539            DRD_(bm_mark)(DRD_(g_conflict_set), DRD_(sg_bm)(q));
1540         }
1541         else
1542         {
1543            if (s_trace_conflict_set)
1544            {
1545               char* str;
1546
1547               str = DRD_(vc_aprint)(&q->vc);
1548               VG_(message)(Vg_DebugMsg,
1549                            "conflict set: [%d] ignoring segment %s\n", j, str);
1550               VG_(free)(str);
1551            }
1552         }
1553      }
1554   }
1555
1556   DRD_(bm_clear_marked)(DRD_(g_conflict_set));
1557
1558   p = DRD_(g_threadinfo)[tid].last;
1559   {
1560      for (j = 0; j < DRD_N_THREADS; j++)
1561      {
1562         if (j != tid && DRD_(IsValidDrdThreadId)(j))
1563         {
1564            Segment* q;
1565            for (q = DRD_(g_threadinfo)[j].last; q; q = q->prev)
1566            {
1567               if (! DRD_(vc_lte)(&q->vc, &p->vc)
1568                   && ! DRD_(vc_lte)(&p->vc, &q->vc))
1569               {
1570                  DRD_(bm_merge2_marked)(DRD_(g_conflict_set), DRD_(sg_bm)(q));
1571               }
1572            }
1573         }
1574      }
1575   }
1576
1577   DRD_(bm_remove_cleared_marked)(DRD_(g_conflict_set));
1578
1579   s_update_conflict_set_count++;
1580
1581   if (s_trace_conflict_set_bm)
1582   {
1583      VG_(message)(Vg_DebugMsg, "[%d] updated conflict set:\n", tid);
1584      DRD_(bm_print)(DRD_(g_conflict_set));
1585      VG_(message)(Vg_DebugMsg, "[%d] end of updated conflict set.\n", tid);
1586   }
1587
1588   tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));
1589}
1590
/** Report the number of context switches performed. */
ULong DRD_(thread_get_context_switch_count)(void)
{
   /* Read-only accessor for the s_context_switch_count statistic. */
   return s_context_switch_count;
}
1596
/** Report the number of ordered segments that have been discarded. */
ULong DRD_(thread_get_discard_ordered_segments_count)(void)
{
   /* Read-only accessor for the s_discard_ordered_segments_count statistic. */
   return s_discard_ordered_segments_count;
}
1602
1603/** Return how many times the conflict set has been updated entirely. */
1604ULong DRD_(thread_get_compute_conflict_set_count)()
1605{
1606   return s_compute_conflict_set_count;
1607}
1608
/** Return how many times the conflict set has been updated partially. */
ULong DRD_(thread_get_update_conflict_set_count)(void)
{
   /* Read-only accessor for the s_update_conflict_set_count statistic. */
   return s_update_conflict_set_count;
}
1614
1615/**
1616 * Return how many times the conflict set has been updated partially
1617 * because a new segment has been created.
1618 */
1619ULong DRD_(thread_get_update_conflict_set_new_sg_count)(void)
1620{
1621   return s_update_conflict_set_new_sg_count;
1622}
1623
1624/**
1625 * Return how many times the conflict set has been updated partially
1626 * because of combining vector clocks due to synchronization operations
1627 * other than reader/writer lock or barrier operations.
1628 */
1629ULong DRD_(thread_get_update_conflict_set_sync_count)(void)
1630{
1631   return s_update_conflict_set_sync_count;
1632}
1633
1634/**
1635 * Return how many times the conflict set has been updated partially
1636 * because of thread joins.
1637 */
1638ULong DRD_(thread_get_update_conflict_set_join_count)(void)
1639{
1640   return s_update_conflict_set_join_count;
1641}
1642
1643/**
1644 * Return the number of first-level bitmaps that have been created during
1645 * conflict set updates.
1646 */
1647ULong DRD_(thread_get_conflict_set_bitmap_creation_count)(void)
1648{
1649   return s_conflict_set_bitmap_creation_count;
1650}
1651
1652/**
1653 * Return the number of second-level bitmaps that have been created during
1654 * conflict set updates.
1655 */
1656ULong DRD_(thread_get_conflict_set_bitmap2_creation_count)(void)
1657{
1658   return s_conflict_set_bitmap2_creation_count;
1659}
1660