drd_thread.c revision 9b2974ad8c14abb2a0cbcbc66e43f9d97d3deacc
1/*
2  This file is part of drd, a data race detector.
3
4  Copyright (C) 2006-2008 Bart Van Assche
5  bart.vanassche@gmail.com
6
7  This program is free software; you can redistribute it and/or
8  modify it under the terms of the GNU General Public License as
9  published by the Free Software Foundation; either version 2 of the
10  License, or (at your option) any later version.
11
12  This program is distributed in the hope that it will be useful, but
13  WITHOUT ANY WARRANTY; without even the implied warranty of
14  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  General Public License for more details.
16
17  You should have received a copy of the GNU General Public License
18  along with this program; if not, write to the Free Software
19  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
20  02111-1307, USA.
21
22  The GNU General Public License is contained in the file COPYING.
23*/
24
25
26#include "drd_error.h"
27#include "drd_segment.h"
28#include "drd_suppression.h"
29#include "drd_thread.h"
30#include "pub_tool_vki.h"
31#include "pub_tool_basics.h"      // Addr, SizeT
32#include "pub_tool_errormgr.h"    // VG_(unique_error)()
33#include "pub_tool_libcassert.h"  // tl_assert()
34#include "pub_tool_libcbase.h"    // VG_(strlen)()
35#include "pub_tool_libcprint.h"   // VG_(printf)()
36#include "pub_tool_libcproc.h"    // VG_(getenv)()
37#include "pub_tool_machine.h"
38#include "pub_tool_mallocfree.h"  // VG_(malloc)(), VG_(free)()
39#include "pub_tool_options.h"     // VG_(clo_backtrace_size)
40#include "pub_tool_threadstate.h" // VG_(get_pthread_id)()
41
42
43
// Local functions.

static void thread_append_segment(const DrdThreadId tid,
                                  Segment* const sg);
static void thread_discard_segment(const DrdThreadId tid, Segment* const sg);
static Bool thread_conflict_set_up_to_date(const DrdThreadId tid);
static void thread_compute_conflict_set(struct bitmap** conflict_set,
                                        const DrdThreadId tid);


// Local variables.

/* Statistics counters, reported via the thread_get_*_count() accessors. */
static ULong s_context_switch_count;
static ULong s_discard_ordered_segments_count;
static ULong s_update_conflict_set_count;
static ULong s_conflict_set_new_segment_count;
static ULong s_conflict_set_combine_vc_count;
/* Net number of bitmaps created during conflict-set computation; updated by
   subtracting the creation counters before and adding them after. */
static ULong s_conflict_set_bitmap_creation_count;
static ULong s_conflict_set_bitmap2_creation_count;
/* Valgrind thread ID of the currently scheduled client thread. */
static ThreadId    s_vg_running_tid  = VG_INVALID_THREADID;
/* DRD thread ID of the currently scheduled client thread.
   NOTE(review): not static — presumably referenced from a header; verify. */
DrdThreadId s_drd_running_tid = DRD_INVALID_THREADID;
/* Per-thread bookkeeping, indexed by DrdThreadId. ID allocation starts
   scanning at index 1 (see VgThreadIdToNewDrdThreadId()). */
ThreadInfo s_threadinfo[DRD_N_THREADS];
/* Union of all memory accesses that conflict with the accesses of the
   currently running thread; recomputed on context switches. */
struct bitmap* s_conflict_set;
/* Tracing switches, set via the thread_trace_*() / thread_set_*() calls. */
static Bool s_trace_context_switches = False;
static Bool s_trace_conflict_set = False;
static Bool s_segment_merging = True;
70
71
72// Function definitions.
73
/** Enable or disable tracing of context switches. */
void thread_trace_context_switches(const Bool t)
{
  s_trace_context_switches = t;
}
78
/** Enable or disable tracing of conflict-set computation. */
void thread_trace_conflict_set(const Bool t)
{
  s_trace_conflict_set = t;
}
83
/** Enable or disable segment merging (see thread_merge_segments()). */
void thread_set_segment_merging(const Bool m)
{
  s_segment_merging = m;
}
88
89/**
90 * Convert Valgrind's ThreadId into a DrdThreadId. Report failure if
91 * Valgrind's ThreadId does not yet exist.
92 **/
93DrdThreadId VgThreadIdToDrdThreadId(const ThreadId tid)
94{
95  int i;
96
97  if (tid == VG_INVALID_THREADID)
98    return DRD_INVALID_THREADID;
99
100  for (i = 1; i < DRD_N_THREADS; i++)
101  {
102    if (s_threadinfo[i].vg_thread_exists == True
103        && s_threadinfo[i].vg_threadid == tid)
104    {
105      return i;
106    }
107  }
108
109  return DRD_INVALID_THREADID;
110}
111
112static
113DrdThreadId VgThreadIdToNewDrdThreadId(const ThreadId tid)
114{
115  int i;
116
117  tl_assert(VgThreadIdToDrdThreadId(tid) == DRD_INVALID_THREADID);
118
119  for (i = 1; i < DRD_N_THREADS; i++)
120  {
121    if (s_threadinfo[i].vg_thread_exists == False
122        && s_threadinfo[i].posix_thread_exists == False
123        && s_threadinfo[i].detached_posix_thread == False)
124    {
125      s_threadinfo[i].vg_thread_exists = True;
126      s_threadinfo[i].vg_threadid   = tid;
127      s_threadinfo[i].pt_threadid   = INVALID_POSIX_THREADID;
128      s_threadinfo[i].stack_min     = 0;
129      s_threadinfo[i].stack_min_min = 0;
130      s_threadinfo[i].stack_startup = 0;
131      s_threadinfo[i].stack_max     = 0;
132      s_threadinfo[i].is_recording  = True;
133      s_threadinfo[i].synchr_nesting = 0;
134      if (s_threadinfo[i].first != 0)
135        VG_(printf)("drd thread id = %d\n", i);
136      tl_assert(s_threadinfo[i].first == 0);
137      tl_assert(s_threadinfo[i].last == 0);
138      return i;
139    }
140  }
141
142  tl_assert(False);
143
144  return DRD_INVALID_THREADID;
145}
146
147DrdThreadId PtThreadIdToDrdThreadId(const PThreadId tid)
148{
149  int i;
150
151  tl_assert(tid != INVALID_POSIX_THREADID);
152
153  for (i = 1; i < DRD_N_THREADS; i++)
154  {
155    if (s_threadinfo[i].posix_thread_exists
156        && s_threadinfo[i].pt_threadid == tid)
157    {
158      return i;
159    }
160  }
161  return DRD_INVALID_THREADID;
162}
163
164ThreadId DrdThreadIdToVgThreadId(const DrdThreadId tid)
165{
166  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
167            && tid != DRD_INVALID_THREADID);
168  return (s_threadinfo[tid].vg_thread_exists
169          ? s_threadinfo[tid].vg_threadid
170          : VG_INVALID_THREADID);
171}
172
#if 0
/** Sanity check of the doubly linked list of segments referenced by a
 *  ThreadInfo struct.
 *  @return True if sane, False if not.
 */
static Bool sane_ThreadInfo(const ThreadInfo* const ti)
{
  Segment* p;
  /* Forward walk: every node's successor must point back to it, and the
     forward walk must terminate at ti->last. */
  for (p = ti->first; p; p = p->next) {
    if (p->next && p->next->prev != p)
      return False;
    if (p->next == 0 && p != ti->last)
      return False;
  }
  /* Backward walk: mirror image of the checks above. */
  for (p = ti->last; p; p = p->prev) {
    if (p->prev && p->prev->next != p)
      return False;
    if (p->prev == 0 && p != ti->first)
      return False;
  }
  return True;
}
#endif
196
/** Allocate a DRD thread ID for the Valgrind thread 'vg_created' and give
 *  it an initial segment derived from the creating thread 'creator'.
 *  Call this before the created thread performs any memory access.
 *  @return the newly allocated DRD thread ID.
 */
DrdThreadId thread_pre_create(const DrdThreadId creator,
                              const ThreadId vg_created)
{
  DrdThreadId created;

  tl_assert(VgThreadIdToDrdThreadId(vg_created) == DRD_INVALID_THREADID);
  created = VgThreadIdToNewDrdThreadId(vg_created);
  tl_assert(0 <= (int)created && created < DRD_N_THREADS
            && created != DRD_INVALID_THREADID);

  /* The new thread must not own any segments yet; hand it its first one. */
  tl_assert(s_threadinfo[created].first == 0);
  tl_assert(s_threadinfo[created].last == 0);
  thread_append_segment(created, sg_new(creator, created));

  return created;
}
213
214/** Allocate the first segment for a thread. Call this just after
215 *  pthread_create().
216 */
217DrdThreadId thread_post_create(const ThreadId vg_created)
218{
219  const DrdThreadId created = VgThreadIdToDrdThreadId(vg_created);
220
221  tl_assert(0 <= (int)created && created < DRD_N_THREADS
222            && created != DRD_INVALID_THREADID);
223
224  s_threadinfo[created].stack_max     = VG_(thread_get_stack_max)(vg_created);
225  s_threadinfo[created].stack_startup = s_threadinfo[created].stack_max;
226  s_threadinfo[created].stack_min     = s_threadinfo[created].stack_max;
227  s_threadinfo[created].stack_min_min = s_threadinfo[created].stack_max;
228  s_threadinfo[created].stack_size    = VG_(thread_get_stack_size)(vg_created);
229  tl_assert(s_threadinfo[created].stack_max != 0);
230
231  return created;
232}
233
234/* NPTL hack: NPTL allocates the 'struct pthread' on top of the stack,     */
235/* and accesses this data structure from multiple threads without locking. */
236/* Any conflicting accesses in the range stack_startup..stack_max will be  */
237/* ignored.                                                                */
238void thread_set_stack_startup(const DrdThreadId tid, const Addr stack_startup)
239{
240  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
241            && tid != DRD_INVALID_THREADID);
242  tl_assert(s_threadinfo[tid].stack_min <= stack_startup);
243  tl_assert(stack_startup <= s_threadinfo[tid].stack_max);
244  s_threadinfo[tid].stack_startup = stack_startup;
245}
246
247Addr thread_get_stack_min(const DrdThreadId tid)
248{
249  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
250            && tid != DRD_INVALID_THREADID);
251  return s_threadinfo[tid].stack_min;
252}
253
254Addr thread_get_stack_min_min(const DrdThreadId tid)
255{
256  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
257            && tid != DRD_INVALID_THREADID);
258  return s_threadinfo[tid].stack_min_min;
259}
260
261Addr thread_get_stack_max(const DrdThreadId tid)
262{
263  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
264            && tid != DRD_INVALID_THREADID);
265  return s_threadinfo[tid].stack_max;
266}
267
268SizeT thread_get_stack_size(const DrdThreadId tid)
269{
270  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
271            && tid != DRD_INVALID_THREADID);
272  return s_threadinfo[tid].stack_size;
273}
274
275/** Clean up thread-specific data structures. Call this just after
276 *  pthread_join().
277 */
278void thread_delete(const DrdThreadId tid)
279{
280  Segment* sg;
281  Segment* sg_prev;
282
283  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
284            && tid != DRD_INVALID_THREADID);
285  tl_assert(s_threadinfo[tid].synchr_nesting == 0);
286  for (sg = s_threadinfo[tid].last; sg; sg = sg_prev)
287  {
288    sg_prev = sg->prev;
289    sg->prev = 0;
290    sg->next = 0;
291    sg_put(sg);
292  }
293  s_threadinfo[tid].vg_thread_exists = False;
294  s_threadinfo[tid].posix_thread_exists = False;
295  tl_assert(s_threadinfo[tid].detached_posix_thread == False);
296  s_threadinfo[tid].first = 0;
297  s_threadinfo[tid].last = 0;
298}
299
300/* Called after a thread performed its last memory access and before   */
301/* thread_delete() is called. Note: thread_delete() is only called for */
302/* joinable threads, not for detached threads.                         */
303void thread_finished(const DrdThreadId tid)
304{
305  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
306            && tid != DRD_INVALID_THREADID);
307
308  s_threadinfo[tid].vg_thread_exists = False;
309
310  if (s_threadinfo[tid].detached_posix_thread)
311  {
312    /* Once a detached thread has finished, its stack is deallocated and   */
313    /* should no longer be taken into account when computing the conflict set*/
314    s_threadinfo[tid].stack_min = s_threadinfo[tid].stack_max;
315
316    /* For a detached thread, calling pthread_exit() invalidates the     */
317    /* POSIX thread ID associated with the detached thread. For joinable */
318    /* POSIX threads however, the POSIX thread ID remains live after the */
319    /* pthread_exit() call until pthread_join() is called.               */
320    s_threadinfo[tid].posix_thread_exists = False;
321  }
322}
323
324/** Called just before pthread_cancel(). */
325void thread_pre_cancel(const DrdThreadId tid)
326{
327  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
328            && tid != DRD_INVALID_THREADID);
329  tl_assert(s_threadinfo[tid].pt_threadid != INVALID_POSIX_THREADID);
330
331  s_threadinfo[tid].synchr_nesting = 0;
332}
333
334void thread_set_pthreadid(const DrdThreadId tid, const PThreadId ptid)
335{
336  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
337            && tid != DRD_INVALID_THREADID);
338  tl_assert(s_threadinfo[tid].pt_threadid == INVALID_POSIX_THREADID);
339  tl_assert(ptid != INVALID_POSIX_THREADID);
340  s_threadinfo[tid].posix_thread_exists = True;
341  s_threadinfo[tid].pt_threadid         = ptid;
342}
343
344Bool thread_get_joinable(const DrdThreadId tid)
345{
346  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
347            && tid != DRD_INVALID_THREADID);
348  return ! s_threadinfo[tid].detached_posix_thread;
349}
350
/** Record whether thread 'tid' is joinable or detached. */
void thread_set_joinable(const DrdThreadId tid, const Bool joinable)
{
  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  /* 'joinable' must be a normalized Bool (exactly 0 or 1). */
  tl_assert(!! joinable == joinable);
  tl_assert(s_threadinfo[tid].pt_threadid != INVALID_POSIX_THREADID);
#if 0
  VG_(message)(Vg_DebugMsg,
               "thread_set_joinable(%d/%d, %s)",
               tid,
               s_threadinfo[tid].vg_threadid,
               joinable ? "joinable" : "detached");
#endif
  s_threadinfo[tid].detached_posix_thread = ! joinable;
}
366
/** Update the running-thread bookkeeping from a Valgrind thread ID.
 *  Triggers a context switch if 'vg_tid' differs from the previously
 *  running thread.
 */
void thread_set_vg_running_tid(const ThreadId vg_tid)
{
  tl_assert(vg_tid != VG_INVALID_THREADID);

  if (vg_tid != s_vg_running_tid)
  {
    thread_set_running_tid(vg_tid, VgThreadIdToDrdThreadId(vg_tid));
  }

  tl_assert(s_vg_running_tid != VG_INVALID_THREADID);
  tl_assert(s_drd_running_tid != DRD_INVALID_THREADID);
}
379
/** Record that the thread (vg_tid, drd_tid) is now running, and recompute
 *  the conflict set if this constitutes a context switch.
 */
void thread_set_running_tid(const ThreadId vg_tid, const DrdThreadId drd_tid)
{
  tl_assert(vg_tid != VG_INVALID_THREADID);
  tl_assert(drd_tid != DRD_INVALID_THREADID);

  if (vg_tid != s_vg_running_tid)
  {
    /* Report the switch before any state is updated so the message shows
       the previous thread correctly. */
    if (s_trace_context_switches
        && s_drd_running_tid != DRD_INVALID_THREADID)
    {
      VG_(message)(Vg_DebugMsg,
                   "Context switch from thread %d/%d to thread %d/%d;"
                   " segments: %llu",
                   s_vg_running_tid, s_drd_running_tid,
                   DrdThreadIdToVgThreadId(drd_tid), drd_tid,
                   sg_get_alive_segments_count());
    }
    s_vg_running_tid = vg_tid;
    s_drd_running_tid = drd_tid;
    /* The conflict set is only valid for the running thread, so it must be
       recomputed on every context switch. */
    thread_compute_conflict_set(&s_conflict_set, drd_tid);
    s_context_switch_count++;
  }

  tl_assert(s_vg_running_tid != VG_INVALID_THREADID);
  tl_assert(s_drd_running_tid != DRD_INVALID_THREADID);
}
406
407int thread_enter_synchr(const DrdThreadId tid)
408{
409  tl_assert(IsValidDrdThreadId(tid));
410  return s_threadinfo[tid].synchr_nesting++;
411}
412
413int thread_leave_synchr(const DrdThreadId tid)
414{
415  tl_assert(IsValidDrdThreadId(tid));
416  tl_assert(s_threadinfo[tid].synchr_nesting >= 1);
417  return --s_threadinfo[tid].synchr_nesting;
418}
419
420int thread_get_synchr_nesting_count(const DrdThreadId tid)
421{
422  tl_assert(IsValidDrdThreadId(tid));
423  return s_threadinfo[tid].synchr_nesting;
424}
425
426/** Append a new segment at the end of the segment list. */
427static void thread_append_segment(const DrdThreadId tid, Segment* const sg)
428{
429  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
430            && tid != DRD_INVALID_THREADID);
431  // tl_assert(sane_ThreadInfo(&s_threadinfo[tid]));
432  sg->prev = s_threadinfo[tid].last;
433  sg->next = 0;
434  if (s_threadinfo[tid].last)
435    s_threadinfo[tid].last->next = sg;
436  s_threadinfo[tid].last = sg;
437  if (s_threadinfo[tid].first == 0)
438    s_threadinfo[tid].first = sg;
439  // tl_assert(sane_ThreadInfo(&s_threadinfo[tid]));
440}
441
442/** Remove a segment from the segment list of thread threadid, and free the
443 *  associated memory.
444 */
445static void thread_discard_segment(const DrdThreadId tid, Segment* const sg)
446{
447  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
448            && tid != DRD_INVALID_THREADID);
449  //tl_assert(sane_ThreadInfo(&s_threadinfo[tid]));
450
451  if (sg->prev)
452    sg->prev->next = sg->next;
453  if (sg->next)
454    sg->next->prev = sg->prev;
455  if (sg == s_threadinfo[tid].first)
456    s_threadinfo[tid].first = sg->next;
457  if (sg == s_threadinfo[tid].last)
458    s_threadinfo[tid].last = sg->prev;
459  sg_put(sg);
460
461  //tl_assert(sane_ThreadInfo(&s_threadinfo[tid]));
462}
463
464VectorClock* thread_get_vc(const DrdThreadId tid)
465{
466  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
467            && tid != DRD_INVALID_THREADID);
468  tl_assert(s_threadinfo[tid].last);
469  return &s_threadinfo[tid].last->vc;
470}
471
472/** Return the latest segment of thread 'tid' and increment its reference
473 *  count.
474 */
475void thread_get_latest_segment(Segment** sg, const DrdThreadId tid)
476{
477  tl_assert(sg);
478  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
479            && tid != DRD_INVALID_THREADID);
480  tl_assert(s_threadinfo[tid].last);
481
482  sg_put(*sg);
483  *sg = sg_get(s_threadinfo[tid].last);
484}
485
486/**
487 * Compute the minimum of all latest vector clocks of all threads
488 * (Michiel Ronsse calls this "clock snooping" in his papers about DIOTA).
489 * @param vc pointer to a vectorclock, holds result upon return.
490 */
491static void thread_compute_minimum_vc(VectorClock* vc)
492{
493  unsigned i;
494  Bool first;
495  Segment* latest_sg;
496
497  first = True;
498  for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
499  {
500    latest_sg = s_threadinfo[i].last;
501    if (latest_sg)
502    {
503      if (first)
504        vc_assign(vc, &latest_sg->vc);
505      else
506        vc_min(vc, &latest_sg->vc);
507      first = False;
508    }
509  }
510}
511
512static void thread_compute_maximum_vc(VectorClock* vc)
513{
514  unsigned i;
515  Bool first;
516  Segment* latest_sg;
517
518  first = True;
519  for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
520  {
521    latest_sg = s_threadinfo[i].last;
522    if (latest_sg)
523    {
524      if (first)
525        vc_assign(vc, &latest_sg->vc);
526      else
527        vc_combine(vc, &latest_sg->vc);
528      first = False;
529    }
530  }
531}
532
/**
 * Discard all segments that have a defined order against the latest vector
 * clock of every thread -- these segments can no longer be involved in a
 * data race.
 */
static void thread_discard_ordered_segments(void)
{
  unsigned i;
  VectorClock thread_vc_min;

  s_discard_ordered_segments_count++;

  vc_init(&thread_vc_min, 0, 0);
  thread_compute_minimum_vc(&thread_vc_min);
  if (sg_get_trace())
  {
    char msg[256];
    VectorClock thread_vc_max;

    vc_init(&thread_vc_max, 0, 0);
    thread_compute_maximum_vc(&thread_vc_max);
    VG_(snprintf)(msg, sizeof(msg),
                  "Discarding ordered segments -- min vc is ");
    vc_snprint(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
               &thread_vc_min);
    VG_(snprintf)(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
                  ", max vc is ");
    vc_snprint(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
               &thread_vc_max);
    VG_(message)(Vg_UserMsg, "%s", msg);
    vc_cleanup(&thread_vc_max);
  }

  for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
  {
    Segment* sg;
    Segment* sg_next;
    /* Discard leading segments whose vector clock is ordered before the
       minimum over all threads. The '(sg_next = sg->next)' test doubles as
       a guarantee that a thread's last segment is never discarded. */
    for (sg = s_threadinfo[i].first;
         sg && (sg_next = sg->next) && vc_lte(&sg->vc, &thread_vc_min);
         sg = sg_next)
    {
      thread_discard_segment(i, sg);
    }
  }
  vc_cleanup(&thread_vc_min);
}
579
/** Merge all segments that may be merged without triggering false positives
 *  or discarding real data races. For the theoretical background of segment
 *  merging, see also the following paper:
 *  Mark Christiaens, Michiel Ronsse and Koen De Bosschere.
 *  Bounding the number of segment histories during data race detection.
 *  Parallel Computing archive, Volume 28, Issue 9, pp 1221-1238,
 *  September 2002.
 */
static void thread_merge_segments(void)
{
  unsigned i;

  for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
  {
    Segment* sg;

    // tl_assert(sane_ThreadInfo(&s_threadinfo[i]));

    for (sg = s_threadinfo[i].first; sg; sg = sg->next)
    {
      /* Only merge two adjacent segments when neither is referenced from
         anywhere else (refcount 1) and the pair is not at the tail of the
         list ('sg->next->next' non-zero keeps the last segment intact). */
      if (sg_get_refcnt(sg) == 1
          && sg->next
          && sg_get_refcnt(sg->next) == 1
          && sg->next->next)
      {
        /* Merge sg and sg->next into sg. Discarding sg->next relinks the
           list, so the loop's 'sg = sg->next' continues past the merged
           pair. */
        sg_merge(sg, sg->next);
        thread_discard_segment(i, sg->next);
      }
    }

    // tl_assert(sane_ThreadInfo(&s_threadinfo[i]));
  }
}
614
/** Every change in the vector clock of a thread may cause segments that
 *  were previously ordered to this thread to become unordered. Hence,
 *  it may be necessary to recalculate the conflict set if the vector clock
 *  of the current thread is updated. This function check whether such a
 *  recalculation is necessary.
 *
 *  NOTE: the heuristic below is currently disabled via '#if 0', so this
 *  function unconditionally returns True (always recompute).
 *
 *  @param tid    Thread ID of the thread to which a new segment has been
 *                appended.
 *  @param new_sg Pointer to the most recent segment of thread tid.
 */
static Bool conflict_set_update_needed(const DrdThreadId tid,
                                     const Segment* const new_sg)
{
#if 0
  unsigned i;
  const Segment* old_sg;

  tl_assert(new_sg);

  /* If a new segment was added to another thread than the running thread, */
  /* just tell the caller to update the conflict set.                        */
  if (tid != s_drd_running_tid)
    return True;

  /* Always let the caller update the conflict set after creation of the */
  /* first segment.                                                    */
  old_sg = new_sg->prev;
  if (old_sg == 0)
    return True;

  for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
  {
    Segment* q;

    if (i == s_drd_running_tid)
      continue;

    for (q = s_threadinfo[i].last; q; q = q->prev)
    {
      /* If the expression below evaluates to false, this expression will */
      /* also evaluate to false for all subsequent iterations. So stop    */
      /* iterating.                                                       */
      if (vc_lte(&q->vc, &old_sg->vc))
        break;
      /* If the vector clock of the 2nd the last segment is not ordered   */
      /* to the vector clock of segment q, and the last segment is, ask   */
      /* the caller to update the conflict set.                             */
      if (! vc_lte(&old_sg->vc, &q->vc))
      {
        return True;
      }
      /* If the vector clock of the last segment is not ordered to the    */
      /* vector clock of segment q, ask the caller to update the conflict   */
      /* set.                                                             */
      if (! vc_lte(&q->vc, &new_sg->vc) && ! vc_lte(&new_sg->vc, &q->vc))
      {
        return True;
      }
    }
  }

  return False;
#else
  return True;
#endif
}
681
/** Create a new segment for the specified thread, and discard any segments
 *  that cannot cause races anymore.
 */
void thread_new_segment(const DrdThreadId tid)
{
  Segment* new_sg;

  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);

  new_sg = sg_new(tid, tid);
  thread_append_segment(tid, new_sg);

  /* Note: conflict_set_update_needed() currently always returns True, so
     the conflict set is recomputed on every new segment. */
  if (conflict_set_update_needed(tid, new_sg))
  {
    thread_compute_conflict_set(&s_conflict_set, s_drd_running_tid);
    s_conflict_set_new_segment_count++;
  }
  else if (tid == s_drd_running_tid)
  {
    /* Only reached when the heuristic above is enabled; cross-check its
       verdict (the check is itself gated by an environment variable). */
    tl_assert(thread_conflict_set_up_to_date(s_drd_running_tid));
  }

  thread_discard_ordered_segments();

  if (s_segment_merging)
    thread_merge_segments();
}
710
/** Call this function after thread 'joiner' joined thread 'joinee'. */
void thread_combine_vc(DrdThreadId joiner, DrdThreadId joinee)
{
  tl_assert(joiner != joinee);
  tl_assert(0 <= (int)joiner && joiner < DRD_N_THREADS
            && joiner != DRD_INVALID_THREADID);
  tl_assert(0 <= (int)joinee && joinee < DRD_N_THREADS
            && joinee != DRD_INVALID_THREADID);
  tl_assert(s_threadinfo[joiner].last);
  tl_assert(s_threadinfo[joinee].last);
  /* The joiner happens-after everything the joinee did: fold the joinee's
     latest vector clock into the joiner's. */
  vc_combine(&s_threadinfo[joiner].last->vc, &s_threadinfo[joinee].last->vc);
  thread_discard_ordered_segments();

  /* The conflict set only describes the running thread, so it only needs
     recomputing if the joiner is the one running. */
  if (joiner == s_drd_running_tid)
  {
    thread_compute_conflict_set(&s_conflict_set, joiner);
  }
}
729
/** Call this function after thread 'tid' had to wait because of thread
 *  synchronization until the memory accesses in the segment with vector clock
 *  'vc' finished.
 */
void thread_combine_vc2(DrdThreadId tid, const VectorClock* const vc)
{
  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  tl_assert(s_threadinfo[tid].last);
  tl_assert(vc);
  /* Fold the awaited segment's vector clock into 'tid''s latest segment,
     then bring the conflict set back in sync with the new clock. */
  vc_combine(&s_threadinfo[tid].last->vc, vc);
  thread_compute_conflict_set(&s_conflict_set, tid);
  thread_discard_ordered_segments();
  s_conflict_set_combine_vc_count++;
}
745
/** Call this function whenever a thread is no longer using the memory
 *  [ a1, a2 [, e.g. because of a call to free() or a stack pointer
 *  increase.
 */
void thread_stop_using_mem(const Addr a1, const Addr a2)
{
  DrdThreadId other_user;
  unsigned i;

  /* For all threads, mark the range [ a1, a2 [ as no longer in use. */
  other_user = DRD_INVALID_THREADID;
  for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
  {
    Segment* p;
    for (p = s_threadinfo[i].first; p; p = p->next)
    {
      /* Until some other thread is known to have accessed the range, use
         the test-and-clear variant so the first hit can be detected. Once
         a hit is recorded (or for the running thread itself), a plain
         clear suffices. */
      if (other_user == DRD_INVALID_THREADID
          && i != s_drd_running_tid)
      {
        if (UNLIKELY(bm_test_and_clear(p->bm, a1, a2)))
        {
          other_user = i;
        }
        continue;
      }
      bm_clear(p->bm, a1, a2);
    }
  }

  /* If any other thread had accessed memory in [ a1, a2 [, update the */
  /* conflict set. */
  if (other_user != DRD_INVALID_THREADID
      && bm_has_any_access(s_conflict_set, a1, a2))
  {
    thread_compute_conflict_set(&s_conflict_set, thread_get_running_tid());
  }
}
783
784void thread_start_recording(const DrdThreadId tid)
785{
786  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
787            && tid != DRD_INVALID_THREADID);
788  tl_assert(! s_threadinfo[tid].is_recording);
789  s_threadinfo[tid].is_recording = True;
790}
791
792void thread_stop_recording(const DrdThreadId tid)
793{
794  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
795            && tid != DRD_INVALID_THREADID);
796  tl_assert(s_threadinfo[tid].is_recording);
797  s_threadinfo[tid].is_recording = False;
798}
799
/** Debugging aid: dump the state and all segments of every thread that
 *  owns at least one segment.
 */
void thread_print_all(void)
{
  unsigned i;
  Segment* p;

  for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
  {
    if (s_threadinfo[i].first)
    {
      VG_(printf)("**************\n"
                  "* thread %3d (%d/%d/%d/0x%lx/%d) *\n"
                  "**************\n",
                  i,
                  s_threadinfo[i].vg_thread_exists,
                  s_threadinfo[i].vg_threadid,
                  s_threadinfo[i].posix_thread_exists,
                  s_threadinfo[i].pt_threadid,
                  s_threadinfo[i].detached_posix_thread);
      for (p = s_threadinfo[i].first; p; p = p->next)
      {
        sg_print(p);
      }
    }
  }
}
825
/** Print 'msg' followed by a call stack for thread 'tid': either the saved
 *  execution context 'callstack', or -- when that is null -- the thread's
 *  current stack trace. Prints a placeholder if the Valgrind thread has
 *  already disappeared.
 */
static void show_call_stack(const DrdThreadId tid,
                            const Char* const msg,
                            ExeContext* const callstack)
{
  const ThreadId vg_tid = DrdThreadIdToVgThreadId(tid);

  VG_(message)(Vg_UserMsg, "%s (thread %d/%d)", msg, vg_tid, tid);

  if (vg_tid != VG_INVALID_THREADID)
  {
    if (callstack)
    {
      VG_(pp_ExeContext)(callstack);
    }
    else
    {
      VG_(get_and_pp_StackTrace)(vg_tid, VG_(clo_backtrace_size));
    }
  }
  else
  {
    VG_(message)(Vg_UserMsg,
                 "   (thread finished, call stack no longer available)");
  }
}
851
/** For one segment 'p' of thread 'tid', report the call stacks of all
 *  segments of other threads that are unordered to 'p' and that conflict
 *  with the access [addr, addr+size[ of type 'access_type'.
 */
static void
thread_report_conflicting_segments_segment(const DrdThreadId tid,
                                           const Addr addr,
                                           const SizeT size,
                                           const BmAccessTypeT access_type,
                                           const Segment* const p)
{
  unsigned i;

  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  tl_assert(p);

  for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
  {
    if (i != tid)
    {
      Segment* q;
      for (q = s_threadinfo[i].last; q; q = q->prev)
      {
        // Since q iterates over the segments of thread i in order of
        // decreasing vector clocks, if q->vc <= p->vc, then
        // q->next->vc <= p->vc will also hold. Hence, break out of the
        // loop once this condition is met.
        if (vc_lte(&q->vc, &p->vc))
          break;
        /* Segments that are unordered to 'p' (neither happens-before the
           other) may race with it. */
        if (! vc_lte(&p->vc, &q->vc))
        {
          if (bm_has_conflict_with(q->bm, addr, addr + size, access_type))
          {
            tl_assert(q->stacktrace);
            /* The start of segment q->next doubles as the end of q. */
            show_call_stack(i,        "Other segment start",
                            q->stacktrace);
            show_call_stack(i,        "Other segment end",
                            q->next ? q->next->stacktrace : 0);
          }
        }
      }
    }
  }
}
893
894void thread_report_conflicting_segments(const DrdThreadId tid,
895                                        const Addr addr,
896                                        const SizeT size,
897                                        const BmAccessTypeT access_type)
898{
899  Segment* p;
900
901  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
902            && tid != DRD_INVALID_THREADID);
903
904  for (p = s_threadinfo[tid].first; p; p = p->next)
905  {
906    if (bm_has(p->bm, addr, addr + size, access_type))
907    {
908      thread_report_conflicting_segments_segment(tid, addr, size,
909                                                 access_type, p);
910    }
911  }
912}
913
/** Verify whether the conflict set for thread tid is up to date. Only perform
 *  the check if the environment variable DRD_VERIFY_CONFLICT_SET has been set.
 */
static Bool thread_conflict_set_up_to_date(const DrdThreadId tid)
{
  /* Cached verdict of the (one-time) environment-variable lookup:
     -1 = not yet queried, 0 = disabled, 1 = enabled. */
  static int do_verify_conflict_set = -1;
  Bool result;
  struct bitmap* computed_conflict_set = 0;

  if (do_verify_conflict_set < 0)
  {
    //VG_(message)(Vg_DebugMsg, "%s", VG_(getenv)("DRD_VERIFY_CONFLICT_SET"));
    do_verify_conflict_set = VG_(getenv)("DRD_VERIFY_CONFLICT_SET") != 0;
  }
  /* Verification disabled: report "up to date" without checking. */
  if (do_verify_conflict_set == 0)
    return True;

  /* Recompute the conflict set from scratch and compare it with the
     cached one. */
  thread_compute_conflict_set(&computed_conflict_set, tid);
  result = bm_equal(s_conflict_set, computed_conflict_set);
  bm_delete(computed_conflict_set);
  return result;
}
936
/** Compute a bitmap that represents the union of all memory accesses of all
 *  segments that are unordered to the current segment of the thread tid.
 *  Any previous bitmap in *conflict_set is deleted and replaced.
 */
static void thread_compute_conflict_set(struct bitmap** conflict_set,
                                        const DrdThreadId tid)
{
  Segment* p;

  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  tl_assert(tid == s_drd_running_tid);

  /* Track the net number of bitmaps created by this computation:
     subtract the counters now, add them back at the end. */
  s_update_conflict_set_count++;
  s_conflict_set_bitmap_creation_count  -= bm_get_bitmap_creation_count();
  s_conflict_set_bitmap2_creation_count -= bm_get_bitmap2_creation_count();

  if (*conflict_set)
  {
    bm_delete(*conflict_set);
  }
  *conflict_set = bm_new();

  if (s_trace_conflict_set)
  {
    char msg[256];

    VG_(snprintf)(msg, sizeof(msg),
                  "computing conflict set for thread %d/%d with vc ",
                  DrdThreadIdToVgThreadId(tid), tid);
    vc_snprint(msg + VG_(strlen)(msg),
               sizeof(msg) - VG_(strlen)(msg),
               &s_threadinfo[tid].last->vc);
    VG_(message)(Vg_UserMsg, "%s", msg);
  }

  /* Compare only against the latest segment of 'tid'. */
  p = s_threadinfo[tid].last;
  {
    unsigned j;

    if (s_trace_conflict_set)
    {
      char msg[256];

      VG_(snprintf)(msg, sizeof(msg),
                    "conflict set: thread [%d] at vc ",
                    tid);
      vc_snprint(msg + VG_(strlen)(msg),
                 sizeof(msg) - VG_(strlen)(msg),
                 &p->vc);
      VG_(message)(Vg_UserMsg, "%s", msg);
    }

    for (j = 0; j < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); j++)
    {
      if (j != tid && IsValidDrdThreadId(j))
      {
        const Segment* q;
        for (q = s_threadinfo[j].last; q; q = q->prev)
        {
          /* A segment that is unordered to 'p' (neither vector clock is
             <= the other) can conflict; merge its accesses in. */
          if (! vc_lte(&q->vc, &p->vc) && ! vc_lte(&p->vc, &q->vc))
          {
            if (s_trace_conflict_set)
            {
              char msg[256];
              VG_(snprintf)(msg, sizeof(msg),
                            "conflict set: [%d] merging segment ", j);
              vc_snprint(msg + VG_(strlen)(msg),
                         sizeof(msg) - VG_(strlen)(msg),
                         &q->vc);
              VG_(message)(Vg_UserMsg, "%s", msg);
            }
            bm_merge2(*conflict_set, q->bm);
          }
          else
          {
            if (s_trace_conflict_set)
            {
              char msg[256];
              VG_(snprintf)(msg, sizeof(msg),
                            "conflict set: [%d] ignoring segment ", j);
              vc_snprint(msg + VG_(strlen)(msg),
                         sizeof(msg) - VG_(strlen)(msg),
                         &q->vc);
              VG_(message)(Vg_UserMsg, "%s", msg);
            }
          }
        }
      }
    }
  }

  s_conflict_set_bitmap_creation_count  += bm_get_bitmap_creation_count();
  s_conflict_set_bitmap2_creation_count += bm_get_bitmap2_creation_count();

  /* Disabled debug dump of the freshly computed conflict set. */
  if (0 && s_trace_conflict_set)
  {
    VG_(message)(Vg_UserMsg, "[%d] new conflict set:", tid);
    bm_print(*conflict_set);
    VG_(message)(Vg_UserMsg, "[%d] end of new conflict set.", tid);
  }
}
1038
/** Return the number of context switches observed so far. */
ULong thread_get_context_switch_count(void)
{
  return s_context_switch_count;
}
1043
/** Return how often ordered segments have been discarded. */
ULong thread_get_discard_ordered_segments_count(void)
{
  return s_discard_ordered_segments_count;
}
1048
/** Return the total number of conflict-set updates, and report via the out
 *  parameters how many were triggered by new segments (*dsnsc) and by
 *  vector-clock combining (*dscvc).
 */
ULong thread_get_update_conflict_set_count(ULong* dsnsc, ULong* dscvc)
{
  tl_assert(dsnsc);
  tl_assert(dscvc);
  *dsnsc = s_conflict_set_new_segment_count;
  *dscvc = s_conflict_set_combine_vc_count;
  return s_update_conflict_set_count;
}
1057
/** Return the net number of first-level bitmaps created during conflict-set
 *  computation.
 */
ULong thread_get_conflict_set_bitmap_creation_count(void)
{
  return s_conflict_set_bitmap_creation_count;
}
1062
/** Return the net number of second-level bitmaps created during
 *  conflict-set computation.
 */
ULong thread_get_conflict_set_bitmap2_creation_count(void)
{
  return s_conflict_set_bitmap2_creation_count;
}
1067