drd_thread.c revision 178b686b4886b7c196df3b5fdd1187113c4e547d
1/* -*- mode: C; c-basic-offset: 3; indent-tabs-mode: nil; -*- */ 2/* 3 This file is part of drd, a thread error detector. 4 5 Copyright (C) 2006-2011 Bart Van Assche <bvanassche@acm.org>. 6 7 This program is free software; you can redistribute it and/or 8 modify it under the terms of the GNU General Public License as 9 published by the Free Software Foundation; either version 2 of the 10 License, or (at your option) any later version. 11 12 This program is distributed in the hope that it will be useful, but 13 WITHOUT ANY WARRANTY; without even the implied warranty of 14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 15 General Public License for more details. 16 17 You should have received a copy of the GNU General Public License 18 along with this program; if not, write to the Free Software 19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 20 02111-1307, USA. 21 22 The GNU General Public License is contained in the file COPYING. 23*/ 24 25 26#include "drd_error.h" 27#include "drd_barrier.h" 28#include "drd_clientobj.h" 29#include "drd_cond.h" 30#include "drd_mutex.h" 31#include "drd_segment.h" 32#include "drd_semaphore.h" 33#include "drd_suppression.h" 34#include "drd_thread.h" 35#include "pub_tool_vki.h" 36#include "pub_tool_basics.h" // Addr, SizeT 37#include "pub_tool_libcassert.h" // tl_assert() 38#include "pub_tool_libcbase.h" // VG_(strlen)() 39#include "pub_tool_libcprint.h" // VG_(printf)() 40#include "pub_tool_libcproc.h" // VG_(getenv)() 41#include "pub_tool_machine.h" 42#include "pub_tool_mallocfree.h" // VG_(malloc)(), VG_(free)() 43#include "pub_tool_options.h" // VG_(clo_backtrace_size) 44#include "pub_tool_threadstate.h" // VG_(get_pthread_id)() 45 46 47 48/* Local functions. 
*/ 49 50static void thread_append_segment(const DrdThreadId tid, Segment* const sg); 51static void thread_discard_segment(const DrdThreadId tid, Segment* const sg); 52static void thread_compute_conflict_set(struct bitmap** conflict_set, 53 const DrdThreadId tid); 54static Bool thread_conflict_set_up_to_date(const DrdThreadId tid); 55 56 57/* Local variables. */ 58 59static ULong s_context_switch_count; 60static ULong s_discard_ordered_segments_count; 61static ULong s_compute_conflict_set_count; 62static ULong s_update_conflict_set_count; 63static ULong s_update_conflict_set_new_sg_count; 64static ULong s_update_conflict_set_sync_count; 65static ULong s_update_conflict_set_join_count; 66static ULong s_conflict_set_bitmap_creation_count; 67static ULong s_conflict_set_bitmap2_creation_count; 68static ThreadId s_vg_running_tid = VG_INVALID_THREADID; 69DrdThreadId DRD_(g_drd_running_tid) = DRD_INVALID_THREADID; 70ThreadInfo DRD_(g_threadinfo)[DRD_N_THREADS]; 71struct bitmap* DRD_(g_conflict_set); 72static Bool s_trace_context_switches = False; 73static Bool s_trace_conflict_set = False; 74static Bool s_trace_conflict_set_bm = False; 75static Bool s_trace_fork_join = False; 76static Bool s_segment_merging = True; 77static Bool s_new_segments_since_last_merge; 78static int s_segment_merge_interval = 10; 79static unsigned s_join_list_vol = 10; 80static unsigned s_deletion_head; 81static unsigned s_deletion_tail; 82 83 84/* Function definitions. */ 85 86/** Enables/disables context switch tracing. */ 87void DRD_(thread_trace_context_switches)(const Bool t) 88{ 89 tl_assert(t == False || t == True); 90 s_trace_context_switches = t; 91} 92 93/** Enables/disables conflict set tracing. */ 94void DRD_(thread_trace_conflict_set)(const Bool t) 95{ 96 tl_assert(t == False || t == True); 97 s_trace_conflict_set = t; 98} 99 100/** Enables/disables conflict set bitmap tracing. 
*/ 101void DRD_(thread_trace_conflict_set_bm)(const Bool t) 102{ 103 tl_assert(t == False || t == True); 104 s_trace_conflict_set_bm = t; 105} 106 107/** Report whether fork/join tracing is enabled. */ 108Bool DRD_(thread_get_trace_fork_join)(void) 109{ 110 return s_trace_fork_join; 111} 112 113/** Enables/disables fork/join tracing. */ 114void DRD_(thread_set_trace_fork_join)(const Bool t) 115{ 116 tl_assert(t == False || t == True); 117 s_trace_fork_join = t; 118} 119 120/** Enables/disables segment merging. */ 121void DRD_(thread_set_segment_merging)(const Bool m) 122{ 123 tl_assert(m == False || m == True); 124 s_segment_merging = m; 125} 126 127/** Get the segment merging interval. */ 128int DRD_(thread_get_segment_merge_interval)(void) 129{ 130 return s_segment_merge_interval; 131} 132 133/** Set the segment merging interval. */ 134void DRD_(thread_set_segment_merge_interval)(const int i) 135{ 136 s_segment_merge_interval = i; 137} 138 139void DRD_(thread_set_join_list_vol)(const int jlv) 140{ 141 s_join_list_vol = jlv; 142} 143 144/** 145 * Convert Valgrind's ThreadId into a DrdThreadId. 146 * 147 * @return DRD thread ID upon success and DRD_INVALID_THREADID if the passed 148 * Valgrind ThreadId does not yet exist. 149 */ 150DrdThreadId DRD_(VgThreadIdToDrdThreadId)(const ThreadId tid) 151{ 152 int i; 153 154 if (tid == VG_INVALID_THREADID) 155 return DRD_INVALID_THREADID; 156 157 for (i = 1; i < DRD_N_THREADS; i++) 158 { 159 if (DRD_(g_threadinfo)[i].vg_thread_exists == True 160 && DRD_(g_threadinfo)[i].vg_threadid == tid) 161 { 162 return i; 163 } 164 } 165 166 return DRD_INVALID_THREADID; 167} 168 169/** Allocate a new DRD thread ID for the specified Valgrind thread ID. */ 170static DrdThreadId DRD_(VgThreadIdToNewDrdThreadId)(const ThreadId tid) 171{ 172 int i; 173 174 tl_assert(DRD_(VgThreadIdToDrdThreadId)(tid) == DRD_INVALID_THREADID); 175 176 for (i = 1; i < DRD_N_THREADS; i++) 177 { 178 if (!DRD_(g_threadinfo)[i].valid) 179 { 180 tl_assert(! 
DRD_(IsValidDrdThreadId)(i)); 181 182 DRD_(g_threadinfo)[i].valid = True; 183 DRD_(g_threadinfo)[i].vg_thread_exists = True; 184 DRD_(g_threadinfo)[i].vg_threadid = tid; 185 DRD_(g_threadinfo)[i].pt_threadid = INVALID_POSIX_THREADID; 186 DRD_(g_threadinfo)[i].stack_min = 0; 187 DRD_(g_threadinfo)[i].stack_min_min = 0; 188 DRD_(g_threadinfo)[i].stack_startup = 0; 189 DRD_(g_threadinfo)[i].stack_max = 0; 190 DRD_(thread_set_name)(i, ""); 191 DRD_(g_threadinfo)[i].on_alt_stack = False; 192 DRD_(g_threadinfo)[i].is_recording_loads = True; 193 DRD_(g_threadinfo)[i].is_recording_stores = True; 194 DRD_(g_threadinfo)[i].pthread_create_nesting_level = 0; 195 DRD_(g_threadinfo)[i].synchr_nesting = 0; 196 DRD_(g_threadinfo)[i].deletion_seq = s_deletion_tail - 1; 197 tl_assert(DRD_(g_threadinfo)[i].first == 0); 198 tl_assert(DRD_(g_threadinfo)[i].last == 0); 199 200 tl_assert(DRD_(IsValidDrdThreadId)(i)); 201 202 return i; 203 } 204 } 205 206 VG_(printf)( 207"\nSorry, but the maximum number of threads supported by DRD has been exceeded." 208"Aborting.\n"); 209 210 tl_assert(False); 211 212 return DRD_INVALID_THREADID; 213} 214 215/** Convert a POSIX thread ID into a DRD thread ID. */ 216DrdThreadId DRD_(PtThreadIdToDrdThreadId)(const PThreadId tid) 217{ 218 int i; 219 220 if (tid != INVALID_POSIX_THREADID) 221 { 222 for (i = 1; i < DRD_N_THREADS; i++) 223 { 224 if (DRD_(g_threadinfo)[i].posix_thread_exists 225 && DRD_(g_threadinfo)[i].pt_threadid == tid) 226 { 227 return i; 228 } 229 } 230 } 231 return DRD_INVALID_THREADID; 232} 233 234/** Convert a DRD thread ID into a Valgrind thread ID. */ 235ThreadId DRD_(DrdThreadIdToVgThreadId)(const DrdThreadId tid) 236{ 237 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS 238 && tid != DRD_INVALID_THREADID); 239 240 return (DRD_(g_threadinfo)[tid].vg_thread_exists 241 ? 
DRD_(g_threadinfo)[tid].vg_threadid 242 : VG_INVALID_THREADID); 243} 244 245#ifdef ENABLE_DRD_CONSISTENCY_CHECKS 246/** 247 * Sanity check of the doubly linked list of segments referenced by a 248 * ThreadInfo struct. 249 * @return True if sane, False if not. 250 */ 251static Bool DRD_(sane_ThreadInfo)(const ThreadInfo* const ti) 252{ 253 Segment* p; 254 255 for (p = ti->first; p; p = p->next) { 256 if (p->next && p->next->prev != p) 257 return False; 258 if (p->next == 0 && p != ti->last) 259 return False; 260 } 261 for (p = ti->last; p; p = p->prev) { 262 if (p->prev && p->prev->next != p) 263 return False; 264 if (p->prev == 0 && p != ti->first) 265 return False; 266 } 267 return True; 268} 269#endif 270 271/** 272 * Create the first segment for a newly started thread. 273 * 274 * This function is called from the handler installed via 275 * VG_(track_pre_thread_ll_create)(). The Valgrind core invokes this handler 276 * from the context of the creator thread, before the new thread has been 277 * created. 278 * 279 * @param[in] creator DRD thread ID of the creator thread. 280 * @param[in] vg_created Valgrind thread ID of the created thread. 281 * 282 * @return DRD thread ID of the created thread. 283 */ 284DrdThreadId DRD_(thread_pre_create)(const DrdThreadId creator, 285 const ThreadId vg_created) 286{ 287 DrdThreadId created; 288 289 tl_assert(DRD_(VgThreadIdToDrdThreadId)(vg_created) == DRD_INVALID_THREADID); 290 created = DRD_(VgThreadIdToNewDrdThreadId)(vg_created); 291 tl_assert(0 <= (int)created && created < DRD_N_THREADS 292 && created != DRD_INVALID_THREADID); 293 294 tl_assert(DRD_(g_threadinfo)[created].first == 0); 295 tl_assert(DRD_(g_threadinfo)[created].last == 0); 296 /* Create an initial segment for the newly created thread. */ 297 thread_append_segment(created, DRD_(sg_new)(creator, created)); 298 299 return created; 300} 301 302/** 303 * Initialize DRD_(g_threadinfo)[] for a newly created thread. 
Must be called 304 * after the thread has been created and before any client instructions are run 305 * on the newly created thread, e.g. from the handler installed via 306 * VG_(track_pre_thread_first_insn)(). 307 * 308 * @param[in] vg_created Valgrind thread ID of the newly created thread. 309 * 310 * @return DRD thread ID for the new thread. 311 */ 312DrdThreadId DRD_(thread_post_create)(const ThreadId vg_created) 313{ 314 const DrdThreadId created = DRD_(VgThreadIdToDrdThreadId)(vg_created); 315 316 tl_assert(0 <= (int)created && created < DRD_N_THREADS 317 && created != DRD_INVALID_THREADID); 318 319 DRD_(g_threadinfo)[created].stack_max 320 = VG_(thread_get_stack_max)(vg_created); 321 DRD_(g_threadinfo)[created].stack_startup 322 = DRD_(g_threadinfo)[created].stack_max; 323 DRD_(g_threadinfo)[created].stack_min 324 = DRD_(g_threadinfo)[created].stack_max; 325 DRD_(g_threadinfo)[created].stack_min_min 326 = DRD_(g_threadinfo)[created].stack_max; 327 DRD_(g_threadinfo)[created].stack_size 328 = VG_(thread_get_stack_size)(vg_created); 329 tl_assert(DRD_(g_threadinfo)[created].stack_max != 0); 330 331 return created; 332} 333 334static void DRD_(thread_delayed_delete)(const DrdThreadId tid) 335{ 336 int j; 337 338 DRD_(g_threadinfo)[tid].vg_thread_exists = False; 339 DRD_(g_threadinfo)[tid].posix_thread_exists = False; 340 DRD_(g_threadinfo)[tid].deletion_seq = s_deletion_head++; 341#if 0 342 VG_(message)(Vg_DebugMsg, "Adding thread %d to the deletion list\n", tid); 343#endif 344 if (s_deletion_head - s_deletion_tail >= s_join_list_vol) { 345 for (j = 0; j < DRD_N_THREADS; ++j) { 346 if (DRD_(IsValidDrdThreadId)(j) 347 && DRD_(g_threadinfo)[j].deletion_seq == s_deletion_tail) 348 { 349 s_deletion_tail++; 350#if 0 351 VG_(message)(Vg_DebugMsg, "Delayed delete of thread %d\n", j); 352#endif 353 DRD_(thread_delete)(j, False); 354 break; 355 } 356 } 357 } 358} 359 360/** 361 * Process VG_USERREQ__POST_THREAD_JOIN. 
This client request is invoked just 362 * after thread drd_joiner joined thread drd_joinee. 363 */ 364void DRD_(thread_post_join)(DrdThreadId drd_joiner, DrdThreadId drd_joinee) 365{ 366 tl_assert(DRD_(IsValidDrdThreadId)(drd_joiner)); 367 tl_assert(DRD_(IsValidDrdThreadId)(drd_joinee)); 368 369 DRD_(thread_new_segment)(drd_joiner); 370 DRD_(thread_combine_vc_join)(drd_joiner, drd_joinee); 371 DRD_(thread_new_segment)(drd_joinee); 372 373 if (s_trace_fork_join) 374 { 375 const ThreadId joiner = DRD_(DrdThreadIdToVgThreadId)(drd_joiner); 376 const unsigned msg_size = 256; 377 char* msg; 378 379 msg = VG_(malloc)("drd.main.dptj.1", msg_size); 380 tl_assert(msg); 381 VG_(snprintf)(msg, msg_size, 382 "drd_post_thread_join joiner = %d, joinee = %d", 383 drd_joiner, drd_joinee); 384 if (joiner) 385 { 386 char* vc; 387 388 vc = DRD_(vc_aprint)(DRD_(thread_get_vc)(drd_joiner)); 389 VG_(snprintf)(msg + VG_(strlen)(msg), msg_size - VG_(strlen)(msg), 390 ", new vc: %s", vc); 391 VG_(free)(vc); 392 } 393 VG_(message)(Vg_DebugMsg, "%s\n", msg); 394 VG_(free)(msg); 395 } 396 397 if (! DRD_(get_check_stack_accesses)()) 398 { 399 DRD_(finish_suppression)(DRD_(thread_get_stack_max)(drd_joinee) 400 - DRD_(thread_get_stack_size)(drd_joinee), 401 DRD_(thread_get_stack_max)(drd_joinee)); 402 } 403 DRD_(clientobj_delete_thread)(drd_joinee); 404 DRD_(thread_delayed_delete)(drd_joinee); 405} 406 407/** 408 * NPTL hack: NPTL allocates the 'struct pthread' on top of the stack, 409 * and accesses this data structure from multiple threads without locking. 410 * Any conflicting accesses in the range stack_startup..stack_max will be 411 * ignored. 
412 */ 413void DRD_(thread_set_stack_startup)(const DrdThreadId tid, 414 const Addr stack_startup) 415{ 416 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS 417 && tid != DRD_INVALID_THREADID); 418 tl_assert(DRD_(g_threadinfo)[tid].stack_min <= stack_startup); 419 tl_assert(stack_startup <= DRD_(g_threadinfo)[tid].stack_max); 420 DRD_(g_threadinfo)[tid].stack_startup = stack_startup; 421} 422 423/** Return the stack pointer for the specified thread. */ 424Addr DRD_(thread_get_stack_min)(const DrdThreadId tid) 425{ 426 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS 427 && tid != DRD_INVALID_THREADID); 428 return DRD_(g_threadinfo)[tid].stack_min; 429} 430 431/** 432 * Return the lowest value that was ever assigned to the stack pointer 433 * for the specified thread. 434 */ 435Addr DRD_(thread_get_stack_min_min)(const DrdThreadId tid) 436{ 437 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS 438 && tid != DRD_INVALID_THREADID); 439 return DRD_(g_threadinfo)[tid].stack_min_min; 440} 441 442/** Return the top address for the stack of the specified thread. */ 443Addr DRD_(thread_get_stack_max)(const DrdThreadId tid) 444{ 445 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS 446 && tid != DRD_INVALID_THREADID); 447 return DRD_(g_threadinfo)[tid].stack_max; 448} 449 450/** Return the maximum stack size for the specified thread. 
*/ 451SizeT DRD_(thread_get_stack_size)(const DrdThreadId tid) 452{ 453 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS 454 && tid != DRD_INVALID_THREADID); 455 return DRD_(g_threadinfo)[tid].stack_size; 456} 457 458Bool DRD_(thread_get_on_alt_stack)(const DrdThreadId tid) 459{ 460 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS 461 && tid != DRD_INVALID_THREADID); 462 return DRD_(g_threadinfo)[tid].on_alt_stack; 463} 464 465void DRD_(thread_set_on_alt_stack)(const DrdThreadId tid, 466 const Bool on_alt_stack) 467{ 468 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS 469 && tid != DRD_INVALID_THREADID); 470 tl_assert(on_alt_stack == !!on_alt_stack); 471 DRD_(g_threadinfo)[tid].on_alt_stack = on_alt_stack; 472} 473 474Int DRD_(thread_get_threads_on_alt_stack)(void) 475{ 476 int i, n = 0; 477 478 for (i = 1; i < DRD_N_THREADS; i++) 479 n += DRD_(g_threadinfo)[i].on_alt_stack; 480 return n; 481} 482 483/** 484 * Clean up thread-specific data structures. 485 */ 486void DRD_(thread_delete)(const DrdThreadId tid, const Bool detached) 487{ 488 Segment* sg; 489 Segment* sg_prev; 490 491 tl_assert(DRD_(IsValidDrdThreadId)(tid)); 492 493 tl_assert(DRD_(g_threadinfo)[tid].synchr_nesting >= 0); 494 for (sg = DRD_(g_threadinfo)[tid].last; sg; sg = sg_prev) 495 { 496 sg_prev = sg->prev; 497 sg->prev = 0; 498 sg->next = 0; 499 DRD_(sg_put)(sg); 500 } 501 DRD_(g_threadinfo)[tid].valid = False; 502 DRD_(g_threadinfo)[tid].vg_thread_exists = False; 503 DRD_(g_threadinfo)[tid].posix_thread_exists = False; 504 if (detached) 505 DRD_(g_threadinfo)[tid].detached_posix_thread = False; 506 else 507 tl_assert(!DRD_(g_threadinfo)[tid].detached_posix_thread); 508 DRD_(g_threadinfo)[tid].first = 0; 509 DRD_(g_threadinfo)[tid].last = 0; 510 511 tl_assert(! DRD_(IsValidDrdThreadId)(tid)); 512} 513 514/** 515 * Called after a thread performed its last memory access and before 516 * thread_delete() is called. Note: thread_delete() is only called for 517 * joinable threads, not for detached threads. 
518 */ 519void DRD_(thread_finished)(const DrdThreadId tid) 520{ 521 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS 522 && tid != DRD_INVALID_THREADID); 523 524 DRD_(g_threadinfo)[tid].vg_thread_exists = False; 525 526 if (DRD_(g_threadinfo)[tid].detached_posix_thread) 527 { 528 /* 529 * Once a detached thread has finished, its stack is deallocated and 530 * should no longer be taken into account when computing the conflict set. 531 */ 532 DRD_(g_threadinfo)[tid].stack_min = DRD_(g_threadinfo)[tid].stack_max; 533 534 /* 535 * For a detached thread, calling pthread_exit() invalidates the 536 * POSIX thread ID associated with the detached thread. For joinable 537 * POSIX threads however, the POSIX thread ID remains live after the 538 * pthread_exit() call until pthread_join() is called. 539 */ 540 DRD_(g_threadinfo)[tid].posix_thread_exists = False; 541 } 542} 543 544/** Called just after fork() in the child process. */ 545void DRD_(drd_thread_atfork_child)(const DrdThreadId tid) 546{ 547 unsigned i; 548 549 for (i = 1; i < DRD_N_THREADS; i++) 550 { 551 if (i == tid) 552 continue; 553 if (DRD_(IsValidDrdThreadId(i))) 554 DRD_(thread_delete)(i, True); 555 tl_assert(!DRD_(IsValidDrdThreadId(i))); 556 } 557} 558 559/** Called just before pthread_cancel(). */ 560void DRD_(thread_pre_cancel)(const DrdThreadId tid) 561{ 562 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS 563 && tid != DRD_INVALID_THREADID); 564 tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID); 565 566 if (DRD_(thread_get_trace_fork_join)()) 567 VG_(message)(Vg_UserMsg, "[%d] drd_thread_pre_cancel %d\n", 568 DRD_(g_drd_running_tid), tid); 569} 570 571/** 572 * Store the POSIX thread ID for the specified thread. 573 * 574 * @note This function can be called two times for the same thread -- see also 575 * the comment block preceding the pthread_create() wrapper in 576 * drd_pthread_intercepts.c. 
577 */ 578void DRD_(thread_set_pthreadid)(const DrdThreadId tid, const PThreadId ptid) 579{ 580 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS 581 && tid != DRD_INVALID_THREADID); 582 tl_assert(DRD_(g_threadinfo)[tid].pt_threadid == INVALID_POSIX_THREADID 583 || DRD_(g_threadinfo)[tid].pt_threadid == ptid); 584 tl_assert(ptid != INVALID_POSIX_THREADID); 585 DRD_(g_threadinfo)[tid].posix_thread_exists = True; 586 DRD_(g_threadinfo)[tid].pt_threadid = ptid; 587} 588 589/** Returns true for joinable threads and false for detached threads. */ 590Bool DRD_(thread_get_joinable)(const DrdThreadId tid) 591{ 592 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS 593 && tid != DRD_INVALID_THREADID); 594 return ! DRD_(g_threadinfo)[tid].detached_posix_thread; 595} 596 597/** Store the thread mode: joinable or detached. */ 598void DRD_(thread_set_joinable)(const DrdThreadId tid, const Bool joinable) 599{ 600 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS 601 && tid != DRD_INVALID_THREADID); 602 tl_assert(!! joinable == joinable); 603 tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID); 604 605 DRD_(g_threadinfo)[tid].detached_posix_thread = ! joinable; 606} 607 608/** Tells DRD that the calling thread is about to enter pthread_create(). */ 609void DRD_(thread_entering_pthread_create)(const DrdThreadId tid) 610{ 611 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS 612 && tid != DRD_INVALID_THREADID); 613 tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID); 614 tl_assert(DRD_(g_threadinfo)[tid].pthread_create_nesting_level >= 0); 615 616 DRD_(g_threadinfo)[tid].pthread_create_nesting_level++; 617} 618 619/** Tells DRD that the calling thread has left pthread_create(). 
*/ 620void DRD_(thread_left_pthread_create)(const DrdThreadId tid) 621{ 622 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS 623 && tid != DRD_INVALID_THREADID); 624 tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID); 625 tl_assert(DRD_(g_threadinfo)[tid].pthread_create_nesting_level > 0); 626 627 DRD_(g_threadinfo)[tid].pthread_create_nesting_level--; 628} 629 630/** Obtain the thread number and the user-assigned thread name. */ 631const char* DRD_(thread_get_name)(const DrdThreadId tid) 632{ 633 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS 634 && tid != DRD_INVALID_THREADID); 635 636 return DRD_(g_threadinfo)[tid].name; 637} 638 639/** Set the name of the specified thread. */ 640void DRD_(thread_set_name)(const DrdThreadId tid, const char* const name) 641{ 642 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS 643 && tid != DRD_INVALID_THREADID); 644 645 if (name == NULL || name[0] == 0) 646 VG_(snprintf)(DRD_(g_threadinfo)[tid].name, 647 sizeof(DRD_(g_threadinfo)[tid].name), 648 "Thread %d", 649 tid); 650 else 651 VG_(snprintf)(DRD_(g_threadinfo)[tid].name, 652 sizeof(DRD_(g_threadinfo)[tid].name), 653 "Thread %d (%s)", 654 tid, name); 655 DRD_(g_threadinfo)[tid].name[sizeof(DRD_(g_threadinfo)[tid].name) - 1] = 0; 656} 657 658/** 659 * Update s_vg_running_tid, DRD_(g_drd_running_tid) and recalculate the 660 * conflict set. 661 */ 662void DRD_(thread_set_vg_running_tid)(const ThreadId vg_tid) 663{ 664 tl_assert(vg_tid != VG_INVALID_THREADID); 665 666 if (vg_tid != s_vg_running_tid) 667 { 668 DRD_(thread_set_running_tid)(vg_tid, 669 DRD_(VgThreadIdToDrdThreadId)(vg_tid)); 670 } 671 672 tl_assert(s_vg_running_tid != VG_INVALID_THREADID); 673 tl_assert(DRD_(g_drd_running_tid) != DRD_INVALID_THREADID); 674} 675 676/** 677 * Update s_vg_running_tid, DRD_(g_drd_running_tid) and recalculate the 678 * conflict set. 
679 */ 680void DRD_(thread_set_running_tid)(const ThreadId vg_tid, 681 const DrdThreadId drd_tid) 682{ 683 tl_assert(vg_tid != VG_INVALID_THREADID); 684 tl_assert(drd_tid != DRD_INVALID_THREADID); 685 686 if (vg_tid != s_vg_running_tid) 687 { 688 if (s_trace_context_switches 689 && DRD_(g_drd_running_tid) != DRD_INVALID_THREADID) 690 { 691 VG_(message)(Vg_DebugMsg, 692 "Context switch from thread %d to thread %d;" 693 " segments: %llu\n", 694 DRD_(g_drd_running_tid), drd_tid, 695 DRD_(sg_get_segments_alive_count)()); 696 } 697 s_vg_running_tid = vg_tid; 698 DRD_(g_drd_running_tid) = drd_tid; 699 thread_compute_conflict_set(&DRD_(g_conflict_set), drd_tid); 700 s_context_switch_count++; 701 } 702 703 tl_assert(s_vg_running_tid != VG_INVALID_THREADID); 704 tl_assert(DRD_(g_drd_running_tid) != DRD_INVALID_THREADID); 705} 706 707/** 708 * Increase the synchronization nesting counter. Must be called before the 709 * client calls a synchronization function. 710 */ 711int DRD_(thread_enter_synchr)(const DrdThreadId tid) 712{ 713 tl_assert(DRD_(IsValidDrdThreadId)(tid)); 714 return DRD_(g_threadinfo)[tid].synchr_nesting++; 715} 716 717/** 718 * Decrease the synchronization nesting counter. Must be called after the 719 * client left a synchronization function. 720 */ 721int DRD_(thread_leave_synchr)(const DrdThreadId tid) 722{ 723 tl_assert(DRD_(IsValidDrdThreadId)(tid)); 724 tl_assert(DRD_(g_threadinfo)[tid].synchr_nesting >= 1); 725 return --DRD_(g_threadinfo)[tid].synchr_nesting; 726} 727 728/** Returns the synchronization nesting counter. */ 729int DRD_(thread_get_synchr_nesting_count)(const DrdThreadId tid) 730{ 731 tl_assert(DRD_(IsValidDrdThreadId)(tid)); 732 return DRD_(g_threadinfo)[tid].synchr_nesting; 733} 734 735/** Append a new segment at the end of the segment list. 
*/ 736static 737void thread_append_segment(const DrdThreadId tid, Segment* const sg) 738{ 739 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS 740 && tid != DRD_INVALID_THREADID); 741 742#ifdef ENABLE_DRD_CONSISTENCY_CHECKS 743 tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid])); 744#endif 745 746 sg->prev = DRD_(g_threadinfo)[tid].last; 747 sg->next = 0; 748 if (DRD_(g_threadinfo)[tid].last) 749 DRD_(g_threadinfo)[tid].last->next = sg; 750 DRD_(g_threadinfo)[tid].last = sg; 751 if (DRD_(g_threadinfo)[tid].first == 0) 752 DRD_(g_threadinfo)[tid].first = sg; 753 754#ifdef ENABLE_DRD_CONSISTENCY_CHECKS 755 tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid])); 756#endif 757} 758 759/** 760 * Remove a segment from the segment list of thread threadid, and free the 761 * associated memory. 762 */ 763static 764void thread_discard_segment(const DrdThreadId tid, Segment* const sg) 765{ 766 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS 767 && tid != DRD_INVALID_THREADID); 768 769#ifdef ENABLE_DRD_CONSISTENCY_CHECKS 770 tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid])); 771#endif 772 773 if (sg->prev) 774 sg->prev->next = sg->next; 775 if (sg->next) 776 sg->next->prev = sg->prev; 777 if (sg == DRD_(g_threadinfo)[tid].first) 778 DRD_(g_threadinfo)[tid].first = sg->next; 779 if (sg == DRD_(g_threadinfo)[tid].last) 780 DRD_(g_threadinfo)[tid].last = sg->prev; 781 DRD_(sg_put)(sg); 782 783#ifdef ENABLE_DRD_CONSISTENCY_CHECKS 784 tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid])); 785#endif 786} 787 788/** 789 * Returns a pointer to the vector clock of the most recent segment associated 790 * with thread 'tid'. 
791 */ 792VectorClock* DRD_(thread_get_vc)(const DrdThreadId tid) 793{ 794 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS 795 && tid != DRD_INVALID_THREADID); 796 tl_assert(DRD_(g_threadinfo)[tid].last); 797 return &DRD_(g_threadinfo)[tid].last->vc; 798} 799 800/** 801 * Return the latest segment of thread 'tid' and increment its reference count. 802 */ 803void DRD_(thread_get_latest_segment)(Segment** sg, const DrdThreadId tid) 804{ 805 tl_assert(sg); 806 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS 807 && tid != DRD_INVALID_THREADID); 808 tl_assert(DRD_(g_threadinfo)[tid].last); 809 810 DRD_(sg_put)(*sg); 811 *sg = DRD_(sg_get)(DRD_(g_threadinfo)[tid].last); 812} 813 814/** 815 * Compute the minimum of all latest vector clocks of all threads 816 * (Michiel Ronsse calls this "clock snooping" in his papers about DIOTA). 817 * 818 * @param vc pointer to a vectorclock, holds result upon return. 819 */ 820static void DRD_(thread_compute_minimum_vc)(VectorClock* vc) 821{ 822 unsigned i; 823 Bool first; 824 Segment* latest_sg; 825 826 first = True; 827 for (i = 0; i < DRD_N_THREADS; i++) 828 { 829 latest_sg = DRD_(g_threadinfo)[i].last; 830 if (latest_sg) 831 { 832 if (first) 833 DRD_(vc_assign)(vc, &latest_sg->vc); 834 else 835 DRD_(vc_min)(vc, &latest_sg->vc); 836 first = False; 837 } 838 } 839} 840 841/** 842 * Compute the maximum of all latest vector clocks of all threads. 843 * 844 * @param vc pointer to a vectorclock, holds result upon return. 
845 */ 846static void DRD_(thread_compute_maximum_vc)(VectorClock* vc) 847{ 848 unsigned i; 849 Bool first; 850 Segment* latest_sg; 851 852 first = True; 853 for (i = 0; i < DRD_N_THREADS; i++) 854 { 855 latest_sg = DRD_(g_threadinfo)[i].last; 856 if (latest_sg) 857 { 858 if (first) 859 DRD_(vc_assign)(vc, &latest_sg->vc); 860 else 861 DRD_(vc_combine)(vc, &latest_sg->vc); 862 first = False; 863 } 864 } 865} 866 867/** 868 * Discard all segments that have a defined order against the latest vector 869 * clock of all threads -- these segments can no longer be involved in a 870 * data race. 871 */ 872static void thread_discard_ordered_segments(void) 873{ 874 unsigned i; 875 VectorClock thread_vc_min; 876 877 s_discard_ordered_segments_count++; 878 879 DRD_(vc_init)(&thread_vc_min, 0, 0); 880 DRD_(thread_compute_minimum_vc)(&thread_vc_min); 881 if (DRD_(sg_get_trace)()) 882 { 883 char *vc_min, *vc_max; 884 VectorClock thread_vc_max; 885 886 DRD_(vc_init)(&thread_vc_max, 0, 0); 887 DRD_(thread_compute_maximum_vc)(&thread_vc_max); 888 vc_min = DRD_(vc_aprint)(&thread_vc_min); 889 vc_max = DRD_(vc_aprint)(&thread_vc_max); 890 VG_(message)(Vg_DebugMsg, 891 "Discarding ordered segments -- min vc is %s, max vc is %s\n", 892 vc_min, vc_max); 893 VG_(free)(vc_min); 894 VG_(free)(vc_max); 895 DRD_(vc_cleanup)(&thread_vc_max); 896 } 897 898 for (i = 0; i < DRD_N_THREADS; i++) 899 { 900 Segment* sg; 901 Segment* sg_next; 902 for (sg = DRD_(g_threadinfo)[i].first; 903 sg && (sg_next = sg->next) && DRD_(vc_lte)(&sg->vc, &thread_vc_min); 904 sg = sg_next) 905 { 906 thread_discard_segment(i, sg); 907 } 908 } 909 DRD_(vc_cleanup)(&thread_vc_min); 910} 911 912/** 913 * An implementation of the property 'equiv(sg1, sg2)' as defined in the paper 914 * by Mark Christiaens e.a. The property equiv(sg1, sg2) holds if and only if 915 * all segments in the set CS are ordered consistently against both sg1 and 916 * sg2. 
The set CS is defined as the set of segments that can immediately 917 * precede future segments via inter-thread synchronization operations. In 918 * DRD the set CS consists of the latest segment of each thread combined with 919 * all segments for which the reference count is strictly greater than one. 920 * The code below is an optimized version of the following: 921 * 922 * for (i = 0; i < DRD_N_THREADS; i++) 923 * { 924 * Segment* sg; 925 * 926 * for (sg = DRD_(g_threadinfo)[i].first; sg; sg = sg->next) 927 * { 928 * if (sg == DRD_(g_threadinfo)[i].last || DRD_(sg_get_refcnt)(sg) > 1) 929 * { 930 * if ( DRD_(vc_lte)(&sg1->vc, &sg->vc) 931 * != DRD_(vc_lte)(&sg2->vc, &sg->vc) 932 * || DRD_(vc_lte)(&sg->vc, &sg1->vc) 933 * != DRD_(vc_lte)(&sg->vc, &sg2->vc)) 934 * { 935 * return False; 936 * } 937 * } 938 * } 939 * } 940 */ 941static Bool thread_consistent_segment_ordering(const DrdThreadId tid, 942 Segment* const sg1, 943 Segment* const sg2) 944{ 945 unsigned i; 946 947 tl_assert(sg1->next); 948 tl_assert(sg2->next); 949 tl_assert(sg1->next == sg2); 950 tl_assert(DRD_(vc_lte)(&sg1->vc, &sg2->vc)); 951 952 for (i = 0; i < DRD_N_THREADS; i++) 953 { 954 Segment* sg; 955 956 for (sg = DRD_(g_threadinfo)[i].first; sg; sg = sg->next) 957 { 958 if (! sg->next || DRD_(sg_get_refcnt)(sg) > 1) 959 { 960 if (DRD_(vc_lte)(&sg2->vc, &sg->vc)) 961 break; 962 if (DRD_(vc_lte)(&sg1->vc, &sg->vc)) 963 return False; 964 } 965 } 966 for (sg = DRD_(g_threadinfo)[i].last; sg; sg = sg->prev) 967 { 968 if (! sg->next || DRD_(sg_get_refcnt)(sg) > 1) 969 { 970 if (DRD_(vc_lte)(&sg->vc, &sg1->vc)) 971 break; 972 if (DRD_(vc_lte)(&sg->vc, &sg2->vc)) 973 return False; 974 } 975 } 976 } 977 return True; 978} 979 980/** 981 * Merge all segments that may be merged without triggering false positives 982 * or discarding real data races. For the theoretical background of segment 983 * merging, see also the following paper: Mark Christiaens, Michiel Ronsse 984 * and Koen De Bosschere. 
Bounding the number of segment histories during 985 * data race detection. Parallel Computing archive, Volume 28, Issue 9, 986 * pp 1221-1238, September 2002. This paper contains a proof that merging 987 * consecutive segments for which the property equiv(s1,s2) holds can be 988 * merged without reducing the accuracy of datarace detection. Furthermore 989 * it is also proven that the total number of all segments will never grow 990 * unbounded if all segments s1, s2 for which equiv(s1, s2) holds are merged 991 * every time a new segment is created. The property equiv(s1, s2) is defined 992 * as follows: equiv(s1, s2) <=> for all segments in the set CS, the vector 993 * clocks of segments s and s1 are ordered in the same way as those of segments 994 * s and s2. The set CS is defined as the set of existing segments s that have 995 * the potential to conflict with not yet created segments, either because the 996 * segment s is the latest segment of a thread or because it can become the 997 * immediate predecessor of a new segment due to a synchronization operation. 998 */ 999static void thread_merge_segments(void) 1000{ 1001 unsigned i; 1002 1003 s_new_segments_since_last_merge = 0; 1004 1005 for (i = 0; i < DRD_N_THREADS; i++) 1006 { 1007 Segment* sg; 1008 1009#ifdef ENABLE_DRD_CONSISTENCY_CHECKS 1010 tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[i])); 1011#endif 1012 1013 for (sg = DRD_(g_threadinfo)[i].first; sg; sg = sg->next) 1014 { 1015 if (DRD_(sg_get_refcnt)(sg) == 1 1016 && sg->next 1017 && DRD_(sg_get_refcnt)(sg->next) == 1 1018 && sg->next->next 1019 && thread_consistent_segment_ordering(i, sg, sg->next)) 1020 { 1021 /* Merge sg and sg->next into sg. 
*/ 1022 DRD_(sg_merge)(sg, sg->next); 1023 thread_discard_segment(i, sg->next); 1024 } 1025 } 1026 1027#ifdef ENABLE_DRD_CONSISTENCY_CHECKS 1028 tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[i])); 1029#endif 1030 } 1031} 1032 1033/** 1034 * Create a new segment for the specified thread, and discard any segments 1035 * that cannot cause races anymore. 1036 */ 1037void DRD_(thread_new_segment)(const DrdThreadId tid) 1038{ 1039 Segment* last_sg; 1040 Segment* new_sg; 1041 1042 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS 1043 && tid != DRD_INVALID_THREADID); 1044 tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid))); 1045 1046 last_sg = DRD_(g_threadinfo)[tid].last; 1047 new_sg = DRD_(sg_new)(tid, tid); 1048 thread_append_segment(tid, new_sg); 1049 if (tid == DRD_(g_drd_running_tid) && last_sg) 1050 { 1051 DRD_(thread_update_conflict_set)(tid, &last_sg->vc); 1052 s_update_conflict_set_new_sg_count++; 1053 } 1054 1055 tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid))); 1056 1057 if (s_segment_merging 1058 && ++s_new_segments_since_last_merge >= s_segment_merge_interval) 1059 { 1060 thread_discard_ordered_segments(); 1061 thread_merge_segments(); 1062 } 1063} 1064 1065/** Call this function after thread 'joiner' joined thread 'joinee'. 
*/
void DRD_(thread_combine_vc_join)(DrdThreadId joiner, DrdThreadId joinee)
{
   tl_assert(joiner != joinee);
   tl_assert(0 <= (int)joiner && joiner < DRD_N_THREADS
             && joiner != DRD_INVALID_THREADID);
   tl_assert(0 <= (int)joinee && joinee < DRD_N_THREADS
             && joinee != DRD_INVALID_THREADID);
   tl_assert(DRD_(g_threadinfo)[joiner].last);
   tl_assert(DRD_(g_threadinfo)[joinee].last);

   if (DRD_(sg_get_trace)())
   {
      char* joiner_str;
      char* joinee_str;

      joiner_str = DRD_(vc_aprint)(&DRD_(g_threadinfo)[joiner].last->vc);
      joinee_str = DRD_(vc_aprint)(&DRD_(g_threadinfo)[joinee].last->vc);
      VG_(message)(Vg_DebugMsg, "Before join: joiner %s, joinee %s\n",
                   joiner_str, joinee_str);
      VG_(free)(joiner_str);
      VG_(free)(joinee_str);
   }
   if (joiner == DRD_(g_drd_running_tid))
   {
      /* Updating the conflict set of the running thread requires the
       * vector clock from before the join operation.
       */
      VectorClock old_vc;

      DRD_(vc_copy)(&old_vc, &DRD_(g_threadinfo)[joiner].last->vc);
      DRD_(vc_combine)(&DRD_(g_threadinfo)[joiner].last->vc,
                       &DRD_(g_threadinfo)[joinee].last->vc);
      DRD_(thread_update_conflict_set)(joiner, &old_vc);
      s_update_conflict_set_join_count++;
      DRD_(vc_cleanup)(&old_vc);
   }
   else
   {
      DRD_(vc_combine)(&DRD_(g_threadinfo)[joiner].last->vc,
                       &DRD_(g_threadinfo)[joinee].last->vc);
   }

   thread_discard_ordered_segments();

   if (DRD_(sg_get_trace)())
   {
      char* str;

      str = DRD_(vc_aprint)(&DRD_(g_threadinfo)[joiner].last->vc);
      VG_(message)(Vg_DebugMsg, "After join: %s\n", str);
      VG_(free)(str);
   }
}

/**
 * Update the vector clock of the last segment of thread tid with the
 * vector clock of segment sg.
1117 */ 1118static void thread_combine_vc_sync(DrdThreadId tid, const Segment* sg) 1119{ 1120 const VectorClock* const vc = &sg->vc; 1121 1122 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS 1123 && tid != DRD_INVALID_THREADID); 1124 tl_assert(DRD_(g_threadinfo)[tid].last); 1125 tl_assert(sg); 1126 tl_assert(vc); 1127 1128 if (tid != sg->tid) 1129 { 1130 VectorClock old_vc; 1131 1132 DRD_(vc_copy)(&old_vc, &DRD_(g_threadinfo)[tid].last->vc); 1133 DRD_(vc_combine)(&DRD_(g_threadinfo)[tid].last->vc, vc); 1134 if (DRD_(sg_get_trace)()) 1135 { 1136 char *str1, *str2; 1137 str1 = DRD_(vc_aprint)(&old_vc); 1138 str2 = DRD_(vc_aprint)(&DRD_(g_threadinfo)[tid].last->vc); 1139 VG_(message)(Vg_DebugMsg, "thread %d: vc %s -> %s\n", tid, str1, str2); 1140 VG_(free)(str1); 1141 VG_(free)(str2); 1142 } 1143 1144 thread_discard_ordered_segments(); 1145 1146 DRD_(thread_update_conflict_set)(tid, &old_vc); 1147 s_update_conflict_set_sync_count++; 1148 1149 DRD_(vc_cleanup)(&old_vc); 1150 } 1151 else 1152 { 1153 tl_assert(DRD_(vc_lte)(vc, &DRD_(g_threadinfo)[tid].last->vc)); 1154 } 1155} 1156 1157/** 1158 * Create a new segment for thread tid and update the vector clock of the last 1159 * segment of this thread with the the vector clock of segment sg. Call this 1160 * function after thread tid had to wait because of thread synchronization 1161 * until the memory accesses in the segment sg finished. 
1162 */ 1163void DRD_(thread_new_segment_and_combine_vc)(DrdThreadId tid, const Segment* sg) 1164{ 1165 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS 1166 && tid != DRD_INVALID_THREADID); 1167 tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid))); 1168 tl_assert(sg); 1169 1170 thread_append_segment(tid, DRD_(sg_new)(tid, tid)); 1171 1172 thread_combine_vc_sync(tid, sg); 1173 1174 if (s_segment_merging 1175 && ++s_new_segments_since_last_merge >= s_segment_merge_interval) 1176 { 1177 thread_discard_ordered_segments(); 1178 thread_merge_segments(); 1179 } 1180} 1181 1182/** 1183 * Call this function whenever a thread is no longer using the memory 1184 * [ a1, a2 [, e.g. because of a call to free() or a stack pointer 1185 * increase. 1186 */ 1187void DRD_(thread_stop_using_mem)(const Addr a1, const Addr a2) 1188{ 1189 unsigned i; 1190 Segment* p; 1191 1192 for (i = 0; i < DRD_N_THREADS; i++) 1193 for (p = DRD_(g_threadinfo)[i].first; p; p = p->next) 1194 DRD_(bm_clear)(DRD_(sg_bm)(p), a1, a2); 1195 1196 DRD_(bm_clear)(DRD_(g_conflict_set), a1, a2); 1197} 1198 1199/** Specify whether memory loads should be recorded. */ 1200void DRD_(thread_set_record_loads)(const DrdThreadId tid, const Bool enabled) 1201{ 1202 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS 1203 && tid != DRD_INVALID_THREADID); 1204 tl_assert(enabled == !! enabled); 1205 1206 DRD_(g_threadinfo)[tid].is_recording_loads = enabled; 1207} 1208 1209/** Specify whether memory stores should be recorded. */ 1210void DRD_(thread_set_record_stores)(const DrdThreadId tid, const Bool enabled) 1211{ 1212 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS 1213 && tid != DRD_INVALID_THREADID); 1214 tl_assert(enabled == !! enabled); 1215 1216 DRD_(g_threadinfo)[tid].is_recording_stores = enabled; 1217} 1218 1219/** 1220 * Print the segment information for all threads. 1221 * 1222 * This function is only used for debugging purposes. 
1223 */ 1224void DRD_(thread_print_all)(void) 1225{ 1226 unsigned i; 1227 Segment* p; 1228 1229 for (i = 0; i < DRD_N_THREADS; i++) 1230 { 1231 if (DRD_(g_threadinfo)[i].first) 1232 { 1233 VG_(printf)("**************\n" 1234 "* thread %3d (%d/%d/%d/%d/0x%lx/%d) *\n" 1235 "**************\n", 1236 i, 1237 DRD_(g_threadinfo)[i].valid, 1238 DRD_(g_threadinfo)[i].vg_thread_exists, 1239 DRD_(g_threadinfo)[i].vg_threadid, 1240 DRD_(g_threadinfo)[i].posix_thread_exists, 1241 DRD_(g_threadinfo)[i].pt_threadid, 1242 DRD_(g_threadinfo)[i].detached_posix_thread); 1243 for (p = DRD_(g_threadinfo)[i].first; p; p = p->next) 1244 { 1245 DRD_(sg_print)(p); 1246 } 1247 } 1248 } 1249} 1250 1251/** Show a call stack involved in a data race. */ 1252static void show_call_stack(const DrdThreadId tid, 1253 const Char* const msg, 1254 ExeContext* const callstack) 1255{ 1256 const ThreadId vg_tid = DRD_(DrdThreadIdToVgThreadId)(tid); 1257 1258 VG_(message)(Vg_UserMsg, "%s (thread %d)\n", msg, tid); 1259 1260 if (vg_tid != VG_INVALID_THREADID) 1261 { 1262 if (callstack) 1263 { 1264 VG_(pp_ExeContext)(callstack); 1265 } 1266 else 1267 { 1268 VG_(get_and_pp_StackTrace)(vg_tid, VG_(clo_backtrace_size)); 1269 } 1270 } 1271 else 1272 { 1273 VG_(message)(Vg_UserMsg, 1274 " (thread finished, call stack no longer available)\n"); 1275 } 1276} 1277 1278/** Print information about the segments involved in a data race. 
*/ 1279static void 1280thread_report_conflicting_segments_segment(const DrdThreadId tid, 1281 const Addr addr, 1282 const SizeT size, 1283 const BmAccessTypeT access_type, 1284 const Segment* const p) 1285{ 1286 unsigned i; 1287 1288 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS 1289 && tid != DRD_INVALID_THREADID); 1290 tl_assert(p); 1291 1292 for (i = 0; i < DRD_N_THREADS; i++) 1293 { 1294 if (i != tid) 1295 { 1296 Segment* q; 1297 for (q = DRD_(g_threadinfo)[i].last; q; q = q->prev) 1298 { 1299 /* 1300 * Since q iterates over the segments of thread i in order of 1301 * decreasing vector clocks, if q->vc <= p->vc, then 1302 * q->next->vc <= p->vc will also hold. Hence, break out of the 1303 * loop once this condition is met. 1304 */ 1305 if (DRD_(vc_lte)(&q->vc, &p->vc)) 1306 break; 1307 if (! DRD_(vc_lte)(&p->vc, &q->vc)) 1308 { 1309 if (DRD_(bm_has_conflict_with)(DRD_(sg_bm)(q), addr, addr + size, 1310 access_type)) 1311 { 1312 tl_assert(q->stacktrace); 1313 show_call_stack(i, "Other segment start", 1314 q->stacktrace); 1315 show_call_stack(i, "Other segment end", 1316 q->next ? q->next->stacktrace : 0); 1317 } 1318 } 1319 } 1320 } 1321 } 1322} 1323 1324/** Print information about all segments involved in a data race. */ 1325void DRD_(thread_report_conflicting_segments)(const DrdThreadId tid, 1326 const Addr addr, 1327 const SizeT size, 1328 const BmAccessTypeT access_type) 1329{ 1330 Segment* p; 1331 1332 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS 1333 && tid != DRD_INVALID_THREADID); 1334 1335 for (p = DRD_(g_threadinfo)[tid].first; p; p = p->next) 1336 { 1337 if (DRD_(bm_has)(DRD_(sg_bm)(p), addr, addr + size, access_type)) 1338 { 1339 thread_report_conflicting_segments_segment(tid, addr, size, 1340 access_type, p); 1341 } 1342 } 1343} 1344 1345/** 1346 * Verify whether the conflict set for thread tid is up to date. Only perform 1347 * the check if the environment variable DRD_VERIFY_CONFLICT_SET has been set. 
1348 */ 1349static Bool thread_conflict_set_up_to_date(const DrdThreadId tid) 1350{ 1351 static int do_verify_conflict_set = -1; 1352 Bool result; 1353 struct bitmap* computed_conflict_set = 0; 1354 1355 if (do_verify_conflict_set < 0) 1356 do_verify_conflict_set = VG_(getenv)("DRD_VERIFY_CONFLICT_SET") != 0; 1357 1358 if (do_verify_conflict_set == 0) 1359 return True; 1360 1361 thread_compute_conflict_set(&computed_conflict_set, tid); 1362 result = DRD_(bm_equal)(DRD_(g_conflict_set), computed_conflict_set); 1363 if (! result) 1364 { 1365 VG_(printf)("actual conflict set:\n"); 1366 DRD_(bm_print)(DRD_(g_conflict_set)); 1367 VG_(printf)("\n"); 1368 VG_(printf)("computed conflict set:\n"); 1369 DRD_(bm_print)(computed_conflict_set); 1370 VG_(printf)("\n"); 1371 } 1372 DRD_(bm_delete)(computed_conflict_set); 1373 return result; 1374} 1375 1376/** 1377 * Compute the conflict set: a bitmap that represents the union of all memory 1378 * accesses of all segments that are unordered to the current segment of the 1379 * thread tid. 
1380 */ 1381static void thread_compute_conflict_set(struct bitmap** conflict_set, 1382 const DrdThreadId tid) 1383{ 1384 Segment* p; 1385 1386 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS 1387 && tid != DRD_INVALID_THREADID); 1388 tl_assert(tid == DRD_(g_drd_running_tid)); 1389 1390 s_compute_conflict_set_count++; 1391 s_conflict_set_bitmap_creation_count 1392 -= DRD_(bm_get_bitmap_creation_count)(); 1393 s_conflict_set_bitmap2_creation_count 1394 -= DRD_(bm_get_bitmap2_creation_count)(); 1395 1396 if (*conflict_set) 1397 { 1398 DRD_(bm_cleanup)(*conflict_set); 1399 DRD_(bm_init)(*conflict_set); 1400 } 1401 else 1402 { 1403 *conflict_set = DRD_(bm_new)(); 1404 } 1405 1406 if (s_trace_conflict_set) 1407 { 1408 char* str; 1409 1410 str = DRD_(vc_aprint)(&DRD_(g_threadinfo)[tid].last->vc); 1411 VG_(message)(Vg_DebugMsg, 1412 "computing conflict set for thread %d with vc %s\n", 1413 tid, str); 1414 VG_(free)(str); 1415 } 1416 1417 p = DRD_(g_threadinfo)[tid].last; 1418 { 1419 unsigned j; 1420 1421 if (s_trace_conflict_set) 1422 { 1423 char* vc; 1424 1425 vc = DRD_(vc_aprint)(&p->vc); 1426 VG_(message)(Vg_DebugMsg, "conflict set: thread [%d] at vc %s\n", 1427 tid, vc); 1428 VG_(free)(vc); 1429 } 1430 1431 for (j = 0; j < DRD_N_THREADS; j++) 1432 { 1433 if (j != tid && DRD_(IsValidDrdThreadId)(j)) 1434 { 1435 Segment* q; 1436 for (q = DRD_(g_threadinfo)[j].last; q; q = q->prev) 1437 { 1438 if (! DRD_(vc_lte)(&q->vc, &p->vc) 1439 && ! 
DRD_(vc_lte)(&p->vc, &q->vc)) 1440 { 1441 if (s_trace_conflict_set) 1442 { 1443 char* str; 1444 1445 str = DRD_(vc_aprint)(&q->vc); 1446 VG_(message)(Vg_DebugMsg, 1447 "conflict set: [%d] merging segment %s\n", 1448 j, str); 1449 VG_(free)(str); 1450 } 1451 DRD_(bm_merge2)(*conflict_set, DRD_(sg_bm)(q)); 1452 } 1453 else 1454 { 1455 if (s_trace_conflict_set) 1456 { 1457 char* str; 1458 1459 str = DRD_(vc_aprint)(&q->vc); 1460 VG_(message)(Vg_DebugMsg, 1461 "conflict set: [%d] ignoring segment %s\n", 1462 j, str); 1463 VG_(free)(str); 1464 } 1465 } 1466 } 1467 } 1468 } 1469 } 1470 1471 s_conflict_set_bitmap_creation_count 1472 += DRD_(bm_get_bitmap_creation_count)(); 1473 s_conflict_set_bitmap2_creation_count 1474 += DRD_(bm_get_bitmap2_creation_count)(); 1475 1476 if (s_trace_conflict_set_bm) 1477 { 1478 VG_(message)(Vg_DebugMsg, "[%d] new conflict set:\n", tid); 1479 DRD_(bm_print)(*conflict_set); 1480 VG_(message)(Vg_DebugMsg, "[%d] end of new conflict set.\n", tid); 1481 } 1482} 1483 1484/** 1485 * Update the conflict set after the vector clock of thread tid has been 1486 * updated from old_vc to its current value, either because a new segment has 1487 * been created or because of a synchronization operation. 
1488 */ 1489void DRD_(thread_update_conflict_set)(const DrdThreadId tid, 1490 const VectorClock* const old_vc) 1491{ 1492 const VectorClock* new_vc; 1493 Segment* p; 1494 unsigned j; 1495 1496 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS 1497 && tid != DRD_INVALID_THREADID); 1498 tl_assert(old_vc); 1499 tl_assert(tid == DRD_(g_drd_running_tid)); 1500 tl_assert(DRD_(g_conflict_set)); 1501 1502 if (s_trace_conflict_set) 1503 { 1504 char* str; 1505 1506 str = DRD_(vc_aprint)(&DRD_(g_threadinfo)[tid].last->vc); 1507 VG_(message)(Vg_DebugMsg, 1508 "updating conflict set for thread %d with vc %s\n", 1509 tid, str); 1510 VG_(free)(str); 1511 } 1512 1513 new_vc = &DRD_(g_threadinfo)[tid].last->vc; 1514 tl_assert(DRD_(vc_lte)(old_vc, new_vc)); 1515 1516 DRD_(bm_unmark)(DRD_(g_conflict_set)); 1517 1518 for (j = 0; j < DRD_N_THREADS; j++) 1519 { 1520 Segment* q; 1521 1522 if (j == tid || ! DRD_(IsValidDrdThreadId)(j)) 1523 continue; 1524 1525 for (q = DRD_(g_threadinfo)[j].last; 1526 q && !DRD_(vc_lte)(&q->vc, new_vc); 1527 q = q->prev) { 1528 const Bool included_in_old_conflict_set 1529 = !DRD_(vc_lte)(old_vc, &q->vc); 1530 const Bool included_in_new_conflict_set 1531 = !DRD_(vc_lte)(new_vc, &q->vc); 1532 1533 if (UNLIKELY(s_trace_conflict_set)) { 1534 char* str; 1535 1536 str = DRD_(vc_aprint)(&q->vc); 1537 VG_(message)(Vg_DebugMsg, 1538 "conflict set: [%d] %s segment %s\n", j, 1539 included_in_old_conflict_set 1540 != included_in_new_conflict_set 1541 ? 
"merging" : "ignoring", str); 1542 VG_(free)(str); 1543 } 1544 if (included_in_old_conflict_set != included_in_new_conflict_set) 1545 DRD_(bm_mark)(DRD_(g_conflict_set), DRD_(sg_bm)(q)); 1546 } 1547 1548 for ( ; q && !DRD_(vc_lte)(&q->vc, old_vc); q = q->prev) { 1549 const Bool included_in_old_conflict_set 1550 = !DRD_(vc_lte)(old_vc, &q->vc); 1551 const Bool included_in_new_conflict_set 1552 = !DRD_(vc_lte)(&q->vc, new_vc) 1553 && !DRD_(vc_lte)(new_vc, &q->vc); 1554 1555 if (UNLIKELY(s_trace_conflict_set)) { 1556 char* str; 1557 1558 str = DRD_(vc_aprint)(&q->vc); 1559 VG_(message)(Vg_DebugMsg, 1560 "conflict set: [%d] %s segment %s\n", j, 1561 included_in_old_conflict_set 1562 != included_in_new_conflict_set 1563 ? "merging" : "ignoring", str); 1564 VG_(free)(str); 1565 } 1566 if (included_in_old_conflict_set != included_in_new_conflict_set) 1567 DRD_(bm_mark)(DRD_(g_conflict_set), DRD_(sg_bm)(q)); 1568 } 1569 } 1570 1571 DRD_(bm_clear_marked)(DRD_(g_conflict_set)); 1572 1573 p = DRD_(g_threadinfo)[tid].last; 1574 for (j = 0; j < DRD_N_THREADS; j++) 1575 { 1576 if (j != tid && DRD_(IsValidDrdThreadId)(j)) 1577 { 1578 Segment* q; 1579 for (q = DRD_(g_threadinfo)[j].last; 1580 q && !DRD_(vc_lte)(&q->vc, &p->vc); 1581 q = q->prev) { 1582 if (!DRD_(vc_lte)(&p->vc, &q->vc)) 1583 DRD_(bm_merge2_marked)(DRD_(g_conflict_set), DRD_(sg_bm)(q)); 1584 } 1585 } 1586 } 1587 1588 DRD_(bm_remove_cleared_marked)(DRD_(g_conflict_set)); 1589 1590 s_update_conflict_set_count++; 1591 1592 if (s_trace_conflict_set_bm) 1593 { 1594 VG_(message)(Vg_DebugMsg, "[%d] updated conflict set:\n", tid); 1595 DRD_(bm_print)(DRD_(g_conflict_set)); 1596 VG_(message)(Vg_DebugMsg, "[%d] end of updated conflict set.\n", tid); 1597 } 1598 1599 tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid))); 1600} 1601 1602/** Report the number of context switches performed. 
*/ 1603ULong DRD_(thread_get_context_switch_count)(void) 1604{ 1605 return s_context_switch_count; 1606} 1607 1608/** Report the number of ordered segments that have been discarded. */ 1609ULong DRD_(thread_get_discard_ordered_segments_count)(void) 1610{ 1611 return s_discard_ordered_segments_count; 1612} 1613 1614/** Return how many times the conflict set has been updated entirely. */ 1615ULong DRD_(thread_get_compute_conflict_set_count)() 1616{ 1617 return s_compute_conflict_set_count; 1618} 1619 1620/** Return how many times the conflict set has been updated partially. */ 1621ULong DRD_(thread_get_update_conflict_set_count)(void) 1622{ 1623 return s_update_conflict_set_count; 1624} 1625 1626/** 1627 * Return how many times the conflict set has been updated partially 1628 * because a new segment has been created. 1629 */ 1630ULong DRD_(thread_get_update_conflict_set_new_sg_count)(void) 1631{ 1632 return s_update_conflict_set_new_sg_count; 1633} 1634 1635/** 1636 * Return how many times the conflict set has been updated partially 1637 * because of combining vector clocks due to synchronization operations 1638 * other than reader/writer lock or barrier operations. 1639 */ 1640ULong DRD_(thread_get_update_conflict_set_sync_count)(void) 1641{ 1642 return s_update_conflict_set_sync_count; 1643} 1644 1645/** 1646 * Return how many times the conflict set has been updated partially 1647 * because of thread joins. 1648 */ 1649ULong DRD_(thread_get_update_conflict_set_join_count)(void) 1650{ 1651 return s_update_conflict_set_join_count; 1652} 1653 1654/** 1655 * Return the number of first-level bitmaps that have been created during 1656 * conflict set updates. 1657 */ 1658ULong DRD_(thread_get_conflict_set_bitmap_creation_count)(void) 1659{ 1660 return s_conflict_set_bitmap_creation_count; 1661} 1662 1663/** 1664 * Return the number of second-level bitmaps that have been created during 1665 * conflict set updates. 
1666 */ 1667ULong DRD_(thread_get_conflict_set_bitmap2_creation_count)(void) 1668{ 1669 return s_conflict_set_bitmap2_creation_count; 1670} 1671