drd_thread.c revision 31b983d29affe6c30a2283be8824c6d75c74d848
1/* -*- mode: C; c-basic-offset: 3; -*- */ 2/* 3 This file is part of drd, a thread error detector. 4 5 Copyright (C) 2006-2009 Bart Van Assche <bart.vanassche@gmail.com>. 6 7 This program is free software; you can redistribute it and/or 8 modify it under the terms of the GNU General Public License as 9 published by the Free Software Foundation; either version 2 of the 10 License, or (at your option) any later version. 11 12 This program is distributed in the hope that it will be useful, but 13 WITHOUT ANY WARRANTY; without even the implied warranty of 14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 15 General Public License for more details. 16 17 You should have received a copy of the GNU General Public License 18 along with this program; if not, write to the Free Software 19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 20 02111-1307, USA. 21 22 The GNU General Public License is contained in the file COPYING. 23*/ 24 25 26#include "drd_error.h" 27#include "drd_barrier.h" 28#include "drd_clientobj.h" 29#include "drd_cond.h" 30#include "drd_mutex.h" 31#include "drd_segment.h" 32#include "drd_semaphore.h" 33#include "drd_suppression.h" 34#include "drd_thread.h" 35#include "pub_tool_vki.h" 36#include "pub_tool_basics.h" // Addr, SizeT 37#include "pub_tool_libcassert.h" // tl_assert() 38#include "pub_tool_libcbase.h" // VG_(strlen)() 39#include "pub_tool_libcprint.h" // VG_(printf)() 40#include "pub_tool_libcproc.h" // VG_(getenv)() 41#include "pub_tool_machine.h" 42#include "pub_tool_mallocfree.h" // VG_(malloc)(), VG_(free)() 43#include "pub_tool_options.h" // VG_(clo_backtrace_size) 44#include "pub_tool_threadstate.h" // VG_(get_pthread_id)() 45 46 47 48/* Local functions. 
*/ 49 50static void thread_append_segment(const DrdThreadId tid, Segment* const sg); 51static void thread_discard_segment(const DrdThreadId tid, Segment* const sg); 52static void thread_compute_conflict_set(struct bitmap** conflict_set, 53 const DrdThreadId tid); 54static Bool thread_conflict_set_up_to_date(const DrdThreadId tid); 55 56 57/* Local variables. */ 58 59static ULong s_context_switch_count; 60static ULong s_discard_ordered_segments_count; 61static ULong s_compute_conflict_set_count; 62static ULong s_update_conflict_set_count; 63static ULong s_update_conflict_set_new_sg_count; 64static ULong s_update_conflict_set_sync_count; 65static ULong s_update_conflict_set_join_count; 66static ULong s_conflict_set_bitmap_creation_count; 67static ULong s_conflict_set_bitmap2_creation_count; 68static ThreadId s_vg_running_tid = VG_INVALID_THREADID; 69DrdThreadId DRD_(g_drd_running_tid) = DRD_INVALID_THREADID; 70ThreadInfo DRD_(g_threadinfo)[DRD_N_THREADS]; 71struct bitmap* DRD_(g_conflict_set); 72static Bool s_trace_context_switches = False; 73static Bool s_trace_conflict_set = False; 74static Bool s_trace_conflict_set_bm = False; 75static Bool s_trace_fork_join = False; 76static Bool s_segment_merging = True; 77static Bool s_new_segments_since_last_merge; 78static int s_segment_merge_interval = 10; 79 80 81/* Function definitions. */ 82 83/** Enables/disables context switch tracing. */ 84void DRD_(thread_trace_context_switches)(const Bool t) 85{ 86 tl_assert(t == False || t == True); 87 s_trace_context_switches = t; 88} 89 90/** Enables/disables conflict set tracing. */ 91void DRD_(thread_trace_conflict_set)(const Bool t) 92{ 93 tl_assert(t == False || t == True); 94 s_trace_conflict_set = t; 95} 96 97/** Enables/disables conflict set bitmap tracing. */ 98void DRD_(thread_trace_conflict_set_bm)(const Bool t) 99{ 100 tl_assert(t == False || t == True); 101 s_trace_conflict_set_bm = t; 102} 103 104/** Report whether fork/join tracing is enabled. 
 */
Bool DRD_(thread_get_trace_fork_join)(void)
{
   return s_trace_fork_join;
}

/** Enables/disables fork/join tracing. */
void DRD_(thread_set_trace_fork_join)(const Bool t)
{
   tl_assert(t == False || t == True);
   s_trace_fork_join = t;
}

/** Enables/disables segment merging. */
void DRD_(thread_set_segment_merging)(const Bool m)
{
   tl_assert(m == False || m == True);
   s_segment_merging = m;
}

/** Get the segment merging interval. */
int DRD_(thread_get_segment_merge_interval)(void)
{
   return s_segment_merge_interval;
}

/** Set the segment merging interval. */
void DRD_(thread_set_segment_merge_interval)(const int i)
{
   s_segment_merge_interval = i;
}

/**
 * Convert Valgrind's ThreadId into a DrdThreadId.
 *
 * @return DRD thread ID upon success and DRD_INVALID_THREADID if the passed
 *         Valgrind ThreadId does not yet exist.
 */
DrdThreadId DRD_(VgThreadIdToDrdThreadId)(const ThreadId tid)
{
   int i;

   if (tid == VG_INVALID_THREADID)
      return DRD_INVALID_THREADID;

   /* Slot 0 is never used -- scanning starts at index 1. */
   for (i = 1; i < DRD_N_THREADS; i++)
   {
      if (DRD_(g_threadinfo)[i].vg_thread_exists == True
          && DRD_(g_threadinfo)[i].vg_threadid == tid)
      {
         return i;
      }
   }

   return DRD_INVALID_THREADID;
}

/** Allocate a new DRD thread ID for the specified Valgrind thread ID. */
static DrdThreadId DRD_(VgThreadIdToNewDrdThreadId)(const ThreadId tid)
{
   int i;

   tl_assert(DRD_(VgThreadIdToDrdThreadId)(tid) == DRD_INVALID_THREADID);

   for (i = 1; i < DRD_N_THREADS; i++)
   {
      /*
       * A slot is free only if neither the Valgrind thread, the POSIX
       * thread, nor a detached POSIX thread still refers to it.
       */
      if (DRD_(g_threadinfo)[i].vg_thread_exists == False
          && DRD_(g_threadinfo)[i].posix_thread_exists == False
          && DRD_(g_threadinfo)[i].detached_posix_thread == False)
      {
         tl_assert(! DRD_(IsValidDrdThreadId)(i));

         DRD_(g_threadinfo)[i].vg_thread_exists = True;
         DRD_(g_threadinfo)[i].vg_threadid = tid;
         DRD_(g_threadinfo)[i].pt_threadid = INVALID_POSIX_THREADID;
         DRD_(g_threadinfo)[i].stack_min = 0;
         DRD_(g_threadinfo)[i].stack_min_min = 0;
         DRD_(g_threadinfo)[i].stack_startup = 0;
         DRD_(g_threadinfo)[i].stack_max = 0;
         DRD_(thread_set_name)(i, "");
         DRD_(g_threadinfo)[i].is_recording_loads = True;
         DRD_(g_threadinfo)[i].is_recording_stores = True;
         DRD_(g_threadinfo)[i].pthread_create_nesting_level = 0;
         DRD_(g_threadinfo)[i].synchr_nesting = 0;
         tl_assert(DRD_(g_threadinfo)[i].first == 0);
         tl_assert(DRD_(g_threadinfo)[i].last == 0);

         tl_assert(DRD_(IsValidDrdThreadId)(i));

         return i;
      }
   }

   VG_(printf)(
"\nSorry, but the maximum number of threads supported by DRD has been exceeded."
"Aborting.\n");

   tl_assert(False);

   return DRD_INVALID_THREADID;
}

/** Convert a POSIX thread ID into a DRD thread ID. */
DrdThreadId DRD_(PtThreadIdToDrdThreadId)(const PThreadId tid)
{
   int i;

   if (tid != INVALID_POSIX_THREADID)
   {
      for (i = 1; i < DRD_N_THREADS; i++)
      {
         if (DRD_(g_threadinfo)[i].posix_thread_exists
             && DRD_(g_threadinfo)[i].pt_threadid == tid)
         {
            return i;
         }
      }
   }
   return DRD_INVALID_THREADID;
}

/** Convert a DRD thread ID into a Valgrind thread ID. */
ThreadId DRD_(DrdThreadIdToVgThreadId)(const DrdThreadId tid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);

   return (DRD_(g_threadinfo)[tid].vg_thread_exists
           ? DRD_(g_threadinfo)[tid].vg_threadid
           : VG_INVALID_THREADID);
}

#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
/**
 * Sanity check of the doubly linked list of segments referenced by a
 * ThreadInfo struct.
 * @return True if sane, False if not.
 */
static Bool DRD_(sane_ThreadInfo)(const ThreadInfo* const ti)
{
   Segment* p;

   /* Forward walk: each next/prev pair must be consistent and the walk
      must terminate at ti->last. */
   for (p = ti->first; p; p = p->next) {
      if (p->next && p->next->prev != p)
         return False;
      if (p->next == 0 && p != ti->last)
         return False;
   }
   /* Backward walk: symmetric check terminating at ti->first. */
   for (p = ti->last; p; p = p->prev) {
      if (p->prev && p->prev->next != p)
         return False;
      if (p->prev == 0 && p != ti->first)
         return False;
   }
   return True;
}
#endif

/**
 * Create the first segment for a newly started thread.
 *
 * This function is called from the handler installed via
 * VG_(track_pre_thread_ll_create)(). The Valgrind core invokes this handler
 * from the context of the creator thread, before the new thread has been
 * created.
 *
 * @param[in] creator    DRD thread ID of the creator thread.
 * @param[in] vg_created Valgrind thread ID of the created thread.
 *
 * @return DRD thread ID of the created thread.
 */
DrdThreadId DRD_(thread_pre_create)(const DrdThreadId creator,
                                    const ThreadId vg_created)
{
   DrdThreadId created;

   tl_assert(DRD_(VgThreadIdToDrdThreadId)(vg_created) == DRD_INVALID_THREADID);
   created = DRD_(VgThreadIdToNewDrdThreadId)(vg_created);
   tl_assert(0 <= (int)created && created < DRD_N_THREADS
             && created != DRD_INVALID_THREADID);

   tl_assert(DRD_(g_threadinfo)[created].first == 0);
   tl_assert(DRD_(g_threadinfo)[created].last == 0);
   /* Create an initial segment for the newly created thread. */
   thread_append_segment(created, DRD_(sg_new)(creator, created));

   return created;
}

/**
 * Initialize DRD_(g_threadinfo)[] for a newly created thread. Must be called
 * after the thread has been created and before any client instructions are run
 * on the newly created thread, e.g. from the handler installed via
 * VG_(track_pre_thread_first_insn)().
 *
 * @param[in] vg_created Valgrind thread ID of the newly created thread.
 *
 * @return DRD thread ID for the new thread.
 */
DrdThreadId DRD_(thread_post_create)(const ThreadId vg_created)
{
   const DrdThreadId created = DRD_(VgThreadIdToDrdThreadId)(vg_created);

   tl_assert(0 <= (int)created && created < DRD_N_THREADS
             && created != DRD_INVALID_THREADID);

   /* Stack grows downward: min/min_min/startup all begin at stack_max. */
   DRD_(g_threadinfo)[created].stack_max
      = VG_(thread_get_stack_max)(vg_created);
   DRD_(g_threadinfo)[created].stack_startup
      = DRD_(g_threadinfo)[created].stack_max;
   DRD_(g_threadinfo)[created].stack_min
      = DRD_(g_threadinfo)[created].stack_max;
   DRD_(g_threadinfo)[created].stack_min_min
      = DRD_(g_threadinfo)[created].stack_max;
   DRD_(g_threadinfo)[created].stack_size
      = VG_(thread_get_stack_size)(vg_created);
   tl_assert(DRD_(g_threadinfo)[created].stack_max != 0);

   return created;
}

/**
 * Process VG_USERREQ__POST_THREAD_JOIN. This client request is invoked just
 * after thread drd_joiner joined thread drd_joinee.
 */
void DRD_(thread_post_join)(DrdThreadId drd_joiner, DrdThreadId drd_joinee)
{
   tl_assert(DRD_(IsValidDrdThreadId)(drd_joiner));
   tl_assert(DRD_(IsValidDrdThreadId)(drd_joinee));

   DRD_(thread_new_segment)(drd_joiner);
   DRD_(thread_combine_vc_join)(drd_joiner, drd_joinee);
   DRD_(thread_new_segment)(drd_joinee);

   if (s_trace_fork_join)
   {
      const ThreadId joiner = DRD_(DrdThreadIdToVgThreadId)(drd_joiner);
      const unsigned msg_size = 256;
      char* msg;

      msg = VG_(malloc)("drd.main.dptj.1", msg_size);
      tl_assert(msg);
      VG_(snprintf)(msg, msg_size,
                    "drd_post_thread_join joiner = %d, joinee = %d",
                    drd_joiner, drd_joinee);
      if (joiner)
      {
         char* vc;

         vc = DRD_(vc_aprint)(DRD_(thread_get_vc)(drd_joiner));
         VG_(snprintf)(msg + VG_(strlen)(msg), msg_size - VG_(strlen)(msg),
                       ", new vc: %s", vc);
         VG_(free)(vc);
      }
      VG_(message)(Vg_DebugMsg, "%s\n", msg);
      VG_(free)(msg);
   }

   /* The joinee's stack is gone now; drop its suppression range unless
      stack access checking is enabled. */
   if (! DRD_(get_check_stack_accesses)())
   {
      DRD_(finish_suppression)(DRD_(thread_get_stack_max)(drd_joinee)
                               - DRD_(thread_get_stack_size)(drd_joinee),
                               DRD_(thread_get_stack_max)(drd_joinee));
   }
   DRD_(clientobj_delete_thread)(drd_joinee);
   DRD_(thread_delete)(drd_joinee);
}

/**
 * NPTL hack: NPTL allocates the 'struct pthread' on top of the stack,
 * and accesses this data structure from multiple threads without locking.
 * Any conflicting accesses in the range stack_startup..stack_max will be
 * ignored.
 */
void DRD_(thread_set_stack_startup)(const DrdThreadId tid,
                                    const Addr stack_startup)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(DRD_(g_threadinfo)[tid].stack_min <= stack_startup);
   tl_assert(stack_startup <= DRD_(g_threadinfo)[tid].stack_max);
   DRD_(g_threadinfo)[tid].stack_startup = stack_startup;
}

/** Return the stack pointer for the specified thread. */
Addr DRD_(thread_get_stack_min)(const DrdThreadId tid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   return DRD_(g_threadinfo)[tid].stack_min;
}

/**
 * Return the lowest value that was ever assigned to the stack pointer
 * for the specified thread.
 */
Addr DRD_(thread_get_stack_min_min)(const DrdThreadId tid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   return DRD_(g_threadinfo)[tid].stack_min_min;
}

/** Return the top address for the stack of the specified thread. */
Addr DRD_(thread_get_stack_max)(const DrdThreadId tid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   return DRD_(g_threadinfo)[tid].stack_max;
}

/** Return the maximum stack size for the specified thread.
 */
SizeT DRD_(thread_get_stack_size)(const DrdThreadId tid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   return DRD_(g_threadinfo)[tid].stack_size;
}

/**
 * Clean up thread-specific data structures. Call this just after
 * pthread_join().
 */
void DRD_(thread_delete)(const DrdThreadId tid)
{
   Segment* sg;
   Segment* sg_prev;

   tl_assert(DRD_(IsValidDrdThreadId)(tid));

   tl_assert(DRD_(g_threadinfo)[tid].synchr_nesting >= 0);
   /* Release all segments, walking backward; sg_prev is saved before
      sg's links are cleared and sg is handed back to the segment pool. */
   for (sg = DRD_(g_threadinfo)[tid].last; sg; sg = sg_prev)
   {
      sg_prev = sg->prev;
      sg->prev = 0;
      sg->next = 0;
      DRD_(sg_put)(sg);
   }
   DRD_(g_threadinfo)[tid].vg_thread_exists = False;
   DRD_(g_threadinfo)[tid].posix_thread_exists = False;
   tl_assert(DRD_(g_threadinfo)[tid].detached_posix_thread == False);
   DRD_(g_threadinfo)[tid].first = 0;
   DRD_(g_threadinfo)[tid].last = 0;

   tl_assert(! DRD_(IsValidDrdThreadId)(tid));
}

/**
 * Called after a thread performed its last memory access and before
 * thread_delete() is called. Note: thread_delete() is only called for
 * joinable threads, not for detached threads.
 */
void DRD_(thread_finished)(const DrdThreadId tid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);

   DRD_(g_threadinfo)[tid].vg_thread_exists = False;

   if (DRD_(g_threadinfo)[tid].detached_posix_thread)
   {
      /*
       * Once a detached thread has finished, its stack is deallocated and
       * should no longer be taken into account when computing the conflict
       * set.
       */
      DRD_(g_threadinfo)[tid].stack_min = DRD_(g_threadinfo)[tid].stack_max;

      /*
       * For a detached thread, calling pthread_exit() invalidates the
       * POSIX thread ID associated with the detached thread. For joinable
       * POSIX threads however, the POSIX thread ID remains live after the
       * pthread_exit() call until pthread_join() is called.
       */
      DRD_(g_threadinfo)[tid].posix_thread_exists = False;
   }
}

/** Called just before pthread_cancel(). */
void DRD_(thread_pre_cancel)(const DrdThreadId tid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);

   DRD_(g_threadinfo)[tid].synchr_nesting = 0;
}

/**
 * Store the POSIX thread ID for the specified thread.
 *
 * @note This function can be called two times for the same thread -- see also
 * the comment block preceding the pthread_create() wrapper in
 * drd_pthread_intercepts.c.
 */
void DRD_(thread_set_pthreadid)(const DrdThreadId tid, const PThreadId ptid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(DRD_(g_threadinfo)[tid].pt_threadid == INVALID_POSIX_THREADID
             || DRD_(g_threadinfo)[tid].pt_threadid == ptid);
   tl_assert(ptid != INVALID_POSIX_THREADID);
   DRD_(g_threadinfo)[tid].posix_thread_exists = True;
   DRD_(g_threadinfo)[tid].pt_threadid = ptid;
}

/** Returns true for joinable threads and false for detached threads. */
Bool DRD_(thread_get_joinable)(const DrdThreadId tid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   return ! DRD_(g_threadinfo)[tid].detached_posix_thread;
}

/** Store the thread mode: joinable or detached. */
void DRD_(thread_set_joinable)(const DrdThreadId tid, const Bool joinable)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(!! joinable == joinable);
   tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);

   DRD_(g_threadinfo)[tid].detached_posix_thread = ! joinable;
}

/** Tells DRD that the calling thread is about to enter pthread_create(). */
void DRD_(thread_entering_pthread_create)(const DrdThreadId tid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);
   tl_assert(DRD_(g_threadinfo)[tid].pthread_create_nesting_level >= 0);

   DRD_(g_threadinfo)[tid].pthread_create_nesting_level++;
}

/** Tells DRD that the calling thread has left pthread_create(). */
void DRD_(thread_left_pthread_create)(const DrdThreadId tid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);
   tl_assert(DRD_(g_threadinfo)[tid].pthread_create_nesting_level > 0);

   DRD_(g_threadinfo)[tid].pthread_create_nesting_level--;
}

/** Obtain the thread number and the user-assigned thread name. */
const char* DRD_(thread_get_name)(const DrdThreadId tid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);

   return DRD_(g_threadinfo)[tid].name;
}

/** Set the name of the specified thread. */
void DRD_(thread_set_name)(const DrdThreadId tid, const char* const name)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);

   if (name == NULL || name[0] == 0)
      VG_(snprintf)(DRD_(g_threadinfo)[tid].name,
                    sizeof(DRD_(g_threadinfo)[tid].name),
                    "Thread %d",
                    tid);
   else
      VG_(snprintf)(DRD_(g_threadinfo)[tid].name,
                    sizeof(DRD_(g_threadinfo)[tid].name),
                    "Thread %d (%s)",
                    tid, name);
   /* Force NUL termination in case the formatted name was truncated. */
   DRD_(g_threadinfo)[tid].name[sizeof(DRD_(g_threadinfo)[tid].name) - 1] = 0;
}

/**
 * Update s_vg_running_tid, DRD_(g_drd_running_tid) and recalculate the
 * conflict set.
 */
void DRD_(thread_set_vg_running_tid)(const ThreadId vg_tid)
{
   tl_assert(vg_tid != VG_INVALID_THREADID);

   /* Only do the (expensive) context switch work when the running
      Valgrind thread actually changed. */
   if (vg_tid != s_vg_running_tid)
   {
      DRD_(thread_set_running_tid)(vg_tid,
                                   DRD_(VgThreadIdToDrdThreadId)(vg_tid));
   }

   tl_assert(s_vg_running_tid != VG_INVALID_THREADID);
   tl_assert(DRD_(g_drd_running_tid) != DRD_INVALID_THREADID);
}

/**
 * Update s_vg_running_tid, DRD_(g_drd_running_tid) and recalculate the
 * conflict set.
 */
void DRD_(thread_set_running_tid)(const ThreadId vg_tid,
                                  const DrdThreadId drd_tid)
{
   tl_assert(vg_tid != VG_INVALID_THREADID);
   tl_assert(drd_tid != DRD_INVALID_THREADID);

   if (vg_tid != s_vg_running_tid)
   {
      if (s_trace_context_switches
          && DRD_(g_drd_running_tid) != DRD_INVALID_THREADID)
      {
         VG_(message)(Vg_DebugMsg,
                      "Context switch from thread %d to thread %d;"
                      " segments: %llu\n",
                      DRD_(g_drd_running_tid), drd_tid,
                      DRD_(sg_get_segments_alive_count)());
      }
      s_vg_running_tid = vg_tid;
      DRD_(g_drd_running_tid) = drd_tid;
      /* The conflict set is always relative to the running thread. */
      thread_compute_conflict_set(&DRD_(g_conflict_set), drd_tid);
      s_context_switch_count++;
   }

   tl_assert(s_vg_running_tid != VG_INVALID_THREADID);
   tl_assert(DRD_(g_drd_running_tid) != DRD_INVALID_THREADID);
}

/**
 * Increase the synchronization nesting counter. Must be called before the
 * client calls a synchronization function.
 *
 * @return the nesting count before the increment.
 */
int DRD_(thread_enter_synchr)(const DrdThreadId tid)
{
   tl_assert(DRD_(IsValidDrdThreadId)(tid));
   return DRD_(g_threadinfo)[tid].synchr_nesting++;
}

/**
 * Decrease the synchronization nesting counter. Must be called after the
 * client left a synchronization function.
 */
int DRD_(thread_leave_synchr)(const DrdThreadId tid)
{
   tl_assert(DRD_(IsValidDrdThreadId)(tid));
   tl_assert(DRD_(g_threadinfo)[tid].synchr_nesting >= 1);
   /* Returns the nesting count after the decrement. */
   return --DRD_(g_threadinfo)[tid].synchr_nesting;
}

/** Returns the synchronization nesting counter. */
int DRD_(thread_get_synchr_nesting_count)(const DrdThreadId tid)
{
   tl_assert(DRD_(IsValidDrdThreadId)(tid));
   return DRD_(g_threadinfo)[tid].synchr_nesting;
}

/** Append a new segment at the end of the segment list. */
static
void thread_append_segment(const DrdThreadId tid, Segment* const sg)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);

#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
   tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
#endif

   /* Link sg in as the new tail of the doubly linked segment list. */
   sg->prev = DRD_(g_threadinfo)[tid].last;
   sg->next = 0;
   if (DRD_(g_threadinfo)[tid].last)
      DRD_(g_threadinfo)[tid].last->next = sg;
   DRD_(g_threadinfo)[tid].last = sg;
   if (DRD_(g_threadinfo)[tid].first == 0)
      DRD_(g_threadinfo)[tid].first = sg;

#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
   tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
#endif
}

/**
 * Remove a segment from the segment list of thread threadid, and free the
 * associated memory.
 */
static
void thread_discard_segment(const DrdThreadId tid, Segment* const sg)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);

#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
   tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
#endif

   /* Unlink sg, fixing up the list head/tail when sg was first/last. */
   if (sg->prev)
      sg->prev->next = sg->next;
   if (sg->next)
      sg->next->prev = sg->prev;
   if (sg == DRD_(g_threadinfo)[tid].first)
      DRD_(g_threadinfo)[tid].first = sg->next;
   if (sg == DRD_(g_threadinfo)[tid].last)
      DRD_(g_threadinfo)[tid].last = sg->prev;
   /* Drop this thread's reference; the segment is freed when the last
      reference disappears. */
   DRD_(sg_put)(sg);

#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
   tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
#endif
}

/**
 * Returns a pointer to the vector clock of the most recent segment associated
 * with thread 'tid'.
 */
VectorClock* DRD_(thread_get_vc)(const DrdThreadId tid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(DRD_(g_threadinfo)[tid].last);
   return &DRD_(g_threadinfo)[tid].last->vc;
}

/**
 * Return the latest segment of thread 'tid' and increment its reference
 * count. The caller's previous segment reference (*sg) is released first.
 */
void DRD_(thread_get_latest_segment)(Segment** sg, const DrdThreadId tid)
{
   tl_assert(sg);
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(DRD_(g_threadinfo)[tid].last);

   DRD_(sg_put)(*sg);
   *sg = DRD_(sg_get)(DRD_(g_threadinfo)[tid].last);
}

/**
 * Compute the minimum of all latest vector clocks of all threads
 * (Michiel Ronsse calls this "clock snooping" in his papers about DIOTA).
 *
 * @param vc pointer to a vectorclock, holds result upon return.
 */
static void DRD_(thread_compute_minimum_vc)(VectorClock* vc)
{
   unsigned i;
   Bool first;
   Segment* latest_sg;

   /* Seed vc from the first live thread, then fold in the rest with
      the element-wise minimum. */
   first = True;
   for (i = 0; i < DRD_N_THREADS; i++)
   {
      latest_sg = DRD_(g_threadinfo)[i].last;
      if (latest_sg)
      {
         if (first)
            DRD_(vc_assign)(vc, &latest_sg->vc);
         else
            DRD_(vc_min)(vc, &latest_sg->vc);
         first = False;
      }
   }
}

/**
 * Compute the maximum of all latest vector clocks of all threads.
 *
 * @param vc pointer to a vectorclock, holds result upon return.
 */
static void DRD_(thread_compute_maximum_vc)(VectorClock* vc)
{
   unsigned i;
   Bool first;
   Segment* latest_sg;

   /* Same scan as the minimum computation, combining with vc_combine
      (element-wise maximum) instead of vc_min. */
   first = True;
   for (i = 0; i < DRD_N_THREADS; i++)
   {
      latest_sg = DRD_(g_threadinfo)[i].last;
      if (latest_sg)
      {
         if (first)
            DRD_(vc_assign)(vc, &latest_sg->vc);
         else
            DRD_(vc_combine)(vc, &latest_sg->vc);
         first = False;
      }
   }
}

/**
 * Discard all segments that have a defined order against the latest vector
 * clock of all threads -- these segments can no longer be involved in a
 * data race.
 */
static void thread_discard_ordered_segments(void)
{
   unsigned i;
   VectorClock thread_vc_min;

   s_discard_ordered_segments_count++;

   DRD_(vc_init)(&thread_vc_min, 0, 0);
   DRD_(thread_compute_minimum_vc)(&thread_vc_min);
   if (DRD_(sg_get_trace)())
   {
      char *vc_min, *vc_max;
      VectorClock thread_vc_max;

      DRD_(vc_init)(&thread_vc_max, 0, 0);
      DRD_(thread_compute_maximum_vc)(&thread_vc_max);
      vc_min = DRD_(vc_aprint)(&thread_vc_min);
      vc_max = DRD_(vc_aprint)(&thread_vc_max);
      VG_(message)(Vg_DebugMsg,
                   "Discarding ordered segments -- min vc is %s, max vc is %s\n",
                   vc_min, vc_max);
      VG_(free)(vc_min);
      VG_(free)(vc_max);
      DRD_(vc_cleanup)(&thread_vc_max);
   }

   for (i = 0; i < DRD_N_THREADS; i++)
   {
      Segment* sg;
      Segment* sg_next;
      /* Discard leading segments ordered before every thread's latest
         clock; the (sg_next = sg->next) condition keeps the very last
         segment of each thread alive. */
      for (sg = DRD_(g_threadinfo)[i].first;
           sg && (sg_next = sg->next) && DRD_(vc_lte)(&sg->vc, &thread_vc_min);
           sg = sg_next)
      {
         thread_discard_segment(i, sg);
      }
   }
   DRD_(vc_cleanup)(&thread_vc_min);
}

/**
 * An implementation of the property 'equiv(sg1, sg2)' as defined in the paper
 * by Mark Christiaens e.a. The property equiv(sg1, sg2) holds if and only if
 * all segments in the set CS are ordered consistently against both sg1 and
 * sg2. The set CS is defined as the set of segments that can immediately
 * precede future segments via inter-thread synchronization operations. In
 * DRD the set CS consists of the latest segment of each thread combined with
 * all segments for which the reference count is strictly greater than one.
 * The code below is an optimized version of the following:
 *
 * for (i = 0; i < DRD_N_THREADS; i++)
 * {
 *    Segment* sg;
 *
 *    for (sg = DRD_(g_threadinfo)[i].first; sg; sg = sg->next)
 *    {
 *       if (sg == DRD_(g_threadinfo)[i].last || DRD_(sg_get_refcnt)(sg) > 1)
 *       {
 *          if (   DRD_(vc_lte)(&sg1->vc, &sg->vc)
 *              != DRD_(vc_lte)(&sg2->vc, &sg->vc)
 *              || DRD_(vc_lte)(&sg->vc, &sg1->vc)
 *              != DRD_(vc_lte)(&sg->vc, &sg2->vc))
 *          {
 *             return False;
 *          }
 *       }
 *    }
 * }
 */
static Bool thread_consistent_segment_ordering(const DrdThreadId tid,
                                               Segment* const sg1,
                                               Segment* const sg2)
{
   unsigned i;

   tl_assert(sg1->next);
   tl_assert(sg2->next);
   tl_assert(sg1->next == sg2);
   tl_assert(DRD_(vc_lte)(&sg1->vc, &sg2->vc));

   for (i = 0; i < DRD_N_THREADS; i++)
   {
      Segment* sg;

      /* Forward scan: stop as soon as sg is ordered after sg2; any CS
         segment ordered after sg1 but not after sg2 breaks equivalence. */
      for (sg = DRD_(g_threadinfo)[i].first; sg; sg = sg->next)
      {
         if (! sg->next || DRD_(sg_get_refcnt)(sg) > 1)
         {
            if (DRD_(vc_lte)(&sg2->vc, &sg->vc))
               break;
            if (DRD_(vc_lte)(&sg1->vc, &sg->vc))
               return False;
         }
      }
      /* Backward scan: symmetric check for CS segments ordered before
         sg1 versus before sg2. */
      for (sg = DRD_(g_threadinfo)[i].last; sg; sg = sg->prev)
      {
         if (! sg->next || DRD_(sg_get_refcnt)(sg) > 1)
         {
            if (DRD_(vc_lte)(&sg->vc, &sg1->vc))
               break;
            if (DRD_(vc_lte)(&sg->vc, &sg2->vc))
               return False;
         }
      }
   }
   return True;
}

/**
 * Merge all segments that may be merged without triggering false positives
 * or discarding real data races. For the theoretical background of segment
 * merging, see also the following paper: Mark Christiaens, Michiel Ronsse
 * and Koen De Bosschere. Bounding the number of segment histories during
 * data race detection. Parallel Computing archive, Volume 28, Issue 9,
 * pp 1221-1238, September 2002. This paper contains a proof that merging
 * consecutive segments for which the property equiv(s1,s2) holds can be
 * merged without reducing the accuracy of datarace detection. Furthermore
 * it is also proven that the total number of all segments will never grow
 * unbounded if all segments s1, s2 for which equiv(s1, s2) holds are merged
 * every time a new segment is created. The property equiv(s1, s2) is defined
 * as follows: equiv(s1, s2) <=> for all segments in the set CS, the vector
 * clocks of segments s and s1 are ordered in the same way as those of segments
 * s and s2. The set CS is defined as the set of existing segments s that have
 * the potential to conflict with not yet created segments, either because the
 * segment s is the latest segment of a thread or because it can become the
 * immediate predecessor of a new segment due to a synchronization operation.
 */
static void thread_merge_segments(void)
{
   unsigned i;

   s_new_segments_since_last_merge = 0;

   for (i = 0; i < DRD_N_THREADS; i++)
   {
      Segment* sg;

#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
      tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[i]));
#endif

      for (sg = DRD_(g_threadinfo)[i].first; sg; sg = sg->next)
      {
         /* Only merge segments that no other data structure references
            (refcnt == 1) and that are not the last or next-to-last
            segment of the thread. */
         if (DRD_(sg_get_refcnt)(sg) == 1
             && sg->next
             && DRD_(sg_get_refcnt)(sg->next) == 1
             && sg->next->next
             && thread_consistent_segment_ordering(i, sg, sg->next))
         {
            /* Merge sg and sg->next into sg. */
            DRD_(sg_merge)(sg, sg->next);
            thread_discard_segment(i, sg->next);
         }
      }

#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
      tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[i]));
#endif
   }
}

/**
 * Create a new segment for the specified thread, and discard any segments
 * that cannot cause races anymore.
956 */ 957void DRD_(thread_new_segment)(const DrdThreadId tid) 958{ 959 Segment* last_sg; 960 Segment* new_sg; 961 962 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS 963 && tid != DRD_INVALID_THREADID); 964 tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid))); 965 966 last_sg = DRD_(g_threadinfo)[tid].last; 967 new_sg = DRD_(sg_new)(tid, tid); 968 thread_append_segment(tid, new_sg); 969 if (tid == DRD_(g_drd_running_tid) && last_sg) 970 { 971 DRD_(thread_update_conflict_set)(tid, &last_sg->vc); 972 s_update_conflict_set_new_sg_count++; 973 } 974 975 tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid))); 976 977 if (s_segment_merging 978 && ++s_new_segments_since_last_merge >= s_segment_merge_interval) 979 { 980 thread_discard_ordered_segments(); 981 thread_merge_segments(); 982 } 983} 984 985/** Call this function after thread 'joiner' joined thread 'joinee'. */ 986void DRD_(thread_combine_vc_join)(DrdThreadId joiner, DrdThreadId joinee) 987{ 988 tl_assert(joiner != joinee); 989 tl_assert(0 <= (int)joiner && joiner < DRD_N_THREADS 990 && joiner != DRD_INVALID_THREADID); 991 tl_assert(0 <= (int)joinee && joinee < DRD_N_THREADS 992 && joinee != DRD_INVALID_THREADID); 993 tl_assert(DRD_(g_threadinfo)[joiner].last); 994 tl_assert(DRD_(g_threadinfo)[joinee].last); 995 996 if (DRD_(sg_get_trace)()) 997 { 998 char *str1, *str2; 999 str1 = DRD_(vc_aprint)(&DRD_(g_threadinfo)[joiner].last->vc); 1000 str2 = DRD_(vc_aprint)(&DRD_(g_threadinfo)[joinee].last->vc); 1001 VG_(message)(Vg_DebugMsg, "Before join: joiner %s, joinee %s\n", 1002 str1, str2); 1003 VG_(free)(str1); 1004 VG_(free)(str2); 1005 } 1006 if (joiner == DRD_(g_drd_running_tid)) 1007 { 1008 VectorClock old_vc; 1009 1010 DRD_(vc_copy)(&old_vc, &DRD_(g_threadinfo)[joiner].last->vc); 1011 DRD_(vc_combine)(&DRD_(g_threadinfo)[joiner].last->vc, 1012 &DRD_(g_threadinfo)[joinee].last->vc); 1013 DRD_(thread_update_conflict_set)(joiner, &old_vc); 1014 s_update_conflict_set_join_count++; 1015 
      DRD_(vc_cleanup)(&old_vc);
   }
   else
   {
      DRD_(vc_combine)(&DRD_(g_threadinfo)[joiner].last->vc,
                       &DRD_(g_threadinfo)[joinee].last->vc);
   }

   /* After the join, segments ordered before the combined clock can no
    * longer be involved in a race; drop them.
    */
   thread_discard_ordered_segments();

   if (DRD_(sg_get_trace)())
   {
      char* str;
      str = DRD_(vc_aprint)(&DRD_(g_threadinfo)[joiner].last->vc);
      VG_(message)(Vg_DebugMsg, "After join: %s\n", str);
      VG_(free)(str);
   }
}

/**
 * Update the vector clock of the last segment of thread tid with the
 * vector clock of segment sg.
 */
static void thread_combine_vc_sync(DrdThreadId tid, const Segment* sg)
{
   const VectorClock* const vc = &sg->vc;

   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(DRD_(g_threadinfo)[tid].last);
   tl_assert(sg);
   tl_assert(vc);

   /* Combining is only necessary when the segment was created by another
    * thread; a thread's own segments are already ordered.
    */
   if (tid != sg->tid)
   {
      VectorClock old_vc;

      /* Snapshot the previous clock so the conflict set can be updated
       * incrementally below.
       */
      DRD_(vc_copy)(&old_vc, &DRD_(g_threadinfo)[tid].last->vc);
      DRD_(vc_combine)(&DRD_(g_threadinfo)[tid].last->vc, vc);
      if (DRD_(sg_get_trace)())
      {
         char *str1, *str2;
         str1 = DRD_(vc_aprint)(&old_vc);
         str2 = DRD_(vc_aprint)(&DRD_(g_threadinfo)[tid].last->vc);
         VG_(message)(Vg_DebugMsg, "thread %d: vc %s -> %s\n", tid, str1, str2);
         VG_(free)(str1);
         VG_(free)(str2);
      }

      thread_discard_ordered_segments();

      DRD_(thread_update_conflict_set)(tid, &old_vc);
      s_update_conflict_set_sync_count++;

      DRD_(vc_cleanup)(&old_vc);
   }
   else
   {
      /* Same thread: the segment's clock must already be ordered before the
       * thread's current clock.
       */
      tl_assert(DRD_(vc_lte)(vc, &DRD_(g_threadinfo)[tid].last->vc));
   }
}

/**
 * Create a new segment for thread tid and update the vector clock of the last
 * segment of this thread with the vector clock of segment sg. Call this
 * function after thread tid had to wait because of thread synchronization
 * until the memory accesses in the segment sg finished.
 */
void DRD_(thread_new_segment_and_combine_vc)(DrdThreadId tid, const Segment* sg)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));
   tl_assert(sg);

   /* First append a fresh segment, then pull in sg's vector clock; the
    * incremental conflict set update happens inside thread_combine_vc_sync().
    */
   thread_append_segment(tid, DRD_(sg_new)(tid, tid));

   thread_combine_vc_sync(tid, sg);

   /* Periodic compaction, same policy as in DRD_(thread_new_segment)(). */
   if (s_segment_merging
       && ++s_new_segments_since_last_merge >= s_segment_merge_interval)
   {
      thread_discard_ordered_segments();
      thread_merge_segments();
   }
}

/**
 * Call this function whenever a thread is no longer using the memory
 * [ a1, a2 [, e.g. because of a call to free() or a stack pointer
 * increase.
 */
void DRD_(thread_stop_using_mem)(const Addr a1, const Addr a2)
{
   DrdThreadId other_user;
   unsigned i;

   /* For all threads, mark the range [ a1, a2 [ as no longer in use. */
   other_user = DRD_INVALID_THREADID;
   for (i = 0; i < DRD_N_THREADS; i++)
   {
      Segment* p;
      for (p = DRD_(g_threadinfo)[i].first; p; p = p->next)
      {
         /* Until a non-running thread with accesses in the range has been
          * found, use the test-and-clear variant so such a thread can be
          * detected; it clears the range just like bm_clear() does.
          */
         if (other_user == DRD_INVALID_THREADID
             && i != DRD_(g_drd_running_tid))
         {
            if (UNLIKELY(DRD_(bm_test_and_clear)(DRD_(sg_bm)(p), a1, a2)))
            {
               other_user = i;
            }
            continue;
         }
         DRD_(bm_clear)(DRD_(sg_bm)(p), a1, a2);
      }
   }

   /*
    * If any other thread had accessed memory in [ a1, a2 [, update the
    * conflict set.
    */
   if (other_user != DRD_INVALID_THREADID
       && DRD_(bm_has_any_access)(DRD_(g_conflict_set), a1, a2))
   {
      /* Full recomputation: the cleared accesses may have been part of the
       * conflict set of the running thread.
       */
      thread_compute_conflict_set(&DRD_(g_conflict_set),
                                  DRD_(thread_get_running_tid)());
   }
}

/** Specify whether memory loads should be recorded.
*/ 1145void DRD_(thread_set_record_loads)(const DrdThreadId tid, const Bool enabled) 1146{ 1147 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS 1148 && tid != DRD_INVALID_THREADID); 1149 tl_assert(enabled == !! enabled); 1150 1151 DRD_(g_threadinfo)[tid].is_recording_loads = enabled; 1152} 1153 1154/** Specify whether memory stores should be recorded. */ 1155void DRD_(thread_set_record_stores)(const DrdThreadId tid, const Bool enabled) 1156{ 1157 tl_assert(0 <= (int)tid && tid < DRD_N_THREADS 1158 && tid != DRD_INVALID_THREADID); 1159 tl_assert(enabled == !! enabled); 1160 1161 DRD_(g_threadinfo)[tid].is_recording_stores = enabled; 1162} 1163 1164/** 1165 * Print the segment information for all threads. 1166 * 1167 * This function is only used for debugging purposes. 1168 */ 1169void DRD_(thread_print_all)(void) 1170{ 1171 unsigned i; 1172 Segment* p; 1173 1174 for (i = 0; i < DRD_N_THREADS; i++) 1175 { 1176 if (DRD_(g_threadinfo)[i].first) 1177 { 1178 VG_(printf)("**************\n" 1179 "* thread %3d (%d/%d/%d/0x%lx/%d) *\n" 1180 "**************\n", 1181 i, 1182 DRD_(g_threadinfo)[i].vg_thread_exists, 1183 DRD_(g_threadinfo)[i].vg_threadid, 1184 DRD_(g_threadinfo)[i].posix_thread_exists, 1185 DRD_(g_threadinfo)[i].pt_threadid, 1186 DRD_(g_threadinfo)[i].detached_posix_thread); 1187 for (p = DRD_(g_threadinfo)[i].first; p; p = p->next) 1188 { 1189 DRD_(sg_print)(p); 1190 } 1191 } 1192 } 1193} 1194 1195/** Show a call stack involved in a data race. 
 */
static void show_call_stack(const DrdThreadId tid,
                            const Char* const msg,
                            ExeContext* const callstack)
{
   const ThreadId vg_tid = DRD_(DrdThreadIdToVgThreadId)(tid);

   VG_(message)(Vg_UserMsg, "%s (thread %d)\n", msg, tid);

   if (vg_tid != VG_INVALID_THREADID)
   {
      /* Print the stored context if one was supplied, otherwise the thread's
       * current stack trace.
       */
      if (callstack)
      {
         VG_(pp_ExeContext)(callstack);
      }
      else
      {
         VG_(get_and_pp_StackTrace)(vg_tid, VG_(clo_backtrace_size));
      }
   }
   else
   {
      VG_(message)(Vg_UserMsg,
                   "   (thread finished, call stack no longer available)\n");
   }
}

/** Print information about the segments involved in a data race. */
static void
thread_report_conflicting_segments_segment(const DrdThreadId tid,
                                           const Addr addr,
                                           const SizeT size,
                                           const BmAccessTypeT access_type,
                                           const Segment* const p)
{
   unsigned i;

   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(p);

   for (i = 0; i < DRD_N_THREADS; i++)
   {
      if (i != tid)
      {
         Segment* q;
         for (q = DRD_(g_threadinfo)[i].last; q; q = q->prev)
         {
            /*
             * Since q iterates over the segments of thread i in order of
             * decreasing vector clocks, if q->vc <= p->vc, then
             * q->next->vc <= p->vc will also hold. Hence, break out of the
             * loop once this condition is met.
             */
            if (DRD_(vc_lte)(&q->vc, &p->vc))
               break;
            /* Only segments unordered with respect to p can conflict. */
            if (! DRD_(vc_lte)(&p->vc, &q->vc))
            {
               if (DRD_(bm_has_conflict_with)(DRD_(sg_bm)(q), addr, addr + size,
                                              access_type))
               {
                  tl_assert(q->stacktrace);
                  show_call_stack(i, "Other segment start",
                                  q->stacktrace);
                  /* The end of segment q is where its successor starts; a
                   * null context is passed when q is the last segment.
                   */
                  show_call_stack(i, "Other segment end",
                                  q->next ? q->next->stacktrace : 0);
               }
            }
         }
      }
   }
}

/** Print information about all segments involved in a data race.
 */
void DRD_(thread_report_conflicting_segments)(const DrdThreadId tid,
                                              const Addr addr,
                                              const SizeT size,
                                              const BmAccessTypeT access_type)
{
   Segment* p;

   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);

   /* For every segment of thread tid that touches [ addr, addr + size [,
    * report the conflicting segments of all other threads.
    */
   for (p = DRD_(g_threadinfo)[tid].first; p; p = p->next)
   {
      if (DRD_(bm_has)(DRD_(sg_bm)(p), addr, addr + size, access_type))
      {
         thread_report_conflicting_segments_segment(tid, addr, size,
                                                    access_type, p);
      }
   }
}

/**
 * Verify whether the conflict set for thread tid is up to date. Only perform
 * the check if the environment variable DRD_VERIFY_CONFLICT_SET has been set.
 */
static Bool thread_conflict_set_up_to_date(const DrdThreadId tid)
{
   /* Cached result of the environment lookup: -1 = not yet queried,
    * 0 = verification disabled, 1 = verification enabled.
    */
   static int do_verify_conflict_set = -1;
   Bool result;
   struct bitmap* computed_conflict_set = 0;

   if (do_verify_conflict_set < 0)
      do_verify_conflict_set = VG_(getenv)("DRD_VERIFY_CONFLICT_SET") != 0;

   if (do_verify_conflict_set == 0)
      return True;

   /* Recompute the conflict set from scratch and compare it against the
    * incrementally maintained one.
    */
   thread_compute_conflict_set(&computed_conflict_set, tid);
   result = DRD_(bm_equal)(DRD_(g_conflict_set), computed_conflict_set);
   if (! result)
   {
      VG_(printf)("actual conflict set:\n");
      DRD_(bm_print)(DRD_(g_conflict_set));
      VG_(printf)("\n");
      VG_(printf)("computed conflict set:\n");
      DRD_(bm_print)(computed_conflict_set);
      VG_(printf)("\n");
   }
   DRD_(bm_delete)(computed_conflict_set);
   return result;
}

/**
 * Compute the conflict set: a bitmap that represents the union of all memory
 * accesses of all segments that are unordered to the current segment of the
 * thread tid.
 */
static void thread_compute_conflict_set(struct bitmap** conflict_set,
                                        const DrdThreadId tid)
{
   Segment* p;

   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(tid == DRD_(g_drd_running_tid));

   s_compute_conflict_set_count++;
   /* Subtract the creation counters now and add them back after the
    * computation, so the net bitmap creations of this call are accumulated.
    */
   s_conflict_set_bitmap_creation_count
      -= DRD_(bm_get_bitmap_creation_count)();
   s_conflict_set_bitmap2_creation_count
      -= DRD_(bm_get_bitmap2_creation_count)();

   /* Reuse the caller's bitmap if one exists, otherwise allocate a new one. */
   if (*conflict_set)
   {
      DRD_(bm_cleanup)(*conflict_set);
      DRD_(bm_init)(*conflict_set);
   }
   else
   {
      *conflict_set = DRD_(bm_new)();
   }

   if (s_trace_conflict_set)
   {
      char* str;

      str = DRD_(vc_aprint)(&DRD_(g_threadinfo)[tid].last->vc);
      VG_(message)(Vg_DebugMsg,
                   "computing conflict set for thread %d with vc %s\n",
                   tid, str);
      VG_(free)(str);
   }

   /* p is the current (last) segment of thread tid; merge into the conflict
    * set the bitmap of every segment of every other thread that is unordered
    * with respect to p.
    */
   p = DRD_(g_threadinfo)[tid].last;
   {
      unsigned j;

      if (s_trace_conflict_set)
      {
         char* vc;

         vc = DRD_(vc_aprint)(&p->vc);
         VG_(message)(Vg_DebugMsg, "conflict set: thread [%d] at vc %s\n",
                      tid, vc);
         VG_(free)(vc);
      }

      for (j = 0; j < DRD_N_THREADS; j++)
      {
         if (j != tid && DRD_(IsValidDrdThreadId)(j))
         {
            Segment* q;
            for (q = DRD_(g_threadinfo)[j].last; q; q = q->prev)
            {
               /* Unordered in both directions <=> q can race with p. */
               if (! DRD_(vc_lte)(&q->vc, &p->vc)
                   && ! DRD_(vc_lte)(&p->vc, &q->vc))
               {
                  if (s_trace_conflict_set)
                  {
                     char* str;

                     str = DRD_(vc_aprint)(&q->vc);
                     VG_(message)(Vg_DebugMsg,
                                  "conflict set: [%d] merging segment %s\n",
                                  j, str);
                     VG_(free)(str);
                  }
                  DRD_(bm_merge2)(*conflict_set, DRD_(sg_bm)(q));
               }
               else
               {
                  if (s_trace_conflict_set)
                  {
                     char* str;

                     str = DRD_(vc_aprint)(&q->vc);
                     VG_(message)(Vg_DebugMsg,
                                  "conflict set: [%d] ignoring segment %s\n",
                                  j, str);
                     VG_(free)(str);
                  }
               }
            }
         }
      }
   }

   s_conflict_set_bitmap_creation_count
      += DRD_(bm_get_bitmap_creation_count)();
   s_conflict_set_bitmap2_creation_count
      += DRD_(bm_get_bitmap2_creation_count)();

   if (s_trace_conflict_set_bm)
   {
      VG_(message)(Vg_DebugMsg, "[%d] new conflict set:\n", tid);
      DRD_(bm_print)(*conflict_set);
      VG_(message)(Vg_DebugMsg, "[%d] end of new conflict set.\n", tid);
   }
}

/**
 * Update the conflict set after the vector clock of thread tid has been
 * updated from old_vc to its current value, either because a new segment has
 * been created or because of a synchronization operation.
 */
void DRD_(thread_update_conflict_set)(const DrdThreadId tid,
                                      const VectorClock* const old_vc)
{
   const VectorClock* new_vc;
   Segment* p;
   unsigned j;

   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(old_vc);
   tl_assert(tid == DRD_(g_drd_running_tid));
   tl_assert(DRD_(g_conflict_set));

   if (s_trace_conflict_set)
   {
      char* str;

      str = DRD_(vc_aprint)(&DRD_(g_threadinfo)[tid].last->vc);
      VG_(message)(Vg_DebugMsg,
                   "updating conflict set for thread %d with vc %s\n",
                   tid, str);
      VG_(free)(str);
   }

   new_vc = &DRD_(g_threadinfo)[tid].last->vc;

   /* Incremental update protocol: (1) unmark everything, (2) mark the
    * second-level bitmaps of every segment whose membership in the conflict
    * set changed between old_vc and new_vc, (3) clear the marked regions,
    * (4) re-merge the segments that are unordered with respect to the new
    * clock into the marked regions, (5) drop marked regions that ended up
    * empty.
    */
   DRD_(bm_unmark)(DRD_(g_conflict_set));

   for (j = 0; j < DRD_N_THREADS; j++)
   {
      Segment* q;

      if (j == tid || ! DRD_(IsValidDrdThreadId)(j))
         continue;

      for (q = DRD_(g_threadinfo)[j].last; q; q = q->prev)
      {
         /* A segment is in a conflict set iff it is unordered (in both
          * directions) with respect to the reference vector clock.
          */
         const int included_in_old_conflict_set
            = ! DRD_(vc_lte)(&q->vc, old_vc)
            && ! DRD_(vc_lte)(old_vc, &q->vc);
         const int included_in_new_conflict_set
            = ! DRD_(vc_lte)(&q->vc, new_vc)
            && ! DRD_(vc_lte)(new_vc, &q->vc);
         if (included_in_old_conflict_set != included_in_new_conflict_set)
         {
            if (s_trace_conflict_set)
            {
               char* str;

               str = DRD_(vc_aprint)(&q->vc);
               VG_(message)(Vg_DebugMsg,
                            "conflict set: [%d] merging segment %s\n", j, str);
               VG_(free)(str);
            }
            DRD_(bm_mark)(DRD_(g_conflict_set), DRD_(sg_bm)(q));
         }
         else
         {
            if (s_trace_conflict_set)
            {
               char* str;

               str = DRD_(vc_aprint)(&q->vc);
               VG_(message)(Vg_DebugMsg,
                            "conflict set: [%d] ignoring segment %s\n", j, str);
               VG_(free)(str);
            }
         }
      }
   }

   DRD_(bm_clear_marked)(DRD_(g_conflict_set));

   /* Rebuild the cleared (marked) regions from the segments that are
    * unordered with respect to the current last segment of thread tid.
    */
   p = DRD_(g_threadinfo)[tid].last;
   {
      for (j = 0; j < DRD_N_THREADS; j++)
      {
         if (j != tid && DRD_(IsValidDrdThreadId)(j))
         {
            Segment* q;
            for (q = DRD_(g_threadinfo)[j].last; q; q = q->prev)
            {
               if (! DRD_(vc_lte)(&q->vc, &p->vc)
                   && ! DRD_(vc_lte)(&p->vc, &q->vc))
               {
                  DRD_(bm_merge2_marked)(DRD_(g_conflict_set), DRD_(sg_bm)(q));
               }
            }
         }
      }
   }

   DRD_(bm_remove_cleared_marked)(DRD_(g_conflict_set));

   s_update_conflict_set_count++;

   if (s_trace_conflict_set_bm)
   {
      VG_(message)(Vg_DebugMsg, "[%d] updated conflict set:\n", tid);
      DRD_(bm_print)(DRD_(g_conflict_set));
      VG_(message)(Vg_DebugMsg, "[%d] end of updated conflict set.\n", tid);
   }

   /* No-op unless DRD_VERIFY_CONFLICT_SET is set in the environment. */
   tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));
}

/** Report the number of context switches performed. */
ULong DRD_(thread_get_context_switch_count)(void)
{
   return s_context_switch_count;
}

/** Report the number of ordered segments that have been discarded. */
ULong DRD_(thread_get_discard_ordered_segments_count)(void)
{
   return s_discard_ordered_segments_count;
}

/** Return how many times the conflict set has been updated entirely.
*/ 1552ULong DRD_(thread_get_compute_conflict_set_count)() 1553{ 1554 return s_compute_conflict_set_count; 1555} 1556 1557/** Return how many times the conflict set has been updated partially. */ 1558ULong DRD_(thread_get_update_conflict_set_count)(void) 1559{ 1560 return s_update_conflict_set_count; 1561} 1562 1563/** 1564 * Return how many times the conflict set has been updated partially 1565 * because a new segment has been created. 1566 */ 1567ULong DRD_(thread_get_update_conflict_set_new_sg_count)(void) 1568{ 1569 return s_update_conflict_set_new_sg_count; 1570} 1571 1572/** 1573 * Return how many times the conflict set has been updated partially 1574 * because of combining vector clocks due to synchronization operations 1575 * other than reader/writer lock or barrier operations. 1576 */ 1577ULong DRD_(thread_get_update_conflict_set_sync_count)(void) 1578{ 1579 return s_update_conflict_set_sync_count; 1580} 1581 1582/** 1583 * Return how many times the conflict set has been updated partially 1584 * because of thread joins. 1585 */ 1586ULong DRD_(thread_get_update_conflict_set_join_count)(void) 1587{ 1588 return s_update_conflict_set_join_count; 1589} 1590 1591/** 1592 * Return the number of first-level bitmaps that have been created during 1593 * conflict set updates. 1594 */ 1595ULong DRD_(thread_get_conflict_set_bitmap_creation_count)(void) 1596{ 1597 return s_conflict_set_bitmap_creation_count; 1598} 1599 1600/** 1601 * Return the number of second-level bitmaps that have been created during 1602 * conflict set updates. 1603 */ 1604ULong DRD_(thread_get_conflict_set_bitmap2_creation_count)(void) 1605{ 1606 return s_conflict_set_bitmap2_creation_count; 1607} 1608