// tsan_unittest.cpp revision e291d9107266e128440f5b73294e5b2885722715
/*
  This file is part of Valgrind, a dynamic binary instrumentation
  framework.

  Copyright (C) 2008-2008 Google Inc
     opensource@google.com

  This program is free software; you can redistribute it and/or
  modify it under the terms of the GNU General Public License as
  published by the Free Software Foundation; either version 2 of the
  License, or (at your option) any later version.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
  02111-1307, USA.

  The GNU General Public License is contained in the file COPYING.
*/

// Author: Konstantin Serebryany <opensource@google.com>
//
// This file contains a set of unit tests for a data race detection tool.
//
// This test can be compiled with pthreads (default) or
// with any other library that supports threads, locks, cond vars, etc.
//
// To compile with pthreads:
//   g++  racecheck_unittest.cc dynamic_annotations.cc
//        -lpthread -g -DDYNAMIC_ANNOTATIONS=1
//
// To compile with different library:
//   1. cp thread_wrappers_pthread.h thread_wrappers_yourlib.h
//   2. edit thread_wrappers_yourlib.h
//   3. add '-DTHREAD_WRAPPERS="thread_wrappers_yourlib.h"' to your compilation.
//

// This test must not include any other file specific to threading library,
// everything should be inside THREAD_WRAPPERS.
#ifndef THREAD_WRAPPERS
# define THREAD_WRAPPERS "thread_wrappers_pthread.h"
#endif
#include THREAD_WRAPPERS

// NOTE(review): the macro name is spelled "SEPERATE" (sic).  Kept as-is:
// external build configurations may -D it, so renaming would silently
// disable the override.
#ifndef NEEDS_SEPERATE_RW_LOCK
#define RWLock Mutex // Mutex does work as an rw-lock.
#define WriterLockScoped MutexLock
#define ReaderLockScoped ReaderMutexLock
#endif // !NEEDS_SEPERATE_RW_LOCK


// Helgrind memory usage testing stuff
// If not present in dynamic_annotations.h/.cc - ignore
#ifndef ANNOTATE_RESET_STATS
#define ANNOTATE_RESET_STATS() do { } while(0)
#endif
#ifndef ANNOTATE_PRINT_STATS
#define ANNOTATE_PRINT_STATS() do { } while(0)
#endif
#ifndef ANNOTATE_PRINT_MEMORY_USAGE
#define ANNOTATE_PRINT_MEMORY_USAGE(a) do { } while(0)
#endif
//

// A function that allows to suppress gcc's warnings about
// unused return values in a portable way.
template <typename T>
static inline void IGNORE_RETURN_VALUE(T v)
{ }

#include <vector>
#include <string>
#include <map>
#include <queue>
#include <algorithm>
#include <cstring>      // strlen(), index(), rindex()
#include <ctime>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/mman.h>   // mmap
#include <errno.h>
#include <stdint.h>     // uintptr_t
#include <stdlib.h>
#include <dirent.h>

#ifndef __APPLE__
#include <malloc.h>
#endif

// The tests are
// - Stability tests (marked STAB)
// - Performance tests (marked PERF)
// - Feature tests
//   - TN (true negative) : no race exists and the tool is silent.
//   - TP (true positive) : a race exists and reported.
//   - FN (false negative): a race exists but not reported.
//   - FP (false positive): no race exists but the tool reports it.
//
// The feature tests are marked according to the behavior of helgrind 3.3.0.
//
// TP and FP tests are annotated with ANNOTATE_EXPECT_RACE,
// so, no error reports should be seen when running under helgrind.
//
// When some of the FP cases are fixed in helgrind we'll need
// to update this test.
//
// Each test resides in its own namespace.
// Namespaces are named test01, test02, ...
// Please, *DO NOT* change the logic of existing tests nor rename them.
// Create a new test instead.
//
// Some tests use sleep()/usleep().
// This is not a synchronization, but a simple way to trigger
// some specific behaviour of the race detector's scheduler.

// Globals and utilities used by several tests. {{{1
CondVar CV;       // Condition variable shared by many tests below.
int COND = 0;     // Condition flag; each test guards it with its own mutex.


typedef void (*void_func_void_t)(void);

// Bit flags describing how/when a registered test should be run.
enum TEST_FLAG {
  FEATURE           = 1 << 0,
  STABILITY         = 1 << 1,
  PERFORMANCE       = 1 << 2,
  EXCLUDE_FROM_ALL  = 1 << 3,
  NEEDS_ANNOTATIONS = 1 << 4,
  RACE_DEMO         = 1 << 5,
  MEMORY_USAGE      = 1 << 6,
  PRINT_STATS       = 1 << 7
};

// Put everything into stderr.
// NOTE: this macro shadows libc printf() for the rest of this file and
// serializes all output under printf_mu.
Mutex printf_mu;
#define printf(args...) \
    do{ \
      printf_mu.Lock();\
      fprintf(stderr, args);\
      printf_mu.Unlock(); \
    }while(0)

// Wall-clock time in milliseconds (used to time PERFORMANCE tests).
long GetTimeInMs() {
  struct timeval tv;
  gettimeofday(&tv, NULL);
  return (tv.tv_sec * 1000L) + (tv.tv_usec / 1000L);
}

// One registered test: a void() entry point plus TEST_FLAG bits.
struct Test{
  void_func_void_t f_;
  int flags_;
  Test(void_func_void_t f, int flags)
    : f_(f)
    , flags_(flags)
  {}
  Test() : f_(0), flags_(0) {}
  // Runs f_; times it for PERFORMANCE tests and prints stats/memory
  // usage afterwards when the corresponding flags are set.
  void Run() {
    ANNOTATE_RESET_STATS();
    if (flags_ & PERFORMANCE) {
      long start = GetTimeInMs();
      f_();
      long end = GetTimeInMs();
      printf ("Time: %4ldms\n", end-start);
    } else
      f_();
    if (flags_ & PRINT_STATS)
      ANNOTATE_PRINT_STATS();
    if (flags_ & MEMORY_USAGE)
      ANNOTATE_PRINT_MEMORY_USAGE(0);
  }
};
// Test id -> Test.  Populated by TestAdder objects during static initialization.
std::map<int, Test> TheMapOfTests;

#define NOINLINE __attribute__ ((noinline))
extern "C" void NOINLINE AnnotateSetVerbosity(const char *, int, int) {};

// Registers a test in TheMapOfTests from a static initializer.
// The CHECK guards against two tests claiming the same id.
struct TestAdder {
  TestAdder(void_func_void_t f, int id, int flags = FEATURE) {
    // AnnotateSetVerbosity(__FILE__, __LINE__, 0);
    CHECK(TheMapOfTests.count(id) == 0);
    TheMapOfTests[id] = Test(f, flags);
  }
};

#define REGISTER_TEST(f, id)         TestAdder add_test_##id (f, id);
#define REGISTER_TEST2(f, id, flags) TestAdder add_test_##id (f, id, flags);

// Tiny predicates used with Mutex::LockWhen()/Await() Conditions.
static bool ArgIsOne(int *arg) { return *arg == 1; };
static bool ArgIsZero(int *arg) { return *arg == 0; };
static bool ArgIsTrue(bool *arg) { return *arg == true; };

// Call ANNOTATE_EXPECT_RACE only if 'machine' env variable is defined.
// Useful to test against several different machines.
// Supported machines so far:
//   MSM_HYBRID1             -- aka MSMProp1
//   MSM_HYBRID1_INIT_STATE  -- aka MSMProp1 with --initialization-state=yes
//   MSM_THREAD_SANITIZER    -- ThreadSanitizer's state machine
// (the while/break shape lets the macro be used as a single statement)
#define ANNOTATE_EXPECT_RACE_FOR_MACHINE(mem, descr, machine) \
    while(getenv(machine)) {\
      ANNOTATE_EXPECT_RACE(mem, descr); \
      break;\
    }\

#define ANNOTATE_EXPECT_RACE_FOR_TSAN(mem, descr) \
    ANNOTATE_EXPECT_RACE_FOR_MACHINE(mem, descr, "MSM_THREAD_SANITIZER")

inline bool Tsan_PureHappensBefore() {
  return true;
}

// True iff the TSAN_FAST_MODE environment variable is set.
inline bool Tsan_FastMode() {
  return getenv("TSAN_FAST_MODE") != NULL;
}

// Initialize *(mem) to 0 if Tsan_FastMode.
// Zero *(mem) before a test body when running in TSan fast mode.
#define FAST_MODE_INIT(mem) do { if (Tsan_FastMode()) { *(mem) = 0; } } while(0)

#ifndef MAIN_INIT_ACTION
#define MAIN_INIT_ACTION
#endif



// Test driver.  Modes:
//   argv[1] == "benchmark"  -- run only PERFORMANCE tests
//   argv[1] == "demo"       -- run only RACE_DEMO tests
//   argv[1..] numeric ids   -- run exactly the listed tests
//   no args                 -- run everything except EXCLUDE_FROM_ALL and
//                              RACE_DEMO; NEEDS_ANNOTATIONS tests run only
//                              if DRT_ALLOW_ANNOTATIONS is set in the env.
int main(int argc, char** argv) { // {{{1
  MAIN_INIT_ACTION;
  printf("FLAGS [phb=%i, fm=%i]\n", Tsan_PureHappensBefore(), Tsan_FastMode());
  if (argc == 2 && !strcmp(argv[1], "benchmark")) {
    for (std::map<int,Test>::iterator it = TheMapOfTests.begin();
         it != TheMapOfTests.end(); ++it) {
      if(!(it->second.flags_ & PERFORMANCE)) continue;
      it->second.Run();
    }
  } else if (argc == 2 && !strcmp(argv[1], "demo")) {
    for (std::map<int,Test>::iterator it = TheMapOfTests.begin();
         it != TheMapOfTests.end(); ++it) {
      if(!(it->second.flags_ & RACE_DEMO)) continue;
      it->second.Run();
    }
  } else if (argc > 1) {
    // the tests are listed in command line flags
    for (int i = 1; i < argc; i++) {
      int f_num = atoi(argv[i]);
      CHECK(TheMapOfTests.count(f_num));
      TheMapOfTests[f_num].Run();
    }
  } else {
    bool run_tests_with_annotations = false;
    if (getenv("DRT_ALLOW_ANNOTATIONS")) {
      run_tests_with_annotations = true;
    }
    for (std::map<int,Test>::iterator it = TheMapOfTests.begin();
         it != TheMapOfTests.end();
         ++it) {
      if(it->second.flags_ & EXCLUDE_FROM_ALL) continue;
      if(it->second.flags_ & RACE_DEMO) continue;
      if((it->second.flags_ & NEEDS_ANNOTATIONS)
         && run_tests_with_annotations == false) continue;
      it->second.Run();
    }
  }
}

#ifdef THREAD_WRAPPERS_PTHREAD_H
#endif


// An array of threads. Create/start/join all elements at once. {{{1
// Unused slots (NULL callbacks) are skipped.
class MyThreadArray {
 public:
  static const int kSize = 5;
  typedef void (*F) (void);
  MyThreadArray(F f1, F f2 = NULL, F f3 = NULL, F f4 = NULL, F f5 = NULL) {
    ar_[0] = new MyThread(f1);
    ar_[1] = f2 ? new MyThread(f2) : NULL;
    ar_[2] = f3 ? new MyThread(f3) : NULL;
    ar_[3] = f4 ? new MyThread(f4) : NULL;
    ar_[4] = f5 ? new MyThread(f5) : NULL;
  }
  void Start() {
    for(int i = 0; i < kSize; i++) {
      if(ar_[i]) {
        ar_[i]->Start();
        usleep(10);  // stagger thread start-up slightly
      }
    }
  }

  void Join() {
    for(int i = 0; i < kSize; i++) {
      if(ar_[i]) {
        ar_[i]->Join();
      }
    }
  }

  ~MyThreadArray() {
    for(int i = 0; i < kSize; i++) {
      delete ar_[i];
    }
  }
 private:
  MyThread *ar_[kSize];
};



// test00: {{{1
namespace test00 {
int GLOB = 0;
void Run() {
  printf("test00: negative\n");
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST(Run, 00)
} // namespace test00


// test01: TP. Simple race (write vs write). {{{1
namespace test01 {
int GLOB = 0;
void Worker() {
  GLOB = 1;
}

void Parent() {
  MyThread t(Worker);
  t.Start();
  GLOB = 2;  // races with Worker's write: no synchronization between them
  t.Join();
}
void Run() {
  FAST_MODE_INIT(&GLOB);
  ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test01. TP.");
  ANNOTATE_TRACE_MEMORY(&GLOB);
  printf("test01: positive\n");
  Parent();
  const int tmp = GLOB;
  printf("\tGLOB=%d\n", tmp);
}
REGISTER_TEST(Run, 1);
} // namespace test01


// test02: TN. Synchronization via CondVar. {{{1
namespace test02 {
int GLOB = 0;
// Two write accesses to GLOB are synchronized because
// the pair of CV.Signal() and CV.Wait() establish happens-before relation.
//
// Waiter:                      Waker:
// 1. COND = 0
// 2. Start(Waker)
// 3. MU.Lock()                 a. write(GLOB)
//                              b. MU.Lock()
//                              c. COND = 1
//                         /--- d. CV.Signal()
//  4. while(COND)        /     e. MU.Unlock()
//       CV.Wait(MU) <---/
//  5. MU.Unlock()
//  6. write(GLOB)
Mutex MU;

void Waker() {
  usleep(100000);  // Make sure the waiter blocks.
  GLOB = 1;

  MU.Lock();
  COND = 1;
  CV.Signal();
  MU.Unlock();
}

void Waiter() {
  ThreadPool pool(1);
  pool.StartWorkers();
  COND = 0;
  pool.Add(NewCallback(Waker));
  MU.Lock();
  while(COND != 1)
    CV.Wait(&MU);
  MU.Unlock();
  GLOB = 2;
}
void Run() {
  printf("test02: negative\n");
  Waiter();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST(Run, 2);
} // namespace test02


// test03: TN. Synchronization via LockWhen, signaller gets there first. {{{1
namespace test03 {
int GLOB = 0;
// Two write accesses to GLOB are synchronized via conditional critical section.
// Note that LockWhen() happens first (we use sleep(1) to make sure)!
//
// Waiter:                           Waker:
// 1. COND = 0
// 2. Start(Waker)
//                                   a. write(GLOB)
//                                   b. MU.Lock()
//                                   c. COND = 1
//                              /--- d. MU.Unlock()
// 3. MU.LockWhen(COND==1) <---/
// 4. MU.Unlock()
// 5. write(GLOB)
Mutex MU;

void Waker() {
  usleep(100000);  // Make sure the waiter blocks.
  GLOB = 1;

  MU.Lock();
  COND = 1; // We are done! Tell the Waiter.
  MU.Unlock(); // calls ANNOTATE_CONDVAR_SIGNAL;
}
void Waiter() {
  ThreadPool pool(1);
  pool.StartWorkers();
  COND = 0;
  pool.Add(NewCallback(Waker));
  MU.LockWhen(Condition(&ArgIsOne, &COND)); // calls ANNOTATE_CONDVAR_WAIT
  MU.Unlock(); // Waker is done!

  GLOB = 2;
}
void Run() {
  printf("test03: negative\n");
  Waiter();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST2(Run, 3, FEATURE|NEEDS_ANNOTATIONS);
} // namespace test03

// test04: TN. Synchronization via PCQ. {{{1
namespace test04 {
int GLOB = 0;
ProducerConsumerQueue Q(INT_MAX);
// Two write accesses to GLOB are separated by PCQ Put/Get.
//
// Putter:                        Getter:
// 1. write(GLOB)
// 2. Q.Put() ---------\          .
//                      \-------> a. Q.Get()
//                                b. write(GLOB)


void Putter() {
  GLOB = 1;
  Q.Put(NULL);
}

void Getter() {
  Q.Get();
  GLOB = 2;
}

void Run() {
  printf("test04: negative\n");
  MyThreadArray t(Putter, Getter);
  t.Start();
  t.Join();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST(Run, 4);
} // namespace test04


// test05: FP. Synchronization via CondVar, but waiter does not block. {{{1
// Since CondVar::Wait() is not called, we get a false positive.
namespace test05 {
int GLOB = 0;
// Two write accesses to GLOB are synchronized via CondVar.
// But race detector can not see it.
// See this for details:
// http://www.valgrind.org/docs/manual/hg-manual.html#hg-manual.effective-use.
//
// Waiter:                                  Waker:
// 1. COND = 0
// 2. Start(Waker)
// 3. MU.Lock()                             a. write(GLOB)
//                                          b. MU.Lock()
//                                          c. COND = 1
//                                          d. CV.Signal()
//  4. while(COND)                          e. MU.Unlock()
//       CV.Wait(MU) <<< not called
//  5. MU.Unlock()
//  6. write(GLOB)
Mutex MU;

void Waker() {
  GLOB = 1;
  MU.Lock();
  COND = 1;
  CV.Signal();
  MU.Unlock();
}

void Waiter() {
  ThreadPool pool(1);
  pool.StartWorkers();
  COND = 0;
  pool.Add(NewCallback(Waker));
  usleep(100000);  // Make sure the signaller gets first.
  MU.Lock();
  while(COND != 1)
    CV.Wait(&MU);
  MU.Unlock();
  GLOB = 2;
}
void Run() {
  FAST_MODE_INIT(&GLOB);
  if (!Tsan_PureHappensBefore())
    ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test05. FP. Unavoidable in hybrid scheme.");
  printf("test05: unavoidable false positive\n");
  Waiter();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST(Run, 5);
} // namespace test05


// test06: TN. Synchronization via CondVar, but Waker gets there first. {{{1
namespace test06 {
int GLOB = 0;
// Same as test05 but we annotated the Wait() loop.
//
// Waiter:                                            Waker:
// 1. COND = 0
// 2. Start(Waker)
// 3. MU.Lock()                                       a. write(GLOB)
//                                                    b. MU.Lock()
//                                                    c. COND = 1
//                                           /------- d. CV.Signal()
//  4. while(COND)                          /         e. MU.Unlock()
//       CV.Wait(MU) <<< not called        /
//  6. ANNOTATE_CONDVAR_WAIT(CV, MU) <----/
//  5. MU.Unlock()
//  6. write(GLOB)

Mutex MU;

void Waker() {
  GLOB = 1;
  MU.Lock();
  COND = 1;
  CV.Signal();
  MU.Unlock();
}

void Waiter() {
  ThreadPool pool(1);
  pool.StartWorkers();
  COND = 0;
  pool.Add(NewCallback(Waker));
  usleep(100000);  // Make sure the signaller gets first.
  MU.Lock();
  while(COND != 1)
    CV.Wait(&MU);
  ANNOTATE_CONDVAR_LOCK_WAIT(&CV, &MU);

  MU.Unlock();
  GLOB = 2;
}
void Run() {
  printf("test06: negative\n");
  Waiter();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST2(Run, 6, FEATURE|NEEDS_ANNOTATIONS);
} // namespace test06


// test07: TN. Synchronization via LockWhen(), Signaller is observed first. {{{1
namespace test07 {
int GLOB = 0;
bool COND = 0;  // intentionally shadows the global int COND
// Two write accesses to GLOB are synchronized via conditional critical section.
// LockWhen() is observed after COND has been set (due to sleep).
// Unlock() calls ANNOTATE_CONDVAR_SIGNAL().
//
// Waiter:                           Signaller:
// 1. COND = 0
// 2. Start(Signaller)
//                                   a. write(GLOB)
//                                   b. MU.Lock()
//                                   c. COND = 1
//                              /--- d. MU.Unlock calls ANNOTATE_CONDVAR_SIGNAL
// 3. MU.LockWhen(COND==1) <---/
// 4. MU.Unlock()
// 5. write(GLOB)

Mutex MU;
void Signaller() {
  GLOB = 1;
  MU.Lock();
  COND = true; // We are done! Tell the Waiter.
  MU.Unlock(); // calls ANNOTATE_CONDVAR_SIGNAL;
}
void Waiter() {
  COND = false;
  MyThread t(Signaller);
  t.Start();
  usleep(100000);  // Make sure the signaller gets there first.

  MU.LockWhen(Condition(&ArgIsTrue, &COND)); // calls ANNOTATE_CONDVAR_WAIT
  MU.Unlock(); // Signaller is done!

  GLOB = 2; // If LockWhen didn't catch the signal, a race may be reported here.
  t.Join();
}
void Run() {
  printf("test07: negative\n");
  Waiter();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST2(Run, 7, FEATURE|NEEDS_ANNOTATIONS);
} // namespace test07

// test08: TN. Synchronization via thread start/join. {{{1
namespace test08 {
int GLOB = 0;
// Three accesses to GLOB are separated by thread start/join.
//
// Parent:                        Worker:
// 1. write(GLOB)
// 2. Start(Worker) ------------>
//                                a. write(GLOB)
// 3. Join(Worker) <------------
// 4. write(GLOB)
void Worker() {
  GLOB = 2;
}

void Parent() {
  MyThread t(Worker);
  GLOB = 1;
  t.Start();
  t.Join();
  GLOB = 3;
}
void Run() {
  printf("test08: negative\n");
  Parent();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST(Run, 8);
} // namespace test08


// test09: TP. Simple race (read vs write). {{{1
namespace test09 {
int GLOB = 0;
// A simple data race between writer and reader.
// Write happens after read (enforced by sleep).
// Usually, easily detectable by a race detector.
void Writer() {
  usleep(100000);
  GLOB = 3;
}
void Reader() {
  CHECK(GLOB != -777);
}

void Run() {
  ANNOTATE_TRACE_MEMORY(&GLOB);
  FAST_MODE_INIT(&GLOB);
  ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test09. TP.");
  printf("test09: positive\n");
  MyThreadArray t(Writer, Reader);
  t.Start();
  t.Join();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST(Run, 9);
} // namespace test09


// test10: FN. Simple race (write vs read). {{{1
namespace test10 {
int GLOB = 0;
// A simple data race between writer and reader.
// Write happens before Read (enforced by sleep),
// otherwise this test is the same as test09.
//
// Writer:                    Reader:
// 1. write(GLOB)             a. sleep(long enough so that GLOB
//                                is most likely initialized by Writer)
//                            b. read(GLOB)
//
//
// Eraser algorithm does not detect the race here,
// see Section 2.2 of http://citeseer.ist.psu.edu/savage97eraser.html.
//
void Writer() {
  GLOB = 3;
}
void Reader() {
  usleep(100000);
  CHECK(GLOB != -777);
}

void Run() {
  FAST_MODE_INIT(&GLOB);
  ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test10. TP. FN in MSMHelgrind.");
  printf("test10: positive\n");
  MyThreadArray t(Writer, Reader);
  t.Start();
  t.Join();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST(Run, 10);
} // namespace test10


// test11: FP. Synchronization via CondVar, 2 workers. {{{1
// This test is properly synchronized, but currently (Dec 2007)
// helgrind reports a false positive.
//
// Parent:                              Worker1, Worker2:
// 1. Start(workers)                    a. read(GLOB)
// 2. MU.Lock()                         b. MU.Lock()
// 3. while(COND != 2)        /-------- c. CV.Signal()
//      CV.Wait(&MU) <-------/          d. MU.Unlock()
// 4. MU.Unlock()
// 5. write(GLOB)
//
namespace test11 {
int GLOB = 0;
Mutex MU;
void Worker() {
  usleep(200000);
  CHECK(GLOB != 777);

  MU.Lock();
  COND++;
  CV.Signal();
  MU.Unlock();
}

void Parent() {
  COND = 0;

  MyThreadArray t(Worker, Worker);
  t.Start();

  MU.Lock();
  while(COND != 2) {
    CV.Wait(&MU);
  }
  MU.Unlock();

  GLOB = 2;

  t.Join();
}

void Run() {
//  ANNOTATE_EXPECT_RACE(&GLOB, "test11. FP. Fixed by MSMProp1.");
  printf("test11: negative\n");
  Parent();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST(Run, 11);
} // namespace test11


// test12: FP. Synchronization via Mutex, then via PCQ. {{{1
namespace test12 {
int GLOB = 0;
// This test is properly synchronized, but currently (Dec 2007)
// helgrind reports a false positive.
//
// First, we write to GLOB under MU, then we synchronize via PCQ,
// which is essentially a semaphore.
//
// Putter:                       Getter:
// 1. MU.Lock()                  a. MU.Lock()
// 2. write(GLOB) <---- MU ----> b. write(GLOB)
// 3. MU.Unlock()                c. MU.Unlock()
// 4. Q.Put()   ---------------> d. Q.Get()
//                               e. write(GLOB)

ProducerConsumerQueue Q(INT_MAX);
Mutex MU;

void Putter() {
  MU.Lock();
  GLOB++;
  MU.Unlock();

  Q.Put(NULL);
}

void Getter() {
  MU.Lock();
  GLOB++;
  MU.Unlock();

  Q.Get();
  GLOB++;
}

void Run() {
//  ANNOTATE_EXPECT_RACE(&GLOB, "test12. FP. Fixed by MSMProp1.");
  printf("test12: negative\n");
  MyThreadArray t(Putter, Getter);
  t.Start();
  t.Join();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST(Run, 12);
} // namespace test12


// test13: FP. Synchronization via Mutex, then via LockWhen. {{{1
namespace test13 {
int GLOB = 0;
// This test is essentially the same as test12, but uses LockWhen
// instead of PCQ.
//
// Waker:                                     Waiter:
// 1. MU.Lock()                               a. MU.Lock()
// 2. write(GLOB) <---------- MU ---------->  b. write(GLOB)
// 3. MU.Unlock()                             c. MU.Unlock()
// 4. MU.Lock()                               .
// 5. COND = 1                                .
// 6. ANNOTATE_CONDVAR_SIGNAL -------\        .
// 7. MU.Unlock()                     \       .
//                                     \----> d. MU.LockWhen(COND == 1)
//                                            e. MU.Unlock()
//                                            f. write(GLOB)
Mutex MU;

void Waker() {
  MU.Lock();
  GLOB++;
  MU.Unlock();

  MU.Lock();
  COND = 1;
  ANNOTATE_CONDVAR_SIGNAL(&MU);
  MU.Unlock();
}

void Waiter() {
  MU.Lock();
  GLOB++;
  MU.Unlock();

  MU.LockWhen(Condition(&ArgIsOne, &COND));
  MU.Unlock();
  GLOB++;
}

void Run() {
//  ANNOTATE_EXPECT_RACE(&GLOB, "test13. FP. Fixed by MSMProp1.");
  printf("test13: negative\n");
  COND = 0;

  MyThreadArray t(Waker, Waiter);
  t.Start();
  t.Join();

  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST2(Run, 13, FEATURE|NEEDS_ANNOTATIONS);
} // namespace test13
// test14: FP. Synchronization via PCQ, reads, 2 workers. {{{1
namespace test14 {
int GLOB = 0;
// This test is properly synchronized, but currently (Dec 2007)
// helgrind reports a false positive.
//
// This test is similar to test11, but uses PCQ (semaphore).
//
// Putter2:                  Putter1:                     Getter:
// 1. read(GLOB)             a. read(GLOB)
// 2. Q2.Put() ----\         b. Q1.Put() -----\           .
//                  \                          \--------> A. Q1.Get()
//                   \----------------------------------> B. Q2.Get()
//                                                        C. write(GLOB)
ProducerConsumerQueue Q1(INT_MAX), Q2(INT_MAX);

void Putter1() {
  CHECK(GLOB != 777);
  Q1.Put(NULL);
}
void Putter2() {
  CHECK(GLOB != 777);
  Q2.Put(NULL);
}
void Getter() {
  Q1.Get();
  Q2.Get();
  GLOB++;
}
void Run() {
//  ANNOTATE_EXPECT_RACE(&GLOB, "test14. FP. Fixed by MSMProp1.");
  printf("test14: negative\n");
  MyThreadArray t(Getter, Putter1, Putter2);
  t.Start();
  t.Join();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST(Run, 14);
} // namespace test14


// test15: TN. Synchronization via LockWhen. One waker and 2 waiters. {{{1
namespace test15 {
// Waker:                                   Waiter1, Waiter2:
// 1. write(GLOB)
// 2. MU.Lock()
// 3. COND = 1
// 4. ANNOTATE_CONDVAR_SIGNAL ------------> a. MU.LockWhen(COND == 1)
// 5. MU.Unlock()                           b. MU.Unlock()
//                                          c. read(GLOB)

int GLOB = 0;
Mutex MU;

void Waker() {
  GLOB = 2;

  MU.Lock();
  COND = 1;
  ANNOTATE_CONDVAR_SIGNAL(&MU);
  MU.Unlock();
};

void Waiter() {
  MU.LockWhen(Condition(&ArgIsOne, &COND));
  MU.Unlock();
  CHECK(GLOB != 777);
}


void Run() {
  COND = 0;
  printf("test15: negative\n");
  MyThreadArray t(Waker, Waiter, Waiter);
  t.Start();
  t.Join();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST(Run, 15);
} // namespace test15


// test16: FP. Barrier (emulated by CV), 2 threads. {{{1
namespace test16 {
// Worker1:                                     Worker2:
// 1. MU.Lock()                                 a. MU.Lock()
// 2. write(GLOB) <------------ MU ---------->  b. write(GLOB)
// 3. MU.Unlock()                               c. MU.Unlock()
// 4. MU2.Lock()                                d. MU2.Lock()
// 5. COND--                                    e. COND--
// 6. ANNOTATE_CONDVAR_SIGNAL(MU2) ---->V      .
// 7. MU2.Await(COND == 0) <------------+------ f. ANNOTATE_CONDVAR_SIGNAL(MU2)
// 8. MU2.Unlock()                      V-----> g. MU2.Await(COND == 0)
// 9. read(GLOB)                                h. MU2.Unlock()
//                                              i. read(GLOB)
//
//
// TODO: This way we may create too many edges in happens-before graph.
// Arndt Mühlenfeld in his PhD (TODO: link) suggests creating special nodes in
// happens-before graph to reduce the total number of edges.
// See figure 3.14.
//
//
int GLOB = 0;
Mutex MU;
Mutex MU2;

void Worker() {
  MU.Lock();
  GLOB++;
  MU.Unlock();

  MU2.Lock();
  COND--;
  ANNOTATE_CONDVAR_SIGNAL(&MU2);
  MU2.Await(Condition(&ArgIsZero, &COND));
  MU2.Unlock();

  CHECK(GLOB == 2);
}

void Run() {
//  ANNOTATE_EXPECT_RACE(&GLOB, "test16. FP. Fixed by MSMProp1 + Barrier support.");
  COND = 2;
  printf("test16: negative\n");
  MyThreadArray t(Worker, Worker);
  t.Start();
  t.Join();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST2(Run, 16, FEATURE|NEEDS_ANNOTATIONS);
} // namespace test16


// test17: FP. Barrier (emulated by CV), 3 threads. {{{1
namespace test17 {
// Same as test16, but with 3 threads.
int GLOB = 0;
Mutex MU;
Mutex MU2;

void Worker() {
  MU.Lock();
  GLOB++;
  MU.Unlock();

  MU2.Lock();
  COND--;
  ANNOTATE_CONDVAR_SIGNAL(&MU2);
  MU2.Await(Condition(&ArgIsZero, &COND));
  MU2.Unlock();

  CHECK(GLOB == 3);
}

void Run() {
//  ANNOTATE_EXPECT_RACE(&GLOB, "test17. FP. Fixed by MSMProp1 + Barrier support.");
  COND = 3;
  printf("test17: negative\n");
  MyThreadArray t(Worker, Worker, Worker);
  t.Start();
  t.Join();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST2(Run, 17, FEATURE|NEEDS_ANNOTATIONS);
} // namespace test17


// test18: TN. Synchronization via Await(), signaller gets there first. {{{1
namespace test18 {
int GLOB = 0;
Mutex MU;
// Same as test03, but uses Mutex::Await() instead of Mutex::LockWhen().

void Waker() {
  usleep(100000);  // Make sure the waiter blocks.
  GLOB = 1;

  MU.Lock();
  COND = 1; // We are done! Tell the Waiter.
  MU.Unlock(); // calls ANNOTATE_CONDVAR_SIGNAL;
}
void Waiter() {
  ThreadPool pool(1);
  pool.StartWorkers();
  COND = 0;
  pool.Add(NewCallback(Waker));

  MU.Lock();
  MU.Await(Condition(&ArgIsOne, &COND)); // calls ANNOTATE_CONDVAR_WAIT
  MU.Unlock(); // Waker is done!

  GLOB = 2;
}
void Run() {
  printf("test18: negative\n");
  Waiter();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST2(Run, 18, FEATURE|NEEDS_ANNOTATIONS);
} // namespace test18

// test19: TN. Synchronization via AwaitWithTimeout(). {{{1
namespace test19 {
int GLOB = 0;
// Same as test18, but with AwaitWithTimeout. Do not timeout.
Mutex MU;
void Waker() {
  usleep(100000);  // Make sure the waiter blocks.
  GLOB = 1;

  MU.Lock();
  COND = 1; // We are done! Tell the Waiter.
  MU.Unlock(); // calls ANNOTATE_CONDVAR_SIGNAL;
}
void Waiter() {
  ThreadPool pool(1);
  pool.StartWorkers();
  COND = 0;
  pool.Add(NewCallback(Waker));

  MU.Lock();
  CHECK(MU.AwaitWithTimeout(Condition(&ArgIsOne, &COND), INT_MAX));
  MU.Unlock();

  GLOB = 2;
}
void Run() {
  printf("test19: negative\n");
  Waiter();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST2(Run, 19, FEATURE|NEEDS_ANNOTATIONS);
} // namespace test19

// test20: TP. Incorrect synchronization via AwaitWhen(), timeout. {{{1
namespace test20 {
int GLOB = 0;
Mutex MU;
// True race. We timeout in AwaitWhen.
void Waker() {
  GLOB = 1;
  usleep(100 * 1000);
}
void Waiter() {
  ThreadPool pool(1);
  pool.StartWorkers();
  COND = 0;
  pool.Add(NewCallback(Waker));

  MU.Lock();
  CHECK(!MU.AwaitWithTimeout(Condition(&ArgIsOne, &COND), 100));
  MU.Unlock();

  GLOB = 2;
}
void Run() {
  FAST_MODE_INIT(&GLOB);
  ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test20. TP.");
  printf("test20: positive\n");
  Waiter();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST2(Run, 20, FEATURE|NEEDS_ANNOTATIONS);
} // namespace test20

// test21: TP. Incorrect synchronization via LockWhenWithTimeout(). {{{1
namespace test21 {
int GLOB = 0;
// True race. We timeout in LockWhenWithTimeout().
Mutex MU;
void Waker() {
  GLOB = 1;
  usleep(100 * 1000);
}
void Waiter() {
  ThreadPool pool(1);
  pool.StartWorkers();
  COND = 0;
  pool.Add(NewCallback(Waker));

  CHECK(!MU.LockWhenWithTimeout(Condition(&ArgIsOne, &COND), 100));
  MU.Unlock();

  GLOB = 2;
}
void Run() {
  FAST_MODE_INIT(&GLOB);
  ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test21. TP.");
  printf("test21: positive\n");
  Waiter();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST2(Run, 21, FEATURE|NEEDS_ANNOTATIONS);
} // namespace test21

// test22: TP. Incorrect synchronization via CondVar::WaitWithTimeout(). {{{1
namespace test22 {
int GLOB = 0;
Mutex MU;
// True race. We timeout in CondVar::WaitWithTimeout().
void Waker() {
  GLOB = 1;
  usleep(100 * 1000);
}
void Waiter() {
  ThreadPool pool(1);
  pool.StartWorkers();
  COND = 0;
  pool.Add(NewCallback(Waker));

  int64_t ms_left_to_wait = 100;
  int64_t deadline_ms = GetCurrentTimeMillis() + ms_left_to_wait;
  MU.Lock();
  while(COND != 1 && ms_left_to_wait > 0) {
    CV.WaitWithTimeout(&MU, ms_left_to_wait);
    ms_left_to_wait = deadline_ms - GetCurrentTimeMillis();
  }
  MU.Unlock();

  GLOB = 2;
}
void Run() {
  FAST_MODE_INIT(&GLOB);
  ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test22. TP.");
  printf("test22: positive\n");
  Waiter();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST(Run, 22);
} // namespace test22

// test23: TN. TryLock, ReaderLock, ReaderTryLock. {{{1
namespace test23 {
// Correct synchronization with TryLock, Lock, ReaderTryLock, ReaderLock.
int GLOB = 0;
Mutex MU;
void Worker_TryLock() {
  for (int i = 0; i < 20; i++) {
    while (true) {
      if (MU.TryLock()) {
        GLOB++;
        MU.Unlock();
        break;
      }
      usleep(1000);
    }
  }
}

void Worker_ReaderTryLock() {
  for (int i = 0; i < 20; i++) {
    while (true) {
      if (MU.ReaderTryLock()) {
        CHECK(GLOB != 777);
        MU.ReaderUnlock();
        break;
      }
      usleep(1000);
    }
  }
}

void Worker_ReaderLock() {
  for (int i = 0; i < 20; i++) {
    MU.ReaderLock();
    CHECK(GLOB != 777);
    MU.ReaderUnlock();
    usleep(1000);
  }
}

void Worker_Lock() {
  for (int i = 0; i < 20; i++) {
    MU.Lock();
    GLOB++;
    MU.Unlock();
    usleep(1000);
  }
}

void Run() {
  printf("test23: negative\n");
  MyThreadArray t(Worker_TryLock,
                  Worker_ReaderTryLock,
                  Worker_ReaderLock,
                  Worker_Lock
                  );
  t.Start();
  t.Join();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST(Run, 23);
} // namespace test23

// test24: TN. Synchronization via ReaderLockWhen(). {{{1
namespace test24 {
int GLOB = 0;
Mutex MU;
// Same as test03, but uses ReaderLockWhen().

void Waker() {
  usleep(100000);  // Make sure the waiter blocks.
  GLOB = 1;

  MU.Lock();
  COND = 1; // We are done! Tell the Waiter.
  MU.Unlock(); // calls ANNOTATE_CONDVAR_SIGNAL;
}
void Waiter() {
  ThreadPool pool(1);
  pool.StartWorkers();
  COND = 0;
  pool.Add(NewCallback(Waker));
  MU.ReaderLockWhen(Condition(&ArgIsOne, &COND));
  MU.ReaderUnlock();

  GLOB = 2;
}
void Run() {
  printf("test24: negative\n");
  Waiter();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST2(Run, 24, FEATURE|NEEDS_ANNOTATIONS);
} // namespace test24

// test25: TN. Synchronization via ReaderLockWhenWithTimeout().
{{{1 1311namespace test25 { 1312int GLOB = 0; 1313Mutex MU; 1314// Same as test24, but uses ReaderLockWhenWithTimeout(). 1315// We do not timeout. 1316 1317void Waker() { 1318 usleep(100000); // Make sure the waiter blocks. 1319 GLOB = 1; 1320 1321 MU.Lock(); 1322 COND = 1; // We are done! Tell the Waiter. 1323 MU.Unlock(); // calls ANNOTATE_CONDVAR_SIGNAL; 1324} 1325void Waiter() { 1326 ThreadPool pool(1); 1327 pool.StartWorkers(); 1328 COND = 0; 1329 pool.Add(NewCallback(Waker)); 1330 CHECK(MU.ReaderLockWhenWithTimeout(Condition(&ArgIsOne, &COND), INT_MAX)); 1331 MU.ReaderUnlock(); 1332 1333 GLOB = 2; 1334} 1335void Run() { 1336 printf("test25: negative\n"); 1337 Waiter(); 1338 printf("\tGLOB=%d\n", GLOB); 1339} 1340REGISTER_TEST2(Run, 25, FEATURE|NEEDS_ANNOTATIONS); 1341} // namespace test25 1342 1343// test26: TP. Incorrect synchronization via ReaderLockWhenWithTimeout(). {{{1 1344namespace test26 { 1345int GLOB = 0; 1346Mutex MU; 1347// Same as test25, but we timeout and incorrectly assume happens-before. 1348 1349void Waker() { 1350 GLOB = 1; 1351 usleep(10000); 1352} 1353void Waiter() { 1354 ThreadPool pool(1); 1355 pool.StartWorkers(); 1356 COND = 0; 1357 pool.Add(NewCallback(Waker)); 1358 CHECK(!MU.ReaderLockWhenWithTimeout(Condition(&ArgIsOne, &COND), 100)); 1359 MU.ReaderUnlock(); 1360 1361 GLOB = 2; 1362} 1363void Run() { 1364 FAST_MODE_INIT(&GLOB); 1365 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test26. TP"); 1366 printf("test26: positive\n"); 1367 Waiter(); 1368 printf("\tGLOB=%d\n", GLOB); 1369} 1370REGISTER_TEST2(Run, 26, FEATURE|NEEDS_ANNOTATIONS); 1371} // namespace test26 1372 1373 1374// test27: TN. Simple synchronization via SpinLock. 
{{{1 1375namespace test27 { 1376#ifndef NO_SPINLOCK 1377int GLOB = 0; 1378SpinLock MU; 1379void Worker() { 1380 MU.Lock(); 1381 GLOB++; 1382 MU.Unlock(); 1383 usleep(10000); 1384} 1385 1386void Run() { 1387 printf("test27: negative\n"); 1388 MyThreadArray t(Worker, Worker, Worker, Worker); 1389 t.Start(); 1390 t.Join(); 1391 printf("\tGLOB=%d\n", GLOB); 1392} 1393REGISTER_TEST2(Run, 27, FEATURE|NEEDS_ANNOTATIONS); 1394#endif // NO_SPINLOCK 1395} // namespace test27 1396 1397 1398// test28: TN. Synchronization via Mutex, then PCQ. 3 threads {{{1 1399namespace test28 { 1400// Putter1: Getter: Putter2: 1401// 1. MU.Lock() A. MU.Lock() 1402// 2. write(GLOB) B. write(GLOB) 1403// 3. MU.Unlock() C. MU.Unlock() 1404// 4. Q.Put() ---------\ /------- D. Q.Put() 1405// 5. MU.Lock() \-------> a. Q.Get() / E. MU.Lock() 1406// 6. read(GLOB) b. Q.Get() <---------/ F. read(GLOB) 1407// 7. MU.Unlock() (sleep) G. MU.Unlock() 1408// c. read(GLOB) 1409ProducerConsumerQueue Q(INT_MAX); 1410int GLOB = 0; 1411Mutex MU; 1412 1413void Putter() { 1414 MU.Lock(); 1415 GLOB++; 1416 MU.Unlock(); 1417 1418 Q.Put(NULL); 1419 1420 MU.Lock(); 1421 CHECK(GLOB != 777); 1422 MU.Unlock(); 1423} 1424 1425void Getter() { 1426 Q.Get(); 1427 Q.Get(); 1428 usleep(100000); 1429 CHECK(GLOB == 2); 1430} 1431 1432void Run() { 1433 printf("test28: negative\n"); 1434 MyThreadArray t(Getter, Putter, Putter); 1435 t.Start(); 1436 t.Join(); 1437 printf("\tGLOB=%d\n", GLOB); 1438} 1439REGISTER_TEST(Run, 28); 1440} // namespace test28 1441 1442 1443// test29: TN. Synchronization via Mutex, then PCQ. 4 threads. {{{1 1444namespace test29 { 1445// Similar to test28, but has two Getters and two PCQs. 
1446ProducerConsumerQueue *Q1, *Q2; 1447Mutex MU; 1448int GLOB = 0; 1449 1450void Putter(ProducerConsumerQueue *q) { 1451 MU.Lock(); 1452 GLOB++; 1453 MU.Unlock(); 1454 1455 q->Put(NULL); 1456 q->Put(NULL); 1457 1458 MU.Lock(); 1459 CHECK(GLOB != 777); 1460 MU.Unlock(); 1461 1462} 1463 1464void Putter1() { Putter(Q1); } 1465void Putter2() { Putter(Q2); } 1466 1467void Getter() { 1468 Q1->Get(); 1469 Q2->Get(); 1470 usleep(100000); 1471 CHECK(GLOB == 2); 1472 usleep(48000); // TODO: remove this when FP in test32 is fixed. 1473} 1474 1475void Run() { 1476 printf("test29: negative\n"); 1477 Q1 = new ProducerConsumerQueue(INT_MAX); 1478 Q2 = new ProducerConsumerQueue(INT_MAX); 1479 MyThreadArray t(Getter, Getter, Putter1, Putter2); 1480 t.Start(); 1481 t.Join(); 1482 printf("\tGLOB=%d\n", GLOB); 1483 delete Q1; 1484 delete Q2; 1485} 1486REGISTER_TEST(Run, 29); 1487} // namespace test29 1488 1489 1490// test30: TN. Synchronization via 'safe' race. Writer vs multiple Readers. {{{1 1491namespace test30 { 1492// This test shows a very risky kind of synchronization which is very easy 1493// to get wrong. Actually, I am not sure I've got it right. 1494// 1495// Writer: Reader1, Reader2, ..., ReaderN: 1496// 1. write(GLOB[i]: i >= BOUNDARY) a. n = BOUNDARY 1497// 2. HAPPENS_BEFORE(BOUNDARY+1) -------> b. HAPPENS_AFTER(n) 1498// 3. BOUNDARY++; c. read(GLOB[i]: i < n) 1499// 1500// Here we have a 'safe' race on accesses to BOUNDARY and 1501// no actual races on accesses to GLOB[]: 1502// Writer writes to GLOB[i] where i>=BOUNDARY and then increments BOUNDARY. 1503// Readers read BOUNDARY and read GLOB[i] where i<BOUNDARY. 1504// 1505// I am not completely sure that this scheme guaranties no race between 1506// accesses to GLOB since compilers and CPUs 1507// are free to rearrange memory operations. 1508// I am actually sure that this scheme is wrong unless we use 1509// some smart memory fencing... 
1510 1511 1512const int N = 48; 1513static int GLOB[N]; 1514volatile int BOUNDARY = 0; 1515 1516void Writer() { 1517 for (int i = 0; i < N; i++) { 1518 CHECK(BOUNDARY == i); 1519 for (int j = i; j < N; j++) { 1520 GLOB[j] = j; 1521 } 1522 ANNOTATE_HAPPENS_BEFORE(reinterpret_cast<void*>(BOUNDARY+1)); 1523 BOUNDARY++; 1524 usleep(1000); 1525 } 1526} 1527 1528void Reader() { 1529 int n; 1530 do { 1531 n = BOUNDARY; 1532 if (n == 0) continue; 1533 ANNOTATE_HAPPENS_AFTER(reinterpret_cast<void*>(n)); 1534 for (int i = 0; i < n; i++) { 1535 CHECK(GLOB[i] == i); 1536 } 1537 usleep(100); 1538 } while(n < N); 1539} 1540 1541void Run() { 1542 FAST_MODE_INIT(&BOUNDARY); 1543 ANNOTATE_EXPECT_RACE((void*)(&BOUNDARY), "test30. Sync via 'safe' race."); 1544 printf("test30: negative\n"); 1545 MyThreadArray t(Writer, Reader, Reader, Reader); 1546 t.Start(); 1547 t.Join(); 1548 printf("\tGLOB=%d\n", GLOB[N-1]); 1549} 1550REGISTER_TEST2(Run, 30, FEATURE|NEEDS_ANNOTATIONS); 1551} // namespace test30 1552 1553 1554// test31: TN. Synchronization via 'safe' race. Writer vs Writer. {{{1 1555namespace test31 { 1556// This test is similar to test30, but 1557// it has one Writer instead of mulitple Readers. 1558// 1559// Writer1: Writer2 1560// 1. write(GLOB[i]: i >= BOUNDARY) a. n = BOUNDARY 1561// 2. HAPPENS_BEFORE(BOUNDARY+1) -------> b. HAPPENS_AFTER(n) 1562// 3. BOUNDARY++; c. 
write(GLOB[i]: i < n) 1563// 1564 1565const int N = 48; 1566static int GLOB[N]; 1567volatile int BOUNDARY = 0; 1568 1569void Writer1() { 1570 for (int i = 0; i < N; i++) { 1571 CHECK(BOUNDARY == i); 1572 for (int j = i; j < N; j++) { 1573 GLOB[j] = j; 1574 } 1575 ANNOTATE_HAPPENS_BEFORE(reinterpret_cast<void*>(BOUNDARY+1)); 1576 BOUNDARY++; 1577 usleep(1000); 1578 } 1579} 1580 1581void Writer2() { 1582 int n; 1583 do { 1584 n = BOUNDARY; 1585 if (n == 0) continue; 1586 ANNOTATE_HAPPENS_AFTER(reinterpret_cast<void*>(n)); 1587 for (int i = 0; i < n; i++) { 1588 if(GLOB[i] == i) { 1589 GLOB[i]++; 1590 } 1591 } 1592 usleep(100); 1593 } while(n < N); 1594} 1595 1596void Run() { 1597 FAST_MODE_INIT(&BOUNDARY); 1598 ANNOTATE_EXPECT_RACE((void*)(&BOUNDARY), "test31. Sync via 'safe' race."); 1599 printf("test31: negative\n"); 1600 MyThreadArray t(Writer1, Writer2); 1601 t.Start(); 1602 t.Join(); 1603 printf("\tGLOB=%d\n", GLOB[N-1]); 1604} 1605REGISTER_TEST2(Run, 31, FEATURE|NEEDS_ANNOTATIONS); 1606} // namespace test31 1607 1608 1609// test32: FP. Synchronization via thread create/join. W/R. {{{1 1610namespace test32 { 1611// This test is well synchronized but helgrind 3.3.0 reports a race. 1612// 1613// Parent: Writer: Reader: 1614// 1. Start(Reader) -----------------------\ . 1615// \ . 1616// 2. Start(Writer) ---\ \ . 1617// \---> a. MU.Lock() \--> A. sleep(long enough) 1618// b. write(GLOB) 1619// /---- c. MU.Unlock() 1620// 3. Join(Writer) <---/ 1621// B. MU.Lock() 1622// C. read(GLOB) 1623// /------------ D. MU.Unlock() 1624// 4. Join(Reader) <----------------/ 1625// 5. write(GLOB) 1626// 1627// 1628// The call to sleep() in Reader is not part of synchronization, 1629// it is required to trigger the false positive in helgrind 3.3.0. 
//
int GLOB = 0;
Mutex MU;

void Writer() {
  MU.Lock();
  GLOB = 1;
  MU.Unlock();
}

void Reader() {
  usleep(480000);
  MU.Lock();
  CHECK(GLOB != 777);
  MU.Unlock();
}

void Parent() {
  MyThread r(Reader);
  MyThread w(Writer);
  r.Start();
  w.Start();

  w.Join();  // 'w' joins first.
  r.Join();

  // Safe: both children have been joined, so this write is ordered
  // after every access they made.
  GLOB = 2;
}

void Run() {
//  ANNOTATE_EXPECT_RACE(&GLOB, "test32. FP. Fixed by MSMProp1.");
  printf("test32: negative\n");
  Parent();
  printf("\tGLOB=%d\n", GLOB);
}

REGISTER_TEST(Run, 32);
}  // namespace test32


// test33: STAB. Stress test for the number of thread sets (TSETs). {{{1
namespace test33 {
int GLOB = 0;
// Here we access N memory locations from within log(N) threads.
// We do it in such a way that helgrind creates nearly all possible TSETs.
// Then we join all threads and start again (N_iter times).
const int N_iter = 48;
const int Nlog = 15;
const int N = 1 << Nlog;
static int ARR[N];
Mutex MU;

void Worker() {
  // Pick a distinct bit position n for this thread (GLOB is a shared
  // counter protected by MU).
  MU.Lock();
  int n = ++GLOB;
  MU.Unlock();

  n %= Nlog;
  for (int i = 0; i < N; i++) {
    // ARR[i] is accessed by threads from i-th subset
    if (i & (1 << n)) {
      CHECK(ARR[i] == 0);
    }
  }
}

void Run() {
  printf("test33:\n");

  std::vector<MyThread*> vec(Nlog);

  for (int j = 0; j < N_iter; j++) {
    // Create and start Nlog threads
    for (int i = 0; i < Nlog; i++) {
      vec[i] = new MyThread(Worker);
    }
    for (int i = 0; i < Nlog; i++) {
      vec[i]->Start();
    }
    // Join all threads.
    for (int i = 0; i < Nlog; i++) {
      vec[i]->Join();
      delete vec[i];
    }
    printf("------------------\n");
  }

  printf("\tGLOB=%d; ARR[1]=%d; ARR[7]=%d; ARR[N-1]=%d\n",
         GLOB, ARR[1], ARR[7], ARR[N-1]);
}
REGISTER_TEST2(Run, 33, STABILITY|EXCLUDE_FROM_ALL);
}  // namespace test33


// test34: STAB. Stress test for the number of locks sets (LSETs). {{{1
namespace test34 {
// Similar to test33, but for lock sets.
int GLOB = 0;
const int N_iter = 48;
const int Nlog = 10;
const int N = 1 << Nlog;
static int ARR[N];
static Mutex *MUs[Nlog];

void Worker() {
  for (int i = 0; i < N; i++) {
    // ARR[i] is protected by MUs from i-th subset of all MUs
    for (int j = 0; j < Nlog; j++) if (i & (1 << j)) MUs[j]->Lock();
    CHECK(ARR[i] == 0);
    for (int j = 0; j < Nlog; j++) if (i & (1 << j)) MUs[j]->Unlock();
  }
}

void Run() {
  printf("test34:\n");
  for (int iter = 0; iter < N_iter; iter++) {
    // Fresh mutexes each iteration to multiply the number of lock sets.
    for (int i = 0; i < Nlog; i++) {
      MUs[i] = new Mutex;
    }
    MyThreadArray t(Worker, Worker);
    t.Start();
    t.Join();
    for (int i = 0; i < Nlog; i++) {
      delete MUs[i];
    }
    printf("------------------\n");
  }
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST2(Run, 34, STABILITY|EXCLUDE_FROM_ALL);
}  // namespace test34


// test35: PERF. Lots of mutexes and lots of call to free(). {{{1
namespace test35 {
// Helgrind 3.3.0 has very slow in shadow_mem_make_NoAccess(). Fixed locally.
// With the fix helgrind runs this test about a minute.
// Without the fix -- about 5 minutes. (on c2d 2.4GHz).
//
// TODO: need to figure out the best way for performance testing.
1770int **ARR; 1771const int N_mu = 25000; 1772const int N_free = 48000; 1773 1774void Worker() { 1775 for (int i = 0; i < N_free; i++) 1776 CHECK(777 == *ARR[i]); 1777} 1778 1779void Run() { 1780 printf("test35:\n"); 1781 std::vector<Mutex*> mus; 1782 1783 ARR = new int *[N_free]; 1784 for (int i = 0; i < N_free; i++) { 1785 const int c = N_free / N_mu; 1786 if ((i % c) == 0) { 1787 mus.push_back(new Mutex); 1788 mus.back()->Lock(); 1789 mus.back()->Unlock(); 1790 } 1791 ARR[i] = new int(777); 1792 } 1793 1794 // Need to put all ARR[i] into shared state in order 1795 // to trigger the performance bug. 1796 MyThreadArray t(Worker, Worker); 1797 t.Start(); 1798 t.Join(); 1799 1800 for (int i = 0; i < N_free; i++) delete ARR[i]; 1801 delete [] ARR; 1802 1803 for (size_t i = 0; i < mus.size(); i++) { 1804 delete mus[i]; 1805 } 1806} 1807REGISTER_TEST2(Run, 35, PERFORMANCE|EXCLUDE_FROM_ALL); 1808} // namespace test35 1809 1810 1811// test36: TN. Synchronization via Mutex, then PCQ. 3 threads. W/W {{{1 1812namespace test36 { 1813// variation of test28 (W/W instead of W/R) 1814 1815// Putter1: Getter: Putter2: 1816// 1. MU.Lock(); A. MU.Lock() 1817// 2. write(GLOB) B. write(GLOB) 1818// 3. MU.Unlock() C. MU.Unlock() 1819// 4. Q.Put() ---------\ /------- D. Q.Put() 1820// 5. MU1.Lock() \-------> a. Q.Get() / E. MU1.Lock() 1821// 6. MU.Lock() b. Q.Get() <---------/ F. MU.Lock() 1822// 7. write(GLOB) G. write(GLOB) 1823// 8. MU.Unlock() H. MU.Unlock() 1824// 9. MU1.Unlock() (sleep) I. MU1.Unlock() 1825// c. MU1.Lock() 1826// d. write(GLOB) 1827// e. 
MU1.Unlock() 1828ProducerConsumerQueue Q(INT_MAX); 1829int GLOB = 0; 1830Mutex MU, MU1; 1831 1832void Putter() { 1833 MU.Lock(); 1834 GLOB++; 1835 MU.Unlock(); 1836 1837 Q.Put(NULL); 1838 1839 MU1.Lock(); 1840 MU.Lock(); 1841 GLOB++; 1842 MU.Unlock(); 1843 MU1.Unlock(); 1844} 1845 1846void Getter() { 1847 Q.Get(); 1848 Q.Get(); 1849 usleep(100000); 1850 MU1.Lock(); 1851 GLOB++; 1852 MU1.Unlock(); 1853} 1854 1855void Run() { 1856 printf("test36: negative \n"); 1857 MyThreadArray t(Getter, Putter, Putter); 1858 t.Start(); 1859 t.Join(); 1860 printf("\tGLOB=%d\n", GLOB); 1861} 1862REGISTER_TEST(Run, 36); 1863} // namespace test36 1864 1865 1866// test37: TN. Simple synchronization (write vs read). {{{1 1867namespace test37 { 1868int GLOB = 0; 1869Mutex MU; 1870// Similar to test10, but properly locked. 1871// Writer: Reader: 1872// 1. MU.Lock() 1873// 2. write 1874// 3. MU.Unlock() 1875// a. MU.Lock() 1876// b. read 1877// c. MU.Unlock(); 1878 1879void Writer() { 1880 MU.Lock(); 1881 GLOB = 3; 1882 MU.Unlock(); 1883} 1884void Reader() { 1885 usleep(100000); 1886 MU.Lock(); 1887 CHECK(GLOB != -777); 1888 MU.Unlock(); 1889} 1890 1891void Run() { 1892 printf("test37: negative\n"); 1893 MyThreadArray t(Writer, Reader); 1894 t.Start(); 1895 t.Join(); 1896 printf("\tGLOB=%d\n", GLOB); 1897} 1898REGISTER_TEST(Run, 37); 1899} // namespace test37 1900 1901 1902// test38: TN. Synchronization via Mutexes and PCQ. 4 threads. W/W {{{1 1903namespace test38 { 1904// Fusion of test29 and test36. 
1905 1906// Putter1: Putter2: Getter1: Getter2: 1907// MU1.Lock() MU1.Lock() 1908// write(GLOB) write(GLOB) 1909// MU1.Unlock() MU1.Unlock() 1910// Q1.Put() Q2.Put() 1911// Q1.Put() Q2.Put() 1912// MU1.Lock() MU1.Lock() 1913// MU2.Lock() MU2.Lock() 1914// write(GLOB) write(GLOB) 1915// MU2.Unlock() MU2.Unlock() 1916// MU1.Unlock() MU1.Unlock() sleep sleep 1917// Q1.Get() Q1.Get() 1918// Q2.Get() Q2.Get() 1919// MU2.Lock() MU2.Lock() 1920// write(GLOB) write(GLOB) 1921// MU2.Unlock() MU2.Unlock() 1922// 1923 1924 1925ProducerConsumerQueue *Q1, *Q2; 1926int GLOB = 0; 1927Mutex MU, MU1, MU2; 1928 1929void Putter(ProducerConsumerQueue *q) { 1930 MU1.Lock(); 1931 GLOB++; 1932 MU1.Unlock(); 1933 1934 q->Put(NULL); 1935 q->Put(NULL); 1936 1937 MU1.Lock(); 1938 MU2.Lock(); 1939 GLOB++; 1940 MU2.Unlock(); 1941 MU1.Unlock(); 1942 1943} 1944 1945void Putter1() { Putter(Q1); } 1946void Putter2() { Putter(Q2); } 1947 1948void Getter() { 1949 usleep(100000); 1950 Q1->Get(); 1951 Q2->Get(); 1952 1953 MU2.Lock(); 1954 GLOB++; 1955 MU2.Unlock(); 1956 1957 usleep(48000); // TODO: remove this when FP in test32 is fixed. 1958} 1959 1960void Run() { 1961 printf("test38: negative\n"); 1962 Q1 = new ProducerConsumerQueue(INT_MAX); 1963 Q2 = new ProducerConsumerQueue(INT_MAX); 1964 MyThreadArray t(Getter, Getter, Putter1, Putter2); 1965 t.Start(); 1966 t.Join(); 1967 printf("\tGLOB=%d\n", GLOB); 1968 delete Q1; 1969 delete Q2; 1970} 1971REGISTER_TEST(Run, 38); 1972} // namespace test38 1973 1974// test39: FP. Barrier. {{{1 1975namespace test39 { 1976#ifndef NO_BARRIER 1977// Same as test17 but uses Barrier class (pthread_barrier_t). 1978int GLOB = 0; 1979const int N_threads = 3; 1980Barrier barrier(N_threads); 1981Mutex MU; 1982 1983void Worker() { 1984 MU.Lock(); 1985 GLOB++; 1986 MU.Unlock(); 1987 barrier.Block(); 1988 CHECK(GLOB == N_threads); 1989} 1990void Run() { 1991 ANNOTATE_TRACE_MEMORY(&GLOB); 1992// ANNOTATE_EXPECT_RACE(&GLOB, "test39. FP. Fixed by MSMProp1. 
Barrier."); 1993 printf("test39: negative\n"); 1994 { 1995 ThreadPool pool(N_threads); 1996 pool.StartWorkers(); 1997 for (int i = 0; i < N_threads; i++) { 1998 pool.Add(NewCallback(Worker)); 1999 } 2000 } // all folks are joined here. 2001 printf("\tGLOB=%d\n", GLOB); 2002} 2003REGISTER_TEST(Run, 39); 2004#endif // NO_BARRIER 2005} // namespace test39 2006 2007 2008// test40: FP. Synchronization via Mutexes and PCQ. 4 threads. W/W {{{1 2009namespace test40 { 2010// Similar to test38 but with different order of events (due to sleep). 2011 2012// Putter1: Putter2: Getter1: Getter2: 2013// MU1.Lock() MU1.Lock() 2014// write(GLOB) write(GLOB) 2015// MU1.Unlock() MU1.Unlock() 2016// Q1.Put() Q2.Put() 2017// Q1.Put() Q2.Put() 2018// Q1.Get() Q1.Get() 2019// Q2.Get() Q2.Get() 2020// MU2.Lock() MU2.Lock() 2021// write(GLOB) write(GLOB) 2022// MU2.Unlock() MU2.Unlock() 2023// 2024// MU1.Lock() MU1.Lock() 2025// MU2.Lock() MU2.Lock() 2026// write(GLOB) write(GLOB) 2027// MU2.Unlock() MU2.Unlock() 2028// MU1.Unlock() MU1.Unlock() 2029 2030 2031ProducerConsumerQueue *Q1, *Q2; 2032int GLOB = 0; 2033Mutex MU, MU1, MU2; 2034 2035void Putter(ProducerConsumerQueue *q) { 2036 MU1.Lock(); 2037 GLOB++; 2038 MU1.Unlock(); 2039 2040 q->Put(NULL); 2041 q->Put(NULL); 2042 usleep(100000); 2043 2044 MU1.Lock(); 2045 MU2.Lock(); 2046 GLOB++; 2047 MU2.Unlock(); 2048 MU1.Unlock(); 2049 2050} 2051 2052void Putter1() { Putter(Q1); } 2053void Putter2() { Putter(Q2); } 2054 2055void Getter() { 2056 Q1->Get(); 2057 Q2->Get(); 2058 2059 MU2.Lock(); 2060 GLOB++; 2061 MU2.Unlock(); 2062 2063 usleep(48000); // TODO: remove this when FP in test32 is fixed. 2064} 2065 2066void Run() { 2067// ANNOTATE_EXPECT_RACE(&GLOB, "test40. FP. Fixed by MSMProp1. 
Complex Stuff."); 2068 printf("test40: negative\n"); 2069 Q1 = new ProducerConsumerQueue(INT_MAX); 2070 Q2 = new ProducerConsumerQueue(INT_MAX); 2071 MyThreadArray t(Getter, Getter, Putter1, Putter2); 2072 t.Start(); 2073 t.Join(); 2074 printf("\tGLOB=%d\n", GLOB); 2075 delete Q1; 2076 delete Q2; 2077} 2078REGISTER_TEST(Run, 40); 2079} // namespace test40 2080 2081// test41: TN. Test for race that appears when loading a dynamic symbol. {{{1 2082namespace test41 { 2083void Worker() { 2084 ANNOTATE_NO_OP(NULL); // An empty function, loaded from dll. 2085} 2086void Run() { 2087 printf("test41: negative\n"); 2088 MyThreadArray t(Worker, Worker, Worker); 2089 t.Start(); 2090 t.Join(); 2091} 2092REGISTER_TEST2(Run, 41, FEATURE|NEEDS_ANNOTATIONS); 2093} // namespace test41 2094 2095 2096// test42: TN. Using the same cond var several times. {{{1 2097namespace test42 { 2098int GLOB = 0; 2099int COND = 0; 2100int N_threads = 3; 2101Mutex MU; 2102 2103void Worker1() { 2104 GLOB=1; 2105 2106 MU.Lock(); 2107 COND = 1; 2108 CV.Signal(); 2109 MU.Unlock(); 2110 2111 MU.Lock(); 2112 while (COND != 0) 2113 CV.Wait(&MU); 2114 ANNOTATE_CONDVAR_LOCK_WAIT(&CV, &MU); 2115 MU.Unlock(); 2116 2117 GLOB=3; 2118 2119} 2120 2121void Worker2() { 2122 2123 MU.Lock(); 2124 while (COND != 1) 2125 CV.Wait(&MU); 2126 ANNOTATE_CONDVAR_LOCK_WAIT(&CV, &MU); 2127 MU.Unlock(); 2128 2129 GLOB=2; 2130 2131 MU.Lock(); 2132 COND = 0; 2133 CV.Signal(); 2134 MU.Unlock(); 2135 2136} 2137 2138void Run() { 2139// ANNOTATE_EXPECT_RACE(&GLOB, "test42. TN. debugging."); 2140 printf("test42: negative\n"); 2141 MyThreadArray t(Worker1, Worker2); 2142 t.Start(); 2143 t.Join(); 2144 printf("\tGLOB=%d\n", GLOB); 2145} 2146REGISTER_TEST2(Run, 42, FEATURE|NEEDS_ANNOTATIONS); 2147} // namespace test42 2148 2149 2150 2151// test43: TN. {{{1 2152namespace test43 { 2153// 2154// Putter: Getter: 2155// 1. write 2156// 2. Q.Put() --\ . 2157// 3. read \--> a. Q.Get() 2158// b. 
read 2159int GLOB = 0; 2160ProducerConsumerQueue Q(INT_MAX); 2161void Putter() { 2162 GLOB = 1; 2163 Q.Put(NULL); 2164 CHECK(GLOB == 1); 2165} 2166void Getter() { 2167 Q.Get(); 2168 usleep(100000); 2169 CHECK(GLOB == 1); 2170} 2171void Run() { 2172 printf("test43: negative\n"); 2173 MyThreadArray t(Putter, Getter); 2174 t.Start(); 2175 t.Join(); 2176 printf("\tGLOB=%d\n", GLOB); 2177} 2178REGISTER_TEST(Run, 43) 2179} // namespace test43 2180 2181 2182// test44: FP. {{{1 2183namespace test44 { 2184// 2185// Putter: Getter: 2186// 1. read 2187// 2. Q.Put() --\ . 2188// 3. MU.Lock() \--> a. Q.Get() 2189// 4. write 2190// 5. MU.Unlock() 2191// b. MU.Lock() 2192// c. write 2193// d. MU.Unlock(); 2194int GLOB = 0; 2195Mutex MU; 2196ProducerConsumerQueue Q(INT_MAX); 2197void Putter() { 2198 CHECK(GLOB == 0); 2199 Q.Put(NULL); 2200 MU.Lock(); 2201 GLOB = 1; 2202 MU.Unlock(); 2203} 2204void Getter() { 2205 Q.Get(); 2206 usleep(100000); 2207 MU.Lock(); 2208 GLOB = 1; 2209 MU.Unlock(); 2210} 2211void Run() { 2212// ANNOTATE_EXPECT_RACE(&GLOB, "test44. FP. Fixed by MSMProp1."); 2213 printf("test44: negative\n"); 2214 MyThreadArray t(Putter, Getter); 2215 t.Start(); 2216 t.Join(); 2217 printf("\tGLOB=%d\n", GLOB); 2218} 2219REGISTER_TEST(Run, 44) 2220} // namespace test44 2221 2222 2223// test45: TN. {{{1 2224namespace test45 { 2225// 2226// Putter: Getter: 2227// 1. read 2228// 2. Q.Put() --\ . 2229// 3. MU.Lock() \--> a. Q.Get() 2230// 4. write 2231// 5. MU.Unlock() 2232// b. MU.Lock() 2233// c. read 2234// d. 
MU.Unlock(); 2235int GLOB = 0; 2236Mutex MU; 2237ProducerConsumerQueue Q(INT_MAX); 2238void Putter() { 2239 CHECK(GLOB == 0); 2240 Q.Put(NULL); 2241 MU.Lock(); 2242 GLOB++; 2243 MU.Unlock(); 2244} 2245void Getter() { 2246 Q.Get(); 2247 usleep(100000); 2248 MU.Lock(); 2249 CHECK(GLOB <= 1); 2250 MU.Unlock(); 2251} 2252void Run() { 2253 printf("test45: negative\n"); 2254 MyThreadArray t(Putter, Getter); 2255 t.Start(); 2256 t.Join(); 2257 printf("\tGLOB=%d\n", GLOB); 2258} 2259REGISTER_TEST(Run, 45) 2260} // namespace test45 2261 2262 2263// test46: FN. {{{1 2264namespace test46 { 2265// 2266// First: Second: 2267// 1. write 2268// 2. MU.Lock() 2269// 3. write 2270// 4. MU.Unlock() (sleep) 2271// a. MU.Lock() 2272// b. write 2273// c. MU.Unlock(); 2274int GLOB = 0; 2275Mutex MU; 2276void First() { 2277 GLOB++; 2278 MU.Lock(); 2279 GLOB++; 2280 MU.Unlock(); 2281} 2282void Second() { 2283 usleep(480000); 2284 MU.Lock(); 2285 GLOB++; 2286 MU.Unlock(); 2287 2288 // just a print. 2289 // If we move it to Run() we will get report in MSMHelgrind 2290 // due to its false positive (test32). 2291 MU.Lock(); 2292 printf("\tGLOB=%d\n", GLOB); 2293 MU.Unlock(); 2294} 2295void Run() { 2296 ANNOTATE_TRACE_MEMORY(&GLOB); 2297 MyThreadArray t(First, Second); 2298 t.Start(); 2299 t.Join(); 2300} 2301REGISTER_TEST(Run, 46) 2302} // namespace test46 2303 2304 2305// test47: TP. Not detected by pure happens-before detectors. {{{1 2306namespace test47 { 2307// A true race that can not be detected by a pure happens-before 2308// race detector. 2309// 2310// First: Second: 2311// 1. write 2312// 2. MU.Lock() 2313// 3. MU.Unlock() (sleep) 2314// a. MU.Lock() 2315// b. MU.Unlock(); 2316// c. 
write 2317int GLOB = 0; 2318Mutex MU; 2319void First() { 2320 GLOB=1; 2321 MU.Lock(); 2322 MU.Unlock(); 2323} 2324void Second() { 2325 usleep(480000); 2326 MU.Lock(); 2327 MU.Unlock(); 2328 GLOB++; 2329} 2330void Run() { 2331 FAST_MODE_INIT(&GLOB); 2332 if (!Tsan_PureHappensBefore()) 2333 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test47. TP. Not detected by pure HB."); 2334 printf("test47: positive\n"); 2335 MyThreadArray t(First, Second); 2336 t.Start(); 2337 t.Join(); 2338 printf("\tGLOB=%d\n", GLOB); 2339} 2340REGISTER_TEST(Run, 47) 2341} // namespace test47 2342 2343 2344// test48: FN. Simple race (single write vs multiple reads). {{{1 2345namespace test48 { 2346int GLOB = 0; 2347// same as test10 but with single writer and multiple readers 2348// A simple data race between single writer and multiple readers. 2349// Write happens before Reads (enforced by sleep(1)), 2350 2351// 2352// Writer: Readers: 2353// 1. write(GLOB) a. sleep(long enough so that GLOB 2354// is most likely initialized by Writer) 2355// b. read(GLOB) 2356// 2357// 2358// Eraser algorithm does not detect the race here, 2359// see Section 2.2 of http://citeseer.ist.psu.edu/savage97eraser.html. 2360// 2361void Writer() { 2362 GLOB = 3; 2363} 2364void Reader() { 2365 usleep(100000); 2366 CHECK(GLOB != -777); 2367} 2368 2369void Run() { 2370 FAST_MODE_INIT(&GLOB); 2371 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test48. TP. FN in MSMHelgrind."); 2372 printf("test48: positive\n"); 2373 MyThreadArray t(Writer, Reader,Reader,Reader); 2374 t.Start(); 2375 t.Join(); 2376 printf("\tGLOB=%d\n", GLOB); 2377} 2378REGISTER_TEST(Run, 48) 2379} // namespace test48 2380 2381 2382// test49: FN. Simple race (single write vs multiple reads). {{{1 2383namespace test49 { 2384int GLOB = 0; 2385// same as test10 but with multiple read operations done by a single reader 2386// A simple data race between writer and readers. 2387// Write happens before Read (enforced by sleep(1)), 2388// 2389// Writer: Reader: 2390// 1. 
// 1. write(GLOB)             a. sleep(long enough so that GLOB
//                               is most likely initialized by Writer)
//                            b. read(GLOB)
//                            c. read(GLOB)
//                            d. read(GLOB)
//                            e. read(GLOB)
//
//
// Eraser algorithm does not detect the race here,
// see Section 2.2 of http://citeseer.ist.psu.edu/savage97eraser.html.
//
void Writer() {
  GLOB = 3;
}
void Reader() {
  usleep(100000);
  // Four consecutive unsynchronized reads of the same racy location.
  CHECK(GLOB != -777);
  CHECK(GLOB != -777);
  CHECK(GLOB != -777);
  CHECK(GLOB != -777);
}

void Run() {
  FAST_MODE_INIT(&GLOB);
  ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test49. TP. FN in MSMHelgrind.");
  printf("test49: positive\n");
  MyThreadArray t(Writer, Reader);
  t.Start();
  t.Join();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST(Run, 49);
}  // namespace test49


// test50: TP. Synchronization via CondVar. {{{1
namespace test50 {
int GLOB = 0;
Mutex MU;
// Two last write accesses to GLOB are not synchronized
//
// Waiter:                      Waker:
// 1. COND = 0
// 2. Start(Waker)
// 3. MU.Lock()                 a. write(GLOB)
//                              b. MU.Lock()
//                              c. COND = 1
//                         /--- d. CV.Signal()
// 4. while(COND != 1)    /     e. MU.Unlock()
//      CV.Wait(MU) <----/
// 5. MU.Unlock()
// 6. write(GLOB)               f. MU.Lock()
//                              g. write(GLOB)
//                              h. MU.Unlock()


void Waker() {
  usleep(100000);  // Make sure the waiter blocks.

  GLOB = 1;

  MU.Lock();
  COND = 1;
  CV.Signal();
  MU.Unlock();

  // This second write (g) has no ordering with the Waiter's write (6):
  // that pair is the true positive.
  usleep(100000);
  MU.Lock();
  GLOB = 3;
  MU.Unlock();
}

void Waiter() {
  ThreadPool pool(1);
  pool.StartWorkers();
  COND = 0;
  pool.Add(NewCallback(Waker));

  MU.Lock();
  while(COND != 1)
    CV.Wait(&MU);
  ANNOTATE_CONDVAR_LOCK_WAIT(&CV, &MU);
  MU.Unlock();

  GLOB = 2;
}
void Run() {
  FAST_MODE_INIT(&GLOB);
  ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test50. TP.");
  printf("test50: positive\n");
  Waiter();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST2(Run, 50, FEATURE|NEEDS_ANNOTATIONS);
}  // namespace test50


// test51: TP. Synchronization via CondVar: problem with several signals. {{{1
namespace test51 {
int GLOB = 0;
int COND = 0;
Mutex MU;


// scheduler dependent results because of several signals
// second signal will be lost
//
// Waiter:                      Waker:
// 1. Start(Waker)
// 2. MU.Lock()
// 3. while(COND)
//      CV.Wait(MU)<-\         .
// 4. MU.Unlock()     \        .
// 5. write(GLOB)      \       a. write(GLOB)
//                      \      b. MU.Lock()
//                       \     c. COND = 1
//                        \--- d. CV.Signal()
//                             e. MU.Unlock()
//
//                             f. write(GLOB)
//
//                             g. MU.Lock()
//                             h. COND = 1
//                    LOST<--- i. CV.Signal()
//                             j. MU.Unlock()

void Waker() {

  usleep(10000);  // Make sure the waiter blocks.

  GLOB = 1;

  MU.Lock();
  COND = 1;
  CV.Signal();
  MU.Unlock();

  usleep(10000);  // Make sure the waiter is signalled.

  GLOB = 2;       // (f) races with the Waiter's write (5)

  MU.Lock();
  COND = 1;       // COND is already 1, so the waiter never re-blocks and
  CV.Signal();    //Lost Signal
  MU.Unlock();
}

void Waiter() {

  ThreadPool pool(1);
  pool.StartWorkers();
  pool.Add(NewCallback(Waker));

  MU.Lock();
  while(COND != 1)
    CV.Wait(&MU);
  MU.Unlock();


  GLOB = 3;
}
void Run() {
  FAST_MODE_INIT(&GLOB);
  ANNOTATE_EXPECT_RACE(&GLOB, "test51. TP.");
  printf("test51: positive\n");
  Waiter();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST(Run, 51);
}  // namespace test51


// test52: TP. Synchronization via CondVar: problem with several signals. {{{1
namespace test52 {
int GLOB = 0;
int COND = 0;
Mutex MU;

// same as test51 but the first signal will be lost
// scheduler dependent results because of several signals
//
// Waiter:                      Waker:
// 1.
Start(Waker) 2573// a. write(GLOB) 2574// b. MU.Lock() 2575// c. COND = 1 2576// LOST<---- d. CV.Signal() 2577// e. MU.Unlock() 2578// 2579// 2. MU.Lock() 2580// 3. while(COND) 2581// CV.Wait(MU)<-\ . 2582// 4. MU.Unlock() \ f. write(GLOB) 2583// 5. write(GLOB) \ . 2584// \ g. MU.Lock() 2585// \ h. COND = 1 2586// \--- i. CV.Signal() 2587// j. MU.Unlock() 2588 2589void Waker() { 2590 2591 GLOB = 1; 2592 2593 MU.Lock(); 2594 COND = 1; 2595 CV.Signal(); //lost signal 2596 MU.Unlock(); 2597 2598 usleep(20000); // Make sure the waiter blocks 2599 2600 GLOB = 2; 2601 2602 MU.Lock(); 2603 COND = 1; 2604 CV.Signal(); 2605 MU.Unlock(); 2606} 2607 2608void Waiter() { 2609 ThreadPool pool(1); 2610 pool.StartWorkers(); 2611 pool.Add(NewCallback(Waker)); 2612 2613 usleep(10000); // Make sure the first signal will be lost 2614 2615 MU.Lock(); 2616 while(COND != 1) 2617 CV.Wait(&MU); 2618 MU.Unlock(); 2619 2620 GLOB = 3; 2621} 2622void Run() { 2623 FAST_MODE_INIT(&GLOB); 2624 ANNOTATE_EXPECT_RACE(&GLOB, "test52. TP."); 2625 printf("test52: positive\n"); 2626 Waiter(); 2627 printf("\tGLOB=%d\n", GLOB); 2628} 2629REGISTER_TEST(Run, 52); 2630} // namespace test52 2631 2632 2633// test53: FP. Synchronization via implicit semaphore. {{{1 2634namespace test53 { 2635// Correctly synchronized test, but the common lockset is empty. 2636// The variable FLAG works as an implicit semaphore. 2637// MSMHelgrind still does not complain since it does not maintain the lockset 2638// at the exclusive state. But MSMProp1 does complain. 2639// See also test54. 2640// 2641// 2642// Initializer: Users 2643// 1. MU1.Lock() 2644// 2. write(GLOB) 2645// 3. FLAG = true 2646// 4. MU1.Unlock() 2647// a. MU1.Lock() 2648// b. f = FLAG; 2649// c. MU1.Unlock() 2650// d. if (!f) goto a. 2651// e. MU2.Lock() 2652// f. write(GLOB) 2653// g. 
MU2.Unlock() 2654// 2655 2656int GLOB = 0; 2657bool FLAG = false; 2658Mutex MU1, MU2; 2659 2660void Initializer() { 2661 MU1.Lock(); 2662 GLOB = 1000; 2663 FLAG = true; 2664 MU1.Unlock(); 2665 usleep(100000); // just in case 2666} 2667 2668void User() { 2669 bool f = false; 2670 while(!f) { 2671 MU1.Lock(); 2672 f = FLAG; 2673 MU1.Unlock(); 2674 usleep(10000); 2675 } 2676 // at this point Initializer will not access GLOB again 2677 MU2.Lock(); 2678 CHECK(GLOB >= 1000); 2679 GLOB++; 2680 MU2.Unlock(); 2681} 2682 2683void Run() { 2684 FAST_MODE_INIT(&GLOB); 2685 if (!Tsan_PureHappensBefore()) 2686 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test53. FP. Implicit semaphore"); 2687 printf("test53: FP. false positive, Implicit semaphore\n"); 2688 MyThreadArray t(Initializer, User, User); 2689 t.Start(); 2690 t.Join(); 2691 printf("\tGLOB=%d\n", GLOB); 2692} 2693REGISTER_TEST(Run, 53) 2694} // namespace test53 2695 2696 2697// test54: TN. Synchronization via implicit semaphore. Annotated {{{1 2698namespace test54 { 2699// Same as test53, but annotated. 2700int GLOB = 0; 2701bool FLAG = false; 2702Mutex MU1, MU2; 2703 2704void Initializer() { 2705 MU1.Lock(); 2706 GLOB = 1000; 2707 FLAG = true; 2708 ANNOTATE_CONDVAR_SIGNAL(&GLOB); 2709 MU1.Unlock(); 2710 usleep(100000); // just in case 2711} 2712 2713void User() { 2714 bool f = false; 2715 while(!f) { 2716 MU1.Lock(); 2717 f = FLAG; 2718 MU1.Unlock(); 2719 usleep(10000); 2720 } 2721 // at this point Initializer will not access GLOB again 2722 ANNOTATE_CONDVAR_WAIT(&GLOB); 2723 MU2.Lock(); 2724 CHECK(GLOB >= 1000); 2725 GLOB++; 2726 MU2.Unlock(); 2727} 2728 2729void Run() { 2730 printf("test54: negative\n"); 2731 MyThreadArray t(Initializer, User, User); 2732 t.Start(); 2733 t.Join(); 2734 printf("\tGLOB=%d\n", GLOB); 2735} 2736REGISTER_TEST2(Run, 54, FEATURE|NEEDS_ANNOTATIONS) 2737} // namespace test54 2738 2739 2740// test55: FP. Synchronization with TryLock. 
Not easy for race detectors {{{1 2741namespace test55 { 2742// "Correct" synchronization with TryLock and Lock. 2743// 2744// This scheme is actually very risky. 2745// It is covered in detail in this video: 2746// http://youtube.com/watch?v=mrvAqvtWYb4 (slide 36, near 50-th minute). 2747int GLOB = 0; 2748Mutex MU; 2749 2750void Worker_Lock() { 2751 GLOB = 1; 2752 MU.Lock(); 2753} 2754 2755void Worker_TryLock() { 2756 while (true) { 2757 if (!MU.TryLock()) { 2758 MU.Unlock(); 2759 break; 2760 } 2761 else 2762 MU.Unlock(); 2763 usleep(100); 2764 } 2765 GLOB = 2; 2766} 2767 2768void Run() { 2769 printf("test55:\n"); 2770 MyThreadArray t(Worker_Lock, Worker_TryLock); 2771 t.Start(); 2772 t.Join(); 2773 printf("\tGLOB=%d\n", GLOB); 2774} 2775REGISTER_TEST2(Run, 55, FEATURE|EXCLUDE_FROM_ALL); 2776} // namespace test55 2777 2778 2779 2780// test56: TP. Use of ANNOTATE_BENIGN_RACE. {{{1 2781namespace test56 { 2782// For whatever reason the user wants to treat 2783// a race on GLOB as a benign race. 2784int GLOB = 0; 2785int GLOB2 = 0; 2786 2787void Worker() { 2788 GLOB++; 2789} 2790 2791void Run() { 2792 ANNOTATE_BENIGN_RACE(&GLOB, "test56. Use of ANNOTATE_BENIGN_RACE."); 2793 ANNOTATE_BENIGN_RACE(&GLOB2, "No race. The tool should be silent"); 2794 printf("test56: positive\n"); 2795 MyThreadArray t(Worker, Worker, Worker, Worker); 2796 t.Start(); 2797 t.Join(); 2798 printf("\tGLOB=%d\n", GLOB); 2799} 2800REGISTER_TEST2(Run, 56, FEATURE|NEEDS_ANNOTATIONS) 2801} // namespace test56 2802 2803 2804// test57: TN: Correct use of atomics. 
// {{{1
namespace test57 {
// TN: two writers each perform 10 atomic increments of GLOB; the readers
// spin until GLOB reaches 20 (2 writers x 10 increments).  The atomic ops
// provide the synchronization, so no race should be reported.
int GLOB = 0;
void Writer() {
  for (int i = 0; i < 10; i++) {
    AtomicIncrement(&GLOB, 1);
    usleep(1000);
  }
}
void Reader() {
  // Spin-wait until both writers are done.
  while (GLOB < 20) usleep(1000);
}
void Run() {
  printf("test57: negative\n");
  MyThreadArray t(Writer, Writer, Reader, Reader);
  t.Start();
  t.Join();
  CHECK(GLOB == 20);
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST(Run, 57)
}  // namespace test57


// test58: TN. User defined synchronization. {{{1
namespace test58 {
int GLOB1 = 1;
int GLOB2 = 2;
int FLAG1 = 0;
int FLAG2 = 0;

// Correctly synchronized test, but the common lockset is empty.
// The variables FLAG1 and FLAG2 are used for synchronization and as
// temporary variables for swapping two global values.
// Such kind of synchronization is rarely used (Excluded from all tests??).

void Worker2() {
  FLAG1=GLOB2;

  // Busy-wait until Worker1 publishes GLOB1 through FLAG2.
  while(!FLAG2)
  ;
  GLOB2=FLAG2;
}

void Worker1() {
  FLAG2=GLOB1;

  // Busy-wait until Worker2 publishes GLOB2 through FLAG1.
  while(!FLAG1)
  ;
  GLOB1=FLAG1;
}

void Run() {
  printf("test58:\n");
  MyThreadArray t(Worker1, Worker2);
  t.Start();
  t.Join();
  printf("\tGLOB1=%d\n", GLOB1);
  printf("\tGLOB2=%d\n", GLOB2);
}
REGISTER_TEST2(Run, 58, FEATURE|EXCLUDE_FROM_ALL)
}  // namespace test58


// test59: TN. User defined synchronization.
// Annotated {{{1
namespace test59 {
int COND1 = 0;
int COND2 = 0;
int GLOB1 = 1;
int GLOB2 = 2;
int FLAG1 = 0;
int FLAG2 = 0;
// same as test 58 but annotated

void Worker2() {
  FLAG1=GLOB2;
  ANNOTATE_CONDVAR_SIGNAL(&COND2);
  while(!FLAG2) usleep(1);
  ANNOTATE_CONDVAR_WAIT(&COND1);
  GLOB2=FLAG2;
}

void Worker1() {
  FLAG2=GLOB1;
  ANNOTATE_CONDVAR_SIGNAL(&COND1);
  while(!FLAG1) usleep(1);
  ANNOTATE_CONDVAR_WAIT(&COND2);
  GLOB1=FLAG1;
}

void Run() {
  printf("test59: negative\n");
  // The spinning reads of FLAG1/FLAG2 race with the writes by design;
  // mark them benign so only the annotated signal/wait arcs matter.
  ANNOTATE_BENIGN_RACE(&FLAG1, "synchronization via 'safe' race");
  ANNOTATE_BENIGN_RACE(&FLAG2, "synchronization via 'safe' race");
  MyThreadArray t(Worker1, Worker2);
  t.Start();
  t.Join();
  printf("\tGLOB1=%d\n", GLOB1);
  printf("\tGLOB2=%d\n", GLOB2);
}
REGISTER_TEST2(Run, 59, FEATURE|NEEDS_ANNOTATIONS)
}  // namespace test59


// test60: TN. Correct synchronization using signal-wait {{{1
namespace test60 {
int COND1 = 0;
int COND2 = 0;
int GLOB1 = 1;
int GLOB2 = 2;
int FLAG2 = 0;
int FLAG1 = 0;
Mutex MU;
// same as test 59 but synchronized with signal-wait.

void Worker2() {
  FLAG1=GLOB2;

  // Signal Worker1 that FLAG1 (carrying GLOB2) is published.
  MU.Lock();
  COND1 = 1;
  CV.Signal();
  MU.Unlock();

  // Wait until Worker1 has published FLAG2.
  MU.Lock();
  while(COND2 != 1)
    CV.Wait(&MU);
  ANNOTATE_CONDVAR_LOCK_WAIT(&CV, &MU);
  MU.Unlock();

  GLOB2=FLAG2;
}

void Worker1() {
  FLAG2=GLOB1;

  // Signal Worker2 that FLAG2 (carrying GLOB1) is published.
  MU.Lock();
  COND2 = 1;
  CV.Signal();
  MU.Unlock();

  // Wait until Worker2 has published FLAG1.
  MU.Lock();
  while(COND1 != 1)
    CV.Wait(&MU);
  ANNOTATE_CONDVAR_LOCK_WAIT(&CV, &MU);
  MU.Unlock();

  GLOB1=FLAG1;
}

void Run() {
  printf("test60: negative\n");
  MyThreadArray t(Worker1, Worker2);
  t.Start();
  t.Join();
  printf("\tGLOB1=%d\n", GLOB1);
  printf("\tGLOB2=%d\n", GLOB2);
}
REGISTER_TEST2(Run, 60, FEATURE|NEEDS_ANNOTATIONS)
}  // namespace test60


// test61: TN. Synchronization via Mutex as in happens-before, annotated. {{{1
namespace test61 {
Mutex MU;
int GLOB = 0;
int *P1 = NULL, *P2 = NULL;

// In this test Mutex lock/unlock operations introduce happens-before relation.
// We annotate the code so that MU is treated as in pure happens-before detector.


void Putter() {
  ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(&MU);
  MU.Lock();
  if (P1 == NULL) {
    P1 = &GLOB;
    *P1 = 1;
  }
  MU.Unlock();
}

void Getter() {
  // Spin under the mutex until Putter has published P1, then take
  // ownership of the pointer (P2) and write through it outside the lock.
  bool done = false;
  while (!done) {
    MU.Lock();
    if (P1) {
      done = true;
      P2 = P1;
      P1 = NULL;
    }
    MU.Unlock();
  }
  *P2 = 2;
}


void Run() {
  printf("test61: negative\n");
  MyThreadArray t(Putter, Getter);
  t.Start();
  t.Join();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST2(Run, 61, FEATURE|NEEDS_ANNOTATIONS)
}  // namespace test61


// test62: STAB. Create as many segments as possible. {{{1
namespace test62 {
// Helgrind 3.3.0 will fail as it has a hard limit of < 2^24 segments.
// A better scheme is to implement garbage collection for segments.
ProducerConsumerQueue Q(INT_MAX);
const int N = 1 << 22;

void Putter() {
  // Each Put creates a new happens-before segment in the detector.
  for (int i = 0; i < N; i++){
    if ((i % (N / 8)) == 0) {
      printf("i=%d\n", i);
    }
    Q.Put(NULL);
  }
}

void Getter() {
  for (int i = 0; i < N; i++)
    Q.Get();
}

void Run() {
  printf("test62:\n");
  MyThreadArray t(Putter, Getter);
  t.Start();
  t.Join();
}
REGISTER_TEST2(Run, 62, STABILITY|EXCLUDE_FROM_ALL)
}  // namespace test62


// test63: STAB. Create as many segments as possible and do it fast. {{{1
namespace test63 {
// Helgrind 3.3.0 will fail as it has a hard limit of < 2^24 segments.
// A better scheme is to implement garbage collection for segments.
const int N = 1 << 24;
int C = 0;

void Putter() {
  // SIGNAL alone (no queue round-trip) creates segments faster than test62.
  for (int i = 0; i < N; i++){
    if ((i % (N / 8)) == 0) {
      printf("i=%d\n", i);
    }
    ANNOTATE_CONDVAR_SIGNAL(&C);
  }
}

void Getter() {
}

void Run() {
  printf("test63:\n");
  MyThreadArray t(Putter, Getter);
  t.Start();
  t.Join();
}
REGISTER_TEST2(Run, 63, STABILITY|EXCLUDE_FROM_ALL)
}  // namespace test63


// test64: TP. T2 happens-before T3, but T1 is independent. Reads in T1/T2. {{{1
namespace test64 {
// True race between T1 and T3:
//
// T1:            T2:               T3:
// 1. read(GLOB)  (sleep)
//                a. read(GLOB)
//                b. Q.Put() -----> A. Q.Get()
//                                  B. write(GLOB)
//
//

int GLOB = 0;
ProducerConsumerQueue Q(INT_MAX);

void T1() {
  CHECK(GLOB == 0);
}

void T2() {
  usleep(100000);
  CHECK(GLOB == 0);
  Q.Put(NULL);
}

void T3() {
  // The queue creates a happens-before arc from T2 only; the write below
  // still races with the unsynchronized read in T1.
  Q.Get();
  GLOB = 1;
}


void Run() {
  FAST_MODE_INIT(&GLOB);
  ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test64: TP.");
  printf("test64: positive\n");
  MyThreadArray t(T1, T2, T3);
  t.Start();
  t.Join();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST(Run, 64)
}  // namespace test64


// test65: TP. T2 happens-before T3, but T1 is independent. Writes in T1/T2. {{{1
namespace test65 {
// Similar to test64.
// True race between T1 and T3:
//
// T1:             T2:               T3:
// 1. MU.Lock()
// 2. write(GLOB)
// 3. MU.Unlock()  (sleep)
//                 a. MU.Lock()
//                 b. write(GLOB)
//                 c. MU.Unlock()
//                 d. Q.Put() ----> A. Q.Get()
//                                  B. write(GLOB)
//
//

int GLOB = 0;
Mutex MU;
ProducerConsumerQueue Q(INT_MAX);

void T1() {
  MU.Lock();
  GLOB++;
  MU.Unlock();
}

void T2() {
  usleep(100000);
  MU.Lock();
  GLOB++;
  MU.Unlock();
  Q.Put(NULL);
}

void T3() {
  // Writes without holding MU: races with T1's locked write, because the
  // only happens-before arc reaching here comes from T2 via the queue.
  Q.Get();
  GLOB = 1;
}


void Run() {
  FAST_MODE_INIT(&GLOB);
  if (!Tsan_PureHappensBefore())
    ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test65. TP.");
  printf("test65: positive\n");
  MyThreadArray t(T1, T2, T3);
  t.Start();
  t.Join();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST(Run, 65)
}  // namespace test65


// test66: TN. Two separate pairs of signaller/waiter using the same CV.
{{{1 3172namespace test66 { 3173int GLOB1 = 0; 3174int GLOB2 = 0; 3175int C1 = 0; 3176int C2 = 0; 3177Mutex MU; 3178 3179void Signaller1() { 3180 GLOB1 = 1; 3181 MU.Lock(); 3182 C1 = 1; 3183 CV.Signal(); 3184 MU.Unlock(); 3185} 3186 3187void Signaller2() { 3188 GLOB2 = 1; 3189 usleep(100000); 3190 MU.Lock(); 3191 C2 = 1; 3192 CV.Signal(); 3193 MU.Unlock(); 3194} 3195 3196void Waiter1() { 3197 MU.Lock(); 3198 while (C1 != 1) CV.Wait(&MU); 3199 ANNOTATE_CONDVAR_WAIT(&CV); 3200 MU.Unlock(); 3201 GLOB1 = 2; 3202} 3203 3204void Waiter2() { 3205 MU.Lock(); 3206 while (C2 != 1) CV.Wait(&MU); 3207 ANNOTATE_CONDVAR_WAIT(&CV); 3208 MU.Unlock(); 3209 GLOB2 = 2; 3210} 3211 3212void Run() { 3213 printf("test66: negative\n"); 3214 MyThreadArray t(Signaller1, Signaller2, Waiter1, Waiter2); 3215 t.Start(); 3216 t.Join(); 3217 printf("\tGLOB=%d/%d\n", GLOB1, GLOB2); 3218} 3219REGISTER_TEST2(Run, 66, FEATURE|NEEDS_ANNOTATIONS) 3220} // namespace test66 3221 3222 3223// test67: FN. Race between Signaller1 and Waiter2 {{{1 3224namespace test67 { 3225// Similar to test66, but there is a real race here. 3226// 3227// Here we create a happens-before arc between Signaller1 and Waiter2 3228// even though there should be no such arc. 3229// However, it's probably improssible (or just very hard) to avoid it. 3230int GLOB = 0; 3231int C1 = 0; 3232int C2 = 0; 3233Mutex MU; 3234 3235void Signaller1() { 3236 GLOB = 1; 3237 MU.Lock(); 3238 C1 = 1; 3239 CV.Signal(); 3240 MU.Unlock(); 3241} 3242 3243void Signaller2() { 3244 usleep(100000); 3245 MU.Lock(); 3246 C2 = 1; 3247 CV.Signal(); 3248 MU.Unlock(); 3249} 3250 3251void Waiter1() { 3252 MU.Lock(); 3253 while (C1 != 1) CV.Wait(&MU); 3254 ANNOTATE_CONDVAR_WAIT(&CV); 3255 MU.Unlock(); 3256} 3257 3258void Waiter2() { 3259 MU.Lock(); 3260 while (C2 != 1) CV.Wait(&MU); 3261 ANNOTATE_CONDVAR_WAIT(&CV); 3262 MU.Unlock(); 3263 GLOB = 2; 3264} 3265 3266void Run() { 3267 FAST_MODE_INIT(&GLOB); 3268 ANNOTATE_EXPECT_RACE(&GLOB, "test67. FN. 
Race between Signaller1 and Waiter2"); 3269 printf("test67: positive\n"); 3270 MyThreadArray t(Signaller1, Signaller2, Waiter1, Waiter2); 3271 t.Start(); 3272 t.Join(); 3273 printf("\tGLOB=%d\n", GLOB); 3274} 3275REGISTER_TEST2(Run, 67, FEATURE|NEEDS_ANNOTATIONS|EXCLUDE_FROM_ALL) 3276} // namespace test67 3277 3278 3279// test68: TP. Writes are protected by MU, reads are not. {{{1 3280namespace test68 { 3281// In this test, all writes to GLOB are protected by a mutex 3282// but some reads go unprotected. 3283// This is certainly a race, but in some cases such code could occur in 3284// a correct program. For example, the unprotected reads may be used 3285// for showing statistics and are not required to be precise. 3286int GLOB = 0; 3287int COND = 0; 3288const int N_writers = 3; 3289Mutex MU, MU1; 3290 3291void Writer() { 3292 for (int i = 0; i < 100; i++) { 3293 MU.Lock(); 3294 GLOB++; 3295 MU.Unlock(); 3296 } 3297 3298 // we are done 3299 MU1.Lock(); 3300 COND++; 3301 MU1.Unlock(); 3302} 3303 3304void Reader() { 3305 bool cont = true; 3306 while (cont) { 3307 CHECK(GLOB >= 0); 3308 3309 // are we done? 3310 MU1.Lock(); 3311 if (COND == N_writers) 3312 cont = false; 3313 MU1.Unlock(); 3314 usleep(100); 3315 } 3316} 3317 3318void Run() { 3319 FAST_MODE_INIT(&GLOB); 3320 ANNOTATE_EXPECT_RACE(&GLOB, "TP. Writes are protected, reads are not."); 3321 printf("test68: positive\n"); 3322 MyThreadArray t(Reader, Writer, Writer, Writer); 3323 t.Start(); 3324 t.Join(); 3325 printf("\tGLOB=%d\n", GLOB); 3326} 3327REGISTER_TEST(Run, 68) 3328} // namespace test68 3329 3330 3331// test69: {{{1 3332namespace test69 { 3333// This is the same as test68, but annotated. 3334// We do not want to annotate GLOB as a benign race 3335// because we want to allow racy reads only in certain places. 
3336// 3337// TODO: 3338int GLOB = 0; 3339int COND = 0; 3340const int N_writers = 3; 3341int FAKE_MU = 0; 3342Mutex MU, MU1; 3343 3344void Writer() { 3345 for (int i = 0; i < 10; i++) { 3346 MU.Lock(); 3347 GLOB++; 3348 MU.Unlock(); 3349 } 3350 3351 // we are done 3352 MU1.Lock(); 3353 COND++; 3354 MU1.Unlock(); 3355} 3356 3357void Reader() { 3358 bool cont = true; 3359 while (cont) { 3360 ANNOTATE_IGNORE_READS_BEGIN(); 3361 CHECK(GLOB >= 0); 3362 ANNOTATE_IGNORE_READS_END(); 3363 3364 // are we done? 3365 MU1.Lock(); 3366 if (COND == N_writers) 3367 cont = false; 3368 MU1.Unlock(); 3369 usleep(100); 3370 } 3371} 3372 3373void Run() { 3374 printf("test69: negative\n"); 3375 MyThreadArray t(Reader, Writer, Writer, Writer); 3376 t.Start(); 3377 t.Join(); 3378 printf("\tGLOB=%d\n", GLOB); 3379} 3380REGISTER_TEST(Run, 69) 3381} // namespace test69 3382 3383// test70: STAB. Check that TRACE_MEMORY works. {{{1 3384namespace test70 { 3385int GLOB = 0; 3386void Run() { 3387 printf("test70: negative\n"); 3388 ANNOTATE_TRACE_MEMORY(&GLOB); 3389 GLOB = 1; 3390 printf("\tGLOB=%d\n", GLOB); 3391} 3392REGISTER_TEST(Run, 70) 3393} // namespace test70 3394 3395 3396 3397// test71: TN. strlen, index. {{{1 3398namespace test71 { 3399// This test is a reproducer for a benign race in strlen (as well as index, etc). 3400// Some implementations of strlen may read up to 7 bytes past the end of the string 3401// thus touching memory which may not belong to this string. 3402// Such race is benign because the data read past the end of the string is not used. 3403// 3404// Here, we allocate a 8-byte aligned string str and initialize first 5 bytes. 3405// Then one thread calls strlen(str) (as well as index & rindex) 3406// and another thread initializes str[5]..str[7]. 3407// 3408// This can be fixed in Helgrind by intercepting strlen and replacing it 3409// with a simpler implementation. 
3410 3411char *str; 3412void WorkerX() { 3413 usleep(100000); 3414 CHECK(strlen(str) == 4); 3415 CHECK(index(str, 'X') == str); 3416 CHECK(index(str, 'x') == str+1); 3417 CHECK(index(str, 'Y') == NULL); 3418 CHECK(rindex(str, 'X') == str+2); 3419 CHECK(rindex(str, 'x') == str+3); 3420 CHECK(rindex(str, 'Y') == NULL); 3421} 3422void WorkerY() { 3423 str[5] = 'Y'; 3424 str[6] = 'Y'; 3425 str[7] = '\0'; 3426} 3427 3428void Run() { 3429 str = new char[8]; 3430 str[0] = 'X'; 3431 str[1] = 'x'; 3432 str[2] = 'X'; 3433 str[3] = 'x'; 3434 str[4] = '\0'; 3435 3436 printf("test71: negative (strlen & index)\n"); 3437 MyThread t1(WorkerY); 3438 MyThread t2(WorkerX); 3439 t1.Start(); 3440 t2.Start(); 3441 t1.Join(); 3442 t2.Join(); 3443 printf("\tstrX=%s; strY=%s\n", str, str+5); 3444} 3445REGISTER_TEST(Run, 71) 3446} // namespace test71 3447 3448 3449// test72: STAB. Stress test for the number of segment sets (SSETs). {{{1 3450namespace test72 { 3451#ifndef NO_BARRIER 3452// Variation of test33. 3453// Instead of creating Nlog*N_iter threads, 3454// we create Nlog threads and do N_iter barriers. 3455int GLOB = 0; 3456const int N_iter = 30; 3457const int Nlog = 16; 3458const int N = 1 << Nlog; 3459static int64_t ARR1[N]; 3460static int64_t ARR2[N]; 3461Barrier *barriers[N_iter]; 3462Mutex MU; 3463 3464void Worker() { 3465 MU.Lock(); 3466 int n = ++GLOB; 3467 MU.Unlock(); 3468 3469 n %= Nlog; 3470 3471 long t0 = clock(); 3472 long t = t0; 3473 3474 for (int it = 0; it < N_iter; it++) { 3475 if(n == 0) { 3476 //printf("Iter: %d; %ld %ld\n", it, clock() - t, clock() - t0); 3477 t = clock(); 3478 } 3479 // Iterate N_iter times, block on barrier after each iteration. 3480 // This way Helgrind will create new segments after each barrier. 3481 3482 for (int x = 0; x < 2; x++) { 3483 // run the inner loop twice. 3484 // When a memory location is accessed second time it is likely 3485 // that the state (SVal) will be unchanged. 3486 // The memory machine may optimize this case. 
3487 for (int i = 0; i < N; i++) { 3488 // ARR1[i] and ARR2[N-1-i] are accessed by threads from i-th subset 3489 if (i & (1 << n)) { 3490 CHECK(ARR1[i] == 0); 3491 CHECK(ARR2[N-1-i] == 0); 3492 } 3493 } 3494 } 3495 barriers[it]->Block(); 3496 } 3497} 3498 3499 3500void Run() { 3501 printf("test72:\n"); 3502 3503 std::vector<MyThread*> vec(Nlog); 3504 3505 for (int i = 0; i < N_iter; i++) 3506 barriers[i] = new Barrier(Nlog); 3507 3508 // Create and start Nlog threads 3509 for (int i = 0; i < Nlog; i++) { 3510 vec[i] = new MyThread(Worker); 3511 vec[i]->Start(); 3512 } 3513 3514 // Join all threads. 3515 for (int i = 0; i < Nlog; i++) { 3516 vec[i]->Join(); 3517 delete vec[i]; 3518 } 3519 for (int i = 0; i < N_iter; i++) 3520 delete barriers[i]; 3521 3522 /*printf("\tGLOB=%d; ARR[1]=%d; ARR[7]=%d; ARR[N-1]=%d\n", 3523 GLOB, (int)ARR1[1], (int)ARR1[7], (int)ARR1[N-1]);*/ 3524} 3525REGISTER_TEST2(Run, 72, STABILITY|PERFORMANCE|EXCLUDE_FROM_ALL); 3526#endif // NO_BARRIER 3527} // namespace test72 3528 3529 3530// test73: STAB. Stress test for the number of (SSETs), different access sizes. {{{1 3531namespace test73 { 3532#ifndef NO_BARRIER 3533// Variation of test72. 3534// We perform accesses of different sizes to the same location. 3535int GLOB = 0; 3536const int N_iter = 2; 3537const int Nlog = 16; 3538const int N = 1 << Nlog; 3539static int64_t ARR1[N]; 3540static int ARR2[N]; 3541Barrier *barriers[N_iter]; 3542Mutex MU; 3543 3544void Worker() { 3545 MU.Lock(); 3546 int n = ++GLOB; 3547 MU.Unlock(); 3548 3549 n %= Nlog; 3550 3551 for (int it = 0; it < N_iter; it++) { 3552 // Iterate N_iter times, block on barrier after each iteration. 3553 // This way Helgrind will create new segments after each barrier. 
3554 3555 for (int x = 0; x < 4; x++) { 3556 for (int i = 0; i < N; i++) { 3557 // ARR1[i] are accessed by threads from i-th subset 3558 if (i & (1 << n)) { 3559 for (int off = 0; off < (1 << x); off++) { 3560 switch(x) { 3561 case 0: CHECK( ARR1 [i * (1<<x) + off] == 0); break; 3562 case 1: CHECK(((int*) (ARR1))[i * (1<<x) + off] == 0); break; 3563 case 2: CHECK(((short*)(ARR1))[i * (1<<x) + off] == 0); break; 3564 case 3: CHECK(((char*) (ARR1))[i * (1<<x) + off] == 0); break; 3565 } 3566 switch(x) { 3567 case 1: CHECK(((int*) (ARR2))[i * (1<<x) + off] == 0); break; 3568 case 2: CHECK(((short*)(ARR2))[i * (1<<x) + off] == 0); break; 3569 case 3: CHECK(((char*) (ARR2))[i * (1<<x) + off] == 0); break; 3570 } 3571 } 3572 } 3573 } 3574 } 3575 barriers[it]->Block(); 3576 } 3577} 3578 3579 3580 3581void Run() { 3582 printf("test73:\n"); 3583 3584 std::vector<MyThread*> vec(Nlog); 3585 3586 for (int i = 0; i < N_iter; i++) 3587 barriers[i] = new Barrier(Nlog); 3588 3589 // Create and start Nlog threads 3590 for (int i = 0; i < Nlog; i++) { 3591 vec[i] = new MyThread(Worker); 3592 vec[i]->Start(); 3593 } 3594 3595 // Join all threads. 3596 for (int i = 0; i < Nlog; i++) { 3597 vec[i]->Join(); 3598 delete vec[i]; 3599 } 3600 for (int i = 0; i < N_iter; i++) 3601 delete barriers[i]; 3602 3603 /*printf("\tGLOB=%d; ARR[1]=%d; ARR[7]=%d; ARR[N-1]=%d\n", 3604 GLOB, (int)ARR1[1], (int)ARR1[7], (int)ARR1[N-1]);*/ 3605} 3606REGISTER_TEST2(Run, 73, STABILITY|PERFORMANCE|EXCLUDE_FROM_ALL); 3607#endif // NO_BARRIER 3608} // namespace test73 3609 3610 3611// test74: PERF. A lot of lock/unlock calls. {{{1 3612namespace test74 { 3613const int N = 100000; 3614Mutex MU; 3615void Run() { 3616 printf("test74: perf\n"); 3617 for (int i = 0; i < N; i++ ) { 3618 MU.Lock(); 3619 MU.Unlock(); 3620 } 3621} 3622REGISTER_TEST(Run, 74) 3623} // namespace test74 3624 3625 3626// test75: TN. Test for sem_post, sem_wait, sem_trywait. 
{{{1 3627namespace test75 { 3628int GLOB = 0; 3629sem_t sem[2]; 3630 3631void Poster() { 3632 GLOB = 1; 3633 sem_post(&sem[0]); 3634 sem_post(&sem[1]); 3635} 3636 3637void Waiter() { 3638 sem_wait(&sem[0]); 3639 CHECK(GLOB==1); 3640} 3641void TryWaiter() { 3642 usleep(500000); 3643 sem_trywait(&sem[1]); 3644 CHECK(GLOB==1); 3645} 3646 3647void Run() { 3648#ifndef DRT_NO_SEM 3649 sem_init(&sem[0], 0, 0); 3650 sem_init(&sem[1], 0, 0); 3651 3652 printf("test75: negative\n"); 3653 { 3654 MyThreadArray t(Poster, Waiter); 3655 t.Start(); 3656 t.Join(); 3657 } 3658 GLOB = 2; 3659 { 3660 MyThreadArray t(Poster, TryWaiter); 3661 t.Start(); 3662 t.Join(); 3663 } 3664 printf("\tGLOB=%d\n", GLOB); 3665 3666 sem_destroy(&sem[0]); 3667 sem_destroy(&sem[1]); 3668#endif 3669} 3670REGISTER_TEST(Run, 75) 3671} // namespace test75 3672 3673// RefCountedClass {{{1 3674struct RefCountedClass { 3675 public: 3676 RefCountedClass() { 3677 annotate_unref_ = false; 3678 ref_ = 0; 3679 data_ = 0; 3680 } 3681 3682 ~RefCountedClass() { 3683 CHECK(ref_ == 0); // race may be reported here 3684 int data_val = data_; // and here 3685 // if MU is not annotated 3686 data_ = 0; 3687 ref_ = -1; 3688 printf("\tRefCountedClass::data_ = %d\n", data_val); 3689 } 3690 3691 void AccessData() { 3692 this->mu_.Lock(); 3693 this->data_++; 3694 this->mu_.Unlock(); 3695 } 3696 3697 void Ref() { 3698 MU.Lock(); 3699 CHECK(ref_ >= 0); 3700 ref_++; 3701 MU.Unlock(); 3702 } 3703 3704 void Unref() { 3705 MU.Lock(); 3706 CHECK(ref_ > 0); 3707 ref_--; 3708 bool do_delete = ref_ == 0; 3709 if (annotate_unref_) { 3710 ANNOTATE_CONDVAR_SIGNAL(this); 3711 } 3712 MU.Unlock(); 3713 if (do_delete) { 3714 if (annotate_unref_) { 3715 ANNOTATE_CONDVAR_WAIT(this); 3716 } 3717 delete this; 3718 } 3719 } 3720 3721 static void Annotate_MU() { 3722 ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(&MU); 3723 } 3724 void AnnotateUnref() { 3725 annotate_unref_ = true; 3726 } 3727 void Annotate_Race() { 3728 ANNOTATE_BENIGN_RACE(&this->data_, "needs 
annotation"); 3729 ANNOTATE_BENIGN_RACE(&this->ref_, "needs annotation"); 3730 } 3731 private: 3732 bool annotate_unref_; 3733 3734 int data_; 3735 Mutex mu_; // protects data_ 3736 3737 int ref_; 3738 static Mutex MU; // protects ref_ 3739}; 3740 3741Mutex RefCountedClass::MU; 3742 3743// test76: FP. Ref counting, no annotations. {{{1 3744namespace test76 { 3745#ifndef NO_BARRIER 3746int GLOB = 0; 3747Barrier barrier(4); 3748RefCountedClass *object = NULL; 3749void Worker() { 3750 object->Ref(); 3751 barrier.Block(); 3752 object->AccessData(); 3753 object->Unref(); 3754} 3755void Run() { 3756 printf("test76: false positive (ref counting)\n"); 3757 object = new RefCountedClass; 3758 object->Annotate_Race(); 3759 MyThreadArray t(Worker, Worker, Worker, Worker); 3760 t.Start(); 3761 t.Join(); 3762} 3763REGISTER_TEST2(Run, 76, FEATURE) 3764#endif // NO_BARRIER 3765} // namespace test76 3766 3767 3768 3769// test77: TN. Ref counting, MU is annotated. {{{1 3770namespace test77 { 3771#ifndef NO_BARRIER 3772// same as test76, but RefCountedClass::MU is annotated. 3773int GLOB = 0; 3774Barrier barrier(4); 3775RefCountedClass *object = NULL; 3776void Worker() { 3777 object->Ref(); 3778 barrier.Block(); 3779 object->AccessData(); 3780 object->Unref(); 3781} 3782void Run() { 3783 printf("test77: true negative (ref counting), mutex is annotated\n"); 3784 RefCountedClass::Annotate_MU(); 3785 object = new RefCountedClass; 3786 MyThreadArray t(Worker, Worker, Worker, Worker); 3787 t.Start(); 3788 t.Join(); 3789} 3790REGISTER_TEST(Run, 77) 3791#endif // NO_BARRIER 3792} // namespace test77 3793 3794 3795 3796// test78: TN. Ref counting, Unref is annotated. {{{1 3797namespace test78 { 3798#ifndef NO_BARRIER 3799// same as test76, but RefCountedClass::Unref is annotated. 
int GLOB = 0;
Barrier barrier(4);
RefCountedClass *object = NULL;
void Worker() {
  object->Ref();
  barrier.Block();
  object->AccessData();
  object->Unref();
}
void Run() {
  printf("test78: true negative (ref counting), Unref is annotated\n");
  // NOTE(review): despite the test description ("Unref is annotated"),
  // this calls Annotate_MU() exactly like test77 and never calls
  // object->AnnotateUnref() — verify this is intended.
  RefCountedClass::Annotate_MU();
  object = new RefCountedClass;
  MyThreadArray t(Worker, Worker, Worker, Worker);
  t.Start();
  t.Join();
}
REGISTER_TEST(Run, 78)
#endif // NO_BARRIER
}  // namespace test78



// test79 TN. Swap. {{{1
namespace test79 {
#if 0
typedef __gnu_cxx::hash_map<int, int> map_t;
#else
typedef std::map<int, int> map_t;
#endif
map_t MAP;
Mutex MU;

// Here we use swap to pass MAP between threads.
// The synchronization is correct, but w/o ANNOTATE_MUTEX_IS_USED_AS_CONDVAR
// Helgrind will complain.

void Worker1() {
  map_t tmp;
  MU.Lock();
  // We swap the new empty map 'tmp' with 'MAP'.
  MAP.swap(tmp);
  MU.Unlock();
  // tmp (which is the old version of MAP) is destroyed here.
}

void Worker2() {
  MU.Lock();
  MAP[1]++;  // Just update MAP under MU.
  MU.Unlock();
}

void Worker3() { Worker1(); }
void Worker4() { Worker2(); }

void Run() {
  ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(&MU);
  printf("test79: negative\n");
  MyThreadArray t(Worker1, Worker2, Worker3, Worker4);
  t.Start();
  t.Join();
}
REGISTER_TEST(Run, 79)
}  // namespace test79


// AtomicRefCountedClass. {{{1
// Same as RefCountedClass, but using atomic ops instead of mutex.
3868struct AtomicRefCountedClass { 3869 public: 3870 AtomicRefCountedClass() { 3871 annotate_unref_ = false; 3872 ref_ = 0; 3873 data_ = 0; 3874 } 3875 3876 ~AtomicRefCountedClass() { 3877 CHECK(ref_ == 0); // race may be reported here 3878 int data_val = data_; // and here 3879 data_ = 0; 3880 ref_ = -1; 3881 printf("\tRefCountedClass::data_ = %d\n", data_val); 3882 } 3883 3884 void AccessData() { 3885 this->mu_.Lock(); 3886 this->data_++; 3887 this->mu_.Unlock(); 3888 } 3889 3890 void Ref() { 3891 AtomicIncrement(&ref_, 1); 3892 } 3893 3894 void Unref() { 3895 // DISCLAIMER: I am not sure I've implemented this correctly 3896 // (might require some memory barrier, etc). 3897 // But this implementation of reference counting is enough for 3898 // the purpose of Helgrind demonstration. 3899 AtomicIncrement(&ref_, -1); 3900 if (annotate_unref_) { ANNOTATE_CONDVAR_SIGNAL(this); } 3901 if (ref_ == 0) { 3902 if (annotate_unref_) { ANNOTATE_CONDVAR_WAIT(this); } 3903 delete this; 3904 } 3905 } 3906 3907 void AnnotateUnref() { 3908 annotate_unref_ = true; 3909 } 3910 void Annotate_Race() { 3911 ANNOTATE_BENIGN_RACE(&this->data_, "needs annotation"); 3912 } 3913 private: 3914 bool annotate_unref_; 3915 3916 Mutex mu_; 3917 int data_; // under mu_ 3918 3919 int ref_; // used in atomic ops. 3920}; 3921 3922// test80: FP. Ref counting with atomics, no annotations. {{{1 3923namespace test80 { 3924#ifndef NO_BARRIER 3925int GLOB = 0; 3926Barrier barrier(4); 3927AtomicRefCountedClass *object = NULL; 3928void Worker() { 3929 object->Ref(); 3930 barrier.Block(); 3931 object->AccessData(); 3932 object->Unref(); // All the tricky stuff is here. 
3933} 3934void Run() { 3935 printf("test80: false positive (ref counting)\n"); 3936 object = new AtomicRefCountedClass; 3937 object->Annotate_Race(); 3938 MyThreadArray t(Worker, Worker, Worker, Worker); 3939 t.Start(); 3940 t.Join(); 3941} 3942REGISTER_TEST2(Run, 80, FEATURE|EXCLUDE_FROM_ALL) 3943#endif // NO_BARRIER 3944} // namespace test80 3945 3946 3947// test81: TN. Ref counting with atomics, Unref is annotated. {{{1 3948namespace test81 { 3949#ifndef NO_BARRIER 3950// same as test80, but Unref is annotated. 3951int GLOB = 0; 3952Barrier barrier(4); 3953AtomicRefCountedClass *object = NULL; 3954void Worker() { 3955 object->Ref(); 3956 barrier.Block(); 3957 object->AccessData(); 3958 object->Unref(); // All the tricky stuff is here. 3959} 3960void Run() { 3961 printf("test81: negative (annotated ref counting)\n"); 3962 object = new AtomicRefCountedClass; 3963 object->AnnotateUnref(); 3964 MyThreadArray t(Worker, Worker, Worker, Worker); 3965 t.Start(); 3966 t.Join(); 3967} 3968REGISTER_TEST2(Run, 81, FEATURE|EXCLUDE_FROM_ALL) 3969#endif // NO_BARRIER 3970} // namespace test81 3971 3972 3973// test82: Object published w/o synchronization. {{{1 3974namespace test82 { 3975 3976// Writer creates a new object and makes the pointer visible to the Reader. 3977// Reader waits until the object pointer is non-null and reads the object. 3978// 3979// On Core 2 Duo this test will sometimes (quite rarely) fail in 3980// the CHECK below, at least if compiled with -O2. 3981// 3982// The sequence of events:: 3983// Thread1: Thread2: 3984// a. arr_[...] = ... 3985// b. foo[i] = ... 3986// A. ... = foo[i]; // non NULL 3987// B. ... = arr_[...]; 3988// 3989// Since there is no proper synchronization, during the even (B) 3990// Thread2 may not see the result of the event (a). 3991// On x86 and x86_64 this happens due to compiler reordering instructions. 3992// On other arcitectures it may also happen due to cashe inconsistency. 
class FOO {
 public:
  FOO() {
    idx_ = rand() % 1024;
    arr_[idx_] = 77777;
    // __asm__ __volatile__("" : : : "memory"); // this fixes!
  }
  static void check(volatile FOO *foo) {
    CHECK(foo->arr_[foo->idx_] == 77777);
  }
 private:
  int idx_;
  int arr_[1024];
};

const int N = 100000;
static volatile FOO *foo[N];
Mutex MU;

void Writer() {
  for (int i = 0; i < N; i++) {
    foo[i] = new FOO;
    usleep(100);
  }
}

void Reader() {
  for (int i = 0; i < N; i++) {
    while (!foo[i]) {
      MU.Lock();   // this is NOT a synchronization,
      MU.Unlock(); // it just helps foo[i] to become visible in Reader.
    }
    if ((i % 100) == 0) {
      printf("rd %d\n", i);
    }
    // At this point Reader() sees the new value of foo[i]
    // but in very rare cases will not see the new value of foo[i]->arr_.
    // Thus this CHECK will sometimes fail.
    FOO::check(foo[i]);
  }
}

void Run() {
  printf("test82: positive\n");
  MyThreadArray t(Writer, Reader);
  t.Start();
  t.Join();
}
REGISTER_TEST2(Run, 82, FEATURE|EXCLUDE_FROM_ALL)
}  // namespace test82


// test83: Object published w/o synchronization (simple version){{{1
namespace test83 {
// A simplified version of test82 (an example of wrong code).
// This test, though incorrect, will almost never fail.
volatile static int *ptr = NULL;
Mutex MU;

void Writer() {
  usleep(100);
  ptr = new int(777);
}

void Reader() {
  while(!ptr) {
    MU.Lock();  // Not a synchronization!
    MU.Unlock();
  }
  CHECK(*ptr == 777);
}

void Run() {
//  printf("test83: positive\n");
  MyThreadArray t(Writer, Reader);
  t.Start();
  t.Join();
}
REGISTER_TEST2(Run, 83, FEATURE|EXCLUDE_FROM_ALL)
}  // namespace test83


// test84: TP.
// True race (regression test for a bug related to atomics) {{{1
namespace test84 {
// Helgrind should not create HB arcs for the bus lock even when
// --pure-happens-before=yes is used.
// Bug found by Bart Van Assche, the test is taken from
// valgrind file drd/tests/atomic_var.c.
static int s_x = 0;
/* s_dummy[] ensures that s_x and s_y are not in the same cache line. */
static char s_dummy[512] = {0};
static int s_y;

void thread_func_1()
{
  // Plain write to s_y, then atomic increment of s_x: the atomic op must
  // NOT be treated as a synchronization point, so the write races.
  s_y = 1;
  AtomicIncrement(&s_x, 1);
}

void thread_func_2()
{
  while (AtomicIncrement(&s_x, 0) == 0)
    ;
  printf("y = %d\n", s_y);
}


void Run() {
  CHECK(s_dummy[0] == 0);  // Avoid compiler warning about 's_dummy unused'.
  printf("test84: positive\n");
  FAST_MODE_INIT(&s_y);
  ANNOTATE_EXPECT_RACE_FOR_TSAN(&s_y, "test84: TP. true race.");
  MyThreadArray t(thread_func_1, thread_func_2);
  t.Start();
  t.Join();
}
REGISTER_TEST(Run, 84)
}  // namespace test84


// test85: Test for RunningOnValgrind(). {{{1
namespace test85 {
int GLOB = 0;
void Run() {
  printf("test85: RunningOnValgrind() = %d\n", RunningOnValgrind());
}
REGISTER_TEST(Run, 85)
}  // namespace test85


// test86: Test for race inside DTOR: racey write to vptr. Benign. {{{1
namespace test86 {
// This test shows a racey access to vptr (the pointer to vtbl).
// We have class A and class B derived from A.
// Both classes have a virtual function f() and a virtual DTOR.
// We create an object 'A *a = new B'
// and pass this object from Thread1 to Thread2.
// Thread2 calls a->f(). This call reads a->vptr.
// Thread1 deletes the object. B::~B waits until the object can be destroyed
// (flag_stopped == true) but at the very beginning of B::~B
// a->vptr is written to.
// So, we have a race on a->vptr.
4136// On this particular test this race is benign, but test87 shows 4137// how such race could harm. 4138// 4139// 4140// 4141// Threa1: Thread2: 4142// 1. A a* = new B; 4143// 2. Q.Put(a); ------------\ . 4144// \--------------------> a. a = Q.Get(); 4145// b. a->f(); 4146// /--------- c. flag_stopped = true; 4147// 3. delete a; / 4148// waits untill flag_stopped <------/ 4149// inside the dtor 4150// 4151 4152bool flag_stopped = false; 4153Mutex mu; 4154 4155ProducerConsumerQueue Q(INT_MAX); // Used to pass A* between threads. 4156 4157struct A { 4158 A() { printf("A::A()\n"); } 4159 virtual ~A() { printf("A::~A()\n"); } 4160 virtual void f() { } 4161 4162 uintptr_t padding[15]; 4163} __attribute__ ((aligned (64))); 4164 4165struct B: A { 4166 B() { printf("B::B()\n"); } 4167 virtual ~B() { 4168 // The race is here. <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< 4169 printf("B::~B()\n"); 4170 // wait until flag_stopped is true. 4171 mu.LockWhen(Condition(&ArgIsTrue, &flag_stopped)); 4172 mu.Unlock(); 4173 printf("B::~B() done\n"); 4174 } 4175 virtual void f() { } 4176}; 4177 4178void Waiter() { 4179 A *a = new B; 4180 if (!Tsan_FastMode()) 4181 ANNOTATE_EXPECT_RACE(a, "test86: expected race on a->vptr"); 4182 printf("Waiter: B created\n"); 4183 Q.Put(a); 4184 usleep(100000); // so that Worker calls a->f() first. 4185 printf("Waiter: deleting B\n"); 4186 delete a; 4187 printf("Waiter: B deleted\n"); 4188 usleep(100000); 4189 printf("Waiter: done\n"); 4190} 4191 4192void Worker() { 4193 A *a = reinterpret_cast<A*>(Q.Get()); 4194 printf("Worker: got A\n"); 4195 a->f(); 4196 4197 mu.Lock(); 4198 flag_stopped = true; 4199 mu.Unlock(); 4200 usleep(200000); 4201 printf("Worker: done\n"); 4202} 4203 4204void Run() { 4205 printf("test86: positive, race inside DTOR\n"); 4206 MyThreadArray t(Waiter, Worker); 4207 t.Start(); 4208 t.Join(); 4209} 4210REGISTER_TEST(Run, 86) 4211} // namespace test86 4212 4213 4214// test87: Test for race inside DTOR: racey write to vptr. 
Harmful.{{{1 4215namespace test87 { 4216// A variation of test86 where the race is harmful. 4217// Here we have class C derived from B. 4218// We create an object 'A *a = new C' in Thread1 and pass it to Thread2. 4219// Thread2 calls a->f(). 4220// Thread1 calls 'delete a'. 4221// It first calls C::~C, then B::~B where it rewrites the vptr to point 4222// to B::vtbl. This is a problem because Thread2 might not have called a->f() 4223// and now it will call B::f instead of C::f. 4224// 4225bool flag_stopped = false; 4226Mutex mu; 4227 4228ProducerConsumerQueue Q(INT_MAX); // Used to pass A* between threads. 4229 4230struct A { 4231 A() { printf("A::A()\n"); } 4232 virtual ~A() { printf("A::~A()\n"); } 4233 virtual void f() = 0; // pure virtual. 4234}; 4235 4236struct B: A { 4237 B() { printf("B::B()\n"); } 4238 virtual ~B() { 4239 // The race is here. <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< 4240 printf("B::~B()\n"); 4241 // wait until flag_stopped is true. 4242 mu.LockWhen(Condition(&ArgIsTrue, &flag_stopped)); 4243 mu.Unlock(); 4244 printf("B::~B() done\n"); 4245 } 4246 virtual void f() = 0; // pure virtual. 4247}; 4248 4249struct C: B { 4250 C() { printf("C::C()\n"); } 4251 virtual ~C() { printf("C::~C()\n"); } 4252 virtual void f() { } 4253}; 4254 4255void Waiter() { 4256 A *a = new C; 4257 Q.Put(a); 4258 delete a; 4259} 4260 4261void Worker() { 4262 A *a = reinterpret_cast<A*>(Q.Get()); 4263 a->f(); 4264 4265 mu.Lock(); 4266 flag_stopped = true; 4267 ANNOTATE_CONDVAR_SIGNAL(&mu); 4268 mu.Unlock(); 4269} 4270 4271void Run() { 4272 printf("test87: positive, race inside DTOR\n"); 4273 MyThreadArray t(Waiter, Worker); 4274 t.Start(); 4275 t.Join(); 4276} 4277REGISTER_TEST2(Run, 87, FEATURE|EXCLUDE_FROM_ALL) 4278} // namespace test87 4279 4280 4281// test88: Test for ANNOTATE_IGNORE_WRITES_*{{{1 4282namespace test88 { 4283// a recey write annotated with ANNOTATE_IGNORE_WRITES_BEGIN/END. 
4284int GLOB = 0; 4285void Worker() { 4286 ANNOTATE_IGNORE_WRITES_BEGIN(); 4287 GLOB = 1; 4288 ANNOTATE_IGNORE_WRITES_END(); 4289} 4290void Run() { 4291 printf("test88: negative, test for ANNOTATE_IGNORE_WRITES_*\n"); 4292 MyThread t(Worker); 4293 t.Start(); 4294 GLOB = 1; 4295 t.Join(); 4296 printf("\tGLOB=%d\n", GLOB); 4297} 4298REGISTER_TEST(Run, 88) 4299} // namespace test88 4300 4301 4302// test89: Test for debug info. {{{1 4303namespace test89 { 4304// Simlpe races with different objects (stack, heap globals; scalars, structs). 4305// Also, if run with --trace-level=2 this test will show a sequence of 4306// CTOR and DTOR calls. 4307struct STRUCT { 4308 int a, b, c; 4309}; 4310 4311struct A { 4312 int a; 4313 A() { 4314 ANNOTATE_TRACE_MEMORY(&a); 4315 a = 1; 4316 } 4317 virtual ~A() { 4318 a = 4; 4319 } 4320}; 4321 4322struct B : A { 4323 B() { CHECK(a == 1); } 4324 virtual ~B() { CHECK(a == 3); } 4325}; 4326struct C : B { 4327 C() { a = 2; } 4328 virtual ~C() { a = 3; } 4329}; 4330 4331int GLOBAL = 0; 4332int *STACK = 0; 4333STRUCT GLOB_STRUCT; 4334STRUCT *STACK_STRUCT; 4335STRUCT *HEAP_STRUCT; 4336 4337void Worker() { 4338 GLOBAL = 1; 4339 *STACK = 1; 4340 GLOB_STRUCT.b = 1; 4341 STACK_STRUCT->b = 1; 4342 HEAP_STRUCT->b = 1; 4343} 4344 4345void Run() { 4346 int stack_var = 0; 4347 STACK = &stack_var; 4348 4349 STRUCT stack_struct; 4350 STACK_STRUCT = &stack_struct; 4351 4352 HEAP_STRUCT = new STRUCT; 4353 4354 printf("test89: negative\n"); 4355 MyThreadArray t(Worker, Worker); 4356 t.Start(); 4357 t.Join(); 4358 4359 delete HEAP_STRUCT; 4360 4361 A *a = new C; 4362 printf("Using 'a->a': %d\n", a->a); 4363 delete a; 4364} 4365REGISTER_TEST2(Run, 89, FEATURE|EXCLUDE_FROM_ALL) 4366} // namespace test89 4367 4368 4369// test90: FP. Test for a safely-published pointer (read-only). {{{1 4370namespace test90 { 4371// The Publisher creates an object and safely publishes it under a mutex. 4372// Readers access the object read-only. 4373// See also test91. 
4374// 4375// Without annotations Helgrind will issue a false positive in Reader(). 4376// 4377// Choices for annotations: 4378// -- ANNOTATE_CONDVAR_SIGNAL/ANNOTATE_CONDVAR_WAIT 4379// -- ANNOTATE_MUTEX_IS_USED_AS_CONDVAR 4380// -- ANNOTATE_PUBLISH_MEMORY_RANGE. 4381 4382int *GLOB = 0; 4383Mutex MU; 4384 4385void Publisher() { 4386 MU.Lock(); 4387 GLOB = (int*)memalign(64, sizeof(int)); 4388 *GLOB = 777; 4389 if (!Tsan_PureHappensBefore() && !Tsan_FastMode()) 4390 ANNOTATE_EXPECT_RACE_FOR_TSAN(GLOB, "test90. FP. This is a false positve"); 4391 MU.Unlock(); 4392 usleep(200000); 4393} 4394 4395void Reader() { 4396 usleep(10000); 4397 while (true) { 4398 MU.Lock(); 4399 int *p = GLOB; 4400 MU.Unlock(); 4401 if (p) { 4402 CHECK(*p == 777); // Race is reported here. 4403 break; 4404 } 4405 } 4406} 4407 4408void Run() { 4409 printf("test90: false positive (safely published pointer).\n"); 4410 MyThreadArray t(Publisher, Reader, Reader, Reader); 4411 t.Start(); 4412 t.Join(); 4413 printf("\t*GLOB=%d\n", *GLOB); 4414 free(GLOB); 4415} 4416REGISTER_TEST(Run, 90) 4417} // namespace test90 4418 4419 4420// test91: FP. Test for a safely-published pointer (read-write). {{{1 4421namespace test91 { 4422// Similar to test90. 4423// The Publisher creates an object and safely publishes it under a mutex MU1. 4424// Accessors get the object under MU1 and access it (read/write) under MU2. 4425// 4426// Without annotations Helgrind will issue a false positive in Accessor(). 4427// 4428 4429int *GLOB = 0; 4430Mutex MU, MU1, MU2; 4431 4432void Publisher() { 4433 MU1.Lock(); 4434 GLOB = (int*)memalign(64, sizeof(int)); 4435 *GLOB = 777; 4436 if (!Tsan_PureHappensBefore() && !Tsan_FastMode()) 4437 ANNOTATE_EXPECT_RACE_FOR_TSAN(GLOB, "test91. FP. This is a false positve"); 4438 MU1.Unlock(); 4439} 4440 4441void Accessor() { 4442 usleep(10000); 4443 while (true) { 4444 MU1.Lock(); 4445 int *p = GLOB; 4446 MU1.Unlock(); 4447 if (p) { 4448 MU2.Lock(); 4449 (*p)++; // Race is reported here. 
4450 CHECK(*p > 777); 4451 MU2.Unlock(); 4452 break; 4453 } 4454 } 4455} 4456 4457void Run() { 4458 printf("test91: false positive (safely published pointer, read/write).\n"); 4459 MyThreadArray t(Publisher, Accessor, Accessor, Accessor); 4460 t.Start(); 4461 t.Join(); 4462 printf("\t*GLOB=%d\n", *GLOB); 4463 free(GLOB); 4464} 4465REGISTER_TEST(Run, 91) 4466} // namespace test91 4467 4468 4469// test92: TN. Test for a safely-published pointer (read-write), annotated. {{{1 4470namespace test92 { 4471// Similar to test91, but annotated with ANNOTATE_PUBLISH_MEMORY_RANGE. 4472// 4473// 4474// Publisher: Accessors: 4475// 4476// 1. MU1.Lock() 4477// 2. Create GLOB. 4478// 3. ANNOTATE_PUBLISH_...(GLOB) -------\ . 4479// 4. MU1.Unlock() \ . 4480// \ a. MU1.Lock() 4481// \ b. Get GLOB 4482// \ c. MU1.Unlock() 4483// \--> d. Access GLOB 4484// 4485// A happens-before arc is created between ANNOTATE_PUBLISH_MEMORY_RANGE and 4486// accesses to GLOB. 4487 4488struct ObjType { 4489 int arr[10]; 4490}; 4491 4492ObjType *GLOB = 0; 4493Mutex MU, MU1, MU2; 4494 4495void Publisher() { 4496 MU1.Lock(); 4497 GLOB = new ObjType; 4498 for (int i = 0; i < 10; i++) { 4499 GLOB->arr[i] = 777; 4500 } 4501 // This annotation should go right before the object is published. 4502 ANNOTATE_PUBLISH_MEMORY_RANGE(GLOB, sizeof(*GLOB)); 4503 MU1.Unlock(); 4504} 4505 4506void Accessor(int index) { 4507 while (true) { 4508 MU1.Lock(); 4509 ObjType *p = GLOB; 4510 MU1.Unlock(); 4511 if (p) { 4512 MU2.Lock(); 4513 p->arr[index]++; // W/o the annotations the race will be reported here. 
4514 CHECK(p->arr[index] == 778); 4515 MU2.Unlock(); 4516 break; 4517 } 4518 } 4519} 4520 4521void Accessor0() { Accessor(0); } 4522void Accessor5() { Accessor(5); } 4523void Accessor9() { Accessor(9); } 4524 4525void Run() { 4526 printf("test92: safely published pointer, read/write, annotated.\n"); 4527 MyThreadArray t(Publisher, Accessor0, Accessor5, Accessor9); 4528 t.Start(); 4529 t.Join(); 4530 printf("\t*GLOB=%d\n", GLOB->arr[0]); 4531} 4532REGISTER_TEST(Run, 92) 4533} // namespace test92 4534 4535 4536// test93: TP. Test for incorrect usage of ANNOTATE_PUBLISH_MEMORY_RANGE. {{{1 4537namespace test93 { 4538int GLOB = 0; 4539 4540void Reader() { 4541 CHECK(GLOB == 0); 4542} 4543 4544void Publisher() { 4545 usleep(10000); 4546 // Incorrect, used after the memory has been accessed in another thread. 4547 ANNOTATE_PUBLISH_MEMORY_RANGE(&GLOB, sizeof(GLOB)); 4548} 4549 4550void Run() { 4551 printf("test93: positive, misuse of ANNOTATE_PUBLISH_MEMORY_RANGE\n"); 4552 MyThreadArray t(Reader, Publisher); 4553 t.Start(); 4554 t.Join(); 4555 printf("\tGLOB=%d\n", GLOB); 4556} 4557REGISTER_TEST2(Run, 93, FEATURE|EXCLUDE_FROM_ALL) 4558} // namespace test93 4559 4560 4561// test94: TP. Check do_cv_signal/fake segment logic {{{1 4562namespace test94 { 4563int GLOB; 4564 4565int COND = 0; 4566int COND2 = 0; 4567Mutex MU, MU2; 4568CondVar CV, CV2; 4569 4570void Thr1() { 4571 usleep(10000); // Make sure the waiter blocks. 4572 4573 GLOB = 1; // WRITE 4574 4575 MU.Lock(); 4576 COND = 1; 4577 CV.Signal(); 4578 MU.Unlock(); 4579} 4580void Thr2() { 4581 usleep(1000*1000); // Make sure CV2.Signal() "happens after" CV.Signal() 4582 usleep(10000); // Make sure the waiter blocks. 
4583 4584 MU2.Lock(); 4585 COND2 = 1; 4586 CV2.Signal(); 4587 MU2.Unlock(); 4588} 4589void Thr3() { 4590 MU.Lock(); 4591 while(COND != 1) 4592 CV.Wait(&MU); 4593 MU.Unlock(); 4594} 4595void Thr4() { 4596 MU2.Lock(); 4597 while(COND2 != 1) 4598 CV2.Wait(&MU2); 4599 MU2.Unlock(); 4600 GLOB = 2; // READ: no HB-relation between CV.Signal and CV2.Wait ! 4601} 4602void Run() { 4603 FAST_MODE_INIT(&GLOB); 4604 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test94: TP."); 4605 printf("test94: TP. Check do_cv_signal/fake segment logic\n"); 4606 MyThreadArray mta(Thr1, Thr2, Thr3, Thr4); 4607 mta.Start(); 4608 mta.Join(); 4609 printf("\tGLOB=%d\n", GLOB); 4610} 4611REGISTER_TEST(Run, 94); 4612} // namespace test94 4613 4614// test95: TP. Check do_cv_signal/fake segment logic {{{1 4615namespace test95 { 4616int GLOB = 0; 4617 4618int COND = 0; 4619int COND2 = 0; 4620Mutex MU, MU2; 4621CondVar CV, CV2; 4622 4623void Thr1() { 4624 usleep(1000*1000); // Make sure CV2.Signal() "happens before" CV.Signal() 4625 usleep(10000); // Make sure the waiter blocks. 4626 4627 GLOB = 1; // WRITE 4628 4629 MU.Lock(); 4630 COND = 1; 4631 CV.Signal(); 4632 MU.Unlock(); 4633} 4634void Thr2() { 4635 usleep(10000); // Make sure the waiter blocks. 4636 4637 MU2.Lock(); 4638 COND2 = 1; 4639 CV2.Signal(); 4640 MU2.Unlock(); 4641} 4642void Thr3() { 4643 MU.Lock(); 4644 while(COND != 1) 4645 CV.Wait(&MU); 4646 MU.Unlock(); 4647} 4648void Thr4() { 4649 MU2.Lock(); 4650 while(COND2 != 1) 4651 CV2.Wait(&MU2); 4652 MU2.Unlock(); 4653 GLOB = 2; // READ: no HB-relation between CV.Signal and CV2.Wait ! 4654} 4655void Run() { 4656 FAST_MODE_INIT(&GLOB); 4657 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test95: TP."); 4658 printf("test95: TP. Check do_cv_signal/fake segment logic\n"); 4659 MyThreadArray mta(Thr1, Thr2, Thr3, Thr4); 4660 mta.Start(); 4661 mta.Join(); 4662 printf("\tGLOB=%d\n", GLOB); 4663} 4664REGISTER_TEST(Run, 95); 4665} // namespace test95 4666 4667// test96: TN. 
tricky LockSet behaviour {{{1 4668// 3 threads access the same memory with three different 4669// locksets: {A, B}, {B, C}, {C, A}. 4670// These locksets have empty intersection 4671namespace test96 { 4672int GLOB = 0; 4673 4674Mutex A, B, C; 4675 4676void Thread1() { 4677 MutexLock a(&A); 4678 MutexLock b(&B); 4679 GLOB++; 4680} 4681 4682void Thread2() { 4683 MutexLock b(&B); 4684 MutexLock c(&C); 4685 GLOB++; 4686} 4687 4688void Thread3() { 4689 MutexLock a(&A); 4690 MutexLock c(&C); 4691 GLOB++; 4692} 4693 4694void Run() { 4695 printf("test96: FP. tricky LockSet behaviour\n"); 4696 ANNOTATE_TRACE_MEMORY(&GLOB); 4697 MyThreadArray mta(Thread1, Thread2, Thread3); 4698 mta.Start(); 4699 mta.Join(); 4700 CHECK(GLOB == 3); 4701 printf("\tGLOB=%d\n", GLOB); 4702} 4703REGISTER_TEST(Run, 96); 4704} // namespace test96 4705 4706// test97: This test shows false negative with --fast-mode=yes {{{1 4707namespace test97 { 4708const int HG_CACHELINE_SIZE = 64; 4709 4710Mutex MU; 4711 4712const int ARRAY_SIZE = HG_CACHELINE_SIZE * 4 / sizeof(int); 4713int array[ARRAY_SIZE]; 4714int * GLOB = &array[ARRAY_SIZE/2]; 4715/* 4716 We use sizeof(array) == 4 * HG_CACHELINE_SIZE to be sure that GLOB points 4717 to a memory inside a CacheLineZ which is inside array's memory range 4718 */ 4719 4720void Reader() { 4721 usleep(500000); 4722 CHECK(777 == *GLOB); 4723} 4724 4725void Run() { 4726 MyThreadArray t(Reader); 4727 if (!Tsan_FastMode()) 4728 ANNOTATE_EXPECT_RACE_FOR_TSAN(GLOB, "test97: TP. FN with --fast-mode=yes"); 4729 printf("test97: This test shows false negative with --fast-mode=yes\n"); 4730 4731 t.Start(); 4732 *GLOB = 777; 4733 t.Join(); 4734} 4735 4736REGISTER_TEST2(Run, 97, FEATURE) 4737} // namespace test97 4738 4739// test98: Synchronization via read/write (or send/recv). {{{1 4740namespace test98 { 4741// The synchronization here is done by a pair of read/write calls 4742// that create a happens-before arc. Same may be done with send/recv. 
4743// Such synchronization is quite unusual in real programs 4744// (why would one synchronizae via a file or socket?), but 4745// quite possible in unittests where one threads runs for producer 4746// and one for consumer. 4747// 4748// A race detector has to create a happens-before arcs for 4749// {read,send}->{write,recv} even if the file descriptors are different. 4750// 4751int GLOB = 0; 4752int fd_out = -1; 4753int fd_in = -1; 4754 4755void Writer() { 4756 usleep(1000); 4757 GLOB = 1; 4758 const char *str = "Hey there!\n"; 4759 IGNORE_RETURN_VALUE(write(fd_out, str, strlen(str) + 1)); 4760} 4761 4762void Reader() { 4763 char buff[100]; 4764 while (read(fd_in, buff, 100) == 0) 4765 sleep(1); 4766 printf("read: %s\n", buff); 4767 GLOB = 2; 4768} 4769 4770void Run() { 4771 printf("test98: negative, synchronization via I/O\n"); 4772 char in_name[100]; 4773 char out_name[100]; 4774 // we open two files, on for reading and one for writing, 4775 // but the files are actually the same (symlinked). 4776 sprintf(in_name, "/tmp/racecheck_unittest_in.%d", getpid()); 4777 sprintf(out_name, "/tmp/racecheck_unittest_out.%d", getpid()); 4778 fd_out = creat(out_name, O_WRONLY | S_IRWXU); 4779 IGNORE_RETURN_VALUE(symlink(out_name, in_name)); 4780 fd_in = open(in_name, 0, O_RDONLY); 4781 CHECK(fd_out >= 0); 4782 CHECK(fd_in >= 0); 4783 MyThreadArray t(Writer, Reader); 4784 t.Start(); 4785 t.Join(); 4786 printf("\tGLOB=%d\n", GLOB); 4787 // cleanup 4788 close(fd_in); 4789 close(fd_out); 4790 unlink(in_name); 4791 unlink(out_name); 4792} 4793REGISTER_TEST(Run, 98) 4794} // namespace test98 4795 4796 4797// test99: TP. Unit test for a bug in LockWhen*. 
{{{1 4798namespace test99 { 4799 4800 4801bool GLOB = false; 4802Mutex mu; 4803 4804static void Thread1() { 4805 for (int i = 0; i < 100; i++) { 4806 mu.LockWhenWithTimeout(Condition(&ArgIsTrue, &GLOB), 5); 4807 GLOB = false; 4808 mu.Unlock(); 4809 usleep(10000); 4810 } 4811} 4812 4813static void Thread2() { 4814 for (int i = 0; i < 100; i++) { 4815 mu.Lock(); 4816 mu.Unlock(); 4817 usleep(10000); 4818 } 4819} 4820 4821void Run() { 4822 printf("test99: regression test for LockWhen*\n"); 4823 MyThreadArray t(Thread1, Thread2); 4824 t.Start(); 4825 t.Join(); 4826} 4827REGISTER_TEST(Run, 99); 4828} // namespace test99 4829 4830 4831// test100: Test for initialization bit. {{{1 4832namespace test100 { 4833int G1 = 0; 4834int G2 = 0; 4835int G3 = 0; 4836int G4 = 0; 4837 4838void Creator() { 4839 G1 = 1; CHECK(G1); 4840 G2 = 1; 4841 G3 = 1; CHECK(G3); 4842 G4 = 1; 4843} 4844 4845void Worker1() { 4846 usleep(100000); 4847 CHECK(G1); 4848 CHECK(G2); 4849 G3 = 3; 4850 G4 = 3; 4851} 4852 4853void Worker2() { 4854 4855} 4856 4857 4858void Run() { 4859 printf("test100: test for initialization bit. \n"); 4860 MyThreadArray t(Creator, Worker1, Worker2); 4861 ANNOTATE_TRACE_MEMORY(&G1); 4862 ANNOTATE_TRACE_MEMORY(&G2); 4863 ANNOTATE_TRACE_MEMORY(&G3); 4864 ANNOTATE_TRACE_MEMORY(&G4); 4865 t.Start(); 4866 t.Join(); 4867} 4868REGISTER_TEST2(Run, 100, FEATURE|EXCLUDE_FROM_ALL) 4869} // namespace test100 4870 4871 4872// test101: TN. Two signals and two waits. 
{{{1 4873namespace test101 { 4874Mutex MU; 4875CondVar CV; 4876int GLOB = 0; 4877 4878int C1 = 0, C2 = 0; 4879 4880void Signaller() { 4881 usleep(100000); 4882 MU.Lock(); 4883 C1 = 1; 4884 CV.Signal(); 4885 printf("signal\n"); 4886 MU.Unlock(); 4887 4888 GLOB = 1; 4889 4890 usleep(500000); 4891 MU.Lock(); 4892 C2 = 1; 4893 CV.Signal(); 4894 printf("signal\n"); 4895 MU.Unlock(); 4896} 4897 4898void Waiter() { 4899 MU.Lock(); 4900 while(!C1) 4901 CV.Wait(&MU); 4902 printf("wait\n"); 4903 MU.Unlock(); 4904 4905 MU.Lock(); 4906 while(!C2) 4907 CV.Wait(&MU); 4908 printf("wait\n"); 4909 MU.Unlock(); 4910 4911 GLOB = 2; 4912 4913} 4914 4915void Run() { 4916 printf("test101: negative\n"); 4917 MyThreadArray t(Waiter, Signaller); 4918 t.Start(); 4919 t.Join(); 4920 printf("\tGLOB=%d\n", GLOB); 4921} 4922REGISTER_TEST(Run, 101) 4923} // namespace test101 4924 4925// test102: --fast-mode=yes vs. --initialization-bit=yes {{{1 4926namespace test102 { 4927const int HG_CACHELINE_SIZE = 64; 4928 4929Mutex MU; 4930 4931const int ARRAY_SIZE = HG_CACHELINE_SIZE * 4 / sizeof(int); 4932int array[ARRAY_SIZE + 1]; 4933int * GLOB = &array[ARRAY_SIZE/2]; 4934/* 4935 We use sizeof(array) == 4 * HG_CACHELINE_SIZE to be sure that GLOB points 4936 to a memory inside a CacheLineZ which is inside array's memory range 4937*/ 4938 4939void Reader() { 4940 usleep(200000); 4941 CHECK(777 == GLOB[0]); 4942 usleep(400000); 4943 CHECK(777 == GLOB[1]); 4944} 4945 4946void Run() { 4947 MyThreadArray t(Reader); 4948 if (!Tsan_FastMode()) 4949 ANNOTATE_EXPECT_RACE_FOR_TSAN(GLOB+0, "test102: TP. FN with --fast-mode=yes"); 4950 ANNOTATE_EXPECT_RACE_FOR_TSAN(GLOB+1, "test102: TP"); 4951 printf("test102: --fast-mode=yes vs. 
--initialization-bit=yes\n"); 4952 4953 t.Start(); 4954 GLOB[0] = 777; 4955 usleep(400000); 4956 GLOB[1] = 777; 4957 t.Join(); 4958} 4959 4960REGISTER_TEST2(Run, 102, FEATURE) 4961} // namespace test102 4962 4963// test103: Access different memory locations with different LockSets {{{1 4964namespace test103 { 4965const int N_MUTEXES = 6; 4966const int LOCKSET_INTERSECTION_SIZE = 3; 4967 4968int data[1 << LOCKSET_INTERSECTION_SIZE] = {0}; 4969Mutex MU[N_MUTEXES]; 4970 4971inline int LS_to_idx (int ls) { 4972 return (ls >> (N_MUTEXES - LOCKSET_INTERSECTION_SIZE)) 4973 & ((1 << LOCKSET_INTERSECTION_SIZE) - 1); 4974} 4975 4976void Worker() { 4977 for (int ls = 0; ls < (1 << N_MUTEXES); ls++) { 4978 if (LS_to_idx(ls) == 0) 4979 continue; 4980 for (int m = 0; m < N_MUTEXES; m++) 4981 if (ls & (1 << m)) 4982 MU[m].Lock(); 4983 4984 data[LS_to_idx(ls)]++; 4985 4986 for (int m = N_MUTEXES - 1; m >= 0; m--) 4987 if (ls & (1 << m)) 4988 MU[m].Unlock(); 4989 } 4990} 4991 4992void Run() { 4993 printf("test103: Access different memory locations with different LockSets\n"); 4994 MyThreadArray t(Worker, Worker, Worker, Worker); 4995 t.Start(); 4996 t.Join(); 4997} 4998REGISTER_TEST2(Run, 103, FEATURE) 4999} // namespace test103 5000 5001// test104: TP. Simple race (write vs write). Heap mem. {{{1 5002namespace test104 { 5003int *GLOB = NULL; 5004void Worker() { 5005 *GLOB = 1; 5006} 5007 5008void Parent() { 5009 MyThread t(Worker); 5010 t.Start(); 5011 usleep(100000); 5012 *GLOB = 2; 5013 t.Join(); 5014} 5015void Run() { 5016 GLOB = (int*)memalign(64, sizeof(int)); 5017 *GLOB = 0; 5018 ANNOTATE_EXPECT_RACE(GLOB, "test104. TP."); 5019 ANNOTATE_TRACE_MEMORY(GLOB); 5020 printf("test104: positive\n"); 5021 Parent(); 5022 printf("\tGLOB=%d\n", *GLOB); 5023 free(GLOB); 5024} 5025REGISTER_TEST(Run, 104); 5026} // namespace test104 5027 5028 5029// test105: Checks how stack grows. 
{{{1 5030namespace test105 { 5031int GLOB = 0; 5032 5033void F1() { 5034 int ar[32]; 5035// ANNOTATE_TRACE_MEMORY(&ar[0]); 5036// ANNOTATE_TRACE_MEMORY(&ar[31]); 5037 ar[0] = 1; 5038 ar[31] = 1; 5039} 5040 5041void Worker() { 5042 int ar[32]; 5043// ANNOTATE_TRACE_MEMORY(&ar[0]); 5044// ANNOTATE_TRACE_MEMORY(&ar[31]); 5045 ar[0] = 1; 5046 ar[31] = 1; 5047 F1(); 5048} 5049 5050void Run() { 5051 printf("test105: negative\n"); 5052 Worker(); 5053 MyThread t(Worker); 5054 t.Start(); 5055 t.Join(); 5056 printf("\tGLOB=%d\n", GLOB); 5057} 5058REGISTER_TEST(Run, 105) 5059} // namespace test105 5060 5061 5062// test106: TN. pthread_once. {{{1 5063namespace test106 { 5064int *GLOB = NULL; 5065static pthread_once_t once = PTHREAD_ONCE_INIT; 5066void Init() { 5067 GLOB = new int; 5068 ANNOTATE_TRACE_MEMORY(GLOB); 5069 *GLOB = 777; 5070} 5071 5072void Worker0() { 5073 pthread_once(&once, Init); 5074} 5075void Worker1() { 5076 usleep(100000); 5077 pthread_once(&once, Init); 5078 CHECK(*GLOB == 777); 5079} 5080 5081 5082void Run() { 5083 printf("test106: negative\n"); 5084 MyThreadArray t(Worker0, Worker1, Worker1, Worker1); 5085 t.Start(); 5086 t.Join(); 5087 printf("\tGLOB=%d\n", *GLOB); 5088} 5089REGISTER_TEST2(Run, 106, FEATURE) 5090} // namespace test106 5091 5092 5093// test107: Test for ANNOTATE_EXPECT_RACE {{{1 5094namespace test107 { 5095int GLOB = 0; 5096void Run() { 5097 printf("test107: negative\n"); 5098 ANNOTATE_EXPECT_RACE(&GLOB, "No race in fact. Just checking the tool."); 5099 printf("\tGLOB=%d\n", GLOB); 5100} 5101REGISTER_TEST2(Run, 107, FEATURE|EXCLUDE_FROM_ALL) 5102} // namespace test107 5103 5104 5105// test108: TN. initialization of static object. {{{1 5106namespace test108 { 5107// Here we have a function-level static object. 5108// Starting from gcc 4 this is therad safe, 5109// but is is not thread safe with many other compilers. 
5110// 5111// Helgrind supports this kind of initialization by 5112// intercepting __cxa_guard_acquire/__cxa_guard_release 5113// and ignoring all accesses between them. 5114// Helgrind also intercepts pthread_once in the same manner. 5115class Foo { 5116 public: 5117 Foo() { 5118 ANNOTATE_TRACE_MEMORY(&a_); 5119 a_ = 42; 5120 } 5121 void Check() const { CHECK(a_ == 42); } 5122 private: 5123 int a_; 5124}; 5125 5126const Foo *GetFoo() { 5127 static const Foo *foo = new Foo(); 5128 return foo; 5129} 5130void Worker0() { 5131 GetFoo(); 5132} 5133 5134void Worker() { 5135 usleep(200000); 5136 const Foo *foo = GetFoo(); 5137 foo->Check(); 5138} 5139 5140 5141void Run() { 5142 printf("test108: negative, initialization of static object\n"); 5143 MyThreadArray t(Worker0, Worker, Worker); 5144 t.Start(); 5145 t.Join(); 5146} 5147REGISTER_TEST2(Run, 108, FEATURE) 5148} // namespace test108 5149 5150 5151// test109: TN. Checking happens before between parent and child threads. {{{1 5152namespace test109 { 5153// Check that the detector correctly connects 5154// pthread_create with the new thread 5155// and 5156// thread exit with pthread_join 5157const int N = 32; 5158static int GLOB[N]; 5159 5160void Worker(void *a) { 5161 usleep(10000); 5162// printf("--Worker : %ld %p\n", (int*)a - GLOB, (void*)pthread_self()); 5163 int *arg = (int*)a; 5164 (*arg)++; 5165} 5166 5167void Run() { 5168 printf("test109: negative\n"); 5169 MyThread *t[N]; 5170 for (int i = 0; i < N; i++) { 5171 t[i] = new MyThread(Worker, &GLOB[i]); 5172 } 5173 for (int i = 0; i < N; i++) { 5174 ANNOTATE_TRACE_MEMORY(&GLOB[i]); 5175 GLOB[i] = 1; 5176 t[i]->Start(); 5177// printf("--Started: %p\n", (void*)t[i]->tid()); 5178 } 5179 for (int i = 0; i < N; i++) { 5180// printf("--Joining: %p\n", (void*)t[i]->tid()); 5181 t[i]->Join(); 5182// printf("--Joined : %p\n", (void*)t[i]->tid()); 5183 GLOB[i]++; 5184 } 5185 for (int i = 0; i < N; i++) delete t[i]; 5186 5187 printf("\tGLOB=%d\n", GLOB[13]); 5188} 
5189REGISTER_TEST(Run, 109) 5190} // namespace test109 5191 5192 5193// test110: TP. Simple races with stack, global and heap objects. {{{1 5194namespace test110 { 5195int GLOB = 0; 5196static int STATIC; 5197 5198int *STACK = 0; 5199 5200int *MALLOC; 5201int *CALLOC; 5202int *REALLOC; 5203int *VALLOC; 5204int *PVALLOC; 5205int *MEMALIGN; 5206int *POSIX_MEMALIGN; 5207int *MMAP; 5208 5209int *NEW; 5210int *NEW_ARR; 5211 5212void Worker() { 5213 GLOB++; 5214 STATIC++; 5215 5216 (*STACK)++; 5217 5218 (*MALLOC)++; 5219 (*CALLOC)++; 5220 (*REALLOC)++; 5221 (*VALLOC)++; 5222 (*PVALLOC)++; 5223 (*MEMALIGN)++; 5224 (*POSIX_MEMALIGN)++; 5225 (*MMAP)++; 5226 5227 (*NEW)++; 5228 (*NEW_ARR)++; 5229} 5230void Run() { 5231 int x = 0; 5232 STACK = &x; 5233 5234 MALLOC = (int*)malloc(sizeof(int)); 5235 CALLOC = (int*)calloc(1, sizeof(int)); 5236 REALLOC = (int*)realloc(NULL, sizeof(int)); 5237 VALLOC = (int*)valloc(sizeof(int)); 5238 PVALLOC = (int*)valloc(sizeof(int)); // TODO: pvalloc breaks helgrind. 
5239 MEMALIGN = (int*)memalign(64, sizeof(int)); 5240 CHECK(0 == posix_memalign((void**)&POSIX_MEMALIGN, 64, sizeof(int))); 5241 MMAP = (int*)mmap(NULL, sizeof(int), PROT_READ | PROT_WRITE, 5242 MAP_PRIVATE | MAP_ANON, -1, 0); 5243 5244 NEW = new int; 5245 NEW_ARR = new int[10]; 5246 5247 5248 FAST_MODE_INIT(STACK); 5249 ANNOTATE_EXPECT_RACE(STACK, "real race on stack object"); 5250 FAST_MODE_INIT(&GLOB); 5251 ANNOTATE_EXPECT_RACE(&GLOB, "real race on global object"); 5252 FAST_MODE_INIT(&STATIC); 5253 ANNOTATE_EXPECT_RACE(&STATIC, "real race on a static global object"); 5254 FAST_MODE_INIT(MALLOC); 5255 ANNOTATE_EXPECT_RACE(MALLOC, "real race on a malloc-ed object"); 5256 FAST_MODE_INIT(CALLOC); 5257 ANNOTATE_EXPECT_RACE(CALLOC, "real race on a calloc-ed object"); 5258 FAST_MODE_INIT(REALLOC); 5259 ANNOTATE_EXPECT_RACE(REALLOC, "real race on a realloc-ed object"); 5260 FAST_MODE_INIT(VALLOC); 5261 ANNOTATE_EXPECT_RACE(VALLOC, "real race on a valloc-ed object"); 5262 FAST_MODE_INIT(PVALLOC); 5263 ANNOTATE_EXPECT_RACE(PVALLOC, "real race on a pvalloc-ed object"); 5264 FAST_MODE_INIT(MEMALIGN); 5265 ANNOTATE_EXPECT_RACE(MEMALIGN, "real race on a memalign-ed object"); 5266 FAST_MODE_INIT(POSIX_MEMALIGN); 5267 ANNOTATE_EXPECT_RACE(POSIX_MEMALIGN, "real race on a posix_memalign-ed object"); 5268 FAST_MODE_INIT(MMAP); 5269 ANNOTATE_EXPECT_RACE(MMAP, "real race on a mmap-ed object"); 5270 5271 FAST_MODE_INIT(NEW); 5272 ANNOTATE_EXPECT_RACE(NEW, "real race on a new-ed object"); 5273 FAST_MODE_INIT(NEW_ARR); 5274 ANNOTATE_EXPECT_RACE(NEW_ARR, "real race on a new[]-ed object"); 5275 5276 MyThreadArray t(Worker, Worker, Worker); 5277 t.Start(); 5278 t.Join(); 5279 printf("test110: positive (race on a stack object)\n"); 5280 printf("\tSTACK=%d\n", *STACK); 5281 CHECK(GLOB <= 3); 5282 CHECK(STATIC <= 3); 5283 5284 free(MALLOC); 5285 free(CALLOC); 5286 free(REALLOC); 5287 free(VALLOC); 5288 free(PVALLOC); 5289 free(MEMALIGN); 5290 free(POSIX_MEMALIGN); 5291 munmap(MMAP, 
sizeof(int)); 5292 delete NEW; 5293 delete [] NEW_ARR; 5294} 5295REGISTER_TEST(Run, 110) 5296} // namespace test110 5297 5298 5299// test111: TN. Unit test for a bug related to stack handling. {{{1 5300namespace test111 { 5301char *GLOB = 0; 5302bool COND = false; 5303Mutex mu; 5304const int N = 3000; 5305 5306void write_to_p(char *p, int val) { 5307 for (int i = 0; i < N; i++) 5308 p[i] = val; 5309} 5310 5311static bool ArgIsTrue(bool *arg) { 5312// printf("ArgIsTrue: %d tid=%d\n", *arg, (int)pthread_self()); 5313 return *arg == true; 5314} 5315 5316void f1() { 5317 char some_stack[N]; 5318 write_to_p(some_stack, 1); 5319 mu.LockWhen(Condition(&ArgIsTrue, &COND)); 5320 mu.Unlock(); 5321} 5322 5323void f2() { 5324 char some_stack[N]; 5325 char some_more_stack[N]; 5326 write_to_p(some_stack, 2); 5327 write_to_p(some_more_stack, 2); 5328} 5329 5330void f0() { f2(); } 5331 5332void Worker1() { 5333 f0(); 5334 f1(); 5335 f2(); 5336} 5337 5338void Worker2() { 5339 usleep(100000); 5340 mu.Lock(); 5341 COND = true; 5342 mu.Unlock(); 5343} 5344 5345void Run() { 5346 printf("test111: regression test\n"); 5347 MyThreadArray t(Worker1, Worker1, Worker2); 5348// AnnotateSetVerbosity(__FILE__, __LINE__, 3); 5349 t.Start(); 5350 t.Join(); 5351// AnnotateSetVerbosity(__FILE__, __LINE__, 1); 5352} 5353REGISTER_TEST2(Run, 111, FEATURE) 5354} // namespace test111 5355 5356// test112: STAB. 
Test for ANNOTATE_PUBLISH_MEMORY_RANGE{{{1 5357namespace test112 { 5358char *GLOB = 0; 5359const int N = 64 * 5; 5360Mutex mu; 5361bool ready = false; // under mu 5362int beg, end; // under mu 5363 5364Mutex mu1; 5365 5366void Worker() { 5367 5368 bool is_ready = false; 5369 int b, e; 5370 while (!is_ready) { 5371 mu.Lock(); 5372 is_ready = ready; 5373 b = beg; 5374 e = end; 5375 mu.Unlock(); 5376 usleep(1000); 5377 } 5378 5379 mu1.Lock(); 5380 for (int i = b; i < e; i++) { 5381 GLOB[i]++; 5382 } 5383 mu1.Unlock(); 5384} 5385 5386void PublishRange(int b, int e) { 5387 MyThreadArray t(Worker, Worker); 5388 ready = false; // runs before other threads 5389 t.Start(); 5390 5391 ANNOTATE_NEW_MEMORY(GLOB + b, e - b); 5392 ANNOTATE_TRACE_MEMORY(GLOB + b); 5393 for (int j = b; j < e; j++) { 5394 GLOB[j] = 0; 5395 } 5396 ANNOTATE_PUBLISH_MEMORY_RANGE(GLOB + b, e - b); 5397 5398 // hand off 5399 mu.Lock(); 5400 ready = true; 5401 beg = b; 5402 end = e; 5403 mu.Unlock(); 5404 5405 t.Join(); 5406} 5407 5408void Run() { 5409 printf("test112: stability (ANNOTATE_PUBLISH_MEMORY_RANGE)\n"); 5410 GLOB = new char [N]; 5411 5412 PublishRange(0, 10); 5413 PublishRange(3, 5); 5414 5415 PublishRange(12, 13); 5416 PublishRange(10, 14); 5417 5418 PublishRange(15, 17); 5419 PublishRange(16, 18); 5420 5421 // do few more random publishes. 5422 for (int i = 0; i < 20; i++) { 5423 const int begin = rand() % N; 5424 const int size = (rand() % (N - begin)) + 1; 5425 CHECK(size > 0); 5426 CHECK(begin + size <= N); 5427 PublishRange(begin, begin + size); 5428 } 5429 5430 printf("GLOB = %d\n", (int)GLOB[0]); 5431} 5432REGISTER_TEST2(Run, 112, STABILITY) 5433} // namespace test112 5434 5435 5436// test113: PERF. A lot of lock/unlock calls. 
// Many locks {{{1
namespace test113 {
// Performance test: ~100k iterations, each locking/unlocking a subset of
// seven mutexes chosen by the bits of the loop counter.
const int kNumIter = 100000;
const int kNumLocks = 7;
Mutex MU[kNumLocks];
void Run() {
  printf("test113: perf\n");
  for (int i = 0; i < kNumIter; i++ ) {
    // Lock the mutexes whose bit is set in i, then unlock in reverse order.
    for (int j = 0; j < kNumLocks; j++) {
      if (i & (1 << j)) MU[j].Lock();
    }
    for (int j = kNumLocks - 1; j >= 0; j--) {
      if (i & (1 << j)) MU[j].Unlock();
    }
  }
}
REGISTER_TEST(Run, 113)
}  // namespace test113


// test114: STAB. Recursive lock. {{{1
namespace test114 {
// Function-scope static initialization that itself triggers another
// function-scope static initialization (guard locks may nest/recurse).
int Bar() {
  static int bar = 1;
  return bar;
}
int Foo() {
  static int foo = Bar();  // nested static init inside static init
  return foo;
}
void Worker() {
  static int x = Foo();
  CHECK(x == 1);
}
void Run() {
  printf("test114: stab\n");
  MyThreadArray t(Worker, Worker);
  t.Start();
  t.Join();
}
REGISTER_TEST(Run, 114)
}  // namespace test114


// test115: TN. sem_open. {{{1
namespace test115 {
int tid = 0;
Mutex mu;
const char *kSemName = "drt-test-sem";

int GLOB = 0;

// Open (creating if needed) the named semaphore with an initial count of 3.
sem_t *DoSemOpen() {
  // TODO: there is some race report inside sem_open
  // for which suppressions do not work... (???)
  ANNOTATE_IGNORE_WRITES_BEGIN();
  sem_t *sem = sem_open(kSemName, O_CREAT, 0600, 3);
  ANNOTATE_IGNORE_WRITES_END();
  return sem;
}

void Worker() {
  mu.Lock();
  int my_tid = tid++;
  mu.Unlock();

  // First thread writes GLOB; the others read it after sem_open/sem_wait.
  if (my_tid == 0) {
    GLOB = 1;
  }

  // if the detector observes a happens-before arc between
  // sem_open and sem_wait, it will be silent.
  sem_t *sem = DoSemOpen();
  usleep(100000);
  CHECK(sem != SEM_FAILED);
  CHECK(sem_wait(sem) == 0);

  if (my_tid > 0) {
    CHECK(GLOB == 1);
  }
}

void Run() {
  printf("test115: stab (sem_open())\n");

  // just check that sem_open is not completely broken
  sem_unlink(kSemName);
  sem_t* sem = DoSemOpen();
  CHECK(sem != SEM_FAILED);
  CHECK(sem_wait(sem) == 0);
  sem_unlink(kSemName);

  // check that sem_open and sem_wait create a happens-before arc.
  MyThreadArray t(Worker, Worker, Worker);
  t.Start();
  t.Join();
  // clean up
  sem_unlink(kSemName);
}
REGISTER_TEST(Run, 115)
}  // namespace test115


// test116: TN. some operations with string<> objects. {{{1
namespace test116 {

// Each thread churns its own local string arrays (assignments, swaps,
// clears); there is no shared state, so no report is expected.
void Worker() {
  string A[10], B[10], C[10];
  for (int i = 0; i < 1000; i++) {
    for (int j = 0; j < 10; j++) {
      string &a = A[j];
      string &b = B[j];
      string &c = C[j];
      a = "sdl;fkjhasdflksj df";
      b = "sdf sdf;ljsd ";
      c = "'sfdf df";
      c = b;
      a = c;
      b = a;
      swap(a,b);
      swap(b,c);
    }
    for (int j = 0; j < 10; j++) {
      string &a = A[j];
      string &b = B[j];
      string &c = C[j];
      a.clear();
      b.clear();
      c.clear();
    }
  }
}

void Run() {
  printf("test116: negative (strings)\n");
  MyThreadArray t(Worker, Worker, Worker);
  t.Start();
  t.Join();
}
REGISTER_TEST2(Run, 116, FEATURE|EXCLUDE_FROM_ALL)
}  // namespace test116

// test117: TN. Many calls to function-scope static init.
// {{{1
namespace test117 {
const int N = 50;

// Slow initializer: makes the threads pile up on the static-init guard.
int Foo() {
  usleep(20000);
  return 1;
}

void Worker(void *a) {
  static int foo = Foo();  // 50 threads race to run this init exactly once
  CHECK(foo == 1);
}

void Run() {
  printf("test117: negative\n");
  MyThread *t[N];
  for (int i = 0; i < N; i++) {
    t[i] = new MyThread(Worker);
  }
  for (int i = 0; i < N; i++) {
    t[i]->Start();
  }
  for (int i = 0; i < N; i++) {
    t[i]->Join();
  }
  for (int i = 0; i < N; i++) delete t[i];
}
REGISTER_TEST(Run, 117)
}  // namespace test117



// test118 PERF: One signal, multiple waits. {{{1
namespace test118 {
int GLOB = 0;
const int kNumIter = 2000000;
void Signaller() {
  usleep(50000);
  ANNOTATE_CONDVAR_SIGNAL(&GLOB);
}
// Hammer ANNOTATE_CONDVAR_WAIT kNumIter times; sleeps once at the midpoint
// so a signal lands both before and after some of the waits.
void Waiter() {
  for (int i = 0; i < kNumIter; i++) {
    ANNOTATE_CONDVAR_WAIT(&GLOB);
    if (i == kNumIter / 2)
      usleep(100000);
  }
}
void Run() {
  printf("test118: perf\n");
  MyThreadArray t(Signaller, Waiter, Signaller, Waiter);
  t.Start();
  t.Join();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST(Run, 118)
}  // namespace test118


// test119: TP. Testing that malloc does not introduce any HB arc. {{{1
namespace test119 {
int GLOB = 0;
void Worker1() {
  GLOB = 1;
  free(malloc(123));
}
void Worker2() {
  usleep(100000);
  free(malloc(345));
  GLOB = 2;
}
void Run() {
  printf("test119: positive (checking if malloc creates HB arcs)\n");
  FAST_MODE_INIT(&GLOB);
  // If malloc's internal mutex is treated as pure happens-before, the
  // malloc/free pair would (wrongly, for this test) hide the race.
  if (!(Tsan_PureHappensBefore() && kMallocUsesMutex))
    ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "true race");
  MyThreadArray t(Worker1, Worker2);
  t.Start();
  t.Join();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST(Run, 119)
}  // namespace test119


// test120: TP. Thread1: write then read. Thread2: read. {{{1
namespace test120 {
int GLOB = 0;

void Thread1() {
  GLOB = 1;     // write
  CHECK(GLOB);  // read
}

void Thread2() {
  usleep(100000);
  CHECK(GLOB >= 0);  // read
}

void Run() {
  FAST_MODE_INIT(&GLOB);
  ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "TP (T1: write then read, T2: read)");
  printf("test120: positive\n");
  MyThreadArray t(Thread1, Thread2);
  GLOB = 1;
  t.Start();
  t.Join();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST(Run, 120)
}  // namespace test120


// test121: TP. Example of double-checked-locking {{{1
namespace test121 {
struct Foo {
  uintptr_t a, b[15];
} __attribute__ ((aligned (64)));

static Mutex mu;
static Foo *foo;

// Classic broken double-checked locking: the first 'if (!foo)' reads foo
// without holding mu, racing with the publication inside the lock.
void InitMe() {
  if (!foo) {
    MutexLock lock(&mu);
    if (!foo) {
      ANNOTATE_EXPECT_RACE_FOR_TSAN(&foo, "test121. Double-checked locking (ptr)");
      foo = new Foo;
      if (!Tsan_FastMode())
        ANNOTATE_EXPECT_RACE_FOR_TSAN(&foo->a, "test121. Double-checked locking (obj)");
      foo->a = 42;
    }
  }
}

void UseMe() {
  InitMe();
  CHECK(foo && foo->a == 42);
}

void Worker1() { UseMe(); }
void Worker2() { UseMe(); }
void Worker3() { UseMe(); }


void Run() {
  FAST_MODE_INIT(&foo);
  printf("test121: TP. Example of double-checked-locking\n");
  MyThreadArray t1(Worker1, Worker2, Worker3);
  t1.Start();
  t1.Join();
  delete foo;
}
REGISTER_TEST(Run, 121)
}  // namespace test121

// test122 TP: Simple test with RWLock {{{1
namespace test122 {
int VAR1 = 0;
int VAR2 = 0;
RWLock mu;

// Deliberate bug: writes *p while holding only a reader lock.
void WriteWhileHoldingReaderLock(int *p) {
  usleep(100000);
  ReaderLockScoped lock(&mu);  // Reader lock for writing. -- bug.
  (*p)++;
}

void CorrectWrite(int *p) {
  WriterLockScoped lock(&mu);
  (*p)++;
}

void Thread1() { WriteWhileHoldingReaderLock(&VAR1); }
void Thread2() { CorrectWrite(&VAR1); }
void Thread3() { CorrectWrite(&VAR2); }
void Thread4() { WriteWhileHoldingReaderLock(&VAR2); }


void Run() {
  printf("test122: positive (rw-lock)\n");
  VAR1 = 0;
  VAR2 = 0;
  ANNOTATE_TRACE_MEMORY(&VAR1);
  ANNOTATE_TRACE_MEMORY(&VAR2);
  if (!Tsan_PureHappensBefore()) {
    ANNOTATE_EXPECT_RACE_FOR_TSAN(&VAR1, "test122. TP. ReaderLock-ed while writing");
    ANNOTATE_EXPECT_RACE_FOR_TSAN(&VAR2, "test122. TP. ReaderLock-ed while writing");
  }
  MyThreadArray t(Thread1, Thread2, Thread3, Thread4);
  t.Start();
  t.Join();
}
REGISTER_TEST(Run, 122)
}  // namespace test122


// test123 TP: accesses of different sizes. {{{1
namespace test123 {

uint64_t MEM[8];

// Write 1 into (p) viewed as a 'size'-bit integer at index 'off'.
// NOTE(review): the upper-bound CHECKs use '<=' where '<' would be exact
// (e.g. a 32-bit access at off==2 would leave the 64-bit word) -- all call
// sites below stay within range, so this is only a loose sanity check.
#define GenericWrite(p,size,off) { \
  if (size == 64) {\
    CHECK(off == 0);\
    (p)[off] = 1;\
  } else if (size == 32) {\
    CHECK(off <= 2);\
    uint32_t *x = (uint32_t*)(p);\
    x[off] = 1;\
  } else if (size == 16) {\
    CHECK(off <= 4);\
    uint16_t *x = (uint16_t*)(p);\
    x[off] = 1;\
  } else if (size == 8) {\
    CHECK(off <= 8);\
    uint8_t *x = (uint8_t*)(p);\
    x[off] = 1;\
  } else {\
    CHECK(0);\
  }\
}\

// Q. Hey dude, why so many functions?
// A. I need different stack traces for different accesses.

void Wr64_0() { GenericWrite(&MEM[0], 64, 0); }
void Wr64_1() { GenericWrite(&MEM[1], 64, 0); }
void Wr64_2() { GenericWrite(&MEM[2], 64, 0); }
void Wr64_3() { GenericWrite(&MEM[3], 64, 0); }
void Wr64_4() { GenericWrite(&MEM[4], 64, 0); }
void Wr64_5() { GenericWrite(&MEM[5], 64, 0); }
void Wr64_6() { GenericWrite(&MEM[6], 64, 0); }
void Wr64_7() { GenericWrite(&MEM[7], 64, 0); }

void Wr32_0() { GenericWrite(&MEM[0], 32, 0); }
void Wr32_1() { GenericWrite(&MEM[1], 32, 1); }
void Wr32_2() { GenericWrite(&MEM[2], 32, 0); }
void Wr32_3() { GenericWrite(&MEM[3], 32, 1); }
void Wr32_4() { GenericWrite(&MEM[4], 32, 0); }
void Wr32_5() { GenericWrite(&MEM[5], 32, 1); }
void Wr32_6() { GenericWrite(&MEM[6], 32, 0); }
void Wr32_7() { GenericWrite(&MEM[7], 32, 1); }

void Wr16_0() { GenericWrite(&MEM[0], 16, 0); }
void Wr16_1() { GenericWrite(&MEM[1], 16, 1); }
void Wr16_2() { GenericWrite(&MEM[2], 16, 2); }
void Wr16_3() { GenericWrite(&MEM[3], 16, 3); }
void Wr16_4() { GenericWrite(&MEM[4], 16, 0); }
void Wr16_5() { GenericWrite(&MEM[5], 16, 1); }
void Wr16_6() { GenericWrite(&MEM[6], 16, 2); }
void Wr16_7() { GenericWrite(&MEM[7], 16, 3); }

void Wr8_0() { GenericWrite(&MEM[0], 8, 0); }
void Wr8_1() { GenericWrite(&MEM[1], 8, 1); }
void Wr8_2() { GenericWrite(&MEM[2], 8, 2); }
void Wr8_3() { GenericWrite(&MEM[3], 8, 3); }
void Wr8_4() { GenericWrite(&MEM[4], 8, 4); }
void Wr8_5() { GenericWrite(&MEM[5], 8, 5); }
void Wr8_6() { GenericWrite(&MEM[6], 8, 6); }
void Wr8_7() { GenericWrite(&MEM[7], 8, 7); }

void WriteAll64() {
  Wr64_0();
  Wr64_1();
  Wr64_2();
  Wr64_3();
  Wr64_4();
  Wr64_5();
  Wr64_6();
  Wr64_7();
}

void WriteAll32() {
  Wr32_0();
  Wr32_1();
  Wr32_2();
  Wr32_3();
  Wr32_4();
  Wr32_5();
  Wr32_6();
  Wr32_7();
}

void WriteAll16() {
  Wr16_0();
  Wr16_1();
  Wr16_2();
  Wr16_3();
  Wr16_4();
  Wr16_5();
  Wr16_6();
  Wr16_7();
}

void WriteAll8() {
  Wr8_0();
  Wr8_1();
  Wr8_2();
  Wr8_3();
  Wr8_4();
  Wr8_5();
  Wr8_6();
  Wr8_7();
}

void W00() { WriteAll64(); }
void W01() { WriteAll64(); }
void W02() { WriteAll64(); }

void W10() { WriteAll32(); }
void W11() { WriteAll32(); }
void W12() { WriteAll32(); }

void W20() { WriteAll16(); }
void W21() { WriteAll16(); }
void W22() { WriteAll16(); }

void W30() { WriteAll8(); }
void W31() { WriteAll8(); }
void W32() { WriteAll8(); }

typedef void (*F)(void);

// Run f1 racing with f2, then again with the thread order reversed, so the
// detector sees both access orders on freshly re-initialized memory.
void TestTwoSizes(F f1, F f2) {
  // first f1, then f2
  ANNOTATE_NEW_MEMORY(MEM, sizeof(MEM));
  memset(MEM, 0, sizeof(MEM));
  MyThreadArray t1(f1, f2);
  t1.Start();
  t1.Join();
  // reverse order
  ANNOTATE_NEW_MEMORY(MEM, sizeof(MEM));
  memset(MEM, 0, sizeof(MEM));
  MyThreadArray t2(f2, f1);
  t2.Start();
  t2.Join();
}

void Run() {
  printf("test123: positive (different sizes)\n");
  TestTwoSizes(W00, W10);
//  TestTwoSizes(W01, W20);
//  TestTwoSizes(W02, W30);
//  TestTwoSizes(W11, W21);
//  TestTwoSizes(W12, W31);
//  TestTwoSizes(W22, W32);

}
REGISTER_TEST2(Run, 123, FEATURE|EXCLUDE_FROM_ALL)
}  // namespace test123


// test124: What happens if we delete an unlocked lock? {{{1
namespace test124 {
// This test does not work with pthreads (you can't call
// pthread_mutex_destroy on a locked lock).
int GLOB = 0;
const int N = 1000;
// Each worker allocates N mutexes, locks all of them, and then deletes the
// whole array while every mutex is still held.
void Worker() {
  Mutex *a_large_local_array_of_mutexes;
  a_large_local_array_of_mutexes = new Mutex[N];
  for (int i = 0; i < N; i++) {
    a_large_local_array_of_mutexes[i].Lock();
  }
  delete []a_large_local_array_of_mutexes;
  GLOB = 1;
}

void Run() {
  printf("test124: negative\n");
  MyThreadArray t(Worker, Worker, Worker);
  t.Start();
  t.Join();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST2(Run, 124, FEATURE|EXCLUDE_FROM_ALL)
}  // namespace test124


// test125 TN: Backwards lock (annotated). {{{1
namespace test125 {
// This test uses "Backwards mutex" locking protocol.
// We take a *reader* lock when writing to a per-thread data
// (GLOB[thread_num]) and we take a *writer* lock when we
// are reading from the entire array at once.
//
// Such locking protocol is not understood by ThreadSanitizer's
// hybrid state machine. So, you either have to use a pure-happens-before
// detector ("tsan --pure-happens-before") or apply pure happens-before mode
// to this particular lock by using ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(&mu).

const int n_threads = 3;
RWLock mu;
int GLOB[n_threads];

int adder_num;  // updated atomically.

void Adder() {
  int my_num = AtomicIncrement(&adder_num, 1);

  ReaderLockScoped lock(&mu);
  GLOB[my_num]++;
}

void Aggregator() {
  int sum = 0;
  {
    WriterLockScoped lock(&mu);
    for (int i = 0; i < n_threads; i++) {
      sum += GLOB[i];
    }
  }
  printf("sum=%d\n", sum);
}

void Run() {
  printf("test125: negative\n");

  ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(&mu);

  // run Adders, then Aggregator
  {
    MyThreadArray t(Adder, Adder, Adder, Aggregator);
    t.Start();
    t.Join();
  }

  // Run Aggregator first.
  adder_num = 0;
  {
    MyThreadArray t(Aggregator, Adder, Adder, Adder);
    t.Start();
    t.Join();
  }

}
REGISTER_TEST(Run, 125)
}  // namespace test125

// test126 TN: test for BlockingCounter {{{1
namespace test126 {
// The main thread writes GLOB only after Wait() returns, i.e. after all
// three workers have decremented the counter, so workers always see 0.
BlockingCounter *blocking_counter;
int GLOB = 0;
void Worker() {
  CHECK(blocking_counter);
  CHECK(GLOB == 0);
  blocking_counter->DecrementCount();
}
void Run() {
  printf("test126: negative\n");
  MyThreadArray t(Worker, Worker, Worker);
  blocking_counter = new BlockingCounter(3);
  t.Start();
  blocking_counter->Wait();
  GLOB = 1;
  t.Join();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST(Run, 126)
}  // namespace test126


// test127. Bad code: unlocking a mutex locked by another thread. {{{1
namespace test127 {
Mutex mu;
void Thread1() {
  mu.Lock();
}
void Thread2() {
  usleep(100000);
  mu.Unlock();  // deliberate API misuse: unlocks Thread1's lock
}
void Run() {
  printf("test127: unlocking a mutex locked by another thread.\n");
  MyThreadArray t(Thread1, Thread2);
  t.Start();
  t.Join();
}
REGISTER_TEST(Run, 127)
}  // namespace test127

// test128. Suppressed code in concurrent accesses {{{1
// Please use --suppressions=unittest.supp flag when running this test.
namespace test128 {
Mutex mu;
int GLOB = 0;
void Worker() {
  usleep(100000);
  mu.Lock();
  GLOB++;
  mu.Unlock();
}
// Races with Worker, but the suppressions file should silence reports
// whose stack contains this function.
void ThisFunctionShouldBeSuppressed() {
  GLOB++;
}
void Run() {
  printf("test128: Suppressed code in concurrent accesses.\n");
  MyThreadArray t(Worker, ThisFunctionShouldBeSuppressed);
  t.Start();
  t.Join();
}
REGISTER_TEST2(Run, 128, FEATURE | EXCLUDE_FROM_ALL)
}  // namespace test128

// test129: TN. Synchronization via ReaderLockWhen().
// {{{1
namespace test129 {
int GLOB = 0;
Mutex MU;
// Condition callback for ReaderLockWhen; also writes into the waiter's
// local 'param' while evaluated (under the lock's internal machinery).
bool WeirdCondition(int* param) {
  *param = GLOB;  // a write into Waiter's memory
  return GLOB > 0;
}
void Waiter() {
  int param = 0;
  // NOTE: '&param' restored here -- the source had a mojibake ("¶m")
  // from a mangled '&param'.
  MU.ReaderLockWhen(Condition(WeirdCondition, &param));
  MU.ReaderUnlock();
  CHECK(GLOB > 0);
  CHECK(param > 0);
}
void Waker() {
  usleep(100000);  // Make sure the waiter blocks.
  MU.Lock();
  GLOB++;
  MU.Unlock();  // calls ANNOTATE_CONDVAR_SIGNAL;
}
void Run() {
  printf("test129: Synchronization via ReaderLockWhen()\n");
  MyThread mt(Waiter, NULL, "Waiter Thread");
  mt.Start();
  Waker();
  mt.Join();
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST2(Run, 129, FEATURE);
}  // namespace test129

// test130: TN. Per-thread. {{{1
namespace test130 {
#ifndef NO_TLS
// This test verifies that the race detector handles
// thread-local storage (TLS) correctly.
// As of 09-03-30 ThreadSanitizer has a bug:
//   - Thread1 starts
//   - Thread1 touches per_thread_global
//   - Thread1 ends
//   - Thread2 starts (and there is no happens-before relation between it and
//     Thread1)
//   - Thread2 touches per_thread_global
// It may happen so that Thread2 will have per_thread_global in the same address
// as Thread1. Since there is no happens-before relation between threads,
// ThreadSanitizer reports a race.
//
// test131 does the same for stack.

static __thread int per_thread_global[10] = {0};

void RealWorker() {  // Touch per_thread_global.
  per_thread_global[1]++;
  errno++;
}

void Worker() {  // Spawn few threads that touch per_thread_global.
  MyThreadArray t(RealWorker, RealWorker);
  t.Start();
  t.Join();
}
// Staggered sleeps make thread lifetimes non-overlapping so TLS slots
// are likely to be reused at the same addresses.
void Worker0() { sleep(0); Worker(); }
void Worker1() { sleep(1); Worker(); }
void Worker2() { sleep(2); Worker(); }
void Worker3() { sleep(3); Worker(); }

void Run() {
  printf("test130: Per-thread\n");
  MyThreadArray t1(Worker0, Worker1, Worker2, Worker3);
  t1.Start();
  t1.Join();
  printf("\tper_thread_global=%d\n", per_thread_global[1]);
}
REGISTER_TEST(Run, 130)
#endif // NO_TLS
}  // namespace test130


// test131: TN. Stack. {{{1
namespace test131 {
// Same as test130, but for stack.

void RealWorker() {  // Touch stack.
  int stack_var = 0;
  stack_var++;
}

void Worker() {  // Spawn few threads that touch stack.
  MyThreadArray t(RealWorker, RealWorker);
  t.Start();
  t.Join();
}
void Worker0() { sleep(0); Worker(); }
void Worker1() { sleep(1); Worker(); }
void Worker2() { sleep(2); Worker(); }
void Worker3() { sleep(3); Worker(); }

void Run() {
  printf("test131: stack\n");
  MyThreadArray t(Worker0, Worker1, Worker2, Worker3);
  t.Start();
  t.Join();
}
REGISTER_TEST(Run, 131)
}  // namespace test131


// test132: TP. Simple race (write vs write). Works in fast-mode. {{{1
namespace test132 {
int GLOB = 0;
void Worker() { GLOB = 1; }

void Run1() {
  FAST_MODE_INIT(&GLOB);
  ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test132");
  printf("test132: positive; &GLOB=%p\n", &GLOB);
  ANNOTATE_TRACE_MEMORY(&GLOB);
  GLOB = 7;
  MyThreadArray t(Worker, Worker);
  t.Start();
  t.Join();
}

void Run() {
  Run1();
}
REGISTER_TEST(Run, 132);
}  // namespace test132


// test133: TP. Simple race (write vs write). Works in fast mode.
// {{{1
namespace test133 {
// Same as test132, but everything is run from a separate thread spawned from
// the main thread.
int GLOB = 0;
void Worker() { GLOB = 1; }

void Run1() {
  FAST_MODE_INIT(&GLOB);
  ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "test133");
  printf("test133: positive; &GLOB=%p\n", &GLOB);
  ANNOTATE_TRACE_MEMORY(&GLOB);
  GLOB = 7;
  MyThreadArray t(Worker, Worker);
  t.Start();
  t.Join();
}
void Run() {
  MyThread t(Run1);
  t.Start();
  t.Join();
}
REGISTER_TEST(Run, 133);
}  // namespace test133


// test134 TN. Swap. Variant of test79. {{{1
namespace test134 {
#if 0
typedef __gnu_cxx::hash_map<int, int> map_t;
#else
typedef std::map<int, int> map_t;
#endif
map_t map;
Mutex mu;
// Here we use swap to pass map between threads.
// The synchronization is correct, but w/o the annotation
// any hybrid detector will complain.

// Swap is very unfriendly to the lock-set (and hybrid) race detectors.
// Since tmp is destructed outside the mutex, we need to have a happens-before
// arc between any prior access to map and here.
// Since the internals of tmp are created ouside the mutex and are passed to
// other thread, we need to have a h-b arc between here and any future access.
// These arcs can be created by HAPPENS_{BEFORE,AFTER} annotations, but it is
// much simpler to apply pure-happens-before mode to the mutex mu.
void Swapper() {
  map_t tmp;
  MutexLock lock(&mu);
  ANNOTATE_HAPPENS_AFTER(&map);
  // We swap the new empty map 'tmp' with 'map'.
  map.swap(tmp);
  ANNOTATE_HAPPENS_BEFORE(&map);
  // tmp (which is the old version of map) is destroyed here.
}

void Worker() {
  MutexLock lock(&mu);
  ANNOTATE_HAPPENS_AFTER(&map);
  map[1]++;
  ANNOTATE_HAPPENS_BEFORE(&map);
}

void Run() {
  printf("test134: negative (swap)\n");
  // ********************** Shorter way: ***********************
  // ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(&mu);
  MyThreadArray t(Worker, Worker, Swapper, Worker, Worker);
  t.Start();
  t.Join();
}
REGISTER_TEST(Run, 134)
}  // namespace test134

// test135 TN. mmap/munmap in many threads. {{{1
namespace test135 {

// Repeatedly mmap a page, write to it, and unmap it; different threads may
// get the same addresses back from the kernel with no h-b arc between them.
void SubWorker() {
  const long SIZE = 65536;
  for (int i = 0; i < 32; i++) {
    int *ptr = (int*)mmap(NULL, SIZE, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANON, -1, 0);
    *ptr = 42;
    munmap(ptr, SIZE);
  }
}

void Worker() {
  MyThreadArray t(SubWorker, SubWorker, SubWorker, SubWorker);
  t.Start();
  t.Join();
}

void Run() {
  printf("test135: negative (mmap)\n");
  MyThreadArray t(Worker, Worker, Worker, Worker);
  t.Start();
  t.Join();
}
REGISTER_TEST(Run, 135)
}  // namespace test135

// test136. Unlock twice. {{{1
namespace test136 {
// Deliberate API misuse on an ERRORCHECK mutex; the calls after the first
// unlock are expected to fail, and the error codes are just printed.
void Run() {
  printf("test136: unlock twice\n");
  pthread_mutexattr_t attr;
  CHECK(0 == pthread_mutexattr_init(&attr));
  CHECK(0 == pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK));

  pthread_mutex_t mu;
  CHECK(0 == pthread_mutex_init(&mu, &attr));
  CHECK(0 == pthread_mutex_lock(&mu));
  CHECK(0 == pthread_mutex_unlock(&mu));
  int ret_unlock = pthread_mutex_unlock(&mu);  // unlocking twice.
  int ret_destroy = pthread_mutex_destroy(&mu);
  printf(" pthread_mutex_unlock returned %d\n", ret_unlock);
  printf(" pthread_mutex_destroy returned %d\n", ret_destroy);

}

REGISTER_TEST(Run, 136)
}  // namespace test136

// test137 TP. Races on stack variables.
// {{{1
namespace test137 {
int GLOB = 0;
ProducerConsumerQueue q(10);

// Each worker publishes the address of its own stack variable through the
// queue; the next consumer then writes through that pointer -- a race on
// stack memory.
void Worker() {
  int stack;
  int *tmp = (int*)q.Get();
  (*tmp)++;
  int *racey = &stack;
  q.Put(racey);
  (*racey)++;
  usleep(150000);
  // We may miss the races if we sleep less due to die_memory events...
}

void Run() {
  int tmp = 0;
  printf("test137: TP. Races on stack variables.\n");
  q.Put(&tmp);
  MyThreadArray t(Worker, Worker, Worker, Worker);
  t.Start();
  t.Join();
  q.Get();
}

REGISTER_TEST2(Run, 137, FEATURE | EXCLUDE_FROM_ALL)
}  // namespace test137

// test138 FN. Two closures hit the same thread in ThreadPool. {{{1
namespace test138 {
int GLOB = 0;

void Worker() {
  usleep(100000);
  GLOB++;
}

void Run() {
  FAST_MODE_INIT(&GLOB);
  printf("test138: FN. Two closures hit the same thread in ThreadPool.\n");

  // When using thread pools, two concurrent callbacks might be scheduled
  // onto the same executor thread. As a result, unnecessary happens-before
  // relation may be introduced between callbacks.
  // If we set the number of executor threads to 1, any known data
  // race detector will be silent. However, the same situation may happen
  // with any number of executor threads (with some probability).
  ThreadPool tp(1);
  tp.StartWorkers();
  tp.Add(NewCallback(Worker));
  tp.Add(NewCallback(Worker));
}

REGISTER_TEST2(Run, 138, FEATURE)
}  // namespace test138

// test139: FN. A true race hidden by reference counting annotation. {{{1
namespace test139 {
// The two GLOB++ accesses race, but the annotated Unref() calls create a
// (false, for GLOB) happens-before arc that hides the race.
int GLOB = 0;
RefCountedClass *obj;

void Worker1() {
  GLOB++;  // First access.
  obj->Unref();
}

void Worker2() {
  usleep(100000);
  obj->Unref();
  GLOB++;  // Second access.
}

void Run() {
  FAST_MODE_INIT(&GLOB);
  printf("test139: FN. A true race hidden by reference counting annotation.\n");

  obj = new RefCountedClass;
  obj->AnnotateUnref();
  obj->Ref();
  obj->Ref();
  MyThreadArray mt(Worker1, Worker2);
  mt.Start();
  mt.Join();
}

REGISTER_TEST2(Run, 139, FEATURE)
}  // namespace test139

// test140 TN. Swap. Variant of test79 and test134. {{{1
namespace test140 {
#if 0
typedef __gnu_cxx::hash_map<int, int> Container;
#else
typedef std::map<int,int> Container;
#endif
Mutex mu;
static Container container;

// Here we use swap to pass a Container between threads.
// The synchronization is correct, but w/o the annotation
// any hybrid detector will complain.
//
// Unlike the test134, we try to have a minimal set of annotations
// so that extra h-b arcs do not hide other races.

// Swap is very unfriendly to the lock-set (and hybrid) race detectors.
// Since tmp is destructed outside the mutex, we need to have a happens-before
// arc between any prior access to map and here.
// Since the internals of tmp are created ouside the mutex and are passed to
// other thread, we need to have a h-b arc between here and any future access.
//
// We want to be able to annotate swapper so that we don't need to annotate
// anything else.
void Swapper() {
  Container tmp;
  tmp[1] = tmp[2] = tmp[3] = 0;
  {
    MutexLock lock(&mu);
    container.swap(tmp);
    // we are unpublishing the old container.
    ANNOTATE_UNPUBLISH_MEMORY_RANGE(&container, sizeof(container));
    // we are publishing the new container.
    ANNOTATE_PUBLISH_MEMORY_RANGE(&container, sizeof(container));
  }
  tmp[1]++;
  tmp[2]++;
  // tmp (which is the old version of container) is destroyed here.
}

void Worker() {
  MutexLock lock(&mu);
  container[1]++;
  int *v = &container[2];
  for (int i = 0; i < 10; i++) {
    // if uncommented, this will break ANNOTATE_UNPUBLISH_MEMORY_RANGE():
    // ANNOTATE_HAPPENS_BEFORE(v);
    if (i % 3) {
      (*v)++;
    }
  }
}

void Run() {
  printf("test140: negative (swap) %p\n", &container);
  MyThreadArray t(Worker, Worker, Swapper, Worker, Worker);
  t.Start();
  t.Join();
}
REGISTER_TEST(Run, 140)
}  // namespace test140

// test141 FP. unlink/fopen, rmdir/opendir. {{{1
namespace test141 {
// Spin-loops on file/directory existence are used as an ad-hoc signal;
// a detector that sees no h-b arc through the filesystem reports a false
// positive on GLOB1/GLOB2.
int GLOB1 = 0,
    GLOB2 = 0;
char *dir_name = NULL,
     *filename = NULL;

void Waker1() {
  usleep(100000);
  GLOB1 = 1;  // Write
  // unlink deletes a file 'filename'
  // which exits spin-loop in Waiter1().
  printf(" Deleting file...\n");
  CHECK(unlink(filename) == 0);
}

void Waiter1() {
  FILE *tmp;
  while ((tmp = fopen(filename, "r")) != NULL) {
    fclose(tmp);
    usleep(10000);
  }
  printf(" ...file has been deleted\n");
  GLOB1 = 2;  // Write
}

void Waker2() {
  usleep(100000);
  GLOB2 = 1;  // Write
  // rmdir deletes a directory 'dir_name'
  // which exit spin-loop in Waker().
  printf(" Deleting directory...\n");
  CHECK(rmdir(dir_name) == 0);
}

void Waiter2() {
  DIR *tmp;
  while ((tmp = opendir(dir_name)) != NULL) {
    closedir(tmp);
    usleep(10000);
  }
  printf(" ...directory has been deleted\n");
  GLOB2 = 2;
}

void Run() {
  FAST_MODE_INIT(&GLOB1);
  FAST_MODE_INIT(&GLOB2);
  printf("test141: FP. unlink/fopen, rmdir/opendir.\n");

  dir_name = strdup("/tmp/tsan-XXXXXX");
  IGNORE_RETURN_VALUE(mkdtemp(dir_name));

  filename = strdup((std::string() + dir_name + "/XXXXXX").c_str());
  const int fd = mkstemp(filename);
  CHECK(fd >= 0);
  close(fd);

  MyThreadArray mta1(Waker1, Waiter1);
  mta1.Start();
  mta1.Join();

  MyThreadArray mta2(Waker2, Waiter2);
  mta2.Start();
  mta2.Join();
  free(filename);
  filename = 0;
  free(dir_name);
  dir_name = 0;
}
REGISTER_TEST(Run, 141)
}  // namespace test141


// Simple FIFO queue annotated with PCQ annotations. {{{1
class FifoMessageQueue {
 public:
  FifoMessageQueue() { ANNOTATE_PCQ_CREATE(this); }
  ~FifoMessageQueue() { ANNOTATE_PCQ_DESTROY(this); }
  // Send a message. 'message' should be positive.
  void Put(int message) {
    CHECK(message);
    MutexLock lock(&mu_);
    ANNOTATE_PCQ_PUT(this);
    q_.push(message);
  }
  // Return the message from the queue and pop it
  // or return 0 if there are no messages.
  int Get() {
    MutexLock lock(&mu_);
    if (q_.empty()) return 0;
    int res = q_.front();
    q_.pop();
    ANNOTATE_PCQ_GET(this);
    return res;
  }
 private:
  Mutex mu_;
  queue<int> q_;
};


// test142: TN. Check PCQ_* annotations. {{{1
namespace test142 {
// Putter writes to array[i] and sends a message 'i'.
// Getters receive messages and read array[message].
// PCQ_* annotations calm down the hybrid detectors.
6597 6598const int N = 1000; 6599int array[N+1]; 6600 6601FifoMessageQueue q; 6602 6603void Putter() { 6604 for (int i = 1; i <= N; i++) { 6605 array[i] = i*i; 6606 q.Put(i); 6607 usleep(1000); 6608 } 6609} 6610 6611void Getter() { 6612 int non_zero_received = 0; 6613 for (int i = 1; i <= N; i++) { 6614 int res = q.Get(); 6615 if (res > 0) { 6616 CHECK(array[res] = res * res); 6617 non_zero_received++; 6618 } 6619 usleep(1000); 6620 } 6621 printf("T=%d: non_zero_received=%d\n", 6622 (int)pthread_self(), non_zero_received); 6623} 6624 6625void Run() { 6626 printf("test142: tests PCQ annotations\n"); 6627 MyThreadArray t(Putter, Getter, Getter); 6628 t.Start(); 6629 t.Join(); 6630} 6631REGISTER_TEST(Run, 142) 6632} // namespace test142 6633 6634 6635// test143: TP. Check PCQ_* annotations. {{{1 6636namespace test143 { 6637// True positive. 6638// We have a race on GLOB between Putter and one of the Getters. 6639// Pure h-b will not see it. 6640// If FifoMessageQueue was annotated using HAPPENS_BEFORE/AFTER, the race would 6641// be missed too. 6642// PCQ_* annotations do not hide this race. 6643int GLOB = 0; 6644 6645FifoMessageQueue q; 6646 6647void Putter() { 6648 GLOB = 1; 6649 q.Put(1); 6650} 6651 6652void Getter() { 6653 usleep(10000); 6654 q.Get(); 6655 CHECK(GLOB == 1); // Race here 6656} 6657 6658void Run() { 6659 q.Put(1); 6660 if (!Tsan_PureHappensBefore()) { 6661 ANNOTATE_EXPECT_RACE_FOR_TSAN(&GLOB, "true races"); 6662 } 6663 printf("test143: tests PCQ annotations (true positive)\n"); 6664 MyThreadArray t(Putter, Getter, Getter); 6665 t.Start(); 6666 t.Join(); 6667} 6668REGISTER_TEST(Run, 143); 6669} // namespace test143 6670 6671 6672 6673 6674// test300: {{{1 6675namespace test300 { 6676int GLOB = 0; 6677void Run() { 6678} 6679REGISTER_TEST2(Run, 300, RACE_DEMO) 6680} // namespace test300 6681 6682// test301: Simple race. {{{1 6683namespace test301 { 6684Mutex mu1; // This Mutex guards var. 6685Mutex mu2; // This Mutex is not related to var. 
6686int var; // GUARDED_BY(mu1) 6687 6688void Thread1() { // Runs in thread named 'test-thread-1'. 6689 MutexLock lock(&mu1); // Correct Mutex. 6690 var = 1; 6691} 6692 6693void Thread2() { // Runs in thread named 'test-thread-2'. 6694 MutexLock lock(&mu2); // Wrong Mutex. 6695 var = 2; 6696} 6697 6698void Run() { 6699 var = 0; 6700 printf("test301: simple race.\n"); 6701 MyThread t1(Thread1, NULL, "test-thread-1"); 6702 MyThread t2(Thread2, NULL, "test-thread-2"); 6703 t1.Start(); 6704 t2.Start(); 6705 t1.Join(); 6706 t2.Join(); 6707} 6708REGISTER_TEST2(Run, 301, RACE_DEMO) 6709} // namespace test301 6710 6711// test302: Complex race which happens at least twice. {{{1 6712namespace test302 { 6713// In this test we have many different accesses to GLOB and only one access 6714// is not synchronized properly. 6715int GLOB = 0; 6716 6717Mutex MU1; 6718Mutex MU2; 6719void Worker() { 6720 for(int i = 0; i < 100; i++) { 6721 switch(i % 4) { 6722 case 0: 6723 // This read is protected correctly. 6724 MU1.Lock(); CHECK(GLOB >= 0); MU1.Unlock(); 6725 break; 6726 case 1: 6727 // Here we used the wrong lock! The reason of the race is here. 6728 MU2.Lock(); CHECK(GLOB >= 0); MU2.Unlock(); 6729 break; 6730 case 2: 6731 // This read is protected correctly. 6732 MU1.Lock(); CHECK(GLOB >= 0); MU1.Unlock(); 6733 break; 6734 case 3: 6735 // This write is protected correctly. 6736 MU1.Lock(); GLOB++; MU1.Unlock(); 6737 break; 6738 } 6739 // sleep a bit so that the threads interleave 6740 // and the race happens at least twice. 6741 usleep(100); 6742 } 6743} 6744 6745void Run() { 6746 printf("test302: Complex race that happens twice.\n"); 6747 MyThread t1(Worker), t2(Worker); 6748 t1.Start(); 6749 t2.Start(); 6750 t1.Join(); t2.Join(); 6751} 6752REGISTER_TEST2(Run, 302, RACE_DEMO) 6753} // namespace test302 6754 6755 6756// test303: Need to trace the memory to understand the report. 
// {{{1  (vim fold marker; tail of the split test303 header comment)
namespace test303 {
int GLOB = 0;

Mutex MU;
// Unsynchronized read: races with Worker2's locked write below.
void Worker1() { CHECK(GLOB >= 0); }
void Worker2() { MU.Lock(); GLOB=1; MU.Unlock();}

void Run() {
  printf("test303: a race that needs annotations.\n");
  ANNOTATE_TRACE_MEMORY(&GLOB);
  MyThreadArray t(Worker1, Worker2);
  t.Start();
  t.Join();
}
REGISTER_TEST2(Run, 303, RACE_DEMO)
}  // namespace test303



// test304: Can not trace the memory, since it is a library object. {{{1
namespace test304 {
string *STR;
Mutex   MU;

void Worker1() {
  sleep(0);
  ANNOTATE_CONDVAR_SIGNAL((void*)0xDEADBEAF);
  MU.Lock(); CHECK(STR->length() >= 4); MU.Unlock();
}
void Worker2() {
  sleep(1);
  ANNOTATE_CONDVAR_SIGNAL((void*)0xDEADBEAF);
  CHECK(STR->length() >= 4);  // Unprotected!
}
void Worker3() {
  sleep(2);
  ANNOTATE_CONDVAR_SIGNAL((void*)0xDEADBEAF);
  MU.Lock(); CHECK(STR->length() >= 4); MU.Unlock();
}
void Worker4() {
  sleep(3);
  ANNOTATE_CONDVAR_SIGNAL((void*)0xDEADBEAF);
  MU.Lock(); *STR += " + a very very long string"; MU.Unlock();
}

void Run() {
  STR = new string ("The String");
  printf("test304: a race where memory tracing does not work.\n");
  MyThreadArray t(Worker1, Worker2, Worker3, Worker4);
  t.Start();
  t.Join();

  printf("%s\n", STR->c_str());
  delete STR;
}
REGISTER_TEST2(Run, 304, RACE_DEMO)
}  // namespace test304



// test305: A bit more tricky: two locks used inconsistenly. {{{1
namespace test305 {
int GLOB = 0;

// In this test GLOB is protected by MU1 and MU2, but inconsistently.
// The TRACES observed by helgrind are:
// TRACE[1]: Access{T2/S2 wr} -> new State{Mod; #LS=2; #SS=1; T2/S2}
// TRACE[2]: Access{T4/S9 wr} -> new State{Mod; #LS=1; #SS=2; T2/S2, T4/S9}
// TRACE[3]: Access{T5/S13 wr} -> new State{Mod; #LS=1; #SS=3; T2/S2, T4/S9, T5/S13}
// TRACE[4]: Access{T6/S19 wr} -> new State{Mod; #LS=0; #SS=4; T2/S2, T4/S9, T5/S13, T6/S19}
//
// The guilty access is either Worker2() or Worker4(), depending on
// which mutex is supposed to protect GLOB.
Mutex MU1;
Mutex MU2;
void Worker1() { MU1.Lock(); MU2.Lock(); GLOB=1; MU2.Unlock(); MU1.Unlock(); }
void Worker2() { MU1.Lock();             GLOB=2;               MU1.Unlock(); }
void Worker3() { MU1.Lock(); MU2.Lock(); GLOB=3; MU2.Unlock(); MU1.Unlock(); }
void Worker4() {             MU2.Lock(); GLOB=4; MU2.Unlock();               }

void Run() {
  ANNOTATE_TRACE_MEMORY(&GLOB);
  printf("test305: simple race.\n");
  MyThread t1(Worker1), t2(Worker2), t3(Worker3), t4(Worker4);
  // Stagger the starts so each worker runs in a distinct segment.
  t1.Start(); usleep(100);
  t2.Start(); usleep(100);
  t3.Start(); usleep(100);
  t4.Start(); usleep(100);
  t1.Join();   t2.Join();   t3.Join();   t4.Join();
}
REGISTER_TEST2(Run, 305, RACE_DEMO)
}  // namespace test305

// test306: Two locks are used to protect a var. {{{1
namespace test306 {
int GLOB = 0;
// Thread1 and Thread2 access the var under two locks.
// Thread3 uses no locks.
Mutex MU1;
Mutex MU2;
// Both correct writers hold MU1 and MU2; Worker3 writes with no lock => race.
void Worker1() { MU1.Lock(); MU2.Lock(); GLOB=1; MU2.Unlock(); MU1.Unlock(); }
void Worker2() { MU1.Lock(); MU2.Lock(); GLOB=3; MU2.Unlock(); MU1.Unlock(); }
void Worker3() {                         GLOB=4; }

void Run() {
  ANNOTATE_TRACE_MEMORY(&GLOB);
  printf("test306: simple race.\n");
  MyThread t1(Worker1), t2(Worker2), t3(Worker3);
  t1.Start(); usleep(100);
  t2.Start(); usleep(100);
  t3.Start(); usleep(100);
  t1.Join();   t2.Join();   t3.Join();
}
REGISTER_TEST2(Run, 306, RACE_DEMO)
}  // namespace test306

// test307: Simple race, code with control flow {{{1
namespace test307 {
int *GLOB = 0;
volatile /*to fake the compiler*/ bool some_condition = true;


void SomeFunc() { }

int FunctionWithControlFlow() {
  int unrelated_stuff = 0;
  unrelated_stuff++;
  SomeFunc();                // "--keep-history=1" will point somewhere here.
  if (some_condition) {      // Or here
    if (some_condition) {
      unrelated_stuff++;     // Or here.
      unrelated_stuff++;
      (*GLOB)++;             // "--keep-history=2" will point here (experimental).
    }
  }
  usleep(100000);
  return unrelated_stuff;
}

// Nested wrappers give the race report a non-trivial call stack.
void Worker1() { FunctionWithControlFlow(); }
void Worker2() { Worker1(); }
void Worker3() { Worker2(); }
void Worker4() { Worker3(); }

void Run() {
  GLOB = new int;
  *GLOB = 1;
  printf("test307: simple race, code with control flow\n");
  MyThreadArray t1(Worker1, Worker2, Worker3, Worker4);
  t1.Start();
  t1.Join();
}
REGISTER_TEST2(Run, 307, RACE_DEMO)
}  // namespace test307

// test308: Example of double-checked-locking {{{1
namespace test308 {
struct Foo {
  int a;
};

static int   is_inited = 0;
static Mutex lock;
static Foo  *foo;

void InitMe() {
  if (!is_inited) {   // Unsynchronized read: the classic DCL anti-pattern (intentional here).
    lock.Lock();
    if (!is_inited) {
      foo = new Foo;
      foo->a = 42;
      is_inited = 1;
    }
    lock.Unlock();
  }
}

void UseMe() {
  InitMe();
  CHECK(foo && foo->a == 42);
}

void Worker1() { UseMe(); }
void Worker2() { UseMe(); }
void Worker3() { UseMe(); }


void Run() {
  ANNOTATE_TRACE_MEMORY(&is_inited);
  printf("test308: Example of double-checked-locking\n");
  MyThreadArray t1(Worker1, Worker2, Worker3);
  t1.Start();
  t1.Join();
}
REGISTER_TEST2(Run, 308, RACE_DEMO)
}  // namespace test308

// test309: Simple race on an STL object. {{{1
namespace test309 {
string GLOB;

void Worker1() {
  GLOB="Thread1";
}
void Worker2() {
  usleep(100000);
  GLOB="Booooooooooo";
}

void Run() {
  printf("test309: simple race on an STL object.\n");
  MyThread t1(Worker1), t2(Worker2);
  t1.Start();
  t2.Start();
  t1.Join();   t2.Join();
}
REGISTER_TEST2(Run, 309, RACE_DEMO)
}  // namespace test309

// test310: One more simple race. {{{1
namespace test310 {
int *PTR = NULL;  // GUARDED_BY(mu1)

Mutex mu1;  // Protects PTR.
Mutex mu2;  // Unrelated to PTR.
Mutex mu3;  // Unrelated to PTR.

void Writer1() {
  MutexLock lock3(&mu3);  // This lock is unrelated to PTR.
  MutexLock lock1(&mu1);  // Protect PTR.
  *PTR = 1;
}

void Writer2() {
  MutexLock lock2(&mu2);  // This lock is unrelated to PTR.
  MutexLock lock1(&mu1);  // Protect PTR.
  int some_unrelated_stuff = 0;
  if (some_unrelated_stuff == 0)
    some_unrelated_stuff++;
  *PTR = 2;
}


void Reader() {
  MutexLock lock2(&mu2);  // Oh, gosh, this is a wrong mutex!
  CHECK(*PTR <= 2);
}

// Some functions to make the stack trace non-trivial.
void DoWrite1() { Writer1(); }
void Thread1()  { DoWrite1(); }

void DoWrite2() { Writer2(); }
void Thread2()  { DoWrite2(); }

void DoRead()  { Reader(); }
void Thread3() { DoRead(); }

void Run() {
  printf("test310: simple race.\n");
  PTR = new int;
  ANNOTATE_TRACE_MEMORY(PTR);
  *PTR = 0;
  MyThread t1(Thread1, NULL, "writer1"),
           t2(Thread2, NULL, "writer2"),
           t3(Thread3, NULL, "buggy reader");
  t1.Start();
  t2.Start();
  usleep(100000);  // Let the writers go first.
  t3.Start();

  t1.Join();
  t2.Join();
  t3.Join();
}
REGISTER_TEST2(Run, 310, RACE_DEMO)
}  // namespace test310

// test311: Yet another simple race. {{{1
namespace test311 {
int *PTR = NULL;  // GUARDED_BY(mu1)

Mutex mu1;  // Protects PTR.
Mutex mu2;  // Unrelated to PTR.
Mutex mu3;  // Unrelated to PTR.

void GoodWriter1() {
  MutexLock lock3(&mu3);  // This lock is unrelated to PTR.
  MutexLock lock1(&mu1);  // Protect PTR.
  *PTR = 1;
}

void GoodWriter2() {
  MutexLock lock2(&mu2);  // This lock is unrelated to PTR.
  MutexLock lock1(&mu1);  // Protect PTR.
  *PTR = 2;
}

void GoodReader() {
  MutexLock lock1(&mu1);  // Protect PTR.
  CHECK(*PTR >= 0);
}

void BuggyWriter() {
  MutexLock lock2(&mu2);  // Wrong mutex!
  *PTR = 3;
}

// Some functions to make the stack trace non-trivial.
void DoWrite1() { GoodWriter1(); }
void Thread1()  { DoWrite1(); }

void DoWrite2() { GoodWriter2(); }
void Thread2()  { DoWrite2(); }

void DoGoodRead() { GoodReader(); }
void Thread3()    { DoGoodRead(); }

void DoBadWrite() { BuggyWriter(); }
void Thread4()    { DoBadWrite(); }

void Run() {
  printf("test311: simple race.\n");
  PTR = new int;
  ANNOTATE_TRACE_MEMORY(PTR);
  *PTR = 0;
  MyThread t1(Thread1, NULL, "good writer1"),
           t2(Thread2, NULL, "good writer2"),
           t3(Thread3, NULL, "good reader"),
           t4(Thread4, NULL, "buggy writer");
  t1.Start();
  t3.Start();
  // t2 goes after t3. This way a pure happens-before detector has no chance.
  usleep(10000);
  t2.Start();
  usleep(100000);  // Let the good folks go first.
  t4.Start();

  t1.Join();
  t2.Join();
  t3.Join();
  t4.Join();
}
REGISTER_TEST2(Run, 311, RACE_DEMO)
}  // namespace test311

// test312: A test with a very deep stack.
// {{{1  (vim fold marker; tail of the split test312 header comment)
namespace test312 {
int GLOB = 0;
// Twenty nested calls so the race report carries a very deep stack trace.
void RaceyWrite() { GLOB++; }
void Func1() { RaceyWrite(); }
void Func2() { Func1(); }
void Func3() { Func2(); }
void Func4() { Func3(); }
void Func5() { Func4(); }
void Func6() { Func5(); }
void Func7() { Func6(); }
void Func8() { Func7(); }
void Func9() { Func8(); }
void Func10() { Func9(); }
void Func11() { Func10(); }
void Func12() { Func11(); }
void Func13() { Func12(); }
void Func14() { Func13(); }
void Func15() { Func14(); }
void Func16() { Func15(); }
void Func17() { Func16(); }
void Func18() { Func17(); }
void Func19() { Func18(); }
void Worker() { Func19(); }
void Run() {
  printf("test312: simple race with deep stack.\n");
  MyThreadArray t(Worker, Worker, Worker);
  t.Start();
  t.Join();
}
REGISTER_TEST2(Run, 312, RACE_DEMO)
}  // namespace test312

// test313 TP: test for thread graph output {{{1
namespace test313 {
BlockingCounter *blocking_counter;
int GLOB = 0;

// Worker(N) will do 2^N increments of GLOB, each increment in a separate thread
void Worker(int depth) {
  CHECK(depth >= 0);
  if (depth > 0) {
    ThreadPool pool(2);
    pool.StartWorkers();
    pool.Add(NewCallback(Worker, depth-1));
    pool.Add(NewCallback(Worker, depth-1));
  } else {
    GLOB++;  // Race here
  }
}
void Run() {
  printf("test313: positive\n");
  Worker(4);
  printf("\tGLOB=%d\n", GLOB);
}
REGISTER_TEST2(Run, 313, RACE_DEMO)
}  // namespace test313




// test400: Demo of a simple false positive.
// {{{1  (vim fold marker; tail of the split test400 header comment)
namespace test400 {
static Mutex mu;
static vector<int> *vec;  // GUARDED_BY(mu);

void InitAllBeforeStartingThreads() {
  vec = new vector<int>;
  vec->push_back(1);
  vec->push_back(2);
}

void Thread1() {
  MutexLock lock(&mu);
  vec->pop_back();
}

void Thread2() {
  MutexLock lock(&mu);
  vec->pop_back();
}

//---- Sub-optimal code ---------
size_t NumberOfElementsLeft() {
  MutexLock lock(&mu);
  return vec->size();
}

void WaitForAllThreadsToFinish_InefficientAndTsanUnfriendly() {
  while(NumberOfElementsLeft()) {
    ; // sleep or print or do nothing.
  }
  // It is now safe to access vec w/o lock.
  // But a hybrid detector (like ThreadSanitizer) can't see it.
  // Solutions:
  //   1. Use pure happens-before detector (e.g. "tsan --pure-happens-before")
  //   2. Call ANNOTATE_MUTEX_IS_USED_AS_CONDVAR(&mu)
  //      in InitAllBeforeStartingThreads()
  //   3. (preferred) Use WaitForAllThreadsToFinish_Good() (see below).
  CHECK(vec->empty());
  delete vec;
}

//----- Better code -----------

bool NoElementsLeft(vector<int> *v) {
  return v->empty();
}

void WaitForAllThreadsToFinish_Good() {
  mu.LockWhen(Condition(NoElementsLeft, vec));
  mu.Unlock();

  // It is now safe to access vec w/o lock.
  CHECK(vec->empty());
  delete vec;
}


void Run() {
  MyThreadArray t(Thread1, Thread2);
  InitAllBeforeStartingThreads();
  t.Start();
  WaitForAllThreadsToFinish_InefficientAndTsanUnfriendly();
//  WaitForAllThreadsToFinish_Good();
  t.Join();
}
REGISTER_TEST2(Run, 400, RACE_DEMO)
}  // namespace test400

// test401: Demo of false positive caused by reference counting. {{{1
namespace test401 {
// A simplified example of reference counting.
// DecRef() does a ref count decrement in a way unfriendly to race detectors.
// DecRefAnnotated() does the same in a friendly way.

static vector<int> *vec;
static int ref_count;

void InitAllBeforeStartingThreads(int number_of_threads) {
  vec = new vector<int>;
  vec->push_back(1);
  ref_count = number_of_threads;
}

// Correct, but unfriendly to race detectors.
int DecRef() {
  return AtomicIncrement(&ref_count, -1);
}

// Correct and friendly to race detectors.
int DecRefAnnotated() {
  ANNOTATE_CONDVAR_SIGNAL(&ref_count);
  int res = AtomicIncrement(&ref_count, -1);
  if (res == 0) {
    ANNOTATE_CONDVAR_WAIT(&ref_count);
  }
  return res;
}

void ThreadWorker() {
  CHECK(ref_count > 0);
  CHECK(vec->size() == 1);
  if (DecRef() == 0) {  // Use DecRefAnnotated() instead!
    // No one uses vec now ==> delete it.
    delete vec;  // A false race may be reported here.
    vec = NULL;
  }
}

void Run() {
  MyThreadArray t(ThreadWorker, ThreadWorker, ThreadWorker);
  InitAllBeforeStartingThreads(3 /*number of threads*/);
  t.Start();
  t.Join();
  CHECK(vec == 0);
}
REGISTER_TEST2(Run, 401, RACE_DEMO)
}  // namespace test401

// test501: Manually call PRINT_* annotations {{{1
namespace test501 {
int COUNTER = 0;
int GLOB = 0;
Mutex muCounter, muGlob[65];

void Worker() {
  muCounter.Lock();
  int myId = ++COUNTER;
  muCounter.Unlock();

  usleep(100);

  // Each worker locks its own mutex plus the shared muGlob[0].
  muGlob[myId].Lock();
  muGlob[0].Lock();
  GLOB++;
  muGlob[0].Unlock();
  muGlob[myId].Unlock();
}

void Worker_1() {
  MyThreadArray ta (Worker, Worker, Worker, Worker);
  ta.Start();
  usleep(500000);
  ta.Join ();
}

void Worker_2() {
  MyThreadArray ta (Worker_1, Worker_1, Worker_1, Worker_1);
  ta.Start();
  usleep(300000);
  ta.Join ();
}

void Run() {
  ANNOTATE_RESET_STATS();
  printf("test501: Manually call PRINT_* annotations.\n");
  MyThreadArray ta (Worker_2, Worker_2, Worker_2, Worker_2);
  ta.Start();
  usleep(100000);
  ta.Join ();
  ANNOTATE_PRINT_MEMORY_USAGE(0);
  ANNOTATE_PRINT_STATS();
}

REGISTER_TEST2(Run, 501, FEATURE | EXCLUDE_FROM_ALL)
}  // namespace test501

// test502: produce lots of segments without cross-thread relations {{{1
namespace test502 {

/*
 * This test produces ~1Gb of memory usage when run with the following options:
 *
 * --tool=helgrind
 * --trace-after-race=0
 * --num-callers=2
 * --more-context=no
 */

Mutex MU;
int GLOB = 0;

void TP() {
  for (int i = 0; i < 750000; i++) {
    MU.Lock();
    GLOB++;
    MU.Unlock();
  }
}

void Run() {
  MyThreadArray t(TP, TP);
  printf("test502: produce lots of segments without cross-thread relations\n");

  t.Start();
  t.Join();
}

REGISTER_TEST2(Run, 502, MEMORY_USAGE | PRINT_STATS | EXCLUDE_FROM_ALL
                         | PERFORMANCE)
}  // namespace test502

// test503: produce lots of segments with simple HB-relations {{{1
// HB cache-miss rate is ~55%
namespace test503 {

// Ring of queues: each worker passes the token to the next one.
// |- |   |   |   |   |
// |  \|  |   |   |   |
// |   |- |   |   |   |
// |   |  \|  |   |   |
// |   |   |- |   |   |
// |   |   |  \|  |   |
// |   |   |   |- |   |
// |   |   |   |  \|  |
// |   |   |   |   |- |
// |   |   |   |   |  \|
// |   |   |   |   |   |----
//->|  |   |   |   |   |
// |- |   |   |   |   |
// |  \|  |   |   |   |
// ...
const int N_threads = 32;
const int ARRAY_SIZE = 128;
int GLOB[ARRAY_SIZE];
ProducerConsumerQueue *Q[N_threads];
int GLOB_limit = 100000;
int count = -1;

void Worker(){
  // Each worker claims a unique slot in the queue ring.
  int myId = AtomicIncrement(&count, 1);

  ProducerConsumerQueue &myQ = *Q[myId], &nextQ = *Q[(myId+1) % N_threads];

  // this code produces a new SS with each new segment
  while (myQ.Get() != NULL) {
    for (int i = 0; i < ARRAY_SIZE; i++)
      GLOB[i]++;

    if (myId == 0 && GLOB[0] > GLOB_limit) {
      // Stop all threads: a NULL token makes every worker's loop exit.
      for (int i = 0; i < N_threads; i++)
        Q[i]->Put(NULL);
    } else
      nextQ.Put(GLOB);
  }
}

void Run() {
  printf("test503: produce lots of segments with simple HB-relations\n");
  for (int i = 0; i < N_threads; i++)
    Q[i] = new ProducerConsumerQueue(1);
  Q[0]->Put(GLOB);

  {
    ThreadPool pool(N_threads);
    pool.StartWorkers();
    for (int i = 0; i < N_threads; i++) {
      pool.Add(NewCallback(Worker));
    }
  } // all folks are joined here.
  for (int i = 0; i < N_threads; i++)
    delete Q[i];
}

REGISTER_TEST2(Run, 503, MEMORY_USAGE | PRINT_STATS
                         | PERFORMANCE | EXCLUDE_FROM_ALL)
}  // namespace test503

// test504: force massive cache fetch-wback (50% misses, mostly CacheLineZ) {{{1
namespace test504 {

const int N_THREADS = 2,
          HG_CACHELINE_COUNT = 1 << 16,
          HG_CACHELINE_SIZE  = 1 << 6,
          HG_CACHE_SIZE = HG_CACHELINE_COUNT * HG_CACHELINE_SIZE;

// int gives us ~4x speed of the byte test
// 4x array size gives us
// total multiplier of 16x over the cachesize
// so we can neglect the cached-at-the-end memory
const int ARRAY_SIZE = 4 * HG_CACHE_SIZE,
          ITERATIONS = 30;
int array[ARRAY_SIZE];

int count = 0;
Mutex count_mu;

void Worker() {
  count_mu.Lock();
  int myId = ++count;
  count_mu.Unlock();

  // all threads write to different memory locations,
  // so no synchronization mechanisms are needed
  int lower_bound = ARRAY_SIZE * (myId-1) / N_THREADS,
      upper_bound = ARRAY_SIZE * ( myId ) / N_THREADS;
  for (int j = 0; j < ITERATIONS; j++)
    for (int i = lower_bound; i < upper_bound;
         i += HG_CACHELINE_SIZE / sizeof(array[0])) {
      array[i] = i;  // each array-write generates a cache miss
    }
}

void Run() {
  printf("test504: force massive CacheLineZ fetch-wback\n");
  MyThreadArray t(Worker, Worker);
  t.Start();
  t.Join();
}

REGISTER_TEST2(Run, 504, PERFORMANCE | PRINT_STATS | EXCLUDE_FROM_ALL)
}  // namespace test504

// test505: force massive cache fetch-wback (60% misses) {{{1
// modification of test504 - more threads, byte accesses and lots of mutexes
// so it produces lots of CacheLineF misses (30-50% of CacheLineZ misses)
namespace test505 {

const int N_THREADS = 2,
          HG_CACHELINE_COUNT = 1 << 16,
          HG_CACHELINE_SIZE  = 1 << 6,
          HG_CACHE_SIZE = HG_CACHELINE_COUNT
                          * HG_CACHELINE_SIZE;

const int ARRAY_SIZE = 4 * HG_CACHE_SIZE,
          ITERATIONS = 3;
int64_t array[ARRAY_SIZE];

int count = 0;
Mutex count_mu;

void Worker() {
  const int N_MUTEXES = 5;
  Mutex mu[N_MUTEXES];
  count_mu.Lock();
  int myId = ++count;
  count_mu.Unlock();

  // all threads write to different memory locations,
  // so no synchronization mechanisms are needed
  int lower_bound = ARRAY_SIZE * (myId-1) / N_THREADS,
      upper_bound = ARRAY_SIZE * ( myId ) / N_THREADS;
  for (int j = 0; j < ITERATIONS; j++)
    for (int mutex_id = 0; mutex_id < N_MUTEXES; mutex_id++) {
      Mutex *m = & mu[mutex_id];
      m->Lock();
      for (int i = lower_bound + mutex_id, cnt = 0;
           i < upper_bound;
           i += HG_CACHELINE_SIZE / sizeof(array[0]), cnt++) {
        array[i] = i;  // each array-write generates a cache miss
      }
      m->Unlock();
    }
}

void Run() {
  printf("test505: force massive CacheLineF fetch-wback\n");
  MyThreadArray t(Worker, Worker);
  t.Start();
  t.Join();
}

REGISTER_TEST2(Run, 505, PERFORMANCE | PRINT_STATS | EXCLUDE_FROM_ALL)
}  // namespace test505

// test506: massive HB's using Barriers {{{1
// HB cache miss is ~40%
// segments consume 10x more memory than SSs
// modification of test39
namespace test506 {
#ifndef NO_BARRIER
// Same as test17 but uses Barrier class (pthread_barrier_t).
int GLOB = 0;
const int N_threads  = 64,
          ITERATIONS = 1000;
Barrier *barrier[ITERATIONS];
Mutex MU;

void Worker() {
  for (int i = 0; i < ITERATIONS; i++) {
    MU.Lock();
    GLOB++;
    MU.Unlock();
    // Rendezvous with all other workers: one Barrier per iteration.
    barrier[i]->Block();
  }
}
void Run() {
  printf("test506: massive HB's using Barriers\n");
  for (int i = 0; i < ITERATIONS; i++) {
    barrier[i] = new Barrier(N_threads);
  }
  {
    ThreadPool pool(N_threads);
    pool.StartWorkers();
    for (int i = 0; i < N_threads; i++) {
      pool.Add(NewCallback(Worker));
    }
  } // all folks are joined here.
  CHECK(GLOB == N_threads * ITERATIONS);
  for (int i = 0; i < ITERATIONS; i++) {
    delete barrier[i];
  }
}
REGISTER_TEST2(Run, 506, PERFORMANCE | PRINT_STATS | EXCLUDE_FROM_ALL);
#endif // NO_BARRIER
}  // namespace test506

// test507: vgHelgrind_initIterAtFM/stackClear benchmark {{{1
// vgHelgrind_initIterAtFM/stackClear consume ~8.5%/5.5% CPU
namespace test507 {
const int N_THREADS   = 1,
          BUFFER_SIZE = 1,
          ITERATIONS  = 1 << 20;

void Foo() {
  // Creates and destroys an annotated rw-lock per object to stress
  // the detector's lock bookkeeping.
  struct T {
    char temp;
    T() {
      ANNOTATE_RWLOCK_CREATE(&temp);
    }
    ~T() {
      ANNOTATE_RWLOCK_DESTROY(&temp);
    }
  } s[BUFFER_SIZE];
  s->temp = '\0';
}

void Worker() {
  for (int j = 0; j < ITERATIONS; j++) {
    Foo();
  }
}

void Run() {
  printf("test507: vgHelgrind_initIterAtFM/stackClear benchmark\n");
  {
    ThreadPool pool(N_THREADS);
    pool.StartWorkers();
    for (int i = 0; i < N_THREADS; i++) {
      pool.Add(NewCallback(Worker));
    }
  } // all folks are joined here.
}
REGISTER_TEST2(Run, 507, EXCLUDE_FROM_ALL);
}  // namespace test507

// test508: cmp_WordVecs_for_FM benchmark {{{1
// 50+% of CPU consumption by cmp_WordVecs_for_FM
namespace test508 {
const int N_THREADS   = 1,
          BUFFER_SIZE = 1 << 10,
          ITERATIONS  = 1 << 9;

void Foo() {
  // Same pattern as test507, but with a much larger buffer of
  // annotated rw-locks per call.
  struct T {
    char temp;
    T() {
      ANNOTATE_RWLOCK_CREATE(&temp);
    }
    ~T() {
      ANNOTATE_RWLOCK_DESTROY(&temp);
    }
  } s[BUFFER_SIZE];
  s->temp = '\0';
}

void Worker() {
  for (int j = 0; j < ITERATIONS; j++) {
    Foo();
  }
}

void Run() {
  printf("test508: cmp_WordVecs_for_FM benchmark\n");
  {
    ThreadPool pool(N_THREADS);
    pool.StartWorkers();
    for (int i = 0; i < N_THREADS; i++) {
      pool.Add(NewCallback(Worker));
    }
  } // all folks are joined here.
}
REGISTER_TEST2(Run, 508, EXCLUDE_FROM_ALL);
}  // namespace test508

// test509: avl_find_node benchmark {{{1
// 10+% of CPU consumption by avl_find_node
namespace test509 {
const int N_THREADS  = 16,
          ITERATIONS = 1 << 8;

void Worker() {
  std::vector<Mutex*> mu_list;
  for (int i = 0; i < ITERATIONS; i++) {
    Mutex * mu = new Mutex();
    mu_list.push_back(mu);
    mu->Lock();
  }
  // Unlock and free in reverse (LIFO) order.
  for (int i = ITERATIONS - 1; i >= 0; i--) {
    Mutex * mu = mu_list[i];
    mu->Unlock();
    delete mu;
  }
}

void Run() {
  printf("test509: avl_find_node benchmark\n");
  {
    ThreadPool pool(N_THREADS);
    pool.StartWorkers();
    for (int i = 0; i < N_THREADS; i++) {
      pool.Add(NewCallback(Worker));
    }
  } // all folks are joined here.
}
REGISTER_TEST2(Run, 509, EXCLUDE_FROM_ALL);
}  // namespace test509

// test510: SS-recycle test {{{1
// this tests shows the case where only ~1% of SS are recycled
namespace test510 {
const int N_THREADS  = 16,
          ITERATIONS = 1 << 10;
int GLOB = 0;

void Worker() {
  usleep(100000);
  for (int i = 0; i < ITERATIONS; i++) {
    ANNOTATE_CONDVAR_SIGNAL((void*)0xDeadBeef);
    GLOB++;
    usleep(10);
  }
}

void Run() {
  //ANNOTATE_BENIGN_RACE(&GLOB, "Test");
  printf("test510: SS-recycle test\n");
  {
    ThreadPool pool(N_THREADS);
    pool.StartWorkers();
    for (int i = 0; i < N_THREADS; i++) {
      pool.Add(NewCallback(Worker));
    }
  } // all folks are joined here.
}
REGISTER_TEST2(Run, 510, MEMORY_USAGE | PRINT_STATS | EXCLUDE_FROM_ALL);
}  // namespace test510

// test511: Segment refcounting test ('1' refcounting) {{{1
namespace test511 {
int GLOB = 0;

void Run () {
  for (int i = 0; i < 300; i++) {
    ANNOTATE_CONDVAR_SIGNAL(&GLOB);
    usleep(1000);
    GLOB++;
    ANNOTATE_CONDVAR_WAIT(&GLOB);
    if (i % 100 == 0)
      ANNOTATE_PRINT_MEMORY_USAGE(0);
  }
}
REGISTER_TEST2(Run, 511, MEMORY_USAGE | PRINT_STATS | EXCLUDE_FROM_ALL);
}  // namespace test511

// test512: Segment refcounting test ('S' refcounting) {{{1
namespace test512 {
int GLOB = 0;
sem_t SEM;

void Run () {
  sem_init(&SEM, 0, 0);
  for (int i = 0; i < 300; i++) {
    sem_post(&SEM);
    usleep(1000);
    GLOB++;
    sem_wait(&SEM);
    /*if (i % 100 == 0)
      ANNOTATE_PRINT_MEMORY_USAGE(0);*/
  }
  sem_destroy(&SEM);
}
REGISTER_TEST2(Run, 512, MEMORY_USAGE | PRINT_STATS | EXCLUDE_FROM_ALL);
}  // namespace test512

// test513: --fast-mode benchmark {{{1
namespace test513 {

const int N_THREADS = 2,
          HG_CACHELINE_SIZE = 1 << 6,
          ARRAY_SIZE = HG_CACHELINE_SIZE *
512, 7756 MUTEX_ID_BITS = 8, 7757 MUTEX_ID_MASK = (1 << MUTEX_ID_BITS) - 1; 7758 7759// Each thread has its own cacheline and tackles with it intensively 7760const int ITERATIONS = 1024; 7761int array[N_THREADS][ARRAY_SIZE]; 7762 7763int count = 0; 7764Mutex count_mu; 7765Mutex mutex_arr[N_THREADS][MUTEX_ID_BITS]; 7766 7767void Worker() { 7768 count_mu.Lock(); 7769 int myId = count++; 7770 count_mu.Unlock(); 7771 7772 // all threads write to different memory locations 7773 for (int j = 0; j < ITERATIONS; j++) { 7774 int mutex_mask = j & MUTEX_ID_BITS; 7775 for (int m = 0; m < MUTEX_ID_BITS; m++) 7776 if (mutex_mask & (1 << m)) 7777 mutex_arr[myId][m].Lock(); 7778 7779 for (int i = 0; i < ARRAY_SIZE; i++) { 7780 array[myId][i] = i; 7781 } 7782 7783 for (int m = 0; m < MUTEX_ID_BITS; m++) 7784 if (mutex_mask & (1 << m)) 7785 mutex_arr[myId][m].Unlock(); 7786 } 7787} 7788 7789void Run() { 7790 printf("test513: --fast-mode benchmark\n"); 7791 { 7792 ThreadPool pool(N_THREADS); 7793 pool.StartWorkers(); 7794 for (int i = 0; i < N_THREADS; i++) { 7795 pool.Add(NewCallback(Worker)); 7796 } 7797 } // all folks are joined here. 7798} 7799 7800REGISTER_TEST2(Run, 513, PERFORMANCE | PRINT_STATS | EXCLUDE_FROM_ALL) 7801} // namespace test513 7802 7803// End {{{1 7804// vim:shiftwidth=2:softtabstop=2:expandtab:foldmethod=marker 7805