/* drd_load_store.c -- revision 1081fe2314bc72160d8ff503f11a65905cd4e4df */
1/* 2 This file is part of drd, a thread error detector. 3 4 Copyright (C) 2006-2011 Bart Van Assche <bvanassche@acm.org>. 5 6 This program is free software; you can redistribute it and/or 7 modify it under the terms of the GNU General Public License as 8 published by the Free Software Foundation; either version 2 of the 9 License, or (at your option) any later version. 10 11 This program is distributed in the hope that it will be useful, but 12 WITHOUT ANY WARRANTY; without even the implied warranty of 13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 General Public License for more details. 15 16 You should have received a copy of the GNU General Public License 17 along with this program; if not, write to the Free Software 18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 19 02111-1307, USA. 20 21 The GNU General Public License is contained in the file COPYING. 22*/ 23 24 25#include "drd_bitmap.h" 26#include "drd_thread_bitmap.h" 27#include "drd_vc.h" /* DRD_(vc_snprint)() */ 28 29/* Include several source files here in order to allow the compiler to */ 30/* do more inlining. */ 31#include "drd_bitmap.c" 32#include "drd_load_store.h" 33#include "drd_segment.c" 34#include "drd_thread.c" 35#include "drd_vc.c" 36#include "libvex_guest_offsets.h" 37 38 39/* STACK_POINTER_OFFSET: VEX register offset for the stack pointer register. */ 40#if defined(VGA_x86) 41#define STACK_POINTER_OFFSET OFFSET_x86_ESP 42#elif defined(VGA_amd64) 43#define STACK_POINTER_OFFSET OFFSET_amd64_RSP 44#elif defined(VGA_ppc32) 45#define STACK_POINTER_OFFSET OFFSET_ppc32_GPR1 46#elif defined(VGA_ppc64) 47#define STACK_POINTER_OFFSET OFFSET_ppc64_GPR1 48#elif defined(VGA_arm) 49#define STACK_POINTER_OFFSET OFFSET_arm_R13 50#elif defined(VGA_s390x) 51#define STACK_POINTER_OFFSET OFFSET_s390x_r15 52#else 53#error Unknown architecture. 54#endif 55 56 57/* Local variables. 
*/

/* Whether accesses to stack memory are checked for races (off by default). */
static Bool s_check_stack_accesses = False;
/* Whether reporting stops after the first race detected on an address. */
static Bool s_first_race_only = False;


/* Function definitions. */

/** Return whether stack accesses are being checked for data races. */
Bool DRD_(get_check_stack_accesses)()
{
   return s_check_stack_accesses;
}

/** Enable or disable data race detection on stack accesses. */
void DRD_(set_check_stack_accesses)(const Bool c)
{
   tl_assert(c == False || c == True);
   s_check_stack_accesses = c;
}

/** Return whether only the first race per address is reported. */
Bool DRD_(get_first_race_only)()
{
   return s_first_race_only;
}

/** Enable or disable reporting only the first race per address. */
void DRD_(set_first_race_only)(const Bool fro)
{
   tl_assert(fro == False || fro == True);
   s_first_race_only = fro;
}

/**
 * Print a trace message with backtrace for the memory access
 * [addr, addr + size[ if any address in that range is being traced.
 * The message includes the access type, the running DRD thread ID and
 * that thread's vector clock.
 */
void DRD_(trace_mem_access)(const Addr addr, const SizeT size,
                            const BmAccessTypeT access_type)
{
   if (DRD_(is_any_traced)(addr, addr + size))
   {
      char* vc;

      /* vc_aprint() allocates the string; freed below via VG_(free)(). */
      vc = DRD_(vc_aprint)(DRD_(thread_get_vc)(DRD_(thread_get_running_tid)()));
      DRD_(trace_msg_w_bt)("%s 0x%lx size %ld (thread %d / vc %s)",
                           access_type == eLoad ? "load "
                           : access_type == eStore ? "store"
                           : access_type == eStart ? "start"
                           : access_type == eEnd ? "end " : "????",
                           addr, size, DRD_(thread_get_running_tid)(), vc);
      VG_(free)(vc);
      /* Sanity check: DRD's notion of the running thread must match
         Valgrind's. */
      tl_assert(DRD_(DrdThreadIdToVgThreadId)(DRD_(thread_get_running_tid)())
                == VG_(get_running_tid)());
   }
}

/** Dirty helper: trace a load of 'size' bytes at 'addr'. */
static VG_REGPARM(2) void drd_trace_mem_load(const Addr addr, const SizeT size)
{
   return DRD_(trace_mem_access)(addr, size, eLoad);
}

/** Dirty helper: trace a store of 'size' bytes at 'addr'. */
static VG_REGPARM(2) void drd_trace_mem_store(const Addr addr,const SizeT size)
{
   return DRD_(trace_mem_access)(addr, size, eStore);
}

/**
 * Report a data race on the range [addr, addr + size[ to the Valgrind
 * error-management core. If "first race only" mode is enabled, suppress
 * further reports on this range.
 */
static void drd_report_race(const Addr addr, const SizeT size,
                            const BmAccessTypeT access_type)
{
   DataRaceErrInfo drei;

   drei.tid  = DRD_(thread_get_running_tid)();
   drei.addr = addr;
   drei.size = size;
   drei.access_type = access_type;
   VG_(maybe_record_error)(VG_(get_running_tid)(),
                           DataRaceErr,
                           VG_(get_IP)(VG_(get_running_tid)()),
                           "Conflicting access",
                           &drei);

   if (s_first_race_only)
   {
      DRD_(start_suppression)(addr, addr + size, "first race only");
   }
}

/**
 * Called by the instrumented code for each load whose size is not handled
 * by one of the specialized drd_trace_load_<n>() helpers below, and also
 * for loads performed by dirty helpers. Reports a race if the load
 * conflicts with a recorded access and is not suppressed. Stack accesses
 * are skipped unless stack checking has been enabled.
 */
VG_REGPARM(2) void DRD_(trace_load)(Addr addr, SizeT size)
{
#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
   /* Only compiled in when consistency checking is enabled, because of
      the performance cost of this assert. */
   tl_assert(DRD_(thread_get_running_tid)()
             == DRD_(VgThreadIdToDrdThreadId)(VG_(get_running_tid())));
#endif

   if (DRD_(running_thread_is_recording_loads)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_load_triggers_conflict(addr, addr + size)
       && ! DRD_(is_suppressed)(addr, addr + size))
   {
      drd_report_race(addr, size, eLoad);
   }
}

/** Specialized trace function for one-byte loads. */
static VG_REGPARM(1) void drd_trace_load_1(Addr addr)
{
   if (DRD_(running_thread_is_recording_loads)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_load_1_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 1))
   {
      drd_report_race(addr, 1, eLoad);
   }
}

/** Specialized trace function for two-byte loads. */
static VG_REGPARM(1) void drd_trace_load_2(Addr addr)
{
   if (DRD_(running_thread_is_recording_loads)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_load_2_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 2))
   {
      drd_report_race(addr, 2, eLoad);
   }
}

/** Specialized trace function for four-byte loads. */
static VG_REGPARM(1) void drd_trace_load_4(Addr addr)
{
   if (DRD_(running_thread_is_recording_loads)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_load_4_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 4))
   {
      drd_report_race(addr, 4, eLoad);
   }
}

/** Specialized trace function for eight-byte loads. */
static VG_REGPARM(1) void drd_trace_load_8(Addr addr)
{
   if (DRD_(running_thread_is_recording_loads)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_load_8_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 8))
   {
      drd_report_race(addr, 8, eLoad);
   }
}

/**
 * Called by the instrumented code for each store whose size is not handled
 * by one of the specialized drd_trace_store_<n>() helpers below, and also
 * for stores performed by dirty helpers. See DRD_(trace_load)() for the
 * filtering applied.
 */
VG_REGPARM(2) void DRD_(trace_store)(Addr addr, SizeT size)
{
#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
   /* Only compiled in when consistency checking is enabled, because of
      the performance cost of this assert. */
   tl_assert(DRD_(thread_get_running_tid)()
             == DRD_(VgThreadIdToDrdThreadId)(VG_(get_running_tid())));
#endif

   if (DRD_(running_thread_is_recording_stores)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_store_triggers_conflict(addr, addr + size)
       && ! DRD_(is_suppressed)(addr, addr + size))
   {
      drd_report_race(addr, size, eStore);
   }
}

/** Specialized trace function for one-byte stores. */
static VG_REGPARM(1) void drd_trace_store_1(Addr addr)
{
   if (DRD_(running_thread_is_recording_stores)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_store_1_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 1))
   {
      drd_report_race(addr, 1, eStore);
   }
}

/** Specialized trace function for two-byte stores. */
static VG_REGPARM(1) void drd_trace_store_2(Addr addr)
{
   if (DRD_(running_thread_is_recording_stores)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_store_2_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 2))
   {
      drd_report_race(addr, 2, eStore);
   }
}

/** Specialized trace function for four-byte stores. */
static VG_REGPARM(1) void drd_trace_store_4(Addr addr)
{
   if (DRD_(running_thread_is_recording_stores)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_store_4_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 4))
   {
      drd_report_race(addr, 4, eStore);
   }
}

/** Specialized trace function for eight-byte stores. */
static VG_REGPARM(1) void drd_trace_store_8(Addr addr)
{
   if (DRD_(running_thread_is_recording_stores)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_store_8_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 8))
   {
      drd_report_race(addr, 8, eStore);
   }
}

/**
 * Return true if and only if addr_expr matches the pattern (SP) or
 * <offset>(SP).
 *
 * Only recognizes the case where addr_expr is an IR temporary that was
 * assigned directly from a Get of the stack-pointer guest register; other
 * forms of stack addressing (e.g. computed via an Add) are not detected.
 */
static Bool is_stack_access(IRSB* const bb, IRExpr* const addr_expr)
{
   Bool result = False;

   if (addr_expr->tag == Iex_RdTmp)
   {
      int i;
      /* Scan the superblock for the WrTmp that defines this temporary. */
      for (i = 0; i < bb->stmts_size; i++)
      {
         if (bb->stmts[i]
             && bb->stmts[i]->tag == Ist_WrTmp
             && bb->stmts[i]->Ist.WrTmp.tmp == addr_expr->Iex.RdTmp.tmp)
         {
            IRExpr* e = bb->stmts[i]->Ist.WrTmp.data;
            if (e->tag == Iex_Get && e->Iex.Get.offset == STACK_POINTER_OFFSET)
            {
               result = True;
            }

            //ppIRExpr(e);
            //VG_(printf)(" (%s)\n", result ? "True" : "False");
            break;
         }
      }
   }
   return result;
}

/**
 * Add instrumentation to 'bb' for a load of 'size' bytes from the address
 * computed by 'addr_expr'. Emits a call to a size-specialized helper when
 * possible, and an extra tracing call when any address is being traced.
 * Stack accesses are not instrumented unless stack checking is enabled.
 */
static void instrument_load(IRSB* const bb,
                            IRExpr* const addr_expr,
                            const HWord size)
{
   IRExpr* size_expr;
   IRExpr** argv;
   IRDirty* di;

   if (UNLIKELY(DRD_(any_address_is_traced)()))
   {
      /* Note: the helper name string is "drd_trace_load" but the function
         actually called is drd_trace_mem_load(). */
      addStmtToIRSB(bb,
                    IRStmt_Dirty(
                       unsafeIRDirty_0_N(/*regparms*/2,
                                         "drd_trace_load",
                                         VG_(fnptr_to_fnentry)
                                         (drd_trace_mem_load),
                                         mkIRExprVec_2(addr_expr,
                                                       mkIRExpr_HWord(size)))));
   }

   if (! s_check_stack_accesses && is_stack_access(bb, addr_expr))
      return;

   switch (size)
   {
   case 1:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_load_1",
                             VG_(fnptr_to_fnentry)(drd_trace_load_1),
                             argv);
      break;
   case 2:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_load_2",
                             VG_(fnptr_to_fnentry)(drd_trace_load_2),
                             argv);
      break;
   case 4:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_load_4",
                             VG_(fnptr_to_fnentry)(drd_trace_load_4),
                             argv);
      break;
   case 8:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_load_8",
                             VG_(fnptr_to_fnentry)(drd_trace_load_8),
                             argv);
      break;
   default:
      /* Any other size: pass the size as a second argument. */
      size_expr = mkIRExpr_HWord(size);
      argv = mkIRExprVec_2(addr_expr, size_expr);
      di = unsafeIRDirty_0_N(/*regparms*/2,
                             "drd_trace_load",
                             VG_(fnptr_to_fnentry)(DRD_(trace_load)),
                             argv);
      break;
   }
   addStmtToIRSB(bb, IRStmt_Dirty(di));
}

/**
 * Add instrumentation to 'bb' for a store of 'size' bytes to the address
 * computed by 'addr_expr'. Mirrors instrument_load().
 */
static void instrument_store(IRSB* const bb,
                             IRExpr* const addr_expr,
                             const HWord size)
{
   IRExpr* size_expr;
   IRExpr** argv;
   IRDirty* di;

   if (UNLIKELY(DRD_(any_address_is_traced)()))
   {
      /* Note: the helper name string is "drd_trace_store" but the function
         actually called is drd_trace_mem_store(). */
      addStmtToIRSB(bb,
                    IRStmt_Dirty(
                       unsafeIRDirty_0_N(/*regparms*/2,
                                         "drd_trace_store",
                                         VG_(fnptr_to_fnentry)
                                         (drd_trace_mem_store),
                                         mkIRExprVec_2(addr_expr,
                                                       mkIRExpr_HWord(size)))));
   }

   if (! s_check_stack_accesses && is_stack_access(bb, addr_expr))
      return;

   switch (size)
   {
   case 1:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_store_1",
                             VG_(fnptr_to_fnentry)(drd_trace_store_1),
                             argv);
      break;
   case 2:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_store_2",
                             VG_(fnptr_to_fnentry)(drd_trace_store_2),
                             argv);
      break;
   case 4:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_store_4",
                             VG_(fnptr_to_fnentry)(drd_trace_store_4),
                             argv);
      break;
   case 8:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_store_8",
                             VG_(fnptr_to_fnentry)(drd_trace_store_8),
                             argv);
      break;
   default:
      /* Any other size: pass the size as a second argument. */
      size_expr = mkIRExpr_HWord(size);
      argv = mkIRExprVec_2(addr_expr, size_expr);
      di = unsafeIRDirty_0_N(/*regparms*/2,
                             "drd_trace_store",
                             VG_(fnptr_to_fnentry)(DRD_(trace_store)),
                             argv);
      break;
   }
   addStmtToIRSB(bb, IRStmt_Dirty(di));
}

/**
 * DRD's instrumentation callback, invoked by the Valgrind core for every
 * superblock that is translated. Copies bb_in into a new superblock and
 * inserts calls to the load/store trace helpers in front of every memory
 * access (loads, stores, dirty-helper accesses, CAS and load-linked).
 */
IRSB* DRD_(instrument)(VgCallbackClosure* const closure,
                       IRSB* const bb_in,
                       VexGuestLayout* const layout,
                       VexGuestExtents* const vge,
                       IRType const gWordTy,
                       IRType const hWordTy)
{
   IRDirty* di;
   Int      i;
   IRSB*    bb;
   IRExpr** argv;
   Bool     instrument = True;

   /* Set up BB */
   bb = emptyIRSB();
   bb->tyenv = deepCopyIRTypeEnv(bb_in->tyenv);
   bb->next = deepCopyIRExpr(bb_in->next);
   bb->jumpkind = bb_in->jumpkind;

   for (i = 0; i < bb_in->stmts_used; i++)
   {
      IRStmt* const st = bb_in->stmts[i];
      tl_assert(st);
      tl_assert(isFlatIRStmt(st));

      switch (st->tag)
      {
      /* Note: the code for not instrumenting the code in .plt */
      /* sections is only necessary on CentOS 3.0 x86 (kernel 2.4.21 */
      /* + glibc 2.3.2 + NPTL 0.60 + binutils 2.14.90.0.4). */
      /* This is because on this platform dynamic library symbols are */
      /* relocated in another way than by later binutils versions. The */
      /* linker e.g. does not generate .got.plt sections on CentOS 3.0. */
      case Ist_IMark:
         /* Disable instrumentation while translating .plt-section code. */
         instrument = VG_(DebugInfo_sect_kind)(NULL, 0, st->Ist.IMark.addr)
            != Vg_SectPLT;
         addStmtToIRSB(bb, st);
         break;

      case Ist_MBE:
         switch (st->Ist.MBE.event)
         {
         case Imbe_Fence:
            break; /* not interesting */
         default:
            tl_assert(0);
         }
         addStmtToIRSB(bb, st);
         break;

      case Ist_Store:
         if (instrument)
         {
            instrument_store(bb,
                             st->Ist.Store.addr,
                             sizeofIRType(typeOfIRExpr(bb->tyenv,
                                                       st->Ist.Store.data)));
         }
         addStmtToIRSB(bb, st);
         break;

      case Ist_WrTmp:
         if (instrument)
         {
            const IRExpr* const data = st->Ist.WrTmp.data;
            /* Only WrTmp statements whose RHS is a Load touch memory. */
            if (data->tag == Iex_Load)
            {
               instrument_load(bb,
                               data->Iex.Load.addr,
                               sizeofIRType(data->Iex.Load.ty));
            }
         }
         addStmtToIRSB(bb, st);
         break;

      case Ist_Dirty:
         if (instrument)
         {
            IRDirty* d = st->Ist.Dirty.details;
            IREffect const mFx = d->mFx;
            /* Instrument the memory range the dirty helper declares it
               reads and/or writes. */
            switch (mFx) {
            case Ifx_None:
               break;
            case Ifx_Read:
            case Ifx_Write:
            case Ifx_Modify:
               tl_assert(d->mAddr);
               tl_assert(d->mSize > 0);
               argv = mkIRExprVec_2(d->mAddr, mkIRExpr_HWord(d->mSize));
               if (mFx == Ifx_Read || mFx == Ifx_Modify) {
                  di = unsafeIRDirty_0_N(
                          /*regparms*/2,
                          "drd_trace_load",
                          VG_(fnptr_to_fnentry)(DRD_(trace_load)),
                          argv);
                  addStmtToIRSB(bb, IRStmt_Dirty(di));
               }
               if (mFx == Ifx_Write || mFx == Ifx_Modify)
               {
                  di = unsafeIRDirty_0_N(
                          /*regparms*/2,
                          "drd_trace_store",
                          VG_(fnptr_to_fnentry)(DRD_(trace_store)),
                          argv);
                  addStmtToIRSB(bb, IRStmt_Dirty(di));
               }
               break;
            default:
               tl_assert(0);
            }
         }
         addStmtToIRSB(bb, st);
         break;

      case Ist_CAS:
         if (instrument)
         {
            /*
             * Treat compare-and-swap as a read. By handling atomic
             * instructions as read instructions no data races are reported
             * between conflicting atomic operations nor between atomic
             * operations and non-atomic reads. Conflicts between atomic
             * operations and non-atomic write operations are still reported
             * however.
             */
            Int    dataSize;
            IRCAS* cas = st->Ist.CAS.details;
            tl_assert(cas->addr != NULL);
            tl_assert(cas->dataLo != NULL);
            dataSize = sizeofIRType(typeOfIRExpr(bb->tyenv, cas->dataLo));
            if (cas->dataHi != NULL)
               dataSize *= 2; /* since it's a doubleword-CAS */
            instrument_load(bb, cas->addr, dataSize);
         }
         addStmtToIRSB(bb, st);
         break;

      case Ist_LLSC: {
         /* Ignore store-conditionals, and handle load-linked's
            exactly like normal loads. */
         IRType dataTy;
         if (st->Ist.LLSC.storedata == NULL)
         {
            /* LL */
            dataTy = typeOfIRTemp(bb_in->tyenv, st->Ist.LLSC.result);
            if (instrument) {
               instrument_load(bb,
                               st->Ist.LLSC.addr,
                               sizeofIRType(dataTy));
            }
         }
         else
         {
            /* SC */
            /*ignore */
         }
         addStmtToIRSB(bb, st);
         break;
      }

      case Ist_NoOp:
      case Ist_AbiHint:
      case Ist_Put:
      case Ist_PutI:
      case Ist_Exit:
         /* None of these can contain any memory references. */
         addStmtToIRSB(bb, st);
         break;

      default:
         ppIRStmt(st);
         tl_assert(0);
      }
   }

   return bb;
}