drd_load_store.c revision b68c947950428e2fd7f5ac0ac41a4a4fa9ea4abb
/*
  This file is part of drd, a thread error detector.

  Copyright (C) 2006-2013 Bart Van Assche <bvanassche@acm.org>.

  This program is free software; you can redistribute it and/or
  modify it under the terms of the GNU General Public License as
  published by the Free Software Foundation; either version 2 of the
  License, or (at your option) any later version.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
  02111-1307, USA.

  The GNU General Public License is contained in the file COPYING.
*/


#include "drd_bitmap.h"
#include "drd_thread_bitmap.h"
#include "drd_vc.h"            /* DRD_(vc_snprint)() */

/* Include several source files here in order to allow the compiler to */
/* do more inlining. */
#include "drd_bitmap.c"
#include "drd_load_store.h"
#include "drd_segment.c"
#include "drd_thread.c"
#include "drd_vc.c"
#include "libvex_guest_offsets.h"


/* STACK_POINTER_OFFSET: VEX register offset for the stack pointer register. */
#if defined(VGA_x86)
#define STACK_POINTER_OFFSET OFFSET_x86_ESP
#elif defined(VGA_amd64)
#define STACK_POINTER_OFFSET OFFSET_amd64_RSP
#elif defined(VGA_ppc32)
#define STACK_POINTER_OFFSET OFFSET_ppc32_GPR1
#elif defined(VGA_ppc64be) || defined(VGA_ppc64le)
#define STACK_POINTER_OFFSET OFFSET_ppc64_GPR1
#elif defined(VGA_arm)
#define STACK_POINTER_OFFSET OFFSET_arm_R13
#elif defined(VGA_arm64)
#define STACK_POINTER_OFFSET OFFSET_arm64_XSP
#elif defined(VGA_s390x)
#define STACK_POINTER_OFFSET OFFSET_s390x_r15
#elif defined(VGA_mips32)
#define STACK_POINTER_OFFSET OFFSET_mips32_r29
#elif defined(VGA_mips64)
#define STACK_POINTER_OFFSET OFFSET_mips64_r29
#else
#error Unknown architecture.
#endif


/* Local variables. */

static Bool s_check_stack_accesses = False;
static Bool s_first_race_only = False;


/* Function definitions. */
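
/*
 * Accessor functions for the two flags above: s_check_stack_accesses
 * controls whether accesses to stack memory are checked for data races,
 * and s_first_race_only controls whether only the first race detected on
 * a memory range is reported.
 */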
Bool DRD_(get_check_stack_accesses)()
{
   return s_check_stack_accesses;
}

void DRD_(set_check_stack_accesses)(const Bool c)
{
   tl_assert(c == False || c == True);
   s_check_stack_accesses = c;
}

Bool DRD_(get_first_race_only)()
{
   return s_first_race_only;
}

void DRD_(set_first_race_only)(const Bool fro)
{
   tl_assert(fro == False || fro == True);
   s_first_race_only = fro;
}

void DRD_(trace_mem_access)(const Addr addr, const SizeT size,
                            const BmAccessTypeT access_type,
                            const HWord stored_value_hi,
                            const HWord stored_value_lo)
{
   if (DRD_(is_any_traced)(addr, addr + size))
   {
      HChar* vc;

      vc = DRD_(vc_aprint)(DRD_(thread_get_vc)(DRD_(thread_get_running_tid)()));
      if (access_type == eStore && size <= sizeof(HWord)) {
         DRD_(trace_msg_w_bt)("store 0x%lx size %ld val %ld/0x%lx (thread %d /"
                              " vc %s)", addr, size, stored_value_lo,
                              stored_value_lo, DRD_(thread_get_running_tid)(),
                              vc);
      } else if (access_type == eStore && size > sizeof(HWord)) {
         ULong sv;

         tl_assert(sizeof(HWord) == 4);
         sv = ((ULong)stored_value_hi << 32) | stored_value_lo;
         DRD_(trace_msg_w_bt)("store 0x%lx size %ld val %lld/0x%llx (thread %d"
                              " / vc %s)", addr, size, sv, sv,
                              DRD_(thread_get_running_tid)(), vc);
      } else {
         DRD_(trace_msg_w_bt)("%s 0x%lx size %ld (thread %d / vc %s)",
                              access_type == eLoad ? "load "
                              : access_type == eStore ? "store"
                              : access_type == eStart ? "start"
                              : access_type == eEnd ? "end  " : "????",
                              addr, size, DRD_(thread_get_running_tid)(), vc);
      }
      VG_(free)(vc);
      tl_assert(DRD_(DrdThreadIdToVgThreadId)(DRD_(thread_get_running_tid)())
                == VG_(get_running_tid)());
   }
}

static VG_REGPARM(2) void drd_trace_mem_load(const Addr addr, const SizeT size)
{
   return DRD_(trace_mem_access)(addr, size, eLoad, 0, 0);
}

static VG_REGPARM(3) void drd_trace_mem_store(const Addr addr,const SizeT size,
                                              const HWord stored_value_hi,
                                              const HWord stored_value_lo)
{
   return DRD_(trace_mem_access)(addr, size, eStore, stored_value_hi,
                                 stored_value_lo);
}

static void drd_report_race(const Addr addr, const SizeT size,
                            const BmAccessTypeT access_type)
{
   ThreadId vg_tid;

   vg_tid = VG_(get_running_tid)();
   if (!DRD_(get_check_stack_accesses)()
       && DRD_(thread_address_on_any_stack)(addr)) {
#if 0
      GenericErrInfo GEI = {
         .tid = DRD_(thread_get_running_tid)(),
         .addr = addr,
      };
      VG_(maybe_record_error)(vg_tid, GenericErr, VG_(get_IP)(vg_tid),
                              "--check-stack-var=no skips checking stack"
                              " variables shared over threads",
                              &GEI);
#endif
   } else {
      DataRaceErrInfo drei = {
         .tid  = DRD_(thread_get_running_tid)(),
         .addr = addr,
         .size = size,
         .access_type = access_type,
      };
      VG_(maybe_record_error)(vg_tid, DataRaceErr, VG_(get_IP)(vg_tid),
                              "Conflicting access", &drei);

      if (s_first_race_only)
         DRD_(start_suppression)(addr, addr + size, "first race only");
   }
}

VG_REGPARM(2) void DRD_(trace_load)(Addr addr, SizeT size)
{
#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
   /* This consistency check is only compiled in when
      ENABLE_DRD_CONSISTENCY_CHECKS is defined, because it is too expensive
      to enable unconditionally. */
   tl_assert(DRD_(thread_get_running_tid)()
             == DRD_(VgThreadIdToDrdThreadId)(VG_(get_running_tid())));
#endif

   if (DRD_(running_thread_is_recording_loads)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_load_triggers_conflict(addr, addr + size)
       && ! DRD_(is_suppressed)(addr, addr + size))
   {
      drd_report_race(addr, size, eLoad);
   }
}
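
/*
 * Size-specialized variants of DRD_(trace_load)() for the most common
 * access sizes (1, 2, 4 and 8 bytes). instrument_load() below emits calls
 * to these helpers so that the access size does not have to be passed as
 * an extra argument.
 */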
static VG_REGPARM(1) void drd_trace_load_1(Addr addr)
{
   if (DRD_(running_thread_is_recording_loads)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_load_1_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 1))
   {
      drd_report_race(addr, 1, eLoad);
   }
}

static VG_REGPARM(1) void drd_trace_load_2(Addr addr)
{
   if (DRD_(running_thread_is_recording_loads)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_load_2_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 2))
   {
      drd_report_race(addr, 2, eLoad);
   }
}

static VG_REGPARM(1) void drd_trace_load_4(Addr addr)
{
   if (DRD_(running_thread_is_recording_loads)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_load_4_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 4))
   {
      drd_report_race(addr, 4, eLoad);
   }
}

static VG_REGPARM(1) void drd_trace_load_8(Addr addr)
{
   if (DRD_(running_thread_is_recording_loads)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_load_8_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 8))
   {
      drd_report_race(addr, 8, eLoad);
   }
}

VG_REGPARM(2) void DRD_(trace_store)(Addr addr, SizeT size)
{
#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
   /* This consistency check is only compiled in when
      ENABLE_DRD_CONSISTENCY_CHECKS is defined, because it is too expensive
      to enable unconditionally. */
   tl_assert(DRD_(thread_get_running_tid)()
             == DRD_(VgThreadIdToDrdThreadId)(VG_(get_running_tid())));
#endif

   if (DRD_(running_thread_is_recording_stores)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_store_triggers_conflict(addr, addr + size)
       && ! DRD_(is_suppressed)(addr, addr + size))
   {
      drd_report_race(addr, size, eStore);
   }
}

static VG_REGPARM(1) void drd_trace_store_1(Addr addr)
{
   if (DRD_(running_thread_is_recording_stores)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_store_1_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 1))
   {
      drd_report_race(addr, 1, eStore);
   }
}

static VG_REGPARM(1) void drd_trace_store_2(Addr addr)
{
   if (DRD_(running_thread_is_recording_stores)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_store_2_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 2))
   {
      drd_report_race(addr, 2, eStore);
   }
}

static VG_REGPARM(1) void drd_trace_store_4(Addr addr)
{
   if (DRD_(running_thread_is_recording_stores)()
       && (s_check_stack_accesses
           || !DRD_(thread_address_on_stack)(addr))
       && bm_access_store_4_triggers_conflict(addr)
       && !DRD_(is_suppressed)(addr, addr + 4))
   {
      drd_report_race(addr, 4, eStore);
   }
}

static VG_REGPARM(1) void drd_trace_store_8(Addr addr)
{
   if (DRD_(running_thread_is_recording_stores)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_store_8_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 8))
   {
      drd_report_race(addr, 8, eStore);
   }
}

/**
 * Return true if and only if addr_expr matches the pattern (SP) or
 * <offset>(SP).
 */
static Bool is_stack_access(IRSB* const bb, IRExpr* const addr_expr)
{
   Bool result = False;

   if (addr_expr->tag == Iex_RdTmp)
   {
      int i;
      for (i = 0; i < bb->stmts_used; i++)
      {
         if (bb->stmts[i]
             && bb->stmts[i]->tag == Ist_WrTmp
             && bb->stmts[i]->Ist.WrTmp.tmp == addr_expr->Iex.RdTmp.tmp)
         {
            IRExpr* e = bb->stmts[i]->Ist.WrTmp.data;
            if (e->tag == Iex_Get && e->Iex.Get.offset == STACK_POINTER_OFFSET)
            {
               result = True;
            }

            //ppIRExpr(e);
            //VG_(printf)(" (%s)\n", result ? "True" : "False");
            break;
         }
      }
   }
   return result;
}
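
/*
 * Table that maps an integer IRType and the host word size to the IROp that
 * zero-extends a value of that type to a host word. Rows are indexed by
 * (type - Ity_I1) and columns by sizeof(HWord); entries left zero mean that
 * no widening operation is available for that combination.
 */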
static const IROp u_widen_irop[5][9] = {
   [Ity_I1  - Ity_I1] = { [4] = Iop_1Uto32,  [8] = Iop_1Uto64 },
   [Ity_I8  - Ity_I1] = { [4] = Iop_8Uto32,  [8] = Iop_8Uto64 },
   [Ity_I16 - Ity_I1] = { [4] = Iop_16Uto32, [8] = Iop_16Uto64 },
   [Ity_I32 - Ity_I1] = { [8] = Iop_32Uto64 },
};

/**
 * Instrument the client code to trace a memory load (--trace-addr).
 */
static IRExpr* instr_trace_mem_load(IRSB* const bb, IRExpr* addr_expr,
                                    const HWord size,
                                    IRExpr* const guard/* NULL => True */)
{
   IRTemp tmp;

   tmp = newIRTemp(bb->tyenv, typeOfIRExpr(bb->tyenv, addr_expr));
   addStmtToIRSB(bb, IRStmt_WrTmp(tmp, addr_expr));
   addr_expr = IRExpr_RdTmp(tmp);
   IRDirty* di
      = unsafeIRDirty_0_N(/*regparms*/2,
                          "drd_trace_mem_load",
                          VG_(fnptr_to_fnentry)(drd_trace_mem_load),
                          mkIRExprVec_2(addr_expr, mkIRExpr_HWord(size)));
   if (guard) di->guard = guard;
   addStmtToIRSB(bb, IRStmt_Dirty(di));

   return addr_expr;
}

/**
 * Instrument the client code to trace a memory store (--trace-addr).
 */
static void instr_trace_mem_store(IRSB* const bb, IRExpr* const addr_expr,
                                  IRExpr* data_expr_hi, IRExpr* data_expr_lo,
                                  IRExpr* const guard/* NULL => True */)
{
   IRType ty_data_expr;
   HWord size;

   tl_assert(sizeof(HWord) == 4 || sizeof(HWord) == 8);
   tl_assert(!data_expr_hi || typeOfIRExpr(bb->tyenv, data_expr_hi) == Ity_I32);

   ty_data_expr = typeOfIRExpr(bb->tyenv, data_expr_lo);
   size = sizeofIRType(ty_data_expr);

#if 0
   // Test code
   if (ty_data_expr == Ity_I32) {
      IRTemp tmp = newIRTemp(bb->tyenv, Ity_F32);
      data_expr_lo = IRExpr_Unop(Iop_ReinterpI32asF32, data_expr_lo);
      addStmtToIRSB(bb, IRStmt_WrTmp(tmp, data_expr_lo));
      data_expr_lo = IRExpr_RdTmp(tmp);
      ty_data_expr = Ity_F32;
   } else if (ty_data_expr == Ity_I64) {
      IRTemp tmp = newIRTemp(bb->tyenv, Ity_F64);
      data_expr_lo = IRExpr_Unop(Iop_ReinterpI64asF64, data_expr_lo);
      addStmtToIRSB(bb, IRStmt_WrTmp(tmp, data_expr_lo));
      data_expr_lo = IRExpr_RdTmp(tmp);
      ty_data_expr = Ity_F64;
   }
#endif

   if (ty_data_expr == Ity_F32) {
      IRTemp tmp = newIRTemp(bb->tyenv, Ity_I32);
      addStmtToIRSB(bb, IRStmt_WrTmp(tmp, IRExpr_Unop(Iop_ReinterpF32asI32,
                                                      data_expr_lo)));
      data_expr_lo = IRExpr_RdTmp(tmp);
      ty_data_expr = Ity_I32;
   } else if (ty_data_expr == Ity_F64) {
      IRTemp tmp = newIRTemp(bb->tyenv, Ity_I64);
      addStmtToIRSB(bb, IRStmt_WrTmp(tmp, IRExpr_Unop(Iop_ReinterpF64asI64,
                                                      data_expr_lo)));
      data_expr_lo = IRExpr_RdTmp(tmp);
      ty_data_expr = Ity_I64;
   }

   if (size == sizeof(HWord)
       && (ty_data_expr == Ity_I32 || ty_data_expr == Ity_I64))
   {
      /* No conversion necessary */
   } else {
      IROp widen_op;

      if (Ity_I1 <= ty_data_expr
          && ty_data_expr
             < Ity_I1 + sizeof(u_widen_irop)/sizeof(u_widen_irop[0]))
      {
         widen_op = u_widen_irop[ty_data_expr - Ity_I1][sizeof(HWord)];
         if (!widen_op)
            widen_op = Iop_INVALID;
      } else {
         widen_op = Iop_INVALID;
      }
      if (widen_op != Iop_INVALID) {
         IRTemp tmp;

         /* Widen the integer expression to a HWord */
         tmp = newIRTemp(bb->tyenv, sizeof(HWord) == 4 ? Ity_I32 : Ity_I64);
         addStmtToIRSB(bb,
                       IRStmt_WrTmp(tmp, IRExpr_Unop(widen_op, data_expr_lo)));
         data_expr_lo = IRExpr_RdTmp(tmp);
      } else if (size > sizeof(HWord) && !data_expr_hi
                 && ty_data_expr == Ity_I64) {
         IRTemp tmp;

         tl_assert(sizeof(HWord) == 4);
         tl_assert(size == 8);
         tmp = newIRTemp(bb->tyenv, Ity_I32);
         addStmtToIRSB(bb,
                       IRStmt_WrTmp(tmp,
                                    IRExpr_Unop(Iop_64HIto32, data_expr_lo)));
         data_expr_hi = IRExpr_RdTmp(tmp);
         tmp = newIRTemp(bb->tyenv, Ity_I32);
         addStmtToIRSB(bb, IRStmt_WrTmp(tmp,
                                        IRExpr_Unop(Iop_64to32, data_expr_lo)));
         data_expr_lo = IRExpr_RdTmp(tmp);
      } else {
         data_expr_lo = mkIRExpr_HWord(0);
      }
   }
   IRDirty* di
      = unsafeIRDirty_0_N(/*regparms*/3,
                          "drd_trace_mem_store",
                          VG_(fnptr_to_fnentry)(drd_trace_mem_store),
                          mkIRExprVec_4(addr_expr, mkIRExpr_HWord(size),
                                        data_expr_hi ? data_expr_hi
                                        : mkIRExpr_HWord(0), data_expr_lo));
   if (guard) di->guard = guard;
   addStmtToIRSB(bb, IRStmt_Dirty(di) );
}
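
/*
 * Add instrumentation for a load of 'size' bytes at 'addr_expr': emit a call
 * to the matching drd_trace_load_*() helper, or to DRD_(trace_load)() for
 * other sizes. Stack accesses are skipped unless checking of stack accesses
 * has been enabled.
 */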
static void instrument_load(IRSB* const bb, IRExpr* const addr_expr,
                            const HWord size,
                            IRExpr* const guard/* NULL => True */)
{
   IRExpr* size_expr;
   IRExpr** argv;
   IRDirty* di;

   if (!s_check_stack_accesses && is_stack_access(bb, addr_expr))
      return;

   switch (size)
   {
   case 1:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_load_1",
                             VG_(fnptr_to_fnentry)(drd_trace_load_1),
                             argv);
      break;
   case 2:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_load_2",
                             VG_(fnptr_to_fnentry)(drd_trace_load_2),
                             argv);
      break;
   case 4:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_load_4",
                             VG_(fnptr_to_fnentry)(drd_trace_load_4),
                             argv);
      break;
   case 8:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_load_8",
                             VG_(fnptr_to_fnentry)(drd_trace_load_8),
                             argv);
      break;
   default:
      size_expr = mkIRExpr_HWord(size);
      argv = mkIRExprVec_2(addr_expr, size_expr);
      di = unsafeIRDirty_0_N(/*regparms*/2,
                             "drd_trace_load",
                             VG_(fnptr_to_fnentry)(DRD_(trace_load)),
                             argv);
      break;
   }
   if (guard) di->guard = guard;
   addStmtToIRSB(bb, IRStmt_Dirty(di));
}

static void instrument_store(IRSB* const bb, IRExpr* addr_expr,
                             IRExpr* const data_expr,
                             IRExpr* const guard_expr/* NULL => True */)
{
   IRExpr* size_expr;
   IRExpr** argv;
   IRDirty* di;
   HWord size;

   size = sizeofIRType(typeOfIRExpr(bb->tyenv, data_expr));

   if (UNLIKELY(DRD_(any_address_is_traced)())) {
      IRTemp tmp = newIRTemp(bb->tyenv, typeOfIRExpr(bb->tyenv, addr_expr));
      addStmtToIRSB(bb, IRStmt_WrTmp(tmp, addr_expr));
      addr_expr = IRExpr_RdTmp(tmp);
      instr_trace_mem_store(bb, addr_expr, NULL, data_expr, guard_expr);
   }

   if (!s_check_stack_accesses && is_stack_access(bb, addr_expr))
      return;

   switch (size)
   {
   case 1:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_store_1",
                             VG_(fnptr_to_fnentry)(drd_trace_store_1),
                             argv);
      break;
   case 2:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_store_2",
                             VG_(fnptr_to_fnentry)(drd_trace_store_2),
                             argv);
      break;
   case 4:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_store_4",
                             VG_(fnptr_to_fnentry)(drd_trace_store_4),
                             argv);
      break;
   case 8:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_store_8",
                             VG_(fnptr_to_fnentry)(drd_trace_store_8),
                             argv);
      break;
   default:
      size_expr = mkIRExpr_HWord(size);
      argv = mkIRExprVec_2(addr_expr, size_expr);
      di = unsafeIRDirty_0_N(/*regparms*/2,
                             "drd_trace_store",
                             VG_(fnptr_to_fnentry)(DRD_(trace_store)),
                             argv);
      break;
   }
   if (guard_expr) di->guard = guard_expr;
   addStmtToIRSB(bb, IRStmt_Dirty(di));
}
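
/*
 * DRD's instrumentation callback: copy bb_in into a new IRSB and insert
 * calls to the tracing and race-detection helpers above in front of every
 * statement that accesses memory. Code in PLT sections is not instrumented.
 */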
IRSB* DRD_(instrument)(VgCallbackClosure* const closure,
                       IRSB* const bb_in,
                       VexGuestLayout* const layout,
                       VexGuestExtents* const vge,
                       VexArchInfo* archinfo_host,
                       IRType const gWordTy,
                       IRType const hWordTy)
{
   IRDirty* di;
   Int i;
   IRSB* bb;
   IRExpr** argv;
   Bool instrument = True;

   /* Set up BB */
   bb = emptyIRSB();
   bb->tyenv = deepCopyIRTypeEnv(bb_in->tyenv);
   bb->next = deepCopyIRExpr(bb_in->next);
   bb->jumpkind = bb_in->jumpkind;
   bb->offsIP = bb_in->offsIP;

   for (i = 0; i < bb_in->stmts_used; i++)
   {
      IRStmt* const st = bb_in->stmts[i];
      tl_assert(st);
      tl_assert(isFlatIRStmt(st));

      switch (st->tag)
      {
      /* Note: the code for not instrumenting the code in .plt */
      /* sections is only necessary on CentOS 3.0 x86 (kernel 2.4.21 */
      /* + glibc 2.3.2 + NPTL 0.60 + binutils 2.14.90.0.4). */
      /* This is because on this platform dynamic library symbols are */
      /* relocated in another way than by later binutils versions. The */
      /* linker e.g. does not generate .got.plt sections on CentOS 3.0. */
      case Ist_IMark:
         instrument = VG_(DebugInfo_sect_kind)(NULL, 0, st->Ist.IMark.addr)
            != Vg_SectPLT;
         addStmtToIRSB(bb, st);
         break;

      case Ist_MBE:
         switch (st->Ist.MBE.event)
         {
         case Imbe_Fence:
            break; /* not interesting to DRD */
         case Imbe_CancelReservation:
            break; /* not interesting to DRD */
         default:
            tl_assert(0);
         }
         addStmtToIRSB(bb, st);
         break;

      case Ist_Store:
         if (instrument)
            instrument_store(bb, st->Ist.Store.addr, st->Ist.Store.data,
                             NULL/* no guard */);
         addStmtToIRSB(bb, st);
         break;

      case Ist_StoreG: {
         IRStoreG* sg = st->Ist.StoreG.details;
         IRExpr* data = sg->data;
         IRExpr* addr = sg->addr;
         if (instrument)
            instrument_store(bb, addr, data, sg->guard);
         addStmtToIRSB(bb, st);
         break;
      }

      case Ist_LoadG: {
         IRLoadG* lg = st->Ist.LoadG.details;
         IRType type = Ity_INVALID;     /* loaded type */
         IRType typeWide = Ity_INVALID; /* after implicit widening */
         IRExpr* addr_expr = lg->addr;
         typeOfIRLoadGOp(lg->cvt, &typeWide, &type);
         tl_assert(type != Ity_INVALID);
         if (UNLIKELY(DRD_(any_address_is_traced)())) {
            addr_expr = instr_trace_mem_load(bb, addr_expr,
                                             sizeofIRType(type), lg->guard);
         }
         instrument_load(bb, lg->addr,
                         sizeofIRType(type), lg->guard);
         addStmtToIRSB(bb, st);
         break;
      }

      case Ist_WrTmp:
         if (instrument) {
            const IRExpr* const data = st->Ist.WrTmp.data;
            IRExpr* addr_expr = data->Iex.Load.addr;
            if (data->tag == Iex_Load) {
               if (UNLIKELY(DRD_(any_address_is_traced)())) {
                  addr_expr = instr_trace_mem_load(bb, addr_expr,
                                                   sizeofIRType(data->Iex.Load.ty),
                                                   NULL/* no guard */);
               }
               instrument_load(bb, addr_expr, sizeofIRType(data->Iex.Load.ty),
                               NULL/* no guard */);
            }
         }
         addStmtToIRSB(bb, st);
         break;
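
      /*
       * Dirty helper calls may read and/or write client memory; instrument
       * the memory effects they declare (mAddr/mSize/mFx) as ordinary loads
       * and stores.
       */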
      case Ist_Dirty:
         if (instrument) {
            IRDirty* d = st->Ist.Dirty.details;
            IREffect const mFx = d->mFx;
            switch (mFx) {
            case Ifx_None:
               break;
            case Ifx_Read:
            case Ifx_Write:
            case Ifx_Modify:
               tl_assert(d->mAddr);
               tl_assert(d->mSize > 0);
               argv = mkIRExprVec_2(d->mAddr, mkIRExpr_HWord(d->mSize));
               if (mFx == Ifx_Read || mFx == Ifx_Modify) {
                  di = unsafeIRDirty_0_N(
                          /*regparms*/2,
                          "drd_trace_load",
                          VG_(fnptr_to_fnentry)(DRD_(trace_load)),
                          argv);
                  addStmtToIRSB(bb, IRStmt_Dirty(di));
               }
               if (mFx == Ifx_Write || mFx == Ifx_Modify)
               {
                  di = unsafeIRDirty_0_N(
                          /*regparms*/2,
                          "drd_trace_store",
                          VG_(fnptr_to_fnentry)(DRD_(trace_store)),
                          argv);
                  addStmtToIRSB(bb, IRStmt_Dirty(di));
               }
               break;
            default:
               tl_assert(0);
            }
         }
         addStmtToIRSB(bb, st);
         break;

      case Ist_CAS:
         if (instrument) {
            /*
             * Treat compare-and-swap as a read. By handling atomic
             * instructions as read instructions no data races are reported
             * between conflicting atomic operations nor between atomic
             * operations and non-atomic reads. Conflicts between atomic
             * operations and non-atomic write operations are still reported
             * however.
             */
            Int dataSize;
            IRCAS* cas = st->Ist.CAS.details;

            tl_assert(cas->addr != NULL);
            tl_assert(cas->dataLo != NULL);
            dataSize = sizeofIRType(typeOfIRExpr(bb->tyenv, cas->dataLo));
            if (cas->dataHi != NULL)
               dataSize *= 2; /* since it's a doubleword-CAS */

            if (UNLIKELY(DRD_(any_address_is_traced)()))
               instr_trace_mem_store(bb, cas->addr, cas->dataHi, cas->dataLo,
                                     NULL/* no guard */);

            instrument_load(bb, cas->addr, dataSize, NULL/*no guard*/);
         }
         addStmtToIRSB(bb, st);
         break;

      case Ist_LLSC: {
         /*
          * Ignore store-conditionals (except for tracing), and handle
          * load-linked's exactly like normal loads.
          */
         IRType dataTy;

         if (st->Ist.LLSC.storedata == NULL) {
            /* LL */
            dataTy = typeOfIRTemp(bb_in->tyenv, st->Ist.LLSC.result);
            if (instrument) {
               IRExpr* addr_expr = st->Ist.LLSC.addr;
               if (UNLIKELY(DRD_(any_address_is_traced)()))
                  addr_expr = instr_trace_mem_load(bb, addr_expr,
                                                   sizeofIRType(dataTy),
                                                   NULL /* no guard */);

               instrument_load(bb, addr_expr, sizeofIRType(dataTy),
                               NULL/*no guard*/);
            }
         } else {
            /* SC */
            instr_trace_mem_store(bb, st->Ist.LLSC.addr, NULL,
                                  st->Ist.LLSC.storedata,
                                  NULL/* no guard */);
         }
         addStmtToIRSB(bb, st);
         break;
      }

      case Ist_NoOp:
      case Ist_AbiHint:
      case Ist_Put:
      case Ist_PutI:
      case Ist_Exit:
         /* None of these can contain any memory references. */
         addStmtToIRSB(bb, st);
         break;

      default:
         ppIRStmt(st);
         tl_assert(0);
      }
   }

   return bb;
}