/*
  This file is part of drd, a thread error detector.

  Copyright (C) 2006-2015 Bart Van Assche <bvanassche@acm.org>.

  This program is free software; you can redistribute it and/or
  modify it under the terms of the GNU General Public License as
  published by the Free Software Foundation; either version 2 of the
  License, or (at your option) any later version.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
  02111-1307, USA.

  The GNU General Public License is contained in the file COPYING.
*/


#include "drd_bitmap.h"
#include "drd_thread_bitmap.h"
#include "drd_vc.h"            /* DRD_(vc_snprint)() */

/* Include several source files here in order to allow the compiler to */
/* do more inlining.                                                   */
#include "drd_bitmap.c"
#include "drd_load_store.h"
#include "drd_segment.c"
#include "drd_thread.c"
#include "drd_vc.c"
#include "libvex_guest_offsets.h"


/* STACK_POINTER_OFFSET: VEX register offset for the stack pointer register. */
#if defined(VGA_x86)
#define STACK_POINTER_OFFSET OFFSET_x86_ESP
#elif defined(VGA_amd64)
#define STACK_POINTER_OFFSET OFFSET_amd64_RSP
#elif defined(VGA_ppc32)
#define STACK_POINTER_OFFSET OFFSET_ppc32_GPR1
#elif defined(VGA_ppc64be) || defined(VGA_ppc64le)
#define STACK_POINTER_OFFSET OFFSET_ppc64_GPR1
#elif defined(VGA_arm)
#define STACK_POINTER_OFFSET OFFSET_arm_R13
#elif defined(VGA_arm64)
#define STACK_POINTER_OFFSET OFFSET_arm64_XSP
#elif defined(VGA_s390x)
#define STACK_POINTER_OFFSET OFFSET_s390x_r15
#elif defined(VGA_mips32)
#define STACK_POINTER_OFFSET OFFSET_mips32_r29
#elif defined(VGA_mips64)
#define STACK_POINTER_OFFSET OFFSET_mips64_r29
#elif defined(VGA_tilegx)
#define STACK_POINTER_OFFSET OFFSET_tilegx_r54
#else
#error Unknown architecture.
#endif


/* Local variables. */

static Bool s_check_stack_accesses = False;
static Bool s_first_race_only = False;


/* Function definitions. */

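/*
 * Getters and setters for the two flags above. Both flags are set from DRD's
 * command-line option handling (--check-stack-var for the former and,
 * presumably, --first-race-only for the latter).
 */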
Bool DRD_(get_check_stack_accesses)()
{
   return s_check_stack_accesses;
}

void DRD_(set_check_stack_accesses)(const Bool c)
{
   tl_assert(c == False || c == True);
   s_check_stack_accesses = c;
}

Bool DRD_(get_first_race_only)()
{
   return s_first_race_only;
}

void DRD_(set_first_race_only)(const Bool fro)
{
   tl_assert(fro == False || fro == True);
   s_first_race_only = fro;
}

void DRD_(trace_mem_access)(const Addr addr, const SizeT size,
                            const BmAccessTypeT access_type,
                            const HWord stored_value_hi,
                            const HWord stored_value_lo)
{
   if (DRD_(is_any_traced)(addr, addr + size))
   {
      HChar* vc;

      vc = DRD_(vc_aprint)(DRD_(thread_get_vc)(DRD_(thread_get_running_tid)()));
      if (access_type == eStore && size <= sizeof(HWord)) {
         DRD_(trace_msg_w_bt)("store 0x%lx size %lu val %lu/0x%lx (thread %u /"
                              " vc %s)", addr, size, stored_value_lo,
                              stored_value_lo, DRD_(thread_get_running_tid)(),
                              vc);
      } else if (access_type == eStore && size > sizeof(HWord)) {
         ULong sv;

         tl_assert(sizeof(HWord) == 4);
         sv = ((ULong)stored_value_hi << 32) | stored_value_lo;
         DRD_(trace_msg_w_bt)("store 0x%lx size %lu val %llu/0x%llx (thread %u"
                              " / vc %s)", addr, size, sv, sv,
                              DRD_(thread_get_running_tid)(), vc);
      } else {
         DRD_(trace_msg_w_bt)("%s 0x%lx size %lu (thread %u / vc %s)",
                              access_type == eLoad ? "load "
                              : access_type == eStore ? "store"
                              : access_type == eStart ? "start"
                              : access_type == eEnd ? "end  " : "????",
                              addr, size, DRD_(thread_get_running_tid)(), vc);
      }
      VG_(free)(vc);
      tl_assert(DRD_(DrdThreadIdToVgThreadId)(DRD_(thread_get_running_tid)())
                == VG_(get_running_tid)());
   }
}

static VG_REGPARM(2) void drd_trace_mem_load(const Addr addr, const SizeT size)
{
   DRD_(trace_mem_access)(addr, size, eLoad, 0, 0);
}

static VG_REGPARM(3) void drd_trace_mem_store(const Addr addr, const SizeT size,
                                              const HWord stored_value_hi,
                                              const HWord stored_value_lo)
{
   DRD_(trace_mem_access)(addr, size, eStore, stored_value_hi,
                          stored_value_lo);
}

static void drd_report_race(const Addr addr, const SizeT size,
                            const BmAccessTypeT access_type)
{
   ThreadId vg_tid;

   vg_tid = VG_(get_running_tid)();
   if (!DRD_(get_check_stack_accesses)()
       && DRD_(thread_address_on_any_stack)(addr)) {
#if 0
      GenericErrInfo GEI = {
         .tid = DRD_(thread_get_running_tid)(),
         .addr = addr,
      };
      VG_(maybe_record_error)(vg_tid, GenericErr, VG_(get_IP)(vg_tid),
                              "--check-stack-var=no skips checking stack"
                              " variables shared over threads",
                              &GEI);
#endif
   } else {
      DataRaceErrInfo drei = {
         .tid = DRD_(thread_get_running_tid)(),
         .addr = addr,
         .size = size,
         .access_type = access_type,
      };
      VG_(maybe_record_error)(vg_tid, DataRaceErr, VG_(get_IP)(vg_tid),
                              "Conflicting access", &drei);

      if (s_first_race_only)
         DRD_(start_suppression)(addr, addr + size, "first race only");
   }
}

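/*
 * Race-detection callbacks invoked from the instrumented code. The generic
 * DRD_(trace_load)() and DRD_(trace_store)() receive the access size as a
 * second argument; the drd_trace_{load,store}_{1,2,4,8}() variants below are
 * specialized for the common access sizes so that only the address needs to
 * be passed.
 */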
VG_REGPARM(2) void DRD_(trace_load)(Addr addr, SizeT size)
{
#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
   /* This consistency check is only compiled in when
      ENABLE_DRD_CONSISTENCY_CHECKS is defined, because of its performance
      impact. */
   tl_assert(DRD_(thread_get_running_tid)()
             == DRD_(VgThreadIdToDrdThreadId)(VG_(get_running_tid)()));
#endif

   if (DRD_(running_thread_is_recording_loads)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_load_triggers_conflict(addr, addr + size)
       && ! DRD_(is_suppressed)(addr, addr + size))
   {
      drd_report_race(addr, size, eLoad);
   }
}

static VG_REGPARM(1) void drd_trace_load_1(Addr addr)
{
   if (DRD_(running_thread_is_recording_loads)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_load_1_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 1))
   {
      drd_report_race(addr, 1, eLoad);
   }
}

static VG_REGPARM(1) void drd_trace_load_2(Addr addr)
{
   if (DRD_(running_thread_is_recording_loads)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_load_2_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 2))
   {
      drd_report_race(addr, 2, eLoad);
   }
}

static VG_REGPARM(1) void drd_trace_load_4(Addr addr)
{
   if (DRD_(running_thread_is_recording_loads)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_load_4_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 4))
   {
      drd_report_race(addr, 4, eLoad);
   }
}

static VG_REGPARM(1) void drd_trace_load_8(Addr addr)
{
   if (DRD_(running_thread_is_recording_loads)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_load_8_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 8))
   {
      drd_report_race(addr, 8, eLoad);
   }
}

VG_REGPARM(2) void DRD_(trace_store)(Addr addr, SizeT size)
{
#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
   /* This consistency check is only compiled in when
      ENABLE_DRD_CONSISTENCY_CHECKS is defined, because of its performance
      impact. */
   tl_assert(DRD_(thread_get_running_tid)()
             == DRD_(VgThreadIdToDrdThreadId)(VG_(get_running_tid)()));
#endif

   if (DRD_(running_thread_is_recording_stores)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_store_triggers_conflict(addr, addr + size)
       && ! DRD_(is_suppressed)(addr, addr + size))
   {
      drd_report_race(addr, size, eStore);
   }
}

static VG_REGPARM(1) void drd_trace_store_1(Addr addr)
{
   if (DRD_(running_thread_is_recording_stores)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_store_1_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 1))
   {
      drd_report_race(addr, 1, eStore);
   }
}

static VG_REGPARM(1) void drd_trace_store_2(Addr addr)
{
   if (DRD_(running_thread_is_recording_stores)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_store_2_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 2))
   {
      drd_report_race(addr, 2, eStore);
   }
}

static VG_REGPARM(1) void drd_trace_store_4(Addr addr)
{
   if (DRD_(running_thread_is_recording_stores)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_store_4_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 4))
   {
      drd_report_race(addr, 4, eStore);
   }
}

static VG_REGPARM(1) void drd_trace_store_8(Addr addr)
{
   if (DRD_(running_thread_is_recording_stores)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_store_8_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 8))
   {
      drd_report_race(addr, 8, eStore);
   }
}

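/*
 * In flat IR a direct stack access shows up as a load or store whose address
 * is a temporary that was assigned straight from the stack pointer guest
 * register, roughly (on amd64):
 *
 *    t5 = GET:I64(OFFSET_amd64_RSP)
 *    STle(t5) = t7
 *
 * is_stack_access() below recognizes exactly this pattern; addresses that are
 * merely derived from the stack pointer (e.g. SP plus a constant offset) are
 * not matched and hence remain instrumented.
 */
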
/**
 * Return True if and only if addr_expr is a temporary to which the current
 * value of the stack pointer register has been assigned, i.e. if the address
 * matches the pattern (SP).
 */
static Bool is_stack_access(IRSB* const bb, IRExpr* const addr_expr)
{
   Bool result = False;

   if (addr_expr->tag == Iex_RdTmp)
   {
      int i;
      for (i = 0; i < bb->stmts_used; i++)
      {
         if (bb->stmts[i]
             && bb->stmts[i]->tag == Ist_WrTmp
             && bb->stmts[i]->Ist.WrTmp.tmp == addr_expr->Iex.RdTmp.tmp)
         {
            IRExpr* e = bb->stmts[i]->Ist.WrTmp.data;
            if (e->tag == Iex_Get && e->Iex.Get.offset == STACK_POINTER_OFFSET)
            {
               result = True;
            }

            //ppIRExpr(e);
            //VG_(printf)(" (%s)\n", result ? "True" : "False");
            break;
         }
      }
   }
   return result;
}

/*
 * Table of unsigned widening operations, indexed by the source type (relative
 * to Ity_I1) and by the destination size in bytes (sizeof(HWord), i.e. 4 or
 * 8). Entries that are not filled in remain Iop_INVALID.
 */
static const IROp u_widen_irop[5][9] = {
   [Ity_I1  - Ity_I1] = { [4] = Iop_1Uto32,  [8] = Iop_1Uto64 },
   [Ity_I8  - Ity_I1] = { [4] = Iop_8Uto32,  [8] = Iop_8Uto64 },
   [Ity_I16 - Ity_I1] = { [4] = Iop_16Uto32, [8] = Iop_16Uto64 },
   [Ity_I32 - Ity_I1] = { [8] = Iop_32Uto64 },
};

/**
 * Instrument the client code to trace a memory load (--trace-addr).
 */
static IRExpr* instr_trace_mem_load(IRSB* const bb, IRExpr* addr_expr,
                                    const HWord size,
                                    IRExpr* const guard/* NULL => True */)
{
   IRTemp tmp;

   tmp = newIRTemp(bb->tyenv, typeOfIRExpr(bb->tyenv, addr_expr));
   addStmtToIRSB(bb, IRStmt_WrTmp(tmp, addr_expr));
   addr_expr = IRExpr_RdTmp(tmp);
   IRDirty* di
      = unsafeIRDirty_0_N(/*regparms*/2,
                          "drd_trace_mem_load",
                          VG_(fnptr_to_fnentry)
                          (drd_trace_mem_load),
                          mkIRExprVec_2(addr_expr, mkIRExpr_HWord(size)));
   if (guard) di->guard = guard;
   addStmtToIRSB(bb, IRStmt_Dirty(di));

   return addr_expr;
}

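/*
 * Stored values are handed to drd_trace_mem_store() as at most two HWord
 * arguments: on 64-bit hosts the value fits in stored_value_lo, while on
 * 32-bit hosts a 64-bit store is split into stored_value_hi (upper half) and
 * stored_value_lo (lower half). Values that cannot be converted to this form
 * are traced with a value of zero.
 */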
/**
 * Instrument the client code to trace a memory store (--trace-addr).
 */
static void instr_trace_mem_store(IRSB* const bb, IRExpr* const addr_expr,
                                  IRExpr* data_expr_hi, IRExpr* data_expr_lo,
                                  IRExpr* const guard/* NULL => True */)
{
   IRType ty_data_expr;
   HWord size;

   tl_assert(sizeof(HWord) == 4 || sizeof(HWord) == 8);
   tl_assert(!data_expr_hi || typeOfIRExpr(bb->tyenv, data_expr_hi) == Ity_I32);

   ty_data_expr = typeOfIRExpr(bb->tyenv, data_expr_lo);
   size = sizeofIRType(ty_data_expr);

#if 0
   // Test code
   if (ty_data_expr == Ity_I32) {
      IRTemp tmp = newIRTemp(bb->tyenv, Ity_F32);
      data_expr_lo = IRExpr_Unop(Iop_ReinterpI32asF32, data_expr_lo);
      addStmtToIRSB(bb, IRStmt_WrTmp(tmp, data_expr_lo));
      data_expr_lo = IRExpr_RdTmp(tmp);
      ty_data_expr = Ity_F32;
   } else if (ty_data_expr == Ity_I64) {
      IRTemp tmp = newIRTemp(bb->tyenv, Ity_F64);
      data_expr_lo = IRExpr_Unop(Iop_ReinterpI64asF64, data_expr_lo);
      addStmtToIRSB(bb, IRStmt_WrTmp(tmp, data_expr_lo));
      data_expr_lo = IRExpr_RdTmp(tmp);
      ty_data_expr = Ity_F64;
   }
#endif

   if (ty_data_expr == Ity_F32) {
      IRTemp tmp = newIRTemp(bb->tyenv, Ity_I32);
      addStmtToIRSB(bb, IRStmt_WrTmp(tmp, IRExpr_Unop(Iop_ReinterpF32asI32,
                                                      data_expr_lo)));
      data_expr_lo = IRExpr_RdTmp(tmp);
      ty_data_expr = Ity_I32;
   } else if (ty_data_expr == Ity_F64) {
      IRTemp tmp = newIRTemp(bb->tyenv, Ity_I64);
      addStmtToIRSB(bb, IRStmt_WrTmp(tmp, IRExpr_Unop(Iop_ReinterpF64asI64,
                                                      data_expr_lo)));
      data_expr_lo = IRExpr_RdTmp(tmp);
      ty_data_expr = Ity_I64;
   }

   if (size == sizeof(HWord)
       && (ty_data_expr == Ity_I32 || ty_data_expr == Ity_I64))
   {
      /* No conversion necessary */
   } else {
      IROp widen_op;

      if (Ity_I1 <= ty_data_expr
          && ty_data_expr
             < Ity_I1 + sizeof(u_widen_irop)/sizeof(u_widen_irop[0]))
      {
         widen_op = u_widen_irop[ty_data_expr - Ity_I1][sizeof(HWord)];
         if (!widen_op)
            widen_op = Iop_INVALID;
      } else {
         widen_op = Iop_INVALID;
      }
      if (widen_op != Iop_INVALID) {
         IRTemp tmp;

         /* Widen the integer expression to a HWord */
         tmp = newIRTemp(bb->tyenv, sizeof(HWord) == 4 ? Ity_I32 : Ity_I64);
         addStmtToIRSB(bb,
                       IRStmt_WrTmp(tmp, IRExpr_Unop(widen_op, data_expr_lo)));
         data_expr_lo = IRExpr_RdTmp(tmp);
      } else if (size > sizeof(HWord) && !data_expr_hi
                 && ty_data_expr == Ity_I64) {
         IRTemp tmp;

         tl_assert(sizeof(HWord) == 4);
         tl_assert(size == 8);
         tmp = newIRTemp(bb->tyenv, Ity_I32);
         addStmtToIRSB(bb,
                       IRStmt_WrTmp(tmp,
                                    IRExpr_Unop(Iop_64HIto32, data_expr_lo)));
         data_expr_hi = IRExpr_RdTmp(tmp);
         tmp = newIRTemp(bb->tyenv, Ity_I32);
         addStmtToIRSB(bb, IRStmt_WrTmp(tmp,
                                        IRExpr_Unop(Iop_64to32, data_expr_lo)));
         data_expr_lo = IRExpr_RdTmp(tmp);
      } else {
         data_expr_lo = mkIRExpr_HWord(0);
      }
   }
   IRDirty* di
      = unsafeIRDirty_0_N(/*regparms*/3,
                          "drd_trace_mem_store",
                          VG_(fnptr_to_fnentry)(drd_trace_mem_store),
                          mkIRExprVec_4(addr_expr, mkIRExpr_HWord(size),
                                        data_expr_hi ? data_expr_hi
                                        : mkIRExpr_HWord(0), data_expr_lo));
   if (guard) di->guard = guard;
   addStmtToIRSB(bb, IRStmt_Dirty(di));
}

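/*
 * instrument_load() and instrument_store() add a dirty helper call in front
 * of the original memory access. The common sizes (1, 2, 4 and 8 bytes) use
 * the specialized single-argument helpers; any other size goes through the
 * generic two-argument helpers. For a 4-byte load the inserted statement
 * prints roughly as follows (the exact pretty-printing may differ):
 *
 *    DIRTY 1:I1 ::: drd_trace_load_4{....}(t3)
 *
 * where t3 holds the load address.
 */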
static void instrument_load(IRSB* const bb, IRExpr* const addr_expr,
                            const HWord size,
                            IRExpr* const guard/* NULL => True */)
{
   IRExpr* size_expr;
   IRExpr** argv;
   IRDirty* di;

   if (!s_check_stack_accesses && is_stack_access(bb, addr_expr))
      return;

   switch (size)
   {
   case 1:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_load_1",
                             VG_(fnptr_to_fnentry)(drd_trace_load_1),
                             argv);
      break;
   case 2:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_load_2",
                             VG_(fnptr_to_fnentry)(drd_trace_load_2),
                             argv);
      break;
   case 4:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_load_4",
                             VG_(fnptr_to_fnentry)(drd_trace_load_4),
                             argv);
      break;
   case 8:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_load_8",
                             VG_(fnptr_to_fnentry)(drd_trace_load_8),
                             argv);
      break;
   default:
      size_expr = mkIRExpr_HWord(size);
      argv = mkIRExprVec_2(addr_expr, size_expr);
      di = unsafeIRDirty_0_N(/*regparms*/2,
                             "drd_trace_load",
                             VG_(fnptr_to_fnentry)(DRD_(trace_load)),
                             argv);
      break;
   }
   if (guard) di->guard = guard;
   addStmtToIRSB(bb, IRStmt_Dirty(di));
}

static void instrument_store(IRSB* const bb, IRExpr* addr_expr,
                             IRExpr* const data_expr,
                             IRExpr* const guard_expr/* NULL => True */)
{
   IRExpr* size_expr;
   IRExpr** argv;
   IRDirty* di;
   HWord size;

   size = sizeofIRType(typeOfIRExpr(bb->tyenv, data_expr));

   if (UNLIKELY(DRD_(any_address_is_traced)())) {
      IRTemp tmp = newIRTemp(bb->tyenv, typeOfIRExpr(bb->tyenv, addr_expr));
      addStmtToIRSB(bb, IRStmt_WrTmp(tmp, addr_expr));
      addr_expr = IRExpr_RdTmp(tmp);
      instr_trace_mem_store(bb, addr_expr, NULL, data_expr, guard_expr);
   }

   if (!s_check_stack_accesses && is_stack_access(bb, addr_expr))
      return;

   switch (size)
   {
   case 1:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_store_1",
                             VG_(fnptr_to_fnentry)(drd_trace_store_1),
                             argv);
      break;
   case 2:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_store_2",
                             VG_(fnptr_to_fnentry)(drd_trace_store_2),
                             argv);
      break;
   case 4:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_store_4",
                             VG_(fnptr_to_fnentry)(drd_trace_store_4),
                             argv);
      break;
   case 8:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_store_8",
                             VG_(fnptr_to_fnentry)(drd_trace_store_8),
                             argv);
      break;
   default:
      size_expr = mkIRExpr_HWord(size);
      argv = mkIRExprVec_2(addr_expr, size_expr);
      di = unsafeIRDirty_0_N(/*regparms*/2,
                             "drd_trace_store",
                             VG_(fnptr_to_fnentry)(DRD_(trace_store)),
                             argv);
      break;
   }
   if (guard_expr) di->guard = guard_expr;
   addStmtToIRSB(bb, IRStmt_Dirty(di));
}

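/**
 * DRD's instrumentation pass: copy each statement of bb_in to a new
 * superblock and insert the tracing / race-detection helper calls in front of
 * every memory access. Presumably this is the instrument callback that
 * drd_main.c registers with the Valgrind core via VG_(basic_tool_funcs)().
 */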
IRSB* DRD_(instrument)(VgCallbackClosure* const closure,
                       IRSB* const bb_in,
                       const VexGuestLayout* const layout,
                       const VexGuestExtents* const vge,
                       const VexArchInfo* archinfo_host,
                       IRType const gWordTy,
                       IRType const hWordTy)
{
   IRDirty* di;
   Int i;
   IRSB* bb;
   IRExpr** argv;
   Bool instrument = True;

   /* Set up BB */
   bb = emptyIRSB();
   bb->tyenv = deepCopyIRTypeEnv(bb_in->tyenv);
   bb->next = deepCopyIRExpr(bb_in->next);
   bb->jumpkind = bb_in->jumpkind;
   bb->offsIP = bb_in->offsIP;

   for (i = 0; i < bb_in->stmts_used; i++)
   {
      IRStmt* const st = bb_in->stmts[i];
      tl_assert(st);
      tl_assert(isFlatIRStmt(st));

      switch (st->tag)
      {
      /* Note: the code for not instrumenting the code in .plt          */
      /* sections is only necessary on CentOS 3.0 x86 (kernel 2.4.21    */
      /* + glibc 2.3.2 + NPTL 0.60 + binutils 2.14.90.0.4).             */
      /* This is because on this platform dynamic library symbols are   */
      /* relocated in another way than by later binutils versions. The  */
      /* linker e.g. does not generate .got.plt sections on CentOS 3.0. */
      case Ist_IMark:
         instrument = VG_(DebugInfo_sect_kind)(NULL, st->Ist.IMark.addr)
            != Vg_SectPLT;
         addStmtToIRSB(bb, st);
         break;

      case Ist_MBE:
         switch (st->Ist.MBE.event)
         {
         case Imbe_Fence:
            break; /* not interesting to DRD */
         case Imbe_CancelReservation:
            break; /* not interesting to DRD */
         default:
            tl_assert(0);
         }
         addStmtToIRSB(bb, st);
         break;

      case Ist_Store:
         if (instrument)
            instrument_store(bb, st->Ist.Store.addr, st->Ist.Store.data,
                             NULL/* no guard */);
         addStmtToIRSB(bb, st);
         break;

      case Ist_StoreG: {
         IRStoreG* sg = st->Ist.StoreG.details;
         IRExpr* data = sg->data;
         IRExpr* addr = sg->addr;
         if (instrument)
            instrument_store(bb, addr, data, sg->guard);
         addStmtToIRSB(bb, st);
         break;
      }

      case Ist_LoadG: {
         IRLoadG* lg = st->Ist.LoadG.details;
         IRType type = Ity_INVALID;     /* loaded type */
         IRType typeWide = Ity_INVALID; /* after implicit widening */
         IRExpr* addr_expr = lg->addr;
         typeOfIRLoadGOp(lg->cvt, &typeWide, &type);
         tl_assert(type != Ity_INVALID);
         if (UNLIKELY(DRD_(any_address_is_traced)())) {
            addr_expr = instr_trace_mem_load(bb, addr_expr,
                                             sizeofIRType(type), lg->guard);
         }
         instrument_load(bb, lg->addr,
                         sizeofIRType(type), lg->guard);
         addStmtToIRSB(bb, st);
         break;
      }

      case Ist_WrTmp:
         if (instrument) {
            const IRExpr* const data = st->Ist.WrTmp.data;
            if (data->tag == Iex_Load) {
               IRExpr* addr_expr = data->Iex.Load.addr;
               if (UNLIKELY(DRD_(any_address_is_traced)())) {
                  addr_expr = instr_trace_mem_load(bb, addr_expr,
                                              sizeofIRType(data->Iex.Load.ty),
                                              NULL/* no guard */);
               }
               instrument_load(bb, addr_expr, sizeofIRType(data->Iex.Load.ty),
                               NULL/* no guard */);
            }
         }
         addStmtToIRSB(bb, st);
         break;

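      /*
       * Dirty helper calls describe their memory effects via mFx, mAddr and
       * mSize. Reads and modifies are instrumented as loads, writes and
       * modifies as stores, both through the generic size-taking helpers.
       */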
      case Ist_Dirty:
         if (instrument) {
            IRDirty* d = st->Ist.Dirty.details;
            IREffect const mFx = d->mFx;
            switch (mFx) {
            case Ifx_None:
               break;
            case Ifx_Read:
            case Ifx_Write:
            case Ifx_Modify:
               tl_assert(d->mAddr);
               tl_assert(d->mSize > 0);
               argv = mkIRExprVec_2(d->mAddr, mkIRExpr_HWord(d->mSize));
               if (mFx == Ifx_Read || mFx == Ifx_Modify) {
                  di = unsafeIRDirty_0_N(
                          /*regparms*/2,
                          "drd_trace_load",
                          VG_(fnptr_to_fnentry)(DRD_(trace_load)),
                          argv);
                  addStmtToIRSB(bb, IRStmt_Dirty(di));
               }
               if (mFx == Ifx_Write || mFx == Ifx_Modify)
               {
                  di = unsafeIRDirty_0_N(
                          /*regparms*/2,
                          "drd_trace_store",
                          VG_(fnptr_to_fnentry)(DRD_(trace_store)),
                          argv);
                  addStmtToIRSB(bb, IRStmt_Dirty(di));
               }
               break;
            default:
               tl_assert(0);
            }
         }
         addStmtToIRSB(bb, st);
         break;

      case Ist_CAS:
         if (instrument) {
            /*
             * Treat compare-and-swap as a read. By handling atomic
             * instructions as read instructions no data races are reported
             * between conflicting atomic operations nor between atomic
             * operations and non-atomic reads. Conflicts between atomic
             * operations and non-atomic write operations are still reported
             * however.
             */
            Int dataSize;
            IRCAS* cas = st->Ist.CAS.details;

            tl_assert(cas->addr != NULL);
            tl_assert(cas->dataLo != NULL);
            dataSize = sizeofIRType(typeOfIRExpr(bb->tyenv, cas->dataLo));
            if (cas->dataHi != NULL)
               dataSize *= 2; /* since it's a doubleword-CAS */

            if (UNLIKELY(DRD_(any_address_is_traced)()))
               instr_trace_mem_store(bb, cas->addr, cas->dataHi, cas->dataLo,
                                     NULL/* no guard */);

            instrument_load(bb, cas->addr, dataSize, NULL/*no guard*/);
         }
         addStmtToIRSB(bb, st);
         break;

      case Ist_LLSC: {
         /*
          * Ignore store-conditionals (except for tracing), and handle
          * load-linked's exactly like normal loads.
          */
         IRType dataTy;

         if (st->Ist.LLSC.storedata == NULL) {
            /* LL */
            dataTy = typeOfIRTemp(bb_in->tyenv, st->Ist.LLSC.result);
            if (instrument) {
               IRExpr* addr_expr = st->Ist.LLSC.addr;
               if (UNLIKELY(DRD_(any_address_is_traced)()))
                  addr_expr = instr_trace_mem_load(bb, addr_expr,
                                                   sizeofIRType(dataTy),
                                                   NULL /* no guard */);

               instrument_load(bb, addr_expr, sizeofIRType(dataTy),
                               NULL/*no guard*/);
            }
         } else {
            /* SC */
            instr_trace_mem_store(bb, st->Ist.LLSC.addr, NULL,
                                  st->Ist.LLSC.storedata,
                                  NULL/* no guard */);
         }
         addStmtToIRSB(bb, st);
         break;
      }

      case Ist_NoOp:
      case Ist_AbiHint:
      case Ist_Put:
      case Ist_PutI:
      case Ist_Exit:
         /* None of these can contain any memory references. */
         addStmtToIRSB(bb, st);
         break;

      default:
         ppIRStmt(st);
         tl_assert(0);
      }
   }

   return bb;
}