/* drd_load_store.c, revision 5cda1b556992b534477683b291a659554ff3bb88 */
/*
  This file is part of drd, a thread error detector.

  Copyright (C) 2006-2011 Bart Van Assche <bvanassche@acm.org>.

  This program is free software; you can redistribute it and/or
  modify it under the terms of the GNU General Public License as
  published by the Free Software Foundation; either version 2 of the
  License, or (at your option) any later version.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
  02111-1307, USA.

  The GNU General Public License is contained in the file COPYING.
*/


#include "drd_bitmap.h"
#include "drd_thread_bitmap.h"
#include "drd_vc.h"            /* DRD_(vc_snprint)() */

/* Include several source files here in order to allow the compiler to */
/* do more inlining. */
#include "drd_bitmap.c"
#include "drd_load_store.h"
#include "drd_segment.c"
#include "drd_thread.c"
#include "drd_vc.c"
#include "libvex_guest_offsets.h"


/* STACK_POINTER_OFFSET: VEX register offset for the stack pointer register. */
#if defined(VGA_x86)
#define STACK_POINTER_OFFSET OFFSET_x86_ESP
#elif defined(VGA_amd64)
#define STACK_POINTER_OFFSET OFFSET_amd64_RSP
#elif defined(VGA_ppc32)
#define STACK_POINTER_OFFSET OFFSET_ppc32_GPR1
#elif defined(VGA_ppc64)
#define STACK_POINTER_OFFSET OFFSET_ppc64_GPR1
#elif defined(VGA_arm)
#define STACK_POINTER_OFFSET OFFSET_arm_R13
#elif defined(VGA_s390x)
#define STACK_POINTER_OFFSET OFFSET_s390x_r15
#else
#error Unknown architecture.
#endif


/* Local variables. */

/* Whether to also check loads/stores whose address is on a thread stack
   (--check-stack-var). */
static Bool s_check_stack_accesses = False;
/* Whether to suppress further reports on an address after the first
   detected race (--first-race-only). */
static Bool s_first_race_only = False;


/* Function definitions. */

/** Query the value of the --check-stack-var setting. */
Bool DRD_(get_check_stack_accesses)()
{
   return s_check_stack_accesses;
}

/** Set the --check-stack-var setting. @param c Must be True or False. */
void DRD_(set_check_stack_accesses)(const Bool c)
{
   tl_assert(c == False || c == True);
   s_check_stack_accesses = c;
}

/** Query the value of the --first-race-only setting. */
Bool DRD_(get_first_race_only)()
{
   return s_first_race_only;
}

/** Set the --first-race-only setting. @param fro Must be True or False. */
void DRD_(set_first_race_only)(const Bool fro)
{
   tl_assert(fro == False || fro == True);
   s_first_race_only = fro;
}

/**
 * Print a trace message (with backtrace) for a memory access in the range
 * being traced via --trace-addr.  For stores that fit in a machine word the
 * stored value is included in the message; for all other accesses only the
 * address, size and access kind are printed.
 *
 * @param addr         Start address of the access.
 * @param size         Size of the access in bytes.
 * @param access_type  eLoad, eStore, eStart or eEnd.
 * @param stored_value Value being stored; only meaningful for eStore
 *                     accesses of at most sizeof(HWord) bytes.
 */
void DRD_(trace_mem_access)(const Addr addr, const SizeT size,
                            const BmAccessTypeT access_type,
                            const HWord stored_value)
{
   if (DRD_(is_any_traced)(addr, addr + size))
   {
      char* vc;

      /* vc_aprint allocates; freed below with VG_(free). */
      vc = DRD_(vc_aprint)(DRD_(thread_get_vc)(DRD_(thread_get_running_tid)()));
      if (access_type == eStore && size <= sizeof(HWord)) {
         DRD_(trace_msg_w_bt)("store 0x%lx size %ld val %ld/0x%lx (thread %d /"
                              " vc %s)", addr, size, stored_value, stored_value,
                              DRD_(thread_get_running_tid)(), vc);
      } else {
         DRD_(trace_msg_w_bt)("%s 0x%lx size %ld (thread %d / vc %s)",
                              access_type == eLoad ? "load "
                              : access_type == eStore ? "store"
                              : access_type == eStart ? "start"
                              : access_type == eEnd ? "end " : "????",
                              addr, size, DRD_(thread_get_running_tid)(), vc);
      }
      VG_(free)(vc);
      /* Sanity check: DRD's notion of the running thread must agree with
         Valgrind core's. */
      tl_assert(DRD_(DrdThreadIdToVgThreadId)(DRD_(thread_get_running_tid)())
                == VG_(get_running_tid)());
   }
}

/* Dirty-call helper: trace a load (no stored value available). */
static VG_REGPARM(2) void drd_trace_mem_load(const Addr addr, const SizeT size)
{
   return DRD_(trace_mem_access)(addr, size, eLoad, 0);
}

/* Dirty-call helper: trace a store together with the stored value. */
static VG_REGPARM(3) void drd_trace_mem_store(const Addr addr,const SizeT size,
                                              const HWord stored_value)
{
   return DRD_(trace_mem_access)(addr, size, eStore, stored_value);
}

/**
 * Report a data race on [addr, addr+size[ to the Valgrind error manager.
 * If --first-race-only is in effect, also suppress future reports on the
 * same address range.
 */
static void drd_report_race(const Addr addr, const SizeT size,
                            const BmAccessTypeT access_type)
{
   DataRaceErrInfo drei;

   drei.tid  = DRD_(thread_get_running_tid)();
   drei.addr = addr;
   drei.size = size;
   drei.access_type = access_type;
   VG_(maybe_record_error)(VG_(get_running_tid)(),
                           DataRaceErr,
                           VG_(get_IP)(VG_(get_running_tid)()),
                           "Conflicting access",
                           &drei);

   if (s_first_race_only)
   {
      DRD_(start_suppression)(addr, addr + size, "first race only");
   }
}

/**
 * Generic load handler, called from instrumented client code for loads of
 * a size that has no specialized handler below.  Reports a race if the
 * load conflicts with a recorded access by another thread and the range is
 * not suppressed.
 */
VG_REGPARM(2) void DRD_(trace_load)(Addr addr, SizeT size)
{
#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
   /* The assert below has been commented out because of performance reasons.*/
   tl_assert(DRD_(thread_get_running_tid)()
             == DRD_(VgThreadIdToDrdThreadId)(VG_(get_running_tid())));
#endif

   /* Stack accesses are skipped unless --check-stack-var is enabled. */
   if (DRD_(running_thread_is_recording_loads)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_load_triggers_conflict(addr, addr + size)
       && ! DRD_(is_suppressed)(addr, addr + size))
   {
      drd_report_race(addr, size, eLoad);
   }
}

/* Specialized handler for one-byte loads (same logic as DRD_(trace_load)). */
static VG_REGPARM(1) void drd_trace_load_1(Addr addr)
{
   if (DRD_(running_thread_is_recording_loads)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_load_1_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 1))
   {
      drd_report_race(addr, 1, eLoad);
   }
}

/* Specialized handler for two-byte loads. */
static VG_REGPARM(1) void drd_trace_load_2(Addr addr)
{
   if (DRD_(running_thread_is_recording_loads)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_load_2_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 2))
   {
      drd_report_race(addr, 2, eLoad);
   }
}

/* Specialized handler for four-byte loads. */
static VG_REGPARM(1) void drd_trace_load_4(Addr addr)
{
   if (DRD_(running_thread_is_recording_loads)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_load_4_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 4))
   {
      drd_report_race(addr, 4, eLoad);
   }
}

/* Specialized handler for eight-byte loads. */
static VG_REGPARM(1) void drd_trace_load_8(Addr addr)
{
   if (DRD_(running_thread_is_recording_loads)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_load_8_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 8))
   {
      drd_report_race(addr, 8, eLoad);
   }
}

/**
 * Generic store handler, called from instrumented client code for stores of
 * a size that has no specialized handler below.  Reports a race if the
 * store conflicts with a recorded access by another thread and the range is
 * not suppressed.
 */
VG_REGPARM(2) void DRD_(trace_store)(Addr addr, SizeT size)
{
#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
   /* The assert below has been commented out because of performance reasons.*/
   tl_assert(DRD_(thread_get_running_tid)()
             == DRD_(VgThreadIdToDrdThreadId)(VG_(get_running_tid())));
#endif

   if (DRD_(running_thread_is_recording_stores)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_store_triggers_conflict(addr, addr + size)
       && ! DRD_(is_suppressed)(addr, addr + size))
   {
      drd_report_race(addr, size, eStore);
   }
}

/* Specialized handler for one-byte stores. */
static VG_REGPARM(1) void drd_trace_store_1(Addr addr)
{
   if (DRD_(running_thread_is_recording_stores)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_store_1_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 1))
   {
      drd_report_race(addr, 1, eStore);
   }
}

/* Specialized handler for two-byte stores. */
static VG_REGPARM(1) void drd_trace_store_2(Addr addr)
{
   if (DRD_(running_thread_is_recording_stores)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_store_2_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 2))
   {
      drd_report_race(addr, 2, eStore);
   }
}

/* Specialized handler for four-byte stores. */
static VG_REGPARM(1) void drd_trace_store_4(Addr addr)
{
   if (DRD_(running_thread_is_recording_stores)()
       && (s_check_stack_accesses
           || !DRD_(thread_address_on_stack)(addr))
       && bm_access_store_4_triggers_conflict(addr)
       && !DRD_(is_suppressed)(addr, addr + 4))
   {
      drd_report_race(addr, 4, eStore);
   }
}

/* Specialized handler for eight-byte stores. */
static VG_REGPARM(1) void drd_trace_store_8(Addr addr)
{
   if (DRD_(running_thread_is_recording_stores)()
       && (s_check_stack_accesses
           || ! DRD_(thread_address_on_stack)(addr))
       && bm_access_store_8_triggers_conflict(addr)
       && ! DRD_(is_suppressed)(addr, addr + 8))
   {
      drd_report_race(addr, 8, eStore);
   }
}

/**
 * Return true if and only if addr_expr matches the pattern (SP) or
 * <offset>(SP).
 *
 * Note: only recognizes addresses held in an IR temporary whose defining
 * Ist_WrTmp statement is a direct Iex_Get of the stack-pointer register;
 * addresses derived from SP via arithmetic are not detected here.
 */
static Bool is_stack_access(IRSB* const bb, IRExpr* const addr_expr)
{
   Bool result = False;

   if (addr_expr->tag == Iex_RdTmp)
   {
      int i;
      /* Scan the superblock for the statement that defines this temp. */
      for (i = 0; i < bb->stmts_size; i++)
      {
         if (bb->stmts[i]
             && bb->stmts[i]->tag == Ist_WrTmp
             && bb->stmts[i]->Ist.WrTmp.tmp == addr_expr->Iex.RdTmp.tmp)
         {
            IRExpr* e = bb->stmts[i]->Ist.WrTmp.data;
            if (e->tag == Iex_Get && e->Iex.Get.offset == STACK_POINTER_OFFSET)
            {
               result = True;
            }

            //ppIRExpr(e);
            //VG_(printf)(" (%s)\n", result ? "True" : "False");
            break;
         }
      }
   }
   return result;
}

/*
 * Lookup table for unsigned widening IROps: indexed by
 * [source IRType - Ity_I1][sizeof(HWord)].  Entries not listed below are
 * zero-initialized, which the caller maps to Iop_INVALID.
 */
static const IROp u_widen_irop[5][9] = {
   [Ity_I1  - Ity_I1][4] = Iop_1Uto32,
   [Ity_I1  - Ity_I1][8] = Iop_1Uto64,
   [Ity_I8  - Ity_I1][4] = Iop_8Uto32,
   [Ity_I8  - Ity_I1][8] = Iop_8Uto64,
   [Ity_I16 - Ity_I1][4] = Iop_16Uto32,
   [Ity_I16 - Ity_I1][8] = Iop_16Uto64,
   [Ity_I32 - Ity_I1][8] = Iop_32Uto64,
};

/**
 * Instrument the client code to trace a memory load (--trace-addr).
 */
static void instr_trace_mem_load(IRSB* const bb, IRExpr* const addr_expr,
                                 const HWord size)
{
   addStmtToIRSB(bb,
      IRStmt_Dirty(
         unsafeIRDirty_0_N(/*regparms*/2,
                           "drd_trace_mem_load",
                           VG_(fnptr_to_fnentry)
                           (drd_trace_mem_load),
                           mkIRExprVec_2(addr_expr, mkIRExpr_HWord(size)))));
}

/**
 * Instrument the client code to trace a memory store (--trace-addr).
 *
 * The stored value is widened to a HWord so it can be passed to the
 * drd_trace_mem_store() helper; values that cannot be represented in a
 * HWord (wider integers, Ity_F32/F64/F128, Ity_V128) are replaced by zero.
 */
static void instr_trace_mem_store(IRSB* const bb, IRExpr* const addr_expr,
                                  IRExpr* const data_expr)
{
   IRType ty_data_expr;
   IRExpr *hword_data_expr;
   HWord size;

   tl_assert(sizeof(HWord) == 4 || sizeof(HWord) == 8);

   ty_data_expr = typeOfIRExpr(bb->tyenv, data_expr);
   size = sizeofIRType(ty_data_expr);

   if (size == sizeof(HWord)
       && (ty_data_expr == Ity_I32 || ty_data_expr == Ity_I64))
   {
      /* No conversion necessary */
      hword_data_expr = data_expr;
   } else {
      IROp widen_op;

      /* Look up a zero-extension op; out-of-range types get Iop_INVALID. */
      if (Ity_I1 <= ty_data_expr
          && ty_data_expr
             < Ity_I1 + sizeof(u_widen_irop)/sizeof(u_widen_irop[0]))
      {
         widen_op = u_widen_irop[ty_data_expr - Ity_I1][sizeof(HWord)];
         if (!widen_op)
            widen_op = Iop_INVALID;
      } else {
         widen_op = Iop_INVALID;
      }
      if (widen_op != Iop_INVALID) {
         IRTemp tmp;

         /* Widen the integer expression to a HWord */
         tmp = newIRTemp(bb->tyenv, sizeof(HWord) == 4 ? Ity_I32 : Ity_I64);
         addStmtToIRSB(bb,
                       IRStmt_WrTmp(tmp, IRExpr_Unop(widen_op, data_expr)));
         hword_data_expr = IRExpr_RdTmp(tmp);
      } else {
         /*
          * Replace anything wider than a HWord and also Ity_F32, Ity_F64,
          * Ity_F128 and Ity_V128 by zero.
          */
         hword_data_expr = mkIRExpr_HWord(0);
      }
   }
   addStmtToIRSB(bb,
      IRStmt_Dirty(
         unsafeIRDirty_0_N(/*regparms*/3,
                           "drd_trace_mem_store",
                           VG_(fnptr_to_fnentry)
                           (drd_trace_mem_store),
                           mkIRExprVec_3(addr_expr, mkIRExpr_HWord(size),
                                         hword_data_expr))));
}

/**
 * Add a dirty call to the appropriate load handler for a load of 'size'
 * bytes at 'addr_expr'.  Sizes 1/2/4/8 use the specialized single-argument
 * handlers; other sizes fall back to DRD_(trace_load).  Stack accesses are
 * skipped entirely (at instrumentation time) unless --check-stack-var is on.
 */
static void instrument_load(IRSB* const bb, IRExpr* const addr_expr,
                            const HWord size)
{
   IRExpr* size_expr;
   IRExpr** argv;
   IRDirty* di;

   if (!s_check_stack_accesses && is_stack_access(bb, addr_expr))
      return;

   switch (size)
   {
   case 1:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_load_1",
                             VG_(fnptr_to_fnentry)(drd_trace_load_1),
                             argv);
      break;
   case 2:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_load_2",
                             VG_(fnptr_to_fnentry)(drd_trace_load_2),
                             argv);
      break;
   case 4:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_load_4",
                             VG_(fnptr_to_fnentry)(drd_trace_load_4),
                             argv);
      break;
   case 8:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_load_8",
                             VG_(fnptr_to_fnentry)(drd_trace_load_8),
                             argv);
      break;
   default:
      size_expr = mkIRExpr_HWord(size);
      argv = mkIRExprVec_2(addr_expr, size_expr);
      di = unsafeIRDirty_0_N(/*regparms*/2,
                             "drd_trace_load",
                             VG_(fnptr_to_fnentry)(DRD_(trace_load)),
                             argv);
      break;
   }
   addStmtToIRSB(bb, IRStmt_Dirty(di));
}

/**
 * Add a dirty call to the appropriate store handler for a store of
 * 'data_expr' at 'addr_expr'.  Mirrors instrument_load(): sizes 1/2/4/8 use
 * the specialized handlers, other sizes DRD_(trace_store).  Also emits the
 * --trace-addr tracing call when any address is being traced.
 */
static void instrument_store(IRSB* const bb, IRExpr* const addr_expr,
                             IRExpr* const data_expr)
{
   IRExpr* size_expr;
   IRExpr** argv;
   IRDirty* di;
   HWord size;

   size = sizeofIRType(typeOfIRExpr(bb->tyenv, data_expr));

   if (UNLIKELY(DRD_(any_address_is_traced)()))
      instr_trace_mem_store(bb, addr_expr, data_expr);

   if (!s_check_stack_accesses && is_stack_access(bb, addr_expr))
      return;

   switch (size)
   {
   case 1:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_store_1",
                             VG_(fnptr_to_fnentry)(drd_trace_store_1),
                             argv);
      break;
   case 2:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_store_2",
                             VG_(fnptr_to_fnentry)(drd_trace_store_2),
                             argv);
      break;
   case 4:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_store_4",
                             VG_(fnptr_to_fnentry)(drd_trace_store_4),
                             argv);
      break;
   case 8:
      argv = mkIRExprVec_1(addr_expr);
      di = unsafeIRDirty_0_N(/*regparms*/1,
                             "drd_trace_store_8",
                             VG_(fnptr_to_fnentry)(drd_trace_store_8),
                             argv);
      break;
   default:
      size_expr = mkIRExpr_HWord(size);
      argv = mkIRExprVec_2(addr_expr, size_expr);
      di = unsafeIRDirty_0_N(/*regparms*/2,
                             "drd_trace_store",
                             VG_(fnptr_to_fnentry)(DRD_(trace_store)),
                             argv);
      break;
   }
   addStmtToIRSB(bb, IRStmt_Dirty(di));
}

/**
 * DRD's VEX instrumentation entry point: copy the incoming superblock,
 * inserting dirty calls to the load/store handlers before each memory
 * access so data races can be detected at run time.
 *
 * @param closure  Information about the translation (unused here).
 * @param bb_in    Superblock to be instrumented.
 * @param layout   Guest state layout (unused here).
 * @param vge      Guest extents of the superblock (unused here).
 * @param gWordTy  Guest word type (unused here).
 * @param hWordTy  Host word type (unused here).
 * @return The instrumented superblock.
 */
IRSB* DRD_(instrument)(VgCallbackClosure* const closure,
                       IRSB* const bb_in,
                       VexGuestLayout* const layout,
                       VexGuestExtents* const vge,
                       IRType const gWordTy,
                       IRType const hWordTy)
{
   IRDirty* di;
   Int      i;
   IRSB*    bb;
   IRExpr** argv;
   Bool     instrument = True;

   /* Set up BB */
   bb           = emptyIRSB();
   bb->tyenv    = deepCopyIRTypeEnv(bb_in->tyenv);
   bb->next     = deepCopyIRExpr(bb_in->next);
   bb->jumpkind = bb_in->jumpkind;

   for (i = 0; i < bb_in->stmts_used; i++)
   {
      IRStmt* const st = bb_in->stmts[i];
      tl_assert(st);
      tl_assert(isFlatIRStmt(st));

      switch (st->tag)
      {
         /* Note: the code for not instrumenting the code in .plt */
         /* sections is only necessary on CentOS 3.0 x86 (kernel 2.4.21 */
         /* + glibc 2.3.2 + NPTL 0.60 + binutils 2.14.90.0.4). */
         /* This is because on this platform dynamic library symbols are */
         /* relocated in another way than by later binutils versions. The */
         /* linker e.g. does not generate .got.plt sections on CentOS 3.0. */
      case Ist_IMark:
         /* Disable instrumentation while inside a .plt section. */
         instrument = VG_(DebugInfo_sect_kind)(NULL, 0, st->Ist.IMark.addr)
            != Vg_SectPLT;
         addStmtToIRSB(bb, st);
         break;

      case Ist_MBE:
         switch (st->Ist.MBE.event)
         {
         case Imbe_Fence:
            break; /* not interesting */
         default:
            tl_assert(0);
         }
         addStmtToIRSB(bb, st);
         break;

      case Ist_Store:
         if (instrument)
            instrument_store(bb, st->Ist.Store.addr, st->Ist.Store.data);
         addStmtToIRSB(bb, st);
         break;

      case Ist_WrTmp:
         /* Loads appear as Ist_WrTmp statements whose data is Iex_Load. */
         if (instrument) {
            const IRExpr* const data = st->Ist.WrTmp.data;
            if (data->tag == Iex_Load) {
               if (UNLIKELY(DRD_(any_address_is_traced)()))
                  instr_trace_mem_load(bb, data->Iex.Load.addr,
                                       sizeofIRType(data->Iex.Load.ty));

               instrument_load(bb, data->Iex.Load.addr,
                               sizeofIRType(data->Iex.Load.ty));
            }
         }
         addStmtToIRSB(bb, st);
         break;

      case Ist_Dirty:
         /* A dirty helper may read and/or write guest memory; treat its
            memory effects as plain loads/stores. */
         if (instrument) {
            IRDirty* d = st->Ist.Dirty.details;
            IREffect const mFx = d->mFx;
            switch (mFx) {
            case Ifx_None:
               break;
            case Ifx_Read:
            case Ifx_Write:
            case Ifx_Modify:
               tl_assert(d->mAddr);
               tl_assert(d->mSize > 0);
               argv = mkIRExprVec_2(d->mAddr, mkIRExpr_HWord(d->mSize));
               if (mFx == Ifx_Read || mFx == Ifx_Modify) {
                  di = unsafeIRDirty_0_N(
                          /*regparms*/2,
                          "drd_trace_load",
                          VG_(fnptr_to_fnentry)(DRD_(trace_load)),
                          argv);
                  addStmtToIRSB(bb, IRStmt_Dirty(di));
               }
               if (mFx == Ifx_Write || mFx == Ifx_Modify)
               {
                  di = unsafeIRDirty_0_N(
                          /*regparms*/2,
                          "drd_trace_store",
                          VG_(fnptr_to_fnentry)(DRD_(trace_store)),
                          argv);
                  addStmtToIRSB(bb, IRStmt_Dirty(di));
               }
               break;
            default:
               tl_assert(0);
            }
         }
         addStmtToIRSB(bb, st);
         break;

      case Ist_CAS:
         if (instrument) {
            /*
             * Treat compare-and-swap as a read. By handling atomic
             * instructions as read instructions no data races are reported
             * between conflicting atomic operations nor between atomic
             * operations and non-atomic reads. Conflicts between atomic
             * operations and non-atomic write operations are still reported
             * however.
             */
            Int    dataSize;
            IRCAS* cas = st->Ist.CAS.details;

            tl_assert(cas->addr != NULL);
            tl_assert(cas->dataLo != NULL);
            dataSize = sizeofIRType(typeOfIRExpr(bb->tyenv, cas->dataLo));
            if (cas->dataHi != NULL)
               dataSize *= 2; /* since it's a doubleword-CAS */

            if (UNLIKELY(DRD_(any_address_is_traced)())) {
               if (cas->dataHi) {
                  IRExpr* data_expr;

                  /* Combine the two 32-bit halves of a doubleword CAS into
                     a single 64-bit value for tracing. */
                  tl_assert(typeOfIRExpr(bb->tyenv, cas->dataLo) == Ity_I32);
                  data_expr
                     = IRExpr_Binop(
                          Iop_Or64,
                          IRExpr_Binop(
                             Iop_Shl64,
                             IRExpr_Unop(Iop_32Uto64, cas->dataHi),
                             mkIRExpr_HWord(32)),
                          IRExpr_Unop(Iop_32Uto64, cas->dataLo));
                  instr_trace_mem_store(bb, cas->addr, data_expr);
               } else {
                  instr_trace_mem_store(bb, cas->addr, cas->dataLo);
               }
            }

            instrument_load(bb, cas->addr, dataSize);
         }
         addStmtToIRSB(bb, st);
         break;

      case Ist_LLSC: {
         /*
          * Ignore store-conditionals (except for tracing), and handle
          * load-linked's exactly like normal loads.
          */
         IRType dataTy;

         if (st->Ist.LLSC.storedata == NULL) {
            /* LL */
            dataTy = typeOfIRTemp(bb_in->tyenv, st->Ist.LLSC.result);
            if (instrument) {
               if (UNLIKELY(DRD_(any_address_is_traced)()))
                  instr_trace_mem_load(bb, st->Ist.LLSC.addr,
                                       sizeofIRType(dataTy));

               instrument_load(bb, st->Ist.LLSC.addr, sizeofIRType(dataTy));
            }
         } else {
            /* SC */
            instr_trace_mem_store(bb, st->Ist.LLSC.addr,
                                  st->Ist.LLSC.storedata);
         }
         addStmtToIRSB(bb, st);
         break;
      }

      case Ist_NoOp:
      case Ist_AbiHint:
      case Ist_Put:
      case Ist_PutI:
      case Ist_Exit:
         /* None of these can contain any memory references. */
         addStmtToIRSB(bb, st);
         break;

      default:
         ppIRStmt(st);
         tl_assert(0);
      }
   }

   return bb;
}