/* drd_load_store.c revision 7ca75edd99380d43786706330927b25cc5e55544 */
1/* 2 This file is part of drd, a thread error detector. 3 4 Copyright (C) 2006-2011 Bart Van Assche <bvanassche@acm.org>. 5 6 This program is free software; you can redistribute it and/or 7 modify it under the terms of the GNU General Public License as 8 published by the Free Software Foundation; either version 2 of the 9 License, or (at your option) any later version. 10 11 This program is distributed in the hope that it will be useful, but 12 WITHOUT ANY WARRANTY; without even the implied warranty of 13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 General Public License for more details. 15 16 You should have received a copy of the GNU General Public License 17 along with this program; if not, write to the Free Software 18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 19 02111-1307, USA. 20 21 The GNU General Public License is contained in the file COPYING. 22*/ 23 24 25#include "drd_bitmap.h" 26#include "drd_thread_bitmap.h" 27#include "drd_vc.h" /* DRD_(vc_snprint)() */ 28 29/* Include several source files here in order to allow the compiler to */ 30/* do more inlining. */ 31#include "drd_bitmap.c" 32#include "drd_load_store.h" 33#include "drd_segment.c" 34#include "drd_thread.c" 35#include "drd_vc.c" 36#include "libvex_guest_offsets.h" 37 38 39/* STACK_POINTER_OFFSET: VEX register offset for the stack pointer register. */ 40#if defined(VGA_x86) 41#define STACK_POINTER_OFFSET OFFSET_x86_ESP 42#elif defined(VGA_amd64) 43#define STACK_POINTER_OFFSET OFFSET_amd64_RSP 44#elif defined(VGA_ppc32) 45#define STACK_POINTER_OFFSET OFFSET_ppc32_GPR1 46#elif defined(VGA_ppc64) 47#define STACK_POINTER_OFFSET OFFSET_ppc64_GPR1 48#elif defined(VGA_arm) 49#define STACK_POINTER_OFFSET OFFSET_arm_R13 50#elif defined(VGA_s390x) 51#define STACK_POINTER_OFFSET OFFSET_s390x_r15 52#else 53#error Unknown architecture. 54#endif 55 56 57/* Local variables. 
*/ 58 59static Bool s_check_stack_accesses = False; 60static Bool s_first_race_only = False; 61 62 63/* Function definitions. */ 64 65Bool DRD_(get_check_stack_accesses)() 66{ 67 return s_check_stack_accesses; 68} 69 70void DRD_(set_check_stack_accesses)(const Bool c) 71{ 72 tl_assert(c == False || c == True); 73 s_check_stack_accesses = c; 74} 75 76Bool DRD_(get_first_race_only)() 77{ 78 return s_first_race_only; 79} 80 81void DRD_(set_first_race_only)(const Bool fro) 82{ 83 tl_assert(fro == False || fro == True); 84 s_first_race_only = fro; 85} 86 87void DRD_(trace_mem_access)(const Addr addr, const SizeT size, 88 const BmAccessTypeT access_type, 89 const HWord stored_value) 90{ 91 if (DRD_(is_any_traced)(addr, addr + size)) 92 { 93 char* vc; 94 95 vc = DRD_(vc_aprint)(DRD_(thread_get_vc)(DRD_(thread_get_running_tid)())); 96 if (access_type == eStore && size <= sizeof(HWord)) { 97 DRD_(trace_msg_w_bt)("store 0x%lx size %ld val %ld/0x%lx (thread %d /" 98 " vc %s)", addr, size, stored_value, stored_value, 99 DRD_(thread_get_running_tid)(), vc); 100 } else { 101 DRD_(trace_msg_w_bt)("%s 0x%lx size %ld (thread %d / vc %s)", 102 access_type == eLoad ? "load " 103 : access_type == eStore ? "store" 104 : access_type == eStart ? "start" 105 : access_type == eEnd ? 
"end " : "????", 106 addr, size, DRD_(thread_get_running_tid)(), vc); 107 } 108 VG_(free)(vc); 109 tl_assert(DRD_(DrdThreadIdToVgThreadId)(DRD_(thread_get_running_tid)()) 110 == VG_(get_running_tid)()); 111 } 112} 113 114static VG_REGPARM(2) void drd_trace_mem_load(const Addr addr, const SizeT size) 115{ 116 return DRD_(trace_mem_access)(addr, size, eLoad, 0); 117} 118 119static VG_REGPARM(3) void drd_trace_mem_store(const Addr addr,const SizeT size, 120 const HWord stored_value) 121{ 122 return DRD_(trace_mem_access)(addr, size, eStore, stored_value); 123} 124 125static void drd_report_race(const Addr addr, const SizeT size, 126 const BmAccessTypeT access_type) 127{ 128 DataRaceErrInfo drei; 129 130 drei.tid = DRD_(thread_get_running_tid)(); 131 drei.addr = addr; 132 drei.size = size; 133 drei.access_type = access_type; 134 VG_(maybe_record_error)(VG_(get_running_tid)(), 135 DataRaceErr, 136 VG_(get_IP)(VG_(get_running_tid)()), 137 "Conflicting access", 138 &drei); 139 140 if (s_first_race_only) 141 { 142 DRD_(start_suppression)(addr, addr + size, "first race only"); 143 } 144} 145 146VG_REGPARM(2) void DRD_(trace_load)(Addr addr, SizeT size) 147{ 148#ifdef ENABLE_DRD_CONSISTENCY_CHECKS 149 /* The assert below has been commented out because of performance reasons.*/ 150 tl_assert(DRD_(thread_get_running_tid)() 151 == DRD_(VgThreadIdToDrdThreadId)(VG_(get_running_tid()))); 152#endif 153 154 if (DRD_(running_thread_is_recording_loads)() 155 && (s_check_stack_accesses 156 || ! DRD_(thread_address_on_stack)(addr)) 157 && bm_access_load_triggers_conflict(addr, addr + size) 158 && ! DRD_(is_suppressed)(addr, addr + size)) 159 { 160 drd_report_race(addr, size, eLoad); 161 } 162} 163 164static VG_REGPARM(1) void drd_trace_load_1(Addr addr) 165{ 166 if (DRD_(running_thread_is_recording_loads)() 167 && (s_check_stack_accesses 168 || ! DRD_(thread_address_on_stack)(addr)) 169 && bm_access_load_1_triggers_conflict(addr) 170 && ! 
DRD_(is_suppressed)(addr, addr + 1)) 171 { 172 drd_report_race(addr, 1, eLoad); 173 } 174} 175 176static VG_REGPARM(1) void drd_trace_load_2(Addr addr) 177{ 178 if (DRD_(running_thread_is_recording_loads)() 179 && (s_check_stack_accesses 180 || ! DRD_(thread_address_on_stack)(addr)) 181 && bm_access_load_2_triggers_conflict(addr) 182 && ! DRD_(is_suppressed)(addr, addr + 2)) 183 { 184 drd_report_race(addr, 2, eLoad); 185 } 186} 187 188static VG_REGPARM(1) void drd_trace_load_4(Addr addr) 189{ 190 if (DRD_(running_thread_is_recording_loads)() 191 && (s_check_stack_accesses 192 || ! DRD_(thread_address_on_stack)(addr)) 193 && bm_access_load_4_triggers_conflict(addr) 194 && ! DRD_(is_suppressed)(addr, addr + 4)) 195 { 196 drd_report_race(addr, 4, eLoad); 197 } 198} 199 200static VG_REGPARM(1) void drd_trace_load_8(Addr addr) 201{ 202 if (DRD_(running_thread_is_recording_loads)() 203 && (s_check_stack_accesses 204 || ! DRD_(thread_address_on_stack)(addr)) 205 && bm_access_load_8_triggers_conflict(addr) 206 && ! DRD_(is_suppressed)(addr, addr + 8)) 207 { 208 drd_report_race(addr, 8, eLoad); 209 } 210} 211 212VG_REGPARM(2) void DRD_(trace_store)(Addr addr, SizeT size) 213{ 214#ifdef ENABLE_DRD_CONSISTENCY_CHECKS 215 /* The assert below has been commented out because of performance reasons.*/ 216 tl_assert(DRD_(thread_get_running_tid)() 217 == DRD_(VgThreadIdToDrdThreadId)(VG_(get_running_tid()))); 218#endif 219 220 if (DRD_(running_thread_is_recording_stores)() 221 && (s_check_stack_accesses 222 || ! DRD_(thread_address_on_stack)(addr)) 223 && bm_access_store_triggers_conflict(addr, addr + size) 224 && ! DRD_(is_suppressed)(addr, addr + size)) 225 { 226 drd_report_race(addr, size, eStore); 227 } 228} 229 230static VG_REGPARM(1) void drd_trace_store_1(Addr addr) 231{ 232 if (DRD_(running_thread_is_recording_stores)() 233 && (s_check_stack_accesses 234 || ! DRD_(thread_address_on_stack)(addr)) 235 && bm_access_store_1_triggers_conflict(addr) 236 && ! 
DRD_(is_suppressed)(addr, addr + 1)) 237 { 238 drd_report_race(addr, 1, eStore); 239 } 240} 241 242static VG_REGPARM(1) void drd_trace_store_2(Addr addr) 243{ 244 if (DRD_(running_thread_is_recording_stores)() 245 && (s_check_stack_accesses 246 || ! DRD_(thread_address_on_stack)(addr)) 247 && bm_access_store_2_triggers_conflict(addr) 248 && ! DRD_(is_suppressed)(addr, addr + 2)) 249 { 250 drd_report_race(addr, 2, eStore); 251 } 252} 253 254static VG_REGPARM(1) void drd_trace_store_4(Addr addr) 255{ 256 if (DRD_(running_thread_is_recording_stores)() 257 && (s_check_stack_accesses 258 || !DRD_(thread_address_on_stack)(addr)) 259 && bm_access_store_4_triggers_conflict(addr) 260 && !DRD_(is_suppressed)(addr, addr + 4)) 261 { 262 drd_report_race(addr, 4, eStore); 263 } 264} 265 266static VG_REGPARM(1) void drd_trace_store_8(Addr addr) 267{ 268 if (DRD_(running_thread_is_recording_stores)() 269 && (s_check_stack_accesses 270 || ! DRD_(thread_address_on_stack)(addr)) 271 && bm_access_store_8_triggers_conflict(addr) 272 && ! DRD_(is_suppressed)(addr, addr + 8)) 273 { 274 drd_report_race(addr, 8, eStore); 275 } 276} 277 278/** 279 * Return true if and only if addr_expr matches the pattern (SP) or 280 * <offset>(SP). 281 */ 282static Bool is_stack_access(IRSB* const bb, IRExpr* const addr_expr) 283{ 284 Bool result = False; 285 286 if (addr_expr->tag == Iex_RdTmp) 287 { 288 int i; 289 for (i = 0; i < bb->stmts_size; i++) 290 { 291 if (bb->stmts[i] 292 && bb->stmts[i]->tag == Ist_WrTmp 293 && bb->stmts[i]->Ist.WrTmp.tmp == addr_expr->Iex.RdTmp.tmp) 294 { 295 IRExpr* e = bb->stmts[i]->Ist.WrTmp.data; 296 if (e->tag == Iex_Get && e->Iex.Get.offset == STACK_POINTER_OFFSET) 297 { 298 result = True; 299 } 300 301 //ppIRExpr(e); 302 //VG_(printf)(" (%s)\n", result ? 
"True" : "False"); 303 break; 304 } 305 } 306 } 307 return result; 308} 309 310static const IROp u_widen_irop[5][9] = { 311 [Ity_I1 - Ity_I1] = { [4] = Iop_1Uto32, [8] = Iop_1Uto64 }, 312 [Ity_I8 - Ity_I1] = { [4] = Iop_8Uto32, [8] = Iop_8Uto64 }, 313 [Ity_I16 - Ity_I1] = { [4] = Iop_16Uto32, [8] = Iop_16Uto64 }, 314 [Ity_I32 - Ity_I1] = { [8] = Iop_32Uto64 }, 315}; 316 317/** 318 * Instrument the client code to trace a memory load (--trace-addr). 319 */ 320static void instr_trace_mem_load(IRSB* const bb, IRExpr* const addr_expr, 321 const HWord size) 322{ 323 addStmtToIRSB(bb, 324 IRStmt_Dirty( 325 unsafeIRDirty_0_N(/*regparms*/2, 326 "drd_trace_mem_load", 327 VG_(fnptr_to_fnentry) 328 (drd_trace_mem_load), 329 mkIRExprVec_2(addr_expr, mkIRExpr_HWord(size))))); 330} 331 332/** 333 * Instrument the client code to trace a memory store (--trace-addr). 334 */ 335static void instr_trace_mem_store(IRSB* const bb, IRExpr* const addr_expr, 336 IRExpr* data_expr) 337{ 338 IRType ty_data_expr; 339 HWord size; 340 341 tl_assert(sizeof(HWord) == 4 || sizeof(HWord) == 8); 342 343 ty_data_expr = typeOfIRExpr(bb->tyenv, data_expr); 344 size = sizeofIRType(ty_data_expr); 345 346#if 0 347 // Test code 348 if (ty_data_expr == Ity_I32) { 349 IRTemp tmp = newIRTemp(bb->tyenv, Ity_F32); 350 data_expr = IRExpr_Unop(Iop_ReinterpI32asF32, data_expr); 351 addStmtToIRSB(bb, IRStmt_WrTmp(tmp, data_expr)); 352 data_expr = IRExpr_RdTmp(tmp); 353 ty_data_expr = Ity_F32; 354 } else if (ty_data_expr == Ity_I64) { 355 IRTemp tmp = newIRTemp(bb->tyenv, Ity_F64); 356 data_expr = IRExpr_Unop(Iop_ReinterpI64asF64, data_expr); 357 addStmtToIRSB(bb, IRStmt_WrTmp(tmp, data_expr)); 358 data_expr = IRExpr_RdTmp(tmp); 359 ty_data_expr = Ity_F64; 360 } 361#endif 362 363 if (ty_data_expr == Ity_F32) { 364 IRTemp tmp = newIRTemp(bb->tyenv, Ity_I32); 365 addStmtToIRSB(bb, IRStmt_WrTmp(tmp, IRExpr_Unop(Iop_ReinterpF32asI32, 366 data_expr))); 367 data_expr = IRExpr_RdTmp(tmp); 368 ty_data_expr = Ity_I32; 369 } 
else if (ty_data_expr == Ity_F64) { 370 IRTemp tmp = newIRTemp(bb->tyenv, Ity_I64); 371 addStmtToIRSB(bb, IRStmt_WrTmp(tmp, IRExpr_Unop(Iop_ReinterpF64asI64, 372 data_expr))); 373 data_expr = IRExpr_RdTmp(tmp); 374 ty_data_expr = Ity_I64; 375 } 376 377 if (size == sizeof(HWord) 378 && (ty_data_expr == Ity_I32 || ty_data_expr == Ity_I64)) 379 { 380 /* No conversion necessary */ 381 } else { 382 IROp widen_op; 383 384 if (Ity_I1 <= ty_data_expr 385 && ty_data_expr 386 < Ity_I1 + sizeof(u_widen_irop)/sizeof(u_widen_irop[0])) 387 { 388 widen_op = u_widen_irop[ty_data_expr - Ity_I1][sizeof(HWord)]; 389 if (!widen_op) 390 widen_op = Iop_INVALID; 391 } else { 392 widen_op = Iop_INVALID; 393 } 394 if (widen_op != Iop_INVALID) { 395 IRTemp tmp; 396 397 /* Widen the integer expression to a HWord */ 398 tmp = newIRTemp(bb->tyenv, sizeof(HWord) == 4 ? Ity_I32 : Ity_I64); 399 addStmtToIRSB(bb, 400 IRStmt_WrTmp(tmp, IRExpr_Unop(widen_op, data_expr))); 401 data_expr = IRExpr_RdTmp(tmp); 402 } else { 403 /* Replace anything wider than a HWord with zero. 
*/ 404 data_expr = mkIRExpr_HWord(0); 405 } 406 } 407 addStmtToIRSB(bb, 408 IRStmt_Dirty( 409 unsafeIRDirty_0_N(/*regparms*/3, 410 "drd_trace_mem_store", 411 VG_(fnptr_to_fnentry) 412 (drd_trace_mem_store), 413 mkIRExprVec_3(addr_expr, mkIRExpr_HWord(size), 414 data_expr)))); 415} 416 417static void instrument_load(IRSB* const bb, IRExpr* const addr_expr, 418 const HWord size) 419{ 420 IRExpr* size_expr; 421 IRExpr** argv; 422 IRDirty* di; 423 424 if (!s_check_stack_accesses && is_stack_access(bb, addr_expr)) 425 return; 426 427 switch (size) 428 { 429 case 1: 430 argv = mkIRExprVec_1(addr_expr); 431 di = unsafeIRDirty_0_N(/*regparms*/1, 432 "drd_trace_load_1", 433 VG_(fnptr_to_fnentry)(drd_trace_load_1), 434 argv); 435 break; 436 case 2: 437 argv = mkIRExprVec_1(addr_expr); 438 di = unsafeIRDirty_0_N(/*regparms*/1, 439 "drd_trace_load_2", 440 VG_(fnptr_to_fnentry)(drd_trace_load_2), 441 argv); 442 break; 443 case 4: 444 argv = mkIRExprVec_1(addr_expr); 445 di = unsafeIRDirty_0_N(/*regparms*/1, 446 "drd_trace_load_4", 447 VG_(fnptr_to_fnentry)(drd_trace_load_4), 448 argv); 449 break; 450 case 8: 451 argv = mkIRExprVec_1(addr_expr); 452 di = unsafeIRDirty_0_N(/*regparms*/1, 453 "drd_trace_load_8", 454 VG_(fnptr_to_fnentry)(drd_trace_load_8), 455 argv); 456 break; 457 default: 458 size_expr = mkIRExpr_HWord(size); 459 argv = mkIRExprVec_2(addr_expr, size_expr); 460 di = unsafeIRDirty_0_N(/*regparms*/2, 461 "drd_trace_load", 462 VG_(fnptr_to_fnentry)(DRD_(trace_load)), 463 argv); 464 break; 465 } 466 addStmtToIRSB(bb, IRStmt_Dirty(di)); 467} 468 469static void instrument_store(IRSB* const bb, IRExpr* const addr_expr, 470 IRExpr* const data_expr) 471{ 472 IRExpr* size_expr; 473 IRExpr** argv; 474 IRDirty* di; 475 HWord size; 476 477 size = sizeofIRType(typeOfIRExpr(bb->tyenv, data_expr)); 478 479 if (UNLIKELY(DRD_(any_address_is_traced)())) 480 instr_trace_mem_store(bb, addr_expr, data_expr); 481 482 if (!s_check_stack_accesses && is_stack_access(bb, addr_expr)) 483 
return; 484 485 switch (size) 486 { 487 case 1: 488 argv = mkIRExprVec_1(addr_expr); 489 di = unsafeIRDirty_0_N(/*regparms*/1, 490 "drd_trace_store_1", 491 VG_(fnptr_to_fnentry)(drd_trace_store_1), 492 argv); 493 break; 494 case 2: 495 argv = mkIRExprVec_1(addr_expr); 496 di = unsafeIRDirty_0_N(/*regparms*/1, 497 "drd_trace_store_2", 498 VG_(fnptr_to_fnentry)(drd_trace_store_2), 499 argv); 500 break; 501 case 4: 502 argv = mkIRExprVec_1(addr_expr); 503 di = unsafeIRDirty_0_N(/*regparms*/1, 504 "drd_trace_store_4", 505 VG_(fnptr_to_fnentry)(drd_trace_store_4), 506 argv); 507 break; 508 case 8: 509 argv = mkIRExprVec_1(addr_expr); 510 di = unsafeIRDirty_0_N(/*regparms*/1, 511 "drd_trace_store_8", 512 VG_(fnptr_to_fnentry)(drd_trace_store_8), 513 argv); 514 break; 515 default: 516 size_expr = mkIRExpr_HWord(size); 517 argv = mkIRExprVec_2(addr_expr, size_expr); 518 di = unsafeIRDirty_0_N(/*regparms*/2, 519 "drd_trace_store", 520 VG_(fnptr_to_fnentry)(DRD_(trace_store)), 521 argv); 522 break; 523 } 524 addStmtToIRSB(bb, IRStmt_Dirty(di)); 525} 526 527IRSB* DRD_(instrument)(VgCallbackClosure* const closure, 528 IRSB* const bb_in, 529 VexGuestLayout* const layout, 530 VexGuestExtents* const vge, 531 IRType const gWordTy, 532 IRType const hWordTy) 533{ 534 IRDirty* di; 535 Int i; 536 IRSB* bb; 537 IRExpr** argv; 538 Bool instrument = True; 539 540 /* Set up BB */ 541 bb = emptyIRSB(); 542 bb->tyenv = deepCopyIRTypeEnv(bb_in->tyenv); 543 bb->next = deepCopyIRExpr(bb_in->next); 544 bb->jumpkind = bb_in->jumpkind; 545 546 for (i = 0; i < bb_in->stmts_used; i++) 547 { 548 IRStmt* const st = bb_in->stmts[i]; 549 tl_assert(st); 550 tl_assert(isFlatIRStmt(st)); 551 552 switch (st->tag) 553 { 554 /* Note: the code for not instrumenting the code in .plt */ 555 /* sections is only necessary on CentOS 3.0 x86 (kernel 2.4.21 */ 556 /* + glibc 2.3.2 + NPTL 0.60 + binutils 2.14.90.0.4). 
*/ 557 /* This is because on this platform dynamic library symbols are */ 558 /* relocated in another way than by later binutils versions. The */ 559 /* linker e.g. does not generate .got.plt sections on CentOS 3.0. */ 560 case Ist_IMark: 561 instrument = VG_(DebugInfo_sect_kind)(NULL, 0, st->Ist.IMark.addr) 562 != Vg_SectPLT; 563 addStmtToIRSB(bb, st); 564 break; 565 566 case Ist_MBE: 567 switch (st->Ist.MBE.event) 568 { 569 case Imbe_Fence: 570 break; /* not interesting */ 571 default: 572 tl_assert(0); 573 } 574 addStmtToIRSB(bb, st); 575 break; 576 577 case Ist_Store: 578 if (instrument) 579 instrument_store(bb, st->Ist.Store.addr, st->Ist.Store.data); 580 addStmtToIRSB(bb, st); 581 break; 582 583 case Ist_WrTmp: 584 if (instrument) { 585 const IRExpr* const data = st->Ist.WrTmp.data; 586 if (data->tag == Iex_Load) { 587 if (UNLIKELY(DRD_(any_address_is_traced)())) 588 instr_trace_mem_load(bb, data->Iex.Load.addr, 589 sizeofIRType(data->Iex.Load.ty)); 590 591 instrument_load(bb, data->Iex.Load.addr, 592 sizeofIRType(data->Iex.Load.ty)); 593 } 594 } 595 addStmtToIRSB(bb, st); 596 break; 597 598 case Ist_Dirty: 599 if (instrument) { 600 IRDirty* d = st->Ist.Dirty.details; 601 IREffect const mFx = d->mFx; 602 switch (mFx) { 603 case Ifx_None: 604 break; 605 case Ifx_Read: 606 case Ifx_Write: 607 case Ifx_Modify: 608 tl_assert(d->mAddr); 609 tl_assert(d->mSize > 0); 610 argv = mkIRExprVec_2(d->mAddr, mkIRExpr_HWord(d->mSize)); 611 if (mFx == Ifx_Read || mFx == Ifx_Modify) { 612 di = unsafeIRDirty_0_N( 613 /*regparms*/2, 614 "drd_trace_load", 615 VG_(fnptr_to_fnentry)(DRD_(trace_load)), 616 argv); 617 addStmtToIRSB(bb, IRStmt_Dirty(di)); 618 } 619 if (mFx == Ifx_Write || mFx == Ifx_Modify) 620 { 621 di = unsafeIRDirty_0_N( 622 /*regparms*/2, 623 "drd_trace_store", 624 VG_(fnptr_to_fnentry)(DRD_(trace_store)), 625 argv); 626 addStmtToIRSB(bb, IRStmt_Dirty(di)); 627 } 628 break; 629 default: 630 tl_assert(0); 631 } 632 } 633 addStmtToIRSB(bb, st); 634 break; 635 636 
case Ist_CAS: 637 if (instrument) { 638 /* 639 * Treat compare-and-swap as a read. By handling atomic 640 * instructions as read instructions no data races are reported 641 * between conflicting atomic operations nor between atomic 642 * operations and non-atomic reads. Conflicts between atomic 643 * operations and non-atomic write operations are still reported 644 * however. 645 */ 646 Int dataSize; 647 IRCAS* cas = st->Ist.CAS.details; 648 649 tl_assert(cas->addr != NULL); 650 tl_assert(cas->dataLo != NULL); 651 dataSize = sizeofIRType(typeOfIRExpr(bb->tyenv, cas->dataLo)); 652 if (cas->dataHi != NULL) 653 dataSize *= 2; /* since it's a doubleword-CAS */ 654 655 if (UNLIKELY(DRD_(any_address_is_traced)())) { 656 if (cas->dataHi) { 657 tl_assert(typeOfIRExpr(bb->tyenv, cas->dataLo) == Ity_I32); 658 tl_assert(dataSize == 8); 659 instr_trace_mem_store(bb, cas->addr, 660 IRExpr_Const(IRConst_U64(0))); 661 } else { 662 instr_trace_mem_store(bb, cas->addr, cas->dataLo); 663 } 664 } 665 666 instrument_load(bb, cas->addr, dataSize); 667 } 668 addStmtToIRSB(bb, st); 669 break; 670 671 case Ist_LLSC: { 672 /* 673 * Ignore store-conditionals (except for tracing), and handle 674 * load-linked's exactly like normal loads. 675 */ 676 IRType dataTy; 677 678 if (st->Ist.LLSC.storedata == NULL) { 679 /* LL */ 680 dataTy = typeOfIRTemp(bb_in->tyenv, st->Ist.LLSC.result); 681 if (instrument) { 682 if (UNLIKELY(DRD_(any_address_is_traced)())) 683 instr_trace_mem_load(bb, st->Ist.LLSC.addr, 684 sizeofIRType(dataTy)); 685 686 instrument_load(bb, st->Ist.LLSC.addr, sizeofIRType(dataTy)); 687 } 688 } else { 689 /* SC */ 690 instr_trace_mem_store(bb, st->Ist.LLSC.addr, 691 st->Ist.LLSC.storedata); 692 } 693 addStmtToIRSB(bb, st); 694 break; 695 } 696 697 case Ist_NoOp: 698 case Ist_AbiHint: 699 case Ist_Put: 700 case Ist_PutI: 701 case Ist_Exit: 702 /* None of these can contain any memory references. 
*/ 703 addStmtToIRSB(bb, st); 704 break; 705 706 default: 707 ppIRStmt(st); 708 tl_assert(0); 709 } 710 } 711 712 return bb; 713} 714 715