/* mc_translate.c revision 0e1b514ab8e837f75a207a037ea53a6a721e9d28 */
1 2/*--------------------------------------------------------------------*/ 3/*--- Instrument UCode to perform memory checking operations. ---*/ 4/*--- mc_translate.c ---*/ 5/*--------------------------------------------------------------------*/ 6 7/* 8 This file is part of MemCheck, a heavyweight Valgrind skin for 9 detecting memory errors. 10 11 Copyright (C) 2000-2003 Julian Seward 12 jseward@acm.org 13 14 This program is free software; you can redistribute it and/or 15 modify it under the terms of the GNU General Public License as 16 published by the Free Software Foundation; either version 2 of the 17 License, or (at your option) any later version. 18 19 This program is distributed in the hope that it will be useful, but 20 WITHOUT ANY WARRANTY; without even the implied warranty of 21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 22 General Public License for more details. 23 24 You should have received a copy of the GNU General Public License 25 along with this program; if not, write to the Free Software 26 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 27 02111-1307, USA. 28 29 The GNU General Public License is contained in the file COPYING. 30*/ 31 32#include "mc_include.h" 33 34/* --------------------------------------------------------------------- 35 Template functions for extending UCode 36 ------------------------------------------------------------------ */ 37 38/* Compare this with the restrictions on core instructions in 39 vg_translate.c:VG_(saneUInstr)(). Everything general said there applies 40 here too. 41*/ 42Bool SK_(sane_XUInstr)(Bool beforeRA, Bool beforeLiveness, UInstr* u) 43{ 44// SSS: duplicating these macros really sucks 45# define LIT0 (u->lit32 == 0) 46# define LIT1 (!(LIT0)) 47# define LITm (u->tag1 == Literal ? 
True : LIT0 ) 48# define SZ0 (u->size == 0) 49# define SZi (u->size == 4 || u->size == 2 || u->size == 1) 50# define SZj (u->size == 4 || u->size == 2 || u->size == 1 || u->size == 0) 51# define CC0 (u->flags_r == FlagsEmpty && u->flags_w == FlagsEmpty) 52# define TR1 (beforeRA ? (u->tag1 == TempReg) : (u->tag1 == RealReg)) 53# define TR2 (beforeRA ? (u->tag2 == TempReg) : (u->tag2 == RealReg)) 54# define A1 (u->tag1 == ArchReg) 55# define A2 (u->tag2 == ArchReg) 56# define L1 (u->tag1 == Literal && u->val1 == 0) 57# define Ls1 (u->tag1 == Lit16) 58# define Ls3 (u->tag3 == Lit16) 59# define TRL1 (TR1 || L1) 60# define TRA1 (TR1 || A1) 61# define N2 (u->tag2 == NoValue) 62# define N3 (u->tag3 == NoValue) 63# define COND0 (u->cond == 0) 64# define EXTRA4b0 (u->extra4b == 0) 65# define SG_WD0 (u->signed_widen == 0) 66# define JMPKIND0 (u->jmpkind == 0) 67# define CCALL0 (u->argc==0 && u->regparms_n==0 && u->has_ret_val==0 && \ 68 ( beforeLiveness \ 69 ? u->regs_live_after == ALL_RREGS_LIVE \ 70 : True )) 71# define XOTHER (COND0 && EXTRA4b0 && SG_WD0 && JMPKIND0 && CCALL0) 72 73 Int n_lits = 0; 74 if (u->tag1 == Literal) n_lits++; 75 if (u->tag2 == Literal) n_lits++; 76 if (u->tag3 == Literal) n_lits++; 77 if (n_lits > 1) 78 return False; 79 80 /* Fields not checked: val1, val2, val3 */ 81 82 switch (u->opcode) { 83 84 /* Fields checked: lit32 size flags_r/w tag1 tag2 tag3 (rest) */ 85 case LOADV: return LIT0 && SZi && CC0 && TR1 && TR2 && N3 && XOTHER; 86 case STOREV: return LITm && SZi && CC0 && TRL1 && TR2 && N3 && XOTHER; 87 case GETV: return LIT0 && SZi && CC0 && A1 && TR2 && N3 && XOTHER; 88 case PUTV: return LITm && SZi && CC0 && TRL1 && A2 && N3 && XOTHER; 89 case GETVF: 90 case PUTVF: return LIT0 && SZ0 && CC0 && TR1 && N2 && N3 && XOTHER; 91 case TESTV: 92 case SETV: return LIT0 && SZj && CC0 && TRA1 && N2 && N3 && XOTHER; 93 case TAG1: return LIT0 && SZ0 && CC0 && TR1 && N2 && Ls3 && XOTHER; 94 case TAG2: return LIT0 && SZ0 && CC0 && TR1 && TR2 && Ls3 && 
XOTHER; 95 default: 96 VG_(printf)("unhandled opcode: %u\n", u->opcode); 97 VG_(skin_panic)("SK_(sane_XUInstr): unhandled opcode"); 98 } 99# undef LIT0 100# undef LIT1 101# undef LITm 102# undef SZ0 103# undef SZi 104# undef SZj 105# undef CC0 106# undef TR1 107# undef TR2 108# undef A1 109# undef A2 110# undef L1 111# undef Ls1 112# undef Ls3 113# undef TRL1 114# undef TRA1 115# undef N2 116# undef N3 117# undef COND0 118# undef EXTRA4b0 119# undef JMPKIND0 120# undef CCALL0 121# undef XOTHER 122} 123 124static Char* nameOfTagOp ( TagOp h ) 125{ 126 switch (h) { 127 case Tag_PCast40: return "PCast40"; 128 case Tag_PCast20: return "PCast20"; 129 case Tag_PCast10: return "PCast10"; 130 case Tag_PCast01: return "PCast01"; 131 case Tag_PCast02: return "PCast02"; 132 case Tag_PCast04: return "PCast04"; 133 case Tag_PCast14: return "PCast14"; 134 case Tag_PCast12: return "PCast12"; 135 case Tag_PCast11: return "PCast11"; 136 case Tag_Left4: return "Left4"; 137 case Tag_Left2: return "Left2"; 138 case Tag_Left1: return "Left1"; 139 case Tag_SWiden14: return "SWiden14"; 140 case Tag_SWiden24: return "SWiden24"; 141 case Tag_SWiden12: return "SWiden12"; 142 case Tag_ZWiden14: return "ZWiden14"; 143 case Tag_ZWiden24: return "ZWiden24"; 144 case Tag_ZWiden12: return "ZWiden12"; 145 case Tag_UifU4: return "UifU4"; 146 case Tag_UifU2: return "UifU2"; 147 case Tag_UifU1: return "UifU1"; 148 case Tag_UifU0: return "UifU0"; 149 case Tag_DifD4: return "DifD4"; 150 case Tag_DifD2: return "DifD2"; 151 case Tag_DifD1: return "DifD1"; 152 case Tag_ImproveAND4_TQ: return "ImproveAND4_TQ"; 153 case Tag_ImproveAND2_TQ: return "ImproveAND2_TQ"; 154 case Tag_ImproveAND1_TQ: return "ImproveAND1_TQ"; 155 case Tag_ImproveOR4_TQ: return "ImproveOR4_TQ"; 156 case Tag_ImproveOR2_TQ: return "ImproveOR2_TQ"; 157 case Tag_ImproveOR1_TQ: return "ImproveOR1_TQ"; 158 case Tag_DebugFn: return "DebugFn"; 159 default: VG_(skin_panic)("vg_nameOfTagOp"); 160 } 161} 162 163 164Char* 
SK_(name_XUOpcode)(Opcode opc) 165{ 166 switch (opc) { 167 case GETVF: return "GETVF"; 168 case PUTVF: return "PUTVF"; 169 case TAG1: return "TAG1"; 170 case TAG2: return "TAG2"; 171 case LOADV: return "LOADV"; 172 case STOREV: return "STOREV"; 173 case GETV: return "GETV"; 174 case PUTV: return "PUTV"; 175 case TESTV: return "TESTV"; 176 case SETV: return "SETV"; 177 default: 178 VG_(printf)("unhandled opcode: %u\n", opc); 179 VG_(skin_panic)("SK_(name_XUOpcode): unhandled case"); 180 } 181} 182 183/* --------------------------------------------------------------------- 184 Debugging stuff. 185 ------------------------------------------------------------------ */ 186 187void SK_(pp_XUInstr)(UInstr* u) 188{ 189 switch (u->opcode) { 190 191 case TAG1: 192 VG_(printf)("\t"); 193 VG_(pp_UOperand)(u, 1, 4, False); 194 VG_(printf)(" = %s ( ", nameOfTagOp( u->val3 )); 195 VG_(pp_UOperand)(u, 1, 4, False); 196 VG_(printf)(" )"); 197 break; 198 199 case TAG2: 200 VG_(printf)("\t"); 201 VG_(pp_UOperand)(u, 2, 4, False); 202 VG_(printf)(" = %s ( ", nameOfTagOp( u->val3 )); 203 VG_(pp_UOperand)(u, 1, 4, False); 204 VG_(printf)(", "); 205 VG_(pp_UOperand)(u, 2, 4, False); 206 VG_(printf)(" )"); 207 break; 208 209 case STOREV: case LOADV: 210 VG_(printf)("\t"); 211 VG_(pp_UOperand)(u, 1, u->size, u->opcode==LOADV); 212 VG_(printf)(", "); 213 VG_(pp_UOperand)(u, 2, u->size, u->opcode==STOREV); 214 break; 215 216 case PUTVF: case GETVF: 217 VG_(printf)("\t"); 218 VG_(pp_UOperand)(u, 1, 0, False); 219 break; 220 221 case GETV: case PUTV: 222 VG_(printf)("\t"); 223 VG_(pp_UOperand)(u, 1, u->opcode==PUTV ? 4 : u->size, False); 224 VG_(printf)(", "); 225 VG_(pp_UOperand)(u, 2, u->opcode==GETV ? 
4 : u->size, False); 226 break; 227 228 case TESTV: case SETV: 229 VG_(printf)("\t"); 230 VG_(pp_UOperand)(u, 1, u->size, False); 231 break; 232 233 default: 234 VG_(printf)("unhandled opcode: %u\n", u->opcode); 235 VG_(skin_panic)("SK_(pp_XUInstr): unhandled opcode"); 236 } 237 238} 239 240Int SK_(get_Xreg_usage)(UInstr* u, Tag tag, Int* regs, Bool* isWrites) 241{ 242# define RD(ono) VG_UINSTR_READS_REG(ono, regs, isWrites) 243# define WR(ono) VG_UINSTR_WRITES_REG(ono, regs, isWrites) 244 245 Int n = 0; 246 switch (u->opcode) { 247 case TAG1: RD(1); WR(1); break; 248 case TAG2: RD(1); RD(2); WR(2); break; 249 case LOADV: RD(1); WR(2); break; 250 case STOREV: RD(1); RD(2); break; 251 case GETV: WR(2); break; 252 case PUTV: RD(1); break; 253 case TESTV: RD(1); break; 254 case SETV: WR(1); break; 255 case PUTVF: RD(1); break; 256 case GETVF: WR(1); break; 257 258 default: 259 VG_(printf)("unhandled opcode: %u\n", u->opcode); 260 VG_(skin_panic)("SK_(get_Xreg_usage): unhandled opcode"); 261 } 262 return n; 263 264# undef RD 265# undef WR 266} 267 268/*------------------------------------------------------------*/ 269/*--- New instrumentation machinery. 
---*/
/*------------------------------------------------------------*/

/* Map an operand size (4/2/1 bytes) to the matching ImproveOR
   tag operator; panics on any other size. */
static
TagOp get_Tag_ImproveOR_TQ ( Int sz )
{
   switch (sz) {
      case 4: return Tag_ImproveOR4_TQ;
      case 2: return Tag_ImproveOR2_TQ;
      case 1: return Tag_ImproveOR1_TQ;
      default: VG_(skin_panic)("get_Tag_ImproveOR_TQ");
   }
}


/* Map an operand size (4/2/1 bytes) to the matching ImproveAND
   tag operator; panics on any other size. */
static
TagOp get_Tag_ImproveAND_TQ ( Int sz )
{
   switch (sz) {
      case 4: return Tag_ImproveAND4_TQ;
      case 2: return Tag_ImproveAND2_TQ;
      case 1: return Tag_ImproveAND1_TQ;
      default: VG_(skin_panic)("get_Tag_ImproveAND_TQ");
   }
}


/* Map an operand size (4/2/1 bytes) to the matching Left tag
   operator; panics on any other size. */
static
TagOp get_Tag_Left ( Int sz )
{
   switch (sz) {
      case 4: return Tag_Left4;
      case 2: return Tag_Left2;
      case 1: return Tag_Left1;
      default: VG_(skin_panic)("get_Tag_Left");
   }
}


/* Map an operand size (4/2/1/0 bytes) to the matching UifU
   (undefined-if-either-undefined) tag operator.  Note size 0 is
   allowed here (used for eflags-sized shadows). */
static
TagOp get_Tag_UifU ( Int sz )
{
   switch (sz) {
      case 4: return Tag_UifU4;
      case 2: return Tag_UifU2;
      case 1: return Tag_UifU1;
      case 0: return Tag_UifU0;
      default: VG_(skin_panic)("get_Tag_UifU");
   }
}


/* Map an operand size (4/2/1 bytes) to the matching DifD
   (defined-if-either-defined) tag operator. */
static
TagOp get_Tag_DifD ( Int sz )
{
   switch (sz) {
      case 4: return Tag_DifD4;
      case 2: return Tag_DifD2;
      case 1: return Tag_DifD1;
      default: VG_(skin_panic)("get_Tag_DifD");
   }
}


/* Select the pessimising-cast operator for a (source size, dest size)
   pair; panics on an unsupported combination. */
static
TagOp get_Tag_PCast ( Int szs, Int szd )
{
   if (szs == 4 && szd == 0) return Tag_PCast40;
   if (szs == 2 && szd == 0) return Tag_PCast20;
   if (szs == 1 && szd == 0) return Tag_PCast10;
   if (szs == 0 && szd == 1) return Tag_PCast01;
   if (szs == 0 && szd == 2) return Tag_PCast02;
   if (szs == 0 && szd == 4) return Tag_PCast04;
   if (szs == 1 && szd == 4) return Tag_PCast14;
   if (szs == 1 && szd == 2) return Tag_PCast12;
   if (szs == 1 && szd == 1) return Tag_PCast11;
   VG_(printf)("get_Tag_PCast(%d,%d)\n", szs, szd);
   VG_(skin_panic)("get_Tag_PCast");
}


/* Select the signed/unsigned widening operator for a (source size,
   dest size) pair; only 1->2, 1->4 and 2->4 are supported. */
static
TagOp get_Tag_Widen ( Bool syned, Int szs, Int szd )
{
   if (szs == 1 && szd == 2 && syned)  return Tag_SWiden12;
   if (szs == 1 && szd == 2 && !syned) return Tag_ZWiden12;

   if (szs == 1 && szd == 4 && syned)  return Tag_SWiden14;
   if (szs == 1 && szd == 4 && !syned) return Tag_ZWiden14;

   if (szs == 2 && szd == 4 && syned)  return Tag_SWiden24;
   if (szs == 2 && szd == 4 && !syned) return Tag_ZWiden24;

   VG_(printf)("get_Tag_Widen(%d,%d,%d)\n", (Int)syned, szs, szd);
   VG_(skin_panic)("get_Tag_Widen");
}

/* Pessimally cast the spec'd shadow from one size to another.
   A 0->0 cast is a no-op, so no code is emitted for it. */
static
void create_PCast ( UCodeBlock* cb, Int szs, Int szd, Int tempreg )
{
   if (szs == 0 && szd == 0)
      return;
   uInstr3(cb, TAG1, 0, TempReg, tempreg,
                        NoValue, 0,
                        Lit16,   get_Tag_PCast(szs,szd));
}


/* Create a signed or unsigned widen of the spec'd shadow from one
   size to another.  The only allowed size transitions are 1->2, 1->4
   and 2->4.  Same-size "widens" emit nothing. */
static
void create_Widen ( UCodeBlock* cb, Bool signed_widen,
                    Int szs, Int szd, Int tempreg )
{
   if (szs == szd) return;
   uInstr3(cb, TAG1, 0, TempReg, tempreg,
                        NoValue, 0,
                        Lit16,   get_Tag_Widen(signed_widen,szs,szd));
}


/* Get the condition codes into a new shadow, at the given size.
   Returns the TempReg number of the freshly-allocated shadow. */
static
Int create_GETVF ( UCodeBlock* cb, Int sz )
{
   Int tt = newShadow(cb);
   uInstr1(cb, GETVF, 0, TempReg, tt);
   create_PCast(cb, 0, sz, tt);
   return tt;
}


/* Save the condition codes from the spec'd shadow.  For sz != 0 the
   shadow is first copied and pessimally cast down to size 0, so the
   caller's shadow is not modified. */
static
void create_PUTVF ( UCodeBlock* cb, Int sz, Int tempreg )
{
   if (sz == 0) {
      uInstr1(cb, PUTVF, 0, TempReg, tempreg);
   } else {
      Int tt = newShadow(cb);
      uInstr2(cb, MOV, 4, TempReg, tempreg, TempReg, tt);
      create_PCast(cb, sz, 0, tt);
      uInstr1(cb, PUTVF, 0, TempReg, tt);
   }
}


/* Do Left on the spec'd shadow. */
static
void create_Left ( UCodeBlock* cb, Int sz, Int tempreg )
{
   uInstr3(cb, TAG1, 0,
               TempReg, tempreg,
               NoValue, 0,
               Lit16, get_Tag_Left(sz));
}


/* Do UifU on ts and td, putting the result in td. */
static
void create_UifU ( UCodeBlock* cb, Int sz, Int ts, Int td )
{
   uInstr3(cb, TAG2, 0, TempReg, ts, TempReg, td,
               Lit16, get_Tag_UifU(sz));
}


/* Do DifD on ts and td, putting the result in td. */
static
void create_DifD ( UCodeBlock* cb, Int sz, Int ts, Int td )
{
   uInstr3(cb, TAG2, 0, TempReg, ts, TempReg, td,
               Lit16, get_Tag_DifD(sz));
}


/* Do HelpAND on value tval and tag tqqq, putting the result in
   tqqq. */
static
void create_ImproveAND_TQ ( UCodeBlock* cb, Int sz, Int tval, Int tqqq )
{
   uInstr3(cb, TAG2, 0, TempReg, tval, TempReg, tqqq,
               Lit16, get_Tag_ImproveAND_TQ(sz));
}


/* Do HelpOR on value tval and tag tqqq, putting the result in
   tqqq. */
static
void create_ImproveOR_TQ ( UCodeBlock* cb, Int sz, Int tval, Int tqqq )
{
   uInstr3(cb, TAG2, 0, TempReg, tval, TempReg, tqqq,
               Lit16, get_Tag_ImproveOR_TQ(sz));
}


/* Get the shadow for an operand described by (tag, val).  Emit code
   to do this and return the identity of the shadow holding the
   result.  The result tag is always copied into a new shadow, so it
   can be modified without trashing the original.

   Literals are always considered fully defined (SETV); ArchReg
   operands are fetched with GETV.  Any other tag is a caller bug. */
static
Int /* TempReg */ getOperandShadow ( UCodeBlock* cb,
                                     Int sz, Int tag, Int val )
{
   Int sh;
   sh = newShadow(cb);
   if (tag == TempReg) {
      uInstr2(cb, MOV, 4, TempReg, SHADOW(val), TempReg, sh);
      return sh;
   }
   if (tag == Literal) {
      uInstr1(cb, SETV, sz, TempReg, sh);
      return sh;
   }
   if (tag == ArchReg) {
      uInstr2(cb, GETV, sz, ArchReg, val, TempReg, sh);
      return sh;
   }
   VG_(skin_panic)("getOperandShadow");
}

/* Create and return an instrumented version of cb_in.
Free cb_in before returning. */
static UCodeBlock* memcheck_instrument ( UCodeBlock* cb_in )
{
   UCodeBlock* cb;
   Int         i, j;
   UInstr*     u_in;
   Int         qs, qd, qt, qtt;   /* shadow temps: src, dst, scratch x2 */
   Bool        bogusLiterals;

   cb = VG_(setup_UCodeBlock)(cb_in);

   /* Scan the block to look for bogus literals.  These are magic
      numbers which particularly appear in hand-optimised / inlined
      implementations of strlen() et al which cause so much trouble
      (spurious reports of uninit-var uses).  Purpose of this horrible
      hack is to disable some checks any such literals are present in
      this basic block. */
   bogusLiterals = False;

   if (MC_(clo_avoid_strlen_errors)) {
      for (i = 0; i < VG_(get_num_instrs)(cb_in); i++) {
         u_in = VG_(get_instr)(cb_in, i);
         switch (u_in->opcode) {
            case ADD: case SUB: case MOV:
               if (u_in->size == 4 && u_in->tag1 == Literal)
                  goto literal;
               break;
            case LEA1:
               sk_assert(u_in->size == 4);
               goto literal;
            default:
               break;
         }
         continue;
        literal:
         /* Magic constants used by inlined strlen() idioms. */
         if (u_in->lit32 == 0xFEFEFEFF ||
             u_in->lit32 == 0x80808080 ||
             u_in->lit32 == 0x00008080) {
            bogusLiterals = True;
            break;
         }
      }
   }

   /* Main pass: for each incoming UInstr, emit shadow-tracking code
      ahead of (a copy of) the instruction itself. */
   for (i = 0; i < VG_(get_num_instrs)(cb_in); i++) {
      u_in = VG_(get_instr)(cb_in, i);
      qs = qd = qt = qtt = INVALID_TEMPREG;

      switch (u_in->opcode) {

         case LOCK:
         case NOP:
            break;

         case INCEIP:
            VG_(copy_UInstr)(cb, u_in);
            break;

         /* The segment registers do not have their definedness
            tracked.  We therefore make fake shadows on GETSEG and
            test them on PUTSEG.  This will catch writing garbage to a
            segment register; therefore we can assume it to be defined
            when read (GETSEGd).  Since the first arg of USESEG is
            fetched by GETSEG, we can assume it to be defined, and so
            the definedness of the result is simply the definedness of
            the second (virtual_address) arg of USESEG.  The upshot of
            all this is that instrumentation of USESEG is a no-op! */

         case PUTSEG:
            sk_assert(u_in->tag1 == TempReg);
            uInstr1(cb, TESTV, 2, TempReg, SHADOW(u_in->val1));
            uInstr1(cb, SETV,  2, TempReg, SHADOW(u_in->val1));
            VG_(copy_UInstr)(cb, u_in);
            break;

         case GETSEG:
            sk_assert(u_in->tag2 == TempReg);
            uInstr1(cb, SETV,  2, TempReg, SHADOW(u_in->val2));
            VG_(copy_UInstr)(cb, u_in);
            break;

         case USESEG:
            VG_(copy_UInstr)(cb, u_in);
            break;

         /* Loads and stores.  Test the V bits for the address.  24
            Mar 02: since the address is A-checked anyway, there's not
            really much point in doing the V-check too, unless you
            think that you might use addresses which are undefined but
            still addressible.  Hence the optionalisation of the V
            check.  15 Dec 02: optionalisation removed, since it no
            longer makes much sense given we also have an addrcheck
            skin.

            The LOADV/STOREV does an addressibility check for the
            address. */

         case LOAD:
            uInstr1(cb, TESTV, 4, TempReg, SHADOW(u_in->val1));
            uInstr1(cb, SETV,  4, TempReg, SHADOW(u_in->val1));
            uInstr2(cb, LOADV, u_in->size,
                        TempReg, u_in->val1,
                        TempReg, SHADOW(u_in->val2));
            VG_(copy_UInstr)(cb, u_in);
            break;

         case STORE:
            uInstr1(cb, TESTV, 4, TempReg, SHADOW(u_in->val2));
            uInstr1(cb, SETV,  4, TempReg, SHADOW(u_in->val2));
            uInstr2(cb, STOREV, u_in->size,
                        TempReg, SHADOW(u_in->val1),
                        TempReg, u_in->val2);
            VG_(copy_UInstr)(cb, u_in);
            break;

         /* Moving stuff around.  Make the V bits follow accordingly,
            but don't do anything else. */

         case GET:
            uInstr2(cb, GETV, u_in->size,
                        ArchReg, u_in->val1,
                        TempReg, SHADOW(u_in->val2));
            VG_(copy_UInstr)(cb, u_in);
            break;

         case PUT:
            uInstr2(cb, PUTV, u_in->size,
                        TempReg, SHADOW(u_in->val1),
                        ArchReg, u_in->val2);
            VG_(copy_UInstr)(cb, u_in);
            break;

         case GETF:
            /* This is not the smartest way to do it, but should work. */
            qd = create_GETVF(cb, u_in->size);
            uInstr2(cb, MOV, 4, TempReg, qd, TempReg, SHADOW(u_in->val1));
            VG_(copy_UInstr)(cb, u_in);
            break;

         case PUTF:
            create_PUTVF(cb, u_in->size, SHADOW(u_in->val1));
            VG_(copy_UInstr)(cb, u_in);
            break;

         case MOV:
            switch (u_in->tag1) {
               case TempReg:
                  uInstr2(cb, MOV, 4,
                              TempReg, SHADOW(u_in->val1),
                              TempReg, SHADOW(u_in->val2));
                  break;
               case Literal:
                  /* A literal is by definition fully defined. */
                  uInstr1(cb, SETV, u_in->size,
                              TempReg, SHADOW(u_in->val2));
                  break;
               default:
                  VG_(skin_panic)("memcheck_instrument: MOV");
            }
            VG_(copy_UInstr)(cb, u_in);
            break;

         /* Special case of add, where one of the operands is a literal.
            lea1(t) = t + some literal.
            Therefore: lea1#(qa) = left(qa)
         */
         case LEA1:
            sk_assert(u_in->size == 4 && !VG_(any_flag_use)(u_in));
            qs = SHADOW(u_in->val1);
            qd = SHADOW(u_in->val2);
            uInstr2(cb, MOV, 4, TempReg, qs, TempReg, qd);
            create_Left(cb, u_in->size, qd);
            VG_(copy_UInstr)(cb, u_in);
            break;

         /* Another form of add.
            lea2(ts,tt,shift) = ts + (tt << shift); shift is a literal
                                and is 0,1,2 or 3.
            lea2#(qs,qt) = left(qs `UifU` (qt << shift)).
            Note, subtly, that the shift puts zeroes at the bottom of qt,
            meaning Valid, since the corresponding shift of tt puts
            zeroes at the bottom of tb.
         */
         case LEA2: {
            Int shift;
            sk_assert(u_in->size == 4 && !VG_(any_flag_use)(u_in));
            switch (u_in->extra4b) {
               case 1: shift = 0; break;
               case 2: shift = 1; break;
               case 4: shift = 2; break;
               case 8: shift = 3; break;
               default: VG_(skin_panic)( "memcheck_instrument(LEA2)" );
            }
            qs = SHADOW(u_in->val1);
            qt = SHADOW(u_in->val2);
            qd = SHADOW(u_in->val3);
            uInstr2(cb, MOV, 4, TempReg, qt, TempReg, qd);
            if (shift > 0) {
               uInstr2(cb, SHL, 4, Literal, 0, TempReg, qd);
               uLiteral(cb, shift);
            }
            create_UifU(cb, 4, qs, qd);
            create_Left(cb, u_in->size, qd);
            VG_(copy_UInstr)(cb, u_in);
            break;
         }

         /* inc#/dec#(qd) = q `UifU` left(qd) = left(qd) */
         case INC: case DEC:
            qd = SHADOW(u_in->val1);
            create_Left(cb, u_in->size, qd);
            if (u_in->flags_w != FlagsEmpty)
               create_PUTVF(cb, u_in->size, qd);
            VG_(copy_UInstr)(cb, u_in);
            break;

         /* This is a HACK (approximation :-) */
         /* rcl#/rcr#(qs,qd)
               = let q0 = pcast-sz-0(qd) `UifU` pcast-sz-0(qs) `UifU` eflags#
                 eflags# = q0
                 qd =pcast-0-sz(q0)
            Ie, cast everything down to a single bit, then back up.
            This assumes that any bad bits infect the whole word and
            the eflags.
         */
         case RCL: case RCR:
            sk_assert(u_in->flags_r != FlagsEmpty);
            /* The following assertion looks like it makes sense, but is
               actually wrong.  Consider this:
                  rcll    %eax
                  imull   %eax, %eax
               The rcll writes O and C but so does the imull, so the O and C
               write of the rcll is annulled by the prior improvement pass.
               Noticed by Kevin Ryde <user42@zip.com.au>
            */
            /* sk_assert(u_in->flags_w != FlagsEmpty); */
            qs = getOperandShadow(cb, u_in->size, u_in->tag1, u_in->val1);
            /* We can safely modify qs; cast it to 0-size. */
            create_PCast(cb, u_in->size, 0, qs);
            qd = SHADOW(u_in->val2);
            create_PCast(cb, u_in->size, 0, qd);
            /* qs is cast-to-0(shift count#), and qd is
               cast-to-0(value#). */
            create_UifU(cb, 0, qs, qd);
            /* qs is now free; reuse it for the flag definedness. */
            qs = create_GETVF(cb, 0);
            create_UifU(cb, 0, qs, qd);
            create_PUTVF(cb, 0, qd);
            create_PCast(cb, 0, u_in->size, qd);
            VG_(copy_UInstr)(cb, u_in);
            break;

         /* for OP in shl shr sar rol ror
            (qs is shift count#, qd is value to be OP#d)
            OP(ts,td)
            OP#(qs,qd)
               = pcast-1-sz(qs) `UifU` OP(ts,qd)
            So we apply OP to the tag bits too, and then UifU with
            the shift count# to take account of the possibility of it
            being undefined.

            A bit subtle:
               ROL/ROR rearrange the tag bits as per the value bits.
               SHL/SHR shifts zeroes into the value, and corresponding
                  zeroes indicating Definedness into the tag.
               SAR copies the top bit of the value downwards, and therefore
                  SAR also copies the definedness of the top bit too.
            So in all five cases, we just apply the same op to the tag
            bits as is applied to the value bits.  Neat!
         */
         case SHL:
         case SHR: case SAR:
         case ROL: case ROR: {
            Int t_amount = INVALID_TEMPREG;
            sk_assert(u_in->tag1 == TempReg || u_in->tag1 == Literal);
            sk_assert(u_in->tag2 == TempReg);
            qd = SHADOW(u_in->val2);

            /* Make qs hold shift-count# and make
               t_amount be a TempReg holding the shift count. */
            if (u_in->tag1 == Literal) {
               t_amount = newTemp(cb);
               uInstr2(cb, MOV, 4, Literal, 0, TempReg, t_amount);
               uLiteral(cb, u_in->lit32);
               qs = SHADOW(t_amount);
               uInstr1(cb, SETV, 1, TempReg, qs);
            } else {
               t_amount = u_in->val1;
               qs = SHADOW(u_in->val1);
            }

            uInstr2(cb, u_in->opcode,
                        u_in->size,
                        TempReg, t_amount,
                        TempReg, qd);
            qt = newShadow(cb);
            uInstr2(cb, MOV, 4, TempReg, qs, TempReg, qt);
            create_PCast(cb, 1, u_in->size, qt);
            create_UifU(cb, u_in->size, qt, qd);
            VG_(copy_UInstr)(cb, u_in);
            break;
         }

         /* One simple tag operation. */
         case WIDEN:
            sk_assert(u_in->tag1 == TempReg);
            create_Widen(cb, u_in->signed_widen, u_in->extra4b, u_in->size,
                             SHADOW(u_in->val1));
            VG_(copy_UInstr)(cb, u_in);
            break;

         /* not#(x) = x (since bitwise independent) */
         case NOT:
            sk_assert(u_in->tag1 == TempReg);
            VG_(copy_UInstr)(cb, u_in);
            break;

         /* neg#(x) = left(x) (derivable from case for SUB) */
         case NEG:
            sk_assert(u_in->tag1 == TempReg);
            create_Left(cb, u_in->size, SHADOW(u_in->val1));
            VG_(copy_UInstr)(cb, u_in);
            break;

         /* bswap#(x) = bswap(x) */
         case BSWAP:
            sk_assert(u_in->tag1 == TempReg);
            sk_assert(u_in->size == 4);
            qd = SHADOW(u_in->val1);
            uInstr1(cb, BSWAP, 4, TempReg, qd);
            VG_(copy_UInstr)(cb, u_in);
            break;

         /* cc2val#(qd) = pcast-0-to-size(eflags#) */
         case CC2VAL:
            sk_assert(u_in->tag1 == TempReg);
            sk_assert(u_in->flags_r != FlagsEmpty);
            qt = create_GETVF(cb, u_in->size);
            uInstr2(cb, MOV, 4, TempReg, qt, TempReg, SHADOW(u_in->val1));
            VG_(copy_UInstr)(cb, u_in);
            break;

         /* cmov#(qs,qd) = cmov(qs,qd)
            That is, do the cmov of tags using the same flags as for
            the data (obviously).  However, first do a test on the
            validity of the flags.
         */
         case CMOV:
            sk_assert(u_in->size == 4);
            sk_assert(u_in->tag1 == TempReg);
            sk_assert(u_in->tag2 == TempReg);
            sk_assert(u_in->flags_r != FlagsEmpty);
            sk_assert(u_in->flags_w == FlagsEmpty);
            qs = SHADOW(u_in->val1);
            qd = SHADOW(u_in->val2);
            qt = create_GETVF(cb, 0);
            uInstr1(cb, TESTV, 0, TempReg, qt);
            /* qt should never be referred to again.  Nevertheless
               ... */
            uInstr1(cb, SETV, 0, TempReg, qt);

            uInstr2(cb, CMOV, 4, TempReg, qs, TempReg, qd);
            uCond(cb, u_in->cond);
            uFlagsRWU(cb, u_in->flags_r, u_in->flags_w, FlagsEmpty);

            VG_(copy_UInstr)(cb, u_in);
            break;

         /* add#/sub#(qs,qd)
               = qs `UifU` qd `UifU` left(qs) `UifU` left(qd)
               = left(qs `UifU` qd)
               = left(qs) `UifU` left(qd)
            adc#/sbb#(qs,qd)
               = left(qs `UifU` qd) `UifU` pcast(eflags#)
            Second arg (dest) is TempReg.
            First arg (src) is Literal or TempReg or ArchReg.
         */
         case ADD: case SUB:
         case ADC: case SBB:
            qd = SHADOW(u_in->val2);
            qs = getOperandShadow(cb, u_in->size, u_in->tag1, u_in->val1);
            create_UifU(cb, u_in->size, qs, qd);
            create_Left(cb, u_in->size, qd);
            if (u_in->opcode == ADC || u_in->opcode == SBB) {
               sk_assert(u_in->flags_r != FlagsEmpty);
               qt = create_GETVF(cb, u_in->size);
               create_UifU(cb, u_in->size, qt, qd);
            }
            if (u_in->flags_w != FlagsEmpty) {
               create_PUTVF(cb, u_in->size, qd);
            }
            VG_(copy_UInstr)(cb, u_in);
            break;

         /* xor#(qs,qd) = qs `UifU` qd */
         case XOR:
            qd = SHADOW(u_in->val2);
            qs = getOperandShadow(cb, u_in->size, u_in->tag1, u_in->val1);
            create_UifU(cb, u_in->size, qs, qd);
            if (u_in->flags_w != FlagsEmpty) {
               create_PUTVF(cb, u_in->size, qd);
            }
            VG_(copy_UInstr)(cb, u_in);
            break;

         /* and#/or#(qs,qd)
               = (qs `UifU` qd) `DifD` improve(vs,qs)
                                `DifD` improve(vd,qd)
            where improve is the relevant one of
                Improve{AND,OR}_TQ
            Use the following steps, with qt as a temp:
               qt = improve(vd,qd)
               qd = qs `UifU` qd
               qd = qt `DifD` qd
               qt = improve(vs,qs)
               qd = qt `DifD` qd
         */
         case AND: case OR:
            sk_assert(u_in->tag1 == TempReg);
            sk_assert(u_in->tag2 == TempReg);
            qd = SHADOW(u_in->val2);
            qs = SHADOW(u_in->val1);
            qt = newShadow(cb);

            /* qt = improve(vd,qd) */
            uInstr2(cb, MOV, 4, TempReg, qd, TempReg, qt);
            if (u_in->opcode == AND)
               create_ImproveAND_TQ(cb, u_in->size, u_in->val2, qt);
            else
               create_ImproveOR_TQ(cb, u_in->size, u_in->val2, qt);
            /* qd = qs `UifU` qd */
            create_UifU(cb, u_in->size, qs, qd);
            /* qd = qt `DifD` qd */
            create_DifD(cb, u_in->size, qt, qd);
            /* qt = improve(vs,qs) */
            uInstr2(cb, MOV, 4, TempReg, qs, TempReg, qt);
            if (u_in->opcode == AND)
               create_ImproveAND_TQ(cb, u_in->size, u_in->val1, qt);
            else
               create_ImproveOR_TQ(cb, u_in->size, u_in->val1, qt);
            /* qd = qt `DifD` qd */
            create_DifD(cb, u_in->size, qt, qd);
            /* So, finally qd is the result tag. */
            if (u_in->flags_w != FlagsEmpty) {
               create_PUTVF(cb, u_in->size, qd);
            }
            VG_(copy_UInstr)(cb, u_in);
            break;

         /* Machinery to do with supporting CALLM.  Copy the start and
            end markers only to make the result easier to read
            (debug); they generate no code and have no effect.
         */
         case CALLM_S: case CALLM_E:
            VG_(copy_UInstr)(cb, u_in);
            break;

         /* Copy PUSH and POP verbatim.  Arg/result absval
            calculations are done when the associated CALL is
            processed.  CLEAR has no effect on absval calculations but
            needs to be copied.
         */
         case PUSH: case POP: case CLEAR:
            VG_(copy_UInstr)(cb, u_in);
            break;

         /* In short:
               callm#(a1# ... an#) = (a1# `UifU` ... `UifU` an#)
            We have to decide on a size to do the computation at,
            although the choice doesn't affect correctness.  We will
            do a pcast to the final size anyway, so the only important
            factor is to choose a size which minimises the total
            number of casts needed.  Valgrind: just use size 0,
            regardless.  It may not be very good for performance
            but does simplify matters, mainly by reducing the number
            of different pessimising casts which have to be implemented.
         */
         case CALLM: {
            UInstr* uu;
            Bool res_used;

            /* Now generate the code.  Get the final result absval
               into qt. */
            qt  = newShadow(cb);
            qtt = newShadow(cb);
            uInstr1(cb, SETV, 0, TempReg, qt);
            /* Walk back to the CALLM_S marker, UifU-ing in every
               PUSHed argument's shadow. */
            for (j = i-1; VG_(get_instr)(cb_in, j)->opcode != CALLM_S; j--) {
               uu = VG_(get_instr)(cb_in, j);
               if (uu->opcode != PUSH) continue;
               /* cast via a temporary */
               uInstr2(cb, MOV, 4, TempReg, SHADOW(uu->val1),
                                   TempReg, qtt);
               create_PCast(cb, uu->size, 0, qtt);
               create_UifU(cb, 0, qtt, qt);
            }
            /* Remembering also that flags read count as inputs. */
            if (u_in->flags_r != FlagsEmpty) {
               qtt = create_GETVF(cb, 0);
               create_UifU(cb, 0, qtt, qt);
            }

            /* qt now holds the result tag.  If any results from the
               call are used, either by fetching with POP or
               implicitly by writing the flags, we copy the result
               absval to the relevant location.  If not used, the call
               must have been for its side effects, so we test qt here
               and now.  Note that this assumes that all values
               removed by POP continue to be live.  So dead args
               *must* be removed with CLEAR, not by POPping them into
               a dummy tempreg.
            */
            res_used = False;
            for (j = i+1; VG_(get_instr)(cb_in, j)->opcode != CALLM_E; j++) {
               uu = VG_(get_instr)(cb_in, j);
               if (uu->opcode != POP) continue;
               /* Cast via a temp. */
               uInstr2(cb, MOV, 4, TempReg, qt, TempReg, qtt);
               create_PCast(cb, 0, uu->size, qtt);
               uInstr2(cb, MOV, 4, TempReg, qtt,
                                   TempReg, SHADOW(uu->val1));
               res_used = True;
            }
            if (u_in->flags_w != FlagsEmpty) {
               create_PUTVF(cb, 0, qt);
               res_used = True;
            }
            if (!res_used) {
               uInstr1(cb, TESTV, 0, TempReg, qt);
               /* qt should never be referred to again.  Nevertheless
                  ... */
               uInstr1(cb, SETV, 0, TempReg, qt);
            }
            VG_(copy_UInstr)(cb, u_in);
            break;
         }
         /* Whew ... */

         case JMP:
            if (u_in->tag1 == TempReg) {
               /* Computed jump target: check its definedness. */
               uInstr1(cb, TESTV, 4, TempReg, SHADOW(u_in->val1));
               uInstr1(cb, SETV,  4, TempReg, SHADOW(u_in->val1));
            } else {
               sk_assert(u_in->tag1 == Literal);
            }
            if (u_in->cond != CondAlways) {
               sk_assert(u_in->flags_r != FlagsEmpty);
               qt = create_GETVF(cb, 0);
               if (/* HACK */ bogusLiterals) {
                  if (0)
                     VG_(printf)("ignore TESTV due to bogus literal\n");
               } else {
                  uInstr1(cb, TESTV, 0, TempReg, qt);
               }
               /* qt should never be referred to again.  Nevertheless
                  ... */
               uInstr1(cb, SETV, 0, TempReg, qt);
            }
            VG_(copy_UInstr)(cb, u_in);
            break;

         case JIFZ:
            uInstr1(cb, TESTV, 4, TempReg, SHADOW(u_in->val1));
            uInstr1(cb, SETV,  4, TempReg, SHADOW(u_in->val1));
            VG_(copy_UInstr)(cb, u_in);
            break;

         /* Emit a check on the address used.  The value loaded into the
            FPU is checked by the call to fpu_{read/write}_check().  */
         case MMX2_MemRd: case MMX2_MemWr:
         case FPU_R: case FPU_W: {
            Int t_size = INVALID_TEMPREG;

            if (u_in->opcode == MMX2_MemRd || u_in->opcode == MMX2_MemWr)
               sk_assert(u_in->size == 4 || u_in->size == 8);

            sk_assert(u_in->tag2 == TempReg);
            uInstr1(cb, TESTV, 4, TempReg, SHADOW(u_in->val2));
            uInstr1(cb, SETV,  4, TempReg, SHADOW(u_in->val2));

            /* Pass (addr, size) to the relevant C checker. */
            t_size = newTemp(cb);
            uInstr2(cb, MOV,   4, Literal, 0, TempReg, t_size);
            uLiteral(cb, u_in->size);
            uInstr2(cb, CCALL, 0, TempReg, u_in->val2, TempReg, t_size);
            uCCall(cb,
                   u_in->opcode==FPU_R ? (Addr) & MC_(fpu_read_check)
                                       : (Addr) & MC_(fpu_write_check),
                   2, 2, False);

            VG_(copy_UInstr)(cb, u_in);
            break;
         }

         /* For FPU insns not referencing memory, just copy thru. */
         case MMX1: case MMX2: case MMX3:
         case FPU:
            VG_(copy_UInstr)(cb, u_in);
            break;

         /* Since we don't track definedness of values inside the
            MMX state, we'd better check that the (int) reg being
            read here is defined. */
         case MMX2_RegRd:
            sk_assert(u_in->tag2 == TempReg);
            sk_assert(u_in->size == 4);
            uInstr1(cb, TESTV, 4, TempReg, SHADOW(u_in->val2));
            uInstr1(cb, SETV,  4, TempReg, SHADOW(u_in->val2));
            VG_(copy_UInstr)(cb, u_in);
            break;

         /* The MMX register is assumed to be fully defined, so
            that's what this register becomes. */
         case MMX2_RegWr:
            sk_assert(u_in->tag2 == TempReg);
            sk_assert(u_in->size == 4);
            uInstr1(cb, SETV, 4, TempReg, SHADOW(u_in->val2));
            VG_(copy_UInstr)(cb, u_in);
            break;

         default:
            VG_(pp_UInstr)(0, u_in);
            VG_(skin_panic)( "memcheck_instrument: unhandled case");

      } /* end of switch (u_in->opcode) */

   } /* end of for loop */

   VG_(free_UCodeBlock)(cb_in);
   return cb;
}

/*------------------------------------------------------------*/
/*--- Clean up mem check instrumentation.
---*/ 1123/*------------------------------------------------------------*/ 1124 1125#define dis VG_(print_codegen) 1126 1127 1128#define VGC_IS_SHADOW(tempreg) ((tempreg % 2) == 1) 1129#define VGC_UNDEF ((UChar)100) 1130#define VGC_VALUE ((UChar)101) 1131 1132#define NOP_no_msg(uu) \ 1133 do { VG_(new_NOP)(uu); } while (False) 1134 1135#define NOP_tag1_op(uu) \ 1136 do { VG_(new_NOP)(uu); \ 1137 if (dis) \ 1138 VG_(printf)(" at %2d: delete %s due to defd arg\n", \ 1139 i, nameOfTagOp(u->val3)); \ 1140 } while (False) 1141 1142#define SETV_tag1_op(uu,newsz) \ 1143 do { uu->opcode = SETV; \ 1144 uu->size = newsz; \ 1145 uu->tag2 = uu->tag3 = NoValue; \ 1146 if (dis) \ 1147 VG_(printf)(" at %2d: convert %s to SETV%d " \ 1148 "due to defd arg\n", \ 1149 i, nameOfTagOp(u->val3), newsz); \ 1150 } while (False) 1151 1152 1153 1154/* Run backwards and delete SETVs on shadow temps for which the next 1155 action is a write. Needs an env saying whether or not the next 1156 action is a write. The supplied UCodeBlock is destructively 1157 modified. 1158*/ 1159static void vg_delete_redundant_SETVs ( UCodeBlock* cb ) 1160{ 1161 Int i, j, k; 1162 Int n_temps = VG_(get_num_temps)(cb); 1163 Bool* next_is_write; 1164 UInstr* u; 1165 Int tempUse[VG_MAX_REGS_USED]; 1166 Bool isWrites[VG_MAX_REGS_USED]; 1167 1168 if (n_temps == 0) return; 1169 1170 next_is_write = VG_(malloc)(n_temps * sizeof(Bool)); 1171 1172 for (i = 0; i < n_temps; i++) next_is_write[i] = True; 1173 1174 for (i = VG_(get_num_instrs)(cb) - 1; i >= 0; i--) { 1175 u = VG_(get_instr)(cb, i); 1176 1177 /* Occasionally there will be GETVs, TAG1s and TAG2s calculating 1178 values which are never used. These first three cases get rid 1179 of them. 
*/ 1180 1181 if (u->opcode == GETV && VGC_IS_SHADOW(u->val2) 1182 && next_is_write[u->val2]) { 1183 sk_assert(u->val2 < n_temps); 1184 VG_(new_NOP)(u); 1185 if (dis) 1186 VG_(printf)(" at %2d: delete GETV\n", i); 1187 } else 1188 1189 if (u->opcode == TAG1 && VGC_IS_SHADOW(u->val1) 1190 && next_is_write[u->val1]) { 1191 sk_assert(u->val1 < n_temps); 1192 VG_(new_NOP)(u); 1193 if (dis) 1194 VG_(printf)(" at %2d: delete TAG1\n", i); 1195 } else 1196 1197 if (u->opcode == TAG2 && VGC_IS_SHADOW(u->val2) 1198 && next_is_write[u->val2]) { 1199 sk_assert(u->val2 < n_temps); 1200 VG_(new_NOP)(u); 1201 if (dis) 1202 VG_(printf)(" at %2d: delete TAG2\n", i); 1203 } else 1204 1205 /* The bulk of the cleanup work of this function is done by 1206 the code from here downwards. */ 1207 1208 if (u->opcode == MOV && VGC_IS_SHADOW(u->val2) 1209 && next_is_write[u->val2]) { 1210 /* This MOV is pointless because the target is dead at this 1211 point. Delete it. */ 1212 VG_(new_NOP)(u); 1213 if (dis) 1214 VG_(printf)(" at %2d: delete MOV\n", i); 1215 } else 1216 1217 if (u->opcode == SETV) { 1218 if (u->tag1 == TempReg) { 1219 sk_assert(VGC_IS_SHADOW(u->val1)); 1220 if (next_is_write[u->val1]) { 1221 /* This write is pointless, so annul it. */ 1222 VG_(new_NOP)(u); 1223 if (dis) 1224 VG_(printf)(" at %2d: delete SETV\n", i); 1225 } else { 1226 /* This write has a purpose; don't annul it, but do 1227 notice that we did it. */ 1228 next_is_write[u->val1] = True; 1229 } 1230 1231 } 1232 1233 } else { 1234 /* Find out what this insn does to the temps. */ 1235 k = VG_(get_reg_usage)(u, TempReg, &tempUse[0], &isWrites[0]); 1236 sk_assert(0 <= k && k <= VG_MAX_REGS_USED); 1237 for (j = k-1; j >= 0; j--) { 1238 next_is_write[ tempUse[j] ] = isWrites[j]; 1239 } 1240 } 1241 } 1242} 1243 1244 1245/* Run forwards, propagating and using the is-completely-defined 1246 property. This removes a lot of redundant tag-munging code. 
1247 Unfortunately it requires intimate knowledge of how each uinstr and 1248 tagop modifies its arguments. This duplicates knowledge of uinstr 1249 tempreg uses embodied in VG_(get_reg_usage)(), which is unfortunate. 1250 The supplied UCodeBlock* is modified in-place. 1251 1252 For each value temp, def[] should hold VGC_VALUE. 1253 1254 For each shadow temp, def[] may hold 4,2,1 or 0 iff that shadow is 1255 definitely known to be fully defined at that size. In all other 1256 circumstances a shadow's def[] entry is VGC_UNDEF, meaning possibly 1257 undefined. In cases of doubt, VGC_UNDEF is always safe. 1258*/ 1259static void vg_propagate_definedness ( UCodeBlock* cb ) 1260{ 1261 Int i, j, k, t; 1262 Int n_temps = VG_(get_num_temps)(cb); 1263 UChar* def; 1264 UInstr* u; 1265 Int tempUse[VG_MAX_REGS_USED]; 1266 Bool isWrites[VG_MAX_REGS_USED]; 1267 1268 if (n_temps == 0) return; 1269 1270 def = VG_(malloc)(n_temps * sizeof(UChar)); 1271 1272 for (i = 0; i < n_temps; i++) 1273 def[i] = VGC_IS_SHADOW(i) ? VGC_UNDEF : VGC_VALUE; 1274 1275 /* Run forwards, detecting and using the all-defined property. */ 1276 1277 for (i = 0; i < VG_(get_num_instrs)(cb); i++) { 1278 u = VG_(get_instr)(cb, i); 1279 switch (u->opcode) { 1280 1281 /* Tag-handling uinstrs. */ 1282 1283 /* Deal with these quickly. */ 1284 case NOP: 1285 case LOCK: 1286 case INCEIP: 1287 break; 1288 1289 /* Make a tag defined. */ 1290 case SETV: 1291 sk_assert(u->tag1 == TempReg && VGC_IS_SHADOW(u->val1)); 1292 def[u->val1] = u->size; 1293 break; 1294 1295 /* Check definedness of a tag. */ 1296 case TESTV: 1297 sk_assert(u->tag1 == TempReg && VGC_IS_SHADOW(u->val1)); 1298 if (def[u->val1] <= 4) { 1299 sk_assert(def[u->val1] == u->size); 1300 NOP_no_msg(u); 1301 if (dis) 1302 VG_(printf)(" at %2d: delete TESTV on defd arg\n", i); 1303 } 1304 break; 1305 1306 /* Applies to both values and tags. Propagate Definedness 1307 property through copies. 
Note that this isn't optional; 1308 we *have* to do this to keep def[] correct. */ 1309 case MOV: 1310 sk_assert(u->tag2 == TempReg); 1311 if (u->tag1 == TempReg) { 1312 if (VGC_IS_SHADOW(u->val1)) { 1313 sk_assert(VGC_IS_SHADOW(u->val2)); 1314 def[u->val2] = def[u->val1]; 1315 } 1316 } 1317 break; 1318 1319 case PUTV: 1320 sk_assert(u->tag1 == TempReg && VGC_IS_SHADOW(u->val1)); 1321 if (def[u->val1] <= 4) { 1322 sk_assert(def[u->val1] == u->size); 1323 u->tag1 = Literal; 1324 u->val1 = 0; 1325 switch (u->size) { 1326 case 4: u->lit32 = 0x00000000; break; 1327 case 2: u->lit32 = 0xFFFF0000; break; 1328 case 1: u->lit32 = 0xFFFFFF00; break; 1329 default: VG_(skin_panic)("vg_cleanup(PUTV)"); 1330 } 1331 if (dis) 1332 VG_(printf)( 1333 " at %2d: propagate definedness into PUTV\n", i); 1334 } 1335 break; 1336 1337 case STOREV: 1338 sk_assert(u->tag1 == TempReg && VGC_IS_SHADOW(u->val1)); 1339 if (def[u->val1] <= 4) { 1340 sk_assert(def[u->val1] == u->size); 1341 u->tag1 = Literal; 1342 u->val1 = 0; 1343 switch (u->size) { 1344 case 4: u->lit32 = 0x00000000; break; 1345 case 2: u->lit32 = 0xFFFF0000; break; 1346 case 1: u->lit32 = 0xFFFFFF00; break; 1347 default: VG_(skin_panic)("vg_cleanup(STOREV)"); 1348 } 1349 if (dis) 1350 VG_(printf)( 1351 " at %2d: propagate definedness into STandV\n", i); 1352 } 1353 break; 1354 1355 /* Nothing interesting we can do with this, I think. */ 1356 case PUTVF: 1357 break; 1358 1359 /* Tag handling operations. */ 1360 case TAG2: 1361 sk_assert(u->tag2 == TempReg && VGC_IS_SHADOW(u->val2)); 1362 sk_assert(u->tag3 == Lit16); 1363 /* Ultra-paranoid "type" checking. 
*/ 1364 switch (u->val3) { 1365 case Tag_ImproveAND4_TQ: case Tag_ImproveAND2_TQ: 1366 case Tag_ImproveAND1_TQ: case Tag_ImproveOR4_TQ: 1367 case Tag_ImproveOR2_TQ: case Tag_ImproveOR1_TQ: 1368 sk_assert(u->tag1 == TempReg && !VGC_IS_SHADOW(u->val1)); 1369 break; 1370 default: 1371 sk_assert(u->tag1 == TempReg && VGC_IS_SHADOW(u->val1)); 1372 break; 1373 } 1374 switch (u->val3) { 1375 Int sz; 1376 case Tag_UifU4: 1377 sz = 4; goto do_UifU; 1378 case Tag_UifU2: 1379 sz = 2; goto do_UifU; 1380 case Tag_UifU1: 1381 sz = 1; goto do_UifU; 1382 case Tag_UifU0: 1383 sz = 0; goto do_UifU; 1384 do_UifU: 1385 sk_assert(u->tag1 == TempReg && VGC_IS_SHADOW(u->val1)); 1386 sk_assert(u->tag2 == TempReg && VGC_IS_SHADOW(u->val2)); 1387 if (def[u->val1] <= 4) { 1388 /* UifU. The first arg is defined, so result is 1389 simply second arg. Delete this operation. */ 1390 sk_assert(def[u->val1] == sz); 1391 NOP_no_msg(u); 1392 if (dis) 1393 VG_(printf)( 1394 " at %2d: delete UifU%d due to defd arg1\n", 1395 i, sz); 1396 } 1397 else 1398 if (def[u->val2] <= 4) { 1399 /* UifU. The second arg is defined, so result is 1400 simply first arg. Copy to second. */ 1401 sk_assert(def[u->val2] == sz); 1402 u->opcode = MOV; 1403 u->size = 4; 1404 u->tag3 = NoValue; 1405 def[u->val2] = def[u->val1]; 1406 if (dis) 1407 VG_(printf)( 1408 " at %2d: change UifU%d to MOV due to defd" 1409 " arg2\n", 1410 i, sz); 1411 } 1412 break; 1413 case Tag_ImproveAND4_TQ: 1414 sz = 4; goto do_ImproveAND; 1415 case Tag_ImproveAND1_TQ: 1416 sz = 1; goto do_ImproveAND; 1417 do_ImproveAND: 1418 /* Implements Q = T OR Q. So if Q is entirely defined, 1419 ie all 0s, we get MOV T, Q. 
*/ 1420 if (def[u->val2] <= 4) { 1421 sk_assert(def[u->val2] == sz); 1422 u->size = 4; /* Regardless of sz */ 1423 u->opcode = MOV; 1424 u->tag3 = NoValue; 1425 def[u->val2] = VGC_UNDEF; 1426 if (dis) 1427 VG_(printf)( 1428 " at %2d: change ImproveAND%d_TQ to MOV due " 1429 "to defd arg2\n", 1430 i, sz); 1431 } 1432 break; 1433 default: 1434 goto unhandled; 1435 } 1436 break; 1437 1438 case TAG1: 1439 sk_assert(u->tag1 == TempReg && VGC_IS_SHADOW(u->val1)); 1440 if (def[u->val1] > 4) break; 1441 /* We now know that the arg to the op is entirely defined. 1442 If the op changes the size of the arg, we must replace 1443 it with a SETV at the new size. If it doesn't change 1444 the size, we can delete it completely. */ 1445 switch (u->val3) { 1446 /* Maintain the same size ... */ 1447 case Tag_Left4: 1448 sk_assert(def[u->val1] == 4); 1449 NOP_tag1_op(u); 1450 break; 1451 case Tag_PCast11: 1452 sk_assert(def[u->val1] == 1); 1453 NOP_tag1_op(u); 1454 break; 1455 /* Change size ... */ 1456 case Tag_PCast40: 1457 sk_assert(def[u->val1] == 4); 1458 SETV_tag1_op(u,0); 1459 def[u->val1] = 0; 1460 break; 1461 case Tag_PCast14: 1462 sk_assert(def[u->val1] == 1); 1463 SETV_tag1_op(u,4); 1464 def[u->val1] = 4; 1465 break; 1466 case Tag_PCast12: 1467 sk_assert(def[u->val1] == 1); 1468 SETV_tag1_op(u,2); 1469 def[u->val1] = 2; 1470 break; 1471 case Tag_PCast10: 1472 sk_assert(def[u->val1] == 1); 1473 SETV_tag1_op(u,0); 1474 def[u->val1] = 0; 1475 break; 1476 case Tag_PCast02: 1477 sk_assert(def[u->val1] == 0); 1478 SETV_tag1_op(u,2); 1479 def[u->val1] = 2; 1480 break; 1481 default: 1482 goto unhandled; 1483 } 1484 if (dis) 1485 VG_(printf)( 1486 " at %2d: delete TAG1 %s due to defd arg\n", 1487 i, nameOfTagOp(u->val3)); 1488 break; 1489 1490 default: 1491 unhandled: 1492 /* We don't know how to handle this uinstr. Be safe, and 1493 set to VGC_VALUE or VGC_UNDEF all temps written by it. 
*/ 1494 k = VG_(get_reg_usage)(u, TempReg, &tempUse[0], &isWrites[0]); 1495 sk_assert(0 <= k && k <= VG_MAX_REGS_USED); 1496 for (j = 0; j < k; j++) { 1497 t = tempUse[j]; 1498 sk_assert(t >= 0 && t < n_temps); 1499 if (!isWrites[j]) { 1500 /* t is read; ignore it. */ 1501 if (0&& VGC_IS_SHADOW(t) && def[t] <= 4) 1502 VG_(printf)("ignoring def %d at %s %s\n", 1503 def[t], 1504 VG_(name_UOpcode)(True, u->opcode), 1505 (u->opcode == TAG1 || u->opcode == TAG2) 1506 ? nameOfTagOp(u->val3) 1507 : (Char*)""); 1508 } else { 1509 /* t is written; better nullify it. */ 1510 def[t] = VGC_IS_SHADOW(t) ? VGC_UNDEF : VGC_VALUE; 1511 } 1512 } 1513 } 1514 } 1515} 1516 1517 1518/* Top level post-MemCheck-instrumentation cleanup function. */ 1519static void vg_cleanup ( UCodeBlock* cb ) 1520{ 1521 vg_propagate_definedness ( cb ); 1522 vg_delete_redundant_SETVs ( cb ); 1523} 1524 1525 1526/* Caller will print out final instrumented code if necessary; we 1527 print out intermediate instrumented code here if necessary. */ 1528UCodeBlock* SK_(instrument) ( UCodeBlock* cb, Addr not_used ) 1529{ 1530 cb = memcheck_instrument ( cb ); 1531 if (MC_(clo_cleanup)) { 1532 if (dis) { 1533 VG_(pp_UCodeBlock) ( cb, "Unimproved instrumented UCode:" ); 1534 VG_(printf)("Instrumentation improvements:\n"); 1535 } 1536 vg_cleanup(cb); 1537 if (dis) VG_(printf)("\n"); 1538 } 1539 return cb; 1540} 1541 1542#undef dis 1543 1544/*--------------------------------------------------------------------*/ 1545/*--- end mc_translate.c ---*/ 1546/*--------------------------------------------------------------------*/ 1547