Matches in test_main.c:

    506  static IRExpr* expr2vbits ( struct _MCEnv* mce, IRExpr* e );
    547  Shadow IRTemps are therefore allocated on demand. mce.tmpMap is a
    566  static IRTemp findShadowTmp ( MCEnv* mce, IRTemp orig )
    568     tl_assert(orig < mce->n_originalTmps);
    569     if (mce->tmpMap[orig] == IRTemp_INVALID) {
    570        mce->tmpMap[orig]
    571           = newIRTemp(mce->bb->tyenv,
    572                       shadowType(mce->bb->tyenv->types[orig]));
    574     return mce->tmpMap[orig];
    583  static void newShadowTmp ( MCEnv* mce, IRTem
    607  isOriginalAtom( MCEnv* mce, IRAtom* a1 )
    618  isShadowAtom( MCEnv* mce, IRAtom* a1 )
    704  assignNew( MCEnv* mce, IRType ty, IRExpr* e )
    717  mkDifD8( MCEnv* mce, IRAtom* a1, IRAtom* a2 )
    723  mkDifD16( MCEnv* mce, IRAtom* a1, IRAtom* a2 )
    729  mkDifD32( MCEnv* mce, IRAtom* a1, IRAtom* a2 )
    735  mkDifD64( MCEnv* mce, IRAtom* a1, IRAtom* a2 )
    741  mkDifDV128( MCEnv* mce, IRAtom* a1, IRAtom* a2 )
    749  mkUifU8( MCEnv* mce, IRAtom* a1, IRAtom* a2 )
    755  mkUifU16( MCEnv* mce, IRAtom* a1, IRAtom* a2 )
    761  mkUifU32( MCEnv* mce, IRAtom* a1, IRAtom* a2 )
    767  mkUifU64( MCEnv* mce, IRAtom* a1, IRAtom* a2 )
    773  mkUifUV128( MCEnv* mce, IRAtom* a1, IRAtom* a2 )
    779  mkUifU( MCEnv* mce, IRType vty, IRAtom* a1, IRAtom* a2 )
    794  mkLeft8( MCEnv* mce, IRAtom* a1 )
    804  mkLeft16( MCEnv* mce, IRAtom* a1 )
    814  mkLeft32( MCEnv* mce, IRAtom* a1 )
    829  mkImproveAND8( MCEnv* mce, IRAtom* data, IRAtom* vbits )
    837  mkImproveAND16( MCEnv* mce, IRAtom* data, IRAtom* vbits )
    845  mkImproveAND32( MCEnv* mce, IRAtom* data, IRAtom* vbits )
    853  mkImproveAND64( MCEnv* mce, IRAtom* data, IRAtom* vbits )
    861  mkImproveANDV128( MCEnv* mce, IRAtom* data, IRAtom* vbits )
    872  mkImproveOR8( MCEnv* mce, IRAtom* data, IRAtom* vbits )
    884  mkImproveOR16( MCEnv* mce, IRAtom* data, IRAtom* vbits )
    896  mkImproveOR32( MCEnv* mce, IRAtom* data, IRAtom* vbits )
    908  mkImproveOR64( MCEnv* mce, IRAtom* data, IRAtom* vbits )
    920  mkImproveORV128( MCEnv* mce, IRAtom* data, IRAtom* vbits )
    934  mkPCastTo( MCEnv* mce, IRType dst_ty, IRAtom* vbits )
    995  setHelperAnns( MCEnv* mce, IRDirty* di )
   1016  complainIfUndefined( MCEnv* mce, IRAtom* atom )
   1098  isAlwaysDefd( MCEnv* mce, Int offset, Int size )
   1130  do_shadow_PUT( MCEnv* mce, Int offset, IRAtom* atom, IRAtom* vatom )
   1160  do_shadow_PUTI( MCEnv* mce, IRRegArray* descr, IRAtom* ix, Int bias, IRAtom* atom )
   1195  shadow_GET( MCEnv* mce, Int offset, IRType ty )
   1214  shadow_GETI( MCEnv* mce, IRRegArray* descr, IRAtom* ix, Int bias )
   1245  mkLazy2( MCEnv* mce, IRType finalVty, IRAtom* va1, IRAtom* va2 )
   1264  mkLazyN( MCEnv* mce, IRAtom** exprvec, IRType finalVtype, IRCallee* cee )
   1297  expensiveAdd32( MCEnv* mce, IRAtom* qaa, IRAtom* qbb, IRAtom* aa, IRAtom* bb )
   1355  mkPCast8x16( MCEnv* mce, IRAtom* at )
   1360  mkPCast16x8( MCEnv* mce, IRAtom* at )
   1365  mkPCast32x4( MCEnv* mce, IRAtom* at )
   1370  mkPCast64x2( MCEnv* mce, IRAtom* at )
   1413  binary32Fx4( MCEnv* mce, IRAtom* vatomX, IRAtom* vatomY )
   1424  unary32Fx4( MCEnv* mce, IRAtom* vatomX )
   1433  binary32F0x4( MCEnv* mce, IRAtom* vatomX, IRAtom* vatomY )
   1446  unary32F0x4( MCEnv* mce, IRAtom* vatomX )
   1459  binary64Fx2( MCEnv* mce, IRAtom* vatomX, IRAtom* vatomY )
   1470  unary64Fx2( MCEnv* mce, IRAtom* vatomX )
   1479  binary64F0x2( MCEnv* mce, IRAtom* vatomX, IRAtom* vatomY )
   1492  unary64F0x2( MCEnv* mce, IRAtom* vatomX )
   1531  vectorNarrowV128( MCEnv* mce, IROp narrow_op, IRAtom* vatom1, IRAtom* vatom2)
   1555  binary8Ix16( MCEnv* mce, IRAtom* vatom1, IRAtom* vatom2 )
   1564  binary16Ix8( MCEnv* mce, IRAtom* vatom1, IRAtom* vatom2 )
   1573  binary32Ix4( MCEnv* mce, IRAtom* vatom1, IRAtom* vatom2 )
   1582  binary64Ix2( MCEnv* mce, IRAtom* vatom1, IRAtom* vatom2 )
   1596  expr2vbits_Binop( MCEnv* mce, IROp op, IRAtom* atom1, IRAtom* atom2 )
   1917  expr2vbits_Unop( MCEnv* mce, IROp op, IRAtom* atom )
   2007  expr2vbits_LDle_WRK( MCEnv* mce, IRType ty, IRAtom* addr, UInt bias )
   2068  expr2vbits_LDle( MCEnv* mce, IRType ty, IRAtom* addr, UInt bias )
   2090  expr2vbits_Mux0X( MCEnv* mce, IRAtom* cond, IRAtom* expr0, IRAtom* exprX )
   2117  expr2vbits( MCEnv* mce, IRExpr* e )
   2172  zwidenToHostWord( MCEnv* mce, IRAtom* vatom )
   2203  do_shadow_STle( MCEnv* mce, IRAtom* addr, UInt bias, IRAtom* data, IRAtom* vdata )
   2332  do_shadow_Dirty( MCEnv* mce, IRDirty* d )
   2581  MCEnv mce;
   [all...]
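The hits at lines 566-574 of test_main.c are fragments of one function. Assembled into a single piece, with the closing braces and indentation reconstructed here (they are not part of the match list above), the on-demand shadow-temp allocation reads roughly as follows: a shadow temp for orig is created only the first time it is requested, and the result is cached in mce->tmpMap.

   static IRTemp findShadowTmp ( MCEnv* mce, IRTemp orig )
   {
      /* Only temps of the original (unshadowed) block may be shadowed. */
      tl_assert(orig < mce->n_originalTmps);
      if (mce->tmpMap[orig] == IRTemp_INVALID) {
         /* First request for this temp's shadow: allocate a new IRTemp
            of the corresponding shadow type and remember it. */
         mce->tmpMap[orig]
            = newIRTemp(mce->bb->tyenv,
                        shadowType(mce->bb->tyenv->types[orig]));
      }
      return mce->tmpMap[orig];
   }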
Matches in mc_translate.c:

    129  static IRExpr* expr2vbits ( struct _MCEnv* mce, IRExpr* e );
    130  static IRTemp findShadowTmpB ( struct _MCEnv* mce, IRTemp orig );
    185  "typeOfIRExpr(mce->bb->tyenv, ...)" at various places in the
    217  Shadow IRTemps are therefore allocated on demand. mce.tmpMap is a
    235  both the table in mce->sb and to our auxiliary mapping. Note that
    236  newTemp may cause mce->tmpMap to resize, hence previous results
    237  from VG_(indexXA)(mce->tmpMap) are invalidated. */
    238  static IRTemp newTemp ( MCEnv* mce, IRType ty, TempKind kind )
    242     IRTemp tmp = newIRTemp(mce->sb->tyenv, ty);
    246     newIx = VG_(addToXA)( mce
    254  findShadowTmpV( MCEnv* mce, IRTemp orig )
    284  newShadowTmpV( MCEnv* mce, IRTemp orig )
    318  isOriginalAtom( MCEnv* mce, IRAtom* a1 )
    331  isShadowAtom( MCEnv* mce, IRAtom* a1 )
    406  stmt( HChar cat, MCEnv* mce, IRStmt* st )
    417  assign( HChar cat, MCEnv* mce, IRTemp tmp, IRExpr* expr )
    441  assignNew( HChar cat, MCEnv* mce, IRType ty, IRExpr* e )
    483  mkDifD8( MCEnv* mce, IRAtom* a1, IRAtom* a2 )
    489  mkDifD16( MCEnv* mce, IRAtom* a1, IRAtom* a2 )
    495  mkDifD32( MCEnv* mce, IRAtom* a1, IRAtom* a2 )
    501  mkDifD64( MCEnv* mce, IRAtom* a1, IRAtom* a2 )
    507  mkDifDV128( MCEnv* mce, IRAtom* a1, IRAtom* a2 )
    513  mkDifDV256( MCEnv* mce, IRAtom* a1, IRAtom* a2 )
    521  mkUifU8( MCEnv* mce, IRAtom* a1, IRAtom* a2 )
    527  mkUifU16( MCEnv* mce, IRAtom* a1, IRAtom* a2 )
    533  mkUifU32( MCEnv* mce, IRAtom* a1, IRAtom* a2 )
    539  mkUifU64( MCEnv* mce, IRAtom* a1, IRAtom* a2 )
    545  mkUifU128( MCEnv* mce, IRAtom* a1, IRAtom* a2 )
    559  mkUifUV128( MCEnv* mce, IRAtom* a1, IRAtom* a2 )
    565  mkUifUV256( MCEnv* mce, IRAtom* a1, IRAtom* a2 )
    571  mkUifU( MCEnv* mce, IRType vty, IRAtom* a1, IRAtom* a2 )
    587  mkLeft8( MCEnv* mce, IRAtom* a1 )
    592  mkLeft16( MCEnv* mce, IRAtom* a1 )
    597  mkLeft32( MCEnv* mce, IRAtom* a1 )
    602  mkLeft64( MCEnv* mce, IRAtom* a1 )
    612  mkImproveAND8( MCEnv* mce, IRAtom* data, IRAtom* vbits )
    620  mkImproveAND16( MCEnv* mce, IRAtom* data, IRAtom* vbits )
    628  mkImproveAND32( MCEnv* mce, IRAtom* data, IRAtom* vbits )
    636  mkImproveAND64( MCEnv* mce, IRAtom* data, IRAtom* vbits )
    644  mkImproveANDV128( MCEnv* mce, IRAtom* data, IRAtom* vbits )
    652  mkImproveANDV256( MCEnv* mce, IRAtom* data, IRAtom* vbits )
    663  mkImproveOR8( MCEnv* mce, IRAtom* data, IRAtom* vbits )
    675  mkImproveOR16( MCEnv* mce, IRAtom* data, IRAtom* vbits )
    687  mkImproveOR32( MCEnv* mce, IRAtom* data, IRAtom* vbits )
    699  mkImproveOR64( MCEnv* mce, IRAtom* data, IRAtom* vbits )
    711  mkImproveORV128( MCEnv* mce, IRAtom* data, IRAtom* vbits )
    723  mkImproveORV256( MCEnv* mce, IRAtom* data, IRAtom* vbits )
    741  mkPCastTo( MCEnv* mce, IRType dst_ty, IRAtom* vbits )
    873  expensiveCmpEQorNE( MCEnv* mce, IRType ty, IRAtom* vxx, IRAtom* vyy, IRAtom* xx, IRAtom* yy )
    985  doCmpORD( MCEnv* mce, IROp cmp_op, IRAtom* xxhash, IRAtom* yyhash, IRAtom* xx, IRAtom* yy )
   1069  setHelperAnns( MCEnv* mce, IRDirty* di )
   1094  complainIfUndefined( MCEnv* mce, IRAtom* atom, IRExpr *guard )
   1263  isAlwaysDefd( MCEnv* mce, Int offset, Int size )
   1296  do_shadow_PUT( MCEnv* mce, Int offset, IRAtom* atom, IRAtom* vatom, IRExpr *guard )
   1344  do_shadow_PUTI( MCEnv* mce, IRPutI *puti )
   1388  shadow_GET( MCEnv* mce, Int offset, IRType ty )
   1409  shadow_GETI( MCEnv* mce, IRRegArray* descr, IRAtom* ix, Int bias )
   1441  mkLazy2( MCEnv* mce, IRType finalVty, IRAtom* va1, IRAtom* va2 )
   1489  mkLazy3( MCEnv* mce, IRType finalVty, IRAtom* va1, IRAtom* va2, IRAtom* va3 )
   1586  mkLazy4( MCEnv* mce, IRType finalVty, IRAtom* va1, IRAtom* va2, IRAtom* va3, IRAtom* va4 )
   1658  mkLazyN( MCEnv* mce, IRAtom** exprvec, IRType finalVtype, IRCallee* cee )
   1710  expensiveAddSub( MCEnv* mce, Bool add, IRType ty, IRAtom* qaa, IRAtom* qbb, IRAtom* aa, IRAtom* bb )
   1819  scalarShift( MCEnv* mce, IRType ty, IROp original_op, IRAtom* qaa, IRAtom* qbb, IRAtom* aa, IRAtom* bb )
   1848  mkPCast8x16( MCEnv* mce, IRAtom* at )
   1853  mkPCast16x8( MCEnv* mce, IRAtom* at )
   1858  mkPCast32x4( MCEnv* mce, IRAtom* at )
   1863  mkPCast64x2( MCEnv* mce, IRAtom* at )
   1868  mkPCast64x4( MCEnv* mce, IRAtom* at )
   1873  mkPCast32x8( MCEnv* mce, IRAtom* at )
   1878  mkPCast32x2( MCEnv* mce, IRAtom* at )
   1883  mkPCast16x4( MCEnv* mce, IRAtom* at )
   1888  mkPCast8x8( MCEnv* mce, IRAtom* at )
   1893  mkPCast16x2( MCEnv* mce, IRAtom* at )
   1898  mkPCast8x4( MCEnv* mce, IRAtom* at )
   1941  binary32Fx4( MCEnv* mce, IRAtom* vatomX, IRAtom* vatomY )
   1952  unary32Fx4( MCEnv* mce, IRAtom* vatomX )
   1961  binary32F0x4( MCEnv* mce, IRAtom* vatomX, IRAtom* vatomY )
   1974  unary32F0x4( MCEnv* mce, IRAtom* vatomX )
   1987  binary64Fx2( MCEnv* mce, IRAtom* vatomX, IRAtom* vatomY )
   1998  unary64Fx2( MCEnv* mce, IRAtom* vatomX )
   2007  binary64F0x2( MCEnv* mce, IRAtom* vatomX, IRAtom* vatomY )
   2020  unary64F0x2( MCEnv* mce, IRAtom* vatomX )
   2033  binary32Fx2( MCEnv* mce, IRAtom* vatomX, IRAtom* vatomY )
   2044  unary32Fx2( MCEnv* mce, IRAtom* vatomX )
   2055  binary64Fx4( MCEnv* mce, IRAtom* vatomX, IRAtom* vatomY )
   2066  unary64Fx4( MCEnv* mce, IRAtom* vatomX )
   2077  binary32Fx8( MCEnv* mce, IRAtom* vatomX, IRAtom* vatomY )
   2088  unary32Fx8( MCEnv* mce, IRAtom* vatomX )
   2184  vectorNarrowBinV128( MCEnv* mce, IROp narrow_op, IRAtom* vatom1, IRAtom* vatom2)
   2208  vectorNarrowBin64( MCEnv* mce, IROp narrow_op, IRAtom* vatom1, IRAtom* vatom2)
   2229  vectorNarrowUnV128( MCEnv* mce, IROp narrow_op, IRAtom* vatom1)
   2267  vectorWidenI64( MCEnv* mce, IROp longen_op, IRAtom* vatom1)
   2295  binary8Ix16( MCEnv* mce, IRAtom* vatom1, IRAtom* vatom2 )
   2304  binary16Ix8( MCEnv* mce, IRAtom* vatom1, IRAtom* vatom2 )
   2313  binary32Ix4( MCEnv* mce, IRAtom* vatom1, IRAtom* vatom2 )
   2322  binary64Ix2( MCEnv* mce, IRAtom* vatom1, IRAtom* vatom2 )
   2333  binary8Ix8( MCEnv* mce, IRAtom* vatom1, IRAtom* vatom2 )
   2342  binary16Ix4( MCEnv* mce, IRAtom* vatom1, IRAtom* vatom2 )
   2351  binary32Ix2( MCEnv* mce, IRAtom* vatom1, IRAtom* vatom2 )
   2360  binary64Ix1( MCEnv* mce, IRAtom* vatom1, IRAtom* vatom2 )
   2371  binary8Ix4( MCEnv* mce, IRAtom* vatom1, IRAtom* vatom2 )
   2380  binary16Ix2( MCEnv* mce, IRAtom* vatom1, IRAtom* vatom2 )
   2394  expr2vbits_Qop( MCEnv* mce, IROp op, IRAtom* atom1, IRAtom* atom2, IRAtom* atom3, IRAtom* atom4 )
   2442  expr2vbits_Triop( MCEnv* mce, IROp op, IRAtom* atom1, IRAtom* atom2, IRAtom* atom3 )
   2527  expr2vbits_Binop( MCEnv* mce, IROp op, IRAtom* atom1, IRAtom* atom2 )
   3534  expr2vbits_Unop( MCEnv* mce, IROp op, IRAtom* atom )
   3829  expr2vbits_Load_WRK( MCEnv* mce, IREndness end, IRType ty, IRAtom* addr, UInt bias )
   3914  expr2vbits_Load( MCEnv* mce, IREndness end, IRType ty, IRAtom* addr, UInt bias )
   3964  expr2vbits_guarded_Load( MCEnv* mce, IREndness end, IRType ty, IRAtom* addr, UInt bias, IRAtom *guard )
   3985  expr2vbits_Mux0X( MCEnv* mce, IRAtom* cond, IRAtom* expr0, IRAtom* exprX )
   4013  expr2vbits( MCEnv* mce, IRExpr* e )
   4085  zwidenToHostWord( MCEnv* mce, IRAtom* vatom )
   4137  do_shadow_Store( MCEnv* mce, IREndness end, IRAtom* addr, UInt bias, IRAtom* data, IRAtom* vdata, IRAtom* guard )
   4402  do_shadow_Dirty( MCEnv* mce, IRDirty* d )
   4628  do_AbiHint( MCEnv* mce, IRExpr* base, Int len, IRExpr* nia )
   4686  bind_shadow_tmp_to_orig( UChar how, MCEnv* mce, IRAtom* orig, IRAtom* shadow )
   4714  do_shadow_CAS( MCEnv* mce, IRCAS* cas )
   4847  do_shadow_CAS_single( MCEnv* mce, IRCAS* cas )
   4936  do_shadow_CAS_double( MCEnv* mce, IRCAS* cas )
   5098  do_shadow_LLSC( MCEnv* mce, IREndness stEnd, IRTemp stResult, IRExpr* stAddr, IRExpr* stStoredata )
   5299  MCEnv mce;
   5741  findShadowTmpB( MCEnv* mce, IRTemp orig )
   5761  gen_maxU32( MCEnv* mce, IRAtom* b1, IRAtom* b2 )
   5766  gen_load_b( MCEnv* mce, Int szB, IRAtom* baseaddr, Int offset )
   5824  gen_guarded_load_b( MCEnv* mce, Int szB, IRAtom* baseaddr, Int offset, IRAtom* guard )
   5843  gen_store_b( MCEnv* mce, Int szB, IRAtom* baseaddr, Int offset, IRAtom* dataB, IRAtom* guard )
   5897  narrowTo32( MCEnv* mce, IRAtom* e )
   5906  zWidenFrom32( MCEnv* mce, IRType dstTy, IRAtom* e )
   5915  schemeE( MCEnv* mce, IRExpr* e )
   6051  do_origins_Dirty( MCEnv* mce, IRDirty* d )
   6254  do_origins_Store( MCEnv* mce, IREndness stEnd, IRExpr* stAddr, IRExpr* stData )
   6273  schemeS( MCEnv* mce, IRStmt* st )
   [all...]
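For orientation, here is a minimal sketch of how the mkUifU family listed above fits together. It assumes the file's local helpers behave as their names and the signatures above suggest: binop builds an IRExpr_Binop, assignNew('V', mce, ty, e) (line 441) binds an expression to a fresh shadow temp, and isShadowAtom (line 331) checks that an atom is a shadow value. The exact bodies in mc_translate.c may differ in detail; the idea is that each width-specific helper Or's the two V-bit (definedness) values, so the result is marked undefined wherever either operand is, and mkUifU merely dispatches on the shadow type.

   /* Sketch only: the 8-bit case; the 16/32/64/128/V128/V256 variants
      differ only in the IRType and the Or operation used. */
   static IRAtom* mkUifU8 ( MCEnv* mce, IRAtom* a1, IRAtom* a2 )
   {
      tl_assert(isShadowAtom(mce, a1));
      tl_assert(isShadowAtom(mce, a2));
      return assignNew('V', mce, Ity_I8, binop(Iop_Or8, a1, a2));
   }

   /* Dispatch on the shadow type of the operands. */
   static IRAtom* mkUifU ( MCEnv* mce, IRType vty, IRAtom* a1, IRAtom* a2 )
   {
      switch (vty) {
         case Ity_I8:   return mkUifU8(mce, a1, a2);
         case Ity_I16:  return mkUifU16(mce, a1, a2);
         case Ity_I32:  return mkUifU32(mce, a1, a2);
         case Ity_I64:  return mkUifU64(mce, a1, a2);
         case Ity_I128: return mkUifU128(mce, a1, a2);
         case Ity_V128: return mkUifUV128(mce, a1, a2);
         case Ity_V256: return mkUifUV256(mce, a1, a2);
         default:
            VG_(tool_panic)("memcheck:mkUifU");
      }
   }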