/*---------------------------------------------------------------*/
/*--- begin                                   host_ppc_isel.c ---*/
/*---------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2004-2012 OpenWorks LLP
      info@open-works.net

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

#include "libvex_basictypes.h"
#include "libvex_ir.h"
#include "libvex.h"

#include "ir_match.h"
#include "main_util.h"
#include "main_globals.h"
#include "host_generic_regs.h"
#include "host_generic_simd64.h"
#include "host_ppc_defs.h"

/* GPR register class for ppc32/64: integer host registers are 64-bit
   wide in 64-bit mode and 32-bit wide otherwise. */
#define HRcGPR(__mode64) (__mode64 ? HRcInt64 : HRcInt32)


/*---------------------------------------------------------*/
/*--- Register Usage Conventions                         ---*/
/*---------------------------------------------------------*/
/*
  Integer Regs
  ------------
  GPR0       Reserved
  GPR1       Stack Pointer
  GPR2       not used - TOC pointer
  GPR3:10    Allocateable
  GPR11      if mode64: not used - calls by ptr / env ptr for some langs
  GPR12      if mode64: not used - exceptions / global linkage code
  GPR13      not used - Thread-specific pointer
  GPR14:28   Allocateable
  GPR29      Unused by us (reserved for the dispatcher)
  GPR30      AltiVec temp spill register
  GPR31      GuestStatePointer

  Of Allocateable regs:
  if (mode64)
    GPR3:10  Caller-saved regs
  else
    GPR3:12  Caller-saved regs
  GPR14:29   Callee-saved regs

  GPR3       [Return | Parameter] - carrying reg
  GPR4:10    Parameter-carrying regs


  Floating Point Regs
  -------------------
  FPR0:31    Allocateable

  FPR0       Caller-saved - scratch reg
  if (mode64)
    FPR1:13  Caller-saved - param & return regs
  else
    FPR1:8   Caller-saved - param & return regs
    FPR9:13  Caller-saved regs
  FPR14:31   Callee-saved regs


  Vector Regs (on processors with the VMX feature)
  -----------
  VR0-VR1    Volatile scratch registers
  VR2-VR13   Volatile vector parameters registers
  VR14-VR19  Volatile scratch registers
  VR20-VR31  Non-volatile registers
  VRSAVE     Non-volatile 32-bit register
*/


/*---------------------------------------------------------*/
/*--- PPC FP Status & Control Register Conventions       ---*/
/*---------------------------------------------------------*/
/*
  Vex-generated code expects to run with the FPU set as follows: all
  exceptions masked.  The rounding mode is set appropriately before
  each floating point insn emitted (or left unchanged if known to be
  correct already).
There are a few fp insns (fmr,fneg,fabs,fnabs), 112 which are unaffected by the rm and so the rounding mode is not set 113 prior to them. 114 115 At least on MPC7447A (Mac Mini), frsqrte is also not affected by 116 rounding mode. At some point the ppc docs get sufficiently vague 117 that the only way to find out is to write test programs. 118*/ 119/* Notes on the FP instruction set, 6 Feb 06. 120 121What exns -> CR1 ? Sets FPRF ? Observes RM ? 122------------------------------------------------------------- 123 124fmr[.] if . n n 125fneg[.] if . n n 126fabs[.] if . n n 127fnabs[.] if . n n 128 129fadd[.] if . y y 130fadds[.] if . y y 131fcfid[.] (Si64->dbl) if . y y 132fcfidU[.] (Ui64->dbl) if . y y 133fcfids[.] (Si64->sngl) if . Y Y 134fcfidus[.] (Ui64->sngl) if . Y Y 135fcmpo (cmp, result n n n 136fcmpu to crfD) n n n 137fctid[.] (dbl->i64) if . ->undef y 138fctidz[.] (dbl->i64) if . ->undef rounds-to-zero 139fctiw[.] (dbl->i32) if . ->undef y 140fctiwz[.] (dbl->i32) if . ->undef rounds-to-zero 141fdiv[.] if . y y 142fdivs[.] if . y y 143fmadd[.] if . y y 144fmadds[.] if . y y 145fmsub[.] if . y y 146fmsubs[.] if . y y 147fmul[.] if . y y 148fmuls[.] if . y y 149 150(note: for fnm*, rounding happens before final negation) 151fnmadd[.] if . y y 152fnmadds[.] if . y y 153fnmsub[.] if . y y 154fnmsubs[.] if . y y 155 156fre[.] if . y y 157fres[.] if . y y 158 159frsqrte[.] if . y apparently not 160 161fsqrt[.] if . y y 162fsqrts[.] if . y y 163fsub[.] if . y y 164fsubs[.] if . y y 165 166 167fpscr: bits 30-31 (ibm) is RM 168 24-29 (ibm) are exnmasks/non-IEEE bit, all zero 169 15-19 (ibm) is FPRF: class, <, =, >, UNord 170 171ppc fe(guest) makes fpscr read as all zeros except RM (and maybe FPRF 172in future) 173 174mcrfs - move fpscr field to CR field 175mtfsfi[.] - 4 bit imm moved to fpscr field 176mtfsf[.] - move frS[low 1/2] to fpscr but using 8-bit field mask 177mtfsb1[.] - set given fpscr bit 178mtfsb0[.] - clear given fpscr bit 179mffs[.] 
- move all fpscr to frD[low 1/2] 180 181For [.] presumably cr1 is set with exn summary bits, as per 182main FP insns 183 184A single precision store truncates/denormalises the in-register value, 185but does not round it. This is so that flds followed by fsts is 186always the identity. 187*/ 188 189 190/*---------------------------------------------------------*/ 191/*--- misc helpers ---*/ 192/*---------------------------------------------------------*/ 193 194/* These are duplicated in guest-ppc/toIR.c */ 195static IRExpr* unop ( IROp op, IRExpr* a ) 196{ 197 return IRExpr_Unop(op, a); 198} 199 200static IRExpr* mkU32 ( UInt i ) 201{ 202 return IRExpr_Const(IRConst_U32(i)); 203} 204 205static IRExpr* bind ( Int binder ) 206{ 207 return IRExpr_Binder(binder); 208} 209 210 211/*---------------------------------------------------------*/ 212/*--- ISelEnv ---*/ 213/*---------------------------------------------------------*/ 214 215/* This carries around: 216 217 - A mapping from IRTemp to IRType, giving the type of any IRTemp we 218 might encounter. This is computed before insn selection starts, 219 and does not change. 220 221 - A mapping from IRTemp to HReg. This tells the insn selector 222 which virtual register(s) are associated with each IRTemp 223 temporary. This is computed before insn selection starts, and 224 does not change. We expect this mapping to map precisely the 225 same set of IRTemps as the type mapping does. 226 227 - vregmapLo holds the primary register for the IRTemp. 228 - vregmapMedLo holds the secondary register for the IRTemp, 229 if any is needed. That's only for Ity_I64 temps 230 in 32 bit mode or Ity_I128 temps in 64-bit mode. 231 - vregmapMedHi is only for dealing with Ity_I128 temps in 232 32 bit mode. It holds bits 95:64 (Intel numbering) 233 of the IRTemp. 234 - vregmapHi is also only for dealing with Ity_I128 temps 235 in 32 bit mode. It holds the most significant bits 236 (127:96 in Intel numbering) of the IRTemp. 
   - The code array, that is, the insns selected so far.

   - A counter, for generating new virtual registers.

   - The host subarchitecture we are selecting insns for.
     This is set at the start and does not change.

   - A Bool to tell us if the host is 32 or 64bit.
     This is set at the start and does not change.

   - An IRExpr*, which may be NULL, holding the IR expression (an
     IRRoundingMode-encoded value) to which the FPU's rounding mode
     was most recently set.  Setting to NULL is always safe.  Used to
     avoid redundant settings of the FPU's rounding mode, as
     described in set_FPU_rounding_mode below.

   - A VexMiscInfo*, needed for knowing how to generate
     function calls for this target.

   - The maximum guest address of any guest insn in this block.
     Actually, the address of the highest-addressed byte from any
     insn in this block.  Is set at the start and does not change.
     This is used for detecting jumps which are definitely
     forward-edges from this block, and therefore can be made
     (chained) to the fast entry point of the destination, thereby
     avoiding the destination's event check.
*/

typedef
   struct {
      /* Constant -- set at the start and do not change. */
      IRTypeEnv*   type_env;
      //                      64-bit mode           32-bit mode
      HReg*        vregmapLo;    // Low 64-bits [63:0]    Low 32-bits [31:0]
      HReg*        vregmapMedLo; // high 64-bits[127:64]  Next 32-bits [63:32]
      HReg*        vregmapMedHi; // unused                Next 32-bits [95:64]
      HReg*        vregmapHi;    // unused                highest 32-bits [127:96]
      Int          n_vregmap;

      /* 27 Jan 06: Not currently used, but should be */
      UInt         hwcaps;

      Bool         mode64;

      VexAbiInfo*  vbi;

      Bool         chainingAllowed;
      Addr64       max_ga;

      /* These are modified as we go along.
*/
      HInstrArray* code;
      Int          vreg_ctr;

      /* IR tree (or NULL) the FPU rounding mode was last set from;
         see _set_FPU_rounding_mode below. */
      IRExpr*      previous_rm;
   }
   ISelEnv;


/* Return the (single) virtual register bound to IRTemp 'tmp'. */
static HReg lookupIRTemp ( ISelEnv* env, IRTemp tmp )
{
   vassert(tmp >= 0);
   vassert(tmp < env->n_vregmap);
   return env->vregmapLo[tmp];
}

/* Return the register pair bound to a two-register IRTemp (Ity_I64
   in 32-bit mode, Ity_I128 in 64-bit mode). */
static void lookupIRTempPair ( HReg* vrHI, HReg* vrLO,
                               ISelEnv* env, IRTemp tmp )
{
   vassert(tmp >= 0);
   vassert(tmp < env->n_vregmap);
   vassert(env->vregmapMedLo[tmp] != INVALID_HREG);
   *vrLO = env->vregmapLo[tmp];
   *vrHI = env->vregmapMedLo[tmp];
}

/* Only for use in 32-bit mode: return all four registers bound to an
   Ity_I128 IRTemp. */
static void lookupIRTempQuad ( HReg* vrHi, HReg* vrMedHi, HReg* vrMedLo,
                               HReg* vrLo, ISelEnv* env, IRTemp tmp )
{
   vassert(!env->mode64);
   vassert(tmp >= 0);
   vassert(tmp < env->n_vregmap);
   vassert(env->vregmapMedLo[tmp] != INVALID_HREG);
   *vrHi    = env->vregmapHi[tmp];
   *vrMedHi = env->vregmapMedHi[tmp];
   *vrMedLo = env->vregmapMedLo[tmp];
   *vrLo    = env->vregmapLo[tmp];
}

/* Append 'instr' to the code being selected, tracing it if
   requested. */
static void addInstr ( ISelEnv* env, PPCInstr* instr )
{
   addHInstr(env->code, instr);
   if (vex_traceflags & VEX_TRACE_VCODE) {
      ppPPCInstr(instr, env->mode64);
      vex_printf("\n");
   }
}

/* Allocate a new integer virtual register (class depends on host
   word size). */
static HReg newVRegI ( ISelEnv* env )
{
   HReg reg = mkHReg(env->vreg_ctr, HRcGPR(env->mode64),
                     True/*virtual reg*/);
   env->vreg_ctr++;
   return reg;
}

/* Allocate a new 64-bit float virtual register. */
static HReg newVRegF ( ISelEnv* env )
{
   HReg reg = mkHReg(env->vreg_ctr, HRcFlt64, True/*virtual reg*/);
   env->vreg_ctr++;
   return reg;
}

/* Allocate a new 128-bit vector virtual register. */
static HReg newVRegV ( ISelEnv* env )
{
   HReg reg = mkHReg(env->vreg_ctr, HRcVec128, True/*virtual reg*/);
   env->vreg_ctr++;
   return reg;
}


/*---------------------------------------------------------*/
/*--- ISEL: Forward declarations                         ---*/
/*---------------------------------------------------------*/

/* These are organised as iselXXX and iselXXX_wrk pairs.
The
   iselXXX_wrk do the real work, but are not to be called directly.
   For each XXX, iselXXX calls its iselXXX_wrk counterpart, then
   checks that all returned registers are virtual.  You should not
   call the _wrk version directly.

   'Word' refers to the size of the native machine word, that is,
   32-bit int in 32-bit mode and 64-bit int in 64-bit mode.  '2Word'
   therefore refers to a double-width (64/128-bit) quantity in two
   integer registers.
*/
/* 32-bit mode: compute an I8/I16/I32 into a GPR.
   64-bit mode: compute an I8/I16/I32/I64 into a GPR. */
static HReg          iselWordExpr_R_wrk ( ISelEnv* env, IRExpr* e );
static HReg          iselWordExpr_R     ( ISelEnv* env, IRExpr* e );

/* 32-bit mode: Compute an I8/I16/I32 into a RH
                (reg-or-halfword-immediate).
   64-bit mode: Compute an I8/I16/I32/I64 into a RH
                (reg-or-halfword-immediate).
   It's important to specify whether the immediate is to be regarded
   as signed or not.  If yes, this will never return -32768 as an
   immediate; this guarantees that all signed immediates that are
   returned can have their sign inverted if need be.
*/
static PPCRH*        iselWordExpr_RH_wrk ( ISelEnv* env,
                                           Bool syned, IRExpr* e );
static PPCRH*        iselWordExpr_RH     ( ISelEnv* env,
                                           Bool syned, IRExpr* e );

/* 32-bit mode: compute an I32 into a RI (reg or 32-bit immediate).
   64-bit mode: compute an I64 into a RI (reg or 64-bit immediate). */
static PPCRI*        iselWordExpr_RI_wrk ( ISelEnv* env, IRExpr* e );
static PPCRI*        iselWordExpr_RI     ( ISelEnv* env, IRExpr* e );

/* In 32 bit mode ONLY, compute an I8 into a
   reg-or-5-bit-unsigned-immediate, the latter being an immediate in
   the range 1 .. 31 inclusive.  Used for doing shift amounts.
*/
static PPCRH*        iselWordExpr_RH5u_wrk ( ISelEnv* env, IRExpr* e );
static PPCRH*        iselWordExpr_RH5u     ( ISelEnv* env, IRExpr* e );

/* In 64-bit mode ONLY, compute an I8 into a
   reg-or-6-bit-unsigned-immediate, the latter being an immediate in
   the range 1 .. 63 inclusive.  Used for doing shift amounts. */
static PPCRH*        iselWordExpr_RH6u_wrk ( ISelEnv* env, IRExpr* e );
static PPCRH*        iselWordExpr_RH6u     ( ISelEnv* env, IRExpr* e );

/* 32-bit mode: compute an I32 into an AMode.
   64-bit mode: compute an I64 into an AMode.

   Requires to know (xferTy) the type of data to be loaded/stored
   using this amode.  That is so that, for 64-bit code generation, any
   PPCAMode_IR returned will have an index (immediate offset) field
   that is guaranteed to be 4-aligned, if there is any chance that the
   amode is to be used in ld/ldu/lda/std/stdu.

   Since there are no such restrictions on 32-bit insns, xferTy is
   ignored for 32-bit code generation. */
static PPCAMode*     iselWordExpr_AMode_wrk ( ISelEnv* env, IRExpr* e, IRType xferTy );
static PPCAMode*     iselWordExpr_AMode     ( ISelEnv* env, IRExpr* e, IRType xferTy );

/* 32-bit mode ONLY: compute an I128 into four 32-bit GPRs. */
static void iselInt128Expr_to_32x4_wrk ( HReg* rHi, HReg* rMedHi,
                                         HReg* rMedLo, HReg* rLo,
                                         ISelEnv* env, IRExpr* e );
static void iselInt128Expr_to_32x4     ( HReg* rHi, HReg* rMedHi,
                                         HReg* rMedLo, HReg* rLo,
                                         ISelEnv* env, IRExpr* e );


/* 32-bit mode ONLY: compute an I64 into a GPR pair. */
static void iselInt64Expr_wrk ( HReg* rHi, HReg* rLo,
                                ISelEnv* env, IRExpr* e );
static void iselInt64Expr     ( HReg* rHi, HReg* rLo,
                                ISelEnv* env, IRExpr* e );

/* 64-bit mode ONLY: compute an I128 into a GPR64 pair.
*/
static void iselInt128Expr_wrk ( HReg* rHi, HReg* rLo,
                                 ISelEnv* env, IRExpr* e );
static void iselInt128Expr     ( HReg* rHi, HReg* rLo,
                                 ISelEnv* env, IRExpr* e );

/* Compute a condition-code (branch condition) from an Ity_I1 expr. */
static PPCCondCode iselCondCode_wrk ( ISelEnv* env, IRExpr* e );
static PPCCondCode iselCondCode     ( ISelEnv* env, IRExpr* e );

/* Compute an F64 into an FP register. */
static HReg iselDblExpr_wrk ( ISelEnv* env, IRExpr* e );
static HReg iselDblExpr     ( ISelEnv* env, IRExpr* e );

/* Compute an F32 into an FP register. */
static HReg iselFltExpr_wrk ( ISelEnv* env, IRExpr* e );
static HReg iselFltExpr     ( ISelEnv* env, IRExpr* e );

/* Compute a V128 into a vector register. */
static HReg iselVecExpr_wrk ( ISelEnv* env, IRExpr* e );
static HReg iselVecExpr     ( ISelEnv* env, IRExpr* e );

/* 64-bit mode ONLY. */
static HReg iselDfp64Expr_wrk ( ISelEnv* env, IRExpr* e );
static HReg iselDfp64Expr     ( ISelEnv* env, IRExpr* e );

/* 64-bit mode ONLY: compute an D128 into a GPR64 pair. */
static void iselDfp128Expr_wrk ( HReg* rHi, HReg* rLo, ISelEnv* env,
                                 IRExpr* e );
static void iselDfp128Expr     ( HReg* rHi, HReg* rLo, ISelEnv* env,
                                 IRExpr* e );

/*---------------------------------------------------------*/
/*--- ISEL: Misc helpers                                 ---*/
/*---------------------------------------------------------*/

/* Make an int reg-reg move (encoded as OR r_dst, r_src, r_src). */

static PPCInstr* mk_iMOVds_RR ( HReg r_dst, HReg r_src )
{
   vassert(hregClass(r_dst) == hregClass(r_src));
   vassert(hregClass(r_src) == HRcInt32 ||
           hregClass(r_src) == HRcInt64);
   return PPCInstr_Alu(Palu_OR, r_dst, r_src, PPCRH_Reg(r_src));
}

/* Advance/retreat %r1 by n.
*/

/* Move the stack pointer up by n bytes (pop).  n must be a small
   16-byte-aligned amount. */
static void add_to_sp ( ISelEnv* env, UInt n )
{
   HReg sp = StackFramePtr(env->mode64);
   vassert(n < 256 && (n%16) == 0);
   addInstr(env, PPCInstr_Alu( Palu_ADD, sp, sp,
                               PPCRH_Imm(True,toUShort(n)) ));
}

/* Move the stack pointer down by n bytes (push).  n must be a small
   16-byte-aligned amount. */
static void sub_from_sp ( ISelEnv* env, UInt n )
{
   HReg sp = StackFramePtr(env->mode64);
   vassert(n < 256 && (n%16) == 0);
   addInstr(env, PPCInstr_Alu( Palu_SUB, sp, sp,
                               PPCRH_Imm(True,toUShort(n)) ));
}

/*
  returns a quadword aligned address on the stack
   - copies SP, adds 16bytes, aligns to quadword.
  use sub_from_sp(32) before calling this,
  as expects to have 32 bytes to play with.
*/
static HReg get_sp_aligned16 ( ISelEnv* env )
{
   HReg       r = newVRegI(env);
   HReg align16 = newVRegI(env);
   addInstr(env, mk_iMOVds_RR(r, StackFramePtr(env->mode64)));
   // add 16
   addInstr(env, PPCInstr_Alu( Palu_ADD, r, r,
                               PPCRH_Imm(True,toUShort(16)) ));
   // mask to quadword
   // NOTE(review): in 32-bit mode this presumably relies on
   // PPCInstr_LI truncating the constant to 0xFFFFFFF0 — confirm.
   addInstr(env,
            PPCInstr_LI(align16, 0xFFFFFFFFFFFFFFF0ULL, env->mode64));
   addInstr(env, PPCInstr_Alu(Palu_AND, r,r, PPCRH_Reg(align16)));
   return r;
}



/* Load 2*I32 regs to fp reg: stores the pair to the stack and
   reloads it as one F64.  32-bit mode only. */
static HReg mk_LoadRR32toFPR ( ISelEnv* env,
                               HReg r_srcHi, HReg r_srcLo )
{
   HReg fr_dst = newVRegF(env);
   PPCAMode *am_addr0, *am_addr1;

   vassert(!env->mode64);
   vassert(hregClass(r_srcHi) == HRcInt32);
   vassert(hregClass(r_srcLo) == HRcInt32);

   sub_from_sp( env, 16 );        // Move SP down 16 bytes
   am_addr0 = PPCAMode_IR( 0, StackFramePtr(env->mode64) );
   am_addr1 = PPCAMode_IR( 4, StackFramePtr(env->mode64) );

   // store hi,lo as Ity_I32's
   addInstr(env, PPCInstr_Store( 4, am_addr0, r_srcHi, env->mode64 ));
   addInstr(env, PPCInstr_Store( 4, am_addr1, r_srcLo, env->mode64 ));

   // load as float
   addInstr(env, PPCInstr_FpLdSt(True/*load*/, 8, fr_dst, am_addr0));

   add_to_sp( env, 16 );          // Reset SP
   return fr_dst;
}

/*
Load I64 reg to fp reg: same trick via the stack.  64-bit mode only. */
static HReg mk_LoadR64toFPR ( ISelEnv* env, HReg r_src )
{
   HReg fr_dst = newVRegF(env);
   PPCAMode *am_addr0;

   vassert(env->mode64);
   vassert(hregClass(r_src) == HRcInt64);

   sub_from_sp( env, 16 );        // Move SP down 16 bytes
   am_addr0 = PPCAMode_IR( 0, StackFramePtr(env->mode64) );

   // store as Ity_I64
   addInstr(env, PPCInstr_Store( 8, am_addr0, r_src, env->mode64 ));

   // load as float
   addInstr(env, PPCInstr_FpLdSt(True/*load*/, 8, fr_dst, am_addr0));

   add_to_sp( env, 16 );          // Reset SP
   return fr_dst;
}


/* Given an amode, return one which references 4 bytes further
   along.  Panics rather than overflow the 16-bit signed index. */

static PPCAMode* advance4 ( ISelEnv* env, PPCAMode* am )
{
   PPCAMode* am4 = dopyPPCAMode( am );
   if (am4->tag == Pam_IR
       && am4->Pam.IR.index + 4 <= 32767) {
      am4->Pam.IR.index += 4;
   } else {
      vpanic("advance4(ppc,host)");
   }
   return am4;
}


/* Given a guest-state array descriptor, an index expression and a
   bias, generate a PPCAMode pointing at the relevant piece of
   guest state.  */
static
PPCAMode* genGuestArrayOffset ( ISelEnv* env, IRRegArray* descr,
                                IRExpr* off, Int bias )
{
   HReg rtmp, roff;
   Int  elemSz = sizeofIRType(descr->elemTy);
   Int  nElems = descr->nElems;
   Int  shift  = 0;

   /* Throw out any cases we don't need.  In theory there might be a
      day where we need to handle others, but not today.
*/

   if (nElems != 16 && nElems != 32)
      vpanic("genGuestArrayOffset(ppc host)(1)");

   switch (elemSz) {
      case 4:  shift = 2; break;
      case 8:  shift = 3; break;
      default: vpanic("genGuestArrayOffset(ppc host)(2)");
   }

   if (bias < -100 || bias > 100) /* somewhat arbitrarily */
      vpanic("genGuestArrayOffset(ppc host)(3)");
   if (descr->base < 0 || descr->base > 5000) /* somewhat arbitrarily */
      vpanic("genGuestArrayOffset(ppc host)(4)");

   /* Compute off into a reg, %off.  Then return:

        addi %tmp, %off, bias (emitted even when bias == 0)
        andi %tmp, nElems-1
        sldi %tmp, shift
        addi %tmp, %tmp, base
        ... Baseblockptr + %tmp ...
   */
   roff = iselWordExpr_R(env, off);
   rtmp = newVRegI(env);
   addInstr(env, PPCInstr_Alu(
                    Palu_ADD,
                    rtmp, roff,
                    PPCRH_Imm(True/*signed*/, toUShort(bias))));
   addInstr(env, PPCInstr_Alu(
                    Palu_AND,
                    rtmp, rtmp,
                    PPCRH_Imm(False/*unsigned*/, toUShort(nElems-1))));
   addInstr(env, PPCInstr_Shft(
                    Pshft_SHL,
                    env->mode64 ? False : True/*F:64-bit, T:32-bit shift*/,
                    rtmp, rtmp,
                    PPCRH_Imm(False/*unsigned*/, toUShort(shift))));
   addInstr(env, PPCInstr_Alu(
                    Palu_ADD,
                    rtmp, rtmp,
                    PPCRH_Imm(True/*signed*/, toUShort(descr->base))));
   return
      PPCAMode_RR( GuestStatePtr(env->mode64), rtmp );
}


/*---------------------------------------------------------*/
/*--- ISEL: Function call helpers                        ---*/
/*---------------------------------------------------------*/

/* Used only in doHelperCall.  See big comment in doHelperCall re
   handling of register-parameter args.  This function figures out
   whether evaluation of an expression might require use of a fixed
   register.  If in doubt return True (safe but suboptimal).
*/
static
Bool mightRequireFixedRegs ( IRExpr* e )
{
   switch (e->tag) {
      case Iex_RdTmp: case Iex_Const: case Iex_Get:
         return False;
      default:
         return True;
   }
}


/* Do a complete function call.  guard is a Ity_Bit expression
   indicating whether or not the call happens.  If guard==NULL, the
   call is unconditional. */

static
void doHelperCall ( ISelEnv* env,
                    Bool passBBP,
                    IRExpr* guard, IRCallee* cee, IRExpr** args )
{
   PPCCondCode cc;
   HReg        argregs[PPC_N_REGPARMS];
   HReg        tmpregs[PPC_N_REGPARMS];
   Bool        go_fast;
   Int         n_args, i, argreg;
   UInt        argiregs;
   ULong       target;
   Bool        mode64 = env->mode64;

   /* Do we need to force use of an odd-even reg pair for 64-bit
      args? */
   Bool regalign_int64s
      = (!mode64) && env->vbi->host_ppc32_regalign_int64_args;

   /* Marshal args for a call and do the call.

      If passBBP is True, the baseblock (guest state) pointer is to be
      passed as the first arg.

      This function only deals with a tiny set of possibilities, which
      cover all helpers in practice.  The restrictions are that only
      arguments in registers are supported, hence only PPC_N_REGPARMS x
      (mode32:32 | mode64:64) integer bits in total can be passed.
      In fact the only supported arg type is (mode32:I32 | mode64:I64).

      Generating code which is both efficient and correct when
      parameters are to be passed in registers is difficult, for the
      reasons elaborated in detail in comments attached to
      doHelperCall() in priv/host-x86/isel.c.  Here, we use a variant
      of the method described in those comments.

      The problem is split into two cases: the fast scheme and the
      slow scheme.  In the fast scheme, arguments are computed
      directly into the target (real) registers.  This is only safe
      when we can be sure that computation of each argument will not
      trash any real registers set by computation of any other
      argument.

      In the slow scheme, all args are first computed into vregs, and
      once they are all done, they are moved to the relevant real
      regs.  This always gives correct code, but it also gives a bunch
      of vreg-to-rreg moves which are usually redundant but are hard
      for the register allocator to get rid of.

      To decide which scheme to use, all argument expressions are
      first examined.  If they are all so simple that it is clear they
      will be evaluated without use of any fixed registers, use the
      fast scheme, else use the slow scheme.  Note also that only
      unconditional calls may use the fast scheme, since having to
      compute a condition expression could itself trash real
      registers.

      Note this requires being able to examine an expression and
      determine whether or not evaluation of it might use a fixed
      register.  That requires knowledge of how the rest of this insn
      selector works.  Currently just the following 3 are regarded as
      safe -- hopefully they cover the majority of arguments in
      practice: IRExpr_Tmp IRExpr_Const IRExpr_Get.
   */

   /* Note that the cee->regparms field is meaningless on PPC32/64 host
      (since there is only one calling convention) and so we always
      ignore it. */

   n_args = 0;
   for (i = 0; args[i]; i++)
      n_args++;

   if (PPC_N_REGPARMS < n_args + (passBBP ? 1 : 0)) {
      vpanic("doHelperCall(PPC): cannot currently handle > 8 args");
      // PPC_N_REGPARMS
   }

   /* The integer argument registers, r3 .. r10, per the PPC ABI. */
   argregs[0] = hregPPC_GPR3(mode64);
   argregs[1] = hregPPC_GPR4(mode64);
   argregs[2] = hregPPC_GPR5(mode64);
   argregs[3] = hregPPC_GPR6(mode64);
   argregs[4] = hregPPC_GPR7(mode64);
   argregs[5] = hregPPC_GPR8(mode64);
   argregs[6] = hregPPC_GPR9(mode64);
   argregs[7] = hregPPC_GPR10(mode64);
   argiregs = 0;

   tmpregs[0] = tmpregs[1] = tmpregs[2] =
   tmpregs[3] = tmpregs[4] = tmpregs[5] =
   tmpregs[6] = tmpregs[7] = INVALID_HREG;

   /* First decide which scheme (slow or fast) is to be used.  First
      assume the fast scheme, and select slow if any contraindications
      (wow) appear. */

   go_fast = True;

   if (guard) {
      if (guard->tag == Iex_Const
          && guard->Iex.Const.con->tag == Ico_U1
          && guard->Iex.Const.con->Ico.U1 == True) {
         /* unconditional */
      } else {
         /* Not manifestly unconditional -- be conservative. */
         go_fast = False;
      }
   }

   if (go_fast) {
      for (i = 0; i < n_args; i++) {
         if (mightRequireFixedRegs(args[i])) {
            go_fast = False;
            break;
         }
      }
   }

   /* At this point the scheme to use has been established.  Generate
      code to get the arg values into the argument rregs.
*/

   if (go_fast) {

      /* FAST SCHEME */
      argreg = 0;
      if (passBBP) {
         argiregs |= (1 << (argreg+3));
         addInstr(env, mk_iMOVds_RR( argregs[argreg],
                                     GuestStatePtr(mode64) ));
         argreg++;
      }

      for (i = 0; i < n_args; i++) {
         vassert(argreg < PPC_N_REGPARMS);
         vassert(typeOfIRExpr(env->type_env, args[i]) == Ity_I32 ||
                 typeOfIRExpr(env->type_env, args[i]) == Ity_I64);
         if (!mode64) {
            if (typeOfIRExpr(env->type_env, args[i]) == Ity_I32) {
               argiregs |= (1 << (argreg+3));
               addInstr(env,
                        mk_iMOVds_RR( argregs[argreg],
                                      iselWordExpr_R(env, args[i]) ));
            } else { // Ity_I64
               HReg rHi, rLo;
               if (regalign_int64s && (argreg%2) == 1)
                              // ppc32 ELF abi spec for passing LONG_LONG
                  argreg++;   // XXX: odd argreg => even rN
               vassert(argreg < PPC_N_REGPARMS-1);
               iselInt64Expr(&rHi,&rLo, env, args[i]);
               argiregs |= (1 << (argreg+3));
               addInstr(env, mk_iMOVds_RR( argregs[argreg++], rHi ));
               argiregs |= (1 << (argreg+3));
               addInstr(env, mk_iMOVds_RR( argregs[argreg], rLo));
            }
         } else { // mode64
            argiregs |= (1 << (argreg+3));
            addInstr(env, mk_iMOVds_RR( argregs[argreg],
                                        iselWordExpr_R(env, args[i]) ));
         }
         argreg++;
      }

      /* Fast scheme only applies for unconditional calls.  Hence: */
      cc = mk_PPCCondCode( Pct_ALWAYS, Pcf_NONE );

   } else {

      /* SLOW SCHEME; move via temporaries */
      argreg = 0;

      if (passBBP) {
         /* This is pretty stupid; better to move directly to r3
            after the rest of the args are done.
*/
         tmpregs[argreg] = newVRegI(env);
         addInstr(env, mk_iMOVds_RR( tmpregs[argreg],
                                     GuestStatePtr(mode64) ));
         argreg++;
      }

      for (i = 0; i < n_args; i++) {
         vassert(argreg < PPC_N_REGPARMS);
         vassert(typeOfIRExpr(env->type_env, args[i]) == Ity_I32 ||
                 typeOfIRExpr(env->type_env, args[i]) == Ity_I64);
         if (!mode64) {
            if (typeOfIRExpr(env->type_env, args[i]) == Ity_I32) {
               tmpregs[argreg] = iselWordExpr_R(env, args[i]);
            } else { // Ity_I64
               HReg rHi, rLo;
               if (regalign_int64s && (argreg%2) == 1)
                             // ppc32 ELF abi spec for passing LONG_LONG
                  argreg++;  // XXX: odd argreg => even rN
               vassert(argreg < PPC_N_REGPARMS-1);
               iselInt64Expr(&rHi,&rLo, env, args[i]);
               tmpregs[argreg++] = rHi;
               tmpregs[argreg]   = rLo;
            }
         } else { // mode64
            tmpregs[argreg] = iselWordExpr_R(env, args[i]);
         }
         argreg++;
      }

      /* Now we can compute the condition.  We can't do it earlier
         because the argument computations could trash the condition
         codes.  Be a bit clever to handle the common case where the
         guard is 1:Bit. */
      cc = mk_PPCCondCode( Pct_ALWAYS, Pcf_NONE );
      if (guard) {
         if (guard->tag == Iex_Const
             && guard->Iex.Const.con->tag == Ico_U1
             && guard->Iex.Const.con->Ico.U1 == True) {
            /* unconditional -- do nothing */
         } else {
            cc = iselCondCode( env, guard );
         }
      }

      /* Move the args to their final destinations. */
      for (i = 0; i < argreg; i++) {
         if (tmpregs[i] == INVALID_HREG)  // Skip invalid regs
            continue;
         /* None of these insns, including any spill code that might
            be generated, may alter the condition codes. */
         argiregs |= (1 << (i+3));
         addInstr( env, mk_iMOVds_RR( argregs[i], tmpregs[i] ) );
      }

   }

   target = mode64 ? Ptr_to_ULong(cee->addr) :
                     toUInt(Ptr_to_ULong(cee->addr));

   /* Finally, the call itself.
*/
   addInstr(env, PPCInstr_Call( cc, (Addr64)target, argiregs ));
}


/*---------------------------------------------------------*/
/*--- ISEL: FP rounding mode helpers                     ---*/
/*---------------------------------------------------------*/

// NOTE(review): stale dead code -- PPCInstr_FpLdFPSCR now takes a
// second (dfp_rm) argument; see _set_FPU_rounding_mode below.
///* Set FPU's rounding mode to the default */
//static
//void set_FPU_rounding_default ( ISelEnv* env )
//{
//   HReg fr_src = newVRegF(env);
//   HReg r_src  = newVRegI(env);
//
//   /* Default rounding mode = 0x0
//      Only supporting the rounding-mode bits - the rest of FPSCR is 0x0
//      - so we can set the whole register at once (faster)
//      note: upper 32 bits ignored by FpLdFPSCR
//   */
//   addInstr(env, PPCInstr_LI(r_src, 0x0, env->mode64));
//   if (env->mode64) {
//      fr_src = mk_LoadR64toFPR( env, r_src );         // 1*I64 -> F64
//   } else {
//      fr_src = mk_LoadRR32toFPR( env, r_src, r_src ); // 2*I32 -> F64
//   }
//   addInstr(env, PPCInstr_FpLdFPSCR( fr_src ));
//}

/* Convert IR rounding mode to PPC encoding */
static HReg roundModeIRtoPPC ( ISelEnv* env, HReg r_rmIR )
{
   /*
   rounding mode                     | PPC  | IR
   -----------------------------------------------
   to nearest, ties to even          | 000  | 000
   to zero                           | 001  | 011
   to +infinity                      | 010  | 010
   to -infinity                      | 011  | 001
   +++++ Below are the extended rounding modes for decimal floating point +++++
   to nearest, ties away from 0     | 100  | 100
   to nearest, ties toward 0        | 101  | 111
   to away from 0                   | 110  | 110
   to prepare for shorter precision | 111  | 101
   */
   HReg r_rmPPC = newVRegI(env);
   HReg r_tmp1  = newVRegI(env);
   HReg r_tmp2  = newVRegI(env);

   vassert(hregClass(r_rmIR) == HRcGPR(env->mode64));

   // The low two bits are swapped iff they differ; bit 2 passes
   // through unchanged (needed for the extended DFP modes 4..7):
   //
   //   r_rmPPC = r_rmIR XOR ((r_rmIR << 1) & 3)
   //
   //   slwi  tmp1,    r_rmIR, 1
   //   andi  tmp2,    tmp1,   3
   //   xor   r_rmPPC, r_rmIR, tmp2

   addInstr(env, PPCInstr_Shft(Pshft_SHL, True/*32bit shift*/,
                               r_tmp1, r_rmIR, PPCRH_Imm(False,1)));

   addInstr(
      env, PPCInstr_Alu( Palu_AND,
                         r_tmp2, r_tmp1, PPCRH_Imm( False, 3 ) ) );

   addInstr( env, PPCInstr_Alu( Palu_XOR,
                                r_rmPPC, r_rmIR, PPCRH_Reg( r_tmp2 ) ) );

   return r_rmPPC;
}


/* Set the FPU's rounding mode: 'mode' is an I32-typed expression
   denoting a value in the range 0 .. 7, indicating a round mode
   encoded as per type IRRoundingMode.  Set the PPC FPSCR to have the
   same rounding.  When the dfp_rm arg is True, set the decimal
   floating point rounding mode bits (29:31); otherwise, set the
   binary floating point rounding mode bits (62:63).

   For speed & simplicity, we're setting the *entire* FPSCR here.

   Setting the rounding mode is expensive.  So this function tries to
   avoid repeatedly setting the rounding mode to the same thing by
   first comparing 'mode' to the 'mode' tree supplied in the previous
   call to this function, if any.  (The previous value is stored in
   env->previous_rm.)  If 'mode' is a single IR temporary 't' and
   env->previous_rm is also just 't', then the setting is skipped.

   This is safe because of the SSA property of IR: an IR temporary can
   only be defined once and so will have the same value regardless of
   where it appears in the block.  Cool stuff, SSA.

   A safety condition: all attempts to set the RM must be aware of
   this mechanism - by being routed through the functions here.

   Of course this only helps in blocks where the RM is set more than
   once and it is set to the same value each time, *and* that value is
   held in the same IR temporary each time.  In order to assure the
   latter as much as possible, the IR optimiser takes care to do CSE
   on any block with any sign of floating point activity.
*/
static
void _set_FPU_rounding_mode ( ISelEnv* env, IRExpr* mode, Bool dfp_rm )
{
   HReg fr_src = newVRegF(env);
   HReg r_src;

   vassert(typeOfIRExpr(env->type_env,mode) == Ity_I32);

   /* Do we need to do anything? */
   if (env->previous_rm
       && env->previous_rm->tag == Iex_RdTmp
       && mode->tag == Iex_RdTmp
       && env->previous_rm->Iex.RdTmp.tmp == mode->Iex.RdTmp.tmp) {
      /* no - setting it to what it was before.  */
      vassert(typeOfIRExpr(env->type_env, env->previous_rm) == Ity_I32);
      return;
   }

   /* No luck - we better set it, and remember what we set it to. */
   env->previous_rm = mode;

   /* Only supporting the rounding-mode bits - the rest of FPSCR is
      0x0 - so we can set the whole register at once (faster). */

   // Resolve rounding mode and convert to PPC representation
   r_src = roundModeIRtoPPC( env, iselWordExpr_R(env, mode) );

   // gpr -> fpr
   if (env->mode64) {
      if (dfp_rm) {
         /* DFP rounding mode lives in the upper FPSCR word, so shift
            it into bits 32..63 of the staging GPR. */
         HReg r_tmp1 = newVRegI( env );
         addInstr( env,
                   PPCInstr_Shft( Pshft_SHL, False/*64bit shift*/,
                                  r_tmp1, r_src, PPCRH_Imm( False, 32 ) ) );
         fr_src = mk_LoadR64toFPR( env, r_tmp1 );
      } else {
         fr_src = mk_LoadR64toFPR( env, r_src ); // 1*I64 -> F64
      }
   } else {
      if (dfp_rm) {
         /* Same idea in 32-bit mode: DFP mode goes in the high half,
            zero in the low half. */
         HReg r_zero = newVRegI( env );
         addInstr( env, PPCInstr_LI( r_zero, 0, env->mode64 ) );
         fr_src = mk_LoadRR32toFPR( env, r_src, r_zero );
      } else {
         fr_src = mk_LoadRR32toFPR( env, r_src, r_src ); // 2*I32 -> F64
      }
   }

   // Move to FPSCR
   addInstr(env, PPCInstr_FpLdFPSCR( fr_src, dfp_rm ));
}

/* Set the binary FP rounding mode (FPSCR bits 62:63). */
static void set_FPU_rounding_mode ( ISelEnv* env, IRExpr* mode )
{
   _set_FPU_rounding_mode(env, mode, False);
}

/* Set the decimal FP rounding mode (FPSCR bits 29:31). */
static void set_FPU_DFP_rounding_mode ( ISelEnv* env, IRExpr* mode )
{
   _set_FPU_rounding_mode(env, mode, True);
}

/*---------------------------------------------------------*/
/*--- ISEL: vector helpers                              ---*/
/*---------------------------------------------------------*/

/* Generate all-zeroes into a new vector register.
*/
static HReg generate_zeroes_V128 ( ISelEnv* env )
{
   HReg dst = newVRegV(env);
   /* v XOR v == 0 whatever v's (undefined) initial contents are. */
   addInstr(env, PPCInstr_AvBinary(Pav_XOR, dst, dst, dst));
   return dst;
}

/* Generate all-ones into a new vector register.
*/
static HReg generate_ones_V128 ( ISelEnv* env )
{
   HReg dst = newVRegV(env);
   /* Splatting signed immediate -1 across 8-bit lanes fills the
      entire register with 1-bits. */
   PPCVI5s * src = PPCVI5s_Imm(-1);
   addInstr(env, PPCInstr_AvSplat(8, dst, src));
   return dst;
}


/*
  Generates code for AvSplat
  - takes in IRExpr* of type 8|16|32
    returns vector reg of duplicated lanes of input
  - uses AvSplat(imm) for imms up to simm6.
    otherwise must use store reg & load vector
*/
static HReg mk_AvDuplicateRI( ISelEnv* env, IRExpr* e )
{
   HReg   r_src;
   HReg   dst = newVRegV(env);
   PPCRI* ri  = iselWordExpr_RI(env, e);
   IRType ty  = typeOfIRExpr(env->type_env,e);
   UInt   sz  = (ty == Ity_I8) ? 8 : (ty == Ity_I16) ? 16 : 32;
   vassert(ty == Ity_I8 || ty == Ity_I16 || ty == Ity_I32);

   /* special case: immediate */
   if (ri->tag == Pri_Imm) {
      Int simm32 = (Int)ri->Pri.Imm;

      /* figure out if it's do-able with imm splats. */
      if (simm32 >= -32 && simm32 <= 31) {
         Char simm6 = (Char)simm32;
         if (simm6 > 15) {           /* 16:31 inclusive */
            /* Above the vsplti simm5 range; synthesise the value as
               splat(simm6-16) - splat(-16), lanewise. */
            HReg v1 = newVRegV(env);
            HReg v2 = newVRegV(env);
            addInstr(env, PPCInstr_AvSplat(sz, v1, PPCVI5s_Imm(-16)));
            addInstr(env, PPCInstr_AvSplat(sz, v2, PPCVI5s_Imm(simm6-16)));
            addInstr(env,
               (sz== 8) ? PPCInstr_AvBin8x16(Pav_SUBU, dst, v2, v1) :
               (sz==16) ? PPCInstr_AvBin16x8(Pav_SUBU, dst, v2, v1)
                        : PPCInstr_AvBin32x4(Pav_SUBU, dst, v2, v1) );
            return dst;
         }
         if (simm6 < -16) {          /* -32:-17 inclusive */
            /* Below the vsplti simm5 range; synthesise the value as
               splat(simm6+16) + splat(-16), lanewise. */
            HReg v1 = newVRegV(env);
            HReg v2 = newVRegV(env);
            addInstr(env, PPCInstr_AvSplat(sz, v1, PPCVI5s_Imm(-16)));
            addInstr(env, PPCInstr_AvSplat(sz, v2, PPCVI5s_Imm(simm6+16)));
            addInstr(env,
               (sz== 8) ? PPCInstr_AvBin8x16(Pav_ADDU, dst, v2, v1) :
               (sz==16) ? PPCInstr_AvBin16x8(Pav_ADDU, dst, v2, v1)
                        : PPCInstr_AvBin32x4(Pav_ADDU, dst, v2, v1) );
            return dst;
         }
         /* simplest form:              -16:15 inclusive */
         addInstr(env, PPCInstr_AvSplat(sz, dst, PPCVI5s_Imm(simm6)));
         return dst;
      }

      /* no luck; use the Slow way. */
      r_src = newVRegI(env);
      addInstr(env, PPCInstr_LI(r_src, (Long)simm32, env->mode64));
   }
   else {
      r_src = ri->Pri.Reg;
   }

   /* default case: store r_src in lowest lane of 16-aligned mem,
      load vector, splat lowest lane to dst */
   {
      /* CAB: Maybe faster to store r_src multiple times (sz dependent),
         and simply load the vector? */
      HReg r_aligned16;
      HReg v_src = newVRegV(env);
      PPCAMode *am_off12;

      sub_from_sp( env, 32 );     // Move SP down
      /* Get a 16-aligned address within our stack space */
      r_aligned16 = get_sp_aligned16( env );
      am_off12 = PPCAMode_IR( 12, r_aligned16 );

      /* Store r_src in low word of 16-aligned mem */
      addInstr(env, PPCInstr_Store( 4, am_off12, r_src, env->mode64 ));

      /* Load src to vector[low lane] */
      addInstr(env, PPCInstr_AvLdSt( True/*ld*/, 4, v_src, am_off12 ) );
      add_to_sp( env, 32 );       // Reset SP

      /* Finally, splat v_src[low_lane] to dst */
      addInstr(env, PPCInstr_AvSplat(sz, dst, PPCVI5s_Reg(v_src)));
      return dst;
   }
}


/* for each lane of vSrc: lane == nan ?
   laneX = all 1's : all 0's */
static HReg isNan ( ISelEnv* env, HReg vSrc )
{
   HReg zeros, msk_exp, msk_mnt, expt, mnts, vIsNan;

   vassert(hregClass(vSrc) == HRcVec128);

   zeros   = mk_AvDuplicateRI(env, mkU32(0));
   msk_exp = mk_AvDuplicateRI(env, mkU32(0x7F800000));
   msk_mnt = mk_AvDuplicateRI(env, mkU32(0x7FFFFF));
   expt    = newVRegV(env);
   mnts    = newVRegV(env);
   vIsNan  = newVRegV(env);

   /* 32bit float => sign(1) | exponent(8) | mantissa(23)
      nan => exponent all ones, mantissa > 0 */

   /* expt lane = all-1s iff (lane & exp-mask) == exp-mask */
   addInstr(env, PPCInstr_AvBinary(Pav_AND, expt, vSrc, msk_exp));
   addInstr(env, PPCInstr_AvBin32x4(Pav_CMPEQU, expt, expt, msk_exp));
   /* mnts lane = all-1s iff (lane & mantissa-mask) > 0 */
   addInstr(env, PPCInstr_AvBinary(Pav_AND, mnts, vSrc, msk_mnt));
   addInstr(env, PPCInstr_AvBin32x4(Pav_CMPGTU, mnts, mnts, zeros));
   /* NaN iff both conditions hold */
   addInstr(env, PPCInstr_AvBinary(Pav_AND, vIsNan, expt, mnts));
   return vIsNan;
}


/*---------------------------------------------------------*/
/*--- ISEL: Integer expressions (64/32/16/8 bit)        ---*/
/*---------------------------------------------------------*/

/* Select insns for an integer-typed expression, and add them to the
   code list.  Return a reg holding the result.  This reg will be a
   virtual register.  THE RETURNED REG MUST NOT BE MODIFIED.  If you
   want to modify it, ask for a new vreg, copy it in there, and modify
   the copy.  The register allocator will do its best to map both
   vregs to the same real register, so the copies will often disappear
   later in the game.

   This should handle expressions of 64, 32, 16 and 8-bit type.
   All results are returned in a (mode64 ? 64bit : 32bit) register.
   For 16- and 8-bit expressions, the upper (32/48/56 : 16/24) bits
   are arbitrary, so you should mask or sign extend partial values
   if necessary.
*/

static HReg iselWordExpr_R ( ISelEnv* env, IRExpr* e )
{
   /* All real work is done by the worker; this wrapper only checks
      the invariants promised in the contract comment above. */
   HReg r = iselWordExpr_R_wrk(env, e);
   /* sanity checks ... */
#  if 0
   vex_printf("\n"); ppIRExpr(e); vex_printf("\n");
#  endif

   vassert(hregClass(r) == HRcGPR(env->mode64));
   vassert(hregIsVirtual(r));
   return r;
}

/* DO NOT CALL THIS DIRECTLY ! */
static HReg iselWordExpr_R_wrk ( ISelEnv* env, IRExpr* e )
{
   Bool mode64 = env->mode64;
   MatchInfo mi;
   DECLARE_PATTERN(p_32to1_then_1Uto8);

   IRType ty = typeOfIRExpr(env->type_env,e);
   /* 64-bit results are only representable in a single GPR on a
      64-bit host. */
   vassert(ty == Ity_I8 || ty == Ity_I16 ||
           ty == Ity_I32 || ((ty == Ity_I64) && mode64));

   switch (e->tag) {

   /* --------- TEMP --------- */
   case Iex_RdTmp:
      /* The temporary already lives in a vreg; just hand it back. */
      return lookupIRTemp(env, e->Iex.RdTmp.tmp);

   /* --------- LOAD --------- */
   case Iex_Load: {
      HReg      r_dst;
      PPCAMode* am_addr;
      /* Only big-endian loads are selected here; anything else falls
         through to the 'irreducible' failure path. */
      if (e->Iex.Load.end != Iend_BE)
         goto irreducible;
      r_dst   = newVRegI(env);
      am_addr = iselWordExpr_AMode( env, e->Iex.Load.addr, ty/*of xfer*/ );
      addInstr(env, PPCInstr_Load( toUChar(sizeofIRType(ty)),
                                   r_dst, am_addr, mode64 ));
      return r_dst;
      /*NOTREACHED*/
   }

   /* --------- BINARY OP --------- */
   case Iex_Binop: {
      PPCAluOp  aluOp;
      PPCShftOp shftOp;

      /* Is it an addition or logical style op?
*/ 1272 switch (e->Iex.Binop.op) { 1273 case Iop_Add8: case Iop_Add16: case Iop_Add32: case Iop_Add64: 1274 aluOp = Palu_ADD; break; 1275 case Iop_Sub8: case Iop_Sub16: case Iop_Sub32: case Iop_Sub64: 1276 aluOp = Palu_SUB; break; 1277 case Iop_And8: case Iop_And16: case Iop_And32: case Iop_And64: 1278 aluOp = Palu_AND; break; 1279 case Iop_Or8: case Iop_Or16: case Iop_Or32: case Iop_Or64: 1280 aluOp = Palu_OR; break; 1281 case Iop_Xor8: case Iop_Xor16: case Iop_Xor32: case Iop_Xor64: 1282 aluOp = Palu_XOR; break; 1283 default: 1284 aluOp = Palu_INVALID; break; 1285 } 1286 /* For commutative ops we assume any literal 1287 values are on the second operand. */ 1288 if (aluOp != Palu_INVALID) { 1289 HReg r_dst = newVRegI(env); 1290 HReg r_srcL = iselWordExpr_R(env, e->Iex.Binop.arg1); 1291 PPCRH* ri_srcR = NULL; 1292 /* get right arg into an RH, in the appropriate way */ 1293 switch (aluOp) { 1294 case Palu_ADD: case Palu_SUB: 1295 ri_srcR = iselWordExpr_RH(env, True/*signed*/, 1296 e->Iex.Binop.arg2); 1297 break; 1298 case Palu_AND: case Palu_OR: case Palu_XOR: 1299 ri_srcR = iselWordExpr_RH(env, False/*signed*/, 1300 e->Iex.Binop.arg2); 1301 break; 1302 default: 1303 vpanic("iselWordExpr_R_wrk-aluOp-arg2"); 1304 } 1305 addInstr(env, PPCInstr_Alu(aluOp, r_dst, r_srcL, ri_srcR)); 1306 return r_dst; 1307 } 1308 1309 /* a shift? */ 1310 switch (e->Iex.Binop.op) { 1311 case Iop_Shl8: case Iop_Shl16: case Iop_Shl32: case Iop_Shl64: 1312 shftOp = Pshft_SHL; break; 1313 case Iop_Shr8: case Iop_Shr16: case Iop_Shr32: case Iop_Shr64: 1314 shftOp = Pshft_SHR; break; 1315 case Iop_Sar8: case Iop_Sar16: case Iop_Sar32: case Iop_Sar64: 1316 shftOp = Pshft_SAR; break; 1317 default: 1318 shftOp = Pshft_INVALID; break; 1319 } 1320 /* we assume any literal values are on the second operand. 
*/ 1321 if (shftOp != Pshft_INVALID) { 1322 HReg r_dst = newVRegI(env); 1323 HReg r_srcL = iselWordExpr_R(env, e->Iex.Binop.arg1); 1324 PPCRH* ri_srcR = NULL; 1325 /* get right arg into an RH, in the appropriate way */ 1326 switch (shftOp) { 1327 case Pshft_SHL: case Pshft_SHR: case Pshft_SAR: 1328 if (!mode64) 1329 ri_srcR = iselWordExpr_RH5u(env, e->Iex.Binop.arg2); 1330 else 1331 ri_srcR = iselWordExpr_RH6u(env, e->Iex.Binop.arg2); 1332 break; 1333 default: 1334 vpanic("iselIntExpr_R_wrk-shftOp-arg2"); 1335 } 1336 /* widen the left arg if needed */ 1337 if (shftOp == Pshft_SHR || shftOp == Pshft_SAR) { 1338 if (ty == Ity_I8 || ty == Ity_I16) { 1339 PPCRH* amt = PPCRH_Imm(False, 1340 toUShort(ty == Ity_I8 ? 24 : 16)); 1341 HReg tmp = newVRegI(env); 1342 addInstr(env, PPCInstr_Shft(Pshft_SHL, 1343 True/*32bit shift*/, 1344 tmp, r_srcL, amt)); 1345 addInstr(env, PPCInstr_Shft(shftOp, 1346 True/*32bit shift*/, 1347 tmp, tmp, amt)); 1348 r_srcL = tmp; 1349 vassert(0); /* AWAITING TEST CASE */ 1350 } 1351 } 1352 /* Only 64 expressions need 64bit shifts, 1353 32bit shifts are fine for all others */ 1354 if (ty == Ity_I64) { 1355 vassert(mode64); 1356 addInstr(env, PPCInstr_Shft(shftOp, False/*64bit shift*/, 1357 r_dst, r_srcL, ri_srcR)); 1358 } else { 1359 addInstr(env, PPCInstr_Shft(shftOp, True/*32bit shift*/, 1360 r_dst, r_srcL, ri_srcR)); 1361 } 1362 return r_dst; 1363 } 1364 1365 /* How about a div? */ 1366 if (e->Iex.Binop.op == Iop_DivS32 || 1367 e->Iex.Binop.op == Iop_DivU32 || 1368 e->Iex.Binop.op == Iop_DivS32E || 1369 e->Iex.Binop.op == Iop_DivU32E) { 1370 Bool syned = toBool((e->Iex.Binop.op == Iop_DivS32) || (e->Iex.Binop.op == Iop_DivS32E)); 1371 HReg r_dst = newVRegI(env); 1372 HReg r_srcL = iselWordExpr_R(env, e->Iex.Binop.arg1); 1373 HReg r_srcR = iselWordExpr_R(env, e->Iex.Binop.arg2); 1374 addInstr( env, 1375 PPCInstr_Div( ( ( e->Iex.Binop.op == Iop_DivU32E ) 1376 || ( e->Iex.Binop.op == Iop_DivS32E ) ) ? 
True 1377 : False, 1378 syned, 1379 True/*32bit div*/, 1380 r_dst, 1381 r_srcL, 1382 r_srcR ) ); 1383 return r_dst; 1384 } 1385 if (e->Iex.Binop.op == Iop_DivS64 || 1386 e->Iex.Binop.op == Iop_DivU64 || e->Iex.Binop.op == Iop_DivS64E 1387 || e->Iex.Binop.op == Iop_DivU64E ) { 1388 Bool syned = toBool((e->Iex.Binop.op == Iop_DivS64) ||(e->Iex.Binop.op == Iop_DivS64E)); 1389 HReg r_dst = newVRegI(env); 1390 HReg r_srcL = iselWordExpr_R(env, e->Iex.Binop.arg1); 1391 HReg r_srcR = iselWordExpr_R(env, e->Iex.Binop.arg2); 1392 vassert(mode64); 1393 addInstr( env, 1394 PPCInstr_Div( ( ( e->Iex.Binop.op == Iop_DivS64E ) 1395 || ( e->Iex.Binop.op 1396 == Iop_DivU64E ) ) ? True 1397 : False, 1398 syned, 1399 False/*64bit div*/, 1400 r_dst, 1401 r_srcL, 1402 r_srcR ) ); 1403 return r_dst; 1404 } 1405 1406 /* No? Anyone for a mul? */ 1407 if (e->Iex.Binop.op == Iop_Mul32 1408 || e->Iex.Binop.op == Iop_Mul64) { 1409 Bool syned = False; 1410 Bool sz32 = (e->Iex.Binop.op != Iop_Mul64); 1411 HReg r_dst = newVRegI(env); 1412 HReg r_srcL = iselWordExpr_R(env, e->Iex.Binop.arg1); 1413 HReg r_srcR = iselWordExpr_R(env, e->Iex.Binop.arg2); 1414 addInstr(env, PPCInstr_MulL(syned, False/*lo32*/, sz32, 1415 r_dst, r_srcL, r_srcR)); 1416 return r_dst; 1417 } 1418 1419 /* 32 x 32 -> 64 multiply */ 1420 if (mode64 1421 && (e->Iex.Binop.op == Iop_MullU32 1422 || e->Iex.Binop.op == Iop_MullS32)) { 1423 HReg tLo = newVRegI(env); 1424 HReg tHi = newVRegI(env); 1425 HReg r_dst = newVRegI(env); 1426 Bool syned = toBool(e->Iex.Binop.op == Iop_MullS32); 1427 HReg r_srcL = iselWordExpr_R(env, e->Iex.Binop.arg1); 1428 HReg r_srcR = iselWordExpr_R(env, e->Iex.Binop.arg2); 1429 addInstr(env, PPCInstr_MulL(False/*signedness irrelevant*/, 1430 False/*lo32*/, True/*32bit mul*/, 1431 tLo, r_srcL, r_srcR)); 1432 addInstr(env, PPCInstr_MulL(syned, 1433 True/*hi32*/, True/*32bit mul*/, 1434 tHi, r_srcL, r_srcR)); 1435 addInstr(env, PPCInstr_Shft(Pshft_SHL, False/*64bit shift*/, 1436 r_dst, tHi, 
PPCRH_Imm(False,32))); 1437 addInstr(env, PPCInstr_Alu(Palu_OR, 1438 r_dst, r_dst, PPCRH_Reg(tLo))); 1439 return r_dst; 1440 } 1441 1442 /* El-mutanto 3-way compare? */ 1443 if (e->Iex.Binop.op == Iop_CmpORD32S 1444 || e->Iex.Binop.op == Iop_CmpORD32U) { 1445 Bool syned = toBool(e->Iex.Binop.op == Iop_CmpORD32S); 1446 HReg dst = newVRegI(env); 1447 HReg srcL = iselWordExpr_R(env, e->Iex.Binop.arg1); 1448 PPCRH* srcR = iselWordExpr_RH(env, syned, e->Iex.Binop.arg2); 1449 addInstr(env, PPCInstr_Cmp(syned, True/*32bit cmp*/, 1450 7/*cr*/, srcL, srcR)); 1451 addInstr(env, PPCInstr_MfCR(dst)); 1452 addInstr(env, PPCInstr_Alu(Palu_AND, dst, dst, 1453 PPCRH_Imm(False,7<<1))); 1454 return dst; 1455 } 1456 1457 if (e->Iex.Binop.op == Iop_CmpORD64S 1458 || e->Iex.Binop.op == Iop_CmpORD64U) { 1459 Bool syned = toBool(e->Iex.Binop.op == Iop_CmpORD64S); 1460 HReg dst = newVRegI(env); 1461 HReg srcL = iselWordExpr_R(env, e->Iex.Binop.arg1); 1462 PPCRH* srcR = iselWordExpr_RH(env, syned, e->Iex.Binop.arg2); 1463 vassert(mode64); 1464 addInstr(env, PPCInstr_Cmp(syned, False/*64bit cmp*/, 1465 7/*cr*/, srcL, srcR)); 1466 addInstr(env, PPCInstr_MfCR(dst)); 1467 addInstr(env, PPCInstr_Alu(Palu_AND, dst, dst, 1468 PPCRH_Imm(False,7<<1))); 1469 return dst; 1470 } 1471 1472 if (e->Iex.Binop.op == Iop_Max32U) { 1473 HReg r1 = iselWordExpr_R(env, e->Iex.Binop.arg1); 1474 HReg r2 = iselWordExpr_R(env, e->Iex.Binop.arg2); 1475 HReg rdst = newVRegI(env); 1476 PPCCondCode cc = mk_PPCCondCode( Pct_TRUE, Pcf_7LT ); 1477 addInstr(env, mk_iMOVds_RR(rdst, r1)); 1478 addInstr(env, PPCInstr_Cmp(False/*unsigned*/, True/*32bit cmp*/, 1479 7/*cr*/, rdst, PPCRH_Reg(r2))); 1480 addInstr(env, PPCInstr_CMov(cc, rdst, PPCRI_Reg(r2))); 1481 return rdst; 1482 } 1483 1484 if (e->Iex.Binop.op == Iop_32HLto64) { 1485 HReg r_Hi = iselWordExpr_R(env, e->Iex.Binop.arg1); 1486 HReg r_Lo = iselWordExpr_R(env, e->Iex.Binop.arg2); 1487 HReg r_dst = newVRegI(env); 1488 HReg msk = newVRegI(env); 1489 vassert(mode64); 
1490 /* r_dst = OR( r_Hi<<32, r_Lo ) */ 1491 addInstr(env, PPCInstr_Shft(Pshft_SHL, False/*64bit shift*/, 1492 r_dst, r_Hi, PPCRH_Imm(False,32))); 1493 addInstr(env, PPCInstr_LI(msk, 0xFFFFFFFF, mode64)); 1494 addInstr(env, PPCInstr_Alu( Palu_AND, r_Lo, r_Lo, 1495 PPCRH_Reg(msk) )); 1496 addInstr(env, PPCInstr_Alu( Palu_OR, r_dst, r_dst, 1497 PPCRH_Reg(r_Lo) )); 1498 return r_dst; 1499 } 1500 1501 if ((e->Iex.Binop.op == Iop_CmpF64) || 1502 (e->Iex.Binop.op == Iop_CmpD64) || 1503 (e->Iex.Binop.op == Iop_CmpD128)) { 1504 HReg fr_srcL; 1505 HReg fr_srcL_lo; 1506 HReg fr_srcR; 1507 HReg fr_srcR_lo; 1508 1509 HReg r_ccPPC = newVRegI(env); 1510 HReg r_ccIR = newVRegI(env); 1511 HReg r_ccIR_b0 = newVRegI(env); 1512 HReg r_ccIR_b2 = newVRegI(env); 1513 HReg r_ccIR_b6 = newVRegI(env); 1514 1515 if (e->Iex.Binop.op == Iop_CmpF64) { 1516 fr_srcL = iselDblExpr(env, e->Iex.Binop.arg1); 1517 fr_srcR = iselDblExpr(env, e->Iex.Binop.arg2); 1518 addInstr(env, PPCInstr_FpCmp(r_ccPPC, fr_srcL, fr_srcR)); 1519 1520 } else if (e->Iex.Binop.op == Iop_CmpD64) { 1521 fr_srcL = iselDfp64Expr(env, e->Iex.Binop.arg1); 1522 fr_srcR = iselDfp64Expr(env, e->Iex.Binop.arg2); 1523 addInstr(env, PPCInstr_Dfp64Cmp(r_ccPPC, fr_srcL, fr_srcR)); 1524 1525 } else { // e->Iex.Binop.op == Iop_CmpD128 1526 iselDfp128Expr(&fr_srcL, &fr_srcL_lo, env, e->Iex.Binop.arg1); 1527 iselDfp128Expr(&fr_srcR, &fr_srcR_lo, env, e->Iex.Binop.arg2); 1528 addInstr(env, PPCInstr_Dfp128Cmp(r_ccPPC, fr_srcL, fr_srcL_lo, 1529 fr_srcR, fr_srcR_lo)); 1530 } 1531 1532 /* Map compare result from PPC to IR, 1533 conforming to CmpF64 definition. 
*/ 1534 /* 1535 FP cmp result | PPC | IR 1536 -------------------------- 1537 UN | 0x1 | 0x45 1538 EQ | 0x2 | 0x40 1539 GT | 0x4 | 0x00 1540 LT | 0x8 | 0x01 1541 */ 1542 1543 // r_ccIR_b0 = r_ccPPC[0] | r_ccPPC[3] 1544 addInstr(env, PPCInstr_Shft(Pshft_SHR, True/*32bit shift*/, 1545 r_ccIR_b0, r_ccPPC, 1546 PPCRH_Imm(False,0x3))); 1547 addInstr(env, PPCInstr_Alu(Palu_OR, r_ccIR_b0, 1548 r_ccPPC, PPCRH_Reg(r_ccIR_b0))); 1549 addInstr(env, PPCInstr_Alu(Palu_AND, r_ccIR_b0, 1550 r_ccIR_b0, PPCRH_Imm(False,0x1))); 1551 1552 // r_ccIR_b2 = r_ccPPC[0] 1553 addInstr(env, PPCInstr_Shft(Pshft_SHL, True/*32bit shift*/, 1554 r_ccIR_b2, r_ccPPC, 1555 PPCRH_Imm(False,0x2))); 1556 addInstr(env, PPCInstr_Alu(Palu_AND, r_ccIR_b2, 1557 r_ccIR_b2, PPCRH_Imm(False,0x4))); 1558 1559 // r_ccIR_b6 = r_ccPPC[0] | r_ccPPC[1] 1560 addInstr(env, PPCInstr_Shft(Pshft_SHR, True/*32bit shift*/, 1561 r_ccIR_b6, r_ccPPC, 1562 PPCRH_Imm(False,0x1))); 1563 addInstr(env, PPCInstr_Alu(Palu_OR, r_ccIR_b6, 1564 r_ccPPC, PPCRH_Reg(r_ccIR_b6))); 1565 addInstr(env, PPCInstr_Shft(Pshft_SHL, True/*32bit shift*/, 1566 r_ccIR_b6, r_ccIR_b6, 1567 PPCRH_Imm(False,0x6))); 1568 addInstr(env, PPCInstr_Alu(Palu_AND, r_ccIR_b6, 1569 r_ccIR_b6, PPCRH_Imm(False,0x40))); 1570 1571 // r_ccIR = r_ccIR_b0 | r_ccIR_b2 | r_ccIR_b6 1572 addInstr(env, PPCInstr_Alu(Palu_OR, r_ccIR, 1573 r_ccIR_b0, PPCRH_Reg(r_ccIR_b2))); 1574 addInstr(env, PPCInstr_Alu(Palu_OR, r_ccIR, 1575 r_ccIR, PPCRH_Reg(r_ccIR_b6))); 1576 return r_ccIR; 1577 } 1578 1579 if ( e->Iex.Binop.op == Iop_F64toI32S || 1580 e->Iex.Binop.op == Iop_F64toI32U ) { 1581 /* This works in both mode64 and mode32. 
*/ 1582 HReg r1 = StackFramePtr(env->mode64); 1583 PPCAMode* zero_r1 = PPCAMode_IR( 0, r1 ); 1584 HReg fsrc = iselDblExpr(env, e->Iex.Binop.arg2); 1585 HReg ftmp = newVRegF(env); 1586 HReg idst = newVRegI(env); 1587 1588 /* Set host rounding mode */ 1589 set_FPU_rounding_mode( env, e->Iex.Binop.arg1 ); 1590 1591 sub_from_sp( env, 16 ); 1592 addInstr(env, PPCInstr_FpCftI(False/*F->I*/, True/*int32*/, 1593 e->Iex.Binop.op == Iop_F64toI32S ? True/*syned*/ 1594 : False, 1595 True/*flt64*/, 1596 ftmp, fsrc)); 1597 addInstr(env, PPCInstr_FpSTFIW(r1, ftmp)); 1598 addInstr(env, PPCInstr_Load(4, idst, zero_r1, mode64)); 1599 1600 /* in 64-bit mode we need to sign-widen idst. */ 1601 if (mode64) 1602 addInstr(env, PPCInstr_Unary(Pun_EXTSW, idst, idst)); 1603 1604 add_to_sp( env, 16 ); 1605 1606 ///* Restore default FPU rounding. */ 1607 //set_FPU_rounding_default( env ); 1608 return idst; 1609 } 1610 1611 if (e->Iex.Binop.op == Iop_F64toI64S || e->Iex.Binop.op == Iop_F64toI64U ) { 1612 if (mode64) { 1613 HReg r1 = StackFramePtr(env->mode64); 1614 PPCAMode* zero_r1 = PPCAMode_IR( 0, r1 ); 1615 HReg fsrc = iselDblExpr(env, e->Iex.Binop.arg2); 1616 HReg idst = newVRegI(env); 1617 HReg ftmp = newVRegF(env); 1618 1619 /* Set host rounding mode */ 1620 set_FPU_rounding_mode( env, e->Iex.Binop.arg1 ); 1621 1622 sub_from_sp( env, 16 ); 1623 addInstr(env, PPCInstr_FpCftI(False/*F->I*/, False/*int64*/, 1624 ( e->Iex.Binop.op == Iop_F64toI64S ) ? True 1625 : False, 1626 True, ftmp, fsrc)); 1627 addInstr(env, PPCInstr_FpLdSt(False/*store*/, 8, ftmp, zero_r1)); 1628 addInstr(env, PPCInstr_Load(8, idst, zero_r1, True/*mode64*/)); 1629 add_to_sp( env, 16 ); 1630 1631 ///* Restore default FPU rounding. 
*/ 1632 //set_FPU_rounding_default( env ); 1633 return idst; 1634 } 1635 } 1636 1637 break; 1638 } 1639 1640 /* --------- UNARY OP --------- */ 1641 case Iex_Unop: { 1642 IROp op_unop = e->Iex.Unop.op; 1643 1644 /* 1Uto8(32to1(expr32)) */ 1645 DEFINE_PATTERN(p_32to1_then_1Uto8, 1646 unop(Iop_1Uto8,unop(Iop_32to1,bind(0)))); 1647 if (matchIRExpr(&mi,p_32to1_then_1Uto8,e)) { 1648 IRExpr* expr32 = mi.bindee[0]; 1649 HReg r_dst = newVRegI(env); 1650 HReg r_src = iselWordExpr_R(env, expr32); 1651 addInstr(env, PPCInstr_Alu(Palu_AND, r_dst, 1652 r_src, PPCRH_Imm(False,1))); 1653 return r_dst; 1654 } 1655 1656 /* 16Uto32(LDbe:I16(expr32)) */ 1657 { 1658 DECLARE_PATTERN(p_LDbe16_then_16Uto32); 1659 DEFINE_PATTERN(p_LDbe16_then_16Uto32, 1660 unop(Iop_16Uto32, 1661 IRExpr_Load(Iend_BE,Ity_I16,bind(0))) ); 1662 if (matchIRExpr(&mi,p_LDbe16_then_16Uto32,e)) { 1663 HReg r_dst = newVRegI(env); 1664 PPCAMode* amode 1665 = iselWordExpr_AMode( env, mi.bindee[0], Ity_I16/*xfer*/ ); 1666 addInstr(env, PPCInstr_Load(2,r_dst,amode, mode64)); 1667 return r_dst; 1668 } 1669 } 1670 1671 switch (op_unop) { 1672 case Iop_8Uto16: 1673 case Iop_8Uto32: 1674 case Iop_8Uto64: 1675 case Iop_16Uto32: 1676 case Iop_16Uto64: { 1677 HReg r_dst = newVRegI(env); 1678 HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg); 1679 UShort mask = toUShort(op_unop==Iop_16Uto64 ? 0xFFFF : 1680 op_unop==Iop_16Uto32 ? 
0xFFFF : 0xFF); 1681 addInstr(env, PPCInstr_Alu(Palu_AND,r_dst,r_src, 1682 PPCRH_Imm(False,mask))); 1683 return r_dst; 1684 } 1685 case Iop_32Uto64: { 1686 HReg r_dst = newVRegI(env); 1687 HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg); 1688 vassert(mode64); 1689 addInstr(env, 1690 PPCInstr_Shft(Pshft_SHL, False/*64bit shift*/, 1691 r_dst, r_src, PPCRH_Imm(False,32))); 1692 addInstr(env, 1693 PPCInstr_Shft(Pshft_SHR, False/*64bit shift*/, 1694 r_dst, r_dst, PPCRH_Imm(False,32))); 1695 return r_dst; 1696 } 1697 case Iop_8Sto16: 1698 case Iop_8Sto32: 1699 case Iop_16Sto32: { 1700 HReg r_dst = newVRegI(env); 1701 HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg); 1702 UShort amt = toUShort(op_unop==Iop_16Sto32 ? 16 : 24); 1703 addInstr(env, 1704 PPCInstr_Shft(Pshft_SHL, True/*32bit shift*/, 1705 r_dst, r_src, PPCRH_Imm(False,amt))); 1706 addInstr(env, 1707 PPCInstr_Shft(Pshft_SAR, True/*32bit shift*/, 1708 r_dst, r_dst, PPCRH_Imm(False,amt))); 1709 return r_dst; 1710 } 1711 case Iop_8Sto64: 1712 case Iop_16Sto64: { 1713 HReg r_dst = newVRegI(env); 1714 HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg); 1715 UShort amt = toUShort(op_unop==Iop_8Sto64 ? 56 : 48); 1716 vassert(mode64); 1717 addInstr(env, 1718 PPCInstr_Shft(Pshft_SHL, False/*64bit shift*/, 1719 r_dst, r_src, PPCRH_Imm(False,amt))); 1720 addInstr(env, 1721 PPCInstr_Shft(Pshft_SAR, False/*64bit shift*/, 1722 r_dst, r_dst, PPCRH_Imm(False,amt))); 1723 return r_dst; 1724 } 1725 case Iop_32Sto64: { 1726 HReg r_dst = newVRegI(env); 1727 HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg); 1728 vassert(mode64); 1729 /* According to the IBM docs, in 64 bit mode, srawi r,r,0 1730 sign extends the lower 32 bits into the upper 32 bits. 
*/ 1731 addInstr(env, 1732 PPCInstr_Shft(Pshft_SAR, True/*32bit shift*/, 1733 r_dst, r_src, PPCRH_Imm(False,0))); 1734 return r_dst; 1735 } 1736 case Iop_Not8: 1737 case Iop_Not16: 1738 case Iop_Not32: 1739 case Iop_Not64: { 1740 if (op_unop == Iop_Not64) vassert(mode64); 1741 HReg r_dst = newVRegI(env); 1742 HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg); 1743 addInstr(env, PPCInstr_Unary(Pun_NOT,r_dst,r_src)); 1744 return r_dst; 1745 } 1746 case Iop_64HIto32: { 1747 if (!mode64) { 1748 HReg rHi, rLo; 1749 iselInt64Expr(&rHi,&rLo, env, e->Iex.Unop.arg); 1750 return rHi; /* and abandon rLo .. poor wee thing :-) */ 1751 } else { 1752 HReg r_dst = newVRegI(env); 1753 HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg); 1754 addInstr(env, 1755 PPCInstr_Shft(Pshft_SHR, False/*64bit shift*/, 1756 r_dst, r_src, PPCRH_Imm(False,32))); 1757 return r_dst; 1758 } 1759 } 1760 case Iop_64to32: { 1761 if (!mode64) { 1762 HReg rHi, rLo; 1763 iselInt64Expr(&rHi,&rLo, env, e->Iex.Unop.arg); 1764 return rLo; /* similar stupid comment to the above ... */ 1765 } else { 1766 /* This is a no-op. */ 1767 return iselWordExpr_R(env, e->Iex.Unop.arg); 1768 } 1769 } 1770 case Iop_64to16: { 1771 if (mode64) { /* This is a no-op. */ 1772 return iselWordExpr_R(env, e->Iex.Unop.arg); 1773 } 1774 break; /* evidently not used in 32-bit mode */ 1775 } 1776 case Iop_16HIto8: 1777 case Iop_32HIto16: { 1778 HReg r_dst = newVRegI(env); 1779 HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg); 1780 UShort shift = toUShort(op_unop == Iop_16HIto8 ? 8 : 16); 1781 addInstr(env, 1782 PPCInstr_Shft(Pshft_SHR, True/*32bit shift*/, 1783 r_dst, r_src, PPCRH_Imm(False,shift))); 1784 return r_dst; 1785 } 1786 case Iop_128HIto64: 1787 if (mode64) { 1788 HReg rHi, rLo; 1789 iselInt128Expr(&rHi,&rLo, env, e->Iex.Unop.arg); 1790 return rHi; /* and abandon rLo .. 
poor wee thing :-) */ 1791 } 1792 break; 1793 case Iop_128to64: 1794 if (mode64) { 1795 HReg rHi, rLo; 1796 iselInt128Expr(&rHi,&rLo, env, e->Iex.Unop.arg); 1797 return rLo; /* similar stupid comment to the above ... */ 1798 } 1799 break; 1800 case Iop_1Uto64: 1801 case Iop_1Uto32: 1802 case Iop_1Uto8: 1803 if ((op_unop != Iop_1Uto64) || mode64) { 1804 HReg r_dst = newVRegI(env); 1805 PPCCondCode cond = iselCondCode(env, e->Iex.Unop.arg); 1806 addInstr(env, PPCInstr_Set(cond,r_dst)); 1807 return r_dst; 1808 } 1809 break; 1810 case Iop_1Sto8: 1811 case Iop_1Sto16: 1812 case Iop_1Sto32: { 1813 /* could do better than this, but for now ... */ 1814 HReg r_dst = newVRegI(env); 1815 PPCCondCode cond = iselCondCode(env, e->Iex.Unop.arg); 1816 addInstr(env, PPCInstr_Set(cond,r_dst)); 1817 addInstr(env, 1818 PPCInstr_Shft(Pshft_SHL, True/*32bit shift*/, 1819 r_dst, r_dst, PPCRH_Imm(False,31))); 1820 addInstr(env, 1821 PPCInstr_Shft(Pshft_SAR, True/*32bit shift*/, 1822 r_dst, r_dst, PPCRH_Imm(False,31))); 1823 return r_dst; 1824 } 1825 case Iop_1Sto64: 1826 if (mode64) { 1827 /* could do better than this, but for now ... */ 1828 HReg r_dst = newVRegI(env); 1829 PPCCondCode cond = iselCondCode(env, e->Iex.Unop.arg); 1830 addInstr(env, PPCInstr_Set(cond,r_dst)); 1831 addInstr(env, PPCInstr_Shft(Pshft_SHL, False/*64bit shift*/, 1832 r_dst, r_dst, PPCRH_Imm(False,63))); 1833 addInstr(env, PPCInstr_Shft(Pshft_SAR, False/*64bit shift*/, 1834 r_dst, r_dst, PPCRH_Imm(False,63))); 1835 return r_dst; 1836 } 1837 break; 1838 case Iop_Clz32: 1839 case Iop_Clz64: { 1840 HReg r_src, r_dst; 1841 PPCUnaryOp op_clz = (op_unop == Iop_Clz32) ? Pun_CLZ32 : 1842 Pun_CLZ64; 1843 if (op_unop == Iop_Clz64 && !mode64) 1844 goto irreducible; 1845 /* Count leading zeroes. 
*/ 1846 r_dst = newVRegI(env); 1847 r_src = iselWordExpr_R(env, e->Iex.Unop.arg); 1848 addInstr(env, PPCInstr_Unary(op_clz,r_dst,r_src)); 1849 return r_dst; 1850 } 1851 1852 case Iop_Left8: 1853 case Iop_Left32: 1854 case Iop_Left64: { 1855 HReg r_src, r_dst; 1856 if (op_unop == Iop_Left64 && !mode64) 1857 goto irreducible; 1858 r_dst = newVRegI(env); 1859 r_src = iselWordExpr_R(env, e->Iex.Unop.arg); 1860 addInstr(env, PPCInstr_Unary(Pun_NEG,r_dst,r_src)); 1861 addInstr(env, PPCInstr_Alu(Palu_OR, r_dst, r_dst, PPCRH_Reg(r_src))); 1862 return r_dst; 1863 } 1864 1865 case Iop_CmpwNEZ32: { 1866 HReg r_dst = newVRegI(env); 1867 HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg); 1868 addInstr(env, PPCInstr_Unary(Pun_NEG,r_dst,r_src)); 1869 addInstr(env, PPCInstr_Alu(Palu_OR, r_dst, r_dst, PPCRH_Reg(r_src))); 1870 addInstr(env, PPCInstr_Shft(Pshft_SAR, True/*32bit shift*/, 1871 r_dst, r_dst, PPCRH_Imm(False, 31))); 1872 return r_dst; 1873 } 1874 1875 case Iop_CmpwNEZ64: { 1876 HReg r_dst = newVRegI(env); 1877 HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg); 1878 if (!mode64) goto irreducible; 1879 addInstr(env, PPCInstr_Unary(Pun_NEG,r_dst,r_src)); 1880 addInstr(env, PPCInstr_Alu(Palu_OR, r_dst, r_dst, PPCRH_Reg(r_src))); 1881 addInstr(env, PPCInstr_Shft(Pshft_SAR, False/*64bit shift*/, 1882 r_dst, r_dst, PPCRH_Imm(False, 63))); 1883 return r_dst; 1884 } 1885 1886 case Iop_V128to32: { 1887 HReg r_aligned16; 1888 HReg dst = newVRegI(env); 1889 HReg vec = iselVecExpr(env, e->Iex.Unop.arg); 1890 PPCAMode *am_off0, *am_off12; 1891 sub_from_sp( env, 32 ); // Move SP down 32 bytes 1892 1893 // get a quadword aligned address within our stack space 1894 r_aligned16 = get_sp_aligned16( env ); 1895 am_off0 = PPCAMode_IR( 0, r_aligned16 ); 1896 am_off12 = PPCAMode_IR( 12,r_aligned16 ); 1897 1898 // store vec, load low word to dst 1899 addInstr(env, 1900 PPCInstr_AvLdSt( False/*store*/, 16, vec, am_off0 )); 1901 addInstr(env, 1902 PPCInstr_Load( 4, dst, am_off12, mode64 )); 1903 
1904 add_to_sp( env, 32 ); // Reset SP 1905 return dst; 1906 } 1907 1908 case Iop_V128to64: 1909 case Iop_V128HIto64: 1910 if (mode64) { 1911 HReg r_aligned16; 1912 HReg dst = newVRegI(env); 1913 HReg vec = iselVecExpr(env, e->Iex.Unop.arg); 1914 PPCAMode *am_off0, *am_off8; 1915 sub_from_sp( env, 32 ); // Move SP down 32 bytes 1916 1917 // get a quadword aligned address within our stack space 1918 r_aligned16 = get_sp_aligned16( env ); 1919 am_off0 = PPCAMode_IR( 0, r_aligned16 ); 1920 am_off8 = PPCAMode_IR( 8 ,r_aligned16 ); 1921 1922 // store vec, load low word (+8) or high (+0) to dst 1923 addInstr(env, 1924 PPCInstr_AvLdSt( False/*store*/, 16, vec, am_off0 )); 1925 addInstr(env, 1926 PPCInstr_Load( 1927 8, dst, 1928 op_unop == Iop_V128HIto64 ? am_off0 : am_off8, 1929 mode64 )); 1930 1931 add_to_sp( env, 32 ); // Reset SP 1932 return dst; 1933 } 1934 break; 1935 case Iop_16to8: 1936 case Iop_32to8: 1937 case Iop_32to16: 1938 case Iop_64to8: 1939 /* These are no-ops. */ 1940 return iselWordExpr_R(env, e->Iex.Unop.arg); 1941 1942 /* ReinterpF64asI64(e) */ 1943 /* Given an IEEE754 double, produce an I64 with the same bit 1944 pattern. */ 1945 case Iop_ReinterpF64asI64: 1946 if (mode64) { 1947 PPCAMode *am_addr; 1948 HReg fr_src = iselDblExpr(env, e->Iex.Unop.arg); 1949 HReg r_dst = newVRegI(env); 1950 1951 sub_from_sp( env, 16 ); // Move SP down 16 bytes 1952 am_addr = PPCAMode_IR( 0, StackFramePtr(mode64) ); 1953 1954 // store as F64 1955 addInstr(env, PPCInstr_FpLdSt( False/*store*/, 8, 1956 fr_src, am_addr )); 1957 // load as Ity_I64 1958 addInstr(env, PPCInstr_Load( 8, r_dst, am_addr, mode64 )); 1959 1960 add_to_sp( env, 16 ); // Reset SP 1961 return r_dst; 1962 } 1963 break; 1964 1965 /* ReinterpF32asI32(e) */ 1966 /* Given an IEEE754 float, produce an I32 with the same bit 1967 pattern. */ 1968 case Iop_ReinterpF32asI32: { 1969 /* I believe this generates correct code for both 32- and 1970 64-bit hosts. 
*/ 1971 PPCAMode *am_addr; 1972 HReg fr_src = iselFltExpr(env, e->Iex.Unop.arg); 1973 HReg r_dst = newVRegI(env); 1974 1975 sub_from_sp( env, 16 ); // Move SP down 16 bytes 1976 am_addr = PPCAMode_IR( 0, StackFramePtr(mode64) ); 1977 1978 // store as F32 1979 addInstr(env, PPCInstr_FpLdSt( False/*store*/, 4, 1980 fr_src, am_addr )); 1981 // load as Ity_I32 1982 addInstr(env, PPCInstr_Load( 4, r_dst, am_addr, mode64 )); 1983 1984 add_to_sp( env, 16 ); // Reset SP 1985 return r_dst; 1986 } 1987 break; 1988 1989 case Iop_ReinterpD64asI64: 1990 if (mode64) { 1991 PPCAMode *am_addr; 1992 HReg fr_src = iselDfp64Expr(env, e->Iex.Unop.arg); 1993 HReg r_dst = newVRegI(env); 1994 1995 sub_from_sp( env, 16 ); // Move SP down 16 bytes 1996 am_addr = PPCAMode_IR( 0, StackFramePtr(mode64) ); 1997 1998 // store as D64 1999 addInstr(env, PPCInstr_FpLdSt( False/*store*/, 8, 2000 fr_src, am_addr )); 2001 // load as Ity_I64 2002 addInstr(env, PPCInstr_Load( 8, r_dst, am_addr, mode64 )); 2003 add_to_sp( env, 16 ); // Reset SP 2004 return r_dst; 2005 } 2006 break; 2007 2008 case Iop_BCDtoDPB: { 2009 PPCCondCode cc; 2010 UInt argiregs; 2011 HReg argregs[1]; 2012 HReg r_dst = newVRegI(env); 2013 Int argreg; 2014 HWord* fdescr; 2015 2016 argiregs = 0; 2017 argreg = 0; 2018 argregs[0] = hregPPC_GPR3(mode64); 2019 2020 argiregs |= (1 << (argreg+3)); 2021 addInstr(env, mk_iMOVds_RR( argregs[argreg++], 2022 iselWordExpr_R(env, e->Iex.Unop.arg) ) ); 2023 2024 cc = mk_PPCCondCode( Pct_ALWAYS, Pcf_NONE ); 2025 2026 fdescr = (HWord*)h_BCDtoDPB; 2027 addInstr(env, PPCInstr_Call( cc, (Addr64)(fdescr[0]), argiregs ) ); 2028 2029 addInstr(env, mk_iMOVds_RR(r_dst, argregs[0])); 2030 return r_dst; 2031 } 2032 2033 case Iop_DPBtoBCD: { 2034 PPCCondCode cc; 2035 UInt argiregs; 2036 HReg argregs[1]; 2037 HReg r_dst = newVRegI(env); 2038 Int argreg; 2039 HWord* fdescr; 2040 2041 argiregs = 0; 2042 argreg = 0; 2043 argregs[0] = hregPPC_GPR3(mode64); 2044 2045 argiregs |= (1 << (argreg+3)); 2046 
addInstr(env, mk_iMOVds_RR( argregs[argreg++], 2047 iselWordExpr_R(env, e->Iex.Unop.arg) ) ); 2048 2049 cc = mk_PPCCondCode( Pct_ALWAYS, Pcf_NONE ); 2050 2051 fdescr = (HWord*)h_DPBtoBCD; 2052 addInstr(env, PPCInstr_Call( cc, (Addr64)(fdescr[0]), argiregs ) ); 2053 2054 addInstr(env, mk_iMOVds_RR(r_dst, argregs[0])); 2055 return r_dst; 2056 } 2057 2058 default: 2059 break; 2060 } 2061 break; 2062 } 2063 2064 /* --------- GET --------- */ 2065 case Iex_Get: { 2066 if (ty == Ity_I8 || ty == Ity_I16 || 2067 ty == Ity_I32 || ((ty == Ity_I64) && mode64)) { 2068 HReg r_dst = newVRegI(env); 2069 PPCAMode* am_addr = PPCAMode_IR( e->Iex.Get.offset, 2070 GuestStatePtr(mode64) ); 2071 addInstr(env, PPCInstr_Load( toUChar(sizeofIRType(ty)), 2072 r_dst, am_addr, mode64 )); 2073 return r_dst; 2074 } 2075 break; 2076 } 2077 2078 case Iex_GetI: { 2079 PPCAMode* src_am 2080 = genGuestArrayOffset( env, e->Iex.GetI.descr, 2081 e->Iex.GetI.ix, e->Iex.GetI.bias ); 2082 HReg r_dst = newVRegI(env); 2083 if (mode64 && ty == Ity_I64) { 2084 addInstr(env, PPCInstr_Load( toUChar(8), 2085 r_dst, src_am, mode64 )); 2086 return r_dst; 2087 } 2088 if ((!mode64) && ty == Ity_I32) { 2089 addInstr(env, PPCInstr_Load( toUChar(4), 2090 r_dst, src_am, mode64 )); 2091 return r_dst; 2092 } 2093 break; 2094 } 2095 2096 /* --------- CCALL --------- */ 2097 case Iex_CCall: { 2098 HReg r_dst = newVRegI(env); 2099 vassert(ty == Ity_I32); 2100 2101 /* be very restrictive for now. Only 32/64-bit ints allowed 2102 for args, and 32 bits for return type. */ 2103 if (e->Iex.CCall.retty != Ity_I32) 2104 goto irreducible; 2105 2106 /* Marshal args, do the call, clear stack. 
*/ 2107 doHelperCall( env, False, NULL, 2108 e->Iex.CCall.cee, e->Iex.CCall.args ); 2109 2110 /* GPR3 now holds the destination address from Pin_Goto */ 2111 addInstr(env, mk_iMOVds_RR(r_dst, hregPPC_GPR3(mode64))); 2112 return r_dst; 2113 } 2114 2115 /* --------- LITERAL --------- */ 2116 /* 32/16/8-bit literals */ 2117 case Iex_Const: { 2118 Long l; 2119 HReg r_dst = newVRegI(env); 2120 IRConst* con = e->Iex.Const.con; 2121 switch (con->tag) { 2122 case Ico_U64: if (!mode64) goto irreducible; 2123 l = (Long) con->Ico.U64; break; 2124 case Ico_U32: l = (Long)(Int) con->Ico.U32; break; 2125 case Ico_U16: l = (Long)(Int)(Short)con->Ico.U16; break; 2126 case Ico_U8: l = (Long)(Int)(Char )con->Ico.U8; break; 2127 default: vpanic("iselIntExpr_R.const(ppc)"); 2128 } 2129 addInstr(env, PPCInstr_LI(r_dst, (ULong)l, mode64)); 2130 return r_dst; 2131 } 2132 2133 /* --------- MULTIPLEX --------- */ 2134 case Iex_Mux0X: { 2135 if ((ty == Ity_I8 || ty == Ity_I16 || 2136 ty == Ity_I32 || ((ty == Ity_I64) && mode64)) && 2137 typeOfIRExpr(env->type_env,e->Iex.Mux0X.cond) == Ity_I8) { 2138 PPCCondCode cc = mk_PPCCondCode( Pct_TRUE, Pcf_7EQ ); 2139 HReg r_cond = iselWordExpr_R(env, e->Iex.Mux0X.cond); 2140 HReg rX = iselWordExpr_R(env, e->Iex.Mux0X.exprX); 2141 PPCRI* r0 = iselWordExpr_RI(env, e->Iex.Mux0X.expr0); 2142 HReg r_dst = newVRegI(env); 2143 HReg r_tmp = newVRegI(env); 2144 addInstr(env, mk_iMOVds_RR(r_dst,rX)); 2145 addInstr(env, PPCInstr_Alu(Palu_AND, r_tmp, 2146 r_cond, PPCRH_Imm(False,0xFF))); 2147 addInstr(env, PPCInstr_Cmp(False/*unsigned*/, True/*32bit cmp*/, 2148 7/*cr*/, r_tmp, PPCRH_Imm(False,0))); 2149 addInstr(env, PPCInstr_CMov(cc,r_dst,r0)); 2150 return r_dst; 2151 } 2152 break; 2153 } 2154 2155 default: 2156 break; 2157 } /* switch (e->tag) */ 2158 2159 2160 /* We get here if no pattern matched. 
*/ 2161 irreducible: 2162 ppIRExpr(e); 2163 vpanic("iselIntExpr_R(ppc): cannot reduce tree"); 2164} 2165 2166 2167/*---------------------------------------------------------*/ 2168/*--- ISEL: Integer expression auxiliaries ---*/ 2169/*---------------------------------------------------------*/ 2170 2171/* --------------------- AMODEs --------------------- */ 2172 2173/* Return an AMode which computes the value of the specified 2174 expression, possibly also adding insns to the code list as a 2175 result. The expression may only be a word-size one. 2176*/ 2177 2178static Bool uInt_fits_in_16_bits ( UInt u ) 2179{ 2180 /* Is u the same as the sign-extend of its lower 16 bits? */ 2181 Int i = u & 0xFFFF; 2182 i <<= 16; 2183 i >>= 16; 2184 return toBool(u == (UInt)i); 2185} 2186 2187static Bool uLong_fits_in_16_bits ( ULong u ) 2188{ 2189 /* Is u the same as the sign-extend of its lower 16 bits? */ 2190 Long i = u & 0xFFFFULL; 2191 i <<= 48; 2192 i >>= 48; 2193 return toBool(u == (ULong)i); 2194} 2195 2196static Bool uLong_is_4_aligned ( ULong u ) 2197{ 2198 return toBool((u & 3ULL) == 0); 2199} 2200 2201static Bool sane_AMode ( ISelEnv* env, PPCAMode* am ) 2202{ 2203 Bool mode64 = env->mode64; 2204 switch (am->tag) { 2205 case Pam_IR: 2206 /* Using uInt_fits_in_16_bits in 64-bit mode seems a bit bogus, 2207 somehow, but I think it's OK. 
*/ 2208 return toBool( hregClass(am->Pam.IR.base) == HRcGPR(mode64) && 2209 hregIsVirtual(am->Pam.IR.base) && 2210 uInt_fits_in_16_bits(am->Pam.IR.index) ); 2211 case Pam_RR: 2212 return toBool( hregClass(am->Pam.RR.base) == HRcGPR(mode64) && 2213 hregIsVirtual(am->Pam.RR.base) && 2214 hregClass(am->Pam.RR.index) == HRcGPR(mode64) && 2215 hregIsVirtual(am->Pam.IR.index) ); 2216 default: 2217 vpanic("sane_AMode: unknown ppc amode tag"); 2218 } 2219} 2220 2221static 2222PPCAMode* iselWordExpr_AMode ( ISelEnv* env, IRExpr* e, IRType xferTy ) 2223{ 2224 PPCAMode* am = iselWordExpr_AMode_wrk(env, e, xferTy); 2225 vassert(sane_AMode(env, am)); 2226 return am; 2227} 2228 2229/* DO NOT CALL THIS DIRECTLY ! */ 2230static PPCAMode* iselWordExpr_AMode_wrk ( ISelEnv* env, IRExpr* e, IRType xferTy ) 2231{ 2232 IRType ty = typeOfIRExpr(env->type_env,e); 2233 2234 if (env->mode64) { 2235 2236 /* If the data load/store type is I32 or I64, this amode might 2237 be destined for use in ld/ldu/lwa/st/stu. In which case 2238 insist that if it comes out as an _IR, the immediate must 2239 have its bottom two bits be zero. This does assume that for 2240 any other type (I8/I16/I128/F32/F64/V128) the amode will not 2241 be parked in any such instruction. But that seems a 2242 reasonable assumption. */ 2243 Bool aligned4imm = toBool(xferTy == Ity_I32 || xferTy == Ity_I64); 2244 2245 vassert(ty == Ity_I64); 2246 2247 /* Add64(expr,i), where i == sign-extend of (i & 0xFFFF) */ 2248 if (e->tag == Iex_Binop 2249 && e->Iex.Binop.op == Iop_Add64 2250 && e->Iex.Binop.arg2->tag == Iex_Const 2251 && e->Iex.Binop.arg2->Iex.Const.con->tag == Ico_U64 2252 && (aligned4imm ? 
uLong_is_4_aligned(e->Iex.Binop.arg2 2253 ->Iex.Const.con->Ico.U64) 2254 : True) 2255 && uLong_fits_in_16_bits(e->Iex.Binop.arg2 2256 ->Iex.Const.con->Ico.U64)) { 2257 return PPCAMode_IR( (Int)e->Iex.Binop.arg2->Iex.Const.con->Ico.U64, 2258 iselWordExpr_R(env, e->Iex.Binop.arg1) ); 2259 } 2260 2261 /* Add64(expr,expr) */ 2262 if (e->tag == Iex_Binop 2263 && e->Iex.Binop.op == Iop_Add64) { 2264 HReg r_base = iselWordExpr_R(env, e->Iex.Binop.arg1); 2265 HReg r_idx = iselWordExpr_R(env, e->Iex.Binop.arg2); 2266 return PPCAMode_RR( r_idx, r_base ); 2267 } 2268 2269 } else { 2270 2271 vassert(ty == Ity_I32); 2272 2273 /* Add32(expr,i), where i == sign-extend of (i & 0xFFFF) */ 2274 if (e->tag == Iex_Binop 2275 && e->Iex.Binop.op == Iop_Add32 2276 && e->Iex.Binop.arg2->tag == Iex_Const 2277 && e->Iex.Binop.arg2->Iex.Const.con->tag == Ico_U32 2278 && uInt_fits_in_16_bits(e->Iex.Binop.arg2 2279 ->Iex.Const.con->Ico.U32)) { 2280 return PPCAMode_IR( (Int)e->Iex.Binop.arg2->Iex.Const.con->Ico.U32, 2281 iselWordExpr_R(env, e->Iex.Binop.arg1) ); 2282 } 2283 2284 /* Add32(expr,expr) */ 2285 if (e->tag == Iex_Binop 2286 && e->Iex.Binop.op == Iop_Add32) { 2287 HReg r_base = iselWordExpr_R(env, e->Iex.Binop.arg1); 2288 HReg r_idx = iselWordExpr_R(env, e->Iex.Binop.arg2); 2289 return PPCAMode_RR( r_idx, r_base ); 2290 } 2291 2292 } 2293 2294 /* Doesn't match anything in particular. Generate it into 2295 a register and use that. */ 2296 return PPCAMode_IR( 0, iselWordExpr_R(env,e) ); 2297} 2298 2299 2300/* --------------------- RH --------------------- */ 2301 2302/* Compute an I8/I16/I32 (and I64, in 64-bit mode) into a RH 2303 (reg-or-halfword-immediate). It's important to specify whether the 2304 immediate is to be regarded as signed or not. If yes, this will 2305 never return -32768 as an immediate; this guaranteed that all 2306 signed immediates that are return can have their sign inverted if 2307 need be. 
*/ 2308 2309static PPCRH* iselWordExpr_RH ( ISelEnv* env, Bool syned, IRExpr* e ) 2310{ 2311 PPCRH* ri = iselWordExpr_RH_wrk(env, syned, e); 2312 /* sanity checks ... */ 2313 switch (ri->tag) { 2314 case Prh_Imm: 2315 vassert(ri->Prh.Imm.syned == syned); 2316 if (syned) 2317 vassert(ri->Prh.Imm.imm16 != 0x8000); 2318 return ri; 2319 case Prh_Reg: 2320 vassert(hregClass(ri->Prh.Reg.reg) == HRcGPR(env->mode64)); 2321 vassert(hregIsVirtual(ri->Prh.Reg.reg)); 2322 return ri; 2323 default: 2324 vpanic("iselIntExpr_RH: unknown ppc RH tag"); 2325 } 2326} 2327 2328/* DO NOT CALL THIS DIRECTLY ! */ 2329static PPCRH* iselWordExpr_RH_wrk ( ISelEnv* env, Bool syned, IRExpr* e ) 2330{ 2331 ULong u; 2332 Long l; 2333 IRType ty = typeOfIRExpr(env->type_env,e); 2334 vassert(ty == Ity_I8 || ty == Ity_I16 || 2335 ty == Ity_I32 || ((ty == Ity_I64) && env->mode64)); 2336 2337 /* special case: immediate */ 2338 if (e->tag == Iex_Const) { 2339 IRConst* con = e->Iex.Const.con; 2340 /* What value are we aiming to generate? */ 2341 switch (con->tag) { 2342 /* Note: Not sign-extending - we carry 'syned' around */ 2343 case Ico_U64: vassert(env->mode64); 2344 u = con->Ico.U64; break; 2345 case Ico_U32: u = 0xFFFFFFFF & con->Ico.U32; break; 2346 case Ico_U16: u = 0x0000FFFF & con->Ico.U16; break; 2347 case Ico_U8: u = 0x000000FF & con->Ico.U8; break; 2348 default: vpanic("iselIntExpr_RH.Iex_Const(ppch)"); 2349 } 2350 l = (Long)u; 2351 /* Now figure out if it's representable. */ 2352 if (!syned && u <= 65535) { 2353 return PPCRH_Imm(False/*unsigned*/, toUShort(u & 0xFFFF)); 2354 } 2355 if (syned && l >= -32767 && l <= 32767) { 2356 return PPCRH_Imm(True/*signed*/, toUShort(u & 0xFFFF)); 2357 } 2358 /* no luck; use the Slow Way. */ 2359 } 2360 2361 /* default case: calculate into a register and return that */ 2362 return PPCRH_Reg( iselWordExpr_R ( env, e ) ); 2363} 2364 2365 2366/* --------------------- RIs --------------------- */ 2367 2368/* Calculate an expression into an PPCRI operand. 
As with 2369 iselIntExpr_R, the expression can have type 32, 16 or 8 bits, or, 2370 in 64-bit mode, 64 bits. */ 2371 2372static PPCRI* iselWordExpr_RI ( ISelEnv* env, IRExpr* e ) 2373{ 2374 PPCRI* ri = iselWordExpr_RI_wrk(env, e); 2375 /* sanity checks ... */ 2376 switch (ri->tag) { 2377 case Pri_Imm: 2378 return ri; 2379 case Pri_Reg: 2380 vassert(hregClass(ri->Pri.Reg) == HRcGPR(env->mode64)); 2381 vassert(hregIsVirtual(ri->Pri.Reg)); 2382 return ri; 2383 default: 2384 vpanic("iselIntExpr_RI: unknown ppc RI tag"); 2385 } 2386} 2387 2388/* DO NOT CALL THIS DIRECTLY ! */ 2389static PPCRI* iselWordExpr_RI_wrk ( ISelEnv* env, IRExpr* e ) 2390{ 2391 Long l; 2392 IRType ty = typeOfIRExpr(env->type_env,e); 2393 vassert(ty == Ity_I8 || ty == Ity_I16 || 2394 ty == Ity_I32 || ((ty == Ity_I64) && env->mode64)); 2395 2396 /* special case: immediate */ 2397 if (e->tag == Iex_Const) { 2398 IRConst* con = e->Iex.Const.con; 2399 switch (con->tag) { 2400 case Ico_U64: vassert(env->mode64); 2401 l = (Long) con->Ico.U64; break; 2402 case Ico_U32: l = (Long)(Int) con->Ico.U32; break; 2403 case Ico_U16: l = (Long)(Int)(Short)con->Ico.U16; break; 2404 case Ico_U8: l = (Long)(Int)(Char )con->Ico.U8; break; 2405 default: vpanic("iselIntExpr_RI.Iex_Const(ppch)"); 2406 } 2407 return PPCRI_Imm((ULong)l); 2408 } 2409 2410 /* default case: calculate into a register and return that */ 2411 return PPCRI_Reg( iselWordExpr_R ( env, e ) ); 2412} 2413 2414 2415/* --------------------- RH5u --------------------- */ 2416 2417/* Compute an I8 into a reg-or-5-bit-unsigned-immediate, the latter 2418 being an immediate in the range 1 .. 31 inclusive. Used for doing 2419 shift amounts. Only used in 32-bit mode. */ 2420 2421static PPCRH* iselWordExpr_RH5u ( ISelEnv* env, IRExpr* e ) 2422{ 2423 PPCRH* ri; 2424 vassert(!env->mode64); 2425 ri = iselWordExpr_RH5u_wrk(env, e); 2426 /* sanity checks ... 
*/ 2427 switch (ri->tag) { 2428 case Prh_Imm: 2429 vassert(ri->Prh.Imm.imm16 >= 1 && ri->Prh.Imm.imm16 <= 31); 2430 vassert(!ri->Prh.Imm.syned); 2431 return ri; 2432 case Prh_Reg: 2433 vassert(hregClass(ri->Prh.Reg.reg) == HRcGPR(env->mode64)); 2434 vassert(hregIsVirtual(ri->Prh.Reg.reg)); 2435 return ri; 2436 default: 2437 vpanic("iselIntExpr_RH5u: unknown ppc RI tag"); 2438 } 2439} 2440 2441/* DO NOT CALL THIS DIRECTLY ! */ 2442static PPCRH* iselWordExpr_RH5u_wrk ( ISelEnv* env, IRExpr* e ) 2443{ 2444 IRType ty = typeOfIRExpr(env->type_env,e); 2445 vassert(ty == Ity_I8); 2446 2447 /* special case: immediate */ 2448 if (e->tag == Iex_Const 2449 && e->Iex.Const.con->tag == Ico_U8 2450 && e->Iex.Const.con->Ico.U8 >= 1 2451 && e->Iex.Const.con->Ico.U8 <= 31) { 2452 return PPCRH_Imm(False/*unsigned*/, e->Iex.Const.con->Ico.U8); 2453 } 2454 2455 /* default case: calculate into a register and return that */ 2456 return PPCRH_Reg( iselWordExpr_R ( env, e ) ); 2457} 2458 2459 2460/* --------------------- RH6u --------------------- */ 2461 2462/* Compute an I8 into a reg-or-6-bit-unsigned-immediate, the latter 2463 being an immediate in the range 1 .. 63 inclusive. Used for doing 2464 shift amounts. Only used in 64-bit mode. */ 2465 2466static PPCRH* iselWordExpr_RH6u ( ISelEnv* env, IRExpr* e ) 2467{ 2468 PPCRH* ri; 2469 vassert(env->mode64); 2470 ri = iselWordExpr_RH6u_wrk(env, e); 2471 /* sanity checks ... */ 2472 switch (ri->tag) { 2473 case Prh_Imm: 2474 vassert(ri->Prh.Imm.imm16 >= 1 && ri->Prh.Imm.imm16 <= 63); 2475 vassert(!ri->Prh.Imm.syned); 2476 return ri; 2477 case Prh_Reg: 2478 vassert(hregClass(ri->Prh.Reg.reg) == HRcGPR(env->mode64)); 2479 vassert(hregIsVirtual(ri->Prh.Reg.reg)); 2480 return ri; 2481 default: 2482 vpanic("iselIntExpr_RH6u: unknown ppc64 RI tag"); 2483 } 2484} 2485 2486/* DO NOT CALL THIS DIRECTLY ! 
*/ 2487static PPCRH* iselWordExpr_RH6u_wrk ( ISelEnv* env, IRExpr* e ) 2488{ 2489 IRType ty = typeOfIRExpr(env->type_env,e); 2490 vassert(ty == Ity_I8); 2491 2492 /* special case: immediate */ 2493 if (e->tag == Iex_Const 2494 && e->Iex.Const.con->tag == Ico_U8 2495 && e->Iex.Const.con->Ico.U8 >= 1 2496 && e->Iex.Const.con->Ico.U8 <= 63) { 2497 return PPCRH_Imm(False/*unsigned*/, e->Iex.Const.con->Ico.U8); 2498 } 2499 2500 /* default case: calculate into a register and return that */ 2501 return PPCRH_Reg( iselWordExpr_R ( env, e ) ); 2502} 2503 2504 2505/* --------------------- CONDCODE --------------------- */ 2506 2507/* Generate code to evaluated a bit-typed expression, returning the 2508 condition code which would correspond when the expression would 2509 notionally have returned 1. */ 2510 2511static PPCCondCode iselCondCode ( ISelEnv* env, IRExpr* e ) 2512{ 2513 /* Uh, there's nothing we can sanity check here, unfortunately. */ 2514 return iselCondCode_wrk(env,e); 2515} 2516 2517/* DO NOT CALL THIS DIRECTLY ! */ 2518static PPCCondCode iselCondCode_wrk ( ISelEnv* env, IRExpr* e ) 2519{ 2520 vassert(e); 2521 vassert(typeOfIRExpr(env->type_env,e) == Ity_I1); 2522 2523 /* Constant 1:Bit */ 2524 if (e->tag == Iex_Const && e->Iex.Const.con->Ico.U1 == True) { 2525 // Make a compare that will always be true: 2526 HReg r_zero = newVRegI(env); 2527 addInstr(env, PPCInstr_LI(r_zero, 0, env->mode64)); 2528 addInstr(env, PPCInstr_Cmp(False/*unsigned*/, True/*32bit cmp*/, 2529 7/*cr*/, r_zero, PPCRH_Reg(r_zero))); 2530 return mk_PPCCondCode( Pct_TRUE, Pcf_7EQ ); 2531 } 2532 2533 /* Not1(...) 
*/ 2534 if (e->tag == Iex_Unop && e->Iex.Unop.op == Iop_Not1) { 2535 /* Generate code for the arg, and negate the test condition */ 2536 PPCCondCode cond = iselCondCode(env, e->Iex.Unop.arg); 2537 cond.test = invertCondTest(cond.test); 2538 return cond; 2539 } 2540 2541 /* --- patterns rooted at: 32to1 or 64to1 --- */ 2542 2543 /* 32to1, 64to1 */ 2544 if (e->tag == Iex_Unop && 2545 (e->Iex.Unop.op == Iop_32to1 || e->Iex.Unop.op == Iop_64to1)) { 2546 HReg src = iselWordExpr_R(env, e->Iex.Unop.arg); 2547 HReg tmp = newVRegI(env); 2548 /* could do better, probably -- andi. */ 2549 addInstr(env, PPCInstr_Alu(Palu_AND, tmp, 2550 src, PPCRH_Imm(False,1))); 2551 addInstr(env, PPCInstr_Cmp(False/*unsigned*/, True/*32bit cmp*/, 2552 7/*cr*/, tmp, PPCRH_Imm(False,1))); 2553 return mk_PPCCondCode( Pct_TRUE, Pcf_7EQ ); 2554 } 2555 2556 /* --- patterns rooted at: CmpNEZ8 --- */ 2557 2558 /* CmpNEZ8(x) */ 2559 /* could do better -- andi. */ 2560 if (e->tag == Iex_Unop 2561 && e->Iex.Unop.op == Iop_CmpNEZ8) { 2562 HReg arg = iselWordExpr_R(env, e->Iex.Unop.arg); 2563 HReg tmp = newVRegI(env); 2564 addInstr(env, PPCInstr_Alu(Palu_AND, tmp, arg, 2565 PPCRH_Imm(False,0xFF))); 2566 addInstr(env, PPCInstr_Cmp(False/*unsigned*/, True/*32bit cmp*/, 2567 7/*cr*/, tmp, PPCRH_Imm(False,0))); 2568 return mk_PPCCondCode( Pct_FALSE, Pcf_7EQ ); 2569 } 2570 2571 /* --- patterns rooted at: CmpNEZ32 --- */ 2572 2573 /* CmpNEZ32(x) */ 2574 if (e->tag == Iex_Unop 2575 && e->Iex.Unop.op == Iop_CmpNEZ32) { 2576 HReg r1 = iselWordExpr_R(env, e->Iex.Unop.arg); 2577 addInstr(env, PPCInstr_Cmp(False/*unsigned*/, True/*32bit cmp*/, 2578 7/*cr*/, r1, PPCRH_Imm(False,0))); 2579 return mk_PPCCondCode( Pct_FALSE, Pcf_7EQ ); 2580 } 2581 2582 /* --- patterns rooted at: Cmp*32* --- */ 2583 2584 /* Cmp*32*(x,y) */ 2585 if (e->tag == Iex_Binop 2586 && (e->Iex.Binop.op == Iop_CmpEQ32 2587 || e->Iex.Binop.op == Iop_CmpNE32 2588 || e->Iex.Binop.op == Iop_CmpLT32S 2589 || e->Iex.Binop.op == Iop_CmpLT32U 2590 || 
e->Iex.Binop.op == Iop_CmpLE32S 2591 || e->Iex.Binop.op == Iop_CmpLE32U)) { 2592 Bool syned = (e->Iex.Binop.op == Iop_CmpLT32S || 2593 e->Iex.Binop.op == Iop_CmpLE32S); 2594 HReg r1 = iselWordExpr_R(env, e->Iex.Binop.arg1); 2595 PPCRH* ri2 = iselWordExpr_RH(env, syned, e->Iex.Binop.arg2); 2596 addInstr(env, PPCInstr_Cmp(syned, True/*32bit cmp*/, 2597 7/*cr*/, r1, ri2)); 2598 2599 switch (e->Iex.Binop.op) { 2600 case Iop_CmpEQ32: return mk_PPCCondCode( Pct_TRUE, Pcf_7EQ ); 2601 case Iop_CmpNE32: return mk_PPCCondCode( Pct_FALSE, Pcf_7EQ ); 2602 case Iop_CmpLT32U: case Iop_CmpLT32S: 2603 return mk_PPCCondCode( Pct_TRUE, Pcf_7LT ); 2604 case Iop_CmpLE32U: case Iop_CmpLE32S: 2605 return mk_PPCCondCode( Pct_FALSE, Pcf_7GT ); 2606 default: vpanic("iselCondCode(ppc): CmpXX32"); 2607 } 2608 } 2609 2610 /* --- patterns rooted at: CmpNEZ64 --- */ 2611 2612 /* CmpNEZ64 */ 2613 if (e->tag == Iex_Unop 2614 && e->Iex.Unop.op == Iop_CmpNEZ64) { 2615 if (!env->mode64) { 2616 HReg hi, lo; 2617 HReg tmp = newVRegI(env); 2618 iselInt64Expr( &hi, &lo, env, e->Iex.Unop.arg ); 2619 addInstr(env, PPCInstr_Alu(Palu_OR, tmp, lo, PPCRH_Reg(hi))); 2620 addInstr(env, PPCInstr_Cmp(False/*sign*/, True/*32bit cmp*/, 2621 7/*cr*/, tmp,PPCRH_Imm(False,0))); 2622 return mk_PPCCondCode( Pct_FALSE, Pcf_7EQ ); 2623 } else { // mode64 2624 HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg); 2625 addInstr(env, PPCInstr_Cmp(False/*sign*/, False/*64bit cmp*/, 2626 7/*cr*/, r_src,PPCRH_Imm(False,0))); 2627 return mk_PPCCondCode( Pct_FALSE, Pcf_7EQ ); 2628 } 2629 } 2630 2631 /* --- patterns rooted at: Cmp*64* --- */ 2632 2633 /* Cmp*64*(x,y) */ 2634 if (e->tag == Iex_Binop 2635 && (e->Iex.Binop.op == Iop_CmpEQ64 2636 || e->Iex.Binop.op == Iop_CmpNE64 2637 || e->Iex.Binop.op == Iop_CmpLT64S 2638 || e->Iex.Binop.op == Iop_CmpLT64U 2639 || e->Iex.Binop.op == Iop_CmpLE64S 2640 || e->Iex.Binop.op == Iop_CmpLE64U)) { 2641 Bool syned = (e->Iex.Binop.op == Iop_CmpLT64S || 2642 e->Iex.Binop.op == Iop_CmpLE64S); 2643 
HReg r1 = iselWordExpr_R(env, e->Iex.Binop.arg1); 2644 PPCRH* ri2 = iselWordExpr_RH(env, syned, e->Iex.Binop.arg2); 2645 vassert(env->mode64); 2646 addInstr(env, PPCInstr_Cmp(syned, False/*64bit cmp*/, 2647 7/*cr*/, r1, ri2)); 2648 2649 switch (e->Iex.Binop.op) { 2650 case Iop_CmpEQ64: return mk_PPCCondCode( Pct_TRUE, Pcf_7EQ ); 2651 case Iop_CmpNE64: return mk_PPCCondCode( Pct_FALSE, Pcf_7EQ ); 2652 case Iop_CmpLT64U: return mk_PPCCondCode( Pct_TRUE, Pcf_7LT ); 2653 case Iop_CmpLE64U: return mk_PPCCondCode( Pct_FALSE, Pcf_7GT ); 2654 default: vpanic("iselCondCode(ppc): CmpXX64"); 2655 } 2656 } 2657 2658 /* var */ 2659 if (e->tag == Iex_RdTmp) { 2660 HReg r_src = lookupIRTemp(env, e->Iex.RdTmp.tmp); 2661 HReg src_masked = newVRegI(env); 2662 addInstr(env, 2663 PPCInstr_Alu(Palu_AND, src_masked, 2664 r_src, PPCRH_Imm(False,1))); 2665 addInstr(env, 2666 PPCInstr_Cmp(False/*unsigned*/, True/*32bit cmp*/, 2667 7/*cr*/, src_masked, PPCRH_Imm(False,1))); 2668 return mk_PPCCondCode( Pct_TRUE, Pcf_7EQ ); 2669 } 2670 2671 vex_printf("iselCondCode(ppc): No such tag(%u)\n", e->tag); 2672 ppIRExpr(e); 2673 vpanic("iselCondCode(ppc)"); 2674} 2675 2676 2677/*---------------------------------------------------------*/ 2678/*--- ISEL: Integer expressions (128 bit) ---*/ 2679/*---------------------------------------------------------*/ 2680 2681/* 64-bit mode ONLY: compute a 128-bit value into a register pair, 2682 which is returned as the first two parameters. As with 2683 iselWordExpr_R, these may be either real or virtual regs; in any 2684 case they must not be changed by subsequent code emitted by the 2685 caller. 
*/ 2686 2687static void iselInt128Expr ( HReg* rHi, HReg* rLo, 2688 ISelEnv* env, IRExpr* e ) 2689{ 2690 vassert(env->mode64); 2691 iselInt128Expr_wrk(rHi, rLo, env, e); 2692# if 0 2693 vex_printf("\n"); ppIRExpr(e); vex_printf("\n"); 2694# endif 2695 vassert(hregClass(*rHi) == HRcGPR(env->mode64)); 2696 vassert(hregIsVirtual(*rHi)); 2697 vassert(hregClass(*rLo) == HRcGPR(env->mode64)); 2698 vassert(hregIsVirtual(*rLo)); 2699} 2700 2701/* DO NOT CALL THIS DIRECTLY ! */ 2702static void iselInt128Expr_wrk ( HReg* rHi, HReg* rLo, 2703 ISelEnv* env, IRExpr* e ) 2704{ 2705 vassert(e); 2706 vassert(typeOfIRExpr(env->type_env,e) == Ity_I128); 2707 2708 /* read 128-bit IRTemp */ 2709 if (e->tag == Iex_RdTmp) { 2710 lookupIRTempPair( rHi, rLo, env, e->Iex.RdTmp.tmp); 2711 return; 2712 } 2713 2714 /* --------- BINARY ops --------- */ 2715 if (e->tag == Iex_Binop) { 2716 switch (e->Iex.Binop.op) { 2717 /* 64 x 64 -> 128 multiply */ 2718 case Iop_MullU64: 2719 case Iop_MullS64: { 2720 HReg tLo = newVRegI(env); 2721 HReg tHi = newVRegI(env); 2722 Bool syned = toBool(e->Iex.Binop.op == Iop_MullS64); 2723 HReg r_srcL = iselWordExpr_R(env, e->Iex.Binop.arg1); 2724 HReg r_srcR = iselWordExpr_R(env, e->Iex.Binop.arg2); 2725 addInstr(env, PPCInstr_MulL(False/*signedness irrelevant*/, 2726 False/*lo64*/, False/*64bit mul*/, 2727 tLo, r_srcL, r_srcR)); 2728 addInstr(env, PPCInstr_MulL(syned, 2729 True/*hi64*/, False/*64bit mul*/, 2730 tHi, r_srcL, r_srcR)); 2731 *rHi = tHi; 2732 *rLo = tLo; 2733 return; 2734 } 2735 2736 /* 64HLto128(e1,e2) */ 2737 case Iop_64HLto128: 2738 *rHi = iselWordExpr_R(env, e->Iex.Binop.arg1); 2739 *rLo = iselWordExpr_R(env, e->Iex.Binop.arg2); 2740 return; 2741 default: 2742 break; 2743 } 2744 } /* if (e->tag == Iex_Binop) */ 2745 2746 2747 /* --------- UNARY ops --------- */ 2748 if (e->tag == Iex_Unop) { 2749 switch (e->Iex.Unop.op) { 2750 default: 2751 break; 2752 } 2753 } /* if (e->tag == Iex_Unop) */ 2754 2755 vex_printf("iselInt128Expr(ppc64): No such 
tag(%u)\n", e->tag); 2756 ppIRExpr(e); 2757 vpanic("iselInt128Expr(ppc64)"); 2758} 2759 2760 2761/*---------------------------------------------------------*/ 2762/*--- ISEL: Integer expressions (64 bit) ---*/ 2763/*---------------------------------------------------------*/ 2764 2765/* 32-bit mode ONLY: compute a 128-bit value into a register quad */ 2766static void iselInt128Expr_to_32x4 ( HReg* rHi, HReg* rMedHi, HReg* rMedLo, 2767 HReg* rLo, ISelEnv* env, IRExpr* e ) 2768{ 2769 vassert(!env->mode64); 2770 iselInt128Expr_to_32x4_wrk(rHi, rMedHi, rMedLo, rLo, env, e); 2771# if 0 2772 vex_printf("\n"); ppIRExpr(e); vex_printf("\n"); 2773# endif 2774 vassert(hregClass(*rHi) == HRcInt32); 2775 vassert(hregIsVirtual(*rHi)); 2776 vassert(hregClass(*rMedHi) == HRcInt32); 2777 vassert(hregIsVirtual(*rMedHi)); 2778 vassert(hregClass(*rMedLo) == HRcInt32); 2779 vassert(hregIsVirtual(*rMedLo)); 2780 vassert(hregClass(*rLo) == HRcInt32); 2781 vassert(hregIsVirtual(*rLo)); 2782} 2783 2784static void iselInt128Expr_to_32x4_wrk ( HReg* rHi, HReg* rMedHi, 2785 HReg* rMedLo, HReg* rLo, 2786 ISelEnv* env, IRExpr* e ) 2787{ 2788 vassert(e); 2789 vassert(typeOfIRExpr(env->type_env,e) == Ity_I128); 2790 2791 /* read 128-bit IRTemp */ 2792 if (e->tag == Iex_RdTmp) { 2793 lookupIRTempQuad( rHi, rMedHi, rMedLo, rLo, env, e->Iex.RdTmp.tmp); 2794 return; 2795 } 2796 2797 if (e->tag == Iex_Binop) { 2798 2799 IROp op_binop = e->Iex.Binop.op; 2800 switch (op_binop) { 2801 case Iop_64HLto128: 2802 iselInt64Expr(rHi, rMedHi, env, e->Iex.Binop.arg1); 2803 iselInt64Expr(rMedLo, rLo, env, e->Iex.Binop.arg2); 2804 return; 2805 default: 2806 vex_printf("iselInt128Expr_to_32x4_wrk: Binop case 0x%x not found\n", 2807 op_binop); 2808 break; 2809 } 2810 } 2811 2812 vex_printf("iselInt128Expr_to_32x4_wrk: e->tag 0x%x not found\n", e->tag); 2813 return; 2814} 2815 2816/* 32-bit mode ONLY: compute a 64-bit value into a register pair, 2817 which is returned as the first two parameters. 
As with 2818 iselIntExpr_R, these may be either real or virtual regs; in any 2819 case they must not be changed by subsequent code emitted by the 2820 caller. */ 2821 2822static void iselInt64Expr ( HReg* rHi, HReg* rLo, 2823 ISelEnv* env, IRExpr* e ) 2824{ 2825 vassert(!env->mode64); 2826 iselInt64Expr_wrk(rHi, rLo, env, e); 2827# if 0 2828 vex_printf("\n"); ppIRExpr(e); vex_printf("\n"); 2829# endif 2830 vassert(hregClass(*rHi) == HRcInt32); 2831 vassert(hregIsVirtual(*rHi)); 2832 vassert(hregClass(*rLo) == HRcInt32); 2833 vassert(hregIsVirtual(*rLo)); 2834} 2835 2836/* DO NOT CALL THIS DIRECTLY ! */ 2837static void iselInt64Expr_wrk ( HReg* rHi, HReg* rLo, 2838 ISelEnv* env, IRExpr* e ) 2839{ 2840 vassert(e); 2841 vassert(typeOfIRExpr(env->type_env,e) == Ity_I64); 2842 2843 /* 64-bit load */ 2844 if (e->tag == Iex_Load && e->Iex.Load.end == Iend_BE) { 2845 HReg tLo = newVRegI(env); 2846 HReg tHi = newVRegI(env); 2847 HReg r_addr = iselWordExpr_R(env, e->Iex.Load.addr); 2848 vassert(!env->mode64); 2849 addInstr(env, PPCInstr_Load( 4/*byte-load*/, 2850 tHi, PPCAMode_IR( 0, r_addr ), 2851 False/*32-bit insn please*/) ); 2852 addInstr(env, PPCInstr_Load( 4/*byte-load*/, 2853 tLo, PPCAMode_IR( 4, r_addr ), 2854 False/*32-bit insn please*/) ); 2855 *rHi = tHi; 2856 *rLo = tLo; 2857 return; 2858 } 2859 2860 /* 64-bit literal */ 2861 if (e->tag == Iex_Const) { 2862 ULong w64 = e->Iex.Const.con->Ico.U64; 2863 UInt wHi = ((UInt)(w64 >> 32)) & 0xFFFFFFFF; 2864 UInt wLo = ((UInt)w64) & 0xFFFFFFFF; 2865 HReg tLo = newVRegI(env); 2866 HReg tHi = newVRegI(env); 2867 vassert(e->Iex.Const.con->tag == Ico_U64); 2868 addInstr(env, PPCInstr_LI(tHi, (Long)(Int)wHi, False/*mode32*/)); 2869 addInstr(env, PPCInstr_LI(tLo, (Long)(Int)wLo, False/*mode32*/)); 2870 *rHi = tHi; 2871 *rLo = tLo; 2872 return; 2873 } 2874 2875 /* read 64-bit IRTemp */ 2876 if (e->tag == Iex_RdTmp) { 2877 lookupIRTempPair( rHi, rLo, env, e->Iex.RdTmp.tmp); 2878 return; 2879 } 2880 2881 /* 64-bit GET */ 2882 if 
(e->tag == Iex_Get) { 2883 PPCAMode* am_addr = PPCAMode_IR( e->Iex.Get.offset, 2884 GuestStatePtr(False/*mode32*/) ); 2885 PPCAMode* am_addr4 = advance4(env, am_addr); 2886 HReg tLo = newVRegI(env); 2887 HReg tHi = newVRegI(env); 2888 addInstr(env, PPCInstr_Load( 4, tHi, am_addr, False/*mode32*/ )); 2889 addInstr(env, PPCInstr_Load( 4, tLo, am_addr4, False/*mode32*/ )); 2890 *rHi = tHi; 2891 *rLo = tLo; 2892 return; 2893 } 2894 2895 /* 64-bit Mux0X */ 2896 if (e->tag == Iex_Mux0X) { 2897 HReg e0Lo, e0Hi, eXLo, eXHi; 2898 HReg tLo = newVRegI(env); 2899 HReg tHi = newVRegI(env); 2900 2901 PPCCondCode cc = mk_PPCCondCode( Pct_TRUE, Pcf_7EQ ); 2902 HReg r_cond = iselWordExpr_R(env, e->Iex.Mux0X.cond); 2903 HReg r_tmp = newVRegI(env); 2904 2905 iselInt64Expr(&e0Hi, &e0Lo, env, e->Iex.Mux0X.expr0); 2906 iselInt64Expr(&eXHi, &eXLo, env, e->Iex.Mux0X.exprX); 2907 addInstr(env, mk_iMOVds_RR(tHi,eXHi)); 2908 addInstr(env, mk_iMOVds_RR(tLo,eXLo)); 2909 2910 addInstr(env, PPCInstr_Alu(Palu_AND, 2911 r_tmp, r_cond, PPCRH_Imm(False,0xFF))); 2912 addInstr(env, PPCInstr_Cmp(False/*unsigned*/, True/*32bit cmp*/, 2913 7/*cr*/, r_tmp, PPCRH_Imm(False,0))); 2914 2915 addInstr(env, PPCInstr_CMov(cc,tHi,PPCRI_Reg(e0Hi))); 2916 addInstr(env, PPCInstr_CMov(cc,tLo,PPCRI_Reg(e0Lo))); 2917 *rHi = tHi; 2918 *rLo = tLo; 2919 return; 2920 } 2921 2922 /* --------- BINARY ops --------- */ 2923 if (e->tag == Iex_Binop) { 2924 IROp op_binop = e->Iex.Binop.op; 2925 switch (op_binop) { 2926 /* 32 x 32 -> 64 multiply */ 2927 case Iop_MullU32: 2928 case Iop_MullS32: { 2929 HReg tLo = newVRegI(env); 2930 HReg tHi = newVRegI(env); 2931 Bool syned = toBool(op_binop == Iop_MullS32); 2932 HReg r_srcL = iselWordExpr_R(env, e->Iex.Binop.arg1); 2933 HReg r_srcR = iselWordExpr_R(env, e->Iex.Binop.arg2); 2934 addInstr(env, PPCInstr_MulL(False/*signedness irrelevant*/, 2935 False/*lo32*/, True/*32bit mul*/, 2936 tLo, r_srcL, r_srcR)); 2937 addInstr(env, PPCInstr_MulL(syned, 2938 True/*hi32*/, True/*32bit mul*/, 
2939 tHi, r_srcL, r_srcR)); 2940 *rHi = tHi; 2941 *rLo = tLo; 2942 return; 2943 } 2944 2945 /* Or64/And64/Xor64 */ 2946 case Iop_Or64: 2947 case Iop_And64: 2948 case Iop_Xor64: { 2949 HReg xLo, xHi, yLo, yHi; 2950 HReg tLo = newVRegI(env); 2951 HReg tHi = newVRegI(env); 2952 PPCAluOp op = (op_binop == Iop_Or64) ? Palu_OR : 2953 (op_binop == Iop_And64) ? Palu_AND : Palu_XOR; 2954 iselInt64Expr(&xHi, &xLo, env, e->Iex.Binop.arg1); 2955 iselInt64Expr(&yHi, &yLo, env, e->Iex.Binop.arg2); 2956 addInstr(env, PPCInstr_Alu(op, tHi, xHi, PPCRH_Reg(yHi))); 2957 addInstr(env, PPCInstr_Alu(op, tLo, xLo, PPCRH_Reg(yLo))); 2958 *rHi = tHi; 2959 *rLo = tLo; 2960 return; 2961 } 2962 2963 /* Add64 */ 2964 case Iop_Add64: { 2965 HReg xLo, xHi, yLo, yHi; 2966 HReg tLo = newVRegI(env); 2967 HReg tHi = newVRegI(env); 2968 iselInt64Expr(&xHi, &xLo, env, e->Iex.Binop.arg1); 2969 iselInt64Expr(&yHi, &yLo, env, e->Iex.Binop.arg2); 2970 addInstr(env, PPCInstr_AddSubC( True/*add*/, True /*set carry*/, 2971 tLo, xLo, yLo)); 2972 addInstr(env, PPCInstr_AddSubC( True/*add*/, False/*read carry*/, 2973 tHi, xHi, yHi)); 2974 *rHi = tHi; 2975 *rLo = tLo; 2976 return; 2977 } 2978 2979 /* 32HLto64(e1,e2) */ 2980 case Iop_32HLto64: 2981 *rHi = iselWordExpr_R(env, e->Iex.Binop.arg1); 2982 *rLo = iselWordExpr_R(env, e->Iex.Binop.arg2); 2983 return; 2984 2985 /* F64toI64[S|U] */ 2986 case Iop_F64toI64S: case Iop_F64toI64U: { 2987 HReg tLo = newVRegI(env); 2988 HReg tHi = newVRegI(env); 2989 HReg r1 = StackFramePtr(env->mode64); 2990 PPCAMode* zero_r1 = PPCAMode_IR( 0, r1 ); 2991 PPCAMode* four_r1 = PPCAMode_IR( 4, r1 ); 2992 HReg fsrc = iselDblExpr(env, e->Iex.Binop.arg2); 2993 HReg ftmp = newVRegF(env); 2994 2995 vassert(!env->mode64); 2996 /* Set host rounding mode */ 2997 set_FPU_rounding_mode( env, e->Iex.Binop.arg1 ); 2998 2999 sub_from_sp( env, 16 ); 3000 addInstr(env, PPCInstr_FpCftI(False/*F->I*/, False/*int64*/, 3001 (op_binop == Iop_F64toI64S) ? 
True : False, 3002 True, ftmp, fsrc)); 3003 addInstr(env, PPCInstr_FpLdSt(False/*store*/, 8, ftmp, zero_r1)); 3004 addInstr(env, PPCInstr_Load(4, tHi, zero_r1, False/*mode32*/)); 3005 addInstr(env, PPCInstr_Load(4, tLo, four_r1, False/*mode32*/)); 3006 add_to_sp( env, 16 ); 3007 3008 ///* Restore default FPU rounding. */ 3009 //set_FPU_rounding_default( env ); 3010 *rHi = tHi; 3011 *rLo = tLo; 3012 return; 3013 } 3014 3015 default: 3016 break; 3017 } 3018 } /* if (e->tag == Iex_Binop) */ 3019 3020 3021 /* --------- UNARY ops --------- */ 3022 if (e->tag == Iex_Unop) { 3023 switch (e->Iex.Unop.op) { 3024 3025 /* CmpwNEZ64(e) */ 3026 case Iop_CmpwNEZ64: { 3027 HReg argHi, argLo; 3028 HReg tmp1 = newVRegI(env); 3029 HReg tmp2 = newVRegI(env); 3030 iselInt64Expr(&argHi, &argLo, env, e->Iex.Unop.arg); 3031 /* tmp1 = argHi | argLo */ 3032 addInstr(env, PPCInstr_Alu(Palu_OR, tmp1, argHi, PPCRH_Reg(argLo))); 3033 /* tmp2 = (tmp1 | -tmp1) >>s 31 */ 3034 addInstr(env, PPCInstr_Unary(Pun_NEG,tmp2,tmp1)); 3035 addInstr(env, PPCInstr_Alu(Palu_OR, tmp2, tmp2, PPCRH_Reg(tmp1))); 3036 addInstr(env, PPCInstr_Shft(Pshft_SAR, True/*32bit shift*/, 3037 tmp2, tmp2, PPCRH_Imm(False, 31))); 3038 *rHi = tmp2; 3039 *rLo = tmp2; /* yes, really tmp2 */ 3040 return; 3041 } 3042 3043 /* Left64 */ 3044 case Iop_Left64: { 3045 HReg argHi, argLo; 3046 HReg zero32 = newVRegI(env); 3047 HReg resHi = newVRegI(env); 3048 HReg resLo = newVRegI(env); 3049 iselInt64Expr(&argHi, &argLo, env, e->Iex.Unop.arg); 3050 vassert(env->mode64 == False); 3051 addInstr(env, PPCInstr_LI(zero32, 0, env->mode64)); 3052 /* resHi:resLo = - argHi:argLo */ 3053 addInstr(env, PPCInstr_AddSubC( False/*sub*/, True/*set carry*/, 3054 resLo, zero32, argLo )); 3055 addInstr(env, PPCInstr_AddSubC( False/*sub*/, False/*read carry*/, 3056 resHi, zero32, argHi )); 3057 /* resHi:resLo |= srcHi:srcLo */ 3058 addInstr(env, PPCInstr_Alu(Palu_OR, resLo, resLo, PPCRH_Reg(argLo))); 3059 addInstr(env, PPCInstr_Alu(Palu_OR, resHi, resHi, 
PPCRH_Reg(argHi))); 3060 *rHi = resHi; 3061 *rLo = resLo; 3062 return; 3063 } 3064 3065 /* 32Sto64(e) */ 3066 case Iop_32Sto64: { 3067 HReg tHi = newVRegI(env); 3068 HReg src = iselWordExpr_R(env, e->Iex.Unop.arg); 3069 addInstr(env, PPCInstr_Shft(Pshft_SAR, True/*32bit shift*/, 3070 tHi, src, PPCRH_Imm(False,31))); 3071 *rHi = tHi; 3072 *rLo = src; 3073 return; 3074 } 3075 3076 /* 32Uto64(e) */ 3077 case Iop_32Uto64: { 3078 HReg tHi = newVRegI(env); 3079 HReg tLo = iselWordExpr_R(env, e->Iex.Unop.arg); 3080 addInstr(env, PPCInstr_LI(tHi, 0, False/*mode32*/)); 3081 *rHi = tHi; 3082 *rLo = tLo; 3083 return; 3084 } 3085 3086 case Iop_128to64: { 3087 /* Narrow, return the low 64-bit half as a 32-bit 3088 * register pair */ 3089 HReg r_Hi = INVALID_HREG; 3090 HReg r_MedHi = INVALID_HREG; 3091 HReg r_MedLo = INVALID_HREG; 3092 HReg r_Lo = INVALID_HREG; 3093 3094 iselInt128Expr_to_32x4(&r_Hi, &r_MedHi, &r_MedLo, &r_Lo, 3095 env, e->Iex.Unop.arg); 3096 *rHi = r_MedLo; 3097 *rLo = r_Lo; 3098 return; 3099 } 3100 3101 case Iop_128HIto64: { 3102 /* Narrow, return the high 64-bit half as a 32-bit 3103 * register pair */ 3104 HReg r_Hi = INVALID_HREG; 3105 HReg r_MedHi = INVALID_HREG; 3106 HReg r_MedLo = INVALID_HREG; 3107 HReg r_Lo = INVALID_HREG; 3108 3109 iselInt128Expr_to_32x4(&r_Hi, &r_MedHi, &r_MedLo, &r_Lo, 3110 env, e->Iex.Unop.arg); 3111 *rHi = r_Hi; 3112 *rLo = r_MedHi; 3113 return; 3114 } 3115 3116 /* V128{HI}to64 */ 3117 case Iop_V128HIto64: 3118 case Iop_V128to64: { 3119 HReg r_aligned16; 3120 Int off = e->Iex.Unop.op==Iop_V128HIto64 ? 
0 : 8; 3121 HReg tLo = newVRegI(env); 3122 HReg tHi = newVRegI(env); 3123 HReg vec = iselVecExpr(env, e->Iex.Unop.arg); 3124 PPCAMode *am_off0, *am_offLO, *am_offHI; 3125 sub_from_sp( env, 32 ); // Move SP down 32 bytes 3126 3127 // get a quadword aligned address within our stack space 3128 r_aligned16 = get_sp_aligned16( env ); 3129 am_off0 = PPCAMode_IR( 0, r_aligned16 ); 3130 am_offHI = PPCAMode_IR( off, r_aligned16 ); 3131 am_offLO = PPCAMode_IR( off+4, r_aligned16 ); 3132 3133 // store as Vec128 3134 addInstr(env, 3135 PPCInstr_AvLdSt( False/*store*/, 16, vec, am_off0 )); 3136 3137 // load hi,lo words (of hi/lo half of vec) as Ity_I32's 3138 addInstr(env, 3139 PPCInstr_Load( 4, tHi, am_offHI, False/*mode32*/ )); 3140 addInstr(env, 3141 PPCInstr_Load( 4, tLo, am_offLO, False/*mode32*/ )); 3142 3143 add_to_sp( env, 32 ); // Reset SP 3144 *rHi = tHi; 3145 *rLo = tLo; 3146 return; 3147 } 3148 3149 /* could do better than this, but for now ... */ 3150 case Iop_1Sto64: { 3151 HReg tLo = newVRegI(env); 3152 HReg tHi = newVRegI(env); 3153 PPCCondCode cond = iselCondCode(env, e->Iex.Unop.arg); 3154 addInstr(env, PPCInstr_Set(cond,tLo)); 3155 addInstr(env, PPCInstr_Shft(Pshft_SHL, True/*32bit shift*/, 3156 tLo, tLo, PPCRH_Imm(False,31))); 3157 addInstr(env, PPCInstr_Shft(Pshft_SAR, True/*32bit shift*/, 3158 tLo, tLo, PPCRH_Imm(False,31))); 3159 addInstr(env, mk_iMOVds_RR(tHi, tLo)); 3160 *rHi = tHi; 3161 *rLo = tLo; 3162 return; 3163 } 3164 3165 case Iop_Not64: { 3166 HReg xLo, xHi; 3167 HReg tmpLo = newVRegI(env); 3168 HReg tmpHi = newVRegI(env); 3169 iselInt64Expr(&xHi, &xLo, env, e->Iex.Unop.arg); 3170 addInstr(env, PPCInstr_Unary(Pun_NOT,tmpLo,xLo)); 3171 addInstr(env, PPCInstr_Unary(Pun_NOT,tmpHi,xHi)); 3172 *rHi = tmpHi; 3173 *rLo = tmpLo; 3174 return; 3175 } 3176 3177 /* ReinterpF64asI64(e) */ 3178 /* Given an IEEE754 double, produce an I64 with the same bit 3179 pattern. 
*/ 3180 case Iop_ReinterpF64asI64: { 3181 PPCAMode *am_addr0, *am_addr1; 3182 HReg fr_src = iselDblExpr(env, e->Iex.Unop.arg); 3183 HReg r_dstLo = newVRegI(env); 3184 HReg r_dstHi = newVRegI(env); 3185 3186 sub_from_sp( env, 16 ); // Move SP down 16 bytes 3187 am_addr0 = PPCAMode_IR( 0, StackFramePtr(False/*mode32*/) ); 3188 am_addr1 = PPCAMode_IR( 4, StackFramePtr(False/*mode32*/) ); 3189 3190 // store as F64 3191 addInstr(env, PPCInstr_FpLdSt( False/*store*/, 8, 3192 fr_src, am_addr0 )); 3193 3194 // load hi,lo as Ity_I32's 3195 addInstr(env, PPCInstr_Load( 4, r_dstHi, 3196 am_addr0, False/*mode32*/ )); 3197 addInstr(env, PPCInstr_Load( 4, r_dstLo, 3198 am_addr1, False/*mode32*/ )); 3199 *rHi = r_dstHi; 3200 *rLo = r_dstLo; 3201 3202 add_to_sp( env, 16 ); // Reset SP 3203 return; 3204 } 3205 3206 case Iop_ReinterpD64asI64: { 3207 HReg fr_src = iselDfp64Expr(env, e->Iex.Unop.arg); 3208 PPCAMode *am_addr0, *am_addr1; 3209 HReg r_dstLo = newVRegI(env); 3210 HReg r_dstHi = newVRegI(env); 3211 3212 3213 sub_from_sp( env, 16 ); // Move SP down 16 bytes 3214 am_addr0 = PPCAMode_IR( 0, StackFramePtr(False/*mode32*/) ); 3215 am_addr1 = PPCAMode_IR( 4, StackFramePtr(False/*mode32*/) ); 3216 3217 // store as D64 3218 addInstr(env, PPCInstr_FpLdSt( False/*store*/, 8, 3219 fr_src, am_addr0 )); 3220 3221 // load hi,lo as Ity_I32's 3222 addInstr(env, PPCInstr_Load( 4, r_dstHi, 3223 am_addr0, False/*mode32*/ )); 3224 addInstr(env, PPCInstr_Load( 4, r_dstLo, 3225 am_addr1, False/*mode32*/ )); 3226 *rHi = r_dstHi; 3227 *rLo = r_dstLo; 3228 3229 add_to_sp( env, 16 ); // Reset SP 3230 3231 return; 3232 } 3233 3234 case Iop_BCDtoDPB: { 3235 PPCCondCode cc; 3236 UInt argiregs; 3237 HReg argregs[2]; 3238 Int argreg; 3239 HReg tLo = newVRegI(env); 3240 HReg tHi = newVRegI(env); 3241 HReg tmpHi; 3242 HReg tmpLo; 3243 ULong target; 3244 Bool mode64 = env->mode64; 3245 3246 argregs[0] = hregPPC_GPR3(mode64); 3247 argregs[1] = hregPPC_GPR4(mode64); 3248 3249 argiregs = 0; 3250 argreg = 0; 
3251 3252 iselInt64Expr( &tmpHi, &tmpLo, env, e->Iex.Unop.arg ); 3253 3254 argiregs |= ( 1 << (argreg+3 ) ); 3255 addInstr( env, mk_iMOVds_RR( argregs[argreg++], tmpHi ) ); 3256 3257 argiregs |= ( 1 << (argreg+3 ) ); 3258 addInstr( env, mk_iMOVds_RR( argregs[argreg], tmpLo ) ); 3259 3260 cc = mk_PPCCondCode( Pct_ALWAYS, Pcf_NONE ); 3261 target = toUInt( Ptr_to_ULong(h_BCDtoDPB ) ); 3262 3263 addInstr( env, PPCInstr_Call( cc, (Addr64)target, argiregs ) ); 3264 addInstr( env, mk_iMOVds_RR( tHi, argregs[argreg-1] ) ); 3265 addInstr( env, mk_iMOVds_RR( tLo, argregs[argreg] ) ); 3266 3267 *rHi = tHi; 3268 *rLo = tLo; 3269 return; 3270 } 3271 3272 case Iop_DPBtoBCD: { 3273 PPCCondCode cc; 3274 UInt argiregs; 3275 HReg argregs[2]; 3276 Int argreg; 3277 HReg tLo = newVRegI(env); 3278 HReg tHi = newVRegI(env); 3279 HReg tmpHi; 3280 HReg tmpLo; 3281 ULong target; 3282 Bool mode64 = env->mode64; 3283 3284 argregs[0] = hregPPC_GPR3(mode64); 3285 argregs[1] = hregPPC_GPR4(mode64); 3286 3287 argiregs = 0; 3288 argreg = 0; 3289 3290 iselInt64Expr(&tmpHi, &tmpLo, env, e->Iex.Unop.arg); 3291 3292 argiregs |= (1 << (argreg+3)); 3293 addInstr(env, mk_iMOVds_RR( argregs[argreg++], tmpHi )); 3294 3295 argiregs |= (1 << (argreg+3)); 3296 addInstr(env, mk_iMOVds_RR( argregs[argreg], tmpLo)); 3297 3298 cc = mk_PPCCondCode( Pct_ALWAYS, Pcf_NONE ); 3299 3300 target = toUInt( Ptr_to_ULong( h_DPBtoBCD ) ); 3301 3302 addInstr(env, PPCInstr_Call( cc, (Addr64)target, argiregs ) ); 3303 addInstr(env, mk_iMOVds_RR(tHi, argregs[argreg-1])); 3304 addInstr(env, mk_iMOVds_RR(tLo, argregs[argreg])); 3305 3306 *rHi = tHi; 3307 *rLo = tLo; 3308 return; 3309 } 3310 3311 default: 3312 break; 3313 } 3314 } /* if (e->tag == Iex_Unop) */ 3315 3316 vex_printf("iselInt64Expr(ppc): No such tag(%u)\n", e->tag); 3317 ppIRExpr(e); 3318 vpanic("iselInt64Expr(ppc)"); 3319} 3320 3321 3322/*---------------------------------------------------------*/ 3323/*--- ISEL: Floating point expressions (32 bit) ---*/ 
/*---------------------------------------------------------*/

/* Nothing interesting here; really just wrappers for
   64-bit stuff. */

/* Compute a 32-bit float value into an FP register.  Thin wrapper
   around iselFltExpr_wrk which sanity-checks that the worker returned
   a virtual register of the FP class (F32 values live in F64 regs on
   PPC). */
static HReg iselFltExpr ( ISelEnv* env, IRExpr* e )
{
   HReg r = iselFltExpr_wrk( env, e );
#  if 0
   vex_printf("\n"); ppIRExpr(e); vex_printf("\n");
#  endif
   vassert(hregClass(r) == HRcFlt64); /* yes, really Flt64 */
   vassert(hregIsVirtual(r));
   return r;
}

/* DO NOT CALL THIS DIRECTLY */
static HReg iselFltExpr_wrk ( ISelEnv* env, IRExpr* e )
{
   Bool mode64 = env->mode64;

   IRType ty = typeOfIRExpr(env->type_env,e);
   vassert(ty == Ity_F32);

   if (e->tag == Iex_RdTmp) {
      return lookupIRTemp(env, e->Iex.RdTmp.tmp);
   }

   /* Big-endian F32 load: a 4-byte FP load widens to F64 in-register. */
   if (e->tag == Iex_Load && e->Iex.Load.end == Iend_BE) {
      PPCAMode* am_addr;
      HReg r_dst = newVRegF(env);
      vassert(e->Iex.Load.ty == Ity_F32);
      am_addr = iselWordExpr_AMode(env, e->Iex.Load.addr, Ity_F32/*xfer*/);
      addInstr(env, PPCInstr_FpLdSt(True/*load*/, 4, r_dst, am_addr));
      return r_dst;
   }

   /* F32 read from the guest state. */
   if (e->tag == Iex_Get) {
      HReg r_dst = newVRegF(env);
      PPCAMode* am_addr = PPCAMode_IR( e->Iex.Get.offset,
                                       GuestStatePtr(env->mode64) );
      addInstr(env, PPCInstr_FpLdSt( True/*load*/, 4, r_dst, am_addr ));
      return r_dst;
   }

   if (e->tag == Iex_Unop && e->Iex.Unop.op == Iop_TruncF64asF32) {
      /* This is quite subtle.  The only way to do the relevant
         truncation is to do a single-precision store and then a
         double precision load to get it back into a register.  The
         problem is, if the data is then written to memory a second
         time, as in

            STbe(...) = TruncF64asF32(...)

         then will the second truncation further alter the value?  The
         answer is no: flds (as generated here) followed by fsts
         (generated for the STbe) is the identity function on 32-bit
         floats, so we are safe.

         Another upshot of this is that if iselStmt can see the
         entirety of

            STbe(...) = TruncF64asF32(arg)

         then it can short circuit having to deal with TruncF64asF32
         individually; instead just compute arg into a 64-bit FP
         register and do 'fsts' (since that itself does the
         truncation).

         We generate pretty poor code here (should be ok both for
         32-bit and 64-bit mode); but it is expected that for the most
         part the latter optimisation will apply and hence this code
         will not often be used.
      */
      HReg fsrc = iselDblExpr(env, e->Iex.Unop.arg);
      HReg fdst = newVRegF(env);
      PPCAMode* zero_r1 = PPCAMode_IR( 0, StackFramePtr(env->mode64) );

      sub_from_sp( env, 16 );
      // store as F32, hence truncating
      addInstr(env, PPCInstr_FpLdSt( False/*store*/, 4,
                                     fsrc, zero_r1 ));
      // and reload.  Good huh?! (sigh)
      addInstr(env, PPCInstr_FpLdSt( True/*load*/, 4,
                                     fdst, zero_r1 ));
      add_to_sp( env, 16 );
      return fdst;
   }

   if (e->tag == Iex_Binop && e->Iex.Binop.op == Iop_I64UtoF32) {
      if (mode64) {
         HReg fdst = newVRegF(env);
         HReg isrc = iselWordExpr_R(env, e->Iex.Binop.arg2);
         HReg r1 = StackFramePtr(env->mode64);
         PPCAMode* zero_r1 = PPCAMode_IR( 0, r1 );

         /* Set host rounding mode */
         set_FPU_rounding_mode( env, e->Iex.Binop.arg1 );

         sub_from_sp( env, 16 );

         /* Round-trip the I64 through the stack to get its bit
            pattern into an FPR, then convert in-register. */
         addInstr(env, PPCInstr_Store(8, zero_r1, isrc, True/*mode64*/));
         addInstr(env, PPCInstr_FpLdSt(True/*load*/, 8, fdst, zero_r1));
         // 3rd arg False: unsigned; 4th arg False: single-precision
         // result (compare the Iop_I64StoF64 case in iselDblExpr_wrk).
         addInstr(env, PPCInstr_FpCftI(True/*I->F*/, False/*int64*/,
                                       False, False,
                                       fdst, fdst));

         add_to_sp( env, 16 );

         ///* Restore default FPU rounding. */
         //set_FPU_rounding_default( env );
         return fdst;
      } else {
         /* 32-bit mode */
         HReg fdst = newVRegF(env);
         HReg isrcHi, isrcLo;
         HReg r1 = StackFramePtr(env->mode64);
         PPCAMode* zero_r1 = PPCAMode_IR( 0, r1 );
         PPCAMode* four_r1 = PPCAMode_IR( 4, r1 );

         iselInt64Expr(&isrcHi, &isrcLo, env, e->Iex.Binop.arg2);

         /* Set host rounding mode */
         set_FPU_rounding_mode( env, e->Iex.Binop.arg1 );

         sub_from_sp( env, 16 );

         /* Same trick as mode64, but the I64 arrives as two 32-bit
            halves and is stored big-endian (hi at 0, lo at 4). */
         addInstr(env, PPCInstr_Store(4, zero_r1, isrcHi, False/*mode32*/));
         addInstr(env, PPCInstr_Store(4, four_r1, isrcLo, False/*mode32*/));
         addInstr(env, PPCInstr_FpLdSt(True/*load*/, 8, fdst, zero_r1));
         addInstr(env, PPCInstr_FpCftI(True/*I->F*/, False/*int64*/,
                                       False, False,
                                       fdst, fdst));

         add_to_sp( env, 16 );

         ///* Restore default FPU rounding. */
         //set_FPU_rounding_default( env );
         return fdst;
      }

   }

   vex_printf("iselFltExpr(ppc): No such tag(%u)\n", e->tag);
   ppIRExpr(e);
   vpanic("iselFltExpr_wrk(ppc)");
}


/*---------------------------------------------------------*/
/*--- ISEL: Floating point expressions (64 bit)         ---*/
/*---------------------------------------------------------*/

/* Compute a 64-bit floating point value into a register, the identity
   of which is returned.  As with iselIntExpr_R, the reg may be either
   real or virtual; in any case it must not be changed by subsequent
   code emitted by the caller.  */

/* IEEE 754 formats.
   From http://www.freesoft.org/CIE/RFC/1832/32.htm:

    Type                  S (1 bit)   E (11 bits)   F (52 bits)
    ----                  ---------   -----------   -----------
    signalling NaN        u           2047 (max)    .0uuuuu---u
                                                    (with at least
                                                     one 1 bit)
    quiet NaN             u           2047 (max)    .1uuuuu---u

    negative infinity     1           2047 (max)    .000000---0

    positive infinity     0           2047 (max)    .000000---0

    negative zero         1           0             .000000---0

    positive zero         0           0             .000000---0
*/

static HReg iselDblExpr ( ISelEnv* env, IRExpr* e )
{
   HReg r = iselDblExpr_wrk( env, e );
#  if 0
   vex_printf("\n"); ppIRExpr(e); vex_printf("\n");
#  endif
   vassert(hregClass(r) == HRcFlt64);
   vassert(hregIsVirtual(r));
   return r;
}

/* DO NOT CALL THIS DIRECTLY */
static HReg iselDblExpr_wrk ( ISelEnv* env, IRExpr* e )
{
   Bool mode64 = env->mode64;
   IRType ty = typeOfIRExpr(env->type_env,e);
   vassert(e);
   vassert(ty == Ity_F64);

   if (e->tag == Iex_RdTmp) {
      return lookupIRTemp(env, e->Iex.RdTmp.tmp);
   }

   /* --------- LITERAL --------- */
   if (e->tag == Iex_Const) {
      /* Type-pun the literal through a union so the bit pattern can
         be materialised in integer regs and moved to an FPR. */
      union { UInt u32x2[2]; ULong u64; Double f64; } u;
      vassert(sizeof(u) == 8);
      vassert(sizeof(u.u64) == 8);
      vassert(sizeof(u.f64) == 8);
      vassert(sizeof(u.u32x2) == 8);

      if (e->Iex.Const.con->tag == Ico_F64) {
         u.f64 = e->Iex.Const.con->Ico.F64;
      }
      else if (e->Iex.Const.con->tag == Ico_F64i) {
         u.u64 = e->Iex.Const.con->Ico.F64i;
      }
      else
         vpanic("iselDblExpr(ppc): const");

      if (!mode64) {
         HReg r_srcHi = newVRegI(env);
         HReg r_srcLo = newVRegI(env);
         addInstr(env, PPCInstr_LI(r_srcHi, u.u32x2[0], mode64));
         addInstr(env, PPCInstr_LI(r_srcLo, u.u32x2[1], mode64));
         return mk_LoadRR32toFPR( env, r_srcHi, r_srcLo );
      } else { // mode64
         HReg r_src = newVRegI(env);
         addInstr(env, PPCInstr_LI(r_src, u.u64, mode64));
         return mk_LoadR64toFPR( env, r_src );  // 1*I64 -> F64
      }
   }

   /* --------- LOAD --------- */
   if (e->tag == Iex_Load && e->Iex.Load.end == Iend_BE) {
      HReg r_dst = newVRegF(env);
      PPCAMode* am_addr;
      vassert(e->Iex.Load.ty == Ity_F64);
      am_addr = iselWordExpr_AMode(env, e->Iex.Load.addr, Ity_F64/*xfer*/);
      addInstr(env, PPCInstr_FpLdSt(True/*load*/, 8, r_dst, am_addr));
      return r_dst;
   }

   /* --------- GET --------- */
   if (e->tag == Iex_Get) {
      HReg r_dst = newVRegF(env);
      PPCAMode* am_addr = PPCAMode_IR( e->Iex.Get.offset,
                                       GuestStatePtr(mode64) );
      addInstr(env, PPCInstr_FpLdSt( True/*load*/, 8, r_dst, am_addr ));
      return r_dst;
   }

   /* --------- OPS --------- */
   /* Fused multiply-add/sub variants (single rounding). */
   if (e->tag == Iex_Qop) {
      PPCFpOp fpop = Pfp_INVALID;
      switch (e->Iex.Qop.details->op) {
         case Iop_MAddF64:    fpop = Pfp_MADDD; break;
         case Iop_MAddF64r32: fpop = Pfp_MADDS; break;
         case Iop_MSubF64:    fpop = Pfp_MSUBD; break;
         case Iop_MSubF64r32: fpop = Pfp_MSUBS; break;
         default: break;
      }
      if (fpop != Pfp_INVALID) {
         HReg r_dst   = newVRegF(env);
         HReg r_srcML  = iselDblExpr(env, e->Iex.Qop.details->arg2);
         HReg r_srcMR  = iselDblExpr(env, e->Iex.Qop.details->arg3);
         HReg r_srcAcc = iselDblExpr(env, e->Iex.Qop.details->arg4);
         set_FPU_rounding_mode( env, e->Iex.Qop.details->arg1 );
         addInstr(env, PPCInstr_FpMulAcc(fpop, r_dst,
                                         r_srcML, r_srcMR, r_srcAcc));
         return r_dst;
      }
   }

   if (e->tag == Iex_Triop) {
      IRTriop *triop = e->Iex.Triop.details;
      PPCFpOp fpop = Pfp_INVALID;
      switch (triop->op) {
         case Iop_AddF64:    fpop = Pfp_ADDD; break;
         case Iop_SubF64:    fpop = Pfp_SUBD; break;
         case Iop_MulF64:    fpop = Pfp_MULD; break;
         case Iop_DivF64:    fpop = Pfp_DIVD; break;
         case Iop_AddF64r32: fpop = Pfp_ADDS; break;
         case Iop_SubF64r32: fpop = Pfp_SUBS; break;
         case Iop_MulF64r32: fpop = Pfp_MULS; break;
         case Iop_DivF64r32: fpop = Pfp_DIVS; break;
         default: break;
      }
      if (fpop != Pfp_INVALID) {
         HReg r_dst  = newVRegF(env);
         HReg r_srcL = iselDblExpr(env, triop->arg2);
         HReg r_srcR = iselDblExpr(env, triop->arg3);
         set_FPU_rounding_mode( env, triop->arg1 );
         addInstr(env, PPCInstr_FpBinary(fpop, r_dst, r_srcL, r_srcR));
         return r_dst;
      }
      /* NOTE(review): Iop_QuantizeD64/Iop_SignificanceRoundD64 are DFP
         ops producing Ity_D64, yet this function vasserts
         ty == Ity_F64, so this arm looks unreachable here; the live
         handling for these ops is in iselDfp64Expr_wrk.  TODO confirm
         and consider removing. */
      switch (triop->op) {
         case Iop_QuantizeD64:          fpop = Pfp_DQUA;  break;
         case Iop_SignificanceRoundD64: fpop = Pfp_RRDTR; break;
         default: break;
      }
      if (fpop != Pfp_INVALID) {
         HReg r_dst  = newVRegF(env);
         HReg r_srcL = iselDblExpr(env, triop->arg2);
         HReg r_srcR = iselDblExpr(env, triop->arg3);
         PPCRI* rmc  = iselWordExpr_RI(env, triop->arg1);

         // will set TE and RMC when issuing instruction
         addInstr(env, PPCInstr_DfpQuantize(fpop, r_dst, r_srcL, r_srcR, rmc));
         return r_dst;
      }
   }

   if (e->tag == Iex_Binop) {
      PPCFpOp fpop = Pfp_INVALID;
      switch (e->Iex.Binop.op) {
         case Iop_SqrtF64:   fpop = Pfp_SQRT;   break;
         case Iop_I64StoD64: fpop = Pfp_DCFFIX; break;
         case Iop_D64toI64S: fpop = Pfp_DCTFIX; break;
         default: break;
      }
      if (fpop != Pfp_INVALID) {
         HReg fr_dst = newVRegF(env);
         HReg fr_src = iselDblExpr(env, e->Iex.Binop.arg2);
         set_FPU_rounding_mode( env, e->Iex.Binop.arg1 );
         addInstr(env, PPCInstr_FpUnary(fpop, fr_dst, fr_src));
         return fr_dst;
      }
   }

   if (e->tag == Iex_Binop) {

      if (e->Iex.Binop.op == Iop_RoundF64toF32) {
         HReg r_dst = newVRegF(env);
         HReg r_src = iselDblExpr(env, e->Iex.Binop.arg2);
         set_FPU_rounding_mode( env, e->Iex.Binop.arg1 );
         addInstr(env, PPCInstr_FpRSP(r_dst, r_src));
         //set_FPU_rounding_default( env );
         return r_dst;
      }

      if (e->Iex.Binop.op == Iop_I64StoF64 || e->Iex.Binop.op == Iop_I64UtoF64) {
         if (mode64) {
            HReg fdst = newVRegF(env);
            HReg isrc = iselWordExpr_R(env, e->Iex.Binop.arg2);
            HReg r1 = StackFramePtr(env->mode64);
            PPCAMode* zero_r1 = PPCAMode_IR( 0, r1 );

            /* Set host rounding mode */
            set_FPU_rounding_mode( env, e->Iex.Binop.arg1 );

            sub_from_sp( env, 16 );

            /* Push the I64 bit pattern through memory into an FPR,
               then convert in-register. */
            addInstr(env, PPCInstr_Store(8, zero_r1, isrc, True/*mode64*/));
            addInstr(env, PPCInstr_FpLdSt(True/*load*/, 8, fdst, zero_r1));
            addInstr(env, PPCInstr_FpCftI(True/*I->F*/, False/*int64*/,
                                          e->Iex.Binop.op == Iop_I64StoF64,
                                          True/*fdst is 64 bit*/,
                                          fdst, fdst));

            add_to_sp( env, 16 );

            ///* Restore default FPU rounding. */
            //set_FPU_rounding_default( env );
            return fdst;
         } else {
            /* 32-bit mode */
            HReg fdst = newVRegF(env);
            HReg isrcHi, isrcLo;
            HReg r1 = StackFramePtr(env->mode64);
            PPCAMode* zero_r1 = PPCAMode_IR( 0, r1 );
            PPCAMode* four_r1 = PPCAMode_IR( 4, r1 );

            iselInt64Expr(&isrcHi, &isrcLo, env, e->Iex.Binop.arg2);

            /* Set host rounding mode */
            set_FPU_rounding_mode( env, e->Iex.Binop.arg1 );

            sub_from_sp( env, 16 );

            /* Store the two 32-bit halves big-endian (hi at 0, lo at
               4), reload as one F64, then convert in-register. */
            addInstr(env, PPCInstr_Store(4, zero_r1, isrcHi, False/*mode32*/));
            addInstr(env, PPCInstr_Store(4, four_r1, isrcLo, False/*mode32*/));
            addInstr(env, PPCInstr_FpLdSt(True/*load*/, 8, fdst, zero_r1));
            addInstr(env, PPCInstr_FpCftI(True/*I->F*/, False/*int64*/,
                                          e->Iex.Binop.op == Iop_I64StoF64,
                                          True/*fdst is 64 bit*/,
                                          fdst, fdst));

            add_to_sp( env, 16 );

            ///* Restore default FPU rounding. */
            //set_FPU_rounding_default( env );
            return fdst;
         }
      }

   }

   if (e->tag == Iex_Unop) {
      PPCFpOp fpop = Pfp_INVALID;
      switch (e->Iex.Unop.op) {
         case Iop_NegF64:     fpop = Pfp_NEG; break;
         case Iop_AbsF64:     fpop = Pfp_ABS; break;
         case Iop_Est5FRSqrt: fpop = Pfp_RSQRTE; break;
         case Iop_RoundF64toF64_NegINF:  fpop = Pfp_FRIM; break;
         case Iop_RoundF64toF64_PosINF:  fpop = Pfp_FRIP; break;
         case Iop_RoundF64toF64_NEAREST: fpop = Pfp_FRIN; break;
         case Iop_RoundF64toF64_ZERO:    fpop = Pfp_FRIZ; break;
         case Iop_ExtractExpD64:         fpop = Pfp_DXEX; break;
         default: break;
      }
      if (fpop != Pfp_INVALID) {
         HReg fr_dst = newVRegF(env);
         HReg fr_src = iselDblExpr(env, e->Iex.Unop.arg);
         addInstr(env, PPCInstr_FpUnary(fpop, fr_dst, fr_src));
         return fr_dst;
      }
   }

   if (e->tag == Iex_Unop) {
      switch (e->Iex.Unop.op) {
         case Iop_ReinterpI64asF64: {
            /* Given an I64, produce an IEEE754 double with the same
               bit pattern. */
            if (!mode64) {
               HReg r_srcHi, r_srcLo;
               iselInt64Expr( &r_srcHi, &r_srcLo, env, e->Iex.Unop.arg);
               return mk_LoadRR32toFPR( env, r_srcHi, r_srcLo );
            } else {
               HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
               return mk_LoadR64toFPR( env, r_src );
            }
         }

         case Iop_F32toF64: {
            /* Special case F32toF64(ReinterpI32asF32(x)): spill x to
               the stack and do a 4-byte FP load, which reinterprets
               and widens in one go. */
            if (e->Iex.Unop.arg->tag == Iex_Unop &&
                    e->Iex.Unop.arg->Iex.Unop.op == Iop_ReinterpI32asF32 ) {
               e = e->Iex.Unop.arg;

               HReg src = iselWordExpr_R(env, e->Iex.Unop.arg);
               HReg fr_dst = newVRegF(env);
               PPCAMode *am_addr;

               sub_from_sp( env, 16 );        // Move SP down 16 bytes
               am_addr = PPCAMode_IR( 0, StackFramePtr(env->mode64) );

               // store src as Ity_I32's
               addInstr(env, PPCInstr_Store( 4, am_addr, src, env->mode64 ));

               // load single precision float, but the end results loads into a
               // 64-bit FP register -- i.e., F64.
               addInstr(env, PPCInstr_FpLdSt(True/*load*/, 4, fr_dst, am_addr));

               add_to_sp( env, 16 );          // Reset SP
               return fr_dst;
            }


            /* this is a no-op */
            HReg res = iselFltExpr(env, e->Iex.Unop.arg);
            return res;
         }
         default:
            break;
      }
   }

   /* --------- MULTIPLEX --------- */
   if (e->tag == Iex_Mux0X) {
      if (ty == Ity_F64
          && typeOfIRExpr(env->type_env,e->Iex.Mux0X.cond) == Ity_I8) {
         /* Compute exprX into the result, then conditionally overwrite
            it with expr0 when (cond & 0xFF) == 0. */
         PPCCondCode cc = mk_PPCCondCode( Pct_TRUE, Pcf_7EQ );
         HReg r_cond = iselWordExpr_R(env, e->Iex.Mux0X.cond);
         HReg frX = iselDblExpr(env, e->Iex.Mux0X.exprX);
         HReg fr0 = iselDblExpr(env, e->Iex.Mux0X.expr0);
         HReg fr_dst = newVRegF(env);
         HReg r_tmp = newVRegI(env);
         addInstr(env, PPCInstr_Alu(Palu_AND, r_tmp,
                                    r_cond, PPCRH_Imm(False,0xFF)));
         addInstr(env, PPCInstr_FpUnary( Pfp_MOV, fr_dst, frX ));
         addInstr(env, PPCInstr_Cmp(False/*unsigned*/, True/*32bit cmp*/,
                                    7/*cr*/, r_tmp, PPCRH_Imm(False,0)));
         addInstr(env, PPCInstr_FpCMov( cc, fr_dst, fr0 ));
         return fr_dst;
      }
   }

   vex_printf("iselDblExpr(ppc): No such tag(%u)\n", e->tag);
   ppIRExpr(e);
   vpanic("iselDblExpr_wrk(ppc)");
}

/* Compute a 64-bit decimal floating point (D64) value into an FP
   register.  Wrapper that sanity-checks the register class returned
   by the worker. */
static HReg iselDfp64Expr(ISelEnv* env, IRExpr* e)
{
   HReg r = iselDfp64Expr_wrk( env, e );
   vassert(hregClass(r) == HRcFlt64);
   vassert( hregIsVirtual(r) );
   return r;
}

/* DO NOT CALL THIS DIRECTLY */
static HReg iselDfp64Expr_wrk(ISelEnv* env, IRExpr* e)
{
   Bool mode64 = env->mode64;
   IRType ty = typeOfIRExpr( env->type_env, e );
   HReg r_dstHi, r_dstLo;

   vassert( e );
   vassert( ty == Ity_D64 );

   if (e->tag == Iex_RdTmp) {
      return lookupIRTemp( env, e->Iex.RdTmp.tmp );
   }

   /* --------- GET --------- */
   if (e->tag == Iex_Get) {
      HReg r_dst = newVRegF( env );
      PPCAMode* am_addr = PPCAMode_IR( e->Iex.Get.offset,
                                       GuestStatePtr(mode64) );
3839 addInstr( env, PPCInstr_FpLdSt( True/*load*/, 8, r_dst, am_addr ) ); 3840 return r_dst; 3841 } 3842 3843 /* --------- OPS --------- */ 3844 if (e->tag == Iex_Qop) { 3845 HReg r_dst = newVRegF( env ); 3846 return r_dst; 3847 } 3848 3849 if (e->tag == Iex_Unop) { 3850 HReg fr_dst = newVRegF(env); 3851 switch (e->Iex.Unop.op) { 3852 case Iop_ReinterpI64asD64: { 3853 /* Given an I64, produce an IEEE754 DFP with the same 3854 bit pattern. */ 3855 if (!mode64) { 3856 HReg r_srcHi, r_srcLo; 3857 iselInt64Expr( &r_srcHi, &r_srcLo, env, e->Iex.Unop.arg); 3858 return mk_LoadRR32toFPR( env, r_srcHi, r_srcLo ); 3859 } else { 3860 HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg); 3861 return mk_LoadR64toFPR( env, r_src ); 3862 } 3863 } 3864 3865 case Iop_ExtractExpD64: { 3866 HReg fr_src = iselDfp64Expr(env, e->Iex.Unop.arg); 3867 3868 addInstr(env, PPCInstr_Dfp64Unary(Pfp_DXEX, fr_dst, fr_src)); 3869 return fr_dst; 3870 } 3871 case Iop_ExtractExpD128: { 3872 /* Result is a D64 */ 3873 HReg r_srcHi; 3874 HReg r_srcLo; 3875 3876 iselDfp128Expr(&r_srcHi, &r_srcLo, env, e->Iex.Unop.arg); 3877 addInstr(env, PPCInstr_ExtractExpD128(Pfp_DXEXQ, fr_dst, 3878 r_srcHi, r_srcLo)); 3879 return fr_dst; 3880 } 3881 case Iop_D32toD64: { 3882 HReg fr_src = iselDfp64Expr(env, e->Iex.Unop.arg); 3883 addInstr(env, PPCInstr_Dfp64Unary(Pfp_DCTDP, fr_dst, fr_src)); 3884 return fr_dst; 3885 } 3886 case Iop_D128HItoD64: 3887 iselDfp128Expr( &r_dstHi, &r_dstLo, env, e->Iex.Unop.arg ); 3888 return r_dstHi; 3889 case Iop_D128LOtoD64: 3890 iselDfp128Expr( &r_dstHi, &r_dstLo, env, e->Iex.Unop.arg ); 3891 return r_dstLo; 3892 case Iop_InsertExpD64: { 3893 HReg fr_srcL = iselDblExpr(env, e->Iex.Binop.arg1); 3894 HReg fr_srcR = iselDblExpr(env, e->Iex.Binop.arg2); 3895 3896 addInstr(env, PPCInstr_Dfp64Binary(Pfp_DIEX, fr_dst, fr_srcL, 3897 fr_srcR)); 3898 return fr_dst; 3899 } 3900 default: 3901 vex_printf( "ERROR: iselDfp64Expr_wrk, UNKNOWN unop case %d\n", 3902 e->Iex.Unop.op ); 3903 } 3904 } 3905 
3906 if (e->tag == Iex_Binop) { 3907 3908 switch (e->Iex.Binop.op) { 3909 case Iop_D128toI64S: { 3910 PPCFpOp fpop = Pfp_DCTFIXQ; 3911 HReg fr_dst = newVRegF(env); 3912 HReg r_srcHi = newVRegF(env); 3913 HReg r_srcLo = newVRegF(env); 3914 3915 set_FPU_DFP_rounding_mode( env, e->Iex.Binop.arg1 ); 3916 iselDfp128Expr(&r_srcHi, &r_srcLo, env, e->Iex.Binop.arg2); 3917 addInstr(env, PPCInstr_DfpD128toD64(fpop, fr_dst, r_srcHi, r_srcLo)); 3918 return fr_dst; 3919 } 3920 case Iop_D128toD64: { 3921 PPCFpOp fpop = Pfp_DRDPQ; 3922 HReg fr_dst = newVRegF(env); 3923 HReg r_srcHi = newVRegF(env); 3924 HReg r_srcLo = newVRegF(env); 3925 3926 set_FPU_DFP_rounding_mode( env, e->Iex.Binop.arg1 ); 3927 iselDfp128Expr(&r_srcHi, &r_srcLo, env, e->Iex.Binop.arg2); 3928 addInstr(env, PPCInstr_DfpD128toD64(fpop, fr_dst, r_srcHi, r_srcLo)); 3929 return fr_dst; 3930 } 3931 break; 3932 default: 3933 break; 3934 } 3935 3936 if (e->Iex.Unop.op == Iop_RoundD64toInt) { 3937 HReg fr_dst = newVRegF(env); 3938 HReg fr_src = newVRegF(env); 3939 PPCRI* r_rmc = iselWordExpr_RI(env, e->Iex.Binop.arg1); 3940 3941 fr_src = iselDfp64Expr(env, e->Iex.Binop.arg2); 3942 addInstr(env, PPCInstr_DfpRound(fr_dst, fr_src, r_rmc)); 3943 return fr_dst; 3944 } 3945 } 3946 3947 if (e->tag == Iex_Binop) { 3948 PPCFpOp fpop = Pfp_INVALID; 3949 HReg fr_dst = newVRegF(env); 3950 3951 switch (e->Iex.Binop.op) { 3952 case Iop_D64toD32: fpop = Pfp_DRSP; break; 3953 case Iop_I64StoD64: fpop = Pfp_DCFFIX; break; 3954 case Iop_D64toI64S: fpop = Pfp_DCTFIX; break; 3955 default: break; 3956 } 3957 if (fpop != Pfp_INVALID) { 3958 HReg fr_src = iselDfp64Expr(env, e->Iex.Binop.arg2); 3959 set_FPU_DFP_rounding_mode( env, e->Iex.Binop.arg1 ); 3960 addInstr(env, PPCInstr_Dfp64Unary(fpop, fr_dst, fr_src)); 3961 return fr_dst; 3962 } 3963 3964 switch (e->Iex.Binop.op) { 3965 /* shift instructions F64, I32 -> F64 */ 3966 case Iop_ShlD64: fpop = Pfp_DSCLI; break; 3967 case Iop_ShrD64: fpop = Pfp_DSCRI; break; 3968 default: break; 3969 } 
3970 if (fpop != Pfp_INVALID) { 3971 HReg fr_src = iselDfp64Expr(env, e->Iex.Binop.arg1); 3972 PPCRI* shift = iselWordExpr_RI(env, e->Iex.Binop.arg2); 3973 3974 addInstr(env, PPCInstr_DfpShift(fpop, fr_dst, fr_src, shift)); 3975 return fr_dst; 3976 } 3977 3978 switch (e->Iex.Binop.op) { 3979 case Iop_InsertExpD64: 3980 fpop = Pfp_DIEX; 3981 break; 3982 default: break; 3983 } 3984 if (fpop != Pfp_INVALID) { 3985 HReg fr_srcL = iselDfp64Expr(env, e->Iex.Binop.arg1); 3986 HReg fr_srcR = iselDfp64Expr(env, e->Iex.Binop.arg2); 3987 addInstr(env, PPCInstr_Dfp64Binary(fpop, fr_dst, fr_srcL, fr_srcR)); 3988 return fr_dst; 3989 } 3990 } 3991 3992 if (e->tag == Iex_Triop) { 3993 IRTriop *triop = e->Iex.Triop.details; 3994 PPCFpOp fpop = Pfp_INVALID; 3995 3996 switch (triop->op) { 3997 case Iop_AddD64: 3998 fpop = Pfp_DFPADD; 3999 break; 4000 case Iop_SubD64: 4001 fpop = Pfp_DFPSUB; 4002 break; 4003 case Iop_MulD64: 4004 fpop = Pfp_DFPMUL; 4005 break; 4006 case Iop_DivD64: 4007 fpop = Pfp_DFPDIV; 4008 break; 4009 default: 4010 break; 4011 } 4012 if (fpop != Pfp_INVALID) { 4013 HReg r_dst = newVRegF( env ); 4014 HReg r_srcL = iselDfp64Expr( env, triop->arg2 ); 4015 HReg r_srcR = iselDfp64Expr( env, triop->arg3 ); 4016 4017 set_FPU_DFP_rounding_mode( env, triop->arg1 ); 4018 addInstr( env, PPCInstr_Dfp64Binary( fpop, r_dst, r_srcL, r_srcR ) ); 4019 return r_dst; 4020 } 4021 4022 switch (triop->op) { 4023 case Iop_QuantizeD64: fpop = Pfp_DQUA; break; 4024 case Iop_SignificanceRoundD64: fpop = Pfp_RRDTR; break; 4025 default: break; 4026 } 4027 if (fpop != Pfp_INVALID) { 4028 HReg r_dst = newVRegF(env); 4029 HReg r_srcL = iselDfp64Expr(env, triop->arg2); 4030 HReg r_srcR = iselDfp64Expr(env, triop->arg3); 4031 PPCRI* rmc = iselWordExpr_RI(env, triop->arg1); 4032 4033 addInstr(env, PPCInstr_DfpQuantize(fpop, r_dst, r_srcL, r_srcR, 4034 rmc)); 4035 return r_dst; 4036 } 4037 } 4038 4039 ppIRExpr( e ); 4040 vpanic( "iselDfp64Expr_wrk(ppc)" ); 4041} 4042 4043static void 
iselDfp128Expr(HReg* rHi, HReg* rLo, ISelEnv* env, IRExpr* e) 4044{ 4045 iselDfp128Expr_wrk( rHi, rLo, env, e ); 4046 vassert( hregIsVirtual(*rHi) ); 4047 vassert( hregIsVirtual(*rLo) ); 4048} 4049 4050/* DO NOT CALL THIS DIRECTLY */ 4051static void iselDfp128Expr_wrk(HReg* rHi, HReg *rLo, ISelEnv* env, IRExpr* e) 4052{ 4053 vassert( e ); 4054 vassert( typeOfIRExpr(env->type_env,e) == Ity_D128 ); 4055 4056 /* read 128-bit IRTemp */ 4057 if (e->tag == Iex_RdTmp) { 4058 lookupIRTempPair( rHi, rLo, env, e->Iex.RdTmp.tmp ); 4059 return; 4060 } 4061 4062 if (e->tag == Iex_Unop) { 4063 PPCFpOp fpop = Pfp_INVALID; 4064 HReg r_dstHi = newVRegF(env); 4065 HReg r_dstLo = newVRegF(env); 4066 4067 if (e->Iex.Unop.op == Iop_I64StoD128) { 4068 HReg r_src = iselDfp64Expr(env, e->Iex.Unop.arg); 4069 fpop = Pfp_DCFFIXQ; 4070 4071 addInstr(env, PPCInstr_DfpI64StoD128(fpop, r_dstHi, r_dstLo, 4072 r_src)); 4073 } 4074 if (e->Iex.Unop.op == Iop_D64toD128) { 4075 HReg r_src = iselDfp64Expr(env, e->Iex.Unop.arg); 4076 fpop = Pfp_DCTQPQ; 4077 4078 /* Source is 64bit result is 128 bit. High 64bit source arg, 4079 * is ignored by the instruction. Set high arg to r_src just 4080 * to meet the vassert tests. 
4081 */ 4082 addInstr(env, PPCInstr_Dfp128Unary(fpop, r_dstHi, r_dstLo, 4083 r_src, r_src)); 4084 } 4085 *rHi = r_dstHi; 4086 *rLo = r_dstLo; 4087 return; 4088 } 4089 4090 /* --------- OPS --------- */ 4091 if (e->tag == Iex_Binop) { 4092 HReg r_srcHi; 4093 HReg r_srcLo; 4094 4095 switch (e->Iex.Binop.op) { 4096 case Iop_D64HLtoD128: 4097 r_srcHi = iselDfp64Expr( env, e->Iex.Binop.arg1 ); 4098 r_srcLo = iselDfp64Expr( env, e->Iex.Binop.arg2 ); 4099 *rHi = r_srcHi; 4100 *rLo = r_srcLo; 4101 return; 4102 break; 4103 case Iop_D128toD64: { 4104 PPCFpOp fpop = Pfp_DRDPQ; 4105 HReg fr_dst = newVRegF(env); 4106 4107 set_FPU_rounding_mode( env, e->Iex.Binop.arg1 ); 4108 iselDfp128Expr(&r_srcHi, &r_srcLo, env, e->Iex.Binop.arg2); 4109 addInstr(env, PPCInstr_DfpD128toD64(fpop, fr_dst, r_srcHi, r_srcLo)); 4110 4111 /* Need to meet the interface spec but the result is 4112 * just 64-bits so send the result back in both halfs. 4113 */ 4114 *rHi = fr_dst; 4115 *rLo = fr_dst; 4116 return; 4117 } 4118 case Iop_ShlD128: 4119 case Iop_ShrD128: { 4120 HReg fr_dst_hi = newVRegF(env); 4121 HReg fr_dst_lo = newVRegF(env); 4122 PPCRI* shift = iselWordExpr_RI(env, e->Iex.Binop.arg2); 4123 PPCFpOp fpop = Pfp_DSCLIQ; /* fix later if necessary */ 4124 4125 iselDfp128Expr(&r_srcHi, &r_srcLo, env, e->Iex.Binop.arg1); 4126 4127 if (e->Iex.Binop.op == Iop_ShrD128) 4128 fpop = Pfp_DSCRIQ; 4129 4130 addInstr(env, PPCInstr_DfpShift128(fpop, fr_dst_hi, fr_dst_lo, 4131 r_srcHi, r_srcLo, shift)); 4132 4133 *rHi = fr_dst_hi; 4134 *rLo = fr_dst_lo; 4135 return; 4136 } 4137 case Iop_RoundD128toInt: { 4138 HReg r_dstHi = newVRegF(env); 4139 HReg r_dstLo = newVRegF(env); 4140 PPCRI* r_rmc = iselWordExpr_RI(env, e->Iex.Binop.arg1); 4141 4142 // will set R and RMC when issuing instruction 4143 iselDfp128Expr(&r_srcHi, &r_srcLo, env, e->Iex.Binop.arg2); 4144 4145 addInstr(env, PPCInstr_DfpRound128(r_dstHi, r_dstLo, 4146 r_srcHi, r_srcLo, r_rmc)); 4147 *rHi = r_dstHi; 4148 *rLo = r_dstLo; 4149 return; 4150 } 
4151 case Iop_InsertExpD128: { 4152 HReg r_dstHi = newVRegF(env); 4153 HReg r_dstLo = newVRegF(env); 4154 HReg r_srcL = newVRegF(env); 4155 4156 r_srcL = iselDfp64Expr(env, e->Iex.Binop.arg1); 4157 iselDfp128Expr(&r_srcHi, &r_srcLo, env, e->Iex.Binop.arg2); 4158 addInstr(env, PPCInstr_InsertExpD128(Pfp_DIEXQ, 4159 r_dstHi, r_dstLo, 4160 r_srcL, r_srcHi, r_srcLo)); 4161 *rHi = r_dstHi; 4162 *rLo = r_dstLo; 4163 return; 4164 } 4165 default: 4166 vex_printf( "ERROR: iselDfp128Expr_wrk, UNKNOWN binop case %d\n", 4167 e->Iex.Binop.op ); 4168 break; 4169 } 4170 } 4171 4172 if (e->tag == Iex_Triop) { 4173 IRTriop *triop = e->Iex.Triop.details; 4174 PPCFpOp fpop = Pfp_INVALID; 4175 switch (triop->op) { 4176 case Iop_AddD128: 4177 fpop = Pfp_DFPADDQ; 4178 break; 4179 case Iop_SubD128: 4180 fpop = Pfp_DFPSUBQ; 4181 break; 4182 case Iop_MulD128: 4183 fpop = Pfp_DFPMULQ; 4184 break; 4185 case Iop_DivD128: 4186 fpop = Pfp_DFPDIVQ; 4187 break; 4188 default: 4189 break; 4190 } 4191 4192 if (fpop != Pfp_INVALID) { 4193 HReg r_dstHi = newVRegV( env ); 4194 HReg r_dstLo = newVRegV( env ); 4195 HReg r_srcRHi = newVRegV( env ); 4196 HReg r_srcRLo = newVRegV( env ); 4197 4198 /* dst will be used to pass in the left operand and get the result. 
*/ 4199 iselDfp128Expr( &r_dstHi, &r_dstLo, env, triop->arg2 ); 4200 iselDfp128Expr( &r_srcRHi, &r_srcRLo, env, triop->arg3 ); 4201 set_FPU_rounding_mode( env, triop->arg1 ); 4202 addInstr( env, 4203 PPCInstr_Dfp128Binary( fpop, r_dstHi, r_dstLo, 4204 r_srcRHi, r_srcRLo ) ); 4205 *rHi = r_dstHi; 4206 *rLo = r_dstLo; 4207 return; 4208 } 4209 switch (triop->op) { 4210 case Iop_QuantizeD128: fpop = Pfp_DQUAQ; break; 4211 case Iop_SignificanceRoundD128: fpop = Pfp_DRRNDQ; break; 4212 default: break; 4213 } 4214 if (fpop != Pfp_INVALID) { 4215 HReg r_dstHi = newVRegF(env); 4216 HReg r_dstLo = newVRegF(env); 4217 HReg r_srcHi = newVRegF(env); 4218 HReg r_srcLo = newVRegF(env); 4219 PPCRI* rmc = iselWordExpr_RI(env, triop->arg1); 4220 4221 /* dst will be used to pass in the left operand and get the result */ 4222 iselDfp128Expr(&r_dstHi, &r_dstLo, env, triop->arg2); 4223 iselDfp128Expr(&r_srcHi, &r_srcLo, env, triop->arg3); 4224 4225 // will set RMC when issuing instruction 4226 addInstr(env, PPCInstr_DfpQuantize128(fpop, r_dstHi, r_dstLo, 4227 r_srcHi, r_srcLo, rmc)); 4228 *rHi = r_dstHi; 4229 *rLo = r_dstLo; 4230 return; 4231 } 4232 } 4233 4234 ppIRExpr( e ); 4235 vpanic( "iselDfp128Expr(ppc64)" ); 4236} 4237 4238 4239/*---------------------------------------------------------*/ 4240/*--- ISEL: SIMD (Vector) expressions, 128 bit. 
 ---*/
/*---------------------------------------------------------*/

/* Compute a V128 expression into a virtual AltiVec register,
   sanity-checking the result's register class. */
static HReg iselVecExpr ( ISelEnv* env, IRExpr* e )
{
   HReg r = iselVecExpr_wrk( env, e );
#  if 0
   vex_printf("\n"); ppIRExpr(e); vex_printf("\n");
#  endif
   vassert(hregClass(r) == HRcVec128);
   vassert(hregIsVirtual(r));
   return r;
}

/* DO NOT CALL THIS DIRECTLY */
static HReg iselVecExpr_wrk ( ISelEnv* env, IRExpr* e )
{
   Bool mode64 = env->mode64;
   PPCAvOp op = Pav_INVALID;
   PPCAvFpOp fpop = Pavfp_INVALID;
   IRType ty = typeOfIRExpr(env->type_env,e);
   vassert(e);
   vassert(ty == Ity_V128);

   /* Temp: just hand back the vreg already bound to it. */
   if (e->tag == Iex_RdTmp) {
      return lookupIRTemp(env, e->Iex.RdTmp.tmp);
   }

   if (e->tag == Iex_Get) {
      /* Guest state vectors are 16byte aligned,
         so don't need to worry here */
      HReg dst = newVRegV(env);
      addInstr(env,
               PPCInstr_AvLdSt( True/*load*/, 16, dst,
                                PPCAMode_IR( e->Iex.Get.offset,
                                             GuestStatePtr(mode64) )));
      return dst;
   }

   /* 16-byte big-endian load. */
   if (e->tag == Iex_Load && e->Iex.Load.end == Iend_BE) {
      PPCAMode* am_addr;
      HReg v_dst = newVRegV(env);
      vassert(e->Iex.Load.ty == Ity_V128);
      am_addr = iselWordExpr_AMode(env, e->Iex.Load.addr, Ity_V128/*xfer*/);
      addInstr(env, PPCInstr_AvLdSt( True/*load*/, 16, v_dst, am_addr));
      return v_dst;
   }

   if (e->tag == Iex_Unop) {
      switch (e->Iex.Unop.op) {

      case Iop_NotV128: {
         HReg arg = iselVecExpr(env, e->Iex.Unop.arg);
         HReg dst = newVRegV(env);
         addInstr(env, PPCInstr_AvUnary(Pav_NOT, dst, arg));
         return dst;
      }

      /* The three CmpNEZ cases compute lane-wise (x != 0) as
         NOT(x == 0): build a zero vector with XOR, compare-equal
         against it, then invert the result. */
      case Iop_CmpNEZ8x16: {
         HReg arg  = iselVecExpr(env, e->Iex.Unop.arg);
         HReg zero = newVRegV(env);
         HReg dst  = newVRegV(env);
         addInstr(env, PPCInstr_AvBinary(Pav_XOR, zero, zero, zero));
         addInstr(env, PPCInstr_AvBin8x16(Pav_CMPEQU, dst, arg, zero));
         addInstr(env, PPCInstr_AvUnary(Pav_NOT, dst, dst));
         return dst;
      }

      case Iop_CmpNEZ16x8: {
         HReg arg  = iselVecExpr(env, e->Iex.Unop.arg);
         HReg zero = newVRegV(env);
         HReg dst  = newVRegV(env);
         addInstr(env, PPCInstr_AvBinary(Pav_XOR, zero, zero, zero));
         addInstr(env, PPCInstr_AvBin16x8(Pav_CMPEQU, dst, arg, zero));
         addInstr(env, PPCInstr_AvUnary(Pav_NOT, dst, dst));
         return dst;
      }

      case Iop_CmpNEZ32x4: {
         HReg arg  = iselVecExpr(env, e->Iex.Unop.arg);
         HReg zero = newVRegV(env);
         HReg dst  = newVRegV(env);
         addInstr(env, PPCInstr_AvBinary(Pav_XOR, zero, zero, zero));
         addInstr(env, PPCInstr_AvBin32x4(Pav_CMPEQU, dst, arg, zero));
         addInstr(env, PPCInstr_AvUnary(Pav_NOT, dst, dst));
         return dst;
      }

      /* Single-operand 4x32 FP ops, all sharing one emit path. */
      case Iop_Recip32Fx4:    fpop = Pavfp_RCPF;    goto do_32Fx4_unary;
      case Iop_RSqrt32Fx4:    fpop = Pavfp_RSQRTF;  goto do_32Fx4_unary;
      case Iop_I32UtoFx4:     fpop = Pavfp_CVTU2F;  goto do_32Fx4_unary;
      case Iop_I32StoFx4:     fpop = Pavfp_CVTS2F;  goto do_32Fx4_unary;
      case Iop_QFtoI32Ux4_RZ: fpop = Pavfp_QCVTF2U; goto do_32Fx4_unary;
      case Iop_QFtoI32Sx4_RZ: fpop = Pavfp_QCVTF2S; goto do_32Fx4_unary;
      case Iop_RoundF32x4_RM: fpop = Pavfp_ROUNDM;  goto do_32Fx4_unary;
      case Iop_RoundF32x4_RP: fpop = Pavfp_ROUNDP;  goto do_32Fx4_unary;
      case Iop_RoundF32x4_RN: fpop = Pavfp_ROUNDN;  goto do_32Fx4_unary;
      case Iop_RoundF32x4_RZ: fpop = Pavfp_ROUNDZ;  goto do_32Fx4_unary;
      do_32Fx4_unary:
      {
         HReg arg = iselVecExpr(env, e->Iex.Unop.arg);
         HReg dst = newVRegV(env);
         addInstr(env, PPCInstr_AvUn32Fx4(fpop, dst, arg));
         return dst;
      }

      /* Zero-extend a 32-bit GPR into the low lane of a vector,
         going via a 16-byte aligned stack slot. */
      case Iop_32UtoV128: {
         HReg r_aligned16, r_zeros;
         HReg r_src = iselWordExpr_R(env, e->Iex.Unop.arg);
         HReg dst = newVRegV(env);
         PPCAMode *am_off0, *am_off4, *am_off8, *am_off12;
         sub_from_sp( env, 32 );     // Move SP down

         /* Get a quadword aligned address within our stack space */
         r_aligned16 = get_sp_aligned16( env );
         am_off0  = PPCAMode_IR( 0,  r_aligned16 );
         am_off4  = PPCAMode_IR( 4,  r_aligned16 );
         am_off8  = PPCAMode_IR( 8,  r_aligned16 );
         am_off12 = PPCAMode_IR( 12, r_aligned16 );

         /* Store zeros */
         r_zeros = newVRegI(env);
         addInstr(env, PPCInstr_LI(r_zeros, 0x0, mode64));
         addInstr(env, PPCInstr_Store( 4, am_off0, r_zeros, mode64 ));
         addInstr(env, PPCInstr_Store( 4, am_off4, r_zeros, mode64 ));
         addInstr(env, PPCInstr_Store( 4, am_off8, r_zeros, mode64 ));

         /* Store r_src in low word of quadword-aligned mem */
         addInstr(env, PPCInstr_Store( 4, am_off12, r_src, mode64 ));

         /* Load word into low word of quadword vector reg */
         addInstr(env, PPCInstr_AvLdSt( True/*ld*/, 4, dst, am_off12 ));

         add_to_sp( env, 32 );       // Reset SP
         return dst;
      }

      case Iop_Dup8x16:
      case Iop_Dup16x8:
      case Iop_Dup32x4:
         return mk_AvDuplicateRI(env, e->Iex.Unop.arg);

      default:
         break;
      } /* switch (e->Iex.Unop.op) */
   } /* if (e->tag == Iex_Unop) */

   if (e->tag == Iex_Binop) {
      switch (e->Iex.Binop.op) {

      /* Glue two 64-bit halves into one vector, via the stack. */
      case Iop_64HLtoV128: {
         if (!mode64) {
            /* 32-bit mode: the I64 halves are themselves GPR pairs,
               so four 4-byte stores are needed. */
            HReg r3, r2, r1, r0, r_aligned16;
            PPCAMode *am_off0, *am_off4, *am_off8, *am_off12;
            HReg dst = newVRegV(env);
            /* do this via the stack (easy, convenient, etc) */
            sub_from_sp( env, 32 );        // Move SP down

            // get a quadword aligned address within our stack space
            r_aligned16 = get_sp_aligned16( env );
            am_off0  = PPCAMode_IR( 0,  r_aligned16 );
            am_off4  = PPCAMode_IR( 4,  r_aligned16 );
            am_off8  = PPCAMode_IR( 8,  r_aligned16 );
            am_off12 = PPCAMode_IR( 12, r_aligned16 );

            /* Do the less significant 64 bits */
            iselInt64Expr(&r1, &r0, env, e->Iex.Binop.arg2);
            addInstr(env, PPCInstr_Store( 4, am_off12, r0, mode64 ));
            addInstr(env, PPCInstr_Store( 4, am_off8,  r1, mode64 ));
            /* Do the more significant 64 bits */
            iselInt64Expr(&r3, &r2, env, e->Iex.Binop.arg1);
            addInstr(env, PPCInstr_Store( 4, am_off4, r2, mode64 ));
            addInstr(env, PPCInstr_Store( 4, am_off0, r3, mode64 ));

            /* Fetch result back from stack. */
            addInstr(env, PPCInstr_AvLdSt(True/*ld*/, 16, dst, am_off0));

            add_to_sp( env, 32 );          // Reset SP
            return dst;
         } else {
            /* 64-bit mode: two 8-byte GPR stores suffice. */
            HReg rHi = iselWordExpr_R(env, e->Iex.Binop.arg1);
            HReg rLo = iselWordExpr_R(env, e->Iex.Binop.arg2);
            HReg dst = newVRegV(env);
            HReg r_aligned16;
            PPCAMode *am_off0, *am_off8;
            /* do this via the stack (easy, convenient, etc) */
            sub_from_sp( env, 32 );        // Move SP down

            // get a quadword aligned address within our stack space
            r_aligned16 = get_sp_aligned16( env );
            am_off0 = PPCAMode_IR( 0, r_aligned16 );
            am_off8 = PPCAMode_IR( 8, r_aligned16 );

            /* Store 2*I64 to stack */
            addInstr(env, PPCInstr_Store( 8, am_off0, rHi, mode64 ));
            addInstr(env, PPCInstr_Store( 8, am_off8, rLo, mode64 ));

            /* Fetch result back from stack. */
            addInstr(env, PPCInstr_AvLdSt(True/*ld*/, 16, dst, am_off0));

            add_to_sp( env, 32 );          // Reset SP
            return dst;
         }
      }

      /* Two-operand 4x32 FP ops, shared emit path. */
      case Iop_Add32Fx4:   fpop = Pavfp_ADDF;   goto do_32Fx4;
      case Iop_Sub32Fx4:   fpop = Pavfp_SUBF;   goto do_32Fx4;
      case Iop_Max32Fx4:   fpop = Pavfp_MAXF;   goto do_32Fx4;
      case Iop_Min32Fx4:   fpop = Pavfp_MINF;   goto do_32Fx4;
      case Iop_Mul32Fx4:   fpop = Pavfp_MULF;   goto do_32Fx4;
      case Iop_CmpEQ32Fx4: fpop = Pavfp_CMPEQF; goto do_32Fx4;
      case Iop_CmpGT32Fx4: fpop = Pavfp_CMPGTF; goto do_32Fx4;
      case Iop_CmpGE32Fx4: fpop = Pavfp_CMPGEF; goto do_32Fx4;
      do_32Fx4:
      {
         HReg argL = iselVecExpr(env, e->Iex.Binop.arg1);
         HReg argR = iselVecExpr(env, e->Iex.Binop.arg2);
         HReg dst = newVRegV(env);
         addInstr(env, PPCInstr_AvBin32Fx4(fpop, dst, argL, argR));
         return dst;
      }

      case Iop_CmpLE32Fx4: {
         HReg argL = iselVecExpr(env, e->Iex.Binop.arg1);
         HReg argR = iselVecExpr(env, e->Iex.Binop.arg2);
         HReg dst = newVRegV(env);

         /* stay consistent with native ppc compares:
            if a left/right lane holds a nan, return zeros for that lane
            so: le == NOT(gt OR isNan)
          */
         HReg isNanLR = newVRegV(env);
         HReg isNanL = isNan(env, argL);
         HReg isNanR = isNan(env, argR);
         addInstr(env, PPCInstr_AvBinary(Pav_OR, isNanLR,
                                         isNanL, isNanR));

         addInstr(env, PPCInstr_AvBin32Fx4(Pavfp_CMPGTF, dst,
                                           argL, argR));
         addInstr(env, PPCInstr_AvBinary(Pav_OR, dst, dst, isNanLR));
         addInstr(env, PPCInstr_AvUnary(Pav_NOT, dst, dst));
         return dst;
      }

      /* Whole-vector bitwise ops. */
      case Iop_AndV128: op = Pav_AND; goto do_AvBin;
      case Iop_OrV128:  op = Pav_OR;  goto do_AvBin;
      case Iop_XorV128: op = Pav_XOR; goto do_AvBin;
      do_AvBin: {
         HReg arg1 = iselVecExpr(env, e->Iex.Binop.arg1);
         HReg arg2 = iselVecExpr(env, e->Iex.Binop.arg2);
         HReg dst  = newVRegV(env);
         addInstr(env, PPCInstr_AvBinary(op, dst, arg1, arg2));
         return dst;
      }

      /* 16-lane (8-bit) integer binary ops. */
      case Iop_Shl8x16:    op = Pav_SHL;    goto do_AvBin8x16;
      case Iop_Shr8x16:    op = Pav_SHR;    goto do_AvBin8x16;
      case Iop_Sar8x16:    op = Pav_SAR;    goto do_AvBin8x16;
      case Iop_Rol8x16:    op = Pav_ROTL;   goto do_AvBin8x16;
      case Iop_InterleaveHI8x16: op = Pav_MRGHI;  goto do_AvBin8x16;
      case Iop_InterleaveLO8x16: op = Pav_MRGLO;  goto do_AvBin8x16;
      case Iop_Add8x16:    op = Pav_ADDU;   goto do_AvBin8x16;
      case Iop_QAdd8Ux16:  op = Pav_QADDU;  goto do_AvBin8x16;
      case Iop_QAdd8Sx16:  op = Pav_QADDS;  goto do_AvBin8x16;
      case Iop_Sub8x16:    op = Pav_SUBU;   goto do_AvBin8x16;
      case Iop_QSub8Ux16:  op = Pav_QSUBU;  goto do_AvBin8x16;
      case Iop_QSub8Sx16:  op = Pav_QSUBS;  goto do_AvBin8x16;
      case Iop_Avg8Ux16:   op = Pav_AVGU;   goto do_AvBin8x16;
      case Iop_Avg8Sx16:   op = Pav_AVGS;   goto do_AvBin8x16;
      case Iop_Max8Ux16:   op = Pav_MAXU;   goto do_AvBin8x16;
      case Iop_Max8Sx16:   op = Pav_MAXS;   goto do_AvBin8x16;
      case Iop_Min8Ux16:   op = Pav_MINU;   goto do_AvBin8x16;
      case Iop_Min8Sx16:   op = Pav_MINS;   goto do_AvBin8x16;
      case Iop_MullEven8Ux16: op = Pav_OMULU;  goto do_AvBin8x16;
      case Iop_MullEven8Sx16: op = Pav_OMULS;  goto do_AvBin8x16;
      case Iop_CmpEQ8x16:  op = Pav_CMPEQU; goto do_AvBin8x16;
      case Iop_CmpGT8Ux16: op = Pav_CMPGTU; goto do_AvBin8x16;
      case Iop_CmpGT8Sx16: op = Pav_CMPGTS; goto do_AvBin8x16;
      do_AvBin8x16: {
         HReg arg1 = iselVecExpr(env, e->Iex.Binop.arg1);
         HReg arg2 = iselVecExpr(env, e->Iex.Binop.arg2);
         HReg dst  = newVRegV(env);
         addInstr(env, PPCInstr_AvBin8x16(op, dst, arg1, arg2));
         return dst;
      }

      /* 8-lane (16-bit) integer binary ops. */
      case Iop_Shl16x8:    op = Pav_SHL;    goto do_AvBin16x8;
      case Iop_Shr16x8:    op = Pav_SHR;    goto do_AvBin16x8;
      case Iop_Sar16x8:    op = Pav_SAR;    goto do_AvBin16x8;
      case Iop_Rol16x8:    op = Pav_ROTL;   goto do_AvBin16x8;
      case Iop_NarrowBin16to8x16:    op = Pav_PACKUU;  goto do_AvBin16x8;
      case Iop_QNarrowBin16Uto8Ux16: op = Pav_QPACKUU; goto do_AvBin16x8;
      case Iop_QNarrowBin16Sto8Sx16: op = Pav_QPACKSS; goto do_AvBin16x8;
      case Iop_InterleaveHI16x8:  op = Pav_MRGHI;  goto do_AvBin16x8;
      case Iop_InterleaveLO16x8:  op = Pav_MRGLO;  goto do_AvBin16x8;
      case Iop_Add16x8:    op = Pav_ADDU;   goto do_AvBin16x8;
      case Iop_QAdd16Ux8:  op = Pav_QADDU;  goto do_AvBin16x8;
      case Iop_QAdd16Sx8:  op = Pav_QADDS;  goto do_AvBin16x8;
      case Iop_Sub16x8:    op = Pav_SUBU;   goto do_AvBin16x8;
      case Iop_QSub16Ux8:  op = Pav_QSUBU;  goto do_AvBin16x8;
      case Iop_QSub16Sx8:  op = Pav_QSUBS;  goto do_AvBin16x8;
      case Iop_Avg16Ux8:   op = Pav_AVGU;   goto do_AvBin16x8;
      case Iop_Avg16Sx8:   op = Pav_AVGS;   goto do_AvBin16x8;
      case Iop_Max16Ux8:   op = Pav_MAXU;   goto do_AvBin16x8;
      case Iop_Max16Sx8:   op = Pav_MAXS;   goto do_AvBin16x8;
      case Iop_Min16Ux8:   op = Pav_MINU;   goto do_AvBin16x8;
      case Iop_Min16Sx8:   op = Pav_MINS;   goto do_AvBin16x8;
      case Iop_MullEven16Ux8: op = Pav_OMULU;  goto do_AvBin16x8;
      case Iop_MullEven16Sx8: op = Pav_OMULS;  goto do_AvBin16x8;
      case Iop_CmpEQ16x8:  op = Pav_CMPEQU; goto do_AvBin16x8;
      case Iop_CmpGT16Ux8: op = Pav_CMPGTU; goto do_AvBin16x8;
      case Iop_CmpGT16Sx8: op = Pav_CMPGTS; goto do_AvBin16x8;
      do_AvBin16x8: {
         HReg arg1 = iselVecExpr(env, e->Iex.Binop.arg1);
         HReg arg2 = iselVecExpr(env, e->Iex.Binop.arg2);
         HReg dst  = newVRegV(env);
         addInstr(env, PPCInstr_AvBin16x8(op, dst, arg1, arg2));
         return dst;
      }

      /* 4-lane (32-bit) integer binary ops. */
      case Iop_Shl32x4:    op = Pav_SHL;    goto do_AvBin32x4;
      case Iop_Shr32x4:    op = Pav_SHR;    goto do_AvBin32x4;
      case Iop_Sar32x4:    op = Pav_SAR;    goto do_AvBin32x4;
      case Iop_Rol32x4:    op = Pav_ROTL;   goto do_AvBin32x4;
      case Iop_NarrowBin32to16x8:    op = Pav_PACKUU;  goto do_AvBin32x4;
      case Iop_QNarrowBin32Uto16Ux8: op = Pav_QPACKUU; goto do_AvBin32x4;
      case Iop_QNarrowBin32Sto16Sx8: op = Pav_QPACKSS; goto do_AvBin32x4;
      case Iop_InterleaveHI32x4:  op = Pav_MRGHI;  goto do_AvBin32x4;
      case Iop_InterleaveLO32x4:  op = Pav_MRGLO;  goto do_AvBin32x4;
      case Iop_Add32x4:    op = Pav_ADDU;   goto do_AvBin32x4;
      case Iop_QAdd32Ux4:  op = Pav_QADDU;  goto do_AvBin32x4;
      case Iop_QAdd32Sx4:  op = Pav_QADDS;  goto do_AvBin32x4;
      case Iop_Sub32x4:    op = Pav_SUBU;   goto do_AvBin32x4;
      case Iop_QSub32Ux4:  op = Pav_QSUBU;  goto do_AvBin32x4;
      case Iop_QSub32Sx4:  op = Pav_QSUBS;  goto do_AvBin32x4;
      case Iop_Avg32Ux4:   op = Pav_AVGU;   goto do_AvBin32x4;
      case Iop_Avg32Sx4:   op = Pav_AVGS;   goto do_AvBin32x4;
      case Iop_Max32Ux4:   op = Pav_MAXU;   goto do_AvBin32x4;
      case Iop_Max32Sx4:   op = Pav_MAXS;   goto do_AvBin32x4;
      case Iop_Min32Ux4:   op = Pav_MINU;   goto do_AvBin32x4;
      case Iop_Min32Sx4:   op = Pav_MINS;   goto do_AvBin32x4;
      case Iop_CmpEQ32x4:  op = Pav_CMPEQU; goto do_AvBin32x4;
      case Iop_CmpGT32Ux4: op = Pav_CMPGTU; goto do_AvBin32x4;
      case Iop_CmpGT32Sx4: op = Pav_CMPGTS; goto do_AvBin32x4;
      do_AvBin32x4: {
         HReg arg1 = iselVecExpr(env, e->Iex.Binop.arg1);
         HReg arg2 = iselVecExpr(env, e->Iex.Binop.arg2);
         HReg dst  = newVRegV(env);
         addInstr(env, PPCInstr_AvBin32x4(op, dst, arg1, arg2));
         return dst;
      }

      /* Vector shift by an immediate/GPR amount: the scalar shift
         count is first splatted across all lanes. */
      /* NOTE(review): there is no Iop_ShrN8x16 case here, although
         the 16x8 and 32x4 groups do handle SHR — presumably that op
         never reaches this backend; confirm. */
      case Iop_ShlN8x16: op = Pav_SHL; goto do_AvShift8x16;
      case Iop_SarN8x16: op = Pav_SAR; goto do_AvShift8x16;
      do_AvShift8x16: {
         HReg r_src  = iselVecExpr(env, e->Iex.Binop.arg1);
         HReg dst    = newVRegV(env);
         HReg v_shft = mk_AvDuplicateRI(env, e->Iex.Binop.arg2);
         addInstr(env, PPCInstr_AvBin8x16(op, dst, r_src, v_shft));
         return dst;
      }

      case Iop_ShlN16x8: op = Pav_SHL; goto do_AvShift16x8;
      case Iop_ShrN16x8: op = Pav_SHR; goto do_AvShift16x8;
      case Iop_SarN16x8: op = Pav_SAR; goto do_AvShift16x8;
      do_AvShift16x8: {
         HReg r_src  = iselVecExpr(env, e->Iex.Binop.arg1);
         HReg dst    = newVRegV(env);
         HReg v_shft = mk_AvDuplicateRI(env, e->Iex.Binop.arg2);
         addInstr(env, PPCInstr_AvBin16x8(op, dst, r_src, v_shft));
         return dst;
      }

      case Iop_ShlN32x4: op = Pav_SHL; goto do_AvShift32x4;
      case Iop_ShrN32x4: op = Pav_SHR; goto do_AvShift32x4;
      case Iop_SarN32x4: op = Pav_SAR; goto do_AvShift32x4;
      do_AvShift32x4: {
         HReg r_src  = iselVecExpr(env, e->Iex.Binop.arg1);
         HReg dst    = newVRegV(env);
         HReg v_shft = mk_AvDuplicateRI(env, e->Iex.Binop.arg2);
         addInstr(env, PPCInstr_AvBin32x4(op, dst, r_src, v_shft));
         return dst;
      }

      case Iop_ShrV128: op = Pav_SHR; goto do_AvShiftV128;
      case Iop_ShlV128: op = Pav_SHL; goto do_AvShiftV128;
      do_AvShiftV128: {
         HReg dst    = newVRegV(env);
         HReg r_src  = iselVecExpr(env, e->Iex.Binop.arg1);
         HReg v_shft = mk_AvDuplicateRI(env, e->Iex.Binop.arg2);
         /* Note: shift value gets masked by 127 */
         addInstr(env, PPCInstr_AvBinary(op, dst, r_src, v_shft));
         return dst;
      }

      case Iop_Perm8x16: {
         HReg dst   = newVRegV(env);
         HReg v_src = iselVecExpr(env, e->Iex.Binop.arg1);
         HReg v_ctl = iselVecExpr(env, e->Iex.Binop.arg2);
         addInstr(env, PPCInstr_AvPerm(dst, v_src, v_src, v_ctl));
         return dst;
      }

      default:
         break;
      } /* switch (e->Iex.Binop.op) */
   } /* if (e->tag == Iex_Binop) */

   /* Only the all-zeroes and all-ones V128 constants are handled
      (Ico_V128 is a 16-bit mask, one bit per byte). */
   if (e->tag == Iex_Const ) {
      vassert(e->Iex.Const.con->tag == Ico_V128);
      if (e->Iex.Const.con->Ico.V128 == 0x0000) {
         return generate_zeroes_V128(env);
      }
      else if (e->Iex.Const.con->Ico.V128 == 0xffff) {
         return generate_ones_V128(env);
      }
   }

   vex_printf("iselVecExpr(ppc) (subarch = %s): can't reduce\n",
              LibVEX_ppVexHwCaps(mode64 ? VexArchPPC64 : VexArchPPC32,
                                 env->hwcaps));
   ppIRExpr(e);
   vpanic("iselVecExpr_wrk(ppc)");
}


/*---------------------------------------------------------*/
/*--- ISEL: Statements                                  ---*/
/*---------------------------------------------------------*/

/* Select instructions for one IR statement.  Falls off the end (to
   the code past this visible chunk) for unhandled cases. */
static void iselStmt ( ISelEnv* env, IRStmt* stmt )
{
   Bool mode64 = env->mode64;
   if (vex_traceflags & VEX_TRACE_VCODE) {
      vex_printf("\n -- ");
      ppIRStmt(stmt);
      vex_printf("\n");
   }

   switch (stmt->tag) {

   /* --------- STORE --------- */
   case Ist_Store: {
      IRType    tya = typeOfIRExpr(env->type_env, stmt->Ist.Store.addr);
      IRType    tyd = typeOfIRExpr(env->type_env, stmt->Ist.Store.data);
      IREndness end = stmt->Ist.Store.end;

      /* Only big-endian stores with a native-word address type are
         handled. */
      if (end != Iend_BE)
         goto stmt_fail;
      if (!mode64 && (tya != Ity_I32))
         goto stmt_fail;
      if (mode64 && (tya != Ity_I64))
         goto stmt_fail;

      if (tyd == Ity_I8 || tyd == Ity_I16 || tyd == Ity_I32 ||
          (mode64 && (tyd == Ity_I64))) {
         PPCAMode* am_addr
            = iselWordExpr_AMode(env, stmt->Ist.Store.addr, tyd/*of xfer*/);
         HReg r_src = iselWordExpr_R(env, stmt->Ist.Store.data);
         addInstr(env, PPCInstr_Store( toUChar(sizeofIRType(tyd)),
                                       am_addr, r_src, mode64 ));
         return;
      }
      if (tyd == Ity_F64) {
         PPCAMode* am_addr
            = iselWordExpr_AMode(env, stmt->Ist.Store.addr, tyd/*of xfer*/);
         HReg fr_src = iselDblExpr(env, stmt->Ist.Store.data);
         addInstr(env,
                  PPCInstr_FpLdSt(False/*store*/, 8, fr_src, am_addr));
         return;
      }
      if (tyd == Ity_F32) {
         PPCAMode* am_addr
            = iselWordExpr_AMode(env, stmt->Ist.Store.addr, tyd/*of xfer*/);
         HReg fr_src = iselFltExpr(env, stmt->Ist.Store.data);
         addInstr(env,
                  PPCInstr_FpLdSt(False/*store*/, 4, fr_src, am_addr));
         return;
      }
      if (tyd == Ity_V128) {
         PPCAMode* am_addr
            = iselWordExpr_AMode(env, stmt->Ist.Store.addr, tyd/*of xfer*/);
4722 HReg v_src = iselVecExpr(env, stmt->Ist.Store.data); 4723 addInstr(env, 4724 PPCInstr_AvLdSt(False/*store*/, 16, v_src, am_addr)); 4725 return; 4726 } 4727 if (tyd == Ity_I64 && !mode64) { 4728 /* Just calculate the address in the register. Life is too 4729 short to arse around trying and possibly failing to adjust 4730 the offset in a 'reg+offset' style amode. */ 4731 HReg rHi32, rLo32; 4732 HReg r_addr = iselWordExpr_R(env, stmt->Ist.Store.addr); 4733 iselInt64Expr( &rHi32, &rLo32, env, stmt->Ist.Store.data ); 4734 addInstr(env, PPCInstr_Store( 4/*byte-store*/, 4735 PPCAMode_IR( 0, r_addr ), 4736 rHi32, 4737 False/*32-bit insn please*/) ); 4738 addInstr(env, PPCInstr_Store( 4/*byte-store*/, 4739 PPCAMode_IR( 4, r_addr ), 4740 rLo32, 4741 False/*32-bit insn please*/) ); 4742 return; 4743 } 4744 break; 4745 } 4746 4747 /* --------- PUT --------- */ 4748 case Ist_Put: { 4749 IRType ty = typeOfIRExpr(env->type_env, stmt->Ist.Put.data); 4750 if (ty == Ity_I8 || ty == Ity_I16 || 4751 ty == Ity_I32 || ((ty == Ity_I64) && mode64)) { 4752 HReg r_src = iselWordExpr_R(env, stmt->Ist.Put.data); 4753 PPCAMode* am_addr = PPCAMode_IR( stmt->Ist.Put.offset, 4754 GuestStatePtr(mode64) ); 4755 addInstr(env, PPCInstr_Store( toUChar(sizeofIRType(ty)), 4756 am_addr, r_src, mode64 )); 4757 return; 4758 } 4759 if (!mode64 && ty == Ity_I64) { 4760 HReg rHi, rLo; 4761 PPCAMode* am_addr = PPCAMode_IR( stmt->Ist.Put.offset, 4762 GuestStatePtr(mode64) ); 4763 PPCAMode* am_addr4 = advance4(env, am_addr); 4764 iselInt64Expr(&rHi,&rLo, env, stmt->Ist.Put.data); 4765 addInstr(env, PPCInstr_Store( 4, am_addr, rHi, mode64 )); 4766 addInstr(env, PPCInstr_Store( 4, am_addr4, rLo, mode64 )); 4767 return; 4768 } 4769 if (ty == Ity_V128) { 4770 /* Guest state vectors are 16byte aligned, 4771 so don't need to worry here */ 4772 HReg v_src = iselVecExpr(env, stmt->Ist.Put.data); 4773 PPCAMode* am_addr = PPCAMode_IR( stmt->Ist.Put.offset, 4774 GuestStatePtr(mode64) ); 4775 addInstr(env, 4776 
PPCInstr_AvLdSt(False/*store*/, 16, v_src, am_addr)); 4777 return; 4778 } 4779 if (ty == Ity_F64) { 4780 HReg fr_src = iselDblExpr(env, stmt->Ist.Put.data); 4781 PPCAMode* am_addr = PPCAMode_IR( stmt->Ist.Put.offset, 4782 GuestStatePtr(mode64) ); 4783 addInstr(env, PPCInstr_FpLdSt( False/*store*/, 8, 4784 fr_src, am_addr )); 4785 return; 4786 } 4787 if (ty == Ity_D64) { 4788 HReg fr_src = iselDfp64Expr( env, stmt->Ist.Put.data ); 4789 PPCAMode* am_addr = PPCAMode_IR( stmt->Ist.Put.offset, 4790 GuestStatePtr(mode64) ); 4791 addInstr( env, PPCInstr_FpLdSt( False/*store*/, 8, fr_src, am_addr ) ); 4792 return; 4793 } 4794 break; 4795 } 4796 4797 /* --------- Indexed PUT --------- */ 4798 case Ist_PutI: { 4799 IRPutI *puti = stmt->Ist.PutI.details; 4800 4801 PPCAMode* dst_am 4802 = genGuestArrayOffset( 4803 env, puti->descr, 4804 puti->ix, puti->bias ); 4805 IRType ty = typeOfIRExpr(env->type_env, puti->data); 4806 if (mode64 && ty == Ity_I64) { 4807 HReg r_src = iselWordExpr_R(env, puti->data); 4808 addInstr(env, PPCInstr_Store( toUChar(8), 4809 dst_am, r_src, mode64 )); 4810 return; 4811 } 4812 if ((!mode64) && ty == Ity_I32) { 4813 HReg r_src = iselWordExpr_R(env, puti->data); 4814 addInstr(env, PPCInstr_Store( toUChar(4), 4815 dst_am, r_src, mode64 )); 4816 return; 4817 } 4818 break; 4819 } 4820 4821 /* --------- TMP --------- */ 4822 case Ist_WrTmp: { 4823 IRTemp tmp = stmt->Ist.WrTmp.tmp; 4824 IRType ty = typeOfIRTemp(env->type_env, tmp); 4825 if (ty == Ity_I8 || ty == Ity_I16 || 4826 ty == Ity_I32 || ((ty == Ity_I64) && mode64)) { 4827 HReg r_dst = lookupIRTemp(env, tmp); 4828 HReg r_src = iselWordExpr_R(env, stmt->Ist.WrTmp.data); 4829 addInstr(env, mk_iMOVds_RR( r_dst, r_src )); 4830 return; 4831 } 4832 if (!mode64 && ty == Ity_I64) { 4833 HReg r_srcHi, r_srcLo, r_dstHi, r_dstLo; 4834 4835 iselInt64Expr(&r_srcHi,&r_srcLo, env, stmt->Ist.WrTmp.data); 4836 lookupIRTempPair( &r_dstHi, &r_dstLo, env, tmp); 4837 addInstr(env, mk_iMOVds_RR(r_dstHi, r_srcHi) ); 4838 
addInstr(env, mk_iMOVds_RR(r_dstLo, r_srcLo) ); 4839 return; 4840 } 4841 if (mode64 && ty == Ity_I128) { 4842 HReg r_srcHi, r_srcLo, r_dstHi, r_dstLo; 4843 iselInt128Expr(&r_srcHi,&r_srcLo, env, stmt->Ist.WrTmp.data); 4844 lookupIRTempPair( &r_dstHi, &r_dstLo, env, tmp); 4845 addInstr(env, mk_iMOVds_RR(r_dstHi, r_srcHi) ); 4846 addInstr(env, mk_iMOVds_RR(r_dstLo, r_srcLo) ); 4847 return; 4848 } 4849 if (!mode64 && ty == Ity_I128) { 4850 HReg r_srcHi, r_srcMedHi, r_srcMedLo, r_srcLo; 4851 HReg r_dstHi, r_dstMedHi, r_dstMedLo, r_dstLo; 4852 4853 iselInt128Expr_to_32x4(&r_srcHi, &r_srcMedHi, 4854 &r_srcMedLo, &r_srcLo, 4855 env, stmt->Ist.WrTmp.data); 4856 4857 lookupIRTempQuad( &r_dstHi, &r_dstMedHi, &r_dstMedLo, 4858 &r_dstLo, env, tmp); 4859 4860 addInstr(env, mk_iMOVds_RR(r_dstHi, r_srcHi) ); 4861 addInstr(env, mk_iMOVds_RR(r_dstMedHi, r_srcMedHi) ); 4862 addInstr(env, mk_iMOVds_RR(r_dstMedLo, r_srcMedLo) ); 4863 addInstr(env, mk_iMOVds_RR(r_dstLo, r_srcLo) ); 4864 return; 4865 } 4866 if (ty == Ity_I1) { 4867 PPCCondCode cond = iselCondCode(env, stmt->Ist.WrTmp.data); 4868 HReg r_dst = lookupIRTemp(env, tmp); 4869 addInstr(env, PPCInstr_Set(cond, r_dst)); 4870 return; 4871 } 4872 if (ty == Ity_F64) { 4873 HReg fr_dst = lookupIRTemp(env, tmp); 4874 HReg fr_src = iselDblExpr(env, stmt->Ist.WrTmp.data); 4875 addInstr(env, PPCInstr_FpUnary(Pfp_MOV, fr_dst, fr_src)); 4876 return; 4877 } 4878 if (ty == Ity_F32) { 4879 HReg fr_dst = lookupIRTemp(env, tmp); 4880 HReg fr_src = iselFltExpr(env, stmt->Ist.WrTmp.data); 4881 addInstr(env, PPCInstr_FpUnary(Pfp_MOV, fr_dst, fr_src)); 4882 return; 4883 } 4884 if (ty == Ity_V128) { 4885 HReg v_dst = lookupIRTemp(env, tmp); 4886 HReg v_src = iselVecExpr(env, stmt->Ist.WrTmp.data); 4887 addInstr(env, PPCInstr_AvUnary(Pav_MOV, v_dst, v_src)); 4888 return; 4889 } 4890 if (ty == Ity_D64) { 4891 HReg fr_dst = lookupIRTemp( env, tmp ); 4892 HReg fr_src = iselDfp64Expr( env, stmt->Ist.WrTmp.data ); 4893 addInstr( env, PPCInstr_Dfp64Unary( 
Pfp_MOV, fr_dst, fr_src ) );
         return;
      }
      if (ty == Ity_D128) {
         /* A D128 value lives in a pair of FP registers; move both
            halves with Dfp64 MOVs. */
         HReg fr_srcHi, fr_srcLo, fr_dstHi, fr_dstLo;
         lookupIRTempPair( &fr_dstHi, &fr_dstLo, env, tmp );
         iselDfp128Expr( &fr_srcHi, &fr_srcLo, env, stmt->Ist.WrTmp.data );
         addInstr( env, PPCInstr_Dfp64Unary( Pfp_MOV, fr_dstHi, fr_srcHi ) );
         addInstr( env, PPCInstr_Dfp64Unary( Pfp_MOV, fr_dstLo, fr_srcLo ) );
         return;
      }
      break;
   }

   /* --------- Load Linked or Store Conditional --------- */
   case Ist_LLSC: {
      IRTemp res    = stmt->Ist.LLSC.result;
      IRType tyRes  = typeOfIRTemp(env->type_env, res);
      IRType tyAddr = typeOfIRExpr(env->type_env, stmt->Ist.LLSC.addr);

      /* Only big-endian accesses, with a host-word-sized address, are
         handled; anything else is rejected via stmt_fail. */
      if (stmt->Ist.LLSC.end != Iend_BE)
         goto stmt_fail;
      if (!mode64 && (tyAddr != Ity_I32))
         goto stmt_fail;
      if (mode64 && (tyAddr != Ity_I64))
         goto stmt_fail;

      if (stmt->Ist.LLSC.storedata == NULL) {
         /* LL: load-linked.  Only 32-bit (and, in mode64, 64-bit)
            results are supported; other widths fall through to
            stmt_fail below. */
         HReg r_addr = iselWordExpr_R( env, stmt->Ist.LLSC.addr );
         HReg r_dst  = lookupIRTemp(env, res);
         if (tyRes == Ity_I32) {
            addInstr(env, PPCInstr_LoadL( 4, r_dst, r_addr, mode64 ));
            return;
         }
         if (tyRes == Ity_I64 && mode64) {
            addInstr(env, PPCInstr_LoadL( 8, r_dst, r_addr, mode64 ));
            return;
         }
         /* fallthru */;
      } else {
         /* SC: store-conditional.  The Ity_I1 result indicates whether
            the store succeeded. */
         HReg   r_res  = lookupIRTemp(env, res); /* :: Ity_I1 */
         HReg   r_a    = iselWordExpr_R(env, stmt->Ist.LLSC.addr);
         HReg   r_src  = iselWordExpr_R(env, stmt->Ist.LLSC.storedata);
         HReg   r_tmp  = newVRegI(env);
         IRType tyData = typeOfIRExpr(env->type_env,
                                      stmt->Ist.LLSC.storedata);
         vassert(tyRes == Ity_I1);
         if (tyData == Ity_I32 || (tyData == Ity_I64 && mode64)) {
            addInstr(env, PPCInstr_StoreC( tyData==Ity_I32 ? 4 : 8,
                                           r_a, r_src, mode64 ));
            /* NOTE(review): st{w,d}cx. records success in CR0.EQ.
               Read the whole CR into r_tmp, then shift right by 29 so
               that CR0.EQ (bit 2 of the 32-bit CR image, counting from
               the MSB) lands in bit 0. */
            addInstr(env, PPCInstr_MfCR( r_tmp ));
            addInstr(env, PPCInstr_Shft(
                             Pshft_SHR,
                             env->mode64 ? False : True
                                /*F:64-bit, T:32-bit shift*/,
                             r_tmp, r_tmp,
                             PPCRH_Imm(False/*unsigned*/, 29)));
            /* Probably unnecessary, since the IR dest type is Ity_I1,
               and so we are entitled to leave whatever junk we like
               drifting round in the upper 31 or 63 bits of r_res.
               However, for the sake of conservativeness .. */
            addInstr(env, PPCInstr_Alu(
                             Palu_AND,
                             r_res, r_tmp,
                             PPCRH_Imm(False/*unsigned*/, 1)));
            return;
         }
         /* fallthru */
      }
      goto stmt_fail;
      /*NOTREACHED*/
   }

   /* --------- Call to DIRTY helper --------- */
   case Ist_Dirty: {
      IRType   retty;
      IRDirty* d = stmt->Ist.Dirty.details;
      Bool     passBBP = False;

      /* Pass the guest-state (baseblock) pointer to the helper only
         if the dirty call declares guest-state effects and asks for
         it. */
      if (d->nFxState == 0)
         vassert(!d->needsBBP);
      passBBP = toBool(d->nFxState > 0 && d->needsBBP);

      /* Marshal args, do the call, clear stack. */
      doHelperCall( env, passBBP, d->guard, d->cee, d->args );

      /* Now figure out what to do with the returned value, if any. */
      if (d->tmp == IRTemp_INVALID)
         /* No return value.  Nothing to do. */
         return;

      retty = typeOfIRTemp(env->type_env, d->tmp);
      if (!mode64 && retty == Ity_I64) {
         HReg r_dstHi, r_dstLo;
         /* The returned value is in %r3:%r4.  Park it in the
            register-pair associated with tmp. */
         lookupIRTempPair( &r_dstHi, &r_dstLo, env, d->tmp);
         addInstr(env, mk_iMOVds_RR(r_dstHi, hregPPC_GPR3(mode64)));
         addInstr(env, mk_iMOVds_RR(r_dstLo, hregPPC_GPR4(mode64)));
         return;
      }
      if (retty == Ity_I8  || retty == Ity_I16 ||
          retty == Ity_I32 || ((retty == Ity_I64) && mode64)) {
         /* The returned value is in %r3.  Park it in the register
            associated with tmp. */
         HReg r_dst = lookupIRTemp(env, d->tmp);
         addInstr(env, mk_iMOVds_RR(r_dst, hregPPC_GPR3(mode64)));
         return;
      }
      /* Other return types are unhandled; fall through to stmt_fail. */
      break;
   }

   /* --------- MEM FENCE --------- */
   case Ist_MBE:
      switch (stmt->Ist.MBE.event) {
         case Imbe_Fence:
            addInstr(env, PPCInstr_MFence());
            return;
         default:
            break;
      }
      break;

   /* --------- INSTR MARK --------- */
   /* Doesn't generate any executable code ... */
   case Ist_IMark:
       return;

   /* --------- ABI HINT --------- */
   /* These have no meaning (denotation in the IR) and so we ignore
      them ... if any actually made it this far. */
   case Ist_AbiHint:
       return;

   /* --------- NO-OP --------- */
   /* Fairly self-explanatory, wouldn't you say? */
   case Ist_NoOp:
       return;

   /* --------- EXIT --------- */
   case Ist_Exit: {
      IRConst* dst = stmt->Ist.Exit.dst;
      /* The exit-target constant must match the host word size. */
      if (!mode64 && dst->tag != Ico_U32)
         vpanic("iselStmt(ppc): Ist_Exit: dst is not a 32-bit value");
      if (mode64 && dst->tag != Ico_U64)
         vpanic("iselStmt(ppc64): Ist_Exit: dst is not a 64-bit value");

      PPCCondCode cc    = iselCondCode(env, stmt->Ist.Exit.guard);
      /* amCIA addresses the guest IP slot in the guest state, via the
         guest-state pointer GPR31. */
      PPCAMode*   amCIA = PPCAMode_IR(stmt->Ist.Exit.offsIP,
                                      hregPPC_GPR31(mode64));

      /* Case: boring transfer to known address */
      if (stmt->Ist.Exit.jk == Ijk_Boring
          || stmt->Ist.Exit.jk == Ijk_Call
          /* || stmt->Ist.Exit.jk == Ijk_Ret */) {
         if (env->chainingAllowed) {
            /* .. almost always true .. */
            /* Skip the event check at the dst if this is a forwards
               edge. */
            Bool toFastEP
               = mode64
               ? (((Addr64)stmt->Ist.Exit.dst->Ico.U64) > (Addr64)env->max_ga)
               : (((Addr32)stmt->Ist.Exit.dst->Ico.U32) > (Addr32)env->max_ga);
            if (0) vex_printf("%s", toFastEP ? "Y" : ",");
            addInstr(env, PPCInstr_XDirect(
                             mode64 ? (Addr64)stmt->Ist.Exit.dst->Ico.U64
                                    : (Addr64)stmt->Ist.Exit.dst->Ico.U32,
                             amCIA, cc, toFastEP));
         } else {
            /* .. very occasionally .. */
            /* We can't use chaining, so ask for an assisted transfer,
               as that's the only alternative that is allowable. */
            HReg r = iselWordExpr_R(env, IRExpr_Const(stmt->Ist.Exit.dst));
            addInstr(env, PPCInstr_XAssisted(r, amCIA, cc, Ijk_Boring));
         }
         return;
      }

      /* Case: assisted transfer to arbitrary address */
      switch (stmt->Ist.Exit.jk) {
         /* Keep this list in sync with that in iselNext below */
         case Ijk_ClientReq:
         case Ijk_EmFail:
         case Ijk_EmWarn:
         case Ijk_NoDecode:
         case Ijk_NoRedir:
         case Ijk_SigBUS:
         case Ijk_SigTRAP:
         case Ijk_Sys_syscall:
         case Ijk_TInval:
         {
            HReg r = iselWordExpr_R(env, IRExpr_Const(stmt->Ist.Exit.dst));
            addInstr(env, PPCInstr_XAssisted(r, amCIA, cc,
                                             stmt->Ist.Exit.jk));
            return;
         }
         default:
            break;
      }

      /* Do we ever expect to see any other kind? */
      goto stmt_fail;
   }

   default: break;
   }
  stmt_fail:
   ppIRStmt(stmt);
   vpanic("iselStmt(ppc)");
}


/*---------------------------------------------------------*/
/*--- ISEL: Basic block terminators (Nexts)             ---*/
/*---------------------------------------------------------*/

/* Generate code for the superblock's terminal transfer: evaluate
   'next' (the destination expression), write it to the guest IP slot
   at offsIP, and emit the appropriate transfer (chained direct jump,
   indirect jump, or assisted transfer) for jump-kind 'jk'.  Panics on
   an unrecognised jump kind. */
static void iselNext ( ISelEnv* env,
                       IRExpr* next, IRJumpKind jk, Int offsIP )
{
   if (vex_traceflags & VEX_TRACE_VCODE) {
      vex_printf( "\n-- PUT(%d) = ", offsIP);
      ppIRExpr( next );
      vex_printf( "; exit-");
      ppIRJumpKind(jk);
      vex_printf( "\n");
   }

   /* Unconditional condition code, used for all terminal transfers. */
   PPCCondCode always = mk_PPCCondCode( Pct_ALWAYS, Pcf_NONE );

   /* Case: boring transfer to known address */
   if (next->tag == Iex_Const) {
      IRConst* cdst = next->Iex.Const.con;
      vassert(cdst->tag == (env->mode64 ? Ico_U64 :Ico_U32));
      if (jk == Ijk_Boring || jk == Ijk_Call) {
         /* Boring transfer to known address */
         PPCAMode* amCIA = PPCAMode_IR(offsIP, hregPPC_GPR31(env->mode64));
         if (env->chainingAllowed) {
            /* .. almost always true .. */
            /* Skip the event check at the dst if this is a forwards
               edge. */
            Bool toFastEP
               = env->mode64
               ? (((Addr64)cdst->Ico.U64) > (Addr64)env->max_ga)
               : (((Addr32)cdst->Ico.U32) > (Addr32)env->max_ga);
            if (0) vex_printf("%s", toFastEP ? "X" : ".");
            addInstr(env, PPCInstr_XDirect(
                             env->mode64 ? (Addr64)cdst->Ico.U64
                                         : (Addr64)cdst->Ico.U32,
                             amCIA, always, toFastEP));
         } else {
            /* .. very occasionally .. */
            /* We can't use chaining, so ask for an assisted transfer,
               as that's the only alternative that is allowable. */
            HReg r = iselWordExpr_R(env, next);
            addInstr(env, PPCInstr_XAssisted(r, amCIA, always,
                                             Ijk_Boring));
         }
         return;
      }
   }

   /* Case: call/return (==boring) transfer to any address */
   switch (jk) {
      case Ijk_Boring: case Ijk_Ret: case Ijk_Call: {
         HReg      r     = iselWordExpr_R(env, next);
         PPCAMode* amCIA = PPCAMode_IR(offsIP, hregPPC_GPR31(env->mode64));
         if (env->chainingAllowed) {
            addInstr(env, PPCInstr_XIndir(r, amCIA, always));
         } else {
            addInstr(env, PPCInstr_XAssisted(r, amCIA, always,
                                             Ijk_Boring));
         }
         return;
      }
      default:
         break;
   }

   /* Case: assisted transfer to arbitrary address */
   switch (jk) {
      /* Keep this list in sync with that for Ist_Exit above */
      case Ijk_ClientReq:
      case Ijk_EmFail:
      case Ijk_EmWarn:
      case Ijk_NoDecode:
      case Ijk_NoRedir:
      case Ijk_SigBUS:
      case Ijk_SigTRAP:
      case Ijk_Sys_syscall:
      case Ijk_TInval:
      {
         HReg      r     = iselWordExpr_R(env, next);
         PPCAMode* amCIA = PPCAMode_IR(offsIP, hregPPC_GPR31(env->mode64));
         addInstr(env, PPCInstr_XAssisted(r, amCIA, always, jk));
         return;
      }
      default:
         break;
   }

   vex_printf( "\n-- PUT(%d) = ", offsIP);
   ppIRExpr( next );
   vex_printf( "; exit-");
   ppIRJumpKind(jk);
   vex_printf( "\n");
   vassert(0); // are we expecting any other kind?
}


/*---------------------------------------------------------*/
/*--- Insn selector top-level                           ---*/
/*---------------------------------------------------------*/

/* Translate an entire SB to ppc code.
   bb                      IR superblock to translate
   arch_host               VexArchPPC32 or VexArchPPC64 (asserted)
   archinfo_host           host capability bits (sanity-checked below)
   vbi                     ABI info, stashed in the ISelEnv
   offs_Host_EvC_Counter / offs_Host_EvC_FailAddr
                           guest-state offsets for the event-check
                           counter and fail address
   chainingAllowed         whether direct/indirect chained jumps may
                           be emitted (else all transfers are assisted)
   addProfInc              emit a profile-counter increment (patched
                           later via LibVEX_patchProfCtr)
   max_ga                  highest guest address in the SB; used to
                           decide fast- vs slow-entry chaining
   Returns the array of selected host instructions, with n_vregs set
   to the number of virtual registers used. */
HInstrArray* iselSB_PPC ( IRSB* bb,
                          VexArch      arch_host,
                          VexArchInfo* archinfo_host,
                          VexAbiInfo*  vbi,
                          Int offs_Host_EvC_Counter,
                          Int offs_Host_EvC_FailAddr,
                          Bool chainingAllowed,
                          Bool addProfInc,
                          Addr64 max_ga )
{
   Int       i, j;
   HReg      hregLo, hregMedLo, hregMedHi, hregHi;
   ISelEnv*  env;
   UInt      hwcaps_host = archinfo_host->hwcaps;
   Bool      mode64 = False;
   UInt      mask32, mask64;
   PPCAMode *amCounter, *amFailAddr;


   vassert(arch_host == VexArchPPC32 || arch_host == VexArchPPC64);
   mode64 = arch_host == VexArchPPC64;
   /* In 32-bit mode every guest address must fit in 32 bits. */
   if (!mode64) vassert(max_ga <= 0xFFFFFFFFULL);

   /* do some sanity checks: the hwcaps for the other word size must
      all be clear. */
   mask32 = VEX_HWCAPS_PPC32_F | VEX_HWCAPS_PPC32_V
            | VEX_HWCAPS_PPC32_FX | VEX_HWCAPS_PPC32_GX | VEX_HWCAPS_PPC32_VX
            | VEX_HWCAPS_PPC32_DFP;

   mask64 = VEX_HWCAPS_PPC64_V | VEX_HWCAPS_PPC64_FX
            | VEX_HWCAPS_PPC64_GX | VEX_HWCAPS_PPC64_VX | VEX_HWCAPS_PPC64_DFP;

   if (mode64) {
      vassert((hwcaps_host & mask32) == 0);
   } else {
      vassert((hwcaps_host & mask64) == 0);
   }

   /* Make up an initial environment to use. */
   env = LibVEX_Alloc(sizeof(ISelEnv));
   env->vreg_ctr = 0;

   /* Are we being ppc32 or ppc64? */
   env->mode64 = mode64;

   /* Set up output code array. */
   env->code = newHInstrArray();

   /* Copy BB's type env. */
   env->type_env = bb->tyenv;

   /* Make up an IRTemp -> virtual HReg mapping.  This doesn't
    * change as we go along.
    *
    * vregmap2 and vregmap3 are only used in 32 bit mode
    * for supporting I128 in 32-bit mode
    */
   env->n_vregmap = bb->tyenv->types_used;
   env->vregmapLo    = LibVEX_Alloc(env->n_vregmap * sizeof(HReg));
   env->vregmapMedLo = LibVEX_Alloc(env->n_vregmap * sizeof(HReg));
   if (mode64) {
      env->vregmapMedHi = NULL;
      env->vregmapHi    = NULL;
   } else {
      env->vregmapMedHi = LibVEX_Alloc(env->n_vregmap * sizeof(HReg));
      env->vregmapHi    = LibVEX_Alloc(env->n_vregmap * sizeof(HReg));
   }

   /* and finally ... */
   env->chainingAllowed = chainingAllowed;
   env->max_ga          = max_ga;
   env->hwcaps          = hwcaps_host;
   env->previous_rm     = NULL;
   env->vbi             = vbi;

   /* For each IR temporary, allocate a suitably-kinded virtual
      register.  Wide types use register pairs/quads; 'j' counts the
      total vregs handed out. */
   j = 0;
   for (i = 0; i < env->n_vregmap; i++) {
      hregLo = hregMedLo = hregMedHi = hregHi = INVALID_HREG;
      switch (bb->tyenv->types[i]) {
      case Ity_I1:
      case Ity_I8:
      case Ity_I16:
      case Ity_I32:
         if (mode64) { hregLo    = mkHReg(j++, HRcInt64, True); break;
         } else {      hregLo    = mkHReg(j++, HRcInt32, True); break;
         }
      case Ity_I64:
         /* 32-bit mode: I64 needs a pair of 32-bit vregs. */
         if (mode64) { hregLo    = mkHReg(j++, HRcInt64, True); break;
         } else {      hregLo    = mkHReg(j++, HRcInt32, True);
                       hregMedLo = mkHReg(j++, HRcInt32, True); break;
         }
      case Ity_I128:
         /* I128: a pair in 64-bit mode, a quad in 32-bit mode. */
         if (mode64) { hregLo    = mkHReg(j++, HRcInt64, True);
                       hregMedLo = mkHReg(j++, HRcInt64, True); break;
         } else {      hregLo    = mkHReg(j++, HRcInt32, True);
                       hregMedLo = mkHReg(j++, HRcInt32, True);
                       hregMedHi = mkHReg(j++, HRcInt32, True);
                       hregHi    = mkHReg(j++, HRcInt32, True); break;
         }
      case Ity_F32:
      case Ity_F64:    hregLo    = mkHReg(j++, HRcFlt64, True); break;
      case Ity_V128:   hregLo    = mkHReg(j++, HRcVec128, True); break;
      case Ity_D64:    hregLo    = mkHReg(j++, HRcFlt64, True); break;
      case Ity_D128:   hregLo    = mkHReg(j++, HRcFlt64, True);
                       hregMedLo = mkHReg(j++, HRcFlt64, True); break;
      default:
         ppIRType(bb->tyenv->types[i]);
         vpanic("iselBB(ppc): IRTemp type");
      }
      env->vregmapLo[i]    = hregLo;
      env->vregmapMedLo[i] = hregMedLo;
      if (!mode64) {
         env->vregmapMedHi[i] = hregMedHi;
         env->vregmapHi[i]    = hregHi;
      }
   }
   env->vreg_ctr = j;

   /* The very first instruction must be an event check. */
   amCounter  = PPCAMode_IR(offs_Host_EvC_Counter, hregPPC_GPR31(mode64));
   amFailAddr = PPCAMode_IR(offs_Host_EvC_FailAddr, hregPPC_GPR31(mode64));
   addInstr(env, PPCInstr_EvCheck(amCounter, amFailAddr));

   /* Possibly a block counter increment (for profiling).  At this
      point we don't know the address of the counter, so just pretend
      it is zero.  It will have to be patched later, but before this
      translation is used, by a call to LibVEX_patchProfCtr. */
   if (addProfInc) {
      addInstr(env, PPCInstr_ProfInc());
   }

   /* Ok, finally we can iterate over the statements. */
   for (i = 0; i < bb->stmts_used; i++)
      iselStmt(env, bb->stmts[i]);

   iselNext(env, bb->next, bb->jumpkind, bb->offsIP);

   /* record the number of vregs we used. */
   env->code->n_vregs = env->vreg_ctr;
   return env->code;
}


/*---------------------------------------------------------------*/
/*--- end                                     host_ppc_isel.c ---*/
/*---------------------------------------------------------------*/