1 2/*--------------------------------------------------------------------*/ 3/*--- begin guest_tilegx_toIR.c ---*/ 4/*--------------------------------------------------------------------*/ 5 6/* 7 This file is part of Valgrind, a dynamic binary instrumentation 8 framework. 9 10 Copyright (C) 2010-2013 Tilera Corp. 11 12 This program is free software; you can redistribute it and/or 13 modify it under the terms of the GNU General Public License as 14 published by the Free Software Foundation; either version 2 of the 15 License, or (at your option) any later version. 16 17 This program is distributed in the hope that it will be useful, but 18 WITHOUT ANY WARRANTY; without even the implied warranty of 19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 20 General Public License for more details. 21 22 You should have received a copy of the GNU General Public License 23 along with this program; if not, write to the Free Software 24 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 25 02111-1307, USA. 26 27 The GNU General Public License is contained in the file COPYING. 28*/ 29 30/* Contributed by Zhi-Gang Liu <zliu at tilera dot com> */ 31 32/* Translates TILEGX code to IR. */ 33 34#include "libvex_basictypes.h" 35#include "libvex_ir.h" 36#include "libvex.h" 37#include "libvex_guest_tilegx.h" 38 39#include "main_util.h" 40#include "main_globals.h" 41#include "guest_generic_bb_to_IR.h" 42#include "guest_tilegx_defs.h" 43#include "tilegx_disasm.h" 44 45/*------------------------------------------------------------*/ 46/*--- Globals ---*/ 47/*------------------------------------------------------------*/ 48 49/* These are set at the start of the translation of a instruction, so 50 that we don't have to pass them around endlessly. CONST means does 51 not change during translation of the instruction. 52*/ 53 54/* CONST: is the host bigendian? 
   This has to do with float vs double register accesses on VFP, but
   it's complex and not properly thought out. */
static VexEndness host_endness;

/* Pointer to the guest code area. */
static UChar *guest_code;

/* The guest address corresponding to guest_code[0]. */
static Addr64 guest_PC_bbstart;

/* CONST: The guest address for the instruction currently being
   translated. */
static Addr64 guest_PC_curr_instr;

/* MOD: The IRSB* into which we're generating code. */
static IRSB *irsb;

/*------------------------------------------------------------*/
/*--- Debugging output                                     ---*/
/*------------------------------------------------------------*/

/* Print a front-end disassembly trace line, but only when tracing
   has been requested via VEX_TRACE_FE. */
#define DIP(format, args...)           \
   if (vex_traceflags & VEX_TRACE_FE)  \
      vex_printf(format, ## args)

/*------------------------------------------------------------*/
/*--- Helper bits and pieces for deconstructing the        ---*/
/*--- tilegx insn stream.                                  ---*/
/*------------------------------------------------------------*/

/* Byte offset of integer guest register 'iregNo' within
   VexGuestTILEGXState.  Each guest register slot is 8 bytes wide. */
static Int integerGuestRegOffset ( UInt iregNo )
{
   return 8 * (iregNo);
}

/*------------------------------------------------------------*/
/*--- Field helpers                                        ---*/
/*------------------------------------------------------------*/

/*------------------------------------------------------------*/
/*--- Helper bits and pieces for creating IR fragments.
---*/
/*------------------------------------------------------------*/

/* Create an expression node for an 8-bit integer constant. */
static IRExpr *mkU8 ( UInt i )
{
   return IRExpr_Const(IRConst_U8((UChar) i));
}

/* Create an expression node for a 32-bit integer constant */
static IRExpr *mkU32 ( UInt i )
{
   return IRExpr_Const(IRConst_U32(i));
}

/* Create an expression node for a 64-bit integer constant */
static IRExpr *mkU64 ( ULong i )
{
   return IRExpr_Const(IRConst_U64(i));
}

/* Create an expression node reading the given IR temporary. */
static IRExpr *mkexpr ( IRTemp tmp )
{
   return IRExpr_RdTmp(tmp);
}

/* Build a unary-operator expression. */
static IRExpr *unop ( IROp op, IRExpr * a )
{
   return IRExpr_Unop(op, a);
}

/* Build a binary-operator expression. */
static IRExpr *binop ( IROp op, IRExpr * a1, IRExpr * a2 )
{
   return IRExpr_Binop(op, a1, a2);
}

/* Build a load of the given type from 'addr'.  All guest memory
   accesses in this front end are little-endian (Iend_LE). */
static IRExpr *load ( IRType ty, IRExpr * addr )
{
   IRExpr *load1 = NULL;

   load1 = IRExpr_Load(Iend_LE, ty, addr);
   return load1;
}

/* Add a statement to the list held by "irsb". */
static void stmt ( IRStmt * st )
{
   addStmtToIRSB(irsb, st);
}

/* Guest-state offset of the program counter. */
#define OFFB_PC offsetof(VexGuestTILEGXState, guest_pc)

/* Write expression 'e' to the guest program counter. */
static void putPC ( IRExpr * e )
{
   stmt(IRStmt_Put(OFFB_PC, e));
}

/* Assign expression 'e' to IR temporary 'dst'. */
static void assign ( IRTemp dst, IRExpr * e )
{
   stmt(IRStmt_WrTmp(dst, e));
}

/* Emit a little-endian store of 'data' to 'addr'. */
static void store ( IRExpr * addr, IRExpr * data )
{
   stmt(IRStmt_Store(Iend_LE, addr, data));
}

/* Generate a new temporary of the given type.
*/ 162static IRTemp newTemp ( IRType ty ) 163{ 164 vassert(isPlausibleIRType(ty)); 165 return newIRTemp(irsb->tyenv, ty); 166} 167 168static ULong extend_s_16to64 ( UInt x ) 169{ 170 return (ULong) ((((Long) x) << 48) >> 48); 171} 172 173static ULong extend_s_8to64 ( UInt x ) 174{ 175 return (ULong) ((((Long) x) << 56) >> 56); 176} 177 178static IRExpr *getIReg ( UInt iregNo ) 179{ 180 IRType ty = Ity_I64; 181 if(!(iregNo < 56 || iregNo == 63 || 182 (iregNo >= 70 && iregNo <= 73))) { 183 vex_printf("iregNo=%d\n", iregNo); 184 vassert(0); 185 } 186 return IRExpr_Get(integerGuestRegOffset(iregNo), ty); 187} 188 189static void putIReg ( UInt archreg, IRExpr * e ) 190{ 191 IRType ty = Ity_I64; 192 if(!(archreg < 56 || archreg == 63 || archreg == 70 || 193 archreg == 72 || archreg == 73)) { 194 vex_printf("archreg=%d\n", archreg); 195 vassert(0); 196 } 197 vassert(typeOfIRExpr(irsb->tyenv, e) == ty); 198 if (archreg != 63) 199 stmt(IRStmt_Put(integerGuestRegOffset(archreg), e)); 200} 201 202/* Narrow 8/16/32 bit int expr to 8/16/32. Clearly only some 203 of these combinations make sense. */ 204static IRExpr *narrowTo ( IRType dst_ty, IRExpr * e ) 205{ 206 IRType src_ty = typeOfIRExpr(irsb->tyenv, e); 207 if (src_ty == dst_ty) 208 return e; 209 if (src_ty == Ity_I32 && dst_ty == Ity_I16) 210 return unop(Iop_32to16, e); 211 if (src_ty == Ity_I32 && dst_ty == Ity_I8) 212 return unop(Iop_32to8, e); 213 214 if (src_ty == Ity_I64 && dst_ty == Ity_I8) { 215 return unop(Iop_64to8, e); 216 } 217 if (src_ty == Ity_I64 && dst_ty == Ity_I16) { 218 return unop(Iop_64to16, e); 219 } 220 if (src_ty == Ity_I64 && dst_ty == Ity_I32) { 221 return unop(Iop_64to32, e); 222 } 223 224 if (vex_traceflags & VEX_TRACE_FE) { 225 vex_printf("\nsrc, dst tys are: "); 226 ppIRType(src_ty); 227 vex_printf(", "); 228 ppIRType(dst_ty); 229 vex_printf("\n"); 230 } 231 vpanic("narrowTo(tilegx)"); 232 return e; 233} 234 235#define signExtend(_e, _n) \ 236 ((_n == 32) ? 
   unop(Iop_32Sto64, _e) :                                              \
   ((_n == 16) ?                                                        \
    unop(Iop_16Sto64, _e) :                                             \
    (binop(Iop_Sar64, binop(Iop_Shl64, _e, mkU8(63 - (_n))), mkU8(63 - (_n))))))

/* Emit a conditional exit: if 'guard' holds, leave the superblock and
   continue at guest address 'imm'.  The guard is first materialised
   into an Ity_I1 temporary. */
static IRStmt* dis_branch ( IRExpr* guard, ULong imm )
{
   IRTemp t0;

   t0 = newTemp(Ity_I1);
   assign(t0, guard);
   return IRStmt_Exit(mkexpr(t0), Ijk_Boring,
                      IRConst_U64(imm), OFFB_PC);
}

/* Record a pending register writeback (reg _rd gets temp _td).  The
   writebacks are committed only after all reads of the bundle, since
   TILE-Gx is a VLIW and intra-bundle reads must see pre-bundle state.
   At most 6 writebacks per bundle are supported. */
#define MARK_REG_WB(_rd, _td)                 \
  do {                                        \
     vassert(rd_wb_index < 6);                \
     rd_wb_temp[rd_wb_index] = _td;           \
     rd_wb_reg[rd_wb_index] = _rd;            \
     rd_wb_index++;                           \
  } while(0)

/*------------------------------------------------------------*/
/*--- Disassemble a single instruction                     ---*/
/*------------------------------------------------------------*/

/* Disassemble a single instruction bundle into IR.  The bundle is
   located in host memory at guest_instr, and has guest IP of
   guest_PC_curr_instr, which will have been set before the call
   here. */
static DisResult disInstr_TILEGX_WRK ( Bool(*resteerOkFn) (void *, Addr),
                                       Bool resteerCisOk,
                                       void *callback_opaque,
                                       Long delta64,
                                       const VexArchInfo * archinfo,
                                       const VexAbiInfo * abiinfo,
                                       Bool sigill_diag )
{
   struct tilegx_decoded_instruction
      decoded[TILEGX_MAX_INSTRUCTIONS_PER_BUNDLE];
   ULong cins, opcode = -1, rd, ra, rb, imm = 0;
   ULong opd[4];
   ULong opd_src_map, opd_dst_map, opd_imm_map;
   Int use_dirty_helper;
   IRTemp t0, t1, t2, t3, t4;
   IRTemp tb[4];
   IRTemp rd_wb_temp[6];
   ULong rd_wb_reg[6];
   /* Tilegx is a VLIW processor, we have to commit register write after read.*/
   Int rd_wb_index;
   Int n = 0, nr_insn;
   DisResult dres;

   /* The running delta */
   Long delta = delta64;

   /* Holds pc at the start of the insn, so that we can print
      consistent error messages for unimplemented insns.
*/ 296 //Long delta_start = delta; 297 298 UChar *code = (UChar *) (guest_code + delta); 299 300 IRStmt *bstmt = NULL; /* Branch statement. */ 301 IRExpr *next = NULL; /* Next bundle expr. */ 302 ULong jumpkind = Ijk_Boring; 303 ULong steering_pc; 304 305 /* Set result defaults. */ 306 dres.whatNext = Dis_Continue; 307 dres.len = 0; 308 dres.continueAt = 0; 309 dres.jk_StopHere = Ijk_INVALID; 310 311 /* Verify the code addr is 8-byte aligned. */ 312 vassert((((Addr)code) & 7) == 0); 313 314 /* Get the instruction bundle. */ 315 cins = *((ULong *)(Addr) code); 316 317 /* "Special" instructions. */ 318 /* Spot the 16-byte preamble: ****tilegx**** 319 0:02b3c7ff91234fff { moveli zero, 4660 ; moveli zero, 22136 } 320 8:0091a7ff95678fff { moveli zero, 22136 ; moveli zero, 4660 } 321 */ 322#define CL_W0 0x02b3c7ff91234fffULL 323#define CL_W1 0x0091a7ff95678fffULL 324 325 if (*((ULong*)(Addr)(code)) == CL_W0 && 326 *((ULong*)(Addr)(code + 8)) == CL_W1) { 327 /* Got a "Special" instruction preamble. Which one is it? 
*/ 328 if (*((ULong*)(Addr)(code + 16)) == 329 0x283a69a6d1483000ULL /* or r13, r13, r13 */ ) { 330 /* r0 = client_request ( r12 ) */ 331 DIP("r0 = client_request ( r12 )\n"); 332 333 putPC(mkU64(guest_PC_curr_instr + 24)); 334 335 dres.jk_StopHere = Ijk_ClientReq; 336 dres.whatNext = Dis_StopHere; 337 dres.len = 24; 338 goto decode_success; 339 340 } else if (*((ULong*)(Addr)(code + 16)) == 341 0x283a71c751483000ULL /* or r14, r14, r14 */ ) { 342 /* r11 = guest_NRADDR */ 343 DIP("r11 = guest_NRADDR\n"); 344 dres.len = 24; 345 putIReg(11, IRExpr_Get(offsetof(VexGuestTILEGXState, guest_NRADDR), 346 Ity_I64)); 347 putPC(mkU64(guest_PC_curr_instr + 8)); 348 goto decode_success; 349 350 } else if (*((ULong*)(Addr)(code + 16)) == 351 0x283a79e7d1483000ULL /* or r15, r15, r15 */ ) { 352 /* branch-and-link-to-noredir r12 */ 353 DIP("branch-and-link-to-noredir r12\n"); 354 dres.len = 24; 355 putIReg(55, mkU64(guest_PC_curr_instr + 24)); 356 357 putPC(getIReg(12)); 358 359 dres.jk_StopHere = Ijk_NoRedir; 360 dres.whatNext = Dis_StopHere; 361 goto decode_success; 362 363 } else if (*((ULong*)(Addr)(code + 16)) == 364 0x283a5965d1483000ULL /* or r11, r11, r11 */ ) { 365 /* vex-inject-ir */ 366 DIP("vex-inject-ir\n"); 367 dres.len = 24; 368 369 vex_inject_ir(irsb, Iend_LE); 370 371 stmt(IRStmt_Put(offsetof(VexGuestTILEGXState, guest_CMSTART), 372 mkU64(guest_PC_curr_instr))); 373 stmt(IRStmt_Put(offsetof(VexGuestTILEGXState, guest_CMLEN), 374 mkU64(24))); 375 376 /* 2 + 1 = 3 bundles. 24 bytes. */ 377 putPC(mkU64(guest_PC_curr_instr + 24)); 378 379 dres.jk_StopHere = Ijk_InvalICache; 380 dres.whatNext = Dis_StopHere; 381 goto decode_success; 382 } 383 384 /* We don't expect this. */ 385 vex_printf("%s: unexpect special bundles at %lx\n", 386 __func__, (Addr)guest_PC_curr_instr); 387 delta += 16; 388 goto decode_failure; 389 /*NOTREACHED*/ 390 } 391 392 /* To decode the given instruction bundle. 
*/ 393 nr_insn = parse_insn_tilegx((tilegx_bundle_bits)cins, 394 (ULong)(Addr)code, 395 decoded); 396 397 if (vex_traceflags & VEX_TRACE_FE) 398 decode_and_display(&cins, 1, (ULong)(Addr)code); 399 400 /* Init. rb_wb_index */ 401 rd_wb_index = 0; 402 403 steering_pc = -1ULL; 404 405 for (n = 0; n < nr_insn; n++) { 406 opcode = decoded[n].opcode->mnemonic; 407 Int opi; 408 409 rd = ra = rb = -1; 410 opd[0] = opd[1] = opd[2] = opd[3] = -1; 411 opd_dst_map = 0; 412 opd_src_map = 0; 413 opd_imm_map = 0; 414 415 for (opi = 0; opi < decoded[n].opcode->num_operands; opi++) { 416 const struct tilegx_operand *op = decoded[n].operands[opi]; 417 opd[opi] = decoded[n].operand_values[opi]; 418 419 /* Set the operands. rd, ra, rb and imm. */ 420 if (opi < 3) { 421 if (op->is_dest_reg) { 422 if (rd == -1) 423 rd = decoded[n].operand_values[opi]; 424 else if (ra == -1) 425 ra = decoded[n].operand_values[opi]; 426 } else if (op->is_src_reg) { 427 if (ra == -1) { 428 ra = decoded[n].operand_values[opi]; 429 } else if(rb == -1) { 430 rb = decoded[n].operand_values[opi]; 431 } else { 432 vassert(0); 433 } 434 } else { 435 imm = decoded[n].operand_values[opi]; 436 } 437 } 438 439 /* Build bit maps of used dest, source registers 440 and immediate. */ 441 if (op->is_dest_reg) { 442 opd_dst_map |= 1ULL << opi; 443 if(op->is_src_reg) 444 opd_src_map |= 1ULL << opi; 445 } else if(op->is_src_reg) { 446 opd_src_map |= 1ULL << opi; 447 } else { 448 opd_imm_map |= 1ULL << opi; 449 } 450 } 451 452 use_dirty_helper = 0; 453 454 switch (opcode) { 455 case 0: /* "bpt" */ /* "raise" */ 456 /* "bpt" pseudo instruction is an illegal instruction */ 457 opd_imm_map |= (1 << 0); 458 opd[0] = cins; 459 use_dirty_helper = 1; 460 break; 461 case 1: /* "info" */ /* Ignore this instruction. */ 462 break; 463 case 2: /* "infol" */ /* Ignore this instruction. */ 464 break; 465 case 3: /* "ld4s_tls" */ /* Ignore this instruction. */ 466 break; 467 case 4: /* "ld_tls" */ /* Ignore this instruction. 
*/ 468 break; 469 case 5: /* "move" */ 470 t2 = newTemp(Ity_I64); 471 assign(t2, getIReg(ra)); 472 MARK_REG_WB(rd, t2); 473 break; 474 case 6: /* "movei" */ 475 t2 = newTemp(Ity_I64); 476 assign(t2, mkU64(extend_s_8to64(imm))); 477 MARK_REG_WB(rd, t2); 478 break; 479 case 7: /* "moveli" */ 480 t2 = newTemp(Ity_I64); 481 assign(t2, mkU64(extend_s_16to64(imm))); 482 MARK_REG_WB(rd, t2); 483 break; 484 case 8: /* "prefetch" */ /* Ignore. */ 485 break; 486 case 9: /* "prefetch_add_l1" */ /* Ignore. */ 487 break; 488 case 10: /* "prefetch_add_l1_fault" */ /* Ignore. */ 489 break; 490 case 11: /* "prefetch_add_l2" */ /* Ignore. */ 491 break; 492 case 12: /* "prefetch_add_l2_fault" */ /* Ignore. */ 493 break; 494 case 13: /* "prefetch_add_l3" */ /* Ignore. */ 495 break; 496 case 14: /* "prefetch_add_l3_fault" */ /* Ignore. */ 497 break; 498 case 15: /* "prefetch_l1" */ /* Ignore. */ 499 break; 500 case 16: /* "prefetch_l1_fault" */ /* Ignore. */ 501 break; 502 case 17: /* "prefetch_l2" */ /* Ignore. */ 503 break; 504 case 18: /* "prefetch_l2_fault" */ /* Ignore. */ 505 break; 506 case 19: /* "prefetch_l3" */ /* Ignore. */ 507 break; 508 case 20: /* "prefetch_l3_fault" */ /* Ignore. */ 509 break; 510 case 21: /* "raise" */ 511 /* "raise" pseudo instruction is an illegal instruction plusing 512 a "moveli zero, <sig>", so we need save whole bundle in the 513 opd[0], which will be used in the dirty helper. 
*/ 514 opd_imm_map |= (1 << 0); 515 opd[0] = cins; 516 use_dirty_helper = 1; 517 break; 518 case 22: /* "add" */ 519 t2 = newTemp(Ity_I64); 520 assign(t2, binop(Iop_Add64, getIReg(ra), getIReg(rb))); 521 MARK_REG_WB(rd, t2); 522 break; 523 case 23: /* "addi" */ 524 t2 = newTemp(Ity_I64); 525 assign(t2, binop(Iop_Add64, getIReg(ra), 526 mkU64(extend_s_8to64(imm)))); 527 MARK_REG_WB(rd, t2); 528 break; 529 case 24: /* "addli" */ 530 t2 = newTemp(Ity_I64); 531 assign(t2, binop(Iop_Add64, getIReg(ra), 532 mkU64(extend_s_16to64(imm)))); 533 MARK_REG_WB(rd, t2); 534 break; 535 case 25: /* "addx" */ 536 t2 = newTemp(Ity_I64); 537 assign(t2, signExtend(binop(Iop_Add32, 538 narrowTo(Ity_I32, getIReg(ra)), 539 narrowTo(Ity_I32, getIReg(rb))), 540 32)); 541 MARK_REG_WB(rd, t2); 542 break; 543 case 26: /* "addxi" */ 544 t2 = newTemp(Ity_I64); 545 assign(t2, signExtend(binop(Iop_Add32, 546 narrowTo(Ity_I32, getIReg(ra)), 547 mkU32(imm)), 32)); 548 MARK_REG_WB(rd, t2); 549 break; 550 case 27: /* "addxli" */ 551 t2 = newTemp(Ity_I64); 552 assign(t2, signExtend(binop(Iop_Add32, 553 narrowTo(Ity_I32, getIReg(ra)), 554 mkU32(imm)), 32)); 555 556 MARK_REG_WB(rd, t2); 557 break; 558 case 28: /* "addxsc" */ 559 use_dirty_helper = 1; 560 break; 561 case 29: /* "and" */ 562 t2 = newTemp(Ity_I64); 563 assign(t2, binop(Iop_And64, getIReg(ra), getIReg(rb))); 564 MARK_REG_WB(rd, t2); 565 break; 566 case 30: /* "andi" */ 567 t2 = newTemp(Ity_I64); 568 assign(t2, binop(Iop_And64, getIReg(ra), 569 mkU64(extend_s_8to64(imm)))); 570 MARK_REG_WB(rd, t2); 571 break; 572 case 31: /* "beqz" */ 573 /* Fall-through */ 574 case 32: 575 /* "beqzt" */ 576 bstmt = dis_branch(binop(Iop_CmpEQ64, getIReg(ra), mkU64(0)), 577 imm); 578 break; 579 case 33: /* "bfexts" */ 580 { 581 ULong imm0 = decoded[n].operand_values[3]; 582 ULong mask = ((-1ULL) ^ ((-1ULL << ((imm0 - imm) & 63)) << 1)); 583 t0 = newTemp(Ity_I64); 584 t2 = newTemp(Ity_I64); 585 assign(t0, binop(Iop_Xor64, 586 binop(Iop_Sub64, 587 
binop(Iop_And64, 588 binop(Iop_Shr64, 589 getIReg(ra), 590 mkU8(imm0)), 591 mkU64(1)), 592 mkU64(1)), 593 mkU64(-1ULL))); 594 assign(t2, 595 binop(Iop_Or64, 596 binop(Iop_And64, 597 binop(Iop_Or64, 598 binop(Iop_Shr64, 599 getIReg(ra), 600 mkU8(imm)), 601 binop(Iop_Shl64, 602 getIReg(ra), 603 mkU8(64 - imm))), 604 mkU64(mask)), 605 binop(Iop_And64, 606 mkexpr(t0), 607 mkU64(~mask)))); 608 609 MARK_REG_WB(rd, t2); 610 } 611 break; 612 case 34: /* "bfextu" */ 613 { 614 ULong imm0 = decoded[n].operand_values[3]; 615 ULong mask = 0; 616 t2 = newTemp(Ity_I64); 617 mask = ((-1ULL) ^ ((-1ULL << ((imm0 - imm) & 63)) << 1)); 618 619 assign(t2, 620 binop(Iop_And64, 621 binop(Iop_Or64, 622 binop(Iop_Shr64, 623 getIReg(ra), 624 mkU8(imm)), 625 binop(Iop_Shl64, 626 getIReg(ra), 627 mkU8(64 - imm))), 628 mkU64(mask))); 629 MARK_REG_WB(rd, t2); 630 } 631 break; 632 case 35: /* "bfins" */ 633 { 634 ULong mask; 635 ULong imm0 = decoded[n].operand_values[3]; 636 t0 = newTemp(Ity_I64); 637 t2 = newTemp(Ity_I64); 638 if (imm <= imm0) 639 { 640 mask = ((-1ULL << imm) ^ ((-1ULL << imm0) << 1)); 641 } 642 else 643 { 644 mask = ((-1ULL << imm) | (-1ULL >> (63 - imm0))); 645 } 646 647 assign(t0, binop(Iop_Or64, 648 binop(Iop_Shl64, 649 getIReg(ra), 650 mkU8(imm)), 651 binop(Iop_Shr64, 652 getIReg(ra), 653 mkU8(64 - imm)))); 654 655 assign(t2, binop(Iop_Or64, 656 binop(Iop_And64, 657 mkexpr(t0), 658 mkU64(mask)), 659 binop(Iop_And64, 660 getIReg(rd), 661 mkU64(~mask)))); 662 663 MARK_REG_WB(rd, t2); 664 } 665 break; 666 case 36: /* "bgez" */ 667 /* Fall-through */ 668 case 37: /* "bgezt" */ 669 bstmt = dis_branch(binop(Iop_CmpEQ64, 670 binop(Iop_And64, 671 getIReg(ra), 672 mkU64(0x8000000000000000ULL)), 673 mkU64(0x0)), 674 imm); 675 break; 676 case 38: /* "bgtz" */ 677 /* Fall-through */ 678 case 39: 679 /* "bgtzt" */ 680 bstmt = dis_branch(unop(Iop_Not1, 681 binop(Iop_CmpLE64S, 682 getIReg(ra), 683 mkU64(0))), 684 imm); 685 break; 686 case 40: /* "blbc" */ 687 /* Fall-through */ 688 case 
41: /* "blbct" */ 689 bstmt = dis_branch(unop(Iop_64to1, 690 unop(Iop_Not64, getIReg(ra))), 691 imm); 692 693 break; 694 case 42: /* "blbs" */ 695 /* Fall-through */ 696 case 43: 697 /* "blbst" */ 698 bstmt = dis_branch(unop(Iop_64to1, 699 getIReg(ra)), 700 imm); 701 break; 702 case 44: /* "blez" */ 703 bstmt = dis_branch(binop(Iop_CmpLE64S, getIReg(ra), 704 mkU64(0)), 705 imm); 706 break; 707 case 45: /* "blezt" */ 708 bstmt = dis_branch(binop(Iop_CmpLE64S, getIReg(ra), 709 mkU64(0)), 710 imm); 711 break; 712 case 46: /* "bltz" */ 713 bstmt = dis_branch(binop(Iop_CmpLT64S, getIReg(ra), 714 mkU64(0)), 715 imm); 716 break; 717 case 47: /* "bltzt" */ 718 bstmt = dis_branch(binop(Iop_CmpLT64S, getIReg(ra), 719 mkU64(0)), 720 imm); 721 break; 722 case 48: /* "bnez" */ 723 /* Fall-through */ 724 case 49: 725 /* "bnezt" */ 726 bstmt = dis_branch(binop(Iop_CmpNE64, getIReg(ra), 727 mkU64(0)), 728 imm); 729 break; 730 case 50: /* "clz" */ 731 t2 = newTemp(Ity_I64); 732 assign(t2, unop(Iop_Clz64, getIReg(ra))); 733 734 MARK_REG_WB(rd, t2); 735 break; 736 case 51: /* "cmoveqz rd, ra, rb" */ 737 t2 = newTemp(Ity_I64); 738 assign(t2, IRExpr_ITE(binop(Iop_CmpEQ64, getIReg(ra), mkU64(0)), 739 getIReg(rb), getIReg(rd))); 740 MARK_REG_WB(rd, t2); 741 break; 742 case 52: /* "cmovnez" */ 743 t2 = newTemp(Ity_I64); 744 assign(t2, IRExpr_ITE(binop(Iop_CmpEQ64, getIReg(ra), mkU64(0)), 745 getIReg(rd), getIReg(rb))); 746 MARK_REG_WB(rd, t2); 747 break; 748 case 53: /* "cmpeq" */ 749 t2 = newTemp(Ity_I64); 750 assign(t2, unop(Iop_1Uto64, binop(Iop_CmpEQ64, 751 getIReg(ra), getIReg(rb)))); 752 MARK_REG_WB(rd, t2); 753 break; 754 755 case 54: /* "cmpeqi" */ 756 t2 = newTemp(Ity_I64); 757 assign(t2, unop(Iop_1Uto64, binop(Iop_CmpEQ64, 758 getIReg(ra), 759 mkU64(extend_s_8to64(imm))))); 760 MARK_REG_WB(rd, t2); 761 break; 762 case 55: /* "cmpexch" */ 763 t1 = newTemp(Ity_I64); 764 t2 = newTemp(Ity_I64); 765 766 assign(t1, getIReg(rb)); 767 stmt( IRStmt_CAS(mkIRCAS(IRTemp_INVALID, t2, 
Iend_LE, 768 getIReg(ra), 769 NULL, binop(Iop_Add64, 770 getIReg(70), 771 getIReg(71)), 772 NULL, mkexpr(t1)))); 773 MARK_REG_WB(rd, t2); 774 break; 775 case 56: /* "cmpexch4" */ 776 t1 = newTemp(Ity_I32); 777 t2 = newTemp(Ity_I64); 778 t3 = newTemp(Ity_I32); 779 780 assign(t1, narrowTo(Ity_I32, getIReg(rb))); 781 stmt( IRStmt_CAS(mkIRCAS(IRTemp_INVALID, t3, Iend_LE, 782 getIReg(ra), 783 NULL, 784 narrowTo(Ity_I32, binop(Iop_Add64, 785 getIReg(70), 786 getIReg(71))), 787 NULL, 788 mkexpr(t1)))); 789 assign(t2, unop(Iop_32Uto64, mkexpr(t3))); 790 MARK_REG_WB(rd, t2); 791 break; 792 case 57: /* "cmples" */ 793 t2 = newTemp(Ity_I64); 794 assign(t2, unop(Iop_1Uto64, 795 binop(Iop_CmpLE64S, getIReg(ra), getIReg(rb)))); 796 MARK_REG_WB(rd, t2); 797 break; 798 case 58: /* "cmpleu" */ 799 t2 = newTemp(Ity_I64); 800 assign(t2, unop(Iop_1Uto64, 801 binop(Iop_CmpLE64U, getIReg(ra), getIReg(rb)))); 802 MARK_REG_WB(rd, t2); 803 break; 804 case 59: /* "cmplts" */ 805 t2 = newTemp(Ity_I64); 806 assign(t2, unop(Iop_1Uto64, 807 binop(Iop_CmpLT64S, getIReg(ra), getIReg(rb)))); 808 MARK_REG_WB(rd, t2); 809 break; 810 case 60: /* "cmpltsi" */ 811 t2 = newTemp(Ity_I64); 812 assign(t2, unop(Iop_1Uto64, 813 binop(Iop_CmpLT64S, 814 getIReg(ra), 815 mkU64(extend_s_8to64(imm))))); 816 MARK_REG_WB(rd, t2); 817 break; 818 case 61: 819 820 /* "cmpltu" */ 821 t2 = newTemp(Ity_I64); 822 assign(t2, unop(Iop_1Uto64, 823 binop(Iop_CmpLT64U, getIReg(ra), getIReg(rb)))); 824 MARK_REG_WB(rd, t2); 825 826 827 break; 828 case 62: /* "cmpltui" */ 829 t2 = newTemp(Ity_I64); 830 assign(t2, unop(Iop_1Uto64, 831 binop(Iop_CmpLT64U, 832 getIReg(ra), 833 mkU64(imm)))); 834 MARK_REG_WB(rd, t2); 835 836 837 break; 838 case 63: /* "cmpne" */ 839 t2 = newTemp(Ity_I64); 840 assign(t2, unop(Iop_1Uto64, 841 binop(Iop_CmpNE64, getIReg(ra), getIReg(rb)))); 842 MARK_REG_WB(rd, t2); 843 844 845 break; 846 case 64: 847 /* Fall-through */ 848 case 65: 849 /* Fall-through */ 850 case 66: 851 /* Fall-through */ 852 case 67: 
853 /* Fall-through */ 854 case 68: 855 /* Fall-through */ 856 case 69: 857 /* Fall-through */ 858 case 70: 859 /* Fall-through */ 860 case 71: 861 /* Fall-through */ 862 case 72: 863 use_dirty_helper = 1; 864 break; 865 case 73: /* "ctz" */ 866 t2 = newTemp(Ity_I64); 867 assign(t2, unop(Iop_Ctz64, getIReg(ra))); 868 869 MARK_REG_WB(rd, t2); 870 871 872 break; 873 case 74: /* "dblalign" */ 874 t0 = newTemp(Ity_I64); 875 t1 = newTemp(Ity_I64); 876 t2 = newTemp(Ity_I64); 877 878 /* t0 is the bit shift amount */ 879 assign(t0, binop(Iop_Shl64, 880 binop(Iop_And64, 881 getIReg(rb), 882 mkU64(7)), 883 mkU8(3))); 884 assign(t1, binop(Iop_Sub64, 885 mkU64(64), 886 mkexpr(t0))); 887 888 assign(t2, binop(Iop_Or64, 889 binop(Iop_Shl64, 890 getIReg(ra), 891 unop(Iop_64to8, mkexpr(t1))), 892 binop(Iop_Shr64, 893 getIReg(rd), 894 unop(Iop_64to8, mkexpr(t0))))); 895 896 MARK_REG_WB(rd, t2); 897 break; 898 case 75: 899 /* Fall-through */ 900 case 76: 901 /* Fall-through */ 902 case 77: 903 /* Fall-through */ 904 case 78: 905 /* Fall-through */ 906 case 79: 907 use_dirty_helper = 1; 908 break; 909 case 80: /* "exch" */ 910 t2 = newTemp(Ity_I64); 911 stmt( IRStmt_CAS( 912 mkIRCAS(IRTemp_INVALID, 913 t2, 914 Iend_LE, 915 getIReg(ra), 916 NULL, 917 mkU64(0x0), 918 NULL, 919 getIReg(rb)))); 920 MARK_REG_WB(rd, t2); 921 break; 922 case 81: /* "exch4 rd, ra, rb" */ 923 t0 = newTemp(Ity_I32); 924 t2 = newTemp(Ity_I64); 925 stmt( IRStmt_CAS( 926 mkIRCAS(IRTemp_INVALID, 927 t0, 928 Iend_LE, 929 getIReg(ra), 930 NULL, 931 mkU32(0x0), 932 NULL, 933 narrowTo(Ity_I32, 934 getIReg(rb))))); 935 assign(t2, unop(Iop_32Sto64, mkexpr(t0))); 936 MARK_REG_WB(rd, t2); 937 break; 938 case 82: 939 /* Fall-through */ 940 case 83: 941 /* Fall-through */ 942 case 84: 943 /* Fall-through */ 944 case 85: 945 /* Fall-through */ 946 case 86: 947 /* Fall-through */ 948 case 87: 949 /* Fall-through */ 950 case 88: 951 /* Fall-through */ 952 case 89: 953 use_dirty_helper = 1; 954 break; 955 case 90: /* "fetchadd" 
*/ 956 t2 = newTemp(Ity_I64); 957 stmt( IRStmt_CAS( 958 mkIRCAS(IRTemp_INVALID, 959 t2, 960 Iend_LE, 961 getIReg(ra), 962 NULL, 963 // fetchadd=3 964 mkU64(0x3), 965 NULL, 966 getIReg(rb)))); 967 MARK_REG_WB(rd, t2); 968 break; 969 case 91: /* "fetchadd4" */ 970 t0 = newTemp(Ity_I32); 971 t2 = newTemp(Ity_I64); 972 stmt( IRStmt_CAS( 973 mkIRCAS(IRTemp_INVALID, 974 t0, 975 Iend_LE, 976 getIReg(ra), 977 NULL, 978 // fetchadd=3 979 mkU32(0x3), 980 NULL, 981 narrowTo(Ity_I32, 982 getIReg(rb))))); 983 assign(t2, unop(Iop_32Sto64, mkexpr(t0))); 984 MARK_REG_WB(rd, t2); 985 986 break; 987 case 92: /* "fetchaddgez" */ 988 t2 = newTemp(Ity_I64); 989 stmt( IRStmt_CAS( 990 mkIRCAS(IRTemp_INVALID, 991 t2, 992 Iend_LE, 993 getIReg(ra), 994 NULL, 995 // fetchaddgez=5 996 mkU64(0x5), 997 NULL, 998 getIReg(rb)))); 999 MARK_REG_WB(rd, t2); 1000 break; 1001 case 93: /* "fetchaddgez4" */ 1002 t0 = newTemp(Ity_I32); 1003 t2 = newTemp(Ity_I64); 1004 stmt( IRStmt_CAS( 1005 mkIRCAS(IRTemp_INVALID, 1006 t0, 1007 Iend_LE, 1008 getIReg(ra), 1009 NULL, 1010 // fetchaddgez=5 1011 mkU32(0x5), 1012 NULL, 1013 narrowTo(Ity_I32, 1014 getIReg(rb))))); 1015 assign(t2, unop(Iop_32Sto64, mkexpr(t0))); 1016 MARK_REG_WB(rd, t2); 1017 break; 1018 case 94: /* "fetchand\n") */ 1019 t2 = newTemp(Ity_I64); 1020 stmt( IRStmt_CAS( 1021 mkIRCAS(IRTemp_INVALID, 1022 t2, 1023 Iend_LE, 1024 getIReg(ra), 1025 NULL, 1026 mkU64(0x2), 1027 NULL, 1028 getIReg(rb)))); 1029 MARK_REG_WB(rd, t2); 1030 break; 1031 case 95: 1032 /* mkIRCAS. 
1033 0: xch### 1: cmpexch###, 1034 2: fetchand## 3: fetchadd## 1035 4: fetchor## 5: fetchaddgez 1036 */ 1037 /* "fetchand4" */ 1038 t0 = newTemp(Ity_I32); 1039 t2 = newTemp(Ity_I64); 1040 stmt( IRStmt_CAS( 1041 mkIRCAS(IRTemp_INVALID, 1042 t0, 1043 Iend_LE, 1044 getIReg(ra), 1045 NULL, 1046 mkU32(0x2), 1047 NULL, 1048 narrowTo(Ity_I32, 1049 getIReg(rb))))); 1050 assign(t2, unop(Iop_32Sto64, mkexpr(t0))); 1051 MARK_REG_WB(rd, t2); 1052 break; 1053 case 96: /* "fetchor" */ 1054 t2 = newTemp(Ity_I64); 1055 stmt( IRStmt_CAS( 1056 mkIRCAS(IRTemp_INVALID, 1057 t2, 1058 Iend_LE, 1059 getIReg(ra), 1060 NULL, 1061 mkU64(0x4), 1062 NULL, 1063 getIReg(rb)))); 1064 MARK_REG_WB(rd, t2); 1065 break; 1066 case 97: /* "fetchor4" */ 1067 t0 = newTemp(Ity_I32); 1068 t2 = newTemp(Ity_I64); 1069 stmt( IRStmt_CAS( 1070 mkIRCAS(IRTemp_INVALID, 1071 t0, 1072 Iend_LE, 1073 getIReg(ra), 1074 NULL, 1075 mkU32(0x4), 1076 NULL, 1077 narrowTo(Ity_I32, 1078 getIReg(rb))))); 1079 assign(t2, unop(Iop_32Sto64, mkexpr(t0))); 1080 MARK_REG_WB(rd, t2); 1081 break; 1082 case 98: 1083 /* Fall-through */ 1084 case 99: 1085 /* Fall-through */ 1086 case 100: 1087 use_dirty_helper = 1; 1088 break; 1089 case 101: /* "fnop" Ignore */ 1090 break; 1091 case 102: 1092 /* Fall-through */ 1093 case 103: 1094 /* Fall-through */ 1095 case 104: 1096 /* Fall-through */ 1097 case 105: 1098 /* Fall-through */ 1099 case 106: 1100 /* Fall-through */ 1101 case 107: 1102 /* Fall-through */ 1103 case 108: 1104 use_dirty_helper = 1; 1105 break; 1106 case 109: 1107 /* Fall-through */ 1108 case 110: 1109 /* Fall-through */ 1110 case 111: 1111 use_dirty_helper = 1; 1112 break; 1113 case 112: /* "iret" */ 1114 next = mkU64(guest_PC_curr_instr + 8); 1115 jumpkind = Ijk_Ret; 1116 break; 1117 case 113: /* "j" */ 1118 next = mkU64(imm); 1119 /* set steering address. 
*/ 1120 steering_pc = imm; 1121 jumpkind = Ijk_Boring; 1122 break; 1123 case 114: 1124 t2 = newTemp(Ity_I64); 1125 assign(t2, mkU64(guest_PC_curr_instr + 8)); 1126 /* set steering address. */ 1127 steering_pc = imm; 1128 next = mkU64(imm); 1129 jumpkind = Ijk_Call; 1130 MARK_REG_WB(55, t2); 1131 break; 1132 case 115: /* "jalr" */ 1133 /* Fall-through */ 1134 case 116: /* "jalrp" */ 1135 t1 = newTemp(Ity_I64); 1136 t2 = newTemp(Ity_I64); 1137 assign(t1, getIReg(ra)); 1138 assign(t2, mkU64(guest_PC_curr_instr + 8)); 1139 next = mkexpr(t1); 1140 jumpkind = Ijk_Call; 1141 MARK_REG_WB(55, t2); 1142 break; 1143 case 117: /* "jr" */ 1144 /* Fall-through */ 1145 case 118: /* "jrp" */ 1146 next = getIReg(ra); 1147 jumpkind = Ijk_Boring; 1148 break; 1149 case 119: /* "ld" */ 1150 t2 = newTemp(Ity_I64); 1151 assign(t2, load(Ity_I64, (getIReg(ra)))); 1152 MARK_REG_WB(rd, t2); 1153 break; 1154 case 120: /* "ld1s" */ 1155 t2 = newTemp(Ity_I64); 1156 assign(t2, unop(Iop_8Sto64, 1157 load(Ity_I8, (getIReg(ra))))); 1158 MARK_REG_WB(rd, t2); 1159 break; 1160 case 121: /* "ld1s_add" */ 1161 t1 = newTemp(Ity_I64); 1162 t2 = newTemp(Ity_I64); 1163 assign(t1, binop(Iop_Add64, getIReg(ra), mkU64(imm))); 1164 assign(t2, unop(Iop_8Sto64, 1165 load(Ity_I8, (getIReg(ra))))); 1166 MARK_REG_WB(ra, t1); 1167 MARK_REG_WB(rd, t2); 1168 break; 1169 case 122: /* "ld1u" */ 1170 t2 = newTemp(Ity_I64); 1171 assign(t2, unop(Iop_8Uto64, 1172 load(Ity_I8, (getIReg(ra))))); 1173 MARK_REG_WB(rd, t2); 1174 1175 break; 1176 case 123: /* "ld1u_add" */ 1177 t1 = newTemp(Ity_I64); 1178 t2 = newTemp(Ity_I64); 1179 assign(t1, binop(Iop_Add64, getIReg(ra), mkU64(imm))); 1180 assign(t2, unop(Iop_8Uto64, 1181 load(Ity_I8, (getIReg(ra))))); 1182 MARK_REG_WB(ra, t1); 1183 MARK_REG_WB(rd, t2); 1184 break; 1185 case 124: /* "ld2s" */ 1186 t2 = newTemp(Ity_I64); 1187 assign(t2, unop(Iop_16Sto64, 1188 load(Ity_I16, getIReg(ra)))); 1189 MARK_REG_WB(rd, t2); 1190 break; 1191 case 125: /* "ld2s_add" */ 1192 t1 = 
newTemp(Ity_I64); 1193 t2 = newTemp(Ity_I64); 1194 assign(t1, binop(Iop_Add64, getIReg(ra), mkU64(imm))); 1195 assign(t2, unop(Iop_16Sto64, 1196 load(Ity_I16, getIReg(ra)))); 1197 MARK_REG_WB(rd, t2); 1198 MARK_REG_WB(ra, t1); 1199 break; 1200 case 126: /* "ld2u" */ 1201 t2 = newTemp(Ity_I64); 1202 assign(t2, unop(Iop_16Uto64, 1203 load(Ity_I16, getIReg(ra)))); 1204 MARK_REG_WB(rd, t2); 1205 break; 1206 case 127: /* "ld2u_add" */ 1207 t1 = newTemp(Ity_I64); 1208 t2 = newTemp(Ity_I64); 1209 assign(t1, binop(Iop_Add64, getIReg(ra), mkU64(imm))); 1210 assign(t2, unop(Iop_16Uto64, 1211 load(Ity_I16, getIReg(ra)))); 1212 MARK_REG_WB(rd, t2); 1213 MARK_REG_WB(ra, t1); 1214 break; 1215 case 128: /* "ld4s" */ 1216 t2 = newTemp(Ity_I64); 1217 assign(t2, unop(Iop_32Sto64, 1218 load(Ity_I32, (getIReg(ra))))); 1219 MARK_REG_WB(rd, t2); 1220 break; 1221 case 129: /* "ld4s_add" */ 1222 t2 = newTemp(Ity_I64); 1223 t1 = newTemp(Ity_I64); 1224 assign(t1, binop(Iop_Add64, getIReg(ra), mkU64(imm))); 1225 assign(t2, unop(Iop_32Sto64, 1226 load(Ity_I32, (getIReg(ra))))); 1227 MARK_REG_WB(rd, t2); 1228 MARK_REG_WB(ra, t1); 1229 break; 1230 case 130: /* "ld4u" */ 1231 t2 = newTemp(Ity_I64); 1232 assign(t2, unop(Iop_32Uto64, 1233 load(Ity_I32, getIReg(ra)))); 1234 MARK_REG_WB(rd, t2); 1235 break; 1236 case 131: /* "ld4u_add" */ 1237 t1 = newTemp(Ity_I64); 1238 t2 = newTemp(Ity_I64); 1239 assign(t1, binop(Iop_Add64, getIReg(ra), mkU64(imm))); 1240 assign(t2, unop(Iop_32Uto64, 1241 load(Ity_I32, getIReg(ra)))); 1242 MARK_REG_WB(ra, t1); 1243 MARK_REG_WB(rd, t2); 1244 break; 1245 case 132: /* "ld_add" */ 1246 t1 = newTemp(Ity_I64); 1247 t2 = newTemp(Ity_I64); 1248 assign(t1, load(Ity_I64, getIReg(ra))); 1249 assign(t2, binop(Iop_Add64, getIReg(ra), mkU64(imm))); 1250 MARK_REG_WB(ra, t2); 1251 MARK_REG_WB(rd, t1); 1252 break; 1253 case 133: /* "ldna" */ 1254 t2 = newTemp(Ity_I64); 1255 assign(t2, load(Ity_I64, 1256 binop(Iop_And64, 1257 getIReg(ra), 1258 unop(Iop_Not64, 1259 mkU64(7))))); 
1260 MARK_REG_WB(rd, t2); 1261 break; 1262 case 134: /* "ldna_add" */ 1263 t1 = newTemp(Ity_I64); 1264 t2 = newTemp(Ity_I64); 1265 1266 assign(t1, binop(Iop_Add64, getIReg(ra), mkU64(imm))); 1267 assign(t2, load(Ity_I64, 1268 binop(Iop_And64, 1269 getIReg(ra), 1270 unop(Iop_Not64, 1271 mkU64(7))))); 1272 MARK_REG_WB(ra, t1); 1273 MARK_REG_WB(rd, t2); 1274 break; 1275 case 135: /* "ldnt" */ 1276 /* Valgrind IR has no Non-Temp load. Use normal load. */ 1277 t2 = newTemp(Ity_I64); 1278 assign(t2, load(Ity_I64, (getIReg(ra)))); 1279 MARK_REG_WB(rd, t2); 1280 break; 1281 case 136: /* "ldnt1s" */ 1282 t2 = newTemp(Ity_I64); 1283 assign(t2, unop(Iop_8Sto64, 1284 load(Ity_I8, (getIReg(ra))))); 1285 MARK_REG_WB(rd, t2); 1286 break; 1287 case 137: /* "ldnt1s_add" */ 1288 t1 = newTemp(Ity_I64); 1289 t2 = newTemp(Ity_I64); 1290 assign(t2, unop(Iop_8Sto64, 1291 load(Ity_I8, (getIReg(ra))))); 1292 assign(t1, binop(Iop_Add64, getIReg(ra), mkU64(imm))); 1293 MARK_REG_WB(ra, t1); 1294 MARK_REG_WB(rd, t2); 1295 break; 1296 case 138: /* "ldnt1u" */ 1297 t2 = newTemp(Ity_I64); 1298 assign(t2, unop(Iop_8Uto64, 1299 load(Ity_I8, (getIReg(ra))))); 1300 MARK_REG_WB(rd, t2); 1301 break; 1302 case 139: /* "ldnt1u_add" */ 1303 t1 = newTemp(Ity_I64); 1304 t2 = newTemp(Ity_I64); 1305 1306 assign(t1, binop(Iop_Add64, getIReg(ra), mkU64(imm))); 1307 assign(t2, unop(Iop_8Uto64, 1308 load(Ity_I8, (getIReg(ra))))); 1309 1310 MARK_REG_WB(ra, t1); 1311 MARK_REG_WB(rd, t2); 1312 break; 1313 case 140: /* "ldnt2s" */ 1314 t2 = newTemp(Ity_I64); 1315 assign(t2, unop(Iop_16Sto64, 1316 load(Ity_I16, getIReg(ra)))); 1317 MARK_REG_WB(rd, t2); 1318 break; 1319 case 141: /* "ldnt2s_add" */ 1320 t1 = newTemp(Ity_I64); 1321 t2 = newTemp(Ity_I64); 1322 assign(t2, unop(Iop_16Sto64, 1323 load(Ity_I16, getIReg(ra)))); 1324 assign(t1, binop(Iop_Add64, getIReg(ra), mkU64(imm))); 1325 MARK_REG_WB(ra, t1); 1326 MARK_REG_WB(rd, t2); 1327 break; 1328 case 142: /* "ldnt2u" */ 1329 t2 = newTemp(Ity_I64); 1330 assign(t2, 
unop(Iop_16Uto64, 1331 load(Ity_I16, getIReg(ra)))); 1332 MARK_REG_WB(rd, t2); 1333 break; 1334 case 143: /* "ldnt2u_add" */ 1335 t1 = newTemp(Ity_I64); 1336 t2 = newTemp(Ity_I64); 1337 assign(t2, unop(Iop_16Uto64, 1338 load(Ity_I16, getIReg(ra)))); 1339 assign(t1, binop(Iop_Add64, getIReg(ra), mkU64(imm))); 1340 MARK_REG_WB(ra, t1); 1341 MARK_REG_WB(rd, t2); 1342 break; 1343 case 144: /* "ldnt4s" */ 1344 t2 = newTemp(Ity_I64); 1345 assign(t2, unop(Iop_32Sto64, 1346 load(Ity_I32, (getIReg(ra))))); 1347 MARK_REG_WB(rd, t2); 1348 break; 1349 case 145: /* "ldnt4s_add" */ 1350 t1 = newTemp(Ity_I64); 1351 t2 = newTemp(Ity_I64); 1352 assign(t2, unop(Iop_32Sto64, 1353 load(Ity_I32, (getIReg(ra))))); 1354 assign(t1, binop(Iop_Add64, getIReg(ra), mkU64(imm))); 1355 MARK_REG_WB(rd, t2); 1356 MARK_REG_WB(ra, t1); 1357 break; 1358 case 146: /* "ldnt4u" */ 1359 t2 = newTemp(Ity_I64); 1360 assign(t2, unop(Iop_32Uto64, 1361 load(Ity_I32, getIReg(ra)))); 1362 MARK_REG_WB(rd, t2); 1363 break; 1364 case 147: /* "ldnt4u_add" */ 1365 t1 = newTemp(Ity_I64); 1366 t2 = newTemp(Ity_I64); 1367 assign(t2, unop(Iop_32Uto64, 1368 load(Ity_I32, getIReg(ra)))); 1369 assign(t1, binop(Iop_Add64, getIReg(ra), mkU64(imm))); 1370 MARK_REG_WB(rd, t2); 1371 MARK_REG_WB(ra, t1); 1372 break; 1373 case 148: /* "ldnt_add" */ 1374 t1 = newTemp(Ity_I64); 1375 t2 = newTemp(Ity_I64); 1376 assign(t1, load(Ity_I64, getIReg(ra))); 1377 assign(t2, binop(Iop_Add64, getIReg(ra), mkU64(imm))); 1378 MARK_REG_WB(rd, t1); 1379 MARK_REG_WB(ra, t2); 1380 break; 1381 case 149: /* "lnk" */ 1382 t2 = newTemp(Ity_I64); 1383 assign(t2, mkU64(guest_PC_curr_instr + 8)); 1384 MARK_REG_WB(rd, t2); 1385 break; 1386 case 150: /* "mf" */ 1387 use_dirty_helper = 1; 1388 break; 1389 case 151: /* "mfspr" */ 1390 t2 = newTemp(Ity_I64); 1391 if (imm == 0x2780) { // Get Cmpexch value 1392 assign(t2, getIReg(70)); 1393 MARK_REG_WB(rd, t2); 1394 } else if (imm == 0x2580) { // Get EX_CONTEXT_0_0 1395 assign(t2, getIReg(576 / 8)); 1396 
MARK_REG_WB(rd, t2); 1397 } else if (imm == 0x2581) { // Get EX_CONTEXT_0_1 1398 assign(t2, getIReg(584 / 8)); 1399 MARK_REG_WB(rd, t2); 1400 } else 1401 use_dirty_helper = 1; 1402 break; 1403 case 152: /* "mm" */ 1404 use_dirty_helper = 1; 1405 break; 1406 case 153: /* "mnz" */ 1407 t2 = newTemp(Ity_I64); 1408 assign(t2, binop(Iop_And64, 1409 unop(Iop_1Sto64, binop(Iop_CmpNE64, 1410 getIReg(ra), 1411 mkU64(0))), 1412 getIReg(rb))); 1413 MARK_REG_WB(rd, t2); 1414 break; 1415 case 154: /* "mtspr imm, ra" */ 1416 if (imm == 0x2780) // Set Cmpexch value 1417 putIReg(70, getIReg(ra)); 1418 else if (imm == 0x2580) // set EX_CONTEXT_0_0 1419 putIReg(576/8, getIReg(ra)); 1420 else if (imm == 0x2581) // set EX_CONTEXT_0_1 1421 putIReg(584/8, getIReg(ra)); 1422 else 1423 use_dirty_helper = 1; 1424 break; 1425 case 155: /* "mul_hs_hs" */ 1426 t2 = newTemp(Ity_I64); 1427 assign(t2, binop(Iop_MullS32, 1428 unop(Iop_64to32, 1429 binop(Iop_Shr64, 1430 getIReg(ra), 1431 mkU8(32))), 1432 unop(Iop_64to32, 1433 binop(Iop_Shr64, 1434 getIReg(rb), 1435 mkU8(32))))); 1436 MARK_REG_WB(rd, t2); 1437 break; 1438 case 156: /* "mul_hs_hu" */ 1439 t0 = newTemp(Ity_I64); 1440 t1 = newTemp(Ity_I64); 1441 t2 = newTemp(Ity_I64); 1442 t3 = newTemp(Ity_I64); 1443 1444 assign(t0, unop(Iop_32Sto64, 1445 unop(Iop_64to32, 1446 binop(Iop_Shr64, getIReg(ra), mkU8(32))))); 1447 assign(t1, binop(Iop_MullU32, 1448 unop(Iop_64to32, mkexpr(t0)), 1449 unop(Iop_64to32, binop(Iop_Shr64, getIReg(rb), mkU8(32))))); 1450 assign(t3, binop(Iop_MullU32, 1451 unop(Iop_64to32, binop(Iop_Shr64, 1452 mkexpr(t0), 1453 mkU8(32))), 1454 unop(Iop_64to32, binop(Iop_Shr64, getIReg(rb), mkU8(32))))); 1455 assign(t2, binop(Iop_Add64, 1456 mkexpr(t1), 1457 binop(Iop_Shl64, 1458 mkexpr(t3), 1459 mkU8(32)))); 1460 MARK_REG_WB(rd, t2); 1461 break; 1462 case 157: /* "mul_hs_ls" */ 1463 t2 = newTemp(Ity_I64); 1464 assign(t2, binop(Iop_MullS32, 1465 unop(Iop_64to32, 1466 binop(Iop_Shr64, 1467 getIReg(ra), 1468 mkU8(32))), 1469 
unop(Iop_64to32, 1470 getIReg(rb)))); 1471 MARK_REG_WB(rd, t2); 1472 break; 1473 case 158: /* "mul_hs_lu" */ 1474 t0 = newTemp(Ity_I64); 1475 t1 = newTemp(Ity_I64); 1476 t2 = newTemp(Ity_I64); 1477 t3 = newTemp(Ity_I64); 1478 1479 assign(t0, unop(Iop_32Sto64, 1480 unop(Iop_64to32, 1481 binop(Iop_Shr64, getIReg(ra), mkU8(32))))); 1482 assign(t1, binop(Iop_MullU32, 1483 unop(Iop_64to32, mkexpr(t0)), 1484 unop(Iop_64to32, getIReg(rb)))); 1485 assign(t3, binop(Iop_MullU32, 1486 unop(Iop_64to32, binop(Iop_Shr64, 1487 mkexpr(t0), 1488 mkU8(32))), 1489 unop(Iop_64to32, getIReg(rb)))); 1490 assign(t2, binop(Iop_Add64, 1491 mkexpr(t1), 1492 binop(Iop_Shl64, 1493 mkexpr(t3), 1494 mkU8(32)))); 1495 MARK_REG_WB(rd, t2); 1496 break; 1497 case 159: /* "mul_hu_hu" */ 1498 t2 = newTemp(Ity_I64); 1499 assign(t2, binop(Iop_MullU32, 1500 unop(Iop_64to32, 1501 binop(Iop_Shr64, 1502 getIReg(ra), 1503 mkU8(32))), 1504 unop(Iop_64to32, 1505 binop(Iop_Shr64, 1506 getIReg(rb), 1507 mkU8(32))))); 1508 MARK_REG_WB(rd, t2); 1509 break; 1510 case 160: /* "mul_hu_ls" */ 1511 t0 = newTemp(Ity_I64); 1512 t1 = newTemp(Ity_I64); 1513 t2 = newTemp(Ity_I64); 1514 t3 = newTemp(Ity_I64); 1515 1516 assign(t0, unop(Iop_32Sto64, 1517 unop(Iop_64to32, 1518 getIReg(ra)))); 1519 1520 assign(t1, binop(Iop_MullU32, 1521 unop(Iop_64to32, mkexpr(t0)), 1522 unop(Iop_64to32, binop(Iop_Shr64, getIReg(rb), mkU8(32))))); 1523 assign(t3, binop(Iop_MullU32, 1524 unop(Iop_64to32, binop(Iop_Shr64, 1525 mkexpr(t0), 1526 mkU8(32))), 1527 unop(Iop_64to32, binop(Iop_Shr64, getIReg(rb), mkU8(32))))); 1528 assign(t2, binop(Iop_Add64, 1529 mkexpr(t1), 1530 binop(Iop_Shl64, 1531 mkexpr(t3), 1532 mkU8(32)))); 1533 MARK_REG_WB(rd, t2); 1534 break; 1535 case 161: /* "mul_hu_lu" */ 1536 t2 = newTemp(Ity_I64); 1537 assign(t2, binop(Iop_MullU32, 1538 unop(Iop_64to32, 1539 binop(Iop_Shr64, 1540 getIReg(ra), 1541 mkU8(32))), 1542 unop(Iop_64to32, 1543 getIReg(rb)))); 1544 MARK_REG_WB(rd, t2); 1545 break; 1546 case 162: /* "mul_ls_ls" */ 
1547 t2 = newTemp(Ity_I64); 1548 assign(t2, binop(Iop_MullS32, 1549 unop(Iop_64to32, getIReg(ra)), 1550 unop(Iop_64to32, getIReg(rb)))); 1551 MARK_REG_WB(rd, t2); 1552 break; 1553 case 163: /* "mul_ls_lu" */ 1554 t0 = newTemp(Ity_I64); 1555 t1 = newTemp(Ity_I64); 1556 t2 = newTemp(Ity_I64); 1557 t3 = newTemp(Ity_I64); 1558 1559 assign(t0, unop(Iop_32Sto64, 1560 unop(Iop_64to32, getIReg(ra)))); 1561 assign(t1, binop(Iop_MullU32, 1562 unop(Iop_64to32, mkexpr(t0)), 1563 unop(Iop_64to32, getIReg(rb)))); 1564 assign(t3, binop(Iop_MullU32, 1565 unop(Iop_64to32, binop(Iop_Shr64, 1566 mkexpr(t0), 1567 mkU8(32))), 1568 unop(Iop_64to32, getIReg(rb)))); 1569 assign(t2, binop(Iop_Add64, 1570 mkexpr(t1), 1571 binop(Iop_Shl64, 1572 mkexpr(t3), 1573 mkU8(32)))); 1574 MARK_REG_WB(rd, t2); 1575 break; 1576 case 164: /* "mul_lu_lu" */ 1577 t2 = newTemp(Ity_I64); 1578 assign(t2, binop(Iop_MullU32, 1579 unop(Iop_64to32, getIReg(ra)), 1580 unop(Iop_64to32, getIReg(rb)))); 1581 MARK_REG_WB(rd, t2); 1582 break; 1583 case 165: /* "mula_hs_hs" */ 1584 t0 = newTemp(Ity_I64); 1585 t2 = newTemp(Ity_I64); 1586 1587 assign(t0, binop(Iop_MullS32, 1588 unop(Iop_64to32, binop(Iop_Shr64, 1589 getIReg(ra), mkU8(32))), 1590 unop(Iop_64to32, binop(Iop_Shr64, 1591 getIReg(rb), mkU8(32))))); 1592 assign(t2, binop(Iop_Add64, getIReg(rd), mkexpr(t0))); 1593 MARK_REG_WB(rd, t2); 1594 break; 1595 case 166: /* "mula_hs_hu" */ 1596 t0 = newTemp(Ity_I64); 1597 t1 = newTemp(Ity_I64); 1598 t2 = newTemp(Ity_I64); 1599 t3 = newTemp(Ity_I64); 1600 t4 = newTemp(Ity_I64); 1601 assign(t0, unop(Iop_32Sto64, 1602 unop(Iop_64to32, 1603 binop(Iop_Shr64, getIReg(ra), mkU8(32))))); 1604 assign(t1, binop(Iop_MullU32, 1605 unop(Iop_64to32, mkexpr(t0)), 1606 unop(Iop_64to32, binop(Iop_Shr64, 1607 getIReg(rb), mkU8(32))))); 1608 assign(t3, binop(Iop_MullU32, 1609 unop(Iop_64to32, binop(Iop_Shr64, 1610 mkexpr(t0), 1611 mkU8(32))), 1612 unop(Iop_64to32, binop(Iop_Shr64, 1613 getIReg(rb), mkU8(32))))); 1614 assign(t2, 
binop(Iop_Add64, 1615 mkexpr(t1), 1616 binop(Iop_Shl64, 1617 mkexpr(t3), 1618 mkU8(32)))); 1619 assign(t4, binop(Iop_Add64, getIReg(rd), mkexpr(t2))); 1620 MARK_REG_WB(rd, t4); 1621 break; 1622 case 167: /* "mula_hs_ls" */ 1623 t2 = newTemp(Ity_I64); 1624 t4 = newTemp(Ity_I64); 1625 assign(t2, binop(Iop_MullS32, 1626 unop(Iop_64to32, 1627 binop(Iop_Shr64, 1628 getIReg(ra), 1629 mkU8(32))), 1630 unop(Iop_64to32, 1631 getIReg(rb)))); 1632 assign(t4, binop(Iop_Add64, getIReg(rd), mkexpr(t2))); 1633 MARK_REG_WB(rd, t4); 1634 break; 1635 case 168: /* "mula_hs_lu" */ 1636 t0 = newTemp(Ity_I64); 1637 t1 = newTemp(Ity_I64); 1638 t2 = newTemp(Ity_I64); 1639 t3 = newTemp(Ity_I64); 1640 t4 = newTemp(Ity_I64); 1641 assign(t0, unop(Iop_32Sto64, 1642 unop(Iop_64to32, 1643 binop(Iop_Shr64, getIReg(ra), mkU8(32))))); 1644 assign(t1, binop(Iop_MullU32, 1645 unop(Iop_64to32, mkexpr(t0)), 1646 unop(Iop_64to32, getIReg(rb)))); 1647 assign(t3, binop(Iop_MullU32, 1648 unop(Iop_64to32, binop(Iop_Shr64, 1649 mkexpr(t0), 1650 mkU8(32))), 1651 unop(Iop_64to32, getIReg(rb)))); 1652 assign(t2, binop(Iop_Add64, 1653 mkexpr(t1), 1654 binop(Iop_Shl64, 1655 mkexpr(t3), 1656 mkU8(32)))); 1657 assign(t4, binop(Iop_Add64, getIReg(rd), mkexpr(t2))); 1658 MARK_REG_WB(rd, t4); 1659 break; 1660 case 169: /* "mula_hu_hu" */ 1661 use_dirty_helper = 1; 1662 break; 1663 case 170: /* "mula_hu_ls" */ 1664 use_dirty_helper = 1; 1665 break; 1666 case 171: /* "mula_hu_lu" */ 1667 t2 = newTemp(Ity_I64); 1668 assign(t2, binop(Iop_Add64, 1669 binop(Iop_MullU32, 1670 unop(Iop_64to32, 1671 binop(Iop_Shr64, 1672 getIReg(ra), 1673 mkU8(32))), 1674 unop(Iop_64to32, 1675 getIReg(rb))), 1676 getIReg(rd))); 1677 MARK_REG_WB(rd, t2); 1678 break; 1679 case 172: /* "mula_ls_ls" */ 1680 t2 = newTemp(Ity_I64); 1681 assign(t2, binop(Iop_Add64, 1682 getIReg(rd), 1683 binop(Iop_MullS32, 1684 unop(Iop_64to32, getIReg(ra)), 1685 unop(Iop_64to32, getIReg(rb))))); 1686 MARK_REG_WB(rd, t2); 1687 break; 1688 case 173: /* "mula_ls_lu" */ 
1689 t0 = newTemp(Ity_I64); 1690 t1 = newTemp(Ity_I64); 1691 t2 = newTemp(Ity_I64); 1692 t3 = newTemp(Ity_I64); 1693 1694 assign(t0, unop(Iop_32Sto64, 1695 unop(Iop_64to32, getIReg(ra)))); 1696 assign(t1, binop(Iop_MullU32, 1697 unop(Iop_64to32, mkexpr(t0)), 1698 unop(Iop_64to32, getIReg(rb)))); 1699 assign(t3, binop(Iop_MullU32, 1700 unop(Iop_64to32, binop(Iop_Shr64, 1701 mkexpr(t0), 1702 mkU8(32))), 1703 unop(Iop_64to32, getIReg(rb)))); 1704 assign(t2, binop(Iop_Add64, 1705 getIReg(rd), 1706 binop(Iop_Add64, 1707 mkexpr(t1), 1708 binop(Iop_Shl64, 1709 mkexpr(t3), 1710 mkU8(32))))); 1711 MARK_REG_WB(rd, t2); 1712 break; 1713 case 174: /* "mula_lu_lu" */ 1714 t2 = newTemp(Ity_I64); 1715 assign(t2, binop(Iop_Add64, 1716 binop(Iop_MullU32, 1717 unop(Iop_64to32, 1718 getIReg(ra)), 1719 unop(Iop_64to32, 1720 getIReg(rb))), 1721 getIReg(rd))); 1722 MARK_REG_WB(rd, t2); 1723 break; 1724 case 175: /* "mulax" */ 1725 t2 = newTemp(Ity_I64); 1726 assign(t2, unop(Iop_32Sto64, 1727 unop(Iop_64to32, 1728 binop(Iop_Add64, 1729 getIReg(rd), 1730 binop(Iop_MullU32, 1731 narrowTo(Ity_I32, getIReg(ra)), 1732 narrowTo(Ity_I32, getIReg(rb))))))); 1733 MARK_REG_WB(rd, t2); 1734 break; 1735 case 176: /* "mulx" */ 1736 t2 = newTemp(Ity_I64); 1737 assign(t2, unop(Iop_32Sto64, 1738 unop(Iop_64to32, 1739 binop(Iop_MullU32, 1740 narrowTo(Ity_I32, getIReg(ra)), 1741 narrowTo(Ity_I32, getIReg(rb)))))); 1742 MARK_REG_WB(rd, t2); 1743 break; 1744 case 177: /* "mz" */ 1745 t2 = newTemp(Ity_I64); 1746 assign(t2, binop(Iop_And64, 1747 unop(Iop_1Sto64, binop(Iop_CmpEQ64, 1748 getIReg(ra), 1749 mkU64(0))), 1750 getIReg(rb))); 1751 MARK_REG_WB(rd, t2); 1752 break; 1753 case 178: /* "nap" */ 1754 break; 1755 case 179: /* "nop" */ 1756 break; 1757 case 180: /* "nor" */ 1758 t2 = newTemp(Ity_I64); 1759 assign(t2, unop(Iop_Not64, 1760 binop(Iop_Or64, 1761 getIReg(ra), 1762 getIReg(rb)))); 1763 MARK_REG_WB(rd, t2); 1764 break; 1765 case 181: /* "or" */ 1766 t2 = newTemp(Ity_I64); 1767 assign(t2, 
binop(Iop_Or64, 1768 getIReg(ra), 1769 getIReg(rb))); 1770 MARK_REG_WB(rd, t2); 1771 break; 1772 case 182: /* "ori" */ 1773 t2 = newTemp(Ity_I64); 1774 assign(t2, binop(Iop_Or64, 1775 getIReg(ra), 1776 mkU64(imm))); 1777 MARK_REG_WB(rd, t2); 1778 break; 1779 case 183: 1780 /* Fall-through */ 1781 case 184: 1782 /* Fall-through */ 1783 case 185: 1784 use_dirty_helper = 1; 1785 break; 1786 case 186: /* "rotl" */ 1787 t0 = newTemp(Ity_I64); 1788 t1 = newTemp(Ity_I64); 1789 t2 = newTemp(Ity_I64); 1790 assign(t0, binop(Iop_Shl64, 1791 getIReg(ra), 1792 unop(Iop_64to8, getIReg(rb)))); 1793 assign(t1, binop(Iop_Shr64, 1794 getIReg(ra), 1795 unop(Iop_64to8, binop(Iop_Sub64, 1796 mkU64(0), 1797 getIReg(rb))))); 1798 assign(t2, binop(Iop_Or64, mkexpr(t0), mkexpr(t1))); 1799 MARK_REG_WB(rd, t2); 1800 break; 1801 case 187: /* "rotli" */ 1802 t0 = newTemp(Ity_I64); 1803 t1 = newTemp(Ity_I64); 1804 t2 = newTemp(Ity_I64); 1805 assign(t0, binop(Iop_Shl64, 1806 getIReg(ra), 1807 mkU8(imm))); 1808 assign(t1, binop(Iop_Shr64, 1809 getIReg(ra), 1810 mkU8(0 - imm))); 1811 assign(t2, binop(Iop_Or64, mkexpr(t0), mkexpr(t1))); 1812 MARK_REG_WB(rd, t2); 1813 break; 1814 case 188: /* "shl" */ 1815 t2 = newTemp(Ity_I64); 1816 assign(t2, binop(Iop_Shl64, 1817 getIReg(ra), 1818 unop(Iop_64to8, getIReg(rb)))); 1819 MARK_REG_WB(rd, t2); 1820 1821 break; 1822 case 189: /* "shl16insli" */ 1823 t2 = newTemp(Ity_I64); 1824 t3 = newTemp(Ity_I64); 1825 assign(t3, binop(Iop_Shl64, getIReg(ra), mkU8(16))); 1826 imm &= 0xFFFFULL; 1827 if (imm & 0x8000) 1828 { 1829 t4 = newTemp(Ity_I64); 1830 assign(t4, mkU64(imm)); 1831 assign(t2, binop(Iop_Add64, mkexpr(t3), mkexpr(t4))); 1832 } 1833 else 1834 { 1835 assign(t2, binop(Iop_Add64, mkexpr(t3), mkU64(imm))); 1836 } 1837 MARK_REG_WB(rd, t2); 1838 1839 break; 1840 case 190: /* "shl1add" */ 1841 t2 = newTemp(Ity_I64); 1842 assign(t2, binop(Iop_Add64, 1843 binop(Iop_Shl64, 1844 getIReg(ra), mkU8(1)), 1845 getIReg(rb))); 1846 1847 MARK_REG_WB(rd, t2); 1848 break; 
1849 case 191: /* "shl1addx" */ 1850 t2 = newTemp(Ity_I64); 1851 assign(t2, 1852 unop(Iop_32Sto64, 1853 unop(Iop_64to32, 1854 binop(Iop_Add64, 1855 binop(Iop_Shl64, 1856 getIReg(ra), mkU8(1)), 1857 getIReg(rb))))); 1858 MARK_REG_WB(rd, t2); 1859 break; 1860 case 192: /* "shl2add" */ 1861 t2 = newTemp(Ity_I64); 1862 assign(t2, binop(Iop_Add64, 1863 binop(Iop_Shl64, 1864 getIReg(ra), mkU8(2)), 1865 getIReg(rb))); 1866 1867 MARK_REG_WB(rd, t2); 1868 1869 break; 1870 case 193: /* "shl2addx" */ 1871 t2 = newTemp(Ity_I64); 1872 assign(t2, 1873 unop(Iop_32Sto64, 1874 unop(Iop_64to32, 1875 binop(Iop_Add64, 1876 binop(Iop_Shl64, 1877 getIReg(ra), mkU8(2)), 1878 getIReg(rb))))); 1879 MARK_REG_WB(rd, t2); 1880 1881 break; 1882 case 194: /* "shl3add" */ 1883 t2 = newTemp(Ity_I64); 1884 assign(t2, binop(Iop_Add64, 1885 binop(Iop_Shl64, 1886 getIReg(ra), mkU8(3)), 1887 getIReg(rb))); 1888 1889 MARK_REG_WB(rd, t2); 1890 break; 1891 case 195: /* "shl3addx" */ 1892 t2 = newTemp(Ity_I64); 1893 assign(t2, 1894 unop(Iop_32Sto64, 1895 unop(Iop_64to32, 1896 binop(Iop_Add64, 1897 binop(Iop_Shl64, 1898 getIReg(ra), mkU8(3)), 1899 getIReg(rb))))); 1900 MARK_REG_WB(rd, t2); 1901 break; 1902 case 196: /* "shli" */ 1903 t2 = newTemp(Ity_I64); 1904 assign(t2, binop(Iop_Shl64, getIReg(ra), 1905 mkU8(imm))); 1906 MARK_REG_WB(rd, t2); 1907 break; 1908 case 197: /* "shlx" */ 1909 t2 = newTemp(Ity_I64); 1910 assign(t2, unop(Iop_32Sto64, 1911 binop(Iop_Shl32, 1912 narrowTo(Ity_I32, getIReg(ra)), 1913 narrowTo(Ity_I8, getIReg(rb))))); 1914 MARK_REG_WB(rd, t2); 1915 break; 1916 case 198: /* "shlxi" */ 1917 t2 = newTemp(Ity_I64); 1918 assign(t2, signExtend(binop(Iop_Shl32, 1919 narrowTo(Ity_I32, getIReg(ra)), 1920 mkU8(imm)), 1921 32)); 1922 MARK_REG_WB(rd, t2); 1923 break; 1924 case 199: /* "shrs" */ 1925 t2 = newTemp(Ity_I64); 1926 assign(t2, binop(Iop_Sar64, getIReg(ra), 1927 narrowTo(Ity_I8, getIReg(rb)))); 1928 1929 MARK_REG_WB(rd, t2); 1930 break; 1931 case 200: /* "shrsi" */ 1932 t2 = 
newTemp(Ity_I64); 1933 assign(t2, binop(Iop_Sar64, getIReg(ra), 1934 mkU8(imm))); 1935 1936 MARK_REG_WB(rd, t2); 1937 break; 1938 case 201: /* "shru" */ 1939 t2 = newTemp(Ity_I64); 1940 assign(t2, binop(Iop_Shr64, 1941 getIReg(ra), 1942 narrowTo(Ity_I8, (getIReg(rb))))); 1943 1944 MARK_REG_WB(rd, t2); 1945 break; 1946 case 202: /* "shrui" */ 1947 t2 = newTemp(Ity_I64); 1948 assign(t2, binop(Iop_Shr64, getIReg(ra), mkU8(imm))); 1949 1950 MARK_REG_WB(rd, t2); 1951 break; 1952 case 203: /* "shrux" */ 1953 t2 = newTemp(Ity_I64); 1954 assign(t2, unop(Iop_32Sto64, 1955 (binop(Iop_Shr32, 1956 narrowTo(Ity_I32, getIReg(ra)), 1957 narrowTo(Ity_I8, getIReg(rb)))))); 1958 MARK_REG_WB(rd, t2); 1959 break; 1960 case 204: /* "shruxi" */ 1961 t2 = newTemp(Ity_I64); 1962 assign(t2, unop(Iop_32Sto64, 1963 (binop(Iop_Shr32, 1964 narrowTo(Ity_I32, getIReg(ra)), 1965 mkU8(imm))))); 1966 MARK_REG_WB(rd, t2); 1967 break; 1968 case 205: /* "shufflebytes" */ 1969 use_dirty_helper = 1; 1970 break; 1971 case 206: /* "st" */ 1972 store(getIReg(ra), getIReg(rb)); 1973 break; 1974 case 207: /* "st1" */ 1975 store(getIReg(ra), narrowTo(Ity_I8, getIReg(rb))); 1976 break; 1977 case 208: /* "st1_add" */ 1978 t2 = newTemp(Ity_I64); 1979 store(getIReg(opd[0]), narrowTo(Ity_I8, getIReg(opd[1]))); 1980 assign(t2, binop(Iop_Add64, getIReg(opd[0]), mkU64(opd[2]))); 1981 MARK_REG_WB(opd[0], t2); 1982 break; 1983 case 209: /* "st2" */ 1984 store(getIReg(ra), narrowTo(Ity_I16, getIReg(rb))); 1985 break; 1986 case 210: /* "st2_add" */ 1987 t2 = newTemp(Ity_I64); 1988 store(getIReg(opd[0]), narrowTo(Ity_I16, getIReg(opd[1]))); 1989 assign(t2, binop(Iop_Add64, getIReg(opd[0]), mkU64(opd[2]))); 1990 MARK_REG_WB(opd[0], t2); 1991 break; 1992 case 211: /* "st4" */ 1993 store(getIReg(ra), narrowTo(Ity_I32, getIReg(rb))); 1994 break; 1995 case 212: /* "st4_add" */ 1996 t2 = newTemp(Ity_I64); 1997 store(getIReg(opd[0]), narrowTo(Ity_I32, getIReg(opd[1]))); 1998 assign(t2, binop(Iop_Add64, getIReg(opd[0]), 
mkU64(opd[2]))); 1999 MARK_REG_WB(opd[0], t2); 2000 break; 2001 case 213: /* "st_add" */ 2002 t2 = newTemp(Ity_I64); 2003 store(getIReg(opd[0]), getIReg(opd[1])); 2004 assign(t2, binop(Iop_Add64, getIReg(opd[0]), mkU64(opd[2]))); 2005 MARK_REG_WB(opd[0], t2); 2006 break; 2007 case 214: /* "stnt" */ 2008 store(getIReg(ra), getIReg(rb)); 2009 break; 2010 case 215: /* "stnt1" */ 2011 store(getIReg(ra), narrowTo(Ity_I8, getIReg(rb))); 2012 break; 2013 case 216: /* "stnt1_add" */ 2014 t2 = newTemp(Ity_I64); 2015 store(getIReg(opd[0]), narrowTo(Ity_I8, getIReg(opd[1]))); 2016 assign(t2, binop(Iop_Add64, getIReg(opd[0]), mkU64(opd[2]))); 2017 MARK_REG_WB(opd[0], t2); 2018 break; 2019 case 217: /* "stnt2" */ 2020 store(getIReg(ra), narrowTo(Ity_I16, getIReg(rb))); 2021 break; 2022 case 218: /* "stnt2_add" */ 2023 t2 = newTemp(Ity_I64); 2024 store(getIReg(opd[0]), narrowTo(Ity_I16, getIReg(opd[1]))); 2025 assign(t2, binop(Iop_Add64, getIReg(opd[0]), mkU64(opd[2]))); 2026 MARK_REG_WB(opd[0], t2); 2027 break; 2028 case 219: /* "stnt4" */ 2029 store(getIReg(ra), narrowTo(Ity_I32, getIReg(rb))); 2030 break; 2031 case 220: /* "stnt4_add" */ 2032 t2 = newTemp(Ity_I64); 2033 store(getIReg(opd[0]), narrowTo(Ity_I32, getIReg(opd[1]))); 2034 assign(t2, binop(Iop_Add64, getIReg(opd[0]), mkU64(opd[2]))); 2035 MARK_REG_WB(opd[0], t2); 2036 break; 2037 case 221: /* "stnt_add" */ 2038 t2 = newTemp(Ity_I64); 2039 store(getIReg(opd[0]), getIReg(opd[1])); 2040 assign(t2, binop(Iop_Add64, getIReg(opd[0]), mkU64(opd[2]))); 2041 MARK_REG_WB(opd[0], t2); 2042 break; 2043 case 222: /* "sub" */ 2044 t2 = newTemp(Ity_I64); 2045 assign(t2, binop(Iop_Sub64, getIReg(ra), 2046 getIReg(rb))); 2047 MARK_REG_WB(rd, t2); 2048 break; 2049 case 223: /* "subx" */ 2050 t2 = newTemp(Ity_I64); 2051 assign(t2, unop(Iop_32Sto64, 2052 binop(Iop_Sub32, 2053 narrowTo(Ity_I32, getIReg(ra)), 2054 narrowTo(Ity_I32, getIReg(rb))))); 2055 MARK_REG_WB(rd, t2); 2056 break; 2057 case 224: /* "subxsc" */ 2058 use_dirty_helper 
= 1; 2059 break; 2060 case 225: /* "swint0" */ 2061 vex_printf( "\n *** swint0 ***\n"); 2062 vassert(0); 2063 break; 2064 case 226: /* "swint1" */ 2065 next = mkU64(guest_PC_curr_instr + 8); 2066 jumpkind = Ijk_Sys_syscall; 2067 break; 2068 case 227: /* "swint2" */ 2069 vex_printf( "\n *** swint2 ***\n"); 2070 vassert(0); 2071 break; 2072 case 228: /* "swint3" */ 2073 vex_printf( "\n *** swint3 ***\n"); 2074 vassert(0); 2075 break; 2076 case 229: 2077 /* Fall-through */ 2078 case 230: 2079 /* Fall-through */ 2080 case 231: 2081 /* Fall-through */ 2082 case 232: 2083 /* Fall-through */ 2084 case 233: 2085 /* Fall-through */ 2086 case 234: 2087 /* Fall-through */ 2088 case 235: 2089 /* Fall-through */ 2090 case 236: 2091 /* Fall-through */ 2092 case 237: 2093 use_dirty_helper = 1; 2094 break; 2095 case 238: /* "v1cmpeq" */ 2096 t2 = newTemp(Ity_I64); 2097 assign(t2, binop(Iop_CmpEQ8x8, getIReg(ra), 2098 getIReg(rb))); 2099 MARK_REG_WB(rd, t2); 2100 break; 2101 case 239: /* "v1cmpeqi" */ 2102 t2 = newTemp(Ity_I64); 2103 assign(t2, binop(Iop_CmpEQ8x8, getIReg(ra), 2104 mkU64(imm))); 2105 2106 MARK_REG_WB(rd, t2); 2107 break; 2108 case 240: 2109 /* Fall-through */ 2110 case 241: 2111 /* Fall-through */ 2112 case 242: 2113 /* Fall-through */ 2114 case 243: 2115 /* Fall-through */ 2116 case 244: 2117 /* Fall-through */ 2118 case 245: 2119 use_dirty_helper = 1; 2120 break; 2121 case 246: /* "v1cmpne" */ 2122 t2 = newTemp(Ity_I64); 2123 assign(t2, binop(Iop_CmpEQ8x8, 2124 binop(Iop_CmpEQ8x8, getIReg(ra), 2125 getIReg(rb)), 2126 getIReg(63))); 2127 MARK_REG_WB(rd, t2); 2128 break; 2129 case 247: 2130 /* Fall-through */ 2131 case 248: 2132 /* Fall-through */ 2133 case 249: 2134 /* Fall-through */ 2135 case 250: 2136 /* Fall-through */ 2137 case 251: 2138 /* Fall-through */ 2139 case 252: 2140 /* Fall-through */ 2141 case 253: 2142 /* Fall-through */ 2143 case 254: 2144 /* Fall-through */ 2145 case 255: 2146 /* Fall-through */ 2147 case 256: 2148 /* Fall-through */ 2149 case 
257: 2150 /* Fall-through */ 2151 case 258: 2152 /* Fall-through */ 2153 case 259: 2154 /* Fall-through */ 2155 case 260: 2156 /* Fall-through */ 2157 case 261: 2158 /* Fall-through */ 2159 case 262: 2160 /* Fall-through */ 2161 case 263: 2162 /* Fall-through */ 2163 case 264: 2164 /* Fall-through */ 2165 case 265: 2166 /* Fall-through */ 2167 case 266: 2168 /* Fall-through */ 2169 case 267: 2170 /* Fall-through */ 2171 case 268: 2172 /* Fall-through */ 2173 case 269: 2174 /* Fall-through */ 2175 case 270: 2176 /* Fall-through */ 2177 case 271: 2178 /* Fall-through */ 2179 case 272: 2180 /* Fall-through */ 2181 case 273: 2182 /* Fall-through */ 2183 case 274: 2184 use_dirty_helper = 1; 2185 break; 2186 case 275: /* "v1shrui" */ 2187 t2 = newTemp(Ity_I64); 2188 assign(t2, binop(Iop_Shr8x8, 2189 getIReg(ra), 2190 mkU64(imm))); 2191 MARK_REG_WB(rd, t2); 2192 break; 2193 case 276: 2194 /* Fall-through */ 2195 case 277: 2196 /* Fall-through */ 2197 case 278: 2198 /* Fall-through */ 2199 case 279: 2200 /* Fall-through */ 2201 case 280: 2202 /* Fall-through */ 2203 case 281: 2204 /* Fall-through */ 2205 case 282: 2206 /* Fall-through */ 2207 case 283: 2208 /* Fall-through */ 2209 case 284: 2210 /* Fall-through */ 2211 case 285: 2212 /* Fall-through */ 2213 case 286: 2214 /* Fall-through */ 2215 case 287: 2216 /* Fall-through */ 2217 case 288: 2218 /* Fall-through */ 2219 case 289: 2220 /* Fall-through */ 2221 case 290: 2222 /* Fall-through */ 2223 case 291: 2224 /* Fall-through */ 2225 case 292: 2226 /* Fall-through */ 2227 case 293: 2228 /* Fall-through */ 2229 case 294: 2230 /* Fall-through */ 2231 case 295: 2232 /* Fall-through */ 2233 case 296: 2234 /* Fall-through */ 2235 case 297: 2236 /* Fall-through */ 2237 case 298: 2238 /* Fall-through */ 2239 case 299: 2240 /* Fall-through */ 2241 case 300: 2242 /* Fall-through */ 2243 case 301: 2244 /* Fall-through */ 2245 case 302: 2246 /* Fall-through */ 2247 case 303: 2248 /* Fall-through */ 2249 case 304: 2250 /* 
Fall-through */ 2251 case 305: 2252 /* Fall-through */ 2253 case 306: 2254 /* Fall-through */ 2255 case 307: 2256 /* Fall-through */ 2257 case 308: 2258 /* Fall-through */ 2259 case 309: 2260 /* Fall-through */ 2261 case 310: 2262 /* Fall-through */ 2263 case 311: 2264 /* Fall-through */ 2265 case 312: 2266 /* Fall-through */ 2267 case 313: 2268 /* Fall-through */ 2269 case 314: 2270 /* Fall-through */ 2271 case 315: 2272 /* Fall-through */ 2273 case 316: 2274 /* Fall-through */ 2275 case 317: 2276 /* Fall-through */ 2277 case 318: 2278 /* Fall-through */ 2279 case 319: 2280 /* Fall-through */ 2281 case 320: 2282 /* Fall-through */ 2283 case 321: 2284 /* Fall-through */ 2285 case 322: 2286 /* Fall-through */ 2287 case 323: 2288 use_dirty_helper = 1; 2289 break; 2290 case 324: /* "v4int_l" */ 2291 t2 = newTemp(Ity_I64); 2292 assign(t2, binop(Iop_Or64, 2293 binop(Iop_Shl64, 2294 getIReg(ra), 2295 mkU8(32)), 2296 binop(Iop_And64, 2297 getIReg(rb), 2298 mkU64(0xFFFFFFFF)))); 2299 MARK_REG_WB(rd, t2); 2300 break; 2301 case 325: 2302 /* Fall-through */ 2303 case 326: 2304 /* Fall-through */ 2305 case 327: 2306 /* Fall-through */ 2307 case 328: 2308 /* Fall-through */ 2309 case 329: 2310 /* Fall-through */ 2311 case 330: 2312 /* Fall-through */ 2313 case 331: 2314 use_dirty_helper = 1; 2315 break; 2316 case 332: /* "wh64" */ /* Ignore store hint */ 2317 break; 2318 case 333: /* "xor" */ 2319 t2 = newTemp(Ity_I64); 2320 assign(t2, binop(Iop_Xor64, 2321 getIReg(ra), 2322 getIReg(rb))); 2323 MARK_REG_WB(rd, t2); 2324 break; 2325 case 334: /* "xori" */ 2326 t2 = newTemp(Ity_I64); 2327 assign(t2, binop(Iop_Xor64, 2328 getIReg(ra), 2329 mkU64(imm))); 2330 MARK_REG_WB(rd, t2); 2331 break; 2332 case 335: /* "(null)" */ /* ignore */ 2333 break; 2334 default: 2335 2336 decode_failure: 2337 vex_printf("error: %d\n", (Int)opcode); 2338 2339 /* All decode failures end up here. 
*/ 2340 vex_printf("vex tilegx->IR: unhandled instruction: " 2341 "%s 0x%llx 0x%llx 0x%llx 0x%llx\n", 2342 decoded[n].opcode->name, 2343 opd[0], opd[1], opd[2], opd[3]); 2344 2345 /* Tell the dispatcher that this insn cannot be decoded, and so has 2346 not been executed, and (is currently) the next to be executed. */ 2347 stmt(IRStmt_Put(offsetof(VexGuestTILEGXState, guest_pc), 2348 mkU64(guest_PC_curr_instr))); 2349 dres.whatNext = Dis_StopHere; 2350 dres.len = 0; 2351 return dres; 2352 } 2353 2354 /* Hook the dirty helper for rare instruxtions. */ 2355 if (use_dirty_helper) 2356 { 2357 Int i = 0; 2358 Int wbc = 0; 2359 IRExpr *opc_oprand[5]; 2360 2361 opc_oprand[0] = mkU64(opcode); 2362 2363 /* Get the operand registers or immediate. */ 2364 for (i = 0 ; i < 4; i++) 2365 { 2366 opc_oprand[i + 1] = NULL; 2367 2368 if (opd_dst_map & (1ULL << i)) 2369 { 2370 tb[wbc] = newTemp(Ity_I64); 2371 wbc++; 2372 opc_oprand[i + 1] = getIReg(opd[i]); 2373 } 2374 else if (opd_imm_map & (1ULL << i)) 2375 opc_oprand[i + 1] = mkU64(opd[i]); 2376 else if (opd_src_map & (1ULL << i)) 2377 opc_oprand[i + 1] = getIReg(opd[i]); 2378 else 2379 opc_oprand[i + 1] = mkU64(0xfeee); 2380 } 2381 2382 IRExpr **args = mkIRExprVec_5(opc_oprand[0], opc_oprand[1], 2383 opc_oprand[2], opc_oprand[3], 2384 opc_oprand[4]); 2385 IRDirty *genIR = NULL; 2386 2387 switch (wbc) { 2388 case 0: 2389 { 2390 genIR = unsafeIRDirty_0_N (0/*regparms*/, 2391 "tilegx_dirtyhelper_gen", 2392 &tilegx_dirtyhelper_gen, 2393 args); 2394 } 2395 break; 2396 case 1: 2397 { 2398 genIR = unsafeIRDirty_1_N (tb[0], 2399 0/*regparms*/, 2400 "tilegx_dirtyhelper_gen", 2401 &tilegx_dirtyhelper_gen, 2402 args); 2403 } 2404 break; 2405 default: 2406 vex_printf("opc = %d\n", (Int)opcode); 2407 vassert(0); 2408 } 2409 2410 stmt(IRStmt_Dirty(genIR)); 2411 2412 wbc = 0; 2413 for (i = 0 ; i < 4; i++) 2414 { 2415 if(opd_dst_map & (1 << i)) 2416 { 2417 /* Queue the writeback destination registers. 
*/ 2418 MARK_REG_WB(opd[i], tb[wbc]); 2419 wbc++; 2420 } 2421 } 2422 } 2423 } 2424 2425 /* Write back registers for a bundle. Note have to get all source registers 2426 for all instructions in a bundle before write the destinations b/c this is 2427 an VLIW processor. */ 2428 for (n = 0; n < rd_wb_index; n++) 2429 putIReg(rd_wb_reg[n], mkexpr(rd_wb_temp[n])); 2430 2431 /* Add branch IR if apply finally, only upto one branch per bundle. */ 2432 if (bstmt) { 2433 stmt(bstmt); 2434 dres.whatNext = Dis_StopHere; 2435 2436 dres.jk_StopHere = jumpkind; 2437 stmt(IRStmt_Put(offsetof(VexGuestTILEGXState, guest_pc), 2438 mkU64(guest_PC_curr_instr + 8))); 2439 } else if (next) { 2440 if (steering_pc != -1ULL) { 2441 if (resteerOkFn(callback_opaque, steering_pc)) { 2442 dres.whatNext = Dis_ResteerU; 2443 dres.continueAt = steering_pc; 2444 stmt(IRStmt_Put(offsetof(VexGuestTILEGXState, guest_pc), 2445 mkU64(steering_pc))); 2446 } else { 2447 dres.whatNext = Dis_StopHere; 2448 dres.jk_StopHere = jumpkind; 2449 stmt(IRStmt_Put(offsetof(VexGuestTILEGXState, guest_pc), 2450 mkU64(steering_pc))); 2451 } 2452 } else { 2453 dres.whatNext = Dis_StopHere; 2454 dres.jk_StopHere = jumpkind; 2455 stmt(IRStmt_Put(offsetof(VexGuestTILEGXState, guest_pc), next)); 2456 } 2457 } else { 2458 /* As dafault dres.whatNext = Dis_Continue. */ 2459 stmt(IRStmt_Put(offsetof(VexGuestTILEGXState, guest_pc), 2460 mkU64(guest_PC_curr_instr + 8))); 2461 } 2462 2463 irsb->jumpkind = Ijk_Boring; 2464 irsb->next = NULL; 2465 dres.len = 8; 2466 2467 decode_success: 2468 2469 return dres; 2470} 2471 2472/*------------------------------------------------------------*/ 2473/*--- Top-level fn ---*/ 2474/*------------------------------------------------------------*/ 2475 2476/* Disassemble a single instruction into IR. The instruction 2477 is located in host memory at &guest_code[delta]. 
*/ 2478 2479DisResult 2480disInstr_TILEGX ( IRSB* irsb_IN, 2481 Bool (*resteerOkFn) (void *, Addr), 2482 Bool resteerCisOk, 2483 void* callback_opaque, 2484 const UChar* guest_code_IN, 2485 Long delta, 2486 Addr guest_IP, 2487 VexArch guest_arch, 2488 const VexArchInfo* archinfo, 2489 const VexAbiInfo* abiinfo, 2490 VexEndness host_endness_IN, 2491 Bool sigill_diag_IN ) 2492{ 2493 DisResult dres; 2494 2495 /* Set globals (see top of this file) */ 2496 vassert(guest_arch == VexArchTILEGX); 2497 2498 guest_code = (UChar*)(Addr)guest_code_IN; 2499 irsb = irsb_IN; 2500 host_endness = host_endness_IN; 2501 guest_PC_curr_instr = (Addr64) guest_IP; 2502 guest_PC_bbstart = (Addr64) toUInt(guest_IP - delta); 2503 2504 dres = disInstr_TILEGX_WRK(resteerOkFn, resteerCisOk, 2505 callback_opaque, 2506 delta, archinfo, abiinfo, sigill_diag_IN); 2507 2508 return dres; 2509} 2510 2511/*--------------------------------------------------------------------*/ 2512/*--- end guest_tilegx_toIR.c ---*/ 2513/*--------------------------------------------------------------------*/ 2514