/* libvex_ir.h -- revision eb0bae136f4eeaaf29761dddb148b118fb824632 */
/*---------------------------------------------------------------*/
/*--- begin                                       libvex_ir.h ---*/
/*---------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2004-2013 OpenWorks LLP
      info@open-works.net

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

#ifndef __LIBVEX_IR_H
#define __LIBVEX_IR_H

#include "libvex_basictypes.h"


/*---------------------------------------------------------------*/
/*--- High-level IR description                               ---*/
/*---------------------------------------------------------------*/

/* Vex IR is an architecture-neutral intermediate representation.
   Unlike some IRs in systems similar to Vex, it is not like assembly
   language (ie. a list of instructions).  Rather, it is more like the
   IR that might be used in a compiler.

   Code blocks
   ~~~~~~~~~~~
   The code is broken into small code blocks ("superblocks", type:
   'IRSB').
   Each code block typically represents from 1 to perhaps 50
   instructions.  IRSBs are single-entry, multiple-exit code blocks.
   Each IRSB contains three things:
   - a type environment, which indicates the type of each temporary
     value present in the IRSB
   - a list of statements, which represent code
   - a jump that exits from the end of the IRSB
   Because the blocks are multiple-exit, there can be additional
   conditional exit statements that cause control to leave the IRSB
   before the final exit.  Also because of this, IRSBs can cover
   multiple non-consecutive sequences of code (up to 3).  These are
   recorded in the type VexGuestExtents (see libvex.h).

   Statements and expressions
   ~~~~~~~~~~~~~~~~~~~~~~~~~~
   Statements (type 'IRStmt') represent operations with side-effects,
   eg. guest register writes, stores, and assignments to temporaries.
   Expressions (type 'IRExpr') represent operations without
   side-effects, eg. arithmetic operations, loads, constants.
   Expressions can contain sub-expressions, forming expression trees,
   eg. (3 + (4 * load(addr1))).

   Storage of guest state
   ~~~~~~~~~~~~~~~~~~~~~~
   The "guest state" contains the guest registers of the guest machine
   (ie. the machine that we are simulating).  It is stored by default
   in a block of memory supplied by the user of the VEX library,
   generally referred to as the guest state (area).  To operate on
   these registers, one must first read ("Get") them from the guest
   state into a temporary value.  Afterwards, one can write ("Put")
   them back into the guest state.

   Get and Put are characterised by a byte offset into the guest
   state, a small integer which effectively gives the identity of the
   referenced guest register, and a type, which indicates the size of
   the value to be transferred.

   The basic "Get" and "Put" operations are sufficient to model normal
   fixed registers on the guest.
   Selected areas of the guest state
   can be treated as a circular array of registers (type:
   'IRRegArray'), which can be indexed at run-time.  This is done with
   the "GetI" and "PutI" primitives.  This is necessary to describe
   rotating register files, for example the x87 FPU stack, SPARC
   register windows, and the Itanium register files.

   Examples, and flattened vs. unflattened code
   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   For example, consider this x86 instruction:

     addl %eax, %ebx

   One Vex IR translation for this code would be this:

     ------ IMark(0x24F275, 7, 0) ------
     t3 = GET:I32(0)             # get %eax, a 32-bit integer
     t2 = GET:I32(12)            # get %ebx, a 32-bit integer
     t1 = Add32(t3,t2)           # addl
     PUT(0) = t1                 # put %eax

   (For simplicity, this ignores the effects on the condition codes, and
   the update of the instruction pointer.)

   The "IMark" is an IR statement that doesn't represent actual code.
   Instead it indicates the address and length of the original
   instruction.  The numbers 0 and 12 are offsets into the guest state
   for %eax and %ebx.  The full list of offsets for an architecture
   <ARCH> can be found in the type VexGuest<ARCH>State in the file
   VEX/pub/libvex_guest_<ARCH>.h.

   The five statements in this example are:
   - the IMark
   - three assignments to temporaries
   - one register write (put)

   The six expressions in this example are:
   - two register reads (gets)
   - one arithmetic (add) operation
   - three temporaries (two nested within the Add32, one in the PUT)

   The above IR is "flattened", ie. all sub-expressions are "atoms",
   either constants or temporaries.  An equivalent, unflattened version
   would be:

     PUT(0) = Add32(GET:I32(0), GET:I32(12))

   IR is guaranteed to be flattened at instrumentation-time.  This makes
   instrumentation easier.
   Equivalent flattened and unflattened IR
   typically results in the same generated code.

   Another example, this one showing loads and stores:

     addl %edx,4(%eax)

   This becomes (again ignoring condition code and instruction pointer
   updates):

     ------ IMark(0x4000ABA, 3, 0) ------
     t3 = Add32(GET:I32(0),0x4:I32)
     t2 = LDle:I32(t3)
     t1 = GET:I32(8)
     t0 = Add32(t2,t1)
     STle(t3) = t0

   The "le" in "LDle" and "STle" is short for "little-endian".

   No need for deallocations
   ~~~~~~~~~~~~~~~~~~~~~~~~~
   Although there are allocation functions for various data structures
   in this file, there are no deallocation functions.  This is because
   Vex uses a memory allocation scheme that automatically reclaims the
   memory used by allocated structures once translation is completed.
   This makes things easier for tools that instrument/transform code
   blocks.

   SSAness and typing
   ~~~~~~~~~~~~~~~~~~
   The IR is fully typed.  For every IRSB (IR block) it is possible to
   say unambiguously whether or not it is correctly typed.
   Incorrectly typed IR has no meaning and the VEX will refuse to
   process it.  At various points during processing VEX typechecks the
   IR and aborts if any violations are found.  This seems overkill but
   makes it a great deal easier to build a reliable JIT.

   IR also has the SSA property.  SSA stands for Static Single
   Assignment, and what it means is that each IR temporary may be
   assigned to only once.  This idea became widely used in compiler
   construction in the mid to late 90s.  It makes many IR-level
   transformations/code improvements easier, simpler and faster.
   Whenever it typechecks an IR block, VEX also checks the SSA
   property holds, and will abort if not so.  So SSAness is
   mechanically and rigidly enforced.
*/

/*---------------------------------------------------------------*/
/*--- Type definitions for the IR                             ---*/
/*---------------------------------------------------------------*/

/* General comments about naming schemes:

   All publicly visible functions contain the name of the primary
   type on which they operate (IRFoo, IRBar, etc).  Hence you should
   be able to identify these functions by grepping for "IR[A-Z]".

   For some type 'IRFoo':

   - ppIRFoo is the printing method for IRFoo, printing it to the
     output channel specified in the LibVEX_Initialise call.

   - eqIRFoo is a structural equality predicate for IRFoos.

   - deepCopyIRFoo is a deep copy constructor for IRFoos.
     It recursively traverses the entire argument tree and
     produces a complete new tree.  All types have a deep copy
     constructor.

   - shallowCopyIRFoo is the shallow copy constructor for IRFoos.
     It creates a new top-level copy of the supplied object,
     but does not copy any sub-objects.  Only some types have a
     shallow copy constructor.
*/

/* ------------------ Types ------------------ */

/* A type indicates the size of a value, and whether it's an integer, a
   float, or a vector (SIMD) value.
*/ 219typedef 220 enum { 221 Ity_INVALID=0x1100, 222 Ity_I1, 223 Ity_I8, 224 Ity_I16, 225 Ity_I32, 226 Ity_I64, 227 Ity_I128, /* 128-bit scalar */ 228 Ity_F32, /* IEEE 754 float */ 229 Ity_F64, /* IEEE 754 double */ 230 Ity_D32, /* 32-bit Decimal floating point */ 231 Ity_D64, /* 64-bit Decimal floating point */ 232 Ity_D128, /* 128-bit Decimal floating point */ 233 Ity_F128, /* 128-bit floating point; implementation defined */ 234 Ity_V128, /* 128-bit SIMD */ 235 Ity_V256 /* 256-bit SIMD */ 236 } 237 IRType; 238 239/* Pretty-print an IRType */ 240extern void ppIRType ( IRType ); 241 242/* Get the size (in bytes) of an IRType */ 243extern Int sizeofIRType ( IRType ); 244 245/* Translate 1/2/4/8 into Ity_I{8,16,32,64} respectively. Asserts on 246 any other input. */ 247extern IRType integerIRTypeOfSize ( Int szB ); 248 249 250/* ------------------ Endianness ------------------ */ 251 252/* IREndness is used in load IRExprs and store IRStmts. */ 253typedef 254 enum { 255 Iend_LE=0x1200, /* little endian */ 256 Iend_BE /* big endian */ 257 } 258 IREndness; 259 260 261/* ------------------ Constants ------------------ */ 262 263/* IRConsts are used within 'Const' and 'Exit' IRExprs. */ 264 265/* The various kinds of constant. */ 266typedef 267 enum { 268 Ico_U1=0x1300, 269 Ico_U8, 270 Ico_U16, 271 Ico_U32, 272 Ico_U64, 273 Ico_F32, /* 32-bit IEEE754 floating */ 274 Ico_F32i, /* 32-bit unsigned int to be interpreted literally 275 as a IEEE754 single value. */ 276 Ico_F64, /* 64-bit IEEE754 floating */ 277 Ico_F64i, /* 64-bit unsigned int to be interpreted literally 278 as a IEEE754 double value. */ 279 Ico_V128, /* 128-bit restricted vector constant, with 1 bit 280 (repeated 8 times) for each of the 16 x 1-byte lanes */ 281 Ico_V256 /* 256-bit restricted vector constant, with 1 bit 282 (repeated 8 times) for each of the 32 x 1-byte lanes */ 283 } 284 IRConstTag; 285 286/* A constant. Stored as a tagged union. 'tag' indicates what kind of 287 constant this is. 
'Ico' is the union that holds the fields. If an 288 IRConst 'c' has c.tag equal to Ico_U32, then it's a 32-bit constant, 289 and its value can be accessed with 'c.Ico.U32'. */ 290typedef 291 struct _IRConst { 292 IRConstTag tag; 293 union { 294 Bool U1; 295 UChar U8; 296 UShort U16; 297 UInt U32; 298 ULong U64; 299 Float F32; 300 UInt F32i; 301 Double F64; 302 ULong F64i; 303 UShort V128; /* 16-bit value; see Ico_V128 comment above */ 304 UInt V256; /* 32-bit value; see Ico_V256 comment above */ 305 } Ico; 306 } 307 IRConst; 308 309/* IRConst constructors */ 310extern IRConst* IRConst_U1 ( Bool ); 311extern IRConst* IRConst_U8 ( UChar ); 312extern IRConst* IRConst_U16 ( UShort ); 313extern IRConst* IRConst_U32 ( UInt ); 314extern IRConst* IRConst_U64 ( ULong ); 315extern IRConst* IRConst_F32 ( Float ); 316extern IRConst* IRConst_F32i ( UInt ); 317extern IRConst* IRConst_F64 ( Double ); 318extern IRConst* IRConst_F64i ( ULong ); 319extern IRConst* IRConst_V128 ( UShort ); 320extern IRConst* IRConst_V256 ( UInt ); 321 322/* Deep-copy an IRConst */ 323extern IRConst* deepCopyIRConst ( IRConst* ); 324 325/* Pretty-print an IRConst */ 326extern void ppIRConst ( IRConst* ); 327 328/* Compare two IRConsts for equality */ 329extern Bool eqIRConst ( IRConst*, IRConst* ); 330 331 332/* ------------------ Call targets ------------------ */ 333 334/* Describes a helper function to call. The name part is purely for 335 pretty printing and not actually used. regparms=n tells the back 336 end that the callee has been declared 337 "__attribute__((regparm(n)))", although indirectly using the 338 VEX_REGPARM(n) macro. On some targets (x86) the back end will need 339 to construct a non-standard sequence to call a function declared 340 like this. 341 342 mcx_mask is a sop to Memcheck. It indicates which args should be 343 considered 'always defined' when lazily computing definedness of 344 the result. Bit 0 of mcx_mask corresponds to args[0], bit 1 to 345 args[1], etc. 
If a bit is set, the corresponding arg is excluded 346 (hence "x" in "mcx") from definedness checking. 347*/ 348 349typedef 350 struct { 351 Int regparms; 352 const HChar* name; 353 void* addr; 354 UInt mcx_mask; 355 } 356 IRCallee; 357 358/* Create an IRCallee. */ 359extern IRCallee* mkIRCallee ( Int regparms, const HChar* name, void* addr ); 360 361/* Deep-copy an IRCallee. */ 362extern IRCallee* deepCopyIRCallee ( IRCallee* ); 363 364/* Pretty-print an IRCallee. */ 365extern void ppIRCallee ( IRCallee* ); 366 367 368/* ------------------ Guest state arrays ------------------ */ 369 370/* This describes a section of the guest state that we want to 371 be able to index at run time, so as to be able to describe 372 indexed or rotating register files on the guest. */ 373typedef 374 struct { 375 Int base; /* guest state offset of start of indexed area */ 376 IRType elemTy; /* type of each element in the indexed area */ 377 Int nElems; /* number of elements in the indexed area */ 378 } 379 IRRegArray; 380 381extern IRRegArray* mkIRRegArray ( Int, IRType, Int ); 382 383extern IRRegArray* deepCopyIRRegArray ( IRRegArray* ); 384 385extern void ppIRRegArray ( IRRegArray* ); 386extern Bool eqIRRegArray ( IRRegArray*, IRRegArray* ); 387 388 389/* ------------------ Temporaries ------------------ */ 390 391/* This represents a temporary, eg. t1. The IR optimiser relies on the 392 fact that IRTemps are 32-bit ints. Do not change them to be ints of 393 any other size. */ 394typedef UInt IRTemp; 395 396/* Pretty-print an IRTemp. */ 397extern void ppIRTemp ( IRTemp ); 398 399#define IRTemp_INVALID ((IRTemp)0xFFFFFFFF) 400 401 402/* --------------- Primops (arity 1,2,3 and 4) --------------- */ 403 404/* Primitive operations that are used in Unop, Binop, Triop and Qop 405 IRExprs. Once we take into account integer, floating point and SIMD 406 operations of all the different sizes, there are quite a lot of them. 
407 Most instructions supported by the architectures that Vex supports 408 (x86, PPC, etc) are represented. Some more obscure ones (eg. cpuid) 409 are not; they are instead handled with dirty helpers that emulate 410 their functionality. Such obscure ones are thus not directly visible 411 in the IR, but their effects on guest state (memory and registers) 412 are made visible via the annotations in IRDirty structures. 413*/ 414typedef 415 enum { 416 /* -- Do not change this ordering. The IR generators rely on 417 (eg) Iop_Add64 == IopAdd8 + 3. -- */ 418 419 Iop_INVALID=0x1400, 420 Iop_Add8, Iop_Add16, Iop_Add32, Iop_Add64, 421 Iop_Sub8, Iop_Sub16, Iop_Sub32, Iop_Sub64, 422 /* Signless mul. MullS/MullU is elsewhere. */ 423 Iop_Mul8, Iop_Mul16, Iop_Mul32, Iop_Mul64, 424 Iop_Or8, Iop_Or16, Iop_Or32, Iop_Or64, 425 Iop_And8, Iop_And16, Iop_And32, Iop_And64, 426 Iop_Xor8, Iop_Xor16, Iop_Xor32, Iop_Xor64, 427 Iop_Shl8, Iop_Shl16, Iop_Shl32, Iop_Shl64, 428 Iop_Shr8, Iop_Shr16, Iop_Shr32, Iop_Shr64, 429 Iop_Sar8, Iop_Sar16, Iop_Sar32, Iop_Sar64, 430 /* Integer comparisons. */ 431 Iop_CmpEQ8, Iop_CmpEQ16, Iop_CmpEQ32, Iop_CmpEQ64, 432 Iop_CmpNE8, Iop_CmpNE16, Iop_CmpNE32, Iop_CmpNE64, 433 /* Tags for unary ops */ 434 Iop_Not8, Iop_Not16, Iop_Not32, Iop_Not64, 435 436 /* Exactly like CmpEQ8/16/32/64, but carrying the additional 437 hint that these compute the success/failure of a CAS 438 operation, and hence are almost certainly applied to two 439 copies of the same value, which in turn has implications for 440 Memcheck's instrumentation. */ 441 Iop_CasCmpEQ8, Iop_CasCmpEQ16, Iop_CasCmpEQ32, Iop_CasCmpEQ64, 442 Iop_CasCmpNE8, Iop_CasCmpNE16, Iop_CasCmpNE32, Iop_CasCmpNE64, 443 444 /* Exactly like CmpNE8/16/32/64, but carrying the additional 445 hint that these needs expensive definedness tracking. */ 446 Iop_ExpCmpNE8, Iop_ExpCmpNE16, Iop_ExpCmpNE32, Iop_ExpCmpNE64, 447 448 /* -- Ordering not important after here. 
-- */ 449 450 /* Widening multiplies */ 451 Iop_MullS8, Iop_MullS16, Iop_MullS32, Iop_MullS64, 452 Iop_MullU8, Iop_MullU16, Iop_MullU32, Iop_MullU64, 453 454 /* Wierdo integer stuff */ 455 Iop_Clz64, Iop_Clz32, /* count leading zeroes */ 456 Iop_Ctz64, Iop_Ctz32, /* count trailing zeros */ 457 /* Ctz64/Ctz32/Clz64/Clz32 are UNDEFINED when given arguments of 458 zero. You must ensure they are never given a zero argument. 459 */ 460 461 /* Standard integer comparisons */ 462 Iop_CmpLT32S, Iop_CmpLT64S, 463 Iop_CmpLE32S, Iop_CmpLE64S, 464 Iop_CmpLT32U, Iop_CmpLT64U, 465 Iop_CmpLE32U, Iop_CmpLE64U, 466 467 /* As a sop to Valgrind-Memcheck, the following are useful. */ 468 Iop_CmpNEZ8, Iop_CmpNEZ16, Iop_CmpNEZ32, Iop_CmpNEZ64, 469 Iop_CmpwNEZ32, Iop_CmpwNEZ64, /* all-0s -> all-Os; other -> all-1s */ 470 Iop_Left8, Iop_Left16, Iop_Left32, Iop_Left64, /* \x -> x | -x */ 471 Iop_Max32U, /* unsigned max */ 472 473 /* PowerPC-style 3-way integer comparisons. Without them it is 474 difficult to simulate PPC efficiently. 
475 op(x,y) | x < y = 0x8 else 476 | x > y = 0x4 else 477 | x == y = 0x2 478 */ 479 Iop_CmpORD32U, Iop_CmpORD64U, 480 Iop_CmpORD32S, Iop_CmpORD64S, 481 482 /* Division */ 483 /* TODO: clarify semantics wrt rounding, negative values, whatever */ 484 Iop_DivU32, // :: I32,I32 -> I32 (simple div, no mod) 485 Iop_DivS32, // ditto, signed 486 Iop_DivU64, // :: I64,I64 -> I64 (simple div, no mod) 487 Iop_DivS64, // ditto, signed 488 Iop_DivU64E, // :: I64,I64 -> I64 (dividend is 64-bit arg (hi) 489 // concat with 64 0's (low)) 490 Iop_DivS64E, // ditto, signed 491 Iop_DivU32E, // :: I32,I32 -> I32 (dividend is 32-bit arg (hi) 492 // concat with 32 0's (low)) 493 Iop_DivS32E, // ditto, signed 494 495 Iop_DivModU64to32, // :: I64,I32 -> I64 496 // of which lo half is div and hi half is mod 497 Iop_DivModS64to32, // ditto, signed 498 499 Iop_DivModU128to64, // :: V128,I64 -> V128 500 // of which lo half is div and hi half is mod 501 Iop_DivModS128to64, // ditto, signed 502 503 Iop_DivModS64to64, // :: I64,I64 -> I128 504 // of which lo half is div and hi half is mod 505 506 /* Integer conversions. Some of these are redundant (eg 507 Iop_64to8 is the same as Iop_64to32 and then Iop_32to8), but 508 having a complete set reduces the typical dynamic size of IR 509 and makes the instruction selectors easier to write. 
*/ 510 511 /* Widening conversions */ 512 Iop_8Uto16, Iop_8Uto32, Iop_8Uto64, 513 Iop_16Uto32, Iop_16Uto64, 514 Iop_32Uto64, 515 Iop_8Sto16, Iop_8Sto32, Iop_8Sto64, 516 Iop_16Sto32, Iop_16Sto64, 517 Iop_32Sto64, 518 519 /* Narrowing conversions */ 520 Iop_64to8, Iop_32to8, Iop_64to16, 521 /* 8 <-> 16 bit conversions */ 522 Iop_16to8, // :: I16 -> I8, low half 523 Iop_16HIto8, // :: I16 -> I8, high half 524 Iop_8HLto16, // :: (I8,I8) -> I16 525 /* 16 <-> 32 bit conversions */ 526 Iop_32to16, // :: I32 -> I16, low half 527 Iop_32HIto16, // :: I32 -> I16, high half 528 Iop_16HLto32, // :: (I16,I16) -> I32 529 /* 32 <-> 64 bit conversions */ 530 Iop_64to32, // :: I64 -> I32, low half 531 Iop_64HIto32, // :: I64 -> I32, high half 532 Iop_32HLto64, // :: (I32,I32) -> I64 533 /* 64 <-> 128 bit conversions */ 534 Iop_128to64, // :: I128 -> I64, low half 535 Iop_128HIto64, // :: I128 -> I64, high half 536 Iop_64HLto128, // :: (I64,I64) -> I128 537 /* 1-bit stuff */ 538 Iop_Not1, /* :: Ity_Bit -> Ity_Bit */ 539 Iop_32to1, /* :: Ity_I32 -> Ity_Bit, just select bit[0] */ 540 Iop_64to1, /* :: Ity_I64 -> Ity_Bit, just select bit[0] */ 541 Iop_1Uto8, /* :: Ity_Bit -> Ity_I8, unsigned widen */ 542 Iop_1Uto32, /* :: Ity_Bit -> Ity_I32, unsigned widen */ 543 Iop_1Uto64, /* :: Ity_Bit -> Ity_I64, unsigned widen */ 544 Iop_1Sto8, /* :: Ity_Bit -> Ity_I8, signed widen */ 545 Iop_1Sto16, /* :: Ity_Bit -> Ity_I16, signed widen */ 546 Iop_1Sto32, /* :: Ity_Bit -> Ity_I32, signed widen */ 547 Iop_1Sto64, /* :: Ity_Bit -> Ity_I64, signed widen */ 548 549 /* ------ Floating point. We try to be IEEE754 compliant. ------ */ 550 551 /* --- Simple stuff as mandated by 754. --- */ 552 553 /* Binary operations, with rounding. 
*/ 554 /* :: IRRoundingMode(I32) x F64 x F64 -> F64 */ 555 Iop_AddF64, Iop_SubF64, Iop_MulF64, Iop_DivF64, 556 557 /* :: IRRoundingMode(I32) x F32 x F32 -> F32 */ 558 Iop_AddF32, Iop_SubF32, Iop_MulF32, Iop_DivF32, 559 560 /* Variants of the above which produce a 64-bit result but which 561 round their result to a IEEE float range first. */ 562 /* :: IRRoundingMode(I32) x F64 x F64 -> F64 */ 563 Iop_AddF64r32, Iop_SubF64r32, Iop_MulF64r32, Iop_DivF64r32, 564 565 /* Unary operations, without rounding. */ 566 /* :: F64 -> F64 */ 567 Iop_NegF64, Iop_AbsF64, 568 569 /* :: F32 -> F32 */ 570 Iop_NegF32, Iop_AbsF32, 571 572 /* Unary operations, with rounding. */ 573 /* :: IRRoundingMode(I32) x F64 -> F64 */ 574 Iop_SqrtF64, 575 576 /* :: IRRoundingMode(I32) x F32 -> F32 */ 577 Iop_SqrtF32, 578 579 /* Comparison, yielding GT/LT/EQ/UN(ordered), as per the following: 580 0x45 Unordered 581 0x01 LT 582 0x00 GT 583 0x40 EQ 584 This just happens to be the Intel encoding. The values 585 are recorded in the type IRCmpF64Result. 586 */ 587 /* :: F64 x F64 -> IRCmpF64Result(I32) */ 588 Iop_CmpF64, 589 Iop_CmpF32, 590 Iop_CmpF128, 591 592 /* --- Int to/from FP conversions. --- */ 593 594 /* For the most part, these take a first argument :: Ity_I32 (as 595 IRRoundingMode) which is an indication of the rounding mode 596 to use, as per the following encoding ("the standard 597 encoding"): 598 00b to nearest (the default) 599 01b to -infinity 600 10b to +infinity 601 11b to zero 602 This just happens to be the Intel encoding. For reference only, 603 the PPC encoding is: 604 00b to nearest (the default) 605 01b to zero 606 10b to +infinity 607 11b to -infinity 608 Any PPC -> IR front end will have to translate these PPC 609 encodings, as encoded in the guest state, to the standard 610 encodings, to pass to the primops. 
611 For reference only, the ARM VFP encoding is: 612 00b to nearest 613 01b to +infinity 614 10b to -infinity 615 11b to zero 616 Again, this will have to be converted to the standard encoding 617 to pass to primops. 618 619 If one of these conversions gets an out-of-range condition, 620 or a NaN, as an argument, the result is host-defined. On x86 621 the "integer indefinite" value 0x80..00 is produced. On PPC 622 it is either 0x80..00 or 0x7F..FF depending on the sign of 623 the argument. 624 625 On ARMvfp, when converting to a signed integer result, the 626 overflow result is 0x80..00 for negative args and 0x7F..FF 627 for positive args. For unsigned integer results it is 628 0x00..00 and 0xFF..FF respectively. 629 630 Rounding is required whenever the destination type cannot 631 represent exactly all values of the source type. 632 */ 633 Iop_F64toI16S, /* IRRoundingMode(I32) x F64 -> signed I16 */ 634 Iop_F64toI32S, /* IRRoundingMode(I32) x F64 -> signed I32 */ 635 Iop_F64toI64S, /* IRRoundingMode(I32) x F64 -> signed I64 */ 636 Iop_F64toI64U, /* IRRoundingMode(I32) x F64 -> unsigned I64 */ 637 638 Iop_F64toI32U, /* IRRoundingMode(I32) x F64 -> unsigned I32 */ 639 640 Iop_I32StoF64, /* signed I32 -> F64 */ 641 Iop_I64StoF64, /* IRRoundingMode(I32) x signed I64 -> F64 */ 642 Iop_I64UtoF64, /* IRRoundingMode(I32) x unsigned I64 -> F64 */ 643 Iop_I64UtoF32, /* IRRoundingMode(I32) x unsigned I64 -> F32 */ 644 645 Iop_I32UtoF32, /* IRRoundingMode(I32) x unsigned I32 -> F32 */ 646 Iop_I32UtoF64, /* unsigned I32 -> F64 */ 647 648 Iop_F32toI32S, /* IRRoundingMode(I32) x F32 -> signed I32 */ 649 Iop_F32toI64S, /* IRRoundingMode(I32) x F32 -> signed I64 */ 650 Iop_F32toI32U, /* IRRoundingMode(I32) x F32 -> unsigned I32 */ 651 Iop_F32toI64U, /* IRRoundingMode(I32) x F32 -> unsigned I64 */ 652 653 Iop_I32StoF32, /* IRRoundingMode(I32) x signed I32 -> F32 */ 654 Iop_I64StoF32, /* IRRoundingMode(I32) x signed I64 -> F32 */ 655 656 /* Conversion between floating point formats 
*/ 657 Iop_F32toF64, /* F32 -> F64 */ 658 Iop_F64toF32, /* IRRoundingMode(I32) x F64 -> F32 */ 659 660 /* Reinterpretation. Take an F64 and produce an I64 with 661 the same bit pattern, or vice versa. */ 662 Iop_ReinterpF64asI64, Iop_ReinterpI64asF64, 663 Iop_ReinterpF32asI32, Iop_ReinterpI32asF32, 664 665 /* Support for 128-bit floating point */ 666 Iop_F64HLtoF128,/* (high half of F128,low half of F128) -> F128 */ 667 Iop_F128HItoF64,/* F128 -> high half of F128 into a F64 register */ 668 Iop_F128LOtoF64,/* F128 -> low half of F128 into a F64 register */ 669 670 /* :: IRRoundingMode(I32) x F128 x F128 -> F128 */ 671 Iop_AddF128, Iop_SubF128, Iop_MulF128, Iop_DivF128, 672 673 /* :: F128 -> F128 */ 674 Iop_NegF128, Iop_AbsF128, 675 676 /* :: IRRoundingMode(I32) x F128 -> F128 */ 677 Iop_SqrtF128, 678 679 Iop_I32StoF128, /* signed I32 -> F128 */ 680 Iop_I64StoF128, /* signed I64 -> F128 */ 681 Iop_I32UtoF128, /* unsigned I32 -> F128 */ 682 Iop_I64UtoF128, /* unsigned I64 -> F128 */ 683 Iop_F32toF128, /* F32 -> F128 */ 684 Iop_F64toF128, /* F64 -> F128 */ 685 686 Iop_F128toI32S, /* IRRoundingMode(I32) x F128 -> signed I32 */ 687 Iop_F128toI64S, /* IRRoundingMode(I32) x F128 -> signed I64 */ 688 Iop_F128toI32U, /* IRRoundingMode(I32) x F128 -> unsigned I32 */ 689 Iop_F128toI64U, /* IRRoundingMode(I32) x F128 -> unsigned I64 */ 690 Iop_F128toF64, /* IRRoundingMode(I32) x F128 -> F64 */ 691 Iop_F128toF32, /* IRRoundingMode(I32) x F128 -> F32 */ 692 693 /* --- guest x86/amd64 specifics, not mandated by 754. --- */ 694 695 /* Binary ops, with rounding. 
*/ 696 /* :: IRRoundingMode(I32) x F64 x F64 -> F64 */ 697 Iop_AtanF64, /* FPATAN, arctan(arg1/arg2) */ 698 Iop_Yl2xF64, /* FYL2X, arg1 * log2(arg2) */ 699 Iop_Yl2xp1F64, /* FYL2XP1, arg1 * log2(arg2+1.0) */ 700 Iop_PRemF64, /* FPREM, non-IEEE remainder(arg1/arg2) */ 701 Iop_PRemC3210F64, /* C3210 flags resulting from FPREM, :: I32 */ 702 Iop_PRem1F64, /* FPREM1, IEEE remainder(arg1/arg2) */ 703 Iop_PRem1C3210F64, /* C3210 flags resulting from FPREM1, :: I32 */ 704 Iop_ScaleF64, /* FSCALE, arg1 * (2^RoundTowardsZero(arg2)) */ 705 /* Note that on x86 guest, PRem1{C3210} has the same behaviour 706 as the IEEE mandated RemF64, except it is limited in the 707 range of its operand. Hence the partialness. */ 708 709 /* Unary ops, with rounding. */ 710 /* :: IRRoundingMode(I32) x F64 -> F64 */ 711 Iop_SinF64, /* FSIN */ 712 Iop_CosF64, /* FCOS */ 713 Iop_TanF64, /* FTAN */ 714 Iop_2xm1F64, /* (2^arg - 1.0) */ 715 Iop_RoundF64toInt, /* F64 value to nearest integral value (still 716 as F64) */ 717 Iop_RoundF32toInt, /* F32 value to nearest integral value (still 718 as F32) */ 719 720 /* --- guest s390 specifics, not mandated by 754. --- */ 721 722 /* Fused multiply-add/sub */ 723 /* :: IRRoundingMode(I32) x F32 x F32 x F32 -> F32 724 (computes arg2 * arg3 +/- arg4) */ 725 Iop_MAddF32, Iop_MSubF32, 726 727 /* --- guest ppc32/64 specifics, not mandated by 754. --- */ 728 729 /* Ternary operations, with rounding. */ 730 /* Fused multiply-add/sub, with 112-bit intermediate 731 precision for ppc. 732 Also used to implement fused multiply-add/sub for s390. */ 733 /* :: IRRoundingMode(I32) x F64 x F64 x F64 -> F64 734 (computes arg2 * arg3 +/- arg4) */ 735 Iop_MAddF64, Iop_MSubF64, 736 737 /* Variants of the above which produce a 64-bit result but which 738 round their result to a IEEE float range first. 
*/ 739 /* :: IRRoundingMode(I32) x F64 x F64 x F64 -> F64 */ 740 Iop_MAddF64r32, Iop_MSubF64r32, 741 742 /* :: F64 -> F64 */ 743 Iop_Est5FRSqrt, /* reciprocal square root estimate, 5 good bits */ 744 Iop_RoundF64toF64_NEAREST, /* frin */ 745 Iop_RoundF64toF64_NegINF, /* frim */ 746 Iop_RoundF64toF64_PosINF, /* frip */ 747 Iop_RoundF64toF64_ZERO, /* friz */ 748 749 /* :: F64 -> F32 */ 750 Iop_TruncF64asF32, /* do F64->F32 truncation as per 'fsts' */ 751 752 /* :: IRRoundingMode(I32) x F64 -> F64 */ 753 Iop_RoundF64toF32, /* round F64 to nearest F32 value (still as F64) */ 754 /* NB: pretty much the same as Iop_F64toF32, except no change 755 of type. */ 756 757 /* ------------------ 32-bit SIMD Integer ------------------ */ 758 759 /* 32x1 saturating add/sub (ok, well, not really SIMD :) */ 760 Iop_QAdd32S, 761 Iop_QSub32S, 762 763 /* 16x2 add/sub, also signed/unsigned saturating variants */ 764 Iop_Add16x2, Iop_Sub16x2, 765 Iop_QAdd16Sx2, Iop_QAdd16Ux2, 766 Iop_QSub16Sx2, Iop_QSub16Ux2, 767 768 /* 16x2 signed/unsigned halving add/sub. For each lane, these 769 compute bits 16:1 of (eg) sx(argL) + sx(argR), 770 or zx(argL) - zx(argR) etc. */ 771 Iop_HAdd16Ux2, Iop_HAdd16Sx2, 772 Iop_HSub16Ux2, Iop_HSub16Sx2, 773 774 /* 8x4 add/sub, also signed/unsigned saturating variants */ 775 Iop_Add8x4, Iop_Sub8x4, 776 Iop_QAdd8Sx4, Iop_QAdd8Ux4, 777 Iop_QSub8Sx4, Iop_QSub8Ux4, 778 779 /* 8x4 signed/unsigned halving add/sub. For each lane, these 780 compute bits 8:1 of (eg) sx(argL) + sx(argR), 781 or zx(argL) - zx(argR) etc. */ 782 Iop_HAdd8Ux4, Iop_HAdd8Sx4, 783 Iop_HSub8Ux4, Iop_HSub8Sx4, 784 785 /* 8x4 sum of absolute unsigned differences. 
*/ 786 Iop_Sad8Ux4, 787 788 /* MISC (vector integer cmp != 0) */ 789 Iop_CmpNEZ16x2, Iop_CmpNEZ8x4, 790 791 /* ------------------ 64-bit SIMD FP ------------------------ */ 792 793 /* Convertion to/from int */ 794 Iop_I32UtoFx2, Iop_I32StoFx2, /* I32x4 -> F32x4 */ 795 Iop_FtoI32Ux2_RZ, Iop_FtoI32Sx2_RZ, /* F32x4 -> I32x4 */ 796 /* Fixed32 format is floating-point number with fixed number of fraction 797 bits. The number of fraction bits is passed as a second argument of 798 type I8. */ 799 Iop_F32ToFixed32Ux2_RZ, Iop_F32ToFixed32Sx2_RZ, /* fp -> fixed-point */ 800 Iop_Fixed32UToF32x2_RN, Iop_Fixed32SToF32x2_RN, /* fixed-point -> fp */ 801 802 /* Binary operations */ 803 Iop_Max32Fx2, Iop_Min32Fx2, 804 /* Pairwise Min and Max. See integer pairwise operations for more 805 details. */ 806 Iop_PwMax32Fx2, Iop_PwMin32Fx2, 807 /* Note: For the following compares, the arm front-end assumes a 808 nan in a lane of either argument returns zero for that lane. */ 809 Iop_CmpEQ32Fx2, Iop_CmpGT32Fx2, Iop_CmpGE32Fx2, 810 811 /* Vector Reciprocal Estimate finds an approximate reciprocal of each 812 element in the operand vector, and places the results in the destination 813 vector. */ 814 Iop_Recip32Fx2, 815 816 /* Vector Reciprocal Step computes (2.0 - arg1 * arg2). 817 Note, that if one of the arguments is zero and another one is infinity 818 of arbitrary sign the result of the operation is 2.0. */ 819 Iop_Recps32Fx2, 820 821 /* Vector Reciprocal Square Root Estimate finds an approximate reciprocal 822 square root of each element in the operand vector. */ 823 Iop_Rsqrte32Fx2, 824 825 /* Vector Reciprocal Square Root Step computes (3.0 - arg1 * arg2) / 2.0. 826 Note, that of one of the arguments is zero and another one is infiinty 827 of arbitrary sign the result of the operation is 1.5. */ 828 Iop_Rsqrts32Fx2, 829 830 /* Unary */ 831 Iop_Neg32Fx2, Iop_Abs32Fx2, 832 833 /* ------------------ 64-bit SIMD Integer. 
------------------ */ 834 835 /* MISC (vector integer cmp != 0) */ 836 Iop_CmpNEZ8x8, Iop_CmpNEZ16x4, Iop_CmpNEZ32x2, 837 838 /* ADDITION (normal / unsigned sat / signed sat) */ 839 Iop_Add8x8, Iop_Add16x4, Iop_Add32x2, 840 Iop_QAdd8Ux8, Iop_QAdd16Ux4, Iop_QAdd32Ux2, Iop_QAdd64Ux1, 841 Iop_QAdd8Sx8, Iop_QAdd16Sx4, Iop_QAdd32Sx2, Iop_QAdd64Sx1, 842 843 /* PAIRWISE operations */ 844 /* Iop_PwFoo16x4( [a,b,c,d], [e,f,g,h] ) = 845 [Foo16(a,b), Foo16(c,d), Foo16(e,f), Foo16(g,h)] */ 846 Iop_PwAdd8x8, Iop_PwAdd16x4, Iop_PwAdd32x2, 847 Iop_PwMax8Sx8, Iop_PwMax16Sx4, Iop_PwMax32Sx2, 848 Iop_PwMax8Ux8, Iop_PwMax16Ux4, Iop_PwMax32Ux2, 849 Iop_PwMin8Sx8, Iop_PwMin16Sx4, Iop_PwMin32Sx2, 850 Iop_PwMin8Ux8, Iop_PwMin16Ux4, Iop_PwMin32Ux2, 851 /* Longening variant is unary. The resulting vector contains two times 852 less elements than operand, but they are two times wider. 853 Example: 854 Iop_PAddL16Ux4( [a,b,c,d] ) = [a+b,c+d] 855 where a+b and c+d are unsigned 32-bit values. */ 856 Iop_PwAddL8Ux8, Iop_PwAddL16Ux4, Iop_PwAddL32Ux2, 857 Iop_PwAddL8Sx8, Iop_PwAddL16Sx4, Iop_PwAddL32Sx2, 858 859 /* SUBTRACTION (normal / unsigned sat / signed sat) */ 860 Iop_Sub8x8, Iop_Sub16x4, Iop_Sub32x2, 861 Iop_QSub8Ux8, Iop_QSub16Ux4, Iop_QSub32Ux2, Iop_QSub64Ux1, 862 Iop_QSub8Sx8, Iop_QSub16Sx4, Iop_QSub32Sx2, Iop_QSub64Sx1, 863 864 /* ABSOLUTE VALUE */ 865 Iop_Abs8x8, Iop_Abs16x4, Iop_Abs32x2, 866 867 /* MULTIPLICATION (normal / high half of signed/unsigned / plynomial ) */ 868 Iop_Mul8x8, Iop_Mul16x4, Iop_Mul32x2, 869 Iop_Mul32Fx2, 870 Iop_MulHi16Ux4, 871 Iop_MulHi16Sx4, 872 /* Plynomial multiplication treats it's arguments as coefficients of 873 polynoms over {0, 1}. 
*/ 874 Iop_PolynomialMul8x8, 875 876 /* Vector Saturating Doubling Multiply Returning High Half and 877 Vector Saturating Rounding Doubling Multiply Returning High Half */ 878 /* These IROp's multiply corresponding elements in two vectors, double 879 the results, and place the most significant half of the final results 880 in the destination vector. The results are truncated or rounded. If 881 any of the results overflow, they are saturated. */ 882 Iop_QDMulHi16Sx4, Iop_QDMulHi32Sx2, 883 Iop_QRDMulHi16Sx4, Iop_QRDMulHi32Sx2, 884 885 /* AVERAGING: note: (arg1 + arg2 + 1) >>u 1 */ 886 Iop_Avg8Ux8, 887 Iop_Avg16Ux4, 888 889 /* MIN/MAX */ 890 Iop_Max8Sx8, Iop_Max16Sx4, Iop_Max32Sx2, 891 Iop_Max8Ux8, Iop_Max16Ux4, Iop_Max32Ux2, 892 Iop_Min8Sx8, Iop_Min16Sx4, Iop_Min32Sx2, 893 Iop_Min8Ux8, Iop_Min16Ux4, Iop_Min32Ux2, 894 895 /* COMPARISON */ 896 Iop_CmpEQ8x8, Iop_CmpEQ16x4, Iop_CmpEQ32x2, 897 Iop_CmpGT8Ux8, Iop_CmpGT16Ux4, Iop_CmpGT32Ux2, 898 Iop_CmpGT8Sx8, Iop_CmpGT16Sx4, Iop_CmpGT32Sx2, 899 900 /* COUNT ones / leading zeroes / leading sign bits (not including topmost 901 bit) */ 902 Iop_Cnt8x8, 903 Iop_Clz8Sx8, Iop_Clz16Sx4, Iop_Clz32Sx2, 904 Iop_Cls8Sx8, Iop_Cls16Sx4, Iop_Cls32Sx2, 905 Iop_Clz64x2, 906 907 /* VECTOR x VECTOR SHIFT / ROTATE */ 908 Iop_Shl8x8, Iop_Shl16x4, Iop_Shl32x2, 909 Iop_Shr8x8, Iop_Shr16x4, Iop_Shr32x2, 910 Iop_Sar8x8, Iop_Sar16x4, Iop_Sar32x2, 911 Iop_Sal8x8, Iop_Sal16x4, Iop_Sal32x2, Iop_Sal64x1, 912 913 /* VECTOR x SCALAR SHIFT (shift amt :: Ity_I8) */ 914 Iop_ShlN8x8, Iop_ShlN16x4, Iop_ShlN32x2, 915 Iop_ShrN8x8, Iop_ShrN16x4, Iop_ShrN32x2, 916 Iop_SarN8x8, Iop_SarN16x4, Iop_SarN32x2, 917 918 /* VECTOR x VECTOR SATURATING SHIFT */ 919 Iop_QShl8x8, Iop_QShl16x4, Iop_QShl32x2, Iop_QShl64x1, 920 Iop_QSal8x8, Iop_QSal16x4, Iop_QSal32x2, Iop_QSal64x1, 921 /* VECTOR x INTEGER SATURATING SHIFT */ 922 Iop_QShlN8Sx8, Iop_QShlN16Sx4, Iop_QShlN32Sx2, Iop_QShlN64Sx1, 923 Iop_QShlN8x8, Iop_QShlN16x4, Iop_QShlN32x2, Iop_QShlN64x1, 924 Iop_QSalN8x8, 
Iop_QSalN16x4, Iop_QSalN32x2, Iop_QSalN64x1, 925 926 /* NARROWING (binary) 927 -- narrow 2xI64 into 1xI64, hi half from left arg */ 928 /* For saturated narrowing, I believe there are 4 variants of 929 the basic arithmetic operation, depending on the signedness 930 of argument and result. Here are examples that exemplify 931 what I mean: 932 933 QNarrow16Uto8U ( UShort x ) if (x >u 255) x = 255; 934 return x[7:0]; 935 936 QNarrow16Sto8S ( Short x ) if (x <s -128) x = -128; 937 if (x >s 127) x = 127; 938 return x[7:0]; 939 940 QNarrow16Uto8S ( UShort x ) if (x >u 127) x = 127; 941 return x[7:0]; 942 943 QNarrow16Sto8U ( Short x ) if (x <s 0) x = 0; 944 if (x >s 255) x = 255; 945 return x[7:0]; 946 */ 947 Iop_QNarrowBin16Sto8Ux8, 948 Iop_QNarrowBin16Sto8Sx8, Iop_QNarrowBin32Sto16Sx4, 949 Iop_NarrowBin16to8x8, Iop_NarrowBin32to16x4, 950 951 /* INTERLEAVING */ 952 /* Interleave lanes from low or high halves of 953 operands. Most-significant result lane is from the left 954 arg. */ 955 Iop_InterleaveHI8x8, Iop_InterleaveHI16x4, Iop_InterleaveHI32x2, 956 Iop_InterleaveLO8x8, Iop_InterleaveLO16x4, Iop_InterleaveLO32x2, 957 /* Interleave odd/even lanes of operands. Most-significant result lane 958 is from the left arg. Note that Interleave{Odd,Even}Lanes32x2 are 959 identical to Interleave{HI,LO}32x2 and so are omitted.*/ 960 Iop_InterleaveOddLanes8x8, Iop_InterleaveEvenLanes8x8, 961 Iop_InterleaveOddLanes16x4, Iop_InterleaveEvenLanes16x4, 962 963 /* CONCATENATION -- build a new value by concatenating either 964 the even or odd lanes of both operands. Note that 965 Cat{Odd,Even}Lanes32x2 are identical to Interleave{HI,LO}32x2 966 and so are omitted. 
*/ 967 Iop_CatOddLanes8x8, Iop_CatOddLanes16x4, 968 Iop_CatEvenLanes8x8, Iop_CatEvenLanes16x4, 969 970 /* GET / SET elements of VECTOR 971 GET is binop (I64, I8) -> I<elem_size> 972 SET is triop (I64, I8, I<elem_size>) -> I64 */ 973 /* Note: the arm back-end handles only constant second argument */ 974 Iop_GetElem8x8, Iop_GetElem16x4, Iop_GetElem32x2, 975 Iop_SetElem8x8, Iop_SetElem16x4, Iop_SetElem32x2, 976 977 /* DUPLICATING -- copy value to all lanes */ 978 Iop_Dup8x8, Iop_Dup16x4, Iop_Dup32x2, 979 980 /* EXTRACT -- copy 8-arg3 highest bytes from arg1 to 8-arg3 lowest bytes 981 of result and arg3 lowest bytes of arg2 to arg3 highest bytes of 982 result. 983 It is a triop: (I64, I64, I8) -> I64 */ 984 /* Note: the arm back-end handles only constant third argument. */ 985 Iop_Extract64, 986 987 /* REVERSE the order of elements in each Half-words, Words, 988 Double-words */ 989 /* Examples: 990 Reverse16_8x8([a,b,c,d,e,f,g,h]) = [b,a,d,c,f,e,h,g] 991 Reverse32_8x8([a,b,c,d,e,f,g,h]) = [d,c,b,a,h,g,f,e] 992 Reverse64_8x8([a,b,c,d,e,f,g,h]) = [h,g,f,e,d,c,b,a] */ 993 Iop_Reverse16_8x8, 994 Iop_Reverse32_8x8, Iop_Reverse32_16x4, 995 Iop_Reverse64_8x8, Iop_Reverse64_16x4, Iop_Reverse64_32x2, 996 997 /* PERMUTING -- copy src bytes to dst, 998 as indexed by control vector bytes: 999 for i in 0 .. 7 . result[i] = argL[ argR[i] ] 1000 argR[i] values may only be in the range 0 .. 7, else behaviour 1001 is undefined. */ 1002 Iop_Perm8x8, 1003 1004 /* MISC CONVERSION -- get high bits of each byte lane, a la 1005 x86/amd64 pmovmskb */ 1006 Iop_GetMSBs8x8, /* I64 -> I8 */ 1007 1008 /* Vector Reciprocal Estimate and Vector Reciprocal Square Root Estimate 1009 See floating-point equivalents for details.
*/ 1010 Iop_Recip32x2, Iop_Rsqrte32x2, 1011 1012 /* ------------------ Decimal Floating Point ------------------ */ 1013 1014 /* ARITHMETIC INSTRUCTIONS 64-bit 1015 ---------------------------------- 1016 IRRoundingMode(I32) X D64 X D64 -> D64 1017 */ 1018 Iop_AddD64, Iop_SubD64, Iop_MulD64, Iop_DivD64, 1019 1020 /* ARITHMETIC INSTRUCTIONS 128-bit 1021 ---------------------------------- 1022 IRRoundingMode(I32) X D128 X D128 -> D128 1023 */ 1024 Iop_AddD128, Iop_SubD128, Iop_MulD128, Iop_DivD128, 1025 1026 /* SHIFT SIGNIFICAND INSTRUCTIONS 1027 * The DFP significand is shifted by the number of digits specified 1028 * by the U8 operand. Digits shifted out of the leftmost digit are 1029 * lost. Zeros are supplied to the vacated positions on the right. 1030 * The sign of the result is the same as the sign of the original 1031 * operand. 1032 * 1033 * D64 x U8 -> D64 left shift and right shift respectively */ 1034 Iop_ShlD64, Iop_ShrD64, 1035 1036 /* D128 x U8 -> D128 left shift and right shift respectively */ 1037 Iop_ShlD128, Iop_ShrD128, 1038 1039 1040 /* FORMAT CONVERSION INSTRUCTIONS 1041 * D32 -> D64 1042 */ 1043 Iop_D32toD64, 1044 1045 /* D64 -> D128 */ 1046 Iop_D64toD128, 1047 1048 /* I32S -> D128 */ 1049 Iop_I32StoD128, 1050 1051 /* I32U -> D128 */ 1052 Iop_I32UtoD128, 1053 1054 /* I64S -> D128 */ 1055 Iop_I64StoD128, 1056 1057 /* I64U -> D128 */ 1058 Iop_I64UtoD128, 1059 1060 /* IRRoundingMode(I32) x D64 -> D32 */ 1061 Iop_D64toD32, 1062 1063 /* IRRoundingMode(I32) x D128 -> D64 */ 1064 Iop_D128toD64, 1065 1066 /* I32S -> D64 */ 1067 Iop_I32StoD64, 1068 1069 /* I32U -> D64 */ 1070 Iop_I32UtoD64, 1071 1072 /* IRRoundingMode(I32) x I64 -> D64 */ 1073 Iop_I64StoD64, 1074 1075 /* IRRoundingMode(I32) x I64 -> D64 */ 1076 Iop_I64UtoD64, 1077 1078 /* IRRoundingMode(I32) x D64 -> I32 */ 1079 Iop_D64toI32S, 1080 1081 /* IRRoundingMode(I32) x D64 -> I32 */ 1082 Iop_D64toI32U, 1083 1084 /* IRRoundingMode(I32) x D64 -> I64 */ 1085 Iop_D64toI64S, 1086 1087 /* 
IRRoundingMode(I32) x D64 -> I64 */ 1088 Iop_D64toI64U, 1089 1090 /* IRRoundingMode(I32) x D128 -> I32 */ 1091 Iop_D128toI32S, 1092 1093 /* IRRoundingMode(I32) x D128 -> I32 */ 1094 Iop_D128toI32U, 1095 1096 /* IRRoundingMode(I32) x D128 -> I64 */ 1097 Iop_D128toI64S, 1098 1099 /* IRRoundingMode(I32) x D128 -> I64 */ 1100 Iop_D128toI64U, 1101 1102 /* IRRoundingMode(I32) x F32 -> D32 */ 1103 Iop_F32toD32, 1104 1105 /* IRRoundingMode(I32) x F32 -> D64 */ 1106 Iop_F32toD64, 1107 1108 /* IRRoundingMode(I32) x F32 -> D128 */ 1109 Iop_F32toD128, 1110 1111 /* IRRoundingMode(I32) x F64 -> D32 */ 1112 Iop_F64toD32, 1113 1114 /* IRRoundingMode(I32) x F64 -> D64 */ 1115 Iop_F64toD64, 1116 1117 /* IRRoundingMode(I32) x F64 -> D128 */ 1118 Iop_F64toD128, 1119 1120 /* IRRoundingMode(I32) x F128 -> D32 */ 1121 Iop_F128toD32, 1122 1123 /* IRRoundingMode(I32) x F128 -> D64 */ 1124 Iop_F128toD64, 1125 1126 /* IRRoundingMode(I32) x F128 -> D128 */ 1127 Iop_F128toD128, 1128 1129 /* IRRoundingMode(I32) x D32 -> F32 */ 1130 Iop_D32toF32, 1131 1132 /* IRRoundingMode(I32) x D32 -> F64 */ 1133 Iop_D32toF64, 1134 1135 /* IRRoundingMode(I32) x D32 -> F128 */ 1136 Iop_D32toF128, 1137 1138 /* IRRoundingMode(I32) x D64 -> F32 */ 1139 Iop_D64toF32, 1140 1141 /* IRRoundingMode(I32) x D64 -> F64 */ 1142 Iop_D64toF64, 1143 1144 /* IRRoundingMode(I32) x D64 -> F128 */ 1145 Iop_D64toF128, 1146 1147 /* IRRoundingMode(I32) x D128 -> F32 */ 1148 Iop_D128toF32, 1149 1150 /* IRRoundingMode(I32) x D128 -> F64 */ 1151 Iop_D128toF64, 1152 1153 /* IRRoundingMode(I32) x D128 -> F128 */ 1154 Iop_D128toF128, 1155 1156 /* ROUNDING INSTRUCTIONS 1157 * IRRoundingMode(I32) x D64 -> D64 1158 * The D64 operand, if a finite number, it is rounded to a 1159 * floating point integer value, i.e. no fractional part. 
1160 */ 1161 Iop_RoundD64toInt, 1162 1163 /* IRRoundingMode(I32) x D128 -> D128 */ 1164 Iop_RoundD128toInt, 1165 1166 /* COMPARE INSTRUCTIONS 1167 * D64 x D64 -> IRCmpD64Result(I32) */ 1168 Iop_CmpD64, 1169 1170 /* D128 x D128 -> IRCmpD128Result(I32) */ 1171 Iop_CmpD128, 1172 1173 /* COMPARE BIASED EXPONENT INSTRUCTIONS 1174 * D64 x D64 -> IRCmpD64Result(I32) */ 1175 Iop_CmpExpD64, 1176 1177 /* D128 x D128 -> IRCmpD128Result(I32) */ 1178 Iop_CmpExpD128, 1179 1180 /* QUANTIZE AND ROUND INSTRUCTIONS 1181 * The source operand is converted and rounded to the form with the 1182 * immediate exponent specified by the rounding and exponent parameter. 1183 * 1184 * The second operand is converted and rounded to the form 1185 * of the first operand's exponent and rounded based on the specified 1186 * rounding mode parameter. 1187 * 1188 * IRRoundingMode(I32) x D64 x D64 -> D64 */ 1189 Iop_QuantizeD64, 1190 1191 /* IRRoundingMode(I32) x D128 x D128 -> D128 */ 1192 Iop_QuantizeD128, 1193 1194 /* IRRoundingMode(I32) x I8 x D64 -> D64 1195 * The Decimal Floating point operand is rounded to the requested 1196 * significance given by the I8 operand as specified by the rounding 1197 * mode. 1198 */ 1199 Iop_SignificanceRoundD64, 1200 1201 /* IRRoundingMode(I32) x I8 x D128 -> D128 */ 1202 Iop_SignificanceRoundD128, 1203 1204 /* EXTRACT AND INSERT INSTRUCTIONS 1205 * D64 -> I64 1206 * The exponent of the D32 or D64 operand is extracted. The 1207 * extracted exponent is converted to a 64-bit signed binary integer. 1208 */ 1209 Iop_ExtractExpD64, 1210 1211 /* D128 -> I64 */ 1212 Iop_ExtractExpD128, 1213 1214 /* D64 -> I64 1215 * The number of significand digits of the D64 operand is extracted. 1216 * The number is stored as a 64-bit signed binary integer.
1217 */ 1218 Iop_ExtractSigD64, 1219 1220 /* D128 -> I64 */ 1221 Iop_ExtractSigD128, 1222 1223 /* I64 x D64 -> D64 1224 * The exponent is specified by the first I64 operand the signed 1225 * significand is given by the second I64 value. The result is a D64 1226 * value consisting of the specified significand and exponent whose 1227 * sign is that of the specified significand. 1228 */ 1229 Iop_InsertExpD64, 1230 1231 /* I64 x D128 -> D128 */ 1232 Iop_InsertExpD128, 1233 1234 /* Support for 128-bit DFP type */ 1235 Iop_D64HLtoD128, Iop_D128HItoD64, Iop_D128LOtoD64, 1236 1237 /* I64 -> I64 1238 * Convert 50-bit densely packed BCD string to 60 bit BCD string 1239 */ 1240 Iop_DPBtoBCD, 1241 1242 /* I64 -> I64 1243 * Convert 60 bit BCD string to 50-bit densely packed BCD string 1244 */ 1245 Iop_BCDtoDPB, 1246 1247 /* BCD arithmetic instructions, (V128, V128) -> V128 1248 * The BCD format is the same as that used in the BCD<->DPB conversion 1249 * routines, except using 124 digits (vs 60) plus the trailing 4-bit 1250 * signed code. */ 1251 Iop_BCDAdd, Iop_BCDSub, 1252 1253 /* Conversion I64 -> D64 */ 1254 Iop_ReinterpI64asD64, 1255 1256 /* Conversion D64 -> I64 */ 1257 Iop_ReinterpD64asI64, 1258 1259 /* ------------------ 128-bit SIMD FP. ------------------ */ 1260 1261 /* --- 32x4 vector FP --- */ 1262 1263 /* ternary :: IRRoundingMode(I32) x V128 x V128 -> V128 */ 1264 Iop_Add32Fx4, Iop_Sub32Fx4, Iop_Mul32Fx4, Iop_Div32Fx4, 1265 1266 /* binary */ 1267 Iop_Max32Fx4, Iop_Min32Fx4, 1268 Iop_Add32Fx2, Iop_Sub32Fx2, 1269 /* Note: For the following compares, the ppc and arm front-ends assume a 1270 nan in a lane of either argument returns zero for that lane. */ 1271 Iop_CmpEQ32Fx4, Iop_CmpLT32Fx4, Iop_CmpLE32Fx4, Iop_CmpUN32Fx4, 1272 Iop_CmpGT32Fx4, Iop_CmpGE32Fx4, 1273 1274 /* Pairwise Max and Min. See integer pairwise operations for details. 
*/ 1275 Iop_PwMax32Fx4, Iop_PwMin32Fx4, 1276 1277 /* unary */ 1278 Iop_Abs32Fx4, 1279 Iop_Sqrt32Fx4, Iop_RSqrt32Fx4, 1280 Iop_Neg32Fx4, 1281 1282 /* Vector Reciprocal Estimate finds an approximate reciprocal of each 1283 element in the operand vector, and places the results in the destination 1284 vector. */ 1285 Iop_Recip32Fx4, 1286 1287 /* Vector Reciprocal Step computes (2.0 - arg1 * arg2). 1288 Note, that if one of the arguments is zero and another one is infinity 1289 of arbitrary sign the result of the operation is 2.0. */ 1290 Iop_Recps32Fx4, 1291 1292 /* Vector Reciprocal Square Root Estimate finds an approximate reciprocal 1293 square root of each element in the operand vector. */ 1294 Iop_Rsqrte32Fx4, 1295 1296 /* Vector Reciprocal Square Root Step computes (3.0 - arg1 * arg2) / 2.0. 1297 Note, that if one of the arguments is zero and another one is infinity 1298 of arbitrary sign the result of the operation is 1.5. */ 1299 Iop_Rsqrts32Fx4, 1300 1301 /* --- Int to/from FP conversion --- */ 1302 /* Unlike the standard fp conversions, these irops take no 1303 rounding mode argument. Instead the irop trailers _R{M,P,N,Z} 1304 indicate the mode: {-inf, +inf, nearest, zero} respectively. */ 1305 Iop_I32UtoFx4, Iop_I32StoFx4, /* I32x4 -> F32x4 */ 1306 Iop_FtoI32Ux4_RZ, Iop_FtoI32Sx4_RZ, /* F32x4 -> I32x4 */ 1307 Iop_QFtoI32Ux4_RZ, Iop_QFtoI32Sx4_RZ, /* F32x4 -> I32x4 (saturating) */ 1308 Iop_RoundF32x4_RM, Iop_RoundF32x4_RP, /* round to fp integer */ 1309 Iop_RoundF32x4_RN, Iop_RoundF32x4_RZ, /* round to fp integer */ 1310 /* Fixed32 format is floating-point number with fixed number of fraction 1311 bits. The number of fraction bits is passed as a second argument of 1312 type I8. */ 1313 Iop_F32ToFixed32Ux4_RZ, Iop_F32ToFixed32Sx4_RZ, /* fp -> fixed-point */ 1314 Iop_Fixed32UToF32x4_RN, Iop_Fixed32SToF32x4_RN, /* fixed-point -> fp */ 1315 1316 /* --- Single to/from half conversion --- */ 1317 /* FIXME: what kind of rounding in F32x4 -> F16x4 case?
*/ 1318 Iop_F32toF16x4, Iop_F16toF32x4, /* F32x4 <-> F16x4 */ 1319 1320 /* --- 32x4 lowest-lane-only scalar FP --- */ 1321 1322 /* In binary cases, upper 3/4 is copied from first operand. In 1323 unary cases, upper 3/4 is copied from the operand. */ 1324 1325 /* binary */ 1326 Iop_Add32F0x4, Iop_Sub32F0x4, Iop_Mul32F0x4, Iop_Div32F0x4, 1327 Iop_Max32F0x4, Iop_Min32F0x4, 1328 Iop_CmpEQ32F0x4, Iop_CmpLT32F0x4, Iop_CmpLE32F0x4, Iop_CmpUN32F0x4, 1329 1330 /* unary */ 1331 Iop_Recip32F0x4, Iop_Sqrt32F0x4, Iop_RSqrt32F0x4, 1332 1333 /* --- 64x2 vector FP --- */ 1334 1335 /* ternary :: IRRoundingMode(I32) x V128 x V128 -> V128 */ 1336 Iop_Add64Fx2, Iop_Sub64Fx2, Iop_Mul64Fx2, Iop_Div64Fx2, 1337 1338 /* binary */ 1339 Iop_Max64Fx2, Iop_Min64Fx2, 1340 Iop_CmpEQ64Fx2, Iop_CmpLT64Fx2, Iop_CmpLE64Fx2, Iop_CmpUN64Fx2, 1341 1342 /* unary */ 1343 Iop_Abs64Fx2, 1344 Iop_Sqrt64Fx2, Iop_RSqrt64Fx2, 1345 Iop_Neg64Fx2, 1346 1347 /* Vector Reciprocal Estimate */ 1348 Iop_Recip64Fx2, 1349 1350 /* --- 64x2 lowest-lane-only scalar FP --- */ 1351 1352 /* In binary cases, upper half is copied from first operand. In 1353 unary cases, upper half is copied from the operand. */ 1354 1355 /* binary */ 1356 Iop_Add64F0x2, Iop_Sub64F0x2, Iop_Mul64F0x2, Iop_Div64F0x2, 1357 Iop_Max64F0x2, Iop_Min64F0x2, 1358 Iop_CmpEQ64F0x2, Iop_CmpLT64F0x2, Iop_CmpLE64F0x2, Iop_CmpUN64F0x2, 1359 1360 /* unary */ 1361 Iop_Recip64F0x2, Iop_Sqrt64F0x2, Iop_RSqrt64F0x2, 1362 1363 /* --- pack / unpack --- */ 1364 1365 /* 64 <-> 128 bit vector */ 1366 Iop_V128to64, // :: V128 -> I64, low half 1367 Iop_V128HIto64, // :: V128 -> I64, high half 1368 Iop_64HLtoV128, // :: (I64,I64) -> V128 1369 1370 Iop_64UtoV128, 1371 Iop_SetV128lo64, 1372 1373 /* Copies lower 64/32/16/8 bits, zeroes out the rest. 
*/ 1374 Iop_ZeroHI64ofV128, // :: V128 -> V128 1375 Iop_ZeroHI96ofV128, // :: V128 -> V128 1376 Iop_ZeroHI112ofV128, // :: V128 -> V128 1377 Iop_ZeroHI120ofV128, // :: V128 -> V128 1378 1379 /* 32 <-> 128 bit vector */ 1380 Iop_32UtoV128, 1381 Iop_V128to32, // :: V128 -> I32, lowest lane 1382 Iop_SetV128lo32, // :: (V128,I32) -> V128 1383 1384 /* ------------------ 128-bit SIMD Integer. ------------------ */ 1385 1386 /* BITWISE OPS */ 1387 Iop_NotV128, 1388 Iop_AndV128, Iop_OrV128, Iop_XorV128, 1389 1390 /* VECTOR SHIFT (shift amt :: Ity_I8) */ 1391 Iop_ShlV128, Iop_ShrV128, 1392 1393 /* MISC (vector integer cmp != 0) */ 1394 Iop_CmpNEZ8x16, Iop_CmpNEZ16x8, Iop_CmpNEZ32x4, Iop_CmpNEZ64x2, 1395 1396 /* ADDITION (normal / unsigned sat / signed sat) */ 1397 Iop_Add8x16, Iop_Add16x8, Iop_Add32x4, Iop_Add64x2, 1398 Iop_QAdd8Ux16, Iop_QAdd16Ux8, Iop_QAdd32Ux4, Iop_QAdd64Ux2, 1399 Iop_QAdd8Sx16, Iop_QAdd16Sx8, Iop_QAdd32Sx4, Iop_QAdd64Sx2, 1400 1401 /* SUBTRACTION (normal / unsigned sat / signed sat) */ 1402 Iop_Sub8x16, Iop_Sub16x8, Iop_Sub32x4, Iop_Sub64x2, 1403 Iop_QSub8Ux16, Iop_QSub16Ux8, Iop_QSub32Ux4, Iop_QSub64Ux2, 1404 Iop_QSub8Sx16, Iop_QSub16Sx8, Iop_QSub32Sx4, Iop_QSub64Sx2, 1405 1406 /* MULTIPLICATION (normal / high half of signed/unsigned) */ 1407 Iop_Mul8x16, Iop_Mul16x8, Iop_Mul32x4, 1408 Iop_MulHi16Ux8, Iop_MulHi32Ux4, 1409 Iop_MulHi16Sx8, Iop_MulHi32Sx4, 1410 /* (widening signed/unsigned of even lanes, with lowest lane=zero) */ 1411 Iop_MullEven8Ux16, Iop_MullEven16Ux8, Iop_MullEven32Ux4, 1412 Iop_MullEven8Sx16, Iop_MullEven16Sx8, Iop_MullEven32Sx4, 1413 /* FIXME: document these */ 1414 Iop_Mull8Ux8, Iop_Mull8Sx8, 1415 Iop_Mull16Ux4, Iop_Mull16Sx4, 1416 Iop_Mull32Ux2, Iop_Mull32Sx2, 1417 /* Vector Saturating Doubling Multiply Returning High Half and 1418 Vector Saturating Rounding Doubling Multiply Returning High Half */ 1419 /* These IROp's multiply corresponding elements in two vectors, double 1420 the results, and place the most significant half of 
the final results 1421 in the destination vector. The results are truncated or rounded. If 1422 any of the results overflow, they are saturated. */ 1423 Iop_QDMulHi16Sx8, Iop_QDMulHi32Sx4, 1424 Iop_QRDMulHi16Sx8, Iop_QRDMulHi32Sx4, 1425 /* Doubling saturating multiplication (long) (I64, I64) -> V128 */ 1426 Iop_QDMulLong16Sx4, Iop_QDMulLong32Sx2, 1427 /* Polynomial multiplication treats its arguments as 1428 coefficients of polynomials over {0, 1}. */ 1429 Iop_PolynomialMul8x16, /* (V128, V128) -> V128 */ 1430 Iop_PolynomialMull8x8, /* (I64, I64) -> V128 */ 1431 1432 /* Vector Polynomial multiplication add. (V128, V128) -> V128 1433 1434 *** Below is the algorithm for the instructions. These Iops could 1435 be emulated to get this functionality, but the emulation would 1436 be long and messy. 1437 1438 Example for polynomial multiply add for vector of bytes 1439 do i = 0 to 15 1440 prod[i].bit[0:14] <- 0 1441 srcA <- VR[argL].byte[i] 1442 srcB <- VR[argR].byte[i] 1443 do j = 0 to 7 1444 do k = 0 to j 1445 gbit <- srcA.bit[k] & srcB.bit[j-k] 1446 prod[i].bit[j] <- prod[i].bit[j] ^ gbit 1447 end 1448 end 1449 1450 do j = 8 to 14 1451 do k = j-7 to 7 1452 gbit <- (srcA.bit[k] & srcB.bit[j-k]) 1453 prod[i].bit[j] <- prod[i].bit[j] ^ gbit 1454 end 1455 end 1456 end 1457 1458 do i = 0 to 7 1459 VR[dst].hword[i] <- 0b0 || (prod[2×i] ^ prod[2×i+1]) 1460 end 1461 */ 1462 Iop_PolynomialMulAdd8x16, Iop_PolynomialMulAdd16x8, 1463 Iop_PolynomialMulAdd32x4, Iop_PolynomialMulAdd64x2, 1464 1465 /* PAIRWISE operations */ 1466 /* Iop_PwFoo16x4( [a,b,c,d], [e,f,g,h] ) = 1467 [Foo16(a,b), Foo16(c,d), Foo16(e,f), Foo16(g,h)] */ 1468 Iop_PwAdd8x16, Iop_PwAdd16x8, Iop_PwAdd32x4, 1469 Iop_PwAdd32Fx2, 1470 /* Longening variant is unary. The resulting vector contains two times 1471 less elements than operand, but they are two times wider. 1472 Example: 1473 Iop_PwAddL16Ux4( [a,b,c,d] ) = [a+b,c+d] 1474 where a+b and c+d are unsigned 32-bit values. 
*/ 1475 Iop_PwAddL8Ux16, Iop_PwAddL16Ux8, Iop_PwAddL32Ux4, 1476 Iop_PwAddL8Sx16, Iop_PwAddL16Sx8, Iop_PwAddL32Sx4, 1477 1478 /* Other unary pairwise ops */ 1479 1480 /* Vector bit matrix transpose. (V128) -> V128 */ 1481 /* For each doubleword element of the source vector, an 8-bit x 8-bit 1482 * matrix transpose is performed. */ 1483 Iop_PwBitMtxXpose64x2, 1484 1485 /* ABSOLUTE VALUE */ 1486 Iop_Abs8x16, Iop_Abs16x8, Iop_Abs32x4, 1487 1488 /* AVERAGING: note: (arg1 + arg2 + 1) >>u 1 */ 1489 Iop_Avg8Ux16, Iop_Avg16Ux8, Iop_Avg32Ux4, 1490 Iop_Avg8Sx16, Iop_Avg16Sx8, Iop_Avg32Sx4, 1491 1492 /* MIN/MAX */ 1493 Iop_Max8Sx16, Iop_Max16Sx8, Iop_Max32Sx4, Iop_Max64Sx2, 1494 Iop_Max8Ux16, Iop_Max16Ux8, Iop_Max32Ux4, Iop_Max64Ux2, 1495 Iop_Min8Sx16, Iop_Min16Sx8, Iop_Min32Sx4, Iop_Min64Sx2, 1496 Iop_Min8Ux16, Iop_Min16Ux8, Iop_Min32Ux4, Iop_Min64Ux2, 1497 1498 /* COMPARISON */ 1499 Iop_CmpEQ8x16, Iop_CmpEQ16x8, Iop_CmpEQ32x4, Iop_CmpEQ64x2, 1500 Iop_CmpGT8Sx16, Iop_CmpGT16Sx8, Iop_CmpGT32Sx4, Iop_CmpGT64Sx2, 1501 Iop_CmpGT8Ux16, Iop_CmpGT16Ux8, Iop_CmpGT32Ux4, Iop_CmpGT64Ux2, 1502 1503 /* COUNT ones / leading zeroes / leading sign bits (not including topmost 1504 bit) */ 1505 Iop_Cnt8x16, 1506 Iop_Clz8Sx16, Iop_Clz16Sx8, Iop_Clz32Sx4, 1507 Iop_Cls8Sx16, Iop_Cls16Sx8, Iop_Cls32Sx4, 1508 1509 /* VECTOR x SCALAR SHIFT (shift amt :: Ity_I8) */ 1510 Iop_ShlN8x16, Iop_ShlN16x8, Iop_ShlN32x4, Iop_ShlN64x2, 1511 Iop_ShrN8x16, Iop_ShrN16x8, Iop_ShrN32x4, Iop_ShrN64x2, 1512 Iop_SarN8x16, Iop_SarN16x8, Iop_SarN32x4, Iop_SarN64x2, 1513 1514 /* VECTOR x VECTOR SHIFT / ROTATE */ 1515 Iop_Shl8x16, Iop_Shl16x8, Iop_Shl32x4, Iop_Shl64x2, 1516 Iop_Shr8x16, Iop_Shr16x8, Iop_Shr32x4, Iop_Shr64x2, 1517 Iop_Sar8x16, Iop_Sar16x8, Iop_Sar32x4, Iop_Sar64x2, 1518 Iop_Sal8x16, Iop_Sal16x8, Iop_Sal32x4, Iop_Sal64x2, 1519 Iop_Rol8x16, Iop_Rol16x8, Iop_Rol32x4, Iop_Rol64x2, 1520 1521 /* VECTOR x VECTOR SATURATING SHIFT */ 1522 Iop_QShl8x16, Iop_QShl16x8, Iop_QShl32x4, Iop_QShl64x2, 1523 Iop_QSal8x16, 
Iop_QSal16x8, Iop_QSal32x4, Iop_QSal64x2, 1524 /* VECTOR x INTEGER SATURATING SHIFT */ 1525 Iop_QShlN8Sx16, Iop_QShlN16Sx8, Iop_QShlN32Sx4, Iop_QShlN64Sx2, 1526 Iop_QShlN8x16, Iop_QShlN16x8, Iop_QShlN32x4, Iop_QShlN64x2, 1527 Iop_QSalN8x16, Iop_QSalN16x8, Iop_QSalN32x4, Iop_QSalN64x2, 1528 1529 /* NARROWING (binary) 1530 -- narrow 2xV128 into 1xV128, hi half from left arg */ 1531 /* See comments above w.r.t. U vs S issues in saturated narrowing. */ 1532 Iop_QNarrowBin16Sto8Ux16, Iop_QNarrowBin32Sto16Ux8, 1533 Iop_QNarrowBin16Sto8Sx16, Iop_QNarrowBin32Sto16Sx8, 1534 Iop_QNarrowBin16Uto8Ux16, Iop_QNarrowBin32Uto16Ux8, 1535 Iop_NarrowBin16to8x16, Iop_NarrowBin32to16x8, 1536 Iop_QNarrowBin64Sto32Sx4, Iop_QNarrowBin64Uto32Ux4, 1537 Iop_NarrowBin64to32x4, 1538 1539 /* NARROWING (unary) -- narrow V128 into I64 */ 1540 Iop_NarrowUn16to8x8, Iop_NarrowUn32to16x4, Iop_NarrowUn64to32x2, 1541 /* Saturating narrowing from signed source to signed/unsigned 1542 destination */ 1543 Iop_QNarrowUn16Sto8Sx8, Iop_QNarrowUn32Sto16Sx4, Iop_QNarrowUn64Sto32Sx2, 1544 Iop_QNarrowUn16Sto8Ux8, Iop_QNarrowUn32Sto16Ux4, Iop_QNarrowUn64Sto32Ux2, 1545 /* Saturating narrowing from unsigned source to unsigned destination */ 1546 Iop_QNarrowUn16Uto8Ux8, Iop_QNarrowUn32Uto16Ux4, Iop_QNarrowUn64Uto32Ux2, 1547 1548 /* WIDENING -- sign or zero extend each element of the argument 1549 vector to the twice original size. The resulting vector consists of 1550 the same number of elements but each element and the vector itself 1551 are twice as wide. 1552 All operations are I64->V128. 1553 Example 1554 Iop_Widen32Sto64x2( [a, b] ) = [c, d] 1555 where c = Iop_32Sto64(a) and d = Iop_32Sto64(b) */ 1556 Iop_Widen8Uto16x8, Iop_Widen16Uto32x4, Iop_Widen32Uto64x2, 1557 Iop_Widen8Sto16x8, Iop_Widen16Sto32x4, Iop_Widen32Sto64x2, 1558 1559 /* INTERLEAVING */ 1560 /* Interleave lanes from low or high halves of 1561 operands. Most-significant result lane is from the left 1562 arg. 
*/ 1563 Iop_InterleaveHI8x16, Iop_InterleaveHI16x8, 1564 Iop_InterleaveHI32x4, Iop_InterleaveHI64x2, 1565 Iop_InterleaveLO8x16, Iop_InterleaveLO16x8, 1566 Iop_InterleaveLO32x4, Iop_InterleaveLO64x2, 1567 /* Interleave odd/even lanes of operands. Most-significant result lane 1568 is from the left arg. */ 1569 Iop_InterleaveOddLanes8x16, Iop_InterleaveEvenLanes8x16, 1570 Iop_InterleaveOddLanes16x8, Iop_InterleaveEvenLanes16x8, 1571 Iop_InterleaveOddLanes32x4, Iop_InterleaveEvenLanes32x4, 1572 1573 /* CONCATENATION -- build a new value by concatenating either 1574 the even or odd lanes of both operands. */ 1575 Iop_CatOddLanes8x16, Iop_CatOddLanes16x8, Iop_CatOddLanes32x4, 1576 Iop_CatEvenLanes8x16, Iop_CatEvenLanes16x8, Iop_CatEvenLanes32x4, 1577 1578 /* GET elements of VECTOR 1579 GET is binop (V128, I8) -> I<elem_size> */ 1580 /* Note: the arm back-end handles only constant second argument. */ 1581 Iop_GetElem8x16, Iop_GetElem16x8, Iop_GetElem32x4, Iop_GetElem64x2, 1582 1583 /* DUPLICATING -- copy value to all lanes */ 1584 Iop_Dup8x16, Iop_Dup16x8, Iop_Dup32x4, 1585 1586 /* EXTRACT -- copy 16-arg3 highest bytes from arg1 to 16-arg3 lowest bytes 1587 of result and arg3 lowest bytes of arg2 to arg3 highest bytes of 1588 result. 1589 It is a triop: (V128, V128, I8) -> V128 */ 1590 /* Note: the ARM back end handles only constant arg3 in this operation. */ 1591 Iop_ExtractV128, 1592 1593 /* REVERSE the order of elements in each Half-words, Words, 1594 Double-words */ 1595 /* Examples: 1596 Reverse32_16x8([a,b,c,d,e,f,g,h]) = [b,a,d,c,f,e,h,g] 1597 Reverse64_16x8([a,b,c,d,e,f,g,h]) = [d,c,b,a,h,g,f,e] */ 1598 Iop_Reverse16_8x16, 1599 Iop_Reverse32_8x16, Iop_Reverse32_16x8, 1600 Iop_Reverse64_8x16, Iop_Reverse64_16x8, Iop_Reverse64_32x4, 1601 1602 /* PERMUTING -- copy src bytes to dst, 1603 as indexed by control vector bytes: 1604 for i in 0 .. 15 . result[i] = argL[ argR[i] ] 1605 argR[i] values may only be in the range 0 .. 15, else behaviour 1606 is undefined. 
*/ 1607 Iop_Perm8x16, 1608 Iop_Perm32x4, /* ditto, except argR values are restricted to 0 .. 3 */ 1609 1610 /* MISC CONVERSION -- get high bits of each byte lane, a la 1611 x86/amd64 pmovmskb */ 1612 Iop_GetMSBs8x16, /* V128 -> I16 */ 1613 1614 /* Vector Reciprocal Estimate and Vector Reciprocal Square Root Estimate 1615 See floating-point equiwalents for details. */ 1616 Iop_Recip32x4, Iop_Rsqrte32x4, 1617 1618 /* ------------------ 256-bit SIMD Integer. ------------------ */ 1619 1620 /* Pack/unpack */ 1621 Iop_V256to64_0, // V256 -> I64, extract least significant lane 1622 Iop_V256to64_1, 1623 Iop_V256to64_2, 1624 Iop_V256to64_3, // V256 -> I64, extract most significant lane 1625 1626 Iop_64x4toV256, // (I64,I64,I64,I64)->V256 1627 // first arg is most significant lane 1628 1629 Iop_V256toV128_0, // V256 -> V128, less significant lane 1630 Iop_V256toV128_1, // V256 -> V128, more significant lane 1631 Iop_V128HLtoV256, // (V128,V128)->V256, first arg is most signif 1632 1633 Iop_AndV256, 1634 Iop_OrV256, 1635 Iop_XorV256, 1636 Iop_NotV256, 1637 1638 /* MISC (vector integer cmp != 0) */ 1639 Iop_CmpNEZ8x32, Iop_CmpNEZ16x16, Iop_CmpNEZ32x8, Iop_CmpNEZ64x4, 1640 1641 Iop_Add8x32, Iop_Add16x16, Iop_Add32x8, Iop_Add64x4, 1642 Iop_Sub8x32, Iop_Sub16x16, Iop_Sub32x8, Iop_Sub64x4, 1643 1644 Iop_CmpEQ8x32, Iop_CmpEQ16x16, Iop_CmpEQ32x8, Iop_CmpEQ64x4, 1645 Iop_CmpGT8Sx32, Iop_CmpGT16Sx16, Iop_CmpGT32Sx8, Iop_CmpGT64Sx4, 1646 1647 Iop_ShlN16x16, Iop_ShlN32x8, Iop_ShlN64x4, 1648 Iop_ShrN16x16, Iop_ShrN32x8, Iop_ShrN64x4, 1649 Iop_SarN16x16, Iop_SarN32x8, 1650 1651 Iop_Max8Sx32, Iop_Max16Sx16, Iop_Max32Sx8, 1652 Iop_Max8Ux32, Iop_Max16Ux16, Iop_Max32Ux8, 1653 Iop_Min8Sx32, Iop_Min16Sx16, Iop_Min32Sx8, 1654 Iop_Min8Ux32, Iop_Min16Ux16, Iop_Min32Ux8, 1655 1656 Iop_Mul16x16, Iop_Mul32x8, 1657 Iop_MulHi16Ux16, Iop_MulHi16Sx16, 1658 1659 Iop_QAdd8Ux32, Iop_QAdd16Ux16, 1660 Iop_QAdd8Sx32, Iop_QAdd16Sx16, 1661 Iop_QSub8Ux32, Iop_QSub16Ux16, 1662 Iop_QSub8Sx32, Iop_QSub16Sx16, 1663 
1664 Iop_Avg8Ux32, Iop_Avg16Ux16, 1665 1666 Iop_Perm32x8, 1667 1668 /* (V128, V128) -> V128 */ 1669 Iop_CipherV128, Iop_CipherLV128, Iop_CipherSV128, 1670 Iop_NCipherV128, Iop_NCipherLV128, 1671 1672 /* Hash instructions, Federal Information Processing Standards 1673 * Publication 180-3 Secure Hash Standard. */ 1674 /* (V128, I8) -> V128; The I8 input arg is (ST | SIX), where ST and 1675 * SIX are fields from the insn. See ISA 2.07 description of 1676 * vshasigmad and vshasigmaw insns.*/ 1677 Iop_SHA512, Iop_SHA256, 1678 1679 /* ------------------ 256-bit SIMD FP. ------------------ */ 1680 1681 /* ternary :: IRRoundingMode(I32) x V256 x V256 -> V256 */ 1682 Iop_Add64Fx4, Iop_Sub64Fx4, Iop_Mul64Fx4, Iop_Div64Fx4, 1683 Iop_Add32Fx8, Iop_Sub32Fx8, Iop_Mul32Fx8, Iop_Div32Fx8, 1684 1685 Iop_Sqrt32Fx8, 1686 Iop_Sqrt64Fx4, 1687 Iop_RSqrt32Fx8, 1688 Iop_Recip32Fx8, 1689 1690 Iop_Max32Fx8, Iop_Min32Fx8, 1691 Iop_Max64Fx4, Iop_Min64Fx4, 1692 Iop_LAST /* must be the last enumerator */ 1693 } 1694 IROp; 1695 1696/* Pretty-print an op. */ 1697extern void ppIROp ( IROp ); 1698 1699 1700/* Encoding of IEEE754-specified rounding modes. 1701 Note, various front and back ends rely on the actual numerical 1702 values of these, so do not change them. */ 1703typedef 1704 enum { 1705 Irrm_NEAREST = 0, // Round to nearest, ties to even 1706 Irrm_NegINF = 1, // Round to negative infinity 1707 Irrm_PosINF = 2, // Round to positive infinity 1708 Irrm_ZERO = 3, // Round toward zero 1709 Irrm_NEAREST_TIE_AWAY_0 = 4, // Round to nearest, ties away from 0 1710 Irrm_PREPARE_SHORTER = 5, // Round to prepare for shorter 1711 // precision 1712 Irrm_AWAY_FROM_ZERO = 6, // Round to away from 0 1713 Irrm_NEAREST_TIE_TOWARD_0 = 7 // Round to nearest, ties towards 0 1714 } 1715 IRRoundingMode; 1716 1717/* Binary floating point comparison result values. 1718 This is also derived from what IA32 does. 
 */
typedef
   enum {
      Ircr_UN = 0x45,
      Ircr_LT = 0x01,
      Ircr_GT = 0x00,
      Ircr_EQ = 0x40
   }
   IRCmpFResult;

typedef IRCmpFResult IRCmpF32Result;
typedef IRCmpFResult IRCmpF64Result;
typedef IRCmpFResult IRCmpF128Result;

/* Decimal floating point result values. */
typedef IRCmpFResult IRCmpDResult;
typedef IRCmpDResult IRCmpD64Result;
typedef IRCmpDResult IRCmpD128Result;

/* ------------------ Expressions ------------------ */

typedef struct _IRQop   IRQop;   /* forward declaration */
typedef struct _IRTriop IRTriop; /* forward declaration */


/* The different kinds of expressions.  Their meaning is explained below
   in the comments for IRExpr. */
typedef
   enum {
      Iex_Binder=0x1900,
      Iex_Get,
      Iex_GetI,
      Iex_RdTmp,
      Iex_Qop,
      Iex_Triop,
      Iex_Binop,
      Iex_Unop,
      Iex_Load,
      Iex_Const,
      Iex_ITE,
      Iex_CCall,
      Iex_VECRET,
      Iex_BBPTR
   }
   IRExprTag;

/* An expression.  Stored as a tagged union.  'tag' indicates what kind
   of expression this is.  'Iex' is the union that holds the fields.  If
   an IRExpr 'e' has e.tag equal to Iex_Load, then it's a load
   expression, and the fields can be accessed with
   'e.Iex.Load.<fieldname>'.

   For each kind of expression, we show what it looks like when
   pretty-printed with ppIRExpr().
*/
typedef
   struct _IRExpr
   IRExpr;

struct _IRExpr {
   IRExprTag tag;
   union {
      /* Used only in pattern matching within Vex.  Should not be seen
         outside of Vex. */
      struct {
         Int binder;
      } Binder;

      /* Read a guest register, at a fixed offset in the guest state.
         ppIRExpr output: GET:<ty>(<offset>), eg. GET:I32(0)
      */
      struct {
         Int    offset;    /* Offset into the guest state */
         IRType ty;        /* Type of the value being read */
      } Get;

      /* Read a guest register at a non-fixed offset in the guest
         state.  This allows circular indexing into parts of the guest
         state, which is essential for modelling situations where the
         identity of guest registers is not known until run time.  One
         example is the x87 FP register stack.

         The part of the guest state to be treated as a circular array
         is described in the IRRegArray 'descr' field.  It holds the
         offset of the first element in the array, the type of each
         element, and the number of elements.

         The array index is indicated rather indirectly, in a way
         which makes optimisation easy: as the sum of variable part
         (the 'ix' field) and a constant offset (the 'bias' field).

         Since the indexing is circular, the actual array index to use
         is computed as (ix + bias) % num-of-elems-in-the-array.

         Here's an example.  The description

            (96:8xF64)[t39,-7]

         describes an array of 8 F64-typed values, the
         guest-state-offset of the first being 96.  This array is
         being indexed at (t39 - 7) % 8.

         It is important to get the array size/type exactly correct
         since IR optimisation looks closely at such info in order to
         establish aliasing/non-aliasing between separate GetI and
         PutI events, which is used to establish when they can be
         reordered, etc.  Putting incorrect info in will lead to
         obscure IR optimisation bugs.

            ppIRExpr output: GETI<descr>[<ix>,<bias>]
                         eg. GETI(128:8xI8)[t1,0]
      */
      struct {
         IRRegArray* descr; /* Part of guest state treated as circular */
         IRExpr*     ix;    /* Variable part of index into array */
         Int         bias;  /* Constant offset part of index into array */
      } GetI;

      /* The value held by a temporary.
         ppIRExpr output: t<tmp>, eg. t1
      */
      struct {
         IRTemp tmp;       /* The temporary number */
      } RdTmp;

      /* A quaternary operation.
         ppIRExpr output: <op>(<arg1>, <arg2>, <arg3>, <arg4>),
                      eg. MAddF64r32(t1, t2, t3, t4)
      */
      struct {
         IRQop* details;
      } Qop;

      /* A ternary operation.
         ppIRExpr output: <op>(<arg1>, <arg2>, <arg3>),
                      eg. MulF64(1, 2.0, 3.0)
      */
      struct {
         IRTriop* details;
      } Triop;

      /* A binary operation.
         ppIRExpr output: <op>(<arg1>, <arg2>), eg. Add32(t1,t2)
      */
      struct {
         IROp    op;    /* op-code   */
         IRExpr* arg1;  /* operand 1 */
         IRExpr* arg2;  /* operand 2 */
      } Binop;

      /* A unary operation.
         ppIRExpr output: <op>(<arg>), eg. Neg8(t1)
      */
      struct {
         IROp    op;    /* op-code */
         IRExpr* arg;   /* operand */
      } Unop;

      /* A load from memory -- a normal load, not a load-linked.
         Load-Linkeds (and Store-Conditionals) are instead represented
         by IRStmt.LLSC since Load-Linkeds have side effects and so
         are not semantically valid IRExpr's.
         ppIRExpr output: LD<end>:<ty>(<addr>), eg. LDle:I32(t1)
      */
      struct {
         IREndness end;    /* Endian-ness of the load */
         IRType    ty;     /* Type of the loaded value */
         IRExpr*   addr;   /* Address being loaded from */
      } Load;

      /* A constant-valued expression.
         ppIRExpr output: <con>, eg. 0x4:I32
      */
      struct {
         IRConst* con;     /* The constant itself */
      } Const;

      /* A call to a pure (no side-effects) helper C function.

         With the 'cee' field, 'name' is the function's name.  It is
         only used for pretty-printing purposes.  The address to call
         (host address, of course) is stored in the 'addr' field
         inside 'cee'.

         The 'args' field is a NULL-terminated array of arguments.
         The stated return IRType, and the implied argument types,
         must match that of the function being called well enough so
         that the back end can actually generate correct code for the
         call.

         The called function **must** satisfy the following:

         * no side effects -- must be a pure function, the result of
           which depends only on the passed parameters.

         * it may not look at, nor modify, any of the guest state
           since that would hide guest state transitions from
           instrumenters

         * it may not access guest memory, since that would hide
           guest memory transactions from the instrumenters

         * it must not assume that arguments are being evaluated in a
           particular order.  The order of evaluation is unspecified.

         This is restrictive, but makes the semantics clean, and does
         not interfere with IR optimisation.

         If you want to call a helper which can mess with guest state
         and/or memory, instead use Ist_Dirty.  This is a lot more
         flexible, but you have to give a bunch of details about what
         the helper does (and you better be telling the truth,
         otherwise any derived instrumentation will be wrong).  Also
         Ist_Dirty inhibits various IR optimisations and so can cause
         quite poor code to be generated.  Try to avoid it.

         In principle it would be allowable to have the arg vector
         contain an IRExpr_VECRET(), although not IRExpr_BBPTR().  However,
         at the moment there is no requirement for clean helper calls to
         be able to return V128 or V256 values.  Hence this is not allowed.

         ppIRExpr output: <cee>(<args>):<retty>
                      eg. foo{0x80489304}(t1, t2):I32
      */
      struct {
         IRCallee* cee;    /* Function to call. */
         IRType    retty;  /* Type of return value. */
         IRExpr**  args;   /* Vector of argument expressions. */
      } CCall;

      /* A ternary if-then-else operator.  It returns iftrue if cond is
         nonzero, iffalse otherwise.  Note that it is STRICT, ie. both
         iftrue and iffalse are evaluated in all cases.

         ppIRExpr output: ITE(<cond>,<iftrue>,<iffalse>),
                      eg. ITE(t6,t7,t8)
      */
      struct {
         IRExpr* cond;     /* Condition */
         IRExpr* iftrue;   /* True expression */
         IRExpr* iffalse;  /* False expression */
      } ITE;
   } Iex;
};

/* Expression auxiliaries: a ternary expression. */
struct _IRTriop {
   IROp op;          /* op-code   */
   IRExpr* arg1;     /* operand 1 */
   IRExpr* arg2;     /* operand 2 */
   IRExpr* arg3;     /* operand 3 */
};

/* Expression auxiliaries: a quaternary expression. */
struct _IRQop {
   IROp op;          /* op-code   */
   IRExpr* arg1;     /* operand 1 */
   IRExpr* arg2;     /* operand 2 */
   IRExpr* arg3;     /* operand 3 */
   IRExpr* arg4;     /* operand 4 */
};


/* Two special kinds of IRExpr, which can ONLY be used in
   argument lists for dirty helper calls (IRDirty.args) and in NO
   OTHER PLACES.  And then only in very limited ways.  */

/* Denotes an argument which (in the helper) takes a pointer to a
   (naturally aligned) V128 or V256, into which the helper is expected
   to write its result.  Use of IRExpr_VECRET() is strictly
   controlled.  If the helper returns a V128 or V256 value then
   IRExpr_VECRET() must appear exactly once in the arg list, although
   it can appear anywhere, and the helper must have a C 'void' return
   type.  If the helper returns any other type, IRExpr_VECRET() may
   not appear in the argument list.  */

/* Denotes a void* argument which is passed to the helper, which at
   run time will point to the thread's guest state area.  This can
   only appear at most once in an argument list, and it may not appear
   at all in argument lists for clean helper calls.
 */

/* Is this expression one of the two special arg-list-only kinds
   (see the comments immediately above)? */
static inline Bool is_IRExpr_VECRET_or_BBPTR ( IRExpr* e ) {
   return e->tag == Iex_VECRET || e->tag == Iex_BBPTR;
}


/* Expression constructors. */
extern IRExpr* IRExpr_Binder ( Int binder );
extern IRExpr* IRExpr_Get    ( Int off, IRType ty );
extern IRExpr* IRExpr_GetI   ( IRRegArray* descr, IRExpr* ix, Int bias );
extern IRExpr* IRExpr_RdTmp  ( IRTemp tmp );
extern IRExpr* IRExpr_Qop    ( IROp op, IRExpr* arg1, IRExpr* arg2,
                               IRExpr* arg3, IRExpr* arg4 );
extern IRExpr* IRExpr_Triop  ( IROp op, IRExpr* arg1,
                               IRExpr* arg2, IRExpr* arg3 );
extern IRExpr* IRExpr_Binop  ( IROp op, IRExpr* arg1, IRExpr* arg2 );
extern IRExpr* IRExpr_Unop   ( IROp op, IRExpr* arg );
extern IRExpr* IRExpr_Load   ( IREndness end, IRType ty, IRExpr* addr );
extern IRExpr* IRExpr_Const  ( IRConst* con );
extern IRExpr* IRExpr_CCall  ( IRCallee* cee, IRType retty, IRExpr** args );
extern IRExpr* IRExpr_ITE    ( IRExpr* cond, IRExpr* iftrue, IRExpr* iffalse );
extern IRExpr* IRExpr_VECRET ( void );
extern IRExpr* IRExpr_BBPTR  ( void );

/* Deep-copy an IRExpr. */
extern IRExpr* deepCopyIRExpr ( IRExpr* );

/* Pretty-print an IRExpr. */
extern void ppIRExpr ( IRExpr* );

/* NULL-terminated IRExpr vector constructors, suitable for
   use as arg lists in clean/dirty helper calls.
 */
extern IRExpr** mkIRExprVec_0 ( void );
extern IRExpr** mkIRExprVec_1 ( IRExpr* );
extern IRExpr** mkIRExprVec_2 ( IRExpr*, IRExpr* );
extern IRExpr** mkIRExprVec_3 ( IRExpr*, IRExpr*, IRExpr* );
extern IRExpr** mkIRExprVec_4 ( IRExpr*, IRExpr*, IRExpr*, IRExpr* );
extern IRExpr** mkIRExprVec_5 ( IRExpr*, IRExpr*, IRExpr*, IRExpr*,
                                IRExpr* );
extern IRExpr** mkIRExprVec_6 ( IRExpr*, IRExpr*, IRExpr*, IRExpr*,
                                IRExpr*, IRExpr* );
extern IRExpr** mkIRExprVec_7 ( IRExpr*, IRExpr*, IRExpr*, IRExpr*,
                                IRExpr*, IRExpr*, IRExpr* );
extern IRExpr** mkIRExprVec_8 ( IRExpr*, IRExpr*, IRExpr*, IRExpr*,
                                IRExpr*, IRExpr*, IRExpr*, IRExpr*);

/* IRExpr copiers:
   - shallowCopy: shallow-copy (ie. create a new vector that shares the
     elements with the original).
   - deepCopy: deep-copy (ie. create a completely new vector). */
extern IRExpr** shallowCopyIRExprVec ( IRExpr** );
extern IRExpr** deepCopyIRExprVec ( IRExpr** );

/* Make a constant expression from the given host word taking into
   account (of course) the host word size. */
extern IRExpr* mkIRExpr_HWord ( HWord );

/* Convenience function for constructing clean helper calls. */
extern
IRExpr* mkIRExprCCall ( IRType retty,
                        Int regparms, const HChar* name, void* addr,
                        IRExpr** args );


/* Convenience functions for atoms (IRExprs which are either Iex_Tmp or
 * Iex_Const). */
/* Is this expression an atom, ie. a temporary read or a constant? */
static inline Bool isIRAtom ( IRExpr* e ) {
   return toBool(e->tag == Iex_RdTmp || e->tag == Iex_Const);
}

/* Are these two IR atoms identical?  Causes an assertion
   failure if they are passed non-atoms. */
extern Bool eqIRAtom ( IRExpr*, IRExpr* );


/* ------------------ Jump kinds ------------------ */

/* This describes hints which can be passed to the dispatcher at guest
   control-flow transfer points.

   Re Ijk_InvalICache and Ijk_FlushDCache: the guest state _must_ have
   two pseudo-registers, guest_CMSTART and guest_CMLEN, which specify
   the start and length of the region to be invalidated.  CM stands
   for "Cache Management".  These are both the size of a guest word.
   It is the responsibility of the relevant toIR.c to ensure that
   these are filled in with suitable values before issuing a jump of
   kind Ijk_InvalICache or Ijk_FlushDCache.

   Ijk_InvalICache requests invalidation of translations taken from
   the requested range.  Ijk_FlushDCache requests flushing of the D
   cache for the specified range.

   Re Ijk_EmWarn and Ijk_EmFail: the guest state must have a
   pseudo-register guest_EMNOTE, which is 32-bits regardless of the
   host or guest word size.  That register should be made to hold a
   VexEmNote value to indicate the reason for the exit.

   In the case of Ijk_EmFail, the exit is fatal (Vex-generated code
   cannot continue) and so the jump destination can be anything.

   Re Ijk_Sys_ (syscall jumps): the guest state must have a
   pseudo-register guest_IP_AT_SYSCALL, which is the size of a guest
   word.  Front ends should set this to be the IP at the most recently
   executed kernel-entering (system call) instruction.  This makes it
   very much easier (viz, actually possible at all) to back up the
   guest to restart a syscall that has been interrupted by a signal.
*/
typedef
   enum {
      Ijk_INVALID=0x1A00,
      Ijk_Boring,         /* not interesting; just goto next */
      Ijk_Call,           /* guest is doing a call */
      Ijk_Ret,            /* guest is doing a return */
      Ijk_ClientReq,      /* do guest client req before continuing */
      Ijk_Yield,          /* client is yielding to thread scheduler */
      Ijk_EmWarn,         /* report emulation warning before continuing */
      Ijk_EmFail,         /* emulation critical (FATAL) error; give up */
      Ijk_NoDecode,       /* current instruction cannot be decoded */
      Ijk_MapFail,        /* Vex-provided address translation failed */
      Ijk_InvalICache,    /* Inval icache for range [CMSTART, +CMLEN) */
      Ijk_FlushDCache,    /* Flush dcache for range [CMSTART, +CMLEN) */
      Ijk_NoRedir,        /* Jump to un-redirected guest addr */
      Ijk_SigILL,         /* current instruction synths SIGILL */
      Ijk_SigTRAP,        /* current instruction synths SIGTRAP */
      Ijk_SigSEGV,        /* current instruction synths SIGSEGV */
      Ijk_SigBUS,         /* current instruction synths SIGBUS */
      Ijk_SigFPE_IntDiv,  /* current instruction synths SIGFPE - IntDiv */
      Ijk_SigFPE_IntOvf,  /* current instruction synths SIGFPE - IntOvf */
      /* Unfortunately, various guest-dependent syscall kinds.  They
         all mean: do a syscall before continuing. */
      Ijk_Sys_syscall,    /* amd64/x86 'syscall', ppc 'sc', arm 'svc #0' */
      Ijk_Sys_int32,      /* amd64/x86 'int $0x20' */
      Ijk_Sys_int128,     /* amd64/x86 'int $0x80' */
      Ijk_Sys_int129,     /* amd64/x86 'int $0x81' */
      Ijk_Sys_int130,     /* amd64/x86 'int $0x82' */
      Ijk_Sys_sysenter    /* x86 'sysenter'.  guest_EIP becomes
                             invalid at the point this happens. */
   }
   IRJumpKind;

/* Pretty-print an IRJumpKind. */
extern void ppIRJumpKind ( IRJumpKind );


/* ------------------ Dirty helper calls ------------------ */

/* A dirty call is a flexible mechanism for calling (possibly
   conditionally) a helper function or procedure.
The helper function 2145 may read, write or modify client memory, and may read, write or 2146 modify client state. It can take arguments and optionally return a 2147 value. It may return different results and/or do different things 2148 when called repeatedly with the same arguments, by means of storing 2149 private state. 2150 2151 If a value is returned, it is assigned to the nominated return 2152 temporary. 2153 2154 Dirty calls are statements rather than expressions for obvious 2155 reasons. If a dirty call is marked as writing guest state, any 2156 pre-existing values derived from the written parts of the guest 2157 state are invalid. Similarly, if the dirty call is stated as 2158 writing memory, any pre-existing loaded values are invalidated by 2159 it. 2160 2161 In order that instrumentation is possible, the call must state, and 2162 state correctly: 2163 2164 * Whether it reads, writes or modifies memory, and if so where. 2165 2166 * Whether it reads, writes or modifies guest state, and if so which 2167 pieces. Several pieces may be stated, and their extents must be 2168 known at translation-time. Each piece is allowed to repeat some 2169 number of times at a fixed interval, if required. 2170 2171 Normally, code is generated to pass just the args to the helper. 2172 However, if IRExpr_BBPTR() is present in the argument list (at most 2173 one instance is allowed), then the baseblock pointer is passed for 2174 that arg, so that the callee can access the guest state. It is 2175 invalid for .nFxState to be zero but IRExpr_BBPTR() to be present, 2176 since .nFxState==0 is a claim that the call does not access guest 2177 state. 2178 2179 IMPORTANT NOTE re GUARDS: Dirty calls are strict, very strict. The 2180 arguments and 'mFx' are evaluated REGARDLESS of the guard value. 2181 The order of argument evaluation is unspecified. The guard 2182 expression is evaluated AFTER the arguments and 'mFx' have been 2183 evaluated. 
'mFx' is expected (by Memcheck) to be a defined value 2184 even if the guard evaluates to false. 2185*/ 2186 2187#define VEX_N_FXSTATE 7 /* enough for FXSAVE/FXRSTOR on x86 */ 2188 2189/* Effects on resources (eg. registers, memory locations) */ 2190typedef 2191 enum { 2192 Ifx_None=0x1B00, /* no effect */ 2193 Ifx_Read, /* reads the resource */ 2194 Ifx_Write, /* writes the resource */ 2195 Ifx_Modify, /* modifies the resource */ 2196 } 2197 IREffect; 2198 2199/* Pretty-print an IREffect */ 2200extern void ppIREffect ( IREffect ); 2201 2202typedef 2203 struct _IRDirty { 2204 /* What to call, and details of args/results. .guard must be 2205 non-NULL. If .tmp is not IRTemp_INVALID, then the call 2206 returns a result which is placed in .tmp. If at runtime the 2207 guard evaluates to false, .tmp has an 0x555..555 bit pattern 2208 written to it. Hence conditional calls that assign .tmp are 2209 allowed. */ 2210 IRCallee* cee; /* where to call */ 2211 IRExpr* guard; /* :: Ity_Bit. Controls whether call happens */ 2212 /* The args vector may contain IRExpr_BBPTR() and/or 2213 IRExpr_VECRET(), in both cases, at most once. */ 2214 IRExpr** args; /* arg vector, ends in NULL. */ 2215 IRTemp tmp; /* to assign result to, or IRTemp_INVALID if none */ 2216 2217 /* Mem effects; we allow only one R/W/M region to be stated */ 2218 IREffect mFx; /* indicates memory effects, if any */ 2219 IRExpr* mAddr; /* of access, or NULL if mFx==Ifx_None */ 2220 Int mSize; /* of access, or zero if mFx==Ifx_None */ 2221 2222 /* Guest state effects; up to N allowed */ 2223 Int nFxState; /* must be 0 .. VEX_N_FXSTATE */ 2224 struct { 2225 IREffect fx:16; /* read, write or modify? Ifx_None is invalid. */ 2226 UShort offset; 2227 UShort size; 2228 UChar nRepeats; 2229 UChar repeatLen; 2230 } fxState[VEX_N_FXSTATE]; 2231 /* The access can be repeated, as specified by nRepeats and 2232 repeatLen. To describe only a single access, nRepeats and 2233 repeatLen should be zero. 
Otherwise, repeatLen must be a 2234 multiple of size and greater than size. */ 2235 /* Overall, the parts of the guest state denoted by (offset, 2236 size, nRepeats, repeatLen) is 2237 [offset, +size) 2238 and, if nRepeats > 0, 2239 for (i = 1; i <= nRepeats; i++) 2240 [offset + i * repeatLen, +size) 2241 A convenient way to enumerate all segments is therefore 2242 for (i = 0; i < 1 + nRepeats; i++) 2243 [offset + i * repeatLen, +size) 2244 */ 2245 } 2246 IRDirty; 2247 2248/* Pretty-print a dirty call */ 2249extern void ppIRDirty ( IRDirty* ); 2250 2251/* Allocate an uninitialised dirty call */ 2252extern IRDirty* emptyIRDirty ( void ); 2253 2254/* Deep-copy a dirty call */ 2255extern IRDirty* deepCopyIRDirty ( IRDirty* ); 2256 2257/* A handy function which takes some of the tedium out of constructing 2258 dirty helper calls. The called function impliedly does not return 2259 any value and has a constant-True guard. The call is marked as 2260 accessing neither guest state nor memory (hence the "unsafe" 2261 designation) -- you can change this marking later if need be. A 2262 suitable IRCallee is constructed from the supplied bits. */ 2263extern 2264IRDirty* unsafeIRDirty_0_N ( Int regparms, const HChar* name, void* addr, 2265 IRExpr** args ); 2266 2267/* Similarly, make a zero-annotation dirty call which returns a value, 2268 and assign that to the given temp. */ 2269extern 2270IRDirty* unsafeIRDirty_1_N ( IRTemp dst, 2271 Int regparms, const HChar* name, void* addr, 2272 IRExpr** args ); 2273 2274 2275/* --------------- Memory Bus Events --------------- */ 2276 2277typedef 2278 enum { 2279 Imbe_Fence=0x1C00, 2280 /* Needed only on ARM. It cancels a reservation made by a 2281 preceding Linked-Load, and needs to be handed through to the 2282 back end, just as LL and SC themselves are. 
*/ 2283 Imbe_CancelReservation 2284 } 2285 IRMBusEvent; 2286 2287extern void ppIRMBusEvent ( IRMBusEvent ); 2288 2289 2290/* --------------- Compare and Swap --------------- */ 2291 2292/* This denotes an atomic compare and swap operation, either 2293 a single-element one or a double-element one. 2294 2295 In the single-element case: 2296 2297 .addr is the memory address. 2298 .end is the endianness with which memory is accessed 2299 2300 If .addr contains the same value as .expdLo, then .dataLo is 2301 written there, else there is no write. In both cases, the 2302 original value at .addr is copied into .oldLo. 2303 2304 Types: .expdLo, .dataLo and .oldLo must all have the same type. 2305 It may be any integral type, viz: I8, I16, I32 or, for 64-bit 2306 guests, I64. 2307 2308 .oldHi must be IRTemp_INVALID, and .expdHi and .dataHi must 2309 be NULL. 2310 2311 In the double-element case: 2312 2313 .addr is the memory address. 2314 .end is the endianness with which memory is accessed 2315 2316 The operation is the same: 2317 2318 If .addr contains the same value as .expdHi:.expdLo, then 2319 .dataHi:.dataLo is written there, else there is no write. In 2320 both cases the original value at .addr is copied into 2321 .oldHi:.oldLo. 2322 2323 Types: .expdHi, .expdLo, .dataHi, .dataLo, .oldHi, .oldLo must 2324 all have the same type, which may be any integral type, viz: I8, 2325 I16, I32 or, for 64-bit guests, I64. 2326 2327 The double-element case is complicated by the issue of 2328 endianness. In all cases, the two elements are understood to be 2329 located adjacently in memory, starting at the address .addr. 2330 2331 If .end is Iend_LE, then the .xxxLo component is at the lower 2332 address and the .xxxHi component is at the higher address, and 2333 each component is itself stored little-endianly. 

   If .end is Iend_BE, then the .xxxHi component is at the lower
   address and the .xxxLo component is at the higher address, and
   each component is itself stored big-endianly.

   This allows representing more cases than most architectures can
   handle.  For example, x86 cannot do DCAS on 8- or 16-bit elements.

   How to know if the CAS succeeded?

   * if .oldLo == .expdLo (resp. .oldHi:.oldLo == .expdHi:.expdLo),
     then the CAS succeeded, .dataLo (resp. .dataHi:.dataLo) is now
     stored at .addr, and the original value there was .oldLo (resp
     .oldHi:.oldLo).

   * if .oldLo != .expdLo (resp. .oldHi:.oldLo != .expdHi:.expdLo),
     then the CAS failed, and the original value at .addr was .oldLo
     (resp. .oldHi:.oldLo).

   Hence it is easy to know whether or not the CAS succeeded.
*/
typedef
   struct {
      IRTemp    oldHi;  /* old value of *addr is written here */
      IRTemp    oldLo;
      IREndness end;    /* endianness of the data in memory */
      IRExpr*   addr;   /* store address */
      IRExpr*   expdHi; /* expected old value at *addr */
      IRExpr*   expdLo;
      IRExpr*   dataHi; /* new value for *addr */
      IRExpr*   dataLo;
   }
   IRCAS;

/* Pretty-print an IRCAS. */
extern void ppIRCAS ( IRCAS* cas );

/* Construct an IRCAS from the given fields. */
extern IRCAS* mkIRCAS ( IRTemp oldHi, IRTemp oldLo,
                        IREndness end, IRExpr* addr,
                        IRExpr* expdHi, IRExpr* expdLo,
                        IRExpr* dataHi, IRExpr* dataLo );

/* Deep-copy an IRCAS. */
extern IRCAS* deepCopyIRCAS ( IRCAS* );


/* ------------------ Circular Array Put ------------------ */

typedef
   struct {
      IRRegArray* descr;  /* Part of guest state treated as circular */
      IRExpr*     ix;     /* Variable part of index into array */
      Int         bias;   /* Constant offset part of index into array */
      IRExpr*     data;   /* The value to write */
   } IRPutI;

/* Pretty-print an IRPutI. */
extern void ppIRPutI ( IRPutI* puti );

/* Construct an IRPutI from the given fields. */
extern IRPutI* mkIRPutI ( IRRegArray* descr, IRExpr* ix,
                          Int bias, IRExpr* data );

/* Deep-copy an IRPutI. */
extern IRPutI* deepCopyIRPutI ( IRPutI* );


/* --------------- Guarded loads and stores --------------- */

/* Conditional stores are straightforward.  They are the same as
   normal stores, with an extra 'guard' field :: Ity_I1 that
   determines whether or not the store actually happens.  If not,
   memory is unmodified.

   The semantics of this is that 'addr' and 'data' are fully evaluated
   even in the case where 'guard' evaluates to zero (false).
*/
typedef
   struct {
      IREndness end;    /* Endianness of the store */
      IRExpr*   addr;   /* store address */
      IRExpr*   data;   /* value to write */
      IRExpr*   guard;  /* Guarding value */
   }
   IRStoreG;

/* Conditional loads are a little more complex.  'addr' is the
   address, 'guard' is the guarding condition.  If the load takes
   place, the loaded value is placed in 'dst'.  If it does not take
   place, 'alt' is copied to 'dst'.  However, the loaded value is not
   placed directly in 'dst' -- it is first subjected to the conversion
   specified by 'cvt'.

   For example, imagine doing a conditional 8-bit load, in which the
   loaded value is zero extended to 32 bits.  Hence:
   * 'dst' and 'alt' must have type I32
   * 'cvt' must be a unary op which converts I8 to I32.  In this
     example, it would be ILGop_8Uto32.

   There is no explicit indication of the type at which the load is
   done, since that is inferable from the arg type of 'cvt'.  Note
   that the types of 'alt' and 'dst' and the result type of 'cvt' must
   all be the same.

   Semantically, 'addr' is evaluated even in the case where 'guard'
   evaluates to zero (false), and 'alt' is evaluated even when 'guard'
   evaluates to one (true).  That is, 'addr' and 'alt' are always
   evaluated.
*/
typedef
   enum {
      ILGop_INVALID=0x1D00,
      ILGop_Ident32,   /* 32 bit, no conversion */
      ILGop_16Uto32,   /* 16 bit load, Z-widen to 32 */
      ILGop_16Sto32,   /* 16 bit load, S-widen to 32 */
      ILGop_8Uto32,    /* 8 bit load, Z-widen to 32 */
      ILGop_8Sto32     /* 8 bit load, S-widen to 32 */
   }
   IRLoadGOp;

typedef
   struct {
      IREndness end;    /* Endianness of the load */
      IRLoadGOp cvt;    /* Conversion to apply to the loaded value */
      IRTemp    dst;    /* Destination (LHS) of assignment */
      IRExpr*   addr;   /* Address being loaded from */
      IRExpr*   alt;    /* Value if load is not done. */
      IRExpr*   guard;  /* Guarding value */
   }
   IRLoadG;

/* Pretty-print an IRStoreG. */
extern void ppIRStoreG ( IRStoreG* sg );

/* Pretty-print an IRLoadGOp. */
extern void ppIRLoadGOp ( IRLoadGOp cvt );

/* Pretty-print an IRLoadG. */
extern void ppIRLoadG ( IRLoadG* lg );

/* Construct an IRStoreG from the given fields. */
extern IRStoreG* mkIRStoreG ( IREndness end,
                              IRExpr* addr, IRExpr* data,
                              IRExpr* guard );

/* Construct an IRLoadG from the given fields. */
extern IRLoadG* mkIRLoadG ( IREndness end, IRLoadGOp cvt,
                            IRTemp dst, IRExpr* addr, IRExpr* alt,
                            IRExpr* guard );


/* ------------------ Statements ------------------ */

/* The different kinds of statements.  Their meaning is explained
   below in the comments for IRStmt.

   Those marked META do not represent code, but rather extra
   information about the code.  These statements can be removed
   without affecting the functional behaviour of the code, however
   they are required by some IR consumers such as tools that
   instrument the code.
*/

typedef
   enum {
      Ist_NoOp=0x1E00,
      Ist_IMark,   /* META */
      Ist_AbiHint, /* META */
      Ist_Put,
      Ist_PutI,
      Ist_WrTmp,
      Ist_Store,
      Ist_LoadG,
      Ist_StoreG,
      Ist_CAS,
      Ist_LLSC,
      Ist_Dirty,
      Ist_MBE,
      Ist_Exit
   }
   IRStmtTag;

/* A statement.  Stored as a tagged union.  'tag' indicates what kind
   of statement this is.
'Ist' is the union that holds the fields. 2508 If an IRStmt 'st' has st.tag equal to Iex_Store, then it's a store 2509 statement, and the fields can be accessed with 2510 'st.Ist.Store.<fieldname>'. 2511 2512 For each kind of statement, we show what it looks like when 2513 pretty-printed with ppIRStmt(). 2514*/ 2515typedef 2516 struct _IRStmt { 2517 IRStmtTag tag; 2518 union { 2519 /* A no-op (usually resulting from IR optimisation). Can be 2520 omitted without any effect. 2521 2522 ppIRStmt output: IR-NoOp 2523 */ 2524 struct { 2525 } NoOp; 2526 2527 /* META: instruction mark. Marks the start of the statements 2528 that represent a single machine instruction (the end of 2529 those statements is marked by the next IMark or the end of 2530 the IRSB). Contains the address and length of the 2531 instruction. 2532 2533 It also contains a delta value. The delta must be 2534 subtracted from a guest program counter value before 2535 attempting to establish, by comparison with the address 2536 and length values, whether or not that program counter 2537 value refers to this instruction. For x86, amd64, ppc32, 2538 ppc64 and arm, the delta value is zero. For Thumb 2539 instructions, the delta value is one. This is because, on 2540 Thumb, guest PC values (guest_R15T) are encoded using the 2541 top 31 bits of the instruction address and a 1 in the lsb; 2542 hence they appear to be (numerically) 1 past the start of 2543 the instruction they refer to. IOW, guest_R15T on ARM 2544 holds a standard ARM interworking address. 2545 2546 ppIRStmt output: ------ IMark(<addr>, <len>, <delta>) ------, 2547 eg. ------ IMark(0x4000792, 5, 0) ------, 2548 */ 2549 struct { 2550 Addr64 addr; /* instruction address */ 2551 Int len; /* instruction length */ 2552 UChar delta; /* addr = program counter as encoded in guest state 2553 - delta */ 2554 } IMark; 2555 2556 /* META: An ABI hint, which says something about this 2557 platform's ABI. 
2558 2559 At the moment, the only AbiHint is one which indicates 2560 that a given chunk of address space, [base .. base+len-1], 2561 has become undefined. This is used on amd64-linux and 2562 some ppc variants to pass stack-redzoning hints to whoever 2563 wants to see them. It also indicates the address of the 2564 next (dynamic) instruction that will be executed. This is 2565 to help Memcheck to origin tracking. 2566 2567 ppIRStmt output: ====== AbiHint(<base>, <len>, <nia>) ====== 2568 eg. ====== AbiHint(t1, 16, t2) ====== 2569 */ 2570 struct { 2571 IRExpr* base; /* Start of undefined chunk */ 2572 Int len; /* Length of undefined chunk */ 2573 IRExpr* nia; /* Address of next (guest) insn */ 2574 } AbiHint; 2575 2576 /* Write a guest register, at a fixed offset in the guest state. 2577 ppIRStmt output: PUT(<offset>) = <data>, eg. PUT(60) = t1 2578 */ 2579 struct { 2580 Int offset; /* Offset into the guest state */ 2581 IRExpr* data; /* The value to write */ 2582 } Put; 2583 2584 /* Write a guest register, at a non-fixed offset in the guest 2585 state. See the comment for GetI expressions for more 2586 information. 2587 2588 ppIRStmt output: PUTI<descr>[<ix>,<bias>] = <data>, 2589 eg. PUTI(64:8xF64)[t5,0] = t1 2590 */ 2591 struct { 2592 IRPutI* details; 2593 } PutI; 2594 2595 /* Assign a value to a temporary. Note that SSA rules require 2596 each tmp is only assigned to once. IR sanity checking will 2597 reject any block containing a temporary which is not assigned 2598 to exactly once. 2599 2600 ppIRStmt output: t<tmp> = <data>, eg. t1 = 3 2601 */ 2602 struct { 2603 IRTemp tmp; /* Temporary (LHS of assignment) */ 2604 IRExpr* data; /* Expression (RHS of assignment) */ 2605 } WrTmp; 2606 2607 /* Write a value to memory. This is a normal store, not a 2608 Store-Conditional. To represent a Store-Conditional, 2609 instead use IRStmt.LLSC. 2610 ppIRStmt output: ST<end>(<addr>) = <data>, eg. 
STle(t1) = t2 2611 */ 2612 struct { 2613 IREndness end; /* Endianness of the store */ 2614 IRExpr* addr; /* store address */ 2615 IRExpr* data; /* value to write */ 2616 } Store; 2617 2618 /* Guarded store. Note that this is defined to evaluate all 2619 expression fields (addr, data) even if the guard evaluates 2620 to false. 2621 ppIRStmt output: 2622 if (<guard>) ST<end>(<addr>) = <data> */ 2623 struct { 2624 IRStoreG* details; 2625 } StoreG; 2626 2627 /* Guarded load. Note that this is defined to evaluate all 2628 expression fields (addr, alt) even if the guard evaluates 2629 to false. 2630 ppIRStmt output: 2631 t<tmp> = if (<guard>) <cvt>(LD<end>(<addr>)) else <alt> */ 2632 struct { 2633 IRLoadG* details; 2634 } LoadG; 2635 2636 /* Do an atomic compare-and-swap operation. Semantics are 2637 described above on a comment at the definition of IRCAS. 2638 2639 ppIRStmt output: 2640 t<tmp> = CAS<end>(<addr> :: <expected> -> <new>) 2641 eg 2642 t1 = CASle(t2 :: t3->Add32(t3,1)) 2643 which denotes a 32-bit atomic increment 2644 of a value at address t2 2645 2646 A double-element CAS may also be denoted, in which case <tmp>, 2647 <expected> and <new> are all pairs of items, separated by 2648 commas. 2649 */ 2650 struct { 2651 IRCAS* details; 2652 } CAS; 2653 2654 /* Either Load-Linked or Store-Conditional, depending on 2655 STOREDATA. 2656 2657 If STOREDATA is NULL then this is a Load-Linked, meaning 2658 that data is loaded from memory as normal, but a 2659 'reservation' for the address is also lodged in the 2660 hardware. 2661 2662 result = Load-Linked(addr, end) 2663 2664 The data transfer type is the type of RESULT (I32, I64, 2665 etc). ppIRStmt output: 2666 2667 result = LD<end>-Linked(<addr>), eg. LDbe-Linked(t1) 2668 2669 If STOREDATA is not NULL then this is a Store-Conditional, 2670 hence: 2671 2672 result = Store-Conditional(addr, storedata, end) 2673 2674 The data transfer type is the type of STOREDATA and RESULT 2675 has type Ity_I1. 
The store may fail or succeed depending 2676 on the state of a previously lodged reservation on this 2677 address. RESULT is written 1 if the store succeeds and 0 2678 if it fails. eg ppIRStmt output: 2679 2680 result = ( ST<end>-Cond(<addr>) = <storedata> ) 2681 eg t3 = ( STbe-Cond(t1, t2) ) 2682 2683 In all cases, the address must be naturally aligned for 2684 the transfer type -- any misaligned addresses should be 2685 caught by a dominating IR check and side exit. This 2686 alignment restriction exists because on at least some 2687 LL/SC platforms (ppc), stwcx. etc will trap w/ SIGBUS on 2688 misaligned addresses, and we have to actually generate 2689 stwcx. on the host, and we don't want it trapping on the 2690 host. 2691 2692 Summary of rules for transfer type: 2693 STOREDATA == NULL (LL): 2694 transfer type = type of RESULT 2695 STOREDATA != NULL (SC): 2696 transfer type = type of STOREDATA, and RESULT :: Ity_I1 2697 */ 2698 struct { 2699 IREndness end; 2700 IRTemp result; 2701 IRExpr* addr; 2702 IRExpr* storedata; /* NULL => LL, non-NULL => SC */ 2703 } LLSC; 2704 2705 /* Call (possibly conditionally) a C function that has side 2706 effects (ie. is "dirty"). See the comments above the 2707 IRDirty type declaration for more information. 2708 2709 ppIRStmt output: 2710 t<tmp> = DIRTY <guard> <effects> 2711 ::: <callee>(<args>) 2712 eg. 2713 t1 = DIRTY t27 RdFX-gst(16,4) RdFX-gst(60,4) 2714 ::: foo{0x380035f4}(t2) 2715 */ 2716 struct { 2717 IRDirty* details; 2718 } Dirty; 2719 2720 /* A memory bus event - a fence, or acquisition/release of the 2721 hardware bus lock. IR optimisation treats all these as fences 2722 across which no memory references may be moved. 2723 ppIRStmt output: MBusEvent-Fence, 2724 MBusEvent-BusLock, MBusEvent-BusUnlock. 2725 */ 2726 struct { 2727 IRMBusEvent event; 2728 } MBE; 2729 2730 /* Conditional exit from the middle of an IRSB. 2731 ppIRStmt output: if (<guard>) goto {<jk>} <dst> 2732 eg. 
            if (t69) goto {Boring} 0x4000AAA:I32
         If <guard> is true, the guest state is also updated by
         PUT-ing <dst> at <offsIP>.  This is done because a
         taken exit must update the guest program counter.
      */
      struct {
         IRExpr*    guard;    /* Conditional expression */
         IRConst*   dst;      /* Jump target (constant only) */
         IRJumpKind jk;       /* Jump kind */
         Int        offsIP;   /* Guest state offset for IP */
      } Exit;
   } Ist;
}
IRStmt;

/* Statement constructors.  Each builds an IRStmt carrying the given
   fields, with the tag corresponding to the constructor's name.
   NB: in IRStmt_Exit the argument order (jk before dst) differs from
   the field order in the Exit struct above (dst before jk). */
extern IRStmt* IRStmt_NoOp    ( void );
extern IRStmt* IRStmt_IMark   ( Addr64 addr, Int len, UChar delta );
extern IRStmt* IRStmt_AbiHint ( IRExpr* base, Int len, IRExpr* nia );
extern IRStmt* IRStmt_Put     ( Int off, IRExpr* data );
extern IRStmt* IRStmt_PutI    ( IRPutI* details );
extern IRStmt* IRStmt_WrTmp   ( IRTemp tmp, IRExpr* data );
extern IRStmt* IRStmt_Store   ( IREndness end, IRExpr* addr, IRExpr* data );
extern IRStmt* IRStmt_StoreG  ( IREndness end, IRExpr* addr, IRExpr* data,
                                IRExpr* guard );
extern IRStmt* IRStmt_LoadG   ( IREndness end, IRLoadGOp cvt, IRTemp dst,
                                IRExpr* addr, IRExpr* alt, IRExpr* guard );
extern IRStmt* IRStmt_CAS     ( IRCAS* details );
extern IRStmt* IRStmt_LLSC    ( IREndness end, IRTemp result,
                                IRExpr* addr, IRExpr* storedata );
extern IRStmt* IRStmt_Dirty   ( IRDirty* details );
extern IRStmt* IRStmt_MBE     ( IRMBusEvent event );
extern IRStmt* IRStmt_Exit    ( IRExpr* guard, IRJumpKind jk, IRConst* dst,
                                Int offsIP );

/* Deep-copy an IRStmt. */
extern IRStmt* deepCopyIRStmt ( IRStmt* );

/* Pretty-print an IRStmt. */
extern void ppIRStmt ( IRStmt* );


/* ------------------ Basic Blocks ------------------ */

/* Type environments: a bunch of statements, expressions, etc, are
   incomplete without an environment indicating the type of each
   IRTemp.  So this provides one.
   IR temporaries are really just
   unsigned ints and so this provides an array, 0 .. n_types_used-1 of
   them.
*/
typedef
   struct {
      IRType* types;        /* Array of types, one per IRTemp, indices
                               0 .. types_used-1 */
      Int     types_size;   /* Allocated capacity of 'types' */
      Int     types_used;   /* Number of entries currently in use */
   }
   IRTypeEnv;

/* Obtain a new IRTemp */
extern IRTemp newIRTemp ( IRTypeEnv*, IRType );

/* Deep-copy a type environment */
extern IRTypeEnv* deepCopyIRTypeEnv ( IRTypeEnv* );

/* Pretty-print a type environment */
extern void ppIRTypeEnv ( IRTypeEnv* );


/* Code blocks, which in proper compiler terminology are superblocks
   (single entry, multiple exit code sequences) contain:

   - A table giving a type for each temp (the "type environment")
   - An expandable array of statements
   - An expression of type 32 or 64 bits, depending on the
     guest's word size, indicating the next destination if the block
     executes all the way to the end, without a side exit
   - An indication of any special actions (JumpKind) needed
     for this final jump.
   - Offset of the IP field in the guest state.  This will be
     updated before the final jump is done.

   "IRSB" stands for "IR Super Block".
*/
typedef
   struct {
      IRTypeEnv* tyenv;      /* Types of all IRTemps used in this block */
      IRStmt**   stmts;      /* Expandable array of statements */
      Int        stmts_size; /* Allocated capacity of 'stmts' */
      Int        stmts_used; /* Number of statements currently in use */
      IRExpr*    next;       /* Destination if the block runs off the end
                                (guest-word sized, per the comment above) */
      IRJumpKind jumpkind;   /* Special action (if any) for the final jump */
      Int        offsIP;     /* Offset of the IP field in the guest state */
   }
   IRSB;

/* Allocate a new, uninitialised IRSB */
extern IRSB* emptyIRSB ( void );

/* Deep-copy an IRSB */
extern IRSB* deepCopyIRSB ( IRSB* );

/* Deep-copy an IRSB, except for the statements list, which is set to be
   a new, empty, list of statements.
 */
extern IRSB* deepCopyIRSBExceptStmts ( IRSB* );

/* Pretty-print an IRSB */
extern void ppIRSB ( IRSB* );

/* Append an IRStmt to an IRSB */
extern void addStmtToIRSB ( IRSB*, IRStmt* );


/*---------------------------------------------------------------*/
/*--- Helper functions for the IR                             ---*/
/*---------------------------------------------------------------*/

/* For messing with IR type environments */
extern IRTypeEnv* emptyIRTypeEnv ( void );

/* What is the type of this expression? */
extern IRType typeOfIRConst ( IRConst* );
extern IRType typeOfIRTemp  ( IRTypeEnv*, IRTemp );
extern IRType typeOfIRExpr  ( IRTypeEnv*, IRExpr* );

/* What are the arg and result type for this IRLoadGOp? */
extern void typeOfIRLoadGOp ( IRLoadGOp cvt,
                              /*OUT*/IRType* t_res,
                              /*OUT*/IRType* t_arg );

/* Sanity check a BB of IR */
extern void sanityCheckIRSB ( IRSB*  bb,
                              const HChar* caller,
                              Bool require_flatness,
                              IRType guest_word_size );
extern Bool isFlatIRStmt ( IRStmt* );

/* Is this any value actually in the enumeration 'IRType' ? */
extern Bool isPlausibleIRType ( IRType ty );


/*---------------------------------------------------------------*/
/*--- IR injection                                            ---*/
/*---------------------------------------------------------------*/

/* NOTE(review): presumably injects IR into the given IRSB using the
   given endianness -- see the defining translation unit for the exact
   semantics; they are not documented here. */
void vex_inject_ir(IRSB *, IREndness);


#endif /* ndef __LIBVEX_IR_H */

/*---------------------------------------------------------------*/
/*--- libvex_ir.h                                             ---*/
/*---------------------------------------------------------------*/