JIT.h revision 231d4e3152a9c27a73b6ac7badbe6be673aa3ddf
/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef JIT_h
#define JIT_h

#include <wtf/Platform.h>

#if ENABLE(JIT)

// We've run into some problems where changing the size of the class JIT leads to
// performance fluctuations. Try forcing alignment in an attempt to stabilize this.
#if COMPILER(GCC)
#define JIT_CLASS_ALIGNMENT __attribute__ ((aligned (32)))
#else
#define JIT_CLASS_ALIGNMENT
#endif

#include "CodeBlock.h"
#include "Interpreter.h"
#include "JITCode.h"
#include "JITStubs.h"
#include "Opcode.h"
#include "RegisterFile.h"
#include "MacroAssembler.h"
#include "Profiler.h"
#include <bytecode/SamplingTool.h>
#include <wtf/AlwaysInline.h>
#include <wtf/Vector.h>

namespace JSC {

    class CodeBlock;
    class JIT;
    class JSPropertyNameIterator;
    class Interpreter;
    class Register;
    class RegisterFile;
    class ScopeChainNode;
    class StructureChain;

    struct CallLinkInfo;
    struct Instruction;
    struct OperandTypes;
    struct PolymorphicAccessStructureList;
    struct SimpleJumpTable;
    struct StringJumpTable;
    struct StructureStubInfo;

    // Records a call planted in generated code: the call site, the bytecode
    // index it was emitted for, and (optionally) the address to link it to.
    struct CallRecord {
        MacroAssembler::Call from;
        unsigned bytecodeIndex;
        void* to;

        CallRecord()
        {
        }

        CallRecord(MacroAssembler::Call from, unsigned bytecodeIndex, void* to = 0)
            : from(from)
            , bytecodeIndex(bytecodeIndex)
            , to(to)
        {
        }
    };

    // A jump emitted in generated code whose destination is expressed as a
    // bytecode index, to be resolved to a machine-code location later.
    struct JumpTable {
        MacroAssembler::Jump from;
        unsigned toBytecodeIndex;

        JumpTable(MacroAssembler::Jump f, unsigned t)
            : from(f)
            , toBytecodeIndex(t)
        {
        }
    };

    // A jump from a fast path into its slow case; 'to' is the bytecode index
    // of the slow case, 'hint' is optional extra information for linking.
    struct SlowCaseEntry {
        MacroAssembler::Jump from;
        unsigned to;
        unsigned hint;

        SlowCaseEntry(MacroAssembler::Jump f, unsigned t, unsigned h = 0)
            : from(f)
            , to(t)
            , hint(h)
        {
        }
    };

    // Describes a switch's jump table (immediate, character, or string
    // flavour) along with its bytecode index and default-target offset.
    struct SwitchRecord {
        enum Type {
            Immediate,
            Character,
            String
        };

        Type type;

        union {
            SimpleJumpTable* simpleJumpTable;
            StringJumpTable* stringJumpTable;
        } jumpTable;

        unsigned bytecodeIndex;
        unsigned defaultOffset;

        SwitchRecord(SimpleJumpTable* jumpTable, unsigned bytecodeIndex, unsigned defaultOffset, Type type)
            : type(type)
            , bytecodeIndex(bytecodeIndex)
            , defaultOffset(defaultOffset)
        {
            this->jumpTable.simpleJumpTable = jumpTable;
        }

        SwitchRecord(StringJumpTable* jumpTable, unsigned bytecodeIndex, unsigned defaultOffset)
            : type(String)
            , bytecodeIndex(bytecodeIndex)
            , defaultOffset(defaultOffset)
        {
            this->jumpTable.stringJumpTable = jumpTable;
        }
    };

    // Locations recorded while compiling a property-access stub.
    struct PropertyStubCompilationInfo {
        MacroAssembler::Call callReturnLocation;
        MacroAssembler::Label hotPathBegin;
    };

    // Locations recorded while compiling a structure-check stub.
    struct StructureStubCompilationInfo {
        MacroAssembler::DataLabelPtr hotPathBegin;
        MacroAssembler::Call hotPathOther;
        MacroAssembler::Call callReturnLocation;
    };

    // Links a method-check's structure comparison site to the index of the
    // property access it guards.
    struct MethodCallCompilationInfo {
        MethodCallCompilationInfo(unsigned propertyAccessIndex)
            : propertyAccessIndex(propertyAccessIndex)
        {
        }

        MacroAssembler::DataLabelPtr structureToCompare;
        unsigned propertyAccessIndex;
    };

    // Near calls can only be patched to other JIT code, regular calls can be patched to JIT code or relinked to stub functions.
    void ctiPatchNearCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction);
    void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction);
    void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction);

    class JIT : private MacroAssembler {
        friend class JITStubCall;

        using MacroAssembler::Jump;
        using MacroAssembler::JumpList;
        using MacroAssembler::Label;

        // NOTES:
        //
        // regT0 has two special meanings. The return value from a stub
        // call will always be in regT0, and by default (unless
        // a register is specified) emitPutVirtualRegister() will store
        // the value from regT0.
        //
        // regT3 is required to be callee-preserved.
        //
        // tempRegister2 has no such dependencies.
        // It is important that
        // on x86/x86-64 it is ecx for performance reasons, since the
        // MacroAssembler will need to plant register swaps if it is not -
        // however the code will still function correctly.

        // Per-platform register assignments used by the code generators below.
#if PLATFORM(X86_64)
        static const RegisterID returnValueRegister = X86Registers::eax;
        static const RegisterID cachedResultRegister = X86Registers::eax;
        static const RegisterID firstArgumentRegister = X86Registers::edi;

        static const RegisterID timeoutCheckRegister = X86Registers::r12;
        static const RegisterID callFrameRegister = X86Registers::r13;
        static const RegisterID tagTypeNumberRegister = X86Registers::r14;
        static const RegisterID tagMaskRegister = X86Registers::r15;

        static const RegisterID regT0 = X86Registers::eax;
        static const RegisterID regT1 = X86Registers::edx;
        static const RegisterID regT2 = X86Registers::ecx;
        static const RegisterID regT3 = X86Registers::ebx;

        static const FPRegisterID fpRegT0 = X86Registers::xmm0;
        static const FPRegisterID fpRegT1 = X86Registers::xmm1;
        static const FPRegisterID fpRegT2 = X86Registers::xmm2;
#elif PLATFORM(X86)
        static const RegisterID returnValueRegister = X86Registers::eax;
        static const RegisterID cachedResultRegister = X86Registers::eax;
        // On x86 we always use fastcall conventions - but on
        // OS X it might make more sense to just use regparm.
        static const RegisterID firstArgumentRegister = X86Registers::ecx;

        static const RegisterID timeoutCheckRegister = X86Registers::esi;
        static const RegisterID callFrameRegister = X86Registers::edi;

        static const RegisterID regT0 = X86Registers::eax;
        static const RegisterID regT1 = X86Registers::edx;
        static const RegisterID regT2 = X86Registers::ecx;
        static const RegisterID regT3 = X86Registers::ebx;

        static const FPRegisterID fpRegT0 = X86Registers::xmm0;
        static const FPRegisterID fpRegT1 = X86Registers::xmm1;
        static const FPRegisterID fpRegT2 = X86Registers::xmm2;
#elif PLATFORM(ARM_THUMB2)
        static const RegisterID returnValueRegister = ARMRegisters::r0;
        static const RegisterID cachedResultRegister = ARMRegisters::r0;
        static const RegisterID firstArgumentRegister = ARMRegisters::r0;

        static const RegisterID regT0 = ARMRegisters::r0;
        static const RegisterID regT1 = ARMRegisters::r1;
        static const RegisterID regT2 = ARMRegisters::r2;
        static const RegisterID regT3 = ARMRegisters::r4;

        static const RegisterID callFrameRegister = ARMRegisters::r5;
        static const RegisterID timeoutCheckRegister = ARMRegisters::r6;

        static const FPRegisterID fpRegT0 = ARMRegisters::d0;
        static const FPRegisterID fpRegT1 = ARMRegisters::d1;
        static const FPRegisterID fpRegT2 = ARMRegisters::d2;
#elif PLATFORM(ARM_TRADITIONAL)
        static const RegisterID returnValueRegister = ARMRegisters::r0;
        static const RegisterID cachedResultRegister = ARMRegisters::r0;
        static const RegisterID firstArgumentRegister = ARMRegisters::r0;

        static const RegisterID timeoutCheckRegister = ARMRegisters::r5;
        static const RegisterID callFrameRegister = ARMRegisters::r4;
        static const RegisterID ctiReturnRegister = ARMRegisters::r6;

        static const RegisterID regT0 = ARMRegisters::r0;
        static const RegisterID regT1 = ARMRegisters::r1;
        static const RegisterID regT2 = ARMRegisters::r2;
        // Callee preserved
        static const RegisterID regT3 = ARMRegisters::r7;

        static const RegisterID regS0 = ARMRegisters::S0;
        // Callee preserved
        static const RegisterID regS1 = ARMRegisters::S1;

        static const RegisterID regStackPtr = ARMRegisters::sp;
        static const RegisterID regLink = ARMRegisters::lr;

        static const FPRegisterID fpRegT0 = ARMRegisters::d0;
        static const FPRegisterID fpRegT1 = ARMRegisters::d1;
        static const FPRegisterID fpRegT2 = ARMRegisters::d2;
#else
    #error "JIT not supported on this platform."
#endif

        static const int patchGetByIdDefaultStructure = -1;
        // Magic number - initial offset cannot be representable as a signed 8bit value, or the X86Assembler
        // will compress the displacement, and we may not be able to fit a patched offset.
        static const int patchGetByIdDefaultOffset = 256;

    public:
        // Compiles the given CodeBlock and returns the resulting JITCode.
        static JITCode compile(JSGlobalData* globalData, CodeBlock* codeBlock)
        {
            return JIT(globalData, codeBlock).privateCompile();
        }

        // The following static helpers each construct a transient JIT and
        // forward to the corresponding privateCompile* stub generator.
        static void compileGetByIdProto(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, ReturnAddressPtr returnAddress)
        {
            JIT jit(globalData, codeBlock);
            jit.privateCompileGetByIdProto(stubInfo, structure, prototypeStructure, cachedOffset, returnAddress, callFrame);
        }

        static void compileGetByIdSelfList(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, size_t cachedOffset)
        {
            JIT jit(globalData, codeBlock);
            jit.privateCompileGetByIdSelfList(stubInfo, polymorphicStructures, currentIndex, structure, cachedOffset);
        }
        static void compileGetByIdProtoList(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructureList, int currentIndex, Structure* structure, Structure* prototypeStructure, size_t cachedOffset)
        {
            JIT jit(globalData, codeBlock);
            jit.privateCompileGetByIdProtoList(stubInfo, prototypeStructureList, currentIndex, structure, prototypeStructure, cachedOffset, callFrame);
        }
        static void compileGetByIdChainList(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructureList, int currentIndex, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset)
        {
            JIT jit(globalData, codeBlock);
            jit.privateCompileGetByIdChainList(stubInfo, prototypeStructureList, currentIndex, structure, chain, count, cachedOffset, callFrame);
        }

        static void compileGetByIdChain(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, ReturnAddressPtr returnAddress)
        {
            JIT jit(globalData, codeBlock);
            jit.privateCompileGetByIdChain(stubInfo, structure, chain, count, cachedOffset, returnAddress, callFrame);
        }

        static void compilePutByIdTransition(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress)
        {
            JIT jit(globalData, codeBlock);
            jit.privateCompilePutByIdTransition(stubInfo, oldStructure, newStructure, cachedOffset, chain, returnAddress);
        }

        static void compileCTIMachineTrampolines(JSGlobalData* globalData, RefPtr<ExecutablePool>* executablePool, CodePtr* ctiStringLengthTrampoline, CodePtr* ctiVirtualCallLink, CodePtr* ctiVirtualCall, CodePtr* ctiNativeCallThunk)
        {
            JIT jit(globalData);
            jit.privateCompileCTIMachineTrampolines(executablePool, globalData, ctiStringLengthTrampoline, ctiVirtualCallLink, ctiVirtualCall, ctiNativeCallThunk);
        }

        static void patchGetByIdSelf(CodeBlock* codeblock, StructureStubInfo*, Structure*, size_t cachedOffset, ReturnAddressPtr returnAddress);
        static void patchPutByIdReplace(CodeBlock* codeblock, StructureStubInfo*, Structure*, size_t cachedOffset, ReturnAddressPtr returnAddress);
        static void patchMethodCallProto(CodeBlock* codeblock, MethodCallLinkInfo&, JSFunction*, Structure*, JSObject*, ReturnAddressPtr);

        static void compilePatchGetArrayLength(JSGlobalData* globalData, CodeBlock* codeBlock, ReturnAddressPtr returnAddress)
        {
            JIT jit(globalData, codeBlock);
            return jit.privateCompilePatchGetArrayLength(returnAddress);
        }

        static void linkCall(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JITCode&, CallLinkInfo*, int callerArgCount, JSGlobalData*);
        static void unlinkCall(CallLinkInfo*);

    private:
        // Pairs a jsr's store site with its target label for later linking.
        struct JSRInfo {
            DataLabelPtr storeLocation;
            Label target;

            JSRInfo(DataLabelPtr storeLocation, Label targetLocation)
                : storeLocation(storeLocation)
                , target(targetLocation)
            {
            }
        };

        JIT(JSGlobalData*, CodeBlock* = 0);

        void privateCompileMainPass();
        void privateCompileLinkPass();
        void privateCompileSlowCases();
        JITCode privateCompile();
        void privateCompileGetByIdProto(StructureStubInfo*, Structure*, Structure* prototypeStructure, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame);
        void privateCompileGetByIdSelfList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, size_t cachedOffset);
        void privateCompileGetByIdProtoList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame);
        void privateCompileGetByIdChainList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame);
        void privateCompileGetByIdChain(StructureStubInfo*, Structure*, StructureChain*, size_t count, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame);
        void privateCompilePutByIdTransition(StructureStubInfo*, Structure*, Structure*, size_t cachedOffset, StructureChain*, ReturnAddressPtr returnAddress);

        void privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, JSGlobalData* data, CodePtr* ctiStringLengthTrampoline, CodePtr* ctiVirtualCallLink, CodePtr* ctiVirtualCall, CodePtr* ctiNativeCallThunk);
        void privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress);

        void addSlowCase(Jump);
        void addSlowCase(JumpList);
        void addJump(Jump, int);
        void emitJumpSlowToHot(Jump, int);

        void compileOpCall(OpcodeID, Instruction* instruction, unsigned callLinkInfoIndex);
        void compileOpCallVarargs(Instruction* instruction);
        void compileOpCallInitializeCallFrame();
        void compileOpCallSetupArgs(Instruction*);
        void compileOpCallVarargsSetupArgs(Instruction*);
        void compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex, OpcodeID opcodeID);
        void compileOpCallVarargsSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter);
        void compileOpConstructSetupArgs(Instruction*);

        enum CompileOpStrictEqType { OpStrictEq, OpNStrictEq };
        void compileOpStrictEq(Instruction* instruction, CompileOpStrictEqType type);
        bool isOperandConstantImmediateDouble(unsigned src);

        void emitLoadDouble(unsigned index, FPRegisterID value);
        void emitLoadInt32ToDouble(unsigned index, FPRegisterID value);

        Address addressFor(unsigned index, RegisterID base = callFrameRegister);

#if USE(JSVALUE32_64)
        Address tagFor(unsigned index, RegisterID base = callFrameRegister);
        Address payloadFor(unsigned index, RegisterID base = callFrameRegister);

        bool getOperandConstantImmediateInt(unsigned op1, unsigned op2, unsigned& op, int32_t& constant);

        void emitLoadTag(unsigned index, RegisterID tag);
        void emitLoadPayload(unsigned index, RegisterID payload);

        void emitLoad(const JSValue& v, RegisterID tag, RegisterID payload);
        void emitLoad(unsigned index, RegisterID tag, RegisterID payload, RegisterID base = callFrameRegister);
        void emitLoad2(unsigned index1, RegisterID tag1, RegisterID payload1, unsigned index2, RegisterID tag2, RegisterID payload2);

        void emitStore(unsigned index, RegisterID tag, RegisterID payload, RegisterID base = callFrameRegister);
        void emitStore(unsigned index, const JSValue constant, RegisterID base = callFrameRegister);
        void emitStoreInt32(unsigned index, RegisterID payload, bool indexIsInt32 = false);
        void emitStoreInt32(unsigned index, Imm32 payload, bool indexIsInt32 = false);
        void emitStoreCell(unsigned index, RegisterID payload, bool indexIsCell = false);
        void emitStoreBool(unsigned index, RegisterID tag, bool indexIsBool = false);
        void emitStoreDouble(unsigned index, FPRegisterID value);

        bool isLabeled(unsigned bytecodeIndex);
        void map(unsigned bytecodeIndex, unsigned virtualRegisterIndex, RegisterID tag, RegisterID payload);
        void unmap(RegisterID);
        void unmap();
        bool isMapped(unsigned virtualRegisterIndex);
        bool getMappedPayload(unsigned virtualRegisterIndex, RegisterID& payload);
        bool getMappedTag(unsigned virtualRegisterIndex, RegisterID& tag);

        void emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex);
        void emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex, RegisterID tag);
        void linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator&, unsigned virtualRegisterIndex);

#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
        void compileGetByIdHotPath();
        void compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck = false);
#endif
        void compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, Structure* structure, size_t cachedOffset);
        void compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID resultTag, RegisterID resultPayload, size_t cachedOffset);
        void compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, Structure* structure, size_t cachedOffset);

        // Arithmetic opcode helpers
        void emitAdd32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType);
        void emitSub32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType);
        void emitBinaryDoubleOp(OpcodeID, unsigned dst, unsigned op1, unsigned op2, OperandTypes, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters = true, bool op2IsInRegisters = true);

#if PLATFORM(X86)
        // These architecture specific values are used to enable patching - see comment on op_put_by_id.
        static const int patchOffsetPutByIdStructure = 7;
        static const int patchOffsetPutByIdExternalLoad = 13;
        static const int patchLengthPutByIdExternalLoad = 3;
        static const int patchOffsetPutByIdPropertyMapOffset1 = 22;
        static const int patchOffsetPutByIdPropertyMapOffset2 = 28;
        // These architecture specific values are used to enable patching - see comment on op_get_by_id.
        static const int patchOffsetGetByIdStructure = 7;
        static const int patchOffsetGetByIdBranchToSlowCase = 13;
        static const int patchOffsetGetByIdExternalLoad = 13;
        static const int patchLengthGetByIdExternalLoad = 3;
        static const int patchOffsetGetByIdPropertyMapOffset1 = 22;
        static const int patchOffsetGetByIdPropertyMapOffset2 = 28;
        static const int patchOffsetGetByIdPutResult = 28;
#if ENABLE(OPCODE_SAMPLING) && USE(JIT_STUB_ARGUMENT_VA_LIST)
        static const int patchOffsetGetByIdSlowCaseCall = 35;
#elif ENABLE(OPCODE_SAMPLING)
        static const int patchOffsetGetByIdSlowCaseCall = 37;
#elif USE(JIT_STUB_ARGUMENT_VA_LIST)
        static const int patchOffsetGetByIdSlowCaseCall = 25;
#else
        static const int patchOffsetGetByIdSlowCaseCall = 27;
#endif
        static const int patchOffsetOpCallCompareToJump = 6;

        static const int patchOffsetMethodCheckProtoObj = 11;
        static const int patchOffsetMethodCheckProtoStruct = 18;
        static const int patchOffsetMethodCheckPutFunction = 29;
#else
#error "JSVALUE32_64 not supported on this platform."
#endif

#else // USE(JSVALUE32_64)
        void emitGetVirtualRegister(int src, RegisterID dst);
        void emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2);
        void emitPutVirtualRegister(unsigned dst, RegisterID from = regT0);

        int32_t getConstantOperandImmediateInt(unsigned src);

        void emitGetVariableObjectRegister(RegisterID variableObject, int index, RegisterID dst);
        void emitPutVariableObjectRegister(RegisterID src, RegisterID variableObject, int index);

        void killLastResultRegister();

        Jump emitJumpIfJSCell(RegisterID);
        Jump emitJumpIfBothJSCells(RegisterID, RegisterID, RegisterID);
        void emitJumpSlowCaseIfJSCell(RegisterID);
        Jump emitJumpIfNotJSCell(RegisterID);
        void emitJumpSlowCaseIfNotJSCell(RegisterID);
        void emitJumpSlowCaseIfNotJSCell(RegisterID, int VReg);
#if USE(JSVALUE64)
        JIT::Jump emitJumpIfImmediateNumber(RegisterID);
        JIT::Jump emitJumpIfNotImmediateNumber(RegisterID);
#else
        // Without JSVALUE64, the only immediate numbers are immediate integers.
        JIT::Jump emitJumpIfImmediateNumber(RegisterID reg)
        {
            return emitJumpIfImmediateInteger(reg);
        }

        JIT::Jump emitJumpIfNotImmediateNumber(RegisterID reg)
        {
            return emitJumpIfNotImmediateInteger(reg);
        }
#endif
        JIT::Jump emitJumpIfImmediateInteger(RegisterID);
        JIT::Jump emitJumpIfNotImmediateInteger(RegisterID);
        JIT::Jump emitJumpIfNotImmediateIntegers(RegisterID, RegisterID, RegisterID);
        void emitJumpSlowCaseIfNotImmediateInteger(RegisterID);
        void emitJumpSlowCaseIfNotImmediateNumber(RegisterID);
        void emitJumpSlowCaseIfNotImmediateIntegers(RegisterID, RegisterID, RegisterID);

#if !USE(JSVALUE64)
        void emitFastArithDeTagImmediate(RegisterID);
        Jump emitFastArithDeTagImmediateJumpIfZero(RegisterID);
#endif
        void emitFastArithReTagImmediate(RegisterID src, RegisterID dest);
        void emitFastArithImmToInt(RegisterID);
        void emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest);

        void emitTagAsBoolImmediate(RegisterID reg);
        void compileBinaryArithOp(OpcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes opi);
#if USE(JSVALUE64)
        void compileBinaryArithOpSlowCase(OpcodeID, Vector<SlowCaseEntry>::iterator&, unsigned dst, unsigned src1, unsigned src2, OperandTypes, bool op1HasImmediateIntFastCase, bool op2HasImmediateIntFastCase);
#else
        void compileBinaryArithOpSlowCase(OpcodeID, Vector<SlowCaseEntry>::iterator&, unsigned dst, unsigned src1, unsigned src2, OperandTypes);
#endif

#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
        void compileGetByIdHotPath(int resultVReg, int baseVReg, Identifier* ident, unsigned propertyAccessInstructionIndex);
        void compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck = false);
#endif
        void compileGetDirectOffset(RegisterID base, RegisterID result, Structure* structure, size_t cachedOffset);
        void compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID result, size_t cachedOffset);
        void compilePutDirectOffset(RegisterID base, RegisterID value, Structure* structure, size_t cachedOffset);

#if PLATFORM(X86_64)
        // These architecture specific values are used to enable patching - see comment on op_put_by_id.
        static const int patchOffsetPutByIdStructure = 10;
        static const int patchOffsetPutByIdExternalLoad = 20;
        static const int patchLengthPutByIdExternalLoad = 4;
        static const int patchOffsetPutByIdPropertyMapOffset = 31;
        // These architecture specific values are used to enable patching - see comment on op_get_by_id.
        static const int patchOffsetGetByIdStructure = 10;
        static const int patchOffsetGetByIdBranchToSlowCase = 20;
        static const int patchOffsetGetByIdExternalLoad = 20;
        static const int patchLengthGetByIdExternalLoad = 4;
        static const int patchOffsetGetByIdPropertyMapOffset = 31;
        static const int patchOffsetGetByIdPutResult = 31;
#if ENABLE(OPCODE_SAMPLING)
        static const int patchOffsetGetByIdSlowCaseCall = 64;
#else
        static const int patchOffsetGetByIdSlowCaseCall = 41;
#endif
        static const int patchOffsetOpCallCompareToJump = 9;

        static const int patchOffsetMethodCheckProtoObj = 20;
        static const int patchOffsetMethodCheckProtoStruct = 30;
        static const int patchOffsetMethodCheckPutFunction = 50;
#elif PLATFORM(X86)
        // These architecture specific values are used to enable patching - see comment on op_put_by_id.
        static const int patchOffsetPutByIdStructure = 7;
        static const int patchOffsetPutByIdExternalLoad = 13;
        static const int patchLengthPutByIdExternalLoad = 3;
        static const int patchOffsetPutByIdPropertyMapOffset = 22;
        // These architecture specific values are used to enable patching - see comment on op_get_by_id.
        static const int patchOffsetGetByIdStructure = 7;
        static const int patchOffsetGetByIdBranchToSlowCase = 13;
        static const int patchOffsetGetByIdExternalLoad = 13;
        static const int patchLengthGetByIdExternalLoad = 3;
        static const int patchOffsetGetByIdPropertyMapOffset = 22;
        static const int patchOffsetGetByIdPutResult = 22;
#if ENABLE(OPCODE_SAMPLING) && USE(JIT_STUB_ARGUMENT_VA_LIST)
        static const int patchOffsetGetByIdSlowCaseCall = 31;
#elif ENABLE(OPCODE_SAMPLING)
        static const int patchOffsetGetByIdSlowCaseCall = 33;
#elif USE(JIT_STUB_ARGUMENT_VA_LIST)
        static const int patchOffsetGetByIdSlowCaseCall = 21;
#else
        static const int patchOffsetGetByIdSlowCaseCall = 23;
#endif
        static const int patchOffsetOpCallCompareToJump = 6;

        static const int patchOffsetMethodCheckProtoObj = 11;
        static const int patchOffsetMethodCheckProtoStruct = 18;
        static const int patchOffsetMethodCheckPutFunction = 29;
#elif PLATFORM(ARM_THUMB2)
        // These architecture specific values are used to enable patching - see comment on op_put_by_id.
        static const int patchOffsetPutByIdStructure = 10;
        static const int patchOffsetPutByIdExternalLoad = 20;
        static const int patchLengthPutByIdExternalLoad = 12;
        static const int patchOffsetPutByIdPropertyMapOffset = 40;
        // These architecture specific values are used to enable patching - see comment on op_get_by_id.
        static const int patchOffsetGetByIdStructure = 10;
        static const int patchOffsetGetByIdBranchToSlowCase = 20;
        static const int patchOffsetGetByIdExternalLoad = 20;
        static const int patchLengthGetByIdExternalLoad = 12;
        static const int patchOffsetGetByIdPropertyMapOffset = 40;
        static const int patchOffsetGetByIdPutResult = 44;
#if ENABLE(OPCODE_SAMPLING)
        static const int patchOffsetGetByIdSlowCaseCall = 0; // FIXME
#else
        static const int patchOffsetGetByIdSlowCaseCall = 28;
#endif
        static const int patchOffsetOpCallCompareToJump = 10;

        static const int patchOffsetMethodCheckProtoObj = 18;
        static const int patchOffsetMethodCheckProtoStruct = 28;
        static const int patchOffsetMethodCheckPutFunction = 46;
#elif PLATFORM(ARM_TRADITIONAL)
        // These architecture specific values are used to enable patching - see comment on op_put_by_id.
        static const int patchOffsetPutByIdStructure = 4;
        static const int patchOffsetPutByIdExternalLoad = 16;
        static const int patchLengthPutByIdExternalLoad = 4;
        static const int patchOffsetPutByIdPropertyMapOffset = 20;
        // These architecture specific values are used to enable patching - see comment on op_get_by_id.
        static const int patchOffsetGetByIdStructure = 4;
        static const int patchOffsetGetByIdBranchToSlowCase = 16;
        static const int patchOffsetGetByIdExternalLoad = 16;
        static const int patchLengthGetByIdExternalLoad = 4;
        static const int patchOffsetGetByIdPropertyMapOffset = 20;
        static const int patchOffsetGetByIdPutResult = 28;
#if ENABLE(OPCODE_SAMPLING)
        #error "OPCODE_SAMPLING is not yet supported"
#else
        static const int patchOffsetGetByIdSlowCaseCall = 36;
#endif
        static const int patchOffsetOpCallCompareToJump = 12;

        static const int patchOffsetMethodCheckProtoObj = 12;
        static const int patchOffsetMethodCheckProtoStruct = 20;
        static const int patchOffsetMethodCheckPutFunction = 32;
#endif
#endif // USE(JSVALUE32_64)

#if PLATFORM(ARM_TRADITIONAL)
        // Instruction/constant-pool space reserved for uninterrupted sequences.
        // sequenceOpCall
        static const int sequenceOpCallInstructionSpace = 12;
        static const int sequenceOpCallConstantSpace = 2;
        // sequenceMethodCheck
        static const int sequenceMethodCheckInstructionSpace = 40;
        static const int sequenceMethodCheckConstantSpace = 6;
        // sequenceGetByIdHotPath
        static const int sequenceGetByIdHotPathInstructionSpace = 28;
        static const int sequenceGetByIdHotPathConstantSpace = 3;
        // sequenceGetByIdSlowCase
        static const int sequenceGetByIdSlowCaseInstructionSpace = 40;
        static const int sequenceGetByIdSlowCaseConstantSpace = 2;
        // sequencePutById
        static const int sequencePutByIdInstructionSpace = 28;
        static const int sequencePutByIdConstantSpace = 3;
#endif

#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
#define BEGIN_UNINTERRUPTED_SEQUENCE(name) beginUninterruptedSequence(name ## InstructionSpace, name ## ConstantSpace)
#define END_UNINTERRUPTED_SEQUENCE(name) endUninterruptedSequence(name ## InstructionSpace, name ## ConstantSpace)

        void beginUninterruptedSequence(int, int);
        void endUninterruptedSequence(int, int);

#else
#define BEGIN_UNINTERRUPTED_SEQUENCE(name)
#define END_UNINTERRUPTED_SEQUENCE(name)
#endif

        // Code generators, one per bytecode opcode.
        void emit_op_add(Instruction*);
        void emit_op_bitand(Instruction*);
        void emit_op_bitnot(Instruction*);
        void emit_op_bitor(Instruction*);
        void emit_op_bitxor(Instruction*);
        void emit_op_call(Instruction*);
        void emit_op_call_eval(Instruction*);
        void emit_op_call_varargs(Instruction*);
        void emit_op_catch(Instruction*);
        void emit_op_construct(Instruction*);
        void emit_op_construct_verify(Instruction*);
        void emit_op_convert_this(Instruction*);
        void emit_op_create_arguments(Instruction*);
        void emit_op_debug(Instruction*);
        void emit_op_del_by_id(Instruction*);
        void emit_op_div(Instruction*);
        void emit_op_end(Instruction*);
        void emit_op_enter(Instruction*);
        void emit_op_enter_with_activation(Instruction*);
        void emit_op_eq(Instruction*);
        void emit_op_eq_null(Instruction*);
        void emit_op_get_by_id(Instruction*);
        void emit_op_get_by_val(Instruction*);
        void emit_op_get_global_var(Instruction*);
        void emit_op_get_scoped_var(Instruction*);
        void emit_op_init_arguments(Instruction*);
        void emit_op_instanceof(Instruction*);
        void emit_op_jeq_null(Instruction*);
        void emit_op_jfalse(Instruction*);
        void emit_op_jmp(Instruction*);
        void emit_op_jmp_scopes(Instruction*);
        void emit_op_jneq_null(Instruction*);
        void emit_op_jneq_ptr(Instruction*);
        void emit_op_jnless(Instruction*);
        void emit_op_jnlesseq(Instruction*);
        void emit_op_jsr(Instruction*);
        void emit_op_jtrue(Instruction*);
        void emit_op_load_varargs(Instruction*);
        void emit_op_loop(Instruction*);
        void emit_op_loop_if_less(Instruction*);
        void emit_op_loop_if_lesseq(Instruction*);
        void emit_op_loop_if_true(Instruction*);
        void emit_op_lshift(Instruction*);
        void emit_op_method_check(Instruction*);
        void emit_op_mod(Instruction*);
        void emit_op_mov(Instruction*);
        void emit_op_mul(Instruction*);
        void emit_op_negate(Instruction*);
        void emit_op_neq(Instruction*);
        void emit_op_neq_null(Instruction*);
        void emit_op_new_array(Instruction*);
        void emit_op_new_error(Instruction*);
        void emit_op_new_func(Instruction*);
        void emit_op_new_func_exp(Instruction*);
        void emit_op_new_object(Instruction*);
        void emit_op_new_regexp(Instruction*);
        void emit_op_next_pname(Instruction*);
        void emit_op_not(Instruction*);
        void emit_op_nstricteq(Instruction*);
        void emit_op_pop_scope(Instruction*);
        void emit_op_post_dec(Instruction*);
        void emit_op_post_inc(Instruction*);
        void emit_op_pre_dec(Instruction*);
        void emit_op_pre_inc(Instruction*);
        void emit_op_profile_did_call(Instruction*);
        void emit_op_profile_will_call(Instruction*);
        void emit_op_push_new_scope(Instruction*);
        void emit_op_push_scope(Instruction*);
        void emit_op_put_by_id(Instruction*);
        void emit_op_put_by_index(Instruction*);
        void emit_op_put_by_val(Instruction*);
        void emit_op_put_getter(Instruction*);
        void emit_op_put_global_var(Instruction*);
        void emit_op_put_scoped_var(Instruction*);
        void emit_op_put_setter(Instruction*);
        void emit_op_resolve(Instruction*);
        void emit_op_resolve_base(Instruction*);
        void emit_op_resolve_global(Instruction*);
        void emit_op_resolve_skip(Instruction*);
        void emit_op_resolve_with_base(Instruction*);
        void emit_op_ret(Instruction*);
        void emit_op_rshift(Instruction*);
        void emit_op_sret(Instruction*);
        void emit_op_strcat(Instruction*);
        void emit_op_stricteq(Instruction*);
        void emit_op_sub(Instruction*);
        void emit_op_switch_char(Instruction*);
        void emit_op_switch_imm(Instruction*);
        void emit_op_switch_string(Instruction*);
        void emit_op_tear_off_activation(Instruction*);
        void emit_op_tear_off_arguments(Instruction*);
        void emit_op_throw(Instruction*);
        void emit_op_to_jsnumber(Instruction*);
        void emit_op_to_primitive(Instruction*);
        void emit_op_unexpected_load(Instruction*);

        // Slow-case counterparts; each consumes the SlowCaseEntry items its
        // emit_op_* recorded via the supplied iterator.
        void emitSlow_op_add(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_bitand(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_bitnot(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_bitor(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_bitxor(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_call(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_call_eval(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_call_varargs(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_construct(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_construct_verify(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_convert_this(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_div(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_eq(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_get_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_instanceof(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_jfalse(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_jnless(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_jnlesseq(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_jtrue(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_loop_if_less(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_loop_if_lesseq(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_loop_if_true(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_lshift(Instruction*,
Vector<SlowCaseEntry>::iterator&); 780 void emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&); 781 void emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&); 782 void emitSlow_op_mul(Instruction*, Vector<SlowCaseEntry>::iterator&); 783 void emitSlow_op_negate(Instruction*, Vector<SlowCaseEntry>::iterator&); 784 void emitSlow_op_neq(Instruction*, Vector<SlowCaseEntry>::iterator&); 785 void emitSlow_op_not(Instruction*, Vector<SlowCaseEntry>::iterator&); 786 void emitSlow_op_nstricteq(Instruction*, Vector<SlowCaseEntry>::iterator&); 787 void emitSlow_op_post_dec(Instruction*, Vector<SlowCaseEntry>::iterator&); 788 void emitSlow_op_post_inc(Instruction*, Vector<SlowCaseEntry>::iterator&); 789 void emitSlow_op_pre_dec(Instruction*, Vector<SlowCaseEntry>::iterator&); 790 void emitSlow_op_pre_inc(Instruction*, Vector<SlowCaseEntry>::iterator&); 791 void emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&); 792 void emitSlow_op_put_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&); 793 void emitSlow_op_resolve_global(Instruction*, Vector<SlowCaseEntry>::iterator&); 794 void emitSlow_op_rshift(Instruction*, Vector<SlowCaseEntry>::iterator&); 795 void emitSlow_op_stricteq(Instruction*, Vector<SlowCaseEntry>::iterator&); 796 void emitSlow_op_sub(Instruction*, Vector<SlowCaseEntry>::iterator&); 797 void emitSlow_op_to_jsnumber(Instruction*, Vector<SlowCaseEntry>::iterator&); 798 void emitSlow_op_to_primitive(Instruction*, Vector<SlowCaseEntry>::iterator&); 799 800 /* These functions are deprecated: Please use JITStubCall instead. 
*/ 801 void emitPutJITStubArg(RegisterID src, unsigned argumentNumber); 802#if USE(JSVALUE32_64) 803 void emitPutJITStubArg(RegisterID tag, RegisterID payload, unsigned argumentNumber); 804 void emitPutJITStubArgFromVirtualRegister(unsigned src, unsigned argumentNumber, RegisterID scratch1, RegisterID scratch2); 805#else 806 void emitPutJITStubArgFromVirtualRegister(unsigned src, unsigned argumentNumber, RegisterID scratch); 807#endif 808 void emitPutJITStubArgConstant(unsigned value, unsigned argumentNumber); 809 void emitPutJITStubArgConstant(void* value, unsigned argumentNumber); 810 void emitGetJITStubArg(unsigned argumentNumber, RegisterID dst); 811 812 void emitInitRegister(unsigned dst); 813 814 void emitPutToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry); 815 void emitPutImmediateToCallFrameHeader(void* value, RegisterFile::CallFrameHeaderEntry entry); 816 void emitGetFromCallFrameHeaderPtr(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from = callFrameRegister); 817 void emitGetFromCallFrameHeader32(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from = callFrameRegister); 818 819 JSValue getConstantOperand(unsigned src); 820 bool isOperandConstantImmediateInt(unsigned src); 821 822 Jump getSlowCase(Vector<SlowCaseEntry>::iterator& iter) 823 { 824 return iter++->from; 825 } 826 void linkSlowCase(Vector<SlowCaseEntry>::iterator& iter) 827 { 828 iter->from.link(this); 829 ++iter; 830 } 831 void linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator&, int vReg); 832 833 Jump checkStructure(RegisterID reg, Structure* structure); 834 835 void restoreArgumentReference(); 836 void restoreArgumentReferenceForTrampoline(); 837 838 Call emitNakedCall(CodePtr function = CodePtr()); 839 840 void preserveReturnAddressAfterCall(RegisterID); 841 void restoreReturnAddressBeforeReturn(RegisterID); 842 void restoreReturnAddressBeforeReturn(Address); 843 844 void emitTimeoutCheck(); 845#ifndef NDEBUG 846 
void printBytecodeOperandTypes(unsigned src1, unsigned src2); 847#endif 848 849#if ENABLE(SAMPLING_FLAGS) 850 void setSamplingFlag(int32_t); 851 void clearSamplingFlag(int32_t); 852#endif 853 854#if ENABLE(SAMPLING_COUNTERS) 855 void emitCount(AbstractSamplingCounter&, uint32_t = 1); 856#endif 857 858#if ENABLE(OPCODE_SAMPLING) 859 void sampleInstruction(Instruction*, bool = false); 860#endif 861 862#if ENABLE(CODEBLOCK_SAMPLING) 863 void sampleCodeBlock(CodeBlock*); 864#else 865 void sampleCodeBlock(CodeBlock*) {} 866#endif 867 868 Interpreter* m_interpreter; 869 JSGlobalData* m_globalData; 870 CodeBlock* m_codeBlock; 871 872 Vector<CallRecord> m_calls; 873 Vector<Label> m_labels; 874 Vector<PropertyStubCompilationInfo> m_propertyAccessCompilationInfo; 875 Vector<StructureStubCompilationInfo> m_callStructureStubCompilationInfo; 876 Vector<MethodCallCompilationInfo> m_methodCallCompilationInfo; 877 Vector<JumpTable> m_jmpTable; 878 879 unsigned m_bytecodeIndex; 880 Vector<JSRInfo> m_jsrSites; 881 Vector<SlowCaseEntry> m_slowCases; 882 Vector<SwitchRecord> m_switches; 883 884 unsigned m_propertyAccessInstructionIndex; 885 unsigned m_globalResolveInfoIndex; 886 unsigned m_callLinkInfoIndex; 887 888#if USE(JSVALUE32_64) 889 unsigned m_jumpTargetIndex; 890 unsigned m_mappedBytecodeIndex; 891 unsigned m_mappedVirtualRegisterIndex; 892 RegisterID m_mappedTag; 893 RegisterID m_mappedPayload; 894#else 895 int m_lastResultBytecodeRegister; 896 unsigned m_jumpTargetsPosition; 897#endif 898 899#ifndef NDEBUG 900#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL 901 Label m_uninterruptedInstructionSequenceBegin; 902 int m_uninterruptedConstantSequenceBegin; 903#endif 904#endif 905 } JIT_CLASS_ALIGNMENT; 906} // namespace JSC 907 908#endif // ENABLE(JIT) 909 910#endif // JIT_h 911