// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_X64

#include "src/codegen.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/interface-descriptors.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)

static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
                       Code::Flags flags, StubCache::Table table,
                       Register receiver, Register name,
                       // The offset is scaled by 4, based on
                       // kCacheIndexShift, which is two bits.
                       Register offset) {
  // We need to scale up the pointer by 2 when the offset is scaled by less
  // than the pointer size.
  DCHECK(kPointerSize == kInt64Size
             ? kPointerSizeLog2 == StubCache::kCacheIndexShift + 1
             : kPointerSizeLog2 == StubCache::kCacheIndexShift);
  ScaleFactor scale_factor = kPointerSize == kInt64Size ? times_2 : times_1;

  DCHECK_EQ(3u * kPointerSize, sizeof(StubCache::Entry));
  // The offset register holds the entry offset times four (due to masking
  // and shifting optimizations).
  ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
  ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
  Label miss;

  // Multiply by 3 because there are 3 fields per entry (name, code, map).
  __ leap(offset, Operand(offset, offset, times_2, 0));

  __ LoadAddress(kScratchRegister, key_offset);

  // Check that the key in the entry matches the name.
  __ cmpp(name, Operand(kScratchRegister, offset, scale_factor, 0));
  __ j(not_equal, &miss);

  // Get the map entry from the cache.
  // Use key_offset + kPointerSize * 2, rather than loading map_offset.
  DCHECK(isolate->stub_cache()->map_reference(table).address() -
             isolate->stub_cache()->key_reference(table).address() ==
         kPointerSize * 2);
  __ movp(kScratchRegister,
          Operand(kScratchRegister, offset, scale_factor, kPointerSize * 2));
  __ cmpp(kScratchRegister, FieldOperand(receiver, HeapObject::kMapOffset));
  __ j(not_equal, &miss);

  // Get the code entry from the cache.
  __ LoadAddress(kScratchRegister, value_offset);
  __ movp(kScratchRegister, Operand(kScratchRegister, offset, scale_factor, 0));

  // Check that the flags match what we're looking for.
  __ movl(offset, FieldOperand(kScratchRegister, Code::kFlagsOffset));
  __ andp(offset, Immediate(~Code::kFlagsNotUsedInLookup));
  __ cmpl(offset, Immediate(flags));
  __ j(not_equal, &miss);

#ifdef DEBUG
  if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
    __ jmp(&miss);
  } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
    __ jmp(&miss);
  }
#endif

  // Jump to the first instruction in the code stub.
  __ addp(kScratchRegister, Immediate(Code::kHeaderSize - kHeapObjectTag));
  __ jmp(kScratchRegister);

  __ bind(&miss);
}

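// Probes the primary table and, on a miss there, the secondary table. The
// two probe offsets computed below are, in essence (a sketch for
// illustration; Low32Bits is a hypothetical helper, not a V8 API):
//
//   primary   = ((name->hash_field() + Low32Bits(map)) ^ flags)
//               & ((kPrimaryTableSize - 1) << kCacheIndexShift);
//   secondary = ((primary - Low32Bits(name)) + flags)
//               & ((kSecondaryTableSize - 1) << kCacheIndexShift);
//
// A hit in either table jumps to the cached code object; a miss in both
// tables falls through so the caller can enter the runtime.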
void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
                              Code::Flags flags, Register receiver,
                              Register name, Register scratch, Register extra,
                              Register extra2, Register extra3) {
  Isolate* isolate = masm->isolate();
  Label miss;
  USE(extra);   // The register extra is not used on the X64 platform.
  USE(extra2);  // The register extra2 is not used on the X64 platform.
  USE(extra3);  // The register extra3 is not used on the X64 platform.
  // Make sure that the code is valid. The multiplying code relies on the
  // entry size being 3 * kPointerSize.
  DCHECK(sizeof(Entry) == 3 * kPointerSize);

  // Make sure that there are no register conflicts.
  DCHECK(!scratch.is(receiver));
  DCHECK(!scratch.is(name));

  // Check that the scratch register is valid and that extra2 and extra3 are
  // unused.
  DCHECK(!scratch.is(no_reg));
  DCHECK(extra2.is(no_reg));
  DCHECK(extra3.is(no_reg));

#ifdef DEBUG
  // If vector-based ICs are in use, ensure that scratch doesn't conflict with
  // the vector and slot registers, which need to be preserved for a handler
  // call or miss.
  if (IC::ICUseVector(ic_kind)) {
    if (ic_kind == Code::LOAD_IC || ic_kind == Code::LOAD_GLOBAL_IC ||
        ic_kind == Code::KEYED_LOAD_IC) {
      Register vector = LoadWithVectorDescriptor::VectorRegister();
      Register slot = LoadDescriptor::SlotRegister();
      DCHECK(!AreAliased(vector, slot, scratch));
    } else {
      DCHECK(ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC);
      Register vector = VectorStoreICDescriptor::VectorRegister();
      Register slot = VectorStoreICDescriptor::SlotRegister();
      DCHECK(!AreAliased(vector, slot, scratch));
    }
  }
#endif

  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);

  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, &miss);

  // Get the map of the receiver and compute the hash.
  __ movl(scratch, FieldOperand(name, Name::kHashFieldOffset));
  // Use only the low 32 bits of the map pointer.
  __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
  __ xorp(scratch, Immediate(flags));
  // We mask out the last two bits because they are not part of the hash and
  // they are always 01 for maps. Also in the two 'and' instructions below.
  __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kCacheIndexShift));

  // Probe the primary table.
  ProbeTable(isolate, masm, flags, kPrimary, receiver, name, scratch);

  // Primary miss: Compute hash for secondary probe.
  __ movl(scratch, FieldOperand(name, Name::kHashFieldOffset));
  __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
  __ xorp(scratch, Immediate(flags));
  __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kCacheIndexShift));
  __ subl(scratch, name);
  __ addl(scratch, Immediate(flags));
  __ andp(scratch, Immediate((kSecondaryTableSize - 1) << kCacheIndexShift));

  // Probe the secondary table.
  ProbeTable(isolate, masm, flags, kSecondary, receiver, name, scratch);

  // Cache miss: Fall through and let the caller handle the miss by
  // entering the runtime system.
  __ bind(&miss);
  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
}


#undef __
}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_X64