codegen-arm.cc revision 592a9fc1d8ea420377a2e7efd0600e20b058be2b
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_ARM)

#include "codegen.h"
#include "macro-assembler.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)

// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  ASSERT(!masm->has_frame());
  masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  ASSERT(masm->has_frame());
  masm->set_has_frame(false);
}


// -------------------------------------------------------------------------
// Code generators

void ElementsTransitionGenerator::GenerateSmiOnlyToObject(
    MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r0    : value
  //  -- r1    : key
  //  -- r2    : receiver
  //  -- lr    : return address
  //  -- r3    : target map, scratch for subsequent call
  //  -- r4    : scratch (elements)
  // -----------------------------------
  // Set transitioned map.
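  // Storing the new map makes it reachable from the receiver, so the
  // write barrier (RecordWriteField) must notify the store buffer and the
  // incremental marker.  The smi check is omitted because a map is always
  // a heap object, and lr has not been spilled at this point, hence
  // kLRHasNotBeenSaved.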
  __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
  __ RecordWriteField(r2,
                      HeapObject::kMapOffset,
                      r3,
                      r9,
                      kLRHasNotBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
    MacroAssembler* masm, Label* fail) {
  // ----------- S t a t e -------------
  //  -- r0    : value
  //  -- r1    : key
  //  -- r2    : receiver
  //  -- lr    : return address
  //  -- r3    : target map, scratch for subsequent call
  //  -- r4    : scratch (elements)
  // -----------------------------------
  Label loop, entry, convert_hole, gc_required;
  bool vfp3_supported = CpuFeatures::IsSupported(VFP3);
  __ push(lr);

  __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
  __ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
  // r4: source FixedArray
  // r5: number of elements (smi-tagged)

  // Allocate new FixedDoubleArray.
  __ mov(lr, Operand(FixedDoubleArray::kHeaderSize));
  __ add(lr, lr, Operand(r5, LSL, 2));
  __ AllocateInNewSpace(lr, r6, r7, r9, &gc_required, NO_ALLOCATION_FLAGS);
  // r6: destination FixedDoubleArray, not tagged as heap object
  __ LoadRoot(r9, Heap::kFixedDoubleArrayMapRootIndex);
  __ str(r9, MemOperand(r6, HeapObject::kMapOffset));
  // Set destination FixedDoubleArray's length.
  __ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset));
  // Update receiver's map.
  __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
  __ RecordWriteField(r2,
                      HeapObject::kMapOffset,
                      r3,
                      r9,
                      kLRHasBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  // Replace receiver's backing store with newly created FixedDoubleArray.
  __ add(r3, r6, Operand(kHeapObjectTag));
  __ str(r3, FieldMemOperand(r2, JSObject::kElementsOffset));
  __ RecordWriteField(r2,
                      JSObject::kElementsOffset,
                      r3,
                      r9,
                      kLRHasBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Prepare for conversion loop.
  __ add(r3, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ add(r7, r6, Operand(FixedDoubleArray::kHeaderSize));
  __ add(r6, r7, Operand(r5, LSL, 2));
  __ mov(r4, Operand(kHoleNanLower32));
  __ mov(r5, Operand(kHoleNanUpper32));
  // r3: begin of source FixedArray element fields, not tagged
  // r4: kHoleNanLower32
  // r5: kHoleNanUpper32
  // r6: end of destination FixedDoubleArray, not tagged
  // r7: begin of FixedDoubleArray element fields, not tagged
  if (!vfp3_supported) __ Push(r1, r0);

  __ b(&entry);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ pop(lr);
  __ b(fail);

  // Convert and copy elements.
  __ bind(&loop);
  __ ldr(r9, MemOperand(r3, 4, PostIndex));
  // r9: current element
  __ JumpIfNotSmi(r9, &convert_hole);

  // Normal smi, convert to double and store.
  __ SmiUntag(r9);
  if (vfp3_supported) {
    CpuFeatures::Scope scope(VFP3);
    __ vmov(s0, r9);
    __ vcvt_f64_s32(d0, s0);
    __ vstr(d0, r7, 0);
    __ add(r7, r7, Operand(8));
  } else {
    FloatingPointHelper::ConvertIntToDouble(masm,
                                            r9,
                                            FloatingPointHelper::kCoreRegisters,
                                            d0,
                                            r0,
                                            r1,
                                            lr,
                                            s0);
    __ Strd(r0, r1, MemOperand(r7, 8, PostIndex));
  }
  __ b(&entry);

  // Hole found, store the-hole NaN.
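  // The hole is encoded in a FixedDoubleArray as a dedicated NaN bit
  // pattern (kHoleNanUpper32/kHoleNanLower32, preloaded into r5 and r4
  // above).  Doubles stored into such arrays are canonicalized, so this
  // pattern cannot occur as a genuine element value.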
  __ bind(&convert_hole);
  if (FLAG_debug_code) {
    __ CompareRoot(r9, Heap::kTheHoleValueRootIndex);
    __ Assert(eq, "object found in smi-only array");
  }
  __ Strd(r4, r5, MemOperand(r7, 8, PostIndex));

  __ bind(&entry);
  __ cmp(r7, r6);
  __ b(lt, &loop);

  if (!vfp3_supported) __ Pop(r1, r0);
  __ pop(lr);
}


void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm, Label* fail) {
  // ----------- S t a t e -------------
  //  -- r0    : value
  //  -- r1    : key
  //  -- r2    : receiver
  //  -- lr    : return address
  //  -- r3    : target map, scratch for subsequent call
  //  -- r4    : scratch (elements)
  // -----------------------------------
  Label entry, loop, convert_hole, gc_required;

  __ push(lr);
  __ Push(r3, r2, r1, r0);

  __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
  __ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
  // r4: source FixedDoubleArray
  // r5: number of elements (smi-tagged)

  // Allocate new FixedArray.
  __ mov(r0, Operand(FixedDoubleArray::kHeaderSize));
  __ add(r0, r0, Operand(r5, LSL, 1));
  __ AllocateInNewSpace(r0, r6, r7, r9, &gc_required, NO_ALLOCATION_FLAGS);
  // r6: destination FixedArray, not tagged as heap object
  __ LoadRoot(r9, Heap::kFixedArrayMapRootIndex);
  __ str(r9, MemOperand(r6, HeapObject::kMapOffset));
  // Set destination FixedArray's length.
  __ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset));

  // Prepare for conversion loop.
  __ add(r4, r4, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
  __ add(r3, r6, Operand(FixedArray::kHeaderSize));
  __ add(r6, r6, Operand(kHeapObjectTag));
  __ add(r5, r3, Operand(r5, LSL, 1));
  __ LoadRoot(r7, Heap::kTheHoleValueRootIndex);
  __ LoadRoot(r9, Heap::kHeapNumberMapRootIndex);
  // Using offset addresses in r4 to fully take advantage of post-indexing.
  // r3: begin of destination FixedArray element fields, not tagged
  // r4: begin of source FixedDoubleArray element fields, not tagged, +4
  // r5: end of destination FixedArray, not tagged
  // r6: destination FixedArray
  // r7: the-hole pointer
  // r9: heap number map
  __ b(&entry);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ Pop(r3, r2, r1, r0);
  __ pop(lr);
  __ b(fail);

  __ bind(&loop);
  __ ldr(r1, MemOperand(r4, 8, PostIndex));
  // r1: current element's upper 32 bits
  // r4: address of next element's upper 32 bits
  __ cmp(r1, Operand(kHoleNanUpper32));
  __ b(eq, &convert_hole);

  // Non-hole double, copy value into a heap number.
  __ AllocateHeapNumber(r2, r0, lr, r9, &gc_required);
  // r2: new heap number
  __ ldr(r0, MemOperand(r4, 12, NegOffset));
  __ Strd(r0, r1, FieldMemOperand(r2, HeapNumber::kValueOffset));
  __ mov(r0, r3);
  __ str(r2, MemOperand(r3, 4, PostIndex));
  __ RecordWrite(r6,
                 r0,
                 r2,
                 kLRHasBeenSaved,
                 kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET,
                 OMIT_SMI_CHECK);
  __ b(&entry);

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ str(r7, MemOperand(r3, 4, PostIndex));

  __ bind(&entry);
  __ cmp(r3, r5);
  __ b(lt, &loop);

  __ Pop(r3, r2, r1, r0);
  // Update receiver's map.
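  // The new map is installed only now, after the FixedArray has been fully
  // populated: AllocateHeapNumber in the loop above may branch to
  // gc_required, and that bail-out path must leave the receiver untouched
  // in its original double-array state.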
  __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
  __ RecordWriteField(r2,
                      HeapObject::kMapOffset,
                      r3,
                      r9,
                      kLRHasBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  // Replace receiver's backing store with newly created and filled FixedArray.
  __ str(r6, FieldMemOperand(r2, JSObject::kElementsOffset));
  __ RecordWriteField(r2,
                      JSObject::kElementsOffset,
                      r6,
                      r9,
                      kLRHasBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ pop(lr);
}


void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
  // Fetch the instance type of the receiver into result register.
  __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ tst(result, Operand(kIsIndirectStringMask));
  __ b(eq, &check_sequential);

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ tst(result, Operand(kSlicedNotConsMask));
  __ b(eq, &cons_string);

  // Handle slices.
  Label indirect_string_loaded;
  __ ldr(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
  __ add(index, index, Operand(result, ASR, kSmiTagSize));
  __ ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
  __ jmp(&indirect_string_loaded);

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ bind(&cons_string);
  __ ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
  __ LoadRoot(ip, Heap::kEmptyStringRootIndex);
  __ cmp(result, ip);
  __ b(ne, call_runtime);
  // Get the first of the two strings and load its instance type.
  __ ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));

  __ bind(&indirect_string_loaded);
  __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label external_string, check_encoding;
  __ bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ tst(result, Operand(kStringRepresentationMask));
  __ b(ne, &external_string);

  // Prepare sequential strings.
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
  __ add(string,
         string,
         Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  __ jmp(&check_encoding);

  // Handle external strings.
  __ bind(&external_string);
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ tst(result, Operand(kIsIndirectStringMask));
    __ Assert(eq, "external string expected, but not found");
  }
  // Rule out short external strings.
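  // Short external strings do not cache a pointer to their character data
  // in the string object, so the resource-data load below would be invalid
  // for them; defer those to the runtime.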
  STATIC_CHECK(kShortExternalStringTag != 0);
  __ tst(result, Operand(kShortExternalStringMask));
  __ b(ne, call_runtime);
  __ ldr(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));

  Label ascii, done;
  __ bind(&check_encoding);
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ tst(result, Operand(kStringEncodingMask));
  __ b(ne, &ascii);
  // Two-byte string.
  __ ldrh(result, MemOperand(string, index, LSL, 1));
  __ jmp(&done);
  __ bind(&ascii);
  // Ascii string.
  __ ldrb(result, MemOperand(string, index));
  __ bind(&done);
}

#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM