// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#if V8_TARGET_ARCH_ARM

#include "src/arm/simulator-arm.h"
#include "src/codegen.h"
#include "src/macro-assembler.h"

namespace v8 {
namespace internal {


#define __ masm.


#if defined(USE_SIMULATOR)
byte* fast_exp_arm_machine_code = NULL;
double fast_exp_simulator(double x) {
  return Simulator::current(Isolate::Current())->CallFPReturnsDouble(
      fast_exp_arm_machine_code, x, 0);
}
#endif

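// Generates a stand-alone stub that computes exp(x) for the --fast_math
// path. If the flag is off or the code buffer cannot be allocated, std::exp
// is returned instead; under the simulator the generated code is invoked
// through fast_exp_simulator() above.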
UnaryMathFunction CreateExpFunction() {
  if (!FLAG_fast_math) return &std::exp;
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return &std::exp;
  ExternalReference::InitializeMathExpData();

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));

  {
    DwVfpRegister input = d0;
    DwVfpRegister result = d1;
    DwVfpRegister double_scratch1 = d2;
    DwVfpRegister double_scratch2 = d3;
    Register temp1 = r4;
    Register temp2 = r5;
    Register temp3 = r6;

    if (masm.use_eabi_hardfloat()) {
      // Input value is in d0 anyway, nothing to do.
    } else {
      __ vmov(input, r0, r1);
    }
    __ Push(temp3, temp2, temp1);
    MathExpGenerator::EmitMathExp(
        &masm, input, result, double_scratch1, double_scratch2,
        temp1, temp2, temp3);
    __ Pop(temp3, temp2, temp1);
    if (masm.use_eabi_hardfloat()) {
      __ vmov(d0, result);
    } else {
      __ vmov(r0, r1, result);
    }
    __ Ret();
  }

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));

  CpuFeatures::FlushICache(buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);

#if !defined(USE_SIMULATOR)
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
#else
  fast_exp_arm_machine_code = buffer;
  return &fast_exp_simulator;
#endif
}

#if defined(V8_HOST_ARCH_ARM)
MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR)
  return stub;
#else
  if (!CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) return stub;
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return stub;

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));

  Register dest = r0;
  Register src = r1;
  Register chars = r2;
  Register temp1 = r3;
  Label less_4;

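  // In outline: the NEON path prefetches the source and copies 64 bytes per
  // iteration in the main loop, then falls through progressively smaller
  // tails (128/64/32/16/8 bytes), finishing with a copy that may overlap the
  // previous one. Sizes below 8 bytes, and the non-NEON path, copy
  // word-at-a-time and finish with conditional halfword/byte moves for the
  // last 0-3 bytes.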
  if (CpuFeatures::IsSupported(NEON)) {
    Label loop, less_256, less_128, less_64, less_32, _16_or_less, _8_or_less;
    Label size_less_than_8;
    __ pld(MemOperand(src, 0));

    __ cmp(chars, Operand(8));
    __ b(lt, &size_less_than_8);
    __ cmp(chars, Operand(32));
    __ b(lt, &less_32);
    if (CpuFeatures::cache_line_size() == 32) {
      __ pld(MemOperand(src, 32));
    }
    __ cmp(chars, Operand(64));
    __ b(lt, &less_64);
    __ pld(MemOperand(src, 64));
    if (CpuFeatures::cache_line_size() == 32) {
      __ pld(MemOperand(src, 96));
    }
    __ cmp(chars, Operand(128));
    __ b(lt, &less_128);
    __ pld(MemOperand(src, 128));
    if (CpuFeatures::cache_line_size() == 32) {
      __ pld(MemOperand(src, 160));
    }
    __ pld(MemOperand(src, 192));
    if (CpuFeatures::cache_line_size() == 32) {
      __ pld(MemOperand(src, 224));
    }
    __ cmp(chars, Operand(256));
    __ b(lt, &less_256);
    __ sub(chars, chars, Operand(256));

    __ bind(&loop);
    __ pld(MemOperand(src, 256));
    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
    if (CpuFeatures::cache_line_size() == 32) {
      __ pld(MemOperand(src, 256));
    }
    __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
    __ sub(chars, chars, Operand(64), SetCC);
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
    __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
    __ b(ge, &loop);
    __ add(chars, chars, Operand(256));

    __ bind(&less_256);
    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
    __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
    __ sub(chars, chars, Operand(128));
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
    __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
    __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
    __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
    __ cmp(chars, Operand(64));
    __ b(lt, &less_64);

    __ bind(&less_128);
    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
    __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
    __ sub(chars, chars, Operand(64));
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
    __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));

    __ bind(&less_64);
    __ cmp(chars, Operand(32));
    __ b(lt, &less_32);
    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
    __ sub(chars, chars, Operand(32));

    __ bind(&less_32);
    __ cmp(chars, Operand(16));
    __ b(le, &_16_or_less);
    __ vld1(Neon8, NeonListOperand(d0, 2), NeonMemOperand(src, PostIndex));
    __ vst1(Neon8, NeonListOperand(d0, 2), NeonMemOperand(dest, PostIndex));
    __ sub(chars, chars, Operand(16));

    __ bind(&_16_or_less);
    __ cmp(chars, Operand(8));
    __ b(le, &_8_or_less);
    __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src, PostIndex));
    __ vst1(Neon8, NeonListOperand(d0), NeonMemOperand(dest, PostIndex));
    __ sub(chars, chars, Operand(8));

    // Do a last copy which may overlap with the previous copy (up to 8 bytes).
    __ bind(&_8_or_less);
    __ rsb(chars, chars, Operand(8));
    __ sub(src, src, Operand(chars));
    __ sub(dest, dest, Operand(chars));
    __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src));
    __ vst1(Neon8, NeonListOperand(d0), NeonMemOperand(dest));

    __ Ret();

    __ bind(&size_less_than_8);

    __ bic(temp1, chars, Operand(0x3), SetCC);
    __ b(&less_4, eq);
    __ ldr(temp1, MemOperand(src, 4, PostIndex));
    __ str(temp1, MemOperand(dest, 4, PostIndex));
  } else {
    Register temp2 = ip;
    Label loop;

    __ bic(temp2, chars, Operand(0x3), SetCC);
    __ b(&less_4, eq);
    __ add(temp2, dest, temp2);

    __ bind(&loop);
    __ ldr(temp1, MemOperand(src, 4, PostIndex));
    __ str(temp1, MemOperand(dest, 4, PostIndex));
    __ cmp(dest, temp2);
    __ b(&loop, ne);
  }

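  // Copy the remaining 0-3 bytes. Shifting chars left by 31 with SetCC moves
  // bit 1 into the carry flag and clears Z only when bit 0 is set, so the
  // conditional halfword copy (cs) handles a 2-byte tail and the conditional
  // byte copy (ne) handles a trailing odd byte.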
  __ bind(&less_4);
  __ mov(chars, Operand(chars, LSL, 31), SetCC);
  // bit0 => Z (ne), bit1 => C (cs)
  __ ldrh(temp1, MemOperand(src, 2, PostIndex), cs);
  __ strh(temp1, MemOperand(dest, 2, PostIndex), cs);
  __ ldrb(temp1, MemOperand(src), ne);
  __ strb(temp1, MemOperand(dest), ne);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));

  CpuFeatures::FlushICache(buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}


// Converts 8-bit characters to 16-bit ones. The number of characters to copy
// must be at least 8.
MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
    MemCopyUint16Uint8Function stub) {
#if defined(USE_SIMULATOR)
  return stub;
#else
  if (!CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) return stub;
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return stub;

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));

  Register dest = r0;
  Register src = r1;
  Register chars = r2;
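  // With NEON, each iteration loads 8 bytes, widens them to 8 halfwords with
  // vmovl and stores 16 bytes, finishing with a possibly overlapping copy of
  // the last 1-8 characters. Without NEON, uxtb16/pkhbt unpack 4 characters
  // per word load into 4 halfwords, and the remaining 0-3 characters are
  // handled separately below.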
  if (CpuFeatures::IsSupported(NEON)) {
    Register temp = r3;
    Label loop;

    __ bic(temp, chars, Operand(0x7));
    __ sub(chars, chars, Operand(temp));
    __ add(temp, dest, Operand(temp, LSL, 1));

    __ bind(&loop);
    __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src, PostIndex));
    __ vmovl(NeonU8, q0, d0);
    __ vst1(Neon16, NeonListOperand(d0, 2), NeonMemOperand(dest, PostIndex));
    __ cmp(dest, temp);
    __ b(&loop, ne);

    // Do a last copy which will overlap with the previous copy (1 to 8 bytes).
    __ rsb(chars, chars, Operand(8));
    __ sub(src, src, Operand(chars));
    __ sub(dest, dest, Operand(chars, LSL, 1));
    __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src));
    __ vmovl(NeonU8, q0, d0);
    __ vst1(Neon16, NeonListOperand(d0, 2), NeonMemOperand(dest));
    __ Ret();
  } else {
    Register temp1 = r3;
    Register temp2 = ip;
    Register temp3 = lr;
    Register temp4 = r4;
    Label loop;
    Label not_two;

    __ Push(lr, r4);
    __ bic(temp2, chars, Operand(0x3));
    __ add(temp2, dest, Operand(temp2, LSL, 1));

    __ bind(&loop);
    __ ldr(temp1, MemOperand(src, 4, PostIndex));
    __ uxtb16(temp3, Operand(temp1, ROR, 0));
    __ uxtb16(temp4, Operand(temp1, ROR, 8));
    __ pkhbt(temp1, temp3, Operand(temp4, LSL, 16));
    __ str(temp1, MemOperand(dest));
    __ pkhtb(temp1, temp4, Operand(temp3, ASR, 16));
    __ str(temp1, MemOperand(dest, 4));
    __ add(dest, dest, Operand(8));
    __ cmp(dest, temp2);
    __ b(&loop, ne);

    __ mov(chars, Operand(chars, LSL, 31), SetCC);  // bit0 => ne, bit1 => cs
    __ b(&not_two, cc);
    __ ldrh(temp1, MemOperand(src, 2, PostIndex));
    __ uxtb(temp3, Operand(temp1, ROR, 8));
    __ mov(temp3, Operand(temp3, LSL, 16));
    __ uxtab(temp3, temp3, Operand(temp1, ROR, 0));
    __ str(temp3, MemOperand(dest, 4, PostIndex));
    __ bind(&not_two);
    __ ldrb(temp1, MemOperand(src), ne);
    __ strh(temp1, MemOperand(dest), ne);
    __ Pop(pc, r4);
  }

  CodeDesc desc;
  masm.GetCode(&desc);

  CpuFeatures::FlushICache(buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);

  return FUNCTION_CAST<MemCopyUint16Uint8Function>(buffer);
#endif
}
#endif

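// Generates a stub that computes sqrt with a single vsqrt instruction.
// Falls back to std::sqrt under the simulator or if the code buffer cannot
// be allocated.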
UnaryMathFunction CreateSqrtFunction() {
#if defined(USE_SIMULATOR)
  return &std::sqrt;
#else
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return &std::sqrt;

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));

  __ MovFromFloatParameter(d0);
  __ vsqrt(d0, d0);
  __ MovToFloatResult(d0);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));

  CpuFeatures::FlushICache(buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
#endif
}

#undef __


// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  DCHECK(!masm->has_frame());
  masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  DCHECK(masm->has_frame());
  masm->set_has_frame(false);
}


// -------------------------------------------------------------------------
// Code generators

#define __ ACCESS_MASM(masm)

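// A map-only elements transition: the backing store layout is unchanged, so
// it is enough to store the new map into the receiver and record the write.
// With TRACK_ALLOCATION_SITE, arrays carrying an allocation memento bail out
// to allocation_memento_found first.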
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* allocation_memento_found) {
  Register scratch_elements = r4;
  DCHECK(!AreAliased(receiver, key, value, target_map,
                     scratch_elements));

  if (mode == TRACK_ALLOCATION_SITE) {
    DCHECK(allocation_memento_found != NULL);
    __ JumpIfJSArrayHasAllocationMemento(
        receiver, scratch_elements, allocation_memento_found);
  }

  // Set transitioned map.
  __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      r9,
                      kLRHasNotBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


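// Transitions the receiver's elements from a FixedArray of smis to a freshly
// allocated FixedDoubleArray: each smi is converted to a double, holes are
// stored as the hole NaN, and the receiver's map and elements pointer are
// updated. Jumps to fail if the new backing store cannot be allocated.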
void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  // Register lr contains the return address.
  Label loop, entry, convert_hole, gc_required, only_change_map, done;
  Register elements = r4;
  Register length = r5;
  Register array = r6;
  Register array_end = array;

  // target_map parameter can be clobbered.
  Register scratch1 = target_map;
  Register scratch2 = r9;

  // Verify input registers don't conflict with locals.
  DCHECK(!AreAliased(receiver, key, value, target_map,
                     elements, length, array, scratch2));

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
  __ b(eq, &only_change_map);

  __ push(lr);
  __ ldr(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
  // length: number of elements (smi-tagged)

  // Allocate new FixedDoubleArray.
  // Use lr as a temporary register.
  __ mov(lr, Operand(length, LSL, 2));
  __ add(lr, lr, Operand(FixedDoubleArray::kHeaderSize));
  __ Allocate(lr, array, elements, scratch2, &gc_required, DOUBLE_ALIGNMENT);
  // array: destination FixedDoubleArray, not tagged as heap object.
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // r4: source FixedArray.

  // Set destination FixedDoubleArray's length and map.
  __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
  __ str(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
  __ str(scratch2, MemOperand(array, HeapObject::kMapOffset));

  // Update receiver's map.
  __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      scratch2,
                      kLRHasBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  // Replace receiver's backing store with newly created FixedDoubleArray.
  __ add(scratch1, array, Operand(kHeapObjectTag));
  __ str(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ RecordWriteField(receiver,
                      JSObject::kElementsOffset,
                      scratch1,
                      scratch2,
                      kLRHasBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Prepare for conversion loop.
  __ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ add(scratch2, array, Operand(FixedDoubleArray::kHeaderSize));
  __ add(array_end, scratch2, Operand(length, LSL, 2));

  // Repurpose registers no longer in use.
  Register hole_lower = elements;
  Register hole_upper = length;

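  // kHoleNanLower32 and kHoleNanUpper32 together form the bit pattern of the
  // hole NaN used to mark missing elements in a FixedDoubleArray.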
  __ mov(hole_lower, Operand(kHoleNanLower32));
  __ mov(hole_upper, Operand(kHoleNanUpper32));
  // scratch1: begin of source FixedArray element fields, not tagged
  // hole_lower: kHoleNanLower32
  // hole_upper: kHoleNanUpper32
  // array_end: end of destination FixedDoubleArray, not tagged
  // scratch2: begin of FixedDoubleArray element fields, not tagged

  __ b(&entry);

  __ bind(&only_change_map);
  __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      scratch2,
                      kLRHasNotBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ b(&done);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ pop(lr);
  __ b(fail);

  // Convert and copy elements.
  __ bind(&loop);
  __ ldr(lr, MemOperand(scratch1, 4, PostIndex));
  // lr: current element
  __ UntagAndJumpIfNotSmi(lr, lr, &convert_hole);

  // Normal smi, convert to double and store.
  __ vmov(s0, lr);
  __ vcvt_f64_s32(d0, s0);
  __ vstr(d0, scratch2, 0);
  __ add(scratch2, scratch2, Operand(8));
  __ b(&entry);

  // Hole found, store the-hole NaN.
  __ bind(&convert_hole);
  if (FLAG_debug_code) {
    // Restore a "smi-untagged" heap object.
    __ SmiTag(lr);
    __ orr(lr, lr, Operand(1));
    __ CompareRoot(lr, Heap::kTheHoleValueRootIndex);
    __ Assert(eq, kObjectFoundInSmiOnlyArray);
  }
  __ Strd(hole_lower, hole_upper, MemOperand(scratch2, 8, PostIndex));

  __ bind(&entry);
  __ cmp(scratch2, array_end);
  __ b(lt, &loop);

  __ pop(lr);
  __ bind(&done);
}


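// Transitions the receiver's elements from a FixedDoubleArray to a FixedArray
// of tagged values: each double is boxed into a newly allocated HeapNumber
// and hole NaNs become the-hole value. Jumps to fail if the new backing
// store or a HeapNumber cannot be allocated.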
void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  // Register lr contains the return address.
  Label entry, loop, convert_hole, gc_required, only_change_map;
  Register elements = r4;
  Register array = r6;
  Register length = r5;
  Register scratch = r9;

  // Verify input registers don't conflict with locals.
  DCHECK(!AreAliased(receiver, key, value, target_map,
                     elements, array, length, scratch));

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
  __ b(eq, &only_change_map);

  __ push(lr);
  __ Push(target_map, receiver, key, value);
  __ ldr(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
  // elements: source FixedDoubleArray
  // length: number of elements (smi-tagged)

  // Allocate new FixedArray.
  // Re-use value and target_map registers, as they have been saved on the
  // stack.
  Register array_size = value;
  Register allocate_scratch = target_map;
  __ mov(array_size, Operand(FixedDoubleArray::kHeaderSize));
  __ add(array_size, array_size, Operand(length, LSL, 1));
  __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
              NO_ALLOCATION_FLAGS);
  // array: destination FixedArray, not tagged as heap object
  // Set destination FixedArray's length and map.
  __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
  __ str(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
  __ str(scratch, MemOperand(array, HeapObject::kMapOffset));

  // Prepare for conversion loop.
  Register src_elements = elements;
  Register dst_elements = target_map;
  Register dst_end = length;
  Register heap_number_map = scratch;
  __ add(src_elements, elements,
         Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
  __ add(dst_elements, array, Operand(FixedArray::kHeaderSize));
  __ add(array, array, Operand(kHeapObjectTag));
  __ add(dst_end, dst_elements, Operand(length, LSL, 1));
  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  // Using offset addresses in src_elements to fully take advantage of
  // post-indexing.
  // dst_elements: begin of destination FixedArray element fields, not tagged
  // src_elements: begin of source FixedDoubleArray element fields,
  //               not tagged, +4
  // dst_end: end of destination FixedArray, not tagged
  // array: destination FixedArray
  // heap_number_map: heap number map
  __ b(&entry);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ Pop(target_map, receiver, key, value);
  __ pop(lr);
  __ b(fail);

  __ bind(&loop);
  Register upper_bits = key;
  __ ldr(upper_bits, MemOperand(src_elements, 8, PostIndex));
  // upper_bits: current element's upper 32 bits
  // src_elements: address of next element's upper 32 bits
  __ cmp(upper_bits, Operand(kHoleNanUpper32));
  __ b(eq, &convert_hole);

  // Non-hole double, copy value into a heap number.
  Register heap_number = receiver;
  Register scratch2 = value;
  __ AllocateHeapNumber(heap_number, scratch2, lr, heap_number_map,
                        &gc_required);
  // heap_number: new heap number
  __ ldr(scratch2, MemOperand(src_elements, 12, NegOffset));
  __ Strd(scratch2, upper_bits,
          FieldMemOperand(heap_number, HeapNumber::kValueOffset));
  __ mov(scratch2, dst_elements);
  __ str(heap_number, MemOperand(dst_elements, 4, PostIndex));
  __ RecordWrite(array,
                 scratch2,
                 heap_number,
                 kLRHasBeenSaved,
                 kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET,
                 OMIT_SMI_CHECK);
  __ b(&entry);

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
  __ str(scratch2, MemOperand(dst_elements, 4, PostIndex));

  __ bind(&entry);
  __ cmp(dst_elements, dst_end);
  __ b(lt, &loop);

  __ Pop(target_map, receiver, key, value);
  // Replace receiver's backing store with newly created and filled FixedArray.
  __ str(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ RecordWriteField(receiver,
                      JSObject::kElementsOffset,
                      array,
                      scratch,
                      kLRHasBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ pop(lr);

  __ bind(&only_change_map);
  // Update receiver's map.
  __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      scratch,
                      kLRHasNotBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


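// Loads the character at index from string into result. Sliced and flat cons
// strings are first reduced to their underlying sequential or external
// string; non-flat cons strings and short external strings bail out to
// call_runtime. Both one-byte and two-byte encodings are handled.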
void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
  // Fetch the instance type of the receiver into result register.
  __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ tst(result, Operand(kIsIndirectStringMask));
  __ b(eq, &check_sequential);

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ tst(result, Operand(kSlicedNotConsMask));
  __ b(eq, &cons_string);

  // Handle slices.
  Label indirect_string_loaded;
  __ ldr(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
  __ ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
  __ add(index, index, Operand::SmiUntag(result));
  __ jmp(&indirect_string_loaded);

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ bind(&cons_string);
  __ ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
  __ CompareRoot(result, Heap::kempty_stringRootIndex);
  __ b(ne, call_runtime);
  // Get the first of the two strings and load its instance type.
  __ ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));

  __ bind(&indirect_string_loaded);
  __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label external_string, check_encoding;
  __ bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ tst(result, Operand(kStringRepresentationMask));
  __ b(ne, &external_string);

  // Prepare sequential strings
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
  __ add(string,
         string,
         Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  __ jmp(&check_encoding);

  // Handle external strings.
  __ bind(&external_string);
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ tst(result, Operand(kIsIndirectStringMask));
    __ Assert(eq, kExternalStringExpectedButNotFound);
  }
  // Rule out short external strings.
  STATIC_ASSERT(kShortExternalStringTag != 0);
  __ tst(result, Operand(kShortExternalStringMask));
  __ b(ne, call_runtime);
  __ ldr(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));

  Label one_byte, done;
  __ bind(&check_encoding);
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ tst(result, Operand(kStringEncodingMask));
  __ b(ne, &one_byte);
  // Two-byte string.
  __ ldrh(result, MemOperand(string, index, LSL, 1));
  __ jmp(&done);
  __ bind(&one_byte);
  // One-byte string.
  __ ldrb(result, MemOperand(string, index));
  __ bind(&done);
}


static MemOperand ExpConstant(int index, Register base) {
  return MemOperand(base, index * kDoubleSize);
}


void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
                                   DwVfpRegister input,
                                   DwVfpRegister result,
                                   DwVfpRegister double_scratch1,
                                   DwVfpRegister double_scratch2,
                                   Register temp1,
                                   Register temp2,
                                   Register temp3) {
  DCHECK(!input.is(result));
  DCHECK(!input.is(double_scratch1));
  DCHECK(!input.is(double_scratch2));
  DCHECK(!result.is(double_scratch1));
  DCHECK(!result.is(double_scratch2));
  DCHECK(!double_scratch1.is(double_scratch2));
  DCHECK(!temp1.is(temp2));
  DCHECK(!temp1.is(temp3));
  DCHECK(!temp2.is(temp3));
  DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
  DCHECK(!masm->serializer_enabled());  // External references not serializable.

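  // In outline: inputs below constant 0 of the math_exp_constants table
  // underflow to zero, and inputs at or above constant 1 overflow to the
  // +infinity stored at constant 2. Otherwise the input is reduced using
  // constants 3-8: the low 11 bits extracted into temp2 index
  // math_exp_log_table, the remaining bits plus the 0x3ff bias form the
  // exponent of a power of two merged into that table entry, and the result
  // is the product of this value with a small polynomial correction.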
  Label zero, infinity, done;

  __ mov(temp3, Operand(ExternalReference::math_exp_constants(0)));

  __ vldr(double_scratch1, ExpConstant(0, temp3));
  __ VFPCompareAndSetFlags(double_scratch1, input);
  __ b(ge, &zero);

  __ vldr(double_scratch2, ExpConstant(1, temp3));
  __ VFPCompareAndSetFlags(input, double_scratch2);
  __ b(ge, &infinity);

  __ vldr(double_scratch1, ExpConstant(3, temp3));
  __ vldr(result, ExpConstant(4, temp3));
  __ vmul(double_scratch1, double_scratch1, input);
  __ vadd(double_scratch1, double_scratch1, result);
  __ VmovLow(temp2, double_scratch1);
  __ vsub(double_scratch1, double_scratch1, result);
  __ vldr(result, ExpConstant(6, temp3));
  __ vldr(double_scratch2, ExpConstant(5, temp3));
  __ vmul(double_scratch1, double_scratch1, double_scratch2);
  __ vsub(double_scratch1, double_scratch1, input);
  __ vsub(result, result, double_scratch1);
  __ vmul(double_scratch2, double_scratch1, double_scratch1);
  __ vmul(result, result, double_scratch2);
  __ vldr(double_scratch2, ExpConstant(7, temp3));
  __ vmul(result, result, double_scratch2);
  __ vsub(result, result, double_scratch1);
  // Move 1 into double_scratch2 as math_exp_constants_array[8] == 1.
  DCHECK(*reinterpret_cast<double*>
         (ExternalReference::math_exp_constants(8).address()) == 1);
  __ vmov(double_scratch2, 1);
  __ vadd(result, result, double_scratch2);
  __ mov(temp1, Operand(temp2, LSR, 11));
  __ Ubfx(temp2, temp2, 0, 11);
  __ add(temp1, temp1, Operand(0x3ff));

  // Must not call ExpConstant() after overwriting temp3!
  __ mov(temp3, Operand(ExternalReference::math_exp_log_table()));
  __ add(temp3, temp3, Operand(temp2, LSL, 3));
  __ ldm(ia, temp3, temp2.bit() | temp3.bit());
  // The first word loaded goes into the lower-numbered register.
  if (temp2.code() < temp3.code()) {
    __ orr(temp1, temp3, Operand(temp1, LSL, 20));
    __ vmov(double_scratch1, temp2, temp1);
  } else {
    __ orr(temp1, temp2, Operand(temp1, LSL, 20));
    __ vmov(double_scratch1, temp3, temp1);
  }
  __ vmul(result, result, double_scratch1);
  __ b(&done);

  __ bind(&zero);
  __ vmov(result, kDoubleRegZero);
  __ b(&done);

  __ bind(&infinity);
  __ vldr(result, ExpConstant(2, temp3));

  __ bind(&done);
}

#undef __

#ifdef DEBUG
// add(r0, pc, Operand(-8))
static const uint32_t kCodeAgePatchFirstInstruction = 0xe24f0008;
#endif

CodeAgingHelper::CodeAgingHelper() {
  DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
  // Since patcher is a large object, allocate it dynamically when needed,
  // to avoid overloading the stack in stress conditions.
  // DONT_FLUSH is used because the CodeAgingHelper is initialized early in
  // the process, before the ARM simulator's ICache is set up.
  SmartPointer<CodePatcher> patcher(
      new CodePatcher(young_sequence_.start(),
                      young_sequence_.length() / Assembler::kInstrSize,
                      CodePatcher::DONT_FLUSH));
  PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
  patcher->masm()->PushFixedFrame(r1);
  patcher->masm()->nop(ip.code());
  patcher->masm()->add(
      fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
}


#ifdef DEBUG
bool CodeAgingHelper::IsOld(byte* candidate) const {
  return Memory::uint32_at(candidate) == kCodeAgePatchFirstInstruction;
}
#endif


bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
  bool result = isolate->code_aging_helper()->IsYoung(sequence);
  DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
  return result;
}


void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
                               MarkingParity* parity) {
  if (IsYoungSequence(isolate, sequence)) {
    *age = kNoAgeCodeAge;
    *parity = NO_MARKING_PARITY;
  } else {
    Address target_address = Memory::Address_at(
        sequence + (kNoCodeAgeSequenceLength - Assembler::kInstrSize));
    Code* stub = GetCodeFromTargetAddress(target_address);
    GetCodeAgeAndParity(stub, age, parity);
  }
}


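// Making a sequence "young" again simply restores the prologue emitted by
// CodeAgingHelper. Aging overwrites that prologue with add(r0, pc, #-8),
// which leaves the address of the sequence start in r0, then
// ldr(pc, [pc, #-4]), which jumps to the code age stub whose address is
// emitted in the following word (presumably so the stub can locate the code
// being aged).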
void Code::PatchPlatformCodeAge(Isolate* isolate,
                                byte* sequence,
                                Code::Age age,
                                MarkingParity parity) {
  uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
  if (age == kNoAgeCodeAge) {
    isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
    CpuFeatures::FlushICache(sequence, young_length);
  } else {
    Code* stub = GetCodeAgeStub(isolate, age, parity);
    CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
    patcher.masm()->add(r0, pc, Operand(-8));
    patcher.masm()->ldr(pc, MemOperand(pc, -4));
    patcher.masm()->emit_code_stub_address(stub);
  }
}


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM
