// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#if V8_TARGET_ARCH_X87

#include "src/codegen.h"
#include "src/heap/heap.h"
#include "src/macro-assembler.h"

namespace v8 {
namespace internal {


// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  DCHECK(!masm->has_frame());
  masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  DCHECK(masm->has_frame());
  masm->set_has_frame(false);
}


#define __ masm.


UnaryMathFunction CreateExpFunction() {
  // No SSE2 support
  return &std::exp;
}


UnaryMathFunction CreateSqrtFunction() {
  // No SSE2 support
  return &std::sqrt;
}


// Helper functions for CreateMemMoveFunction.
#undef __
#define __ ACCESS_MASM(masm)

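// Note: the Direction and Alignment enums below are not referenced by the
// simplified x87 copier in this file; they appear to be retained only for
// parity with the ia32 version of this code.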
enum Direction { FORWARD, BACKWARD };
enum Alignment { MOVE_ALIGNED, MOVE_UNALIGNED };


void MemMoveEmitPopAndReturn(MacroAssembler* masm) {
  __ pop(esi);
  __ pop(edi);
  __ ret(0);
}


#undef __
#define __ masm.

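// Maps a bound Label to the absolute address it will occupy inside the fixed,
// non-relocatable buffer that the memmove code is assembled into.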
class LabelConverter {
 public:
  explicit LabelConverter(byte* buffer) : buffer_(buffer) {}
  int32_t address(Label* l) const {
    return reinterpret_cast<int32_t>(buffer_) + l->pos();
  }
 private:
  byte* buffer_;
};


MemMoveFunction CreateMemMoveFunction() {
  size_t actual_size;
  // Allocate buffer in executable space.
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return NULL;
  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
  LabelConverter conv(buffer);

  // Generated code is put into a fixed, unmovable buffer, and not into
  // the V8 heap. We can't, and don't, refer to any relocatable addresses
  // (e.g. the JavaScript nan-object).

  // 32-bit C declaration function calls pass arguments on stack.

  // Stack layout:
  // esp[12]: Third argument, size.
  // esp[8]: Second argument, source pointer.
  // esp[4]: First argument, destination pointer.
  // esp[0]: return address

  const int kDestinationOffset = 1 * kPointerSize;
  const int kSourceOffset = 2 * kPointerSize;
  const int kSizeOffset = 3 * kPointerSize;

  int stack_offset = 0;  // Update if we change the stack height.

  Label backward, backward_much_overlap;
  Label forward_much_overlap, small_size, medium_size, pop_and_return;
  __ push(edi);
  __ push(esi);
  stack_offset += 2 * kPointerSize;
  Register dst = edi;
  Register src = esi;
  Register count = ecx;
  __ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
  __ mov(src, Operand(esp, stack_offset + kSourceOffset));
  __ mov(count, Operand(esp, stack_offset + kSizeOffset));

  __ cmp(dst, src);
  __ j(equal, &pop_and_return);

  // No SSE2.
  Label forward;
  __ cmp(count, 0);
  __ j(equal, &pop_and_return);
  __ cmp(dst, src);
  __ j(above, &backward);
  __ jmp(&forward);
  {
    // Simple forward copier.
    Label forward_loop_1byte, forward_loop_4byte;
    __ bind(&forward_loop_4byte);
    __ mov(eax, Operand(src, 0));
    __ sub(count, Immediate(4));
    __ add(src, Immediate(4));
    __ mov(Operand(dst, 0), eax);
    __ add(dst, Immediate(4));
    __ bind(&forward);  // Entry point.
    __ cmp(count, 3);
    __ j(above, &forward_loop_4byte);
    __ bind(&forward_loop_1byte);
    __ cmp(count, 0);
    __ j(below_equal, &pop_and_return);
    __ mov_b(eax, Operand(src, 0));
    __ dec(count);
    __ inc(src);
    __ mov_b(Operand(dst, 0), eax);
    __ inc(dst);
    __ jmp(&forward_loop_1byte);
  }
  {
    // Simple backward copier.
    Label backward_loop_1byte, backward_loop_4byte, entry_shortcut;
    __ bind(&backward);
    __ add(src, count);
    __ add(dst, count);
    __ cmp(count, 3);
    __ j(below_equal, &entry_shortcut);

    __ bind(&backward_loop_4byte);
    __ sub(src, Immediate(4));
    __ sub(count, Immediate(4));
    __ mov(eax, Operand(src, 0));
    __ sub(dst, Immediate(4));
    __ mov(Operand(dst, 0), eax);
    __ cmp(count, 3);
    __ j(above, &backward_loop_4byte);
    __ bind(&backward_loop_1byte);
    __ cmp(count, 0);
    __ j(below_equal, &pop_and_return);
    __ bind(&entry_shortcut);
    __ dec(src);
    __ dec(count);
    __ mov_b(eax, Operand(src, 0));
    __ dec(dst);
    __ mov_b(Operand(dst, 0), eax);
    __ jmp(&backward_loop_1byte);
  }

  __ bind(&pop_and_return);
  MemMoveEmitPopAndReturn(&masm);

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));
  CpuFeatures::FlushICache(buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  // TODO(jkummerow): It would be nice to register this code creation event
  // with the PROFILE / GDBJIT system.
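  // Callers invoke the returned pointer like a regular function; a minimal
  // usage sketch (assuming the MemMoveFunction typedef from codegen.h,
  // void (*)(void* dest, const void* src, size_t size)):
  //   MemMoveFunction move = CreateMemMoveFunction();
  //   if (move != NULL) move(dest, src, size);  // else the caller must fall
  //                                             // back to another copy path.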
  return FUNCTION_CAST<MemMoveFunction>(buffer);
}


#undef __

// -------------------------------------------------------------------------
// Code generators

#define __ ACCESS_MASM(masm)


void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* allocation_memento_found) {
  Register scratch = edi;
  DCHECK(!AreAliased(receiver, key, value, target_map, scratch));

  if (mode == TRACK_ALLOCATION_SITE) {
    DCHECK(allocation_memento_found != NULL);
    __ JumpIfJSArrayHasAllocationMemento(
        receiver, scratch, allocation_memento_found);
  }

  // Set transitioned map.
  __ mov(FieldOperand(receiver, HeapObject::kMapOffset), target_map);
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
                      kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
}


void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  // Return address is on the stack.
  DCHECK(receiver.is(edx));
  DCHECK(key.is(ecx));
  DCHECK(value.is(eax));
  DCHECK(target_map.is(ebx));

  Label loop, entry, convert_hole, gc_required, only_change_map;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(edx, edi, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
  __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
  __ j(equal, &only_change_map);

  __ push(eax);
  __ push(ebx);

  __ mov(edi, FieldOperand(edi, FixedArray::kLengthOffset));

  // Allocate new FixedDoubleArray.
  // edx: receiver
  // edi: length of source FixedArray (smi-tagged)
  AllocationFlags flags =
      static_cast<AllocationFlags>(TAG_OBJECT | DOUBLE_ALIGNMENT);
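  // DOUBLE_ALIGNMENT keeps the allocation 8-byte aligned so the unboxed
  // doubles stored below do not straddle alignment boundaries.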
  __ Allocate(FixedDoubleArray::kHeaderSize, times_8, edi,
              REGISTER_VALUE_IS_SMI, eax, ebx, no_reg, &gc_required, flags);

  // eax: destination FixedDoubleArray
  // edi: number of elements
  // edx: receiver
  __ mov(FieldOperand(eax, HeapObject::kMapOffset),
         Immediate(masm->isolate()->factory()->fixed_double_array_map()));
  __ mov(FieldOperand(eax, FixedDoubleArray::kLengthOffset), edi);
  __ mov(esi, FieldOperand(edx, JSObject::kElementsOffset));
  // Replace receiver's backing store with newly created FixedDoubleArray.
  __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
  __ mov(ebx, eax);
  __ RecordWriteField(edx, JSObject::kElementsOffset, ebx, edi, kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);

  __ mov(edi, FieldOperand(esi, FixedArray::kLengthOffset));

  // Prepare for conversion loop.
  ExternalReference canonical_the_hole_nan_reference =
      ExternalReference::address_of_the_hole_nan();
  __ jmp(&entry);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  // Restore registers before jumping into runtime.
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
  __ pop(ebx);
  __ pop(eax);
  __ jmp(fail);

  // Convert and copy elements
  // esi: source FixedArray
  __ bind(&loop);
  __ mov(ebx, FieldOperand(esi, edi, times_2, FixedArray::kHeaderSize));
  // ebx: current element from source
  // edi: index of current element
  __ JumpIfNotSmi(ebx, &convert_hole);

  // Normal smi, convert it to double and store.
  __ SmiUntag(ebx);
  __ push(ebx);
  __ fild_s(Operand(esp, 0));
  __ pop(ebx);
  __ fstp_d(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize));
  __ jmp(&entry);

  // Found hole, store hole_nan_as_double instead.
  __ bind(&convert_hole);

  if (FLAG_debug_code) {
    __ cmp(ebx, masm->isolate()->factory()->the_hole_value());
    __ Assert(equal, kObjectFoundInSmiOnlyArray);
  }

  __ fld_d(Operand::StaticVariable(canonical_the_hole_nan_reference));
  __ fstp_d(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize));

  __ bind(&entry);
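  // edi is a smi-tagged index counting down; subtracting Smi::FromInt(1)
  // decrements it, and the loop exits once the index becomes negative.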
  __ sub(edi, Immediate(Smi::FromInt(1)));
  __ j(not_sign, &loop);

  __ pop(ebx);
  __ pop(eax);

  // Restore esi.
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));

  __ bind(&only_change_map);
  // eax: value
  // ebx: target map
  // Set transitioned map.
  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
  __ RecordWriteField(edx, HeapObject::kMapOffset, ebx, edi, kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
}


void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  // Return address is on the stack.
  DCHECK(receiver.is(edx));
  DCHECK(key.is(ecx));
  DCHECK(value.is(eax));
  DCHECK(target_map.is(ebx));

  Label loop, entry, convert_hole, gc_required, only_change_map, success;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(edx, edi, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
  __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
  __ j(equal, &only_change_map);

  __ push(eax);
  __ push(edx);
  __ push(ebx);

  __ mov(ebx, FieldOperand(edi, FixedDoubleArray::kLengthOffset));

  // Allocate new FixedArray.
  // ebx: length of source FixedDoubleArray (smi-tagged)
  __ lea(edi, Operand(ebx, times_2, FixedArray::kHeaderSize));
  __ Allocate(edi, eax, esi, no_reg, &gc_required, TAG_OBJECT);

  // eax: destination FixedArray
  // ebx: number of elements
  __ mov(FieldOperand(eax, HeapObject::kMapOffset),
         Immediate(masm->isolate()->factory()->fixed_array_map()));
  __ mov(FieldOperand(eax, FixedArray::kLengthOffset), ebx);
  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));

  __ jmp(&entry);

  // ebx: target map
  // edx: receiver
  // Set transitioned map.
  __ bind(&only_change_map);
  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
  __ RecordWriteField(edx, HeapObject::kMapOffset, ebx, edi, kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ jmp(&success);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
  __ pop(ebx);
  __ pop(edx);
  __ pop(eax);
  __ jmp(fail);

  // Box doubles into heap numbers.
  // edi: source FixedDoubleArray
  // eax: destination FixedArray
  __ bind(&loop);
  // ebx: index of current element (smi-tagged)
  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
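  // offset addresses the upper 32 bits of each double, which is enough to
  // recognize the hole NaN by comparing against kHoleNanUpper32 below.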
  __ cmp(FieldOperand(edi, ebx, times_4, offset), Immediate(kHoleNanUpper32));
  __ j(equal, &convert_hole);

  // Non-hole double, copy value into a heap number.
  __ AllocateHeapNumber(edx, esi, no_reg, &gc_required);
  // edx: new heap number
  __ mov(esi, FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
  __ mov(FieldOperand(edx, HeapNumber::kValueOffset), esi);
  __ mov(esi, FieldOperand(edi, ebx, times_4, offset));
  __ mov(FieldOperand(edx, HeapNumber::kValueOffset + kPointerSize), esi);
  __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize), edx);
  __ mov(esi, ebx);
  __ RecordWriteArray(eax, edx, esi, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ jmp(&entry, Label::kNear);

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize),
         masm->isolate()->factory()->the_hole_value());

  __ bind(&entry);
  __ sub(ebx, Immediate(Smi::FromInt(1)));
  __ j(not_sign, &loop);

  __ pop(ebx);
  __ pop(edx);
  // ebx: target map
  // edx: receiver
  // Set transitioned map.
  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
  __ RecordWriteField(edx, HeapObject::kMapOffset, ebx, edi, kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  // Replace receiver's backing store with newly created and filled FixedArray.
  __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
  __ RecordWriteField(edx, JSObject::kElementsOffset, eax, edi, kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);

  // Restore registers.
  __ pop(eax);
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));

  __ bind(&success);
}


void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Factory* factory,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
  // Fetch the instance type of the receiver into result register.
  __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
  __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ test(result, Immediate(kIsIndirectStringMask));
  __ j(zero, &check_sequential, Label::kNear);

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ test(result, Immediate(kSlicedNotConsMask));
  __ j(zero, &cons_string, Label::kNear);

  // Handle slices.
  Label indirect_string_loaded;
  __ mov(result, FieldOperand(string, SlicedString::kOffsetOffset));
  __ SmiUntag(result);
  __ add(index, result);
  __ mov(string, FieldOperand(string, SlicedString::kParentOffset));
  __ jmp(&indirect_string_loaded, Label::kNear);

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ bind(&cons_string);
  __ cmp(FieldOperand(string, ConsString::kSecondOffset),
         Immediate(factory->empty_string()));
  __ j(not_equal, call_runtime);
  __ mov(string, FieldOperand(string, ConsString::kFirstOffset));

  __ bind(&indirect_string_loaded);
  __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
  __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label seq_string;
  __ bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ test(result, Immediate(kStringRepresentationMask));
  __ j(zero, &seq_string, Label::kNear);

  // Handle external strings.
  Label one_byte_external, done;
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ test(result, Immediate(kIsIndirectStringMask));
    __ Assert(zero, kExternalStringExpectedButNotFound);
  }
  // Rule out short external strings.
  STATIC_ASSERT(kShortExternalStringTag != 0);
  __ test_b(result, kShortExternalStringMask);
  __ j(not_zero, call_runtime);
  // Check encoding.
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ test_b(result, kStringEncodingMask);
  __ mov(result, FieldOperand(string, ExternalString::kResourceDataOffset));
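  // The mov above does not modify the flags, so the branch below still
  // dispatches on the encoding bit tested by test_b.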
  __ j(not_equal, &one_byte_external, Label::kNear);
  // Two-byte string.
  __ movzx_w(result, Operand(result, index, times_2, 0));
  __ jmp(&done, Label::kNear);
  __ bind(&one_byte_external);
  // One-byte string.
  __ movzx_b(result, Operand(result, index, times_1, 0));
  __ jmp(&done, Label::kNear);

  // Dispatch on the encoding: one-byte or two-byte.
  Label one_byte;
  __ bind(&seq_string);
  STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
  __ test(result, Immediate(kStringEncodingMask));
  __ j(not_zero, &one_byte, Label::kNear);

  // Two-byte string.
  // Load the two-byte character code into the result register.
  __ movzx_w(result, FieldOperand(string,
                                  index,
                                  times_2,
                                  SeqTwoByteString::kHeaderSize));
  __ jmp(&done, Label::kNear);

  // One-byte string.
  // Load the byte into the result register.
  __ bind(&one_byte);
  __ movzx_b(result, FieldOperand(string,
                                  index,
                                  times_1,
                                  SeqOneByteString::kHeaderSize));
  __ bind(&done);
}


#undef __


CodeAgingHelper::CodeAgingHelper() {
  DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
  CodePatcher patcher(young_sequence_.start(), young_sequence_.length());
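  // The young sequence is the standard frame-building prologue:
  // push ebp; mov ebp, esp; push esi; push edi.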
  patcher.masm()->push(ebp);
  patcher.masm()->mov(ebp, esp);
  patcher.masm()->push(esi);
  patcher.masm()->push(edi);
}


#ifdef DEBUG
bool CodeAgingHelper::IsOld(byte* candidate) const {
  return *candidate == kCallOpcode;
}
#endif


bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
  bool result = isolate->code_aging_helper()->IsYoung(sequence);
  DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
  return result;
}


void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
                               MarkingParity* parity) {
  if (IsYoungSequence(isolate, sequence)) {
    *age = kNoAgeCodeAge;
    *parity = NO_MARKING_PARITY;
  } else {
    sequence++;  // Skip the kCallOpcode byte
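    // sequence now points at the rel32 displacement of the call that replaced
    // the young prologue; the code below recovers the call's absolute target.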
    Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
        Assembler::kCallTargetAddressOffset;
    Code* stub = GetCodeFromTargetAddress(target_address);
    GetCodeAgeAndParity(stub, age, parity);
  }
}


void Code::PatchPlatformCodeAge(Isolate* isolate,
                                byte* sequence,
                                Code::Age age,
                                MarkingParity parity) {
  uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
  if (age == kNoAgeCodeAge) {
    isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
    CpuFeatures::FlushICache(sequence, young_length);
  } else {
    Code* stub = GetCodeAgeStub(isolate, age, parity);
    CodePatcher patcher(sequence, young_length);
    patcher.masm()->call(stub->instruction_start(), RelocInfo::NONE32);
  }
}


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X87