// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if V8_TARGET_ARCH_MIPS

#include "codegen.h"
#include "macro-assembler.h"
#include "simulator-mips.h"

namespace v8 {
namespace internal {


UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
  switch (type) {
    case TranscendentalCache::SIN: return &sin;
    case TranscendentalCache::COS: return &cos;
    case TranscendentalCache::TAN: return &tan;
    case TranscendentalCache::LOG: return &log;
    default: UNIMPLEMENTED();
  }
  return NULL;
}


#define __ masm.


#if defined(USE_SIMULATOR)
byte* fast_exp_mips_machine_code = NULL;
double fast_exp_simulator(double x) {
  return Simulator::current(Isolate::Current())->CallFP(
      fast_exp_mips_machine_code, x, 0);
}
#endif


UnaryMathFunction CreateExpFunction() {
  if (!FLAG_fast_math) return &exp;
  size_t actual_size;
  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return &exp;
  ExternalReference::InitializeMathExpData();

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));

  {
    DoubleRegister input = f12;
    DoubleRegister result = f0;
    DoubleRegister double_scratch1 = f4;
    DoubleRegister double_scratch2 = f6;
    Register temp1 = t0;
    Register temp2 = t1;
    Register temp3 = t2;

    if (!IsMipsSoftFloatABI) {
      // Input value is in f12 anyway, nothing to do.
    } else {
      __ Move(input, a0, a1);
    }
    __ Push(temp3, temp2, temp1);
    MathExpGenerator::EmitMathExp(
        &masm, input, result, double_scratch1, double_scratch2,
        temp1, temp2, temp3);
    __ Pop(temp3, temp2, temp1);
    if (!IsMipsSoftFloatABI) {
      // Result is already in f0, nothing to do.
    } else {
      __ Move(v0, v1, result);
    }
    __ Ret();
  }

  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(!RelocInfo::RequiresRelocation(desc));

  CPU::FlushICache(buffer, actual_size);
  OS::ProtectCode(buffer, actual_size);

#if !defined(USE_SIMULATOR)
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
#else
  fast_exp_mips_machine_code = buffer;
  return &fast_exp_simulator;
#endif
}


#undef __


UnaryMathFunction CreateSqrtFunction() {
  return &sqrt;
}


// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  ASSERT(!masm->has_frame());
  masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  ASSERT(masm->has_frame());
  masm->set_has_frame(false);
}


// -------------------------------------------------------------------------
// Code generators

#define __ ACCESS_MASM(masm)

void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm, AllocationSiteMode mode,
    Label* allocation_memento_found) {
  // ----------- S t a t e -------------
  //  -- a0    : value
  //  -- a1    : key
  //  -- a2    : receiver
  //  -- ra    : return address
  //  -- a3    : target map, scratch for subsequent call
  //  -- t0    : scratch (elements)
  // -----------------------------------
  if (mode == TRACK_ALLOCATION_SITE) {
    ASSERT(allocation_memento_found != NULL);
    __ JumpIfJSArrayHasAllocationMemento(a2, t0, allocation_memento_found);
  }

  // Set transitioned map.
  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
  __ RecordWriteField(a2,
                      HeapObject::kMapOffset,
                      a3,
                      t5,
                      kRAHasNotBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
  // ----------- S t a t e -------------
  //  -- a0    : value
  //  -- a1    : key
  //  -- a2    : receiver
  //  -- ra    : return address
  //  -- a3    : target map, scratch for subsequent call
  //  -- t0    : scratch (elements)
  // -----------------------------------
  Label loop, entry, convert_hole, gc_required, only_change_map, done;

  Register scratch = t6;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(a2, t0, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset));
  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
  __ Branch(&only_change_map, eq, at, Operand(t0));

  __ push(ra);
  __ lw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset));
  // t0: source FixedArray
  // t1: number of elements (smi-tagged)

  // Allocate new FixedDoubleArray.
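  // t1 holds the smi-tagged length (length << 1), so shifting it left by two
  // more bits yields length * kDoubleSize bytes of element storage.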
  __ sll(scratch, t1, 2);
  __ Addu(scratch, scratch, FixedDoubleArray::kHeaderSize);
  __ Allocate(scratch, t2, t3, t5, &gc_required, DOUBLE_ALIGNMENT);
  // t2: destination FixedDoubleArray, not tagged as heap object

  // Set destination FixedDoubleArray's length and map.
  __ LoadRoot(t5, Heap::kFixedDoubleArrayMapRootIndex);
  __ sw(t1, MemOperand(t2, FixedDoubleArray::kLengthOffset));
  __ sw(t5, MemOperand(t2, HeapObject::kMapOffset));

  // Update receiver's map.
  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
  __ RecordWriteField(a2,
                      HeapObject::kMapOffset,
                      a3,
                      t5,
                      kRAHasBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  // Replace receiver's backing store with newly created FixedDoubleArray.
  __ Addu(a3, t2, Operand(kHeapObjectTag));
  __ sw(a3, FieldMemOperand(a2, JSObject::kElementsOffset));
  __ RecordWriteField(a2,
                      JSObject::kElementsOffset,
                      a3,
                      t5,
                      kRAHasBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Prepare for conversion loop.
  __ Addu(a3, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ Addu(t3, t2, Operand(FixedDoubleArray::kHeaderSize));
  __ sll(t2, t1, 2);
  __ Addu(t2, t2, t3);
  __ li(t0, Operand(kHoleNanLower32));
  __ li(t1, Operand(kHoleNanUpper32));
  // t0: kHoleNanLower32
  // t1: kHoleNanUpper32
  // t2: end of destination FixedDoubleArray, not tagged
  // t3: start of FixedDoubleArray element fields, not tagged

  __ Branch(&entry);

  __ bind(&only_change_map);
  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
  __ RecordWriteField(a2,
                      HeapObject::kMapOffset,
                      a3,
                      t5,
                      kRAHasNotBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ Branch(&done);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ pop(ra);
  __ Branch(fail);

  // Convert and copy elements.
  __ bind(&loop);
  __ lw(t5, MemOperand(a3));
  __ Addu(a3, a3, kIntSize);
  // t5: current element
  __ UntagAndJumpIfNotSmi(t5, t5, &convert_hole);

  // Normal smi, convert to double and store.
  __ mtc1(t5, f0);
  __ cvt_d_w(f0, f0);
  __ sdc1(f0, MemOperand(t3));
  __ Addu(t3, t3, kDoubleSize);

  __ Branch(&entry);

  // Hole found, store the-hole NaN.
  __ bind(&convert_hole);
  if (FLAG_debug_code) {
    // Restore a "smi-untagged" heap object.
    __ SmiTag(t5);
    __ Or(t5, t5, Operand(1));
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
    __ Assert(eq, kObjectFoundInSmiOnlyArray, at, Operand(t5));
  }
  __ sw(t0, MemOperand(t3));  // mantissa
  __ sw(t1, MemOperand(t3, kIntSize));  // exponent
  __ Addu(t3, t3, kDoubleSize);

  __ bind(&entry);
  __ Branch(&loop, lt, t3, Operand(t2));

  __ pop(ra);
  __ bind(&done);
}


void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
  // ----------- S t a t e -------------
  //  -- a0    : value
  //  -- a1    : key
  //  -- a2    : receiver
  //  -- ra    : return address
  //  -- a3    : target map, scratch for subsequent call
  //  -- t0    : scratch (elements)
  // -----------------------------------
  Label entry, loop, convert_hole, gc_required, only_change_map;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(a2, t0, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset));
  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
  __ Branch(&only_change_map, eq, at, Operand(t0));

  __ MultiPush(a0.bit() | a1.bit() | a2.bit() | a3.bit() | ra.bit());

  __ lw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset));
  // t0: source FixedArray
  // t1: number of elements (smi-tagged)

  // Allocate new FixedArray.
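  // t1 holds the smi-tagged length (length << 1), so one more left shift
  // yields length * kPointerSize bytes of element storage.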
  __ sll(a0, t1, 1);
  __ Addu(a0, a0, FixedDoubleArray::kHeaderSize);
  __ Allocate(a0, t2, t3, t5, &gc_required, NO_ALLOCATION_FLAGS);
  // t2: destination FixedArray, not tagged as heap object
  // Set destination FixedArray's length and map.
  __ LoadRoot(t5, Heap::kFixedArrayMapRootIndex);
  __ sw(t1, MemOperand(t2, FixedDoubleArray::kLengthOffset));
  __ sw(t5, MemOperand(t2, HeapObject::kMapOffset));

  // Prepare for conversion loop.
  __ Addu(t0, t0, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
  __ Addu(a3, t2, Operand(FixedArray::kHeaderSize));
  __ Addu(t2, t2, Operand(kHeapObjectTag));
  __ sll(t1, t1, 1);
  __ Addu(t1, a3, t1);
  __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
  __ LoadRoot(t5, Heap::kHeapNumberMapRootIndex);
  // Using offset-adjusted addresses.
  // a3: start of destination FixedArray element fields, not tagged
  // t0: start of source FixedDoubleArray element fields, not tagged, +4
  // t1: end of destination FixedArray, not tagged
  // t2: destination FixedArray
  // t3: the-hole pointer
  // t5: heap number map
  __ Branch(&entry);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ MultiPop(a0.bit() | a1.bit() | a2.bit() | a3.bit() | ra.bit());

  __ Branch(fail);

  __ bind(&loop);
  __ lw(a1, MemOperand(t0));
  __ Addu(t0, t0, kDoubleSize);
  // a1: current element's upper 32 bits
  // t0: address of next element's upper 32 bits
  __ Branch(&convert_hole, eq, a1, Operand(kHoleNanUpper32));

  // Non-hole double, copy value into a heap number.
  __ AllocateHeapNumber(a2, a0, t6, t5, &gc_required);
  // a2: new heap number
  __ lw(a0, MemOperand(t0, -12));
  __ sw(a0, FieldMemOperand(a2, HeapNumber::kMantissaOffset));
  __ sw(a1, FieldMemOperand(a2, HeapNumber::kExponentOffset));
  __ mov(a0, a3);
  __ sw(a2, MemOperand(a3));
  __ Addu(a3, a3, kIntSize);
  __ RecordWrite(t2,
                 a0,
                 a2,
                 kRAHasBeenSaved,
                 kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET,
                 OMIT_SMI_CHECK);
  __ Branch(&entry);

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ sw(t3, MemOperand(a3));
  __ Addu(a3, a3, kIntSize);

  __ bind(&entry);
  __ Branch(&loop, lt, a3, Operand(t1));

  __ MultiPop(a2.bit() | a3.bit() | a0.bit() | a1.bit());
  // Replace receiver's backing store with newly created and filled FixedArray.
  __ sw(t2, FieldMemOperand(a2, JSObject::kElementsOffset));
  __ RecordWriteField(a2,
                      JSObject::kElementsOffset,
                      t2,
                      t5,
                      kRAHasBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ pop(ra);

  __ bind(&only_change_map);
  // Update receiver's map.
  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
  __ RecordWriteField(a2,
                      HeapObject::kMapOffset,
                      a3,
                      t5,
                      kRAHasNotBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
  // Fetch the instance type of the receiver into result register.
  __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ And(at, result, Operand(kIsIndirectStringMask));
  __ Branch(&check_sequential, eq, at, Operand(zero_reg));

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ And(at, result, Operand(kSlicedNotConsMask));
  __ Branch(&cons_string, eq, at, Operand(zero_reg));

  // Handle slices.
  Label indirect_string_loaded;
  __ lw(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
  __ lw(string, FieldMemOperand(string, SlicedString::kParentOffset));
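  // The slice offset is a smi; untag it and add it to the index.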
  __ sra(at, result, kSmiTagSize);
  __ Addu(index, index, at);
  __ jmp(&indirect_string_loaded);

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ bind(&cons_string);
  __ lw(result, FieldMemOperand(string, ConsString::kSecondOffset));
  __ LoadRoot(at, Heap::kempty_stringRootIndex);
  __ Branch(call_runtime, ne, result, Operand(at));
  // Get the first of the two strings and load its instance type.
  __ lw(string, FieldMemOperand(string, ConsString::kFirstOffset));

  __ bind(&indirect_string_loaded);
  __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label external_string, check_encoding;
  __ bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ And(at, result, Operand(kStringRepresentationMask));
  __ Branch(&external_string, ne, at, Operand(zero_reg));

  // Prepare sequential strings.
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
  __ Addu(string,
          string,
          SeqTwoByteString::kHeaderSize - kHeapObjectTag);
  __ jmp(&check_encoding);

  // Handle external strings.
  __ bind(&external_string);
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ And(at, result, Operand(kIsIndirectStringMask));
    __ Assert(eq, kExternalStringExpectedButNotFound,
        at, Operand(zero_reg));
  }
  // Rule out short external strings.
  STATIC_CHECK(kShortExternalStringTag != 0);
  __ And(at, result, Operand(kShortExternalStringMask));
  __ Branch(call_runtime, ne, at, Operand(zero_reg));
  __ lw(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));

  Label ascii, done;
  __ bind(&check_encoding);
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ And(at, result, Operand(kStringEncodingMask));
  __ Branch(&ascii, ne, at, Operand(zero_reg));
  // Two-byte string.
  __ sll(at, index, 1);
  __ Addu(at, string, at);
  __ lhu(result, MemOperand(at));
  __ jmp(&done);
  __ bind(&ascii);
  // Ascii string.
  __ Addu(at, string, index);
  __ lbu(result, MemOperand(at));
  __ bind(&done);
}

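// Returns a MemOperand addressing the |index|-th double entry of a constants
// table whose base address is held in |base|.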
static MemOperand ExpConstant(int index, Register base) {
  return MemOperand(base, index * kDoubleSize);
}

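// EmitMathExp computes exp(x) with a table-driven approximation: the input is
// scaled and split so that the high bits become the IEEE 754 exponent, the
// low 11 bits select a precomputed entry from math_exp_log_table, and a short
// polynomial corrects the remaining fraction. The exact meaning of each
// math_exp_constants entry is defined where that table is initialized
// (InitializeMathExpData); the outline given here is inferred from the
// generated instruction sequence.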
void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
                                   DoubleRegister input,
                                   DoubleRegister result,
                                   DoubleRegister double_scratch1,
                                   DoubleRegister double_scratch2,
                                   Register temp1,
                                   Register temp2,
                                   Register temp3) {
  ASSERT(!input.is(result));
  ASSERT(!input.is(double_scratch1));
  ASSERT(!input.is(double_scratch2));
  ASSERT(!result.is(double_scratch1));
  ASSERT(!result.is(double_scratch2));
  ASSERT(!double_scratch1.is(double_scratch2));
  ASSERT(!temp1.is(temp2));
  ASSERT(!temp1.is(temp3));
  ASSERT(!temp2.is(temp3));
  ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);

  Label zero, infinity, done;

  __ li(temp3, Operand(ExternalReference::math_exp_constants(0)));

  __ ldc1(double_scratch1, ExpConstant(0, temp3));
  __ BranchF(&zero, NULL, ge, double_scratch1, input);

  __ ldc1(double_scratch2, ExpConstant(1, temp3));
  __ BranchF(&infinity, NULL, ge, input, double_scratch2);

  __ ldc1(double_scratch1, ExpConstant(3, temp3));
  __ ldc1(result, ExpConstant(4, temp3));
  __ mul_d(double_scratch1, double_scratch1, input);
  __ add_d(double_scratch1, double_scratch1, result);
  __ FmoveLow(temp2, double_scratch1);
  __ sub_d(double_scratch1, double_scratch1, result);
  __ ldc1(result, ExpConstant(6, temp3));
  __ ldc1(double_scratch2, ExpConstant(5, temp3));
  __ mul_d(double_scratch1, double_scratch1, double_scratch2);
  __ sub_d(double_scratch1, double_scratch1, input);
  __ sub_d(result, result, double_scratch1);
  __ mul_d(double_scratch2, double_scratch1, double_scratch1);
  __ mul_d(result, result, double_scratch2);
  __ ldc1(double_scratch2, ExpConstant(7, temp3));
  __ mul_d(result, result, double_scratch2);
  __ sub_d(result, result, double_scratch1);
  // Move 1 into double_scratch2, as math_exp_constants_array[8] == 1.
  ASSERT(*reinterpret_cast<double*>
         (ExternalReference::math_exp_constants(8).address()) == 1);
  __ Move(double_scratch2, 1);
  __ add_d(result, result, double_scratch2);
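  // temp2 holds the low word of the scaled input: its upper bits become the
  // biased exponent (0x3ff is the IEEE 754 double exponent bias) and its low
  // 11 bits index the math_exp_log_table lookup below.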
  __ srl(temp1, temp2, 11);
  __ Ext(temp2, temp2, 0, 11);
  __ Addu(temp1, temp1, Operand(0x3ff));

  // Must not call ExpConstant() after overwriting temp3!
  __ li(temp3, Operand(ExternalReference::math_exp_log_table()));
  __ sll(at, temp2, 3);
  __ Addu(temp3, temp3, Operand(at));
  __ lw(temp2, MemOperand(temp3, 0));
  __ lw(temp3, MemOperand(temp3, kPointerSize));
  // The first word loaded is the lower number register.
  if (temp2.code() < temp3.code()) {
    __ sll(at, temp1, 20);
    __ Or(temp1, temp3, at);
    __ Move(double_scratch1, temp2, temp1);
  } else {
    __ sll(at, temp1, 20);
    __ Or(temp1, temp2, at);
    __ Move(double_scratch1, temp3, temp1);
  }
  __ mul_d(result, result, double_scratch1);
  __ Branch(&done);

  __ bind(&zero);
  __ Move(result, kDoubleRegZero);
  __ Branch(&done);

  __ bind(&infinity);
  __ ldc1(result, ExpConstant(2, temp3));

  __ bind(&done);
}


// nop(CODE_AGE_MARKER_NOP)
static const uint32_t kCodeAgePatchFirstInstruction = 0x00010180;

static byte* GetNoCodeAgeSequence(uint32_t* length) {
  // The sequence of instructions that is patched out for aging code is the
  // following boilerplate stack-building prologue that is found in FUNCTIONS.
  static bool initialized = false;
  static uint32_t sequence[kNoCodeAgeSequenceLength];
  byte* byte_sequence = reinterpret_cast<byte*>(sequence);
  *length = kNoCodeAgeSequenceLength * Assembler::kInstrSize;
  if (!initialized) {
    CodePatcher patcher(byte_sequence, kNoCodeAgeSequenceLength);
    patcher.masm()->Push(ra, fp, cp, a1);
    patcher.masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
    patcher.masm()->Addu(fp, sp,
        Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
    initialized = true;
  }
  return byte_sequence;
}


bool Code::IsYoungSequence(byte* sequence) {
  uint32_t young_length;
  byte* young_sequence = GetNoCodeAgeSequence(&young_length);
  bool result = !memcmp(sequence, young_sequence, young_length);
  ASSERT(result ||
         Memory::uint32_at(sequence) == kCodeAgePatchFirstInstruction);
  return result;
}


void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
                               MarkingParity* parity) {
  if (IsYoungSequence(sequence)) {
    *age = kNoAgeCodeAge;
    *parity = NO_MARKING_PARITY;
  } else {
    Address target_address = Assembler::target_address_at(
        sequence + Assembler::kInstrSize);
    Code* stub = GetCodeFromTargetAddress(target_address);
    GetCodeAgeAndParity(stub, age, parity);
  }
}


void Code::PatchPlatformCodeAge(Isolate* isolate,
                                byte* sequence,
                                Code::Age age,
                                MarkingParity parity) {
  uint32_t young_length;
  byte* young_sequence = GetNoCodeAgeSequence(&young_length);
  if (age == kNoAgeCodeAge) {
    CopyBytes(sequence, young_sequence, young_length);
    CPU::FlushICache(sequence, young_length);
  } else {
    Code* stub = GetCodeAgeStub(isolate, age, parity);
    CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
    // Mark this code sequence for FindPlatformCodeAgeSequence().
    patcher.masm()->nop(Assembler::CODE_AGE_MARKER_NOP);
    // Load the stub address into t9 and call it; GetCodeAgeAndParity()
    // extracts the stub address from this instruction.
    patcher.masm()->li(
        t9,
        Operand(reinterpret_cast<uint32_t>(stub->instruction_start())),
        CONSTANT_SIZE);
    patcher.masm()->nop();  // Prevent jalr to jal optimization.
    patcher.masm()->jalr(t9, a0);
    patcher.masm()->nop();  // Branch delay slot nop.
    patcher.masm()->nop();  // Pad the empty space.
  }
}


#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_MIPS
