// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if V8_TARGET_ARCH_IA32

#include "codegen.h"
#include "heap.h"
#include "macro-assembler.h"

namespace v8 {
namespace internal {


// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

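// These helpers bracket calls that stubs make into the runtime. Note that
// has_frame() is the MacroAssembler's own bookkeeping flag (frame-requiring
// operations assert on it), which is why it is toggled here alongside the
// emitted Enter/LeaveFrame code.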
void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  ASSERT(!masm->has_frame());
  masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  ASSERT(masm->has_frame());
  masm->set_has_frame(false);
}


#define __ masm.


UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
  size_t actual_size;
  // Allocate buffer in executable space.
  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
                                                 &actual_size,
                                                 true));
  if (buffer == NULL) {
    // Fall back to the library function if the buffer cannot be allocated.
    switch (type) {
      case TranscendentalCache::SIN: return &sin;
      case TranscendentalCache::COS: return &cos;
      case TranscendentalCache::TAN: return &tan;
      case TranscendentalCache::LOG: return &log;
      default: UNIMPLEMENTED();
    }
  }

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
  // esp[1 * kPointerSize]: raw double input
  // esp[0 * kPointerSize]: return address
  // Move double input into registers.

  __ push(ebx);
  __ push(edx);
  __ push(edi);
  __ fld_d(Operand(esp, 4 * kPointerSize));
  __ mov(ebx, Operand(esp, 4 * kPointerSize));
  __ mov(edx, Operand(esp, 5 * kPointerSize));
  TranscendentalCacheStub::GenerateOperation(&masm, type);
  // The return value is expected to be on ST(0) of the FPU stack.
  __ pop(edi);
  __ pop(edx);
  __ pop(ebx);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(!RelocInfo::RequiresRelocation(desc));

  CPU::FlushICache(buffer, actual_size);
  OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
}
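
// A minimal usage sketch (hypothetical caller); the generated function has
// the same signature as the libc fallback it replaces:
//   UnaryMathFunction fast_sin =
//       CreateTranscendentalFunction(TranscendentalCache::SIN);
//   double y = fast_sin(0.5);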


UnaryMathFunction CreateExpFunction() {
  if (!CpuFeatures::IsSupported(SSE2)) return &exp;
  if (!FLAG_fast_math) return &exp;
  size_t actual_size;
  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return &exp;
  ExternalReference::InitializeMathExpData();

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
  // esp[1 * kPointerSize]: raw double input
  // esp[0 * kPointerSize]: return address
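  // Note: the ia32 cdecl ABI returns doubles in x87 ST(0), which is why the
  // SSE2 result computed below is spilled to the stack and reloaded with
  // fld_d before Ret().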
  {
    CpuFeatureScope use_sse2(&masm, SSE2);
    XMMRegister input = xmm1;
    XMMRegister result = xmm2;
    __ movsd(input, Operand(esp, 1 * kPointerSize));
    __ push(eax);
    __ push(ebx);

    MathExpGenerator::EmitMathExp(&masm, input, result, xmm0, eax, ebx);

    __ pop(ebx);
    __ pop(eax);
    __ movsd(Operand(esp, 1 * kPointerSize), result);
    __ fld_d(Operand(esp, 1 * kPointerSize));
    __ Ret();
  }

  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(!RelocInfo::RequiresRelocation(desc));

  CPU::FlushICache(buffer, actual_size);
  OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
}


UnaryMathFunction CreateSqrtFunction() {
  size_t actual_size;
  // Allocate buffer in executable space.
  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
                                                 &actual_size,
                                                 true));
  // If SSE2 is not available, use libc's sqrt to ensure consistency, since
  // fullcodegen-generated code calls into the runtime in that case.
  if (buffer == NULL || !CpuFeatures::IsSupported(SSE2)) return &sqrt;
  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
  // esp[1 * kPointerSize]: raw double input
  // esp[0 * kPointerSize]: return address
  // Move double input into registers.
  {
    CpuFeatureScope use_sse2(&masm, SSE2);
    __ movsd(xmm0, Operand(esp, 1 * kPointerSize));
    __ sqrtsd(xmm0, xmm0);
    __ movsd(Operand(esp, 1 * kPointerSize), xmm0);
    // Load result into floating point register as return value.
    __ fld_d(Operand(esp, 1 * kPointerSize));
    __ Ret();
  }

  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(!RelocInfo::RequiresRelocation(desc));

  CPU::FlushICache(buffer, actual_size);
  OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
}


// Helper functions for CreateMemMoveFunction.
#undef __
#define __ ACCESS_MASM(masm)

enum Direction { FORWARD, BACKWARD };
enum Alignment { MOVE_ALIGNED, MOVE_UNALIGNED };

// Expects registers:
// esi - source, aligned if alignment == MOVE_ALIGNED
// edi - destination, always aligned
// ecx - count (copy size in bytes)
// edx - loop count (number of 64 byte chunks)
void MemMoveEmitMainLoop(MacroAssembler* masm,
                         Label* move_last_15,
                         Direction direction,
                         Alignment alignment) {
  Register src = esi;
  Register dst = edi;
  Register count = ecx;
  Register loop_count = edx;
  Label loop, move_last_31, move_last_63;
  __ cmp(loop_count, 0);
  __ j(equal, &move_last_63);
  __ bind(&loop);
  // Main loop. Copy in 64 byte chunks.
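  // movdq(aligned, dst, src) selects movdqa when |aligned| is true and movdqu
  // otherwise; the stores below can always use movdqa because dst is
  // guaranteed 16-byte aligned.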
  if (direction == BACKWARD) __ sub(src, Immediate(0x40));
  __ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0x00));
  __ movdq(alignment == MOVE_ALIGNED, xmm1, Operand(src, 0x10));
  __ movdq(alignment == MOVE_ALIGNED, xmm2, Operand(src, 0x20));
  __ movdq(alignment == MOVE_ALIGNED, xmm3, Operand(src, 0x30));
  if (direction == FORWARD) __ add(src, Immediate(0x40));
  if (direction == BACKWARD) __ sub(dst, Immediate(0x40));
  __ movdqa(Operand(dst, 0x00), xmm0);
  __ movdqa(Operand(dst, 0x10), xmm1);
  __ movdqa(Operand(dst, 0x20), xmm2);
  __ movdqa(Operand(dst, 0x30), xmm3);
  if (direction == FORWARD) __ add(dst, Immediate(0x40));
  __ dec(loop_count);
  __ j(not_zero, &loop);
  // At most 63 bytes left to copy.
  __ bind(&move_last_63);
  __ test(count, Immediate(0x20));
  __ j(zero, &move_last_31);
  if (direction == BACKWARD) __ sub(src, Immediate(0x20));
  __ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0x00));
  __ movdq(alignment == MOVE_ALIGNED, xmm1, Operand(src, 0x10));
  if (direction == FORWARD) __ add(src, Immediate(0x20));
  if (direction == BACKWARD) __ sub(dst, Immediate(0x20));
  __ movdqa(Operand(dst, 0x00), xmm0);
  __ movdqa(Operand(dst, 0x10), xmm1);
  if (direction == FORWARD) __ add(dst, Immediate(0x20));
  // At most 31 bytes left to copy.
  __ bind(&move_last_31);
  __ test(count, Immediate(0x10));
  __ j(zero, move_last_15);
  if (direction == BACKWARD) __ sub(src, Immediate(0x10));
  __ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0));
  if (direction == FORWARD) __ add(src, Immediate(0x10));
  if (direction == BACKWARD) __ sub(dst, Immediate(0x10));
  __ movdqa(Operand(dst, 0), xmm0);
  if (direction == FORWARD) __ add(dst, Immediate(0x10));
}


void MemMoveEmitPopAndReturn(MacroAssembler* masm) {
  __ pop(esi);
  __ pop(edi);
  __ ret(0);
}


#undef __
#define __ masm.


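// Converts an assembler Label into the absolute 32-bit address it will have
// once the generated code is placed at |buffer|. This is only valid because
// the buffer is fixed and never moves; it is what lets the code below emit
// absolute jump tables with dd().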
class LabelConverter {
 public:
  explicit LabelConverter(byte* buffer) : buffer_(buffer) {}
  int32_t address(Label* l) const {
    return reinterpret_cast<int32_t>(buffer_) + l->pos();
  }
 private:
  byte* buffer_;
};


OS::MemMoveFunction CreateMemMoveFunction() {
  size_t actual_size;
  // Allocate buffer in executable space.
  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return NULL;
  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
  LabelConverter conv(buffer);

  // Generated code is put into a fixed, unmovable buffer, and not into
  // the V8 heap. We can't, and don't, refer to any relocatable addresses
  // (e.g. the JavaScript nan-object).

  // 32-bit C declaration function calls pass arguments on stack.

  // Stack layout:
  // esp[12]: Third argument, size.
  // esp[8]: Second argument, source pointer.
  // esp[4]: First argument, destination pointer.
  // esp[0]: return address

  const int kDestinationOffset = 1 * kPointerSize;
  const int kSourceOffset = 2 * kPointerSize;
  const int kSizeOffset = 3 * kPointerSize;

  // When copying up to this many bytes, use special "small" handlers.
  const size_t kSmallCopySize = 8;
  // When copying up to this many bytes, use special "medium" handlers.
  const size_t kMediumCopySize = 63;
  // When non-overlapping region of src and dst is less than this,
  // use a more careful implementation (slightly slower).
  const size_t kMinMoveDistance = 16;
  // Note that these values are dictated by the implementation below,
  // do not just change them and hope things will work!

  int stack_offset = 0;  // Update if we change the stack height.

  Label backward, backward_much_overlap;
  Label forward_much_overlap, small_size, medium_size, pop_and_return;
  __ push(edi);
  __ push(esi);
  stack_offset += 2 * kPointerSize;
  Register dst = edi;
  Register src = esi;
  Register count = ecx;
  Register loop_count = edx;
  __ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
  __ mov(src, Operand(esp, stack_offset + kSourceOffset));
  __ mov(count, Operand(esp, stack_offset + kSizeOffset));

  __ cmp(dst, src);
  __ j(equal, &pop_and_return);

  if (CpuFeatures::IsSupported(SSE2)) {
    CpuFeatureScope sse2_scope(&masm, SSE2);
    __ prefetch(Operand(src, 0), 1);
    __ cmp(count, kSmallCopySize);
    __ j(below_equal, &small_size);
    __ cmp(count, kMediumCopySize);
    __ j(below_equal, &medium_size);
    __ cmp(dst, src);
    __ j(above, &backward);

    {
      // |dst| is a lower address than |src|. Copy front-to-back.
      Label unaligned_source, move_last_15, skip_last_move;
      __ mov(eax, src);
      __ sub(eax, dst);
      __ cmp(eax, kMinMoveDistance);
      __ j(below, &forward_much_overlap);
      // Copy first 16 bytes.
      __ movdqu(xmm0, Operand(src, 0));
      __ movdqu(Operand(dst, 0), xmm0);
      // Determine distance to alignment: 16 - (dst & 0xF).
      __ mov(edx, dst);
      __ and_(edx, 0xF);
      __ neg(edx);
      __ add(edx, Immediate(16));
      __ add(dst, edx);
      __ add(src, edx);
      __ sub(count, edx);
      // dst is now aligned. Main copy loop.
      __ mov(loop_count, count);
      __ shr(loop_count, 6);
      // Check if src is also aligned.
      __ test(src, Immediate(0xF));
      __ j(not_zero, &unaligned_source);
      // Copy loop for aligned source and destination.
      MemMoveEmitMainLoop(&masm, &move_last_15, FORWARD, MOVE_ALIGNED);
      // At most 15 bytes to copy. Copy 16 bytes at end of string.
      __ bind(&move_last_15);
      __ and_(count, 0xF);
      __ j(zero, &skip_last_move, Label::kNear);
      __ movdqu(xmm0, Operand(src, count, times_1, -0x10));
      __ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
      __ bind(&skip_last_move);
      MemMoveEmitPopAndReturn(&masm);

      // Copy loop for unaligned source and aligned destination.
      __ bind(&unaligned_source);
      MemMoveEmitMainLoop(&masm, &move_last_15, FORWARD, MOVE_UNALIGNED);
      __ jmp(&move_last_15);

      // Less than kMinMoveDistance offset between dst and src.
      Label loop_until_aligned, last_15_much_overlap;
      __ bind(&loop_until_aligned);
      __ mov_b(eax, Operand(src, 0));
      __ inc(src);
      __ mov_b(Operand(dst, 0), eax);
      __ inc(dst);
      __ dec(count);
      __ bind(&forward_much_overlap);  // Entry point into this block.
      __ test(dst, Immediate(0xF));
      __ j(not_zero, &loop_until_aligned);
      // dst is now aligned, src can't be. Main copy loop.
      __ mov(loop_count, count);
      __ shr(loop_count, 6);
      MemMoveEmitMainLoop(&masm, &last_15_much_overlap,
                          FORWARD, MOVE_UNALIGNED);
      __ bind(&last_15_much_overlap);
      __ and_(count, 0xF);
      __ j(zero, &pop_and_return);
      __ cmp(count, kSmallCopySize);
      __ j(below_equal, &small_size);
      __ jmp(&medium_size);
    }

    {
      // |dst| is a higher address than |src|. Copy backwards.
      Label unaligned_source, move_first_15, skip_last_move;
      __ bind(&backward);
      // |dst| and |src| always point to the end of what's left to copy.
      __ add(dst, count);
      __ add(src, count);
      __ mov(eax, dst);
      __ sub(eax, src);
      __ cmp(eax, kMinMoveDistance);
      __ j(below, &backward_much_overlap);
      // Copy last 16 bytes.
      __ movdqu(xmm0, Operand(src, -0x10));
      __ movdqu(Operand(dst, -0x10), xmm0);
      // Find distance to alignment: dst & 0xF
      __ mov(edx, dst);
      __ and_(edx, 0xF);
      __ sub(dst, edx);
      __ sub(src, edx);
      __ sub(count, edx);
      // dst is now aligned. Main copy loop.
      __ mov(loop_count, count);
      __ shr(loop_count, 6);
      // Check if src is also aligned.
      __ test(src, Immediate(0xF));
      __ j(not_zero, &unaligned_source);
      // Copy loop for aligned source and destination.
      MemMoveEmitMainLoop(&masm, &move_first_15, BACKWARD, MOVE_ALIGNED);
      // At most 15 bytes to copy. Copy 16 bytes at beginning of string.
      __ bind(&move_first_15);
      __ and_(count, 0xF);
      __ j(zero, &skip_last_move, Label::kNear);
      __ sub(src, count);
      __ sub(dst, count);
      __ movdqu(xmm0, Operand(src, 0));
      __ movdqu(Operand(dst, 0), xmm0);
      __ bind(&skip_last_move);
      MemMoveEmitPopAndReturn(&masm);

      // Copy loop for unaligned source and aligned destination.
      __ bind(&unaligned_source);
      MemMoveEmitMainLoop(&masm, &move_first_15, BACKWARD, MOVE_UNALIGNED);
      __ jmp(&move_first_15);

      // Less than kMinMoveDistance offset between dst and src.
      Label loop_until_aligned, first_15_much_overlap;
      __ bind(&loop_until_aligned);
      __ dec(src);
      __ dec(dst);
      __ mov_b(eax, Operand(src, 0));
      __ mov_b(Operand(dst, 0), eax);
      __ dec(count);
      __ bind(&backward_much_overlap);  // Entry point into this block.
      __ test(dst, Immediate(0xF));
      __ j(not_zero, &loop_until_aligned);
      // dst is now aligned, src can't be. Main copy loop.
      __ mov(loop_count, count);
      __ shr(loop_count, 6);
      MemMoveEmitMainLoop(&masm, &first_15_much_overlap,
                          BACKWARD, MOVE_UNALIGNED);
      __ bind(&first_15_much_overlap);
      __ and_(count, 0xF);
      __ j(zero, &pop_and_return);
      // Small/medium handlers expect dst/src to point to the beginning.
      __ sub(dst, count);
      __ sub(src, count);
      __ cmp(count, kSmallCopySize);
      __ j(below_equal, &small_size);
      __ jmp(&medium_size);
    }
    {
      // Special handlers for 9 <= copy_size < 64. No assumptions about
      // alignment or move distance, so all reads must be unaligned and
      // must happen before any writes.
      Label medium_handlers, f9_16, f17_32, f33_48, f49_63;

      __ bind(&f9_16);
      __ movsd(xmm0, Operand(src, 0));
      __ movsd(xmm1, Operand(src, count, times_1, -8));
      __ movsd(Operand(dst, 0), xmm0);
      __ movsd(Operand(dst, count, times_1, -8), xmm1);
      MemMoveEmitPopAndReturn(&masm);

      __ bind(&f17_32);
      __ movdqu(xmm0, Operand(src, 0));
      __ movdqu(xmm1, Operand(src, count, times_1, -0x10));
      __ movdqu(Operand(dst, 0x00), xmm0);
      __ movdqu(Operand(dst, count, times_1, -0x10), xmm1);
      MemMoveEmitPopAndReturn(&masm);

      __ bind(&f33_48);
      __ movdqu(xmm0, Operand(src, 0x00));
      __ movdqu(xmm1, Operand(src, 0x10));
      __ movdqu(xmm2, Operand(src, count, times_1, -0x10));
      __ movdqu(Operand(dst, 0x00), xmm0);
      __ movdqu(Operand(dst, 0x10), xmm1);
      __ movdqu(Operand(dst, count, times_1, -0x10), xmm2);
      MemMoveEmitPopAndReturn(&masm);

      __ bind(&f49_63);
      __ movdqu(xmm0, Operand(src, 0x00));
      __ movdqu(xmm1, Operand(src, 0x10));
      __ movdqu(xmm2, Operand(src, 0x20));
      __ movdqu(xmm3, Operand(src, count, times_1, -0x10));
      __ movdqu(Operand(dst, 0x00), xmm0);
      __ movdqu(Operand(dst, 0x10), xmm1);
      __ movdqu(Operand(dst, 0x20), xmm2);
      __ movdqu(Operand(dst, count, times_1, -0x10), xmm3);
      MemMoveEmitPopAndReturn(&masm);

      __ bind(&medium_handlers);
      __ dd(conv.address(&f9_16));
      __ dd(conv.address(&f17_32));
      __ dd(conv.address(&f33_48));
      __ dd(conv.address(&f49_63));

      __ bind(&medium_size);  // Entry point into this block.
      __ mov(eax, count);
      __ dec(eax);
      __ shr(eax, 4);
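      // (count - 1) >> 4 maps 9..16 -> 0, 17..32 -> 1, 33..48 -> 2 and
      // 49..63 -> 3, one slot per medium_handlers entry. E.g. count == 40
      // gives (40 - 1) >> 4 == 2, dispatching to f33_48.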
      if (FLAG_debug_code) {
        Label ok;
        __ cmp(eax, 3);
        __ j(below_equal, &ok);
        __ int3();
        __ bind(&ok);
      }
      __ mov(eax, Operand(eax, times_4, conv.address(&medium_handlers)));
      __ jmp(eax);
    }
    {
      // Specialized copiers for copy_size <= 8 bytes.
      Label small_handlers, f0, f1, f2, f3, f4, f5_8;
      __ bind(&f0);
      MemMoveEmitPopAndReturn(&masm);

      __ bind(&f1);
      __ mov_b(eax, Operand(src, 0));
      __ mov_b(Operand(dst, 0), eax);
      MemMoveEmitPopAndReturn(&masm);

      __ bind(&f2);
      __ mov_w(eax, Operand(src, 0));
      __ mov_w(Operand(dst, 0), eax);
      MemMoveEmitPopAndReturn(&masm);

      __ bind(&f3);
      __ mov_w(eax, Operand(src, 0));
      __ mov_b(edx, Operand(src, 2));
      __ mov_w(Operand(dst, 0), eax);
      __ mov_b(Operand(dst, 2), edx);
      MemMoveEmitPopAndReturn(&masm);

      __ bind(&f4);
      __ mov(eax, Operand(src, 0));
      __ mov(Operand(dst, 0), eax);
      MemMoveEmitPopAndReturn(&masm);

      __ bind(&f5_8);
      __ mov(eax, Operand(src, 0));
      __ mov(edx, Operand(src, count, times_1, -4));
      __ mov(Operand(dst, 0), eax);
      __ mov(Operand(dst, count, times_1, -4), edx);
      MemMoveEmitPopAndReturn(&masm);

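      // Jump table indexed directly by count (0..8). Sizes 5 through 8 all
      // share the f5_8 handler, hence its four repeated entries.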
      __ bind(&small_handlers);
      __ dd(conv.address(&f0));
      __ dd(conv.address(&f1));
      __ dd(conv.address(&f2));
      __ dd(conv.address(&f3));
      __ dd(conv.address(&f4));
      __ dd(conv.address(&f5_8));
      __ dd(conv.address(&f5_8));
      __ dd(conv.address(&f5_8));
      __ dd(conv.address(&f5_8));

      __ bind(&small_size);  // Entry point into this block.
      if (FLAG_debug_code) {
        Label ok;
        __ cmp(count, 8);
        __ j(below_equal, &ok);
        __ int3();
        __ bind(&ok);
      }
      __ mov(eax, Operand(count, times_4, conv.address(&small_handlers)));
      __ jmp(eax);
    }
  } else {
    // No SSE2.
    Label forward;
    __ cmp(count, 0);
    __ j(equal, &pop_and_return);
    __ cmp(dst, src);
    __ j(above, &backward);
    __ jmp(&forward);
    {
      // Simple forward copier.
      Label forward_loop_1byte, forward_loop_4byte;
      __ bind(&forward_loop_4byte);
      __ mov(eax, Operand(src, 0));
      __ sub(count, Immediate(4));
      __ add(src, Immediate(4));
      __ mov(Operand(dst, 0), eax);
      __ add(dst, Immediate(4));
      __ bind(&forward);  // Entry point.
      __ cmp(count, 3);
      __ j(above, &forward_loop_4byte);
      __ bind(&forward_loop_1byte);
      __ cmp(count, 0);
      __ j(below_equal, &pop_and_return);
      __ mov_b(eax, Operand(src, 0));
      __ dec(count);
      __ inc(src);
      __ mov_b(Operand(dst, 0), eax);
      __ inc(dst);
      __ jmp(&forward_loop_1byte);
    }
    {
      // Simple backward copier.
      Label backward_loop_1byte, backward_loop_4byte, entry_shortcut;
      __ bind(&backward);
      __ add(src, count);
      __ add(dst, count);
      __ cmp(count, 3);
      __ j(below_equal, &entry_shortcut);

      __ bind(&backward_loop_4byte);
      __ sub(src, Immediate(4));
      __ sub(count, Immediate(4));
      __ mov(eax, Operand(src, 0));
      __ sub(dst, Immediate(4));
      __ mov(Operand(dst, 0), eax);
      __ cmp(count, 3);
      __ j(above, &backward_loop_4byte);
      __ bind(&backward_loop_1byte);
      __ cmp(count, 0);
      __ j(below_equal, &pop_and_return);
      __ bind(&entry_shortcut);
      __ dec(src);
      __ dec(count);
      __ mov_b(eax, Operand(src, 0));
      __ dec(dst);
      __ mov_b(Operand(dst, 0), eax);
      __ jmp(&backward_loop_1byte);
    }
  }

  __ bind(&pop_and_return);
  MemMoveEmitPopAndReturn(&masm);

  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(!RelocInfo::RequiresRelocation(desc));
  CPU::FlushICache(buffer, actual_size);
  OS::ProtectCode(buffer, actual_size);
  // TODO(jkummerow): It would be nice to register this code creation event
  // with the PROFILE / GDBJIT system.
  return FUNCTION_CAST<OS::MemMoveFunction>(buffer);
}


#undef __

// -------------------------------------------------------------------------
// Code generators

#define __ ACCESS_MASM(masm)


void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm, AllocationSiteMode mode,
    Label* allocation_memento_found) {
  // ----------- S t a t e -------------
  //  -- eax    : value
  //  -- ebx    : target map
  //  -- ecx    : key
  //  -- edx    : receiver
  //  -- esp[0] : return address
  // -----------------------------------
  if (mode == TRACK_ALLOCATION_SITE) {
    ASSERT(allocation_memento_found != NULL);
    __ JumpIfJSArrayHasAllocationMemento(edx, edi, allocation_memento_found);
  }

  // Set transitioned map.
  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
  __ RecordWriteField(edx,
                      HeapObject::kMapOffset,
                      ebx,
                      edi,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
  // ----------- S t a t e -------------
  //  -- eax    : value
  //  -- ebx    : target map
  //  -- ecx    : key
  //  -- edx    : receiver
  //  -- esp[0] : return address
  // -----------------------------------
  Label loop, entry, convert_hole, gc_required, only_change_map;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(edx, edi, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
  __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
  __ j(equal, &only_change_map);

  __ push(eax);
  __ push(ebx);

  __ mov(edi, FieldOperand(edi, FixedArray::kLengthOffset));

  // Allocate new FixedDoubleArray.
  // edx: receiver
  // edi: length of source FixedArray (smi-tagged)
  AllocationFlags flags =
      static_cast<AllocationFlags>(TAG_OBJECT | DOUBLE_ALIGNMENT);
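  // edi holds the smi-tagged length; REGISTER_VALUE_IS_SMI makes Allocate
  // compensate for the tag bit when scaling by times_8 (8 bytes per double).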
  __ Allocate(FixedDoubleArray::kHeaderSize, times_8, edi,
              REGISTER_VALUE_IS_SMI, eax, ebx, no_reg, &gc_required, flags);

  // eax: destination FixedDoubleArray
  // edi: number of elements
  // edx: receiver
  __ mov(FieldOperand(eax, HeapObject::kMapOffset),
         Immediate(masm->isolate()->factory()->fixed_double_array_map()));
  __ mov(FieldOperand(eax, FixedDoubleArray::kLengthOffset), edi);
  __ mov(esi, FieldOperand(edx, JSObject::kElementsOffset));
  // Replace receiver's backing store with newly created FixedDoubleArray.
  __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
  __ mov(ebx, eax);
  __ RecordWriteField(edx,
                      JSObject::kElementsOffset,
                      ebx,
                      edi,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  __ mov(edi, FieldOperand(esi, FixedArray::kLengthOffset));

  // Prepare for conversion loop.
  ExternalReference canonical_the_hole_nan_reference =
      ExternalReference::address_of_the_hole_nan();
  XMMRegister the_hole_nan = xmm1;
  if (CpuFeatures::IsSupported(SSE2)) {
    CpuFeatureScope use_sse2(masm, SSE2);
    __ movsd(the_hole_nan,
              Operand::StaticVariable(canonical_the_hole_nan_reference));
  }
  __ jmp(&entry);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  // Restore registers before jumping into runtime.
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
  __ pop(ebx);
  __ pop(eax);
  __ jmp(fail);

  // Convert and copy elements
  // esi: source FixedArray
  __ bind(&loop);
  __ mov(ebx, FieldOperand(esi, edi, times_2, FixedArray::kHeaderSize));
  // ebx: current element from source
  // edi: index of current element
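  // edi is smi-tagged (value * 2), so times_2 addresses pointer-sized source
  // slots here and times_4 (below) addresses 8-byte double slots.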
  __ JumpIfNotSmi(ebx, &convert_hole);

  // Normal smi, convert it to double and store.
  __ SmiUntag(ebx);
  if (CpuFeatures::IsSupported(SSE2)) {
    CpuFeatureScope fscope(masm, SSE2);
    __ Cvtsi2sd(xmm0, ebx);
    __ movsd(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
              xmm0);
  } else {
    __ push(ebx);
    __ fild_s(Operand(esp, 0));
    __ pop(ebx);
    __ fstp_d(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize));
  }
  __ jmp(&entry);

  // Found hole, store hole_nan_as_double instead.
  __ bind(&convert_hole);

  if (FLAG_debug_code) {
    __ cmp(ebx, masm->isolate()->factory()->the_hole_value());
    __ Assert(equal, kObjectFoundInSmiOnlyArray);
  }

  if (CpuFeatures::IsSupported(SSE2)) {
    CpuFeatureScope use_sse2(masm, SSE2);
    __ movsd(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
              the_hole_nan);
  } else {
    __ fld_d(Operand::StaticVariable(canonical_the_hole_nan_reference));
    __ fstp_d(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize));
  }

  __ bind(&entry);
  __ sub(edi, Immediate(Smi::FromInt(1)));
  __ j(not_sign, &loop);

  __ pop(ebx);
  __ pop(eax);

  // Restore esi.
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));

  __ bind(&only_change_map);
  // eax: value
  // ebx: target map
  // Set transitioned map.
  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
  __ RecordWriteField(edx,
                      HeapObject::kMapOffset,
                      ebx,
                      edi,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
  // ----------- S t a t e -------------
  //  -- eax    : value
  //  -- ebx    : target map
  //  -- ecx    : key
  //  -- edx    : receiver
  //  -- esp[0] : return address
  // -----------------------------------
  Label loop, entry, convert_hole, gc_required, only_change_map, success;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(edx, edi, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
  __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
  __ j(equal, &only_change_map);

  __ push(eax);
  __ push(edx);
  __ push(ebx);

  __ mov(ebx, FieldOperand(edi, FixedDoubleArray::kLengthOffset));

  // Allocate new FixedArray.
  // ebx: length of source FixedDoubleArray (smi-tagged)
  __ lea(edi, Operand(ebx, times_2, FixedArray::kHeaderSize));
  __ Allocate(edi, eax, esi, no_reg, &gc_required, TAG_OBJECT);

  // eax: destination FixedArray
  // ebx: number of elements
  __ mov(FieldOperand(eax, HeapObject::kMapOffset),
         Immediate(masm->isolate()->factory()->fixed_array_map()));
  __ mov(FieldOperand(eax, FixedArray::kLengthOffset), ebx);
  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));

  __ jmp(&entry);

  // ebx: target map
  // edx: receiver
  // Set transitioned map.
  __ bind(&only_change_map);
  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
  __ RecordWriteField(edx,
                      HeapObject::kMapOffset,
                      ebx,
                      edi,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ jmp(&success);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
  __ pop(ebx);
  __ pop(edx);
  __ pop(eax);
  __ jmp(fail);

  // Box doubles into heap numbers.
  // edi: source FixedDoubleArray
  // eax: destination FixedArray
  __ bind(&loop);
  // ebx: index of current element (smi-tagged)
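  // The hole is stored as a NaN with a distinguished bit pattern, so
  // comparing only the upper 32 bits of the element against kHoleNanUpper32
  // is enough to identify it.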
  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
  __ cmp(FieldOperand(edi, ebx, times_4, offset), Immediate(kHoleNanUpper32));
  __ j(equal, &convert_hole);

  // Non-hole double, copy value into a heap number.
  __ AllocateHeapNumber(edx, esi, no_reg, &gc_required);
  // edx: new heap number
  if (CpuFeatures::IsSupported(SSE2)) {
    CpuFeatureScope fscope(masm, SSE2);
    __ movsd(xmm0,
              FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
    __ movsd(FieldOperand(edx, HeapNumber::kValueOffset), xmm0);
  } else {
    __ mov(esi, FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
    __ mov(FieldOperand(edx, HeapNumber::kValueOffset), esi);
    __ mov(esi, FieldOperand(edi, ebx, times_4, offset));
    __ mov(FieldOperand(edx, HeapNumber::kValueOffset + kPointerSize), esi);
  }
  __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize), edx);
  __ mov(esi, ebx);
  __ RecordWriteArray(eax,
                      edx,
                      esi,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ jmp(&entry, Label::kNear);

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize),
         masm->isolate()->factory()->the_hole_value());

  __ bind(&entry);
  __ sub(ebx, Immediate(Smi::FromInt(1)));
  __ j(not_sign, &loop);

  __ pop(ebx);
  __ pop(edx);
  // ebx: target map
  // edx: receiver
  // Set transitioned map.
  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
  __ RecordWriteField(edx,
                      HeapObject::kMapOffset,
                      ebx,
                      edi,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  // Replace receiver's backing store with newly created and filled FixedArray.
  __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
  __ RecordWriteField(edx,
                      JSObject::kElementsOffset,
                      eax,
                      edi,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Restore registers.
  __ pop(eax);
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));

  __ bind(&success);
}


void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Factory* factory,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
  // Fetch the instance type of the receiver into result register.
  __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
  __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ test(result, Immediate(kIsIndirectStringMask));
  __ j(zero, &check_sequential, Label::kNear);

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ test(result, Immediate(kSlicedNotConsMask));
  __ j(zero, &cons_string, Label::kNear);

  // Handle slices.
  Label indirect_string_loaded;
  __ mov(result, FieldOperand(string, SlicedString::kOffsetOffset));
  __ SmiUntag(result);
  __ add(index, result);
  __ mov(string, FieldOperand(string, SlicedString::kParentOffset));
  __ jmp(&indirect_string_loaded, Label::kNear);

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ bind(&cons_string);
  __ cmp(FieldOperand(string, ConsString::kSecondOffset),
         Immediate(factory->empty_string()));
  __ j(not_equal, call_runtime);
  __ mov(string, FieldOperand(string, ConsString::kFirstOffset));

  __ bind(&indirect_string_loaded);
  __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
  __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label seq_string;
  __ bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ test(result, Immediate(kStringRepresentationMask));
  __ j(zero, &seq_string, Label::kNear);

  // Handle external strings.
  Label ascii_external, done;
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ test(result, Immediate(kIsIndirectStringMask));
    __ Assert(zero, kExternalStringExpectedButNotFound);
  }
  // Rule out short external strings.
  STATIC_CHECK(kShortExternalStringTag != 0);
  __ test_b(result, kShortExternalStringMask);
  __ j(not_zero, call_runtime);
  // Check encoding.
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ test_b(result, kStringEncodingMask);
  __ mov(result, FieldOperand(string, ExternalString::kResourceDataOffset));
  __ j(not_equal, &ascii_external, Label::kNear);
  // Two-byte string.
  __ movzx_w(result, Operand(result, index, times_2, 0));
  __ jmp(&done, Label::kNear);
  __ bind(&ascii_external);
  // Ascii string.
  __ movzx_b(result, Operand(result, index, times_1, 0));
  __ jmp(&done, Label::kNear);

  // Dispatch on the encoding: ASCII or two-byte.
  Label ascii;
  __ bind(&seq_string);
  STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
  __ test(result, Immediate(kStringEncodingMask));
  __ j(not_zero, &ascii, Label::kNear);

  // Two-byte string.
  // Load the two-byte character code into the result register.
  __ movzx_w(result, FieldOperand(string,
                                  index,
                                  times_2,
                                  SeqTwoByteString::kHeaderSize));
  __ jmp(&done, Label::kNear);

  // Ascii string.
  // Load the byte into the result register.
  __ bind(&ascii);
  __ movzx_b(result, FieldOperand(string,
                                  index,
                                  times_1,
                                  SeqOneByteString::kHeaderSize));
  __ bind(&done);
}


static Operand ExpConstant(int index) {
  return Operand::StaticVariable(ExternalReference::math_exp_constants(index));
}


void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
                                   XMMRegister input,
                                   XMMRegister result,
                                   XMMRegister double_scratch,
                                   Register temp1,
                                   Register temp2) {
  ASSERT(!input.is(double_scratch));
  ASSERT(!input.is(result));
  ASSERT(!result.is(double_scratch));
  ASSERT(!temp1.is(temp2));
  ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
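
  // Overview of the emitted sequence: inputs below ExpConstant(0) return 0
  // and inputs above ExpConstant(1) return ExpConstant(2) (the under- and
  // overflow clamps). Otherwise the scaled input is split into an index into
  // the 2048-entry math_exp_log_table (low 11 bits of temp2) and a
  // power-of-two exponent rebuilt in temp1; their product is then corrected
  // by a short polynomial accumulated in |result|.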

  Label done;

  __ movsd(double_scratch, ExpConstant(0));
  __ xorpd(result, result);
  __ ucomisd(double_scratch, input);
  __ j(above_equal, &done);
  __ ucomisd(input, ExpConstant(1));
  __ movsd(result, ExpConstant(2));
  __ j(above_equal, &done);
  __ movsd(double_scratch, ExpConstant(3));
  __ movsd(result, ExpConstant(4));
  __ mulsd(double_scratch, input);
  __ addsd(double_scratch, result);
  __ movd(temp2, double_scratch);
  __ subsd(double_scratch, result);
  __ movsd(result, ExpConstant(6));
  __ mulsd(double_scratch, ExpConstant(5));
  __ subsd(double_scratch, input);
  __ subsd(result, double_scratch);
  __ movsd(input, double_scratch);
  __ mulsd(input, double_scratch);
  __ mulsd(result, input);
  __ mov(temp1, temp2);
  __ mulsd(result, ExpConstant(7));
  __ subsd(result, double_scratch);
  __ add(temp1, Immediate(0x1ff800));
  __ addsd(result, ExpConstant(8));
  __ and_(temp2, Immediate(0x7ff));
  __ shr(temp1, 11);
  __ shl(temp1, 20);
  __ movd(input, temp1);
  __ pshufd(input, input, static_cast<uint8_t>(0xe1));  // Order: 11 10 00 01
  __ movsd(double_scratch, Operand::StaticArray(
      temp2, times_8, ExternalReference::math_exp_log_table()));
  __ orps(input, double_scratch);
  __ mulsd(result, input);
  __ bind(&done);
}

#undef __


static byte* GetNoCodeAgeSequence(uint32_t* length) {
  static bool initialized = false;
  static byte sequence[kNoCodeAgeSequenceLength];
  *length = kNoCodeAgeSequenceLength;
  if (!initialized) {
    // The sequence of instructions that is patched out for aging code is the
    // following boilerplate stack-building prologue that is found both in
    // FUNCTION and OPTIMIZED_FUNCTION code:
    CodePatcher patcher(sequence, kNoCodeAgeSequenceLength);
    patcher.masm()->push(ebp);
    patcher.masm()->mov(ebp, esp);
    patcher.masm()->push(esi);
    patcher.masm()->push(edi);
    initialized = true;
  }
  return sequence;
}


bool Code::IsYoungSequence(byte* sequence) {
  uint32_t young_length;
  byte* young_sequence = GetNoCodeAgeSequence(&young_length);
  bool result = (!memcmp(sequence, young_sequence, young_length));
  ASSERT(result || *sequence == kCallOpcode);
  return result;
}


void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
                               MarkingParity* parity) {
  if (IsYoungSequence(sequence)) {
    *age = kNoAgeCodeAge;
    *parity = NO_MARKING_PARITY;
  } else {
    sequence++;  // Skip the kCallOpcode byte
    Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
        Assembler::kCallTargetAddressOffset;
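    // The rel32 displacement is relative to the end of the call instruction;
    // adding Assembler::kCallTargetAddressOffset (the size of the rel32
    // operand) accounts for that.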
    Code* stub = GetCodeFromTargetAddress(target_address);
    GetCodeAgeAndParity(stub, age, parity);
  }
}


void Code::PatchPlatformCodeAge(Isolate* isolate,
                                byte* sequence,
                                Code::Age age,
                                MarkingParity parity) {
  uint32_t young_length;
  byte* young_sequence = GetNoCodeAgeSequence(&young_length);
  if (age == kNoAgeCodeAge) {
    CopyBytes(sequence, young_sequence, young_length);
    CPU::FlushICache(sequence, young_length);
  } else {
    Code* stub = GetCodeAgeStub(isolate, age, parity);
    CodePatcher patcher(sequence, young_length);
    patcher.masm()->call(stub->instruction_start(), RelocInfo::NONE32);
  }
}


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_IA32